wip
system/loggerd/tests/__init__.py (new file, 0 lines)
system/loggerd/tests/loggerd_tests_common.py (new file, 91 lines)
@@ -0,0 +1,91 @@
import os
import random
import unittest
from pathlib import Path

import openpilot.system.loggerd.deleter as deleter
import openpilot.system.loggerd.uploader as uploader
from openpilot.common.params import Params
from openpilot.system.hardware.hw import Paths
from openpilot.system.loggerd.xattr_cache import setxattr


def create_random_file(file_path: Path, size_mb: float, lock: bool = False, upload_xattr: bytes | None = None) -> None:
  file_path.parent.mkdir(parents=True, exist_ok=True)

  if lock:
    lock_path = str(file_path) + ".lock"
    os.close(os.open(lock_path, os.O_CREAT | os.O_EXCL))

  chunks = 128
  chunk_bytes = int(size_mb * 1024 * 1024 / chunks)
  data = os.urandom(chunk_bytes)

  with open(file_path, "wb") as f:
    for _ in range(chunks):
      f.write(data)

  if upload_xattr is not None:
    setxattr(str(file_path), uploader.UPLOAD_ATTR_NAME, upload_xattr)


class MockResponse:
  def __init__(self, text, status_code):
    self.text = text
    self.status_code = status_code


class MockApi:
  def __init__(self, dongle_id):
    pass

  def get(self, *args, **kwargs):
    return MockResponse('{"url": "http://localhost/does/not/exist", "headers": {}}', 200)

  def get_token(self):
    return "fake-token"


class MockApiIgnore:
  def __init__(self, dongle_id):
    pass

  def get(self, *args, **kwargs):
    return MockResponse('', 412)

  def get_token(self):
    return "fake-token"


class UploaderTestCase(unittest.TestCase):
  f_type = "UNKNOWN"

  root: Path
  seg_num: int
  seg_format: str
  seg_format2: str
  seg_dir: str

  def set_ignore(self):
    uploader.Api = MockApiIgnore

  def setUp(self):
    uploader.Api = MockApi
    uploader.fake_upload = True
    uploader.force_wifi = True
    uploader.allow_sleep = False
    self.seg_num = random.randint(1, 300)
    self.seg_format = "00000004--0ac3964c96--{}"
    self.seg_format2 = "00000005--4c4e99b08b--{}"
    self.seg_dir = self.seg_format.format(self.seg_num)

    self.params = Params()
    self.params.put("IsOffroad", "1")
    self.params.put("DongleId", "0000000000000000")

  def make_file_with_data(self, f_dir: str, fn: str, size_mb: float = .1, lock: bool = False,
                          upload_xattr: bytes | None = None, preserve_xattr: bytes | None = None) -> Path:
    file_path = Path(Paths.log_root()) / f_dir / fn
    create_random_file(file_path, size_mb, lock, upload_xattr)

    if preserve_xattr is not None:
      setxattr(str(file_path.parent), deleter.PRESERVE_ATTR_NAME, preserve_xattr)

    return file_path
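
For orientation, here is a minimal sketch of how these fixtures are meant to be driven (the test name is hypothetical; it assumes an openpilot checkout where Paths.log_root() is writable, as in CI):

    import unittest

    from openpilot.system.loggerd.tests.loggerd_tests_common import UploaderTestCase


    class TestExample(UploaderTestCase):  # hypothetical consumer of the fixtures above
      def test_make_file(self):
        # ~0.1 MB of random data lands in <log_root>/<seg_dir>/qlog
        f_path = self.make_file_with_data(self.seg_dir, "qlog")
        self.assertTrue(f_path.exists())


    if __name__ == "__main__":
      unittest.main()

setUp has already swapped uploader.Api for the mocks above, so subclasses never hit the network.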
system/loggerd/tests/test_deleter.py (new file, 123 lines)
@@ -0,0 +1,123 @@
#!/usr/bin/env python3
import time
import threading
import unittest
from collections import namedtuple
from pathlib import Path
from collections.abc import Sequence

import openpilot.system.loggerd.deleter as deleter
from openpilot.common.timeout import Timeout, TimeoutException
from openpilot.system.loggerd.tests.loggerd_tests_common import UploaderTestCase

Stats = namedtuple("Stats", ['f_bavail', 'f_blocks', 'f_frsize'])


class TestDeleter(UploaderTestCase):
  def fake_statvfs(self, d):
    return self.fake_stats

  def setUp(self):
    self.f_type = "fcamera.hevc"
    super().setUp()
    self.fake_stats = Stats(f_bavail=0, f_blocks=10, f_frsize=4096)
    deleter.os.statvfs = self.fake_statvfs

  def start_thread(self):
    self.end_event = threading.Event()
    self.del_thread = threading.Thread(target=deleter.deleter_thread, args=[self.end_event])
    self.del_thread.daemon = True
    self.del_thread.start()

  def join_thread(self):
    self.end_event.set()
    self.del_thread.join()

  def test_delete(self):
    f_path = self.make_file_with_data(self.seg_dir, self.f_type, 1)

    self.start_thread()

    try:
      with Timeout(2, "Timeout waiting for file to be deleted"):
        while f_path.exists():
          time.sleep(0.01)
    finally:
      self.join_thread()

  def assertDeleteOrder(self, f_paths: Sequence[Path], timeout: int = 5) -> None:
    deleted_order = []

    self.start_thread()
    try:
      with Timeout(timeout, "Timeout waiting for files to be deleted"):
        while True:
          for f in f_paths:
            if not f.exists() and f not in deleted_order:
              deleted_order.append(f)
          if len(deleted_order) == len(f_paths):
            break
          time.sleep(0.01)
    except TimeoutException:
      print("Not deleted:", [f for f in f_paths if f not in deleted_order])
      raise
    finally:
      self.join_thread()

    self.assertEqual(deleted_order, f_paths, "Files not deleted in expected order")

  def test_delete_order(self):
    self.assertDeleteOrder([
      self.make_file_with_data(self.seg_format.format(0), self.f_type),
      self.make_file_with_data(self.seg_format.format(1), self.f_type),
      self.make_file_with_data(self.seg_format2.format(0), self.f_type),
    ])

  def test_delete_many_preserved(self):
    self.assertDeleteOrder([
      self.make_file_with_data(self.seg_format.format(0), self.f_type),
      self.make_file_with_data(self.seg_format.format(1), self.f_type, preserve_xattr=deleter.PRESERVE_ATTR_VALUE),
      self.make_file_with_data(self.seg_format.format(2), self.f_type),
    ] + [
      self.make_file_with_data(self.seg_format2.format(i), self.f_type, preserve_xattr=deleter.PRESERVE_ATTR_VALUE)
      for i in range(5)
    ])

  def test_delete_last(self):
    self.assertDeleteOrder([
      self.make_file_with_data(self.seg_format.format(1), self.f_type),
      self.make_file_with_data(self.seg_format2.format(0), self.f_type),
      self.make_file_with_data(self.seg_format.format(0), self.f_type, preserve_xattr=deleter.PRESERVE_ATTR_VALUE),
      self.make_file_with_data("boot", self.seg_format[:-4]),
      self.make_file_with_data("crash", self.seg_format2[:-4]),
    ])

  def test_no_delete_when_available_space(self):
    f_path = self.make_file_with_data(self.seg_dir, self.f_type)

    block_size = 4096
    available = (10 * 1024 * 1024 * 1024) / block_size  # 10GB free
    self.fake_stats = Stats(f_bavail=available, f_blocks=10, f_frsize=block_size)

    self.start_thread()
    start_time = time.monotonic()
    while f_path.exists() and time.monotonic() - start_time < 2:
      time.sleep(0.01)
    self.join_thread()

    self.assertTrue(f_path.exists(), "File deleted with available space")

  def test_no_delete_with_lock_file(self):
    f_path = self.make_file_with_data(self.seg_dir, self.f_type, lock=True)

    self.start_thread()
    start_time = time.monotonic()
    while f_path.exists() and time.monotonic() - start_time < 2:
      time.sleep(0.01)
    self.join_thread()

    self.assertTrue(f_path.exists(), "File deleted when locked")


if __name__ == "__main__":
  unittest.main()
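
These tests never touch a real disk quota: setUp swaps deleter.os.statvfs for fake_statvfs, so the deleter thread sees whatever Stats tuple the test installs. A rough sketch of the free-space fraction those fields encode (the deleter's actual threshold lives in deleter.py and is not part of this diff):

    from collections import namedtuple

    Stats = namedtuple("Stats", ['f_bavail', 'f_blocks', 'f_frsize'])

    full = Stats(f_bavail=0, f_blocks=10, f_frsize=4096)                         # as in setUp
    roomy = Stats(f_bavail=(10 * 1024**3) // 4096, f_blocks=10, f_frsize=4096)   # the "10GB free" case

    # statvfs-style free fraction: available blocks over total blocks
    print(full.f_bavail / full.f_blocks)    # 0.0 -> deletion should kick in
    print(roomy.f_bavail / roomy.f_blocks)  # >> 1 here, since f_blocks is a tiny fake total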
system/loggerd/tests/test_encoder.py (new file, 156 lines)
@@ -0,0 +1,156 @@
#!/usr/bin/env python3
import math
import os
import pytest
import random
import shutil
import subprocess
import time
import unittest
from pathlib import Path

from parameterized import parameterized
from tqdm import trange

from openpilot.common.params import Params
from openpilot.common.timeout import Timeout
from openpilot.system.hardware import TICI
from openpilot.selfdrive.manager.process_config import managed_processes
from openpilot.tools.lib.logreader import LogReader
from openpilot.system.hardware.hw import Paths

SEGMENT_LENGTH = 2
FULL_SIZE = 2507572
CAMERAS = [
  ("fcamera.hevc", 20, FULL_SIZE, "roadEncodeIdx"),
  ("dcamera.hevc", 20, FULL_SIZE, "driverEncodeIdx"),
  ("ecamera.hevc", 20, FULL_SIZE, "wideRoadEncodeIdx"),
  ("qcamera.ts", 20, 130000, None),
]

# we check frame count, so we don't have to be too strict on size
FILE_SIZE_TOLERANCE = 0.5


@pytest.mark.tici  # TODO: all of loggerd should work on PC
class TestEncoder(unittest.TestCase):

  def setUp(self):
    self._clear_logs()
    os.environ["LOGGERD_TEST"] = "1"
    os.environ["LOGGERD_SEGMENT_LENGTH"] = str(SEGMENT_LENGTH)

  def tearDown(self):
    self._clear_logs()

  def _clear_logs(self):
    if os.path.exists(Paths.log_root()):
      shutil.rmtree(Paths.log_root())

  def _get_latest_segment_path(self):
    last_route = sorted(Path(Paths.log_root()).iterdir())[-1]
    return os.path.join(Paths.log_root(), last_route)

  # TODO: this should run faster than real time
  @parameterized.expand([(True, ), (False, )])
  def test_log_rotation(self, record_front):
    Params().put_bool("RecordFront", record_front)

    managed_processes['sensord'].start()
    managed_processes['loggerd'].start()
    managed_processes['encoderd'].start()

    time.sleep(1.0)
    managed_processes['camerad'].start()

    num_segments = int(os.getenv("SEGMENTS", random.randint(10, 15)))

    # wait for loggerd to make the dir for first segment
    route_prefix_path = None
    with Timeout(int(SEGMENT_LENGTH*3)):
      while route_prefix_path is None:
        try:
          route_prefix_path = self._get_latest_segment_path().rsplit("--", 1)[0]
        except Exception:
          time.sleep(0.1)

    def check_seg(i):
      # check each camera file size
      counts = []
      first_frames = []
      for camera, fps, size, encode_idx_name in CAMERAS:
        if not record_front and "dcamera" in camera:
          continue

        file_path = f"{route_prefix_path}--{i}/{camera}"

        # check file exists
        self.assertTrue(os.path.exists(file_path), f"segment #{i}: '{file_path}' missing")

        # TODO: this ffprobe call is really slow
        # check frame count
        cmd = f"ffprobe -v error -select_streams v:0 -count_packets -show_entries stream=nb_read_packets -of csv=p=0 {file_path}"
        if TICI:
          cmd = "LD_LIBRARY_PATH=/usr/local/lib " + cmd

        expected_frames = fps * SEGMENT_LENGTH
        probe = subprocess.check_output(cmd, shell=True, encoding='utf8')
        frame_count = int(probe.split('\n')[0].strip())
        counts.append(frame_count)

        self.assertEqual(frame_count, expected_frames,
                         f"segment #{i}: {camera} failed frame count check: expected {expected_frames}, got {frame_count}")

        # sanity check file size
        file_size = os.path.getsize(file_path)
        self.assertTrue(math.isclose(file_size, size, rel_tol=FILE_SIZE_TOLERANCE),
                        f"{file_path} size {file_size} isn't close to target size {size}")

        # Check encodeIdx
        if encode_idx_name is not None:
          rlog_path = f"{route_prefix_path}--{i}/rlog"
          msgs = [m for m in LogReader(rlog_path) if m.which() == encode_idx_name]
          encode_msgs = [getattr(m, encode_idx_name) for m in msgs]

          valid = [m.valid for m in msgs]
          segment_idxs = [m.segmentId for m in encode_msgs]
          encode_idxs = [m.encodeId for m in encode_msgs]
          frame_idxs = [m.frameId for m in encode_msgs]

          # Check frame count
          self.assertEqual(frame_count, len(segment_idxs))
          self.assertEqual(frame_count, len(encode_idxs))

          # Check for duplicates or skips
          self.assertEqual(0, segment_idxs[0])
          self.assertEqual(len(set(segment_idxs)), len(segment_idxs))

          self.assertTrue(all(valid))

          self.assertEqual(expected_frames * i, encode_idxs[0])
          first_frames.append(frame_idxs[0])
          self.assertEqual(len(set(encode_idxs)), len(encode_idxs))

      self.assertEqual(1, len(set(first_frames)))

      if TICI:
        expected_frames = fps * SEGMENT_LENGTH
        self.assertEqual(min(counts), expected_frames)
      shutil.rmtree(f"{route_prefix_path}--{i}")

    try:
      for i in trange(num_segments):
        # poll for next segment
        with Timeout(int(SEGMENT_LENGTH*10), error_msg=f"timed out waiting for segment {i}"):
          while Path(f"{route_prefix_path}--{i+1}") not in Path(Paths.log_root()).iterdir():
            time.sleep(0.1)
        check_seg(i)
    finally:
      managed_processes['loggerd'].stop()
      managed_processes['encoderd'].stop()
      managed_processes['camerad'].stop()
      managed_processes['sensord'].stop()


if __name__ == "__main__":
  unittest.main()
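
The ffprobe call is the heart of check_seg; pulled out on its own it looks like this (assuming ffprobe is on PATH, and using the same flags as above):

    import subprocess

    def count_frames(file_path: str) -> int:
      # one packet per encoded frame for these streams, so packet count == frame count
      cmd = ["ffprobe", "-v", "error", "-select_streams", "v:0", "-count_packets",
             "-show_entries", "stream=nb_read_packets", "-of", "csv=p=0", file_path]
      out = subprocess.check_output(cmd, encoding="utf8")
      return int(out.split("\n")[0].strip())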
system/loggerd/tests/test_logger.cc (new file, 74 lines)
@@ -0,0 +1,74 @@
#include "catch2/catch.hpp"
#include "system/loggerd/logger.h"

typedef cereal::Sentinel::SentinelType SentinelType;

void verify_segment(const std::string &route_path, int segment, int max_segment, int required_event_cnt) {
  const std::string segment_path = route_path + "--" + std::to_string(segment);
  SentinelType begin_sentinel = segment == 0 ? SentinelType::START_OF_ROUTE : SentinelType::START_OF_SEGMENT;
  SentinelType end_sentinel = segment == max_segment - 1 ? SentinelType::END_OF_ROUTE : SentinelType::END_OF_SEGMENT;

  REQUIRE(!util::file_exists(segment_path + "/rlog.lock"));
  for (const char *fn : {"/rlog", "/qlog"}) {
    const std::string log_file = segment_path + fn;
    std::string log = util::read_file(log_file);
    REQUIRE(!log.empty());
    int event_cnt = 0, i = 0;
    kj::ArrayPtr<const capnp::word> words((capnp::word *)log.data(), log.size() / sizeof(capnp::word));
    while (words.size() > 0) {
      try {
        capnp::FlatArrayMessageReader reader(words);
        auto event = reader.getRoot<cereal::Event>();
        words = kj::arrayPtr(reader.getEnd(), words.end());
        if (i == 0) {
          REQUIRE(event.which() == cereal::Event::INIT_DATA);
        } else if (i == 1) {
          REQUIRE(event.which() == cereal::Event::SENTINEL);
          REQUIRE(event.getSentinel().getType() == begin_sentinel);
          REQUIRE(event.getSentinel().getSignal() == 0);
        } else if (words.size() > 0) {
          REQUIRE(event.which() == cereal::Event::CLOCKS);
          ++event_cnt;
        } else {
          // the last event must be SENTINEL
          REQUIRE(event.which() == cereal::Event::SENTINEL);
          REQUIRE(event.getSentinel().getType() == end_sentinel);
          REQUIRE(event.getSentinel().getSignal() == (end_sentinel == SentinelType::END_OF_ROUTE ? 1 : 0));
        }
        ++i;
      } catch (const kj::Exception &ex) {
        INFO("failed parse " << i << " exception :" << ex.getDescription());
        REQUIRE(0);
        break;
      }
    }
    REQUIRE(event_cnt == required_event_cnt);
  }
}

void write_msg(LoggerState *logger) {
  MessageBuilder msg;
  msg.initEvent().initClocks();
  logger->write(msg.toBytes(), true);
}

TEST_CASE("logger") {
  const int segment_cnt = 100;
  const std::string log_root = "/tmp/test_logger";
  system(("rm " + log_root + " -rf").c_str());
  std::string route_name;
  {
    LoggerState logger(log_root);
    route_name = logger.routeName();
    for (int i = 0; i < segment_cnt; ++i) {
      REQUIRE(logger.next());
      REQUIRE(util::file_exists(logger.segmentPath() + "/rlog.lock"));
      REQUIRE(logger.segment() == i);
      write_msg(&logger);
    }
    logger.setExitSignal(1);
  }
  for (int i = 0; i < segment_cnt; ++i) {
    verify_segment(log_root + "/" + route_name, i, segment_cnt, 1);
  }
}
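
The same INIT_DATA/SENTINEL framing this C++ test asserts can be spot-checked from Python with LogReader, as test_loggerd.py below does in _check_sentinel; a sketch (seg_path is a placeholder for a finished segment directory):

    from openpilot.tools.lib.logreader import LogReader

    seg_path = "/tmp/test_logger/..."  # placeholder; point at a real segment
    msgs = list(LogReader(seg_path + "/rlog"))
    assert msgs[0].which() == "initData"
    assert msgs[1].which() == "sentinel"   # startOfRoute or startOfSegment
    assert msgs[-1].which() == "sentinel"  # endOfRoute or endOfSegment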
system/loggerd/tests/test_loggerd.py (new file, 285 lines)
@@ -0,0 +1,285 @@
#!/usr/bin/env python3
import numpy as np
import os
import re
import random
import string
import subprocess
import time
from collections import defaultdict
from pathlib import Path
from flaky import flaky

import cereal.messaging as messaging
from cereal import log
from cereal.services import SERVICE_LIST
from openpilot.common.basedir import BASEDIR
from openpilot.common.params import Params
from openpilot.common.timeout import Timeout
from openpilot.system.hardware.hw import Paths
from openpilot.system.loggerd.xattr_cache import getxattr
from openpilot.system.loggerd.deleter import PRESERVE_ATTR_NAME, PRESERVE_ATTR_VALUE
from openpilot.selfdrive.manager.process_config import managed_processes
from openpilot.system.version import get_version
from openpilot.tools.lib.helpers import RE
from openpilot.tools.lib.logreader import LogReader
from cereal.visionipc import VisionIpcServer, VisionStreamType
from openpilot.common.transformations.camera import DEVICE_CAMERAS

SentinelType = log.Sentinel.SentinelType

CEREAL_SERVICES = [f for f in log.Event.schema.union_fields if f in SERVICE_LIST
                   and SERVICE_LIST[f].should_log and "encode" not in f.lower()]


class TestLoggerd:
  def _get_latest_log_dir(self):
    log_dirs = sorted(Path(Paths.log_root()).iterdir(), key=lambda f: f.stat().st_mtime)
    return log_dirs[-1]

  def _get_log_dir(self, x):
    for l in x.splitlines():
      for p in l.split(' '):
        path = Path(p.strip())
        if path.is_dir():
          return path
    return None

  def _get_log_fn(self, x):
    for l in x.splitlines():
      for p in l.split(' '):
        path = Path(p.strip())
        if path.is_file():
          return path
    return None

  def _gen_bootlog(self):
    with Timeout(5):
      out = subprocess.check_output("./bootlog", cwd=os.path.join(BASEDIR, "system/loggerd"), encoding='utf-8')

    log_fn = self._get_log_fn(out)

    # check existence
    assert log_fn is not None

    return log_fn

  def _check_init_data(self, msgs):
    msg = msgs[0]
    assert msg.which() == 'initData'

  def _check_sentinel(self, msgs, route):
    start_type = SentinelType.startOfRoute if route else SentinelType.startOfSegment
    assert msgs[1].sentinel.type == start_type

    end_type = SentinelType.endOfRoute if route else SentinelType.endOfSegment
    assert msgs[-1].sentinel.type == end_type

  def _publish_random_messages(self, services: list[str]) -> dict[str, list]:
    pm = messaging.PubMaster(services)

    managed_processes["loggerd"].start()
    for s in services:
      assert pm.wait_for_readers_to_update(s, timeout=5)

    sent_msgs = defaultdict(list)
    for _ in range(random.randint(2, 10) * 100):
      for s in services:
        try:
          m = messaging.new_message(s)
        except Exception:
          m = messaging.new_message(s, random.randint(2, 10))
        pm.send(s, m)
        sent_msgs[s].append(m)

    for s in services:
      assert pm.wait_for_readers_to_update(s, timeout=5)
    managed_processes["loggerd"].stop()

    return sent_msgs

  def test_init_data_values(self):
    os.environ["CLEAN"] = random.choice(["0", "1"])

    dongle = ''.join(random.choice(string.printable) for n in range(random.randint(1, 100)))
    fake_params = [
      # param, initData field, value
      ("DongleId", "dongleId", dongle),
      ("GitCommit", "gitCommit", "commit"),
      ("GitCommitDate", "gitCommitDate", "date"),
      ("GitBranch", "gitBranch", "branch"),
      ("GitRemote", "gitRemote", "remote"),
    ]
    params = Params()
    for k, _, v in fake_params:
      params.put(k, v)
    params.put("AccessToken", "abc")

    lr = list(LogReader(str(self._gen_bootlog())))
    initData = lr[0].initData

    assert initData.dirty != bool(os.environ["CLEAN"])
    assert initData.version == get_version()

    if os.path.isfile("/proc/cmdline"):
      with open("/proc/cmdline") as f:
        assert list(initData.kernelArgs) == f.read().strip().split(" ")

      with open("/proc/version") as f:
        assert initData.kernelVersion == f.read()

    # check params
    logged_params = {entry.key: entry.value for entry in initData.params.entries}
    expected_params = {k for k, _, __ in fake_params} | {'AccessToken', 'BootCount'}
    assert set(logged_params.keys()) == expected_params, set(logged_params.keys()) ^ expected_params
    assert logged_params['AccessToken'] == b'', f"DONT_LOG param value was logged: {repr(logged_params['AccessToken'])}"
    for param_key, initData_key, v in fake_params:
      assert getattr(initData, initData_key) == v
      assert logged_params[param_key].decode() == v

  @flaky(max_runs=3)
  def test_rotation(self):
    os.environ["LOGGERD_TEST"] = "1"
    Params().put("RecordFront", "1")

    d = DEVICE_CAMERAS[("tici", "ar0231")]
    expected_files = {"rlog", "qlog", "qcamera.ts", "fcamera.hevc", "dcamera.hevc", "ecamera.hevc"}
    streams = [(VisionStreamType.VISION_STREAM_ROAD, (d.fcam.width, d.fcam.height, 2048*2346, 2048, 2048*1216), "roadCameraState"),
               (VisionStreamType.VISION_STREAM_DRIVER, (d.dcam.width, d.dcam.height, 2048*2346, 2048, 2048*1216), "driverCameraState"),
               (VisionStreamType.VISION_STREAM_WIDE_ROAD, (d.ecam.width, d.ecam.height, 2048*2346, 2048, 2048*1216), "wideRoadCameraState")]

    pm = messaging.PubMaster(["roadCameraState", "driverCameraState", "wideRoadCameraState"])
    vipc_server = VisionIpcServer("camerad")
    for stream_type, frame_spec, _ in streams:
      vipc_server.create_buffers_with_sizes(stream_type, 40, False, *(frame_spec))
    vipc_server.start_listener()

    num_segs = random.randint(2, 5)
    length = random.randint(1, 3)
    os.environ["LOGGERD_SEGMENT_LENGTH"] = str(length)
    managed_processes["loggerd"].start()
    managed_processes["encoderd"].start()
    assert pm.wait_for_readers_to_update("roadCameraState", timeout=5)

    fps = 20.0
    for n in range(1, int(num_segs*length*fps)+1):
      for stream_type, frame_spec, state in streams:
        dat = np.empty(frame_spec[2], dtype=np.uint8)
        vipc_server.send(stream_type, dat[:].flatten().tobytes(), n, n/fps, n/fps)

        camera_state = messaging.new_message(state)
        frame = getattr(camera_state, state)
        frame.frameId = n
        pm.send(state, camera_state)

      for _, _, state in streams:
        assert pm.wait_for_readers_to_update(state, timeout=5, dt=0.001)

    managed_processes["loggerd"].stop()
    managed_processes["encoderd"].stop()

    route_path = str(self._get_latest_log_dir()).rsplit("--", 1)[0]
    for n in range(num_segs):
      p = Path(f"{route_path}--{n}")
      logged = {f.name for f in p.iterdir() if f.is_file()}
      diff = logged ^ expected_files
      assert len(diff) == 0, f"didn't get all expected files. run={_} seg={n} {route_path=}, {diff=}\n{logged=} {expected_files=}"

  def test_bootlog(self):
    # generate bootlog with fake launch log
    launch_log = ''.join(str(random.choice(string.printable)) for _ in range(100))
    with open("/tmp/launch_log", "w") as f:
      f.write(launch_log)

    bootlog_path = self._gen_bootlog()
    lr = list(LogReader(str(bootlog_path)))

    # check length
    assert len(lr) == 2  # boot + initData

    self._check_init_data(lr)

    # check msgs
    bootlog_msgs = [m for m in lr if m.which() == 'boot']
    assert len(bootlog_msgs) == 1

    # sanity check values
    boot = bootlog_msgs.pop().boot
    assert abs(boot.wallTimeNanos - time.time_ns()) < 5*1e9  # within 5s
    assert boot.launchLog == launch_log

    for fn in ["console-ramoops", "pmsg-ramoops-0"]:
      path = Path(os.path.join("/sys/fs/pstore/", fn))
      if path.is_file():
        with open(path, "rb") as f:
          expected_val = f.read()
        bootlog_val = [e.value for e in boot.pstore.entries if e.key == fn][0]
        assert expected_val == bootlog_val

    # next one should increment by one
    bl1 = re.match(RE.LOG_ID_V2, bootlog_path.name)
    bl2 = re.match(RE.LOG_ID_V2, self._gen_bootlog().name)
    assert bl1.group('uid') != bl2.group('uid')
    assert int(bl1.group('count')) == 0 and int(bl2.group('count')) == 1

  def test_qlog(self):
    qlog_services = [s for s in CEREAL_SERVICES if SERVICE_LIST[s].decimation is not None]
    no_qlog_services = [s for s in CEREAL_SERVICES if SERVICE_LIST[s].decimation is None]

    services = random.sample(qlog_services, random.randint(2, min(10, len(qlog_services)))) + \
               random.sample(no_qlog_services, random.randint(2, min(10, len(no_qlog_services))))
    sent_msgs = self._publish_random_messages(services)

    qlog_path = os.path.join(self._get_latest_log_dir(), "qlog")
    lr = list(LogReader(qlog_path))

    # check initData and sentinel
    self._check_init_data(lr)
    self._check_sentinel(lr, True)

    recv_msgs = defaultdict(list)
    for m in lr:
      recv_msgs[m.which()].append(m)

    for s, msgs in sent_msgs.items():
      recv_cnt = len(recv_msgs[s])

      if s in no_qlog_services:
        # check services with no specific decimation aren't in qlog
        assert recv_cnt == 0, f"got {recv_cnt} {s} msgs in qlog"
      else:
        # check logged message count matches decimation
        expected_cnt = (len(msgs) - 1) // SERVICE_LIST[s].decimation + 1
        assert recv_cnt == expected_cnt, f"expected {expected_cnt} msgs for {s}, got {recv_cnt}"

  def test_rlog(self):
    services = random.sample(CEREAL_SERVICES, random.randint(5, 10))
    sent_msgs = self._publish_random_messages(services)

    lr = list(LogReader(os.path.join(self._get_latest_log_dir(), "rlog")))

    # check initData and sentinel
    self._check_init_data(lr)
    self._check_sentinel(lr, True)

    # check all messages were logged and in order
    lr = lr[2:-1]  # slice off initData and both sentinels
    for m in lr:
      sent = sent_msgs[m.which()].pop(0)
      sent.clear_write_flag()
      assert sent.to_bytes() == m.as_builder().to_bytes()

  def test_preserving_flagged_segments(self):
    services = set(random.sample(CEREAL_SERVICES, random.randint(5, 10))) | {"userFlag"}
    self._publish_random_messages(services)

    segment_dir = self._get_latest_log_dir()
    assert getxattr(segment_dir, PRESERVE_ATTR_NAME) == PRESERVE_ATTR_VALUE

  def test_not_preserving_unflagged_segments(self):
    services = set(random.sample(CEREAL_SERVICES, random.randint(5, 10))) - {"userFlag"}
    self._publish_random_messages(services)

    segment_dir = self._get_latest_log_dir()
    assert getxattr(segment_dir, PRESERVE_ATTR_NAME) is None
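
One step in test_qlog deserves a gloss: a service with decimation d keeps every d-th message starting from the first, so n published messages should produce (n - 1) // d + 1 qlog entries. For instance:

    # 100 messages at decimation 10 -> indices 0, 10, ..., 90 are kept
    n, d = 100, 10
    assert (n - 1) // d + 1 == 10

    # 101 messages picks up index 100 as well
    assert (101 - 1) // d + 1 == 11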
system/loggerd/tests/test_runner.cc (new file, 2 lines)
@@ -0,0 +1,2 @@
#define CATCH_CONFIG_MAIN
#include "catch2/catch.hpp"
system/loggerd/tests/test_uploader.py (new file, 190 lines)
@@ -0,0 +1,190 @@
#!/usr/bin/env python3
import os
import time
import threading
import unittest
import logging
import json
from pathlib import Path
from openpilot.system.hardware.hw import Paths

from openpilot.common.swaglog import cloudlog
from openpilot.system.loggerd.uploader import main, UPLOAD_ATTR_NAME, UPLOAD_ATTR_VALUE

from openpilot.system.loggerd.tests.loggerd_tests_common import UploaderTestCase


class FakeLogHandler(logging.Handler):
  def __init__(self):
    logging.Handler.__init__(self)
    self.reset()

  def reset(self):
    self.upload_order = list()
    self.upload_ignored = list()

  def emit(self, record):
    try:
      j = json.loads(record.getMessage())
      if j["event"] == "upload_success":
        self.upload_order.append(j["key"])
      if j["event"] == "upload_ignored":
        self.upload_ignored.append(j["key"])
    except Exception:
      pass

log_handler = FakeLogHandler()
cloudlog.addHandler(log_handler)


class TestUploader(UploaderTestCase):
  def setUp(self):
    super().setUp()
    log_handler.reset()

  def start_thread(self):
    self.end_event = threading.Event()
    self.up_thread = threading.Thread(target=main, args=[self.end_event])
    self.up_thread.daemon = True
    self.up_thread.start()

  def join_thread(self):
    self.end_event.set()
    self.up_thread.join()

  def gen_files(self, lock=False, xattr: bytes | None = None, boot=True) -> list[Path]:
    f_paths = []
    for t in ["qlog", "rlog", "dcamera.hevc", "fcamera.hevc"]:
      f_paths.append(self.make_file_with_data(self.seg_dir, t, 1, lock=lock, upload_xattr=xattr))

    if boot:
      f_paths.append(self.make_file_with_data("boot", f"{self.seg_dir}", 1, lock=lock, upload_xattr=xattr))
    return f_paths

  def gen_order(self, seg1: list[int], seg2: list[int], boot=True) -> list[str]:
    keys = []
    if boot:
      keys += [f"boot/{self.seg_format.format(i)}.bz2" for i in seg1]
      keys += [f"boot/{self.seg_format2.format(i)}.bz2" for i in seg2]
    keys += [f"{self.seg_format.format(i)}/qlog.bz2" for i in seg1]
    keys += [f"{self.seg_format2.format(i)}/qlog.bz2" for i in seg2]
    return keys

  def test_upload(self):
    self.gen_files(lock=False)

    self.start_thread()
    # allow enough time that files could upload twice if there is a bug in the logic
    time.sleep(5)
    self.join_thread()

    exp_order = self.gen_order([self.seg_num], [])

    self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored")
    self.assertFalse(len(log_handler.upload_order) < len(exp_order), "Some files failed to upload")
    self.assertFalse(len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice")
    for f_path in exp_order:
      self.assertEqual(os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME), UPLOAD_ATTR_VALUE, "All files not uploaded")

    self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order")

  def test_upload_with_wrong_xattr(self):
    self.gen_files(lock=False, xattr=b'0')

    self.start_thread()
    # allow enough time that files could upload twice if there is a bug in the logic
    time.sleep(5)
    self.join_thread()

    exp_order = self.gen_order([self.seg_num], [])

    self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored")
    self.assertFalse(len(log_handler.upload_order) < len(exp_order), "Some files failed to upload")
    self.assertFalse(len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice")
    for f_path in exp_order:
      self.assertEqual(os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME), UPLOAD_ATTR_VALUE, "All files not uploaded")

    self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order")

  def test_upload_ignored(self):
    self.set_ignore()
    self.gen_files(lock=False)

    self.start_thread()
    # allow enough time that files could upload twice if there is a bug in the logic
    time.sleep(5)
    self.join_thread()

    exp_order = self.gen_order([self.seg_num], [])

    self.assertTrue(len(log_handler.upload_order) == 0, "Some files were not ignored")
    self.assertFalse(len(log_handler.upload_ignored) < len(exp_order), "Some files failed to ignore")
    self.assertFalse(len(log_handler.upload_ignored) > len(exp_order), "Some files were ignored twice")
    for f_path in exp_order:
      self.assertEqual(os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME), UPLOAD_ATTR_VALUE, "All files not ignored")

    self.assertTrue(log_handler.upload_ignored == exp_order, "Files ignored in wrong order")

  def test_upload_files_in_create_order(self):
    seg1_nums = [0, 1, 2, 10, 20]
    for i in seg1_nums:
      self.seg_dir = self.seg_format.format(i)
      self.gen_files(boot=False)
    seg2_nums = [5, 50, 51]
    for i in seg2_nums:
      self.seg_dir = self.seg_format2.format(i)
      self.gen_files(boot=False)

    exp_order = self.gen_order(seg1_nums, seg2_nums, boot=False)

    self.start_thread()
    # allow enough time that files could upload twice if there is a bug in the logic
    time.sleep(5)
    self.join_thread()

    self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored")
    self.assertFalse(len(log_handler.upload_order) < len(exp_order), "Some files failed to upload")
    self.assertFalse(len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice")
    for f_path in exp_order:
      self.assertEqual(os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME), UPLOAD_ATTR_VALUE, "All files not uploaded")

    self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order")

  def test_no_upload_with_lock_file(self):
    self.start_thread()

    time.sleep(0.25)
    f_paths = self.gen_files(lock=True, boot=False)

    # allow enough time that files should have been uploaded if they would be uploaded
    time.sleep(5)
    self.join_thread()

    for f_path in f_paths:
      fn = f_path.with_suffix(f_path.suffix.replace(".bz2", ""))
      uploaded = UPLOAD_ATTR_NAME in os.listxattr(fn) and os.getxattr(fn, UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE
      self.assertFalse(uploaded, "File uploaded when locked")

  def test_no_upload_with_xattr(self):
    self.gen_files(lock=False, xattr=UPLOAD_ATTR_VALUE)

    self.start_thread()
    # allow enough time that files could upload twice if there is a bug in the logic
    time.sleep(5)
    self.join_thread()

    self.assertEqual(len(log_handler.upload_order), 0, "File uploaded again")

  def test_clear_locks_on_startup(self):
    f_paths = self.gen_files(lock=True, boot=False)
    self.start_thread()
    time.sleep(1)
    self.join_thread()

    for f_path in f_paths:
      lock_path = f_path.with_suffix(f_path.suffix + ".lock")
      self.assertFalse(lock_path.is_file(), "File lock not cleared on startup")


if __name__ == "__main__":
  unittest.main()
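
A successful upload is recorded as an extended attribute on the log file rather than by deleting it; the check used in test_no_upload_with_lock_file can be factored into a small helper (a sketch using the same expression as above; assumes a Linux filesystem with user xattrs enabled):

    import os

    from openpilot.system.loggerd.uploader import UPLOAD_ATTR_NAME, UPLOAD_ATTR_VALUE

    def was_uploaded(fn: str) -> bool:
      # the attribute is absent until the uploader marks the file as done
      return UPLOAD_ATTR_NAME in os.listxattr(fn) and os.getxattr(fn, UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE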