Add openpilot tests

1 selfdrive/car/tests/.gitignore vendored Normal file
@@ -0,0 +1 @@
*.bz2

12 selfdrive/car/tests/big_cars_test.sh Normal file
@@ -0,0 +1,12 @@
#!/bin/bash

SCRIPT_DIR=$(dirname "$0")
BASEDIR=$(realpath "$SCRIPT_DIR/../../../")
cd $BASEDIR

export MAX_EXAMPLES=300
export INTERNAL_SEG_CNT=300
export FILEREADER_CACHE=1
export INTERNAL_SEG_LIST=selfdrive/car/tests/test_models_segs.txt

cd selfdrive/car/tests && pytest test_models.py test_car_interfaces.py

299 selfdrive/car/tests/routes.py Normal file
@@ -0,0 +1,299 @@
#!/usr/bin/env python3
from typing import NamedTuple

from openpilot.selfdrive.car.chrysler.values import CAR as CHRYSLER
from openpilot.selfdrive.car.gm.values import CAR as GM
from openpilot.selfdrive.car.ford.values import CAR as FORD
from openpilot.selfdrive.car.honda.values import CAR as HONDA
from openpilot.selfdrive.car.hyundai.values import CAR as HYUNDAI
from openpilot.selfdrive.car.nissan.values import CAR as NISSAN
from openpilot.selfdrive.car.mazda.values import CAR as MAZDA
from openpilot.selfdrive.car.subaru.values import CAR as SUBARU
from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA
from openpilot.selfdrive.car.volkswagen.values import CAR as VOLKSWAGEN
from openpilot.selfdrive.car.tesla.values import CAR as TESLA
from openpilot.selfdrive.car.body.values import CAR as COMMA

# TODO: add routes for these cars
non_tested_cars = [
  FORD.F_150_MK14,
  GM.CADILLAC_ATS,
  GM.HOLDEN_ASTRA,
  GM.MALIBU,
  HYUNDAI.GENESIS_G90,
  HONDA.ODYSSEY_CHN,
  VOLKSWAGEN.CRAFTER_MK2,  # need a route from an ACC-equipped Crafter
  SUBARU.FORESTER_HYBRID,
]


class CarTestRoute(NamedTuple):
  route: str
  car_model: str | None
  segment: int | None = None


routes = [
  CarTestRoute("efdf9af95e71cd84|2022-05-13--19-03-31", COMMA.BODY),

  CarTestRoute("0c94aa1e1296d7c6|2021-05-05--19-48-37", CHRYSLER.JEEP_GRAND_CHEROKEE),
  CarTestRoute("91dfedae61d7bd75|2021-05-22--20-07-52", CHRYSLER.JEEP_GRAND_CHEROKEE_2019),
  CarTestRoute("420a8e183f1aed48|2020-03-05--07-15-29", CHRYSLER.PACIFICA_2017_HYBRID),
  CarTestRoute("43a685a66291579b|2021-05-27--19-47-29", CHRYSLER.PACIFICA_2018),
  CarTestRoute("378472f830ee7395|2021-05-28--07-38-43", CHRYSLER.PACIFICA_2018_HYBRID),
  CarTestRoute("8190c7275a24557b|2020-01-29--08-33-58", CHRYSLER.PACIFICA_2019_HYBRID),
  CarTestRoute("3d84727705fecd04|2021-05-25--08-38-56", CHRYSLER.PACIFICA_2020),
  CarTestRoute("221c253375af4ee9|2022-06-15--18-38-24", CHRYSLER.RAM_1500),
  CarTestRoute("8fb5eabf914632ae|2022-08-04--17-28-53", CHRYSLER.RAM_HD, segment=6),
  CarTestRoute("3379c85aeedc8285|2023-12-07--17-49-39", CHRYSLER.DODGE_DURANGO),

  CarTestRoute("54827bf84c38b14f|2023-01-25--14-14-11", FORD.BRONCO_SPORT_MK1),
  CarTestRoute("f8eaaccd2a90aef8|2023-05-04--15-10-09", FORD.ESCAPE_MK4),
  CarTestRoute("62241b0c7fea4589|2022-09-01--15-32-49", FORD.EXPLORER_MK6),
  CarTestRoute("e886087f430e7fe7|2023-06-16--23-06-36", FORD.FOCUS_MK4),
  CarTestRoute("bd37e43731e5964b|2023-04-30--10-42-26", FORD.MAVERICK_MK1),
  CarTestRoute("112e4d6e0cad05e1|2023-11-14--08-21-43", FORD.F_150_LIGHTNING_MK1),
  CarTestRoute("83a4e056c7072678|2023-11-13--16-51-33", FORD.MUSTANG_MACH_E_MK1),
  #TestRoute("f1b4c567731f4a1b|2018-04-30--10-15-35", FORD.FUSION),

  CarTestRoute("7cc2a8365b4dd8a9|2018-12-02--12-10-44", GM.ACADIA),
  CarTestRoute("aa20e335f61ba898|2019-02-05--16-59-04", GM.BUICK_REGAL),
  CarTestRoute("75a6bcb9b8b40373|2023-03-11--22-47-33", GM.BUICK_LACROSSE),
  CarTestRoute("e746f59bc96fd789|2024-01-31--22-25-58", GM.EQUINOX),
  CarTestRoute("ef8f2185104d862e|2023-02-09--18-37-13", GM.ESCALADE),
  CarTestRoute("46460f0da08e621e|2021-10-26--07-21-46", GM.ESCALADE_ESV),
  CarTestRoute("168f8b3be57f66ae|2023-09-12--21-44-42", GM.ESCALADE_ESV_2019),
  CarTestRoute("c950e28c26b5b168|2018-05-30--22-03-41", GM.VOLT),
  CarTestRoute("f08912a233c1584f|2022-08-11--18-02-41", GM.BOLT_EUV, segment=1),
  CarTestRoute("555d4087cf86aa91|2022-12-02--12-15-07", GM.BOLT_EUV, segment=14),  # Bolt EV
  CarTestRoute("38aa7da107d5d252|2022-08-15--16-01-12", GM.SILVERADO),
  CarTestRoute("5085c761395d1fe6|2023-04-07--18-20-06", GM.TRAILBLAZER),

  CarTestRoute("0e7a2ba168465df5|2020-10-18--14-14-22", HONDA.ACURA_RDX_3G),
  CarTestRoute("a74b011b32b51b56|2020-07-26--17-09-36", HONDA.CIVIC),
  CarTestRoute("a859a044a447c2b0|2020-03-03--18-42-45", HONDA.CRV_EU),
  CarTestRoute("68aac44ad69f838e|2021-05-18--20-40-52", HONDA.CRV),
  CarTestRoute("14fed2e5fa0aa1a5|2021-05-25--14-59-42", HONDA.CRV_HYBRID),
  CarTestRoute("52f3e9ae60c0d886|2021-05-23--15-59-43", HONDA.FIT),
  CarTestRoute("2c4292a5cd10536c|2021-08-19--21-32-15", HONDA.FREED),
  CarTestRoute("03be5f2fd5c508d1|2020-04-19--18-44-15", HONDA.HRV),
  CarTestRoute("320098ff6c5e4730|2023-04-13--17-47-46", HONDA.HRV_3G),
  CarTestRoute("917b074700869333|2021-05-24--20-40-20", HONDA.ACURA_ILX),
  CarTestRoute("08a3deb07573f157|2020-03-06--16-11-19", HONDA.ACCORD),  # 1.5T
  CarTestRoute("1da5847ac2488106|2021-05-24--19-31-50", HONDA.ACCORD),  # 2.0T
  CarTestRoute("085ac1d942c35910|2021-03-25--20-11-15", HONDA.ACCORD),  # 2021 with new style HUD msgs
  CarTestRoute("07585b0da3c88459|2021-05-26--18-52-04", HONDA.ACCORD),  # hybrid
  CarTestRoute("f29e2b57a55e7ad5|2021-03-24--20-52-38", HONDA.ACCORD),  # hybrid, 2021 with new style HUD msgs
  CarTestRoute("1ad763dd22ef1a0e|2020-02-29--18-37-03", HONDA.CRV_5G),
  CarTestRoute("0a96f86fcfe35964|2020-02-05--07-25-51", HONDA.ODYSSEY),
  CarTestRoute("d83f36766f8012a5|2020-02-05--18-42-21", HONDA.CIVIC_BOSCH_DIESEL),
  CarTestRoute("f0890d16a07a236b|2021-05-25--17-27-22", HONDA.INSIGHT),
  CarTestRoute("07d37d27996096b6|2020-03-04--21-57-27", HONDA.PILOT),
  CarTestRoute("684e8f96bd491a0e|2021-11-03--11-08-42", HONDA.PILOT),  # Passport
  CarTestRoute("0a78dfbacc8504ef|2020-03-04--13-29-55", HONDA.CIVIC_BOSCH),
  CarTestRoute("f34a60d68d83b1e5|2020-10-06--14-35-55", HONDA.ACURA_RDX),
  CarTestRoute("54fd8451b3974762|2021-04-01--14-50-10", HONDA.RIDGELINE),
  CarTestRoute("2d5808fae0b38ac6|2021-09-01--17-14-11", HONDA.HONDA_E),
  CarTestRoute("f44aa96ace22f34a|2021-12-22--06-22-31", HONDA.CIVIC_2022),

  CarTestRoute("87d7f06ade479c2e|2023-09-11--23-30-11", HYUNDAI.AZERA_6TH_GEN),
  CarTestRoute("66189dd8ec7b50e6|2023-09-20--07-02-12", HYUNDAI.AZERA_HEV_6TH_GEN),
  CarTestRoute("6fe86b4e410e4c37|2020-07-22--16-27-13", HYUNDAI.HYUNDAI_GENESIS),
  CarTestRoute("b5d6dc830ad63071|2022-12-12--21-28-25", HYUNDAI.GENESIS_GV60_EV_1ST_GEN, segment=12),
  CarTestRoute("70c5bec28ec8e345|2020-08-08--12-22-23", HYUNDAI.GENESIS_G70),
  CarTestRoute("ca4de5b12321bd98|2022-10-18--21-15-59", HYUNDAI.GENESIS_GV70_1ST_GEN),
  CarTestRoute("6b301bf83f10aa90|2020-11-22--16-45-07", HYUNDAI.GENESIS_G80),
  CarTestRoute("0bbe367c98fa1538|2023-09-16--00-16-49", HYUNDAI.CUSTIN_1ST_GEN),
  CarTestRoute("f0709d2bc6ca451f|2022-10-15--08-13-54", HYUNDAI.SANTA_CRUZ_1ST_GEN),
  CarTestRoute("4dbd55df87507948|2022-03-01--09-45-38", HYUNDAI.SANTA_FE),
  CarTestRoute("bf43d9df2b660eb0|2021-09-23--14-16-37", HYUNDAI.SANTA_FE_2022),
  CarTestRoute("37398f32561a23ad|2021-11-18--00-11-35", HYUNDAI.SANTA_FE_HEV_2022),
  CarTestRoute("656ac0d830792fcc|2021-12-28--14-45-56", HYUNDAI.SANTA_FE_PHEV_2022, segment=1),
  CarTestRoute("de59124955b921d8|2023-06-24--00-12-50", HYUNDAI.KIA_CARNIVAL_4TH_GEN),
  CarTestRoute("409c9409979a8abc|2023-07-11--09-06-44", HYUNDAI.KIA_CARNIVAL_4TH_GEN),  # Chinese model
  CarTestRoute("e0e98335f3ebc58f|2021-03-07--16-38-29", HYUNDAI.KIA_CEED),
  CarTestRoute("7653b2bce7bcfdaa|2020-03-04--15-34-32", HYUNDAI.KIA_OPTIMA_G4),
  CarTestRoute("018654717bc93d7d|2022-09-19--23-11-10", HYUNDAI.KIA_OPTIMA_G4_FL, segment=0),
  CarTestRoute("f9716670b2481438|2023-08-23--14-49-50", HYUNDAI.KIA_OPTIMA_H),
  CarTestRoute("6a42c1197b2a8179|2023-09-21--10-23-44", HYUNDAI.KIA_OPTIMA_H_G4_FL),
  CarTestRoute("c75a59efa0ecd502|2021-03-11--20-52-55", HYUNDAI.KIA_SELTOS),
  CarTestRoute("5b7c365c50084530|2020-04-15--16-13-24", HYUNDAI.SONATA),
  CarTestRoute("b2a38c712dcf90bd|2020-05-18--18-12-48", HYUNDAI.SONATA_LF),
  CarTestRoute("c344fd2492c7a9d2|2023-12-11--09-03-23", HYUNDAI.STARIA_4TH_GEN),
  CarTestRoute("fb3fd42f0baaa2f8|2022-03-30--15-25-05", HYUNDAI.TUCSON),
  CarTestRoute("db68bbe12250812c|2022-12-05--00-54-12", HYUNDAI.TUCSON_4TH_GEN),  # 2023
  CarTestRoute("36e10531feea61a4|2022-07-25--13-37-42", HYUNDAI.TUCSON_4TH_GEN),  # hybrid
  CarTestRoute("5875672fc1d4bf57|2020-07-23--21-33-28", HYUNDAI.KIA_SORENTO),
  CarTestRoute("1d0d000db3370fd0|2023-01-04--22-28-42", HYUNDAI.KIA_SORENTO_4TH_GEN, segment=5),
  CarTestRoute("fc19648042eb6896|2023-08-16--11-43-27", HYUNDAI.KIA_SORENTO_HEV_4TH_GEN, segment=14),
  CarTestRoute("628935d7d3e5f4f7|2022-11-30--01-12-46", HYUNDAI.KIA_SORENTO_HEV_4TH_GEN),  # plug-in hybrid
  CarTestRoute("9c917ba0d42ffe78|2020-04-17--12-43-19", HYUNDAI.PALISADE),
  CarTestRoute("05a8f0197fdac372|2022-10-19--14-14-09", HYUNDAI.IONIQ_5),  # HDA2
  CarTestRoute("eb4eae1476647463|2023-08-26--18-07-04", HYUNDAI.IONIQ_6, segment=6),  # HDA2
  CarTestRoute("3f29334d6134fcd4|2022-03-30--22-00-50", HYUNDAI.IONIQ_PHEV_2019),
  CarTestRoute("fa8db5869167f821|2021-06-10--22-50-10", HYUNDAI.IONIQ_PHEV),
  CarTestRoute("e1107f9d04dfb1e2|2023-09-05--22-32-12", HYUNDAI.IONIQ_PHEV),  # openpilot longitudinal enabled
  CarTestRoute("2c5cf2dd6102e5da|2020-12-17--16-06-44", HYUNDAI.IONIQ_EV_2020),
  CarTestRoute("610ebb9faaad6b43|2020-06-13--15-28-36", HYUNDAI.IONIQ_EV_LTD),
  CarTestRoute("2c5cf2dd6102e5da|2020-06-26--16-00-08", HYUNDAI.IONIQ),
  CarTestRoute("012c95f06918eca4|2023-01-15--11-19-36", HYUNDAI.IONIQ),  # openpilot longitudinal enabled
  CarTestRoute("ab59fe909f626921|2021-10-18--18-34-28", HYUNDAI.IONIQ_HEV_2022),
  CarTestRoute("22d955b2cd499c22|2020-08-10--19-58-21", HYUNDAI.KONA),
  CarTestRoute("efc48acf44b1e64d|2021-05-28--21-05-04", HYUNDAI.KONA_EV),
  CarTestRoute("f90d3cd06caeb6fa|2023-09-06--17-15-47", HYUNDAI.KONA_EV),  # openpilot longitudinal enabled
  CarTestRoute("ff973b941a69366f|2022-07-28--22-01-19", HYUNDAI.KONA_EV_2022, segment=11),
  CarTestRoute("1618132d68afc876|2023-08-27--09-32-14", HYUNDAI.KONA_EV_2ND_GEN, segment=13),
  CarTestRoute("49f3c13141b6bc87|2021-07-28--08-05-13", HYUNDAI.KONA_HEV),
  CarTestRoute("5dddcbca6eb66c62|2020-07-26--13-24-19", HYUNDAI.KIA_STINGER),
  CarTestRoute("5b50b883a4259afb|2022-11-09--15-00-42", HYUNDAI.KIA_STINGER_2022),
  CarTestRoute("d624b3d19adce635|2020-08-01--14-59-12", HYUNDAI.VELOSTER),
  CarTestRoute("d545129f3ca90f28|2022-10-19--09-22-54", HYUNDAI.KIA_EV6),  # HDA2
  CarTestRoute("68d6a96e703c00c9|2022-09-10--16-09-39", HYUNDAI.KIA_EV6),  # HDA1
  CarTestRoute("9b25e8c1484a1b67|2023-04-13--10-41-45", HYUNDAI.KIA_EV6),
  CarTestRoute("007d5e4ad9f86d13|2021-09-30--15-09-23", HYUNDAI.KIA_K5_2021),
  CarTestRoute("c58dfc9fc16590e0|2023-01-14--13-51-48", HYUNDAI.KIA_K5_HEV_2020),
  CarTestRoute("78ad5150de133637|2023-09-13--16-15-57", HYUNDAI.KIA_K8_HEV_1ST_GEN),
  CarTestRoute("50c6c9b85fd1ff03|2020-10-26--17-56-06", HYUNDAI.KIA_NIRO_EV),
  CarTestRoute("b153671049a867b3|2023-04-05--10-00-30", HYUNDAI.KIA_NIRO_EV_2ND_GEN),
  CarTestRoute("173219cf50acdd7b|2021-07-05--10-27-41", HYUNDAI.KIA_NIRO_PHEV),
  CarTestRoute("23349923ba5c4e3b|2023-12-02--08-51-54", HYUNDAI.KIA_NIRO_PHEV_2022),
  CarTestRoute("34a875f29f69841a|2021-07-29--13-02-09", HYUNDAI.KIA_NIRO_HEV_2021),
  CarTestRoute("db04d2c63990e3ba|2023-02-08--16-52-39", HYUNDAI.KIA_NIRO_HEV_2ND_GEN),
  CarTestRoute("50a2212c41f65c7b|2021-05-24--16-22-06", HYUNDAI.KIA_FORTE),
  CarTestRoute("192283cdbb7a58c2|2022-10-15--01-43-18", HYUNDAI.KIA_SPORTAGE_5TH_GEN),
  CarTestRoute("09559f1fcaed4704|2023-11-16--02-24-57", HYUNDAI.KIA_SPORTAGE_5TH_GEN),  # openpilot longitudinal
  CarTestRoute("b3537035ffe6a7d6|2022-10-17--15-23-49", HYUNDAI.KIA_SPORTAGE_5TH_GEN),  # hybrid
  CarTestRoute("c5ac319aa9583f83|2021-06-01--18-18-31", HYUNDAI.ELANTRA),
  CarTestRoute("734ef96182ddf940|2022-10-02--16-41-44", HYUNDAI.ELANTRA_GT_I30),
  CarTestRoute("82e9cdd3f43bf83e|2021-05-15--02-42-51", HYUNDAI.ELANTRA_2021),
  CarTestRoute("715ac05b594e9c59|2021-06-20--16-21-07", HYUNDAI.ELANTRA_HEV_2021),
  CarTestRoute("7120aa90bbc3add7|2021-08-02--07-12-31", HYUNDAI.SONATA_HYBRID),
  CarTestRoute("715ac05b594e9c59|2021-10-27--23-24-56", HYUNDAI.GENESIS_G70_2020),
  CarTestRoute("6b0d44d22df18134|2023-05-06--10-36-55", HYUNDAI.GENESIS_GV80),

  CarTestRoute("00c829b1b7613dea|2021-06-24--09-10-10", TOYOTA.ALPHARD_TSS2),
  CarTestRoute("912119ebd02c7a42|2022-03-19--07-24-50", TOYOTA.ALPHARD_TSS2),  # hybrid
  CarTestRoute("000cf3730200c71c|2021-05-24--10-42-05", TOYOTA.AVALON),
  CarTestRoute("0bb588106852abb7|2021-05-26--12-22-01", TOYOTA.AVALON_2019),
  CarTestRoute("87bef2930af86592|2021-05-30--09-40-54", TOYOTA.AVALON_2019),  # hybrid
  CarTestRoute("e9966711cfb04ce3|2022-01-11--07-59-43", TOYOTA.AVALON_TSS2),
  CarTestRoute("eca1080a91720a54|2022-03-17--13-32-29", TOYOTA.AVALON_TSS2),  # hybrid
  CarTestRoute("6cdecc4728d4af37|2020-02-23--15-44-18", TOYOTA.CAMRY),
  CarTestRoute("2f37c007683e85ba|2023-09-02--14-39-44", TOYOTA.CAMRY),  # openpilot longitudinal, with radar CAN filter
  CarTestRoute("54034823d30962f5|2021-05-24--06-37-34", TOYOTA.CAMRY),  # hybrid
  CarTestRoute("3456ad0cd7281b24|2020-12-13--17-45-56", TOYOTA.CAMRY_TSS2),
  CarTestRoute("ffccc77938ddbc44|2021-01-04--16-55-41", TOYOTA.CAMRY_TSS2),  # hybrid
  CarTestRoute("4e45c89c38e8ec4d|2021-05-02--02-49-28", TOYOTA.COROLLA),
  CarTestRoute("5f5afb36036506e4|2019-05-14--02-09-54", TOYOTA.COROLLA_TSS2),
  CarTestRoute("5ceff72287a5c86c|2019-10-19--10-59-02", TOYOTA.COROLLA_TSS2),  # hybrid
  CarTestRoute("d2525c22173da58b|2021-04-25--16-47-04", TOYOTA.PRIUS),
  CarTestRoute("b14c5b4742e6fc85|2020-07-28--19-50-11", TOYOTA.RAV4),
  CarTestRoute("32a7df20486b0f70|2020-02-06--16-06-50", TOYOTA.RAV4H),
  CarTestRoute("cdf2f7de565d40ae|2019-04-25--03-53-41", TOYOTA.RAV4_TSS2),
  CarTestRoute("a5c341bb250ca2f0|2022-05-18--16-05-17", TOYOTA.RAV4_TSS2_2022),
  CarTestRoute("ad5a3fa719bc2f83|2023-10-17--19-48-42", TOYOTA.RAV4_TSS2_2023),
  CarTestRoute("7e34a988419b5307|2019-12-18--19-13-30", TOYOTA.RAV4_TSS2),  # hybrid
  CarTestRoute("2475fb3eb2ffcc2e|2022-04-29--12-46-23", TOYOTA.RAV4_TSS2_2022),  # hybrid
  CarTestRoute("7a31f030957b9c85|2023-04-01--14-12-51", TOYOTA.LEXUS_ES),
  CarTestRoute("37041c500fd30100|2020-12-30--12-17-24", TOYOTA.LEXUS_ES),  # hybrid
  CarTestRoute("e6a24be49a6cd46e|2019-10-29--10-52-42", TOYOTA.LEXUS_ES_TSS2),
  CarTestRoute("f49e8041283f2939|2019-05-30--11-51-51", TOYOTA.LEXUS_ES_TSS2),  # hybrid
  CarTestRoute("da23c367491f53e2|2021-05-21--09-09-11", TOYOTA.LEXUS_CTH, segment=3),
  CarTestRoute("32696cea52831b02|2021-11-19--18-13-30", TOYOTA.LEXUS_RC),
  CarTestRoute("ab9b64a5e5960cba|2023-10-24--17-32-08", TOYOTA.LEXUS_GS_F),
  CarTestRoute("886fcd8408d570e9|2020-01-29--02-18-55", TOYOTA.LEXUS_RX),
  CarTestRoute("d27ad752e9b08d4f|2021-05-26--19-39-51", TOYOTA.LEXUS_RX),  # hybrid
  CarTestRoute("01b22eb2ed121565|2020-02-02--11-25-51", TOYOTA.LEXUS_RX_TSS2),
  CarTestRoute("b74758c690a49668|2020-05-20--15-58-57", TOYOTA.LEXUS_RX_TSS2),  # hybrid
  CarTestRoute("964c09eb11ca8089|2020-11-03--22-04-00", TOYOTA.LEXUS_NX),
  CarTestRoute("ec429c0f37564e3c|2020-02-01--17-28-12", TOYOTA.LEXUS_NX),  # hybrid
  CarTestRoute("3fd5305f8b6ca765|2021-04-28--19-26-49", TOYOTA.LEXUS_NX_TSS2),
  CarTestRoute("09ae96064ed85a14|2022-06-09--12-22-31", TOYOTA.LEXUS_NX_TSS2),  # hybrid
  CarTestRoute("4765fbbf59e3cd88|2024-02-06--17-45-32", TOYOTA.LEXUS_LC_TSS2),
  CarTestRoute("0a302ffddbb3e3d3|2020-02-08--16-19-08", TOYOTA.HIGHLANDER_TSS2),
  CarTestRoute("437e4d2402abf524|2021-05-25--07-58-50", TOYOTA.HIGHLANDER_TSS2),  # hybrid
  CarTestRoute("3183cd9b021e89ce|2021-05-25--10-34-44", TOYOTA.HIGHLANDER),
  CarTestRoute("80d16a262e33d57f|2021-05-23--20-01-43", TOYOTA.HIGHLANDER),  # hybrid
  CarTestRoute("eb6acd681135480d|2019-06-20--20-00-00", TOYOTA.SIENNA),
  CarTestRoute("2e07163a1ba9a780|2019-08-25--13-15-13", TOYOTA.LEXUS_IS),
  CarTestRoute("649bf2997ada6e3a|2023-08-08--18-04-22", TOYOTA.LEXUS_IS_TSS2),
  CarTestRoute("0a0de17a1e6a2d15|2020-09-21--21-24-41", TOYOTA.PRIUS_TSS2),
  CarTestRoute("9b36accae406390e|2021-03-30--10-41-38", TOYOTA.MIRAI),
  CarTestRoute("cd9cff4b0b26c435|2021-05-13--15-12-39", TOYOTA.CHR),
  CarTestRoute("57858ede0369a261|2021-05-18--20-34-20", TOYOTA.CHR),  # hybrid
  CarTestRoute("ea8fbe72b96a185c|2023-02-08--15-11-46", TOYOTA.CHR_TSS2),
  CarTestRoute("ea8fbe72b96a185c|2023-02-22--09-20-34", TOYOTA.CHR_TSS2),  # openpilot longitudinal, with smartDSU
  CarTestRoute("6719965b0e1d1737|2023-02-09--22-44-05", TOYOTA.CHR_TSS2),  # hybrid
  CarTestRoute("6719965b0e1d1737|2023-08-29--06-40-05", TOYOTA.CHR_TSS2),  # hybrid, openpilot longitudinal, radar disabled
  CarTestRoute("14623aae37e549f3|2021-10-24--01-20-49", TOYOTA.PRIUS_V),

  CarTestRoute("202c40641158a6e5|2021-09-21--09-43-24", VOLKSWAGEN.ARTEON_MK1),
  CarTestRoute("2c68dda277d887ac|2021-05-11--15-22-20", VOLKSWAGEN.ATLAS_MK1),
  #CarTestRoute("ffcd23abbbd02219|2024-02-28--14-59-38", VOLKSWAGEN.CADDY_MK3),
  CarTestRoute("cae14e88932eb364|2021-03-26--14-43-28", VOLKSWAGEN.GOLF_MK7),  # Stock ACC
  CarTestRoute("3cfdec54aa035f3f|2022-10-13--14-58-58", VOLKSWAGEN.GOLF_MK7),  # openpilot longitudinal
  CarTestRoute("58a7d3b707987d65|2021-03-25--17-26-37", VOLKSWAGEN.JETTA_MK7),
  CarTestRoute("4d134e099430fba2|2021-03-26--00-26-06", VOLKSWAGEN.PASSAT_MK8),
  CarTestRoute("3cfdec54aa035f3f|2022-07-19--23-45-10", VOLKSWAGEN.PASSAT_NMS),
  CarTestRoute("0cd0b7f7e31a3853|2021-11-03--19-30-22", VOLKSWAGEN.POLO_MK6),
  CarTestRoute("064d1816e448f8eb|2022-09-29--15-32-34", VOLKSWAGEN.SHARAN_MK2),
  CarTestRoute("7d82b2f3a9115f1f|2021-10-21--15-39-42", VOLKSWAGEN.TAOS_MK1),
  CarTestRoute("2744c89a8dda9a51|2021-07-24--21-28-06", VOLKSWAGEN.TCROSS_MK1),
  CarTestRoute("2cef8a0b898f331a|2021-03-25--20-13-57", VOLKSWAGEN.TIGUAN_MK2),
  CarTestRoute("a589dcc642fdb10a|2021-06-14--20-54-26", VOLKSWAGEN.TOURAN_MK2),
  CarTestRoute("a459f4556782eba1|2021-09-19--09-48-00", VOLKSWAGEN.TRANSPORTER_T61),
  CarTestRoute("0cd0b7f7e31a3853|2021-11-18--00-38-32", VOLKSWAGEN.TROC_MK1),
  CarTestRoute("07667b885add75fd|2021-01-23--19-48-42", VOLKSWAGEN.AUDI_A3_MK3),
  CarTestRoute("6c6b466346192818|2021-06-06--14-17-47", VOLKSWAGEN.AUDI_Q2_MK1),
  CarTestRoute("0cd0b7f7e31a3853|2021-12-03--03-12-05", VOLKSWAGEN.AUDI_Q3_MK2),
  CarTestRoute("8f205bdd11bcbb65|2021-03-26--01-00-17", VOLKSWAGEN.SEAT_ATECA_MK1),
  CarTestRoute("fc6b6c9a3471c846|2021-05-27--13-39-56", VOLKSWAGEN.SEAT_LEON_MK3),
  CarTestRoute("0bbe367c98fa1538|2023-03-04--17-46-11", VOLKSWAGEN.SKODA_FABIA_MK4),
  CarTestRoute("12d6ae3057c04b0d|2021-09-15--00-04-07", VOLKSWAGEN.SKODA_KAMIQ_MK1),
  CarTestRoute("12d6ae3057c04b0d|2021-09-04--21-21-21", VOLKSWAGEN.SKODA_KAROQ_MK1),
  CarTestRoute("90434ff5d7c8d603|2021-03-15--12-07-31", VOLKSWAGEN.SKODA_KODIAQ_MK1),
  CarTestRoute("66e5edc3a16459c5|2021-05-25--19-00-29", VOLKSWAGEN.SKODA_OCTAVIA_MK3),
  CarTestRoute("026b6d18fba6417f|2021-03-26--09-17-04", VOLKSWAGEN.SKODA_SCALA_MK1),
  CarTestRoute("b2e9858e29db492b|2021-03-26--16-58-42", VOLKSWAGEN.SKODA_SUPERB_MK3),

  CarTestRoute("3c8f0c502e119c1c|2020-06-30--12-58-02", SUBARU.ASCENT),
  CarTestRoute("c321c6b697c5a5ff|2020-06-23--11-04-33", SUBARU.FORESTER),
  CarTestRoute("791340bc01ed993d|2019-03-10--16-28-08", SUBARU.IMPREZA),
  CarTestRoute("8bf7e79a3ce64055|2021-05-24--09-36-27", SUBARU.IMPREZA_2020),
  CarTestRoute("8de015561e1ea4a0|2023-08-29--17-08-31", SUBARU.IMPREZA),  # openpilot longitudinal
  # CarTestRoute("c3d1ccb52f5f9d65|2023-07-22--01-23-20", SUBARU.OUTBACK, segment=9),  # gen2 longitudinal, eyesight disabled
  CarTestRoute("1bbe6bf2d62f58a8|2022-07-14--17-11-43", SUBARU.OUTBACK, segment=10),
  CarTestRoute("c56e69bbc74b8fad|2022-08-18--09-43-51", SUBARU.LEGACY, segment=3),
  CarTestRoute("f4e3a0c511a076f4|2022-08-04--16-16-48", SUBARU.CROSSTREK_HYBRID, segment=2),
  CarTestRoute("7fd1e4f3a33c1673|2022-12-04--15-09-53", SUBARU.FORESTER_2022, segment=4),
  CarTestRoute("f3b34c0d2632aa83|2023-07-23--20-43-25", SUBARU.OUTBACK_2023, segment=7),
  CarTestRoute("99437cef6d5ff2ee|2023-03-13--21-21-38", SUBARU.ASCENT_2023, segment=7),
  # Pre-global, dashcam
  CarTestRoute("95441c38ae8c130e|2020-06-08--12-10-17", SUBARU.FORESTER_PREGLOBAL),
  CarTestRoute("df5ca7660000fba8|2020-06-16--17-37-19", SUBARU.LEGACY_PREGLOBAL),
  CarTestRoute("5ab784f361e19b78|2020-06-08--16-30-41", SUBARU.OUTBACK_PREGLOBAL),
  CarTestRoute("e19eb5d5353b1ac1|2020-08-09--14-37-56", SUBARU.OUTBACK_PREGLOBAL_2018),

  CarTestRoute("fbbfa6af821552b9|2020-03-03--08-09-43", NISSAN.XTRAIL),
  CarTestRoute("5b7c365c50084530|2020-03-25--22-10-13", NISSAN.LEAF),
  CarTestRoute("22c3dcce2dd627eb|2020-12-30--16-38-48", NISSAN.LEAF_IC),
  CarTestRoute("059ab9162e23198e|2020-05-30--09-41-01", NISSAN.ROGUE),
  CarTestRoute("b72d3ec617c0a90f|2020-12-11--15-38-17", NISSAN.ALTIMA),

  CarTestRoute("32a319f057902bb3|2020-04-27--15-18-58", MAZDA.CX5),
  CarTestRoute("10b5a4b380434151|2020-08-26--17-11-45", MAZDA.CX9),
  CarTestRoute("74f1038827005090|2020-08-26--20-05-50", MAZDA.MAZDA3),
  CarTestRoute("fb53c640f499b73d|2021-06-01--04-17-56", MAZDA.MAZDA6),
  CarTestRoute("f6d5b1a9d7a1c92e|2021-07-08--06-56-59", MAZDA.CX9_2021),
  CarTestRoute("a4af1602d8e668ac|2022-02-03--12-17-07", MAZDA.CX5_2022),

  CarTestRoute("6c14ee12b74823ce|2021-06-30--11-49-02", TESLA.AP1_MODELS),
  CarTestRoute("bb50caf5f0945ab1|2021-06-19--17-20-18", TESLA.AP2_MODELS),
  #CarTestRoute("66c1699b7697267d/2024-03-03--13-09-53", TESLA.MODELS_RAVEN),

  # Segments that test specific issues
  # Controls mismatch due to interceptor threshold
  CarTestRoute("cfb32f0fb91b173b|2022-04-06--14-54-45", HONDA.CIVIC, segment=21),
  # Controls mismatch due to standstill threshold
  CarTestRoute("bec2dcfde6a64235|2022-04-08--14-21-32", HONDA.CRV_HYBRID, segment=22),
]

67 selfdrive/car/tests/test_can_fingerprint.py Normal file
@@ -0,0 +1,67 @@
#!/usr/bin/env python3
from parameterized import parameterized
import unittest

from cereal import log, messaging
from openpilot.selfdrive.car.car_helpers import FRAME_FINGERPRINT, can_fingerprint
from openpilot.selfdrive.car.fingerprints import _FINGERPRINTS as FINGERPRINTS


class TestCanFingerprint(unittest.TestCase):
  @parameterized.expand(list(FINGERPRINTS.items()))
  def test_can_fingerprint(self, car_model, fingerprints):
    """Tests online fingerprinting function on offline fingerprints"""

    for fingerprint in fingerprints:  # can have multiple fingerprints for each platform
      can = messaging.new_message('can', 1)
      can.can = [log.CanData(address=address, dat=b'\x00' * length, src=src)
                 for address, length in fingerprint.items() for src in (0, 1)]

      fingerprint_iter = iter([can])
      empty_can = messaging.new_message('can', 0)
      car_fingerprint, finger = can_fingerprint(lambda: next(fingerprint_iter, empty_can))  # noqa: B023

      self.assertEqual(car_fingerprint, car_model)
      self.assertEqual(finger[0], fingerprint)
      self.assertEqual(finger[1], fingerprint)
      self.assertEqual(finger[2], {})

  def test_timing(self):
    # just pick any CAN fingerprinting car
    car_model = 'CHEVROLET BOLT EUV 2022'
    fingerprint = FINGERPRINTS[car_model][0]

    cases = []

    # case 1 - one match, make sure we keep going for 100 frames
    can = messaging.new_message('can', 1)
    can.can = [log.CanData(address=address, dat=b'\x00' * length, src=src)
               for address, length in fingerprint.items() for src in (0, 1)]
    cases.append((FRAME_FINGERPRINT, car_model, can))

    # case 2 - no matches, make sure we keep going for 100 frames
    can = messaging.new_message('can', 1)
    can.can = [log.CanData(address=1, dat=b'\x00' * 1, src=src) for src in (0, 1)]  # uncommon address
    cases.append((FRAME_FINGERPRINT, None, can))

    # case 3 - multiple matches, make sure we keep going for 200 frames to try to eliminate some
    can = messaging.new_message('can', 1)
    can.can = [log.CanData(address=2016, dat=b'\x00' * 8, src=src) for src in (0, 1)]  # common address
    cases.append((FRAME_FINGERPRINT * 2, None, can))

    for expected_frames, car_model, can in cases:
      with self.subTest(expected_frames=expected_frames, car_model=car_model):
        frames = 0

        def test():
          nonlocal frames
          frames += 1
          return can  # noqa: B023

        car_fingerprint, _ = can_fingerprint(test)
        self.assertEqual(car_fingerprint, car_model)
        self.assertEqual(frames, expected_frames + 2)  # TODO: fix extra frames


if __name__ == "__main__":
  unittest.main()

@@ -22,7 +22,7 @@ from openpilot.selfdrive.test.fuzzy_generation import DrawType, FuzzyGenerator

ALL_ECUS = list({ecu for ecus in FW_VERSIONS.values() for ecu in ecus.keys()})

-MAX_EXAMPLES = int(os.environ.get('MAX_EXAMPLES', '20'))
+MAX_EXAMPLES = int(os.environ.get('MAX_EXAMPLES', '40'))


def get_fuzzy_car_interface_args(draw: DrawType) -> dict:

97 selfdrive/car/tests/test_docs.py Normal file
@@ -0,0 +1,97 @@
#!/usr/bin/env python3
from collections import defaultdict
import os
import re
import unittest

from openpilot.common.basedir import BASEDIR
from openpilot.selfdrive.car.car_helpers import interfaces
from openpilot.selfdrive.car.docs import CARS_MD_OUT, CARS_MD_TEMPLATE, generate_cars_md, get_all_car_info
from openpilot.selfdrive.car.docs_definitions import Cable, Column, PartType, Star
from openpilot.selfdrive.car.honda.values import CAR as HONDA
from openpilot.selfdrive.car.values import PLATFORMS
from openpilot.selfdrive.debug.dump_car_info import dump_car_info
from openpilot.selfdrive.debug.print_docs_diff import print_car_info_diff


class TestCarDocs(unittest.TestCase):
  @classmethod
  def setUpClass(cls):
    cls.all_cars = get_all_car_info()

  def test_generator(self):
    generated_cars_md = generate_cars_md(self.all_cars, CARS_MD_TEMPLATE)
    with open(CARS_MD_OUT) as f:
      current_cars_md = f.read()

    self.assertEqual(generated_cars_md, current_cars_md,
                     "Run selfdrive/car/docs.py to update the compatibility documentation")

  def test_docs_diff(self):
    dump_path = os.path.join(BASEDIR, "selfdrive", "car", "tests", "cars_dump")
    dump_car_info(dump_path)
    print_car_info_diff(dump_path)
    os.remove(dump_path)

  def test_duplicate_years(self):
    make_model_years = defaultdict(list)
    for car in self.all_cars:
      with self.subTest(car_info_name=car.name):
        make_model = (car.make, car.model)
        for year in car.year_list:
          self.assertNotIn(year, make_model_years[make_model], f"{car.name}: Duplicate model year")
          make_model_years[make_model].append(year)

  def test_missing_car_info(self):
    all_car_info_platforms = [name for name, config in PLATFORMS.items()]
    for platform in sorted(interfaces.keys()):
      with self.subTest(platform=platform):
        self.assertTrue(platform in all_car_info_platforms, f"Platform: {platform} doesn't have a CarInfo entry")

  def test_naming_conventions(self):
    # Asserts market-standard car naming conventions by brand
    for car in self.all_cars:
      with self.subTest(car=car):
        tokens = car.model.lower().split(" ")
        if car.car_name == "hyundai":
          self.assertNotIn("phev", tokens, "Use `Plug-in Hybrid`")
          self.assertNotIn("hev", tokens, "Use `Hybrid`")
          if "plug-in hybrid" in car.model.lower():
            self.assertIn("Plug-in Hybrid", car.model, "Use correct capitalization")
          if car.make != "Kia":
            self.assertNotIn("ev", tokens, "Use `Electric`")
        elif car.car_name == "toyota":
          if "rav4" in tokens:
            self.assertIn("RAV4", car.model, "Use correct capitalization")

  def test_torque_star(self):
    # Asserts brand-specific assumptions around steering torque star
    for car in self.all_cars:
      with self.subTest(car=car):
        # honda sanity check, it's the definition of a no torque star
        if car.car_fingerprint in (HONDA.ACCORD, HONDA.CIVIC, HONDA.CRV, HONDA.ODYSSEY, HONDA.PILOT):
          self.assertEqual(car.row[Column.STEERING_TORQUE], Star.EMPTY, f"{car.name} has full torque star")
        elif car.car_name in ("toyota", "hyundai"):
          self.assertNotEqual(car.row[Column.STEERING_TORQUE], Star.EMPTY, f"{car.name} has no torque star")

  def test_year_format(self):
    for car in self.all_cars:
      with self.subTest(car=car):
        self.assertIsNone(re.search(r"\d{4}-\d{4}", car.name), f"Format years correctly: {car.name}")

  def test_harnesses(self):
    for car in self.all_cars:
      with self.subTest(car=car):
        if car.name == "comma body":
          raise unittest.SkipTest

        car_part_type = [p.part_type for p in car.car_parts.all_parts()]
        car_parts = list(car.car_parts.all_parts())
        self.assertTrue(len(car_parts) > 0, f"Need to specify car parts: {car.name}")
        self.assertTrue(car_part_type.count(PartType.connector) == 1, f"Need to specify one harness connector: {car.name}")
        self.assertTrue(car_part_type.count(PartType.mount) == 1, f"Need to specify one mount: {car.name}")
        self.assertTrue(Cable.right_angle_obd_c_cable_1_5ft in car_parts, f"Need to specify a right angle OBD-C cable (1.5ft): {car.name}")


if __name__ == "__main__":
  unittest.main()

96 selfdrive/car/tests/test_fingerprints.py Normal file
@@ -0,0 +1,96 @@
#!/usr/bin/env python3
import os
import sys

from openpilot.common.basedir import BASEDIR

# messages reserved for CAN based ignition (see can_ignition_hook function in panda/board/drivers/can)
# (addr, len)
CAN_IGNITION_MSGS = {
  'gm': [(0x1F1, 8), (0x160, 5)],
  #'tesla' : [(0x348, 8)],
}


def _get_fingerprints():
  # read all the folders in selfdrive/car and return a dict where:
  # - keys are all the car names for which we have a fingerprint dict
  # - values are dicts of fingerprints for each trim
  fingerprints = {}
  for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]:
    car_name = car_folder.split('/')[-1]
    try:
      fingerprints[car_name] = __import__(f'selfdrive.car.{car_name}.values', fromlist=['FINGERPRINTS']).FINGERPRINTS
    except (ImportError, OSError, AttributeError):
      pass

  return fingerprints


def check_fingerprint_consistency(f1, f2):
  # return false if it finds a fingerprint fully included in another
  # max message worth checking is 1800, as above that they usually come too
  # infrequently and aren't usable for fingerprinting
  max_msg = 1800

  is_f1_in_f2 = True
  for k in f1:
    if (k not in f2 or f1[k] != f2[k]) and k < max_msg:
      is_f1_in_f2 = False

  is_f2_in_f1 = True
  for k in f2:
    if (k not in f1 or f2[k] != f1[k]) and k < max_msg:
      is_f2_in_f1 = False

  return not is_f1_in_f2 and not is_f2_in_f1


def check_can_ignition_conflicts(fingerprints, brands):
  # loops through all the fingerprints and exits if CAN ignition dedicated messages
  # are found in unexpected fingerprints
  for brand_can, msgs_can in CAN_IGNITION_MSGS.items():
    for i, f in enumerate(fingerprints):
      for msg_can in msgs_can:
        if brand_can != brands[i] and msg_can[0] in f and msg_can[1] == f[msg_can[0]]:
          print("CAN ignition dedicated msg %d with len %d found in %s fingerprints!" % (msg_can[0], msg_can[1], brands[i]))
          print("TEST FAILED")
          sys.exit(1)


if __name__ == "__main__":
  fingerprints = _get_fingerprints()

  fingerprints_flat: list[dict] = []
  car_names = []
  brand_names = []
  for brand in fingerprints:
    for car in fingerprints[brand]:
      fingerprints_flat += fingerprints[brand][car]
      for _ in range(len(fingerprints[brand][car])):
        car_names.append(car)
        brand_names.append(brand)

  # first check if CAN ignition specific messages are unexpectedly included in other fingerprints
  check_can_ignition_conflicts(fingerprints_flat, brand_names)

  valid = True
  for idx1, f1 in enumerate(fingerprints_flat):
    for idx2, f2 in enumerate(fingerprints_flat):
      if idx1 < idx2 and not check_fingerprint_consistency(f1, f2):
        valid = False
        print(f"Those two fingerprints are inconsistent {car_names[idx1]} {car_names[idx2]}")
        print("")
        print(', '.join("%d: %d" % v for v in sorted(f1.items())))
        print("")
        print(', '.join("%d: %d" % v for v in sorted(f2.items())))
        print("")

  print(f"Found {len(fingerprints_flat)} individual fingerprints")
  if not valid or len(fingerprints_flat) == 0:
    print("TEST FAILED")
    sys.exit(1)
  else:
    print("TEST SUCCESSFUL")

313 selfdrive/car/tests/test_fw_fingerprint.py Normal file
@@ -0,0 +1,313 @@
#!/usr/bin/env python3
import random
import time
import unittest
from collections import defaultdict
from parameterized import parameterized
from unittest import mock

from cereal import car
from openpilot.selfdrive.car.car_helpers import interfaces
from openpilot.selfdrive.car.fingerprints import FW_VERSIONS
from openpilot.selfdrive.car.fw_versions import FW_QUERY_CONFIGS, FUZZY_EXCLUDE_ECUS, VERSIONS, build_fw_dict, \
  match_fw_to_car, get_brand_ecu_matches, get_fw_versions, get_present_ecus
from openpilot.selfdrive.car.vin import get_vin

CarFw = car.CarParams.CarFw
Ecu = car.CarParams.Ecu

ECU_NAME = {v: k for k, v in Ecu.schema.enumerants.items()}


class FakeSocket:
  def receive(self, non_blocking=False):
    pass

  def send(self, msg):
    pass


class TestFwFingerprint(unittest.TestCase):
  def assertFingerprints(self, candidates, expected):
    candidates = list(candidates)
    self.assertEqual(len(candidates), 1, f"got more than one candidate: {candidates}")
    self.assertEqual(candidates[0], expected)

  @parameterized.expand([(b, c, e[c], n) for b, e in VERSIONS.items() for c in e for n in (True, False)])
  def test_exact_match(self, brand, car_model, ecus, test_non_essential):
    config = FW_QUERY_CONFIGS[brand]
    CP = car.CarParams.new_message()
    for _ in range(100):
      fw = []
      for ecu, fw_versions in ecus.items():
        # Assume non-essential ECUs apply to all cars, so we catch cases where Car A with
        # missing ECUs won't match to Car B where only Car B has labeled non-essential ECUs
        if ecu[0] in config.non_essential_ecus and test_non_essential:
          continue

        ecu_name, addr, sub_addr = ecu
        fw.append({"ecu": ecu_name, "fwVersion": random.choice(fw_versions), 'brand': brand,
                   "address": addr, "subAddress": 0 if sub_addr is None else sub_addr})
      CP.carFw = fw
      _, matches = match_fw_to_car(CP.carFw, allow_fuzzy=False)
      if not test_non_essential:
        self.assertFingerprints(matches, car_model)
      else:
        # if we're removing ECUs we expect some match loss, but it shouldn't mismatch
        if len(matches) != 0:
          self.assertFingerprints(matches, car_model)

  @parameterized.expand([(b, c, e[c]) for b, e in VERSIONS.items() for c in e])
  def test_custom_fuzzy_match(self, brand, car_model, ecus):
    # Assert brand-specific fuzzy fingerprinting function doesn't disagree with standard fuzzy function
    config = FW_QUERY_CONFIGS[brand]
    if config.match_fw_to_car_fuzzy is None:
      raise unittest.SkipTest("Brand does not implement custom fuzzy fingerprinting function")

    CP = car.CarParams.new_message()
    for _ in range(5):
      fw = []
      for ecu, fw_versions in ecus.items():
        ecu_name, addr, sub_addr = ecu
        fw.append({"ecu": ecu_name, "fwVersion": random.choice(fw_versions), 'brand': brand,
                   "address": addr, "subAddress": 0 if sub_addr is None else sub_addr})
      CP.carFw = fw
      _, matches = match_fw_to_car(CP.carFw, allow_exact=False, log=False)
      brand_matches = config.match_fw_to_car_fuzzy(build_fw_dict(CP.carFw), VERSIONS[brand])

      # If both have matches, they must agree
      if len(matches) == 1 and len(brand_matches) == 1:
        self.assertEqual(matches, brand_matches)

  @parameterized.expand([(b, c, e[c]) for b, e in VERSIONS.items() for c in e])
  def test_fuzzy_match_ecu_count(self, brand, car_model, ecus):
    # Asserts that fuzzy matching does not count matching FW, but ECU address keys
    valid_ecus = [e for e in ecus if e[0] not in FUZZY_EXCLUDE_ECUS]
    if not len(valid_ecus):
      raise unittest.SkipTest("Car model has no compatible ECUs for fuzzy matching")

    fw = []
    for ecu in valid_ecus:
      ecu_name, addr, sub_addr = ecu
      for _ in range(5):
        # Add multiple FW versions to simulate an ECU responding to multiple queries in a brand
        fw.append({"ecu": ecu_name, "fwVersion": random.choice(ecus[ecu]), 'brand': brand,
                   "address": addr, "subAddress": 0 if sub_addr is None else sub_addr})
    CP = car.CarParams.new_message(carFw=fw)
    _, matches = match_fw_to_car(CP.carFw, allow_exact=False, log=False)

    # Assert no match if there are not enough unique ECUs
    unique_ecus = {(f['address'], f['subAddress']) for f in fw}
    if len(unique_ecus) < 2:
      self.assertEqual(len(matches), 0, car_model)
    # There won't always be a match due to shared FW, but if there is it should be correct
    elif len(matches):
      self.assertFingerprints(matches, car_model)

  def test_fw_version_lists(self):
    for car_model, ecus in FW_VERSIONS.items():
      with self.subTest(car_model=car_model.value):
        for ecu, ecu_fw in ecus.items():
          with self.subTest(ecu):
            duplicates = {fw for fw in ecu_fw if ecu_fw.count(fw) > 1}
            self.assertFalse(len(duplicates), f'{car_model}: Duplicate FW versions: Ecu.{ECU_NAME[ecu[0]]}, {duplicates}')
            self.assertGreater(len(ecu_fw), 0, f'{car_model}: No FW versions: Ecu.{ECU_NAME[ecu[0]]}')

  def test_all_addrs_map_to_one_ecu(self):
    for brand, cars in VERSIONS.items():
      addr_to_ecu = defaultdict(set)
      for ecus in cars.values():
        for ecu_type, addr, sub_addr in ecus.keys():
          addr_to_ecu[(addr, sub_addr)].add(ecu_type)
          ecus_for_addr = addr_to_ecu[(addr, sub_addr)]
          ecu_strings = ", ".join([f'Ecu.{ECU_NAME[ecu]}' for ecu in ecus_for_addr])
          self.assertLessEqual(len(ecus_for_addr), 1, f"{brand} has multiple ECUs that map to one address: {ecu_strings} -> ({hex(addr)}, {sub_addr})")

  def test_data_collection_ecus(self):
    # Asserts no extra ECUs are in the fingerprinting database
    for brand, config in FW_QUERY_CONFIGS.items():
      for car_model, ecus in VERSIONS[brand].items():
        bad_ecus = set(ecus).intersection(config.extra_ecus)
        with self.subTest(car_model=car_model.value):
          self.assertFalse(len(bad_ecus), f'{car_model}: Fingerprints contain ECUs added for data collection: {bad_ecus}')

  def test_blacklisted_ecus(self):
    blacklisted_addrs = (0x7c4, 0x7d0)  # includes A/C ecu and an unknown ecu
    for car_model, ecus in FW_VERSIONS.items():
      with self.subTest(car_model=car_model.value):
        CP = interfaces[car_model][0].get_non_essential_params(car_model)
        if CP.carName == 'subaru':
          for ecu in ecus.keys():
            self.assertNotIn(ecu[1], blacklisted_addrs, f'{car_model}: Blacklisted ecu: (Ecu.{ECU_NAME[ecu[0]]}, {hex(ecu[1])})')

        elif CP.carName == "chrysler":
          # Some HD trucks have a combined TCM and ECM
          if CP.carFingerprint.startswith("RAM HD"):
            for ecu in ecus.keys():
              self.assertNotEqual(ecu[0], Ecu.transmission, f"{car_model}: Blacklisted ecu: (Ecu.{ECU_NAME[ecu[0]]}, {hex(ecu[1])})")

  def test_missing_versions_and_configs(self):
    brand_versions = set(VERSIONS.keys())
    brand_configs = set(FW_QUERY_CONFIGS.keys())
    if len(brand_configs - brand_versions):
      with self.subTest():
        self.fail(f"Brands do not implement FW_VERSIONS: {brand_configs - brand_versions}")

    if len(brand_versions - brand_configs):
      with self.subTest():
        self.fail(f"Brands do not implement FW_QUERY_CONFIG: {brand_versions - brand_configs}")

    # Ensure each brand has at least 1 ECU to query, and that extra ECU retrieval works
    for brand, config in FW_QUERY_CONFIGS.items():
      self.assertEqual(len(config.get_all_ecus({}, include_extra_ecus=False)), 0)
      self.assertEqual(config.get_all_ecus({}), set(config.extra_ecus))
      self.assertGreater(len(config.get_all_ecus(VERSIONS[brand])), 0)

  def test_fw_request_ecu_whitelist(self):
    for brand, config in FW_QUERY_CONFIGS.items():
      with self.subTest(brand=brand):
        whitelisted_ecus = {ecu for r in config.requests for ecu in r.whitelist_ecus}
        brand_ecus = {fw[0] for car_fw in VERSIONS[brand].values() for fw in car_fw}
        brand_ecus |= {ecu[0] for ecu in config.extra_ecus}

        # each ecu in brand's fw versions + extra ecus needs to be whitelisted at least once
        ecus_not_whitelisted = brand_ecus - whitelisted_ecus

        ecu_strings = ", ".join([f'Ecu.{ECU_NAME[ecu]}' for ecu in ecus_not_whitelisted])
        self.assertFalse(len(whitelisted_ecus) and len(ecus_not_whitelisted),
                         f'{brand.title()}: ECUs not in any FW query whitelists: {ecu_strings}')

  def test_fw_requests(self):
    # Asserts equal length request and response lists
    for brand, config in FW_QUERY_CONFIGS.items():
      with self.subTest(brand=brand):
        for request_obj in config.requests:
          self.assertEqual(len(request_obj.request), len(request_obj.response))

          # No request on the OBD port (bus 1, multiplexed) should be run on an aux panda
          self.assertFalse(request_obj.auxiliary and request_obj.bus == 1 and request_obj.obd_multiplexing,
                           f"{brand.title()}: OBD multiplexed request is marked auxiliary: {request_obj}")

  def test_brand_ecu_matches(self):
    empty_response = {brand: set() for brand in FW_QUERY_CONFIGS}
    self.assertEqual(get_brand_ecu_matches(set()), empty_response)

    # we ignore bus
    expected_response = empty_response | {'toyota': {(0x750, 0xf)}}
    self.assertEqual(get_brand_ecu_matches({(0x758, 0xf, 99)}), expected_response)


class TestFwFingerprintTiming(unittest.TestCase):
  N: int = 5
  TOL: float = 0.05

  # for patched functions
  current_obd_multiplexing: bool
  total_time: float

  def fake_set_obd_multiplexing(self, _, obd_multiplexing):
    """The 10Hz blocking params loop adds on average 50ms to the query time for each OBD multiplexing change"""
    if obd_multiplexing != self.current_obd_multiplexing:
      self.current_obd_multiplexing = obd_multiplexing
      self.total_time += 0.1 / 2

  def fake_get_data(self, timeout):
    self.total_time += timeout
    return {}

  def _benchmark_brand(self, brand, num_pandas):
    fake_socket = FakeSocket()
    self.total_time = 0
    with (mock.patch("openpilot.selfdrive.car.fw_versions.set_obd_multiplexing", self.fake_set_obd_multiplexing),
          mock.patch("openpilot.selfdrive.car.isotp_parallel_query.IsoTpParallelQuery.get_data", self.fake_get_data)):
      for _ in range(self.N):
        # Treat each brand as the most likely (aka, the first) brand with OBD multiplexing initially on
        self.current_obd_multiplexing = True

        t = time.perf_counter()
        get_fw_versions(fake_socket, fake_socket, brand, num_pandas=num_pandas)
        self.total_time += time.perf_counter() - t

    return self.total_time / self.N

  def _assert_timing(self, avg_time, ref_time):
    self.assertLess(avg_time, ref_time + self.TOL)
    self.assertGreater(avg_time, ref_time - self.TOL, "Performance seems to have improved, update test refs.")

  def test_startup_timing(self):
    # Tests worst-case VIN query time and typical present ECU query time
    vin_ref_times = {'worst': 1.2, 'best': 0.6}  # best assumes we go through all queries to get a match
    present_ecu_ref_time = 0.75

    def fake_get_ecu_addrs(*_, timeout):
      self.total_time += timeout
      return set()

    fake_socket = FakeSocket()
    self.total_time = 0.0
    with (mock.patch("openpilot.selfdrive.car.fw_versions.set_obd_multiplexing", self.fake_set_obd_multiplexing),
          mock.patch("openpilot.selfdrive.car.fw_versions.get_ecu_addrs", fake_get_ecu_addrs)):
      for _ in range(self.N):
        self.current_obd_multiplexing = True
        get_present_ecus(fake_socket, fake_socket, num_pandas=2)
    self._assert_timing(self.total_time / self.N, present_ecu_ref_time)
    print(f'get_present_ecus, query time={self.total_time / self.N} seconds')

    for name, args in (('worst', {}), ('best', {'retry': 1})):
      with self.subTest(name=name):
        self.total_time = 0.0
        with (mock.patch("openpilot.selfdrive.car.isotp_parallel_query.IsoTpParallelQuery.get_data", self.fake_get_data)):
          for _ in range(self.N):
            get_vin(fake_socket, fake_socket, (0, 1), **args)
        self._assert_timing(self.total_time / self.N, vin_ref_times[name])
        print(f'get_vin {name} case, query time={self.total_time / self.N} seconds')

  def test_fw_query_timing(self):
    total_ref_time = {1: 8.4, 2: 9.3}
    brand_ref_times = {
      1: {
        'gm': 1.0,
        'body': 0.1,
        'chrysler': 0.3,
        'ford': 1.5,
        'honda': 0.55,
        'hyundai': 1.05,
        'mazda': 0.1,
        'nissan': 0.8,
        'subaru': 0.45,
        'tesla': 0.3,
        'toyota': 1.6,
        'volkswagen': 0.65,
      },
      2: {
        'ford': 1.6,
        'hyundai': 1.85,
        'tesla': 0.3,
      }
    }

    total_times = {1: 0.0, 2: 0.0}
    for num_pandas in (1, 2):
      for brand, config in FW_QUERY_CONFIGS.items():
        with self.subTest(brand=brand, num_pandas=num_pandas):
          avg_time = self._benchmark_brand(brand, num_pandas)
          total_times[num_pandas] += avg_time
          avg_time = round(avg_time, 2)

          ref_time = brand_ref_times[num_pandas].get(brand)
          if ref_time is None:
            # ref time should be same as 1 panda if no aux queries
            ref_time = brand_ref_times[num_pandas - 1][brand]

          self._assert_timing(avg_time, ref_time)
          print(f'{brand=}, {num_pandas=}, {len(config.requests)=}, avg FW query time={avg_time} seconds')

    for num_pandas in (1, 2):
      with self.subTest(brand='all_brands', num_pandas=num_pandas):
        total_time = round(total_times[num_pandas], 2)
        self._assert_timing(total_time, total_ref_time[num_pandas])
        print(f'all brands, total FW query time={total_time} seconds')


if __name__ == "__main__":
  unittest.main()

101 selfdrive/car/tests/test_lateral_limits.py Normal file
@@ -0,0 +1,101 @@
#!/usr/bin/env python3
from collections import defaultdict
import importlib
from parameterized import parameterized_class
import sys
import unittest

from openpilot.common.realtime import DT_CTRL
from openpilot.selfdrive.car.car_helpers import interfaces
from openpilot.selfdrive.car.fingerprints import all_known_cars
from openpilot.selfdrive.car.interfaces import get_torque_params
from openpilot.selfdrive.car.subaru.values import CAR as SUBARU

CAR_MODELS = all_known_cars()

# ISO 11270 - allowed up jerk is strictly lower than recommended limits
MAX_LAT_ACCEL = 3.0              # m/s^2
MAX_LAT_JERK_UP = 2.5            # m/s^3
MAX_LAT_JERK_DOWN = 5.0          # m/s^3
MAX_LAT_JERK_UP_TOLERANCE = 0.5  # m/s^3

# jerk is measured over half a second
JERK_MEAS_T = 0.5

# TODO: put these cars within limits
ABOVE_LIMITS_CARS = [
  SUBARU.LEGACY,
  SUBARU.OUTBACK,
]

car_model_jerks: defaultdict[str, dict[str, float]] = defaultdict(dict)


@parameterized_class('car_model', [(c,) for c in sorted(CAR_MODELS)])
class TestLateralLimits(unittest.TestCase):
  car_model: str

  @classmethod
  def setUpClass(cls):
    CarInterface, _, _ = interfaces[cls.car_model]
    CP = CarInterface.get_non_essential_params(cls.car_model)

    if CP.dashcamOnly:
      raise unittest.SkipTest("Platform is behind dashcamOnly")

    # TODO: test all platforms
    if CP.lateralTuning.which() != 'torque':
      raise unittest.SkipTest

    if CP.notCar:
      raise unittest.SkipTest

    if CP.carFingerprint in ABOVE_LIMITS_CARS:
      raise unittest.SkipTest

    CarControllerParams = importlib.import_module(f'selfdrive.car.{CP.carName}.values').CarControllerParams
    cls.control_params = CarControllerParams(CP)
    cls.torque_params = get_torque_params(cls.car_model)

  @staticmethod
  def calculate_0_5s_jerk(control_params, torque_params):
    steer_step = control_params.STEER_STEP
    max_lat_accel = torque_params['MAX_LAT_ACCEL_MEASURED']

    # Steer up/down delta per 10ms frame, in percentage of max torque
    steer_up_per_frame = control_params.STEER_DELTA_UP / control_params.STEER_MAX / steer_step
    steer_down_per_frame = control_params.STEER_DELTA_DOWN / control_params.STEER_MAX / steer_step

    # Lateral acceleration reached in 0.5 seconds, clipping to max torque
    accel_up_0_5_sec = min(steer_up_per_frame * JERK_MEAS_T / DT_CTRL, 1.0) * max_lat_accel
    accel_down_0_5_sec = min(steer_down_per_frame * JERK_MEAS_T / DT_CTRL, 1.0) * max_lat_accel

    # Convert to m/s^3
    return accel_up_0_5_sec / JERK_MEAS_T, accel_down_0_5_sec / JERK_MEAS_T

  def test_jerk_limits(self):
    up_jerk, down_jerk = self.calculate_0_5s_jerk(self.control_params, self.torque_params)
    car_model_jerks[self.car_model] = {"up_jerk": up_jerk, "down_jerk": down_jerk}
    self.assertLessEqual(up_jerk, MAX_LAT_JERK_UP + MAX_LAT_JERK_UP_TOLERANCE)
    self.assertLessEqual(down_jerk, MAX_LAT_JERK_DOWN)

  def test_max_lateral_accel(self):
    self.assertLessEqual(self.torque_params["MAX_LAT_ACCEL_MEASURED"], MAX_LAT_ACCEL)


if __name__ == "__main__":
  result = unittest.main(exit=False)

  print(f"\n\n---- Lateral limit report ({len(CAR_MODELS)} cars) ----\n")

  max_car_model_len = max([len(car_model) for car_model in car_model_jerks])
  for car_model, _jerks in sorted(car_model_jerks.items(), key=lambda i: i[1]['up_jerk'], reverse=True):
    violation = _jerks["up_jerk"] > MAX_LAT_JERK_UP + MAX_LAT_JERK_UP_TOLERANCE or \
                _jerks["down_jerk"] > MAX_LAT_JERK_DOWN
    violation_str = " - VIOLATION" if violation else ""

    print(f"{car_model:{max_car_model_len}} - up jerk: {round(_jerks['up_jerk'], 2):5} " +
          f"m/s^3, down jerk: {round(_jerks['down_jerk'], 2):5} m/s^3{violation_str}")

  # exit with test result
  sys.exit(not result.result.wasSuccessful())
488
selfdrive/car/tests/test_models.py
Normal file
488
selfdrive/car/tests/test_models.py
Normal file
@@ -0,0 +1,488 @@
|
||||
#!/usr/bin/env python3
|
||||
import capnp
|
||||
import os
|
||||
import importlib
|
||||
import pytest
|
||||
import random
|
||||
import unittest
|
||||
from collections import defaultdict, Counter
|
||||
import hypothesis.strategies as st
|
||||
from hypothesis import Phase, given, settings
|
||||
from parameterized import parameterized_class
|
||||
|
||||
from cereal import messaging, log, car
|
||||
from openpilot.common.basedir import BASEDIR
|
||||
from openpilot.common.params import Params
|
||||
from openpilot.common.realtime import DT_CTRL
|
||||
from openpilot.selfdrive.car import gen_empty_fingerprint
|
||||
from openpilot.selfdrive.car.fingerprints import all_known_cars
|
||||
from openpilot.selfdrive.car.car_helpers import FRAME_FINGERPRINT, interfaces
|
||||
from openpilot.selfdrive.car.honda.values import CAR as HONDA, HondaFlags
|
||||
from openpilot.selfdrive.car.tests.routes import non_tested_cars, routes, CarTestRoute
|
||||
from openpilot.selfdrive.controls.controlsd import Controls
|
||||
from openpilot.selfdrive.test.helpers import read_segment_list
|
||||
from openpilot.system.hardware.hw import DEFAULT_DOWNLOAD_CACHE_ROOT
|
||||
from openpilot.tools.lib.logreader import LogReader, internal_source, openpilotci_source
|
||||
from openpilot.tools.lib.route import SegmentName
|
||||
|
||||
from panda.tests.libpanda import libpanda_py
|
||||
|
||||
EventName = car.CarEvent.EventName
|
||||
PandaType = log.PandaState.PandaType
|
||||
SafetyModel = car.CarParams.SafetyModel
|
||||
|
||||
NUM_JOBS = int(os.environ.get("NUM_JOBS", "1"))
|
||||
JOB_ID = int(os.environ.get("JOB_ID", "0"))
|
||||
INTERNAL_SEG_LIST = os.environ.get("INTERNAL_SEG_LIST", "")
|
||||
INTERNAL_SEG_CNT = int(os.environ.get("INTERNAL_SEG_CNT", "0"))
|
||||
MAX_EXAMPLES = int(os.environ.get("MAX_EXAMPLES", "300"))
|
||||
CI = os.environ.get("CI", None) is not None
|
||||
|
||||
|
||||
def get_test_cases() -> list[tuple[str, CarTestRoute | None]]:
  # build list of test cases
  test_cases = []
  if not len(INTERNAL_SEG_LIST):
    routes_by_car = defaultdict(set)
    for r in routes:
      routes_by_car[r.car_model].add(r)

    for i, c in enumerate(sorted(all_known_cars())):
      if i % NUM_JOBS == JOB_ID:
        test_cases.extend(sorted((c, r) for r in routes_by_car.get(c, (None,))))

  else:
    segment_list = read_segment_list(os.path.join(BASEDIR, INTERNAL_SEG_LIST))
    segment_list = random.sample(segment_list, INTERNAL_SEG_CNT or len(segment_list))
    for platform, segment in segment_list:
      segment_name = SegmentName(segment)
      test_cases.append((platform, CarTestRoute(segment_name.route_name.canonical_name, platform,
                                                segment=segment_name.segment_num)))
  return test_cases

@pytest.mark.slow
@pytest.mark.shared_download_cache
class TestCarModelBase(unittest.TestCase):
  car_model: str | None = None
  test_route: CarTestRoute | None = None
  test_route_on_bucket: bool = True  # whether the route is on the preserved CI bucket

  can_msgs: list[capnp.lib.capnp._DynamicStructReader]
  fingerprint: dict[int, dict[int, int]]
  elm_frame: int | None
  car_safety_mode_frame: int | None

  @classmethod
  def get_testing_data_from_logreader(cls, lr):
    car_fw = []
    can_msgs = []
    cls.elm_frame = None
    cls.car_safety_mode_frame = None
    cls.fingerprint = gen_empty_fingerprint()
    experimental_long = False
    for msg in lr:
      if msg.which() == "can":
        can_msgs.append(msg)
        if len(can_msgs) <= FRAME_FINGERPRINT:
          for m in msg.can:
            if m.src < 64:
              cls.fingerprint[m.src][m.address] = len(m.dat)

      elif msg.which() == "carParams":
        car_fw = msg.carParams.carFw
        if msg.carParams.openpilotLongitudinalControl:
          experimental_long = True
        if cls.car_model is None and not cls.ci:
          cls.car_model = msg.carParams.carFingerprint

      # Log which can frame the panda safety mode left ELM327, for CAN validity checks
      elif msg.which() == 'pandaStates':
        for ps in msg.pandaStates:
          if cls.elm_frame is None and ps.safetyModel != SafetyModel.elm327:
            cls.elm_frame = len(can_msgs)
          if cls.car_safety_mode_frame is None and ps.safetyModel not in \
            (SafetyModel.elm327, SafetyModel.noOutput):
            cls.car_safety_mode_frame = len(can_msgs)

      elif msg.which() == 'pandaStateDEPRECATED':
        if cls.elm_frame is None and msg.pandaStateDEPRECATED.safetyModel != SafetyModel.elm327:
          cls.elm_frame = len(can_msgs)
        if cls.car_safety_mode_frame is None and msg.pandaStateDEPRECATED.safetyModel not in \
          (SafetyModel.elm327, SafetyModel.noOutput):
          cls.car_safety_mode_frame = len(can_msgs)

    if len(can_msgs) > int(50 / DT_CTRL):
      return car_fw, can_msgs, experimental_long

    raise Exception("no can data found")

  @classmethod
  def get_testing_data(cls):
    test_segs = (2, 1, 0)
    if cls.test_route.segment is not None:
      test_segs = (cls.test_route.segment,)

    is_internal = len(INTERNAL_SEG_LIST)

    for seg in test_segs:
      segment_range = f"{cls.test_route.route}/{seg}"

      try:
        lr = LogReader(segment_range, default_source=internal_source if is_internal else openpilotci_source)
        return cls.get_testing_data_from_logreader(lr)
      except Exception:
        pass

    # Route is not in CI bucket, assume either user has access (private), or it is public
    # test_route_on_ci_bucket will fail when running in CI
    if not is_internal:
      cls.test_route_on_bucket = False

      for seg in test_segs:
        segment_range = f"{cls.test_route.route}/{seg}"
        try:
          lr = LogReader(segment_range)
          return cls.get_testing_data_from_logreader(lr)
        except Exception:
          pass

    raise Exception(f"Route: {repr(cls.test_route.route)} with segments: {test_segs} not found or no CAN msgs found. Is it uploaded and public?")

  @classmethod
  def setUpClass(cls):
    if cls.__name__ == 'TestCarModel' or cls.__name__.endswith('Base'):
      raise unittest.SkipTest

    if 'FILTER' in os.environ:
      if not cls.car_model.startswith(tuple(os.environ.get('FILTER').split(','))):
        raise unittest.SkipTest

    if cls.test_route is None:
      if cls.car_model in non_tested_cars:
        print(f"Skipping tests for {cls.car_model}: missing route")
        raise unittest.SkipTest
      raise Exception(f"missing test route for {cls.car_model}")

    car_fw, can_msgs, experimental_long = cls.get_testing_data()

    # if relay is expected to be open in the route
    cls.openpilot_enabled = cls.car_safety_mode_frame is not None

    cls.can_msgs = sorted(can_msgs, key=lambda msg: msg.logMonoTime)

    cls.CarInterface, cls.CarController, cls.CarState = interfaces[cls.car_model]
    cls.CP = cls.CarInterface.get_params(cls.car_model, cls.fingerprint, car_fw, experimental_long, docs=False)
    assert cls.CP
    assert cls.CP.carFingerprint == cls.car_model

    os.environ["COMMA_CACHE"] = DEFAULT_DOWNLOAD_CACHE_ROOT

  @classmethod
  def tearDownClass(cls):
    del cls.can_msgs

  def setUp(self):
    self.CI = self.CarInterface(self.CP.copy(), self.CarController, self.CarState)
    assert self.CI

    Params().put_bool("OpenpilotEnabledToggle", self.openpilot_enabled)

    # TODO: check safetyModel is in release panda build
    self.safety = libpanda_py.libpanda

    cfg = self.CP.safetyConfigs[-1]
    set_status = self.safety.set_safety_hooks(cfg.safetyModel.raw, cfg.safetyParam)
    self.assertEqual(0, set_status, f"failed to set safetyModel {cfg}")
    self.safety.init_tests()

  def test_car_params(self):
    if self.CP.dashcamOnly:
      self.skipTest("no need to check carParams for dashcamOnly")

    # make sure car params are within a valid range
    self.assertGreater(self.CP.mass, 1)

    if self.CP.steerControlType != car.CarParams.SteerControlType.angle:
      tuning = self.CP.lateralTuning.which()
      if tuning == 'pid':
        self.assertTrue(len(self.CP.lateralTuning.pid.kpV))
      elif tuning == 'torque':
        self.assertTrue(self.CP.lateralTuning.torque.kf > 0)
      else:
        raise Exception("unknown tuning")

  def test_car_interface(self):
    # TODO: also check for checksum violations from can parser
    can_invalid_cnt = 0
    can_valid = False
    CC = car.CarControl.new_message()

    for i, msg in enumerate(self.can_msgs):
      CS = self.CI.update(CC, (msg.as_builder().to_bytes(),))
      self.CI.apply(CC, msg.logMonoTime)

      if CS.canValid:
        can_valid = True

      # wait max of 2s for low frequency msgs to be seen
      if i > 200 or can_valid:
        can_invalid_cnt += not CS.canValid

    self.assertEqual(can_invalid_cnt, 0)

  def test_radar_interface(self):
    RadarInterface = importlib.import_module(f'selfdrive.car.{self.CP.carName}.radar_interface').RadarInterface
    RI = RadarInterface(self.CP)
    assert RI

    # Since OBD port is multiplexed to bus 1 (commonly radar bus) while fingerprinting,
    # start parsing CAN messages after we've left ELM mode and can expect CAN traffic
    error_cnt = 0
    for i, msg in enumerate(self.can_msgs[self.elm_frame:]):
      rr = RI.update((msg.as_builder().to_bytes(),))
      if rr is not None and i > 50:
        error_cnt += car.RadarData.Error.canError in rr.errors
    self.assertEqual(error_cnt, 0)

  def test_panda_safety_rx_checks(self):
    if self.CP.dashcamOnly:
      self.skipTest("no need to check panda safety for dashcamOnly")

    start_ts = self.can_msgs[0].logMonoTime

    failed_addrs = Counter()
    for can in self.can_msgs:
      # update panda timer
      t = (can.logMonoTime - start_ts) / 1e3
      self.safety.set_timer(int(t))

      # run all msgs through the safety RX hook
      for msg in can.can:
        if msg.src >= 64:
          continue

        to_send = libpanda_py.make_CANPacket(msg.address, msg.src % 4, msg.dat)
        if self.safety.safety_rx_hook(to_send) != 1:
          failed_addrs[hex(msg.address)] += 1

      # ensure all msgs defined in the addr checks are valid
      self.safety.safety_tick_current_safety_config()
      if t > 1e6:
        self.assertTrue(self.safety.safety_config_valid())

      # Don't check relay malfunction on disabled routes (relay closed),
      # or before fingerprinting is done (elm327 and noOutput)
      if self.openpilot_enabled and t / 1e4 > self.car_safety_mode_frame:
        self.assertFalse(self.safety.get_relay_malfunction())
      else:
        self.safety.set_relay_malfunction(False)

    self.assertFalse(len(failed_addrs), f"panda safety RX check failed: {failed_addrs}")

    # ensure RX checks go invalid after small time with no traffic
    self.safety.set_timer(int(t + (2*1e6)))
    self.safety.safety_tick_current_safety_config()
    self.assertFalse(self.safety.safety_config_valid())

  def test_panda_safety_tx_cases(self, data=None):
    """Asserts we can tx common messages"""
    if self.CP.notCar:
      self.skipTest("Skipping test for notCar")

    def test_car_controller(car_control):
      now_nanos = 0
      msgs_sent = 0
      CI = self.CarInterface(self.CP, self.CarController, self.CarState)
      for _ in range(round(10.0 / DT_CTRL)):  # make sure we hit the slowest messages
        CI.update(car_control, [])
        _, sendcan = CI.apply(car_control, now_nanos)

        now_nanos += DT_CTRL * 1e9
        msgs_sent += len(sendcan)
        for addr, _, dat, bus in sendcan:
          to_send = libpanda_py.make_CANPacket(addr, bus % 4, dat)
          self.assertTrue(self.safety.safety_tx_hook(to_send), (addr, dat, bus))

      # Make sure we attempted to send messages
      self.assertGreater(msgs_sent, 50)

    # Make sure we can send all messages while inactive
    CC = car.CarControl.new_message()
    test_car_controller(CC)

    # Test cancel + general messages (controls_allowed=False & cruise_engaged=True)
    self.safety.set_cruise_engaged_prev(True)
    CC = car.CarControl.new_message(cruiseControl={'cancel': True})
    test_car_controller(CC)

    # Test resume + general messages (controls_allowed=True & cruise_engaged=True)
    self.safety.set_controls_allowed(True)
    CC = car.CarControl.new_message(cruiseControl={'resume': True})
    test_car_controller(CC)

  # Skip stdout/stderr capture with pytest, causes elevated memory usage
  @pytest.mark.nocapture
  @settings(max_examples=MAX_EXAMPLES, deadline=None,
            phases=(Phase.reuse, Phase.generate, Phase.shrink))
  @given(data=st.data())
  def test_panda_safety_carstate_fuzzy(self, data):
    """
      For each example, pick a random CAN message on the bus and fuzz its data,
      checking for panda state mismatches.
    """

    if self.CP.dashcamOnly:
      self.skipTest("no need to check panda safety for dashcamOnly")

    valid_addrs = [(addr, bus, size) for bus, addrs in self.fingerprint.items() for addr, size in addrs.items()]
    address, bus, size = data.draw(st.sampled_from(valid_addrs))

    msg_strategy = st.binary(min_size=size, max_size=size)
    msgs = data.draw(st.lists(msg_strategy, min_size=20))

    CC = car.CarControl.new_message()

    for dat in msgs:
      # due to panda updating state selectively, only edges are expected to match
      # TODO: warm up CarState with real CAN messages to check edge of both sources
      # (eg. toyota's gasPressed is the inverse of a signal being set)
      prev_panda_gas = self.safety.get_gas_pressed_prev()
      prev_panda_brake = self.safety.get_brake_pressed_prev()
      prev_panda_regen_braking = self.safety.get_regen_braking_prev()
      prev_panda_vehicle_moving = self.safety.get_vehicle_moving()
      prev_panda_cruise_engaged = self.safety.get_cruise_engaged_prev()
      prev_panda_acc_main_on = self.safety.get_acc_main_on()

      to_send = libpanda_py.make_CANPacket(address, bus, dat)
      self.safety.safety_rx_hook(to_send)

      can = messaging.new_message('can', 1)
      can.can = [log.CanData(address=address, dat=dat, src=bus)]

      CS = self.CI.update(CC, (can.to_bytes(),))

      if self.safety.get_gas_pressed_prev() != prev_panda_gas:
        self.assertEqual(CS.gasPressed, self.safety.get_gas_pressed_prev())

      if self.safety.get_brake_pressed_prev() != prev_panda_brake:
        # TODO: remove this exception once this mismatch is resolved
        brake_pressed = CS.brakePressed
        if CS.brakePressed and not self.safety.get_brake_pressed_prev():
          if self.CP.carFingerprint in (HONDA.PILOT, HONDA.RIDGELINE) and CS.brake > 0.05:
            brake_pressed = False

        self.assertEqual(brake_pressed, self.safety.get_brake_pressed_prev())

      if self.safety.get_regen_braking_prev() != prev_panda_regen_braking:
        self.assertEqual(CS.regenBraking, self.safety.get_regen_braking_prev())

      if self.safety.get_vehicle_moving() != prev_panda_vehicle_moving:
        self.assertEqual(not CS.standstill, self.safety.get_vehicle_moving())

      if not (self.CP.carName == "honda" and not (self.CP.flags & HondaFlags.BOSCH)):
        if self.safety.get_cruise_engaged_prev() != prev_panda_cruise_engaged:
          self.assertEqual(CS.cruiseState.enabled, self.safety.get_cruise_engaged_prev())

      if self.CP.carName == "honda":
        if self.safety.get_acc_main_on() != prev_panda_acc_main_on:
          self.assertEqual(CS.cruiseState.available, self.safety.get_acc_main_on())

  def test_panda_safety_carstate(self):
    """
      Assert that panda safety matches openpilot's carState
    """
    if self.CP.dashcamOnly:
      self.skipTest("no need to check panda safety for dashcamOnly")

    CC = car.CarControl.new_message()

    # warm up pass, as initial states may be different
    for can in self.can_msgs[:300]:
      self.CI.update(CC, (can.as_builder().to_bytes(), ))
      for msg in filter(lambda m: m.src in range(64), can.can):
        to_send = libpanda_py.make_CANPacket(msg.address, msg.src % 4, msg.dat)
        self.safety.safety_rx_hook(to_send)

    controls_allowed_prev = False
    CS_prev = car.CarState.new_message()
    checks = defaultdict(int)
    controlsd = Controls(CI=self.CI)
    controlsd.initialized = True
    for idx, can in enumerate(self.can_msgs):
      CS = self.CI.update(CC, (can.as_builder().to_bytes(), ))
      for msg in filter(lambda m: m.src in range(64), can.can):
        to_send = libpanda_py.make_CANPacket(msg.address, msg.src % 4, msg.dat)
        ret = self.safety.safety_rx_hook(to_send)
        self.assertEqual(1, ret, f"safety rx failed ({ret=}): {to_send}")

      # Skip first frame so CS_prev is properly initialized
      if idx == 0:
        CS_prev = CS
        # Button may be left pressed in warm up period
        if not self.CP.pcmCruise:
          self.safety.set_controls_allowed(0)
        continue

      # TODO: check rest of panda's carstate (steering, ACC main on, etc.)

      checks['gasPressed'] += CS.gasPressed != self.safety.get_gas_pressed_prev()
      checks['standstill'] += CS.standstill == self.safety.get_vehicle_moving()

      # TODO: remove this exception once this mismatch is resolved
      brake_pressed = CS.brakePressed
      if CS.brakePressed and not self.safety.get_brake_pressed_prev():
        if self.CP.carFingerprint in (HONDA.PILOT, HONDA.RIDGELINE) and CS.brake > 0.05:
          brake_pressed = False
      checks['brakePressed'] += brake_pressed != self.safety.get_brake_pressed_prev()
      checks['regenBraking'] += CS.regenBraking != self.safety.get_regen_braking_prev()

      if self.CP.pcmCruise:
        # On most pcmCruise cars, openpilot's state is always tied to the PCM's cruise state.
        # On Honda Nidec, we always engage on the rising edge of the PCM cruise state, but
        # openpilot brakes to zero even if the min ACC speed is non-zero (i.e. the PCM disengages).
        if self.CP.carName == "honda" and not (self.CP.flags & HondaFlags.BOSCH):
          # only the rising edges are expected to match
          if CS.cruiseState.enabled and not CS_prev.cruiseState.enabled:
            checks['controlsAllowed'] += not self.safety.get_controls_allowed()
        else:
          checks['controlsAllowed'] += not CS.cruiseState.enabled and self.safety.get_controls_allowed()

        # TODO: fix notCar mismatch
        if not self.CP.notCar:
          checks['cruiseState'] += CS.cruiseState.enabled != self.safety.get_cruise_engaged_prev()
      else:
        # Check for enable events on rising edge of controls allowed
        controlsd.update_events(CS)
        controlsd.CS_prev = CS
        button_enable = (any(evt.enable for evt in CS.events) and
                         not any(evt == EventName.pedalPressed for evt in controlsd.events.names))
        mismatch = button_enable != (self.safety.get_controls_allowed() and not controls_allowed_prev)
        checks['controlsAllowed'] += mismatch
        controls_allowed_prev = self.safety.get_controls_allowed()
        if button_enable and not mismatch:
          self.safety.set_controls_allowed(False)

      if self.CP.carName == "honda":
        checks['mainOn'] += CS.cruiseState.available != self.safety.get_acc_main_on()

      CS_prev = CS

    failed_checks = {k: v for k, v in checks.items() if v > 0}
    self.assertFalse(len(failed_checks), f"panda safety doesn't agree with openpilot: {failed_checks}")

  @unittest.skipIf(not CI, "Accessing non CI-bucket routes is allowed only when not in CI")
  def test_route_on_ci_bucket(self):
    self.assertTrue(self.test_route_on_bucket, "Route not on CI bucket. " +
                    "This is fine to fail for WIP car ports, just let us know and we can upload your routes to the CI bucket.")


@parameterized_class(('car_model', 'test_route'), get_test_cases())
@pytest.mark.xdist_group_class_property('test_route')
class TestCarModel(TestCarModelBase):
  pass


if __name__ == "__main__":
  unittest.main()
3884
selfdrive/car/tests/test_models_segs.txt
Normal file
File diff suppressed because it is too large
23
selfdrive/car/tests/test_platform_configs.py
Normal file
@@ -0,0 +1,23 @@
#!/usr/bin/env python3

import unittest

from openpilot.selfdrive.car.values import PLATFORMS


class TestPlatformConfigs(unittest.TestCase):
  def test_configs(self):

    for platform in PLATFORMS.values():
      with self.subTest(platform=str(platform)):
        self.assertTrue(platform.config._frozen)

        if platform != "mock":
          self.assertIn("pt", platform.config.dbc_dict)
        self.assertTrue(len(platform.config.platform_str) > 0)

        self.assertIsNotNone(platform.config.specs)


if __name__ == "__main__":
  unittest.main()
0
selfdrive/controls/tests/__init__.py
Normal file
135
selfdrive/controls/tests/test_alerts.py
Normal file
@@ -0,0 +1,135 @@
#!/usr/bin/env python3
import copy
import json
import os
import unittest
import random
from PIL import Image, ImageDraw, ImageFont

from cereal import log, car
from cereal.messaging import SubMaster
from openpilot.common.basedir import BASEDIR
from openpilot.common.params import Params
from openpilot.selfdrive.controls.lib.events import Alert, EVENTS, ET
from openpilot.selfdrive.controls.lib.alertmanager import set_offroad_alert
from openpilot.selfdrive.test.process_replay.process_replay import CONFIGS

AlertSize = log.ControlsState.AlertSize

OFFROAD_ALERTS_PATH = os.path.join(BASEDIR, "selfdrive/controls/lib/alerts_offroad.json")

# TODO: add callback alerts
ALERTS = []
for event_types in EVENTS.values():
  for alert in event_types.values():
    ALERTS.append(alert)


class TestAlerts(unittest.TestCase):

  @classmethod
  def setUpClass(cls):
    with open(OFFROAD_ALERTS_PATH) as f:
      cls.offroad_alerts = json.loads(f.read())

    # Create fake objects for callback
    cls.CS = car.CarState.new_message()
    cls.CP = car.CarParams.new_message()
    cfg = [c for c in CONFIGS if c.proc_name == 'controlsd'][0]
    cls.sm = SubMaster(cfg.pubs)

  def test_events_defined(self):
    # Ensure all events in capnp schema are defined in events.py
    events = car.CarEvent.EventName.schema.enumerants

    for name, e in events.items():
      if not name.endswith("DEPRECATED"):
        fail_msg = "%s @%d not in EVENTS" % (name, e)
        self.assertTrue(e in EVENTS.keys(), msg=fail_msg)

  # ensure alert text doesn't exceed allowed width
  def test_alert_text_length(self):
    font_path = os.path.join(BASEDIR, "selfdrive/assets/fonts")
    regular_font_path = os.path.join(font_path, "Inter-SemiBold.ttf")
    bold_font_path = os.path.join(font_path, "Inter-Bold.ttf")
    semibold_font_path = os.path.join(font_path, "Inter-SemiBold.ttf")

    max_text_width = 2160 - 300  # full screen width is usable, minus sidebar
    draw = ImageDraw.Draw(Image.new('RGB', (0, 0)))

    fonts = {
      AlertSize.small: [ImageFont.truetype(semibold_font_path, 74)],
      AlertSize.mid: [ImageFont.truetype(bold_font_path, 88),
                      ImageFont.truetype(regular_font_path, 66)],
    }

    for alert in ALERTS:
      if not isinstance(alert, Alert):
        alert = alert(self.CP, self.CS, self.sm, metric=False, soft_disable_time=100)

      # for full size alerts, both text fields wrap the text,
      # so it's unlikely that they would go past the max width
      if alert.alert_size in (AlertSize.none, AlertSize.full):
        continue

      for i, txt in enumerate([alert.alert_text_1, alert.alert_text_2]):
        if i >= len(fonts[alert.alert_size]):
          break

        font = fonts[alert.alert_size][i]
        left, _, right, _ = draw.textbbox((0, 0), txt, font)
        width = right - left
        msg = f"type: {alert.alert_type} msg: {txt}"
        self.assertLessEqual(width, max_text_width, msg=msg)

  def test_alert_sanity_check(self):
    for event_types in EVENTS.values():
      for event_type, a in event_types.items():
        # TODO: add callback alerts
        if not isinstance(a, Alert):
          continue

        if a.alert_size == AlertSize.none:
          self.assertEqual(len(a.alert_text_1), 0)
          self.assertEqual(len(a.alert_text_2), 0)
        elif a.alert_size == AlertSize.small:
          self.assertGreater(len(a.alert_text_1), 0)
          self.assertEqual(len(a.alert_text_2), 0)
        elif a.alert_size == AlertSize.mid:
          self.assertGreater(len(a.alert_text_1), 0)
          self.assertGreater(len(a.alert_text_2), 0)
        else:
          self.assertGreater(len(a.alert_text_1), 0)

        self.assertGreaterEqual(a.duration, 0.)

        if event_type not in (ET.WARNING, ET.PERMANENT, ET.PRE_ENABLE):
          self.assertEqual(a.creation_delay, 0.)

  def test_offroad_alerts(self):
    params = Params()
    for a in self.offroad_alerts:
      # set the alert
      alert = copy.copy(self.offroad_alerts[a])
      set_offroad_alert(a, True)
      alert['extra'] = ''
      self.assertTrue(json.dumps(alert) == params.get(a, encoding='utf8'))

      # then delete it
      set_offroad_alert(a, False)
      self.assertTrue(params.get(a) is None)

  def test_offroad_alerts_extra_text(self):
    params = Params()
    for i in range(50):
      # set the alert
      a = random.choice(list(self.offroad_alerts))
      alert = self.offroad_alerts[a]
      set_offroad_alert(a, True, extra_text="a"*i)

      written_alert = json.loads(params.get(a, encoding='utf8'))
      self.assertTrue("a"*i == written_alert['extra'])
      self.assertTrue(alert["text"] == written_alert['text'])

if __name__ == "__main__":
  unittest.main()
158
selfdrive/controls/tests/test_cruise_speed.py
Normal file
@@ -0,0 +1,158 @@
#!/usr/bin/env python3
import itertools
import numpy as np
import unittest

from parameterized import parameterized_class
from cereal import log
from openpilot.common.params import Params
from openpilot.selfdrive.controls.lib.drive_helpers import VCruiseHelper, V_CRUISE_MIN, V_CRUISE_MAX, V_CRUISE_INITIAL, IMPERIAL_INCREMENT
from cereal import car
from openpilot.common.conversions import Conversions as CV
from openpilot.selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver

ButtonEvent = car.CarState.ButtonEvent
ButtonType = car.CarState.ButtonEvent.Type


def run_cruise_simulation(cruise, e2e, t_end=20.):
  man = Maneuver(
    '',
    duration=t_end,
    initial_speed=max(cruise - 1., 0.0),
    lead_relevancy=True,
    initial_distance_lead=100,
    cruise_values=[cruise],
    prob_lead_values=[0.0],
    breakpoints=[0.],
    e2e=e2e,
  )
  valid, output = man.evaluate()
  assert valid
  return output[-1, 3]


@parameterized_class(("e2e", "personality", "speed"), itertools.product(
  [True, False],  # e2e
  log.LongitudinalPersonality.schema.enumerants,  # personality
  [5, 35]))  # speed
class TestCruiseSpeed(unittest.TestCase):
  def test_cruise_speed(self):
    params = Params()
    params.put("LongitudinalPersonality", str(self.personality))
    print(f'Testing {self.speed} m/s')
    cruise_speed = float(self.speed)

    simulation_steady_state = run_cruise_simulation(cruise_speed, self.e2e)
    self.assertAlmostEqual(simulation_steady_state, cruise_speed, delta=.01, msg=f'Did not reach {self.speed} m/s')


# TODO: test pcmCruise
@parameterized_class(('pcm_cruise',), [(False,)])
class TestVCruiseHelper(unittest.TestCase):
  def setUp(self):
    self.CP = car.CarParams(pcmCruise=self.pcm_cruise)
    self.v_cruise_helper = VCruiseHelper(self.CP)
    self.reset_cruise_speed_state()

  def reset_cruise_speed_state(self):
    # Two updates with cruise unavailable reset the previous cruise speed
    for _ in range(2):
      self.v_cruise_helper.update_v_cruise(car.CarState(cruiseState={"available": False}), enabled=False, is_metric=False)

  def enable(self, v_ego, experimental_mode):
    # Simulates user pressing set with a current speed
    self.v_cruise_helper.initialize_v_cruise(car.CarState(vEgo=v_ego), experimental_mode)

  def test_adjust_speed(self):
    """
    Asserts speed changes on falling edges of buttons.
    """

    self.enable(V_CRUISE_INITIAL * CV.KPH_TO_MS, False)

    for btn in (ButtonType.accelCruise, ButtonType.decelCruise):
      for pressed in (True, False):
        CS = car.CarState(cruiseState={"available": True})
        CS.buttonEvents = [ButtonEvent(type=btn, pressed=pressed)]

        self.v_cruise_helper.update_v_cruise(CS, enabled=True, is_metric=False)
        self.assertEqual(pressed, self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last)

  def test_rising_edge_enable(self):
    """
    Some car interfaces may enable on rising edge of a button,
    ensure we don't adjust speed if enabled changes mid-press.
    """

    # NOTE: enabled is always one frame behind the result from button press in controlsd
    for enabled, pressed in ((False, False),
                             (False, True),
                             (True, False)):
      CS = car.CarState(cruiseState={"available": True})
      CS.buttonEvents = [ButtonEvent(type=ButtonType.decelCruise, pressed=pressed)]
      self.v_cruise_helper.update_v_cruise(CS, enabled=enabled, is_metric=False)
      if pressed:
        self.enable(V_CRUISE_INITIAL * CV.KPH_TO_MS, False)

      # Expected diff on enabling. Speed should not change on falling edge of pressed
      self.assertEqual(not pressed, self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last)

  def test_resume_in_standstill(self):
    """
    Asserts we don't increment set speed if user presses resume/accel to exit cruise standstill.
    """

    self.enable(0, False)

    for standstill in (True, False):
      for pressed in (True, False):
        CS = car.CarState(cruiseState={"available": True, "standstill": standstill})
        CS.buttonEvents = [ButtonEvent(type=ButtonType.accelCruise, pressed=pressed)]
        self.v_cruise_helper.update_v_cruise(CS, enabled=True, is_metric=False)

        # speed should only update if not at standstill and button falling edge
        should_equal = standstill or pressed
        self.assertEqual(should_equal, self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last)

  def test_set_gas_pressed(self):
    """
    Asserts pressing set while enabled with gas pressed sets
    the speed to the maximum of vEgo and current cruise speed.
    """

    for v_ego in np.linspace(0, 100, 101):
      self.reset_cruise_speed_state()
      self.enable(V_CRUISE_INITIAL * CV.KPH_TO_MS, False)

      # first decrement speed, then perform gas pressed logic
      expected_v_cruise_kph = self.v_cruise_helper.v_cruise_kph - IMPERIAL_INCREMENT
      expected_v_cruise_kph = max(expected_v_cruise_kph, v_ego * CV.MS_TO_KPH)  # clip to min of vEgo
      expected_v_cruise_kph = float(np.clip(round(expected_v_cruise_kph, 1), V_CRUISE_MIN, V_CRUISE_MAX))

      CS = car.CarState(vEgo=float(v_ego), gasPressed=True, cruiseState={"available": True})
      CS.buttonEvents = [ButtonEvent(type=ButtonType.decelCruise, pressed=False)]
      self.v_cruise_helper.update_v_cruise(CS, enabled=True, is_metric=False)

      # TODO: fix skipping first run due to enabled on rising edge exception
      if v_ego == 0.0:
        continue
      self.assertEqual(expected_v_cruise_kph, self.v_cruise_helper.v_cruise_kph)

  def test_initialize_v_cruise(self):
    """
    Asserts allowed cruise speeds on enabling with SET.
    """

    for experimental_mode in (True, False):
      for v_ego in np.linspace(0, 100, 101):
        self.reset_cruise_speed_state()
        self.assertFalse(self.v_cruise_helper.v_cruise_initialized)

        self.enable(float(v_ego), experimental_mode)
        self.assertTrue(V_CRUISE_INITIAL <= self.v_cruise_helper.v_cruise_kph <= V_CRUISE_MAX)
        self.assertTrue(self.v_cruise_helper.v_cruise_initialized)


if __name__ == "__main__":
  unittest.main()
47
selfdrive/controls/tests/test_following_distance.py
Normal file
@@ -0,0 +1,47 @@
#!/usr/bin/env python3
import unittest
import itertools
from parameterized import parameterized_class

from openpilot.common.params import Params
from cereal import log

from openpilot.selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import desired_follow_distance, get_T_FOLLOW
from openpilot.selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver


def run_following_distance_simulation(v_lead, t_end=100.0, e2e=False):
  man = Maneuver(
    '',
    duration=t_end,
    initial_speed=float(v_lead),
    lead_relevancy=True,
    initial_distance_lead=100,
    speed_lead_values=[v_lead],
    breakpoints=[0.],
    e2e=e2e,
  )
  valid, output = man.evaluate()
  assert valid
  return output[-1,2] - output[-1,1]


@parameterized_class(("e2e", "personality", "speed"), itertools.product(
  [True, False],  # e2e
  [log.LongitudinalPersonality.relaxed,  # personality
   log.LongitudinalPersonality.standard,
   log.LongitudinalPersonality.aggressive],
  [0, 10, 35]))  # speed
class TestFollowingDistance(unittest.TestCase):
  def test_following_distance(self):
    params = Params()
    params.put("LongitudinalPersonality", str(self.personality))
    v_lead = float(self.speed)
    simulation_steady_state = run_following_distance_simulation(v_lead, e2e=self.e2e)
    correct_steady_state = desired_follow_distance(v_lead, v_lead, get_T_FOLLOW(self.personality))
    err_ratio = 0.2 if self.e2e else 0.1
    self.assertAlmostEqual(simulation_steady_state, correct_steady_state, delta=(err_ratio * correct_steady_state + .5))


if __name__ == "__main__":
  unittest.main()
89
selfdrive/controls/tests/test_lateral_mpc.py
Normal file
@@ -0,0 +1,89 @@
import unittest
import numpy as np
from openpilot.selfdrive.controls.lib.lateral_mpc_lib.lat_mpc import LateralMpc
from openpilot.selfdrive.controls.lib.drive_helpers import CAR_ROTATION_RADIUS
from openpilot.selfdrive.controls.lib.lateral_mpc_lib.lat_mpc import N as LAT_MPC_N


def run_mpc(lat_mpc=None, v_ref=30., x_init=0., y_init=0., psi_init=0., curvature_init=0.,
            lane_width=3.6, poly_shift=0.):

  if lat_mpc is None:
    lat_mpc = LateralMpc()
  lat_mpc.set_weights(1., .1, 0.0, .05, 800)

  y_pts = poly_shift * np.ones(LAT_MPC_N + 1)
  heading_pts = np.zeros(LAT_MPC_N + 1)
  curv_rate_pts = np.zeros(LAT_MPC_N + 1)

  x0 = np.array([x_init, y_init, psi_init, curvature_init])
  p = np.column_stack([v_ref * np.ones(LAT_MPC_N + 1),
                       CAR_ROTATION_RADIUS * np.ones(LAT_MPC_N + 1)])

  # converge in no more than 10 iterations
  for _ in range(10):
    lat_mpc.run(x0, p,
                y_pts, heading_pts, curv_rate_pts)
  return lat_mpc.x_sol


class TestLateralMpc(unittest.TestCase):

  def _assert_null(self, sol, curvature=1e-6):
    for i in range(len(sol)):
      self.assertAlmostEqual(sol[0,i,1], 0., delta=curvature)
      self.assertAlmostEqual(sol[0,i,2], 0., delta=curvature)
      self.assertAlmostEqual(sol[0,i,3], 0., delta=curvature)

  def _assert_simmetry(self, sol, curvature=1e-6):
    for i in range(len(sol)):
      self.assertAlmostEqual(sol[0,i,1], -sol[1,i,1], delta=curvature)
      self.assertAlmostEqual(sol[0,i,2], -sol[1,i,2], delta=curvature)
      self.assertAlmostEqual(sol[0,i,3], -sol[1,i,3], delta=curvature)
      self.assertAlmostEqual(sol[0,i,0], sol[1,i,0], delta=curvature)

  def test_straight(self):
    sol = run_mpc()
    self._assert_null(np.array([sol]))

  def test_y_symmetry(self):
    sol = []
    for y_init in [-0.5, 0.5]:
      sol.append(run_mpc(y_init=y_init))
    self._assert_simmetry(np.array(sol))

  def test_poly_symmetry(self):
    sol = []
    for poly_shift in [-1., 1.]:
      sol.append(run_mpc(poly_shift=poly_shift))
    self._assert_simmetry(np.array(sol))

  def test_curvature_symmetry(self):
    sol = []
    for curvature_init in [-0.1, 0.1]:
      sol.append(run_mpc(curvature_init=curvature_init))
    self._assert_simmetry(np.array(sol))

  def test_psi_symmetry(self):
    sol = []
    for psi_init in [-0.1, 0.1]:
      sol.append(run_mpc(psi_init=psi_init))
    self._assert_simmetry(np.array(sol))

  def test_no_overshoot(self):
    y_init = 1.
    sol = run_mpc(y_init=y_init)
    for y in list(sol[:,1]):
      self.assertGreaterEqual(y_init, abs(y))

  def test_switch_convergence(self):
    lat_mpc = LateralMpc()
    sol = run_mpc(lat_mpc=lat_mpc, poly_shift=3.0, v_ref=7.0)
    right_psi_deg = np.degrees(sol[:,2])
    sol = run_mpc(lat_mpc=lat_mpc, poly_shift=-3.0, v_ref=7.0)
    left_psi_deg = np.degrees(sol[:,2])
    np.testing.assert_almost_equal(right_psi_deg, -left_psi_deg, decimal=3)


if __name__ == "__main__":
  unittest.main()
36
selfdrive/controls/tests/test_leads.py
Normal file
@@ -0,0 +1,36 @@
#!/usr/bin/env python3
import unittest

import cereal.messaging as messaging

from openpilot.selfdrive.test.process_replay import replay_process_with_name
from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA


class TestLeads(unittest.TestCase):
  def test_radar_fault(self):
    # if there's no radar-related can traffic, radard should either not respond or respond with an error
    # this is tightly coupled with underlying car radar_interface implementation, but it's a good sanity check
    def single_iter_pkg():
      # single iter package, with meaningless cans and empty carState/modelV2
      msgs = []
      for _ in range(5):
        can = messaging.new_message("can", 1)
        cs = messaging.new_message("carState")
        msgs.append(can.as_reader())
        msgs.append(cs.as_reader())
      model = messaging.new_message("modelV2")
      msgs.append(model.as_reader())

      return msgs

    msgs = [m for _ in range(3) for m in single_iter_pkg()]
    out = replay_process_with_name("radard", msgs, fingerprint=TOYOTA.COROLLA_TSS2)
    states = [m for m in out if m.which() == "radarState"]
    failures = [not state.valid and len(state.radarState.radarErrors) for state in states]

    self.assertTrue(len(states) == 0 or all(failures))


if __name__ == "__main__":
  unittest.main()
120
selfdrive/controls/tests/test_startup.py
Normal file
@@ -0,0 +1,120 @@
import os
from parameterized import parameterized

from cereal import log, car
import cereal.messaging as messaging
from openpilot.common.params import Params
from openpilot.selfdrive.boardd.boardd_api_impl import can_list_to_can_capnp
from openpilot.selfdrive.car.fingerprints import _FINGERPRINTS
from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA
from openpilot.selfdrive.car.mazda.values import CAR as MAZDA
from openpilot.selfdrive.controls.lib.events import EVENT_NAME
from openpilot.selfdrive.manager.process_config import managed_processes

EventName = car.CarEvent.EventName
Ecu = car.CarParams.Ecu

COROLLA_FW_VERSIONS = [
  (Ecu.engine, 0x7e0, None, b'\x0230ZC2000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00'),
  (Ecu.abs, 0x7b0, None, b'F152602190\x00\x00\x00\x00\x00\x00'),
  (Ecu.eps, 0x7a1, None, b'8965B02181\x00\x00\x00\x00\x00\x00'),
  (Ecu.fwdRadar, 0x750, 0xf, b'8821F4702100\x00\x00\x00\x00'),
  (Ecu.fwdCamera, 0x750, 0x6d, b'8646F0201101\x00\x00\x00\x00'),
  (Ecu.dsu, 0x791, None, b'881510201100\x00\x00\x00\x00'),
]
COROLLA_FW_VERSIONS_FUZZY = COROLLA_FW_VERSIONS[:-1] + [(Ecu.dsu, 0x791, None, b'xxxxxx')]
COROLLA_FW_VERSIONS_NO_DSU = COROLLA_FW_VERSIONS[:-1]

CX5_FW_VERSIONS = [
  (Ecu.engine, 0x7e0, None, b'PYNF-188K2-F\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
  (Ecu.abs, 0x760, None, b'K123-437K2-E\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
  (Ecu.eps, 0x730, None, b'KJ01-3210X-G-00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
  (Ecu.fwdRadar, 0x764, None, b'K123-67XK2-F\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
  (Ecu.fwdCamera, 0x706, None, b'B61L-67XK2-T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
  (Ecu.transmission, 0x7e1, None, b'PYNC-21PS1-B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
]


@parameterized.expand([
  # TODO: test EventName.startup for release branches

  # officially supported car
  (EventName.startupMaster, TOYOTA.COROLLA, COROLLA_FW_VERSIONS, "toyota"),
  (EventName.startupMaster, TOYOTA.COROLLA, COROLLA_FW_VERSIONS, "toyota"),

  # dashcamOnly car
  (EventName.startupNoControl, MAZDA.CX5, CX5_FW_VERSIONS, "mazda"),
  (EventName.startupNoControl, MAZDA.CX5, CX5_FW_VERSIONS, "mazda"),

  # unrecognized car with no fw
  (EventName.startupNoFw, None, None, ""),
  (EventName.startupNoFw, None, None, ""),

  # unrecognized car
  (EventName.startupNoCar, None, COROLLA_FW_VERSIONS[:1], "toyota"),
  (EventName.startupNoCar, None, COROLLA_FW_VERSIONS[:1], "toyota"),

  # fuzzy match
  (EventName.startupMaster, TOYOTA.COROLLA, COROLLA_FW_VERSIONS_FUZZY, "toyota"),
  (EventName.startupMaster, TOYOTA.COROLLA, COROLLA_FW_VERSIONS_FUZZY, "toyota"),
])
def test_startup_alert(expected_event, car_model, fw_versions, brand):
  controls_sock = messaging.sub_sock("controlsState")
  pm = messaging.PubMaster(['can', 'pandaStates'])

  params = Params()
  params.put_bool("OpenpilotEnabledToggle", True)

  # Build capnp version of FW array
  if fw_versions is not None:
    car_fw = []
    cp = car.CarParams.new_message()
    for ecu, addr, subaddress, version in fw_versions:
      f = car.CarParams.CarFw.new_message()
      f.ecu = ecu
      f.address = addr
      f.fwVersion = version
      f.brand = brand

      if subaddress is not None:
        f.subAddress = subaddress

      car_fw.append(f)
    cp.carVin = "1" * 17
    cp.carFw = car_fw
    params.put("CarParamsCache", cp.to_bytes())
  else:
    os.environ['SKIP_FW_QUERY'] = '1'

  managed_processes['controlsd'].start()

  assert pm.wait_for_readers_to_update('can', 5)
  pm.send('can', can_list_to_can_capnp([[0, 0, b"", 0]]))

  assert pm.wait_for_readers_to_update('pandaStates', 5)
  msg = messaging.new_message('pandaStates', 1)
  msg.pandaStates[0].pandaType = log.PandaState.PandaType.uno
  pm.send('pandaStates', msg)

  # fingerprint
  if (car_model is None) or (fw_versions is not None):
    finger = {addr: 1 for addr in range(1, 100)}
  else:
    finger = _FINGERPRINTS[car_model][0]

  msgs = [[addr, 0, b'\x00'*length, 0] for addr, length in finger.items()]
  for _ in range(1000):
    # controlsd waits for boardd to echo back that it has changed the multiplexing mode
    if not params.get_bool("ObdMultiplexingChanged"):
      params.put_bool("ObdMultiplexingChanged", True)

    pm.send('can', can_list_to_can_capnp(msgs))
    assert pm.wait_for_readers_to_update('can', 5, dt=0.001), f"step: {_}"

    ctrls = messaging.drain_sock(controls_sock)
    if len(ctrls):
      event_name = ctrls[0].controlsState.alertType.split("/")[0]
      assert EVENT_NAME[expected_event] == event_name, f"expected {EVENT_NAME[expected_event]} for '{car_model}', got {event_name}"
      break
  else:
    raise Exception(f"failed to fingerprint {car_model}")
109
selfdrive/controls/tests/test_state_machine.py
Normal file
@@ -0,0 +1,109 @@
#!/usr/bin/env python3
import unittest

from cereal import car, log
from openpilot.common.realtime import DT_CTRL
from openpilot.selfdrive.car.car_helpers import interfaces
from openpilot.selfdrive.controls.controlsd import Controls, SOFT_DISABLE_TIME
from openpilot.selfdrive.controls.lib.events import Events, ET, Alert, Priority, AlertSize, AlertStatus, VisualAlert, \
                                                    AudibleAlert, EVENTS
from openpilot.selfdrive.car.mock.values import CAR as MOCK

State = log.ControlsState.OpenpilotState

# The event types that maintain the current state
MAINTAIN_STATES = {State.enabled: (None,), State.disabled: (None,), State.softDisabling: (ET.SOFT_DISABLE,),
                   State.preEnabled: (ET.PRE_ENABLE,), State.overriding: (ET.OVERRIDE_LATERAL, ET.OVERRIDE_LONGITUDINAL)}
ALL_STATES = tuple(State.schema.enumerants.values())
# The event types checked in DISABLED section of state machine
ENABLE_EVENT_TYPES = (ET.ENABLE, ET.PRE_ENABLE, ET.OVERRIDE_LATERAL, ET.OVERRIDE_LONGITUDINAL)


def make_event(event_types):
  event = {}
  for ev in event_types:
    event[ev] = Alert("", "", AlertStatus.normal, AlertSize.small, Priority.LOW,
                      VisualAlert.none, AudibleAlert.none, 1.)
  EVENTS[0] = event
  return 0


class TestStateMachine(unittest.TestCase):

  def setUp(self):
    CarInterface, CarController, CarState = interfaces[MOCK.MOCK]
    CP = CarInterface.get_non_essential_params(MOCK.MOCK)
    CI = CarInterface(CP, CarController, CarState)

    self.controlsd = Controls(CI=CI)
    self.controlsd.events = Events()
    self.controlsd.soft_disable_timer = int(SOFT_DISABLE_TIME / DT_CTRL)
    self.CS = car.CarState()

  def test_immediate_disable(self):
    for state in ALL_STATES:
      for et in MAINTAIN_STATES[state]:
        self.controlsd.events.add(make_event([et, ET.IMMEDIATE_DISABLE]))
        self.controlsd.state = state
        self.controlsd.state_transition(self.CS)
        self.assertEqual(State.disabled, self.controlsd.state)
        self.controlsd.events.clear()

  def test_user_disable(self):
    for state in ALL_STATES:
      for et in MAINTAIN_STATES[state]:
        self.controlsd.events.add(make_event([et, ET.USER_DISABLE]))
        self.controlsd.state = state
        self.controlsd.state_transition(self.CS)
        self.assertEqual(State.disabled, self.controlsd.state)
        self.controlsd.events.clear()

  def test_soft_disable(self):
    for state in ALL_STATES:
      if state == State.preEnabled:  # preEnabled considers NO_ENTRY instead
        continue
      for et in MAINTAIN_STATES[state]:
        self.controlsd.events.add(make_event([et, ET.SOFT_DISABLE]))
        self.controlsd.state = state
        self.controlsd.state_transition(self.CS)
        self.assertEqual(self.controlsd.state, State.disabled if state == State.disabled else State.softDisabling)
        self.controlsd.events.clear()

  def test_soft_disable_timer(self):
    self.controlsd.state = State.enabled
    self.controlsd.events.add(make_event([ET.SOFT_DISABLE]))
    self.controlsd.state_transition(self.CS)
    for _ in range(int(SOFT_DISABLE_TIME / DT_CTRL)):
      self.assertEqual(self.controlsd.state, State.softDisabling)
      self.controlsd.state_transition(self.CS)

    self.assertEqual(self.controlsd.state, State.disabled)

  def test_no_entry(self):
    # Make sure noEntry keeps us disabled
    for et in ENABLE_EVENT_TYPES:
      self.controlsd.events.add(make_event([ET.NO_ENTRY, et]))
      self.controlsd.state_transition(self.CS)
      self.assertEqual(self.controlsd.state, State.disabled)
      self.controlsd.events.clear()

  def test_no_entry_pre_enable(self):
    # preEnabled with noEntry event
    self.controlsd.state = State.preEnabled
    self.controlsd.events.add(make_event([ET.NO_ENTRY, ET.PRE_ENABLE]))
    self.controlsd.state_transition(self.CS)
    self.assertEqual(self.controlsd.state, State.preEnabled)

  def test_maintain_states(self):
    # Given current state's event type, we should maintain state
    for state in ALL_STATES:
      for et in MAINTAIN_STATES[state]:
        self.controlsd.state = state
        self.controlsd.events.add(make_event([et]))
        self.controlsd.state_transition(self.CS)
        self.assertEqual(self.controlsd.state, state)
        self.controlsd.events.clear()


if __name__ == "__main__":
  unittest.main()
1
selfdrive/locationd/test/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
out/
0
selfdrive/locationd/test/__init__.py
Normal file
117
selfdrive/locationd/test/test_calibrationd.py
Normal file
@@ -0,0 +1,117 @@
#!/usr/bin/env python3
import random
import unittest

import numpy as np

import cereal.messaging as messaging
from cereal import log
from openpilot.common.params import Params
from openpilot.selfdrive.locationd.calibrationd import Calibrator, INPUTS_NEEDED, INPUTS_WANTED, BLOCK_SIZE, MIN_SPEED_FILTER, \
  MAX_YAW_RATE_FILTER, SMOOTH_CYCLES, HEIGHT_INIT, MAX_ALLOWED_PITCH_SPREAD, MAX_ALLOWED_YAW_SPREAD


def process_messages(c, cam_odo_calib, cycles,
                     cam_odo_speed=MIN_SPEED_FILTER + 1,
                     carstate_speed=MIN_SPEED_FILTER + 1,
                     cam_odo_yr=0.0,
                     cam_odo_speed_std=1e-3,
                     cam_odo_height_std=1e-3):
  old_rpy_weight_prev = 0.0
  for _ in range(cycles):
    assert (old_rpy_weight_prev - c.old_rpy_weight < 1/SMOOTH_CYCLES + 1e-3)
    old_rpy_weight_prev = c.old_rpy_weight
    c.handle_v_ego(carstate_speed)
    c.handle_cam_odom([cam_odo_speed,
                       np.sin(cam_odo_calib[2]) * cam_odo_speed,
                       -np.sin(cam_odo_calib[1]) * cam_odo_speed],
                      [0.0, 0.0, cam_odo_yr],
                      [0.0, 0.0, 0.0],
                      [cam_odo_speed_std, cam_odo_speed_std, cam_odo_speed_std],
                      [0.0, 0.0, HEIGHT_INIT.item()],
                      [cam_odo_height_std, cam_odo_height_std, cam_odo_height_std])

class TestCalibrationd(unittest.TestCase):

  def test_read_saved_params(self):
    msg = messaging.new_message('liveCalibration')
    msg.liveCalibration.validBlocks = random.randint(1, 10)
    msg.liveCalibration.rpyCalib = [random.random() for _ in range(3)]
    msg.liveCalibration.height = [random.random() for _ in range(1)]
    Params().put("CalibrationParams", msg.to_bytes())
    c = Calibrator(param_put=True)

    np.testing.assert_allclose(msg.liveCalibration.rpyCalib, c.rpy)
    np.testing.assert_allclose(msg.liveCalibration.height, c.height)
    self.assertEqual(msg.liveCalibration.validBlocks, c.valid_blocks)


  def test_calibration_basics(self):
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED)
    self.assertEqual(c.valid_blocks, INPUTS_WANTED)
    np.testing.assert_allclose(c.rpy, np.zeros(3))
    np.testing.assert_allclose(c.height, HEIGHT_INIT)
    c.reset()


  def test_calibration_low_speed_reject(self):
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_speed=MIN_SPEED_FILTER - 1)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, carstate_speed=MIN_SPEED_FILTER - 1)
    self.assertEqual(c.valid_blocks, 0)
    np.testing.assert_allclose(c.rpy, np.zeros(3))
    np.testing.assert_allclose(c.height, HEIGHT_INIT)


  def test_calibration_yaw_rate_reject(self):
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_yr=MAX_YAW_RATE_FILTER)
    self.assertEqual(c.valid_blocks, 0)
    np.testing.assert_allclose(c.rpy, np.zeros(3))
    np.testing.assert_allclose(c.height, HEIGHT_INIT)


  def test_calibration_speed_std_reject(self):
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_speed_std=1e3)
    self.assertEqual(c.valid_blocks, INPUTS_NEEDED)
    np.testing.assert_allclose(c.rpy, np.zeros(3))


  def test_calibration_speed_std_height_reject(self):
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_height_std=1e3)
    self.assertEqual(c.valid_blocks, INPUTS_NEEDED)
    np.testing.assert_allclose(c.rpy, np.zeros(3))


  def test_calibration_auto_reset(self):
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED)
    self.assertEqual(c.valid_blocks, INPUTS_NEEDED)
    np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0], atol=1e-3)
    process_messages(c, [0.0, MAX_ALLOWED_PITCH_SPREAD*0.9, MAX_ALLOWED_YAW_SPREAD*0.9], BLOCK_SIZE + 10)
    self.assertEqual(c.valid_blocks, INPUTS_NEEDED + 1)
    self.assertEqual(c.cal_status, log.LiveCalibrationData.Status.calibrated)

    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED)
    self.assertEqual(c.valid_blocks, INPUTS_NEEDED)
    np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0])
    process_messages(c, [0.0, MAX_ALLOWED_PITCH_SPREAD*1.1, 0.0], BLOCK_SIZE + 10)
    self.assertEqual(c.valid_blocks, 1)
    self.assertEqual(c.cal_status, log.LiveCalibrationData.Status.recalibrating)
    np.testing.assert_allclose(c.rpy, [0.0, MAX_ALLOWED_PITCH_SPREAD*1.1, 0.0], atol=1e-2)

    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED)
    self.assertEqual(c.valid_blocks, INPUTS_NEEDED)
    np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0])
    process_messages(c, [0.0, 0.0, MAX_ALLOWED_YAW_SPREAD*1.1], BLOCK_SIZE + 10)
    self.assertEqual(c.valid_blocks, 1)
    self.assertEqual(c.cal_status, log.LiveCalibrationData.Status.recalibrating)
    np.testing.assert_allclose(c.rpy, [0.0, 0.0, MAX_ALLOWED_YAW_SPREAD*1.1], atol=1e-2)

if __name__ == "__main__":
  unittest.main()
94
selfdrive/locationd/test/test_locationd.py
Normal file
@@ -0,0 +1,94 @@
#!/usr/bin/env python3
import json
import random
import unittest
import time
import capnp

import cereal.messaging as messaging
from cereal.services import SERVICE_LIST
from openpilot.common.params import Params
from openpilot.common.transformations.coordinates import ecef2geodetic

from openpilot.selfdrive.manager.process_config import managed_processes


class TestLocationdProc(unittest.TestCase):
  LLD_MSGS = ['gpsLocationExternal', 'cameraOdometry', 'carState', 'liveCalibration',
              'accelerometer', 'gyroscope', 'magnetometer']

  def setUp(self):
    random.seed(123489234)

    self.pm = messaging.PubMaster(self.LLD_MSGS)

    self.params = Params()
    self.params.put_bool("UbloxAvailable", True)
    managed_processes['locationd'].prepare()
    managed_processes['locationd'].start()

  def tearDown(self):
    managed_processes['locationd'].stop()

  def get_msg(self, name, t):
    try:
      msg = messaging.new_message(name)
    except capnp.lib.capnp.KjException:
      msg = messaging.new_message(name, 0)

    if name == "gpsLocationExternal":
      msg.gpsLocationExternal.flags = 1
      msg.gpsLocationExternal.verticalAccuracy = 1.0
      msg.gpsLocationExternal.speedAccuracy = 1.0
      msg.gpsLocationExternal.bearingAccuracyDeg = 1.0
      msg.gpsLocationExternal.vNED = [0.0, 0.0, 0.0]
      msg.gpsLocationExternal.latitude = float(self.lat)
      msg.gpsLocationExternal.longitude = float(self.lon)
      msg.gpsLocationExternal.unixTimestampMillis = t * 1e6
      msg.gpsLocationExternal.altitude = float(self.alt)
    #if name == "gnssMeasurements":
    #  msg.gnssMeasurements.measTime = t
    #  msg.gnssMeasurements.positionECEF.value = [self.x , self.y, self.z]
    #  msg.gnssMeasurements.positionECEF.std = [0,0,0]
    #  msg.gnssMeasurements.positionECEF.valid = True
    #  msg.gnssMeasurements.velocityECEF.value = []
    #  msg.gnssMeasurements.velocityECEF.std = [0,0,0]
    #  msg.gnssMeasurements.velocityECEF.valid = True
    elif name == 'cameraOdometry':
      msg.cameraOdometry.rot = [0.0, 0.0, 0.0]
      msg.cameraOdometry.rotStd = [0.0, 0.0, 0.0]
      msg.cameraOdometry.trans = [0.0, 0.0, 0.0]
      msg.cameraOdometry.transStd = [0.0, 0.0, 0.0]
    msg.logMonoTime = t
    msg.valid = True
    return msg

  def test_params_gps(self):
    self.params.remove('LastGPSPosition')

    self.x = -2710700 + (random.random() * 1e5)
    self.y = -4280600 + (random.random() * 1e5)
    self.z = 3850300 + (random.random() * 1e5)
    self.lat, self.lon, self.alt = ecef2geodetic([self.x, self.y, self.z])

    # get fake messages at the correct frequency, listed in services.py
    msgs = []
    for sec in range(65):
      for name in self.LLD_MSGS:
        for j in range(int(SERVICE_LIST[name].frequency)):
          msgs.append(self.get_msg(name, int((sec + j / SERVICE_LIST[name].frequency) * 1e9)))

    for msg in sorted(msgs, key=lambda x: x.logMonoTime):
      self.pm.send(msg.which(), msg)
      if msg.which() == "cameraOdometry":
        self.pm.wait_for_readers_to_update(msg.which(), 0.1, dt=0.005)
    time.sleep(1)  # wait for async params write

    lastGPS = json.loads(self.params.get('LastGPSPosition'))
    self.assertAlmostEqual(lastGPS['latitude'], self.lat, places=3)
    self.assertAlmostEqual(lastGPS['longitude'], self.lon, places=3)
    self.assertAlmostEqual(lastGPS['altitude'], self.alt, places=3)


if __name__ == "__main__":
  unittest.main()
228
selfdrive/locationd/test/test_locationd_scenarios.py
Normal file
228
selfdrive/locationd/test/test_locationd_scenarios.py
Normal file
@@ -0,0 +1,228 @@
#!/usr/bin/env python3
import pytest
import unittest
import numpy as np
from collections import defaultdict
from enum import Enum

from openpilot.tools.lib.logreader import LogReader
from openpilot.selfdrive.test.process_replay.process_replay import replay_process_with_name

TEST_ROUTE = "ff2bd20623fcaeaa|2023-09-05--10-14-54/4"
GPS_MESSAGES = ['gpsLocationExternal', 'gpsLocation']
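# (attribute, sub-attribute, index) paths into each liveLocationKalman message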
SELECT_COMPARE_FIELDS = {
  'yaw_rate': ['angularVelocityCalibrated', 'value', 2],
  'roll': ['orientationNED', 'value', 0],
  'gps_flag': ['gpsOK'],
  'inputs_flag': ['inputsOK'],
  'sensors_flag': ['sensorsOK'],
}
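# number of initial messages to discard before comparing, while the filter is still settling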
JUNK_IDX = 100


class Scenario(Enum):
  BASE = 'base'
  GPS_OFF = 'gps_off'
  GPS_OFF_MIDWAY = 'gps_off_midway'
  GPS_ON_MIDWAY = 'gps_on_midway'
  GPS_TUNNEL = 'gps_tunnel'
  GYRO_OFF = 'gyro_off'
  GYRO_SPIKE_MIDWAY = 'gyro_spike_midway'
  ACCEL_OFF = 'accel_off'
  ACCEL_SPIKE_MIDWAY = 'accel_spike_midway'


def get_select_fields_data(logs):
  def get_nested_keys(msg, keys):
    val = None
    for key in keys:
      val = getattr(msg if val is None else val, key) if isinstance(key, str) else val[key]
    return val

  llk = [x.liveLocationKalman for x in logs if x.which() == 'liveLocationKalman']
  data = defaultdict(list)
  for msg in llk:
    for key, fields in SELECT_COMPARE_FIELDS.items():
      data[key].append(get_nested_keys(msg, fields))
  for key in data:
    data[key] = np.array(data[key][JUNK_IDX:], dtype=float)
  return data

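# drop or perturb messages according to the scenario, then replay locationd over the modified log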
def run_scenarios(scenario, logs):
  if scenario == Scenario.BASE:
    pass

  elif scenario == Scenario.GPS_OFF:
    logs = sorted([x for x in logs if x.which() not in GPS_MESSAGES], key=lambda x: x.logMonoTime)

  elif scenario == Scenario.GPS_OFF_MIDWAY:
    non_gps = [x for x in logs if x.which() not in GPS_MESSAGES]
    gps = [x for x in logs if x.which() in GPS_MESSAGES]
    logs = sorted(non_gps + gps[:len(gps) // 2], key=lambda x: x.logMonoTime)

  elif scenario == Scenario.GPS_ON_MIDWAY:
    non_gps = [x for x in logs if x.which() not in GPS_MESSAGES]
    gps = [x for x in logs if x.which() in GPS_MESSAGES]
    logs = sorted(non_gps + gps[len(gps) // 2:], key=lambda x: x.logMonoTime)

  elif scenario == Scenario.GPS_TUNNEL:
    non_gps = [x for x in logs if x.which() not in GPS_MESSAGES]
    gps = [x for x in logs if x.which() in GPS_MESSAGES]
    logs = sorted(non_gps + gps[:len(gps) // 4] + gps[-len(gps) // 4:], key=lambda x: x.logMonoTime)

  elif scenario == Scenario.GYRO_OFF:
    logs = sorted([x for x in logs if x.which() != 'gyroscope'], key=lambda x: x.logMonoTime)

  elif scenario == Scenario.GYRO_SPIKE_MIDWAY:
    non_gyro = [x for x in logs if x.which() != 'gyroscope']
    gyro = [x for x in logs if x.which() == 'gyroscope']
    temp = gyro[len(gyro) // 2].as_builder()
    temp.gyroscope.gyroUncalibrated.v[0] += 3.0
    gyro[len(gyro) // 2] = temp.as_reader()
    logs = sorted(non_gyro + gyro, key=lambda x: x.logMonoTime)

  elif scenario == Scenario.ACCEL_OFF:
    logs = sorted([x for x in logs if x.which() != 'accelerometer'], key=lambda x: x.logMonoTime)

  elif scenario == Scenario.ACCEL_SPIKE_MIDWAY:
    non_accel = [x for x in logs if x.which() != 'accelerometer']
    accel = [x for x in logs if x.which() == 'accelerometer']
    temp = accel[len(accel) // 2].as_builder()
    temp.accelerometer.acceleration.v[0] += 10.0
    accel[len(accel) // 2] = temp.as_reader()
    logs = sorted(non_accel + accel, key=lambda x: x.logMonoTime)

  replayed_logs = replay_process_with_name(name='locationd', lr=logs)
  return get_select_fields_data(logs), get_select_fields_data(replayed_logs)


@pytest.mark.xdist_group("test_locationd_scenarios")
@pytest.mark.shared_download_cache
class TestLocationdScenarios(unittest.TestCase):
  """
  Test locationd with different scenarios. In all these scenarios, we expect the following:
    - the locationd kalman filter should never go unstable (we care mostly about yaw_rate, roll, gpsOK, inputsOK, sensorsOK)
    - faulty values should be ignored, with the appropriate flags set
  """

  @classmethod
  def setUpClass(cls):
    cls.logs = list(LogReader(TEST_ROUTE))

  def test_base(self):
    """
    Test: unchanged log
    Expected Result:
      - yaw_rate: unchanged
      - roll: unchanged
    """
    orig_data, replayed_data = run_scenarios(Scenario.BASE, self.logs)
    self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
    self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)))

  def test_gps_off(self):
    """
    Test: no GPS messages for the entire segment
    Expected Result:
      - yaw_rate: unchanged
      - roll: unchanged
      - gpsOK: False
    """
    orig_data, replayed_data = run_scenarios(Scenario.GPS_OFF, self.logs)
    self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
    self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)))
    self.assertTrue(np.all(replayed_data['gps_flag'] == 0.0))

  def test_gps_off_midway(self):
    """
    Test: no GPS messages for the second half of the segment
    Expected Result:
      - yaw_rate: unchanged
      - roll: unchanged
      - gpsOK: True for the first half, False for the second half
    """
    orig_data, replayed_data = run_scenarios(Scenario.GPS_OFF_MIDWAY, self.logs)
    self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
    self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)))
    self.assertTrue(np.diff(replayed_data['gps_flag'])[512] == -1.0)

  def test_gps_on_midway(self):
    """
    Test: no GPS messages for the first half of the segment
    Expected Result:
      - yaw_rate: unchanged
      - roll: unchanged (with a larger tolerance)
      - gpsOK: False for the first half, True for the second half
    """
    orig_data, replayed_data = run_scenarios(Scenario.GPS_ON_MIDWAY, self.logs)
    self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
    self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(1.5)))
    self.assertTrue(np.diff(replayed_data['gps_flag'])[505] == 1.0)

  def test_gps_tunnel(self):
    """
    Test: no GPS messages for the middle section of the segment
    Expected Result:
      - yaw_rate: unchanged
      - roll: unchanged
      - gpsOK: False for the middle section, True for the rest
    """
    orig_data, replayed_data = run_scenarios(Scenario.GPS_TUNNEL, self.logs)
    self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
    self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)))
    self.assertTrue(np.diff(replayed_data['gps_flag'])[213] == -1.0)
    self.assertTrue(np.diff(replayed_data['gps_flag'])[805] == 1.0)

  def test_gyro_off(self):
    """
    Test: no gyroscope messages for the entire segment
    Expected Result:
      - yaw_rate: 0
      - roll: 0
      - sensorsOK: False
    """
    _, replayed_data = run_scenarios(Scenario.GYRO_OFF, self.logs)
    self.assertTrue(np.allclose(replayed_data['yaw_rate'], 0.0))
    self.assertTrue(np.allclose(replayed_data['roll'], 0.0))
    self.assertTrue(np.all(replayed_data['sensors_flag'] == 0.0))

  def test_gyro_spikes(self):
    """
    Test: a gyroscope spike in the middle of the segment
    Expected Result:
      - yaw_rate: unchanged
      - roll: unchanged
      - inputsOK: False for some time after the spike, True for the rest
    """
    orig_data, replayed_data = run_scenarios(Scenario.GYRO_SPIKE_MIDWAY, self.logs)
    self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
    self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)))
    self.assertTrue(np.diff(replayed_data['inputs_flag'])[500] == -1.0)
    self.assertTrue(np.diff(replayed_data['inputs_flag'])[694] == 1.0)

  def test_accel_off(self):
    """
    Test: no accelerometer messages for the entire segment
    Expected Result:
      - yaw_rate: 0
      - roll: 0
      - sensorsOK: False
    """
    _, replayed_data = run_scenarios(Scenario.ACCEL_OFF, self.logs)
    self.assertTrue(np.allclose(replayed_data['yaw_rate'], 0.0))
    self.assertTrue(np.allclose(replayed_data['roll'], 0.0))
    self.assertTrue(np.all(replayed_data['sensors_flag'] == 0.0))

  def test_accel_spikes(self):
    """
    TODO:
    Test: an accelerometer spike in the middle of the segment
    Expected Result: right now, the kalman filter is not robust to small accelerometer spikes the way it is to gyroscope spikes.
    """
    orig_data, replayed_data = run_scenarios(Scenario.ACCEL_SPIKE_MIDWAY, self.logs)
    self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
    self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)))


if __name__ == "__main__":
  unittest.main()
9
selfdrive/test/.gitignore
vendored
Normal file
9
selfdrive/test/.gitignore
vendored
Normal file
@@ -0,0 +1,9 @@
out/
docker_out/

process_replay/diff.txt
process_replay/model_diff.txt
valgrind_logs.txt

*.bz2
*.hevc
19
selfdrive/test/ci_shell.sh
Normal file
19
selfdrive/test/ci_shell.sh
Normal file
@@ -0,0 +1,19 @@
#!/bin/bash -e

DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)"
OP_ROOT="$DIR/../../"

if [ -z "$BUILD" ]; then
  docker pull ghcr.io/commaai/openpilot-base:latest
else
  docker build --cache-from ghcr.io/commaai/openpilot-base:latest -t ghcr.io/commaai/openpilot-base:latest -f $OP_ROOT/Dockerfile.openpilot_base .
fi

docker run \
  -it \
  --rm \
  --volume $OP_ROOT:$OP_ROOT \
  --workdir $PWD \
  --env PYTHONPATH=$OP_ROOT \
  ghcr.io/commaai/openpilot-base:latest \
  /bin/bash
58
selfdrive/test/ciui.py
Normal file
58
selfdrive/test/ciui.py
Normal file
@@ -0,0 +1,58 @@
#!/usr/bin/env python3
import signal
import subprocess

signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)

from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel
from openpilot.selfdrive.ui.qt.python_helpers import set_main_window

class Window(QWidget):
  def __init__(self, parent=None):
    super().__init__(parent)

    layout = QVBoxLayout()
    self.setLayout(layout)

    self.l = QLabel("jenkins runner")
    layout.addWidget(self.l)
    layout.addStretch(1)
    layout.setContentsMargins(20, 20, 20, 20)

    cmds = [
      "cat /etc/hostname",
      "echo AGNOS v$(cat /VERSION)",
      "uptime -p",
    ]
    self.labels = {}
    for c in cmds:
      self.labels[c] = QLabel(c)
      layout.addWidget(self.labels[c])

    self.setStyleSheet("""
      * {
        color: white;
        font-size: 55px;
        background-color: black;
        font-family: "JetBrains Mono";
      }
    """)

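    # rerun the status commands every 10 seconds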
    self.timer = QTimer()
    self.timer.timeout.connect(self.update)
    self.timer.start(10 * 1000)
    self.update()

  def update(self):
    for cmd, label in self.labels.items():
      out = subprocess.run(cmd, capture_output=True,
                           shell=True, check=False, encoding='utf8').stdout
      label.setText(out.strip())

if __name__ == "__main__":
  app = QApplication([])
  w = Window()
  set_main_window(w)
  app.exec_()
11
selfdrive/test/cpp_harness.py
Normal file
11
selfdrive/test/cpp_harness.py
Normal file
@@ -0,0 +1,11 @@
#!/usr/bin/env python3
import subprocess
import sys

from openpilot.common.prefix import OpenpilotPrefix


with OpenpilotPrefix():
  ret = subprocess.call(sys.argv[1:])

exit(ret)
26
selfdrive/test/docker_build.sh
Normal file
26
selfdrive/test/docker_build.sh
Normal file
@@ -0,0 +1,26 @@
#!/usr/bin/env bash
set -e

# To build sim and docs, you can run the following to mount the scons cache to the same place as in CI:
# mkdir -p .ci_cache/scons_cache
# sudo mount --bind /tmp/scons_cache/ .ci_cache/scons_cache

SCRIPT_DIR=$(dirname "$0")
OPENPILOT_DIR=$SCRIPT_DIR/../../
if [ -n "$TARGET_ARCHITECTURE" ]; then
  PLATFORM="linux/$TARGET_ARCHITECTURE"
  TAG_SUFFIX="-$TARGET_ARCHITECTURE"
else
  PLATFORM="linux/$(uname -m)"
  TAG_SUFFIX=""
fi

source $SCRIPT_DIR/docker_common.sh $1 "$TAG_SUFFIX"

DOCKER_BUILDKIT=1 docker buildx build --provenance false --pull --platform $PLATFORM --load --cache-to type=inline --cache-from type=registry,ref=$REMOTE_TAG -t $REMOTE_TAG -t $LOCAL_TAG -f $OPENPILOT_DIR/$DOCKER_FILE $OPENPILOT_DIR

if [ -n "$PUSH_IMAGE" ]; then
  docker push $REMOTE_TAG
  docker tag $REMOTE_TAG $REMOTE_SHA_TAG
  docker push $REMOTE_SHA_TAG
fi
21
selfdrive/test/docker_common.sh
Normal file
21
selfdrive/test/docker_common.sh
Normal file
@@ -0,0 +1,21 @@
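# sourced by docker_build.sh and docker_tag_multiarch.sh: $1 selects the image, $2 is an optional tag suffix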
if [ "$1" = "base" ]; then
|
||||
export DOCKER_IMAGE=openpilot-base
|
||||
export DOCKER_FILE=Dockerfile.openpilot_base
|
||||
elif [ "$1" = "sim" ]; then
|
||||
export DOCKER_IMAGE=openpilot-sim
|
||||
export DOCKER_FILE=tools/sim/Dockerfile.sim
|
||||
elif [ "$1" = "prebuilt" ]; then
|
||||
export DOCKER_IMAGE=openpilot-prebuilt
|
||||
export DOCKER_FILE=Dockerfile.openpilot
|
||||
else
|
||||
echo "Invalid docker build image: '$1'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
export DOCKER_REGISTRY=ghcr.io/commaai
|
||||
export COMMIT_SHA=$(git rev-parse HEAD)
|
||||
|
||||
TAG_SUFFIX=$2
|
||||
LOCAL_TAG=$DOCKER_IMAGE$TAG_SUFFIX
|
||||
REMOTE_TAG=$DOCKER_REGISTRY/$LOCAL_TAG
|
||||
REMOTE_SHA_TAG=$DOCKER_REGISTRY/$LOCAL_TAG:$COMMIT_SHA
|
||||
25
selfdrive/test/docker_tag_multiarch.sh
Normal file
25
selfdrive/test/docker_tag_multiarch.sh
Normal file
@@ -0,0 +1,25 @@
#!/usr/bin/env bash
set -e

if [ $# -lt 2 ]; then
  echo "Usage: $0 <base|docs|sim|prebuilt|cl> <arch1> <arch2> ..."
  exit 1
fi

SCRIPT_DIR=$(dirname "$0")
ARCHS=("${@:2}")

source $SCRIPT_DIR/docker_common.sh $1

MANIFEST_AMENDS=""
for ARCH in "${ARCHS[@]}"; do
  MANIFEST_AMENDS="$MANIFEST_AMENDS --amend $REMOTE_TAG-$ARCH:$COMMIT_SHA"
done

docker manifest create $REMOTE_TAG $MANIFEST_AMENDS
docker manifest create $REMOTE_SHA_TAG $MANIFEST_AMENDS

if [[ -n "$PUSH_IMAGE" ]]; then
  docker manifest push $REMOTE_TAG
  docker manifest push $REMOTE_SHA_TAG
fi
@@ -1,6 +1,7 @@
import capnp
import hypothesis.strategies as st
from typing import Any, Callable, Dict, List, Optional, Union
from typing import Any
from collections.abc import Callable

from cereal import log

@@ -12,7 +13,7 @@ class FuzzyGenerator:
    self.draw = draw
    self.real_floats = real_floats

  def generate_native_type(self, field: str) -> st.SearchStrategy[Union[bool, int, float, str, bytes]]:
  def generate_native_type(self, field: str) -> st.SearchStrategy[bool | int | float | str | bytes]:
    def floats(**kwargs) -> st.SearchStrategy[float]:
      allow_nan = not self.real_floats
      allow_infinity = not self.real_floats
@@ -67,18 +68,18 @@ class FuzzyGenerator:
    else:
      return self.generate_struct(field.schema)

  def generate_struct(self, schema: capnp.lib.capnp._StructSchema, event: Optional[str] = None) -> st.SearchStrategy[Dict[str, Any]]:
    full_fill: List[str] = list(schema.non_union_fields)
    single_fill: List[str] = [event] if event else [self.draw(st.sampled_from(schema.union_fields))] if schema.union_fields else []
  def generate_struct(self, schema: capnp.lib.capnp._StructSchema, event: str = None) -> st.SearchStrategy[dict[str, Any]]:
    full_fill: list[str] = list(schema.non_union_fields)
    single_fill: list[str] = [event] if event else [self.draw(st.sampled_from(schema.union_fields))] if schema.union_fields else []
    return st.fixed_dictionaries({field: self.generate_field(schema.fields[field]) for field in full_fill + single_fill})

  @classmethod
  def get_random_msg(cls, draw: DrawType, struct: capnp.lib.capnp._StructModule, real_floats: bool = False) -> Dict[str, Any]:
  def get_random_msg(cls, draw: DrawType, struct: capnp.lib.capnp._StructModule, real_floats: bool = False) -> dict[str, Any]:
    fg = cls(draw, real_floats=real_floats)
    data: Dict[str, Any] = draw(fg.generate_struct(struct.schema))
    data: dict[str, Any] = draw(fg.generate_struct(struct.schema))
    return data

  @classmethod
  def get_random_event_msg(cls, draw: DrawType, events: List[str], real_floats: bool = False) -> List[Dict[str, Any]]:
  def get_random_event_msg(cls, draw: DrawType, events: list[str], real_floats: bool = False) -> list[dict[str, Any]]:
    fg = cls(draw, real_floats=real_floats)
    return [draw(fg.generate_struct(log.Event.schema, e)) for e in sorted(events)]

@@ -1,4 +1,6 @@
import http.server
import os
import threading
import time

from functools import wraps
@@ -72,7 +74,29 @@ def noop(*args, **kwargs):


def read_segment_list(segment_list_path):
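  # the segment list alternates platform lines (a two-character prefix is stripped) with segment name lines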
  with open(segment_list_path, "r") as f:
  with open(segment_list_path) as f:
    seg_list = f.read().splitlines()

  return [(platform[2:], segment) for platform, segment in zip(seg_list[::2], seg_list[1::2], strict=True)]


def with_http_server(func, handler=http.server.BaseHTTPRequestHandler, setup=None):
  @wraps(func)
  def inner(*args, **kwargs):
    host = '127.0.0.1'
    server = http.server.HTTPServer((host, 0), handler)
    port = server.server_port
    t = threading.Thread(target=server.serve_forever)
    t.start()

    if setup is not None:
      setup(host, port)

    try:
      return func(*args, f'http://{host}:{port}', **kwargs)
    finally:
      server.shutdown()
      server.server_close()
      t.join()

  return inner

1
selfdrive/test/longitudinal_maneuvers/.gitignore
vendored
Normal file
1
selfdrive/test/longitudinal_maneuvers/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
out/*
0
selfdrive/test/longitudinal_maneuvers/__init__.py
Normal file
0
selfdrive/test/longitudinal_maneuvers/__init__.py
Normal file
71
selfdrive/test/longitudinal_maneuvers/maneuver.py
Normal file
71
selfdrive/test/longitudinal_maneuvers/maneuver.py
Normal file
@@ -0,0 +1,71 @@
import numpy as np
from openpilot.selfdrive.test.longitudinal_maneuvers.plant import Plant


class Maneuver:
  def __init__(self, title, duration, **kwargs):
    # Was tempted to make a builder class
    self.distance_lead = kwargs.get("initial_distance_lead", 200.0)
    self.speed = kwargs.get("initial_speed", 0.0)
    self.lead_relevancy = kwargs.get("lead_relevancy", 0)

    self.breakpoints = kwargs.get("breakpoints", [0.0, duration])
    self.speed_lead_values = kwargs.get("speed_lead_values", [0.0 for i in range(len(self.breakpoints))])
    self.prob_lead_values = kwargs.get("prob_lead_values", [1.0 for i in range(len(self.breakpoints))])
    self.cruise_values = kwargs.get("cruise_values", [50.0 for i in range(len(self.breakpoints))])

    self.only_lead2 = kwargs.get("only_lead2", False)
    self.only_radar = kwargs.get("only_radar", False)
    self.ensure_start = kwargs.get("ensure_start", False)
    self.enabled = kwargs.get("enabled", True)
    self.e2e = kwargs.get("e2e", False)
    self.force_decel = kwargs.get("force_decel", False)

    self.duration = duration
    self.title = title

  def evaluate(self):
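    # step the plant at the model rate, interpolating lead speed, lead probability and cruise speed over the breakpoints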
    plant = Plant(
      lead_relevancy=self.lead_relevancy,
      speed=self.speed,
      distance_lead=self.distance_lead,
      enabled=self.enabled,
      only_lead2=self.only_lead2,
      only_radar=self.only_radar,
      e2e=self.e2e,
      force_decel=self.force_decel,
    )

    valid = True
    logs = []
    while plant.current_time < self.duration:
      speed_lead = np.interp(plant.current_time, self.breakpoints, self.speed_lead_values)
      prob = np.interp(plant.current_time, self.breakpoints, self.prob_lead_values)
      cruise = np.interp(plant.current_time, self.breakpoints, self.cruise_values)
      log = plant.step(speed_lead, prob, cruise)

      d_rel = log['distance_lead'] - log['distance'] if self.lead_relevancy else 200.
      v_rel = speed_lead - log['speed'] if self.lead_relevancy else 0.
      log['d_rel'] = d_rel
      log['v_rel'] = v_rel
      logs.append(np.array([plant.current_time,
                            log['distance'],
                            log['distance_lead'],
                            log['speed'],
                            speed_lead,
                            log['acceleration']]))

      if d_rel < .4 and (self.only_radar or prob > 0.5):
        print("Crashed!!!!")
        valid = False

      if self.ensure_start and log['v_rel'] > 0 and log['speeds'][-1] <= 0.1:
        print('LongitudinalPlanner not starting!')
        valid = False
      if self.force_decel and log['speed'] > 1e-1 and log['acceleration'] > -0.04:
        print('Not stopping with force decel')
        valid = False

    print("maneuver end", valid)
    return valid, np.array(logs)
172
selfdrive/test/longitudinal_maneuvers/plant.py
Normal file
172
selfdrive/test/longitudinal_maneuvers/plant.py
Normal file
@@ -0,0 +1,172 @@
#!/usr/bin/env python3
import time
import numpy as np

from cereal import log
import cereal.messaging as messaging
from openpilot.common.realtime import Ratekeeper, DT_MDL
from openpilot.selfdrive.controls.lib.longcontrol import LongCtrlState
from openpilot.selfdrive.modeld.constants import ModelConstants
from openpilot.selfdrive.controls.lib.longitudinal_planner import LongitudinalPlanner
from openpilot.selfdrive.controls.radard import _LEAD_ACCEL_TAU


class Plant:
  messaging_initialized = False

  def __init__(self, lead_relevancy=False, speed=0.0, distance_lead=2.0,
               enabled=True, only_lead2=False, only_radar=False, e2e=False, force_decel=False):
    self.rate = 1. / DT_MDL

    if not Plant.messaging_initialized:
      Plant.radar = messaging.pub_sock('radarState')
      Plant.controls_state = messaging.pub_sock('controlsState')
      Plant.car_state = messaging.pub_sock('carState')
      Plant.plan = messaging.sub_sock('longitudinalPlan')
      Plant.messaging_initialized = True

    self.v_lead_prev = 0.0

    self.distance = 0.
    self.speed = speed
    self.acceleration = 0.0
    self.speeds = []

    # lead car
    self.lead_relevancy = lead_relevancy
    self.distance_lead = distance_lead
    self.enabled = enabled
    self.only_lead2 = only_lead2
    self.only_radar = only_radar
    self.e2e = e2e
    self.force_decel = force_decel

    self.rk = Ratekeeper(self.rate, print_delay_threshold=100.0)
    self.ts = 1. / self.rate
    time.sleep(0.1)
    self.sm = messaging.SubMaster(['longitudinalPlan'])

    from openpilot.selfdrive.car.honda.values import CAR
    from openpilot.selfdrive.car.honda.interface import CarInterface

    self.planner = LongitudinalPlanner(CarInterface.get_non_essential_params(CAR.CIVIC), init_v=self.speed)

  @property
  def current_time(self):
    return float(self.rk.frame) / self.rate

  def step(self, v_lead=0.0, prob=1.0, v_cruise=50.):
    # ******** publish a fake model going straight and fake calibration ********
    # note that this is worst case for MPC, since model will delay long mpc by one time step
    radar = messaging.new_message('radarState')
    control = messaging.new_message('controlsState')
    car_state = messaging.new_message('carState')
    model = messaging.new_message('modelV2')
    a_lead = (v_lead - self.v_lead_prev) / self.ts
    self.v_lead_prev = v_lead

    if self.lead_relevancy:
      d_rel = np.maximum(0., self.distance_lead - self.distance)
      v_rel = v_lead - self.speed
      if self.only_radar:
        status = True
      elif prob > .5:
        status = True
      else:
        status = False
    else:
      d_rel = 200.
      v_rel = 0.
      prob = 0.0
      status = False

    lead = log.RadarState.LeadData.new_message()
    lead.dRel = float(d_rel)
    lead.yRel = float(0.0)
    lead.vRel = float(v_rel)
    lead.aRel = float(a_lead - self.acceleration)
    lead.vLead = float(v_lead)
    lead.vLeadK = float(v_lead)
    lead.aLeadK = float(a_lead)
    # TODO use real radard logic for this
    lead.aLeadTau = float(_LEAD_ACCEL_TAU)
    lead.status = status
    lead.modelProb = float(prob)
    if not self.only_lead2:
      radar.radarState.leadOne = lead
    radar.radarState.leadTwo = lead

    # Simulate model predicting slightly faster speed
    # this is to ensure lead policy is effective when model
    # does not predict slowdown in e2e mode
    position = log.XYZTData.new_message()
    position.x = [float(x) for x in (self.speed + 0.5) * np.array(ModelConstants.T_IDXS)]
    model.modelV2.position = position
    velocity = log.XYZTData.new_message()
    velocity.x = [float(x) for x in (self.speed + 0.5) * np.ones_like(ModelConstants.T_IDXS)]
    model.modelV2.velocity = velocity
    acceleration = log.XYZTData.new_message()
    acceleration.x = [float(x) for x in np.zeros_like(ModelConstants.T_IDXS)]
    model.modelV2.acceleration = acceleration

    control.controlsState.longControlState = LongCtrlState.pid if self.enabled else LongCtrlState.off
    control.controlsState.vCruise = float(v_cruise * 3.6)
    control.controlsState.experimentalMode = self.e2e
    control.controlsState.forceDecel = self.force_decel
    car_state.carState.vEgo = float(self.speed)
    car_state.carState.standstill = self.speed < 0.01

    # ******** get controlsState messages for plotting ***
    sm = {'radarState': radar.radarState,
          'carState': car_state.carState,
          'controlsState': control.controlsState,
          'modelV2': model.modelV2}
    self.planner.update(sm)
    self.speed = self.planner.v_desired_filter.x
    self.acceleration = self.planner.a_desired
    self.speeds = self.planner.v_desired_trajectory.tolist()
    fcw = self.planner.fcw
    self.distance_lead = self.distance_lead + v_lead * self.ts

    # ******** run the car ********
    #print(self.distance, speed)
    if self.speed <= 0:
      self.speed = 0
      self.acceleration = 0
    self.distance = self.distance + self.speed * self.ts

    # *** radar model ***
    if self.lead_relevancy:
      d_rel = np.maximum(0., self.distance_lead - self.distance)
      v_rel = v_lead - self.speed
    else:
      d_rel = 200.
      v_rel = 0.

    # print at 5hz
    # if (self.rk.frame % (self.rate // 5)) == 0:
    #   print("%2.2f sec %6.2f m %6.2f m/s %6.2f m/s2 lead_rel: %6.2f m %6.2f m/s"
    #         % (self.current_time, self.distance, self.speed, self.acceleration, d_rel, v_rel))

    # ******** update prevs ********
    self.rk.monitor_time()

    return {
      "distance": self.distance,
      "speed": self.speed,
      "acceleration": self.acceleration,
      "speeds": self.speeds,
      "distance_lead": self.distance_lead,
      "fcw": fcw,
    }

# simple engage in standalone mode
def plant_thread():
  plant = Plant()
  while True:
    plant.step()


if __name__ == "__main__":
  plant_thread()
160
selfdrive/test/longitudinal_maneuvers/test_longitudinal.py
Normal file
160
selfdrive/test/longitudinal_maneuvers/test_longitudinal.py
Normal file
@@ -0,0 +1,160 @@
#!/usr/bin/env python3
import itertools
import unittest
from parameterized import parameterized_class

from openpilot.selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import STOP_DISTANCE
from openpilot.selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver


# TODO: make new FCW tests
def create_maneuvers(kwargs):
  maneuvers = [
    Maneuver(
      'approach stopped car at 25m/s, initial distance: 120m',
      duration=20.,
      initial_speed=25.,
      lead_relevancy=True,
      initial_distance_lead=120.,
      speed_lead_values=[30., 0.],
      breakpoints=[0., 1.],
      **kwargs,
    ),
    Maneuver(
      'approach stopped car at 20m/s, initial distance 90m',
      duration=20.,
      initial_speed=20.,
      lead_relevancy=True,
      initial_distance_lead=90.,
      speed_lead_values=[20., 0.],
      breakpoints=[0., 1.],
      **kwargs,
    ),
    Maneuver(
      'steady state following a car at 20m/s, then lead decel to 0mph at 1m/s^2',
      duration=50.,
      initial_speed=20.,
      lead_relevancy=True,
      initial_distance_lead=35.,
      speed_lead_values=[20., 20., 0.],
      breakpoints=[0., 15., 35.0],
      **kwargs,
    ),
    Maneuver(
      'steady state following a car at 20m/s, then lead decel to 0mph at 2m/s^2',
      duration=50.,
      initial_speed=20.,
      lead_relevancy=True,
      initial_distance_lead=35.,
      speed_lead_values=[20., 20., 0.],
      breakpoints=[0., 15., 25.0],
      **kwargs,
    ),
    Maneuver(
      'steady state following a car at 20m/s, then lead decel to 0mph at 3m/s^2',
      duration=50.,
      initial_speed=20.,
      lead_relevancy=True,
      initial_distance_lead=35.,
      speed_lead_values=[20., 20., 0.],
      breakpoints=[0., 15., 21.66],
      **kwargs,
    ),
    Maneuver(
      'steady state following a car at 20m/s, then lead decel to 0mph at 3+m/s^2',
      duration=40.,
      initial_speed=20.,
      lead_relevancy=True,
      initial_distance_lead=35.,
      speed_lead_values=[20., 20., 0.],
      prob_lead_values=[0., 1., 1.],
      cruise_values=[20., 20., 20.],
      breakpoints=[2., 2.01, 8.8],
      **kwargs,
    ),
    Maneuver(
      "approach stopped car at 20m/s, with prob_lead_values",
      duration=30.,
      initial_speed=20.,
      lead_relevancy=True,
      initial_distance_lead=120.,
      speed_lead_values=[0.0, 0., 0.],
      prob_lead_values=[0.0, 0., 1.],
      cruise_values=[20., 20., 20.],
      breakpoints=[0.0, 2., 2.01],
      **kwargs,
    ),
    Maneuver(
      "approach slower cut-in car at 20m/s",
      duration=20.,
      initial_speed=20.,
      lead_relevancy=True,
      initial_distance_lead=50.,
      speed_lead_values=[15., 15.],
      breakpoints=[1., 11.],
      only_lead2=True,
      **kwargs,
    ),
    Maneuver(
      "stay stopped behind radar override lead",
      duration=20.,
      initial_speed=0.,
      lead_relevancy=True,
      initial_distance_lead=10.,
      speed_lead_values=[0., 0.],
      prob_lead_values=[0., 0.],
      breakpoints=[1., 11.],
      only_radar=True,
      **kwargs,
    ),
    Maneuver(
      "NaN recovery",
      duration=30.,
      initial_speed=15.,
      lead_relevancy=True,
      initial_distance_lead=60.,
      speed_lead_values=[0., 0., 0.0],
      breakpoints=[1., 1.01, 11.],
      cruise_values=[float("nan"), 15., 15.],
      **kwargs,
    ),
    Maneuver(
      'cruising at 25 m/s while disabled',
      duration=20.,
      initial_speed=25.,
      lead_relevancy=False,
      enabled=False,
      **kwargs,
    ),
  ]
  if not kwargs['force_decel']:
    # controls relies on planner commanding to move for stock-ACC resume spamming
    maneuvers.append(Maneuver(
      "resume from a stop",
      duration=20.,
      initial_speed=0.,
      lead_relevancy=True,
      initial_distance_lead=STOP_DISTANCE,
      speed_lead_values=[0., 0., 2.],
      breakpoints=[1., 10., 15.],
      ensure_start=True,
      **kwargs,
    ))
  return maneuvers

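# run every maneuver under all four (e2e, force_decel) combinations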
@parameterized_class(("e2e", "force_decel"), itertools.product([True, False], repeat=2))
class LongitudinalControl(unittest.TestCase):
  e2e: bool
  force_decel: bool

  def test_maneuver(self):
    for maneuver in create_maneuvers({"e2e": self.e2e, "force_decel": self.force_decel}):
      with self.subTest(title=maneuver.title, e2e=maneuver.e2e, force_decel=maneuver.force_decel):
        print(maneuver.title, f'in {"e2e" if maneuver.e2e else "acc"} mode')
        valid, _ = maneuver.evaluate()
        self.assertTrue(valid)


if __name__ == "__main__":
  unittest.main(failfast=True)
8
selfdrive/test/loop_until_fail.sh
Normal file
8
selfdrive/test/loop_until_fail.sh
Normal file
@@ -0,0 +1,8 @@
#!/usr/bin/env bash
set -e

# Loop something forever until it fails, for verifying new tests

while true; do
  "$@"
done
2
selfdrive/test/process_replay/.gitignore
vendored
Normal file
2
selfdrive/test/process_replay/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
fakedata/
debayer_diff.txt
126
selfdrive/test/process_replay/README.md
Normal file
126
selfdrive/test/process_replay/README.md
Normal file
@@ -0,0 +1,126 @@
# Process replay

Process replay is a regression test designed to identify any changes in the output of a process. It replays a segment through individual processes and compares the output to a known-good replay. Each supported car make is represented in the test by one segment.

If the test fails, make sure that you didn't unintentionally change anything. If there are intentional changes, the reference logs will be updated.

Use `test_processes.py` to run the test locally.
Use `FILEREADER_CACHE='1' test_processes.py` to cache log files.

Currently the following processes are tested:

* controlsd
* radard
* plannerd
* calibrationd
* dmonitoringd
* locationd
* paramsd
* ubloxd
* torqued

### Usage
```
Usage: test_processes.py [-h] [--whitelist-procs PROCS] [--whitelist-cars CARS] [--blacklist-procs PROCS]
                         [--blacklist-cars CARS] [--ignore-fields FIELDS] [--ignore-msgs MSGS] [--update-refs] [--upload-only]

Regression test to identify changes in a process's output

optional arguments:
  -h, --help                          show this help message and exit
  --whitelist-procs PROCS             Whitelist given processes from the test (e.g. controlsd)
  --whitelist-cars WHITELIST_CARS     Whitelist given cars from the test (e.g. HONDA)
  --blacklist-procs BLACKLIST_PROCS   Blacklist given processes from the test (e.g. controlsd)
  --blacklist-cars BLACKLIST_CARS     Blacklist given cars from the test (e.g. HONDA)
  --ignore-fields IGNORE_FIELDS       Extra fields or msgs to ignore (e.g. carState.events)
  --ignore-msgs IGNORE_MSGS           Msgs to ignore (e.g. onroadEvents)
  --update-refs                       Updates reference logs using current commit
  --upload-only                       Skips testing processes and uploads logs from previous test run
```
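
For example, to replay a single process for a single make, the documented flags can be combined (a hypothetical invocation; which fields are worth ignoring depends on your change):

```sh
# replay only controlsd on the HONDA segment, ignoring a noisy timing field
./test_processes.py --whitelist-procs controlsd --whitelist-cars HONDA --ignore-fields controlsState.cumLagMs
```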

## Forks

openpilot forks can use this test with their own reference logs; by default, `test_processes.py` saves logs locally.

To generate new logs:

`./test_processes.py`

Then, check in the new logs using git-lfs. Make sure to also update the `ref_commit` file to the current commit.

## API

The process replay test suite exposes programmatic APIs for replaying processes or groups of processes on provided logs.

```py
def replay_process_with_name(name: Union[str, Iterable[str]], lr: LogIterable, *args, **kwargs) -> List[capnp._DynamicStructReader]:

def replay_process(
  cfg: Union[ProcessConfig, Iterable[ProcessConfig]], lr: LogIterable, frs: Optional[Dict[str, Any]] = None,
  fingerprint: Optional[str] = None, return_all_logs: bool = False, custom_params: Optional[Dict[str, Any]] = None, disable_progress: bool = False
) -> List[capnp._DynamicStructReader]:
```

Example usage:
```py
from openpilot.selfdrive.test.process_replay import replay_process_with_name
from openpilot.tools.lib.logreader import LogReader

lr = LogReader(...)

# provide a name of the process to replay
output_logs = replay_process_with_name('locationd', lr)

# or a list of names
output_logs = replay_process_with_name(['ubloxd', 'locationd'], lr)
```

Supported processes:
* controlsd
* radard
* plannerd
* calibrationd
* dmonitoringd
* locationd
* paramsd
* ubloxd
* torqued
* modeld
* dmonitoringmodeld

Certain processes may require an initial state, which is usually supplied within `Params` and persists from segment to segment (e.g. CalibrationParams, LiveParameters). The `custom_params` argument is a dictionary used to prepopulate `Params` with arbitrary values. The `get_custom_params_from_lr` helper is provided to fetch meaningful values from log files.

```py
from openpilot.selfdrive.test.process_replay import get_custom_params_from_lr

previous_segment_lr = LogReader(...)
current_segment_lr = LogReader(...)

custom_params = get_custom_params_from_lr(previous_segment_lr, 'last')

output_logs = replay_process_with_name('calibrationd', lr, custom_params=custom_params)
```

Replaying processes that use VisionIPC (e.g. modeld, dmonitoringmodeld) requires an additional `frs` dictionary with camera states as keys and `FrameReader` objects as values.

```py
from openpilot.tools.lib.framereader import FrameReader

frs = {
  'roadCameraState': FrameReader(...),
  'wideRoadCameraState': FrameReader(...),
  'driverCameraState': FrameReader(...),
}

output_logs = replay_process_with_name(['modeld', 'dmonitoringmodeld'], lr, frs=frs)
```

To capture the stdout/stderr of a replayed process, a `captured_output_store` dictionary can be provided.

```py
output_store = dict()
# pass the dictionary by reference; it will be filled with captured output even if process replay fails
output_logs = replay_process_with_name(['radard', 'plannerd'], lr, captured_output_store=output_store)

# entries in the format { 'out': '...', 'err': '...' } will be added to the provided dictionary for each replayed process
print(output_store['radard']['out'])  # radard stdout
print(output_store['radard']['err'])  # radard stderr
```
2
selfdrive/test/process_replay/__init__.py
Normal file
2
selfdrive/test/process_replay/__init__.py
Normal file
@@ -0,0 +1,2 @@
from openpilot.selfdrive.test.process_replay.process_replay import CONFIGS, get_process_config, get_custom_params_from_lr, \
  replay_process, replay_process_with_name  # noqa: F401
59
selfdrive/test/process_replay/capture.py
Normal file
59
selfdrive/test/process_replay/capture.py
Normal file
@@ -0,0 +1,59 @@
import os
import sys

from typing import no_type_check


class FdRedirect:
  def __init__(self, file_prefix: str, fd: int):
    fname = os.path.join("/tmp", f"{file_prefix}.{fd}")
    if os.path.exists(fname):
      os.unlink(fname)
    self.dest_fd = os.open(fname, os.O_WRONLY | os.O_CREAT)
    self.dest_fname = fname
    self.source_fd = fd
    os.set_inheritable(self.dest_fd, True)

  def __del__(self):
    os.close(self.dest_fd)

  def purge(self) -> None:
    os.unlink(self.dest_fname)

  def read(self) -> bytes:
    with open(self.dest_fname, "rb") as f:
      return f.read() or b""

  def link(self) -> None:
    os.dup2(self.dest_fd, self.source_fd)


class ProcessOutputCapture:
  def __init__(self, proc_name: str, prefix: str):
    prefix = f"{proc_name}_{prefix}"
    self.stdout_redirect = FdRedirect(prefix, 1)
    self.stderr_redirect = FdRedirect(prefix, 2)

  def __del__(self):
    self.stdout_redirect.purge()
    self.stderr_redirect.purge()

  @no_type_check  # ipython classes have incompatible signatures
  def link_with_current_proc(self) -> None:
    try:
      # prevent ipykernel from redirecting stdout/stderr of python subprocesses
      from ipykernel.iostream import OutStream
      if isinstance(sys.stdout, OutStream):
        sys.stdout = sys.__stdout__
      if isinstance(sys.stderr, OutStream):
        sys.stderr = sys.__stderr__
    except ImportError:
      pass

    # link stdout/stderr to the capture files
    self.stdout_redirect.link()
    self.stderr_redirect.link()

  def read_outerr(self) -> tuple[str, str]:
    out_str = self.stdout_redirect.read().decode()
    err_str = self.stderr_redirect.read().decode()
    return out_str, err_str
150
selfdrive/test/process_replay/compare_logs.py
Normal file
150
selfdrive/test/process_replay/compare_logs.py
Normal file
@@ -0,0 +1,150 @@
#!/usr/bin/env python3
import sys
import math
import capnp
import numbers
import dictdiffer
from collections import Counter

from openpilot.tools.lib.logreader import LogReader

EPSILON = sys.float_info.epsilon


def remove_ignored_fields(msg, ignore):
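  # zero out every ignored field (bool -> False, number -> 0, list -> []) so it can't affect the byte-level comparison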
  msg = msg.as_builder()
  for key in ignore:
    attr = msg
    keys = key.split(".")
    if msg.which() != keys[0] and len(keys) > 1:
      continue

    for k in keys[:-1]:
      # indexing into list
      if k.isdigit():
        attr = attr[int(k)]
      else:
        attr = getattr(attr, k)

    v = getattr(attr, keys[-1])
    if isinstance(v, bool):
      val = False
    elif isinstance(v, numbers.Number):
      val = 0
    elif isinstance(v, (list, capnp.lib.capnp._DynamicListBuilder)):
      val = []
    else:
      raise NotImplementedError(f"Unknown type: {type(v)}")
    setattr(attr, keys[-1], val)
  return msg


def compare_logs(log1, log2, ignore_fields=None, ignore_msgs=None, tolerance=None):
  if ignore_fields is None:
    ignore_fields = []
  if ignore_msgs is None:
    ignore_msgs = []
  tolerance = EPSILON if tolerance is None else tolerance

  log1, log2 = (
    [m for m in log if m.which() not in ignore_msgs]
    for log in (log1, log2)
  )

  if len(log1) != len(log2):
    cnt1 = Counter(m.which() for m in log1)
    cnt2 = Counter(m.which() for m in log2)
    raise Exception(f"logs are not the same length: {len(log1)} VS {len(log2)}\n\t\t{cnt1}\n\t\t{cnt2}")

  diff = []
  for msg1, msg2 in zip(log1, log2, strict=True):
    if msg1.which() != msg2.which():
      raise Exception("msgs not aligned between logs")

    msg1 = remove_ignored_fields(msg1, ignore_fields)
    msg2 = remove_ignored_fields(msg2, ignore_fields)

    if msg1.to_bytes() != msg2.to_bytes():
      msg1_dict = msg1.as_reader().to_dict(verbose=True)
      msg2_dict = msg2.as_reader().to_dict(verbose=True)

      dd = dictdiffer.diff(msg1_dict, msg2_dict, ignore=ignore_fields)

      # dictdiffer only supports relative tolerance, we also want to check for absolute
      # TODO: add this to dictdiffer
      def outside_tolerance(diff):
        try:
          if diff[0] == "change":
            a, b = diff[2]
            finite = math.isfinite(a) and math.isfinite(b)
            if finite and isinstance(a, numbers.Number) and isinstance(b, numbers.Number):
              return abs(a - b) > max(tolerance, tolerance * max(abs(a), abs(b)))
        except TypeError:
          pass
        return True

      dd = list(filter(outside_tolerance, dd))

      diff.extend(dd)
  return diff


def format_process_diff(diff):
  diff_short, diff_long = "", ""

  if isinstance(diff, str):
    diff_short += f"  {diff}\n"
    diff_long += f"\t{diff}\n"
  else:
    cnt: dict[str, int] = {}
    for d in diff:
      diff_long += f"\t{str(d)}\n"

      k = str(d[1])
      cnt[k] = 1 if k not in cnt else cnt[k] + 1

    for k, v in sorted(cnt.items()):
      diff_short += f"  {k}: {v}\n"

  return diff_short, diff_long


def format_diff(results, log_paths, ref_commit):
  diff_short, diff_long = "", ""
  diff_long += f"***** tested against commit {ref_commit} *****\n"

  failed = False
  for segment, result in list(results.items()):
    diff_short += f"***** results for segment {segment} *****\n"
    diff_long += f"***** differences for segment {segment} *****\n"

    for proc, diff in list(result.items()):
      diff_long += f"*** process: {proc} ***\n"
      diff_long += f"\tref: {log_paths[segment][proc]['ref']}\n"
      diff_long += f"\tnew: {log_paths[segment][proc]['new']}\n\n"

      diff_short += f"  {proc}\n"

      if isinstance(diff, str) or len(diff):
        diff_short += f"    ref: {log_paths[segment][proc]['ref']}\n"
        diff_short += f"    new: {log_paths[segment][proc]['new']}\n\n"
        failed = True

      proc_diff_short, proc_diff_long = format_process_diff(diff)

      diff_long += proc_diff_long
      diff_short += proc_diff_short

  return diff_short, diff_long, failed


if __name__ == "__main__":
  log1 = list(LogReader(sys.argv[1]))
  log2 = list(LogReader(sys.argv[2]))
  ignore_fields = sys.argv[3:] or ["logMonoTime", "controlsState.startMonoTime", "controlsState.cumLagMs"]
  results = {"segment": {"proc": compare_logs(log1, log2, ignore_fields)}}
  log_paths = {"segment": {"proc": {"ref": sys.argv[1], "new": sys.argv[2]}}}
  diff_short, diff_long, failed = format_diff(results, log_paths, None)

  print(diff_long)
  print(diff_short)
1
selfdrive/test/process_replay/debayer_replay_ref_commit
Normal file
1
selfdrive/test/process_replay/debayer_replay_ref_commit
Normal file
@@ -0,0 +1 @@
8f9ba7540b4549b4a57312129b8ff678d045f70f
203
selfdrive/test/process_replay/migration.py
Normal file
203
selfdrive/test/process_replay/migration.py
Normal file
@@ -0,0 +1,203 @@
from collections import defaultdict

from cereal import messaging
from openpilot.selfdrive.test.process_replay.vision_meta import meta_from_encode_index
from openpilot.selfdrive.car.toyota.values import EPS_SCALE
from openpilot.selfdrive.manager.process_config import managed_processes
from panda import Panda


def migrate_all(lr, old_logtime=False, manager_states=False, panda_states=False, camera_states=False):
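  # sensorEvents and carParams are always migrated; manager, panda and camera state migrations are opt-in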
msgs = migrate_sensorEvents(lr, old_logtime)
|
||||
msgs = migrate_carParams(msgs, old_logtime)
|
||||
if manager_states:
|
||||
msgs = migrate_managerState(msgs)
|
||||
if panda_states:
|
||||
msgs = migrate_pandaStates(msgs)
|
||||
msgs = migrate_peripheralState(msgs)
|
||||
if camera_states:
|
||||
msgs = migrate_cameraStates(msgs)
|
||||
|
||||
return msgs
|
||||
|
||||
|
||||
def migrate_managerState(lr):
|
||||
all_msgs = []
|
||||
for msg in lr:
|
||||
if msg.which() != "managerState":
|
||||
all_msgs.append(msg)
|
||||
continue
|
||||
|
||||
new_msg = msg.as_builder()
|
||||
new_msg.managerState.processes = [{'name': name, 'running': True} for name in managed_processes]
|
||||
all_msgs.append(new_msg.as_reader())
|
||||
|
||||
return all_msgs
|
||||
|
||||
|
||||
def migrate_pandaStates(lr):
|
||||
all_msgs = []
|
||||
# TODO: safety param migration should be handled automatically
|
||||
safety_param_migration = {
|
||||
"TOYOTA PRIUS 2017": EPS_SCALE["TOYOTA PRIUS 2017"] | Panda.FLAG_TOYOTA_STOCK_LONGITUDINAL,
|
||||
"TOYOTA RAV4 2017": EPS_SCALE["TOYOTA RAV4 2017"] | Panda.FLAG_TOYOTA_ALT_BRAKE | Panda.FLAG_TOYOTA_GAS_INTERCEPTOR,
|
||||
"KIA EV6 2022": Panda.FLAG_HYUNDAI_EV_GAS | Panda.FLAG_HYUNDAI_CANFD_HDA2,
|
||||
}
|
||||
|
||||
# Migrate safety param base on carState
|
||||
CP = next((m.carParams for m in lr if m.which() == 'carParams'), None)
|
||||
assert CP is not None, "carParams message not found"
|
||||
if CP.carFingerprint in safety_param_migration:
|
||||
safety_param = safety_param_migration[CP.carFingerprint]
|
||||
elif len(CP.safetyConfigs):
|
||||
safety_param = CP.safetyConfigs[0].safetyParam
|
||||
if CP.safetyConfigs[0].safetyParamDEPRECATED != 0:
|
||||
safety_param = CP.safetyConfigs[0].safetyParamDEPRECATED
|
||||
else:
|
||||
safety_param = CP.safetyParamDEPRECATED
|
||||
|
||||
for msg in lr:
|
||||
if msg.which() == 'pandaStateDEPRECATED':
|
||||
      new_msg = messaging.new_message('pandaStates', 1)
      new_msg.valid = msg.valid
      new_msg.logMonoTime = msg.logMonoTime
      new_msg.pandaStates[0] = msg.pandaStateDEPRECATED
      new_msg.pandaStates[0].safetyParam = safety_param
      all_msgs.append(new_msg.as_reader())
    elif msg.which() == 'pandaStates':
      new_msg = msg.as_builder()
      new_msg.pandaStates[-1].safetyParam = safety_param
      all_msgs.append(new_msg.as_reader())
    else:
      all_msgs.append(msg)

  return all_msgs


def migrate_peripheralState(lr):
  if any(msg.which() == "peripheralState" for msg in lr):
    return lr

  all_msg = []
  for msg in lr:
    all_msg.append(msg)
    if msg.which() not in ["pandaStates", "pandaStateDEPRECATED"]:
      continue

    new_msg = messaging.new_message("peripheralState")
    new_msg.valid = msg.valid
    new_msg.logMonoTime = msg.logMonoTime
    all_msg.append(new_msg.as_reader())

  return all_msg


def migrate_cameraStates(lr):
  all_msgs = []
  frame_to_encode_id = defaultdict(dict)
  # just for the encodeId fallback mechanism
  min_frame_id = defaultdict(lambda: float('inf'))

  for msg in lr:
    if msg.which() not in ["roadEncodeIdx", "wideRoadEncodeIdx", "driverEncodeIdx"]:
      continue

    encode_index = getattr(msg, msg.which())
    meta = meta_from_encode_index(msg.which())

    assert encode_index.segmentId < 1200, f"Encoder index segmentId greater than 1200: {msg.which()} {encode_index.segmentId}"
    frame_to_encode_id[meta.camera_state][encode_index.frameId] = encode_index.segmentId

  for msg in lr:
    if msg.which() not in ["roadCameraState", "wideRoadCameraState", "driverCameraState"]:
      all_msgs.append(msg)
      continue

    camera_state = getattr(msg, msg.which())
    min_frame_id[msg.which()] = min(min_frame_id[msg.which()], camera_state.frameId)

    encode_id = frame_to_encode_id[msg.which()].get(camera_state.frameId)
    if encode_id is None:
      print(f"Missing encoded frame for camera feed {msg.which()} with frameId: {camera_state.frameId}")
      if len(frame_to_encode_id[msg.which()]) != 0:
        continue

      # fallback mechanism for logs without encodeIdx (e.g. logs from before 2022 with dcamera recording disabled)
      # try to fake encode_id by subtracting the lowest frameId
      encode_id = camera_state.frameId - min_frame_id[msg.which()]
      print(f"Faking encodeId to {encode_id} for camera feed {msg.which()} with frameId: {camera_state.frameId}")

    new_msg = messaging.new_message(msg.which())
    new_camera_state = getattr(new_msg, new_msg.which())
    new_camera_state.frameId = encode_id
    new_camera_state.encodeId = encode_id
    # timestampSof was added later so it might be missing on some old segments
    if camera_state.timestampSof == 0 and camera_state.timestampEof > 25000000:
      new_camera_state.timestampSof = camera_state.timestampEof - 18000000
    else:
      new_camera_state.timestampSof = camera_state.timestampSof
    new_camera_state.timestampEof = camera_state.timestampEof
    new_msg.logMonoTime = msg.logMonoTime
    new_msg.valid = msg.valid

    all_msgs.append(new_msg.as_reader())

  return all_msgs


def migrate_carParams(lr, old_logtime=False):
  all_msgs = []
  for msg in lr:
    if msg.which() == 'carParams':
      CP = messaging.new_message('carParams')
      CP.valid = True
      CP.carParams = msg.carParams.as_builder()
      for car_fw in CP.carParams.carFw:
        car_fw.brand = CP.carParams.carName
      if old_logtime:
        CP.logMonoTime = msg.logMonoTime
      msg = CP.as_reader()
    all_msgs.append(msg)

  return all_msgs


def migrate_sensorEvents(lr, old_logtime=False):
  all_msgs = []
  for msg in lr:
    if msg.which() != 'sensorEventsDEPRECATED':
      all_msgs.append(msg)
      continue

    # migrate to split sensor events
    for evt in msg.sensorEventsDEPRECATED:
      # build a new message for each sensor type
      sensor_service = ''
      if evt.which() == 'acceleration':
        sensor_service = 'accelerometer'
      elif evt.which() == 'gyro' or evt.which() == 'gyroUncalibrated':
        sensor_service = 'gyroscope'
      elif evt.which() == 'light' or evt.which() == 'proximity':
        sensor_service = 'lightSensor'
      elif evt.which() == 'magnetic' or evt.which() == 'magneticUncalibrated':
        sensor_service = 'magnetometer'
      elif evt.which() == 'temperature':
        sensor_service = 'temperatureSensor'

      m = messaging.new_message(sensor_service)
      m.valid = True
      if old_logtime:
        m.logMonoTime = msg.logMonoTime

      m_dat = getattr(m, sensor_service)
      m_dat.version = evt.version
      m_dat.sensor = evt.sensor
      m_dat.type = evt.type
      m_dat.source = evt.source
      if old_logtime:
        m_dat.timestamp = evt.timestamp
      setattr(m_dat, evt.which(), getattr(evt, evt.which()))

      all_msgs.append(m.as_reader())

  return all_msgs
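

# A minimal usage sketch (assumption: not part of the original commit); the helper
# name run_all_migrations is hypothetical. It chains the migrations above over a
# log, since each helper passes through messages it does not own.
def run_all_migrations(lr):
  msgs = list(lr)
  for migrate in (migrate_sensorEvents, migrate_carParams, migrate_peripheralState, migrate_cameraStates):
    msgs = migrate(msgs)
  return msgs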
249
selfdrive/test/process_replay/model_replay.py
Normal file
@@ -0,0 +1,249 @@
#!/usr/bin/env python3
import os
import sys
import time
from collections import defaultdict
from typing import Any

import cereal.messaging as messaging
from openpilot.common.params import Params
from openpilot.system.hardware import PC
from openpilot.selfdrive.manager.process_config import managed_processes
from openpilot.tools.lib.openpilotci import BASE_URL, get_url
from openpilot.selfdrive.test.process_replay.compare_logs import compare_logs, format_diff
from openpilot.selfdrive.test.process_replay.process_replay import get_process_config, replay_process
from openpilot.system.version import get_commit
from openpilot.tools.lib.framereader import FrameReader
from openpilot.tools.lib.logreader import LogReader
from openpilot.tools.lib.helpers import save_log

TEST_ROUTE = "2f4452b03ccb98f0|2022-12-03--13-45-30"
SEGMENT = 6
MAX_FRAMES = 100 if PC else 600
NAV_FRAMES = 50

NO_NAV = "NO_NAV" in os.environ
NO_MODEL = "NO_MODEL" in os.environ
SEND_EXTRA_INPUTS = bool(int(os.getenv("SEND_EXTRA_INPUTS", "0")))


def get_log_fn(ref_commit, test_route):
  return f"{test_route}_model_tici_{ref_commit}.bz2"


def trim_logs_to_max_frames(logs, max_frames, frs_types, include_all_types):
  all_msgs = []
  cam_state_counts = defaultdict(int)
  # keep adding messages until the cam state counts reach max_frames
  for msg in sorted(logs, key=lambda m: m.logMonoTime):
    all_msgs.append(msg)
    if msg.which() in frs_types:
      cam_state_counts[msg.which()] += 1

    if all(cam_state_counts[state] == max_frames for state in frs_types):
      break

  if len(include_all_types) != 0:
    other_msgs = [m for m in logs if m.which() in include_all_types]
    all_msgs.extend(other_msgs)

  return all_msgs
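

# Hypothetical usage sketch (not in the original commit): trim a segment's logs so
# modeld sees exactly MAX_FRAMES road/wide frame pairs while every encode index and
# carParams message is kept; this mirrors the call made in model_replay() below.
def _trim_example(lr):
  return trim_logs_to_max_frames(lr, MAX_FRAMES,
                                 {"roadCameraState", "wideRoadCameraState"},
                                 {"roadEncodeIdx", "wideRoadEncodeIdx", "carParams"})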


def nav_model_replay(lr):
  sm = messaging.SubMaster(['navModel', 'navThumbnail', 'mapRenderState'])
  pm = messaging.PubMaster(['liveLocationKalman', 'navRoute'])

  nav = [m for m in lr if m.which() == 'navRoute']
  llk = [m for m in lr if m.which() == 'liveLocationKalman']
  assert len(nav) > 0 and len(llk) >= NAV_FRAMES and nav[0].logMonoTime < llk[-NAV_FRAMES].logMonoTime

  log_msgs = []
  try:
    assert "MAPBOX_TOKEN" in os.environ
    os.environ['MAP_RENDER_TEST_MODE'] = '1'
    Params().put_bool('DmModelInitialized', True)
    managed_processes['mapsd'].start()
    managed_processes['navmodeld'].start()

    # setup position and route
    for _ in range(10):
      for s in (llk[-NAV_FRAMES], nav[0]):
        pm.send(s.which(), s.as_builder().to_bytes())
      sm.update(1000)
      if sm.updated['navModel']:
        break
      time.sleep(1)

    if not sm.updated['navModel']:
      raise Exception("no navmodeld outputs, failed to initialize")

    # drain
    time.sleep(2)
    sm.update(0)

    # run replay
    for n in range(len(llk) - NAV_FRAMES, len(llk)):
      pm.send(llk[n].which(), llk[n].as_builder().to_bytes())
      m = messaging.recv_one(sm.sock['navThumbnail'])
      assert m is not None, f"no navThumbnail, frame={n}"
      log_msgs.append(m)

      m = messaging.recv_one(sm.sock['mapRenderState'])
      assert m is not None, f"no mapRenderState, frame={n}"
      log_msgs.append(m)

      m = messaging.recv_one(sm.sock['navModel'])
      assert m is not None, f"no navModel response, frame={n}"
      log_msgs.append(m)
  finally:
    managed_processes['mapsd'].stop()
    managed_processes['navmodeld'].stop()

  return log_msgs


def model_replay(lr, frs):
  # modeld is using frame pairs
  modeld_logs = trim_logs_to_max_frames(lr, MAX_FRAMES, {"roadCameraState", "wideRoadCameraState"}, {"roadEncodeIdx", "wideRoadEncodeIdx", "carParams"})
  dmodeld_logs = trim_logs_to_max_frames(lr, MAX_FRAMES, {"driverCameraState"}, {"driverEncodeIdx", "carParams"})
  if not SEND_EXTRA_INPUTS:
    modeld_logs = [msg for msg in modeld_logs if msg.which() not in ["liveCalibration",]]
    dmodeld_logs = [msg for msg in dmodeld_logs if msg.which() not in ["liveCalibration",]]
  # initial calibration
  cal_msg = next(msg for msg in lr if msg.which() == "liveCalibration").as_builder()
  cal_msg.logMonoTime = lr[0].logMonoTime
  modeld_logs.insert(0, cal_msg.as_reader())
  dmodeld_logs.insert(0, cal_msg.as_reader())

  modeld = get_process_config("modeld")
  dmonitoringmodeld = get_process_config("dmonitoringmodeld")

  modeld_msgs = replay_process(modeld, modeld_logs, frs)
  dmonitoringmodeld_msgs = replay_process(dmonitoringmodeld, dmodeld_logs, frs)
  return modeld_msgs + dmonitoringmodeld_msgs
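

# Hypothetical sketch (not in the original commit): replaying only the driving and
# driver-monitoring models on one downloaded segment, using the same frs layout the
# __main__ block below builds.
def _model_replay_example():
  lr = list(LogReader(get_url(TEST_ROUTE, SEGMENT)))
  frs = {
    'roadCameraState': FrameReader(get_url(TEST_ROUTE, SEGMENT, log_type="fcamera"), readahead=True),
    'driverCameraState': FrameReader(get_url(TEST_ROUTE, SEGMENT, log_type="dcamera"), readahead=True),
    'wideRoadCameraState': FrameReader(get_url(TEST_ROUTE, SEGMENT, log_type="ecamera"), readahead=True),
  }
  return model_replay(lr, frs)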


if __name__ == "__main__":
  update = "--update" in sys.argv
  replay_dir = os.path.dirname(os.path.abspath(__file__))
  ref_commit_fn = os.path.join(replay_dir, "model_replay_ref_commit")

  # load logs
  lr = list(LogReader(get_url(TEST_ROUTE, SEGMENT)))
  frs = {
    'roadCameraState': FrameReader(get_url(TEST_ROUTE, SEGMENT, log_type="fcamera"), readahead=True),
    'driverCameraState': FrameReader(get_url(TEST_ROUTE, SEGMENT, log_type="dcamera"), readahead=True),
    'wideRoadCameraState': FrameReader(get_url(TEST_ROUTE, SEGMENT, log_type="ecamera"), readahead=True)
  }

  # Update tile refs
  if update:
    import urllib
    import requests
    import threading
    import http.server
    from openpilot.tools.lib.openpilotci import upload_bytes
    os.environ['MAPS_HOST'] = 'http://localhost:5000'

    class HTTPRequestHandler(http.server.BaseHTTPRequestHandler):
      def do_GET(self):
        assert len(self.path) > 10  # Sanity check on path length
        r = requests.get(f'https://api.mapbox.com{self.path}', timeout=30)
        upload_bytes(r.content, urllib.parse.urlparse(self.path).path.lstrip('/'))
        self.send_response(r.status_code)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write(r.content)

    server = http.server.HTTPServer(("127.0.0.1", 5000), HTTPRequestHandler)
    thread = threading.Thread(None, server.serve_forever, daemon=True)
    thread.start()
  else:
    os.environ['MAPS_HOST'] = BASE_URL.rstrip('/')

  log_msgs = []
  # run replays
  if not NO_MODEL:
    log_msgs += model_replay(lr, frs)
  if not NO_NAV:
    log_msgs += nav_model_replay(lr)

  # get diff
  failed = False
  if not update:
    with open(ref_commit_fn) as f:
      ref_commit = f.read().strip()
    log_fn = get_log_fn(ref_commit, TEST_ROUTE)
    try:
      all_logs = list(LogReader(BASE_URL + log_fn))
      cmp_log = []

      # logs are ordered based on type: modelV2, driverStateV2, nav messages (navThumbnail, mapRenderState, navModel)
      if not NO_MODEL:
        model_start_index = next(i for i, m in enumerate(all_logs) if m.which() in ("modelV2", "cameraOdometry"))
        cmp_log += all_logs[model_start_index:model_start_index + MAX_FRAMES*2]
        dmon_start_index = next(i for i, m in enumerate(all_logs) if m.which() == "driverStateV2")
        cmp_log += all_logs[dmon_start_index:dmon_start_index + MAX_FRAMES]
      if not NO_NAV:
        nav_start_index = next(i for i, m in enumerate(all_logs) if m.which() in ["navThumbnail", "mapRenderState", "navModel"])
        nav_logs = all_logs[nav_start_index:nav_start_index + NAV_FRAMES*3]
        cmp_log += nav_logs

      ignore = [
        'logMonoTime',
        'modelV2.frameDropPerc',
        'modelV2.modelExecutionTime',
        'driverStateV2.modelExecutionTime',
        'driverStateV2.dspExecutionTime',
        'navModel.dspExecutionTime',
        'navModel.modelExecutionTime',
        'navThumbnail.timestampEof',
        'mapRenderState.locationMonoTime',
        'mapRenderState.renderTime',
      ]
      if PC:
        ignore += [
          'modelV2.laneLines.0.t',
          'modelV2.laneLines.1.t',
          'modelV2.laneLines.2.t',
          'modelV2.laneLines.3.t',
          'modelV2.roadEdges.0.t',
          'modelV2.roadEdges.1.t',
        ]
      # TODO: this tolerance is absurdly large
      tolerance = 2.0 if PC else None
      results: Any = {TEST_ROUTE: {}}
      log_paths: Any = {TEST_ROUTE: {"models": {'ref': BASE_URL + log_fn, 'new': log_fn}}}
      results[TEST_ROUTE]["models"] = compare_logs(cmp_log, log_msgs, tolerance=tolerance, ignore_fields=ignore)
      diff_short, diff_long, failed = format_diff(results, log_paths, ref_commit)

      print(diff_long)
      print('-------------\n'*5)
      print(diff_short)
      with open("model_diff.txt", "w") as f:
        f.write(diff_long)
    except Exception as e:
      print(str(e))
      failed = True

  # upload new refs
  if (update or failed) and not PC:
    from openpilot.tools.lib.openpilotci import upload_file

    print("Uploading new refs")

    new_commit = get_commit()
    log_fn = get_log_fn(new_commit, TEST_ROUTE)
    save_log(log_fn, log_msgs)
    try:
      upload_file(log_fn, os.path.basename(log_fn))
    except Exception as e:
      print("failed to upload", e)

    with open(ref_commit_fn, 'w') as f:
      f.write(str(new_commit))

    print("\n\nNew ref commit: ", new_commit)

  sys.exit(int(failed))
1
selfdrive/test/process_replay/model_replay_ref_commit
Normal file
@@ -0,0 +1 @@
e8b359a82316e6dfce3b6fb0fb9684431bfa0a1b
800
selfdrive/test/process_replay/process_replay.py
Normal file
@@ -0,0 +1,800 @@
#!/usr/bin/env python3
import os
import time
import copy
import json
import heapq
import signal
import platform
from collections import OrderedDict
from dataclasses import dataclass, field
from typing import Any
from collections.abc import Callable, Iterable
from tqdm import tqdm
import capnp

import cereal.messaging as messaging
from cereal import car
from cereal.services import SERVICE_LIST
from cereal.visionipc import VisionIpcServer, get_endpoint_name as vipc_get_endpoint_name
from openpilot.common.params import Params
from openpilot.common.prefix import OpenpilotPrefix
from openpilot.common.timeout import Timeout
from openpilot.common.realtime import DT_CTRL
from panda.python import ALTERNATIVE_EXPERIENCE
from openpilot.selfdrive.car.car_helpers import get_car, interfaces
from openpilot.selfdrive.manager.process_config import managed_processes
from openpilot.selfdrive.test.process_replay.vision_meta import meta_from_camera_state, available_streams
from openpilot.selfdrive.test.process_replay.migration import migrate_all
from openpilot.selfdrive.test.process_replay.capture import ProcessOutputCapture
from openpilot.tools.lib.logreader import LogIterable
from openpilot.tools.lib.framereader import BaseFrameReader

# Numpy gives different results based on CPU features after version 19
NUMPY_TOLERANCE = 1e-7
PROC_REPLAY_DIR = os.path.dirname(os.path.abspath(__file__))
FAKEDATA = os.path.join(PROC_REPLAY_DIR, "fakedata/")


class DummySocket:
  def __init__(self):
    self.data: list[bytes] = []

  def receive(self, non_blocking: bool = False) -> bytes | None:
    if non_blocking:
      return None

    return self.data.pop()

  def send(self, data: bytes):
    self.data.append(data)
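

# Hypothetical sketch (not in the original commit): DummySocket mimics just enough
# of a messaging socket for fingerprinting, so CAN bytes can be queued before
# get_car() drains them. Note receive() pops from the end of the queue.
def _dummy_socket_example():
  can = DummySocket()
  can.send(b"\x00" * 8)  # queue fake CAN bytes
  return can.receive()   # popped by the consumer; returns the queued bytes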


class LauncherWithCapture:
  def __init__(self, capture: ProcessOutputCapture, launcher: Callable):
    self.capture = capture
    self.launcher = launcher

  def __call__(self, *args, **kwargs):
    self.capture.link_with_current_proc()
    self.launcher(*args, **kwargs)


class ReplayContext:
  def __init__(self, cfg):
    self.proc_name = cfg.proc_name
    self.pubs = cfg.pubs
    self.main_pub = cfg.main_pub
    self.main_pub_drained = cfg.main_pub_drained
    self.unlocked_pubs = cfg.unlocked_pubs
    assert(len(self.pubs) != 0 or self.main_pub is not None)

  def __enter__(self):
    self.open_context()

    return self

  def __exit__(self, exc_type, exc_obj, exc_tb):
    self.close_context()

  def open_context(self):
    messaging.toggle_fake_events(True)
    messaging.set_fake_prefix(self.proc_name)

    if self.main_pub is None:
      self.events = OrderedDict()
      pubs_with_events = [pub for pub in self.pubs if pub not in self.unlocked_pubs]
      for pub in pubs_with_events:
        self.events[pub] = messaging.fake_event_handle(pub, enable=True)
    else:
      self.events = {self.main_pub: messaging.fake_event_handle(self.main_pub, enable=True)}

  def close_context(self):
    del self.events

    messaging.toggle_fake_events(False)
    messaging.delete_fake_prefix()

  @property
  def all_recv_called_events(self):
    return [man.recv_called_event for man in self.events.values()]

  @property
  def all_recv_ready_events(self):
    return [man.recv_ready_event for man in self.events.values()]

  def send_sync(self, pm, endpoint, dat):
    self.events[endpoint].recv_called_event.wait()
    self.events[endpoint].recv_called_event.clear()
    pm.send(endpoint, dat)
    self.events[endpoint].recv_ready_event.set()

  def unlock_sockets(self):
    expected_sets = len(self.events)
    while expected_sets > 0:
      index = messaging.wait_for_one_event(self.all_recv_called_events)
      self.all_recv_called_events[index].clear()
      self.all_recv_ready_events[index].set()
      expected_sets -= 1

  def wait_for_recv_called(self):
    messaging.wait_for_one_event(self.all_recv_called_events)

  def wait_for_next_recv(self, trigger_empty_recv):
    index = messaging.wait_for_one_event(self.all_recv_called_events)
    if self.main_pub is not None and self.main_pub_drained and trigger_empty_recv:
      self.all_recv_called_events[index].clear()
      self.all_recv_ready_events[index].set()
      self.all_recv_called_events[index].wait()
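

# Hypothetical sketch (not in the original commit) of the lockstep handshake
# ReplayContext provides: send_sync() waits for the process to call recv, publishes,
# then releases it. Assumes cfg.main_pub is None so every pub has its own event.
def _replay_context_example(cfg, pm, msgs):
  with ReplayContext(cfg) as rc:
    for m in msgs:
      if m.which() in cfg.pubs:
        rc.send_sync(pm, m.which(), m.as_builder().to_bytes())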


@dataclass
class ProcessConfig:
  proc_name: str
  pubs: list[str]
  subs: list[str]
  ignore: list[str]
  config_callback: Callable | None = None
  init_callback: Callable | None = None
  should_recv_callback: Callable | None = None
  tolerance: float | None = None
  processing_time: float = 0.001
  timeout: int = 30
  simulation: bool = True
  main_pub: str | None = None
  main_pub_drained: bool = True
  vision_pubs: list[str] = field(default_factory=list)
  ignore_alive_pubs: list[str] = field(default_factory=list)
  unlocked_pubs: list[str] = field(default_factory=list)
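

# Hypothetical sketch (not in the original commit): building a one-off config for a
# single-input process; the field values are illustrative, not one of the CONFIGS below.
def _example_config() -> ProcessConfig:
  return ProcessConfig(
    proc_name="plannerd",
    pubs=["modelV2", "carControl", "carState", "controlsState", "radarState"],
    subs=["longitudinalPlan"],
    ignore=["logMonoTime"],
    tolerance=NUMPY_TOLERANCE,
  )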


class ProcessContainer:
  def __init__(self, cfg: ProcessConfig):
    self.prefix = OpenpilotPrefix(clean_dirs_on_exit=False)
    self.cfg = copy.deepcopy(cfg)
    self.process = copy.deepcopy(managed_processes[cfg.proc_name])
    self.msg_queue: list[capnp._DynamicStructReader] = []
    self.cnt = 0
    self.pm: messaging.PubMaster | None = None
    self.sockets: list[messaging.SubSocket] | None = None
    self.rc: ReplayContext | None = None
    self.vipc_server: VisionIpcServer | None = None
    self.environ_config: dict[str, Any] | None = None
    self.capture: ProcessOutputCapture | None = None

  @property
  def has_empty_queue(self) -> bool:
    return len(self.msg_queue) == 0

  @property
  def pubs(self) -> list[str]:
    return self.cfg.pubs

  @property
  def subs(self) -> list[str]:
    return self.cfg.subs

  def _clean_env(self):
    for k in self.environ_config.keys():
      if k in os.environ:
        del os.environ[k]

    for k in ["PROC_NAME", "SIMULATION"]:
      if k in os.environ:
        del os.environ[k]

  def _setup_env(self, params_config: dict[str, Any], environ_config: dict[str, Any]):
    for k, v in environ_config.items():
      if len(v) != 0:
        os.environ[k] = v
      elif k in os.environ:
        del os.environ[k]

    os.environ["PROC_NAME"] = self.cfg.proc_name
    if self.cfg.simulation:
      os.environ["SIMULATION"] = "1"
    elif "SIMULATION" in os.environ:
      del os.environ["SIMULATION"]

    params = Params()
    for k, v in params_config.items():
      if isinstance(v, bool):
        params.put_bool(k, v)
      else:
        params.put(k, v)

    self.environ_config = environ_config

  def _setup_vision_ipc(self, all_msgs: LogIterable, frs: dict[str, Any]):
    assert len(self.cfg.vision_pubs) != 0

    vipc_server = VisionIpcServer("camerad")
    streams_metas = available_streams(all_msgs)
    for meta in streams_metas:
      if meta.camera_state in self.cfg.vision_pubs:
        frame_size = (frs[meta.camera_state].w, frs[meta.camera_state].h)
        vipc_server.create_buffers(meta.stream, 2, False, *frame_size)
    vipc_server.start_listener()

    self.vipc_server = vipc_server
    self.cfg.vision_pubs = [meta.camera_state for meta in streams_metas if meta.camera_state in self.cfg.vision_pubs]

  def _start_process(self):
    if self.capture is not None:
      self.process.launcher = LauncherWithCapture(self.capture, self.process.launcher)
    self.process.prepare()
    self.process.start()

  def start(
    self, params_config: dict[str, Any], environ_config: dict[str, Any],
    all_msgs: LogIterable, frs: dict[str, BaseFrameReader] | None,
    fingerprint: str | None, capture_output: bool
  ):
    with self.prefix as p:
      self._setup_env(params_config, environ_config)

      if self.cfg.config_callback is not None:
        params = Params()
        self.cfg.config_callback(params, self.cfg, all_msgs)

      self.rc = ReplayContext(self.cfg)
      self.rc.open_context()

      self.pm = messaging.PubMaster(self.cfg.pubs)
      self.sockets = [messaging.sub_sock(s, timeout=100) for s in self.cfg.subs]

      if len(self.cfg.vision_pubs) != 0:
        assert frs is not None
        self._setup_vision_ipc(all_msgs, frs)
        assert self.vipc_server is not None

      if capture_output:
        self.capture = ProcessOutputCapture(self.cfg.proc_name, p.prefix)

      self._start_process()

      if self.cfg.init_callback is not None:
        self.cfg.init_callback(self.rc, self.pm, all_msgs, fingerprint)

      # wait for the process to start up
      with Timeout(10, error_msg=f"timed out waiting for process to start: {repr(self.cfg.proc_name)}"):
        while not all(self.pm.all_readers_updated(s) for s in self.cfg.pubs if s not in self.cfg.ignore_alive_pubs):
          time.sleep(0)

  def stop(self):
    with self.prefix:
      self.process.signal(signal.SIGKILL)
      self.process.stop()
      self.rc.close_context()
      self.prefix.clean_dirs()
      self._clean_env()

  def run_step(self, msg: capnp._DynamicStructReader, frs: dict[str, BaseFrameReader] | None) -> list[capnp._DynamicStructReader]:
    assert self.rc and self.pm and self.sockets and self.process.proc

    output_msgs = []
    with self.prefix, Timeout(self.cfg.timeout, error_msg=f"timed out testing process {repr(self.cfg.proc_name)}"):
      end_of_cycle = True
      if self.cfg.should_recv_callback is not None:
        end_of_cycle = self.cfg.should_recv_callback(msg, self.cfg, self.cnt)

      self.msg_queue.append(msg)
      if end_of_cycle:
        self.rc.wait_for_recv_called()

        # call recv to let sub-sockets reconnect, after we know the process is ready
        if self.cnt == 0:
          for s in self.sockets:
            messaging.recv_one_or_none(s)

        # an empty recv on the drained pub indicates the end of messages; only trigger it if there are any
        trigger_empty_recv = False
        if self.cfg.main_pub and self.cfg.main_pub_drained:
          trigger_empty_recv = next((True for m in self.msg_queue if m.which() == self.cfg.main_pub), False)

        for m in self.msg_queue:
          self.pm.send(m.which(), m.as_builder())
          # send frames if needed
          if self.vipc_server is not None and m.which() in self.cfg.vision_pubs:
            camera_state = getattr(m, m.which())
            camera_meta = meta_from_camera_state(m.which())
            assert frs is not None
            img = frs[m.which()].get(camera_state.frameId, pix_fmt="nv12")[0]
            self.vipc_server.send(camera_meta.stream, img.flatten().tobytes(),
                                  camera_state.frameId, camera_state.timestampSof, camera_state.timestampEof)
        self.msg_queue = []

        self.rc.unlock_sockets()
        self.rc.wait_for_next_recv(trigger_empty_recv)

        for socket in self.sockets:
          ms = messaging.drain_sock(socket)
          for m in ms:
            m = m.as_builder()
            m.logMonoTime = msg.logMonoTime + int(self.cfg.processing_time * 1e9)
            output_msgs.append(m.as_reader())
        self.cnt += 1
    assert self.process.proc.is_alive()

    return output_msgs
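

# Hypothetical sketch (not in the original commit): stepping one container over the
# messages it subscribes to, for a process without vision inputs. This mirrors what
# _replay_multi_process() below does for several containers at once.
def _run_container_example(cfg, params_config, environ_config, msgs):
  container = ProcessContainer(cfg)
  container.start(params_config, environ_config, msgs, None, None, False)
  try:
    return [m for msg in msgs if msg.which() in container.pubs
            for m in container.run_step(msg, None)]
  finally:
    container.stop()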


def controlsd_fingerprint_callback(rc, pm, msgs, fingerprint):
  print("start fingerprinting")
  params = Params()
  canmsgs = [msg for msg in msgs if msg.which() == "can"][:300]

  # controlsd expects one arbitrary can and pandaState
  rc.send_sync(pm, "can", messaging.new_message("can", 1))
  pm.send("pandaStates", messaging.new_message("pandaStates", 1))
  rc.send_sync(pm, "can", messaging.new_message("can", 1))
  rc.wait_for_next_recv(True)

  # fingerprinting is done when CarParams is set
  while params.get("CarParams") is None:
    if len(canmsgs) == 0:
      raise ValueError("Fingerprinting failed. Ran out of CAN msgs")

    m = canmsgs.pop(0)
    rc.send_sync(pm, "can", m.as_builder().to_bytes())
    rc.wait_for_next_recv(False)


def get_car_params_callback(rc, pm, msgs, fingerprint):
  params = Params()
  if fingerprint:
    CarInterface, _, _ = interfaces[fingerprint]
    CP = CarInterface.get_non_essential_params(fingerprint)
  else:
    can = DummySocket()
    sendcan = DummySocket()

    canmsgs = [msg for msg in msgs if msg.which() == "can"]
    has_cached_cp = params.get("CarParamsCache") is not None
    assert len(canmsgs) != 0, "CAN messages are required for fingerprinting"
    assert os.environ.get("SKIP_FW_QUERY", False) or has_cached_cp, \
      "CarParamsCache is required for fingerprinting. Make sure to keep carParams msgs in the logs."

    for m in canmsgs[:300]:
      can.send(m.as_builder().to_bytes())
    _, CP = get_car(can, sendcan, Params().get_bool("ExperimentalLongitudinalEnabled"))
  params.put("CarParams", CP.to_bytes())
  return CP


def controlsd_rcv_callback(msg, cfg, frame):
  # no sendcan until controlsd is initialized
  if msg.which() != "can":
    return False

  socks = [
    s for s in cfg.subs if
    frame % int(SERVICE_LIST[msg.which()].frequency / SERVICE_LIST[s].frequency) == 0
  ]
  if "sendcan" in socks and (frame - 1) < 2000:
    socks.remove("sendcan")
  return len(socks) > 0


def calibration_rcv_callback(msg, cfg, frame):
  # calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
  # should_recv always true to increment frame
  return (frame - 1) == 0 or msg.which() == 'cameraOdometry'


def torqued_rcv_callback(msg, cfg, frame):
  # should_recv always true to increment frame
  return (frame - 1) == 0 or msg.which() == 'liveLocationKalman'


def dmonitoringmodeld_rcv_callback(msg, cfg, frame):
  return msg.which() == "driverCameraState"


class ModeldCameraSyncRcvCallback:
  def __init__(self):
    self.road_present = False
    self.wide_road_present = False
    self.is_dual_camera = True

  def __call__(self, msg, cfg, frame):
    self.is_dual_camera = len(cfg.vision_pubs) == 2
    if msg.which() == "roadCameraState":
      self.road_present = True
    elif msg.which() == "wideRoadCameraState":
      self.wide_road_present = True

    if self.road_present and self.wide_road_present:
      self.road_present, self.wide_road_present = False, False
      return True
    elif self.road_present and not self.is_dual_camera:
      self.road_present = False
      return True
    else:
      return False


class MessageBasedRcvCallback:
  def __init__(self, trigger_msg_type):
    self.trigger_msg_type = trigger_msg_type

  def __call__(self, msg, cfg, frame):
    return msg.which() == self.trigger_msg_type


class FrequencyBasedRcvCallback:
  def __init__(self, trigger_msg_type):
    self.trigger_msg_type = trigger_msg_type

  def __call__(self, msg, cfg, frame):
    if msg.which() != self.trigger_msg_type:
      return False

    resp_sockets = [
      s for s in cfg.subs
      if frame % max(1, int(SERVICE_LIST[msg.which()].frequency / SERVICE_LIST[s].frequency)) == 0
    ]
    return bool(len(resp_sockets))


def controlsd_config_callback(params, cfg, lr):
  controlsState = None
  initialized = False
  for msg in lr:
    if msg.which() == "controlsState":
      controlsState = msg.controlsState
      if initialized:
        break
    elif msg.which() == "onroadEvents":
      initialized = car.CarEvent.EventName.controlsInitializing not in [e.name for e in msg.onroadEvents]

  assert controlsState is not None and initialized, "controlsState never initialized"
  params.put("ReplayControlsState", controlsState.as_builder().to_bytes())


def locationd_config_pubsub_callback(params, cfg, lr):
  ublox = params.get_bool("UbloxAvailable")
  sub_keys = ({"gpsLocation", } if ublox else {"gpsLocationExternal", })

  cfg.pubs = set(cfg.pubs) - sub_keys
CONFIGS = [
  ProcessConfig(
    proc_name="controlsd",
    pubs=[
      "can", "deviceState", "pandaStates", "peripheralState", "liveCalibration", "driverMonitoringState",
      "longitudinalPlan", "liveLocationKalman", "liveParameters", "radarState",
      "modelV2", "driverCameraState", "roadCameraState", "wideRoadCameraState", "managerState",
      "testJoystick", "liveTorqueParameters", "accelerometer", "gyroscope"
    ],
    subs=["controlsState", "carState", "carControl", "sendcan", "onroadEvents", "carParams"],
    ignore=["logMonoTime", "controlsState.startMonoTime", "controlsState.cumLagMs"],
    config_callback=controlsd_config_callback,
    init_callback=controlsd_fingerprint_callback,
    should_recv_callback=controlsd_rcv_callback,
    tolerance=NUMPY_TOLERANCE,
    processing_time=0.004,
    main_pub="can",
  ),
  ProcessConfig(
    proc_name="radard",
    pubs=["can", "carState", "modelV2"],
    subs=["radarState", "liveTracks"],
    ignore=["logMonoTime", "radarState.cumLagMs"],
    init_callback=get_car_params_callback,
    should_recv_callback=MessageBasedRcvCallback("can"),
    main_pub="can",
  ),
  ProcessConfig(
    proc_name="plannerd",
    pubs=["modelV2", "carControl", "carState", "controlsState", "radarState"],
    subs=["longitudinalPlan", "uiPlan"],
    ignore=["logMonoTime", "longitudinalPlan.processingDelay", "longitudinalPlan.solverExecutionTime"],
    init_callback=get_car_params_callback,
    should_recv_callback=FrequencyBasedRcvCallback("modelV2"),
    tolerance=NUMPY_TOLERANCE,
  ),
  ProcessConfig(
    proc_name="calibrationd",
    pubs=["carState", "cameraOdometry", "carParams"],
    subs=["liveCalibration"],
    ignore=["logMonoTime"],
    should_recv_callback=calibration_rcv_callback,
  ),
  ProcessConfig(
    proc_name="dmonitoringd",
    pubs=["driverStateV2", "liveCalibration", "carState", "modelV2", "controlsState"],
    subs=["driverMonitoringState"],
    ignore=["logMonoTime"],
    should_recv_callback=FrequencyBasedRcvCallback("driverStateV2"),
    tolerance=NUMPY_TOLERANCE,
  ),
  ProcessConfig(
    proc_name="locationd",
    pubs=[
      "cameraOdometry", "accelerometer", "gyroscope", "gpsLocationExternal",
      "liveCalibration", "carState", "gpsLocation"
    ],
    subs=["liveLocationKalman"],
    ignore=["logMonoTime"],
    config_callback=locationd_config_pubsub_callback,
    tolerance=NUMPY_TOLERANCE,
  ),
  ProcessConfig(
    proc_name="paramsd",
    pubs=["liveLocationKalman", "carState"],
    subs=["liveParameters"],
    ignore=["logMonoTime"],
    init_callback=get_car_params_callback,
    should_recv_callback=FrequencyBasedRcvCallback("liveLocationKalman"),
    tolerance=NUMPY_TOLERANCE,
    processing_time=0.004,
  ),
  ProcessConfig(
    proc_name="ubloxd",
    pubs=["ubloxRaw"],
    subs=["ubloxGnss", "gpsLocationExternal"],
    ignore=["logMonoTime"],
  ),
  ProcessConfig(
    proc_name="torqued",
    pubs=["liveLocationKalman", "carState", "carControl"],
    subs=["liveTorqueParameters"],
    ignore=["logMonoTime"],
    init_callback=get_car_params_callback,
    should_recv_callback=torqued_rcv_callback,
    tolerance=NUMPY_TOLERANCE,
  ),
  ProcessConfig(
    proc_name="modeld",
    pubs=["roadCameraState", "wideRoadCameraState", "liveCalibration", "driverMonitoringState"],
    subs=["modelV2", "cameraOdometry"],
    ignore=["logMonoTime", "modelV2.frameDropPerc", "modelV2.modelExecutionTime"],
    should_recv_callback=ModeldCameraSyncRcvCallback(),
    tolerance=NUMPY_TOLERANCE,
    processing_time=0.020,
    main_pub=vipc_get_endpoint_name("camerad", meta_from_camera_state("roadCameraState").stream),
    main_pub_drained=False,
    vision_pubs=["roadCameraState", "wideRoadCameraState"],
    ignore_alive_pubs=["wideRoadCameraState"],
    init_callback=get_car_params_callback,
  ),
  ProcessConfig(
    proc_name="dmonitoringmodeld",
    pubs=["liveCalibration", "driverCameraState"],
    subs=["driverStateV2"],
    ignore=["logMonoTime", "driverStateV2.modelExecutionTime", "driverStateV2.dspExecutionTime"],
    should_recv_callback=dmonitoringmodeld_rcv_callback,
    tolerance=NUMPY_TOLERANCE,
    processing_time=0.020,
    main_pub=vipc_get_endpoint_name("camerad", meta_from_camera_state("driverCameraState").stream),
    main_pub_drained=False,
    vision_pubs=["driverCameraState"],
    ignore_alive_pubs=["driverCameraState"],
  ),
]


def get_process_config(name: str) -> ProcessConfig:
  try:
    return copy.deepcopy(next(c for c in CONFIGS if c.proc_name == name))
  except StopIteration as ex:
    raise Exception(f"Cannot find process config with name: {name}") from ex


def get_custom_params_from_lr(lr: LogIterable, initial_state: str = "first") -> dict[str, Any]:
  """
  Use this to get a custom params dict based on the provided logs.
  Useful when replaying the following processes: calibrationd, paramsd, torqued.
  The params may be based on the first or last message of a given type (carParams, liveCalibration, liveParameters, liveTorqueParameters) in the logs.
  """

  car_params = [m for m in lr if m.which() == "carParams"]
  live_calibration = [m for m in lr if m.which() == "liveCalibration"]
  live_parameters = [m for m in lr if m.which() == "liveParameters"]
  live_torque_parameters = [m for m in lr if m.which() == "liveTorqueParameters"]

  assert initial_state in ["first", "last"]
  msg_index = 0 if initial_state == "first" else -1

  assert len(car_params) > 0, "carParams required for initial state of liveParameters and CarParamsPrevRoute"
  CP = car_params[msg_index].carParams

  custom_params = {
    "CarParamsPrevRoute": CP.as_builder().to_bytes()
  }

  if len(live_calibration) > 0:
    custom_params["CalibrationParams"] = live_calibration[msg_index].as_builder().to_bytes()
  if len(live_parameters) > 0:
    lp_dict = live_parameters[msg_index].to_dict()
    lp_dict["carFingerprint"] = CP.carFingerprint
    custom_params["LiveParameters"] = json.dumps(lp_dict)
  if len(live_torque_parameters) > 0:
    custom_params["LiveTorqueParameters"] = live_torque_parameters[msg_index].as_builder().to_bytes()

  return custom_params
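

# Hypothetical usage sketch (not in the original commit): seed params from a source
# log before replaying torqued, so it starts from the logged torque estimate.
def _custom_params_example(lr):
  custom_params = get_custom_params_from_lr(lr, initial_state="first")
  return replay_process_with_name("torqued", lr, custom_params=custom_params)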


def replay_process_with_name(name: str | Iterable[str], lr: LogIterable, *args, **kwargs) -> list[capnp._DynamicStructReader]:
  if isinstance(name, str):
    cfgs = [get_process_config(name)]
  elif isinstance(name, Iterable):
    cfgs = [get_process_config(n) for n in name]
  else:
    raise ValueError("name must be a str or a collection of strings")

  return replay_process(cfgs, lr, *args, **kwargs)
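

# Hypothetical sketch (not in the original commit): replaying several processes in
# one pass; outputs of one process are republished to the others by the scheduler in
# _replay_multi_process(), so radard's radarState feeds plannerd.
def _multi_replay_example(lr):
  return replay_process_with_name(["radard", "plannerd"], lr, return_all_logs=True)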


def replay_process(
  cfg: ProcessConfig | Iterable[ProcessConfig], lr: LogIterable, frs: dict[str, BaseFrameReader] = None,
  fingerprint: str = None, return_all_logs: bool = False, custom_params: dict[str, Any] = None,
  captured_output_store: dict[str, dict[str, str]] = None, disable_progress: bool = False
) -> list[capnp._DynamicStructReader]:
  if isinstance(cfg, Iterable):
    cfgs = list(cfg)
  else:
    cfgs = [cfg]

  all_msgs = migrate_all(lr, old_logtime=True,
                         manager_states=True,
                         panda_states=any("pandaStates" in cfg.pubs for cfg in cfgs),
                         camera_states=any(len(cfg.vision_pubs) != 0 for cfg in cfgs))
  process_logs = _replay_multi_process(cfgs, all_msgs, frs, fingerprint, custom_params, captured_output_store, disable_progress)

  if return_all_logs:
    keys = {m.which() for m in process_logs}
    modified_logs = [m for m in all_msgs if m.which() not in keys]
    modified_logs.extend(process_logs)
    modified_logs.sort(key=lambda m: int(m.logMonoTime))
    log_msgs = modified_logs
  else:
    log_msgs = process_logs

  return log_msgs


def _replay_multi_process(
  cfgs: list[ProcessConfig], lr: LogIterable, frs: dict[str, BaseFrameReader] | None, fingerprint: str | None,
  custom_params: dict[str, Any] | None, captured_output_store: dict[str, dict[str, str]] | None, disable_progress: bool
) -> list[capnp._DynamicStructReader]:
  if fingerprint is not None:
    params_config = generate_params_config(lr=lr, fingerprint=fingerprint, custom_params=custom_params)
    env_config = generate_environ_config(fingerprint=fingerprint)
  else:
    CP = next((m.carParams for m in lr if m.which() == "carParams"), None)
    params_config = generate_params_config(lr=lr, CP=CP, custom_params=custom_params)
    env_config = generate_environ_config(CP=CP)

  # validate frs and vision pubs
  all_vision_pubs = [pub for cfg in cfgs for pub in cfg.vision_pubs]
  if len(all_vision_pubs) != 0:
    assert frs is not None, "frs must be provided when replaying a process using vision streams"
    assert all(meta_from_camera_state(st) is not None for st in all_vision_pubs), \
      f"undefined vision stream spotted, probably misconfigured process: (vision pubs: {all_vision_pubs})"
    required_vision_pubs = {m.camera_state for m in available_streams(lr)} & set(all_vision_pubs)
    assert all(st in frs for st in required_vision_pubs), f"frs for this process must contain the following vision streams: {required_vision_pubs}"

  all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
  log_msgs = []
  try:
    containers = []
    for cfg in cfgs:
      container = ProcessContainer(cfg)
      containers.append(container)
      container.start(params_config, env_config, all_msgs, frs, fingerprint, captured_output_store is not None)

    all_pubs = {pub for container in containers for pub in container.pubs}
    all_subs = {sub for container in containers for sub in container.subs}
    lr_pubs = all_pubs - all_subs
    pubs_to_containers = {pub: [container for container in containers if pub in container.pubs] for pub in all_pubs}

    pub_msgs = [msg for msg in all_msgs if msg.which() in lr_pubs]
    # external queue for messages taken from logs; internal queue for messages generated by processes, which will be republished
    external_pub_queue: list[capnp._DynamicStructReader] = pub_msgs.copy()
    internal_pub_queue: list[capnp._DynamicStructReader] = []
    # heap for maintaining the order of messages generated by processes, where each element is: (logMonoTime, index in internal_pub_queue)
    internal_pub_index_heap: list[tuple[int, int]] = []

    pbar = tqdm(total=len(external_pub_queue), disable=disable_progress)
    while len(external_pub_queue) != 0 or (len(internal_pub_index_heap) != 0 and not all(c.has_empty_queue for c in containers)):
      if len(internal_pub_index_heap) == 0 or (len(external_pub_queue) != 0 and external_pub_queue[0].logMonoTime < internal_pub_index_heap[0][0]):
        msg = external_pub_queue.pop(0)
        pbar.update(1)
      else:
        _, index = heapq.heappop(internal_pub_index_heap)
        msg = internal_pub_queue[index]

      target_containers = pubs_to_containers[msg.which()]
      for container in target_containers:
        output_msgs = container.run_step(msg, frs)
        for m in output_msgs:
          if m.which() in all_pubs:
            internal_pub_queue.append(m)
            heapq.heappush(internal_pub_index_heap, (m.logMonoTime, len(internal_pub_queue) - 1))
        log_msgs.extend(output_msgs)
  finally:
    for container in containers:
      container.stop()
      if captured_output_store is not None:
        assert container.capture is not None
        out, err = container.capture.read_outerr()
        captured_output_store[container.cfg.proc_name] = {"out": out, "err": err}

  return log_msgs


def generate_params_config(lr=None, CP=None, fingerprint=None, custom_params=None) -> dict[str, Any]:
  params_dict = {
    "OpenpilotEnabledToggle": True,
    "DisengageOnAccelerator": True,
    "DisableLogging": False,
  }

  if custom_params is not None:
    params_dict.update(custom_params)
  if lr is not None:
    has_ublox = any(msg.which() == "ubloxGnss" for msg in lr)
    params_dict["UbloxAvailable"] = has_ublox
    is_rhd = next((msg.driverMonitoringState.isRHD for msg in lr if msg.which() == "driverMonitoringState"), False)
    params_dict["IsRhdDetected"] = is_rhd

  if CP is not None:
    if CP.alternativeExperience == ALTERNATIVE_EXPERIENCE.DISABLE_DISENGAGE_ON_GAS:
      params_dict["DisengageOnAccelerator"] = False

    if fingerprint is None:
      if CP.fingerprintSource == "fw":
        params_dict["CarParamsCache"] = CP.as_builder().to_bytes()

    if CP.openpilotLongitudinalControl:
      params_dict["ExperimentalLongitudinalEnabled"] = True

    if CP.notCar:
      params_dict["JoystickDebugMode"] = True

  return params_dict


def generate_environ_config(CP=None, fingerprint=None, log_dir=None) -> dict[str, Any]:
  environ_dict = {}
  if platform.system() != "Darwin":
    environ_dict["PARAMS_ROOT"] = "/dev/shm/params"
  if log_dir is not None:
    environ_dict["LOG_ROOT"] = log_dir

  environ_dict["REPLAY"] = "1"

  # Regen or python process
  if CP is not None and fingerprint is None:
    if CP.fingerprintSource == "fw":
      environ_dict['SKIP_FW_QUERY'] = ""
      environ_dict['FINGERPRINT'] = ""
    else:
      environ_dict['SKIP_FW_QUERY'] = "1"
      environ_dict['FINGERPRINT'] = CP.carFingerprint
  elif fingerprint is not None:
    environ_dict['SKIP_FW_QUERY'] = "1"
    environ_dict['FINGERPRINT'] = fingerprint
  else:
    environ_dict["SKIP_FW_QUERY"] = ""
    environ_dict["FINGERPRINT"] = ""

  return environ_dict


def check_openpilot_enabled(msgs: LogIterable) -> bool:
  cur_enabled_count = 0
  max_enabled_count = 0
  for msg in msgs:
    if msg.which() == "carParams":
      if msg.carParams.notCar:
        return True
    elif msg.which() == "controlsState":
      if msg.controlsState.active:
        cur_enabled_count += 1
      else:
        cur_enabled_count = 0
      max_enabled_count = max(max_enabled_count, cur_enabled_count)

  return max_enabled_count > int(10. / DT_CTRL)
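

# Worked example for the threshold above (DT_CTRL is 0.01s in openpilot):
# int(10. / DT_CTRL) == 1000, so a route counts as enabled only after more than
# 1000 consecutive active controlsState messages, i.e. over 10s of engagement at 100Hz.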
1
selfdrive/test/process_replay/ref_commit
Normal file
@@ -0,0 +1 @@
43efe1cf08cba8c86bc1ae8234b3d3d084a40e5d
158
selfdrive/test/process_replay/regen.py
Normal file
@@ -0,0 +1,158 @@
#!/usr/bin/env python3
import os
import argparse
import time
import capnp
import numpy as np

from typing import Any
from collections.abc import Iterable

from openpilot.selfdrive.test.process_replay.process_replay import CONFIGS, FAKEDATA, ProcessConfig, replay_process, get_process_config, \
  check_openpilot_enabled, get_custom_params_from_lr
from openpilot.selfdrive.test.process_replay.vision_meta import DRIVER_FRAME_SIZES
from openpilot.selfdrive.test.update_ci_routes import upload_route
from openpilot.tools.lib.route import Route
from openpilot.tools.lib.framereader import FrameReader, BaseFrameReader, FrameType
from openpilot.tools.lib.logreader import LogReader, LogIterable
from openpilot.tools.lib.helpers import save_log


class DummyFrameReader(BaseFrameReader):
  def __init__(self, w: int, h: int, frame_count: int, pix_val: int):
    self.pix_val = pix_val
    self.w, self.h = w, h
    self.frame_count = frame_count
    self.frame_type = FrameType.raw

  def get(self, idx, count=1, pix_fmt="yuv420p"):
    if pix_fmt == "rgb24":
      shape = (self.h, self.w, 3)
    elif pix_fmt == "nv12" or pix_fmt == "yuv420p":
      shape = (int((self.h * self.w) * 3 / 2),)
    else:
      raise NotImplementedError

    return [np.full(shape, self.pix_val, dtype=np.uint8) for _ in range(count)]

  @staticmethod
  def zero_dcamera():
    return DummyFrameReader(*DRIVER_FRAME_SIZES["tici"], 1200, 0)
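

# Hypothetical sketch (not in the original commit): a blank 1200-frame driver
# camera, matching what --dummy-dcamera wires into setup_data_readers() below.
def _dummy_dcamera_example():
  fr = DummyFrameReader.zero_dcamera()
  return fr.get(0, pix_fmt="nv12")[0]  # all-zero nv12 buffer of w*h*3/2 bytes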


def regen_segment(
  lr: LogIterable, frs: dict[str, Any] = None,
  processes: Iterable[ProcessConfig] = CONFIGS, disable_tqdm: bool = False
) -> list[capnp._DynamicStructReader]:
  all_msgs = sorted(lr, key=lambda m: m.logMonoTime)
  custom_params = get_custom_params_from_lr(all_msgs)

  print("Replayed processes:", [p.proc_name for p in processes])
  print("\n\n", "*"*30, "\n\n", sep="")

  output_logs = replay_process(processes, all_msgs, frs, return_all_logs=True, custom_params=custom_params, disable_progress=disable_tqdm)

  return output_logs


def setup_data_readers(
  route: str, sidx: int, use_route_meta: bool,
  needs_driver_cam: bool = True, needs_road_cam: bool = True, dummy_driver_cam: bool = False
) -> tuple[LogReader, dict[str, Any]]:
  if use_route_meta:
    r = Route(route)
    lr = LogReader(r.log_paths()[sidx])
    frs = {}
    if needs_road_cam and len(r.camera_paths()) > sidx and r.camera_paths()[sidx] is not None:
      frs['roadCameraState'] = FrameReader(r.camera_paths()[sidx])
    if needs_road_cam and len(r.ecamera_paths()) > sidx and r.ecamera_paths()[sidx] is not None:
      frs['wideRoadCameraState'] = FrameReader(r.ecamera_paths()[sidx])
    if needs_driver_cam:
      if dummy_driver_cam:
        frs['driverCameraState'] = DummyFrameReader.zero_dcamera()
      elif len(r.dcamera_paths()) > sidx and r.dcamera_paths()[sidx] is not None:
        device_type = next(str(msg.initData.deviceType) for msg in lr if msg.which() == "initData")
        assert device_type != "neo", "Driver camera not supported on neo segments. Use dummy dcamera."
        frs['driverCameraState'] = FrameReader(r.dcamera_paths()[sidx])
  else:
    lr = LogReader(f"cd:/{route.replace('|', '/')}/{sidx}/rlog.bz2")
    frs = {}
    if needs_road_cam:
      frs['roadCameraState'] = FrameReader(f"cd:/{route.replace('|', '/')}/{sidx}/fcamera.hevc")
      if next((True for m in lr if m.which() == "wideRoadCameraState"), False):
        frs['wideRoadCameraState'] = FrameReader(f"cd:/{route.replace('|', '/')}/{sidx}/ecamera.hevc")
    if needs_driver_cam:
      if dummy_driver_cam:
        frs['driverCameraState'] = DummyFrameReader.zero_dcamera()
      else:
        device_type = next(str(msg.initData.deviceType) for msg in lr if msg.which() == "initData")
        assert device_type != "neo", "Driver camera not supported on neo segments. Use dummy dcamera."
        frs['driverCameraState'] = FrameReader(f"cd:/{route.replace('|', '/')}/{sidx}/dcamera.hevc")

  return lr, frs


def regen_and_save(
  route: str, sidx: int, processes: str | Iterable[str] = "all", outdir: str = FAKEDATA,
  upload: bool = False, use_route_meta: bool = False, disable_tqdm: bool = False, dummy_driver_cam: bool = False
) -> str:
  if not isinstance(processes, str) and not hasattr(processes, "__iter__"):
    raise ValueError("processes must be a string or an iterable")

  if processes != "all":
    if isinstance(processes, str):
      raise ValueError(f"Invalid value for processes: {processes}")

    replayed_processes = []
    for d in processes:
      cfg = get_process_config(d)
      replayed_processes.append(cfg)
  else:
    replayed_processes = CONFIGS

  all_vision_pubs = {pub for cfg in replayed_processes for pub in cfg.vision_pubs}
  lr, frs = setup_data_readers(route, sidx, use_route_meta,
                               needs_driver_cam="driverCameraState" in all_vision_pubs,
                               needs_road_cam="roadCameraState" in all_vision_pubs or "wideRoadCameraState" in all_vision_pubs,
                               dummy_driver_cam=dummy_driver_cam)
  output_logs = regen_segment(lr, frs, replayed_processes, disable_tqdm=disable_tqdm)

  log_dir = os.path.join(outdir, time.strftime("%Y-%m-%d--%H-%M-%S--0", time.gmtime()))
  rel_log_dir = os.path.relpath(log_dir)
  rpath = os.path.join(log_dir, "rlog.bz2")

  os.makedirs(log_dir)
  save_log(rpath, output_logs, compress=True)

  print("\n\n", "*"*30, "\n\n", sep="")
  print("New route:", rel_log_dir, "\n")

  if not check_openpilot_enabled(output_logs):
    raise Exception("Route did not engage for long enough")

  if upload:
    upload_route(rel_log_dir)

  return rel_log_dir
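

# Hypothetical usage sketch (not in the original commit): regenerate only controlsd
# and radard for one segment without uploading; the route name is a placeholder.
def _regen_example():
  return regen_and_save("0000000000000000|2023-01-01--00-00-00", 0,
                        processes=["controlsd", "radard"], dummy_driver_cam=True)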


if __name__ == "__main__":
  def comma_separated_list(string):
    return string.split(",")

  all_procs = [p.proc_name for p in CONFIGS]
  parser = argparse.ArgumentParser(description="Generate new segments from old ones")
  parser.add_argument("--upload", action="store_true", help="Upload the new segment to the CI bucket")
  parser.add_argument("--outdir", help="log output dir", default=FAKEDATA)
  parser.add_argument("--dummy-dcamera", action='store_true', help="Use dummy blank driver camera")
  parser.add_argument("--whitelist-procs", type=comma_separated_list, default=all_procs,
                      help="Comma-separated whitelist of processes to regen (e.g. controlsd,radard)")
  parser.add_argument("--blacklist-procs", type=comma_separated_list, default=[],
                      help="Comma-separated blacklist of processes to regen (e.g. controlsd,radard)")
  parser.add_argument("route", type=str, help="The source route")
  parser.add_argument("seg", type=int, help="Segment in source route")
  args = parser.parse_args()

  blacklist_set = set(args.blacklist_procs)
  processes = [p for p in args.whitelist_procs if p not in blacklist_set]
  regen_and_save(args.route, args.seg, processes=processes, upload=args.upload, outdir=args.outdir, dummy_driver_cam=args.dummy_dcamera)
54
selfdrive/test/process_replay/regen_all.py
Normal file
@@ -0,0 +1,54 @@
#!/usr/bin/env python3
import argparse
import concurrent.futures
import os
import random
import traceback
from tqdm import tqdm

from openpilot.common.prefix import OpenpilotPrefix
from openpilot.selfdrive.test.process_replay.regen import regen_and_save
from openpilot.selfdrive.test.process_replay.test_processes import FAKEDATA, source_segments as segments
from openpilot.tools.lib.route import SegmentName


def regen_job(segment, upload, disable_tqdm):
  with OpenpilotPrefix():
    sn = SegmentName(segment[1])
    fake_dongle_id = 'regen' + ''.join(random.choice('0123456789ABCDEF') for _ in range(11))
    try:
      relr = regen_and_save(sn.route_name.canonical_name, sn.segment_num, upload=upload, use_route_meta=False,
                            outdir=os.path.join(FAKEDATA, fake_dongle_id), disable_tqdm=disable_tqdm, dummy_driver_cam=True)
      relr = '|'.join(relr.split('/')[-2:])
      return f' ("{segment[0]}", "{relr}"), '
    except Exception as e:
      err = f" {segment} failed: {str(e)}"
      err += traceback.format_exc()
      err += "\n\n"
      return err


if __name__ == "__main__":
  all_cars = {car for car, _ in segments}

  parser = argparse.ArgumentParser(description="Generate new segments from old ones")
  parser.add_argument("-j", "--jobs", type=int, default=1)
  parser.add_argument("--no-upload", action="store_true")
  parser.add_argument("--whitelist-cars", type=str, nargs="*", default=all_cars,
                      help="Whitelist given cars from the test (e.g. HONDA)")
  parser.add_argument("--blacklist-cars", type=str, nargs="*", default=[],
                      help="Blacklist given cars from the test (e.g. HONDA)")
  args = parser.parse_args()

  tested_cars = set(args.whitelist_cars) - set(args.blacklist_cars)
  tested_cars = {c.upper() for c in tested_cars}
  tested_segments = [(car, segment) for car, segment in segments if car in tested_cars]

  with concurrent.futures.ProcessPoolExecutor(max_workers=args.jobs) as pool:
    p = pool.map(regen_job, tested_segments, [not args.no_upload] * len(tested_segments), [args.jobs > 1] * len(tested_segments))
    msg = "Copy these new segments into test_processes.py:"
    for seg in tqdm(p, desc="Generating segments", total=len(tested_segments)):
      msg += "\n" + str(seg)
    print()
    print()
    print(msg)
196
selfdrive/test/process_replay/test_debayer.py
Normal file
@@ -0,0 +1,196 @@
#!/usr/bin/env python3
import os
import sys
import bz2
import numpy as np

import pyopencl as cl  # install with `PYOPENCL_CL_PRETEND_VERSION=2.0 pip install pyopencl`

from openpilot.system.hardware import PC, TICI
from openpilot.common.basedir import BASEDIR
from openpilot.tools.lib.openpilotci import BASE_URL
from openpilot.system.version import get_commit
from openpilot.system.camerad.snapshot.snapshot import yuv_to_rgb
from openpilot.tools.lib.logreader import LogReader
from openpilot.tools.lib.filereader import FileReader

TEST_ROUTE = "8345e3b82948d454|2022-05-04--13-45-33/0"

FRAME_WIDTH = 1928
FRAME_HEIGHT = 1208
FRAME_STRIDE = 2896

UV_WIDTH = FRAME_WIDTH // 2
UV_HEIGHT = FRAME_HEIGHT // 2
UV_SIZE = UV_WIDTH * UV_HEIGHT


def get_frame_fn(ref_commit, test_route, tici=True):
  return f"{test_route}_debayer{'_tici' if tici else ''}_{ref_commit}.bz2"


def bzip_frames(frames):
  data = b''
  for y, u, v in frames:
    data += y.tobytes()
    data += u.tobytes()
    data += v.tobytes()
  return bz2.compress(data)


def unbzip_frames(url):
  with FileReader(url) as f:
    dat = f.read()

  data = bz2.decompress(dat)

  res = []
  for y_start in range(0, len(data), FRAME_WIDTH * FRAME_HEIGHT + UV_SIZE * 2):
    u_start = y_start + FRAME_WIDTH * FRAME_HEIGHT
    v_start = u_start + UV_SIZE

    y = np.frombuffer(data[y_start: u_start], dtype=np.uint8).reshape((FRAME_HEIGHT, FRAME_WIDTH))
    u = np.frombuffer(data[u_start: v_start], dtype=np.uint8).reshape((UV_HEIGHT, UV_WIDTH))
    v = np.frombuffer(data[v_start: v_start + UV_SIZE], dtype=np.uint8).reshape((UV_HEIGHT, UV_WIDTH))

    res.append((y, u, v))

  return res
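

# Hypothetical round-trip sketch (not in the original commit): bzip_frames() and
# unbzip_frames() are inverses for correctly shaped YUV planes; assumes FileReader
# can read the local path it is given.
def _roundtrip_example(frames, path="/tmp/frames.bz2"):
  with open(path, "wb") as f:
    f.write(bzip_frames(frames))
  return unbzip_frames(path)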
|
||||
|
||||


def init_kernels(frame_offset=0):
  ctx = cl.create_some_context(interactive=False)

  with open(os.path.join(BASEDIR, 'system/camerad/cameras/real_debayer.cl')) as f:
    build_args = ' -cl-fast-relaxed-math -cl-denorms-are-zero -cl-single-precision-constant' + \
                 f' -DFRAME_STRIDE={FRAME_STRIDE} -DRGB_WIDTH={FRAME_WIDTH} -DRGB_HEIGHT={FRAME_HEIGHT} -DFRAME_OFFSET={frame_offset} -DCAM_NUM=0'
    if PC:
      build_args += ' -DHALF_AS_FLOAT=1 -cl-std=CL2.0'
    debayer_prg = cl.Program(ctx, f.read()).build(options=build_args)

  return ctx, debayer_prg

def debayer_frame(ctx, debayer_prg, data, rgb=False):
  q = cl.CommandQueue(ctx)

  yuv_buff = np.empty(FRAME_WIDTH * FRAME_HEIGHT + UV_SIZE * 2, dtype=np.uint8)

  cam_g = cl.Buffer(ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=data)
  yuv_g = cl.Buffer(ctx, cl.mem_flags.WRITE_ONLY, FRAME_WIDTH * FRAME_HEIGHT + UV_SIZE * 2)

  local_worksize = (20, 20) if TICI else (4, 4)
  ev1 = debayer_prg.debayer10(q, (UV_WIDTH, UV_HEIGHT), local_worksize, cam_g, yuv_g)
  cl.enqueue_copy(q, yuv_buff, yuv_g, wait_for=[ev1]).wait()
  cl.enqueue_barrier(q)

  y = yuv_buff[:FRAME_WIDTH*FRAME_HEIGHT].reshape((FRAME_HEIGHT, FRAME_WIDTH))
  u = yuv_buff[FRAME_WIDTH*FRAME_HEIGHT:FRAME_WIDTH*FRAME_HEIGHT+UV_SIZE].reshape((UV_HEIGHT, UV_WIDTH))
  v = yuv_buff[FRAME_WIDTH*FRAME_HEIGHT+UV_SIZE:].reshape((UV_HEIGHT, UV_WIDTH))

  if rgb:
    return yuv_to_rgb(y, u, v)
  else:
    return y, u, v
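
# Note (added commentary, not in the original source): the kernel is launched
# over a (UV_WIDTH, UV_HEIGHT) global range, i.e. one work item per 2x2 Bayer
# quad, which matches the buffer sizes above: each work item contributes four Y
# samples plus one U and one V sample.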


def debayer_replay(lr):
  ctx, debayer_prg = init_kernels()

  frames = []
  for m in lr:
    if m.which() == 'roadCameraState':
      cs = m.roadCameraState
      if cs.image:
        data = np.frombuffer(cs.image, dtype=np.uint8)
        img = debayer_frame(ctx, debayer_prg, data)

        frames.append(img)

  return frames


if __name__ == "__main__":
  update = "--update" in sys.argv
  replay_dir = os.path.dirname(os.path.abspath(__file__))
  ref_commit_fn = os.path.join(replay_dir, "debayer_replay_ref_commit")

  # load logs
  lr = list(LogReader(TEST_ROUTE))

  # run replay
  frames = debayer_replay(lr)

  # get diff
  failed = False
  diff = ''
  yuv_i = ['y', 'u', 'v']
  if not update:
    with open(ref_commit_fn) as f:
      ref_commit = f.read().strip()
    frame_fn = get_frame_fn(ref_commit, TEST_ROUTE, tici=TICI)

    try:
      cmp_frames = unbzip_frames(BASE_URL + frame_fn)

      if len(frames) != len(cmp_frames):
        failed = True
        diff += 'amount of frames not equal\n'

      for i, (frame, cmp_frame) in enumerate(zip(frames, cmp_frames, strict=True)):
        for j in range(3):
          fr = frame[j]
          cmp_f = cmp_frame[j]
          if fr.shape != cmp_f.shape:
            failed = True
            diff += f'frame shapes not equal for ({i}, {yuv_i[j]})\n'
            diff += f'{ref_commit}: {cmp_f.shape}\n'
            diff += f'HEAD: {fr.shape}\n'
          elif not np.array_equal(fr, cmp_f):
            failed = True
            if np.allclose(fr, cmp_f, atol=1):
              diff += f'frames not equal for ({i}, {yuv_i[j]}), but are all close\n'
            else:
              diff += f'frames not equal for ({i}, {yuv_i[j]})\n'

            frame_diff = np.abs(np.subtract(fr, cmp_f))
            diff_len = len(np.nonzero(frame_diff)[0])
            if diff_len > 10000:
              diff += f'different at a large amount of pixels ({diff_len})\n'
            else:
              diff += 'different at (frame, yuv, pixel, ref, HEAD):\n'
              for k in zip(*np.nonzero(frame_diff), strict=True):
                diff += f'{i}, {yuv_i[j]}, {k}, {cmp_f[k]}, {fr[k]}\n'

      if failed:
        print(diff)
        with open("debayer_diff.txt", "w") as f:
          f.write(diff)
    except Exception as e:
      print(str(e))
      failed = True

  # upload new refs
  if update or (failed and TICI):
    from openpilot.tools.lib.openpilotci import upload_file

    print("Uploading new refs")

    frames_bzip = bzip_frames(frames)

    new_commit = get_commit()
    frame_fn = os.path.join(replay_dir, get_frame_fn(new_commit, TEST_ROUTE, tici=TICI))
    with open(frame_fn, "wb") as f2:
      f2.write(frames_bzip)

    try:
      upload_file(frame_fn, os.path.basename(frame_fn))
    except Exception as e:
      print("failed to upload", e)

  if update:
    with open(ref_commit_fn, 'w') as f:
      f.write(str(new_commit))

    print("\nNew ref commit: ", new_commit)

  sys.exit(int(failed))
33
selfdrive/test/process_replay/test_fuzzy.py
Normal file
@@ -0,0 +1,33 @@
#!/usr/bin/env python3
import copy
from hypothesis import given, HealthCheck, Phase, settings
import hypothesis.strategies as st
from parameterized import parameterized
import unittest

from cereal import log
from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA
from openpilot.selfdrive.test.fuzzy_generation import FuzzyGenerator
import openpilot.selfdrive.test.process_replay.process_replay as pr

# These processes currently fail because unrealistic fuzzed data breaks assumptions
# that openpilot makes, causing errors with NaN, inf, int sizes, array indexing, ...
# TODO: Make each one testable
NOT_TESTED = ['controlsd', 'plannerd', 'calibrationd', 'dmonitoringd', 'paramsd', 'dmonitoringmodeld', 'modeld']

TEST_CASES = [(cfg.proc_name, copy.deepcopy(cfg)) for cfg in pr.CONFIGS if cfg.proc_name not in NOT_TESTED]
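
# Commentary (added, not in the original source): parameterized.expand() turns
# each (proc_name, cfg) pair into its own test method, so a hypothesis failure
# in one process doesn't mask results for the remaining ones.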

class TestFuzzProcesses(unittest.TestCase):

  # TODO: make this faster and increase examples
  @parameterized.expand(TEST_CASES)
  @given(st.data())
  @settings(phases=[Phase.generate, Phase.target], max_examples=10, deadline=1000, suppress_health_check=[HealthCheck.too_slow, HealthCheck.data_too_large])
  def test_fuzz_process(self, proc_name, cfg, data):
    msgs = FuzzyGenerator.get_random_event_msg(data.draw, events=cfg.pubs, real_floats=True)
    lr = [log.Event.new_message(**m).as_reader() for m in msgs]
    cfg.timeout = 5
    pr.replay_process(cfg, lr, fingerprint=TOYOTA.COROLLA_TSS2, disable_progress=True)

if __name__ == "__main__":
  unittest.main()
231
selfdrive/test/process_replay/test_processes.py
Normal file
@@ -0,0 +1,231 @@
#!/usr/bin/env python3
import argparse
import concurrent.futures
import os
import sys
from collections import defaultdict
from tqdm import tqdm
from typing import Any

from openpilot.selfdrive.car.car_helpers import interface_names
from openpilot.tools.lib.openpilotci import get_url, upload_file
from openpilot.selfdrive.test.process_replay.compare_logs import compare_logs, format_diff
from openpilot.selfdrive.test.process_replay.process_replay import CONFIGS, PROC_REPLAY_DIR, FAKEDATA, check_openpilot_enabled, replay_process
from openpilot.system.version import get_commit
from openpilot.tools.lib.filereader import FileReader
from openpilot.tools.lib.logreader import LogReader
from openpilot.tools.lib.helpers import save_log

source_segments = [
  ("BODY", "937ccb7243511b65|2022-05-24--16-03-09--1"),  # COMMA.BODY
  ("HYUNDAI", "02c45f73a2e5c6e9|2021-01-01--19-08-22--1"),  # HYUNDAI.SONATA
  ("HYUNDAI2", "d545129f3ca90f28|2022-11-07--20-43-08--3"),  # HYUNDAI.KIA_EV6 (+ QCOM GPS)
  ("TOYOTA", "0982d79ebb0de295|2021-01-04--17-13-21--13"),  # TOYOTA.PRIUS
  ("TOYOTA2", "0982d79ebb0de295|2021-01-03--20-03-36--6"),  # TOYOTA.RAV4
  ("TOYOTA3", "f7d7e3538cda1a2a|2021-08-16--08-55-34--6"),  # TOYOTA.COROLLA_TSS2
  ("HONDA", "eb140f119469d9ab|2021-06-12--10-46-24--27"),  # HONDA.CIVIC (NIDEC)
  ("HONDA2", "7d2244f34d1bbcda|2021-06-25--12-25-37--26"),  # HONDA.ACCORD (BOSCH)
  ("CHRYSLER", "4deb27de11bee626|2021-02-20--11-28-55--8"),  # CHRYSLER.PACIFICA_2018_HYBRID
  ("RAM", "17fc16d840fe9d21|2023-04-26--13-28-44--5"),  # CHRYSLER.RAM_1500
  ("SUBARU", "341dccd5359e3c97|2022-09-12--10-35-33--3"),  # SUBARU.OUTBACK
  ("GM", "0c58b6a25109da2b|2021-02-23--16-35-50--11"),  # GM.VOLT
  ("GM2", "376bf99325883932|2022-10-27--13-41-22--1"),  # GM.BOLT_EUV
  ("NISSAN", "35336926920f3571|2021-02-12--18-38-48--46"),  # NISSAN.XTRAIL
  ("VOLKSWAGEN", "de9592456ad7d144|2021-06-29--11-00-15--6"),  # VOLKSWAGEN.GOLF
  ("MAZDA", "bd6a637565e91581|2021-10-30--15-14-53--4"),  # MAZDA.CX9_2021
  ("FORD", "54827bf84c38b14f|2023-01-26--21-59-07--4"),  # FORD.BRONCO_SPORT_MK1

  # Enable when port is tested and dashcamOnly is no longer set
  #("TESLA", "bb50caf5f0945ab1|2021-06-19--17-20-18--3"),  # TESLA.AP2_MODELS
  #("VOLKSWAGEN2", "3cfdec54aa035f3f|2022-07-19--23-45-10--2"),  # VOLKSWAGEN.PASSAT_NMS
]

segments = [
  ("BODY", "regen997DF2697CB|2023-10-30--23-14-29--0"),
  ("HYUNDAI", "regen2A9D2A8E0B4|2023-10-30--23-13-34--0"),
  ("HYUNDAI2", "regen6CA24BC3035|2023-10-30--23-14-28--0"),
  ("TOYOTA", "regen5C019D76307|2023-10-30--23-13-31--0"),
  ("TOYOTA2", "regen5DCADA88A96|2023-10-30--23-14-57--0"),
  ("TOYOTA3", "regen7204CA3A498|2023-10-30--23-15-55--0"),
  ("HONDA", "regen048F8FA0B24|2023-10-30--23-15-53--0"),
  ("HONDA2", "regen7D2D3F82D5B|2023-10-30--23-15-55--0"),
  ("CHRYSLER", "regen7125C42780C|2023-10-30--23-16-21--0"),
  ("RAM", "regen2731F3213D2|2023-10-30--23-18-11--0"),
  ("SUBARU", "regen86E4C1B4DDD|2023-10-30--23-18-14--0"),
  ("GM", "regenF6393D64745|2023-10-30--23-17-18--0"),
  ("GM2", "regen220F830C05B|2023-10-30--23-18-39--0"),
  ("NISSAN", "regen4F671F7C435|2023-10-30--23-18-40--0"),
  ("VOLKSWAGEN", "regen8BDFE7307A0|2023-10-30--23-19-36--0"),
  ("MAZDA", "regen2E9F1A15FD5|2023-10-30--23-20-36--0"),
  ("FORD", "regen6D39E54606E|2023-10-30--23-20-54--0"),
]

# dashcamOnly makes don't need to be tested until a full port is done
excluded_interfaces = ["mock", "tesla"]

BASE_URL = "https://commadataci.blob.core.windows.net/openpilotci/"
REF_COMMIT_FN = os.path.join(PROC_REPLAY_DIR, "ref_commit")
EXCLUDED_PROCS = {"modeld", "dmonitoringmodeld"}


def run_test_process(data):
  segment, cfg, args, cur_log_fn, ref_log_path, lr_dat = data
  res = None
  if not args.upload_only:
    lr = LogReader.from_bytes(lr_dat)
    res, log_msgs = test_process(cfg, lr, segment, ref_log_path, cur_log_fn, args.ignore_fields, args.ignore_msgs)
    # save logs so we can upload when updating refs
    save_log(cur_log_fn, log_msgs)

  if args.update_refs or args.upload_only:
    print(f'Uploading: {os.path.basename(cur_log_fn)}')
    assert os.path.exists(cur_log_fn), f"Cannot find log to upload: {cur_log_fn}"
    upload_file(cur_log_fn, os.path.basename(cur_log_fn))
    os.remove(cur_log_fn)
  return (segment, cfg.proc_name, res)


def get_log_data(segment):
  r, n = segment.rsplit("--", 1)
  with FileReader(get_url(r, n)) as f:
    return (segment, f.read())
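
# Commentary (added, not in the original source): segment names are
# "<route>--<index>", so get_log_data("eb140f119469d9ab|2021-06-12--10-46-24--27")
# fetches index 27 of route "eb140f119469d9ab|2021-06-12--10-46-24" via
# rsplit("--", 1).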

def test_process(cfg, lr, segment, ref_log_path, new_log_path, ignore_fields=None, ignore_msgs=None):
  if ignore_fields is None:
    ignore_fields = []
  if ignore_msgs is None:
    ignore_msgs = []

  ref_log_msgs = list(LogReader(ref_log_path))

  try:
    log_msgs = replay_process(cfg, lr, disable_progress=True)
  except Exception as e:
    raise Exception("failed on segment: " + segment) from e

  # check to make sure openpilot is engaged in the route
  if cfg.proc_name == "controlsd":
    if not check_openpilot_enabled(log_msgs):
      # FIXME: these segments should work, but the replay enabling logic is too brittle
      if segment not in ("regen6CA24BC3035|2023-10-30--23-14-28--0", "regen7D2D3F82D5B|2023-10-30--23-15-55--0"):
        return f"Route did not enable at all or for long enough: {new_log_path}", log_msgs

  try:
    return compare_logs(ref_log_msgs, log_msgs, ignore_fields + cfg.ignore, ignore_msgs, cfg.tolerance), log_msgs
  except Exception as e:
    return str(e), log_msgs


if __name__ == "__main__":
  all_cars = {car for car, _ in segments}
  all_procs = {cfg.proc_name for cfg in CONFIGS if cfg.proc_name not in EXCLUDED_PROCS}

  cpu_count = os.cpu_count() or 1

  parser = argparse.ArgumentParser(description="Regression test to identify changes in a process's output")
  parser.add_argument("--whitelist-procs", type=str, nargs="*", default=all_procs,
                      help="Whitelist given processes from the test (e.g. controlsd)")
  parser.add_argument("--whitelist-cars", type=str, nargs="*", default=all_cars,
                      help="Whitelist given cars from the test (e.g. HONDA)")
  parser.add_argument("--blacklist-procs", type=str, nargs="*", default=[],
                      help="Blacklist given processes from the test (e.g. controlsd)")
  parser.add_argument("--blacklist-cars", type=str, nargs="*", default=[],
                      help="Blacklist given cars from the test (e.g. HONDA)")
  parser.add_argument("--ignore-fields", type=str, nargs="*", default=[],
                      help="Extra fields or msgs to ignore (e.g. carState.events)")
  parser.add_argument("--ignore-msgs", type=str, nargs="*", default=[],
                      help="Msgs to ignore (e.g. carEvents)")
  parser.add_argument("--update-refs", action="store_true",
                      help="Updates reference logs using current commit")
  parser.add_argument("--upload-only", action="store_true",
                      help="Skips testing processes and uploads logs from previous test run")
  parser.add_argument("-j", "--jobs", type=int, default=max(cpu_count - 2, 1),
                      help="Max amount of parallel jobs")
  args = parser.parse_args()

  tested_procs = set(args.whitelist_procs) - set(args.blacklist_procs)
  tested_cars = set(args.whitelist_cars) - set(args.blacklist_cars)
  tested_cars = {c.upper() for c in tested_cars}

  full_test = (tested_procs == all_procs) and (tested_cars == all_cars) and all(len(x) == 0 for x in (args.ignore_fields, args.ignore_msgs))
  upload = args.update_refs or args.upload_only
  os.makedirs(os.path.dirname(FAKEDATA), exist_ok=True)

  if upload:
    assert full_test, "Need to run full test when updating refs"

  try:
    with open(REF_COMMIT_FN) as f:
      ref_commit = f.read().strip()
  except FileNotFoundError:
    print("Couldn't find reference commit")
    sys.exit(1)

  cur_commit = get_commit()
  if not cur_commit:
    raise Exception("Couldn't get current commit")

  print(f"***** testing against commit {ref_commit} *****")

  # check to make sure all car brands are tested
  if full_test:
    untested = (set(interface_names) - set(excluded_interfaces)) - {c.lower() for c in tested_cars}
    assert len(untested) == 0, f"Cars missing routes: {str(untested)}"

  log_paths: defaultdict[str, dict[str, dict[str, str]]] = defaultdict(lambda: defaultdict(dict))
  with concurrent.futures.ProcessPoolExecutor(max_workers=args.jobs) as pool:
    if not args.upload_only:
      download_segments = [seg for car, seg in segments if car in tested_cars]
      log_data: dict[str, LogReader] = {}
      p1 = pool.map(get_log_data, download_segments)
      for segment, lr in tqdm(p1, desc="Getting Logs", total=len(download_segments)):
        log_data[segment] = lr

    pool_args: Any = []
    for car_brand, segment in segments:
      if car_brand not in tested_cars:
        continue

      for cfg in CONFIGS:
        if cfg.proc_name not in tested_procs:
          continue

        cur_log_fn = os.path.join(FAKEDATA, f"{segment}_{cfg.proc_name}_{cur_commit}.bz2")
        if args.update_refs:  # reference logs will not exist if routes were just regenerated
          ref_log_path = get_url(*segment.rsplit("--", 1))
        else:
          ref_log_fn = os.path.join(FAKEDATA, f"{segment}_{cfg.proc_name}_{ref_commit}.bz2")
          ref_log_path = ref_log_fn if os.path.exists(ref_log_fn) else BASE_URL + os.path.basename(ref_log_fn)

        dat = None if args.upload_only else log_data[segment]
        pool_args.append((segment, cfg, args, cur_log_fn, ref_log_path, dat))

        log_paths[segment][cfg.proc_name]['ref'] = ref_log_path
        log_paths[segment][cfg.proc_name]['new'] = cur_log_fn

    results: Any = defaultdict(dict)
    p2 = pool.map(run_test_process, pool_args)
    for (segment, proc, result) in tqdm(p2, desc="Running Tests", total=len(pool_args)):
      if not args.upload_only:
        results[segment][proc] = result

  diff_short, diff_long, failed = format_diff(results, log_paths, ref_commit)
  if not upload:
    with open(os.path.join(PROC_REPLAY_DIR, "diff.txt"), "w") as f:
      f.write(diff_long)
    print(diff_short)

    if failed:
      print("TEST FAILED")
      print("\n\nTo push the new reference logs for this commit run:")
      print("./test_processes.py --upload-only")
    else:
      print("TEST SUCCEEDED")

  else:
    with open(REF_COMMIT_FN, "w") as f:
      f.write(cur_commit)
    print(f"\n\nUpdated reference logs for commit: {cur_commit}")

  sys.exit(int(failed))
45
selfdrive/test/process_replay/test_regen.py
Normal file
@@ -0,0 +1,45 @@
#!/usr/bin/env python3

import unittest

from parameterized import parameterized

from openpilot.selfdrive.test.process_replay.regen import regen_segment, DummyFrameReader
from openpilot.selfdrive.test.process_replay.process_replay import check_openpilot_enabled
from openpilot.tools.lib.openpilotci import get_url
from openpilot.tools.lib.logreader import LogReader
from openpilot.tools.lib.framereader import FrameReader

TESTED_SEGMENTS = [
  ("PRIUS_C2", "0982d79ebb0de295|2021-01-04--17-13-21--13"),  # TOYOTA PRIUS 2017: NEO, pandaStateDEPRECATED, no peripheralState, sensorEventsDEPRECATED
  # Enable these once regen on CI becomes faster or use them for different tests running controlsd in isolation
  # ("MAZDA_C3", "bd6a637565e91581|2021-10-30--15-14-53--4"),  # MAZDA.CX9_2021: TICI, incomplete managerState
  # ("FORD_C3", "54827bf84c38b14f|2023-01-26--21-59-07--4"),  # FORD.BRONCO_SPORT_MK1: TICI
]


def ci_setup_data_readers(route, sidx):
  lr = LogReader(get_url(route, sidx, "rlog"))
  frs = {
    'roadCameraState': FrameReader(get_url(route, sidx, "fcamera")),
    'driverCameraState': DummyFrameReader.zero_dcamera()
  }
  if next((True for m in lr if m.which() == "wideRoadCameraState"), False):
    frs["wideRoadCameraState"] = FrameReader(get_url(route, sidx, "ecamera"))

  return lr, frs


class TestRegen(unittest.TestCase):
  @parameterized.expand(TESTED_SEGMENTS)
  def test_engaged(self, case_name, segment):
    route, sidx = segment.rsplit("--", 1)
    lr, frs = ci_setup_data_readers(route, sidx)
    output_logs = regen_segment(lr, frs, disable_tqdm=True)

    engaged = check_openpilot_enabled(output_logs)
    self.assertTrue(engaged, f"openpilot not engaged in {case_name}")


if __name__ == '__main__':
  unittest.main()
43
selfdrive/test/process_replay/vision_meta.py
Normal file
@@ -0,0 +1,43 @@
from collections import namedtuple
from cereal.visionipc import VisionStreamType
from openpilot.common.realtime import DT_MDL, DT_DMON
from openpilot.common.transformations.camera import tici_f_frame_size, tici_d_frame_size, tici_e_frame_size, eon_f_frame_size, eon_d_frame_size

VideoStreamMeta = namedtuple("VideoStreamMeta", ["camera_state", "encode_index", "stream", "dt", "frame_sizes"])
ROAD_CAMERA_FRAME_SIZES = {"tici": tici_f_frame_size, "tizi": tici_f_frame_size, "neo": eon_f_frame_size}
WIDE_ROAD_CAMERA_FRAME_SIZES = {"tici": tici_e_frame_size, "tizi": tici_e_frame_size}
DRIVER_FRAME_SIZES = {"tici": tici_d_frame_size, "tizi": tici_d_frame_size, "neo": eon_d_frame_size}
VIPC_STREAM_METADATA = [
  # metadata: (state_msg_type, encode_msg_type, stream_type, dt, frame_sizes)
  ("roadCameraState", "roadEncodeIdx", VisionStreamType.VISION_STREAM_ROAD, DT_MDL, ROAD_CAMERA_FRAME_SIZES),
  ("wideRoadCameraState", "wideRoadEncodeIdx", VisionStreamType.VISION_STREAM_WIDE_ROAD, DT_MDL, WIDE_ROAD_CAMERA_FRAME_SIZES),
  ("driverCameraState", "driverEncodeIdx", VisionStreamType.VISION_STREAM_DRIVER, DT_DMON, DRIVER_FRAME_SIZES),
]
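
# Commentary (added, not in the original source): the lookups below are
# positional over the tuples above, e.g. meta_from_camera_state("roadCameraState")
# returns a VideoStreamMeta whose dt is DT_MDL and whose stream is
# VISION_STREAM_ROAD.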


def meta_from_camera_state(state):
  meta = next((VideoStreamMeta(*meta) for meta in VIPC_STREAM_METADATA if meta[0] == state), None)
  return meta


def meta_from_encode_index(encode_index):
  meta = next((VideoStreamMeta(*meta) for meta in VIPC_STREAM_METADATA if meta[1] == encode_index), None)
  return meta


def meta_from_stream_type(stream_type):
  meta = next((VideoStreamMeta(*meta) for meta in VIPC_STREAM_METADATA if meta[2] == stream_type), None)
  return meta


def available_streams(lr=None):
  if lr is None:
    return [VideoStreamMeta(*meta) for meta in VIPC_STREAM_METADATA]

  result = []
  for meta in VIPC_STREAM_METADATA:
    has_cam_state = next((True for m in lr if m.which() == meta[0]), False)
    if has_cam_state:
      result.append(VideoStreamMeta(*meta))

  return result
2
selfdrive/test/profiling/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
cachegrind.out.*
*.prof
0
selfdrive/test/profiling/__init__.py
Normal file
91
selfdrive/test/profiling/lib.py
Normal file
@@ -0,0 +1,91 @@
from collections import defaultdict, deque
from cereal.services import SERVICE_LIST
import cereal.messaging as messaging
import capnp


class ReplayDone(Exception):
  pass


class SubSocket():
  def __init__(self, msgs, trigger):
    self.i = 0
    self.trigger = trigger
    self.msgs = [m.as_builder().to_bytes() for m in msgs if m.which() == trigger]
    self.max_i = len(self.msgs) - 1

  def receive(self, non_blocking=False):
    if non_blocking:
      return None

    if self.i == self.max_i:
      raise ReplayDone

    while True:
      msg = self.msgs[self.i]
      self.i += 1
      return msg


class PubSocket():
  def send(self, data):
    pass


class SubMaster(messaging.SubMaster):
  def __init__(self, msgs, trigger, services, check_average_freq=False):
    self.frame = 0
    self.data = {}
    self.ignore_alive = []

    self.alive = {s: True for s in services}
    self.updated = {s: False for s in services}
    self.rcv_time = {s: 0. for s in services}
    self.rcv_frame = {s: 0 for s in services}
    self.valid = {s: True for s in services}
    self.freq_ok = {s: True for s in services}
    self.recv_dts = {s: deque([0.0] * messaging.AVG_FREQ_HISTORY, maxlen=messaging.AVG_FREQ_HISTORY) for s in services}
    self.logMonoTime = {}
    self.sock = {}
    self.freq = {}
    self.check_average_freq = check_average_freq
    self.non_polled_services = []
    self.ignore_average_freq = []

    # TODO: specify multiple triggers for services like plannerd that poll on more than one service
    cur_msgs = []
    self.msgs = []
    msgs = [m for m in msgs if m.which() in services]

    for msg in msgs:
      cur_msgs.append(msg)
      if msg.which() == trigger:
        self.msgs.append(cur_msgs)
        cur_msgs = []

    self.msgs = list(reversed(self.msgs))
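
    # Commentary (added, not in the original source): messages are grouped into
    # batches that each end with one trigger message, then reversed so update()
    # can pop batches off the end of the list in chronological order.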
    for s in services:
      self.freq[s] = SERVICE_LIST[s].frequency
      try:
        data = messaging.new_message(s)
      except capnp.lib.capnp.KjException:
        # lists
        data = messaging.new_message(s, 0)

      self.data[s] = getattr(data, s)
      self.logMonoTime[s] = 0
      self.sock[s] = SubSocket(msgs, s)

  def update(self, timeout=None):
    if not len(self.msgs):
      raise ReplayDone

    cur_msgs = self.msgs.pop()
    self.update_msgs(cur_msgs[0].logMonoTime, cur_msgs)  # deliver the batch that ends at this trigger


class PubMaster(messaging.PubMaster):
  def __init__(self):
    self.sock = defaultdict(PubSocket)
97
selfdrive/test/profiling/profiler.py
Normal file
@@ -0,0 +1,97 @@
#!/usr/bin/env python3
import os
import sys
import cProfile
import pprofile
import pyprof2calltree

from openpilot.common.params import Params
from openpilot.tools.lib.logreader import LogReader
from openpilot.selfdrive.test.profiling.lib import SubMaster, PubMaster, SubSocket, ReplayDone
from openpilot.selfdrive.test.process_replay.process_replay import CONFIGS
from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA
from openpilot.selfdrive.car.honda.values import CAR as HONDA
from openpilot.selfdrive.car.volkswagen.values import CAR as VW

BASE_URL = "https://commadataci.blob.core.windows.net/openpilotci/"

CARS = {
  'toyota': ("0982d79ebb0de295|2021-01-03--20-03-36/6", TOYOTA.RAV4),
  'honda': ("0982d79ebb0de295|2021-01-08--10-13-10/6", HONDA.CIVIC),
  "vw": ("ef895f46af5fd73f|2021-05-22--14-06-35/6", VW.AUDI_A3_MK3),
}
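
# Hypothetical example invocation (process names from the `procs` dict at the
# bottom of this file): profile controlsd over the toyota route, replaying the
# log twice via the LOOP env var read in profile() below:
#   LOOP=2 python selfdrive/test/profiling/profiler.py controlsd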


def get_inputs(msgs, process, fingerprint):
  for config in CONFIGS:
    if config.proc_name == process:
      sub_socks = list(config.pubs)
      trigger = sub_socks[0]
      break

  # some procs block on CarParams
  for msg in msgs:
    if msg.which() == 'carParams':
      m = msg.as_builder()
      m.carParams.carFingerprint = fingerprint
      Params().put("CarParams", m.carParams.copy().to_bytes())
      break

  sm = SubMaster(msgs, trigger, sub_socks)
  pm = PubMaster()
  if 'can' in sub_socks:
    can_sock = SubSocket(msgs, 'can')
  else:
    can_sock = None
  return sm, pm, can_sock


def profile(proc, func, car='toyota'):
  segment, fingerprint = CARS[car]
  segment = segment.replace('|', '/')
  rlog_url = f"{BASE_URL}{segment}/rlog.bz2"
  msgs = list(LogReader(rlog_url)) * int(os.getenv("LOOP", "1"))

  os.environ['FINGERPRINT'] = fingerprint
  os.environ['SKIP_FW_QUERY'] = "1"
  os.environ['REPLAY'] = "1"

  def run(sm, pm, can_sock):
    try:
      if can_sock is not None:
        func(sm, pm, can_sock)
      else:
        func(sm, pm)
    except ReplayDone:
      pass

  # Statistical
  sm, pm, can_sock = get_inputs(msgs, proc, fingerprint)
  with pprofile.StatisticalProfile()(period=0.00001) as pr:
    run(sm, pm, can_sock)
  pr.dump_stats(f'cachegrind.out.{proc}_statistical')

  # Deterministic
  sm, pm, can_sock = get_inputs(msgs, proc, fingerprint)
  with cProfile.Profile() as pr:
    run(sm, pm, can_sock)
  pyprof2calltree.convert(pr.getstats(), f'cachegrind.out.{proc}_deterministic')


if __name__ == '__main__':
  from openpilot.selfdrive.controls.controlsd import main as controlsd_thread
  from openpilot.selfdrive.locationd.paramsd import main as paramsd_thread
  from openpilot.selfdrive.controls.plannerd import main as plannerd_thread

  procs = {
    'controlsd': controlsd_thread,
    'paramsd': paramsd_thread,
    'plannerd': plannerd_thread,
  }

  proc = sys.argv[1]
  if proc not in procs:
    print(f"{proc} not available")
    sys.exit(0)
  else:
    profile(proc, procs[proc])
10
selfdrive/test/scons_build_test.sh
Normal file
@@ -0,0 +1,10 @@
#!/bin/bash

SCRIPT_DIR=$(dirname "$0")
BASEDIR=$(realpath "$SCRIPT_DIR/../../")
cd $BASEDIR

# tests that our build system's dependencies are configured properly,
# needs a machine with lots of cores
scons --clean
scons --no-cache --random -j$(nproc)
10
selfdrive/test/setup_vsound.sh
Normal file
@@ -0,0 +1,10 @@
#!/bin/bash

{
  # start pulseaudio daemon
  sudo pulseaudio -D

  # create a virtual null audio sink and set it as the default device
  sudo pactl load-module module-null-sink sink_name=virtual_audio
  sudo pactl set-default-sink virtual_audio
} > /dev/null 2>&1
19
selfdrive/test/setup_xvfb.sh
Normal file
@@ -0,0 +1,19 @@
#!/usr/bin/env bash

# Sets up a virtual display for running the map renderer and simulator without an X11 display

DISP_ID=99
export DISPLAY=:$DISP_ID
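
# Note (added commentary, not in the original source): the DISPLAY export is only
# visible to the calling shell if this script is sourced rather than executed, e.g.:
#   source selfdrive/test/setup_xvfb.sh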

sudo Xvfb $DISPLAY -screen 0 2160x1080x24 2>/dev/null &

# check for x11 socket for the specified display ID
while [ ! -S /tmp/.X11-unix/X$DISP_ID ]
do
  echo "Waiting for Xvfb..."
  sleep 1
done

touch ~/.Xauthority
export XDG_SESSION_TYPE="x11"
xset -q
@@ -29,7 +29,7 @@ from openpilot.tools.lib.logreader import LogReader

# Baseline CPU usage by process
PROCS = {
-  "selfdrive.controls.controlsd": 41.0,
+  "selfdrive.controls.controlsd": 46.0,
  "./loggerd": 14.0,
  "./encoderd": 17.0,
  "./camerad": 14.5,
@@ -424,4 +424,4 @@ class TestOnroad(unittest.TestCase):


if __name__ == "__main__":
-  pytest.main()
+  unittest.main()
302
selfdrive/test/test_updated.py
Normal file
@@ -0,0 +1,302 @@
#!/usr/bin/env python3
import datetime
import os
import pytest
import time
import tempfile
import unittest
import shutil
import signal
import subprocess
import random

from openpilot.common.basedir import BASEDIR
from openpilot.common.params import Params


@pytest.mark.tici
class TestUpdated(unittest.TestCase):

  def setUp(self):
    self.updated_proc = None

    self.tmp_dir = tempfile.TemporaryDirectory()
    org_dir = os.path.join(self.tmp_dir.name, "commaai")

    self.basedir = os.path.join(org_dir, "openpilot")
    self.git_remote_dir = os.path.join(org_dir, "openpilot_remote")
    self.staging_dir = os.path.join(org_dir, "safe_staging")
    for d in [org_dir, self.basedir, self.git_remote_dir, self.staging_dir]:
      os.mkdir(d)

    self.neos_version = os.path.join(org_dir, "neos_version")
    self.neosupdate_dir = os.path.join(org_dir, "neosupdate")
    with open(self.neos_version, "w") as f:
      v = subprocess.check_output(r"bash -c 'source launch_env.sh && echo $REQUIRED_NEOS_VERSION'",
                                  cwd=BASEDIR, shell=True, encoding='utf8').strip()
      f.write(v)

    self.upper_dir = os.path.join(self.staging_dir, "upper")
    self.merged_dir = os.path.join(self.staging_dir, "merged")
    self.finalized_dir = os.path.join(self.staging_dir, "finalized")

    # setup local submodule remotes
    submodules = subprocess.check_output("git submodule --quiet foreach 'echo $name'",
                                         shell=True, cwd=BASEDIR, encoding='utf8').split()
    for s in submodules:
      sub_path = os.path.join(org_dir, s.split("_repo")[0])
      self._run(f"git clone {s} {sub_path}.git", cwd=BASEDIR)

    # setup two git repos, a remote and one we'll run updated in
    self._run([
      f"git clone {BASEDIR} {self.git_remote_dir}",
      f"git clone {self.git_remote_dir} {self.basedir}",
      f"cd {self.basedir} && git submodule init && git submodule update",
      f"cd {self.basedir} && scons -j{os.cpu_count()} cereal/ common/"
    ])

    self.params = Params(os.path.join(self.basedir, "persist/params"))
    self.params.clear_all()
    os.sync()

  def tearDown(self):
    try:
      if self.updated_proc is not None:
        self.updated_proc.terminate()
        self.updated_proc.wait(30)
    except Exception as e:
      print(e)
    self.tmp_dir.cleanup()

  # *** test helpers ***


  def _run(self, cmd, cwd=None):
    if not isinstance(cmd, list):
      cmd = (cmd,)

    for c in cmd:
      subprocess.check_output(c, cwd=cwd, shell=True)

  def _get_updated_proc(self):
    os.environ["PYTHONPATH"] = self.basedir
    os.environ["GIT_AUTHOR_NAME"] = "testy tester"
    os.environ["GIT_COMMITTER_NAME"] = "testy tester"
    os.environ["GIT_AUTHOR_EMAIL"] = "testy@tester.test"
    os.environ["GIT_COMMITTER_EMAIL"] = "testy@tester.test"
    os.environ["UPDATER_TEST_IP"] = "localhost"
    os.environ["UPDATER_LOCK_FILE"] = os.path.join(self.tmp_dir.name, "updater.lock")
    os.environ["UPDATER_STAGING_ROOT"] = self.staging_dir
    os.environ["UPDATER_NEOS_VERSION"] = self.neos_version
    os.environ["UPDATER_NEOSUPDATE_DIR"] = self.neosupdate_dir
    updated_path = os.path.join(self.basedir, "selfdrive/updated.py")
    return subprocess.Popen(updated_path, env=os.environ)

  def _start_updater(self, offroad=True, nosleep=False):
    self.params.put_bool("IsOffroad", offroad)
    self.updated_proc = self._get_updated_proc()
    if not nosleep:
      time.sleep(1)

  def _update_now(self):
    self.updated_proc.send_signal(signal.SIGHUP)

  # TODO: this should be implemented in params
  def _read_param(self, key, timeout=1):
    ret = None
    start_time = time.monotonic()
    while ret is None:
      ret = self.params.get(key, encoding='utf8')
      if time.monotonic() - start_time > timeout:
        break
      time.sleep(0.01)
    return ret

  def _wait_for_update(self, timeout=30, clear_param=False):
    if clear_param:
      self.params.remove("LastUpdateTime")

    self._update_now()
    t = self._read_param("LastUpdateTime", timeout=timeout)
    if t is None:
      raise Exception("timed out waiting for update to complete")

  def _make_commit(self):
    all_dirs, all_files = [], []
    for root, dirs, files in os.walk(self.git_remote_dir):
      if ".git" in root:
        continue
      for d in dirs:
        all_dirs.append(os.path.join(root, d))
      for f in files:
        all_files.append(os.path.join(root, f))

    # make a new dir and some new files
    new_dir = os.path.join(self.git_remote_dir, "this_is_a_new_dir")
    os.mkdir(new_dir)
    for _ in range(random.randrange(5, 30)):
      for d in (new_dir, random.choice(all_dirs)):
        with tempfile.NamedTemporaryFile(dir=d, delete=False) as f:
          f.write(os.urandom(random.randrange(1, 1000000)))

    # modify some files
    for f in random.sample(all_files, random.randrange(5, 50)):
      with open(f, "w+") as ff:
        txt = ff.readlines()
        ff.seek(0)
        for line in txt:
          ff.write(line[::-1])

    # remove some files
    for f in random.sample(all_files, random.randrange(5, 50)):
      os.remove(f)

    # remove some dirs
    for d in random.sample(all_dirs, random.randrange(1, 10)):
      shutil.rmtree(d)

    # commit the changes
    self._run([
      "git add -A",
      "git commit -m 'an update'",
    ], cwd=self.git_remote_dir)

  def _check_update_state(self, update_available):
    # make sure LastUpdateTime is recent
    t = self._read_param("LastUpdateTime")
    last_update_time = datetime.datetime.fromisoformat(t)
    td = datetime.datetime.utcnow() - last_update_time
    self.assertLess(td.total_seconds(), 10)
    self.params.remove("LastUpdateTime")

    # wait a bit for the rest of the params to be written
    time.sleep(0.1)

    # check params
    update = self._read_param("UpdateAvailable")
    self.assertEqual(update == "1", update_available, f"UpdateAvailable: {repr(update)}")
    self.assertEqual(self._read_param("UpdateFailedCount"), "0")

    # TODO: check that the finalized update actually matches remote
    # check the .overlay_init and .overlay_consistent flags
    self.assertTrue(os.path.isfile(os.path.join(self.basedir, ".overlay_init")))
    self.assertEqual(os.path.isfile(os.path.join(self.finalized_dir, ".overlay_consistent")), update_available)


  # *** test cases ***


  # Run updated for 100 cycles with no update
  def test_no_update(self):
    self._start_updater()
    for _ in range(100):
      self._wait_for_update(clear_param=True)
      self._check_update_state(False)

  # Let the updater run with no update for a cycle, then write an update
  def test_update(self):
    self._start_updater()

    # run for a cycle with no update
    self._wait_for_update(clear_param=True)
    self._check_update_state(False)

    # write an update to our remote
    self._make_commit()

    # run for a cycle to get the update
    self._wait_for_update(timeout=60, clear_param=True)
    self._check_update_state(True)

    # run another cycle with no update
    self._wait_for_update(clear_param=True)
    self._check_update_state(True)

  # Let the updater run for 10 cycles, and write an update every cycle
  @unittest.skip("need to make this faster")
  def test_update_loop(self):
    self._start_updater()

    # run for a cycle with no update
    self._wait_for_update(clear_param=True)
    for _ in range(10):
      time.sleep(0.5)
      self._make_commit()
      self._wait_for_update(timeout=90, clear_param=True)
      self._check_update_state(True)

  # Test overlay re-creation after tracking a new file in basedir's git
  def test_overlay_reinit(self):
    self._start_updater()

    overlay_init_fn = os.path.join(self.basedir, ".overlay_init")

    # run for a cycle with no update
    self._wait_for_update(clear_param=True)
    self.params.remove("LastUpdateTime")
    first_mtime = os.path.getmtime(overlay_init_fn)

    # touch a file in the basedir
    self._run("touch new_file && git add new_file", cwd=self.basedir)

    # run another cycle, should have a new mtime
    self._wait_for_update(clear_param=True)
    second_mtime = os.path.getmtime(overlay_init_fn)
    self.assertTrue(first_mtime != second_mtime)

    # run another cycle, mtime should be same as last cycle
    self._wait_for_update(clear_param=True)
    new_mtime = os.path.getmtime(overlay_init_fn)
    self.assertTrue(second_mtime == new_mtime)

  # Make sure updated exits if another instance is running
  def test_multiple_instances(self):
    # start updated and let it run for a cycle
    self._start_updater()
    time.sleep(1)
    self._wait_for_update(clear_param=True)

    # start another instance
    second_updated = self._get_updated_proc()
    ret_code = second_updated.wait(timeout=5)
    self.assertTrue(ret_code is not None)


  # *** test cases with NEOS updates ***


  # Run updated with no update, make sure it clears the old NEOS update
  def test_clear_neos_cache(self):
    # make the dir and some junk files
    os.mkdir(self.neosupdate_dir)
    for _ in range(15):
      with tempfile.NamedTemporaryFile(dir=self.neosupdate_dir, delete=False) as f:
        f.write(os.urandom(random.randrange(1, 1000000)))

    self._start_updater()
    self._wait_for_update(clear_param=True)
    self._check_update_state(False)
    self.assertFalse(os.path.isdir(self.neosupdate_dir))

  # Let the updater run with no update for a cycle, then write an update
  @unittest.skip("TODO: only runs on device")
  def test_update_with_neos_update(self):
    # bump the NEOS version and commit it
    self._run([
      "echo 'export REQUIRED_NEOS_VERSION=3' >> launch_env.sh",
      "git -c user.name='testy' -c user.email='testy@tester.test' \
          commit -am 'a neos update'",
    ], cwd=self.git_remote_dir)

    # run for a cycle to get the update
    self._start_updater()
    self._wait_for_update(timeout=60, clear_param=True)
    self._check_update_state(True)

    # TODO: more comprehensive check
    self.assertTrue(os.path.isdir(self.neosupdate_dir))


if __name__ == "__main__":
  unittest.main()
117
selfdrive/test/test_valgrind_replay.py
Normal file
@@ -0,0 +1,117 @@
#!/usr/bin/env python3
import os
import threading
import time
import unittest
import subprocess
import signal

if "CI" in os.environ:
  def tqdm(x):
    return x
else:
  from tqdm import tqdm  # type: ignore

import cereal.messaging as messaging
from collections import namedtuple
from openpilot.tools.lib.logreader import LogReader
from openpilot.tools.lib.openpilotci import get_url
from openpilot.common.basedir import BASEDIR

ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'command', 'path', 'segment', 'wait_for_response'])

CONFIGS = [
  ProcessConfig(
    proc_name="ubloxd",
    pub_sub={
      "ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
    },
    ignore=[],
    command="./ubloxd",
    path="system/ubloxd",
    segment="0375fdf7b1ce594d|2019-06-13--08-32-25--3",
    wait_for_response=True
  ),
]


class TestValgrind(unittest.TestCase):
  def extract_leak_sizes(self, log):
    if "All heap blocks were freed -- no leaks are possible" in log:
      return (0, 0, 0)

    log = log.replace(",", "")  # fixes casting to int issue with large leaks
    err_lost1 = log.split("definitely lost: ")[1]
    err_lost2 = log.split("indirectly lost: ")[1]
    err_lost3 = log.split("possibly lost: ")[1]
    definitely_lost = int(err_lost1.split(" ")[0])
    indirectly_lost = int(err_lost2.split(" ")[0])
    possibly_lost = int(err_lost3.split(" ")[0])
    return (definitely_lost, indirectly_lost, possibly_lost)
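
  # Commentary (added, not in the original source): this parses valgrind's
  # standard leak summary, which looks like:
  #   definitely lost: 4,848 bytes in 2 blocks
  #   indirectly lost: 0 bytes in 0 blocks
  #   possibly lost: 304 bytes in 2 blocks
  # The comma stripping above keeps int() happy for leaks over 999 bytes.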

  def valgrindlauncher(self, arg, cwd):
    os.chdir(os.path.join(BASEDIR, cwd))
    # Run valgrind on a process
    command = "valgrind --leak-check=full " + arg
    p = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)

    while not self.replay_done:
      time.sleep(0.1)

    # Kill valgrind and extract leak output
    os.killpg(os.getpgid(p.pid), signal.SIGINT)
    _, err = p.communicate()
    error_msg = str(err, encoding='utf-8')
    with open(os.path.join(BASEDIR, "selfdrive/test/valgrind_logs.txt"), "a") as f:
      f.write(error_msg)
      f.write(5 * "\n")
    definitely_lost, indirectly_lost, possibly_lost = self.extract_leak_sizes(error_msg)
    if max(definitely_lost, indirectly_lost, possibly_lost) > 0:
      self.leak = True
      print("LEAKS from", arg, "\nDefinitely lost:", definitely_lost, "\nIndirectly lost", indirectly_lost, "\nPossibly lost", possibly_lost)
    else:
      self.leak = False

  def replay_process(self, config, logreader):
    pub_sockets = list(config.pub_sub.keys())  # We dump data from logs here
    sub_sockets = [s for _, sub in config.pub_sub.items() for s in sub]  # We get responses here
    pm = messaging.PubMaster(pub_sockets)
    sm = messaging.SubMaster(sub_sockets)

    print("Sorting logs")
    all_msgs = sorted(logreader, key=lambda msg: msg.logMonoTime)
    pub_msgs = [msg for msg in all_msgs if msg.which() in list(config.pub_sub.keys())]

    thread = threading.Thread(target=self.valgrindlauncher, args=(config.command, config.path))
    thread.daemon = True
    thread.start()

    while not all(pm.all_readers_updated(s) for s in config.pub_sub.keys()):
      time.sleep(0)

    for msg in tqdm(pub_msgs):
      pm.send(msg.which(), msg.as_builder())
      if config.wait_for_response:
        sm.update(100)

    self.replay_done = True

  def test_config(self):
    open(os.path.join(BASEDIR, "selfdrive/test/valgrind_logs.txt"), "w").close()

    for cfg in CONFIGS:
      self.leak = None
      self.replay_done = False

      r, n = cfg.segment.rsplit("--", 1)
      lr = LogReader(get_url(r, n))
      self.replay_process(cfg, lr)

      while self.leak is None:
        time.sleep(0.1)  # wait for valgrind to finish

      self.assertFalse(self.leak)


if __name__ == "__main__":
  unittest.main()
84
selfdrive/test/update_ci_routes.py
Normal file
@@ -0,0 +1,84 @@
#!/usr/bin/env python3
import os
import re
import subprocess
import sys
from collections.abc import Iterable

from tqdm import tqdm

from openpilot.selfdrive.car.tests.routes import routes as test_car_models_routes
from openpilot.selfdrive.test.process_replay.test_processes import source_segments as replay_segments
from openpilot.tools.lib.azure_container import AzureContainer
from openpilot.tools.lib.openpilotcontainers import DataCIContainer, DataProdContainer, OpenpilotCIContainer

SOURCES: list[AzureContainer] = [
  DataProdContainer,
  DataCIContainer
]

DEST = OpenpilotCIContainer

def upload_route(path: str, exclude_patterns: Iterable[str] | None = None) -> None:
  if exclude_patterns is None:
    exclude_patterns = [r'dcamera\.hevc']

  r, n = path.rsplit("--", 1)
  r = '/'.join(r.split('/')[-2:])  # strip out anything extra in the path
  destpath = f"{r}/{n}"
  for file in os.listdir(path):
    if any(re.search(pattern, file) for pattern in exclude_patterns):
      continue
    DEST.upload_file(os.path.join(path, file), f"{destpath}/{file}")


def sync_to_ci_public(route: str) -> bool:
  dest_container, dest_key = DEST.get_client_and_key()
  key_prefix = route.replace('|', '/')
  dongle_id = key_prefix.split('/')[0]

  if next(dest_container.list_blob_names(name_starts_with=key_prefix), None) is not None:
    return True

  print(f"Uploading {route}")
  for source_container in SOURCES:
    # assumes az login has been run
    print(f"Trying {source_container.ACCOUNT}/{source_container.CONTAINER}")
    _, source_key = source_container.get_client_and_key()
    cmd = [
      "azcopy",
      "copy",
      f"{source_container.BASE_URL}{key_prefix}?{source_key}",
      f"{DEST.BASE_URL}{dongle_id}?{dest_key}",
      "--recursive=true",
      "--overwrite=false",
      "--exclude-pattern=*/dcamera.hevc",
    ]

    try:
      result = subprocess.call(cmd, stdout=subprocess.DEVNULL)
      if result == 0:
        print("Success")
        return True
    except subprocess.CalledProcessError:
      print("Failed")

  return False
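
# Commentary (added, not in the original source): routes are stored with '|'
# replaced by '/', so "ef895f46af5fd73f|2021-05-22--14-06-35" becomes the blob
# prefix "ef895f46af5fd73f/2021-05-22--14-06-35" with dongle_id "ef895f46af5fd73f".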


if __name__ == "__main__":
  failed_routes = []

  to_sync = sys.argv[1:]

  if not len(to_sync):
    # sync routes from the car tests and process replay
    to_sync.extend([rt.route for rt in test_car_models_routes])
    to_sync.extend([s[1].rsplit('--', 1)[0] for s in replay_segments])

  for r in tqdm(to_sync):
    if not sync_to_ci_public(r):
      failed_routes.append(r)

  if len(failed_routes):
    print("failed routes:", failed_routes)