From b39097a12d12d1d7260a00d61d89b15d426637b1 Mon Sep 17 00:00:00 2001 From: FrogAi <91348155+FrogAi@users.noreply.github.com> Date: Wed, 6 Mar 2024 14:58:47 -0700 Subject: [PATCH] Add openpilot tests --- panda/drivers/linux/test/Makefile | 2 + panda/drivers/linux/test/main.c | 120 + panda/drivers/linux/test/run.sh | 4 + panda/tests/__init__.py | 0 panda/tests/benchmark.py | 43 + panda/tests/black_white_loopback_test.py | 156 + panda/tests/black_white_relay_endurance.py | 164 + panda/tests/black_white_relay_test.py | 135 + panda/tests/bulk_write_test.py | 48 + panda/tests/can_printer.py | 36 + panda/tests/canfd/test_canfd.py | 152 + panda/tests/check_fw_size.py | 97 + panda/tests/ci_shell.sh | 20 + panda/tests/debug_console.py | 62 + .../development/register_hashmap_spread.py | 50 + panda/tests/echo.py | 16 + panda/tests/elm_car_simulator.py | 232 + panda/tests/elm_throughput.py | 44 + panda/tests/fan/fan_test.py | 14 + panda/tests/fan/fan_tuning.py | 88 + panda/tests/get_version.py | 7 + panda/tests/gmbitbang/recv.py | 18 + panda/tests/gmbitbang/rigol.py | 36 + panda/tests/gmbitbang/test.py | 32 + panda/tests/gmbitbang/test_one.py | 22 + panda/tests/gmbitbang/test_packer.c | 28 + panda/tests/gmlan_harness_test.py | 78 + panda/tests/health_test.py | 18 + panda/tests/hitl/1_program.py | 103 + panda/tests/hitl/2_health.py | 125 + panda/tests/hitl/3_usb_to_can.py | 127 + panda/tests/hitl/4_can_loopback.py | 202 + panda/tests/hitl/5_spi.py | 103 + panda/tests/hitl/6_safety.py | 29 + panda/tests/hitl/7_internal.py | 68 + panda/tests/hitl/__init__.py | 0 panda/tests/hitl/conftest.py | 225 + panda/tests/hitl/helpers.py | 71 + .../hitl/known_bootstub/bootstub.panda_h7.bin | Bin 0 -> 16824 bytes ...bootstub_f4_first_dos_production.panda.bin | Bin 0 -> 14220 bytes .../bootstub_f4_only_bcd.panda.bin | Bin 0 -> 14208 bytes panda/tests/hitl/reset_jungles.py | 42 + panda/tests/hitl/run_parallel_tests.sh | 8 + panda/tests/hitl/run_serial_tests.sh | 7 + panda/tests/ir_test.py | 14 + panda/tests/libpanda/SConscript | 42 + panda/tests/libpanda/libpanda_py.py | 98 + panda/tests/libpanda/panda.c | 33 + panda/tests/libpanda/safety_helpers.h | 195 + panda/tests/libpanda/safety_helpers.py | 106 + panda/tests/libs/resetter.py | 57 + panda/tests/loopback_test.py | 94 + panda/tests/message_drop_test.py | 70 + panda/tests/misra/.gitignore | 5 + panda/tests/misra/coverage_table | 156 + panda/tests/misra/install.sh | 19 + panda/tests/misra/test_misra.sh | 61 + panda/tests/misra/test_mutation.py | 84 + panda/tests/read_flash_spi.py | 32 + panda/tests/read_st_flash.sh | 6 + panda/tests/read_winusb_descriptors.py | 34 + panda/tests/reflash_internal_panda.py | 49 + panda/tests/relay_test.py | 16 + panda/tests/restore_flash_spi.py | 27 + panda/tests/rtc_test.py | 10 + panda/tests/safety/__init__.py | 0 panda/tests/safety/common.py | 1071 +++++ panda/tests/safety/hyundai_common.py | 157 + panda/tests/safety/test.sh | 34 + panda/tests/safety/test_body.py | 70 + panda/tests/safety/test_chrysler.py | 125 + panda/tests/safety/test_defaults.py | 73 + panda/tests/safety/test_elm327.py | 48 + panda/tests/safety/test_ford.py | 476 ++ panda/tests/safety/test_gm.py | 228 + panda/tests/safety/test_honda.py | 618 +++ panda/tests/safety/test_hyundai.py | 216 + panda/tests/safety/test_hyundai_canfd.py | 272 ++ panda/tests/safety/test_mazda.py | 86 + panda/tests/safety/test_nissan.py | 117 + panda/tests/safety/test_subaru.py | 228 + panda/tests/safety/test_subaru_preglobal.py | 70 + panda/tests/safety/test_tesla.py | 185 + 
panda/tests/safety/test_toyota.py | 367 ++ panda/tests/safety/test_volkswagen_mqb.py | 225 + panda/tests/safety/test_volkswagen_pq.py | 199 + panda/tests/safety_replay/.gitignore | 1 + panda/tests/safety_replay/__init__.py | 0 panda/tests/safety_replay/helpers.py | 80 + panda/tests/safety_replay/replay_drive.py | 103 + panda/tests/setup_device_ci.sh | 74 + panda/tests/som/on-device.py | 24 + panda/tests/som/test_bootkick.py | 154 + panda/tests/som_debug.sh | 7 + panda/tests/spam_can.py | 20 + panda/tests/standalone_test.py | 32 + panda/tests/test_rsa.c | 34 + panda/tests/tucan_loopback.py | 92 + panda/tests/usbprotocol/test.sh | 8 + panda/tests/usbprotocol/test_comms.py | 160 + panda/tests/usbprotocol/test_pandalib.py | 26 + selfdrive/car/tests/.gitignore | 1 + selfdrive/car/tests/big_cars_test.sh | 12 + selfdrive/car/tests/routes.py | 299 ++ selfdrive/car/tests/test_can_fingerprint.py | 67 + selfdrive/car/tests/test_car_interfaces.py | 2 +- selfdrive/car/tests/test_docs.py | 97 + selfdrive/car/tests/test_fingerprints.py | 96 + selfdrive/car/tests/test_fw_fingerprint.py | 313 ++ selfdrive/car/tests/test_lateral_limits.py | 101 + selfdrive/car/tests/test_models.py | 488 +++ selfdrive/car/tests/test_models_segs.txt | 3884 +++++++++++++++++ selfdrive/car/tests/test_platform_configs.py | 23 + selfdrive/controls/tests/__init__.py | 0 selfdrive/controls/tests/test_alerts.py | 135 + selfdrive/controls/tests/test_cruise_speed.py | 158 + .../controls/tests/test_following_distance.py | 47 + selfdrive/controls/tests/test_lateral_mpc.py | 89 + selfdrive/controls/tests/test_leads.py | 36 + selfdrive/controls/tests/test_startup.py | 120 + .../controls/tests/test_state_machine.py | 109 + selfdrive/locationd/test/.gitignore | 1 + selfdrive/locationd/test/__init__.py | 0 selfdrive/locationd/test/test_calibrationd.py | 117 + selfdrive/locationd/test/test_locationd.py | 94 + .../test/test_locationd_scenarios.py | 228 + selfdrive/test/.gitignore | 9 + selfdrive/test/ci_shell.sh | 19 + selfdrive/test/ciui.py | 58 + selfdrive/test/cpp_harness.py | 11 + selfdrive/test/docker_build.sh | 26 + selfdrive/test/docker_common.sh | 21 + selfdrive/test/docker_tag_multiarch.sh | 25 + selfdrive/test/fuzzy_generation.py | 17 +- selfdrive/test/helpers.py | 26 +- .../test/longitudinal_maneuvers/.gitignore | 1 + .../test/longitudinal_maneuvers/__init__.py | 0 .../test/longitudinal_maneuvers/maneuver.py | 71 + .../test/longitudinal_maneuvers/plant.py | 172 + .../test_longitudinal.py | 160 + selfdrive/test/loop_until_fail.sh | 8 + selfdrive/test/process_replay/.gitignore | 2 + selfdrive/test/process_replay/README.md | 126 + selfdrive/test/process_replay/__init__.py | 2 + selfdrive/test/process_replay/capture.py | 59 + selfdrive/test/process_replay/compare_logs.py | 150 + .../process_replay/debayer_replay_ref_commit | 1 + selfdrive/test/process_replay/migration.py | 203 + selfdrive/test/process_replay/model_replay.py | 249 ++ .../process_replay/model_replay_ref_commit | 1 + .../test/process_replay/process_replay.py | 800 ++++ selfdrive/test/process_replay/ref_commit | 1 + selfdrive/test/process_replay/regen.py | 158 + selfdrive/test/process_replay/regen_all.py | 54 + selfdrive/test/process_replay/test_debayer.py | 196 + selfdrive/test/process_replay/test_fuzzy.py | 33 + .../test/process_replay/test_processes.py | 231 + selfdrive/test/process_replay/test_regen.py | 45 + selfdrive/test/process_replay/vision_meta.py | 43 + selfdrive/test/profiling/.gitignore | 2 + selfdrive/test/profiling/__init__.py | 0 
selfdrive/test/profiling/lib.py | 91 + selfdrive/test/profiling/profiler.py | 97 + selfdrive/test/scons_build_test.sh | 10 + selfdrive/test/setup_vsound.sh | 10 + selfdrive/test/setup_xvfb.sh | 19 + selfdrive/test/test_onroad.py | 4 +- selfdrive/test/test_updated.py | 302 ++ selfdrive/test/test_valgrind_replay.py | 117 + selfdrive/test/update_ci_routes.py | 84 + system/camerad/test/.gitignore | 2 + system/camerad/test/check_skips.py | 27 + .../test/get_thumbnails_for_segment.py | 24 + system/camerad/test/stress_restart.sh | 9 + system/camerad/test/test_ae_gray.cc | 83 + system/camerad/test/test_camerad.py | 83 + system/camerad/test/test_exposure.py | 55 + tinygrad_repo/test/Dockerfile | 12 + tinygrad_repo/test/__init__.py | 0 .../test/external/dist/test_collectives.py | 62 + .../test/external/dist/test_world.py | 68 + .../test/external/external_copy_benchmark.py | 27 + .../test/external/external_llama_eval.py | 102 + .../test/external/external_model_benchmark.py | 128 + .../test/external/external_multi_gpu.py | 70 + .../test/external/external_osx_profiling.py | 41 + .../external_test_allocator_on_models.py | 125 + .../test/external/external_test_embedding.py | 8 + .../test/external/external_test_gpu_ast.py | 208 + .../test/external/external_test_image.py | 52 + .../external/external_test_jit_on_models.py | 45 + .../external/external_test_onnx_backend.py | 208 + .../test/external/external_test_opt.py | 392 ++ .../test/external/external_test_optim.py | 75 + .../external/external_test_speed_llama.py | 57 + .../external/external_test_uops_graphing.py | 44 + .../test/external/external_test_yolo.py | 36 + .../test/external/external_test_yolov8.py | 76 + .../test/external/fuzz_shapetracker.py | 61 + tinygrad_repo/test/external/fuzz_symbolic.py | 69 + .../test/external/graph_batchnorm.py | 61 + tinygrad_repo/test/external/test_example.py | 74 + tinygrad_repo/test/extra/test_export_model.py | 50 + .../test/extra/test_extra_helpers.py | 57 + tinygrad_repo/test/extra/test_lr_scheduler.py | 107 + tinygrad_repo/test/extra/test_utils.py | 106 + tinygrad_repo/test/helpers.py | 15 + .../test/models/efficientnet/Chicken.jpg | Bin 0 -> 110248 bytes .../test/models/efficientnet/car.jpg | Bin 0 -> 8131 bytes .../imagenet1000_clsidx_to_labels.txt | 1000 +++++ tinygrad_repo/test/models/test_bert.py | 57 + .../test/models/test_efficientnet.py | 115 + tinygrad_repo/test/models/test_end2end.py | 165 + tinygrad_repo/test/models/test_mnist.py | 116 + tinygrad_repo/test/models/test_onnx.py | 143 + tinygrad_repo/test/models/test_real_world.py | 100 + tinygrad_repo/test/models/test_rnnt.py | 47 + tinygrad_repo/test/models/test_train.py | 83 + tinygrad_repo/test/models/test_waifu2x.py | 25 + tinygrad_repo/test/models/test_whisper.py | 25 + tinygrad_repo/test/models/waifu2x/input.png | Bin 0 -> 7321 bytes tinygrad_repo/test/models/waifu2x/output.png | Bin 0 -> 14906 bytes tinygrad_repo/test/models/whisper/test.wav | Bin 0 -> 35676 bytes tinygrad_repo/test/test_allocators.py | 136 + tinygrad_repo/test/test_assign.py | 67 + tinygrad_repo/test/test_conv.py | 147 + tinygrad_repo/test/test_conv_shapetracker.py | 27 + tinygrad_repo/test/test_custom_function.py | 107 + tinygrad_repo/test/test_dtype.py | 182 + tinygrad_repo/test/test_gc.py | 37 + tinygrad_repo/test/test_jit.py | 194 + tinygrad_repo/test/test_kernel_cache.py | 54 + tinygrad_repo/test/test_lazybuffer.py | 73 + tinygrad_repo/test/test_lazyop.py | 21 + tinygrad_repo/test/test_linearizer.py | 492 +++ .../test/test_linearizer_failures.py | 21 + 
tinygrad_repo/test/test_net_speed.py | 102 + tinygrad_repo/test/test_nn.py | 339 ++ tinygrad_repo/test/test_ops.py | 1245 ++++++ tinygrad_repo/test/test_optim.py | 98 + tinygrad_repo/test/test_randomness.py | 115 + tinygrad_repo/test/test_schedule.py | 335 ++ tinygrad_repo/test/test_search.py | 19 + tinygrad_repo/test/test_specific_conv.py | 57 + tinygrad_repo/test/test_speed_v_torch.py | 288 ++ tinygrad_repo/test/test_symbolic_jit.py | 181 + tinygrad_repo/test/test_symbolic_ops.py | 124 + .../test/test_symbolic_shapetracker.py | 173 + tinygrad_repo/test/test_tensor.py | 266 ++ tinygrad_repo/test/test_uops.py | 99 + tinygrad_repo/test/test_webgpu.js | 51 + tinygrad_repo/test/test_winograd.py | 40 + tinygrad_repo/test/unit/test_disk_cache.py | 66 + tinygrad_repo/test/unit/test_disk_tensor.py | 150 + tinygrad_repo/test/unit/test_flopcounter.py | 44 + tinygrad_repo/test/unit/test_helpers.py | 142 + tinygrad_repo/test/unit/test_shapetracker.py | 663 +++ tinygrad_repo/test/unit/test_shm_tensor.py | 39 + tinygrad_repo/test/unit/test_symbolic.py | 448 ++ 259 files changed, 31176 insertions(+), 12 deletions(-) create mode 100644 panda/drivers/linux/test/Makefile create mode 100644 panda/drivers/linux/test/main.c create mode 100644 panda/drivers/linux/test/run.sh create mode 100644 panda/tests/__init__.py create mode 100644 panda/tests/benchmark.py create mode 100644 panda/tests/black_white_loopback_test.py create mode 100644 panda/tests/black_white_relay_endurance.py create mode 100644 panda/tests/black_white_relay_test.py create mode 100644 panda/tests/bulk_write_test.py create mode 100644 panda/tests/can_printer.py create mode 100644 panda/tests/canfd/test_canfd.py create mode 100644 panda/tests/check_fw_size.py create mode 100644 panda/tests/ci_shell.sh create mode 100644 panda/tests/debug_console.py create mode 100644 panda/tests/development/register_hashmap_spread.py create mode 100644 panda/tests/echo.py create mode 100644 panda/tests/elm_car_simulator.py create mode 100644 panda/tests/elm_throughput.py create mode 100644 panda/tests/fan/fan_test.py create mode 100644 panda/tests/fan/fan_tuning.py create mode 100644 panda/tests/get_version.py create mode 100644 panda/tests/gmbitbang/recv.py create mode 100644 panda/tests/gmbitbang/rigol.py create mode 100644 panda/tests/gmbitbang/test.py create mode 100644 panda/tests/gmbitbang/test_one.py create mode 100644 panda/tests/gmbitbang/test_packer.c create mode 100644 panda/tests/gmlan_harness_test.py create mode 100644 panda/tests/health_test.py create mode 100644 panda/tests/hitl/1_program.py create mode 100644 panda/tests/hitl/2_health.py create mode 100644 panda/tests/hitl/3_usb_to_can.py create mode 100644 panda/tests/hitl/4_can_loopback.py create mode 100644 panda/tests/hitl/5_spi.py create mode 100644 panda/tests/hitl/6_safety.py create mode 100644 panda/tests/hitl/7_internal.py create mode 100644 panda/tests/hitl/__init__.py create mode 100644 panda/tests/hitl/conftest.py create mode 100644 panda/tests/hitl/helpers.py create mode 100644 panda/tests/hitl/known_bootstub/bootstub.panda_h7.bin create mode 100644 panda/tests/hitl/known_bootstub/bootstub_f4_first_dos_production.panda.bin create mode 100644 panda/tests/hitl/known_bootstub/bootstub_f4_only_bcd.panda.bin create mode 100644 panda/tests/hitl/reset_jungles.py create mode 100644 panda/tests/hitl/run_parallel_tests.sh create mode 100644 panda/tests/hitl/run_serial_tests.sh create mode 100644 panda/tests/ir_test.py create mode 100644 panda/tests/libpanda/SConscript create mode 100644 
panda/tests/libpanda/libpanda_py.py create mode 100644 panda/tests/libpanda/panda.c create mode 100644 panda/tests/libpanda/safety_helpers.h create mode 100644 panda/tests/libpanda/safety_helpers.py create mode 100644 panda/tests/libs/resetter.py create mode 100644 panda/tests/loopback_test.py create mode 100644 panda/tests/message_drop_test.py create mode 100644 panda/tests/misra/.gitignore create mode 100644 panda/tests/misra/coverage_table create mode 100644 panda/tests/misra/install.sh create mode 100644 panda/tests/misra/test_misra.sh create mode 100644 panda/tests/misra/test_mutation.py create mode 100644 panda/tests/read_flash_spi.py create mode 100644 panda/tests/read_st_flash.sh create mode 100644 panda/tests/read_winusb_descriptors.py create mode 100644 panda/tests/reflash_internal_panda.py create mode 100644 panda/tests/relay_test.py create mode 100644 panda/tests/restore_flash_spi.py create mode 100644 panda/tests/rtc_test.py create mode 100644 panda/tests/safety/__init__.py create mode 100644 panda/tests/safety/common.py create mode 100644 panda/tests/safety/hyundai_common.py create mode 100644 panda/tests/safety/test.sh create mode 100644 panda/tests/safety/test_body.py create mode 100644 panda/tests/safety/test_chrysler.py create mode 100644 panda/tests/safety/test_defaults.py create mode 100644 panda/tests/safety/test_elm327.py create mode 100644 panda/tests/safety/test_ford.py create mode 100644 panda/tests/safety/test_gm.py create mode 100644 panda/tests/safety/test_honda.py create mode 100644 panda/tests/safety/test_hyundai.py create mode 100644 panda/tests/safety/test_hyundai_canfd.py create mode 100644 panda/tests/safety/test_mazda.py create mode 100644 panda/tests/safety/test_nissan.py create mode 100644 panda/tests/safety/test_subaru.py create mode 100644 panda/tests/safety/test_subaru_preglobal.py create mode 100644 panda/tests/safety/test_tesla.py create mode 100644 panda/tests/safety/test_toyota.py create mode 100644 panda/tests/safety/test_volkswagen_mqb.py create mode 100644 panda/tests/safety/test_volkswagen_pq.py create mode 100644 panda/tests/safety_replay/.gitignore create mode 100644 panda/tests/safety_replay/__init__.py create mode 100644 panda/tests/safety_replay/helpers.py create mode 100644 panda/tests/safety_replay/replay_drive.py create mode 100644 panda/tests/setup_device_ci.sh create mode 100644 panda/tests/som/on-device.py create mode 100644 panda/tests/som/test_bootkick.py create mode 100644 panda/tests/som_debug.sh create mode 100644 panda/tests/spam_can.py create mode 100644 panda/tests/standalone_test.py create mode 100644 panda/tests/test_rsa.c create mode 100644 panda/tests/tucan_loopback.py create mode 100644 panda/tests/usbprotocol/test.sh create mode 100644 panda/tests/usbprotocol/test_comms.py create mode 100644 panda/tests/usbprotocol/test_pandalib.py create mode 100644 selfdrive/car/tests/.gitignore create mode 100644 selfdrive/car/tests/big_cars_test.sh create mode 100644 selfdrive/car/tests/routes.py create mode 100644 selfdrive/car/tests/test_can_fingerprint.py create mode 100644 selfdrive/car/tests/test_docs.py create mode 100644 selfdrive/car/tests/test_fingerprints.py create mode 100644 selfdrive/car/tests/test_fw_fingerprint.py create mode 100644 selfdrive/car/tests/test_lateral_limits.py create mode 100644 selfdrive/car/tests/test_models.py create mode 100644 selfdrive/car/tests/test_models_segs.txt create mode 100644 selfdrive/car/tests/test_platform_configs.py create mode 100644 selfdrive/controls/tests/__init__.py create mode 
100644 selfdrive/controls/tests/test_alerts.py create mode 100644 selfdrive/controls/tests/test_cruise_speed.py create mode 100644 selfdrive/controls/tests/test_following_distance.py create mode 100644 selfdrive/controls/tests/test_lateral_mpc.py create mode 100644 selfdrive/controls/tests/test_leads.py create mode 100644 selfdrive/controls/tests/test_startup.py create mode 100644 selfdrive/controls/tests/test_state_machine.py create mode 100644 selfdrive/locationd/test/.gitignore create mode 100644 selfdrive/locationd/test/__init__.py create mode 100644 selfdrive/locationd/test/test_calibrationd.py create mode 100644 selfdrive/locationd/test/test_locationd.py create mode 100644 selfdrive/locationd/test/test_locationd_scenarios.py create mode 100644 selfdrive/test/.gitignore create mode 100644 selfdrive/test/ci_shell.sh create mode 100644 selfdrive/test/ciui.py create mode 100644 selfdrive/test/cpp_harness.py create mode 100644 selfdrive/test/docker_build.sh create mode 100644 selfdrive/test/docker_common.sh create mode 100644 selfdrive/test/docker_tag_multiarch.sh create mode 100644 selfdrive/test/longitudinal_maneuvers/.gitignore create mode 100644 selfdrive/test/longitudinal_maneuvers/__init__.py create mode 100644 selfdrive/test/longitudinal_maneuvers/maneuver.py create mode 100644 selfdrive/test/longitudinal_maneuvers/plant.py create mode 100644 selfdrive/test/longitudinal_maneuvers/test_longitudinal.py create mode 100644 selfdrive/test/loop_until_fail.sh create mode 100644 selfdrive/test/process_replay/.gitignore create mode 100644 selfdrive/test/process_replay/README.md create mode 100644 selfdrive/test/process_replay/__init__.py create mode 100644 selfdrive/test/process_replay/capture.py create mode 100644 selfdrive/test/process_replay/compare_logs.py create mode 100644 selfdrive/test/process_replay/debayer_replay_ref_commit create mode 100644 selfdrive/test/process_replay/migration.py create mode 100644 selfdrive/test/process_replay/model_replay.py create mode 100644 selfdrive/test/process_replay/model_replay_ref_commit create mode 100644 selfdrive/test/process_replay/process_replay.py create mode 100644 selfdrive/test/process_replay/ref_commit create mode 100644 selfdrive/test/process_replay/regen.py create mode 100644 selfdrive/test/process_replay/regen_all.py create mode 100644 selfdrive/test/process_replay/test_debayer.py create mode 100644 selfdrive/test/process_replay/test_fuzzy.py create mode 100644 selfdrive/test/process_replay/test_processes.py create mode 100644 selfdrive/test/process_replay/test_regen.py create mode 100644 selfdrive/test/process_replay/vision_meta.py create mode 100644 selfdrive/test/profiling/.gitignore create mode 100644 selfdrive/test/profiling/__init__.py create mode 100644 selfdrive/test/profiling/lib.py create mode 100644 selfdrive/test/profiling/profiler.py create mode 100644 selfdrive/test/scons_build_test.sh create mode 100644 selfdrive/test/setup_vsound.sh create mode 100644 selfdrive/test/setup_xvfb.sh create mode 100644 selfdrive/test/test_updated.py create mode 100644 selfdrive/test/test_valgrind_replay.py create mode 100644 selfdrive/test/update_ci_routes.py create mode 100644 system/camerad/test/.gitignore create mode 100644 system/camerad/test/check_skips.py create mode 100644 system/camerad/test/get_thumbnails_for_segment.py create mode 100644 system/camerad/test/stress_restart.sh create mode 100644 system/camerad/test/test_ae_gray.cc create mode 100644 system/camerad/test/test_camerad.py create mode 100644 
system/camerad/test/test_exposure.py create mode 100644 tinygrad_repo/test/Dockerfile create mode 100644 tinygrad_repo/test/__init__.py create mode 100644 tinygrad_repo/test/external/dist/test_collectives.py create mode 100644 tinygrad_repo/test/external/dist/test_world.py create mode 100644 tinygrad_repo/test/external/external_copy_benchmark.py create mode 100644 tinygrad_repo/test/external/external_llama_eval.py create mode 100644 tinygrad_repo/test/external/external_model_benchmark.py create mode 100644 tinygrad_repo/test/external/external_multi_gpu.py create mode 100644 tinygrad_repo/test/external/external_osx_profiling.py create mode 100644 tinygrad_repo/test/external/external_test_allocator_on_models.py create mode 100644 tinygrad_repo/test/external/external_test_embedding.py create mode 100644 tinygrad_repo/test/external/external_test_gpu_ast.py create mode 100644 tinygrad_repo/test/external/external_test_image.py create mode 100644 tinygrad_repo/test/external/external_test_jit_on_models.py create mode 100644 tinygrad_repo/test/external/external_test_onnx_backend.py create mode 100644 tinygrad_repo/test/external/external_test_opt.py create mode 100644 tinygrad_repo/test/external/external_test_optim.py create mode 100644 tinygrad_repo/test/external/external_test_speed_llama.py create mode 100644 tinygrad_repo/test/external/external_test_uops_graphing.py create mode 100644 tinygrad_repo/test/external/external_test_yolo.py create mode 100644 tinygrad_repo/test/external/external_test_yolov8.py create mode 100644 tinygrad_repo/test/external/fuzz_shapetracker.py create mode 100644 tinygrad_repo/test/external/fuzz_symbolic.py create mode 100644 tinygrad_repo/test/external/graph_batchnorm.py create mode 100644 tinygrad_repo/test/external/test_example.py create mode 100644 tinygrad_repo/test/extra/test_export_model.py create mode 100644 tinygrad_repo/test/extra/test_extra_helpers.py create mode 100644 tinygrad_repo/test/extra/test_lr_scheduler.py create mode 100644 tinygrad_repo/test/extra/test_utils.py create mode 100644 tinygrad_repo/test/helpers.py create mode 100644 tinygrad_repo/test/models/efficientnet/Chicken.jpg create mode 100644 tinygrad_repo/test/models/efficientnet/car.jpg create mode 100644 tinygrad_repo/test/models/efficientnet/imagenet1000_clsidx_to_labels.txt create mode 100644 tinygrad_repo/test/models/test_bert.py create mode 100644 tinygrad_repo/test/models/test_efficientnet.py create mode 100644 tinygrad_repo/test/models/test_end2end.py create mode 100644 tinygrad_repo/test/models/test_mnist.py create mode 100644 tinygrad_repo/test/models/test_onnx.py create mode 100644 tinygrad_repo/test/models/test_real_world.py create mode 100644 tinygrad_repo/test/models/test_rnnt.py create mode 100644 tinygrad_repo/test/models/test_train.py create mode 100644 tinygrad_repo/test/models/test_waifu2x.py create mode 100644 tinygrad_repo/test/models/test_whisper.py create mode 100644 tinygrad_repo/test/models/waifu2x/input.png create mode 100644 tinygrad_repo/test/models/waifu2x/output.png create mode 100644 tinygrad_repo/test/models/whisper/test.wav create mode 100644 tinygrad_repo/test/test_allocators.py create mode 100644 tinygrad_repo/test/test_assign.py create mode 100644 tinygrad_repo/test/test_conv.py create mode 100644 tinygrad_repo/test/test_conv_shapetracker.py create mode 100644 tinygrad_repo/test/test_custom_function.py create mode 100644 tinygrad_repo/test/test_dtype.py create mode 100644 tinygrad_repo/test/test_gc.py create mode 100644 tinygrad_repo/test/test_jit.py create 
mode 100644 tinygrad_repo/test/test_kernel_cache.py create mode 100644 tinygrad_repo/test/test_lazybuffer.py create mode 100644 tinygrad_repo/test/test_lazyop.py create mode 100644 tinygrad_repo/test/test_linearizer.py create mode 100644 tinygrad_repo/test/test_linearizer_failures.py create mode 100644 tinygrad_repo/test/test_net_speed.py create mode 100644 tinygrad_repo/test/test_nn.py create mode 100644 tinygrad_repo/test/test_ops.py create mode 100644 tinygrad_repo/test/test_optim.py create mode 100644 tinygrad_repo/test/test_randomness.py create mode 100644 tinygrad_repo/test/test_schedule.py create mode 100644 tinygrad_repo/test/test_search.py create mode 100644 tinygrad_repo/test/test_specific_conv.py create mode 100644 tinygrad_repo/test/test_speed_v_torch.py create mode 100644 tinygrad_repo/test/test_symbolic_jit.py create mode 100644 tinygrad_repo/test/test_symbolic_ops.py create mode 100644 tinygrad_repo/test/test_symbolic_shapetracker.py create mode 100644 tinygrad_repo/test/test_tensor.py create mode 100644 tinygrad_repo/test/test_uops.py create mode 100644 tinygrad_repo/test/test_webgpu.js create mode 100644 tinygrad_repo/test/test_winograd.py create mode 100644 tinygrad_repo/test/unit/test_disk_cache.py create mode 100644 tinygrad_repo/test/unit/test_disk_tensor.py create mode 100644 tinygrad_repo/test/unit/test_flopcounter.py create mode 100644 tinygrad_repo/test/unit/test_helpers.py create mode 100644 tinygrad_repo/test/unit/test_shapetracker.py create mode 100644 tinygrad_repo/test/unit/test_shm_tensor.py create mode 100644 tinygrad_repo/test/unit/test_symbolic.py diff --git a/panda/drivers/linux/test/Makefile b/panda/drivers/linux/test/Makefile new file mode 100644 index 0000000..c73945e --- /dev/null +++ b/panda/drivers/linux/test/Makefile @@ -0,0 +1,2 @@ +all: + gcc main.c -o cantest -pthread -lpthread diff --git a/panda/drivers/linux/test/main.c b/panda/drivers/linux/test/main.c new file mode 100644 index 0000000..1f44efc --- /dev/null +++ b/panda/drivers/linux/test/main.c @@ -0,0 +1,120 @@ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <pthread.h> + +#include <net/if.h> +#include <sys/ioctl.h> +#include <sys/socket.h> +#include <sys/types.h> + +#include <linux/can.h> +#include <linux/can/raw.h> + +const char *ifname = "can0"; + +static unsigned char payload[] = {0xAA, 0xAA, 0xAA, 0xAA, 0x07, 0x00, 0x00, 0x00}; +int packet_len = 8; +int dir = 0; + +void *write_thread( void *dat ){ + int nbytes; + struct can_frame frame; + int s = *((int*) dat); + + while(1){ + for(int i = 0; i < 1; i ++){ + if(packet_len % 2){ + frame.can_id = 0x8AA | CAN_EFF_FLAG; + }else{ + frame.can_id = 0xAA; + } + + frame.can_dlc = packet_len; + memcpy(frame.data, payload, frame.can_dlc); + + nbytes = write(s, &frame, sizeof(struct can_frame)); + + printf("Wrote %d bytes; addr: %lx; datlen: %d\n", nbytes, frame.can_id, frame.can_dlc); + + if(dir){ + packet_len++; + if(packet_len >= 8) + dir = 0; + }else{ + packet_len--; + if(packet_len <= 0) + dir = 1; + } + } + sleep(2); + } +} + + +int main(void) +{ + pthread_t sndthread; + int err, s, nbytes; + struct sockaddr_can addr; + struct can_frame frame; + struct ifreq ifr; + + if((s = socket(PF_CAN, SOCK_RAW, CAN_RAW)) < 0) { + perror("Error while opening socket"); + return -1; + } + + strcpy(ifr.ifr_name, ifname); + ioctl(s, SIOCGIFINDEX, &ifr); + + addr.can_family = AF_CAN; + addr.can_ifindex = ifr.ifr_ifindex; + + printf("%s at index %d\n", ifname, ifr.ifr_ifindex); + + if(bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) { + perror("Error in socket bind"); + return -2; + } + + /////// Create Write Thread + + err = pthread_create(
&sndthread, NULL, write_thread, (void*) &s); + if(err){ + fprintf(stderr,"Error - pthread_create() return code: %d\n", err); + exit(EXIT_FAILURE); + } + + /////// Listen to socket + while (1) { + struct can_frame framein; + + // Read in a CAN frame + int numBytes = read(s, &framein, CANFD_MTU); + switch (numBytes) { + case CAN_MTU: + if(framein.can_id & 0x80000000) + printf("Received %u byte payload; canid 0x%lx (EXT)\n", + framein.can_dlc, framein.can_id & 0x7FFFFFFF); + else + printf("Received %u byte payload; canid 0x%lx\n", framein.can_dlc, framein.can_id); + break; + case CANFD_MTU: + // TODO: Should make an example for CAN FD + break; + case -1: + // Check the signal value on interrupt + //if (EINTR == errno) + // continue; + + // Delay before continuing + sleep(1); + default: + continue; + } + } + + return 0; +} diff --git a/panda/drivers/linux/test/run.sh b/panda/drivers/linux/test/run.sh new file mode 100644 index 0000000..5301719 --- /dev/null +++ b/panda/drivers/linux/test/run.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +sudo ifconfig can0 up +make +./cantest diff --git a/panda/tests/__init__.py b/panda/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/panda/tests/benchmark.py b/panda/tests/benchmark.py new file mode 100644 index 0000000..c2b0c85 --- /dev/null +++ b/panda/tests/benchmark.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +import time +from contextlib import contextmanager + +from panda import Panda, PandaDFU +from panda.tests.hitl.helpers import get_random_can_messages + + +@contextmanager +def print_time(desc): + start = time.perf_counter() + yield + end = time.perf_counter() + print(f"{end - start:.2f}s - {desc}") + + +if __name__ == "__main__": + with print_time("Panda()"): + p = Panda() + + with print_time("PandaDFU.list()"): + PandaDFU.list() + + fxn = [ + 'reset', + 'reconnect', + 'up_to_date', + 'health', + #'flash', + ] + for f in fxn: + with print_time(f"Panda.{f}()"): + getattr(p, f)() + + p.set_can_loopback(True) + + for n in range(6): + msgs = get_random_can_messages(int(10**n)) + with print_time(f"Panda.can_send_many() - {len(msgs)} msgs"): + p.can_send_many(msgs) + + with print_time("Panda.can_recv()"): + m = p.can_recv() diff --git a/panda/tests/black_white_loopback_test.py b/panda/tests/black_white_loopback_test.py new file mode 100644 index 0000000..5b2312b --- /dev/null +++ b/panda/tests/black_white_loopback_test.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python3 + +# Loopback test between black panda (+ harness and power) and white/grey panda +# Tests all buses, including OBD CAN, which is on the same bus as CAN0 in this test. 
+# To be sure, the test should be run with both harness orientations + + +import os +import time +import random +import argparse +from panda import Panda + +def get_test_string(): + return b"test" + os.urandom(10) + +counter = 0 +nonzero_bus_errors = 0 +zero_bus_errors = 0 +content_errors = 0 + +def run_test(sleep_duration): + global counter + + pandas = Panda.list() + print(pandas) + + # make sure two pandas are connected + if len(pandas) != 2: + raise Exception("Connect white/grey and black panda to run this test!") + + # connect + pandas[0] = Panda(pandas[0]) + pandas[1] = Panda(pandas[1]) + + black_panda = None + other_panda = None + + # find out which one is black + if pandas[0].is_black() and not pandas[1].is_black(): + black_panda = pandas[0] + other_panda = pandas[1] + elif not pandas[0].is_black() and pandas[1].is_black(): + black_panda = pandas[1] + other_panda = pandas[0] + else: + raise Exception("Connect white/grey and black panda to run this test!") + + # disable safety modes + black_panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + other_panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + # test health packet + print("black panda health", black_panda.health()) + print("other panda health", other_panda.health()) + + # test black -> other + while True: + test_buses(black_panda, other_panda, True, [(0, False, [0]), (1, False, [1]), (2, False, [2]), (1, True, [0])], sleep_duration) + test_buses(black_panda, other_panda, False, [(0, False, [0]), (1, False, [1]), (2, False, [2]), (0, True, [0, 1])], sleep_duration) + counter += 1 + print("Number of cycles:", counter, "Non-zero bus errors:", nonzero_bus_errors, "Zero bus errors:", zero_bus_errors, "Content errors:", content_errors) + + # Toggle relay + black_panda.set_safety_mode(Panda.SAFETY_SILENT) + time.sleep(1) + black_panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + time.sleep(1) + + +def test_buses(black_panda, other_panda, direction, test_array, sleep_duration): + global nonzero_bus_errors, zero_bus_errors, content_errors + + if direction: + print("***************** TESTING (BLACK --> OTHER) *****************") + else: + print("***************** TESTING (OTHER --> BLACK) *****************") + + for send_bus, obd, recv_buses in test_array: + black_panda.send_heartbeat() + other_panda.send_heartbeat() + print("\ntest can: ", send_bus, " OBD: ", obd) + + # set OBD on black panda + black_panda.set_gmlan(True if obd else None) + + # clear and flush + if direction: + black_panda.can_clear(send_bus) + else: + other_panda.can_clear(send_bus) + + for recv_bus in recv_buses: + if direction: + other_panda.can_clear(recv_bus) + else: + black_panda.can_clear(recv_bus) + + black_panda.can_recv() + other_panda.can_recv() + + # send the characters + at = random.randint(1, 2000) + st = get_test_string()[0:8] + if direction: + black_panda.can_send(at, st, send_bus) + else: + other_panda.can_send(at, st, send_bus) + time.sleep(0.1) + + # check for receive + if direction: + _ = black_panda.can_recv() # can echo + cans_loop = other_panda.can_recv() + else: + _ = other_panda.can_recv() # can echo + cans_loop = black_panda.can_recv() + + loop_buses = [] + for loop in cans_loop: + if (loop[0] != at) or (loop[2] != st): + content_errors += 1 + + print(" Loop on bus", str(loop[3])) + loop_buses.append(loop[3]) + if len(cans_loop) == 0: + print(" No loop") + assert not os.getenv("NOASSERT") + + # test loop buses + recv_buses.sort() + loop_buses.sort() + if(recv_buses != loop_buses): + if len(loop_buses) == 0: + zero_bus_errors += 1 + else: + 
nonzero_bus_errors += 1 + assert not os.getenv("NOASSERT") + else: + print(" TEST PASSED") + + time.sleep(sleep_duration) + print("\n") + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("-n", type=int, help="Number of test iterations to run") + parser.add_argument("-sleep", type=int, help="Sleep time between tests", default=0) + args = parser.parse_args() + + if args.n is None: + while True: + run_test(sleep_duration=args.sleep) + else: + for _ in range(args.n): + run_test(sleep_duration=args.sleep) diff --git a/panda/tests/black_white_relay_endurance.py b/panda/tests/black_white_relay_endurance.py new file mode 100644 index 0000000..db19e72 --- /dev/null +++ b/panda/tests/black_white_relay_endurance.py @@ -0,0 +1,164 @@ +#!/usr/bin/env python3 + +# Loopback test between black panda (+ harness and power) and white/grey panda +# Tests all buses, including OBD CAN, which is on the same bus as CAN0 in this test. +# To be sure, the test should be run with both harness orientations + + +import os +import time +import random +import argparse + +from panda import Panda + +def get_test_string(): + return b"test" + os.urandom(10) + +counter = 0 +nonzero_bus_errors = 0 +zero_bus_errors = 0 +content_errors = 0 + +def run_test(sleep_duration): + global counter + + pandas = Panda.list() + print(pandas) + + # make sure two pandas are connected + if len(pandas) != 2: + raise Exception("Connect white/grey and black panda to run this test!") + + # connect + pandas[0] = Panda(pandas[0]) + pandas[1] = Panda(pandas[1]) + + black_panda = None + other_panda = None + + # find out which one is black + if pandas[0].is_black() and not pandas[1].is_black(): + black_panda = pandas[0] + other_panda = pandas[1] + elif not pandas[0].is_black() and pandas[1].is_black(): + black_panda = pandas[1] + other_panda = pandas[0] + else: + raise Exception("Connect white/grey and black panda to run this test!") + + # disable safety modes + black_panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + other_panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + # test health packet + print("black panda health", black_panda.health()) + print("other panda health", other_panda.health()) + + # test black -> other + start_time = time.time() + temp_start_time = start_time + while True: + test_buses(black_panda, other_panda, True, [(0, False, [0]), (1, False, [1]), (2, False, [2]), (1, True, [0])], sleep_duration) + test_buses(black_panda, other_panda, False, [(0, False, [0]), (1, False, [1]), (2, False, [2]), (0, True, [0, 1])], sleep_duration) + counter += 1 + + runtime = time.time() - start_time + print("Number of cycles:", counter, "Non-zero bus errors:", nonzero_bus_errors, "Zero bus errors:", zero_bus_errors, + "Content errors:", content_errors, "Runtime: ", runtime) + + if (time.time() - temp_start_time) > 3600 * 6: + # Toggle relay + black_panda.set_safety_mode(Panda.SAFETY_SILENT) + time.sleep(1) + black_panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + time.sleep(1) + temp_start_time = time.time() + + +def test_buses(black_panda, other_panda, direction, test_array, sleep_duration): + global nonzero_bus_errors, zero_bus_errors, content_errors + + if direction: + print("***************** TESTING (BLACK --> OTHER) *****************") + else: + print("***************** TESTING (OTHER --> BLACK) *****************") + + for send_bus, obd, recv_buses in test_array: + black_panda.send_heartbeat() + other_panda.send_heartbeat() + print("\ntest can: ", send_bus, " OBD: ", obd) + + # set OBD on black panda + 
black_panda.set_gmlan(True if obd else None) + + # clear and flush + if direction: + black_panda.can_clear(send_bus) + else: + other_panda.can_clear(send_bus) + + for recv_bus in recv_buses: + if direction: + other_panda.can_clear(recv_bus) + else: + black_panda.can_clear(recv_bus) + + black_panda.can_recv() + other_panda.can_recv() + + # send the characters + at = random.randint(1, 2000) + st = get_test_string()[0:8] + if direction: + black_panda.can_send(at, st, send_bus) + else: + other_panda.can_send(at, st, send_bus) + time.sleep(0.1) + + # check for receive + if direction: + _ = black_panda.can_recv() # cans echo + cans_loop = other_panda.can_recv() + else: + _ = other_panda.can_recv() # cans echo + cans_loop = black_panda.can_recv() + + loop_buses = [] + for loop in cans_loop: + if (loop[0] != at) or (loop[2] != st): + content_errors += 1 + + print(" Loop on bus", str(loop[3])) + loop_buses.append(loop[3]) + if len(cans_loop) == 0: + print(" No loop") + assert os.getenv("NOASSERT") + + # test loop buses + recv_buses.sort() + loop_buses.sort() + if(recv_buses != loop_buses): + if len(loop_buses) == 0: + zero_bus_errors += 1 + else: + nonzero_bus_errors += 1 + assert os.getenv("NOASSERT") + else: + print(" TEST PASSED") + + time.sleep(sleep_duration) + print("\n") + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("-n", type=int, help="Number of test iterations to run") + parser.add_argument("-sleep", type=int, help="Sleep time between tests", default=0) + args = parser.parse_args() + + if args.n is None: + while True: + run_test(sleep_duration=args.sleep) + else: + for _ in range(args.n): + run_test(sleep_duration=args.sleep) diff --git a/panda/tests/black_white_relay_test.py b/panda/tests/black_white_relay_test.py new file mode 100644 index 0000000..90b33be --- /dev/null +++ b/panda/tests/black_white_relay_test.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python3 + +# Relay test with loopback between black panda (+ harness and power) and white/grey panda +# Tests the relay switching multiple times / second by looking at the buses on which loop occurs. 
+ + +import os +import time +import random +import argparse + +from panda import Panda + +def get_test_string(): + return b"test" + os.urandom(10) + +counter = 0 +open_errors = 0 +closed_errors = 0 +content_errors = 0 + +def run_test(sleep_duration): + global counter, open_errors, closed_errors + + pandas = Panda.list() + print(pandas) + + # make sure two pandas are connected + if len(pandas) != 2: + raise Exception("Connect white/grey and black panda to run this test!") + + # connect + pandas[0] = Panda(pandas[0]) + pandas[1] = Panda(pandas[1]) + + # find out which one is black + type0 = pandas[0].get_type() + type1 = pandas[1].get_type() + + black_panda = None + other_panda = None + + if type0 == "\x03" and type1 != "\x03": + black_panda = pandas[0] + other_panda = pandas[1] + elif type0 != "\x03" and type1 == "\x03": + black_panda = pandas[1] + other_panda = pandas[0] + else: + raise Exception("Connect white/grey and black panda to run this test!") + + # disable safety modes + black_panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + other_panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + # test health packet + print("black panda health", black_panda.health()) + print("other panda health", other_panda.health()) + + # test black -> other + while True: + # Switch on relay + black_panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + time.sleep(0.05) + + if not test_buses(black_panda, other_panda, (0, False, [0])): + open_errors += 1 + raise Exception("Open error") + + # Switch off relay + black_panda.set_safety_mode(Panda.SAFETY_SILENT) + time.sleep(0.05) + + if not test_buses(black_panda, other_panda, (0, False, [0, 2])): + closed_errors += 1 + raise Exception("Close error") + + counter += 1 + print("Number of cycles:", counter, "Open errors:", open_errors, "Closed errors:", closed_errors, "Content errors:", content_errors) + +def test_buses(black_panda, other_panda, test_obj): + global content_errors + send_bus, obd, recv_buses = test_obj + + black_panda.send_heartbeat() + other_panda.send_heartbeat() + + # Set OBD on send panda + other_panda.set_gmlan(True if obd else None) + + # clear and flush + other_panda.can_clear(send_bus) + + for recv_bus in recv_buses: + black_panda.can_clear(recv_bus) + + black_panda.can_recv() + other_panda.can_recv() + + # send the characters + at = random.randint(1, 2000) + st = get_test_string()[0:8] + other_panda.can_send(at, st, send_bus) + time.sleep(0.05) + + # check for receive + _ = other_panda.can_recv() # can echo + cans_loop = black_panda.can_recv() + + loop_buses = [] + for loop in cans_loop: + if (loop[0] != at) or (loop[2] != st): + content_errors += 1 + loop_buses.append(loop[3]) + + # test loop buses + recv_buses.sort() + loop_buses.sort() + if(recv_buses != loop_buses): + return False + else: + return True + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("-n", type=int, help="Number of test iterations to run") + parser.add_argument("-sleep", type=int, help="Sleep time between tests", default=0) + args = parser.parse_args() + + if args.n is None: + while True: + run_test(sleep_duration=args.sleep) + else: + for _ in range(args.n): + run_test(sleep_duration=args.sleep) diff --git a/panda/tests/bulk_write_test.py b/panda/tests/bulk_write_test.py new file mode 100644 index 0000000..278766a --- /dev/null +++ b/panda/tests/bulk_write_test.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 +import os +import time +import threading +from typing import Any + +from panda import Panda + +JUNGLE = "JUNGLE" in os.environ +if 
JUNGLE: + from panda import PandaJungle + +# The TX buffers on pandas are 0x100 in length. +NUM_MESSAGES_PER_BUS = 10000 + +def flood_tx(panda): + print('Sending!') + msg = b"\xaa" * 4 + packet = [[0xaa, None, msg, 0], [0xaa, None, msg, 1], [0xaa, None, msg, 2]] * NUM_MESSAGES_PER_BUS + panda.can_send_many(packet, timeout=10000) + print(f"Done sending {3*NUM_MESSAGES_PER_BUS} messages!") + +if __name__ == "__main__": + serials = Panda.list() + if JUNGLE: + sender = Panda() + receiver = PandaJungle() + else: + if len(serials) != 2: + raise Exception("Connect two pandas to perform this test!") + sender = Panda(serials[0]) + receiver = Panda(serials[1]) # type: ignore + receiver.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + sender.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + # Start transmission + threading.Thread(target=flood_tx, args=(sender,)).start() + + # Receive as much as we can in a few-second time period + rx: list[Any] = [] + old_len = 0 + start_time = time.time() + while time.time() - start_time < 3 or len(rx) > old_len: + old_len = len(rx) + print(old_len) + rx.extend(receiver.can_recv()) + print(f"Received {len(rx)} messages") diff --git a/panda/tests/can_printer.py b/panda/tests/can_printer.py new file mode 100644 index 0000000..15ce89f --- /dev/null +++ b/panda/tests/can_printer.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +import os +import time +from collections import defaultdict +import binascii + +from panda import Panda + +# fake +def sec_since_boot(): + return time.time() + +def can_printer(): + p = Panda() + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + start = sec_since_boot() + lp = sec_since_boot() + msgs = defaultdict(list) + canbus = int(os.getenv("CAN", "0")) + while True: + can_recv = p.can_recv() + for address, _, dat, src in can_recv: + if src == canbus: + msgs[address].append(dat) + + if sec_since_boot() - lp > 0.1: + dd = chr(27) + "[2J" + dd += "%5.2f\n" % (sec_since_boot() - start) + for k, v in sorted(zip(list(msgs.keys()), [binascii.hexlify(x[-1]) for x in list(msgs.values())], strict=True)): + dd += "%s(%6d) %s\n" % ("%04X(%4d)" % (k, k), len(msgs[k]), v) + print(dd) + lp = sec_since_boot() + +if __name__ == "__main__": + can_printer() diff --git a/panda/tests/canfd/test_canfd.py b/panda/tests/canfd/test_canfd.py new file mode 100644 index 0000000..873bc79 --- /dev/null +++ b/panda/tests/canfd/test_canfd.py @@ -0,0 +1,152 @@ +#!/usr/bin/env python3 +import os +import time +import random +from collections import defaultdict +from panda import Panda, calculate_checksum, DLC_TO_LEN +from panda import PandaJungle +from panda.tests.hitl.helpers import time_many_sends + +H7_HW_TYPES = [Panda.HW_TYPE_RED_PANDA, Panda.HW_TYPE_RED_PANDA_V2] +JUNGLE_SERIAL = os.getenv("JUNGLE") +H7_PANDAS_EXCLUDE = [] # type: ignore +if os.getenv("H7_PANDAS_EXCLUDE"): + H7_PANDAS_EXCLUDE = os.getenv("H7_PANDAS_EXCLUDE").strip().split(" ") # type: ignore + +def panda_reset(): + panda_serials = [] + + panda_jungle = PandaJungle(JUNGLE_SERIAL) + panda_jungle.set_can_silent(True) + panda_jungle.set_panda_power(False) + time.sleep(1) + panda_jungle.set_panda_power(True) + time.sleep(4) + + for serial in Panda.list(): + if serial not in H7_PANDAS_EXCLUDE: + with Panda(serial=serial) as p: + if p.get_type() in H7_HW_TYPES: + p.reset() + panda_serials.append(serial) + + print("test pandas", panda_serials) + assert len(panda_serials) == 2, "Two H7 pandas required" + + return panda_serials + +def panda_init(serial, enable_canfd=False, enable_non_iso=False): + p = Panda(serial=serial) +
p.set_power_save(False) + for bus in range(3): + p.set_can_speed_kbps(bus, 500) + if enable_canfd: + p.set_can_data_speed_kbps(bus, 2000) + if enable_non_iso: + p.set_canfd_non_iso(bus, True) + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + return p + +def test_canfd_throughput(p, p_recv=None): + two_pandas = p_recv is not None + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + if two_pandas: + p_recv.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + # enable output mode + else: + p.set_can_loopback(True) + + tests = [ + [500, 1000, 2000], # speeds + [93, 87, 78], # saturation thresholds + ] + + for i in range(len(tests[0])): + # set bus 0 data speed to the test speed + p.set_can_data_speed_kbps(0, tests[0][i]) + if p_recv is not None: + p_recv.set_can_data_speed_kbps(0, tests[0][i]) + time.sleep(0.05) + + comp_kbps = time_many_sends(p, 0, p_recv=p_recv, msg_count=400, two_pandas=two_pandas, msg_len=64) + + # bit count from https://en.wikipedia.org/wiki/CAN_bus + saturation_pct = (comp_kbps / tests[0][i]) * 100.0 + assert saturation_pct > tests[1][i] + assert saturation_pct < 100 + +def canfd_test(p_send, p_recv): + for n in range(100): + sent_msgs = defaultdict(set) + to_send = [] + for _ in range(200): + bus = random.randrange(3) + for dlc in range(len(DLC_TO_LEN)): + address = random.randrange(1, 1<<29) + data = bytearray(random.getrandbits(8) for _ in range(DLC_TO_LEN[dlc])) + if len(data) >= 2: + data[0] = calculate_checksum(data[1:] + bytes(str(address), encoding="utf-8")) + to_send.append([address, 0, data, bus]) + sent_msgs[bus].add((address, bytes(data))) + + p_send.can_send_many(to_send, timeout=0) + + start_time = time.monotonic() + while (time.monotonic() - start_time < 1) and any(len(x) > 0 for x in sent_msgs.values()): + incoming = p_recv.can_recv() + for msg in incoming: + address, _, data, bus = msg + if len(data) >= 2: + assert calculate_checksum(data[1:] + bytes(str(address), encoding="utf-8")) == data[0] + k = (address, bytes(data)) + assert k in sent_msgs[bus], f"message {k} was never sent on bus {bus}" + sent_msgs[bus].discard(k) + + for bus in range(3): + assert not len(sent_msgs[bus]), f"loop {n}: bus {bus} missing {len(sent_msgs[bus])} messages" + +def setup_test(enable_non_iso=False): + panda_serials = panda_reset() + + p_send = panda_init(panda_serials[0], enable_canfd=False, enable_non_iso=enable_non_iso) + p_recv = panda_init(panda_serials[1], enable_canfd=True, enable_non_iso=enable_non_iso) + + # Check that CAN FD and BRS are turned off on the sending panda + for bus in range(3): + health = p_send.can_health(bus) + assert not health["canfd_enabled"] + assert not health["brs_enabled"] + assert health["canfd_non_iso"] == enable_non_iso + + # Receiving panda sends a dummy CAN FD message that should enable CAN FD on the sender side + for bus in range(3): + p_recv.can_send(0x200, b"dummymessage", bus) + p_recv.can_recv() + p_send.can_recv() + + # Check that all tested buses on the sending panda have switched to CAN FD with BRS + for bus in range(3): + health = p_send.can_health(bus) + assert health["canfd_enabled"] + assert health["brs_enabled"] + assert health["canfd_non_iso"] == enable_non_iso + + return p_send, p_recv + +def main(): + print("[TEST CAN-FD]") + p_send, p_recv = setup_test() + canfd_test(p_send, p_recv) + + print("[TEST CAN-FD non-ISO]") + p_send, p_recv = setup_test(enable_non_iso=True) + canfd_test(p_send, p_recv) + + print("[TEST CAN-FD THROUGHPUT]") + panda_serials = panda_reset() + p_send = panda_init(panda_serials[0], enable_canfd=True) + p_recv = panda_init(panda_serials[1],
enable_canfd=True) + test_canfd_throughput(p_send, p_recv) + +if __name__ == "__main__": + main() diff --git a/panda/tests/check_fw_size.py b/panda/tests/check_fw_size.py new file mode 100644 index 0000000..53681c5 --- /dev/null +++ b/panda/tests/check_fw_size.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 +import subprocess +from collections import defaultdict + + +def check_space(file, mcu): + MCUS = { + "H7": { + ".flash": 1024*1024, # FLASH + ".dtcmram": 128*1024, # DTCMRAM + ".itcmram": 64*1024, # ITCMRAM + ".axisram": 320*1024, # AXI SRAM + ".sram12": 32*1024, # SRAM1(16kb) + SRAM2(16kb) + ".sram4": 16*1024, # SRAM4 + ".backup_sram": 4*1024, # SRAM4 + }, + "F4": { + ".flash": 1024*1024, # FLASH + ".dtcmram": 256*1024, # RAM + ".ram_d1": 64*1024, # RAM2 + }, + } + IGNORE_LIST = [ + ".ARM.attributes", + ".comment", + ".debug_line", + ".debug_info", + ".debug_abbrev", + ".debug_aranges", + ".debug_str", + ".debug_ranges", + ".debug_loc", + ".debug_frame", + ".debug_line_str", + ".debug_rnglists", + ".debug_loclists", + ] + FLASH = [ + ".isr_vector", + ".text", + ".rodata", + ".data" + ] + RAM = [ + ".data", + ".bss", + "._user_heap_stack" # _user_heap_stack considered free? + ] + + result = {} + calcs = defaultdict(int) + + output = str(subprocess.check_output(f"arm-none-eabi-size -x --format=sysv {file}", shell=True), 'utf-8') + + for row in output.split('\n'): + pop = False + line = row.split() + if len(line) == 3 and line[0].startswith('.'): + if line[0] in IGNORE_LIST: + continue + result[line[0]] = [line[1], line[2]] + if line[0] in FLASH: + calcs[".flash"] += int(line[1], 16) + pop = True + if line[0] in RAM: + calcs[".dtcmram"] += int(line[1], 16) + pop = True + if pop: + result.pop(line[0]) + + if len(result): + for line in result: + calcs[line] += int(result[line][0], 16) + + print(f"=======SUMMARY FOR {mcu} FILE {file}=======") + for line in calcs: + if line in MCUS[mcu]: + used_percent = (100 - (MCUS[mcu][line] - calcs[line]) / MCUS[mcu][line] * 100) + print(f"SECTION: {line} size: {MCUS[mcu][line]} USED: {calcs[line]}({used_percent:.2f}%) FREE: {MCUS[mcu][line] - calcs[line]}") + else: + print(line, calcs[line]) + print() + + +if __name__ == "__main__": + # red panda + check_space("../board/obj/bootstub.panda_h7.elf", "H7") + check_space("../board/obj/panda_h7.elf", "H7") + # black panda + check_space("../board/obj/bootstub.panda.elf", "F4") + check_space("../board/obj/panda.elf", "F4") + # jungle v1 + check_space("../board/jungle/obj/bootstub.panda_jungle.elf", "F4") + check_space("../board/jungle/obj/panda_jungle.elf", "F4") + # jungle v2 + check_space("../board/jungle/obj/bootstub.panda_jungle_h7.elf", "H7") + check_space("../board/jungle/obj/panda_jungle_h7.elf", "H7") diff --git a/panda/tests/ci_shell.sh b/panda/tests/ci_shell.sh new file mode 100644 index 0000000..92c0f96 --- /dev/null +++ b/panda/tests/ci_shell.sh @@ -0,0 +1,20 @@ +#!/bin/bash -e + +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)" +OP_ROOT="$DIR/../../" +PANDA_ROOT="$DIR/../" + +if [ -z "$BUILD" ]; then + docker pull docker.io/commaai/panda:latest +else + docker build --cache-from docker.io/commaai/panda:latest -t docker.io/commaai/panda:latest -f $PANDA_ROOT/Dockerfile $PANDA_ROOT +fi + +docker run \ + -it \ + --rm \ + --volume $OP_ROOT:$OP_ROOT \ + --workdir $PWD \ + --env PYTHONPATH=$OP_ROOT \ + docker.io/commaai/panda:latest \ + /bin/bash diff --git a/panda/tests/debug_console.py b/panda/tests/debug_console.py new file mode 100644 index 0000000..8755be1 --- /dev/null +++ 
b/panda/tests/debug_console.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 + +import os +import sys +import time +import select +import codecs + +from panda import Panda + +setcolor = ["\033[1;32;40m", "\033[1;31;40m"] +unsetcolor = "\033[00m" + +port_number = int(os.getenv("PORT", "0")) +claim = os.getenv("CLAIM") is not None +no_color = os.getenv("NO_COLOR") is not None +no_reconnect = os.getenv("NO_RECONNECT") is not None + +if __name__ == "__main__": + while True: + try: + serials = Panda.list() + if os.getenv("SERIAL"): + serials = [x for x in serials if x == os.getenv("SERIAL")] + + pandas = [Panda(x, claim=claim) for x in serials] + decoders = [codecs.getincrementaldecoder('utf-8')() for _ in pandas] + + if not len(pandas): + print("no pandas found") + if no_reconnect: + sys.exit(0) + time.sleep(1) + continue + + if os.getenv("BAUD") is not None: + for panda in pandas: + panda.set_uart_baud(port_number, int(os.getenv("BAUD"))) # type: ignore + + while True: + for i, panda in enumerate(pandas): + while True: + ret = panda.serial_read(port_number) + if len(ret) > 0: + decoded = decoders[i].decode(ret) + if no_color: + sys.stdout.write(decoded) + else: + sys.stdout.write(setcolor[i] + decoded + unsetcolor) + sys.stdout.flush() + else: + break + if select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], []): + ln = sys.stdin.readline() + if claim: + panda.serial_write(port_number, ln) + time.sleep(0.01) + except KeyboardInterrupt: + break + except Exception: + print("panda disconnected!") + time.sleep(0.5) diff --git a/panda/tests/development/register_hashmap_spread.py b/panda/tests/development/register_hashmap_spread.py new file mode 100644 index 0000000..3e20e58 --- /dev/null +++ b/panda/tests/development/register_hashmap_spread.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +import matplotlib.pyplot as plt # pylint: disable=import-error + +HASHING_PRIME = 23 +REGISTER_MAP_SIZE = 0x3FF +BYTES_PER_REG = 4 + +# From ST32F413 datasheet +REGISTER_ADDRESS_REGIONS = [ + (0x40000000, 0x40007FFF), + (0x40010000, 0x400107FF), + (0x40011000, 0x400123FF), + (0x40012C00, 0x40014BFF), + (0x40015000, 0x400153FF), + (0x40015800, 0x40015BFF), + (0x40016000, 0x400167FF), + (0x40020000, 0x40021FFF), + (0x40023000, 0x400233FF), + (0x40023800, 0x40023FFF), + (0x40026000, 0x400267FF), + (0x50000000, 0x5003FFFF), + (0x50060000, 0x500603FF), + (0x50060800, 0x50060BFF), + (0x50060800, 0x50060BFF), + (0xE0000000, 0xE00FFFFF) +] + +def _hash(reg_addr): + return (((reg_addr >> 16) ^ ((((reg_addr + 1) & 0xFFFF) * HASHING_PRIME) & 0xFFFF)) & REGISTER_MAP_SIZE) + +# Calculate hash for each address +hashes = [] +double_hashes = [] +for (start_addr, stop_addr) in REGISTER_ADDRESS_REGIONS: + for addr in range(start_addr, stop_addr + 1, BYTES_PER_REG): + h = _hash(addr) + hashes.append(h) + double_hashes.append(_hash(h)) + +# Make histograms +plt.subplot(2, 1, 1) +plt.hist(hashes, bins=REGISTER_MAP_SIZE) +plt.title("Number of collisions per _hash") +plt.xlabel("Address") + +plt.subplot(2, 1, 2) +plt.hist(double_hashes, bins=REGISTER_MAP_SIZE) +plt.title("Number of collisions per double _hash") +plt.xlabel("Address") +plt.show() diff --git a/panda/tests/echo.py b/panda/tests/echo.py new file mode 100644 index 0000000..90bf4a8 --- /dev/null +++ b/panda/tests/echo.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +from panda import Panda + +# This script is intended to be used in conjunction with the echo_loopback_test.py test script from panda jungle. 
+# It sends a reversed response back for every message received containing b"test". +if __name__ == "__main__": + p = Panda() + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + p.set_power_save(False) + + while True: + incoming = p.can_recv() + for message in incoming: + address, notused, data, bus = message + if b'test' in data: + p.can_send(address, data[::-1], bus) diff --git a/panda/tests/elm_car_simulator.py b/panda/tests/elm_car_simulator.py new file mode 100644 index 0000000..56e825f --- /dev/null +++ b/panda/tests/elm_car_simulator.py @@ -0,0 +1,232 @@ +#!/usr/bin/env python3 + +"""Used to Reverse/Test ELM protocol auto detect and OBD message response without a car.""" + +import os +import sys +import struct +import binascii +import time +import threading +from collections import deque + +from panda import Panda + +def lin_checksum(dat): + return sum(dat) % 0x100 + +class ELMCarSimulator(): + def __init__(self, sn, silent=False, can_kbaud=500, + can=True, can11b=True, can29b=True, + lin=True): + self.__p = Panda(sn if sn else Panda.list()[0]) + self.__on = True + self.__stop = False + self.__silent = silent + + self.__lin_timer = None + self.__lin_active = False + self.__lin_enable = lin + self.__lin_monitor_thread = threading.Thread(target=self.__lin_monitor) + + self.__can_multipart_data = None + self.__can_kbaud = can_kbaud + self.__can_extra_noise_msgs = deque() + self.__can_enable = can + self.__can11b = can11b + self.__can29b = can29b + self.__can_monitor_thread = threading.Thread(target=self.__can_monitor) + + @property + def panda(self): + return self.__p + + def stop(self): + if self.__lin_timer: + self.__lin_timer.cancel() + self.__lin_timeout_handler() + + self.__stop = True + + def join(self): + if self.__lin_monitor_thread.is_alive(): + self.__lin_monitor_thread.join() + if self.__can_monitor_thread.is_alive(): + self.__can_monitor_thread.join() + if self.__p: + print("closing handle") + self.__p.close() + + def set_enable(self, on): + self.__on = on + + def start(self): + self.panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + if self.__lin_enable: + self.__lin_monitor_thread.start() + if self.__can_enable: + self.__can_monitor_thread.start() + + ######################### + # CAN related functions # + ######################### + + def __can_monitor(self): + print("STARTING CAN THREAD") + self.panda.set_can_speed_kbps(0, self.__can_kbaud) + self.panda.can_recv() # Toss whatever was already there + + while not self.__stop: + for address, ts, data, src in self.panda.can_recv(): + if self.__on and src == 0 and len(data) == 8 and data[0] >= 2: + if not self.__silent: + print("Processing CAN message", src, hex(address), binascii.hexlify(data)) + self.__can_process_msg(data[1], data[2], address, ts, data, src) + elif not self.__silent: + print("Rejecting CAN message", src, hex(address), binascii.hexlify(data)) + + def can_mode_11b(self): + self.__can11b = True + self.__can29b = False + + def can_mode_29b(self): + self.__can11b = False + self.__can29b = True + + def can_mode_11b_29b(self): + self.__can11b = True + self.__can29b = True + + def change_can_baud(self, kbaud): + self.__can_kbaud = kbaud + self.panda.set_can_speed_kbps(0, self.__can_kbaud) + + def can_add_extra_noise(self, noise_msg, addr=None): + self.__can_extra_noise_msgs.append((addr, noise_msg)) + + def _can_send(self, addr, msg): + if not self.__silent: + print(" CAN Reply (%x)" % addr, binascii.hexlify(msg)) + self.panda.can_send(addr, msg + b'\x00' * (8 - len(msg)), 0) + if self.__can_extra_noise_msgs: + noise = 
self.__can_extra_noise_msgs.popleft() + self.panda.can_send(noise[0] if noise[0] is not None else addr, + noise[1] + b'\x00' * (8 - len(noise[1])), 0) + + def _can_addr_matches(self, addr): + if self.__can11b and (addr == 0x7DF or (addr & 0x7F8) == 0x7E0): + return True + if self.__can29b and (addr == 0x18db33f1 or (addr & 0x1FFF00FF) == 0x18da00f1): + return True + return False + + def __can_process_msg(self, mode, pid, address, ts, data, src): + if not self.__silent: + print("CAN MSG", binascii.hexlify(data[1:1 + data[0]]), + "Addr:", hex(address), "Mode:", hex(mode)[2:].zfill(2), + "PID:", hex(pid)[2:].zfill(2), "canLen:", len(data), + binascii.hexlify(data)) + + if self._can_addr_matches(address) and len(data) == 8: + outmsg = None + if data[:3] == b'\x30\x00\x00' and len(self.__can_multipart_data): + if not self.__silent: + print("Request for more data") + outaddr = 0x7E8 if address == 0x7DF or address == 0x7E0 else 0x18DAF110 + msgnum = 1 + while(self.__can_multipart_data): + datalen = min(7, len(self.__can_multipart_data)) + msgpiece = struct.pack("B", 0x20 | msgnum) + self.__can_multipart_data[:datalen] + self._can_send(outaddr, msgpiece) + self.__can_multipart_data = self.__can_multipart_data[7:] + msgnum = (msgnum + 1) % 0x10 + time.sleep(0.01) + + else: + outmsg = self._process_obd(mode, pid) + + if outmsg: + outaddr = 0x7E8 if address == 0x7DF or address == 0x7E0 else 0x18DAF110 + + if len(outmsg) <= 5: + self._can_send(outaddr, + struct.pack("BBB", len(outmsg) + 2, 0x40 | data[1], pid) + outmsg) + else: + first_msg_len = min(3, len(outmsg) % 7) + payload_len = len(outmsg) + 3 + msgpiece = struct.pack("BBBBB", 0x10 | ((payload_len >> 8) & 0xF), + payload_len & 0xFF, + 0x40 | data[1], pid, 1) + outmsg[:first_msg_len] + self._can_send(outaddr, msgpiece) + self.__can_multipart_data = outmsg[first_msg_len:] + + ######################### + # General OBD functions # + ######################### + + def _process_obd(self, mode, pid): + if mode == 0x01: # Mode: Show current data + if pid == 0x00: # List supported things + return b"\xff\xff\xff\xfe" # b"\xBE\x1F\xB8\x10" #Bitfield, random features + elif pid == 0x01: # Monitor Status since DTC cleared + return b"\x00\x00\x00\x00" # Bitfield, random features + elif pid == 0x04: # Calculated engine load + return b"\x2f" + elif pid == 0x05: # Engine coolant temperature + return b"\x3c" + elif pid == 0x0B: # Intake manifold absolute pressure + return b"\x90" + elif pid == 0x0C: # Engine RPM + return b"\x1A\xF8" + elif pid == 0x0D: # Vehicle Speed + return b"\x53" + elif pid == 0x10: # MAF air flow rate + return b"\x01\xA0" + elif pid == 0x11: # Throttle Position + return b"\x90" + elif pid == 0x33: # Absolute Barometric Pressure + return b"\x90" + elif mode == 0x09: # Mode: Request vehicle information + if pid == 0x02: # Show VIN + return b"1D4GP00R55B123456" + if pid == 0xFC: # test long multi message. 
Ligned up for LIN responses + return b''.join(struct.pack(">BBH", 0xAA, 0xAA, num + 1) for num in range(80)) + if pid == 0xFD: # test long multi message + parts = (b'\xAA\xAA\xAA' + struct.pack(">I", num) for num in range(80)) + return b'\xAA\xAA\xAA' + b''.join(parts) + if pid == 0xFE: # test very long multi message + parts = (b'\xAA\xAA\xAA' + struct.pack(">I", num) for num in range(584)) + return b'\xAA\xAA\xAA' + b''.join(parts) + b'\xAA' + if pid == 0xFF: + return b'\xAA\x00\x00' + \ + b"".join((b'\xAA' * 5) + struct.pack(">H", num + 1) for num in range(584)) + #return b"\xAA"*100#(0xFFF-3) + + +if __name__ == "__main__": + serial = os.getenv("SERIAL") if os.getenv("SERIAL") else None + kbaud = int(os.getenv("CANKBAUD")) if os.getenv("CANKBAUD") else 500 # type: ignore + bitwidth = int(os.getenv("CANBITWIDTH")) if os.getenv("CANBITWIDTH") else 0 # type: ignore + canenable = bool(int(os.getenv("CANENABLE"))) if os.getenv("CANENABLE") else True # type: ignore + linenable = bool(int(os.getenv("LINENABLE"))) if os.getenv("LINENABLE") else True # type: ignore + sim = ELMCarSimulator(serial, can_kbaud=kbaud, can=canenable, lin=linenable) + if(bitwidth == 0): + sim.can_mode_11b_29b() + if(bitwidth == 11): + sim.can_mode_11b() + if(bitwidth == 29): + sim.can_mode_29b() + + import signal + + def signal_handler(signal, frame): + print('\nShutting down simulator') + sim.stop() + sim.join() + sys.exit(0) + + signal.signal(signal.SIGINT, signal_handler) + + sim.start() + + signal.pause() diff --git a/panda/tests/elm_throughput.py b/panda/tests/elm_throughput.py new file mode 100644 index 0000000..983d4a1 --- /dev/null +++ b/panda/tests/elm_throughput.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 + +import socket +import threading +import select + +class Reader(threading.Thread): + def __init__(self, s, *args, **kwargs): + super().__init__(*args, **kwargs) + self._s = s + self.__stop = False + + def stop(self): + self.__stop = True + + def run(self): + while not self.__stop: + s.recv(1000) + +def read_or_fail(s): + ready = select.select([s], [], [], 4) + assert ready[0], "Socket did not receive data within the timeout duration." + return s.recv(1000) + +def send_msg(s, msg): + s.send(msg) + res = b'' + while not res.endswith(">"): + res += read_or_fail(s) + return res + +if __name__ == "__main__": + s = socket.create_connection(("192.168.0.10", 35000)) + send_msg(s, b"ATZ\r") + send_msg(s, b"ATL1\r") + print(send_msg(s, b"ATE0\r")) + print(send_msg(s, b"ATS0\r")) + print(send_msg(s, b"ATSP6\r")) + + print("\nLOOP\n") + + while True: + print(send_msg(s, b"0100\r")) + print(send_msg(s, b"010d\r")) diff --git a/panda/tests/fan/fan_test.py b/panda/tests/fan/fan_test.py new file mode 100644 index 0000000..36a1171 --- /dev/null +++ b/panda/tests/fan/fan_test.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +import time + +from panda import Panda + +if __name__ == "__main__": + p = Panda() + power = 0 + while True: + p.set_fan_power(power) + time.sleep(5) + print("Power: ", power, "RPM:", str(p.get_fan_rpm()), "Expected:", int(6500 * power / 100)) + power += 10 + power %= 110 diff --git a/panda/tests/fan/fan_tuning.py b/panda/tests/fan/fan_tuning.py new file mode 100644 index 0000000..2bdfab7 --- /dev/null +++ b/panda/tests/fan/fan_tuning.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 +import json +import time +import threading + +from panda import Panda + +def drain_serial(p): + ret = [] + while True: + d = p.serial_read(0) + if len(d) == 0: + break + ret.append(d) + return ret + + +fan_cmd = 0. 
+ +def logger(event): + # requires a build with DEBUG_FAN + with Panda(claim=False) as p, open('/tmp/fan_log', 'w') as f: + power = None + target_rpm = None + stall_count = None + rpm_fast = None + t = time.monotonic() + + drain_serial(p) + while not event.is_set(): + p.set_fan_power(fan_cmd) + + for l in drain_serial(p)[::-1]: + ns = l.decode('utf8').strip().split(' ') + if len(ns) == 4: + target_rpm, rpm_fast, power, stall_count = (int(n, 16) for n in ns) + break + + dat = { + 't': time.monotonic() - t, + 'cmd_power': fan_cmd, + 'pwm_power': power, + 'target_rpm': target_rpm, + 'rpm_fast': rpm_fast, + 'rpm': p.get_fan_rpm(), + 'stall_counter': stall_count, + 'total_stall_count': p.health()['fan_stall_count'], + } + f.write(json.dumps(dat) + '\n') + time.sleep(1/16.) + p.set_fan_power(0) + +def get_overshoot_rpm(p, power): + global fan_cmd + + # make sure the fan is stopped completely + fan_cmd = 0. + while p.get_fan_rpm() > 100: + time.sleep(0.1) + time.sleep(3) + + # set it to 30% power to mimic going onroad + fan_cmd = power + max_rpm = 0 + max_power = 0 + for _ in range(70): + max_rpm = max(max_rpm, p.get_fan_rpm()) + max_power = max(max_power, p.health()['fan_power']) + time.sleep(0.1) + + # tolerate 10% overshoot + expected_rpm = Panda.MAX_FAN_RPMs[bytes(p.get_type())] * power / 100 + overshoot = (max_rpm / expected_rpm) - 1 + + return overshoot, max_rpm, max_power + + +if __name__ == "__main__": + event = threading.Event() + threading.Thread(target=logger, args=(event, )).start() + + try: + p = Panda() + for power in range(10, 101, 10): + overshoot, max_rpm, max_power = get_overshoot_rpm(p, power) + print(f"Fan power {power}%: overshoot {overshoot:.2%}, Max RPM {max_rpm}, Max power {max_power}%") + finally: + event.set() diff --git a/panda/tests/get_version.py b/panda/tests/get_version.py new file mode 100644 index 0000000..a013812 --- /dev/null +++ b/panda/tests/get_version.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python3 +from panda import Panda + +if __name__ == "__main__": + for p in Panda.list(): + pp = Panda(p) + print(f"{pp.get_serial()[0]}: {pp.get_version()}") diff --git a/panda/tests/gmbitbang/recv.py b/panda/tests/gmbitbang/recv.py new file mode 100644 index 0000000..8dc594d --- /dev/null +++ b/panda/tests/gmbitbang/recv.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 + +from panda import Panda + +if __name__ == "__main__": + p = Panda() + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + p.set_gmlan(bus=2) + #p.can_send(0xaaa, b"\x00\x00", bus=3) + last_add: int | None = None + while True: + ret = p.can_recv() + if len(ret) > 0: + add = ret[0][0] + if last_add is not None and add != last_add + 1: + print("MISS: ", last_add, add) + last_add = add + print(ret) diff --git a/panda/tests/gmbitbang/rigol.py b/panda/tests/gmbitbang/rigol.py new file mode 100644 index 0000000..818df74 --- /dev/null +++ b/panda/tests/gmbitbang/rigol.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# pylint: skip-file +# type: ignore +import numpy as np +import visa +import matplotlib.pyplot as plt + +resources = visa.ResourceManager() +print(resources.list_resources()) + +scope = resources.open_resource('USB0::0x1AB1::0x04CE::DS1ZA184652242::INSTR', timeout=2000, chunk_size=1024000) +print(scope.query('*IDN?').strip()) + +#voltscale = scope.ask_for_values(':CHAN1:SCAL?')[0] +#voltoffset = scope.ask_for_values(":CHAN1:OFFS?")[0] + +#scope.write(":STOP") +scope.write(":WAV:POIN:MODE RAW") +scope.write(":WAV:DATA? 
CHAN1")[10:] +rawdata = scope.read_raw() +data = np.frombuffer(rawdata, 'B') +print(data.shape) + +s1 = data[0:650] +s2 = data[650:] +s1i = np.argmax(s1 > 100) +s2i = np.argmax(s2 > 100) +s1 = s1[s1i:] +s2 = s2[s2i:] + +plt.plot(s1) +plt.plot(s2) +plt.show() +#data = (data - 130.0 - voltoffset/voltscale*25) / 25 * voltscale + +print(data) diff --git a/panda/tests/gmbitbang/test.py b/panda/tests/gmbitbang/test.py new file mode 100644 index 0000000..b804113 --- /dev/null +++ b/panda/tests/gmbitbang/test.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +import time +from panda import Panda + +p1 = Panda('380016000551363338383037') +p2 = Panda('430026000951363338383037') + +# this is a test, no safety +p1.set_safety_mode(Panda.SAFETY_ALLOUTPUT) +p2.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + +# get versions +print(p1.get_version()) +print(p2.get_version()) + +# this sets bus 2 to actually be GMLAN +p2.set_gmlan(bus=2) + +# send w bitbang then without +#iden = 123 +iden = 18000 +#dat = "\x01\x02" +dat = "\x01\x02\x03\x04\x05\x06\x07\x08" +while 1: + iden += 1 + p1.set_gmlan(bus=None) + p1.can_send(iden, dat, bus=3) + #p1.set_gmlan(bus=2) + #p1.can_send(iden, dat, bus=3) + time.sleep(0.01) + print(p2.can_recv()) + #exit(0) diff --git a/panda/tests/gmbitbang/test_one.py b/panda/tests/gmbitbang/test_one.py new file mode 100644 index 0000000..981edc5 --- /dev/null +++ b/panda/tests/gmbitbang/test_one.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 +import time +from panda import Panda + +p = Panda() +p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + +# hack anything on bus +p.set_gmlan(bus=2) +time.sleep(0.1) +while len(p.can_recv()) > 0: + print("clearing") + time.sleep(0.1) +print("cleared") +p.set_gmlan(bus=None) + +iden = 18000 +dat = "\x01\x02\x03\x04\x05\x06\x07\x08" +while 1: + iden += 1 + p.can_send(iden, dat, bus=3) + time.sleep(0.01) diff --git a/panda/tests/gmbitbang/test_packer.c b/panda/tests/gmbitbang/test_packer.c new file mode 100644 index 0000000..63c0131 --- /dev/null +++ b/panda/tests/gmbitbang/test_packer.c @@ -0,0 +1,28 @@ +#include +#include + +#define CANPACKET_DATA_SIZE_MAX 8 +#include "../../board/can_definitions.h" + +#include "../../board/drivers/canbitbang.h" + +int main() { + char out[300]; + CANPacket_t to_bang = {0}; + to_bang.addr = 20 << 18; + to_bang.data_len_code = 1; + to_bang.data[0] = 1; + + int len = get_bit_message(out, &to_bang); + printf("T:"); + for (int i = 0; i < len; i++) { + printf("%d", out[i]); + } + printf("\n"); + printf("R:0000010010100000100010000010011110111010100111111111111111"); + printf("\n"); + return 0; +} + + + diff --git a/panda/tests/gmlan_harness_test.py b/panda/tests/gmlan_harness_test.py new file mode 100644 index 0000000..950918c --- /dev/null +++ b/panda/tests/gmlan_harness_test.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 + +import time + +from panda import Panda + +WHITE_GMLAN_BUS = 3 +OTHER_GMLAN_BUS = 1 + +def set_gmlan(p): + if p.get_type() == Panda.HW_TYPE_WHITE_PANDA: + p.set_gmlan(2) + else: + p.set_obd(True) + +def set_speed_kbps(p, speed): + if p.get_type() == Panda.HW_TYPE_WHITE_PANDA: + p.set_can_speed_kbps(WHITE_GMLAN_BUS, speed) + else: + p.set_can_speed_kbps(OTHER_GMLAN_BUS, speed) + +def send(p, id_, msg): + if p.get_type() == Panda.HW_TYPE_WHITE_PANDA: + p.can_send(id_, msg, WHITE_GMLAN_BUS) + else: + p.can_send(id_, msg, OTHER_GMLAN_BUS) + +if __name__ == "__main__": + pl = Panda.list() + assert(len(pl) == 2) + p0 = Panda(pl[1]) + p1 = Panda(pl[0]) + + p0.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + 
p1.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + print("0: ", p0.get_type()) + print("1: ", p1.get_type()) + + set_gmlan(p0) + set_gmlan(p1) + + p0.can_clear(0xFFFF) + p1.can_clear(0xFFFF) + + try: + loops = 0 + while True: + for speed in [33.3, 83.3]: + set_speed_kbps(p0, speed) + set_speed_kbps(p1, speed) + p0.can_clear(0xFFFF) + p1.can_clear(0xFFFF) + + print(f"Speed: {speed}") + time.sleep(0.1) + + print("Send 1 -> 0") + send(p1, 1, b"1to0:" + bytes(str(loops%100), "utf-8")) + time.sleep(0.05) + rx = list(filter(lambda x: x[3] < 128, p0.can_recv())) + print(rx) + assert(len(rx) == 1) + + print("Send 0 -> 1") + send(p0, 1, b"0to1:" + bytes(str(loops%100), "utf-8")) + time.sleep(0.05) + rx = list(filter(lambda x: x[3] < 128, p1.can_recv())) + print(rx) + assert(len(rx) == 1) + + time.sleep(0.5) + + + loops += 1 + print(f"Completed {loops} loops") + except Exception: + print("Test failed somehow. Did you power the black panda using the GMLAN harness?") diff --git a/panda/tests/health_test.py b/panda/tests/health_test.py new file mode 100644 index 0000000..1195c2d --- /dev/null +++ b/panda/tests/health_test.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 +import time +from panda import Panda + +if __name__ == "__main__": + i = 0 + pi = 0 + + panda = Panda() + while True: + st = time.monotonic() + while time.monotonic() - st < 1: + panda.health() + i += 1 + print(i, panda.health(), "\n") + print(f"Speed: {i - pi}Hz") + pi = i + diff --git a/panda/tests/hitl/1_program.py b/panda/tests/hitl/1_program.py new file mode 100644 index 0000000..6a5087f --- /dev/null +++ b/panda/tests/hitl/1_program.py @@ -0,0 +1,103 @@ +import os +import time +import pytest + +from panda import Panda, PandaDFU, McuType, BASEDIR + + +def check_signature(p): + assert not p.bootstub, "Flashed firmware not booting. Stuck in bootstub." 
+ assert p.up_to_date() + + +def test_dfu(p): + app_mcu_type = p.get_mcu_type() + dfu_serial = p.get_dfu_serial() + + p.reset(enter_bootstub=True) + p.reset(enter_bootloader=True) + assert Panda.wait_for_dfu(dfu_serial, timeout=19), "failed to enter DFU" + + dfu = PandaDFU(dfu_serial) + assert dfu.get_mcu_type() == app_mcu_type + + assert dfu_serial in PandaDFU.list() + + dfu._handle.clear_status() + dfu.reset() + p.reconnect() + +# TODO: make more comprehensive bootstub tests and run on a few production ones + current +# TODO: also test release-signed app +@pytest.mark.timeout(30) +def test_known_bootstub(p): + """ + Test that compiled app can work with known production bootstub + """ + known_bootstubs = { + # covers the two cases listed in Panda.connect + McuType.F4: [ + # case A - no bcdDevice or panda type, has to assume F4 + "bootstub_f4_first_dos_production.panda.bin", + + # case B - just bcdDevice + "bootstub_f4_only_bcd.panda.bin", + ], + McuType.H7: ["bootstub.panda_h7.bin"], + } + + for kb in known_bootstubs[p.get_mcu_type()]: + app_ids = (p.get_mcu_type(), p.get_usb_serial()) + assert None not in app_ids + + p.reset(enter_bootstub=True) + p.reset(enter_bootloader=True) + + dfu_serial = p.get_dfu_serial() + assert Panda.wait_for_dfu(dfu_serial, timeout=30) + + dfu = PandaDFU(dfu_serial) + with open(os.path.join(BASEDIR, "tests/hitl/known_bootstub", kb), "rb") as f: + code = f.read() + + dfu.program_bootstub(code) + dfu.reset() + + p.connect(claim=False, wait=True) + + # check for MCU or serial mismatch + with Panda(p._serial, claim=False) as np: + bootstub_ids = (np.get_mcu_type(), np.get_usb_serial()) + assert app_ids == bootstub_ids + + # ensure we can flash app and it jumps to app + p.flash() + check_signature(p) + assert not p.bootstub + +@pytest.mark.timeout(25) +def test_recover(p): + assert p.recover(timeout=30) + check_signature(p) + +@pytest.mark.timeout(25) +def test_flash(p): + # test flash from bootstub + serial = p._serial + assert serial is not None + p.reset(enter_bootstub=True) + p.close() + time.sleep(2) + + with Panda(serial) as np: + assert np.bootstub + assert np._serial == serial + np.flash() + + p.reconnect() + p.reset() + check_signature(p) + + # test flash from app + p.flash() + check_signature(p) diff --git a/panda/tests/hitl/2_health.py b/panda/tests/hitl/2_health.py new file mode 100644 index 0000000..acb993f --- /dev/null +++ b/panda/tests/hitl/2_health.py @@ -0,0 +1,125 @@ +import time +import pytest + +from panda import Panda +from panda import PandaJungle +from panda.tests.hitl.conftest import PandaGroup + + +def test_ignition(p, panda_jungle): + # Set harness orientation to #2, since the ignition line is on the wrong SBU bus :/ + panda_jungle.set_harness_orientation(PandaJungle.HARNESS_ORIENTATION_2) + p.reset() + + for ign in (True, False): + panda_jungle.set_ignition(ign) + time.sleep(0.1) + assert p.health()['ignition_line'] == ign + + +@pytest.mark.test_panda_types(PandaGroup.GEN2) +def test_harness_status(p, panda_jungle): + flipped = None + for ignition in [True, False]: + for orientation in [Panda.HARNESS_STATUS_NC, Panda.HARNESS_STATUS_NORMAL, Panda.HARNESS_STATUS_FLIPPED]: + panda_jungle.set_harness_orientation(orientation) + panda_jungle.set_ignition(ignition) + time.sleep(1) + + health = p.health() + detected_orientation = health['car_harness_status'] + print(f"set: {orientation} detected: {detected_orientation}") + + # Orientation + if orientation == Panda.HARNESS_STATUS_NC: + assert detected_orientation == Panda.HARNESS_STATUS_NC + 
else: + if flipped is None: + flipped = (detected_orientation != orientation) + + if orientation == Panda.HARNESS_STATUS_NORMAL: + assert detected_orientation == (Panda.HARNESS_STATUS_FLIPPED if flipped else Panda.HARNESS_STATUS_NORMAL) + else: + assert detected_orientation == (Panda.HARNESS_STATUS_NORMAL if flipped else Panda.HARNESS_STATUS_FLIPPED) + + # Line ignition + assert health['ignition_line'] == (False if orientation == Panda.HARNESS_STATUS_NC else ignition) + + # SBU voltages + supply_voltage_mV = 1800 if p.get_type() in [Panda.HW_TYPE_TRES, ] else 3300 + + if orientation == Panda.HARNESS_STATUS_NC: + assert health['sbu1_voltage_mV'] > 0.9 * supply_voltage_mV + assert health['sbu2_voltage_mV'] > 0.9 * supply_voltage_mV + else: + relay_line = 'sbu1_voltage_mV' if (detected_orientation == Panda.HARNESS_STATUS_FLIPPED) else 'sbu2_voltage_mV' + ignition_line = 'sbu2_voltage_mV' if (detected_orientation == Panda.HARNESS_STATUS_FLIPPED) else 'sbu1_voltage_mV' + + assert health[relay_line] < 0.1 * supply_voltage_mV + assert health[ignition_line] > health[relay_line] + if ignition: + assert health[ignition_line] < 0.3 * supply_voltage_mV + else: + assert health[ignition_line] > 0.9 * supply_voltage_mV + + + +@pytest.mark.skip_panda_types((Panda.HW_TYPE_DOS, )) +def test_voltage(p): + for _ in range(10): + voltage = p.health()['voltage'] + assert ((voltage > 11000) and (voltage < 13000)) + time.sleep(0.1) + +def test_hw_type(p): + """ + hw type should be same in bootstub as application + """ + + hw_type = p.get_type() + mcu_type = p.get_mcu_type() + assert mcu_type is not None + + app_uid = p.get_uid() + usb_serial = p.get_usb_serial() + assert app_uid == usb_serial + + p.reset(enter_bootstub=True, reconnect=True) + p.close() + time.sleep(3) + with Panda(p.get_usb_serial()) as pp: + assert pp.bootstub + assert pp.get_type() == hw_type, "Bootstub and app hw type mismatch" + assert pp.get_mcu_type() == mcu_type, "Bootstub and app MCU type mismatch" + assert pp.get_uid() == app_uid + +def test_heartbeat(p, panda_jungle): + panda_jungle.set_ignition(True) + # TODO: add more cases here once the tests aren't super slow + p.set_safety_mode(mode=Panda.SAFETY_HYUNDAI, param=Panda.FLAG_HYUNDAI_LONG) + p.send_heartbeat() + assert p.health()['safety_mode'] == Panda.SAFETY_HYUNDAI + assert p.health()['safety_param'] == Panda.FLAG_HYUNDAI_LONG + + # shouldn't do anything once we're in a car safety mode + p.set_heartbeat_disabled() + + time.sleep(6.) + + h = p.health() + assert h['heartbeat_lost'] + assert h['safety_mode'] == Panda.SAFETY_SILENT + assert h['safety_param'] == 0 + assert h['controls_allowed'] == 0 + +def test_microsecond_timer(p): + start_time = p.get_microsecond_timer() + time.sleep(1) + end_time = p.get_microsecond_timer() + + # account for uint32 overflow + if end_time < start_time: + end_time += 2**32 + + time_diff = (end_time - start_time) / 1e6 + assert 0.98 < time_diff < 1.02, f"Timer not running at the correct speed! 
(got {time_diff:.2f}s instead of 1.0s)" diff --git a/panda/tests/hitl/3_usb_to_can.py b/panda/tests/hitl/3_usb_to_can.py new file mode 100644 index 0000000..9321eb4 --- /dev/null +++ b/panda/tests/hitl/3_usb_to_can.py @@ -0,0 +1,127 @@ +import time +import pytest +from flaky import flaky + +from panda import Panda +from panda.tests.hitl.conftest import SPEED_NORMAL, SPEED_GMLAN, PandaGroup +from panda.tests.hitl.helpers import time_many_sends + +def test_can_loopback(p): + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + p.set_can_loopback(True) + + for bus in (0, 1, 2): + # set bus 0 speed to 5000 + p.set_can_speed_kbps(bus, 500) + + # send a message on bus 0 + p.can_send(0x1aa, b"message", bus) + + # confirm receive both on loopback and send receipt + time.sleep(0.05) + r = p.can_recv() + sr = [x for x in r if x[3] == 0x80 | bus] + lb = [x for x in r if x[3] == bus] + assert len(sr) == 1 + assert len(lb) == 1 + + # confirm data is correct + assert 0x1aa == sr[0][0] == lb[0][0] + assert b"message" == sr[0][2] == lb[0][2] + +def test_reliability(p): + MSG_COUNT = 100 + + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + p.set_can_loopback(True) + p.set_can_speed_kbps(0, 1000) + + addrs = list(range(100, 100 + MSG_COUNT)) + ts = [(j, 0, b"\xaa" * 8, 0) for j in addrs] + + for _ in range(100): + st = time.monotonic() + + p.can_send_many(ts) + + r = [] + while len(r) < 200 and (time.monotonic() - st) < 0.5: + r.extend(p.can_recv()) + + sent_echo = [x for x in r if x[3] == 0x80] + loopback_resp = [x for x in r if x[3] == 0] + + assert sorted([x[0] for x in loopback_resp]) == addrs + assert sorted([x[0] for x in sent_echo]) == addrs + assert len(r) == 200 + + # take sub 20ms + et = (time.monotonic() - st) * 1000.0 + assert et < 20 + +@flaky(max_runs=6, min_passes=1) +def test_throughput(p): + # enable output mode + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + # enable CAN loopback mode + p.set_can_loopback(True) + + for speed in [10, 20, 50, 100, 125, 250, 500, 1000]: + # set bus 0 speed to speed + p.set_can_speed_kbps(0, speed) + time.sleep(0.05) + + comp_kbps = time_many_sends(p, 0) + + # bit count from https://en.wikipedia.org/wiki/CAN_bus + saturation_pct = (comp_kbps / speed) * 100.0 + assert saturation_pct > 80 + assert saturation_pct < 100 + + print("loopback 100 messages at speed %d, comp speed is %.2f, percent %.2f" % (speed, comp_kbps, saturation_pct)) + +@pytest.mark.test_panda_types(PandaGroup.GMLAN) +def test_gmlan(p): + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + p.set_can_loopback(True) + + # set gmlan on CAN2 + for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3, Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]: + p.set_gmlan(bus) + comp_kbps_gmlan = time_many_sends(p, 3) + assert comp_kbps_gmlan > (0.8 * SPEED_GMLAN) + assert comp_kbps_gmlan < (1.0 * SPEED_GMLAN) + + p.set_gmlan(None) + comp_kbps_normal = time_many_sends(p, bus) + assert comp_kbps_normal > (0.8 * SPEED_NORMAL) + assert comp_kbps_normal < (1.0 * SPEED_NORMAL) + + print("%d: %.2f kbps vs %.2f kbps" % (bus, comp_kbps_gmlan, comp_kbps_normal)) + +@pytest.mark.test_panda_types(PandaGroup.GMLAN) +def test_gmlan_bad_toggle(p): + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + p.set_can_loopback(True) + + # GMLAN_CAN2 + for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]: + p.set_gmlan(bus) + comp_kbps_gmlan = time_many_sends(p, 3) + assert comp_kbps_gmlan > (0.6 * SPEED_GMLAN) + assert comp_kbps_gmlan < (1.0 * SPEED_GMLAN) + + # normal + for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]: + p.set_gmlan(None) + comp_kbps_normal = time_many_sends(p, bus) + 
assert comp_kbps_normal > (0.6 * SPEED_NORMAL) + assert comp_kbps_normal < (1.0 * SPEED_NORMAL) + + +# this will fail if you have hardware serial connected +def test_serial_debug(p): + _ = p.serial_read(Panda.SERIAL_DEBUG) # junk + p.call_control_api(0x01) + assert p.serial_read(Panda.SERIAL_DEBUG).startswith(b"NO HANDLER") diff --git a/panda/tests/hitl/4_can_loopback.py b/panda/tests/hitl/4_can_loopback.py new file mode 100644 index 0000000..a7e1aea --- /dev/null +++ b/panda/tests/hitl/4_can_loopback.py @@ -0,0 +1,202 @@ +import os +import time +import pytest +import random +import threading +from flaky import flaky +from collections import defaultdict + +from panda import Panda +from panda.tests.hitl.conftest import PandaGroup +from panda.tests.hitl.helpers import time_many_sends, get_random_can_messages, clear_can_buffers + +@flaky(max_runs=3, min_passes=1) +@pytest.mark.timeout(35) +def test_send_recv(p, panda_jungle): + def test(p_send, p_recv): + for bus in (0, 1, 2): + for speed in (10, 20, 50, 100, 125, 250, 500, 1000): + clear_can_buffers(p_send, speed) + clear_can_buffers(p_recv, speed) + + comp_kbps = time_many_sends(p_send, bus, p_recv, two_pandas=True) + + saturation_pct = (comp_kbps / speed) * 100.0 + assert 80 < saturation_pct < 100 + + print(f"two pandas bus {bus}, 100 messages at speed {speed:4d}, comp speed is {comp_kbps:7.2f}, {saturation_pct:6.2f}%") + + # Run tests in both directions + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + test(p, panda_jungle) + test(panda_jungle, p) + + +@flaky(max_runs=6, min_passes=1) +@pytest.mark.timeout(30) +def test_latency(p, panda_jungle): + def test(p_send, p_recv): + for bus in (0, 1, 2): + for speed in (10, 20, 50, 100, 125, 250, 500, 1000): + clear_can_buffers(p_send, speed) + clear_can_buffers(p_recv, speed) + + latencies = [] + comp_kbps_list = [] + saturation_pcts = [] + + num_messages = 100 + + for _ in range(num_messages): + st = time.monotonic() + p_send.can_send(0x1ab, b"message", bus) + r = [] + while len(r) < 1 and (time.monotonic() - st) < 5: + r = p_recv.can_recv() + et = time.monotonic() + r_echo = [] + while len(r_echo) < 1 and (time.monotonic() - st) < 10: + r_echo = p_send.can_recv() + + if len(r) == 0 or len(r_echo) == 0: + print(f"r: {r}, r_echo: {r_echo}") + + assert len(r) == 1 + assert len(r_echo) == 1 + + et = (et - st) * 1000.0 + comp_kbps = (1 + 11 + 1 + 1 + 1 + 4 + 8 * 8 + 15 + 1 + 1 + 1 + 7) / et + latency = et - ((1 + 11 + 1 + 1 + 1 + 4 + 8 * 8 + 15 + 1 + 1 + 1 + 7) / speed) + + assert latency < 5.0 + + saturation_pct = (comp_kbps / speed) * 100.0 + latencies.append(latency) + comp_kbps_list.append(comp_kbps) + saturation_pcts.append(saturation_pct) + + average_latency = sum(latencies) / num_messages + assert average_latency < 1.0 + average_comp_kbps = sum(comp_kbps_list) / num_messages + average_saturation_pct = sum(saturation_pcts) / num_messages + + print("two pandas bus {}, {} message average at speed {:4d}, latency is {:5.3f}ms, comp speed is {:7.2f}, percent {:6.2f}" + .format(bus, num_messages, speed, average_latency, average_comp_kbps, average_saturation_pct)) + + # Run tests in both directions + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + test(p, panda_jungle) + test(panda_jungle, p) + + +@pytest.mark.panda_expect_can_error +@pytest.mark.test_panda_types(PandaGroup.GEN2) +def test_gen2_loopback(p, panda_jungle): + def test(p_send, p_recv, address=None): + for bus in range(4): + obd = False + if bus == 3: + obd = True + bus = 1 + + # Clear buses + clear_can_buffers(p_send) + 
clear_can_buffers(p_recv) + + # Send a random string + addr = address if address else random.randint(1, 2000) + string = b"test" + os.urandom(4) + p_send.set_obd(obd) + p_recv.set_obd(obd) + time.sleep(0.2) + p_send.can_send(addr, string, bus) + time.sleep(0.2) + + content = p_recv.can_recv() + + # Check amount of messages + assert len(content) == 1 + + # Check content + assert content[0][0] == addr and content[0][2] == string + + # Check bus + assert content[0][3] == bus + + print("Bus:", bus, "address:", addr, "OBD:", obd, "OK") + + # Run tests in both directions + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + test(p, panda_jungle) + test(panda_jungle, p) + + # Test extended frame address with ELM327 mode + p.set_safety_mode(Panda.SAFETY_ELM327) + test(p, panda_jungle, 0x18DB33F1) + test(panda_jungle, p, 0x18DB33F1) + + # TODO: why it's not being reset by fixtures reinit? + p.set_obd(False) + panda_jungle.set_obd(False) + +def test_bulk_write(p, panda_jungle): + # The TX buffers on pandas is 0x100 in length. + NUM_MESSAGES_PER_BUS = 10000 + + def flood_tx(panda): + print('Sending!') + msg = b"\xaa" * 8 + packet = [] + # start with many messages on a single bus (higher contention for single TX ring buffer) + packet += [[0xaa, None, msg, 0]] * NUM_MESSAGES_PER_BUS + # end with many messages on multiple buses + packet += [[0xaa, None, msg, 0], [0xaa, None, msg, 1], [0xaa, None, msg, 2]] * NUM_MESSAGES_PER_BUS + + # Disable timeout + panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + panda.can_send_many(packet, timeout=0) + print(f"Done sending {4 * NUM_MESSAGES_PER_BUS} messages!", time.monotonic()) + print(panda.health()) + + # Start transmisson + threading.Thread(target=flood_tx, args=(p,)).start() + + # Receive as much as we can in a few second time period + rx = [] + old_len = 0 + start_time = time.monotonic() + while time.monotonic() - start_time < 5 or len(rx) > old_len: + old_len = len(rx) + rx.extend(panda_jungle.can_recv()) + print(f"Received {len(rx)} messages", time.monotonic()) + + # All messages should have been received + if len(rx) != 4 * NUM_MESSAGES_PER_BUS: + raise Exception("Did not receive all messages!") + +def test_message_integrity(p): + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + p.set_can_loopback(True) + for i in range(250): + sent_msgs = defaultdict(set) + for _ in range(random.randrange(10)): + to_send = get_random_can_messages(random.randrange(100)) + for m in to_send: + sent_msgs[m[3]].add((m[0], m[2])) + p.can_send_many(to_send, timeout=0) + + start_time = time.monotonic() + while time.monotonic() - start_time < 2 and any(len(sent_msgs[bus]) for bus in range(3)): + recvd = p.can_recv() + for msg in recvd: + if msg[3] >= 128: + k = (msg[0], bytes(msg[2])) + bus = msg[3]-128 + assert k in sent_msgs[bus], f"message {k} was never sent on bus {bus}" + sent_msgs[msg[3]-128].discard(k) + + # if a set isn't empty, messages got dropped + for bus in range(3): + assert not len(sent_msgs[bus]), f"loop {i}: bus {bus} missing {len(sent_msgs[bus])} messages" + + print("Got all messages intact") diff --git a/panda/tests/hitl/5_spi.py b/panda/tests/hitl/5_spi.py new file mode 100644 index 0000000..f9a4e28 --- /dev/null +++ b/panda/tests/hitl/5_spi.py @@ -0,0 +1,103 @@ +import binascii +import pytest +import random +from unittest.mock import patch + +from panda import Panda, PandaDFU +from panda.python.spi import SpiDevice, PandaProtocolMismatch, PandaSpiNackResponse + +pytestmark = [ + pytest.mark.test_panda_types((Panda.HW_TYPE_TRES, )) +] + +@pytest.mark.skip("doesn't work, 
bootloader seems to ignore commands once it sees junk") +def test_dfu_with_spam(p): + dfu_serial = p.get_dfu_serial() + + # enter DFU + p.reset(enter_bootstub=True) + p.reset(enter_bootloader=True) + assert Panda.wait_for_dfu(dfu_serial, timeout=19), "failed to enter DFU" + + # send junk + d = SpiDevice() + for _ in range(9): + with d.acquire() as spi: + dat = [random.randint(-1, 255) for _ in range(random.randint(1, 100))] + spi.xfer(dat) + + # should still show up + assert dfu_serial in PandaDFU.list() + +class TestSpi: + def _ping(self, mocker, panda): + # should work with no retries + spy = mocker.spy(panda._handle, '_wait_for_ack') + panda.health() + assert spy.call_count == 2 + mocker.stop(spy) + + def test_protocol_version_check(self, p): + for bootstub in (False, True): + p.reset(enter_bootstub=bootstub) + with patch('panda.python.spi.PandaSpiHandle.PROTOCOL_VERSION', return_value="abc"): + # list should still work with wrong version + assert p._serial in Panda.list() + + # connect but raise protocol error + with pytest.raises(PandaProtocolMismatch): + Panda(p._serial) + + def test_protocol_version_data(self, p): + for bootstub in (False, True): + p.reset(enter_bootstub=bootstub) + v = p._handle.get_protocol_version() + + uid = binascii.hexlify(v[:12]).decode() + assert uid == p.get_uid() + + hwtype = v[12] + assert hwtype == ord(p.get_type()) + + bstub = v[13] + assert bstub == (0xEE if bootstub else 0xCC) + + def test_all_comm_types(self, mocker, p): + spy = mocker.spy(p._handle, '_wait_for_ack') + + # controlRead + controlWrite + p.health() + p.can_clear(0) + assert spy.call_count == 2*2 + + # bulkRead + bulkWrite + p.can_recv() + p.can_send(0x123, b"somedata", 0) + assert spy.call_count == 2*4 + + def test_bad_header(self, mocker, p): + with patch('panda.python.spi.SYNC', return_value=0): + with pytest.raises(PandaSpiNackResponse): + p._handle.controlRead(Panda.REQUEST_IN, 0xd2, 0, 0, p.HEALTH_STRUCT.size, timeout=50) + self._ping(mocker, p) + + def test_bad_checksum(self, mocker, p): + cnt = p.health()['spi_checksum_error_count'] + with patch('panda.python.spi.PandaSpiHandle._calc_checksum', return_value=0): + with pytest.raises(PandaSpiNackResponse): + p._handle.controlRead(Panda.REQUEST_IN, 0xd2, 0, 0, p.HEALTH_STRUCT.size, timeout=50) + self._ping(mocker, p) + assert (p.health()['spi_checksum_error_count'] - cnt) > 0 + + def test_non_existent_endpoint(self, mocker, p): + for _ in range(10): + ep = random.randint(4, 20) + with pytest.raises(PandaSpiNackResponse): + p._handle.bulkRead(ep, random.randint(1, 1000), timeout=50) + + self._ping(mocker, p) + + with pytest.raises(PandaSpiNackResponse): + p._handle.bulkWrite(ep, b"abc", timeout=50) + + self._ping(mocker, p) diff --git a/panda/tests/hitl/6_safety.py b/panda/tests/hitl/6_safety.py new file mode 100644 index 0000000..2237f53 --- /dev/null +++ b/panda/tests/hitl/6_safety.py @@ -0,0 +1,29 @@ +import time + +from panda import Panda + + +def test_safety_nooutput(p): + p.set_safety_mode(Panda.SAFETY_SILENT) + p.set_can_loopback(True) + + # send a message on bus 0 + p.can_send(0x1aa, b"message", 0) + + # confirm receive nothing + time.sleep(0.05) + r = p.can_recv() + # bus 192 is messages blocked by TX safety hook on bus 0 + assert len([x for x in r if x[3] != 192]) == 0 + assert len([x for x in r if x[3] == 192]) == 1 + + +def test_canfd_safety_modes(p): + # works on all pandas + p.set_safety_mode(Panda.SAFETY_TOYOTA) + assert p.health()['safety_mode'] == Panda.SAFETY_TOYOTA + + # shouldn't be able to set a CAN-FD safety 
mode on non CAN-FD panda + p.set_safety_mode(Panda.SAFETY_HYUNDAI_CANFD) + expected_mode = Panda.SAFETY_HYUNDAI_CANFD if p.get_type() in Panda.H7_DEVICES else Panda.SAFETY_SILENT + assert p.health()['safety_mode'] == expected_mode diff --git a/panda/tests/hitl/7_internal.py b/panda/tests/hitl/7_internal.py new file mode 100644 index 0000000..eb8577f --- /dev/null +++ b/panda/tests/hitl/7_internal.py @@ -0,0 +1,68 @@ +import time +import pytest + +from panda import Panda + +pytestmark = [ + pytest.mark.skip_panda_types(Panda.HW_TYPE_UNO), + pytest.mark.test_panda_types(Panda.INTERNAL_DEVICES) +] + +@pytest.mark.timeout(2*60) +def test_fan_controller(p): + start_health = p.health() + + for power in (30, 50, 80, 100): + p.set_fan_power(0) + while p.get_fan_rpm() > 0: + time.sleep(0.1) + + # wait until fan spins up (and recovers if needed), + # then wait a bit more for the RPM to converge + p.set_fan_power(power) + for _ in range(20): + time.sleep(1) + if p.get_fan_rpm() > 1000: + break + time.sleep(5) + + expected_rpm = Panda.MAX_FAN_RPMs[bytes(p.get_type())] * power / 100 + assert 0.9 * expected_rpm <= p.get_fan_rpm() <= 1.1 * expected_rpm + + # Ensure the stall detection is tested on dos + if p.get_type() == Panda.HW_TYPE_DOS: + stalls = p.health()['fan_stall_count'] - start_health['fan_stall_count'] + assert stalls >= 2 + print("stall count", stalls) + else: + assert p.health()['fan_stall_count'] == 0 + +def test_fan_cooldown(p): + # if the fan cooldown doesn't work, we get high frequency noise on the tach line + # while the rotor spins down. this makes sure it never goes beyond the expected max RPM + p.set_fan_power(100) + time.sleep(3) + p.set_fan_power(0) + for _ in range(5): + assert p.get_fan_rpm() <= 7000 + time.sleep(0.5) + +def test_fan_overshoot(p): + if p.get_type() == Panda.HW_TYPE_DOS: + pytest.skip("panda's fan controller overshoots on the comma three fans that need stall recovery") + + # make sure it's stopped completely + p.set_fan_power(0) + while p.get_fan_rpm() > 0: + time.sleep(0.1) + + # set it to 30% power to mimic going onroad + p.set_fan_power(30) + max_rpm = 0 + for _ in range(50): + max_rpm = max(max_rpm, p.get_fan_rpm()) + time.sleep(0.1) + + # tolerate 10% overshoot + expected_rpm = Panda.MAX_FAN_RPMs[bytes(p.get_type())] * 30 / 100 + assert max_rpm <= 1.1 * expected_rpm, f"Fan overshoot: {(max_rpm / expected_rpm * 100) - 100:.1f}%" diff --git a/panda/tests/hitl/__init__.py b/panda/tests/hitl/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/panda/tests/hitl/conftest.py b/panda/tests/hitl/conftest.py new file mode 100644 index 0000000..b784435 --- /dev/null +++ b/panda/tests/hitl/conftest.py @@ -0,0 +1,225 @@ +import os +import pytest +import concurrent.futures + +from panda import Panda, PandaDFU, PandaJungle +from panda.tests.hitl.helpers import clear_can_buffers + +# needed to get output when using xdist +if "DEBUG" in os.environ: + import sys + sys.stdout = sys.stderr + +SPEED_NORMAL = 500 +SPEED_GMLAN = 33.3 +BUS_SPEEDS = [(0, SPEED_NORMAL), (1, SPEED_NORMAL), (2, SPEED_NORMAL), (3, SPEED_GMLAN)] + + +JUNGLE_SERIAL = os.getenv("PANDAS_JUNGLE") +NO_JUNGLE = os.environ.get("NO_JUNGLE", "0") == "1" +PANDAS_EXCLUDE = os.getenv("PANDAS_EXCLUDE", "").strip().split(" ") +HW_TYPES = os.environ.get("HW_TYPES", None) + +PARALLEL = "PARALLEL" in os.environ +NON_PARALLEL = "NON_PARALLEL" in os.environ +if PARALLEL: + NO_JUNGLE = True + +class PandaGroup: + H7 = (Panda.HW_TYPE_RED_PANDA, Panda.HW_TYPE_RED_PANDA_V2, Panda.HW_TYPE_TRES) + GEN2 = 
(Panda.HW_TYPE_BLACK_PANDA, Panda.HW_TYPE_UNO, Panda.HW_TYPE_DOS) + H7 + GMLAN = (Panda.HW_TYPE_WHITE_PANDA, Panda.HW_TYPE_GREY_PANDA) + + TESTED = (Panda.HW_TYPE_WHITE_PANDA, Panda.HW_TYPE_BLACK_PANDA, Panda.HW_TYPE_RED_PANDA, Panda.HW_TYPE_RED_PANDA_V2, Panda.HW_TYPE_UNO) + +if HW_TYPES is not None: + PandaGroup.TESTED = [bytes([int(x), ]) for x in HW_TYPES.strip().split(",")] # type: ignore + + +# Find all pandas connected +_all_pandas = {} +_panda_jungle = None +def init_all_pandas(): + if not NO_JUNGLE: + global _panda_jungle + _panda_jungle = PandaJungle(JUNGLE_SERIAL) + _panda_jungle.set_panda_power(True) + + for serial in Panda.list(): + if serial not in PANDAS_EXCLUDE: + with Panda(serial=serial, claim=False) as p: + ptype = bytes(p.get_type()) + if ptype in PandaGroup.TESTED: + _all_pandas[serial] = ptype + + # ensure we have all tested panda types + missing_types = set(PandaGroup.TESTED) - set(_all_pandas.values()) + assert len(missing_types) == 0, f"Missing panda types: {missing_types}" + + print(f"{len(_all_pandas)} total pandas") +init_all_pandas() +_all_panda_serials = sorted(_all_pandas.keys()) + + +def init_jungle(): + if _panda_jungle is None: + return + clear_can_buffers(_panda_jungle) + _panda_jungle.set_panda_power(True) + _panda_jungle.set_can_loopback(False) + _panda_jungle.set_obd(False) + _panda_jungle.set_harness_orientation(PandaJungle.HARNESS_ORIENTATION_1) + for bus, speed in BUS_SPEEDS: + _panda_jungle.set_can_speed_kbps(bus, speed) + + # ensure FW hasn't changed + assert _panda_jungle.up_to_date() + + +def pytest_configure(config): + config.addinivalue_line( + "markers", "test_panda_types(name): whitelist a test for specific panda types" + ) + config.addinivalue_line( + "markers", "skip_panda_types(name): blacklist panda types from a test" + ) + config.addinivalue_line( + "markers", "panda_expect_can_error: mark test to ignore CAN health errors" + ) + +@pytest.hookimpl(tryfirst=True) +def pytest_collection_modifyitems(items): + for item in items: + if item.get_closest_marker('timeout') is None: + item.add_marker(pytest.mark.timeout(60)) + + # xdist grouping by panda + serial = item.name.split("serial=")[1].split(",")[0] + assert len(serial) == 24 + item.add_marker(pytest.mark.xdist_group(serial)) + + needs_jungle = "panda_jungle" in item.fixturenames + if PARALLEL and needs_jungle: + item.add_marker(pytest.mark.skip(reason="no jungle tests in PARALLEL mode")) + elif NON_PARALLEL and not needs_jungle: + item.add_marker(pytest.mark.skip(reason="only running jungle tests")) + +def pytest_make_parametrize_id(config, val, argname): + if val in _all_pandas: + # TODO: get nice string instead of int + hw_type = _all_pandas[val][0] + return f"serial={val}, hw_type={hw_type}" + return None + + +@pytest.fixture(name='panda_jungle', scope='function') +def fixture_panda_jungle(request): + init_jungle() + return _panda_jungle + +@pytest.fixture(name='p', scope='function') +def func_fixture_panda(request, module_panda): + p = module_panda + + # Check if test is applicable to this panda + mark = request.node.get_closest_marker('test_panda_types') + if mark: + assert len(mark.args) > 0, "Missing panda types argument in mark" + test_types = mark.args[0] + if _all_pandas[p.get_usb_serial()] not in test_types: + pytest.skip(f"Not applicable, {test_types} pandas only") + + mark = request.node.get_closest_marker('skip_panda_types') + if mark: + assert len(mark.args) > 0, "Missing panda types argument in mark" + skip_types = mark.args[0] + if _all_pandas[p.get_usb_serial()] in 
skip_types: + pytest.skip(f"Not applicable to {skip_types}") + + # TODO: reset is slow (2+ seconds) + p.reset() + + # ensure FW hasn't changed + assert p.up_to_date() + + # Run test + yield p + + # Teardown + + # reconnect + if p.get_dfu_serial() in PandaDFU.list(): + PandaDFU(p.get_dfu_serial()).reset() + p.reconnect() + if not p.connected: + p.reconnect() + if p.bootstub: + p.reset() + + assert not p.bootstub + + # TODO: would be nice to make these common checks in the teardown + # show up as failed tests instead of "errors" + + # Check for faults + assert p.health()['faults'] == 0 + assert p.health()['fault_status'] == 0 + + # Check for SPI errors + #assert p.health()['spi_checksum_error_count'] == 0 + + # Check health of each CAN core after test, normal to fail for test_gen2_loopback on OBD bus, so skipping + mark = request.node.get_closest_marker('panda_expect_can_error') + expect_can_error = mark is not None + if not expect_can_error: + for i in range(3): + can_health = p.can_health(i) + assert can_health['bus_off_cnt'] == 0 + assert can_health['receive_error_cnt'] < 127 + assert can_health['transmit_error_cnt'] < 255 + assert can_health['error_passive'] == 0 + assert can_health['error_warning'] == 0 + assert can_health['total_rx_lost_cnt'] == 0 + assert can_health['total_tx_lost_cnt'] == 0 + assert can_health['total_error_cnt'] == 0 + assert can_health['total_tx_checksum_error_cnt'] == 0 + +@pytest.fixture(name='module_panda', params=_all_panda_serials, scope='module') +def fixture_panda_setup(request): + """ + Clean up all pandas + jungle and return the panda under test. + """ + panda_serial = request.param + + # Initialize jungle + init_jungle() + + # Connect to pandas + def cnnct(s): + if s == panda_serial: + p = Panda(serial=s) + p.reset(reconnect=True) + + p.set_can_loopback(False) + p.set_gmlan(None) + p.set_power_save(False) + for bus, speed in BUS_SPEEDS: + p.set_can_speed_kbps(bus, speed) + clear_can_buffers(p) + p.set_power_save(False) + return p + elif not PARALLEL: + with Panda(serial=s) as p: + p.reset(reconnect=False) + return None + + with concurrent.futures.ThreadPoolExecutor() as exc: + ps = list(exc.map(cnnct, _all_panda_serials, timeout=20)) + pandas = [p for p in ps if p is not None] + + # run test + yield pandas[0] + + # Teardown + for p in pandas: + p.close() diff --git a/panda/tests/hitl/helpers.py b/panda/tests/hitl/helpers.py new file mode 100644 index 0000000..4eee437 --- /dev/null +++ b/panda/tests/hitl/helpers.py @@ -0,0 +1,71 @@ +import time +import random + + +def get_random_can_messages(n): + m = [] + for _ in range(n): + bus = random.randrange(3) + addr = random.randrange(1 << 29) + dat = bytes([random.getrandbits(8) for _ in range(random.randrange(1, 9))]) + m.append([addr, None, dat, bus]) + return m + + +def time_many_sends(p, bus, p_recv=None, msg_count=100, two_pandas=False, msg_len=8): + if p_recv is None: + p_recv = p + if p == p_recv and two_pandas: + raise ValueError("Cannot have two pandas that are the same panda") + + msg_id = random.randint(0x100, 0x200) + to_send = [(msg_id, 0, b"\xaa" * msg_len, bus)] * msg_count + + start_time = time.monotonic() + p.can_send_many(to_send) + r = [] + r_echo = [] + r_len_expected = msg_count if two_pandas else msg_count * 2 + r_echo_len_exected = msg_count if two_pandas else 0 + + while len(r) < r_len_expected and (time.monotonic() - start_time) < 5: + r.extend(p_recv.can_recv()) + end_time = time.monotonic() + if two_pandas: + while len(r_echo) < r_echo_len_exected and (time.monotonic() - start_time) < 
10: + r_echo.extend(p.can_recv()) + + sent_echo = [x for x in r if x[3] == 0x80 | bus and x[0] == msg_id] + sent_echo.extend([x for x in r_echo if x[3] == 0x80 | bus and x[0] == msg_id]) + resp = [x for x in r if x[3] == bus and x[0] == msg_id] + + leftovers = [x for x in r if (x[3] != 0x80 | bus and x[3] != bus) or x[0] != msg_id] + assert len(leftovers) == 0 + + assert len(resp) == msg_count + assert len(sent_echo) == msg_count + + end_time = (end_time - start_time) * 1000.0 + comp_kbps = (1 + 11 + 1 + 1 + 1 + 4 + (msg_len * 8) + 15 + 1 + 1 + 1 + 7) * msg_count / end_time + + return comp_kbps + + +def clear_can_buffers(panda, speed: int | None = None): + if speed is not None: + for bus in range(3): + panda.set_can_speed_kbps(bus, speed) + + # clear tx buffers + for i in range(4): + panda.can_clear(i) + + # clear rx buffers + panda.can_clear(0xFFFF) + r = [1] + st = time.monotonic() + while len(r) > 0: + r = panda.can_recv() + time.sleep(0.05) + if (time.monotonic() - st) > 10: + raise Exception("Unable to clear can buffers for panda ", panda.get_serial()) diff --git a/panda/tests/hitl/known_bootstub/bootstub.panda_h7.bin b/panda/tests/hitl/known_bootstub/bootstub.panda_h7.bin new file mode 100644 index 0000000000000000000000000000000000000000..a6d856bd2c7006ca022f8bb565fcd841451fe442 GIT binary patch literal 16824 zcmch834Bvk_WylLnsiSWpc{~drENeXP0DN96iO*-0hgqukb**JK@mj4HWWk= z(HWKz(YoV8i$!ZF1&h1Rv{q(7P#IAJR4|pod)@N>-}~As<8S`!=lA&s&b{ZJyPdn8 zd(OG9gs{37Gl{1CKLGi|Z=eh%58#7hh$a!B2aE=c1!Mpw0Hy$@0_=c7z@31*0i}R) zz!Jc6z$(C6!1`FC*#_7Qcp9(?w8sFnuU&|r1-uB@52yjW184v=0X_lz9dH708t^ZG z4{!-^1weW3L@WbTXgd@T1sE8|5a2@rDS#URHvuLAvH&@NX@DZYY`{RBmx0rEum9Z( zS{YywU@721z-qu7fP;W~KqFwFje+=sewy(=EVMet>avp{7sZV-MM(&ig`!C&y`0bWq;AjaS)Sy!B} z+oC>0Mt&ucqpwd5Ium-J;kJZfPjW>sfPc)KSoLm>JE z!VYP*jv2Q@diX3qDVo=%ST07C?2y)*q%#T?(i7|;SL-UAnFeuRylUQZB zv4zR-n69-jx^T<~)^;tI9NTg|f7b%P^XCtk!Hq-V<|6QSd_-U811E%;#7AIe!dQvV z=Mi%GdcxUEPgpK^g%;s%Nw14Z zVcR-O_hcK-Y((h{bE%nYud{Mjhh=cT?uuc?&CM>jc!;eQW~59P!dr^6E3(V7CuehI znzmqWU75A)gED@8p|ZQj2)l$q!dJf0DIsV6gx+aO-=L*~9C!7fR(-}v-=viAGr~m` z8-!egImXJhMYrn(uRU72zN{LwrZV2n%_qjA7g(b^M>mmZ7~|9h{(pVhB-J(Jtb_@V#Rd7NaEIb@ z+P;4fr53zsAuUovti&uj7B|JjwKHtQGPXq)b7f+P5q`mL7Yk6c4CgfDJL!i5mk49H zIhPlvUc6&DRziJ^$YP6J?Ru}BL~{Nq8cpN7?U|9YT1*i-fiZ-)tC`0nb($l>ae&YAq4q4nR^X;)*^7yY8@flGO zoTM7VNt&dnFiuh@MTKxuP*SzU-;ybC%4#u&*#h1Di{(AoXM-UJ$79~o>Yh2E!VYsG z$11yo5FrKFUBF~1-pNIGHYfwTtF*m$L2n<_WR{;7-YI9n29{melrq_G!m zl1t<)Pr6hZdt!M}4WwCag1}^Kam0?RHg&bA>`fPlfx+4)INdo!OUzQg3`R=l&glya z{0FS-4zx;DX`G+um>tZXx$3M&RbEULlM&;gxxArCs?xCs+&QnHK5LIFKYGPOP`i<6 z*kab6xRR?WIYiS`##bCt*|DY$K6lQ_DhH$ARl(N(>~rVPJi@bP+V6k;x9$Kj(!SSQ zm{!^ovxmvJ=3}!tVF&h+I|mXXvD3$;`s-AuvMv%;sK5(vsMDK+a4yrc|7;oG;%_LD zwD)Om%|=)xh2578D3UaYHHdk{{C$yQ`F1VO;hdj8856w3AIbCGoPo4)gPAri%HZd? 
zr!`^2#keCDb;}Yz$H!;$-73QuzCpsje6u9}?}EgC=J!QO%#+`!_1kPk!^%eN#w5vYW&KMFZ=*-ZuvveIQDt?LWOcImOCy)p>M~v=q}ee*Hz&ArtVvv(m2GN!AF@YlK6)h>D>3$hRG<=u2o;Bh z<(q$%Z~jhNwy(e+7W$MF>}e4+?F?61!MfI6WVlJ@A~T1bd`1rKCRgT2T-u{G`z?{p z)9W{&?7KK?Y>QD@&xZ5THs&^2{761jd)`NGxZE6OEVc%>5iVCEGv>Go%@fRp=H0G| z*4d8Tj_-VQjK`yiE(H7doyHL16RRv9Y;@N}<&#lS(~PczH5G>ztt@dd8H*~6u0&Un z`FU52g=;S8@L;qnV8UF_Mh1@p~<2TiiGYv~6V%8C7}6NsGr$ zk8*5xgr%>ybYh3le0N>Nq2c+?l_d|`lS(Tr^#y*9b$G1fR}8rMy3-w&?#I~KWi<`^Hzs5t={x7)&=N-mehF>=MBl9feht@L5aU+FTd z(yF6;_QFz^(;1qbm{95{sfg}x(cM6Kq~|OXyoBZFlCqV|O=T6_NY3?`neAfQu{Y9< zkk0X#AER8VjJgz~ZaAN5F8(P~sqd~+2@zP$@ch^`l*lNpV2m88I8g&BS&hR{TZJO{yZCoRj zUmwe=F@9y84r3jdWKA=h9MbHnr#Gd@=<ksVCMw=#$Dgene=03KVO=@gKQ1wHD*~{)2Px%gZeURtMpo6e?sjbeK@dZKnv1`0(%&=FuexY z4%80AYRX~>S`627z&3*xu0P?Udxq{gO%0b!TAsl!WQ=S%-SOitvRqgrqoem#r847# z1!9oJ8G#fbU75!Pa^KBy@T%wD$kU9`P_Dy`y5GFRid%?9-BDzeEY6bC1l%hP^j`7# zv~}wI5JRY*-p+|~2>7DdV7b8WuB>)og$a163%99IMixV46-oqyUlp)$y&4#iBe1Jg z;G{cH;9pb7_0Ni@fnZN-06{jjfz@m3WbtA^ZQ%55e~EKPaBrf0wIRqBCD8n@k+1nJ zzm?{rzP8SpPtXG8>}r7OJS=$ms&@c;4m4lV~~;i_jAr_Az#URlhL2l{^)dBF-N_TPG~2m~8+ zLKNmBOQD6K4VQw211nW96|j4N(cLSHV-;EuuvrR?V#YuOc%q}o!&wa0hk&Pe?9vcr zmzqHf!OGGTFN@**YeDrKx@J^12HRVulRAr^R5T%OmHawO2btM~xR1`VlnyzX%@$8b z*|R9ifOZ!76sP$EIDZ|kxenJ}hwHAx&HeZv`=sqtUMOFE{kKZz`};i)I>jk{0H^H_ z;I#b#+++D2r(>dY^rh6L0R9tO(OIT$EId7x!}#(a1{{~0*thqh07>*#N%qn|!7 ze?;Fmmqgzx5xv3hDQQC7H-B0^gVp(m_dMp3Kud@`%peRqO!cGS!yLk(!#qOnunHkL z9E7mft47%6)gTnT!3aCNAqd;Op$IQ~!w|N4!x5hMMj-sr8;S6HuNL8V-YA4;ywM0x zd50kUhc^ykt2Z8Dvo`_Z7v4mKpL&xJe&ijB@VIvv!eidy2#W+HsZI|1PX-iZh+y^|2Sy^|5%@6ATI z&^s030Eea379b@%1F`m-4a^8KDrQ=>lsUru>&T^1c}M$Wh^MjtZQR)4-%%MF}GRL85 zAaQ;5S$p2K*DwaEw-XyF&a4a1&%gG$RVAnmM#t1&Z!aRaPmB&(G<2E4Z;C+gl%%e6 zE@J9wL0zNKLGN=~mj`G7vG(0AgP+XY?bz+I_&tadTwn4%Cn(ipQK%iB^uAnzw8kDU z>~@jVp3?~^&#(zfJ%-)w2z8xS_nc68)3Ii)owS*mghd8_B%4sWj!7t08@8C|IC0-} z5&uai?TtsR1Xm$y#ogzrO>jj6qpfPt>OebmE>6iRd!lRZmSQVbq4GfY6~Ctzv(or3 z?sutf&~#qNaLsiktoXDhW?Jfs_i8jXjQ-W77ak%hy)8`YN!PB`8~ltq9CQ6hSB7h% zFxPczbsV(t2fO(C*O$VZAe{G2#C}(0^E1?Mw*R%dk0UAQxcuGA}%(Z%YBo~DVe@2=R^*vE_K&pSzweO_{+E4gu=E)jDv z&q!y2weR%MHfbNPU=LvrE~V0KVLe@Ews;_YZ+|+oE)H|KXO+o2#6D*UUBNjE<}7&< zGhfibO)cnPv!m>ZOHAf!OFXrq#zmm5tWH@xEz=a$y>9JH74!nty?&9XbpidL(e~@m zn{2;E#h&%i{Y819_mmr;trPMg%eoodfAmZ)?V@)vz1a@|qg3edodejL3Px}7yOnYj zdtNC=v9{(NoIvopfDgac}3qy59L7)`#BzxcjM1r092b)GyL1dFb5_$>Y)CGysFtav2aq z78nNGDV_Ri8m}zbhPz9Sv43iqQ&I%LneFhD>~?(Znu+yPQaG_+E7Y$YoM8?!Ph#-F5dYR8QkwVdkeST=QKEUr2o2 zBAy~Hjd^`Sv86c6-SF(67y`eAl@|sJ!-ay2S-}PV5RGamWT`V*2#DX(eGKfl#6J-b z{|VikET#0n%cU-2K&e7&2gyhlN*$#xU`3@4S$tnM*HoA9QHA1+kfnTk5@z8Y#arD9 z9}PSj>-qLL6>CUVw8;H3NxsrdQ!kZJt+E>Fw&s^hE>cf?4lisMb_&x8xC&7#TxFv7 z9ST>O!hALWlMa%8@Kukb9z; z&cKQ5X5eP|GoM&DT!7D%qc-bcjP-Fjv*sE2Pt##5>4ZtJOee_P0e{0E%dNzW{$7q+ zv9dU#zhsiqBK1@ZL5pQ_u~KrJ@QkF>B@1_%W6cwVQtYlUr95lrobX~O_r-zbZtcN3 zl6+e;Ycv_O`jLH;=G%~tSJFph zHPW{rO?C{jsf^IIWR=-t%m*b3ZERFhg-B7G^buKAvlG4>;%4EUJzx%qw;c%?p`O-P zp88YP%5n9~w8i5a2 za6_&EcznQ+U(oS(<$43r>ujaiXWBqPhdHC~WE&kuSCY-z@k*%?Ib0wipVBvCMgn3_ zuiK#04*@2^#xh@foZX-gi0!>Z7n7%R8eLXD!x_5|GR^v+7aj+xvzc^M=LfY-3* zO!%kA({Ui3hM10NGL2(V1~3M|1L*qExe1{++pk=AG=U#ed$bmQ+F;_3BwX8ij zj#w}9*|PY#tg~ty4H6;yu68**wX)bGt5A|f$s_LHp@koH9QZs zjdlmY94zVez-kYOZ}(EK>yzk_%2ev5PcuVTJ&e&6hXFJIS?*Ef5-Y^pPq{XuEbNje zX1k7EQKRfO^mptEuY>g}dj!Tmh45FqaL293?)~J73MC$qgMd*>|aA#f9a9MFS}$hw42cd#J#;#6A3}zOfS_$0^;4hEHsgTI2-4;Z!V*c zC_V_PgHT>cys>&0KL{Ni$6C~ z+yjjgJ)y^s57Txtp-Ti!)6~5B2vcLMp}Npz`E^J*lG@!ICy0uSbJuC7(UH@8I1{rK zBxn>o9XKxpEBKf|h<+WhT+HSegY{w%$*!fP^?~e*M+qcE1Bvb9`;;7JPp(%RzJ4Io z9-2q>8YY=<(IBn1Yx3~+gH$k59BeV@`M@qIJW!o=-z1{(1F(rSk(jp}o3<_>KHLpC 
zX_*SI8um(ewmm_Z?W%%>ROH%TZa2qtmik!(T;XQUm_d?(%kr8vHxiaf10F`y_+Ud##sH54Bmr~)E#NF*7GMUT z7;px_+E2RZZtJrH$xP19=go1j?N#Zpuwn1e{*26mshpq@<_rr;4L@04lgO&`taxo~ zb+qDE@*s{WpR4Zq3gvYJb$Cc>>UsLw!8j7ip(iW3WTwiVP`+Iqbb{uZk&C>bfjmv! zG=tTiom(yOqpW$^wkAn4it6WKqb%?gXh&hzEKO4Es8HB`2JC;TbN96Z-P?cE^0kuo zAP$I!IzwFv<*=W>PAFIGXJ>{_H1vaox0m9p(-hDqnz)-sW)Hiy{KD)DjL zRZ^1WXPH9Bu0{F|61A%|-j(GyGw}`@dl19F6@fb?4sjffv+#OA@q&&G&Uj~hIqsaV zpyL3G@@tvymMCYTv&E?`*L8T@TIVk360GiQXGHEU=P6fK$I-%5j$LS(VRku+Tt+|3 z6~Xq$NZ}!%wu0j0AVhcK7NU8gnIn#KgULVA!e&sJ%g`6f2m9V9ne6?>DhtBtI*(OpItqffFK6>TCQhIGZ}6@pWS(YNd<9|GNE zO{vwd;dVIjN|zoGYdWbOG){=Z&fMQw2K|M$6KdSS)*7ia5Z7M-r8 zFZ)#M{1URuj5lM=c-#>HEg&E1VEddk@j^0QyfrragVbVty^%>3TksNLoAaQ`9$&?b z>p)%Qy^xR*YQVdYd78W(<-hY6WZ$)NT+Ph#FL~|+(e12&-qDENhdyXM`ZZ2MzFNRr zswYCAT$8LJ)6BxaxF7GtEdcqvrE`KnM@YvvsdK(#e>oku2B&kNr~OXc8J%XTt7HC_ zbQ)c>X5aYnT?=Ssu=$HIF534z#B`i=%#-XBb(8Y4uFZ>b*HueZJQ*>;HfvOq^b~e7 z^|5Z{C+Tj2H|($cP1x&y9o!`Cqq(t7(#{dU`6lU=*dl$C^!f8DB=T`rYE$0fZ$$2uJN z5otK?g`vReC8`H}p{J7Qpn@}scKU{b>jkQxzAO!a2I481nA zzB_W~k%2ui$SxbcliWvRZG&?Cb<`(Mj4=hV(_^O%${&>XIV`YAjtqy%VRYcj7)Olb zb161$m9x^h$XV*NI_=IP=jT#bTDRkhqs?*7(d78d(dzJ1eFZX+-f_wu%_FA1gMPmO zNdm6iwc)}P!&*I0Z$_zn7?%=`ms-Zp#_LmDu-@ZHK%5KvoNw0wyF;0|f{rdH-#!Lu zjZ|7rcTcG^6Lx)bR$qCmq}BiAqqV8kJDJ+=OTYE?1Pl-Qj1i;w2J zbHuu7(~@1>7Gfg)r#$w2VhmbssB%`l-^NB8Jxp5`^Cfw*HY$6w#_4&kYI;3M$!s~d zDimL>ovWnZBUU@iXGzM;=G1!QDkeiVB{#~()3pm$87mLAeH2;XC!FmYW2HCgt4r)k z&h+f1@hdYMIb+A=k5+0>R%2|xtnDhTtX_rhow({s)0u;p`IQ})b5>Hit~yITX~wCt9<$LODi`ZhqnIgrQxXgky2B8=IZ663Ko9iyGscx zL(bBcPGNP;mDOwa`i2Q7e3a6Ib~xzdUtD5V!Wz;&zf=|rWs~dlqUrU|SLM~8T}3kE zpc5+JCz<1L+jjaGo#>;M|A1eTknxr!(d#D?^09@`AJ&r{rnYPAP!n2S0yN0ax(ou3 zQ`P7B)G7KUErsdM+dq)fm$9F7qp&j9^wTr%JCV}dfSwn)N=WLRw zuK8{kbhgfbnBB=jm#0=mfka>Ds8r3vUZd~J^8(a6TQBp_%tJxHP2!=iYk*C_-Zncm zX!mFUHZ8oJ^o4p>J=J>@orj)5@CO^WSZ(LVCAv0ixT~Hbf}6Wa`MxN@g^#0567CF) zSQlky+NXs1>k?geFp>G(Rc#r6F@Lix6qJIR!JrN_COKD~;ySVS-9HQacVFhtK4Md;-0rh(+;*fw&3@keKH}LGQT2UalxdakAt(<} zNSPo=4%*usS6mI%3#oQ#K-|?oC)zr^*W?==xcyzdVpeZe4sSO)dc}KtNMp}%lF9G% z509#Onv<@JnClk47Qtp zU2jQr(Qnh5u7q+sEYbFuytu|Oj@=Gf{027Mu1=Gy+7j9lkx%eKum>*!jNa(d2=r}0 z7T<>ND@_^4FGt?^mhwS)`1a@<-{RfrF8ci*#;2iYkG_=Y44T|tachsxpw86^kh?u; z^*T`bTzb-a#Wjbu4QJ&@O&q=@d;+!){f=Rv>{8THDYf42={x1xhWDWJ43wAZ=)U6> z>r)=pD;D6Yo3QJzK`?Ph4aL(Pju-ai-(PXkWN1UuR-P)`=dB9|C zt}%^l4%6{xkD0n#S|4F_Mz_>}9l?HCt4aO!Wb-7A>6eqt^Z4%9Pa4OL!?*c>U1|Rg z3;Jox5s$W?#yb=XsqjHF zZHu~N3=v9=uexJ&JK^d6vhxb|(ge&c-M9ERldcQyYYjQyKKv-=ovt5UJGz!NnEyR? zv2Mag3~sDwzeJ+@|IH)|70iSXqAQj7$=Wo8ILuWp;j!E|z)1}CUb<>)pYZlAK(95LnxmP``l^KDW@Mv zZj9C^4_&nEjE_?9$e)_Rw+|-CjZ>ka-`;C*wJP)(`5~as1wBOntFL+mZwfleOyW$z zO8YSh`mD>?Z++aT&eE20ZG1{{BYpcx?={;KTyH7uOtnRW8jY{lXc>zw45={v&%Rso z(v9X^GBU=^8K)E!x$(W`qO@RH{A=$M?i21H_$OuY&%N~)?Na(h_wDY>Ewp|*YNx{r z|6FM;-4=v&kp2TDbz^@|N0gKF$a(7&TISnNkyx}At6zm2RcdszI#u1wr^dFNs!FgWBPUtUpFLF-XWI*WFGlI> zp*QlWs#x3OpggX>im%xi7NMm4@2mZU}|BV&`I!K~*@#VyXH^Fud7Iy@_>g(nWy;|tc5mY^|X zPpJ(q;Z;btA3HU_a#{52>PLDv)NVBH*MFb1QDU+;qJ3k?Mu{CyjKxWNYIj@4oJ`GL zCy`kFP}PXdwIt-?QEo~MK7EmN=C@y zo$}v6;UX`i)X&WsYl!JWGZ``d!6u(6dk4zTHgm$qJ~Eme(c(LVxuZ5IoxiuKoPl+R zbsfP{+a1;r^p{5XM9pI_T*)cVH@faAzt?q@Z{YN8>E>$x;7Ri>5 zeI?KclGC{~__}S0U#K-F3UVP`68~L}xDvUmHRJk^O>6}g$5Vx)Rg)xC~;oYeE+-^7hw z)3N-mX!t|cLq}QS3o@g7B_GZa*$n&G`LQSS!Cszx058q^{#uLZE;BjLl1%856M_|pfOGK`B? 
zz4FBcRYs3^5wGp4-McM6wgl~AOy8eMiH{wr-tRF)+Q`$~_8u{}*IZB2+^$eidY~_o z_!008 z+_ka&MgFvD^Oi@qKDOE-bN8OVKC}1Pea~0F@Zw7^zw+v9`(JJM_-G_3ypsJ=}2QNaN9C$KG#hI)40v4?g_pqmMrR_>)gQ{q)n%@cX>)_eI~Y zKSeW;|H&sGe~i)}e(=HZ<4sNPA3Jum@yL;e!`}DatAF>MLv?Q-tgWdz@Yb8J?|<#p zS6+VU#TTld-}mg^Xa2fp_s+lk`RS*g-2TM2$F@GYW%H&#ZQQVKt!K^ZhgYq9=)nh; zFRNVYUb6W9`xchF7R)c3S6bq9+wMyZFvKZl6^&b7tX;g6Y$z<>%Y;@^Wpq zoE$-zIyF1nYPDJ{mMr|t1HUN)h?$a+i!#~SQ>O|-PL9o%o0n(L&!0AZdclmsnKO%K z-G0ZN#k1$kz3c9K?sYgzO6Qf$U*IZVc;Eetm$;W!E?fSXY|O*n zJq!I0Z&%8HLVgINORyZq($p~gtt{Xe;2Q{l&ItUi=@6nR1^ykt3HTS_9AE*U9AHIz z$A4=NZT7d9V!+?ZqMdQ!|L5&dK5o0NJ%(Y($MG69;RvZ8^S^FyJMw3vPs-n8L-BXO zz@GuWe6hP?$pht;jz-93268zwQT=;vNKjw17%tBR^U+)>Swil|KPR~XP)@=)H{!*3 zE_ae3E{%*M(cCmrL`W!h5-2**?A;~10l#vI(UMb>Su(u3x1(^>fDJ6&84r(@POed>Ap|qPwI+*~L zOUYQ2x*7OWh;9V#oz zsZeeMMF9UdJd^)suF<^?Fy&Z6VOF94y|=dBzJ>4k=|7!y;n}${b=uFzdh9*1 z$*c=g=cxZAOy>*nljEyor!SPVKB|4gvO$8Y-n zzzZh_Em%D(=grOD2X5NzoBqO`f4NY0Wq)SIOU1|Uy!We-=W~oFU8R-hZ=JXE?5FP- z6PG>|DgAKe`v=a|$rBTvzHP?0OaERKId7A5am3C;mpzY%PFuKdOzV*HqMP@g*f#3E zjc=JAUjFdI-+t7xF?9X&{|e>IiQ6vSz2V%r$Atx-oNM^#%^^(>pFaM#w#g}Ps}4+F zW{f{PF|XEhZO`brPez^F*7T1ZU5~S`zuYwQz?JYHgze;w7jJ#&6XUHbZ~gkIwz<7e zzfeDR%-i?88f!Rd+-hvFe0qoFwxqHLb?*sV>kG%<61k3v%8ay68}|9%ie8^Q)Lh(| iBj51hlwFtYr#@Yt8dO_9(MjhbgjLe9{^_nZ2$?-1K?5#9|K4P^Z{f7%z(jwp@2fb7{CO; zB!CMr4KM?6BcKXU16T}L4!8rb8gMV*0l+4}7QhpLX8=0^y8*8QngGp!4*{P74g&i3 zA-olE1oR0&5O5aI4(J3Z04?sv0g?dy0J(tSfKh;PfZnN{2)YC?8BhtB377+*_p4DZ z04#}K)4E$PqgR1m3s?`>2zU&z74RfrV4=#xg9-Su>n}e1>>P{I*NCa+de4G-n&p(Ns>r;vs{ zX_`k$&cw0WM>Gu%jCF#<%$lHj$Wb7B|VOOq=98k0)#FrxWIG zlJ7o_lHDZVb(|>xWijQJITqVR$uU9GsEXT@t|e%!d-%s8xA?KQI1{kok!aN)2br7D~^Ph`ds8{4K~ zhkup1Q`45pWUNe;nK-*}znzmAo79?7My!2MlPoit^>$k;Q|Pl@Xk|?C7=NgV8=iET zOgg1kB1RD)gBvn*hlS_g3=*?Hi02c`fkBtR%9@2b11ob2b+N3>E@Z}`h3R0>l#g?X zUDLX2zDU}P9BG@XO%*F;O=gULr@a`Z+$xw;Ma_so^Q#st5T5Z4^ezypgo@McOPMhT zs@hsxPNhEAa;oo7`b9U1`^4p%4SnaE_w8XXxI*;d+8Q0)`RqdO z+?EukU~2B{;)>I+u{*>mVti|5F;`tz%%6$js*A@LJF3@IzgMlfd78RshlpFmKH}%W z!MU+t|M9ezqnwU{(<>Zz{!53s@KA7EZv5BcX)S9Jbhw}5;Mx*#|7Z$#{>RF!!hZyN zdzGQ~J@i`ZxUR4%80X^LfNHL-zR&@j>)3JrPboVtY^YjWy#sa4)mj&KGqDis+oxD- zZOL}b6=R(=FLdIwX1TEi`&Jj4z`d4Ma^~Ax(PExKd?4)^J86}h-burXE=X85!N#>Q zEZeF`lNDk~5r4r6l`7D)dQ9mt`Q$FfqM@^{SEMhMvt-ig>z3nO#aRp*Nj@`tnD@zxd!kcXmyV zwzHLS5le=rwuBgo@HuoU@0wM!W)~aMu*@S&!!`Pe0c~k)Ju@QKr?JfPu)EMQK*loy z)xX-ZO0?xd499F@wldbr%kI~faIUl(Ux+1dw+jTl2rB@aYD*ZaJ%)q^5bh!gu`Y*f zp?3r11Ub&SwyxxM&wY(vQ~tFjksh@C{a@tn<+W>e5i6Brt(`eQ<%C3q7lLdtCvL(% zgFMg>ok3$GGp4uB*pwtQ6{vSf!Td&I-DGE4i7CJ7y&Lx5@C<0C@O-c$q~xCuvM$Eb zP)jAFay@1~DN=iYE35QcPZKiL-COU-;?6kO<}-ii z)}A&BZ@0%_^g~a{6MSod0X`7wFr_DAIyAK%7sRduX)&1r@kCsYa?_SOkSIHz1$ z5i-QJig8_}eU{ASZ`i%lp4c*F?>h8(1LuHk-J#B5(@A+FbG0IUpuD!{WRMK|xy4|; z-Vt+#aHTStI?Fpv7$Zy*8oV>zV;v3dV?jEumUv={#U6V-5G#J@P^4D^MU6?N$>n5F zocER8b+1+3Ugl*AtLoysnchm_K5vShYpLk)VWf8{Rkt(aj_j$bOW7NFJ~Ilk7H+dM zZ3gC8Yf%&BO`j3jfw-5?6t1-=x#>QHs>S+{0BvJzj4MyIa$Tf&2cEUVUFF?eqIYe* z-Ld;~=#>V$=CG-z>7-Fmq~d@Pd!O!E%D1So?lnVs_3g~4>N?I_O1MD}2v+Df_9D}k z?!YRJP$bu7QZ}6|65ju@sEPKiZPbbhnEm*2i~{3htf~1N`qk}Lq+4;i3&b4E%0Dml zw(V^?Su6bYM@3qo{1U8HdGL)1IAKTheGkUFj@-UFgy1hrq`L1{sK zpm1d=HCCLP6c3Lv5E400M8gV%XN3r_t(Wm4DzBa3MO0qTf)`PFJsIW2{x`gQA%B4^ZB0j<$u%iD_YS46QtLtk$_2E#e{7odHU*PGXjWSPitLXSS$Oo1Ge92C@T9 zA~P~q{Cn`zSbBv7| z(Fi$`AV*v)v9RV?v>S)Hj&V|1u2gte)>2~=16|h^Bo@xeqSgkx@N*FJMdt~!(Y50n zb(je{(^*Q>NnU{b>b&#`pq)CG#@%cV*TOf^yFrXMvM$=+v<|;T+cK(LT8tsFOvYicEL|UVD(iQyiEe0~tr=AEC%ef$SBJE$q<@7mp>?HJ>CN_~5g&==v zc0YNG&WBc=T}pAzSabVj^GwH7+h~}g6`6rc?_h25t=y2*05qDlayMh9b5CdfpNe+K zFRisr%#ctpS8T^z&^fq#-bY?G@0-+T{yOjfI=(rV#<%J><6CiQd`tch<7@fx*YVx> 
z9~$4gKmNL}tiKx{D{9o;*zc{#9{K%xth`KK?SDw#g_p_uaTAsO*Yz+`o#ndWc4p)a zb>B7C(xxFS8_llnz>y!%9=BZ^kl#qJ*5MCOrUgVGoU9M6y=)HlL?A)?E|g`a~gQn zgKNC`K(H_FP>u+nK~b&B^%y}JK!v;GK~e3|*&T=HQC>*amwEqb-_f5H3OOS~&C@&5a7c<)nrdw#>4+TvbYT1uwMh6(@98n5f> z-QStmfkm+Bia%nH*&4=5?Qg+pmNNtx6ZQ3$s{6gKNGysJ(>($nG4-22QX)RzE9ZT4 z5|3EWZ>O-yVGR#K-??pb z7tDoS>oSS4mN=~L)G(#!s=LA!q1vT8+$K?PiNUo4cl4H6P*;a}OOomtj0xw6yoHW| zwixjiwN0vxf=w9d%2;5>sn4jQB^xa@(UOmp9X?tvw0DpZ88^|=XNi`{Y8fJRka z^ZR0fwfo|L*?k6J#UBsc?N0#i^d|ye@Ed{K{Yk(fe=_h{e+uwve=2a$-v{`6 ze_!Ae{(ivU_|t%o_|t(8`7?kI`ZIw)^Jf8n>hBM{-=7Wqp??7I`+gH}v)>H-w%-EW zZvSB5m;5=vJN>!9&-wF!pYhv(pYjg@e!@Q#_%Hrpz+3#o zf&b*s2j1i_0Dj0n0{8)cA@F_vk-+!*M**+%UkSY0KN|Qhe-ZE<{xQHS{bPZb`^N#- z`Nsn<_D=v_=of%%{1bt1_S=E0{0?BZzZm#N|0Lj9ei3+vzXbSNzY}zKc z=${E}_TK=U?Vkgj>30LC`8~jW{Bwbm{Z+t;{+oae{%YVDzp=66f>NgunK3zt)9>h< zbYV^%W7f18h$1cPoM2(#`C!bQ#A@{zn<_#+$8bx-T*i{&o`sl|USB`IwnT2kI)zhR zsb$Ub_<_v8D+iImK1;Lg9Jp34y0%#smNm=8#R*dqob+kJxg*%QlRC{}oRdBeWA4ls z&Ef=Tma(0qJvs36WC1Ju28}I}^vE|NHqHSU06LexOkcXW4tfVs^t5OcjXeW7lEj^{k6YI(XN*SS<&HOD>?)Qd%ja&hNuATdSt4jbDfA_*v~zVD6pdA_NBYJ+CzL3@0xjsXE$;K-q~Izm-_Si zz~NPCHCt94@+#8Oy^(l#KjN|k*f++;wZJoNq53$kEZKXFcbYeORZPt$VS@uPf~u#r zDlf#9tCFFOH$WS=sj(&Hp-5i`6zMC#p#bG!Y~;(WUdDW+hBa+2(P3QuWviE%skcMx z(hw8Ue!f3#CWx5oo6m452UZhniZ2?6VMvD=*nE|JQqL8COFSAsXOecE%B6Ffg-Jt zp*xhA=vAGZhrL2&9f%d9R_o0`5o|Vq!G8tI#4?lj@>5fiQ}}U7`j9;Ie2%IOvv{0 z7u`NY0h<&pTC!;QpxTE6wO-jKYx0HDjuH0LS_gVsUGDfY_#fu5^eN&-ANE+Ptap9o zeFRS=mRWOY1}a}bUMavKi=@h*gFOyVly1VKL|(#SQpdoGJ6;*!I_zDKz6WIT&`j?; z?RxZ`p%BQlZqGaI8WYX=&bjE5na~8tQ>J{d=fifb@J_oObLdds#}%~!S1a%Bi3#0} z7Gsp=J*mDmGVA-YHfhOqOCDQtV2P2Qy|w+tY%yINAZ`}3#IuNTdS#>Xl$TF-eK(I> zvB;WedH0m5N-JWOQc9ig%Jun-xgw;CStqZ2C@f7^CgEI#r9UghYPm^qsO2My-DNLz zIMdMkt}D^E@ZD2B>$|7!1Jw7W`oVtg?r9aFbUt(NI=N%@IytK@EDcjeM|DSgQdk-hozaya3|GaVJSgjhUT=eLr1mr-)D0P&`mxQ z&Cvzg7jLY9b^qP<+{>>`&OF4RVd-y5e^7~_{;FiTj@HqcO8Xrz0TNY&i-60+KLwpA zo#Bok)k)RRV|)>G0&;gezyMehi}yf)9Kr}g0BHadU?ji*AQX_7qrME)ZCE{M5=Ht$ z7YEO0yfZ8nBVtby$Ko_-yT&;6h~pJ$Pv@1OsQ3M1=SZhk^}e6$)S_KJ+U@8ZjymdD zKi)Y6lpfTUP8*_f>TP{{F-MKh!_p|_3Q%;k`HB^k9+XWP;N;bIzD`Qfr`^%nA9pGB z?!~ld-+oFTNI-qGg`LTus7E)ilR=pX33gp%OktcM0{pl_%=GFhlx*Z0qg>SqtJ<^c z{KYv`C*L1nO|;F^YChqTZ)acyBj(~(g~JJ<8f*4^0*yuYDy?5|Sv?c2XGH_z)3Edb zjZZJ-8y6WCZUnp)=^ugK+)r3~@GZKAk@%m*^K>o9SYV_-D=hLXa*#2}(Od$RebGf* zxh)#IeJtmyu^ap(0&XuIMtt)W~dU?hrEi#N9Lr4$zU1qHrdH=Xt6>N^CRDBs!=I@>h=5SG5Ab(hxn z^7%gdFY`_9VNs*aX`4KxoMxu5{yl@SkJa_>VJw54N$`Mto!-5q%_q=vK+ZGpm2T}c z?bTTqEHk;R?xH4>lU2{z!p^ka3~Kwk+Jl*>Q{&xElGD-J4_EZm&BoQ))>K^4lXoqy zerz@3ipI2)(VIo@1lQ-m7|1~5Ye#3=UM=ckT^|KGlrgUNgLG_EAE|1si;ex7?xIa zCzqv^reH3YAgYI-w>5g?}i5cEUnC^oKPT`!^Gx0(qbh8@~I*WOGs(Xi*3mG`x zcCi(^8+N$88@>}tU@`<1U@Br3TcHss1o-|iY>&-5QWvx7ZkO_XLFo-~_7Z1t??Hp9mLF~

`;qsfHWbaSp3o_QJxOJ}>Nk8T2WvWr^6ijIpOP1_J$H*{6iIFQ$_@)R{ybhJ#2 zv_IHoH2)Z+y|d8fpIsR5l5c{M`?TggS~Klnu#c;6Szo8Ik(it1r%e?d!|R$)>`-(=p?)z-$@`eMXfh>eL)Qtf;`vR1CJG-MyWD$19Fv>cz` zu9ianFF|_0wj`nv6zL1-8G=2=W^2T>0Nz@v{=p|vc@*gk^8Hjl{|r6Fs`*M9J3u=4 z_hER6<5bNHpCMeNYTmFE6CMryMD^bNs^u-jxwyBh@RH^ojZ-1xps40u*_9Wq z`=p0jR9g3soduK5W)yC=KdQzEisb2` zb``pXo-exhpig>r37rQc=3$6ZP+kz7iS<3hqcgFtr`HDQ)tR8ygT>57#@}->Vr3wU zO3^u)j2@A(I#r6+>D3-|{IoBffv_|KWA3Yt`5KJ555|1EI_9Am=_@ML7u2)x41VaO zcB!qKj`^?gI%8iNQa{*!STYC)1q7{f**p>attM)jT;c#48 zruVAir!%K}Qr8}kN9Isn)r?YZ%J6I`` zc5UbQgE8fQmup?PiW5Q_;qNGC)+OzYyj70m%_(1OIHHTlUqviiy(u9kk#nU6jN+lY zzFHA^HXWm}QCE)4gInuL_l6On*OzEuZNpNDOak2sW6}_1K+^6PzZs4FdS1?QWtAhY zf<=0uGt-q>n&G|A-e1(WJZ0Cmq&wHx*_L$l&6VlQ(f8ECxhi{edmOjPo`TFpT40E~ z!L3MxdXUTMR;2S?HhhT`*P4I`nW+9%BvR(Z&G1|HC^TlHGXC<`i{DULs2tzC{o*&o zq&ZIC`D2`X#21Yz)rcneHEU#jsmVf5%z8P0uSwLF(p^gLEPun;bo@4Dz_GrfR;#{PwltCTo<*k+OKlTW+4|-KVp{KQ+oxSVPqBPTn%(F<_}Kk z3qA=hsGpe~oBQFRTzVd#?b$_l{t7>0ITgIk?jW%Mw zdZZr|qe*U}nW*a(lc^JTt-Bf9)2)dvV;Ogx411y_-jz_6;qH;fD)FTmYM+ml4Ab{G z%N)K}qw7Td4l6TnlB(yh0pG@a({-b3M%QpU=3Q>w>YBAOE-y<=JQSCg0xOgpxi+ZI-9bhQ|^IPsP?O*MfF~_Sz1}g3TV%z;h8myQ8$*M&Rum#r)&TCs)SjkH00*OT`fjUhLEw~`r`U>l{{R6aFlXLV zA!ZzBi&6QfW(}P5vkh;qREc%RPt;F#YNCDU8>MS|ijg;a3A~)nU!Qvp*OQ!vXgh=X zbZ~4Las(A4tSPmewX&8B!gnGI@?#YE<`2~#tkofZgfrN)*KS-geF^DFq-Ye7tBJ;!q(brS@ZM3Y}efShPe&F zhWfN}Ht+6^t6n}_pBinSiuQ+z7x%Pz$u0UkeTyb9we@g)x-%y?Cy-;-96wy&&-pm$ z$B|2;xlGc8-p&V6^Pu@WzINlqbJ&~T;~YOq_a@beJ|_Yy7+)W6r2F+|)XfLn2{7KS z49Ei1gMJXOfgz@|C|^NI^LX@+QFlQkFNbgBCiblu=Y^fYDb$Jd98%xjoc%_CDF^@SRHpa!yhwc@;nIfyK7x7Ay#cLrv8+jQv% zGv`yy+uZJOu=pPG2_LhK*)qrZPJ3*+Hk7C>#R)IyW`_!)4ZfK;QS^F?V)gb7y`7jH zH~@d!-m!fy^}9!K!;uGbW`3gQ4^Az`dCL6l+3{xnL(iSRa_;hjVi_?%yyAe&+YX{8 zzp6MesVu{@)tjcL(&@rhNX=)$W`ctgr4ywWr_yU%!c90hda~8~{Ctu-vxUj^v{-Pm z_%TOx?0MtdoP)Lvg|9qZQ*q(#x+J@~MXob{gm0hfxumDoZ-G-v?AJk&|onHe*ibxc`7JyRa_z zazpR#xb@{E%x}Yrx}eT>cSr5Zyg3Qp(qmoogAaSxYu11Mu$ScivE>o(x*&HxxoWHT zE8SBE={GR6&9-RWmP>xbenZ3_^RV^3`;l+dyLf1-`b#-7yf0Go=nhklbnH!w))DbEvS>2RTTz=?g1iOITZJXNN>T6lDo`}%6_$!rist+Z zLD6@J$QjD9umlEBxpL$kBF)zC$Lq?_0OXhFWI$@zz2wYsy6SrO-PDeP|y2iy*Zt)aO)f z4dOe~lX^nW3-T)aX#Q@QLITI+{1&`hL*9!!9_yNYy<@7TAjTQ%vZpoJFC2&|V~-Q7 zUDk}#yBh4B2aqwBYfC8KfA{@$7^_*^$e8Zm6_yrv^+=21za+|GNm0Vm!%DU*yDZ)N zu${wqJ{jc!#&b{ox)yjiy>OmBdtL$2)U=n<9UEj0FyArXTbkGBjg3W4Kkg849}q5YQTB4`2p8KfDyc& zi}IpVI0oZh4mlbDX90b1-3RS!!0RB|KL%cZM)@V+FyIK_DBu`i`J&po#Vcx-)+}CB zeG_;st6sXSX3@NY5ru^8cy`NAWJSTS(Zfa!EuM7UP`EH2PgQjRF6`40tq_*fgV- z6uried_HD-$%dZoE9(2aR!x|EE(`U?+(!?spY%cbUp}ke_V`WrJpp-jkE3HM$1Gnu zx7M@hCeN^{#kbtz8Rn^pCGiF@@ZL8d^7sj38qmaupehR#$?)8PrIWV{nhwjl{hXphkeE?MI^= zg&rOZz8bAofnN@lf}t|>{(diEq#o`U+G){l9VGz&FEW$=&r;*@CcuPuGNw(O7V^J* z|MdGb-9HXDt-E7?+_47B&*P>275k+=59WXS*uI`QlLiicta8`JeP7Q%dhyV61wUE{j`5zIgHt`2pej zDR-P*e`8wn&?g?4QgL^`>5HEIVCBo6qe8!{Kg)dQ&70q>?s;j-=JCS*bm(Khk`}I#N|4fu+na36$tejZ=NypO0e)Z!s4uppdF4~&?Zun*Xo|-Li#N8uvxsUkIu~DNIMdVb{dU*Whx*K0b#2K@>-Vh~wLUoI=^1}MRo%X` zsPNhA-<>h%^Fb#|tcSc+OHW>P)8^wJy>881a$BPOefzg7zH3y*W;}NF)Nhu2TAz5+ zy`Du0n_okK7dLtS_8|xQ)l^>j_`!`=EWGDs+nvkryz`q6TJMQl`_zdzPRQJN_QrMJ z6+9@;`|!J_4_@lmeCLsO|9NJ7?yK5e{{{FXf*Ak+ literal 0 HcmV?d00001 diff --git a/panda/tests/hitl/known_bootstub/bootstub_f4_only_bcd.panda.bin b/panda/tests/hitl/known_bootstub/bootstub_f4_only_bcd.panda.bin new file mode 100644 index 0000000000000000000000000000000000000000..000fd26fe47b95ff212f30ecaa02f6b99e413b53 GIT binary patch literal 14208 zcmch83w#tsws&>+Os4amJb=6i^Wc$Wcq9ZR2uL#%I+IKwU_=Fc%p?RRpb)}C7DWfa z-5@F|dM^ZZQPdS(g`iP07)Lg`zIQVsE{F(<>i`j*kU)1xGJXHmGePw3z2E)5-}k$d 
z-#K-vy6W_)>Zo8Lem+B`t-Ng@1RKr`R~;2_`wz-NF%fTMsD zfPVry0G$8{&=25J2_Fqe1PlUX0*ruBfH8nVz$Cylzzl!`Fb8lW;1)nNpcb$kuo`d| zU<2R}fQJCv06PGG1UwIT8PEiH6VM811AGMdJK!+j7~oq#0B{b_1?T}N0PP^c#{!Z7 z^8mL2h5&{F%z#|LIKWka$$;sA62MGAC7=ed6tDttCtzS{*Mr^&*bLYTcml8!K(9Z8 z@=t(QB6ZsK+7)yQXdmExz$XAd;2(hFfV4uDhrz8OD*QN=7=q2DlaC{2GLnp9-HlfA zb+C1gY!+^oHI^5|3F#%BMEhJu4e)fdjun3%!W5njj5!mm*s^b^NX*fXWr=iL)YoJ< z=iZGnc3?EVv7Sw+ z+9uz75+%D$zUL%U0Lo;@EyrDVcWSsZe#)`LPdXd8@uILtk!CCWDB)moo`y%Ll=)KQmVqa^F(Gcv9O&Q zcHCE)FKaqenT)lmG81bR9<*{YW0CwBWyCxPEy*&I*}{qMtFuWB$QbZd}q8 zGDRr^5;2MZ8Jv)zFC^@FJ3x%y0Pat)260gdtgN{`Dvp)8+oNJwnZ2Evj2?!=fvkL- zU+fzHz9k~*)N`bBwl-C)lr@>r-j}V#C}qE3OcgcbM=Yscx>R`HJ>0!is1_>DcCBP4 z9jfm1x1ULUvHi^8A9c$XiU-8iqA2>sTV$glC712!soq&^{`&s28mBGZ8sFQ24WqY6Hvo-HWb=`b8WjX{9DTIi(9HU*6c=GTaDJiEg~jje)kM( zt}A&GYsHvnEDnDAd7Iqaj(r;yoW{M8R?_j^o#?SRPJAfsc`NbDtq0TaM3*M4nr7iT z8J6`c(oBVzQpBHeLZu3fteI4LLO#8ZF=^lEo9$~iLQ>HlSQuwRU z+m*s!jeg{p(N0C$tYDXNITOBGiv2k{DS?wUSxIr6%w;9Ta59^vDco%h`in$PeOioZ zo@fviX_>M!Kzv_}C;UFFTYmFX`?R84OvM$O;ud4XTu?WIQl!;Ns)@lE5`0iLh(q_~ zw-TSPt75CRG|KU@o9#YXm16tENn41oA3Vu9a#BMsX?2dwT3Zt_N0p_EOyMHSqfWLn zmF@8}4r0o1)s+xE5x#&)uACQkVC{mY?E30&y&k{1*InZv);yP?>TgP9xjc4`3yIs+k z-v1{S!*?=Vbv^68?<~Vj7Age}`{!FlIImn?5sZuVi?O|= zYrf3oZ)tkjn%F+8WfR7{g}u)DcdP5vdRpGfT&qYQC~xdP9Ux@`4A=6R!j0A>C*5mMr+7ZZNBiiTW6M*`TrVl!jeG5OR=c;C=o~xO*_yt9?r5}X zjv8uPPwNFmD)#BIzv;fDe2bdv--s)(S;t&eQ_s0e2{+;)!3-V7USc}aZFq{~70Gdh zl&z z7mqm*5wG|-n2Rw!!LW~cg?;p}jO$V|u;zu&J& z#}&sLOoW9DTD!)I*LW$Pw!& zCe|2(ev`4*(RM1!)e7&(TB&bkpwBu3#KhTIv|6D1f1-Met`lUV&yH`7!b;GU&Qe-W z^8(~o*JY3o{nWM8H?cWfJKsvL1~A|7bJ6~)cjz73mr>=?Vh)MzFX9>Ng}(<_F~P_~ zwsRqqNL#da`UHP?hk;D=uHS};>3je?k&Z9oat0kaahe2KO-v#$3IYD|>VEbPT@S6g zx|HJVF~+Vd)|t+!t~qX&R%C{+yo&^WfNnhYfo4HZ;E#G&;GhrW^^!+ zD|TTm=o(zP?h~$9_igGue_8kcJips6&u{&&=C|hZ{8s!o^K1X%m-*fDzcj!1e)wfy znSV1sR@A7wamYL2J@VV7Wc1IHTFtqY}g86*7vf?m&wsJv*K8s0ed^i zxx>#q5@(!9&u~A}c$IoK%Rm*{Iqdxzpz`e+P(pt+PTd&vdrgTpMHwxiURCn!u_hi@ z_b54_5{=xxTVzd#B0b|XM^5SRegmi|^cmKl4T^Hj=+6X2xefx?IAa=kHGpfp@laqe zu27B$r9n}x%Ju6(#eoX-#e<^Sqo*$x_oKXShn|d8c`a6=>{LcYs!}*mQN}j|syw~F z=o7v<7941wBYjlAQn?QG(Q%Z&7||G15=ELu^$X4{-^@30Ic9&dW5HdG*_GCPmps9W zv$OS;)3;j(wI@Vm+Xsolvc23F9g!nj<)>9;QKTV0s-Ic|)E?01`#-(}wGFY^wD zD7OLLXWpT;t^W_aufNQ@;xg~Q{EGJhmAC6xys0f7u%)GBwj4L@Us>Z#y#xC@6Faa7 z7Crtz*dDXBjG5Zsg0n0a=VJ`i+gqvb_rW4DDN=ObczC_kH~v@&`+Tom@Z6So%!F|- z3)^f45#B$WD`M6oY3=sT)HT!Mc^Zoz$nG zwuRai9qbY!RCT^AYIH~0sBXdaqUPlC0!Z%29E{Dt$ET7w>T zesu=wWPHX#*ybWum_YkMn`Bhel8uy_NXdswmCwL9T6#&ZU5_<+ z+bdv>1fHejWq`ZA)SvG3a=<@%HNYKSE%0e?6!1U2I^gfTJn#u`H1OBn7~rGcSl}ysMiequy;7{7Vik) z2JcAV`@N%p@AKvW-|NikDYT&uviNM!+i-0S%Fsq%5h~8t?VMjCU?@sO3iB-jv-Vbx`$rp{{G-#}Zo{6p;_>Z!H z=l>RsS(0?fRU)p=0mK2K9J(@H>Gpc)GeqEXA`!SEwZhI&dqBt3@Py}^HTE}gpNCcJ zL3iY$w}`+%U&Yufx@~Mlx4>Y}%epc|8|a553Uq&zeR*)LwW9lOmm+=mmd4BoT;jb7 zZ(xKt0N)&4c94sPo%PDRBg1V%WS`+|bmM%ojm`}Bb-KR8N0#!fiZYE9p+7uMfu5#Q zKE9crtrLp%{L_doE4tUM+}GO`dkL_cjD>55$J}kC-KR)j0gm`64}CLVZgw-qW3{Yddr6c%By}ogH!)H#ingU8)}-S+|F)eV5^HGd z;8G54Am$WLB)-Q=dRE{Rz6D)hM+7!fUlFOtm@&)qTyHWF&a(P<_anCu&2?<#NHCF* z4jZW}LEH>^Gwnbh6Z)b@DFtpIjKR;16B=txbJ^RQQ_tTLV~ zwT5r|ieQVVoK)Y}0VX!HFLP%&lBe<}mRaq&<%+aV`Pf`tRcZDZN7||V$(D`cC+?e~ z`VK!OYYEL+X~d`GC{Vic7Cl+?d{`aB zf!eG*C2R79v$pZpvsxQQnjOygGWa#d5ctK|Jt$8rI>%S;$8bktnqNh;SNQ^LtB*ql zN|iqcdmNxBeS}Aee22}T&Vdzozc$Qq)V&#F56R?_dG3Q-l@AMg!Ca0Ypb5;A$0~Ut>-Q+RQ{GP9C`PK?p_utg3uB;pE zd5CR8l1CW|DiPFON|xhzJzc4^-|!M3(fCji@VL;A0Xs@ds5?M)(jCxad=Yd4@_jrY z4lp-{@Xr8p2qTOJqyY?o34k~N&3Dk89`$>uZo|`qCV`#m<=`Pr!5K(HCLl??1{&$; zrAZM#?DsuaM|`jiJrnF&)vLd|M~i;>=yzw&IJ8lZy}oBOC>^LfdMt?csqg;Ar5rWx 
z4@uF=NKkaP8pRAs2TBPIv-4`d>w73gy}pW`p}0z^?U&LbV>?2FAOZE|gq~zjv7jdR zFenot!KO=$AtWtU2yneZjI_1}B^$0|mUDWbt^4;~xO5xU$q)Ki1MRa`%~MdHQuPDl zSV51qxKrV9La4@Cu!KNk(Y;FBmtN7%MA}&~4smZt+DU!W%Xy3>#BiDI`~g;EjCx#s zFqCLFiCWC}5AZ)JXZTD$Torj-rSZQGxedq@VhleA==!s0za0P5oC|ove)tA*N+GFA z^6jb(_OO|p=Bqsc$oU6u!@RN|M=1f$d3=Pre zZ@}(K3z;x`#;{SlXsY^HhrVOAFB!$|VIefzPB+ z{Jg>fzoaNqu#c{KsrsCr4uzl7t++}hm=E2nSN+bEmhf8A=R(&WJ`>i;(4;{eM^DW^ zTUq3PLQ)%z7qB+SW#6rDVv5pM)s-mHS=DB94vn2NWvogt5>rU}RE@7`-|%|D=H+mR z^M6zrHP;ok^Q_3J&$2nfm##IGTNn43G;9{G^#ZhL$Mv(p3hB*;&+Tu(RqJv-YGlF=!p? z=n7<_jmF1CJtU{wKLj;;;tEhZ=TAkAp1M5Le(>v2qj9nsqgjkjaC{Mnh72@jPwz=< z(V{KJ@o|7d8SVIMfbK1-3sjwqHF_QII@B{!K=^XNcEI-l>i@86Bz93|9otuh^QYml z`sN|3H4RBm_NA7kxOZ@Uhf~T@(Uv+rB$f9im!*`ZU?oeCDS(f+A|y5R#W=FW4EJM9 z-{Azia9-=0elZbx`D#S{ELLl6-)=V-jN^FAB|mmHY;ac}{3evjy|#P#G(n>QO{UN@ z8&e)*pK*R}rP--dcXMi=&Mv5a;a2GO_`dRspW4jf_%fde8UPPq9c33^g5OE=-vo6A z??m2BL=8N#f6C^=6KohiKYSf+W@hmWPMn_|lgEqkM{1jNWygpuNVC5wwKiCt6xSj5 zF|O|ma4@JmF-GGM&cnBLG0v~tpNFsE{S17H5~IUr+9*#MVStA#j3BTd#Urvo+ecJ% z6Kk7XJ|Yw6Fr${_^?j95tHztIjM`-E$zK29(xc9!?&;mT<{WiWk7h@8x_f%i!=yWD zDWGhV$yiwRwCTu8flgZ<^h}@UiZ0#ZVvIYSHba}N$q7kO2dEtxGnVkqQ-cY927wUa z%eUfY^A6-nN_D1;>S(;l<(#q36sOJQa8g6gYc?S(Op+Xy+8pwkm-#>1}!AndSlgcT5pV6PVNosXxI=hU7zVR zUeI9A{KnQo_GapHVib@oEz{@%_`&N3J?=7MZ=e3I%<r*%~p;hc~OLA96=T9z@=1{sTH5`jA!gtu#h~bciED z(n@8rs^LS@tNlf)h7U=5`X@pUQvEtwweW>FgQI#2FKhU5&ZL z9sSgD(>8bih)CP0UTV2%o2i#YK5!WL-PupI{JK7>+k27a%(7FH}l*DPbKNtz%;1uS2NrK zzWAW_p5P4A_$Sz$RL)({*D#&a;0k>fuMOZ8Y@H2G8O{M-gvi5$T)8<>e9vlotb3Hfm>vKtApQsluBA`MZ7B{jYB%|z_iC*&+gRypD~Sidnn z(E6nr?gy+xMP2)|R&9H_{XQ$(o{q8qQ0U6h_w2&?YHM3pEVs>?f(%KTZ?v<~iCj=W z&PN}gS{ z^W?)mZ*-|fG{Ao{$+}X5iTZW9a(;_J)Rxj+O0NuhOW%6(cgnCY@r^`2fM$3H`s(DU z$fyLF6hWUE^sVpVyRd=&_-1E%9QG114uR*XG4)G7C>qnQv`MAP$bLk&BkPu~-CZ}a z?3nFT-J#mu&ZTRQSRN5|NB(Zn9ci)*ZQpMh*S^m(p*_wp^JO+o44dm+tR;OW^m)+dGY5U-<|iHV*T&{$ ziHS#I^HN}8l7l@YIr1kGmr5CcpBG>~^xeBHOKktPVT1KWAA|F| zdH>rdHd7@Ts~hs%nG8;Q8rL1Fj|U#}L>*x!bCziJE~#0=XZ&QryERqrsFU9|%(QDFW9U1t z>i%M6;9dqVyZx8@o=1I#JucEO&UiL3smxkxtr%}kspHI*bz}s-WLcUYjeK_hBXx)C zqL7I~KD)ogs$Vg81^q_uC;zt%<0Iq88}sZRfn%XPD$){VJcxS!6?bew-B>;;a(xnJ zvx__s$U^kP)>-RVmFr=K#ySMwAy+<2TBmGm+|0t=)HEmvUyDm@H zuE|UFA8kmt=j7)2a*UdjM;nINp9cLjvT`&ln%H`@A;tbMS{^oDz&Cfics}qw>WH zZ&ID-u_G{parN+ex?dNeZ3*CRfPS4aEDO*8`eDEph8WJFd<`Y_g6SWl?u2lDkDmGD znJ46OkHrmJf-|QR>8Uejzvbf!9@LYOuj4yU3r-_W)%0VvH2%I(9;rVfXJJ)ux`Z-c zQ%KJybM=`>9*N2{=1f<~?a+6(*52kmB&RhLYOKCm-1nO& zQR#;>mryO*)@8G?`2O;b9_A@#$8Gk5T`}p}V4}7ZXW7xmjuC=eJo9jZXni-bn1h4w zCT9B%!E?5D@2a96^muL@a%c9;&vg9Z*`+u?nZG_O?#vH$+=Z*FRv#A2i1E=ihh*Mz z7%lnL#l9J38Lpk~G#!;LA!LTsd?xH4IM`9zQMz#o-If*Hw8Il;n%#SrklcCgOs=cl zgpHaF}*~9dE9NOopNZXFfKEbDPVcW{XZV&7|zFFtsp_v*k=hYM`7Fhybuo?QL7vH9N zEWR1JlC>F4Y+e*{*x@ftG>E7DZ*N$OH`$>68aj_J7G;~!VVreBZtwse!MAnJkfgg5 zl0NDQNw32*E9xn{YytYQTgY%Sq7ZrOW3>$O*a9^DLw&RdFCME6*YEFRCd|OM%VZ4i zGmk#)KZ-AriRF9$4mqxl8F$iO=w=O>$e|RvCt(cDhJT6uN_9Kc^@eA3gzj6sb2&is zZ?D2HI3ee^E7C>et+LHimCW)i z#|ZcHoqW{O){94UFXaw14DFYG-%HHAsUThZ;%8^Hh5b@?Uy4K9*l7KyKWZmq`R-_L zdg=Mr0IWsiw_v>)EO(8BGAi?sAhn~;zAdprG(j2PnZ7C1bA)z^SRS>;p64KuE* zEM|#eHV1vdDF0hOsI~w6o1?{d{DAiXtx<$ejdbaQ4PFR9?i7E7xP}w{Ogz59PawPt z(3*|!`Vk{)lkoooD31;y{8W@vN8tYm%=kVZ^rM-m55@l;;5+%ZD6cRp@=O-~Hv#$D zF|qj0AFu&%4lpz^%xg2o&H%qpP<{rWyxs?V0Emh3Vj{d+{sph}xc?b10$v`-MOo1~ z&V$^OFvs5kngAOx$8W)Zj~;#%=);jYmZIMXz)|!WkV^wzPeyprC0vbp&x0I00Diz6 z)O%3Q1TVT?{s^ysHOJM<>gt!Tsa;vSd|Ayx@LE-~a#iiJ#RcOF3EBO^jvqh(Hs1PJ<+>Lw)F2>(=h0b8p7mrS!h4u zJbq;Jj1S76{JiF=rx!l(N5~uXGzO+}%<7d@b*^O#U1O`4-*Ja)tgAML&>y{?N;q<3 z>8!bxR<%h*9^^n6l0%9~4Os*6s!^6gyj3Xc$s!0zTS~}Wv=(E;EOHlGXxmj}JedTM 
ztH~Jjx*GH%P!mv2!Zq490oTTZ8V{QGpNR4*jBsJ{HR!b-{Bo!i43%Nv_ge|W?QnO{ zPm6w=C;|Atk(vB|mKwJ=0Hz(xm@|D&(7X4+xesXiei&!ibl0b`CmKyZO_4sW_*5G7 zaQ=5s9O%Do#_&;3RPNh);OiyFFCBTY;KyyPZ(OtVlYdtI_(1j1+78VHk1ucLj|cl4 zeW^Lm)b=jBr*UZaOB>ebk;0*OJyVm|ojl}z^6Idc?i+Q_v2z85k2jjAPX)Kf8Mhp<{&VlDFQ0i!en_}!)?Mc|-;&lg=8q4}sPTsghw zv+kA6LmH-J914vcRkSnvz0h9%{@NYQUsgOV&KY})bMd9Ci?{j@E~6TZuEjqx?CI%$ z_-@~GM+Plke|^cTn-8qHYI9)Lb2t9`nVPPbiwa-3>Af3o`(niD67vyv^~%#z7j8fK z@tfw%6~9ZAzwi2P&8cSPnv5r|o&C*dqgZ7e5bZasI)rc(tEi;F)x)%wA!L)z{>_TJw*rsTe^ z-8W^GIsNTx%JzFM?i{t?v7}R5+rHd(>0$QIe`>pKUswG1;-lo1=coSeBlFaCQ@?t= zV?qBD&$Wyh{ratYQcXw951L!8AKzfTHml}NgI9d8WzK|&iT5!{MTw4?*`NNc^3PL- m3ODtXC}aORZO2cJqaUx%i`w6Ejf<{D47Us?ei=46A^!n&%z@wl literal 0 HcmV?d00001 diff --git a/panda/tests/hitl/reset_jungles.py b/panda/tests/hitl/reset_jungles.py new file mode 100644 index 0000000..fb94673 --- /dev/null +++ b/panda/tests/hitl/reset_jungles.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +import concurrent.futures + +from panda import PandaJungle, PandaJungleDFU, McuType +from panda.tests.libs.resetter import Resetter + +SERIALS = {'180019001451313236343430', '1d0017000c50435635333720'} + +def recover(s): + with PandaJungleDFU(s) as pd: + pd.recover() + +def flash(s): + with PandaJungle(s) as p: + p.flash() + return p.get_mcu_type() + +# Reset + flash all CI hardware to get it into a consistent state +# * port 1: jungles-under-test +# * port 2: USB hubs +# * port 3: HITL pandas and their jungles +if __name__ == "__main__": + with Resetter() as r: + # everything off + for i in range(1, 4): + r.enable_power(i, 0) + r.cycle_power(ports=[1, 2], dfu=True) + + dfu_serials = PandaJungleDFU.list() + print(len(dfu_serials), len(SERIALS)) + assert len(dfu_serials) == len(SERIALS) + + with concurrent.futures.ProcessPoolExecutor(max_workers=len(dfu_serials)) as exc: + list(exc.map(recover, dfu_serials, timeout=30)) + + # power cycle for H7 bootloader bug + r.cycle_power(ports=[1, 2]) + + serials = PandaJungle.list() + assert set(PandaJungle.list()) >= SERIALS + mcu_types = list(exc.map(flash, SERIALS, timeout=20)) + assert set(mcu_types) == {McuType.F4, McuType.H7} diff --git a/panda/tests/hitl/run_parallel_tests.sh b/panda/tests/hitl/run_parallel_tests.sh new file mode 100644 index 0000000..b6b79d9 --- /dev/null +++ b/panda/tests/hitl/run_parallel_tests.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -e + +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)" +cd $DIR + +# n = number of pandas tested +PARALLEL=1 pytest --durations=0 *.py -n 5 --dist loadgroup -x diff --git a/panda/tests/hitl/run_serial_tests.sh b/panda/tests/hitl/run_serial_tests.sh new file mode 100644 index 0000000..31270f0 --- /dev/null +++ b/panda/tests/hitl/run_serial_tests.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)" +cd $DIR + +NON_PARALLEL=1 pytest --durations=0 *.py -x diff --git a/panda/tests/ir_test.py b/panda/tests/ir_test.py new file mode 100644 index 0000000..e41decf --- /dev/null +++ b/panda/tests/ir_test.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +import time + +from panda import Panda + +power = 0 +if __name__ == "__main__": + p = Panda() + while True: + p.set_ir_power(power) + print("Power: ", str(power)) + time.sleep(1) + power += 10 + power %= 100 diff --git a/panda/tests/libpanda/SConscript b/panda/tests/libpanda/SConscript new file mode 100644 index 0000000..cc08907 --- /dev/null +++ b/panda/tests/libpanda/SConscript @@ -0,0 +1,42 @@ +import platform + +CC = 'gcc' +system = platform.system() +if system == 
'Darwin': + # gcc installed by homebrew has version suffix (e.g. gcc-12) in order to be + # distinguishable from system one - which acts as a symlink to clang + CC += '-13' + +env = Environment( + CC=CC, + CFLAGS=[ + '-nostdlib', + '-fno-builtin', + '-std=gnu11', + '-Wfatal-errors', + '-Wno-pointer-to-int-cast', + ], + CPPPATH=[".", "../../board/"], +) +if system == "Darwin": + env.PrependENVPath('PATH', '/opt/homebrew/bin') + +if GetOption('ubsan'): + flags = [ + "-fsanitize=undefined", + "-fno-sanitize-recover=undefined", + ] + env['CFLAGS'] += flags + env['LINKFLAGS'] += flags + +panda = env.SharedObject("panda.os", "panda.c") +libpanda = env.SharedLibrary("libpanda.so", [panda]) + +if GetOption('coverage'): + env.Append( + CFLAGS=["-fprofile-arcs", "-ftest-coverage", "-fprofile-abs-path",], + LIBS=["gcov"], + ) + # GCC note file is generated by compiler, ensure we build it, and allow scons to clean it up + AlwaysBuild(panda) + env.SideEffect("panda.gcno", panda) diff --git a/panda/tests/libpanda/libpanda_py.py b/panda/tests/libpanda/libpanda_py.py new file mode 100644 index 0000000..8f876c5 --- /dev/null +++ b/panda/tests/libpanda/libpanda_py.py @@ -0,0 +1,98 @@ +import os +from cffi import FFI +from typing import Any, Protocol + +from panda import LEN_TO_DLC +from panda.tests.libpanda.safety_helpers import PandaSafety, setup_safety_helpers + +libpanda_dir = os.path.dirname(os.path.abspath(__file__)) +libpanda_fn = os.path.join(libpanda_dir, "libpanda.so") + +ffi = FFI() + +ffi.cdef(""" +typedef struct { + unsigned char reserved : 1; + unsigned char bus : 3; + unsigned char data_len_code : 4; + unsigned char rejected : 1; + unsigned char returned : 1; + unsigned char extended : 1; + unsigned int addr : 29; + unsigned char checksum; + unsigned char data[64]; +} CANPacket_t; +""", packed=True) + +ffi.cdef(""" +bool safety_rx_hook(CANPacket_t *to_send); +bool safety_tx_hook(CANPacket_t *to_push); +int safety_fwd_hook(int bus_num, int addr); +int set_safety_hooks(uint16_t mode, uint16_t param); +""") + +ffi.cdef(""" +typedef struct { + volatile uint32_t w_ptr; + volatile uint32_t r_ptr; + uint32_t fifo_size; + CANPacket_t *elems; +} can_ring; + +extern can_ring *rx_q; +extern can_ring *txgmlan_q; +extern can_ring *tx1_q; +extern can_ring *tx2_q; +extern can_ring *tx3_q; + +bool can_pop(can_ring *q, CANPacket_t *elem); +bool can_push(can_ring *q, CANPacket_t *elem); +void can_set_checksum(CANPacket_t *packet); +int comms_can_read(uint8_t *data, uint32_t max_len); +void comms_can_write(uint8_t *data, uint32_t len); +void comms_can_reset(void); +uint32_t can_slots_empty(can_ring *q); +""") + +setup_safety_helpers(ffi) + +class CANPacket: + reserved: int + bus: int + data_len_code: int + rejected: int + returned: int + extended: int + addr: int + data: list[int] + +class Panda(PandaSafety, Protocol): + # CAN + tx1_q: Any + tx2_q: Any + tx3_q: Any + txgmlan_q: Any + def can_set_checksum(self, p: CANPacket) -> None: ... + + # safety + def safety_rx_hook(self, to_send: CANPacket) -> int: ... + def safety_tx_hook(self, to_push: CANPacket) -> int: ... + def safety_fwd_hook(self, bus_num: int, addr: int) -> int: ... + def set_safety_hooks(self, mode: int, param: int) -> int: ... 
+ + +libpanda: Panda = ffi.dlopen(libpanda_fn) + + +# helpers + +def make_CANPacket(addr: int, bus: int, dat): + ret = ffi.new('CANPacket_t *') + ret[0].extended = 1 if addr >= 0x800 else 0 + ret[0].addr = addr + ret[0].data_len_code = LEN_TO_DLC[len(dat)] + ret[0].bus = bus + ret[0].data = bytes(dat) + libpanda.can_set_checksum(ret) + + return ret diff --git a/panda/tests/libpanda/panda.c new file mode 100644 index 0000000..8efb6de --- /dev/null +++ b/panda/tests/libpanda/panda.c @@ -0,0 +1,33 @@ +#include "fake_stm.h" +#include "config.h" +#include "can_definitions.h" + +bool bitbang_gmlan(CANPacket_t *to_bang) { return true; } +bool can_init(uint8_t can_number) { return true; } +void process_can(uint8_t can_number) { } +//int safety_tx_hook(CANPacket_t *to_send) { return 1; } + +typedef struct harness_configuration harness_configuration; +void refresh_can_tx_slots_available(void); +void can_tx_comms_resume_usb(void) { }; +void can_tx_comms_resume_spi(void) { }; + +#include "health.h" +#include "faults.h" +#include "libc.h" +#include "boards/board_declarations.h" +#include "safety.h" +#include "main_declarations.h" +#include "drivers/can_common.h" + +can_ring *rx_q = &can_rx_q; +can_ring *txgmlan_q = &can_txgmlan_q; +can_ring *tx1_q = &can_tx1_q; +can_ring *tx2_q = &can_tx2_q; +can_ring *tx3_q = &can_tx3_q; + +#include "comms_definitions.h" +#include "can_comms.h" + +// libpanda stuff +#include "safety_helpers.h" diff --git a/panda/tests/libpanda/safety_helpers.h new file mode 100644 index 0000000..074463d --- /dev/null +++ b/panda/tests/libpanda/safety_helpers.h @@ -0,0 +1,195 @@ +void safety_tick_current_safety_config() { + safety_tick(&current_safety_config); +} + +bool safety_config_valid() { + if (current_safety_config.rx_checks_len <= 0) { + printf("missing RX checks\n"); + return false; + } + + for (int i = 0; i < current_safety_config.rx_checks_len; i++) { + const RxCheck addr = current_safety_config.rx_checks[i]; + bool valid = addr.status.msg_seen && !addr.status.lagging && addr.status.valid_checksum && (addr.status.wrong_counters < MAX_WRONG_COUNTERS) && addr.status.valid_quality_flag; + if (!valid) { + // printf("i %d seen %d lagging %d valid checksum %d wrong counters %d valid quality flag %d\n", i, addr.status.msg_seen, addr.status.lagging, addr.status.valid_checksum, addr.status.wrong_counters, addr.status.valid_quality_flag); + return false; + } + } + return true; +} + +void set_controls_allowed(bool c){ + controls_allowed = c; +} + +void set_alternative_experience(int mode){ + alternative_experience = mode; +} + +void set_relay_malfunction(bool c){ + relay_malfunction = c; +} + +bool get_controls_allowed(void){ + return controls_allowed; +} + +int get_alternative_experience(void){ + return alternative_experience; +} + +bool get_relay_malfunction(void){ + return relay_malfunction; +} + +int get_gas_interceptor_prev(void){ + return gas_interceptor_prev; +} + +bool get_gas_pressed_prev(void){ + return gas_pressed_prev; +} + +bool get_brake_pressed_prev(void){ + return brake_pressed_prev; +} + +bool get_regen_braking_prev(void){ + return regen_braking_prev; +} + +bool get_cruise_engaged_prev(void){ + return cruise_engaged_prev; +} + +void set_cruise_engaged_prev(bool engaged){ + cruise_engaged_prev = engaged; +} + +bool get_vehicle_moving(void){ + return vehicle_moving; +} + +bool get_acc_main_on(void){ + return acc_main_on; +} + +int get_vehicle_speed_min(void){ + return vehicle_speed.min; +} + +int 
get_vehicle_speed_max(void){ + return vehicle_speed.max; +} + +int get_vehicle_speed_last(void){ + return vehicle_speed.values[0]; +} + +int get_current_safety_mode(void){ + return current_safety_mode; +} + +int get_current_safety_param(void){ + return current_safety_param; +} + +int get_hw_type(void){ + return hw_type; +} + +void set_timer(uint32_t t){ + timer.CNT = t; +} + +void set_torque_meas(int min, int max){ + torque_meas.min = min; + torque_meas.max = max; +} + +int get_torque_meas_min(void){ + return torque_meas.min; +} + +int get_torque_meas_max(void){ + return torque_meas.max; +} + +void set_torque_driver(int min, int max){ + torque_driver.min = min; + torque_driver.max = max; +} + +int get_torque_driver_min(void){ + return torque_driver.min; +} + +int get_torque_driver_max(void){ + return torque_driver.max; +} + +void set_rt_torque_last(int t){ + rt_torque_last = t; +} + +void set_desired_torque_last(int t){ + desired_torque_last = t; +} + +void set_desired_angle_last(int t){ + desired_angle_last = t; +} + +int get_desired_angle_last(void){ + return desired_angle_last; +} + +void set_angle_meas(int min, int max){ + angle_meas.min = min; + angle_meas.max = max; +} + +int get_angle_meas_min(void){ + return angle_meas.min; +} + +int get_angle_meas_max(void){ + return angle_meas.max; +} + + +// ***** car specific helpers ***** + +void set_honda_alt_brake_msg(bool c){ + honda_alt_brake_msg = c; +} + +void set_honda_bosch_long(bool c){ + honda_bosch_long = c; +} + +int get_honda_hw(void) { + return honda_hw; +} + +void set_honda_fwd_brake(bool c){ + honda_fwd_brake = c; +} + +bool get_honda_fwd_brake(void){ + return honda_fwd_brake; +} + +void init_tests(void){ + // get HW_TYPE from env variable set in test.sh + if (getenv("HW_TYPE")) { + hw_type = atoi(getenv("HW_TYPE")); + } + safety_mode_cnt = 2U; // avoid ignoring relay_malfunction logic + alternative_experience = 0; + set_timer(0); + ts_steer_req_mismatch_last = 0; + valid_steer_req_count = 0; + invalid_steer_req_count = 0; +} diff --git a/panda/tests/libpanda/safety_helpers.py b/panda/tests/libpanda/safety_helpers.py new file mode 100644 index 0000000..28f3349 --- /dev/null +++ b/panda/tests/libpanda/safety_helpers.py @@ -0,0 +1,106 @@ +# panda safety helpers, from safety_helpers.c +from typing import Protocol + +def setup_safety_helpers(ffi): + ffi.cdef(""" + void set_controls_allowed(bool c); + bool get_controls_allowed(void); + bool get_longitudinal_allowed(void); + void set_alternative_experience(int mode); + int get_alternative_experience(void); + void set_relay_malfunction(bool c); + bool get_relay_malfunction(void); + int get_gas_interceptor_prev(void); + bool get_gas_pressed_prev(void); + bool get_brake_pressed_prev(void); + bool get_regen_braking_prev(void); + bool get_acc_main_on(void); + int get_vehicle_speed_min(void); + int get_vehicle_speed_max(void); + int get_vehicle_speed_last(void); + int get_current_safety_mode(void); + int get_current_safety_param(void); + + void set_torque_meas(int min, int max); + int get_torque_meas_min(void); + int get_torque_meas_max(void); + void set_torque_driver(int min, int max); + int get_torque_driver_min(void); + int get_torque_driver_max(void); + void set_desired_torque_last(int t); + void set_rt_torque_last(int t); + void set_desired_angle_last(int t); + int get_desired_angle_last(); + void set_angle_meas(int min, int max); + int get_angle_meas_min(void); + int get_angle_meas_max(void); + + bool get_cruise_engaged_prev(void); + void set_cruise_engaged_prev(bool engaged); + bool 
get_vehicle_moving(void); + int get_hw_type(void); + void set_timer(uint32_t t); + + void safety_tick_current_safety_config(); + bool safety_config_valid(); + + void init_tests(void); + + void set_honda_fwd_brake(bool c); + bool get_honda_fwd_brake(void); + void set_honda_alt_brake_msg(bool c); + void set_honda_bosch_long(bool c); + int get_honda_hw(void); + """) + +class PandaSafety(Protocol): + def set_controls_allowed(self, c: bool) -> None: ... + def get_controls_allowed(self) -> bool: ... + def get_longitudinal_allowed(self) -> bool: ... + def set_alternative_experience(self, mode: int) -> None: ... + def get_alternative_experience(self) -> int: ... + def set_relay_malfunction(self, c: bool) -> None: ... + def get_relay_malfunction(self) -> bool: ... + def get_gas_interceptor_prev(self) -> int: ... + def get_gas_pressed_prev(self) -> bool: ... + def get_brake_pressed_prev(self) -> bool: ... + def get_regen_braking_prev(self) -> bool: ... + def get_acc_main_on(self) -> bool: ... + def get_vehicle_speed_min(self) -> int: ... + def get_vehicle_speed_max(self) -> int: ... + def get_vehicle_speed_last(self) -> int: ... + def get_current_safety_mode(self) -> int: ... + def get_current_safety_param(self) -> int: ... + + def set_torque_meas(self, min: int, max: int) -> None: ... # noqa: A002 + def get_torque_meas_min(self) -> int: ... + def get_torque_meas_max(self) -> int: ... + def set_torque_driver(self, min: int, max: int) -> None: ... # noqa: A002 + def get_torque_driver_min(self) -> int: ... + def get_torque_driver_max(self) -> int: ... + def set_desired_torque_last(self, t: int) -> None: ... + def set_rt_torque_last(self, t: int) -> None: ... + def set_desired_angle_last(self, t: int) -> None: ... + def get_desired_angle_last(self) -> int: ... + def set_angle_meas(self, min: int, max: int) -> None: ... # noqa: A002 + def get_angle_meas_min(self) -> int: ... + def get_angle_meas_max(self) -> int: ... + + def get_cruise_engaged_prev(self) -> bool: ... + def set_cruise_engaged_prev(self, enabled: bool) -> None: ... + def get_vehicle_moving(self) -> bool: ... + def get_hw_type(self) -> int: ... + def set_timer(self, t: int) -> None: ... + + def safety_tick_current_safety_config(self) -> None: ... + def safety_config_valid(self) -> bool: ... + + def init_tests(self) -> None: ... + + def set_honda_fwd_brake(self, c: bool) -> None: ... + def get_honda_fwd_brake(self) -> bool: ... + def set_honda_alt_brake_msg(self, c: bool) -> None: ... + def set_honda_bosch_long(self, c: bool) -> None: ... + def get_honda_hw(self) -> int: ... 
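# A minimal illustrative sketch (not part of this patch's test code) of driving the
# PandaSafety helpers above through the cffi handle exposed by libpanda_py. It assumes
# libpanda.so has already been built by the SConscript in this directory; the mode,
# address, and bus values below are placeholders, not the values used by the real
# safety tests in panda/tests/safety/.
if __name__ == "__main__":
  from panda.tests.libpanda import libpanda_py

  safety = libpanda_py.libpanda  # object satisfying the PandaSafety protocol above
  safety.set_safety_hooks(0, 0)  # placeholder mode/param
  safety.init_tests()
  safety.set_controls_allowed(True)
  msg = libpanda_py.make_CANPacket(0x123, 0, b"\x00" * 8)  # hypothetical addr/bus/data
  print(safety.safety_tx_hook(msg), safety.get_controls_allowed())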
+ + diff --git a/panda/tests/libs/resetter.py b/panda/tests/libs/resetter.py new file mode 100644 index 0000000..3868bde --- /dev/null +++ b/panda/tests/libs/resetter.py @@ -0,0 +1,57 @@ +import time +import usb1 + + +class Resetter(): + def __init__(self): + self._handle = None + self.connect() + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def close(self): + self._handle.close() + self._context.close() + self._handle = None + + def connect(self): + if self._handle: + self.close() + + self._handle = None + + self._context = usb1.USBContext() + self._context.open() + for device in self._context.getDeviceList(skip_on_error=True): + if device.getVendorID() == 0xbbaa and device.getProductID() == 0xddc0: + try: + self._handle = device.open() + self._handle.claimInterface(0) + break + except Exception as e: + print(e) + assert self._handle + + def enable_power(self, port, enabled): + self._handle.controlWrite((usb1.ENDPOINT_OUT | usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE), 0xff, port, enabled, b'') + + def enable_boot(self, enabled): + self._handle.controlWrite((usb1.ENDPOINT_OUT | usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE), 0xff, 0, enabled, b'') + + def cycle_power(self, delay=5, dfu=False, ports=None): + if ports is None: + ports = [1, 2, 3] + + self.enable_boot(dfu) + for port in ports: + self.enable_power(port, False) + time.sleep(0.5) + + for port in ports: + self.enable_power(port, True) + time.sleep(delay) + self.enable_boot(False) diff --git a/panda/tests/loopback_test.py b/panda/tests/loopback_test.py new file mode 100644 index 0000000..d4f8beb --- /dev/null +++ b/panda/tests/loopback_test.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 + +import os +import time +import random +import argparse +from itertools import permutations + +from panda import Panda + +def get_test_string(): + return b"test" + os.urandom(10) + +def run_test(sleep_duration): + pandas = Panda.list() + print(pandas) + + if len(pandas) < 2: + raise Exception("Minimum two pandas are needed for test") + + run_test_w_pandas(pandas, sleep_duration) + +def run_test_w_pandas(pandas, sleep_duration): + h = [Panda(x) for x in pandas] + print("H", h) + + for hh in h: + hh.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + # test both directions + for ho in permutations(list(range(len(h))), r=2): + print("***************** TESTING", ho) + + panda0, panda1 = h[ho[0]], h[ho[1]] + + # **** test health packet **** + print("health", ho[0], h[ho[0]].health()) + + # **** test can line loopback **** + for bus, gmlan in [(0, False), (1, False), (2, False), (1, True), (2, True)]: + print("\ntest can", bus) + # flush + cans_echo = panda0.can_recv() + cans_loop = panda1.can_recv() + + panda0.set_gmlan(None) + panda1.set_gmlan(None) + + if gmlan is True: + panda0.set_gmlan(bus) + panda1.set_gmlan(bus) + bus = 3 + + # send the characters + at = random.randint(1, 2000) + st = get_test_string()[0:8] + panda0.can_send(at, st, bus) + time.sleep(0.1) + + # check for receive + cans_echo = panda0.can_recv() + cans_loop = panda1.can_recv() + + print("Bus", bus, "echo", cans_echo, "loop", cans_loop) + + assert len(cans_echo) == 1 + assert len(cans_loop) == 1 + + assert cans_echo[0][0] == at + assert cans_loop[0][0] == at + + assert cans_echo[0][2] == st + assert cans_loop[0][2] == st + + assert cans_echo[0][3] == 0x80 | bus + if cans_loop[0][3] != bus: + print("EXPECTED %d GOT %d" % (bus, cans_loop[0][3])) + assert cans_loop[0][3] == bus + + print("CAN pass", bus, ho) + time.sleep(sleep_duration) + +if __name__ == 
"__main__": + parser = argparse.ArgumentParser() + parser.add_argument("-n", type=int, help="Number of test iterations to run") + parser.add_argument("-sleep", type=int, help="Sleep time between tests", default=0) + args = parser.parse_args() + + if args.n is None: + while True: + run_test(sleep_duration=args.sleep) + else: + for _ in range(args.n): + run_test(sleep_duration=args.sleep) diff --git a/panda/tests/message_drop_test.py b/panda/tests/message_drop_test.py new file mode 100644 index 0000000..bf485e4 --- /dev/null +++ b/panda/tests/message_drop_test.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +import os +import usb1 +import time +import struct +import itertools +import threading +from typing import Any + +from panda import Panda + +JUNGLE = "JUNGLE" in os.environ +if JUNGLE: + from panda import PandaJungle + +# Generate unique messages +NUM_MESSAGES_PER_BUS = 10000 +messages = [bytes(struct.pack("Q", i)) for i in range(NUM_MESSAGES_PER_BUS)] +tx_messages = list(itertools.chain.from_iterable([[0xaa, None, msg, 0], [0xaa, None, msg, 1], [0xaa, None, msg, 2]] for msg in messages)) + +def flood_tx(panda): + print('Sending!') + transferred = 0 + while True: + try: + print(f"Sending block {transferred}-{len(tx_messages)}: ", end="") + panda.can_send_many(tx_messages[transferred:], timeout=10) + print("OK") + break + except usb1.USBErrorTimeout as e: + transferred += (e.transferred // 16) + print("timeout, transferred: ", transferred) + + print(f"Done sending {3*NUM_MESSAGES_PER_BUS} messages!") + +if __name__ == "__main__": + serials = Panda.list() + receiver: Panda | PandaJungle + if JUNGLE: + sender = Panda() + receiver = PandaJungle() + else: + if len(serials) != 2: + raise Exception("Connect two pandas to perform this test!") + sender = Panda(serials[0]) + receiver = Panda(serials[1]) + receiver.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + sender.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + # Start transmisson + threading.Thread(target=flood_tx, args=(sender,)).start() + + # Receive as much as we can, and stop when there hasn't been anything for a second + rx: list[Any] = [] + old_len = 0 + last_change = time.monotonic() + while time.monotonic() - last_change < 1: + if old_len < len(rx): + last_change = time.monotonic() + old_len = len(rx) + + rx.extend(receiver.can_recv()) + print(f"Received {len(rx)} messages") + + # Check if we received everything + for bus in range(3): + received_msgs = {bytes(m[2]) for m in filter(lambda m, b=bus: m[3] == b, rx)} # type: ignore + dropped_msgs = set(messages).difference(received_msgs) + print(f"Bus {bus} dropped msgs: {len(list(dropped_msgs))} / {len(messages)}") diff --git a/panda/tests/misra/.gitignore b/panda/tests/misra/.gitignore new file mode 100644 index 0000000..fc9ac22 --- /dev/null +++ b/panda/tests/misra/.gitignore @@ -0,0 +1,5 @@ +*.pdf +*.txt +.output.log +new_table +cppcheck/ diff --git a/panda/tests/misra/coverage_table b/panda/tests/misra/coverage_table new file mode 100644 index 0000000..0395aba --- /dev/null +++ b/panda/tests/misra/coverage_table @@ -0,0 +1,156 @@ +1.1 +1.2 X (Addon) +1.3 X (Cppcheck) +2.1 X (Cppcheck) +2.2 X (Addon) +2.3 X (Addon) +2.4 X (Addon) +2.5 X (Addon) +2.6 X (Cppcheck) +2.7 X (Addon) +3.1 X (Addon) +3.2 X (Addon) +4.1 X (Addon) +4.2 X (Addon) +5.1 X (Addon) +5.2 X (Addon) +5.3 X (Cppcheck) +5.4 X (Addon) +5.5 X (Addon) +5.6 X (Addon) +5.7 X (Addon) +5.8 X (Addon) +5.9 X (Addon) +6.1 X (Addon) +6.2 X (Addon) +7.1 X (Addon) +7.2 X (Addon) +7.3 X (Addon) +7.4 X (Addon) +8.1 X (Addon) +8.2 X (Addon) +8.3 X 
(Cppcheck) +8.4 X (Addon) +8.5 X (Addon) +8.6 X (Addon) +8.7 X (Addon) +8.8 X (Addon) +8.9 X (Addon) +8.10 X (Addon) +8.11 X (Addon) +8.12 X (Addon) +8.13 X (Cppcheck) +8.14 X (Addon) +9.1 X (Cppcheck) +9.2 X (Addon) +9.3 X (Addon) +9.4 X (Addon) +9.5 X (Addon) +10.1 X (Addon) +10.2 X (Addon) +10.3 X (Addon) +10.4 X (Addon) +10.5 X (Addon) +10.6 X (Addon) +10.7 X (Addon) +10.8 X (Addon) +11.1 X (Addon) +11.2 X (Addon) +11.3 X (Addon) +11.4 X (Addon) +11.5 X (Addon) +11.6 X (Addon) +11.7 X (Addon) +11.8 X (Addon) +11.9 X (Addon) +12.1 X (Addon) +12.2 X (Addon) +12.3 X (Addon) +12.4 X (Addon) +13.1 X (Addon) +13.2 X (Cppcheck) +13.3 X (Addon) +13.4 X (Addon) +13.5 X (Addon) +13.6 X (Addon) +14.1 X (Addon) +14.2 X (Addon) +14.3 X (Cppcheck) +14.4 X (Addon) +15.1 X (Addon) +15.2 X (Addon) +15.3 X (Addon) +15.4 X (Addon) +15.5 X (Addon) +15.6 X (Addon) +15.7 X (Addon) +16.1 X (Addon) +16.2 X (Addon) +16.3 X (Addon) +16.4 X (Addon) +16.5 X (Addon) +16.6 X (Addon) +16.7 X (Addon) +17.1 X (Addon) +17.2 X (Addon) +17.3 X (Addon) +17.4 X (Cppcheck) +17.5 X (Cppcheck) +17.6 X (Addon) +17.7 X (Addon) +17.8 X (Addon) +18.1 X (Cppcheck) +18.2 X (Cppcheck) +18.3 X (Cppcheck) +18.4 X (Addon) +18.5 X (Addon) +18.6 X (Cppcheck) +18.7 X (Addon) +18.8 X (Addon) +19.1 X (Cppcheck) +19.2 X (Addon) +20.1 X (Addon) +20.2 X (Addon) +20.3 X (Addon) +20.4 X (Addon) +20.5 X (Addon) +20.6 X (Cppcheck) +20.7 X (Addon) +20.8 X (Addon) +20.9 X (Addon) +20.10 X (Addon) +20.11 X (Addon) +20.12 X (Addon) +20.13 X (Addon) +20.14 X (Addon) +21.1 X (Addon) +21.2 X (Addon) +21.3 X (Addon) +21.4 X (Addon) +21.5 X (Addon) +21.6 X (Addon) +21.7 X (Addon) +21.8 X (Addon) +21.9 X (Addon) +21.10 X (Addon) +21.11 X (Addon) +21.12 X (Addon) +21.13 X (Cppcheck) +21.14 X (Addon) +21.15 X (Addon) +21.16 X (Addon) +21.17 X (Cppcheck) +21.18 X (Cppcheck) +21.19 X (Addon) +21.20 X (Addon) +21.21 X (Addon) +22.1 X (Cppcheck) +22.2 X (Cppcheck) +22.3 X (Cppcheck) +22.4 X (Cppcheck) +22.5 X (Addon) +22.6 X (Cppcheck) +22.7 X (Addon) +22.8 X (Addon) +22.9 X (Addon) +22.10 X (Addon) diff --git a/panda/tests/misra/install.sh b/panda/tests/misra/install.sh new file mode 100644 index 0000000..ecc0b3f --- /dev/null +++ b/panda/tests/misra/install.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +: "${CPPCHECK_DIR:=$DIR/cppcheck/}" + +if [ ! -d "$CPPCHECK_DIR" ]; then + git clone https://github.com/danmar/cppcheck.git $CPPCHECK_DIR +fi + +cd $CPPCHECK_DIR + +VERS="2.13.0" +git fetch --all --tags +git checkout $VERS +git cherry-pick -n f6b538e855f0bacea33c4074664628024ef39dc6 b11b42087ff29569bc3740f5aa07eb6616ea4f63 + +#make clean +make MATCHCOMPILTER=yes CXXFLAGS="-O2" -j8 diff --git a/panda/tests/misra/test_misra.sh b/panda/tests/misra/test_misra.sh new file mode 100644 index 0000000..9fdbea1 --- /dev/null +++ b/panda/tests/misra/test_misra.sh @@ -0,0 +1,61 @@ +#!/bin/bash +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +PANDA_DIR=$(realpath $DIR/../../) + +GREEN="\e[1;32m" +NC='\033[0m' + +: "${CPPCHECK_DIR:=$DIR/cppcheck/}" + +# install cppcheck if missing +if [ -z "${SKIP_BUILD}" ]; then + $DIR/install.sh +fi + +# ensure checked in coverage table is up to date +cd $DIR +python $CPPCHECK_DIR/addons/misra.py -generate-table > new_table +if ! cmp -s new_table coverage_table; then + echo "MISRA coverage table doesn't match. Update and commit:" + echo "mv new_table coverage_table && git add . 
&& git commit -m 'update table'" + exit 1 +fi + +cd $PANDA_DIR +if [ -z "${SKIP_BUILD}" ]; then + scons -j8 +fi + +cppcheck() { + # note that cppcheck build cache results in inconsistent results as of v2.13.0 + OUTPUT=$DIR/.output.log + $CPPCHECK_DIR/cppcheck --force --inline-suppr -I $PANDA_DIR/board/ \ + -I $gcc_inc "$(arm-none-eabi-gcc -print-file-name=include)" \ + --suppressions-list=$DIR/suppressions.txt --suppress=*:*inc/* \ + --suppress=*:*include/* --error-exitcode=2 --check-level=exhaustive \ + --platform=arm32-wchar_t2 \ + "$@" |& tee $OUTPUT + + # cppcheck bug: some MISRA errors won't result in the error exit code, + # so check the output (https://trac.cppcheck.net/ticket/12440#no1) + if grep -e "misra violation" -e "error" -e "style: " $OUTPUT > /dev/null; then + exit 1 + fi +} + +PANDA_OPTS="--enable=all --disable=unusedFunction -DPANDA --addon=misra" + +printf "\n${GREEN}** PANDA F4 CODE **${NC}\n" +cppcheck $PANDA_OPTS -DSTM32F4 -DUID_BASE $PANDA_DIR/board/main.c + +printf "\n${GREEN}** PANDA H7 CODE **${NC}\n" +cppcheck $PANDA_OPTS -DSTM32H7 -DUID_BASE $PANDA_DIR/board/main.c + +# unused needs to run globally +#printf "\n${GREEN}** UNUSED ALL CODE **${NC}\n" +#cppcheck --enable=unusedFunction --quiet $PANDA_DIR/board/ + +printf "\n${GREEN}Success!${NC} took $SECONDS seconds\n" + diff --git a/panda/tests/misra/test_mutation.py b/panda/tests/misra/test_mutation.py new file mode 100644 index 0000000..cc3666f --- /dev/null +++ b/panda/tests/misra/test_mutation.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +import os +import glob +import pytest +import shutil +import subprocess +import tempfile +import random + +HERE = os.path.abspath(os.path.dirname(__file__)) +ROOT = os.path.join(HERE, "../../") + +IGNORED_PATHS = ( + 'board/obj', + 'board/jungle', + 'board/stm32h7/inc', + 'board/stm32fx/inc', + 'board/fake_stm.h', + + # bootstub only files + 'board/flasher.h', + 'board/bootstub.c', + 'board/bootstub_declarations.h', + 'board/stm32fx/llflash.h' +) + +mutations = [ + # default + (None, None, False), + # F4 only + ("board/stm32fx/llbxcan.h", "s/1U/1/g", True), + # H7 only + ("board/stm32h7/llfdcan.h", "s/return ret;/if (true) { return ret; } else { return false; }/g", True), + # general safety + ("board/safety/safety_toyota.h", "s/is_lkas_msg =.*;/is_lkas_msg = addr == 1 || addr == 2;/g", True), +] + +patterns = [ + # misra-c2012-13.3 + "$a void test(int tmp) { int tmp2 = tmp++ + 2; if (tmp2) {;}}", + # misra-c2012-13.4 + "$a int test(int x, int y) { return (x=2) && (y=2); }", + # misra-c2012-13.5 + "$a void test(int tmp) { if (true && tmp++) {;} }", + # misra-c2012-13.6 + "$a void test(int tmp) { if (sizeof(tmp++)) {;} }", + # misra-c2012-14.1 + "$a void test(float len) { for (float j = 0; j < len; j++) {;} }", + # misra-c2012-14.4 + "$a void test(int len) { if (len - 8) {;} }", + # misra-c2012-16.4 + r"$a void test(int temp) {switch (temp) { case 1: ; }}\n", + # misra-c2012-17.8 + "$a void test(int cnt) { for (cnt=0;;cnt++) {;} }", + # misra-c2012-20.4 + r"$a #define auto 1\n", + # misra-c2012-20.5 + r"$a #define TEST 1\n#undef TEST\n", +] + +all_files = glob.glob('board/**', root_dir=ROOT, recursive=True) +files = [f for f in all_files if f.endswith(('.c', '.h')) and not f.startswith(IGNORED_PATHS)] +assert len(files) > 70, all(d in files for d in ('board/main.c', 'board/stm32fx/llbxcan.h', 'board/stm32h7/llfdcan.h', 'board/safety/safety_toyota.h')) + +for p in patterns: + mutations.append((random.choice(files), p, True)) + +@pytest.mark.parametrize("fn, patch, should_fail", 
mutations) +def test_misra_mutation(fn, patch, should_fail): + with tempfile.TemporaryDirectory() as tmp: + shutil.copytree(ROOT, tmp, dirs_exist_ok=True) + + # apply patch + if fn is not None: + r = os.system(f"cd {tmp} && sed -i '{patch}' {fn}") + assert r == 0 + + # run test + r = subprocess.run("tests/misra/test_misra.sh", cwd=tmp, shell=True) + failed = r.returncode != 0 + assert failed == should_fail + +if __name__ == "__main__": + pytest.main([__file__, "-n 8"]) diff --git a/panda/tests/read_flash_spi.py b/panda/tests/read_flash_spi.py new file mode 100644 index 0000000..133062b --- /dev/null +++ b/panda/tests/read_flash_spi.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +from panda import Panda, PandaDFU + +if __name__ == "__main__": + try: + from openpilot.system.hardware import HARDWARE + HARDWARE.recover_internal_panda() + Panda.wait_for_dfu(None, 5) + except Exception: + pass + + p = PandaDFU(None) + cfg = p.get_mcu_type().config + + def readmem(addr, length, fn): + print(f"reading {hex(addr)} {hex(length)} bytes to {fn}") + max_size = 255 + with open(fn, "wb") as f: + to_read = length + while to_read > 0: + l = min(to_read, max_size) + dat = p._handle.read(addr, l) + assert len(dat) == l + f.write(dat) + + to_read -= len(dat) + addr += len(dat) + + addr = cfg.bootstub_address + for i, sector_size in enumerate(cfg.sector_sizes): + readmem(addr, sector_size, f"sector_{i}.bin") + addr += sector_size diff --git a/panda/tests/read_st_flash.sh b/panda/tests/read_st_flash.sh new file mode 100644 index 0000000..ffcfd7b --- /dev/null +++ b/panda/tests/read_st_flash.sh @@ -0,0 +1,6 @@ +#!/bin/bash +rm -f /tmp/dump_bootstub +rm -f /tmp/dump_main +dfu-util -a 0 -s 0x08000000 -U /tmp/dump_bootstub +dfu-util -a 0 -s 0x08004000 -U /tmp/dump_main + diff --git a/panda/tests/read_winusb_descriptors.py b/panda/tests/read_winusb_descriptors.py new file mode 100644 index 0000000..5d311c9 --- /dev/null +++ b/panda/tests/read_winusb_descriptors.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# type: ignore +from panda import Panda +from hexdump import hexdump + +DEBUG = False + +if __name__ == "__main__": + p = Panda() + + length = p._handle.controlRead(Panda.REQUEST_IN, 0x06, 3 << 8 | 238, 0, 1) + print('Microsoft OS String Descriptor') + dat = p._handle.controlRead(Panda.REQUEST_IN, 0x06, 3 << 8 | 238, 0, length[0]) + if DEBUG: + print(f'LEN: {hex(length[0])}') + hexdump("".join(map(chr, dat))) + + ms_vendor_code = dat[16] + if DEBUG: + print(f'MS_VENDOR_CODE: {hex(length[0])}') + + print('\nMicrosoft Compatible ID Feature Descriptor') + length = p._handle.controlRead(Panda.REQUEST_IN, ms_vendor_code, 0, 4, 1) + if DEBUG: + print(f'LEN: {hex(length[0])}') + dat = p._handle.controlRead(Panda.REQUEST_IN, ms_vendor_code, 0, 4, length[0]) + hexdump("".join(map(chr, dat))) + + print('\nMicrosoft Extended Properties Feature Descriptor') + length = p._handle.controlRead(Panda.REQUEST_IN, ms_vendor_code, 0, 5, 1) + if DEBUG: + print(f'LEN: {hex(length[0])}') + dat = p._handle.controlRead(Panda.REQUEST_IN, ms_vendor_code, 0, 5, length[0]) + hexdump("".join(map(chr, dat))) diff --git a/panda/tests/reflash_internal_panda.py b/panda/tests/reflash_internal_panda.py new file mode 100644 index 0000000..c2ad9f8 --- /dev/null +++ b/panda/tests/reflash_internal_panda.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +import time +from panda import Panda, PandaDFU + +class GPIO: + STM_RST_N = 124 + STM_BOOT0 = 134 + HUB_RST_N = 30 + + +def gpio_init(pin, output): + with open(f"/sys/class/gpio/gpio{pin}/direction", 'wb') as f: + 
f.write(b"out" if output else b"in") + +def gpio_set(pin, high): + with open(f"/sys/class/gpio/gpio{pin}/value", 'wb') as f: + f.write(b"1" if high else b"0") + + +if __name__ == "__main__": + for pin in (GPIO.STM_RST_N, GPIO.STM_BOOT0, GPIO.HUB_RST_N): + gpio_init(pin, True) + + # reset USB hub + gpio_set(GPIO.HUB_RST_N, 0) + time.sleep(0.5) + gpio_set(GPIO.HUB_RST_N, 1) + + # flash bootstub + print("resetting into DFU") + gpio_set(GPIO.STM_RST_N, 1) + gpio_set(GPIO.STM_BOOT0, 1) + time.sleep(1) + gpio_set(GPIO.STM_RST_N, 0) + gpio_set(GPIO.STM_BOOT0, 0) + time.sleep(1) + + print("flashing bootstub") + PandaDFU(None).recover() + + gpio_set(GPIO.STM_RST_N, 1) + time.sleep(0.5) + gpio_set(GPIO.STM_RST_N, 0) + time.sleep(1) + + print("flashing app") + p = Panda() + assert p.bootstub + p.flash() diff --git a/panda/tests/relay_test.py b/panda/tests/relay_test.py new file mode 100644 index 0000000..68789b1 --- /dev/null +++ b/panda/tests/relay_test.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python +import time +from panda import Panda + +p = Panda() + +while True: + p.set_safety_mode(Panda.SAFETY_TOYOTA) + p.send_heartbeat() + print("ON") + time.sleep(1) + p.set_safety_mode(Panda.SAFETY_NOOUTPUT) + p.send_heartbeat() + print("OFF") + time.sleep(1) + diff --git a/panda/tests/restore_flash_spi.py b/panda/tests/restore_flash_spi.py new file mode 100644 index 0000000..c23b298 --- /dev/null +++ b/panda/tests/restore_flash_spi.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +from panda import Panda, PandaDFU, STBootloaderSPIHandle + +if __name__ == "__main__": + try: + from openpilot.system.hardware import HARDWARE + HARDWARE.recover_internal_panda() + Panda.wait_for_dfu(None, 5) + except Exception: + pass + + p = PandaDFU(None) + assert isinstance(p._handle, STBootloaderSPIHandle) + cfg = p.get_mcu_type().config + + print("restoring from backup...") + addr = cfg.bootstub_address + for i, sector_size in enumerate(cfg.sector_sizes): + print(f"- sector #{i}") + p._handle.erase_sector(i) + with open(f"sector_{i}.bin", "rb") as f: + dat = f.read() + assert len(dat) == sector_size + p._handle.program(addr, dat) + addr += len(dat) + + p.reset() diff --git a/panda/tests/rtc_test.py b/panda/tests/rtc_test.py new file mode 100644 index 0000000..01c9f4d --- /dev/null +++ b/panda/tests/rtc_test.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python +import datetime + +from panda import Panda + +if __name__ == "__main__": + p = Panda() + + p.set_datetime(datetime.datetime.now()) + print(p.get_datetime()) diff --git a/panda/tests/safety/__init__.py b/panda/tests/safety/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/panda/tests/safety/common.py b/panda/tests/safety/common.py new file mode 100644 index 0000000..a3c22df --- /dev/null +++ b/panda/tests/safety/common.py @@ -0,0 +1,1071 @@ +import os +import abc +import unittest +import importlib +import numpy as np +from collections.abc import Callable + +from opendbc.can.packer import CANPacker # pylint: disable=import-error +from panda import ALTERNATIVE_EXPERIENCE +from panda.tests.libpanda import libpanda_py + +MAX_WRONG_COUNTERS = 5 +MAX_SAMPLE_VALS = 6 +VEHICLE_SPEED_FACTOR = 100 + +MessageFunction = Callable[[float], libpanda_py.CANPacket] + +def sign_of(a): + return 1 if a > 0 else -1 + + +def make_msg(bus, addr, length=8, dat=None): + if dat is None: + dat = b'\x00' * length + return libpanda_py.make_CANPacket(addr, bus, dat) + + +class CANPackerPanda(CANPacker): + def make_can_msg_panda(self, name_or_addr, bus, values, fix_checksum=None): + msg = 
self.make_can_msg(name_or_addr, bus, values) + if fix_checksum is not None: + msg = fix_checksum(msg) + addr, _, dat, bus = msg + return libpanda_py.make_CANPacket(addr, bus, dat) + + +def add_regen_tests(cls): + """Dynamically adds regen tests for all user brake tests.""" + + # only rx/user brake tests, not brake command + found_tests = [func for func in dir(cls) if func.startswith("test_") and "user_brake" in func] + assert len(found_tests) >= 3, "Failed to detect known brake tests" + + for test in found_tests: + def _make_regen_test(brake_func): + def _regen_test(self): + # only for safety modes with a regen message + if self._user_regen_msg(0) is None: + raise unittest.SkipTest("Safety mode implements no _user_regen_msg") + + getattr(self, brake_func)(self._user_regen_msg, self.safety.get_regen_braking_prev) + return _regen_test + + setattr(cls, test.replace("brake", "regen"), _make_regen_test(test)) + + return cls + + +class PandaSafetyTestBase(unittest.TestCase): + safety: libpanda_py.Panda + + @classmethod + def setUpClass(cls): + if cls.__name__ == "PandaSafetyTestBase": + cls.safety = None + raise unittest.SkipTest + + def _reset_safety_hooks(self): + self.safety.set_safety_hooks(self.safety.get_current_safety_mode(), + self.safety.get_current_safety_param()) + + def _rx(self, msg): + return self.safety.safety_rx_hook(msg) + + def _tx(self, msg): + return self.safety.safety_tx_hook(msg) + + def _generic_limit_safety_check(self, msg_function: MessageFunction, min_allowed_value: float, max_allowed_value: float, + min_possible_value: float, max_possible_value: float, test_delta: float = 1, inactive_value: float = 0, + msg_allowed = True, additional_setup: Callable[[float], None] | None = None): + """ + Enforces that a signal within a message is only allowed to be sent within a specific range, min_allowed_value -> max_allowed_value. + Tests the range of min_possible_value -> max_possible_value with a delta of test_delta. + Message is also only allowed to be sent when controls_allowed is true, unless the value is equal to inactive_value. + Message is never allowed if msg_allowed is false, for example when stock longitudinal is enabled and you are sending acceleration requests. 
+ additional_setup is used for extra setup before each _tx, ex: for setting the previous torque for rate limits + """ + + # Ensure that we at least test the allowed_value range + self.assertGreater(max_possible_value, max_allowed_value) + self.assertLessEqual(min_possible_value, min_allowed_value) + + for controls_allowed in [False, True]: + # enforce we don't skip over 0 or inactive + for v in np.concatenate((np.arange(min_possible_value, max_possible_value, test_delta), np.array([0, inactive_value]))): + v = round(v, 2) # floats might not hit exact boundary conditions without rounding + self.safety.set_controls_allowed(controls_allowed) + if additional_setup is not None: + additional_setup(v) + should_tx = controls_allowed and min_allowed_value <= v <= max_allowed_value + should_tx = (should_tx or v == inactive_value) and msg_allowed + self.assertEqual(self._tx(msg_function(v)), should_tx, (controls_allowed, should_tx, v)) + + def _common_measurement_test(self, msg_func: Callable, min_value: float, max_value: float, factor: float, + meas_min_func: Callable[[], int], meas_max_func: Callable[[], int]): + """Tests accurate measurement parsing, and that the struct is reset on safety mode init""" + for val in np.arange(min_value, max_value, 0.5): + for i in range(MAX_SAMPLE_VALS): + self.assertTrue(self._rx(msg_func(val + i * 0.1))) + + # assert close by one decimal place + self.assertAlmostEqual(meas_min_func() / factor, val, delta=0.1) + self.assertAlmostEqual(meas_max_func() / factor - 0.5, val, delta=0.1) + + # ensure sample_t is reset on safety init + self._reset_safety_hooks() + self.assertEqual(meas_min_func(), 0) + self.assertEqual(meas_max_func(), 0) + + +class GasInterceptorSafetyTest(PandaSafetyTestBase): + + INTERCEPTOR_THRESHOLD = 0 + + cnt_gas_cmd = 0 + cnt_user_gas = 0 + + packer: CANPackerPanda + + @classmethod + def setUpClass(cls): + if cls.__name__ == "GasInterceptorSafetyTest" or cls.__name__.endswith("Base"): + cls.safety = None + raise unittest.SkipTest + + def _interceptor_gas_cmd(self, gas: int): + values: dict[str, float | int] = {"COUNTER_PEDAL": self.__class__.cnt_gas_cmd & 0xF} + if gas > 0: + values["GAS_COMMAND"] = gas * 255. + values["GAS_COMMAND2"] = gas * 255. 
+ self.__class__.cnt_gas_cmd += 1 + return self.packer.make_can_msg_panda("GAS_COMMAND", 0, values) + + def _interceptor_user_gas(self, gas: int): + values = {"INTERCEPTOR_GAS": gas, "INTERCEPTOR_GAS2": gas, + "COUNTER_PEDAL": self.__class__.cnt_user_gas} + self.__class__.cnt_user_gas += 1 + return self.packer.make_can_msg_panda("GAS_SENSOR", 0, values) + + # Skip non-interceptor user gas tests + def test_prev_gas(self): + pass + + def test_disengage_on_gas(self): + pass + + def test_alternative_experience_no_disengage_on_gas(self): + pass + + def test_prev_gas_interceptor(self): + self._rx(self._interceptor_user_gas(0x0)) + self.assertFalse(self.safety.get_gas_interceptor_prev()) + self._rx(self._interceptor_user_gas(0x1000)) + self.assertTrue(self.safety.get_gas_interceptor_prev()) + self._rx(self._interceptor_user_gas(0x0)) + + def test_disengage_on_gas_interceptor(self): + for g in range(0x1000): + self._rx(self._interceptor_user_gas(0)) + self.safety.set_controls_allowed(True) + self._rx(self._interceptor_user_gas(g)) + remain_enabled = g <= self.INTERCEPTOR_THRESHOLD + self.assertEqual(remain_enabled, self.safety.get_controls_allowed()) + self._rx(self._interceptor_user_gas(0)) + + def test_alternative_experience_no_disengage_on_gas_interceptor(self): + self.safety.set_controls_allowed(True) + self.safety.set_alternative_experience(ALTERNATIVE_EXPERIENCE.DISABLE_DISENGAGE_ON_GAS) + for g in range(0x1000): + self._rx(self._interceptor_user_gas(g)) + # Test we allow lateral, but not longitudinal + self.assertTrue(self.safety.get_controls_allowed()) + self.assertEqual(g <= self.INTERCEPTOR_THRESHOLD, self.safety.get_longitudinal_allowed()) + # Make sure we can re-gain longitudinal actuation + self._rx(self._interceptor_user_gas(0)) + self.assertTrue(self.safety.get_longitudinal_allowed()) + + def test_allow_engage_with_gas_interceptor_pressed(self): + self._rx(self._interceptor_user_gas(0x1000)) + self.safety.set_controls_allowed(1) + self._rx(self._interceptor_user_gas(0x1000)) + self.assertTrue(self.safety.get_controls_allowed()) + self._rx(self._interceptor_user_gas(0)) + + def test_gas_interceptor_safety_check(self): + for gas in np.arange(0, 4000, 100): + for controls_allowed in [True, False]: + self.safety.set_controls_allowed(controls_allowed) + if controls_allowed: + send = True + else: + send = gas == 0 + self.assertEqual(send, self._tx(self._interceptor_gas_cmd(gas))) + + +class LongitudinalAccelSafetyTest(PandaSafetyTestBase, abc.ABC): + + MAX_ACCEL: float = 2.0 + MIN_ACCEL: float = -3.5 + INACTIVE_ACCEL: float = 0.0 + + @classmethod + def setUpClass(cls): + if cls.__name__ == "LongitudinalAccelSafetyTest": + cls.safety = None + raise unittest.SkipTest + + @abc.abstractmethod + def _accel_msg(self, accel: float): + pass + + def test_accel_limits_correct(self): + self.assertGreater(self.MAX_ACCEL, 0) + self.assertLess(self.MIN_ACCEL, 0) + + def test_accel_actuation_limits(self, stock_longitudinal=False): + limits = ((self.MIN_ACCEL, self.MAX_ACCEL, ALTERNATIVE_EXPERIENCE.DEFAULT), + (self.MIN_ACCEL, self.MAX_ACCEL, ALTERNATIVE_EXPERIENCE.RAISE_LONGITUDINAL_LIMITS_TO_ISO_MAX)) + + for min_accel, max_accel, alternative_experience in limits: + # enforce we don't skip over 0 or inactive accel + for accel in np.concatenate((np.arange(min_accel - 1, max_accel + 1, 0.05), [0, self.INACTIVE_ACCEL])): + accel = round(accel, 2) # floats might not hit exact boundary conditions without rounding + for controls_allowed in [True, False]: + self.safety.set_controls_allowed(controls_allowed) + 
self.safety.set_alternative_experience(alternative_experience) + if stock_longitudinal: + should_tx = False + else: + should_tx = controls_allowed and min_accel <= accel <= max_accel + should_tx = should_tx or accel == self.INACTIVE_ACCEL + self.assertEqual(should_tx, self._tx(self._accel_msg(accel))) + + +class LongitudinalGasBrakeSafetyTest(PandaSafetyTestBase, abc.ABC): + + MIN_BRAKE: int = 0 + MAX_BRAKE: int | None = None + MAX_POSSIBLE_BRAKE: int | None = None + + MIN_GAS: int = 0 + MAX_GAS: int | None = None + INACTIVE_GAS = 0 + MAX_POSSIBLE_GAS: int | None = None + + def test_gas_brake_limits_correct(self): + self.assertIsNotNone(self.MAX_POSSIBLE_BRAKE) + self.assertIsNotNone(self.MAX_POSSIBLE_GAS) + + self.assertGreater(self.MAX_BRAKE, self.MIN_BRAKE) + self.assertGreater(self.MAX_GAS, self.MIN_GAS) + + @abc.abstractmethod + def _send_gas_msg(self, gas: int): + pass + + @abc.abstractmethod + def _send_brake_msg(self, brake: int): + pass + + def test_brake_safety_check(self): + self._generic_limit_safety_check(self._send_brake_msg, self.MIN_BRAKE, self.MAX_BRAKE, 0, self.MAX_POSSIBLE_BRAKE, 1) + + def test_gas_safety_check(self): + self._generic_limit_safety_check(self._send_gas_msg, self.MIN_GAS, self.MAX_GAS, 0, self.MAX_POSSIBLE_GAS, 1, self.INACTIVE_GAS) + + +class TorqueSteeringSafetyTestBase(PandaSafetyTestBase, abc.ABC): + + MAX_RATE_UP = 0 + MAX_RATE_DOWN = 0 + MAX_TORQUE = 0 + MAX_RT_DELTA = 0 + RT_INTERVAL = 0 + + NO_STEER_REQ_BIT = False + + @classmethod + def setUpClass(cls): + if cls.__name__ == "TorqueSteeringSafetyTestBase": + cls.safety = None + raise unittest.SkipTest + + @abc.abstractmethod + def _torque_cmd_msg(self, torque, steer_req=1): + pass + + def _set_prev_torque(self, t): + self.safety.set_desired_torque_last(t) + self.safety.set_rt_torque_last(t) + + def test_steer_safety_check(self): + for enabled in [0, 1]: + for t in range(int(-self.MAX_TORQUE * 1.5), int(self.MAX_TORQUE * 1.5)): + self.safety.set_controls_allowed(enabled) + self._set_prev_torque(t) + if abs(t) > self.MAX_TORQUE or (not enabled and abs(t) > 0): + self.assertFalse(self._tx(self._torque_cmd_msg(t))) + else: + self.assertTrue(self._tx(self._torque_cmd_msg(t))) + + def test_non_realtime_limit_up(self): + self.safety.set_controls_allowed(True) + + self._set_prev_torque(0) + self.assertTrue(self._tx(self._torque_cmd_msg(self.MAX_RATE_UP))) + self._set_prev_torque(0) + self.assertTrue(self._tx(self._torque_cmd_msg(-self.MAX_RATE_UP))) + + self._set_prev_torque(0) + self.assertFalse(self._tx(self._torque_cmd_msg(self.MAX_RATE_UP + 1))) + self.safety.set_controls_allowed(True) + self._set_prev_torque(0) + self.assertFalse(self._tx(self._torque_cmd_msg(-self.MAX_RATE_UP - 1))) + + def test_steer_req_bit(self): + """Asserts all torque safety modes check the steering request bit""" + if self.NO_STEER_REQ_BIT: + raise unittest.SkipTest("No steering request bit") + + self.safety.set_controls_allowed(True) + self._set_prev_torque(self.MAX_TORQUE) + + # Send torque successfully, then only drop the request bit and ensure it stays blocked + for _ in range(10): + self.assertTrue(self._tx(self._torque_cmd_msg(self.MAX_TORQUE, 1))) + + self.assertFalse(self._tx(self._torque_cmd_msg(self.MAX_TORQUE, 0))) + for _ in range(10): + self.assertFalse(self._tx(self._torque_cmd_msg(self.MAX_TORQUE, 1))) + + +class SteerRequestCutSafetyTest(TorqueSteeringSafetyTestBase, abc.ABC): + + @classmethod + def setUpClass(cls): + if cls.__name__ == "SteerRequestCutSafetyTest": + cls.safety = None + raise unittest.SkipTest + 
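+ # Mixin for safety modes whose EPS tolerates brief steer request drops while torque is commanded; subclasses set the frame/time limits declared below (frame limits count consecutive messages, the interval is in microseconds).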
+ # Safety around steering request bit mismatch tolerance + MIN_VALID_STEERING_FRAMES: int + MAX_INVALID_STEERING_FRAMES: int + MIN_VALID_STEERING_RT_INTERVAL: int + + def test_steer_req_bit_frames(self): + """ + Certain safety modes implement some tolerance on their steer request bits matching the + requested torque to avoid a steering fault or lockout and maintain torque. This tests: + - We can't cut torque for more than one frame + - We can't cut torque until at least the minimum number of matching steer_req messages + - We can always recover from violations if steer_req=1 + """ + + for min_valid_steer_frames in range(self.MIN_VALID_STEERING_FRAMES * 2): + # Reset match count and rt timer to allow cut (valid_steer_req_count, ts_steer_req_mismatch_last) + self.safety.init_tests() + self.safety.set_timer(self.MIN_VALID_STEERING_RT_INTERVAL) + + # Allow torque cut + self.safety.set_controls_allowed(True) + self._set_prev_torque(self.MAX_TORQUE) + for _ in range(min_valid_steer_frames): + self.assertTrue(self._tx(self._torque_cmd_msg(self.MAX_TORQUE, steer_req=1))) + + # should tx if we've sent enough valid frames, and we're not cutting torque for too many frames consecutively + should_tx = min_valid_steer_frames >= self.MIN_VALID_STEERING_FRAMES + for idx in range(self.MAX_INVALID_STEERING_FRAMES * 2): + tx = self._tx(self._torque_cmd_msg(self.MAX_TORQUE, steer_req=0)) + self.assertEqual(should_tx and idx < self.MAX_INVALID_STEERING_FRAMES, tx) + + # Keep blocking after one steer_req mismatch + for _ in range(100): + self.assertFalse(self._tx(self._torque_cmd_msg(self.MAX_TORQUE, steer_req=0))) + + # Make sure we can recover + self.assertTrue(self._tx(self._torque_cmd_msg(0, steer_req=1))) + self._set_prev_torque(self.MAX_TORQUE) + self.assertTrue(self._tx(self._torque_cmd_msg(self.MAX_TORQUE, steer_req=1))) + + def test_steer_req_bit_multi_invalid(self): + """ + For safety modes allowing multiple consecutive invalid frames, this ensures that once a valid frame + is sent after an invalid frame (even without sending the max number of allowed invalid frames), + all counters are reset. + """ + for max_invalid_steer_frames in range(1, self.MAX_INVALID_STEERING_FRAMES * 2): + self.safety.init_tests() + self.safety.set_timer(self.MIN_VALID_STEERING_RT_INTERVAL) + + # Allow torque cut + self.safety.set_controls_allowed(True) + self._set_prev_torque(self.MAX_TORQUE) + for _ in range(self.MIN_VALID_STEERING_FRAMES): + self.assertTrue(self._tx(self._torque_cmd_msg(self.MAX_TORQUE, steer_req=1))) + + # Send partial amount of allowed invalid frames + for idx in range(max_invalid_steer_frames): + should_tx = idx < self.MAX_INVALID_STEERING_FRAMES + self.assertEqual(should_tx, self._tx(self._torque_cmd_msg(self.MAX_TORQUE, steer_req=0))) + + # Send one valid frame, and subsequent invalid should now be blocked + self._set_prev_torque(self.MAX_TORQUE) + self.assertTrue(self._tx(self._torque_cmd_msg(self.MAX_TORQUE, steer_req=1))) + for _ in range(self.MIN_VALID_STEERING_FRAMES + 1): + self.assertFalse(self._tx(self._torque_cmd_msg(self.MAX_TORQUE, steer_req=0))) + + def test_steer_req_bit_realtime(self): + """ + Realtime safety for cutting steer request bit. 
This tests: + - That we allow messages with mismatching steer request bit if time from last is >= MIN_VALID_STEERING_RT_INTERVAL + - That frame mismatch safety does not interfere with this test + """ + for rt_us in np.arange(self.MIN_VALID_STEERING_RT_INTERVAL - 50000, self.MIN_VALID_STEERING_RT_INTERVAL + 50000, 10000): + # Reset match count and rt timer (valid_steer_req_count, ts_steer_req_mismatch_last) + self.safety.init_tests() + + # Make sure valid_steer_req_count doesn't affect this test + self.safety.set_controls_allowed(True) + self._set_prev_torque(self.MAX_TORQUE) + for _ in range(self.MIN_VALID_STEERING_FRAMES): + self.assertTrue(self._tx(self._torque_cmd_msg(self.MAX_TORQUE, steer_req=1))) + + # Normally, sending MIN_VALID_STEERING_FRAMES valid frames should always allow + self.safety.set_timer(max(rt_us, 0)) + should_tx = rt_us >= self.MIN_VALID_STEERING_RT_INTERVAL + for _ in range(self.MAX_INVALID_STEERING_FRAMES): + self.assertEqual(should_tx, self._tx(self._torque_cmd_msg(self.MAX_TORQUE, steer_req=0))) + + # Keep blocking after one steer_req mismatch + for _ in range(100): + self.assertFalse(self._tx(self._torque_cmd_msg(self.MAX_TORQUE, steer_req=0))) + + # Make sure we can recover + self.assertTrue(self._tx(self._torque_cmd_msg(0, steer_req=1))) + self._set_prev_torque(self.MAX_TORQUE) + self.assertTrue(self._tx(self._torque_cmd_msg(self.MAX_TORQUE, steer_req=1))) + + +class DriverTorqueSteeringSafetyTest(TorqueSteeringSafetyTestBase, abc.ABC): + + DRIVER_TORQUE_ALLOWANCE = 0 + DRIVER_TORQUE_FACTOR = 0 + + @classmethod + def setUpClass(cls): + if cls.__name__ == "DriverTorqueSteeringSafetyTest": + cls.safety = None + raise unittest.SkipTest + + @abc.abstractmethod + def _torque_driver_msg(self, torque): + pass + + def _reset_torque_driver_measurement(self, torque): + for _ in range(MAX_SAMPLE_VALS): + self._rx(self._torque_driver_msg(torque)) + + def test_non_realtime_limit_up(self): + self._reset_torque_driver_measurement(0) + super().test_non_realtime_limit_up() + + def test_against_torque_driver(self): + # Tests down limits and driver torque blending + self.safety.set_controls_allowed(True) + + # Cannot stay at MAX_TORQUE if above DRIVER_TORQUE_ALLOWANCE + for sign in [-1, 1]: + for driver_torque in np.arange(0, self.DRIVER_TORQUE_ALLOWANCE * 2, 1): + self._reset_torque_driver_measurement(-driver_torque * sign) + self._set_prev_torque(self.MAX_TORQUE * sign) + should_tx = abs(driver_torque) <= self.DRIVER_TORQUE_ALLOWANCE + self.assertEqual(should_tx, self._tx(self._torque_cmd_msg(self.MAX_TORQUE * sign))) + + # arbitrary high driver torque to ensure max steer torque is allowed + max_driver_torque = int(self.MAX_TORQUE / self.DRIVER_TORQUE_FACTOR + self.DRIVER_TORQUE_ALLOWANCE + 1) + + # spot check some individual cases + for sign in [-1, 1]: + # Ensure we wind down factor units for every unit above allowance + driver_torque = (self.DRIVER_TORQUE_ALLOWANCE + 10) * sign + torque_desired = (self.MAX_TORQUE - 10 * self.DRIVER_TORQUE_FACTOR) * sign + delta = 1 * sign + self._set_prev_torque(torque_desired) + self._reset_torque_driver_measurement(-driver_torque) + self.assertTrue(self._tx(self._torque_cmd_msg(torque_desired))) + self._set_prev_torque(torque_desired + delta) + self._reset_torque_driver_measurement(-driver_torque) + self.assertFalse(self._tx(self._torque_cmd_msg(torque_desired + delta))) + + # If we're well past the allowance, minimum wind down is MAX_RATE_DOWN + self._set_prev_torque(self.MAX_TORQUE * sign) + 
self._reset_torque_driver_measurement(-max_driver_torque * sign) + self.assertTrue(self._tx(self._torque_cmd_msg((self.MAX_TORQUE - self.MAX_RATE_DOWN) * sign))) + self._set_prev_torque(self.MAX_TORQUE * sign) + self._reset_torque_driver_measurement(-max_driver_torque * sign) + self.assertTrue(self._tx(self._torque_cmd_msg(0))) + self._set_prev_torque(self.MAX_TORQUE * sign) + self._reset_torque_driver_measurement(-max_driver_torque * sign) + self.assertFalse(self._tx(self._torque_cmd_msg((self.MAX_TORQUE - self.MAX_RATE_DOWN + 1) * sign))) + + def test_realtime_limits(self): + self.safety.set_controls_allowed(True) + + for sign in [-1, 1]: + self.safety.init_tests() + self._set_prev_torque(0) + self._reset_torque_driver_measurement(0) + for t in np.arange(0, self.MAX_RT_DELTA, 1): + t *= sign + self.assertTrue(self._tx(self._torque_cmd_msg(t))) + self.assertFalse(self._tx(self._torque_cmd_msg(sign * (self.MAX_RT_DELTA + 1)))) + + self._set_prev_torque(0) + for t in np.arange(0, self.MAX_RT_DELTA, 1): + t *= sign + self.assertTrue(self._tx(self._torque_cmd_msg(t))) + + # Increase timer to update rt_torque_last + self.safety.set_timer(self.RT_INTERVAL + 1) + self.assertTrue(self._tx(self._torque_cmd_msg(sign * (self.MAX_RT_DELTA - 1)))) + self.assertTrue(self._tx(self._torque_cmd_msg(sign * (self.MAX_RT_DELTA + 1)))) + + def test_reset_driver_torque_measurements(self): + # Tests that the driver torque measurement sample_t is reset on safety mode init + for t in np.linspace(-self.MAX_TORQUE, self.MAX_TORQUE, MAX_SAMPLE_VALS): + self.assertTrue(self._rx(self._torque_driver_msg(t))) + + self.assertNotEqual(self.safety.get_torque_driver_min(), 0) + self.assertNotEqual(self.safety.get_torque_driver_max(), 0) + + self._reset_safety_hooks() + self.assertEqual(self.safety.get_torque_driver_min(), 0) + self.assertEqual(self.safety.get_torque_driver_max(), 0) + + +class MotorTorqueSteeringSafetyTest(TorqueSteeringSafetyTestBase, abc.ABC): + + MAX_TORQUE_ERROR = 0 + TORQUE_MEAS_TOLERANCE = 0 + + @classmethod + def setUpClass(cls): + if cls.__name__ == "MotorTorqueSteeringSafetyTest": + cls.safety = None + raise unittest.SkipTest + + @abc.abstractmethod + def _torque_meas_msg(self, torque): + pass + + def _set_prev_torque(self, t): + super()._set_prev_torque(t) + self.safety.set_torque_meas(t, t) + + def test_torque_absolute_limits(self): + for controls_allowed in [True, False]: + for torque in np.arange(-self.MAX_TORQUE - 1000, self.MAX_TORQUE + 1000, self.MAX_RATE_UP): + self.safety.set_controls_allowed(controls_allowed) + self.safety.set_rt_torque_last(torque) + self.safety.set_torque_meas(torque, torque) + self.safety.set_desired_torque_last(torque - self.MAX_RATE_UP) + + if controls_allowed: + send = (-self.MAX_TORQUE <= torque <= self.MAX_TORQUE) + else: + send = torque == 0 + + self.assertEqual(send, self._tx(self._torque_cmd_msg(torque))) + + def test_non_realtime_limit_down(self): + self.safety.set_controls_allowed(True) + + torque_meas = self.MAX_TORQUE - self.MAX_TORQUE_ERROR - 50 + + self.safety.set_rt_torque_last(self.MAX_TORQUE) + self.safety.set_torque_meas(torque_meas, torque_meas) + self.safety.set_desired_torque_last(self.MAX_TORQUE) + self.assertTrue(self._tx(self._torque_cmd_msg(self.MAX_TORQUE - self.MAX_RATE_DOWN))) + + self.safety.set_rt_torque_last(self.MAX_TORQUE) + self.safety.set_torque_meas(torque_meas, torque_meas) + self.safety.set_desired_torque_last(self.MAX_TORQUE) + self.assertFalse(self._tx(self._torque_cmd_msg(self.MAX_TORQUE - self.MAX_RATE_DOWN + 1))) + + def 
test_exceed_torque_sensor(self): + self.safety.set_controls_allowed(True) + + for sign in [-1, 1]: + self._set_prev_torque(0) + for t in np.arange(0, self.MAX_TORQUE_ERROR + 2, 2): # step needs to be smaller than MAX_TORQUE_ERROR + t *= sign + self.assertTrue(self._tx(self._torque_cmd_msg(t))) + + self.assertFalse(self._tx(self._torque_cmd_msg(sign * (self.MAX_TORQUE_ERROR + 2)))) + + def test_realtime_limit_up(self): + self.safety.set_controls_allowed(True) + + for sign in [-1, 1]: + self.safety.init_tests() + self._set_prev_torque(0) + for t in np.arange(0, self.MAX_RT_DELTA + 1, 1): + t *= sign + self.safety.set_torque_meas(t, t) + self.assertTrue(self._tx(self._torque_cmd_msg(t))) + self.assertFalse(self._tx(self._torque_cmd_msg(sign * (self.MAX_RT_DELTA + 1)))) + + self._set_prev_torque(0) + for t in np.arange(0, self.MAX_RT_DELTA + 1, 1): + t *= sign + self.safety.set_torque_meas(t, t) + self.assertTrue(self._tx(self._torque_cmd_msg(t))) + + # Increase timer to update rt_torque_last + self.safety.set_timer(self.RT_INTERVAL + 1) + self.assertTrue(self._tx(self._torque_cmd_msg(sign * self.MAX_RT_DELTA))) + self.assertTrue(self._tx(self._torque_cmd_msg(sign * (self.MAX_RT_DELTA + 1)))) + + def test_torque_measurements(self): + trq = 50 + for t in [trq, -trq, 0, 0, 0, 0]: + self._rx(self._torque_meas_msg(t)) + + max_range = range(trq, trq + self.TORQUE_MEAS_TOLERANCE + 1) + min_range = range(-(trq + self.TORQUE_MEAS_TOLERANCE), -trq + 1) + self.assertTrue(self.safety.get_torque_meas_min() in min_range) + self.assertTrue(self.safety.get_torque_meas_max() in max_range) + + max_range = range(self.TORQUE_MEAS_TOLERANCE + 1) + min_range = range(-(trq + self.TORQUE_MEAS_TOLERANCE), -trq + 1) + self._rx(self._torque_meas_msg(0)) + self.assertTrue(self.safety.get_torque_meas_min() in min_range) + self.assertTrue(self.safety.get_torque_meas_max() in max_range) + + max_range = range(self.TORQUE_MEAS_TOLERANCE + 1) + min_range = range(-self.TORQUE_MEAS_TOLERANCE, 0 + 1) + self._rx(self._torque_meas_msg(0)) + self.assertTrue(self.safety.get_torque_meas_min() in min_range) + self.assertTrue(self.safety.get_torque_meas_max() in max_range) + + def test_reset_torque_measurements(self): + # Tests that the torque measurement sample_t is reset on safety mode init + for t in np.linspace(-self.MAX_TORQUE, self.MAX_TORQUE, MAX_SAMPLE_VALS): + self.assertTrue(self._rx(self._torque_meas_msg(t))) + + self.assertNotEqual(self.safety.get_torque_meas_min(), 0) + self.assertNotEqual(self.safety.get_torque_meas_max(), 0) + + self._reset_safety_hooks() + self.assertEqual(self.safety.get_torque_meas_min(), 0) + self.assertEqual(self.safety.get_torque_meas_max(), 0) + + +class AngleSteeringSafetyTest(PandaSafetyTestBase): + + DEG_TO_CAN: float + ANGLE_RATE_BP: list[float] + ANGLE_RATE_UP: list[float] # windup limit + ANGLE_RATE_DOWN: list[float] # unwind limit + + @classmethod + def setUpClass(cls): + if cls.__name__ == "AngleSteeringSafetyTest": + cls.safety = None + raise unittest.SkipTest + + @abc.abstractmethod + def _speed_msg(self, speed): + pass + + @abc.abstractmethod + def _angle_cmd_msg(self, angle: float, enabled: bool): + pass + + @abc.abstractmethod + def _angle_meas_msg(self, angle: float): + pass + + def _set_prev_desired_angle(self, t): + t = round(t * self.DEG_TO_CAN) + self.safety.set_desired_angle_last(t) + + def _reset_angle_measurement(self, angle): + for _ in range(MAX_SAMPLE_VALS): + self._rx(self._angle_meas_msg(angle)) + + def _reset_speed_measurement(self, speed): + for _ in 
range(MAX_SAMPLE_VALS): + self._rx(self._speed_msg(speed)) + + def test_vehicle_speed_measurements(self): + self._common_measurement_test(self._speed_msg, 0, 80, VEHICLE_SPEED_FACTOR, self.safety.get_vehicle_speed_min, self.safety.get_vehicle_speed_max) + + def test_steering_angle_measurements(self, max_angle=300): + self._common_measurement_test(self._angle_meas_msg, -max_angle, max_angle, self.DEG_TO_CAN, self.safety.get_angle_meas_min, self.safety.get_angle_meas_max) + + def test_angle_cmd_when_enabled(self, max_angle=300): + # when controls are allowed, angle cmd rate limit is enforced + speeds = [0., 1., 5., 10., 15., 50.] + angles = np.concatenate((np.arange(-max_angle, max_angle, 5), [0])) + for a in angles: + for s in speeds: + max_delta_up = np.interp(s, self.ANGLE_RATE_BP, self.ANGLE_RATE_UP) + max_delta_down = np.interp(s, self.ANGLE_RATE_BP, self.ANGLE_RATE_DOWN) + + # first test against false positives + self._reset_angle_measurement(a) + self._reset_speed_measurement(s) + + self._set_prev_desired_angle(a) + self.safety.set_controls_allowed(1) + + # Stay within limits + # Up + self.assertTrue(self._tx(self._angle_cmd_msg(a + sign_of(a) * max_delta_up, True))) + self.assertTrue(self.safety.get_controls_allowed()) + + # Don't change + self.assertTrue(self._tx(self._angle_cmd_msg(a, True))) + self.assertTrue(self.safety.get_controls_allowed()) + + # Down + self.assertTrue(self._tx(self._angle_cmd_msg(a - sign_of(a) * max_delta_down, True))) + self.assertTrue(self.safety.get_controls_allowed()) + + # Inject too high rates + # Up + self.assertFalse(self._tx(self._angle_cmd_msg(a + sign_of(a) * (max_delta_up + 1.1), True))) + + # Don't change + self.safety.set_controls_allowed(1) + self._set_prev_desired_angle(a) + self.assertTrue(self.safety.get_controls_allowed()) + self.assertTrue(self._tx(self._angle_cmd_msg(a, True))) + self.assertTrue(self.safety.get_controls_allowed()) + + # Down + self.assertFalse(self._tx(self._angle_cmd_msg(a - sign_of(a) * (max_delta_down + 1.1), True))) + + # Check desired steer should be the same as steer angle when controls are off + self.safety.set_controls_allowed(0) + self.assertTrue(self._tx(self._angle_cmd_msg(a, False))) + + def test_angle_cmd_when_disabled(self): + # Tests that only angles close to the meas are allowed while + # steer actuation bit is 0, regardless of controls allowed. 
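+ # e.g. with the actuation bit low, angle_cmd == angle_meas (say both 10) is allowed, while any other command is blocked even if controls are allowed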
+ for controls_allowed in (True, False): + self.safety.set_controls_allowed(controls_allowed) + + for steer_control_enabled in (True, False): + for angle_meas in np.arange(-90, 91, 10): + self._reset_angle_measurement(angle_meas) + + for angle_cmd in np.arange(-90, 91, 10): + self._set_prev_desired_angle(angle_cmd) + + # controls_allowed is checked if actuation bit is 1, else the angle must be close to meas (inactive) + should_tx = controls_allowed if steer_control_enabled else angle_cmd == angle_meas + self.assertEqual(should_tx, self._tx(self._angle_cmd_msg(angle_cmd, steer_control_enabled))) + + +class PandaSafetyTest(PandaSafetyTestBase): + TX_MSGS: list[list[int]] | None = None + SCANNED_ADDRS = [*range(0x800), # Entire 11-bit CAN address space + *range(0x18DA00F1, 0x18DB00F1, 0x100), # 29-bit UDS physical addressing + *range(0x18DB00F1, 0x18DC00F1, 0x100), # 29-bit UDS functional addressing + *range(0x3300, 0x3400), # Honda + 0x10400060, 0x104c006c] # GMLAN (exceptions, range/format unclear) + FWD_BLACKLISTED_ADDRS: dict[int, list[int]] = {} # {bus: [addr]} + FWD_BUS_LOOKUP: dict[int, int] = {} + + @classmethod + def setUpClass(cls): + if cls.__name__ == "PandaSafetyTest" or cls.__name__.endswith('Base'): + cls.safety = None + raise unittest.SkipTest + + # ***** standard tests for all safety modes ***** + + def test_tx_msg_in_scanned_range(self): + # the relay malfunction, fwd hook, and spam can tests don't exhaustively + # scan the entire 29-bit address space, only some known important ranges + # make sure SCANNED_ADDRS stays up to date with car port TX_MSGS; new + # model ports should expand the range if needed + for msg in self.TX_MSGS: + self.assertTrue(msg[0] in self.SCANNED_ADDRS, f"{msg[0]=:#x}") + + def test_fwd_hook(self): + # some safety modes don't forward anything, while others blacklist msgs + for bus in range(3): + for addr in self.SCANNED_ADDRS: + # assume len 8 + fwd_bus = self.FWD_BUS_LOOKUP.get(bus, -1) + if bus in self.FWD_BLACKLISTED_ADDRS and addr in self.FWD_BLACKLISTED_ADDRS[bus]: + fwd_bus = -1 + self.assertEqual(fwd_bus, self.safety.safety_fwd_hook(bus, addr), f"{addr=:#x} from {bus=} to {fwd_bus=}") + + def test_spam_can_buses(self): + for bus in range(4): + for addr in self.SCANNED_ADDRS: + if [addr, bus] not in self.TX_MSGS: + self.assertFalse(self._tx(make_msg(bus, addr, 8)), f"allowed TX {addr=} {bus=}") + + def test_default_controls_not_allowed(self): + self.assertFalse(self.safety.get_controls_allowed()) + + def test_manually_enable_controls_allowed(self): + self.safety.set_controls_allowed(1) + self.assertTrue(self.safety.get_controls_allowed()) + self.safety.set_controls_allowed(0) + self.assertFalse(self.safety.get_controls_allowed()) + + def test_tx_hook_on_wrong_safety_mode(self): + files = os.listdir(os.path.dirname(os.path.realpath(__file__))) + test_files = [f for f in files if f.startswith("test_") and f.endswith(".py")] + + current_test = self.__class__.__name__ + + all_tx = [] + for tf in test_files: + test = importlib.import_module("panda.tests.safety."+tf[:-3]) + for attr in dir(test): + if attr.startswith("Test") and attr != current_test: + tc = getattr(test, attr) + tx = tc.TX_MSGS + if tx is not None and not attr.endswith('Base'): + # No point in comparing different Tesla safety modes + if 'Tesla' in attr and 'Tesla' in current_test: + continue + # No point in comparing to ALLOUTPUT which allows all messages + if attr.startswith('TestAllOutput'): + continue + if attr.startswith('TestToyota') and current_test.startswith('TestToyota'): + 
continue + if attr.startswith('TestSubaruGen') and current_test.startswith('TestSubaruGen'): + continue + if attr.startswith('TestSubaruPreglobal') and current_test.startswith('TestSubaruPreglobal'): + continue + if {attr, current_test}.issubset({'TestVolkswagenPqSafety', 'TestVolkswagenPqStockSafety', 'TestVolkswagenPqLongSafety'}): + continue + if {attr, current_test}.issubset({'TestGmCameraSafety', 'TestGmCameraLongitudinalSafety'}): + continue + if attr.startswith('TestFord') and current_test.startswith('TestFord'): + continue + if attr.startswith('TestHyundaiCanfd') and current_test.startswith('TestHyundaiCanfd'): + continue + if {attr, current_test}.issubset({'TestVolkswagenMqbSafety', 'TestVolkswagenMqbStockSafety', 'TestVolkswagenMqbLongSafety'}): + continue + + # overlapping TX addrs, but they're not actuating messages for either car + if attr == 'TestHyundaiCanfdHDA2LongEV' and current_test.startswith('TestToyota'): + tx = list(filter(lambda m: m[0] not in [0x160, ], tx)) + + # Volkswagen MQB longitudinal actuating message overlaps with the Subaru lateral actuating message + if attr == 'TestVolkswagenMqbLongSafety' and current_test.startswith('TestSubaru'): + tx = list(filter(lambda m: m[0] not in [0x122, ], tx)) + + # Volkswagen MQB and Honda Nidec ACC HUD messages overlap + if attr == 'TestVolkswagenMqbLongSafety' and current_test.startswith('TestHondaNidec'): + tx = list(filter(lambda m: m[0] not in [0x30c, ], tx)) + + # Volkswagen MQB and Honda Bosch Radarless ACC HUD messages overlap + if attr == 'TestVolkswagenMqbLongSafety' and current_test.startswith('TestHondaBoschRadarless'): + tx = list(filter(lambda m: m[0] not in [0x30c, ], tx)) + + # TODO: Temporary, should be fixed in panda firmware, safety_honda.h + if attr.startswith('TestHonda'): + # exceptions for common msgs across different hondas + tx = list(filter(lambda m: m[0] not in [0x1FA, 0x30C, 0x33D, 0x33DB], tx)) + all_tx.append([[m[0], m[1], attr] for m in tx]) + + # make sure we got all the msgs + self.assertTrue(len(all_tx) >= len(test_files)-1) + + for tx_msgs in all_tx: + for addr, bus, test_name in tx_msgs: + msg = make_msg(bus, addr) + self.safety.set_controls_allowed(1) + # TODO: this should be blocked + if current_test in ["TestNissanSafety", "TestNissanSafetyAltEpsBus", "TestNissanLeafSafety"] and [addr, bus] in self.TX_MSGS: + continue + self.assertFalse(self._tx(msg), f"transmit of {addr=:#x} {bus=} from {test_name} during {current_test} was allowed") + + +@add_regen_tests +class PandaCarSafetyTest(PandaSafetyTest): + STANDSTILL_THRESHOLD: float | None = None + GAS_PRESSED_THRESHOLD = 0 + RELAY_MALFUNCTION_ADDRS: dict[int, tuple[int, ...]] | None = None + + @classmethod + def setUpClass(cls): + if cls.__name__ == "PandaCarSafetyTest" or cls.__name__.endswith('Base'): + cls.safety = None + raise unittest.SkipTest + + @abc.abstractmethod + def _user_brake_msg(self, brake): + pass + + def _user_regen_msg(self, regen): + pass + + @abc.abstractmethod + def _speed_msg(self, speed): + pass + + # Safety modes can override if vehicle_moving is driven by a different message + def _vehicle_moving_msg(self, speed: float): + return self._speed_msg(speed) + + @abc.abstractmethod + def _user_gas_msg(self, gas): + pass + + @abc.abstractmethod + def _pcm_status_msg(self, enable): + pass + + # ***** standard tests for all car-specific safety modes ***** + + def test_relay_malfunction(self): + # each car has an addr that is used to detect relay malfunction + # if that addr is seen on specified bus, triggers the relay 
malfunction + # protection logic: both tx_hook and fwd_hook are expected to return failure + self.assertFalse(self.safety.get_relay_malfunction()) + for bus in range(3): + for addr in self.SCANNED_ADDRS: + self.safety.set_relay_malfunction(False) + self._rx(make_msg(bus, addr, 8)) + should_relay_malfunction = addr in self.RELAY_MALFUNCTION_ADDRS.get(bus, ()) + self.assertEqual(should_relay_malfunction, self.safety.get_relay_malfunction(), (bus, addr)) + + # test relay malfunction protection logic + self.safety.set_relay_malfunction(True) + for bus in range(3): + for addr in self.SCANNED_ADDRS: + self.assertFalse(self._tx(make_msg(bus, addr, 8))) + self.assertEqual(-1, self.safety.safety_fwd_hook(bus, addr)) + + def test_prev_gas(self): + self.assertFalse(self.safety.get_gas_pressed_prev()) + for pressed in [self.GAS_PRESSED_THRESHOLD + 1, 0]: + self._rx(self._user_gas_msg(pressed)) + self.assertEqual(bool(pressed), self.safety.get_gas_pressed_prev()) + + def test_allow_engage_with_gas_pressed(self): + self._rx(self._user_gas_msg(1)) + self.safety.set_controls_allowed(True) + self._rx(self._user_gas_msg(1)) + self.assertTrue(self.safety.get_controls_allowed()) + self._rx(self._user_gas_msg(1)) + self.assertTrue(self.safety.get_controls_allowed()) + + def test_disengage_on_gas(self): + self._rx(self._user_gas_msg(0)) + self.safety.set_controls_allowed(True) + self._rx(self._user_gas_msg(self.GAS_PRESSED_THRESHOLD + 1)) + self.assertFalse(self.safety.get_controls_allowed()) + + def test_alternative_experience_no_disengage_on_gas(self): + self._rx(self._user_gas_msg(0)) + self.safety.set_controls_allowed(True) + self.safety.set_alternative_experience(ALTERNATIVE_EXPERIENCE.DISABLE_DISENGAGE_ON_GAS) + self._rx(self._user_gas_msg(self.GAS_PRESSED_THRESHOLD + 1)) + # Test we allow lateral, but not longitudinal + self.assertTrue(self.safety.get_controls_allowed()) + self.assertFalse(self.safety.get_longitudinal_allowed()) + # Make sure we can re-gain longitudinal actuation + self._rx(self._user_gas_msg(0)) + self.assertTrue(self.safety.get_longitudinal_allowed()) + + def test_prev_user_brake(self, _user_brake_msg=None, get_brake_pressed_prev=None): + if _user_brake_msg is None: + _user_brake_msg = self._user_brake_msg + get_brake_pressed_prev = self.safety.get_brake_pressed_prev + + self.assertFalse(get_brake_pressed_prev()) + for pressed in [True, False]: + self._rx(_user_brake_msg(not pressed)) + self.assertEqual(not pressed, get_brake_pressed_prev()) + self._rx(_user_brake_msg(pressed)) + self.assertEqual(pressed, get_brake_pressed_prev()) + + def test_enable_control_allowed_from_cruise(self): + self._rx(self._pcm_status_msg(False)) + self.assertFalse(self.safety.get_controls_allowed()) + self._rx(self._pcm_status_msg(True)) + self.assertTrue(self.safety.get_controls_allowed()) + + def test_disable_control_allowed_from_cruise(self): + self.safety.set_controls_allowed(1) + self._rx(self._pcm_status_msg(False)) + self.assertFalse(self.safety.get_controls_allowed()) + + def test_cruise_engaged_prev(self): + for engaged in [True, False]: + self._rx(self._pcm_status_msg(engaged)) + self.assertEqual(engaged, self.safety.get_cruise_engaged_prev()) + self._rx(self._pcm_status_msg(not engaged)) + self.assertEqual(not engaged, self.safety.get_cruise_engaged_prev()) + + def test_allow_user_brake_at_zero_speed(self, _user_brake_msg=None, get_brake_pressed_prev=None): + if _user_brake_msg is None: + _user_brake_msg = self._user_brake_msg + + # Brake was already pressed + 
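+ # (holding or releasing a pre-existing brake press must not disengage; only the new rising edge further down does)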
self._rx(self._vehicle_moving_msg(0)) + self._rx(_user_brake_msg(1)) + self.safety.set_controls_allowed(1) + self._rx(_user_brake_msg(1)) + self.assertTrue(self.safety.get_controls_allowed()) + self.assertTrue(self.safety.get_longitudinal_allowed()) + self._rx(_user_brake_msg(0)) + self.assertTrue(self.safety.get_controls_allowed()) + self.assertTrue(self.safety.get_longitudinal_allowed()) + # rising edge of brake should disengage + self._rx(_user_brake_msg(1)) + self.assertFalse(self.safety.get_controls_allowed()) + self.assertFalse(self.safety.get_longitudinal_allowed()) + self._rx(_user_brake_msg(0)) # reset no brakes + + def test_not_allow_user_brake_when_moving(self, _user_brake_msg=None, get_brake_pressed_prev=None): + if _user_brake_msg is None: + _user_brake_msg = self._user_brake_msg + + # Brake was already pressed + self._rx(_user_brake_msg(1)) + self.safety.set_controls_allowed(1) + self._rx(self._vehicle_moving_msg(self.STANDSTILL_THRESHOLD)) + self._rx(_user_brake_msg(1)) + self.assertTrue(self.safety.get_controls_allowed()) + self.assertTrue(self.safety.get_longitudinal_allowed()) + self._rx(self._vehicle_moving_msg(self.STANDSTILL_THRESHOLD + 1)) + self._rx(_user_brake_msg(1)) + self.assertFalse(self.safety.get_controls_allowed()) + self.assertFalse(self.safety.get_longitudinal_allowed()) + self._rx(self._vehicle_moving_msg(0)) + + def test_vehicle_moving(self): + self.assertFalse(self.safety.get_vehicle_moving()) + + # not moving + self._rx(self._vehicle_moving_msg(0)) + self.assertFalse(self.safety.get_vehicle_moving()) + + # speed is at threshold + self._rx(self._vehicle_moving_msg(self.STANDSTILL_THRESHOLD)) + self.assertFalse(self.safety.get_vehicle_moving()) + + # past threshold + self._rx(self._vehicle_moving_msg(self.STANDSTILL_THRESHOLD + 1)) + self.assertTrue(self.safety.get_vehicle_moving()) + + def test_safety_tick(self): + self.safety.set_timer(int(2e6)) + self.safety.set_controls_allowed(True) + self.safety.safety_tick_current_safety_config() + self.assertFalse(self.safety.get_controls_allowed()) + self.assertFalse(self.safety.safety_config_valid()) diff --git a/panda/tests/safety/hyundai_common.py b/panda/tests/safety/hyundai_common.py new file mode 100644 index 0000000..da18671 --- /dev/null +++ b/panda/tests/safety/hyundai_common.py @@ -0,0 +1,157 @@ +import unittest + +import panda.tests.safety.common as common +from panda.tests.libpanda import libpanda_py +from panda.tests.safety.common import make_msg + + +class Buttons: + NONE = 0 + RESUME = 1 + SET = 2 + CANCEL = 4 + + +PREV_BUTTON_SAMPLES = 8 +ENABLE_BUTTONS = (Buttons.RESUME, Buttons.SET, Buttons.CANCEL) + + +class HyundaiButtonBase: + # pylint: disable=no-member,abstract-method + BUTTONS_TX_BUS = 0 # tx on this bus, rx on 0 + SCC_BUS = 0 # rx on this bus + + def test_button_sends(self): + """ + Only RES and CANCEL buttons are allowed + - RES allowed while controls allowed + - CANCEL allowed while cruise is enabled + """ + self.safety.set_controls_allowed(0) + self.assertFalse(self._tx(self._button_msg(Buttons.RESUME, bus=self.BUTTONS_TX_BUS))) + self.assertFalse(self._tx(self._button_msg(Buttons.SET, bus=self.BUTTONS_TX_BUS))) + + self.safety.set_controls_allowed(1) + self.assertTrue(self._tx(self._button_msg(Buttons.RESUME, bus=self.BUTTONS_TX_BUS))) + self.assertFalse(self._tx(self._button_msg(Buttons.SET, bus=self.BUTTONS_TX_BUS))) + + for enabled in (True, False): + self._rx(self._pcm_status_msg(enabled)) + self.assertEqual(enabled, self._tx(self._button_msg(Buttons.CANCEL, 
bus=self.BUTTONS_TX_BUS))) + + def test_enable_control_allowed_from_cruise(self): + """ + Hyundai non-longitudinal only enables on PCM rising edge and recent button press. Tests PCM enabling with: + - disallowed: No buttons + - disallowed: Buttons that don't enable cruise + - allowed: Buttons that do enable cruise + - allowed: Main button with all above combinations + """ + for main_button in (0, 1): + for btn in range(8): + for _ in range(PREV_BUTTON_SAMPLES): # reset + self._rx(self._button_msg(Buttons.NONE)) + + self._rx(self._pcm_status_msg(False)) + self.assertFalse(self.safety.get_controls_allowed()) + self._rx(self._button_msg(btn, main_button=main_button)) + self._rx(self._pcm_status_msg(True)) + controls_allowed = btn in ENABLE_BUTTONS or main_button + self.assertEqual(controls_allowed, self.safety.get_controls_allowed()) + + def test_sampling_cruise_buttons(self): + """ + Test that we allow controls on recent button press, but not as button leaves sliding window + """ + self._rx(self._button_msg(Buttons.SET)) + for i in range(2 * PREV_BUTTON_SAMPLES): + self._rx(self._pcm_status_msg(False)) + self.assertFalse(self.safety.get_controls_allowed()) + self._rx(self._pcm_status_msg(True)) + controls_allowed = i < PREV_BUTTON_SAMPLES + self.assertEqual(controls_allowed, self.safety.get_controls_allowed()) + self._rx(self._button_msg(Buttons.NONE)) + + +class HyundaiLongitudinalBase(common.LongitudinalAccelSafetyTest): + # pylint: disable=no-member,abstract-method + + DISABLED_ECU_UDS_MSG: tuple[int, int] + DISABLED_ECU_ACTUATION_MSG: tuple[int, int] + + @classmethod + def setUpClass(cls): + if cls.__name__ == "HyundaiLongitudinalBase": + cls.safety = None + raise unittest.SkipTest + + # override these tests from PandaCarSafetyTest, hyundai longitudinal uses button enable + def test_disable_control_allowed_from_cruise(self): + pass + + def test_enable_control_allowed_from_cruise(self): + pass + + def test_sampling_cruise_buttons(self): + pass + + def test_cruise_engaged_prev(self): + pass + + def test_button_sends(self): + pass + + def _pcm_status_msg(self, enable): + raise Exception + + def _accel_msg(self, accel, aeb_req=False, aeb_decel=0): + raise NotImplementedError + + def test_set_resume_buttons(self): + """ + SET and RESUME enter controls allowed on their falling edge. + """ + for btn_prev in range(8): + for btn_cur in range(8): + self._rx(self._button_msg(Buttons.NONE)) + self.safety.set_controls_allowed(0) + for _ in range(10): + self._rx(self._button_msg(btn_prev)) + self.assertFalse(self.safety.get_controls_allowed()) + + # should enter controls allowed on falling edge and not transitioning to cancel + should_enable = btn_cur != btn_prev and \ + btn_cur != Buttons.CANCEL and \ + btn_prev in (Buttons.RESUME, Buttons.SET) + + self._rx(self._button_msg(btn_cur)) + self.assertEqual(should_enable, self.safety.get_controls_allowed()) + + def test_cancel_button(self): + self.safety.set_controls_allowed(1) + self._rx(self._button_msg(Buttons.CANCEL)) + self.assertFalse(self.safety.get_controls_allowed()) + + def test_tester_present_allowed(self): + """ + Ensure tester present diagnostic message is allowed to keep ECU knocked out + for longitudinal control. 
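+ (0x02 0x3E 0x80 below is an ISO-TP single frame carrying the UDS TesterPresent service with the suppress-positive-response sub-function.)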
+ """ + + addr, bus = self.DISABLED_ECU_UDS_MSG + tester_present = libpanda_py.make_CANPacket(addr, bus, b"\x02\x3E\x80\x00\x00\x00\x00\x00") + self.assertTrue(self._tx(tester_present)) + + not_tester_present = libpanda_py.make_CANPacket(addr, bus, b"\x03\xAA\xAA\x00\x00\x00\x00\x00") + self.assertFalse(self._tx(not_tester_present)) + + def test_disabled_ecu_alive(self): + """ + If the ECU knockout failed, make sure the relay malfunction is shown + """ + + addr, bus = self.DISABLED_ECU_ACTUATION_MSG + self.assertFalse(self.safety.get_relay_malfunction()) + self._rx(make_msg(bus, addr, 8)) + self.assertTrue(self.safety.get_relay_malfunction()) + diff --git a/panda/tests/safety/test.sh b/panda/tests/safety/test.sh new file mode 100644 index 0000000..13703b2 --- /dev/null +++ b/panda/tests/safety/test.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -e + +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)" +cd $DIR + +# reset coverage data and generate gcc note file +rm -f ../libpanda/*.gcda +scons -j$(nproc) -D --coverage + +# run safety tests and generate coverage data +HW_TYPES=( 6 9 ) +for hw_type in "${HW_TYPES[@]}"; do + echo "Testing HW_TYPE: $hw_type" + HW_TYPE=$hw_type pytest test_*.py +done + +# generate and open report +if [ "$1" == "--report" ]; then + geninfo ../libpanda/ -o coverage.info + genhtml coverage.info -o coverage-out + sensible-browser coverage-out/index.html +fi + +# test coverage +GCOV_OUTPUT=$(gcov -n ../libpanda/panda.c) +INCOMPLETE_COVERAGE=$(echo "$GCOV_OUTPUT" | paste -s -d' \n' | grep -E "File.*(safety\/safety_.*)|(safety)\.h" | grep -v "100.00%" || true) +if [ -n "$INCOMPLETE_COVERAGE" ]; then + echo "FAILED: Some files have less than 100% coverage:" + echo "$INCOMPLETE_COVERAGE" + exit 1 +else + echo "SUCCESS: All checked files have 100% coverage!" 
+fi diff --git a/panda/tests/safety/test_body.py b/panda/tests/safety/test_body.py new file mode 100644 index 0000000..d23c09f --- /dev/null +++ b/panda/tests/safety/test_body.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +import unittest + +import panda.tests.safety.common as common + +from panda import Panda +from panda.tests.libpanda import libpanda_py +from panda.tests.safety.common import CANPackerPanda + + +class TestBody(common.PandaSafetyTest): + TX_MSGS = [[0x250, 0], [0x251, 0], [0x350, 0], [0x351, 0], + [0x1, 0], [0x1, 1], [0x1, 2], [0x1, 3]] + + def setUp(self): + self.packer = CANPackerPanda("comma_body") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_BODY, 0) + self.safety.init_tests() + + def _motors_data_msg(self, speed_l, speed_r): + values = {"SPEED_L": speed_l, "SPEED_R": speed_r} + return self.packer.make_can_msg_panda("MOTORS_DATA", 0, values) + + def _torque_cmd_msg(self, torque_l, torque_r): + values = {"TORQUE_L": torque_l, "TORQUE_R": torque_r} + return self.packer.make_can_msg_panda("TORQUE_CMD", 0, values) + + def _knee_torque_cmd_msg(self, torque_l, torque_r): + values = {"TORQUE_L": torque_l, "TORQUE_R": torque_r} + return self.packer.make_can_msg_panda("KNEE_TORQUE_CMD", 0, values) + + def _max_motor_rpm_cmd_msg(self, max_rpm_l, max_rpm_r): + values = {"MAX_RPM_L": max_rpm_l, "MAX_RPM_R": max_rpm_r} + return self.packer.make_can_msg_panda("MAX_MOTOR_RPM_CMD", 0, values) + + def test_rx_hook(self): + self.assertFalse(self.safety.get_controls_allowed()) + self.assertFalse(self.safety.get_vehicle_moving()) + + # controls allowed when we get MOTORS_DATA message + self.assertTrue(self._rx(self._torque_cmd_msg(0, 0))) + self.assertTrue(self.safety.get_vehicle_moving()) # always moving + self.assertFalse(self.safety.get_controls_allowed()) + + self.assertTrue(self._rx(self._motors_data_msg(0, 0))) + self.assertTrue(self.safety.get_vehicle_moving()) # always moving + self.assertTrue(self.safety.get_controls_allowed()) + + def test_tx_hook(self): + self.assertFalse(self._tx(self._torque_cmd_msg(0, 0))) + self.assertFalse(self._tx(self._knee_torque_cmd_msg(0, 0))) + self.safety.set_controls_allowed(True) + self.assertTrue(self._tx(self._torque_cmd_msg(0, 0))) + self.assertTrue(self._tx(self._knee_torque_cmd_msg(0, 0))) + + def test_can_flasher(self): + # CAN flasher always allowed + self.safety.set_controls_allowed(False) + self.assertTrue(self._tx(common.make_msg(0, 0x1, 8))) + + # 0xdeadfaceU enters CAN flashing mode for base & knee + for addr in (0x250, 0x350): + self.assertTrue(self._tx(common.make_msg(0, addr, dat=b'\xce\xfa\xad\xde\x1e\x0b\xb0\x0a'))) + self.assertFalse(self._tx(common.make_msg(0, addr, dat=b'\xce\xfa\xad\xde\x1e\x0b\xb0'))) # not correct data/len + self.assertFalse(self._tx(common.make_msg(0, addr + 1, dat=b'\xce\xfa\xad\xde\x1e\x0b\xb0\x0a'))) # wrong address + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_chrysler.py b/panda/tests/safety/test_chrysler.py new file mode 100644 index 0000000..5bbb6dd --- /dev/null +++ b/panda/tests/safety/test_chrysler.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python3 +import unittest +from panda import Panda +from panda.tests.libpanda import libpanda_py +import panda.tests.safety.common as common +from panda.tests.safety.common import CANPackerPanda + + +class TestChryslerSafety(common.PandaCarSafetyTest, common.MotorTorqueSteeringSafetyTest): + TX_MSGS = [[0x23B, 0], [0x292, 0], [0x2A6, 0]] + STANDSTILL_THRESHOLD = 0 + RELAY_MALFUNCTION_ADDRS = 
{0: (0x292,)} + FWD_BLACKLISTED_ADDRS = {2: [0x292, 0x2A6]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + MAX_RATE_UP = 3 + MAX_RATE_DOWN = 3 + MAX_TORQUE = 261 + MAX_RT_DELTA = 112 + RT_INTERVAL = 250000 + MAX_TORQUE_ERROR = 80 + + LKAS_ACTIVE_VALUE = 1 + + DAS_BUS = 0 + + def setUp(self): + self.packer = CANPackerPanda("chrysler_pacifica_2017_hybrid_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_CHRYSLER, 0) + self.safety.init_tests() + + def _button_msg(self, cancel=False, resume=False): + values = {"ACC_Cancel": cancel, "ACC_Resume": resume} + return self.packer.make_can_msg_panda("CRUISE_BUTTONS", self.DAS_BUS, values) + + def _pcm_status_msg(self, enable): + values = {"ACC_ACTIVE": enable} + return self.packer.make_can_msg_panda("DAS_3", self.DAS_BUS, values) + + def _speed_msg(self, speed): + values = {"SPEED_LEFT": speed, "SPEED_RIGHT": speed} + return self.packer.make_can_msg_panda("SPEED_1", 0, values) + + def _user_gas_msg(self, gas): + values = {"Accelerator_Position": gas} + return self.packer.make_can_msg_panda("ECM_5", 0, values) + + def _user_brake_msg(self, brake): + values = {"Brake_Pedal_State": 1 if brake else 0} + return self.packer.make_can_msg_panda("ESP_1", 0, values) + + def _torque_meas_msg(self, torque): + values = {"EPS_TORQUE_MOTOR": torque} + return self.packer.make_can_msg_panda("EPS_2", 0, values) + + def _torque_cmd_msg(self, torque, steer_req=1): + values = {"STEERING_TORQUE": torque, "LKAS_CONTROL_BIT": self.LKAS_ACTIVE_VALUE if steer_req else 0} + return self.packer.make_can_msg_panda("LKAS_COMMAND", 0, values) + + def test_buttons(self): + for controls_allowed in (True, False): + self.safety.set_controls_allowed(controls_allowed) + + # resume only while controls allowed + self.assertEqual(controls_allowed, self._tx(self._button_msg(resume=True))) + + # can always cancel + self.assertTrue(self._tx(self._button_msg(cancel=True))) + + # only one button at a time + self.assertFalse(self._tx(self._button_msg(cancel=True, resume=True))) + self.assertFalse(self._tx(self._button_msg(cancel=False, resume=False))) + + +class TestChryslerRamDTSafety(TestChryslerSafety): + TX_MSGS = [[0xB1, 2], [0xA6, 0], [0xFA, 0]] + RELAY_MALFUNCTION_ADDRS = {0: (0xA6,)} + FWD_BLACKLISTED_ADDRS = {2: [0xA6, 0xFA]} + + MAX_RATE_UP = 6 + MAX_RATE_DOWN = 6 + MAX_TORQUE = 350 + + DAS_BUS = 2 + + LKAS_ACTIVE_VALUE = 2 + + def setUp(self): + self.packer = CANPackerPanda("chrysler_ram_dt_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_CHRYSLER, Panda.FLAG_CHRYSLER_RAM_DT) + self.safety.init_tests() + + def _speed_msg(self, speed): + values = {"Vehicle_Speed": speed} + return self.packer.make_can_msg_panda("ESP_8", 0, values) + +class TestChryslerRamHDSafety(TestChryslerSafety): + TX_MSGS = [[0x275, 0], [0x276, 0], [0x23A, 2]] + RELAY_MALFUNCTION_ADDRS = {0: (0x276,)} + FWD_BLACKLISTED_ADDRS = {2: [0x275, 0x276]} + + MAX_TORQUE = 361 + MAX_RATE_UP = 14 + MAX_RATE_DOWN = 14 + MAX_RT_DELTA = 182 + + DAS_BUS = 2 + + LKAS_ACTIVE_VALUE = 2 + + def setUp(self): + self.packer = CANPackerPanda("chrysler_ram_hd_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_CHRYSLER, Panda.FLAG_CHRYSLER_RAM_HD) + self.safety.init_tests() + + def _speed_msg(self, speed): + values = {"Vehicle_Speed": speed} + return self.packer.make_can_msg_panda("ESP_8", 0, values) + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_defaults.py 
b/panda/tests/safety/test_defaults.py new file mode 100644 index 0000000..81bafee --- /dev/null +++ b/panda/tests/safety/test_defaults.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 +import unittest + +import panda.tests.safety.common as common + +from panda import Panda +from panda.tests.libpanda import libpanda_py + + +class TestDefaultRxHookBase(common.PandaSafetyTest): + def test_rx_hook(self): + # default rx hook allows all msgs + for bus in range(4): + for addr in self.SCANNED_ADDRS: + self.assertTrue(self._rx(common.make_msg(bus, addr, 8)), f"failed RX {addr=}") + + +class TestNoOutput(TestDefaultRxHookBase): + TX_MSGS = [] + + def setUp(self): + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_NOOUTPUT, 0) + self.safety.init_tests() + + +class TestSilent(TestNoOutput): + """SILENT uses same hooks as NOOUTPUT""" + + def setUp(self): + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_SILENT, 0) + self.safety.init_tests() + + +class TestAllOutput(TestDefaultRxHookBase): + # Allow all messages + TX_MSGS = [[addr, bus] for addr in common.PandaSafetyTest.SCANNED_ADDRS + for bus in range(4)] + + def setUp(self): + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_ALLOUTPUT, 0) + self.safety.init_tests() + + def test_spam_can_buses(self): + # asserts tx allowed for all scanned addrs + for bus in range(4): + for addr in self.SCANNED_ADDRS: + should_tx = [addr, bus] in self.TX_MSGS + self.assertEqual(should_tx, self._tx(common.make_msg(bus, addr, 8)), f"allowed TX {addr=} {bus=}") + + def test_default_controls_not_allowed(self): + # controls always allowed + self.assertTrue(self.safety.get_controls_allowed()) + + def test_tx_hook_on_wrong_safety_mode(self): + # No point, since we allow all messages + pass + + +class TestAllOutputPassthrough(TestAllOutput): + FWD_BLACKLISTED_ADDRS = {} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + def setUp(self): + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_ALLOUTPUT, 1) + self.safety.init_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_elm327.py b/panda/tests/safety/test_elm327.py new file mode 100644 index 0000000..f133b2e --- /dev/null +++ b/panda/tests/safety/test_elm327.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 +import unittest + +import panda.tests.safety.common as common + +from panda import DLC_TO_LEN, Panda +from panda.tests.libpanda import libpanda_py +from panda.tests.safety.test_defaults import TestDefaultRxHookBase + +GM_CAMERA_DIAG_ADDR = 0x24B + + +class TestElm327(TestDefaultRxHookBase): + TX_MSGS = [[addr, bus] for addr in [GM_CAMERA_DIAG_ADDR, *range(0x600, 0x800), + *range(0x18DA00F1, 0x18DB00F1, 0x100), # 29-bit UDS physical addressing + *[0x18DB33F1], # 29-bit UDS functional address + ] for bus in range(4)] + + def setUp(self): + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_ELM327, 0) + self.safety.init_tests() + + def test_tx_hook(self): + # ensure we can transmit arbitrary data on allowed addresses + for bus in range(4): + for addr in self.SCANNED_ADDRS: + should_tx = [addr, bus] in self.TX_MSGS + self.assertEqual(should_tx, self._tx(common.make_msg(bus, addr, 8))) + + # ELM only allows 8 byte UDS/KWP messages under ISO 15765-4 + for msg_len in DLC_TO_LEN: + should_tx = msg_len == 8 + self.assertEqual(should_tx, self._tx(common.make_msg(0, 0x700, msg_len))) + + # TODO: perform this check for all addresses + # 4 to 15 are reserved ISO-TP frame 
types (https://en.wikipedia.org/wiki/ISO_15765-2) + for byte in range(0xff): + should_tx = (byte >> 4) <= 3 + self.assertEqual(should_tx, self._tx(common.make_msg(0, GM_CAMERA_DIAG_ADDR, dat=bytes([byte] * 8)))) + + def test_tx_hook_on_wrong_safety_mode(self): + # No point, since we allow many diagnostic addresses + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_ford.py b/panda/tests/safety/test_ford.py new file mode 100644 index 0000000..1be3a27 --- /dev/null +++ b/panda/tests/safety/test_ford.py @@ -0,0 +1,476 @@ +#!/usr/bin/env python3 +import numpy as np +import random +import unittest + +import panda.tests.safety.common as common + +from panda import Panda +from panda.tests.libpanda import libpanda_py +from panda.tests.safety.common import CANPackerPanda + +MSG_EngBrakeData = 0x165 # RX from PCM, for driver brake pedal and cruise state +MSG_EngVehicleSpThrottle = 0x204 # RX from PCM, for driver throttle input +MSG_BrakeSysFeatures = 0x415 # RX from ABS, for vehicle speed +MSG_EngVehicleSpThrottle2 = 0x202 # RX from PCM, for second vehicle speed +MSG_Yaw_Data_FD1 = 0x91 # RX from RCM, for yaw rate +MSG_Steering_Data_FD1 = 0x083 # TX by OP, various driver switches and LKAS/CC buttons +MSG_ACCDATA = 0x186 # TX by OP, ACC controls +MSG_ACCDATA_3 = 0x18A # TX by OP, ACC/TJA user interface +MSG_Lane_Assist_Data1 = 0x3CA # TX by OP, Lane Keep Assist +MSG_LateralMotionControl = 0x3D3 # TX by OP, Lateral Control message +MSG_LateralMotionControl2 = 0x3D6 # TX by OP, alternate Lateral Control message +MSG_IPMA_Data = 0x3D8 # TX by OP, IPMA and LKAS user interface + + +def checksum(msg): + addr, t, dat, bus = msg + ret = bytearray(dat) + + if addr == MSG_Yaw_Data_FD1: + chksum = dat[0] + dat[1] # VehRol_W_Actl + chksum += dat[2] + dat[3] # VehYaw_W_Actl + chksum += dat[5] # VehRollYaw_No_Cnt + chksum += dat[6] >> 6 # VehRolWActl_D_Qf + chksum += (dat[6] >> 4) & 0x3 # VehYawWActl_D_Qf + chksum = 0xff - (chksum & 0xff) + ret[4] = chksum + + elif addr == MSG_BrakeSysFeatures: + chksum = dat[0] + dat[1] # Veh_V_ActlBrk + chksum += (dat[2] >> 2) & 0xf # VehVActlBrk_No_Cnt + chksum += dat[2] >> 6 # VehVActlBrk_D_Qf + chksum = 0xff - (chksum & 0xff) + ret[3] = chksum + + elif addr == MSG_EngVehicleSpThrottle2: + chksum = (dat[2] >> 3) & 0xf # VehVActlEng_No_Cnt + chksum += (dat[4] >> 5) & 0x3 # VehVActlEng_D_Qf + chksum += dat[6] + dat[7] # Veh_V_ActlEng + chksum = 0xff - (chksum & 0xff) + ret[1] = chksum + + return addr, t, ret, bus + + +class Buttons: + CANCEL = 0 + RESUME = 1 + TJA_TOGGLE = 2 + + +# Ford safety has four different configurations tested here: +# * CAN with stock longitudinal +# * CAN with openpilot longitudinal +# * CAN FD with stock longitudinal +# * CAN FD with openpilot longitudinal + +class TestFordSafetyBase(common.PandaCarSafetyTest): + STANDSTILL_THRESHOLD = 1 + RELAY_MALFUNCTION_ADDRS = {0: (MSG_ACCDATA_3, MSG_Lane_Assist_Data1, MSG_LateralMotionControl, + MSG_LateralMotionControl2, MSG_IPMA_Data)} + + FWD_BLACKLISTED_ADDRS = {2: [MSG_ACCDATA_3, MSG_Lane_Assist_Data1, MSG_LateralMotionControl, + MSG_LateralMotionControl2, MSG_IPMA_Data]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + # Max allowed delta between car speeds + MAX_SPEED_DELTA = 2.0 # m/s + + STEER_MESSAGE = 0 + + # Curvature control limits + DEG_TO_CAN = 50000 # 1 / (2e-5) rad to can + MAX_CURVATURE = 0.02 + MAX_CURVATURE_ERROR = 0.002 + CURVATURE_ERROR_MIN_SPEED = 10.0 # m/s + + ANGLE_RATE_BP = [5., 25., 25.] 
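+ # speed breakpoints (m/s) used to interpolate the curvature rate limits below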
+ ANGLE_RATE_UP = [0.0002, 0.0001, 0.0001] # windup limit + ANGLE_RATE_DOWN = [0.000225, 0.00015, 0.00015] # unwind limit + + cnt_speed = 0 + cnt_speed_2 = 0 + cnt_yaw_rate = 0 + + packer: CANPackerPanda + safety: libpanda_py.Panda + + @classmethod + def setUpClass(cls): + if cls.__name__ == "TestFordSafetyBase": + raise unittest.SkipTest + + def _set_prev_desired_angle(self, t): + t = round(t * self.DEG_TO_CAN) + self.safety.set_desired_angle_last(t) + + def _reset_curvature_measurement(self, curvature, speed): + for _ in range(6): + self._rx(self._speed_msg(speed)) + self._rx(self._yaw_rate_msg(curvature, speed)) + + # Driver brake pedal + def _user_brake_msg(self, brake: bool): + # brake pedal and cruise state share same message, so we have to send + # the other signal too + enable = self.safety.get_controls_allowed() + values = { + "BpedDrvAppl_D_Actl": 2 if brake else 1, + "CcStat_D_Actl": 5 if enable else 0, + } + return self.packer.make_can_msg_panda("EngBrakeData", 0, values) + + # ABS vehicle speed + def _speed_msg(self, speed: float, quality_flag=True): + values = {"Veh_V_ActlBrk": speed * 3.6, "VehVActlBrk_D_Qf": 3 if quality_flag else 0, "VehVActlBrk_No_Cnt": self.cnt_speed % 16} + self.__class__.cnt_speed += 1 + return self.packer.make_can_msg_panda("BrakeSysFeatures", 0, values, fix_checksum=checksum) + + # PCM vehicle speed + def _speed_msg_2(self, speed: float, quality_flag=True): + values = {"Veh_V_ActlEng": speed * 3.6, "VehVActlEng_D_Qf": 3 if quality_flag else 0, "VehVActlEng_No_Cnt": self.cnt_speed_2 % 16} + self.__class__.cnt_speed_2 += 1 + return self.packer.make_can_msg_panda("EngVehicleSpThrottle2", 0, values, fix_checksum=checksum) + + # Standstill state + def _vehicle_moving_msg(self, speed: float): + values = {"VehStop_D_Stat": 1 if speed <= self.STANDSTILL_THRESHOLD else random.choice((0, 2, 3))} + return self.packer.make_can_msg_panda("DesiredTorqBrk", 0, values) + + # Current curvature + def _yaw_rate_msg(self, curvature: float, speed: float, quality_flag=True): + values = {"VehYaw_W_Actl": curvature * speed, "VehYawWActl_D_Qf": 3 if quality_flag else 0, + "VehRollYaw_No_Cnt": self.cnt_yaw_rate % 256} + self.__class__.cnt_yaw_rate += 1 + return self.packer.make_can_msg_panda("Yaw_Data_FD1", 0, values, fix_checksum=checksum) + + # Drive throttle input + def _user_gas_msg(self, gas: float): + values = {"ApedPos_Pc_ActlArb": gas} + return self.packer.make_can_msg_panda("EngVehicleSpThrottle", 0, values) + + # Cruise status + def _pcm_status_msg(self, enable: bool): + # brake pedal and cruise state share same message, so we have to send + # the other signal too + brake = self.safety.get_brake_pressed_prev() + values = { + "BpedDrvAppl_D_Actl": 2 if brake else 1, + "CcStat_D_Actl": 5 if enable else 0, + } + return self.packer.make_can_msg_panda("EngBrakeData", 0, values) + + # LKAS command + def _lkas_command_msg(self, action: int): + values = { + "LkaActvStats_D2_Req": action, + } + return self.packer.make_can_msg_panda("Lane_Assist_Data1", 0, values) + + # LCA command + def _lat_ctl_msg(self, enabled: bool, path_offset: float, path_angle: float, curvature: float, curvature_rate: float): + if self.STEER_MESSAGE == MSG_LateralMotionControl: + values = { + "LatCtl_D_Rq": 1 if enabled else 0, + "LatCtlPathOffst_L_Actl": path_offset, # Path offset [-5.12|5.11] meter + "LatCtlPath_An_Actl": path_angle, # Path angle [-0.5|0.5235] radians + "LatCtlCurv_NoRate_Actl": curvature_rate, # Curvature rate [-0.001024|0.00102375] 1/meter^2 + "LatCtlCurv_No_Actl": curvature, # 
Curvature [-0.02|0.02094] 1/meter + } + return self.packer.make_can_msg_panda("LateralMotionControl", 0, values) + elif self.STEER_MESSAGE == MSG_LateralMotionControl2: + values = { + "LatCtl_D2_Rq": 1 if enabled else 0, + "LatCtlPathOffst_L_Actl": path_offset, # Path offset [-5.12|5.11] meter + "LatCtlPath_An_Actl": path_angle, # Path angle [-0.5|0.5235] radians + "LatCtlCrv_NoRate2_Actl": curvature_rate, # Curvature rate [-0.001024|0.001023] 1/meter^2 + "LatCtlCurv_No_Actl": curvature, # Curvature [-0.02|0.02094] 1/meter + } + return self.packer.make_can_msg_panda("LateralMotionControl2", 0, values) + + # Cruise control buttons + def _acc_button_msg(self, button: int, bus: int): + values = { + "CcAslButtnCnclPress": 1 if button == Buttons.CANCEL else 0, + "CcAsllButtnResPress": 1 if button == Buttons.RESUME else 0, + "TjaButtnOnOffPress": 1 if button == Buttons.TJA_TOGGLE else 0, + } + return self.packer.make_can_msg_panda("Steering_Data_FD1", bus, values) + + def test_rx_hook(self): + # checksum, counter, and quality flag checks + for quality_flag in [True, False]: + for msg in ["speed", "speed_2", "yaw"]: + self.safety.set_controls_allowed(True) + # send multiple times to verify counter checks + for _ in range(10): + if msg == "speed": + to_push = self._speed_msg(0, quality_flag=quality_flag) + elif msg == "speed_2": + to_push = self._speed_msg_2(0, quality_flag=quality_flag) + elif msg == "yaw": + to_push = self._yaw_rate_msg(0, 0, quality_flag=quality_flag) + + self.assertEqual(quality_flag, self._rx(to_push)) + self.assertEqual(quality_flag, self.safety.get_controls_allowed()) + + # Mess with checksum to make it fail, checksum is not checked for 2nd speed + to_push[0].data[3] = 0 # Speed checksum & half of yaw signal + should_rx = msg == "speed_2" and quality_flag + self.assertEqual(should_rx, self._rx(to_push)) + self.assertEqual(should_rx, self.safety.get_controls_allowed()) + + def test_rx_hook_speed_mismatch(self): + # Ford relies on speed for driver curvature limiting, so it checks two sources + for speed in np.arange(0, 40, 0.5): + for speed_delta in np.arange(-5, 5, 0.1): + speed_2 = round(max(speed + speed_delta, 0), 1) + # Set controls allowed in between rx since first message can reset it + self._rx(self._speed_msg(speed)) + self.safety.set_controls_allowed(True) + self._rx(self._speed_msg_2(speed_2)) + + within_delta = abs(speed - speed_2) <= self.MAX_SPEED_DELTA + self.assertEqual(self.safety.get_controls_allowed(), within_delta) + + def test_angle_measurements(self): + """Tests rx hook correctly parses the curvature measurement from the vehicle speed and yaw rate""" + for speed in np.arange(0.5, 40, 0.5): + for curvature in np.arange(0, self.MAX_CURVATURE * 2, 2e-3): + self._rx(self._speed_msg(speed)) + for c in (curvature, -curvature, 0, 0, 0, 0): + self._rx(self._yaw_rate_msg(c, speed)) + + self.assertEqual(self.safety.get_angle_meas_min(), round(-curvature * self.DEG_TO_CAN)) + self.assertEqual(self.safety.get_angle_meas_max(), round(curvature * self.DEG_TO_CAN)) + + self._rx(self._yaw_rate_msg(0, speed)) + self.assertEqual(self.safety.get_angle_meas_min(), round(-curvature * self.DEG_TO_CAN)) + self.assertEqual(self.safety.get_angle_meas_max(), 0) + + self._rx(self._yaw_rate_msg(0, speed)) + self.assertEqual(self.safety.get_angle_meas_min(), 0) + self.assertEqual(self.safety.get_angle_meas_max(), 0) + + def test_steer_allowed(self): + path_offsets = np.arange(-5.12, 5.11, 1).round() + path_angles = np.arange(-0.5, 0.5235, 0.1).round(1) + curvature_rates = 
np.arange(-0.001024, 0.00102375, 0.001).round(3) + curvatures = np.arange(-0.02, 0.02094, 0.01).round(2) + + for speed in (self.CURVATURE_ERROR_MIN_SPEED - 1, + self.CURVATURE_ERROR_MIN_SPEED + 1): + for controls_allowed in (True, False): + for steer_control_enabled in (True, False): + for path_offset in path_offsets: + for path_angle in path_angles: + for curvature_rate in curvature_rates: + for curvature in curvatures: + self.safety.set_controls_allowed(controls_allowed) + self._set_prev_desired_angle(curvature) + self._reset_curvature_measurement(curvature, speed) + + should_tx = path_offset == 0 and path_angle == 0 and curvature_rate == 0 + # when request bit is 0, only allow curvature of 0 since the signal range + # is not large enough to enforce it tracking measured + should_tx = should_tx and (controls_allowed if steer_control_enabled else curvature == 0) + with self.subTest(controls_allowed=controls_allowed, steer_control_enabled=steer_control_enabled, + path_offset=path_offset, path_angle=path_angle, curvature_rate=curvature_rate, + curvature=curvature): + self.assertEqual(should_tx, self._tx(self._lat_ctl_msg(steer_control_enabled, path_offset, path_angle, curvature, curvature_rate))) + + def test_curvature_rate_limit_up(self): + """ + When the curvature error is exceeded, commanded curvature must start moving towards meas respecting rate limits. + Since panda allows higher rate limits to avoid false positives, we need to allow a lower rate to move towards meas. + """ + self.safety.set_controls_allowed(True) + small_curvature = 2 / self.DEG_TO_CAN # significant small amount of curvature to cross boundary + + for speed in np.arange(0, 40, 0.5): + limit_command = speed > self.CURVATURE_ERROR_MIN_SPEED + max_delta_up = np.interp(speed, self.ANGLE_RATE_BP, self.ANGLE_RATE_UP) + max_delta_up_lower = np.interp(speed + 1, self.ANGLE_RATE_BP, self.ANGLE_RATE_UP) + + cases = [ + (not limit_command, 0), + (not limit_command, max_delta_up_lower - small_curvature), + (True, max_delta_up_lower), + (True, max_delta_up), + (False, max_delta_up + small_curvature), + ] + + for sign in (-1, 1): + self._reset_curvature_measurement(sign * (self.MAX_CURVATURE_ERROR + 1e-3), speed) + for should_tx, curvature in cases: + self._set_prev_desired_angle(sign * small_curvature) + self.assertEqual(should_tx, self._tx(self._lat_ctl_msg(True, 0, 0, sign * (small_curvature + curvature), 0))) + + def test_curvature_rate_limit_down(self): + self.safety.set_controls_allowed(True) + small_curvature = 2 / self.DEG_TO_CAN # significant small amount of curvature to cross boundary + + for speed in np.arange(0, 40, 0.5): + limit_command = speed > self.CURVATURE_ERROR_MIN_SPEED + max_delta_down = np.interp(speed, self.ANGLE_RATE_BP, self.ANGLE_RATE_DOWN) + max_delta_down_lower = np.interp(speed + 1, self.ANGLE_RATE_BP, self.ANGLE_RATE_DOWN) + + cases = [ + (not limit_command, self.MAX_CURVATURE), + (not limit_command, self.MAX_CURVATURE - max_delta_down_lower + small_curvature), + (True, self.MAX_CURVATURE - max_delta_down_lower), + (True, self.MAX_CURVATURE - max_delta_down), + (False, self.MAX_CURVATURE - max_delta_down - small_curvature), + ] + + for sign in (-1, 1): + self._reset_curvature_measurement(sign * (self.MAX_CURVATURE - self.MAX_CURVATURE_ERROR - 1e-3), speed) + for should_tx, curvature in cases: + self._set_prev_desired_angle(sign * self.MAX_CURVATURE) + self.assertEqual(should_tx, self._tx(self._lat_ctl_msg(True, 0, 0, sign * curvature, 0))) + + def test_prevent_lkas_action(self): + 
self.safety.set_controls_allowed(1) + self.assertFalse(self._tx(self._lkas_command_msg(1))) + + self.safety.set_controls_allowed(0) + self.assertFalse(self._tx(self._lkas_command_msg(1))) + + def test_acc_buttons(self): + for allowed in (0, 1): + self.safety.set_controls_allowed(allowed) + for enabled in (True, False): + self._rx(self._pcm_status_msg(enabled)) + self.assertTrue(self._tx(self._acc_button_msg(Buttons.TJA_TOGGLE, 2))) + + for allowed in (0, 1): + self.safety.set_controls_allowed(allowed) + for bus in (0, 2): + self.assertEqual(allowed, self._tx(self._acc_button_msg(Buttons.RESUME, bus))) + + for enabled in (True, False): + self._rx(self._pcm_status_msg(enabled)) + for bus in (0, 2): + self.assertEqual(enabled, self._tx(self._acc_button_msg(Buttons.CANCEL, bus))) + + +class TestFordStockSafety(TestFordSafetyBase): + STEER_MESSAGE = MSG_LateralMotionControl + + TX_MSGS = [ + [MSG_Steering_Data_FD1, 0], [MSG_Steering_Data_FD1, 2], [MSG_ACCDATA_3, 0], [MSG_Lane_Assist_Data1, 0], + [MSG_LateralMotionControl, 0], [MSG_IPMA_Data, 0], + ] + + def setUp(self): + self.packer = CANPackerPanda("ford_lincoln_base_pt") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_FORD, 0) + self.safety.init_tests() + + +class TestFordCANFDStockSafety(TestFordSafetyBase): + STEER_MESSAGE = MSG_LateralMotionControl2 + + TX_MSGS = [ + [MSG_Steering_Data_FD1, 0], [MSG_Steering_Data_FD1, 2], [MSG_ACCDATA_3, 0], [MSG_Lane_Assist_Data1, 0], + [MSG_LateralMotionControl2, 0], [MSG_IPMA_Data, 0], + ] + + def setUp(self): + self.packer = CANPackerPanda("ford_lincoln_base_pt") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_FORD, Panda.FLAG_FORD_CANFD) + self.safety.init_tests() + + +class TestFordLongitudinalSafetyBase(TestFordSafetyBase): + RELAY_MALFUNCTION_ADDRS = {0: (MSG_ACCDATA, MSG_ACCDATA_3, MSG_Lane_Assist_Data1, MSG_LateralMotionControl, + MSG_LateralMotionControl2, MSG_IPMA_Data)} + + FWD_BLACKLISTED_ADDRS = {2: [MSG_ACCDATA, MSG_ACCDATA_3, MSG_Lane_Assist_Data1, MSG_LateralMotionControl, + MSG_LateralMotionControl2, MSG_IPMA_Data]} + + MAX_ACCEL = 2.0 # accel is used for brakes, but openpilot can set positive values + MIN_ACCEL = -3.5 + INACTIVE_ACCEL = 0.0 + + MAX_GAS = 2.0 + MIN_GAS = -0.5 + INACTIVE_GAS = -5.0 + + @classmethod + def setUpClass(cls): + if cls.__name__ == "TestFordLongitudinalSafetyBase": + raise unittest.SkipTest + + # ACC command + def _acc_command_msg(self, gas: float, brake: float, cmbb_deny: bool = False): + values = { + "AccPrpl_A_Rq": gas, # [-5|5.23] m/s^2 + "AccPrpl_A_Pred": gas, # [-5|5.23] m/s^2 + "AccBrkTot_A_Rq": brake, # [-20|11.9449] m/s^2 + "CmbbDeny_B_Actl": 1 if cmbb_deny else 0, # [0|1] deny AEB actuation + } + return self.packer.make_can_msg_panda("ACCDATA", 0, values) + + def test_stock_aeb(self): + # Test that CmbbDeny_B_Actl is never 1, it prevents the ABS module from actuating AEB requests from ACCDATA_2 + for controls_allowed in (True, False): + self.safety.set_controls_allowed(controls_allowed) + for cmbb_deny in (True, False): + should_tx = not cmbb_deny + self.assertEqual(should_tx, self._tx(self._acc_command_msg(self.INACTIVE_GAS, self.INACTIVE_ACCEL, cmbb_deny))) + should_tx = controls_allowed and not cmbb_deny + self.assertEqual(should_tx, self._tx(self._acc_command_msg(self.MAX_GAS, self.MAX_ACCEL, cmbb_deny))) + + def test_gas_safety_check(self): + for controls_allowed in (True, False): + self.safety.set_controls_allowed(controls_allowed) + for gas in 
np.concatenate((np.arange(self.MIN_GAS - 2, self.MAX_GAS + 2, 0.05), [self.INACTIVE_GAS])): + gas = round(gas, 2) # floats might not hit exact boundary conditions without rounding + should_tx = (controls_allowed and self.MIN_GAS <= gas <= self.MAX_GAS) or gas == self.INACTIVE_GAS + self.assertEqual(should_tx, self._tx(self._acc_command_msg(gas, self.INACTIVE_ACCEL))) + + def test_brake_safety_check(self): + for controls_allowed in (True, False): + self.safety.set_controls_allowed(controls_allowed) + for brake in np.arange(self.MIN_ACCEL - 2, self.MAX_ACCEL + 2, 0.05): + brake = round(brake, 2) # floats might not hit exact boundary conditions without rounding + should_tx = (controls_allowed and self.MIN_ACCEL <= brake <= self.MAX_ACCEL) or brake == self.INACTIVE_ACCEL + self.assertEqual(should_tx, self._tx(self._acc_command_msg(self.INACTIVE_GAS, brake))) + + +class TestFordLongitudinalSafety(TestFordLongitudinalSafetyBase): + STEER_MESSAGE = MSG_LateralMotionControl + + TX_MSGS = [ + [MSG_Steering_Data_FD1, 0], [MSG_Steering_Data_FD1, 2], [MSG_ACCDATA, 0], [MSG_ACCDATA_3, 0], [MSG_Lane_Assist_Data1, 0], + [MSG_LateralMotionControl, 0], [MSG_IPMA_Data, 0], + ] + + def setUp(self): + self.packer = CANPackerPanda("ford_lincoln_base_pt") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_FORD, Panda.FLAG_FORD_LONG_CONTROL) + self.safety.init_tests() + + +class TestFordCANFDLongitudinalSafety(TestFordLongitudinalSafetyBase): + STEER_MESSAGE = MSG_LateralMotionControl2 + + TX_MSGS = [ + [MSG_Steering_Data_FD1, 0], [MSG_Steering_Data_FD1, 2], [MSG_ACCDATA, 0], [MSG_ACCDATA_3, 0], [MSG_Lane_Assist_Data1, 0], + [MSG_LateralMotionControl2, 0], [MSG_IPMA_Data, 0], + ] + + def setUp(self): + self.packer = CANPackerPanda("ford_lincoln_base_pt") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_FORD, Panda.FLAG_FORD_LONG_CONTROL | Panda.FLAG_FORD_CANFD) + self.safety.init_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_gm.py b/panda/tests/safety/test_gm.py new file mode 100644 index 0000000..28b2ad5 --- /dev/null +++ b/panda/tests/safety/test_gm.py @@ -0,0 +1,228 @@ +#!/usr/bin/env python3 +import unittest +from panda import Panda +from panda.tests.libpanda import libpanda_py +import panda.tests.safety.common as common +from panda.tests.safety.common import CANPackerPanda + + +class Buttons: + UNPRESS = 1 + RES_ACCEL = 2 + DECEL_SET = 3 + CANCEL = 6 + + +class GmLongitudinalBase(common.PandaCarSafetyTest, common.LongitudinalGasBrakeSafetyTest): + # pylint: disable=no-member,abstract-method + + RELAY_MALFUNCTION_ADDRS = {0: (0x180, 0x2CB)} # ASCMLKASteeringCmd, ASCMGasRegenCmd + + MAX_POSSIBLE_BRAKE = 2 ** 12 + MAX_BRAKE = 400 + + MAX_POSSIBLE_GAS = 2 ** 12 + + PCM_CRUISE = False # openpilot can control the PCM state if longitudinal + + def _send_brake_msg(self, brake): + values = {"FrictionBrakeCmd": -brake} + return self.packer_chassis.make_can_msg_panda("EBCMFrictionBrakeCmd", self.BRAKE_BUS, values) + + def _send_gas_msg(self, gas): + values = {"GasRegenCmd": gas} + return self.packer.make_can_msg_panda("ASCMGasRegenCmd", 0, values) + + # override these tests from PandaCarSafetyTest, GM longitudinal uses button enable + def _pcm_status_msg(self, enable): + raise NotImplementedError + + def test_disable_control_allowed_from_cruise(self): + pass + + def test_enable_control_allowed_from_cruise(self): + pass + + def test_cruise_engaged_prev(self): + pass + + def test_set_resume_buttons(self): + """ 
+ SET and RESUME enter controls allowed on their falling and rising edges, respectively. + """ + for btn_prev in range(8): + for btn_cur in range(8): + with self.subTest(btn_prev=btn_prev, btn_cur=btn_cur): + self._rx(self._button_msg(btn_prev)) + self.safety.set_controls_allowed(0) + for _ in range(10): + self._rx(self._button_msg(btn_cur)) + + should_enable = btn_cur != Buttons.DECEL_SET and btn_prev == Buttons.DECEL_SET + should_enable = should_enable or (btn_cur == Buttons.RES_ACCEL and btn_prev != Buttons.RES_ACCEL) + should_enable = should_enable and btn_cur != Buttons.CANCEL + self.assertEqual(should_enable, self.safety.get_controls_allowed()) + + def test_cancel_button(self): + self.safety.set_controls_allowed(1) + self._rx(self._button_msg(Buttons.CANCEL)) + self.assertFalse(self.safety.get_controls_allowed()) + + +class TestGmSafetyBase(common.PandaCarSafetyTest, common.DriverTorqueSteeringSafetyTest): + STANDSTILL_THRESHOLD = 10 * 0.0311 + # Ensures ASCM is off on ASCM cars, and relay is not malfunctioning for camera-ACC cars + RELAY_MALFUNCTION_ADDRS = {0: (0x180,)} # ASCMLKASteeringCmd + BUTTONS_BUS = 0 # rx or tx + BRAKE_BUS = 0 # tx only + + MAX_RATE_UP = 10 + MAX_RATE_DOWN = 15 + MAX_TORQUE = 300 + MAX_RT_DELTA = 128 + RT_INTERVAL = 250000 + DRIVER_TORQUE_ALLOWANCE = 65 + DRIVER_TORQUE_FACTOR = 4 + + PCM_CRUISE = True # openpilot is tied to the PCM state if not longitudinal + + @classmethod + def setUpClass(cls): + if cls.__name__ == "TestGmSafetyBase": + cls.packer = None + cls.safety = None + raise unittest.SkipTest + + def setUp(self): + self.packer = CANPackerPanda("gm_global_a_powertrain_generated") + self.packer_chassis = CANPackerPanda("gm_global_a_chassis") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_GM, 0) + self.safety.init_tests() + + def _pcm_status_msg(self, enable): + if self.PCM_CRUISE: + values = {"CruiseState": enable} + return self.packer.make_can_msg_panda("AcceleratorPedal2", 0, values) + else: + raise NotImplementedError + + def _speed_msg(self, speed): + values = {"%sWheelSpd" % s: speed for s in ["RL", "RR"]} + return self.packer.make_can_msg_panda("EBCMWheelSpdRear", 0, values) + + def _user_brake_msg(self, brake): + # GM safety has a brake threshold of 8 + values = {"BrakePedalPos": 8 if brake else 0} + return self.packer.make_can_msg_panda("ECMAcceleratorPos", 0, values) + + def _user_regen_msg(self, regen): + values = {"RegenPaddle": 2 if regen else 0} + return self.packer.make_can_msg_panda("EBCMRegenPaddle", 0, values) + + def _user_gas_msg(self, gas): + values = {"AcceleratorPedal2": 1 if gas else 0} + if self.PCM_CRUISE: + # Fill CruiseState with expected value if the safety mode reads cruise state from gas msg + values["CruiseState"] = self.safety.get_controls_allowed() + return self.packer.make_can_msg_panda("AcceleratorPedal2", 0, values) + + def _torque_driver_msg(self, torque): + # Safety tests assume driver torque is an int, use DBC factor + values = {"LKADriverAppldTrq": torque * 0.01} + return self.packer.make_can_msg_panda("PSCMStatus", 0, values) + + def _torque_cmd_msg(self, torque, steer_req=1): + values = {"LKASteeringCmd": torque, "LKASteeringCmdActive": steer_req} + return self.packer.make_can_msg_panda("ASCMLKASteeringCmd", 0, values) + + def _button_msg(self, buttons): + values = {"ACCButtons": buttons} + return self.packer.make_can_msg_panda("ASCMSteeringButton", self.BUTTONS_BUS, values) + + +class TestGmAscmSafety(GmLongitudinalBase, TestGmSafetyBase): + TX_MSGS = [[0x180, 0], [0x409, 
0], [0x40A, 0], [0x2CB, 0], [0x370, 0], # pt bus + [0xA1, 1], [0x306, 1], [0x308, 1], [0x310, 1], # obs bus + [0x315, 2], # ch bus + [0x104c006c, 3], [0x10400060, 3]] # gmlan + FWD_BLACKLISTED_ADDRS: dict[int, list[int]] = {} + FWD_BUS_LOOKUP: dict[int, int] = {} + BRAKE_BUS = 2 + + MAX_GAS = 3072 + MIN_GAS = 1404 # maximum regen + INACTIVE_GAS = 1404 + + def setUp(self): + self.packer = CANPackerPanda("gm_global_a_powertrain_generated") + self.packer_chassis = CANPackerPanda("gm_global_a_chassis") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_GM, 0) + self.safety.init_tests() + + +class TestGmCameraSafetyBase(TestGmSafetyBase): + + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + @classmethod + def setUpClass(cls): + if cls.__name__ == "TestGmCameraSafetyBase": + cls.packer = None + cls.safety = None + raise unittest.SkipTest + + def _user_brake_msg(self, brake): + values = {"BrakePressed": brake} + return self.packer.make_can_msg_panda("ECMEngineStatus", 0, values) + + +class TestGmCameraSafety(TestGmCameraSafetyBase): + TX_MSGS = [[0x180, 0], # pt bus + [0x184, 2]] # camera bus + FWD_BLACKLISTED_ADDRS = {2: [0x180], 0: [0x184]} # block LKAS message and PSCMStatus + BUTTONS_BUS = 2 # tx only + + def setUp(self): + self.packer = CANPackerPanda("gm_global_a_powertrain_generated") + self.packer_chassis = CANPackerPanda("gm_global_a_chassis") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_GM, Panda.FLAG_GM_HW_CAM) + self.safety.init_tests() + + def test_buttons(self): + # Only CANCEL button is allowed while cruise is enabled + self.safety.set_controls_allowed(0) + for btn in range(8): + self.assertFalse(self._tx(self._button_msg(btn))) + + self.safety.set_controls_allowed(1) + for btn in range(8): + self.assertFalse(self._tx(self._button_msg(btn))) + + for enabled in (True, False): + self._rx(self._pcm_status_msg(enabled)) + self.assertEqual(enabled, self._tx(self._button_msg(Buttons.CANCEL))) + + +class TestGmCameraLongitudinalSafety(GmLongitudinalBase, TestGmCameraSafetyBase): + TX_MSGS = [[0x180, 0], [0x315, 0], [0x2CB, 0], [0x370, 0], # pt bus + [0x184, 2]] # camera bus + FWD_BLACKLISTED_ADDRS = {2: [0x180, 0x2CB, 0x370, 0x315], 0: [0x184]} # block LKAS, ACC messages and PSCMStatus + BUTTONS_BUS = 0 # rx only + + MAX_GAS = 3400 + MIN_GAS = 1514 # maximum regen + INACTIVE_GAS = 1554 + + def setUp(self): + self.packer = CANPackerPanda("gm_global_a_powertrain_generated") + self.packer_chassis = CANPackerPanda("gm_global_a_chassis") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_GM, Panda.FLAG_GM_HW_CAM | Panda.FLAG_GM_HW_CAM_LONG) + self.safety.init_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_honda.py b/panda/tests/safety/test_honda.py new file mode 100644 index 0000000..45f190c --- /dev/null +++ b/panda/tests/safety/test_honda.py @@ -0,0 +1,618 @@ +#!/usr/bin/env python3 +import unittest +import numpy as np + +from panda import Panda +from panda.tests.libpanda import libpanda_py +import panda.tests.safety.common as common +from panda.tests.safety.common import CANPackerPanda, MAX_WRONG_COUNTERS + +HONDA_N_COMMON_TX_MSGS = [[0xE4, 0], [0x194, 0], [0x1FA, 0], [0x30C, 0], [0x33D, 0]] + +class Btn: + NONE = 0 + MAIN = 1 + CANCEL = 2 + SET = 3 + RESUME = 4 + +HONDA_NIDEC = 0 +HONDA_BOSCH = 1 + + +# Honda safety has several different configurations tested here: +# * Nidec +# * normal (PCM-enable) +# * alt SCM messages (PCM-enable) +# * gas interceptor 
(button-enable) +# * gas interceptor with alt SCM messages (button-enable) +# * Bosch +# * Bosch with Longitudinal Support +# * Bosch Radarless +# * Bosch Radarless with Longitudinal Support + + +class HondaButtonEnableBase(common.PandaCarSafetyTest): + # pylint: disable=no-member,abstract-method + + # override these inherited tests since we're using button enable + def test_disable_control_allowed_from_cruise(self): + pass + + def test_enable_control_allowed_from_cruise(self): + pass + + def test_cruise_engaged_prev(self): + pass + + def test_buttons_with_main_off(self): + for btn in (Btn.SET, Btn.RESUME, Btn.CANCEL): + self.safety.set_controls_allowed(1) + self._rx(self._acc_state_msg(False)) + self._rx(self._button_msg(btn, main_on=False)) + self.assertFalse(self.safety.get_controls_allowed()) + + def test_set_resume_buttons(self): + """ + Both SET and RES should enter controls allowed on their falling edge. + """ + for main_on in (True, False): + self._rx(self._acc_state_msg(main_on)) + for btn_prev in range(8): + for btn_cur in range(8): + self._rx(self._button_msg(Btn.NONE)) + self.safety.set_controls_allowed(0) + for _ in range(10): + self._rx(self._button_msg(btn_prev)) + self.assertFalse(self.safety.get_controls_allowed()) + + # should enter controls allowed on falling edge and not transitioning to cancel or main + should_enable = (main_on and + btn_cur != btn_prev and + btn_prev in (Btn.RESUME, Btn.SET) and + btn_cur not in (Btn.CANCEL, Btn.MAIN)) + + self._rx(self._button_msg(btn_cur, main_on=main_on)) + self.assertEqual(should_enable, self.safety.get_controls_allowed(), msg=f"{main_on=} {btn_prev=} {btn_cur=}") + + def test_main_cancel_buttons(self): + """ + Both MAIN and CANCEL should exit controls immediately. + """ + for btn in (Btn.MAIN, Btn.CANCEL): + self.safety.set_controls_allowed(1) + self._rx(self._button_msg(btn, main_on=True)) + self.assertFalse(self.safety.get_controls_allowed()) + + def test_disengage_on_main(self): + self.safety.set_controls_allowed(1) + self._rx(self._acc_state_msg(True)) + self.assertTrue(self.safety.get_controls_allowed()) + self._rx(self._acc_state_msg(False)) + self.assertFalse(self.safety.get_controls_allowed()) + + def test_rx_hook(self): + + # TODO: move this test to common + # checksum checks + for msg in ["btn", "gas", "speed"]: + self.safety.set_controls_allowed(1) + if msg == "btn": + to_push = self._button_msg(Btn.SET) + if msg == "gas": + to_push = self._user_gas_msg(0) + if msg == "speed": + to_push = self._speed_msg(0) + self.assertTrue(self._rx(to_push)) + if msg != "btn": + to_push[0].data[4] = 0 # invalidate checksum + to_push[0].data[5] = 0 + to_push[0].data[6] = 0 + to_push[0].data[7] = 0 + self.assertFalse(self._rx(to_push)) + self.assertFalse(self.safety.get_controls_allowed()) + + # counter + # reset wrong_counters to zero by sending valid messages + for i in range(MAX_WRONG_COUNTERS + 1): + self.__class__.cnt_speed += 1 + self.__class__.cnt_button += 1 + self.__class__.cnt_powertrain_data += 1 + if i < MAX_WRONG_COUNTERS: + self.safety.set_controls_allowed(1) + self._rx(self._button_msg(Btn.SET)) + self._rx(self._speed_msg(0)) + self._rx(self._user_gas_msg(0)) + else: + self.assertFalse(self._rx(self._button_msg(Btn.SET))) + self.assertFalse(self._rx(self._speed_msg(0))) + self.assertFalse(self._rx(self._user_gas_msg(0))) + self.assertFalse(self.safety.get_controls_allowed()) + + # restore counters for future tests with a couple of good messages + for _ in range(2): + self.safety.set_controls_allowed(1) + 
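# editor's note (illustrative only, not part of the original patch): the COUNTER signals
# used in this test are small rolling counters (here modulo 4), so each valid message is
# expected to advance the counter by one, roughly:
#   expected = (last_counter + 1) % 4
#   wrong_counters = wrong_counters + 1 if received != expected else 0
# The loop above deliberately skips counter values until about MAX_WRONG_COUNTERS
# mismatches accumulate and the rx hook starts rejecting messages; the good messages
# sent here simply re-sync the counters for later tests.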
self._rx(self._button_msg(Btn.SET, main_on=True)) + self._rx(self._speed_msg(0)) + self._rx(self._user_gas_msg(0)) + self._rx(self._button_msg(Btn.SET, main_on=True)) + self.assertTrue(self.safety.get_controls_allowed()) + + +class HondaPcmEnableBase(common.PandaCarSafetyTest): + # pylint: disable=no-member,abstract-method + + def test_buttons(self): + """ + Buttons should only cancel in this configuration, + since our state is tied to the PCM's cruise state. + """ + for controls_allowed in (True, False): + for main_on in (True, False): + # not a valid state + if controls_allowed and not main_on: + continue + + for btn in (Btn.SET, Btn.RESUME, Btn.CANCEL): + self.safety.set_controls_allowed(controls_allowed) + self._rx(self._acc_state_msg(main_on)) + + # btn + none for falling edge + self._rx(self._button_msg(btn, main_on=main_on)) + self._rx(self._button_msg(Btn.NONE, main_on=main_on)) + + if btn == Btn.CANCEL: + self.assertFalse(self.safety.get_controls_allowed()) + else: + self.assertEqual(controls_allowed, self.safety.get_controls_allowed()) + + +class HondaBase(common.PandaCarSafetyTest): + MAX_BRAKE = 255 + PT_BUS: int | None = None # must be set when inherited + STEER_BUS: int | None = None # must be set when inherited + BUTTONS_BUS: int | None = None # must be set when inherited, tx on this bus, rx on PT_BUS + + STANDSTILL_THRESHOLD = 0 + RELAY_MALFUNCTION_ADDRS = {0: (0xE4, 0x194)} # STEERING_CONTROL + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + cnt_speed = 0 + cnt_button = 0 + cnt_brake = 0 + cnt_powertrain_data = 0 + cnt_acc_state = 0 + + @classmethod + def setUpClass(cls): + if cls.__name__.endswith("Base"): + cls.packer = None + cls.safety = None + raise unittest.SkipTest + + def _powertrain_data_msg(self, cruise_on=None, brake_pressed=None, gas_pressed=None): + # preserve the state + if cruise_on is None: + # or'd with controls allowed since the tests use it to "enable" cruise + cruise_on = self.safety.get_cruise_engaged_prev() or self.safety.get_controls_allowed() + if brake_pressed is None: + brake_pressed = self.safety.get_brake_pressed_prev() + if gas_pressed is None: + gas_pressed = self.safety.get_gas_pressed_prev() + + values = { + "ACC_STATUS": cruise_on, + "BRAKE_PRESSED": brake_pressed, + "PEDAL_GAS": gas_pressed, + "COUNTER": self.cnt_powertrain_data % 4 + } + self.__class__.cnt_powertrain_data += 1 + return self.packer.make_can_msg_panda("POWERTRAIN_DATA", self.PT_BUS, values) + + def _pcm_status_msg(self, enable): + return self._powertrain_data_msg(cruise_on=enable) + + def _speed_msg(self, speed): + values = {"XMISSION_SPEED": speed, "COUNTER": self.cnt_speed % 4} + self.__class__.cnt_speed += 1 + return self.packer.make_can_msg_panda("ENGINE_DATA", self.PT_BUS, values) + + def _acc_state_msg(self, main_on): + values = {"MAIN_ON": main_on, "COUNTER": self.cnt_acc_state % 4} + self.__class__.cnt_acc_state += 1 + return self.packer.make_can_msg_panda("SCM_FEEDBACK", self.PT_BUS, values) + + def _button_msg(self, buttons, main_on=False, bus=None): + bus = self.PT_BUS if bus is None else bus + values = {"CRUISE_BUTTONS": buttons, "COUNTER": self.cnt_button % 4} + self.__class__.cnt_button += 1 + return self.packer.make_can_msg_panda("SCM_BUTTONS", bus, values) + + def _user_brake_msg(self, brake): + return self._powertrain_data_msg(brake_pressed=brake) + + def _user_gas_msg(self, gas): + return self._powertrain_data_msg(gas_pressed=gas) + + def _send_steer_msg(self, steer): + values = {"STEER_TORQUE": steer} + return self.packer.make_can_msg_panda("STEERING_CONTROL", 
self.STEER_BUS, values) + + def _send_brake_msg(self, brake): + # must be implemented when inherited + raise NotImplementedError + + def test_disengage_on_brake(self): + self.safety.set_controls_allowed(1) + self._rx(self._user_brake_msg(1)) + self.assertFalse(self.safety.get_controls_allowed()) + + def test_steer_safety_check(self): + self.safety.set_controls_allowed(0) + self.assertTrue(self._tx(self._send_steer_msg(0x0000))) + self.assertFalse(self._tx(self._send_steer_msg(0x1000))) + + +# ********************* Honda Nidec ********************** + + +class TestHondaNidecSafetyBase(HondaBase): + TX_MSGS = HONDA_N_COMMON_TX_MSGS + FWD_BLACKLISTED_ADDRS = {2: [0xE4, 0x194, 0x33D, 0x30C]} + + PT_BUS = 0 + STEER_BUS = 0 + BUTTONS_BUS = 0 + + MAX_GAS = 198 + + def setUp(self): + self.packer = CANPackerPanda("honda_civic_touring_2016_can_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HONDA_NIDEC, 0) + self.safety.init_tests() + + def _send_brake_msg(self, brake, aeb_req=0, bus=0): + values = {"COMPUTER_BRAKE": brake, "AEB_REQ_1": aeb_req} + return self.packer.make_can_msg_panda("BRAKE_COMMAND", bus, values) + + def _rx_brake_msg(self, brake, aeb_req=0): + return self._send_brake_msg(brake, aeb_req, bus=2) + + def _send_acc_hud_msg(self, pcm_gas, pcm_speed): + # Used to control ACC on Nidec without pedal + values = {"PCM_GAS": pcm_gas, "PCM_SPEED": pcm_speed} + return self.packer.make_can_msg_panda("ACC_HUD", 0, values) + + def test_acc_hud_safety_check(self): + for controls_allowed in [True, False]: + self.safety.set_controls_allowed(controls_allowed) + for pcm_gas in range(255): + for pcm_speed in range(100): + send = (controls_allowed and pcm_gas <= self.MAX_GAS) or (pcm_gas == 0 and pcm_speed == 0) + self.assertEqual(send, self._tx(self._send_acc_hud_msg(pcm_gas, pcm_speed))) + + def test_fwd_hook(self): + # normal operation, not forwarding AEB + self.FWD_BLACKLISTED_ADDRS[2].append(0x1FA) + self.safety.set_honda_fwd_brake(False) + super().test_fwd_hook() + + # forwarding AEB brake signal + self.FWD_BLACKLISTED_ADDRS = {2: [0xE4, 0x194, 0x33D, 0x30C]} + self.safety.set_honda_fwd_brake(True) + super().test_fwd_hook() + + def test_honda_fwd_brake_latching(self): + # Shouldn't fwd stock Honda requesting brake without AEB + self.assertTrue(self._rx(self._rx_brake_msg(self.MAX_BRAKE, aeb_req=0))) + self.assertFalse(self.safety.get_honda_fwd_brake()) + + # Now allow controls and request some brake + openpilot_brake = round(self.MAX_BRAKE / 2.0) + self.safety.set_controls_allowed(True) + self.assertTrue(self._tx(self._send_brake_msg(openpilot_brake))) + + # Still shouldn't fwd stock Honda brake until it's more than openpilot's + for stock_honda_brake in range(self.MAX_BRAKE + 1): + self.assertTrue(self._rx(self._rx_brake_msg(stock_honda_brake, aeb_req=1))) + should_fwd_brake = stock_honda_brake >= openpilot_brake + self.assertEqual(should_fwd_brake, self.safety.get_honda_fwd_brake()) + + # Shouldn't stop fwding until AEB event is over + for stock_honda_brake in range(self.MAX_BRAKE + 1)[::-1]: + self.assertTrue(self._rx(self._rx_brake_msg(stock_honda_brake, aeb_req=1))) + self.assertTrue(self.safety.get_honda_fwd_brake()) + + self.assertTrue(self._rx(self._rx_brake_msg(0, aeb_req=0))) + self.assertFalse(self.safety.get_honda_fwd_brake()) + + def test_brake_safety_check(self): + for fwd_brake in [False, True]: + self.safety.set_honda_fwd_brake(fwd_brake) + for brake in np.arange(0, self.MAX_BRAKE + 10, 1): + for controls_allowed in [True, False]: + 
self.safety.set_controls_allowed(controls_allowed) + if fwd_brake: + send = False # block openpilot brake msg when fwd'ing stock msg + elif controls_allowed: + send = self.MAX_BRAKE >= brake >= 0 + else: + send = brake == 0 + self.assertEqual(send, self._tx(self._send_brake_msg(brake))) + + +class TestHondaNidecPcmSafety(HondaPcmEnableBase, TestHondaNidecSafetyBase): + """ + Covers the Honda Nidec safety mode + """ + + # Nidec doesn't disengage on falling edge of cruise. See comment in safety_honda.h + def test_disable_control_allowed_from_cruise(self): + pass + + +class TestHondaNidecGasInterceptorSafety(common.GasInterceptorSafetyTest, HondaButtonEnableBase, TestHondaNidecSafetyBase): + """ + Covers the Honda Nidec safety mode with a gas interceptor, switches to a button-enable car + """ + + TX_MSGS = HONDA_N_COMMON_TX_MSGS + [[0x200, 0]] + INTERCEPTOR_THRESHOLD = 492 + + def setUp(self): + self.packer = CANPackerPanda("honda_civic_touring_2016_can_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HONDA_NIDEC, Panda.FLAG_HONDA_GAS_INTERCEPTOR) + self.safety.init_tests() + + +class TestHondaNidecPcmAltSafety(TestHondaNidecPcmSafety): + """ + Covers the Honda Nidec safety mode with alt SCM messages + """ + def setUp(self): + self.packer = CANPackerPanda("acura_ilx_2016_can_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HONDA_NIDEC, Panda.FLAG_HONDA_NIDEC_ALT) + self.safety.init_tests() + + def _acc_state_msg(self, main_on): + values = {"MAIN_ON": main_on, "COUNTER": self.cnt_acc_state % 4} + self.__class__.cnt_acc_state += 1 + return self.packer.make_can_msg_panda("SCM_BUTTONS", self.PT_BUS, values) + + def _button_msg(self, buttons, main_on=False, bus=None): + bus = self.PT_BUS if bus is None else bus + values = {"CRUISE_BUTTONS": buttons, "MAIN_ON": main_on, "COUNTER": self.cnt_button % 4} + self.__class__.cnt_button += 1 + return self.packer.make_can_msg_panda("SCM_BUTTONS", bus, values) + + +class TestHondaNidecAltGasInterceptorSafety(common.GasInterceptorSafetyTest, HondaButtonEnableBase, TestHondaNidecSafetyBase): + """ + Covers the Honda Nidec safety mode with alt SCM messages and gas interceptor, switches to a button-enable car + """ + + TX_MSGS = HONDA_N_COMMON_TX_MSGS + [[0x200, 0]] + INTERCEPTOR_THRESHOLD = 492 + + def setUp(self): + self.packer = CANPackerPanda("acura_ilx_2016_can_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HONDA_NIDEC, Panda.FLAG_HONDA_NIDEC_ALT | Panda.FLAG_HONDA_GAS_INTERCEPTOR) + self.safety.init_tests() + + def _acc_state_msg(self, main_on): + values = {"MAIN_ON": main_on, "COUNTER": self.cnt_acc_state % 4} + self.__class__.cnt_acc_state += 1 + return self.packer.make_can_msg_panda("SCM_BUTTONS", self.PT_BUS, values) + + def _button_msg(self, buttons, main_on=False, bus=None): + bus = self.PT_BUS if bus is None else bus + values = {"CRUISE_BUTTONS": buttons, "MAIN_ON": main_on, "COUNTER": self.cnt_button % 4} + self.__class__.cnt_button += 1 + return self.packer.make_can_msg_panda("SCM_BUTTONS", bus, values) + + + +# ********************* Honda Bosch ********************** + + +class TestHondaBoschSafetyBase(HondaBase): + PT_BUS = 1 + STEER_BUS = 0 + BUTTONS_BUS = 1 + + TX_MSGS = [[0xE4, 0], [0xE5, 0], [0x296, 1], [0x33D, 0], [0x33DA, 0], [0x33DB, 0]] + FWD_BLACKLISTED_ADDRS = {2: [0xE4, 0xE5, 0x33D, 0x33DA, 0x33DB]} + + def setUp(self): + self.packer = CANPackerPanda("honda_accord_2018_can_generated") + self.safety = 
libpanda_py.libpanda + + def _alt_brake_msg(self, brake): + values = {"BRAKE_PRESSED": brake, "COUNTER": self.cnt_brake % 4} + self.__class__.cnt_brake += 1 + return self.packer.make_can_msg_panda("BRAKE_MODULE", self.PT_BUS, values) + + def _send_brake_msg(self, brake): + pass + + def test_alt_disengage_on_brake(self): + self.safety.set_honda_alt_brake_msg(1) + self.safety.set_controls_allowed(1) + self._rx(self._alt_brake_msg(1)) + self.assertFalse(self.safety.get_controls_allowed()) + + self.safety.set_honda_alt_brake_msg(0) + self.safety.set_controls_allowed(1) + self._rx(self._alt_brake_msg(1)) + self.assertTrue(self.safety.get_controls_allowed()) + + def test_spam_cancel_safety_check(self): + self.safety.set_controls_allowed(0) + self.assertTrue(self._tx(self._button_msg(Btn.CANCEL, bus=self.BUTTONS_BUS))) + self.assertFalse(self._tx(self._button_msg(Btn.RESUME, bus=self.BUTTONS_BUS))) + self.assertFalse(self._tx(self._button_msg(Btn.SET, bus=self.BUTTONS_BUS))) + # do not block resume if we are engaged already + self.safety.set_controls_allowed(1) + self.assertTrue(self._tx(self._button_msg(Btn.RESUME, bus=self.BUTTONS_BUS))) + + +class TestHondaBoschAltBrakeSafetyBase(TestHondaBoschSafetyBase): + """ + Base Bosch safety test class with an alternate brake message + """ + def setUp(self): + super().setUp() + self.safety.set_safety_hooks(Panda.SAFETY_HONDA_BOSCH, Panda.FLAG_HONDA_ALT_BRAKE) + self.safety.init_tests() + + def _user_brake_msg(self, brake): + return self._alt_brake_msg(brake) + + def test_alt_brake_rx_hook(self): + self.safety.set_honda_alt_brake_msg(1) + self.safety.set_controls_allowed(1) + to_push = self._alt_brake_msg(0) + self.assertTrue(self._rx(to_push)) + to_push[0].data[2] = to_push[0].data[2] & 0xF0 # invalidate checksum + self.assertFalse(self._rx(to_push)) + self.assertFalse(self.safety.get_controls_allowed()) + + +class TestHondaBoschSafety(HondaPcmEnableBase, TestHondaBoschSafetyBase): + """ + Covers the Honda Bosch safety mode with stock longitudinal + """ + def setUp(self): + super().setUp() + self.safety.set_safety_hooks(Panda.SAFETY_HONDA_BOSCH, 0) + self.safety.init_tests() + + +class TestHondaBoschAltBrakeSafety(HondaPcmEnableBase, TestHondaBoschAltBrakeSafetyBase): + """ + Covers the Honda Bosch safety mode with stock longitudinal and an alternate brake message + """ + + +class TestHondaBoschLongSafety(HondaButtonEnableBase, TestHondaBoschSafetyBase): + """ + Covers the Honda Bosch safety mode with longitudinal control + """ + NO_GAS = -30000 + MAX_GAS = 2000 + MAX_ACCEL = 2.0 # accel is used for brakes, but openpilot can set positive values + MIN_ACCEL = -3.5 + + STEER_BUS = 1 + TX_MSGS = [[0xE4, 1], [0x1DF, 1], [0x1EF, 1], [0x1FA, 1], [0x30C, 1], [0x33D, 1], [0x33DA, 1], [0x33DB, 1], [0x39F, 1], [0x18DAB0F1, 1]] + FWD_BLACKLISTED_ADDRS = {2: [0xE4, 0xE5, 0x33D, 0x33DA, 0x33DB]} + # 0x1DF is to test that radar is disabled + RELAY_MALFUNCTION_ADDRS = {0: (0xE4, 0x194), 1: (0x1DF,)} # STEERING_CONTROL, ACC_CONTROL + + def setUp(self): + super().setUp() + self.safety.set_safety_hooks(Panda.SAFETY_HONDA_BOSCH, Panda.FLAG_HONDA_BOSCH_LONG) + self.safety.init_tests() + + def _send_gas_brake_msg(self, gas, accel): + values = { + "GAS_COMMAND": gas, + "ACCEL_COMMAND": accel, + "BRAKE_REQUEST": accel < 0, + } + return self.packer.make_can_msg_panda("ACC_CONTROL", self.PT_BUS, values) + + # Longitudinal doesn't need to send buttons + def test_spam_cancel_safety_check(self): + pass + + def test_diagnostics(self): + tester_present = 
libpanda_py.make_CANPacket(0x18DAB0F1, self.PT_BUS, b"\x02\x3E\x80\x00\x00\x00\x00\x00") + self.assertTrue(self._tx(tester_present)) + + not_tester_present = libpanda_py.make_CANPacket(0x18DAB0F1, self.PT_BUS, b"\x03\xAA\xAA\x00\x00\x00\x00\x00") + self.assertFalse(self._tx(not_tester_present)) + + def test_gas_safety_check(self): + for controls_allowed in [True, False]: + for gas in np.arange(self.NO_GAS, self.MAX_GAS + 2000, 100): + accel = 0 if gas < 0 else gas / 1000 + self.safety.set_controls_allowed(controls_allowed) + send = (controls_allowed and 0 <= gas <= self.MAX_GAS) or gas == self.NO_GAS + self.assertEqual(send, self._tx(self._send_gas_brake_msg(gas, accel)), (controls_allowed, gas, accel)) + + def test_brake_safety_check(self): + for controls_allowed in [True, False]: + for accel in np.arange(self.MIN_ACCEL - 1, self.MAX_ACCEL + 1, 0.01): + accel = round(accel, 2) # floats might not hit exact boundary conditions without rounding + self.safety.set_controls_allowed(controls_allowed) + send = self.MIN_ACCEL <= accel <= self.MAX_ACCEL if controls_allowed else accel == 0 + self.assertEqual(send, self._tx(self._send_gas_brake_msg(self.NO_GAS, accel)), (controls_allowed, accel)) + + +class TestHondaBoschRadarlessSafetyBase(TestHondaBoschSafetyBase): + """Base class for radarless Honda Bosch""" + PT_BUS = 0 + STEER_BUS = 0 + BUTTONS_BUS = 2 # camera controls ACC, need to send buttons on bus 2 + + TX_MSGS = [[0xE4, 0], [0x296, 2], [0x33D, 0]] + FWD_BLACKLISTED_ADDRS = {2: [0xE4, 0xE5, 0x33D, 0x33DA, 0x33DB]} + + def setUp(self): + self.packer = CANPackerPanda("honda_civic_ex_2022_can_generated") + self.safety = libpanda_py.libpanda + + +class TestHondaBoschRadarlessSafety(HondaPcmEnableBase, TestHondaBoschRadarlessSafetyBase): + """ + Covers the Honda Bosch Radarless safety mode with stock longitudinal + """ + + def setUp(self): + super().setUp() + self.safety.set_safety_hooks(Panda.SAFETY_HONDA_BOSCH, Panda.FLAG_HONDA_RADARLESS) + self.safety.init_tests() + + +class TestHondaBoschRadarlessAltBrakeSafety(HondaPcmEnableBase, TestHondaBoschRadarlessSafetyBase, TestHondaBoschAltBrakeSafetyBase): + """ + Covers the Honda Bosch Radarless safety mode with stock longitudinal and an alternate brake message + """ + + def setUp(self): + super().setUp() + self.safety.set_safety_hooks(Panda.SAFETY_HONDA_BOSCH, Panda.FLAG_HONDA_RADARLESS | Panda.FLAG_HONDA_ALT_BRAKE) + self.safety.init_tests() + + +class TestHondaBoschRadarlessLongSafety(common.LongitudinalAccelSafetyTest, HondaButtonEnableBase, + TestHondaBoschRadarlessSafetyBase): + """ + Covers the Honda Bosch Radarless safety mode with longitudinal control + """ + TX_MSGS = [[0xE4, 0], [0x33D, 0], [0x1C8, 0], [0x30C, 0]] + FWD_BLACKLISTED_ADDRS = {2: [0xE4, 0xE5, 0x33D, 0x33DA, 0x33DB, 0x1C8, 0x30C]} + + def setUp(self): + super().setUp() + self.safety.set_safety_hooks(Panda.SAFETY_HONDA_BOSCH, Panda.FLAG_HONDA_RADARLESS | Panda.FLAG_HONDA_BOSCH_LONG) + self.safety.init_tests() + + def _accel_msg(self, accel): + values = { + "ACCEL_COMMAND": accel, + } + return self.packer.make_can_msg_panda("ACC_CONTROL", self.PT_BUS, values) + + # Longitudinal doesn't need to send buttons + def test_spam_cancel_safety_check(self): + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_hyundai.py b/panda/tests/safety/test_hyundai.py new file mode 100644 index 0000000..fbe7d10 --- /dev/null +++ b/panda/tests/safety/test_hyundai.py @@ -0,0 +1,216 @@ +#!/usr/bin/env python3 +import random +import unittest +from panda import 
Panda +from panda.tests.libpanda import libpanda_py +import panda.tests.safety.common as common +from panda.tests.safety.common import CANPackerPanda +from panda.tests.safety.hyundai_common import HyundaiButtonBase, HyundaiLongitudinalBase + + +# 4 bit checkusm used in some hyundai messages +# lives outside the can packer because we never send this msg +def checksum(msg): + addr, t, dat, bus = msg + + chksum = 0 + if addr == 0x386: + for i, b in enumerate(dat): + for j in range(8): + # exclude checksum and counter bits + if (i != 1 or j < 6) and (i != 3 or j < 6) and (i != 5 or j < 6) and (i != 7 or j < 6): + bit = (b >> j) & 1 + else: + bit = 0 + chksum += bit + chksum = (chksum ^ 9) & 0xF + ret = bytearray(dat) + ret[5] |= (chksum & 0x3) << 6 + ret[7] |= (chksum & 0xc) << 4 + else: + for i, b in enumerate(dat): + if addr in [0x260, 0x421] and i == 7: + b &= 0x0F if addr == 0x421 else 0xF0 + elif addr == 0x394 and i == 6: + b &= 0xF0 + elif addr == 0x394 and i == 7: + continue + chksum += sum(divmod(b, 16)) + chksum = (16 - chksum) % 16 + ret = bytearray(dat) + ret[6 if addr == 0x394 else 7] |= chksum << (4 if addr == 0x421 else 0) + + return addr, t, ret, bus + + +class TestHyundaiSafety(HyundaiButtonBase, common.PandaCarSafetyTest, common.DriverTorqueSteeringSafetyTest, common.SteerRequestCutSafetyTest): + TX_MSGS = [[0x340, 0], [0x4F1, 0], [0x485, 0]] + STANDSTILL_THRESHOLD = 12 # 0.375 kph + RELAY_MALFUNCTION_ADDRS = {0: (0x340,)} # LKAS11 + FWD_BLACKLISTED_ADDRS = {2: [0x340, 0x485]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + MAX_RATE_UP = 3 + MAX_RATE_DOWN = 7 + MAX_TORQUE = 384 + MAX_RT_DELTA = 112 + RT_INTERVAL = 250000 + DRIVER_TORQUE_ALLOWANCE = 50 + DRIVER_TORQUE_FACTOR = 2 + + # Safety around steering req bit + MIN_VALID_STEERING_FRAMES = 89 + MAX_INVALID_STEERING_FRAMES = 2 + MIN_VALID_STEERING_RT_INTERVAL = 810000 # a ~10% buffer, can send steer up to 110Hz + + cnt_gas = 0 + cnt_speed = 0 + cnt_brake = 0 + cnt_cruise = 0 + cnt_button = 0 + + def setUp(self): + self.packer = CANPackerPanda("hyundai_kia_generic") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HYUNDAI, 0) + self.safety.init_tests() + + def _button_msg(self, buttons, main_button=0, bus=0): + values = {"CF_Clu_CruiseSwState": buttons, "CF_Clu_CruiseSwMain": main_button, "CF_Clu_AliveCnt1": self.cnt_button} + self.__class__.cnt_button += 1 + return self.packer.make_can_msg_panda("CLU11", bus, values) + + def _user_gas_msg(self, gas): + values = {"CF_Ems_AclAct": gas, "AliveCounter": self.cnt_gas % 4} + self.__class__.cnt_gas += 1 + return self.packer.make_can_msg_panda("EMS16", 0, values, fix_checksum=checksum) + + def _user_brake_msg(self, brake): + values = {"DriverOverride": 2 if brake else random.choice((0, 1, 3)), + "AliveCounterTCS": self.cnt_brake % 8} + self.__class__.cnt_brake += 1 + return self.packer.make_can_msg_panda("TCS13", 0, values, fix_checksum=checksum) + + def _speed_msg(self, speed): + # panda safety doesn't scale, so undo the scaling + values = {"WHL_SPD_%s" % s: speed * 0.03125 for s in ["FL", "FR", "RL", "RR"]} + values["WHL_SPD_AliveCounter_LSB"] = (self.cnt_speed % 16) & 0x3 + values["WHL_SPD_AliveCounter_MSB"] = (self.cnt_speed % 16) >> 2 + self.__class__.cnt_speed += 1 + return self.packer.make_can_msg_panda("WHL_SPD11", 0, values, fix_checksum=checksum) + + def _pcm_status_msg(self, enable): + values = {"ACCMode": enable, "CR_VSM_Alive": self.cnt_cruise % 16} + self.__class__.cnt_cruise += 1 + return self.packer.make_can_msg_panda("SCC12", self.SCC_BUS, 
values, fix_checksum=checksum) + + def _torque_driver_msg(self, torque): + values = {"CR_Mdps_StrColTq": torque} + return self.packer.make_can_msg_panda("MDPS12", 0, values) + + def _torque_cmd_msg(self, torque, steer_req=1): + values = {"CR_Lkas_StrToqReq": torque, "CF_Lkas_ActToi": steer_req} + return self.packer.make_can_msg_panda("LKAS11", 0, values) + + +class TestHyundaiSafetyAltLimits(TestHyundaiSafety): + MAX_RATE_UP = 2 + MAX_RATE_DOWN = 3 + MAX_TORQUE = 270 + + def setUp(self): + self.packer = CANPackerPanda("hyundai_kia_generic") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HYUNDAI, Panda.FLAG_HYUNDAI_ALT_LIMITS) + self.safety.init_tests() + + +class TestHyundaiSafetyCameraSCC(TestHyundaiSafety): + BUTTONS_TX_BUS = 2 # tx on 2, rx on 0 + SCC_BUS = 2 # rx on 2 + + def setUp(self): + self.packer = CANPackerPanda("hyundai_kia_generic") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HYUNDAI, Panda.FLAG_HYUNDAI_CAMERA_SCC) + self.safety.init_tests() + + +class TestHyundaiLegacySafety(TestHyundaiSafety): + def setUp(self): + self.packer = CANPackerPanda("hyundai_kia_generic") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HYUNDAI_LEGACY, 0) + self.safety.init_tests() + + +class TestHyundaiLegacySafetyEV(TestHyundaiSafety): + def setUp(self): + self.packer = CANPackerPanda("hyundai_kia_generic") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HYUNDAI_LEGACY, 1) + self.safety.init_tests() + + def _user_gas_msg(self, gas): + values = {"Accel_Pedal_Pos": gas} + return self.packer.make_can_msg_panda("E_EMS11", 0, values, fix_checksum=checksum) + + +class TestHyundaiLegacySafetyHEV(TestHyundaiSafety): + def setUp(self): + self.packer = CANPackerPanda("hyundai_kia_generic") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HYUNDAI_LEGACY, 2) + self.safety.init_tests() + + def _user_gas_msg(self, gas): + values = {"CR_Vcu_AccPedDep_Pos": gas} + return self.packer.make_can_msg_panda("E_EMS11", 0, values, fix_checksum=checksum) + +class TestHyundaiLongitudinalSafety(HyundaiLongitudinalBase, TestHyundaiSafety): + TX_MSGS = [[0x340, 0], [0x4F1, 0], [0x485, 0], [0x420, 0], [0x421, 0], [0x50A, 0], [0x389, 0], [0x4A2, 0], [0x38D, 0], [0x483, 0], [0x7D0, 0]] + + RELAY_MALFUNCTION_ADDRS = {0: (0x340, 0x421)} # LKAS11, SCC12 + + DISABLED_ECU_UDS_MSG = (0x7D0, 0) + DISABLED_ECU_ACTUATION_MSG = (0x421, 0) + + def setUp(self): + self.packer = CANPackerPanda("hyundai_kia_generic") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HYUNDAI, Panda.FLAG_HYUNDAI_LONG) + self.safety.init_tests() + + def _accel_msg(self, accel, aeb_req=False, aeb_decel=0): + values = { + "aReqRaw": accel, + "aReqValue": accel, + "AEB_CmdAct": int(aeb_req), + "CR_VSM_DecCmd": aeb_decel, + } + return self.packer.make_can_msg_panda("SCC12", self.SCC_BUS, values) + + def _fca11_msg(self, idx=0, vsm_aeb_req=False, fca_aeb_req=False, aeb_decel=0): + values = { + "CR_FCA_Alive": idx % 0xF, + "FCA_Status": 2, + "CR_VSM_DecCmd": aeb_decel, + "CF_VSM_DecCmdAct": int(vsm_aeb_req), + "FCA_CmdAct": int(fca_aeb_req), + } + return self.packer.make_can_msg_panda("FCA11", 0, values) + + def test_no_aeb_fca11(self): + self.assertTrue(self._tx(self._fca11_msg())) + self.assertFalse(self._tx(self._fca11_msg(vsm_aeb_req=True))) + self.assertFalse(self._tx(self._fca11_msg(fca_aeb_req=True))) + self.assertFalse(self._tx(self._fca11_msg(aeb_decel=1.0))) + + 
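# --- editor's note: illustrative sketch, not part of the original patch ---
# The checksum() helper at the top of this file implements, for most addresses, a 4-bit
# nibble-sum complement: sum every 4-bit nibble of the payload (excluding the nibble that
# will hold the checksum itself) and store the value that brings the total to a multiple
# of 16. A minimal standalone sketch of that general branch (the function name is only
# for illustration):
def hyundai_nibble_checksum(dat: bytes) -> int:
    # high nibble + low nibble of every byte, then the complement mod 16
    nibble_sum = sum(sum(divmod(b, 16)) for b in dat)
    return (16 - nibble_sum) % 16

# e.g. for a payload whose checksum nibble is still zeroed:
assert hyundai_nibble_checksum(b"\x12\x34\x56\x78\x00\x00\x00\x00") == 12
# --- end editor's note ---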
def test_no_aeb_scc12(self): + self.assertTrue(self._tx(self._accel_msg(0))) + self.assertFalse(self._tx(self._accel_msg(0, aeb_req=True))) + self.assertFalse(self._tx(self._accel_msg(0, aeb_decel=1.0))) + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_hyundai_canfd.py b/panda/tests/safety/test_hyundai_canfd.py new file mode 100644 index 0000000..7f280b6 --- /dev/null +++ b/panda/tests/safety/test_hyundai_canfd.py @@ -0,0 +1,272 @@ +#!/usr/bin/env python3 +from parameterized import parameterized_class +import unittest +from panda import Panda +from panda.tests.libpanda import libpanda_py +import panda.tests.safety.common as common +from panda.tests.safety.common import CANPackerPanda +from panda.tests.safety.hyundai_common import HyundaiButtonBase, HyundaiLongitudinalBase + + +class TestHyundaiCanfdBase(HyundaiButtonBase, common.PandaCarSafetyTest, common.DriverTorqueSteeringSafetyTest, common.SteerRequestCutSafetyTest): + + TX_MSGS = [[0x50, 0], [0x1CF, 1], [0x2A4, 0]] + STANDSTILL_THRESHOLD = 12 # 0.375 kph + FWD_BLACKLISTED_ADDRS = {2: [0x50, 0x2a4]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + MAX_RATE_UP = 2 + MAX_RATE_DOWN = 3 + MAX_TORQUE = 270 + + MAX_RT_DELTA = 112 + RT_INTERVAL = 250000 + + DRIVER_TORQUE_ALLOWANCE = 250 + DRIVER_TORQUE_FACTOR = 2 + + # Safety around steering req bit + MIN_VALID_STEERING_FRAMES = 89 + MAX_INVALID_STEERING_FRAMES = 2 + MIN_VALID_STEERING_RT_INTERVAL = 810000 # a ~10% buffer, can send steer up to 110Hz + + PT_BUS = 0 + SCC_BUS = 2 + STEER_BUS = 0 + STEER_MSG = "" + GAS_MSG = ("", "") + BUTTONS_TX_BUS = 1 + + @classmethod + def setUpClass(cls): + super().setUpClass() + if cls.__name__ == "TestHyundaiCanfdBase": + cls.packer = None + cls.safety = None + raise unittest.SkipTest + + def _torque_driver_msg(self, torque): + values = {"STEERING_COL_TORQUE": torque} + return self.packer.make_can_msg_panda("MDPS", self.PT_BUS, values) + + def _torque_cmd_msg(self, torque, steer_req=1): + values = {"TORQUE_REQUEST": torque, "STEER_REQ": steer_req} + return self.packer.make_can_msg_panda(self.STEER_MSG, self.STEER_BUS, values) + + def _speed_msg(self, speed): + values = {f"WHEEL_SPEED_{i}": speed * 0.03125 for i in range(1, 5)} + return self.packer.make_can_msg_panda("WHEEL_SPEEDS", self.PT_BUS, values) + + def _user_brake_msg(self, brake): + values = {"DriverBraking": brake} + return self.packer.make_can_msg_panda("TCS", self.PT_BUS, values) + + def _user_gas_msg(self, gas): + values = {self.GAS_MSG[1]: gas} + return self.packer.make_can_msg_panda(self.GAS_MSG[0], self.PT_BUS, values) + + def _pcm_status_msg(self, enable): + values = {"ACCMode": 1 if enable else 0} + return self.packer.make_can_msg_panda("SCC_CONTROL", self.SCC_BUS, values) + + def _button_msg(self, buttons, main_button=0, bus=None): + if bus is None: + bus = self.PT_BUS + values = { + "CRUISE_BUTTONS": buttons, + "ADAPTIVE_CRUISE_MAIN_BTN": main_button, + } + return self.packer.make_can_msg_panda("CRUISE_BUTTONS", bus, values) + + +class TestHyundaiCanfdHDA1Base(TestHyundaiCanfdBase): + + TX_MSGS = [[0x12A, 0], [0x1A0, 1], [0x1CF, 0], [0x1E0, 0]] + RELAY_MALFUNCTION_ADDRS = {0: (0x12A,)} # LFA + FWD_BLACKLISTED_ADDRS = {2: [0x12A, 0x1E0]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + STEER_MSG = "LFA" + BUTTONS_TX_BUS = 2 + SAFETY_PARAM: int + + @classmethod + def setUpClass(cls): + super().setUpClass() + if cls.__name__ in ("TestHyundaiCanfdHDA1", "TestHyundaiCanfdHDA1AltButtons"): + cls.packer = None + cls.safety = None + raise unittest.SkipTest + + def setUp(self): + 
self.packer = CANPackerPanda("hyundai_canfd") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HYUNDAI_CANFD, self.SAFETY_PARAM) + self.safety.init_tests() + + +@parameterized_class([ + # Radar SCC, test with long flag to ensure flag is not respected until it is supported + {"GAS_MSG": ("ACCELERATOR_BRAKE_ALT", "ACCELERATOR_PEDAL_PRESSED"), "SCC_BUS": 0, "SAFETY_PARAM": Panda.FLAG_HYUNDAI_LONG}, + {"GAS_MSG": ("ACCELERATOR", "ACCELERATOR_PEDAL"), "SCC_BUS": 0, "SAFETY_PARAM": Panda.FLAG_HYUNDAI_EV_GAS | Panda.FLAG_HYUNDAI_LONG}, + {"GAS_MSG": ("ACCELERATOR_ALT", "ACCELERATOR_PEDAL"), "SCC_BUS": 0, "SAFETY_PARAM": Panda.FLAG_HYUNDAI_HYBRID_GAS | Panda.FLAG_HYUNDAI_LONG}, + # Camera SCC + {"GAS_MSG": ("ACCELERATOR_BRAKE_ALT", "ACCELERATOR_PEDAL_PRESSED"), "SCC_BUS": 2, "SAFETY_PARAM": Panda.FLAG_HYUNDAI_CAMERA_SCC}, + {"GAS_MSG": ("ACCELERATOR", "ACCELERATOR_PEDAL"), "SCC_BUS": 2, "SAFETY_PARAM": Panda.FLAG_HYUNDAI_EV_GAS | Panda.FLAG_HYUNDAI_CAMERA_SCC}, + {"GAS_MSG": ("ACCELERATOR_ALT", "ACCELERATOR_PEDAL"), "SCC_BUS": 2, "SAFETY_PARAM": Panda.FLAG_HYUNDAI_HYBRID_GAS | Panda.FLAG_HYUNDAI_CAMERA_SCC}, +]) +class TestHyundaiCanfdHDA1(TestHyundaiCanfdHDA1Base): + pass + + +@parameterized_class([ + # Radar SCC, test with long flag to ensure flag is not respected until it is supported + {"GAS_MSG": ("ACCELERATOR_BRAKE_ALT", "ACCELERATOR_PEDAL_PRESSED"), "SCC_BUS": 0, "SAFETY_PARAM": Panda.FLAG_HYUNDAI_LONG}, + {"GAS_MSG": ("ACCELERATOR", "ACCELERATOR_PEDAL"), "SCC_BUS": 0, "SAFETY_PARAM": Panda.FLAG_HYUNDAI_EV_GAS | Panda.FLAG_HYUNDAI_LONG}, + {"GAS_MSG": ("ACCELERATOR_ALT", "ACCELERATOR_PEDAL"), "SCC_BUS": 0, "SAFETY_PARAM": Panda.FLAG_HYUNDAI_HYBRID_GAS | Panda.FLAG_HYUNDAI_LONG}, + # Camera SCC + {"GAS_MSG": ("ACCELERATOR_BRAKE_ALT", "ACCELERATOR_PEDAL_PRESSED"), "SCC_BUS": 2, "SAFETY_PARAM": Panda.FLAG_HYUNDAI_CAMERA_SCC}, + {"GAS_MSG": ("ACCELERATOR", "ACCELERATOR_PEDAL"), "SCC_BUS": 2, "SAFETY_PARAM": Panda.FLAG_HYUNDAI_EV_GAS | Panda.FLAG_HYUNDAI_CAMERA_SCC}, + {"GAS_MSG": ("ACCELERATOR_ALT", "ACCELERATOR_PEDAL"), "SCC_BUS": 2, "SAFETY_PARAM": Panda.FLAG_HYUNDAI_HYBRID_GAS | Panda.FLAG_HYUNDAI_CAMERA_SCC}, +]) +class TestHyundaiCanfdHDA1AltButtons(TestHyundaiCanfdHDA1Base): + + SAFETY_PARAM: int + + def setUp(self): + self.packer = CANPackerPanda("hyundai_canfd") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HYUNDAI_CANFD, Panda.FLAG_HYUNDAI_CANFD_ALT_BUTTONS | self.SAFETY_PARAM) + self.safety.init_tests() + + def _button_msg(self, buttons, main_button=0, bus=1): + values = { + "CRUISE_BUTTONS": buttons, + "ADAPTIVE_CRUISE_MAIN_BTN": main_button, + } + return self.packer.make_can_msg_panda("CRUISE_BUTTONS_ALT", self.PT_BUS, values) + + def test_button_sends(self): + """ + No button send allowed with alt buttons. 
+ """ + for enabled in (True, False): + for btn in range(8): + self.safety.set_controls_allowed(enabled) + self.assertFalse(self._tx(self._button_msg(btn))) + + +class TestHyundaiCanfdHDA2EV(TestHyundaiCanfdBase): + + TX_MSGS = [[0x50, 0], [0x1CF, 1], [0x2A4, 0]] + RELAY_MALFUNCTION_ADDRS = {0: (0x50,)} # LKAS + FWD_BLACKLISTED_ADDRS = {2: [0x50, 0x2a4]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + PT_BUS = 1 + SCC_BUS = 1 + STEER_MSG = "LKAS" + GAS_MSG = ("ACCELERATOR", "ACCELERATOR_PEDAL") + + def setUp(self): + self.packer = CANPackerPanda("hyundai_canfd") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HYUNDAI_CANFD, Panda.FLAG_HYUNDAI_CANFD_HDA2 | Panda.FLAG_HYUNDAI_EV_GAS) + self.safety.init_tests() + + +# TODO: Handle ICE and HEV configurations once we see cars that use the new messages +class TestHyundaiCanfdHDA2EVAltSteering(TestHyundaiCanfdBase): + + TX_MSGS = [[0x110, 0], [0x1CF, 1], [0x362, 0]] + RELAY_MALFUNCTION_ADDRS = {0: (0x110,)} # LKAS_ALT + FWD_BLACKLISTED_ADDRS = {2: [0x110, 0x362]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + PT_BUS = 1 + SCC_BUS = 1 + STEER_MSG = "LKAS_ALT" + GAS_MSG = ("ACCELERATOR", "ACCELERATOR_PEDAL") + + def setUp(self): + self.packer = CANPackerPanda("hyundai_canfd") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HYUNDAI_CANFD, Panda.FLAG_HYUNDAI_CANFD_HDA2 | Panda.FLAG_HYUNDAI_EV_GAS | + Panda.FLAG_HYUNDAI_CANFD_HDA2_ALT_STEERING) + self.safety.init_tests() + + +class TestHyundaiCanfdHDA2LongEV(HyundaiLongitudinalBase, TestHyundaiCanfdHDA2EV): + + TX_MSGS = [[0x50, 0], [0x1CF, 1], [0x2A4, 0], [0x51, 0], [0x730, 1], [0x12a, 1], [0x160, 1], + [0x1e0, 1], [0x1a0, 1], [0x1ea, 1], [0x200, 1], [0x345, 1], [0x1da, 1]] + + RELAY_MALFUNCTION_ADDRS = {0: (0x50,), 1: (0x1a0,)} # LKAS, SCC_CONTROL + + DISABLED_ECU_UDS_MSG = (0x730, 1) + DISABLED_ECU_ACTUATION_MSG = (0x1a0, 1) + + STEER_MSG = "LFA" + GAS_MSG = ("ACCELERATOR", "ACCELERATOR_PEDAL") + STEER_BUS = 1 + + def setUp(self): + self.packer = CANPackerPanda("hyundai_canfd") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HYUNDAI_CANFD, Panda.FLAG_HYUNDAI_CANFD_HDA2 | Panda.FLAG_HYUNDAI_LONG | Panda.FLAG_HYUNDAI_EV_GAS) + self.safety.init_tests() + + def _accel_msg(self, accel, aeb_req=False, aeb_decel=0): + values = { + "aReqRaw": accel, + "aReqValue": accel, + } + return self.packer.make_can_msg_panda("SCC_CONTROL", 1, values) + + +# Tests HDA1 longitudinal for ICE, hybrid, EV +@parameterized_class([ + # Camera SCC is the only supported configuration for HDA1 longitudinal, TODO: allow radar SCC + {"GAS_MSG": ("ACCELERATOR_BRAKE_ALT", "ACCELERATOR_PEDAL_PRESSED"), "SAFETY_PARAM": Panda.FLAG_HYUNDAI_LONG}, + {"GAS_MSG": ("ACCELERATOR", "ACCELERATOR_PEDAL"), "SAFETY_PARAM": Panda.FLAG_HYUNDAI_LONG | Panda.FLAG_HYUNDAI_EV_GAS}, + {"GAS_MSG": ("ACCELERATOR_ALT", "ACCELERATOR_PEDAL"), "SAFETY_PARAM": Panda.FLAG_HYUNDAI_LONG | Panda.FLAG_HYUNDAI_HYBRID_GAS}, +]) +class TestHyundaiCanfdHDA1Long(HyundaiLongitudinalBase, TestHyundaiCanfdHDA1Base): + + FWD_BLACKLISTED_ADDRS = {2: [0x12a, 0x1e0, 0x1a0]} + + RELAY_MALFUNCTION_ADDRS = {0: (0x12A, 0x1a0)} # LFA, SCC_CONTROL + + DISABLED_ECU_UDS_MSG = (0x730, 1) + DISABLED_ECU_ACTUATION_MSG = (0x1a0, 0) + + STEER_MSG = "LFA" + STEER_BUS = 0 + SCC_BUS = 2 + + @classmethod + def setUpClass(cls): + if cls.__name__ == "TestHyundaiCanfdHDA1Long": + cls.safety = None + raise unittest.SkipTest + + def setUp(self): + self.packer = CANPackerPanda("hyundai_canfd") + self.safety = 
libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_HYUNDAI_CANFD, Panda.FLAG_HYUNDAI_CAMERA_SCC | self.SAFETY_PARAM) + self.safety.init_tests() + + def _accel_msg(self, accel, aeb_req=False, aeb_decel=0): + values = { + "aReqRaw": accel, + "aReqValue": accel, + } + return self.packer.make_can_msg_panda("SCC_CONTROL", 0, values) + + # no knockout + def test_tester_present_allowed(self): + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_mazda.py b/panda/tests/safety/test_mazda.py new file mode 100644 index 0000000..9d2fb89 --- /dev/null +++ b/panda/tests/safety/test_mazda.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +import unittest +from panda import Panda +from panda.tests.libpanda import libpanda_py +import panda.tests.safety.common as common +from panda.tests.safety.common import CANPackerPanda + + +class TestMazdaSafety(common.PandaCarSafetyTest, common.DriverTorqueSteeringSafetyTest): + + TX_MSGS = [[0x243, 0], [0x09d, 0], [0x440, 0]] + STANDSTILL_THRESHOLD = .1 + RELAY_MALFUNCTION_ADDRS = {0: (0x243,)} + FWD_BLACKLISTED_ADDRS = {2: [0x243, 0x440]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + MAX_RATE_UP = 10 + MAX_RATE_DOWN = 25 + MAX_TORQUE = 800 + + MAX_RT_DELTA = 300 + RT_INTERVAL = 250000 + + DRIVER_TORQUE_ALLOWANCE = 15 + DRIVER_TORQUE_FACTOR = 1 + + # Mazda actually does not set any bit when requesting torque + NO_STEER_REQ_BIT = True + + def setUp(self): + self.packer = CANPackerPanda("mazda_2017") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_MAZDA, 0) + self.safety.init_tests() + + def _torque_meas_msg(self, torque): + values = {"STEER_TORQUE_MOTOR": torque} + return self.packer.make_can_msg_panda("STEER_TORQUE", 0, values) + + def _torque_driver_msg(self, torque): + values = {"STEER_TORQUE_SENSOR": torque} + return self.packer.make_can_msg_panda("STEER_TORQUE", 0, values) + + def _torque_cmd_msg(self, torque, steer_req=1): + values = {"LKAS_REQUEST": torque} + return self.packer.make_can_msg_panda("CAM_LKAS", 0, values) + + def _speed_msg(self, speed): + values = {"SPEED": speed} + return self.packer.make_can_msg_panda("ENGINE_DATA", 0, values) + + def _user_brake_msg(self, brake): + values = {"BRAKE_ON": brake} + return self.packer.make_can_msg_panda("PEDALS", 0, values) + + def _user_gas_msg(self, gas): + values = {"PEDAL_GAS": gas} + return self.packer.make_can_msg_panda("ENGINE_DATA", 0, values) + + def _pcm_status_msg(self, enable): + values = {"CRZ_ACTIVE": enable} + return self.packer.make_can_msg_panda("CRZ_CTRL", 0, values) + + def _button_msg(self, resume=False, cancel=False): + values = { + "CAN_OFF": cancel, + "CAN_OFF_INV": (cancel + 1) % 2, + "RES": resume, + "RES_INV": (resume + 1) % 2, + } + return self.packer.make_can_msg_panda("CRZ_BTNS", 0, values) + + def test_buttons(self): + # only cancel allowed while controls not allowed + self.safety.set_controls_allowed(0) + self.assertTrue(self._tx(self._button_msg(cancel=True))) + self.assertFalse(self._tx(self._button_msg(resume=True))) + + # do not block resume if we are engaged already + self.safety.set_controls_allowed(1) + self.assertTrue(self._tx(self._button_msg(cancel=True))) + self.assertTrue(self._tx(self._button_msg(resume=True))) + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_nissan.py b/panda/tests/safety/test_nissan.py new file mode 100644 index 0000000..4c83ca3 --- /dev/null +++ b/panda/tests/safety/test_nissan.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +import unittest +from 
panda import Panda +from panda.tests.libpanda import libpanda_py +import panda.tests.safety.common as common +from panda.tests.safety.common import CANPackerPanda + + +class TestNissanSafety(common.PandaCarSafetyTest, common.AngleSteeringSafetyTest): + + TX_MSGS = [[0x169, 0], [0x2b1, 0], [0x4cc, 0], [0x20b, 2], [0x280, 2]] + STANDSTILL_THRESHOLD = 0 + GAS_PRESSED_THRESHOLD = 3 + RELAY_MALFUNCTION_ADDRS = {0: (0x169,)} + FWD_BLACKLISTED_ADDRS = {0: [0x280], 2: [0x169, 0x2b1, 0x4cc]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + EPS_BUS = 0 + CRUISE_BUS = 2 + + # Angle control limits + DEG_TO_CAN = 100 + + ANGLE_RATE_BP = [0., 5., 15.] + ANGLE_RATE_UP = [5., .8, .15] # windup limit + ANGLE_RATE_DOWN = [5., 3.5, .4] # unwind limit + + def setUp(self): + self.packer = CANPackerPanda("nissan_x_trail_2017_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_NISSAN, 0) + self.safety.init_tests() + + def _angle_cmd_msg(self, angle: float, enabled: bool): + values = {"DESIRED_ANGLE": angle, "LKA_ACTIVE": 1 if enabled else 0} + return self.packer.make_can_msg_panda("LKAS", 0, values) + + def _angle_meas_msg(self, angle: float): + values = {"STEER_ANGLE": angle} + return self.packer.make_can_msg_panda("STEER_ANGLE_SENSOR", self.EPS_BUS, values) + + def _pcm_status_msg(self, enable): + values = {"CRUISE_ENABLED": enable} + return self.packer.make_can_msg_panda("CRUISE_STATE", self.CRUISE_BUS, values) + + def _speed_msg(self, speed): + values = {"WHEEL_SPEED_%s" % s: speed * 3.6 for s in ["RR", "RL"]} + return self.packer.make_can_msg_panda("WHEEL_SPEEDS_REAR", self.EPS_BUS, values) + + def _user_brake_msg(self, brake): + values = {"USER_BRAKE_PRESSED": brake} + return self.packer.make_can_msg_panda("DOORS_LIGHTS", self.EPS_BUS, values) + + def _user_gas_msg(self, gas): + values = {"GAS_PEDAL": gas} + return self.packer.make_can_msg_panda("GAS_PEDAL", self.EPS_BUS, values) + + def _acc_button_cmd(self, cancel=0, propilot=0, flw_dist=0, _set=0, res=0): + no_button = not any([cancel, propilot, flw_dist, _set, res]) + values = {"CANCEL_BUTTON": cancel, "PROPILOT_BUTTON": propilot, + "FOLLOW_DISTANCE_BUTTON": flw_dist, "SET_BUTTON": _set, + "RES_BUTTON": res, "NO_BUTTON_PRESSED": no_button} + return self.packer.make_can_msg_panda("CRUISE_THROTTLE", 2, values) + + def test_acc_buttons(self): + btns = [ + ("cancel", True), + ("propilot", False), + ("flw_dist", False), + ("_set", False), + ("res", False), + (None, False), + ] + for controls_allowed in (True, False): + for btn, should_tx in btns: + self.safety.set_controls_allowed(controls_allowed) + args = {} if btn is None else {btn: 1} + tx = self._tx(self._acc_button_cmd(**args)) + self.assertEqual(tx, should_tx) + + +class TestNissanSafetyAltEpsBus(TestNissanSafety): + """Altima uses different buses""" + + EPS_BUS = 1 + CRUISE_BUS = 1 + + def setUp(self): + self.packer = CANPackerPanda("nissan_x_trail_2017_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_NISSAN, Panda.FLAG_NISSAN_ALT_EPS_BUS) + self.safety.init_tests() + + +class TestNissanLeafSafety(TestNissanSafety): + + def setUp(self): + self.packer = CANPackerPanda("nissan_leaf_2018_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_NISSAN, 0) + self.safety.init_tests() + + def _user_brake_msg(self, brake): + values = {"USER_BRAKE_PRESSED": brake} + return self.packer.make_can_msg_panda("CRUISE_THROTTLE", 0, values) + + def _user_gas_msg(self, gas): + values = {"GAS_PEDAL": gas} + 
return self.packer.make_can_msg_panda("CRUISE_THROTTLE", 0, values) + + # TODO: leaf should use its own safety param + def test_acc_buttons(self): + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_subaru.py b/panda/tests/safety/test_subaru.py new file mode 100644 index 0000000..61d3d91 --- /dev/null +++ b/panda/tests/safety/test_subaru.py @@ -0,0 +1,228 @@ +#!/usr/bin/env python3 +import enum +import unittest +from panda import Panda +from panda.tests.libpanda import libpanda_py +import panda.tests.safety.common as common +from panda.tests.safety.common import CANPackerPanda +from functools import partial + +class SubaruMsg(enum.IntEnum): + Brake_Status = 0x13c + CruiseControl = 0x240 + Throttle = 0x40 + Steering_Torque = 0x119 + Wheel_Speeds = 0x13a + ES_LKAS = 0x122 + ES_LKAS_ANGLE = 0x124 + ES_Brake = 0x220 + ES_Distance = 0x221 + ES_Status = 0x222 + ES_DashStatus = 0x321 + ES_LKAS_State = 0x322 + ES_Infotainment = 0x323 + ES_UDS_Request = 0x787 + ES_HighBeamAssist = 0x121 + ES_STATIC_1 = 0x22a + ES_STATIC_2 = 0x325 + + +SUBARU_MAIN_BUS = 0 +SUBARU_ALT_BUS = 1 +SUBARU_CAM_BUS = 2 + + +def lkas_tx_msgs(alt_bus, lkas_msg=SubaruMsg.ES_LKAS): + return [[lkas_msg, SUBARU_MAIN_BUS], + [SubaruMsg.ES_Distance, alt_bus], + [SubaruMsg.ES_DashStatus, SUBARU_MAIN_BUS], + [SubaruMsg.ES_LKAS_State, SUBARU_MAIN_BUS], + [SubaruMsg.ES_Infotainment, SUBARU_MAIN_BUS]] + +def long_tx_msgs(alt_bus): + return [[SubaruMsg.ES_Brake, alt_bus], + [SubaruMsg.ES_Status, alt_bus]] + +def gen2_long_additional_tx_msgs(): + return [[SubaruMsg.ES_UDS_Request, SUBARU_CAM_BUS], + [SubaruMsg.ES_HighBeamAssist, SUBARU_MAIN_BUS], + [SubaruMsg.ES_STATIC_1, SUBARU_MAIN_BUS], + [SubaruMsg.ES_STATIC_2, SUBARU_MAIN_BUS]] + +def fwd_blacklisted_addr(lkas_msg=SubaruMsg.ES_LKAS): + return {SUBARU_CAM_BUS: [lkas_msg, SubaruMsg.ES_DashStatus, SubaruMsg.ES_LKAS_State, SubaruMsg.ES_Infotainment]} + +class TestSubaruSafetyBase(common.PandaCarSafetyTest): + FLAGS = 0 + STANDSTILL_THRESHOLD = 0 # kph + RELAY_MALFUNCTION_ADDRS = {SUBARU_MAIN_BUS: (SubaruMsg.ES_LKAS,)} + FWD_BUS_LOOKUP = {SUBARU_MAIN_BUS: SUBARU_CAM_BUS, SUBARU_CAM_BUS: SUBARU_MAIN_BUS} + FWD_BLACKLISTED_ADDRS = fwd_blacklisted_addr() + + MAX_RT_DELTA = 940 + RT_INTERVAL = 250000 + + DRIVER_TORQUE_ALLOWANCE = 60 + DRIVER_TORQUE_FACTOR = 50 + + ALT_MAIN_BUS = SUBARU_MAIN_BUS + ALT_CAM_BUS = SUBARU_CAM_BUS + + DEG_TO_CAN = 100 + + INACTIVE_GAS = 1818 + + def setUp(self): + self.packer = CANPackerPanda("subaru_global_2017_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_SUBARU, self.FLAGS) + self.safety.init_tests() + + def _set_prev_torque(self, t): + self.safety.set_desired_torque_last(t) + self.safety.set_rt_torque_last(t) + + def _torque_driver_msg(self, torque): + values = {"Steer_Torque_Sensor": torque} + return self.packer.make_can_msg_panda("Steering_Torque", 0, values) + + def _speed_msg(self, speed): + values = {s: speed for s in ["FR", "FL", "RR", "RL"]} + return self.packer.make_can_msg_panda("Wheel_Speeds", self.ALT_MAIN_BUS, values) + + def _angle_meas_msg(self, angle): + values = {"Steering_Angle": angle} + return self.packer.make_can_msg_panda("Steering_Torque", 0, values) + + def _user_brake_msg(self, brake): + values = {"Brake": brake} + return self.packer.make_can_msg_panda("Brake_Status", self.ALT_MAIN_BUS, values) + + def _user_gas_msg(self, gas): + values = {"Throttle_Pedal": gas} + return self.packer.make_can_msg_panda("Throttle", 0, values) + + def 
_pcm_status_msg(self, enable): + values = {"Cruise_Activated": enable} + return self.packer.make_can_msg_panda("CruiseControl", self.ALT_MAIN_BUS, values) + + +class TestSubaruStockLongitudinalSafetyBase(TestSubaruSafetyBase): + def _cancel_msg(self, cancel, cruise_throttle=0): + values = {"Cruise_Cancel": cancel, "Cruise_Throttle": cruise_throttle} + return self.packer.make_can_msg_panda("ES_Distance", self.ALT_MAIN_BUS, values) + + def test_cancel_message(self): + # test that we can only send the cancel message (ES_Distance) with inactive throttle (1818) and Cruise_Cancel=1 + for cancel in [True, False]: + self._generic_limit_safety_check(partial(self._cancel_msg, cancel), self.INACTIVE_GAS, self.INACTIVE_GAS, 0, 2**12, 1, self.INACTIVE_GAS, cancel) + + +class TestSubaruLongitudinalSafetyBase(TestSubaruSafetyBase, common.LongitudinalGasBrakeSafetyTest): + MIN_GAS = 808 + MAX_GAS = 3400 + INACTIVE_GAS = 1818 + MAX_POSSIBLE_GAS = 2**13 + + MIN_BRAKE = 0 + MAX_BRAKE = 600 + MAX_POSSIBLE_BRAKE = 2**16 + + MIN_RPM = 0 + MAX_RPM = 2400 + MAX_POSSIBLE_RPM = 2**13 + + FWD_BLACKLISTED_ADDRS = {2: [SubaruMsg.ES_LKAS, SubaruMsg.ES_Brake, SubaruMsg.ES_Distance, + SubaruMsg.ES_Status, SubaruMsg.ES_DashStatus, + SubaruMsg.ES_LKAS_State, SubaruMsg.ES_Infotainment]} + + def test_rpm_safety_check(self): + self._generic_limit_safety_check(self._send_rpm_msg, self.MIN_RPM, self.MAX_RPM, 0, self.MAX_POSSIBLE_RPM, 1) + + def _send_brake_msg(self, brake): + values = {"Brake_Pressure": brake} + return self.packer.make_can_msg_panda("ES_Brake", self.ALT_MAIN_BUS, values) + + def _send_gas_msg(self, gas): + values = {"Cruise_Throttle": gas} + return self.packer.make_can_msg_panda("ES_Distance", self.ALT_MAIN_BUS, values) + + def _send_rpm_msg(self, rpm): + values = {"Cruise_RPM": rpm} + return self.packer.make_can_msg_panda("ES_Status", self.ALT_MAIN_BUS, values) + + +class TestSubaruTorqueSafetyBase(TestSubaruSafetyBase, common.DriverTorqueSteeringSafetyTest, common.SteerRequestCutSafetyTest): + MAX_RATE_UP = 50 + MAX_RATE_DOWN = 70 + MAX_TORQUE = 2047 + + # Safety around steering req bit + MIN_VALID_STEERING_FRAMES = 7 + MAX_INVALID_STEERING_FRAMES = 1 + MIN_VALID_STEERING_RT_INTERVAL = 144000 + + def _torque_cmd_msg(self, torque, steer_req=1): + values = {"LKAS_Output": torque, "LKAS_Request": steer_req} + return self.packer.make_can_msg_panda("ES_LKAS", SUBARU_MAIN_BUS, values) + + +class TestSubaruGen1TorqueStockLongitudinalSafety(TestSubaruStockLongitudinalSafetyBase, TestSubaruTorqueSafetyBase): + FLAGS = 0 + TX_MSGS = lkas_tx_msgs(SUBARU_MAIN_BUS) + + +class TestSubaruGen2TorqueSafetyBase(TestSubaruTorqueSafetyBase): + ALT_MAIN_BUS = SUBARU_ALT_BUS + ALT_CAM_BUS = SUBARU_ALT_BUS + + MAX_RATE_UP = 40 + MAX_RATE_DOWN = 40 + MAX_TORQUE = 1000 + + +class TestSubaruGen2TorqueStockLongitudinalSafety(TestSubaruStockLongitudinalSafetyBase, TestSubaruGen2TorqueSafetyBase): + FLAGS = Panda.FLAG_SUBARU_GEN2 + TX_MSGS = lkas_tx_msgs(SUBARU_ALT_BUS) + + +class TestSubaruGen1LongitudinalSafety(TestSubaruLongitudinalSafetyBase, TestSubaruTorqueSafetyBase): + FLAGS = Panda.FLAG_SUBARU_LONG + TX_MSGS = lkas_tx_msgs(SUBARU_MAIN_BUS) + long_tx_msgs(SUBARU_MAIN_BUS) + + +class TestSubaruGen2LongitudinalSafety(TestSubaruLongitudinalSafetyBase, TestSubaruGen2TorqueSafetyBase): + FLAGS = Panda.FLAG_SUBARU_LONG | Panda.FLAG_SUBARU_GEN2 + TX_MSGS = lkas_tx_msgs(SUBARU_ALT_BUS) + long_tx_msgs(SUBARU_ALT_BUS) + gen2_long_additional_tx_msgs() + + def _rdbi_msg(self, did: int): + return b'\x03\x22' + did.to_bytes(2) + 
b'\x00\x00\x00\x00' + + def _es_uds_msg(self, msg: bytes): + return libpanda_py.make_CANPacket(SubaruMsg.ES_UDS_Request, 2, msg) + + def test_es_uds_message(self): + tester_present = b'\x02\x3E\x80\x00\x00\x00\x00\x00' + not_tester_present = b"\x03\xAA\xAA\x00\x00\x00\x00\x00" + + button_did = 0x1130 + + # Tester present is allowed for gen2 long to keep eyesight disabled + self.assertTrue(self._tx(self._es_uds_msg(tester_present))) + + # Non-Tester present is not allowed + self.assertFalse(self._tx(self._es_uds_msg(not_tester_present))) + + # Only button_did is allowed to be read via UDS + for did in range(0xFFFF): + should_tx = (did == button_did) + self.assertEqual(self._tx(self._es_uds_msg(self._rdbi_msg(did))), should_tx) + + # any other msg is not allowed + for sid in range(0xFF): + msg = b'\x03' + sid.to_bytes(1) + b'\x00' * 6 + self.assertFalse(self._tx(self._es_uds_msg(msg))) + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_subaru_preglobal.py b/panda/tests/safety/test_subaru_preglobal.py new file mode 100644 index 0000000..06c4cde --- /dev/null +++ b/panda/tests/safety/test_subaru_preglobal.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +import unittest +from panda import Panda +from panda.tests.libpanda import libpanda_py +import panda.tests.safety.common as common +from panda.tests.safety.common import CANPackerPanda + + +class TestSubaruPreglobalSafety(common.PandaCarSafetyTest, common.DriverTorqueSteeringSafetyTest): + FLAGS = 0 + DBC = "subaru_outback_2015_generated" + TX_MSGS = [[0x161, 0], [0x164, 0]] + STANDSTILL_THRESHOLD = 0 # kph + RELAY_MALFUNCTION_ADDRS = {0: (0x164,)} + FWD_BLACKLISTED_ADDRS = {2: [0x161, 0x164]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + MAX_RATE_UP = 50 + MAX_RATE_DOWN = 70 + MAX_TORQUE = 2047 + + MAX_RT_DELTA = 940 + RT_INTERVAL = 250000 + + DRIVER_TORQUE_ALLOWANCE = 75 + DRIVER_TORQUE_FACTOR = 10 + + def setUp(self): + self.packer = CANPackerPanda(self.DBC) + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_SUBARU_PREGLOBAL, self.FLAGS) + self.safety.init_tests() + + def _set_prev_torque(self, t): + self.safety.set_desired_torque_last(t) + self.safety.set_rt_torque_last(t) + + def _torque_driver_msg(self, torque): + values = {"Steer_Torque_Sensor": torque} + return self.packer.make_can_msg_panda("Steering_Torque", 0, values) + + def _speed_msg(self, speed): + # subaru safety doesn't use the scaled value, so undo the scaling + values = {s: speed*0.0592 for s in ["FR", "FL", "RR", "RL"]} + return self.packer.make_can_msg_panda("Wheel_Speeds", 0, values) + + def _user_brake_msg(self, brake): + values = {"Brake_Pedal": brake} + return self.packer.make_can_msg_panda("Brake_Pedal", 0, values) + + def _torque_cmd_msg(self, torque, steer_req=1): + values = {"LKAS_Command": torque, "LKAS_Active": steer_req} + return self.packer.make_can_msg_panda("ES_LKAS", 0, values) + + def _user_gas_msg(self, gas): + values = {"Throttle_Pedal": gas} + return self.packer.make_can_msg_panda("Throttle", 0, values) + + def _pcm_status_msg(self, enable): + values = {"Cruise_Activated": enable} + return self.packer.make_can_msg_panda("CruiseControl", 0, values) + + +class TestSubaruPreglobalReversedDriverTorqueSafety(TestSubaruPreglobalSafety): + FLAGS = Panda.FLAG_SUBARU_PREGLOBAL_REVERSED_DRIVER_TORQUE + DBC = "subaru_outback_2019_generated" + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_tesla.py b/panda/tests/safety/test_tesla.py new file mode 100644 index 0000000..9461ff6 
--- /dev/null +++ b/panda/tests/safety/test_tesla.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python3 +import unittest +import numpy as np +from panda import Panda +import panda.tests.safety.common as common +from panda.tests.libpanda import libpanda_py +from panda.tests.safety.common import CANPackerPanda + +MAX_ACCEL = 2.0 +MIN_ACCEL = -3.5 + + +class CONTROL_LEVER_STATE: + DN_1ST = 32 + UP_1ST = 16 + DN_2ND = 8 + UP_2ND = 4 + RWD = 2 + FWD = 1 + IDLE = 0 + + +class TestTeslaSafety(common.PandaCarSafetyTest): + STANDSTILL_THRESHOLD = 0 + GAS_PRESSED_THRESHOLD = 3 + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + def setUp(self): + self.packer = None + raise unittest.SkipTest + + def _speed_msg(self, speed): + values = {"DI_vehicleSpeed": speed / 0.447} + return self.packer.make_can_msg_panda("DI_torque2", 0, values) + + def _user_brake_msg(self, brake): + values = {"driverBrakeStatus": 2 if brake else 1} + return self.packer.make_can_msg_panda("BrakeMessage", 0, values) + + def _user_gas_msg(self, gas): + values = {"DI_pedalPos": gas} + return self.packer.make_can_msg_panda("DI_torque1", 0, values) + + def _control_lever_cmd(self, command): + values = {"SpdCtrlLvr_Stat": command} + return self.packer.make_can_msg_panda("STW_ACTN_RQ", 0, values) + + def _pcm_status_msg(self, enable): + values = {"DI_cruiseState": 2 if enable else 0} + return self.packer.make_can_msg_panda("DI_state", 0, values) + + def _long_control_msg(self, set_speed, acc_val=0, jerk_limits=(0, 0), accel_limits=(0, 0), aeb_event=0, bus=0): + values = { + "DAS_setSpeed": set_speed, + "DAS_accState": acc_val, + "DAS_aebEvent": aeb_event, + "DAS_jerkMin": jerk_limits[0], + "DAS_jerkMax": jerk_limits[1], + "DAS_accelMin": accel_limits[0], + "DAS_accelMax": accel_limits[1], + } + return self.packer.make_can_msg_panda("DAS_control", bus, values) + + +class TestTeslaSteeringSafety(TestTeslaSafety, common.AngleSteeringSafetyTest): + TX_MSGS = [[0x488, 0], [0x45, 0], [0x45, 2]] + RELAY_MALFUNCTION_ADDRS = {0: (0x488,)} + FWD_BLACKLISTED_ADDRS = {2: [0x488]} + + # Angle control limits + DEG_TO_CAN = 10 + + ANGLE_RATE_BP = [0., 5., 15.] + ANGLE_RATE_UP = [10., 1.6, .3] # windup limit + ANGLE_RATE_DOWN = [10., 7.0, .8] # unwind limit + + def setUp(self): + self.packer = CANPackerPanda("tesla_can") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_TESLA, 0) + self.safety.init_tests() + + def _angle_cmd_msg(self, angle: float, enabled: bool): + values = {"DAS_steeringAngleRequest": angle, "DAS_steeringControlType": 1 if enabled else 0} + return self.packer.make_can_msg_panda("DAS_steeringControl", 0, values) + + def _angle_meas_msg(self, angle: float): + values = {"EPAS_internalSAS": angle} + return self.packer.make_can_msg_panda("EPAS_sysStatus", 0, values) + + def test_acc_buttons(self): + """ + FWD (cancel) always allowed. 
+ """ + btns = [ + (CONTROL_LEVER_STATE.FWD, True), + (CONTROL_LEVER_STATE.RWD, False), + (CONTROL_LEVER_STATE.UP_1ST, False), + (CONTROL_LEVER_STATE.UP_2ND, False), + (CONTROL_LEVER_STATE.DN_1ST, False), + (CONTROL_LEVER_STATE.DN_2ND, False), + (CONTROL_LEVER_STATE.IDLE, False), + ] + for btn, should_tx in btns: + for controls_allowed in (True, False): + self.safety.set_controls_allowed(controls_allowed) + tx = self._tx(self._control_lever_cmd(btn)) + self.assertEqual(tx, should_tx) + + +class TestTeslaRavenSteeringSafety(TestTeslaSteeringSafety): + def setUp(self): + self.packer = CANPackerPanda("tesla_can") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_TESLA, Panda.FLAG_TESLA_RAVEN) + self.safety.init_tests() + + def _angle_meas_msg(self, angle: float): + values = {"EPAS_internalSAS": angle} + return self.packer.make_can_msg_panda("EPAS3P_sysStatus", 2, values) + +class TestTeslaLongitudinalSafety(TestTeslaSafety): + def setUp(self): + raise unittest.SkipTest + + def test_no_aeb(self): + for aeb_event in range(4): + self.assertEqual(self._tx(self._long_control_msg(10, aeb_event=aeb_event)), aeb_event == 0) + + def test_stock_aeb_passthrough(self): + no_aeb_msg = self._long_control_msg(10, aeb_event=0) + no_aeb_msg_cam = self._long_control_msg(10, aeb_event=0, bus=2) + aeb_msg_cam = self._long_control_msg(10, aeb_event=1, bus=2) + + # stock system sends no AEB -> no forwarding, and OP is allowed to TX + self.assertEqual(1, self._rx(no_aeb_msg_cam)) + self.assertEqual(-1, self.safety.safety_fwd_hook(2, no_aeb_msg_cam.addr)) + self.assertEqual(True, self._tx(no_aeb_msg)) + + # stock system sends AEB -> forwarding, and OP is not allowed to TX + self.assertEqual(1, self._rx(aeb_msg_cam)) + self.assertEqual(0, self.safety.safety_fwd_hook(2, aeb_msg_cam.addr)) + self.assertEqual(False, self._tx(no_aeb_msg)) + + def test_acc_accel_limits(self): + for controls_allowed in [True, False]: + self.safety.set_controls_allowed(controls_allowed) + for min_accel in np.arange(MIN_ACCEL - 1, MAX_ACCEL + 1, 0.1): + for max_accel in np.arange(MIN_ACCEL - 1, MAX_ACCEL + 1, 0.1): + # floats might not hit exact boundary conditions without rounding + min_accel = round(min_accel, 2) + max_accel = round(max_accel, 2) + if controls_allowed: + send = (MIN_ACCEL <= min_accel <= MAX_ACCEL) and (MIN_ACCEL <= max_accel <= MAX_ACCEL) + else: + send = np.all(np.isclose([min_accel, max_accel], 0, atol=0.0001)) + self.assertEqual(send, self._tx(self._long_control_msg(10, acc_val=4, accel_limits=[min_accel, max_accel]))) + + +class TestTeslaChassisLongitudinalSafety(TestTeslaLongitudinalSafety): + TX_MSGS = [[0x488, 0], [0x45, 0], [0x45, 2], [0x2B9, 0]] + RELAY_MALFUNCTION_ADDRS = {0: (0x488,)} + FWD_BLACKLISTED_ADDRS = {2: [0x2B9, 0x488]} + + def setUp(self): + self.packer = CANPackerPanda("tesla_can") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_TESLA, Panda.FLAG_TESLA_LONG_CONTROL) + self.safety.init_tests() + + +class TestTeslaPTLongitudinalSafety(TestTeslaLongitudinalSafety): + TX_MSGS = [[0x2BF, 0]] + RELAY_MALFUNCTION_ADDRS = {0: (0x2BF,)} + FWD_BLACKLISTED_ADDRS = {2: [0x2BF]} + + def setUp(self): + self.packer = CANPackerPanda("tesla_powertrain") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_TESLA, Panda.FLAG_TESLA_LONG_CONTROL | Panda.FLAG_TESLA_POWERTRAIN) + self.safety.init_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_toyota.py 
b/panda/tests/safety/test_toyota.py new file mode 100644 index 0000000..0743c67 --- /dev/null +++ b/panda/tests/safety/test_toyota.py @@ -0,0 +1,367 @@ +#!/usr/bin/env python3 +import numpy as np +import random +import unittest +import itertools + +from panda import Panda +from panda.tests.libpanda import libpanda_py +import panda.tests.safety.common as common +from panda.tests.safety.common import CANPackerPanda + +TOYOTA_COMMON_TX_MSGS = [[0x2E4, 0], [0x191, 0], [0x412, 0], [0x343, 0], [0x1D2, 0]] # LKAS + LTA + ACC & PCM cancel cmds +TOYOTA_COMMON_LONG_TX_MSGS = [[0x283, 0], [0x2E6, 0], [0x2E7, 0], [0x33E, 0], [0x344, 0], [0x365, 0], [0x366, 0], [0x4CB, 0], # DSU bus 0 + [0x128, 1], [0x141, 1], [0x160, 1], [0x161, 1], [0x470, 1], # DSU bus 1 + [0x411, 0], # PCS_HUD + [0x750, 0]] # radar diagnostic address +GAS_INTERCEPTOR_TX_MSGS = [[0x200, 0]] + + +class TestToyotaSafetyBase(common.PandaCarSafetyTest, common.LongitudinalAccelSafetyTest): + + TX_MSGS = TOYOTA_COMMON_TX_MSGS + TOYOTA_COMMON_LONG_TX_MSGS + STANDSTILL_THRESHOLD = 0 # kph + RELAY_MALFUNCTION_ADDRS = {0: (0x2E4, 0x343)} + FWD_BLACKLISTED_ADDRS = {2: [0x2E4, 0x412, 0x191, 0x343]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + EPS_SCALE = 73 + + packer: CANPackerPanda + safety: libpanda_py.Panda + + @classmethod + def setUpClass(cls): + if cls.__name__.endswith("Base"): + cls.packer = None + cls.safety = None + raise unittest.SkipTest + + def _torque_meas_msg(self, torque: int, driver_torque: int | None = None): + values = {"STEER_TORQUE_EPS": (torque / self.EPS_SCALE) * 100.} + if driver_torque is not None: + values["STEER_TORQUE_DRIVER"] = driver_torque + return self.packer.make_can_msg_panda("STEER_TORQUE_SENSOR", 0, values) + + # Both torque and angle safety modes test with each other's steering commands + def _torque_cmd_msg(self, torque, steer_req=1): + values = {"STEER_TORQUE_CMD": torque, "STEER_REQUEST": steer_req} + return self.packer.make_can_msg_panda("STEERING_LKA", 0, values) + + def _angle_meas_msg(self, angle: float, steer_angle_initializing: bool = False): + # This creates a steering torque angle message. Not set on all platforms, + # relative to init angle on some older TSS2 platforms. 
Only to be used with LTA + values = {"STEER_ANGLE": angle, "STEER_ANGLE_INITIALIZING": int(steer_angle_initializing)} + return self.packer.make_can_msg_panda("STEER_TORQUE_SENSOR", 0, values) + + def _angle_cmd_msg(self, angle: float, enabled: bool): + return self._lta_msg(int(enabled), int(enabled), angle, torque_wind_down=100 if enabled else 0) + + def _lta_msg(self, req, req2, angle_cmd, torque_wind_down=100): + values = {"STEER_REQUEST": req, "STEER_REQUEST_2": req2, "STEER_ANGLE_CMD": angle_cmd, "TORQUE_WIND_DOWN": torque_wind_down} + return self.packer.make_can_msg_panda("STEERING_LTA", 0, values) + + def _accel_msg(self, accel, cancel_req=0): + values = {"ACCEL_CMD": accel, "CANCEL_REQ": cancel_req} + return self.packer.make_can_msg_panda("ACC_CONTROL", 0, values) + + def _speed_msg(self, speed): + values = {("WHEEL_SPEED_%s" % n): speed * 3.6 for n in ["FR", "FL", "RR", "RL"]} + return self.packer.make_can_msg_panda("WHEEL_SPEEDS", 0, values) + + def _user_brake_msg(self, brake): + values = {"BRAKE_PRESSED": brake} + return self.packer.make_can_msg_panda("BRAKE_MODULE", 0, values) + + def _user_gas_msg(self, gas): + cruise_active = self.safety.get_controls_allowed() + values = {"GAS_RELEASED": not gas, "CRUISE_ACTIVE": cruise_active} + return self.packer.make_can_msg_panda("PCM_CRUISE", 0, values) + + def _pcm_status_msg(self, enable): + values = {"CRUISE_ACTIVE": enable} + return self.packer.make_can_msg_panda("PCM_CRUISE", 0, values) + + def test_diagnostics(self, stock_longitudinal: bool = False): + for should_tx, msg in ((False, b"\x6D\x02\x3E\x00\x00\x00\x00\x00"), # fwdCamera tester present + (False, b"\x0F\x03\xAA\xAA\x00\x00\x00\x00"), # non-tester present + (True, b"\x0F\x02\x3E\x00\x00\x00\x00\x00")): + tester_present = libpanda_py.make_CANPacket(0x750, 0, msg) + self.assertEqual(should_tx and not stock_longitudinal, self._tx(tester_present)) + + def test_block_aeb(self, stock_longitudinal: bool = False): + for controls_allowed in (True, False): + for bad in (True, False): + for _ in range(10): + self.safety.set_controls_allowed(controls_allowed) + dat = [random.randint(1, 255) for _ in range(7)] + if not bad: + dat = [0]*6 + dat[-1:] + msg = libpanda_py.make_CANPacket(0x283, 0, bytes(dat)) + self.assertEqual(not bad and not stock_longitudinal, self._tx(msg)) + + # Only allow LTA msgs with no actuation + def test_lta_steer_cmd(self): + for engaged, req, req2, torque_wind_down, angle in itertools.product([True, False], + [0, 1], [0, 1], + [0, 50, 100], + np.linspace(-20, 20, 5)): + self.safety.set_controls_allowed(engaged) + + should_tx = not req and not req2 and angle == 0 and torque_wind_down == 0 + self.assertEqual(should_tx, self._tx(self._lta_msg(req, req2, angle, torque_wind_down))) + + def test_rx_hook(self): + # checksum checks + for msg in ["trq", "pcm"]: + self.safety.set_controls_allowed(1) + if msg == "trq": + to_push = self._torque_meas_msg(0) + if msg == "pcm": + to_push = self._pcm_status_msg(True) + self.assertTrue(self._rx(to_push)) + to_push[0].data[4] = 0 + to_push[0].data[5] = 0 + to_push[0].data[6] = 0 + to_push[0].data[7] = 0 + self.assertFalse(self._rx(to_push)) + self.assertFalse(self.safety.get_controls_allowed()) + + +class TestToyotaSafetyGasInterceptorBase(common.GasInterceptorSafetyTest, TestToyotaSafetyBase): + + TX_MSGS = TOYOTA_COMMON_TX_MSGS + TOYOTA_COMMON_LONG_TX_MSGS + GAS_INTERCEPTOR_TX_MSGS + INTERCEPTOR_THRESHOLD = 805 + + def setUp(self): + super().setUp() + self.safety.set_safety_hooks(Panda.SAFETY_TOYOTA, 
self.safety.get_current_safety_param() | + Panda.FLAG_TOYOTA_GAS_INTERCEPTOR) + self.safety.init_tests() + + def test_stock_longitudinal(self): + # If stock longitudinal is set, the gas interceptor safety param should not be respected + self.safety.set_safety_hooks(Panda.SAFETY_TOYOTA, self.safety.get_current_safety_param() | + Panda.FLAG_TOYOTA_STOCK_LONGITUDINAL) + self.safety.init_tests() + + # Spot check a few gas interceptor tests: (1) reading interceptor, + # (2) behavior around interceptor, and (3) txing interceptor msgs + for test in (self.test_prev_gas_interceptor, self.test_disengage_on_gas_interceptor, + self.test_gas_interceptor_safety_check): + with self.subTest(test=test.__name__): + with self.assertRaises(AssertionError): + test() + + +class TestToyotaSafetyTorque(TestToyotaSafetyBase, common.MotorTorqueSteeringSafetyTest, common.SteerRequestCutSafetyTest): + + MAX_RATE_UP = 15 + MAX_RATE_DOWN = 25 + MAX_TORQUE = 1500 + MAX_RT_DELTA = 450 + RT_INTERVAL = 250000 + MAX_TORQUE_ERROR = 350 + TORQUE_MEAS_TOLERANCE = 1 # toyota safety adds one to be conservative for rounding + + # Safety around steering req bit + MIN_VALID_STEERING_FRAMES = 18 + MAX_INVALID_STEERING_FRAMES = 1 + MIN_VALID_STEERING_RT_INTERVAL = 170000 # a ~10% buffer, can send steer up to 110Hz + + def setUp(self): + self.packer = CANPackerPanda("toyota_nodsu_pt_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_TOYOTA, self.EPS_SCALE) + self.safety.init_tests() + + +class TestToyotaSafetyTorqueGasInterceptor(TestToyotaSafetyGasInterceptorBase, TestToyotaSafetyTorque): + pass + + +class TestToyotaSafetyAngle(TestToyotaSafetyBase, common.AngleSteeringSafetyTest): + + # Angle control limits + DEG_TO_CAN = 17.452007 # 1 / 0.0573 deg to can + + ANGLE_RATE_BP = [5., 25., 25.] 
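+ # note (added comment, not in upstream): ANGLE_RATE_BP lists speed breakpoints; the windup/unwind rate limits below are defined per breakpoint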
+ ANGLE_RATE_UP = [0.3, 0.15, 0.15] # windup limit + ANGLE_RATE_DOWN = [0.36, 0.26, 0.26] # unwind limit + + MAX_LTA_ANGLE = 94.9461 # PCS faults if commanding above this, deg + MAX_MEAS_TORQUE = 1500 # max allowed measured EPS torque before wind down + MAX_LTA_DRIVER_TORQUE = 150 # max allowed driver torque before wind down + + def setUp(self): + self.packer = CANPackerPanda("toyota_nodsu_pt_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_TOYOTA, self.EPS_SCALE | Panda.FLAG_TOYOTA_LTA) + self.safety.init_tests() + + # Only allow LKA msgs with no actuation + def test_lka_steer_cmd(self): + for engaged, steer_req, torque in itertools.product([True, False], + [0, 1], + np.linspace(-1500, 1500, 7)): + self.safety.set_controls_allowed(engaged) + torque = int(torque) + self.safety.set_rt_torque_last(torque) + self.safety.set_torque_meas(torque, torque) + self.safety.set_desired_torque_last(torque) + + should_tx = not steer_req and torque == 0 + self.assertEqual(should_tx, self._tx(self._torque_cmd_msg(torque, steer_req))) + + def test_lta_steer_cmd(self): + """ + Tests the LTA steering command message + controls_allowed: + * STEER_REQUEST and STEER_REQUEST_2 do not mismatch + * TORQUE_WIND_DOWN is only set to 0 or 100 when STEER_REQUEST and STEER_REQUEST_2 are both 1 + * Full torque messages are blocked if either EPS torque or driver torque is above the threshold + + not controls_allowed: + * STEER_REQUEST, STEER_REQUEST_2, and TORQUE_WIND_DOWN are all 0 + """ + for controls_allowed in (True, False): + for angle in np.arange(-90, 90, 1): + self.safety.set_controls_allowed(controls_allowed) + self._reset_angle_measurement(angle) + self._set_prev_desired_angle(angle) + + self.assertTrue(self._tx(self._lta_msg(0, 0, angle, 0))) + if controls_allowed: + # Test the two steer request bits and TORQUE_WIND_DOWN torque wind down signal + for req, req2, torque_wind_down in itertools.product([0, 1], [0, 1], [0, 50, 100]): + mismatch = not (req or req2) and torque_wind_down != 0 + should_tx = req == req2 and (torque_wind_down in (0, 100)) and not mismatch + self.assertEqual(should_tx, self._tx(self._lta_msg(req, req2, angle, torque_wind_down))) + + # Test max EPS torque and driver override thresholds + cases = itertools.product( + (0, self.MAX_MEAS_TORQUE - 1, self.MAX_MEAS_TORQUE, self.MAX_MEAS_TORQUE + 1, self.MAX_MEAS_TORQUE * 2), + (0, self.MAX_LTA_DRIVER_TORQUE - 1, self.MAX_LTA_DRIVER_TORQUE, self.MAX_LTA_DRIVER_TORQUE + 1, self.MAX_LTA_DRIVER_TORQUE * 2) + ) + + for eps_torque, driver_torque in cases: + for sign in (-1, 1): + for _ in range(6): + self._rx(self._torque_meas_msg(sign * eps_torque, sign * driver_torque)) + + # Toyota adds 1 to EPS torque since it is rounded after EPS factor + should_tx = (eps_torque - 1) <= self.MAX_MEAS_TORQUE and driver_torque <= self.MAX_LTA_DRIVER_TORQUE + self.assertEqual(should_tx, self._tx(self._lta_msg(1, 1, angle, 100))) + self.assertTrue(self._tx(self._lta_msg(1, 1, angle, 0))) # should tx if we wind down torque + + else: + # Controls not allowed + for req, req2, torque_wind_down in itertools.product([0, 1], [0, 1], [0, 50, 100]): + should_tx = not (req or req2) and torque_wind_down == 0 + self.assertEqual(should_tx, self._tx(self._lta_msg(req, req2, angle, torque_wind_down))) + + def test_steering_angle_measurements(self, max_angle=None): + # Measurement test tests max angle + 0.5 which will fail + super().test_steering_angle_measurements(max_angle=self.MAX_LTA_ANGLE - 0.5) + + def 
test_angle_cmd_when_enabled(self, max_angle=None): + super().test_angle_cmd_when_enabled(max_angle=self.MAX_LTA_ANGLE) + + def test_angle_measurements(self): + """ + * Tests angle meas quality flag dictates whether angle measurement is parsed, and if rx is valid + * Tests rx hook correctly clips the angle measurement, since it is to be compared to LTA cmd when inactive + """ + for steer_angle_initializing in (True, False): + for angle in np.arange(0, self.MAX_LTA_ANGLE * 2, 1): + # If init flag is set, do not rx or parse any angle measurements + for a in (angle, -angle, 0, 0, 0, 0): + self.assertEqual(not steer_angle_initializing, + self._rx(self._angle_meas_msg(a, steer_angle_initializing))) + + final_angle = (0 if steer_angle_initializing else + round(min(angle, self.MAX_LTA_ANGLE) * self.DEG_TO_CAN)) + self.assertEqual(self.safety.get_angle_meas_min(), -final_angle) + self.assertEqual(self.safety.get_angle_meas_max(), final_angle) + + self._rx(self._angle_meas_msg(0)) + self.assertEqual(self.safety.get_angle_meas_min(), -final_angle) + self.assertEqual(self.safety.get_angle_meas_max(), 0) + + self._rx(self._angle_meas_msg(0)) + self.assertEqual(self.safety.get_angle_meas_min(), 0) + self.assertEqual(self.safety.get_angle_meas_max(), 0) + + +class TestToyotaSafetyAngleGasInterceptor(TestToyotaSafetyGasInterceptorBase, TestToyotaSafetyAngle): + pass + + +class TestToyotaAltBrakeSafety(TestToyotaSafetyTorque): + + def setUp(self): + self.packer = CANPackerPanda("toyota_new_mc_pt_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_TOYOTA, self.EPS_SCALE | Panda.FLAG_TOYOTA_ALT_BRAKE) + self.safety.init_tests() + + def _user_brake_msg(self, brake): + values = {"BRAKE_PRESSED": brake} + return self.packer.make_can_msg_panda("BRAKE_MODULE", 0, values) + + # No LTA message in the DBC + def test_lta_steer_cmd(self): + pass + + +class TestToyotaAltBrakeSafetyGasInterceptor(TestToyotaSafetyGasInterceptorBase, TestToyotaAltBrakeSafety): + pass + + +class TestToyotaStockLongitudinalBase(TestToyotaSafetyBase): + + TX_MSGS = TOYOTA_COMMON_TX_MSGS + # Base addresses minus ACC_CONTROL (0x343) + RELAY_MALFUNCTION_ADDRS = {0: (0x2E4,)} + FWD_BLACKLISTED_ADDRS = {2: [0x2E4, 0x412, 0x191]} + + def test_diagnostics(self, stock_longitudinal: bool = True): + super().test_diagnostics(stock_longitudinal=stock_longitudinal) + + def test_block_aeb(self, stock_longitudinal: bool = True): + super().test_block_aeb(stock_longitudinal=stock_longitudinal) + + def test_accel_actuation_limits(self, stock_longitudinal=True): + super().test_accel_actuation_limits(stock_longitudinal=stock_longitudinal) + + def test_acc_cancel(self): + """ + Regardless of controls allowed, never allow ACC_CONTROL if cancel bit isn't set + """ + for controls_allowed in [True, False]: + self.safety.set_controls_allowed(controls_allowed) + for accel in np.arange(self.MIN_ACCEL - 1, self.MAX_ACCEL + 1, 0.1): + self.assertFalse(self._tx(self._accel_msg(accel))) + should_tx = np.isclose(accel, 0, atol=0.0001) + self.assertEqual(should_tx, self._tx(self._accel_msg(accel, cancel_req=1))) + + +class TestToyotaStockLongitudinalTorque(TestToyotaStockLongitudinalBase, TestToyotaSafetyTorque): + + def setUp(self): + self.packer = CANPackerPanda("toyota_nodsu_pt_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_TOYOTA, self.EPS_SCALE | Panda.FLAG_TOYOTA_STOCK_LONGITUDINAL) + self.safety.init_tests() + + +class 
TestToyotaStockLongitudinalAngle(TestToyotaStockLongitudinalBase, TestToyotaSafetyAngle): + + def setUp(self): + self.packer = CANPackerPanda("toyota_nodsu_pt_generated") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_TOYOTA, self.EPS_SCALE | Panda.FLAG_TOYOTA_STOCK_LONGITUDINAL | Panda.FLAG_TOYOTA_LTA) + self.safety.init_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_volkswagen_mqb.py b/panda/tests/safety/test_volkswagen_mqb.py new file mode 100644 index 0000000..276ee6c --- /dev/null +++ b/panda/tests/safety/test_volkswagen_mqb.py @@ -0,0 +1,225 @@ +#!/usr/bin/env python3 +import unittest +import numpy as np +from panda import Panda +from panda.tests.libpanda import libpanda_py +import panda.tests.safety.common as common +from panda.tests.safety.common import CANPackerPanda + +MAX_ACCEL = 2.0 +MIN_ACCEL = -3.5 + +MSG_ESP_19 = 0xB2 # RX from ABS, for wheel speeds +MSG_LH_EPS_03 = 0x9F # RX from EPS, for driver steering torque +MSG_ESP_05 = 0x106 # RX from ABS, for brake light state +MSG_TSK_06 = 0x120 # RX from ECU, for ACC status from drivetrain coordinator +MSG_MOTOR_20 = 0x121 # RX from ECU, for driver throttle input +MSG_ACC_06 = 0x122 # TX by OP, ACC control instructions to the drivetrain coordinator +MSG_HCA_01 = 0x126 # TX by OP, Heading Control Assist steering torque +MSG_GRA_ACC_01 = 0x12B # TX by OP, ACC control buttons for cancel/resume +MSG_ACC_07 = 0x12E # TX by OP, ACC control instructions to the drivetrain coordinator +MSG_ACC_02 = 0x30C # TX by OP, ACC HUD data to the instrument cluster +MSG_LDW_02 = 0x397 # TX by OP, Lane line recognition and text alerts + + +class TestVolkswagenMqbSafety(common.PandaCarSafetyTest, common.DriverTorqueSteeringSafetyTest): + STANDSTILL_THRESHOLD = 0 + RELAY_MALFUNCTION_ADDRS = {0: (MSG_HCA_01,)} + + MAX_RATE_UP = 4 + MAX_RATE_DOWN = 10 + MAX_TORQUE = 300 + MAX_RT_DELTA = 75 + RT_INTERVAL = 250000 + + DRIVER_TORQUE_ALLOWANCE = 80 + DRIVER_TORQUE_FACTOR = 3 + + @classmethod + def setUpClass(cls): + if cls.__name__ == "TestVolkswagenMqbSafety": + cls.packer = None + cls.safety = None + raise unittest.SkipTest + + # Wheel speeds _esp_19_msg + def _speed_msg(self, speed): + values = {"ESP_%s_Radgeschw_02" % s: speed for s in ["HL", "HR", "VL", "VR"]} + return self.packer.make_can_msg_panda("ESP_19", 0, values) + + # Driver brake pressure over threshold + def _esp_05_msg(self, brake): + values = {"ESP_Fahrer_bremst": brake} + return self.packer.make_can_msg_panda("ESP_05", 0, values) + + # Brake pedal switch + def _motor_14_msg(self, brake): + values = {"MO_Fahrer_bremst": brake} + return self.packer.make_can_msg_panda("Motor_14", 0, values) + + def _user_brake_msg(self, brake): + return self._motor_14_msg(brake) + + # Driver throttle input + def _user_gas_msg(self, gas): + values = {"MO_Fahrpedalrohwert_01": gas} + return self.packer.make_can_msg_panda("Motor_20", 0, values) + + # ACC engagement status + def _tsk_status_msg(self, enable, main_switch=True): + if main_switch: + tsk_status = 3 if enable else 2 + else: + tsk_status = 0 + values = {"TSK_Status": tsk_status} + return self.packer.make_can_msg_panda("TSK_06", 0, values) + + def _pcm_status_msg(self, enable): + return self._tsk_status_msg(enable) + + # Driver steering input torque + def _torque_driver_msg(self, torque): + values = {"EPS_Lenkmoment": abs(torque), "EPS_VZ_Lenkmoment": torque < 0} + return self.packer.make_can_msg_panda("LH_EPS_03", 0, values) + + # openpilot steering output torque + def 
_torque_cmd_msg(self, torque, steer_req=1): + values = {"HCA_01_LM_Offset": abs(torque), "HCA_01_LM_OffSign": torque < 0, "HCA_01_Sendestatus": steer_req} + return self.packer.make_can_msg_panda("HCA_01", 0, values) + + # Cruise control buttons + def _gra_acc_01_msg(self, cancel=0, resume=0, _set=0, bus=2): + values = {"GRA_Abbrechen": cancel, "GRA_Tip_Setzen": _set, "GRA_Tip_Wiederaufnahme": resume} + return self.packer.make_can_msg_panda("GRA_ACC_01", bus, values) + + # Acceleration request to drivetrain coordinator + def _acc_06_msg(self, accel): + values = {"ACC_Sollbeschleunigung_02": accel} + return self.packer.make_can_msg_panda("ACC_06", 0, values) + + # Acceleration request to drivetrain coordinator + def _acc_07_msg(self, accel, secondary_accel=3.02): + values = {"ACC_Sollbeschleunigung_02": accel, "ACC_Folgebeschl": secondary_accel} + return self.packer.make_can_msg_panda("ACC_07", 0, values) + + # Verify brake_pressed is true if either the switch or pressure threshold signals are true + def test_redundant_brake_signals(self): + test_combinations = [(True, True, True), (True, True, False), (True, False, True), (False, False, False)] + for brake_pressed, motor_14_signal, esp_05_signal in test_combinations: + self._rx(self._motor_14_msg(False)) + self._rx(self._esp_05_msg(False)) + self.assertFalse(self.safety.get_brake_pressed_prev()) + self._rx(self._motor_14_msg(motor_14_signal)) + self._rx(self._esp_05_msg(esp_05_signal)) + self.assertEqual(brake_pressed, self.safety.get_brake_pressed_prev(), + f"expected {brake_pressed=} with {motor_14_signal=} and {esp_05_signal=}") + + def test_torque_measurements(self): + # TODO: make this test work with all cars + self._rx(self._torque_driver_msg(50)) + self._rx(self._torque_driver_msg(-50)) + self._rx(self._torque_driver_msg(0)) + self._rx(self._torque_driver_msg(0)) + self._rx(self._torque_driver_msg(0)) + self._rx(self._torque_driver_msg(0)) + + self.assertEqual(-50, self.safety.get_torque_driver_min()) + self.assertEqual(50, self.safety.get_torque_driver_max()) + + self._rx(self._torque_driver_msg(0)) + self.assertEqual(0, self.safety.get_torque_driver_max()) + self.assertEqual(-50, self.safety.get_torque_driver_min()) + + self._rx(self._torque_driver_msg(0)) + self.assertEqual(0, self.safety.get_torque_driver_max()) + self.assertEqual(0, self.safety.get_torque_driver_min()) + + +class TestVolkswagenMqbStockSafety(TestVolkswagenMqbSafety): + TX_MSGS = [[MSG_HCA_01, 0], [MSG_LDW_02, 0], [MSG_LH_EPS_03, 2], [MSG_GRA_ACC_01, 0], [MSG_GRA_ACC_01, 2]] + FWD_BLACKLISTED_ADDRS = {0: [MSG_LH_EPS_03], 2: [MSG_HCA_01, MSG_LDW_02]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + def setUp(self): + self.packer = CANPackerPanda("vw_mqb_2010") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_VOLKSWAGEN_MQB, 0) + self.safety.init_tests() + + def test_spam_cancel_safety_check(self): + self.safety.set_controls_allowed(0) + self.assertTrue(self._tx(self._gra_acc_01_msg(cancel=1))) + self.assertFalse(self._tx(self._gra_acc_01_msg(resume=1))) + self.assertFalse(self._tx(self._gra_acc_01_msg(_set=1))) + # do not block resume if we are engaged already + self.safety.set_controls_allowed(1) + self.assertTrue(self._tx(self._gra_acc_01_msg(resume=1))) + + +class TestVolkswagenMqbLongSafety(TestVolkswagenMqbSafety): + TX_MSGS = [[MSG_HCA_01, 0], [MSG_LDW_02, 0], [MSG_LH_EPS_03, 2], [MSG_ACC_02, 0], [MSG_ACC_06, 0], [MSG_ACC_07, 0]] + FWD_BLACKLISTED_ADDRS = {0: [MSG_LH_EPS_03], 2: [MSG_HCA_01, MSG_LDW_02, MSG_ACC_02, MSG_ACC_06, MSG_ACC_07]} 
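+ # note (added comment, not in upstream): the blacklist above also blocks the stock camera ACC messages (ACC_02/ACC_06/ACC_07) from being forwarded, since openpilot transmits its own per TX_MSGS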
+ FWD_BUS_LOOKUP = {0: 2, 2: 0} + INACTIVE_ACCEL = 3.01 + + def setUp(self): + self.packer = CANPackerPanda("vw_mqb_2010") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_VOLKSWAGEN_MQB, Panda.FLAG_VOLKSWAGEN_LONG_CONTROL) + self.safety.init_tests() + + # stock cruise controls are entirely bypassed under openpilot longitudinal control + def test_disable_control_allowed_from_cruise(self): + pass + + def test_enable_control_allowed_from_cruise(self): + pass + + def test_cruise_engaged_prev(self): + pass + + def test_set_and_resume_buttons(self): + for button in ["set", "resume"]: + # ACC main switch must be on, engage on falling edge + self.safety.set_controls_allowed(0) + self._rx(self._tsk_status_msg(False, main_switch=False)) + self._rx(self._gra_acc_01_msg(_set=(button == "set"), resume=(button == "resume"), bus=0)) + self.assertFalse(self.safety.get_controls_allowed(), f"controls allowed on {button} with main switch off") + self._rx(self._tsk_status_msg(False, main_switch=True)) + self._rx(self._gra_acc_01_msg(_set=(button == "set"), resume=(button == "resume"), bus=0)) + self.assertFalse(self.safety.get_controls_allowed(), f"controls allowed on {button} rising edge") + self._rx(self._gra_acc_01_msg(bus=0)) + self.assertTrue(self.safety.get_controls_allowed(), f"controls not allowed on {button} falling edge") + + def test_cancel_button(self): + # Disable on rising edge of cancel button + self._rx(self._tsk_status_msg(False, main_switch=True)) + self.safety.set_controls_allowed(1) + self._rx(self._gra_acc_01_msg(cancel=True, bus=0)) + self.assertFalse(self.safety.get_controls_allowed(), "controls allowed after cancel") + + def test_main_switch(self): + # Disable as soon as main switch turns off + self._rx(self._tsk_status_msg(False, main_switch=True)) + self.safety.set_controls_allowed(1) + self._rx(self._tsk_status_msg(False, main_switch=False)) + self.assertFalse(self.safety.get_controls_allowed(), "controls allowed after ACC main switch off") + + def test_accel_safety_check(self): + for controls_allowed in [True, False]: + # enforce we don't skip over 0 or inactive accel + for accel in np.concatenate((np.arange(MIN_ACCEL - 2, MAX_ACCEL + 2, 0.03), [0, self.INACTIVE_ACCEL])): + accel = round(accel, 2) # floats might not hit exact boundary conditions without rounding + is_inactive_accel = accel == self.INACTIVE_ACCEL + send = (controls_allowed and MIN_ACCEL <= accel <= MAX_ACCEL) or is_inactive_accel + self.safety.set_controls_allowed(controls_allowed) + # primary accel request used by ECU + self.assertEqual(send, self._tx(self._acc_06_msg(accel)), (controls_allowed, accel)) + # additional accel request used by ABS/ESP + self.assertEqual(send, self._tx(self._acc_07_msg(accel)), (controls_allowed, accel)) + # ensure the optional secondary accel field remains inactive for now + self.assertEqual(is_inactive_accel, self._tx(self._acc_07_msg(accel, secondary_accel=accel)), (controls_allowed, accel)) + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety/test_volkswagen_pq.py b/panda/tests/safety/test_volkswagen_pq.py new file mode 100644 index 0000000..f2bc317 --- /dev/null +++ b/panda/tests/safety/test_volkswagen_pq.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python3 +import unittest +from panda import Panda +from panda.tests.libpanda import libpanda_py +import panda.tests.safety.common as common +from panda.tests.safety.common import CANPackerPanda + +MSG_LENKHILFE_3 = 0x0D0 # RX from EPS, for steering angle and driver steering torque 
+MSG_HCA_1 = 0x0D2 # TX by OP, Heading Control Assist steering torque +MSG_BREMSE_1 = 0x1A0 # RX from ABS, for ego speed +MSG_MOTOR_2 = 0x288 # RX from ECU, for CC state and brake switch state +MSG_ACC_SYSTEM = 0x368 # TX by OP, longitudinal acceleration controls +MSG_MOTOR_3 = 0x380 # RX from ECU, for driver throttle input +MSG_GRA_NEU = 0x38A # TX by OP, ACC control buttons for cancel/resume +MSG_MOTOR_5 = 0x480 # RX from ECU, for ACC main switch state +MSG_ACC_GRA_ANZEIGE = 0x56A # TX by OP, ACC HUD +MSG_LDW_1 = 0x5BE # TX by OP, Lane line recognition and text alerts + + +class TestVolkswagenPqSafety(common.PandaCarSafetyTest, common.DriverTorqueSteeringSafetyTest): + cruise_engaged = False + + STANDSTILL_THRESHOLD = 0 + RELAY_MALFUNCTION_ADDRS = {0: (MSG_HCA_1,)} + + MAX_RATE_UP = 6 + MAX_RATE_DOWN = 10 + MAX_TORQUE = 300 + MAX_RT_DELTA = 113 + RT_INTERVAL = 250000 + + DRIVER_TORQUE_ALLOWANCE = 80 + DRIVER_TORQUE_FACTOR = 3 + + @classmethod + def setUpClass(cls): + if cls.__name__ == "TestVolkswagenPqSafety": + cls.packer = None + cls.safety = None + raise unittest.SkipTest + + def _set_prev_torque(self, t): + self.safety.set_desired_torque_last(t) + self.safety.set_rt_torque_last(t) + + # Ego speed (Bremse_1) + def _speed_msg(self, speed): + values = {"Geschwindigkeit_neu__Bremse_1_": speed} + return self.packer.make_can_msg_panda("Bremse_1", 0, values) + + # Brake light switch (shared message Motor_2) + def _user_brake_msg(self, brake): + # since this signal is used for engagement status, preserve current state + return self._motor_2_msg(brake_pressed=brake, cruise_engaged=self.safety.get_controls_allowed()) + + # ACC engaged status (shared message Motor_2) + def _pcm_status_msg(self, enable): + self.__class__.cruise_engaged = enable + return self._motor_2_msg(cruise_engaged=enable) + + # Acceleration request to drivetrain coordinator + def _accel_msg(self, accel): + values = {"ACS_Sollbeschl": accel} + return self.packer.make_can_msg_panda("ACC_System", 0, values) + + # Driver steering input torque + def _torque_driver_msg(self, torque): + values = {"LH3_LM": abs(torque), "LH3_LMSign": torque < 0} + return self.packer.make_can_msg_panda("Lenkhilfe_3", 0, values) + + # openpilot steering output torque + def _torque_cmd_msg(self, torque, steer_req=1, hca_status=5): + values = {"LM_Offset": abs(torque), "LM_OffSign": torque < 0, "HCA_Status": hca_status if steer_req else 3} + return self.packer.make_can_msg_panda("HCA_1", 0, values) + + # ACC engagement and brake light switch status + # Called indirectly for compatibility with common.py tests + def _motor_2_msg(self, brake_pressed=False, cruise_engaged=False): + values = {"Bremslichtschalter": brake_pressed, + "GRA_Status": cruise_engaged} + return self.packer.make_can_msg_panda("Motor_2", 0, values) + + # ACC main switch status + def _motor_5_msg(self, main_switch=False): + values = {"GRA_Hauptschalter": main_switch} + return self.packer.make_can_msg_panda("Motor_5", 0, values) + + # Driver throttle input (Motor_3) + def _user_gas_msg(self, gas): + values = {"Fahrpedal_Rohsignal": gas} + return self.packer.make_can_msg_panda("Motor_3", 0, values) + + # Cruise control buttons (GRA_Neu) + def _button_msg(self, _set=False, resume=False, cancel=False, bus=2): + values = {"GRA_Neu_Setzen": _set, "GRA_Recall": resume, "GRA_Abbrechen": cancel} + return self.packer.make_can_msg_panda("GRA_Neu", bus, values) + + def test_torque_measurements(self): + # TODO: make this test work with all cars + self._rx(self._torque_driver_msg(50)) + 
self._rx(self._torque_driver_msg(-50)) + self._rx(self._torque_driver_msg(0)) + self._rx(self._torque_driver_msg(0)) + self._rx(self._torque_driver_msg(0)) + self._rx(self._torque_driver_msg(0)) + + self.assertEqual(-50, self.safety.get_torque_driver_min()) + self.assertEqual(50, self.safety.get_torque_driver_max()) + + self._rx(self._torque_driver_msg(0)) + self.assertEqual(0, self.safety.get_torque_driver_max()) + self.assertEqual(-50, self.safety.get_torque_driver_min()) + + self._rx(self._torque_driver_msg(0)) + self.assertEqual(0, self.safety.get_torque_driver_max()) + self.assertEqual(0, self.safety.get_torque_driver_min()) + + +class TestVolkswagenPqStockSafety(TestVolkswagenPqSafety): + # Transmit of GRA_Neu is allowed on bus 0 and 2 to keep compatibility with gateway and camera integration + TX_MSGS = [[MSG_HCA_1, 0], [MSG_GRA_NEU, 0], [MSG_GRA_NEU, 2], [MSG_LDW_1, 0]] + FWD_BLACKLISTED_ADDRS = {2: [MSG_HCA_1, MSG_LDW_1]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + + def setUp(self): + self.packer = CANPackerPanda("vw_golf_mk4") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_VOLKSWAGEN_PQ, 0) + self.safety.init_tests() + + def test_spam_cancel_safety_check(self): + self.safety.set_controls_allowed(0) + self.assertTrue(self._tx(self._button_msg(cancel=True))) + self.assertFalse(self._tx(self._button_msg(resume=True))) + self.assertFalse(self._tx(self._button_msg(_set=True))) + # do not block resume if we are engaged already + self.safety.set_controls_allowed(1) + self.assertTrue(self._tx(self._button_msg(resume=True))) + + +class TestVolkswagenPqLongSafety(TestVolkswagenPqSafety, common.LongitudinalAccelSafetyTest): + TX_MSGS = [[MSG_HCA_1, 0], [MSG_LDW_1, 0], [MSG_ACC_SYSTEM, 0], [MSG_ACC_GRA_ANZEIGE, 0]] + FWD_BLACKLISTED_ADDRS = {2: [MSG_HCA_1, MSG_LDW_1, MSG_ACC_SYSTEM, MSG_ACC_GRA_ANZEIGE]} + FWD_BUS_LOOKUP = {0: 2, 2: 0} + INACTIVE_ACCEL = 3.01 + + def setUp(self): + self.packer = CANPackerPanda("vw_golf_mk4") + self.safety = libpanda_py.libpanda + self.safety.set_safety_hooks(Panda.SAFETY_VOLKSWAGEN_PQ, Panda.FLAG_VOLKSWAGEN_LONG_CONTROL) + self.safety.init_tests() + + # stock cruise controls are entirely bypassed under openpilot longitudinal control + def test_disable_control_allowed_from_cruise(self): + pass + + def test_enable_control_allowed_from_cruise(self): + pass + + def test_cruise_engaged_prev(self): + pass + + def test_set_and_resume_buttons(self): + for button in ["set", "resume"]: + # ACC main switch must be on, engage on falling edge + self.safety.set_controls_allowed(0) + self._rx(self._motor_5_msg(main_switch=False)) + self._rx(self._button_msg(_set=(button == "set"), resume=(button == "resume"), bus=0)) + self._rx(self._button_msg(bus=0)) + self.assertFalse(self.safety.get_controls_allowed(), f"controls allowed on {button} with main switch off") + self._rx(self._motor_5_msg(main_switch=True)) + self._rx(self._button_msg(_set=(button == "set"), resume=(button == "resume"), bus=0)) + self.assertFalse(self.safety.get_controls_allowed(), f"controls allowed on {button} rising edge") + self._rx(self._button_msg(bus=0)) + self.assertTrue(self.safety.get_controls_allowed(), f"controls not allowed on {button} falling edge") + + def test_cancel_button(self): + # Disable on rising edge of cancel button + self._rx(self._motor_5_msg(main_switch=True)) + self.safety.set_controls_allowed(1) + self._rx(self._button_msg(cancel=True, bus=0)) + self.assertFalse(self.safety.get_controls_allowed(), "controls allowed after cancel") + + def 
test_main_switch(self): + # Disable as soon as main switch turns off + self._rx(self._motor_5_msg(main_switch=True)) + self.safety.set_controls_allowed(1) + self._rx(self._motor_5_msg(main_switch=False)) + self.assertFalse(self.safety.get_controls_allowed(), "controls allowed after ACC main switch off") + + def test_torque_cmd_enable_variants(self): + # The EPS rack accepts either 5 or 7 for an enabled status, with different low speed tuning behavior + self.safety.set_controls_allowed(1) + for enabled_status in (5, 7): + self.assertTrue(self._tx(self._torque_cmd_msg(self.MAX_RATE_UP, steer_req=1, hca_status=enabled_status)), + f"torque cmd rejected with {enabled_status=}") + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/safety_replay/.gitignore b/panda/tests/safety_replay/.gitignore new file mode 100644 index 0000000..192fb09 --- /dev/null +++ b/panda/tests/safety_replay/.gitignore @@ -0,0 +1 @@ +*.bz2 diff --git a/panda/tests/safety_replay/__init__.py b/panda/tests/safety_replay/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/panda/tests/safety_replay/helpers.py b/panda/tests/safety_replay/helpers.py new file mode 100644 index 0000000..a1a6cd3 --- /dev/null +++ b/panda/tests/safety_replay/helpers.py @@ -0,0 +1,80 @@ +import panda.tests.libpanda.libpanda_py as libpanda_py +from panda import Panda + +def to_signed(d, bits): + ret = d + if d >= (1 << (bits - 1)): + ret = d - (1 << bits) + return ret + +def is_steering_msg(mode, param, addr): + ret = False + if mode in (Panda.SAFETY_HONDA_NIDEC, Panda.SAFETY_HONDA_BOSCH): + ret = (addr == 0xE4) or (addr == 0x194) or (addr == 0x33D) or (addr == 0x33DA) or (addr == 0x33DB) + elif mode == Panda.SAFETY_TOYOTA: + ret = addr == (0x191 if param & Panda.FLAG_TOYOTA_LTA else 0x2E4) + elif mode == Panda.SAFETY_GM: + ret = addr == 384 + elif mode == Panda.SAFETY_HYUNDAI: + ret = addr == 832 + elif mode == Panda.SAFETY_CHRYSLER: + ret = addr == 0x292 + elif mode == Panda.SAFETY_SUBARU: + ret = addr == 0x122 + elif mode == Panda.SAFETY_FORD: + ret = addr == 0x3d3 + elif mode == Panda.SAFETY_NISSAN: + ret = addr == 0x169 + return ret + +def get_steer_value(mode, param, to_send): + torque, angle = 0, 0 + if mode in (Panda.SAFETY_HONDA_NIDEC, Panda.SAFETY_HONDA_BOSCH): + torque = (to_send.data[0] << 8) | to_send.data[1] + torque = to_signed(torque, 16) + elif mode == Panda.SAFETY_TOYOTA: + if param & Panda.FLAG_TOYOTA_LTA: + angle = (to_send.data[1] << 8) | to_send.data[2] + angle = to_signed(angle, 16) + else: + torque = (to_send.data[1] << 8) | (to_send.data[2]) + torque = to_signed(torque, 16) + elif mode == Panda.SAFETY_GM: + torque = ((to_send.data[0] & 0x7) << 8) | to_send.data[1] + torque = to_signed(torque, 11) + elif mode == Panda.SAFETY_HYUNDAI: + torque = (((to_send.data[3] & 0x7) << 8) | to_send.data[2]) - 1024 + elif mode == Panda.SAFETY_CHRYSLER: + torque = (((to_send.data[0] & 0x7) << 8) | to_send.data[1]) - 1024 + elif mode == Panda.SAFETY_SUBARU: + torque = ((to_send.data[3] & 0x1F) << 8) | to_send.data[2] + torque = -to_signed(torque, 13) + elif mode == Panda.SAFETY_FORD: + angle = ((to_send.data[0] << 3) | (to_send.data[1] >> 5)) - 1000 + elif mode == Panda.SAFETY_NISSAN: + angle = (to_send.data[0] << 10) | (to_send.data[1] << 2) | (to_send.data[2] >> 6) + angle = -angle + (1310 * 100) + return torque, angle + +def package_can_msg(msg): + return libpanda_py.make_CANPacket(msg.address, msg.src % 4, msg.dat) + +def init_segment(safety, lr, mode, param): + sendcan = (msg for msg in lr if 
msg.which() == 'sendcan') + steering_msgs = (can for msg in sendcan for can in msg.sendcan if is_steering_msg(mode, param, can.address)) + + msg = next(steering_msgs, None) + if msg is None: + # no steering msgs + return + + to_send = package_can_msg(msg) + torque, angle = get_steer_value(mode, param, to_send) + if torque != 0: + safety.set_controls_allowed(1) + safety.set_desired_torque_last(torque) + elif angle != 0: + safety.set_controls_allowed(1) + safety.set_desired_angle_last(angle) + safety.set_angle_meas(angle, angle) + assert safety.safety_tx_hook(to_send), "failed to initialize panda safety for segment" diff --git a/panda/tests/safety_replay/replay_drive.py b/panda/tests/safety_replay/replay_drive.py new file mode 100644 index 0000000..df3e055 --- /dev/null +++ b/panda/tests/safety_replay/replay_drive.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +import argparse +import os +from collections import Counter + +from panda.tests.libpanda import libpanda_py +from panda.tests.safety_replay.helpers import package_can_msg, init_segment + +# replay a drive to check for safety violations +def replay_drive(lr, safety_mode, param, alternative_experience, segment=False): + safety = libpanda_py.libpanda + + err = safety.set_safety_hooks(safety_mode, param) + assert err == 0, "invalid safety mode: %d" % safety_mode + safety.set_alternative_experience(alternative_experience) + + if segment: + init_segment(safety, lr, safety_mode, param) + lr.reset() + + rx_tot, rx_invalid, tx_tot, tx_blocked, tx_controls, tx_controls_blocked = 0, 0, 0, 0, 0, 0 + safety_tick_rx_invalid = False + blocked_addrs = Counter() + invalid_addrs = set() + + can_msgs = [m for m in lr if m.which() in ('can', 'sendcan')] + start_t = can_msgs[0].logMonoTime + end_t = can_msgs[-1].logMonoTime + for msg in can_msgs: + safety.set_timer((msg.logMonoTime // 1000) % 0xFFFFFFFF) + + # skip start and end of route, warm up/down period + if msg.logMonoTime - start_t > 1e9 and end_t - msg.logMonoTime > 1e9: + safety.safety_tick_current_safety_config() + safety_tick_rx_invalid |= not safety.safety_config_valid() or safety_tick_rx_invalid + + if msg.which() == 'sendcan': + for canmsg in msg.sendcan: + to_send = package_can_msg(canmsg) + sent = safety.safety_tx_hook(to_send) + if not sent: + tx_blocked += 1 + tx_controls_blocked += safety.get_controls_allowed() + blocked_addrs[canmsg.address] += 1 + + if "DEBUG" in os.environ: + print("blocked bus %d msg %d at %f" % (canmsg.src, canmsg.address, (msg.logMonoTime - start_t) / 1e9)) + tx_controls += safety.get_controls_allowed() + tx_tot += 1 + elif msg.which() == 'can': + # ignore msgs we sent + for canmsg in filter(lambda m: m.src < 128, msg.can): + to_push = package_can_msg(canmsg) + recv = safety.safety_rx_hook(to_push) + if not recv: + rx_invalid += 1 + invalid_addrs.add(canmsg.address) + rx_tot += 1 + + print("\nRX") + print("total rx msgs:", rx_tot) + print("invalid rx msgs:", rx_invalid) + print("safety tick rx invalid:", safety_tick_rx_invalid) + print("invalid addrs:", invalid_addrs) + print("\nTX") + print("total openpilot msgs:", tx_tot) + print("total msgs with controls allowed:", tx_controls) + print("blocked msgs:", tx_blocked) + print("blocked with controls allowed:", tx_controls_blocked) + print("blocked addrs:", blocked_addrs) + + return tx_controls_blocked == 0 and rx_invalid == 0 and not safety_tick_rx_invalid + +if __name__ == "__main__": + from openpilot.tools.lib.logreader import LogReader + + parser = argparse.ArgumentParser(description="Replay CAN messages from a 
route or segment through a safety mode", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument("route_or_segment_name", nargs='+') + parser.add_argument("--mode", type=int, help="Override the safety mode from the log") + parser.add_argument("--param", type=int, help="Override the safety param from the log") + parser.add_argument("--alternative-experience", type=int, help="Override the alternative experience from the log") + args = parser.parse_args() + + lr = LogReader(args.route_or_segment_name[0]) + + if None in (args.mode, args.param, args.alternative_experience): + for msg in lr: + if msg.which() == 'carParams': + if args.mode is None: + args.mode = msg.carParams.safetyConfigs[-1].safetyModel.raw + if args.param is None: + args.param = msg.carParams.safetyConfigs[-1].safetyParam + if args.alternative_experience is None: + args.alternative_experience = msg.carParams.alternativeExperience + break + else: + raise Exception("carParams not found in log. Set safety mode and param manually.") + + lr.reset() + + print(f"replaying {args.route_or_segment_name[0]} with safety mode {args.mode}, param {args.param}, alternative experience {args.alternative_experience}") + replay_drive(lr, args.mode, args.param, args.alternative_experience, segment=len(lr.logreader_identifiers) == 1) diff --git a/panda/tests/setup_device_ci.sh b/panda/tests/setup_device_ci.sh new file mode 100644 index 0000000..b45c6b4 --- /dev/null +++ b/panda/tests/setup_device_ci.sh @@ -0,0 +1,74 @@ +#!/usr/bin/bash + +set -e + +if [ -z "$SOURCE_DIR" ]; then + echo "SOURCE_DIR must be set" + exit 1 +fi + +if [ -z "$GIT_COMMIT" ]; then + echo "GIT_COMMIT must be set" + exit 1 +fi + +if [ -z "$TEST_DIR" ]; then + echo "TEST_DIR must be set" + exit 1 +fi + +CONTINUE_PATH="/data/continue.sh" +tee $CONTINUE_PATH << EOF +#!/usr/bin/bash + +sudo abctl --set_success + +# patch sshd config +sudo mount -o rw,remount / +sudo sed -i "s,/data/params/d/GithubSshKeys,/usr/comma/setup_keys," /etc/ssh/sshd_config +sudo systemctl daemon-reload +sudo systemctl restart ssh +sudo systemctl disable ssh-param-watcher.path +sudo systemctl disable ssh-param-watcher.service +sudo mount -o ro,remount / + +while true; do + if ! sudo systemctl is-active -q ssh; then + sudo systemctl start ssh + fi + sleep 5s +done + +sleep infinity +EOF +chmod +x $CONTINUE_PATH + + +# set up environment +if [ ! -d "$SOURCE_DIR" ]; then + git clone https://github.com/commaai/panda.git $SOURCE_DIR +fi + +# setup device/SOM state +SOM_ST_IO=49 +echo $SOM_ST_IO > /sys/class/gpio/export || true +echo out > /sys/class/gpio/gpio${SOM_ST_IO}/direction +echo 1 > /sys/class/gpio/gpio${SOM_ST_IO}/value + +# checkout panda commit +cd $SOURCE_DIR + +rm -f .git/index.lock +git reset --hard +git fetch --no-tags --no-recurse-submodules -j4 --verbose --depth 1 origin $GIT_COMMIT +find . -maxdepth 1 -not -path './.git' -not -name '.' -not -name '..' 
-exec rm -rf '{}' \; +git reset --hard $GIT_COMMIT +git checkout $GIT_COMMIT +git clean -xdff + +echo "git checkout done, t=$SECONDS" +du -hs $SOURCE_DIR $SOURCE_DIR/.git + +rsync -a --delete $SOURCE_DIR $TEST_DIR + +echo "$TEST_DIR synced with $GIT_COMMIT, t=$SECONDS" diff --git a/panda/tests/som/on-device.py b/panda/tests/som/on-device.py new file mode 100644 index 0000000..f88d5a9 --- /dev/null +++ b/panda/tests/som/on-device.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python3 +import os +import time + +from panda import Panda + + +if __name__ == "__main__": + flag_set = False + while True: + try: + with Panda(disable_checks=False) as p: + if not flag_set: + p.set_heartbeat_disabled() + p.set_safety_mode(Panda.SAFETY_ELM327, 30) + flag_set = True + + # shutdown when told + dt = p.get_datetime() + if dt.year == 2040 and dt.month == 8: + os.system("sudo poweroff") + except Exception as e: + print(str(e)) + time.sleep(0.5) diff --git a/panda/tests/som/test_bootkick.py b/panda/tests/som/test_bootkick.py new file mode 100644 index 0000000..6c08e1a --- /dev/null +++ b/panda/tests/som/test_bootkick.py @@ -0,0 +1,154 @@ +import time +import pytest +import datetime + +from panda import Panda, PandaJungle + +PANDA_SERIAL = "28002d000451323431333839" +JUNGLE_SERIAL = "26001c001451313236343430" + +OBDC_PORT = 1 + +@pytest.fixture(autouse=True, scope="function") +def pj(): + jungle = PandaJungle(JUNGLE_SERIAL) + jungle.flash() + + jungle.reset() + jungle.set_ignition(False) + + yield jungle + + jungle.set_panda_power(False) + jungle.close() + +@pytest.fixture(scope="function") +def p(pj): + # note that the 3X's panda lib isn't updated, which + should be fine since it only uses stable APIs + pj.set_panda_power(True) + assert Panda.wait_for_panda(PANDA_SERIAL, 10) + p = Panda(PANDA_SERIAL) + p.flash() + p.reset() + yield p + p.close() + +def setup_state(panda, jungle, state): + jungle.set_panda_power(0) + + if state == "off": + wait_for_full_poweroff(jungle) + elif state == "normal boot": + jungle.set_panda_individual_power(OBDC_PORT, 1) + elif state == "QDL": + time.sleep(0.5) + jungle.set_panda_individual_power(OBDC_PORT, 1) + elif state == "ready to bootkick": + wait_for_full_poweroff(jungle) + jungle.set_panda_individual_power(OBDC_PORT, 1) + wait_for_boot(panda, jungle) + set_som_shutdown_flag(panda) + panda.set_safety_mode(Panda.SAFETY_SILENT) + panda.send_heartbeat() + wait_for_som_shutdown(panda, jungle) + else: + raise ValueError(f"unknown state: {state}") + + +def wait_for_som_shutdown(panda, jungle): + st = time.monotonic() + while panda.read_som_gpio(): + # can take a while for the SOM to fully shutdown + if time.monotonic() - st > 120: + raise Exception("SOM didn't shutdown in time") + if check_som_boot_flag(panda): + raise Exception(f"SOM rebooted instead of shutdown: {time.monotonic() - st}s") + time.sleep(0.5) + dt = time.monotonic() - st + print("waiting for shutdown", round(dt)) + dt = time.monotonic() - st + print(f"took {dt:.2f}s for SOM to shutdown") + +def wait_for_full_poweroff(jungle, timeout=30): + st = time.monotonic() + + time.sleep(15) + while PANDA_SERIAL in Panda.list(): + if time.monotonic() - st > timeout: + raise Exception("took too long for device to turn off") + + health = jungle.health() + assert all(health[f"ch{i}_power"] < 0.1 for i in range(1, 7)) + +def check_som_boot_flag(panda): + h = panda.health() + return h['safety_mode'] == Panda.SAFETY_ELM327 and h['safety_param'] == 30 + +def set_som_shutdown_flag(panda): + panda.set_datetime(datetime.datetime(year=2040,
month=8, day=23)) + +def wait_for_boot(panda, jungle, reset_expected=False, bootkick=False, timeout=120): + st = time.monotonic() + + Panda.wait_for_panda(PANDA_SERIAL, timeout) + panda.reconnect() + if bootkick: + assert panda.health()['uptime'] > 20 + else: + assert panda.health()['uptime'] < 3 + + for i in range(3): + assert not check_som_boot_flag(panda) + time.sleep(1) + + # wait for SOM to bootup + while not check_som_boot_flag(panda): + if time.monotonic() - st > timeout: + raise Exception("SOM didn't boot in time") + time.sleep(1.0) + + assert panda.health()['som_reset_triggered'] == reset_expected + +def test_cold_boot(p, pj): + setup_state(p, pj, "off") + setup_state(p, pj, "normal boot") + wait_for_boot(p, pj) + +def test_bootkick_ignition_line(p, pj): + setup_state(p, pj, "ready to bootkick") + pj.set_ignition(True) + wait_for_boot(p, pj, bootkick=True) + +@pytest.mark.skip("test isn't reliable yet") +def test_bootkick_can_ignition(p, pj): + setup_state(p, pj, "ready to bootkick") + for _ in range(10): + # Mazda ignition signal + pj.can_send(0x9E, b'\xc0\x00\x00\x00\x00\x00\x00\x00', 0) + time.sleep(0.5) + wait_for_boot(p, pj, bootkick=True) + +def test_recovery_from_qdl(p, pj): + setup_state(p, pj, "ready to bootkick") + + # put into QDL using the FORCE_USB_BOOT pin + for i in range(10): + pj.set_header_pin(i, 1) + + # try to boot + time.sleep(1) + pj.set_ignition(True) + time.sleep(3) + + # release FORCE_USB_BOOT + for i in range(10): + pj.set_header_pin(i, 0) + + # normally, this GPIO is set immediately since it's first enabled in the ABL + for i in range(40): + assert not p.read_som_gpio() + time.sleep(1) + + # should boot after 45s + wait_for_boot(p, pj, reset_expected=True, bootkick=True, timeout=120) diff --git a/panda/tests/som_debug.sh b/panda/tests/som_debug.sh new file mode 100644 index 0000000..9bb4219 --- /dev/null +++ b/panda/tests/som_debug.sh @@ -0,0 +1,7 @@ +#!/usr/bin/bash +set -e + +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)" +cd $DIR + +PYTHONUNBUFFERED=1 NO_COLOR=1 CLAIM=1 PORT=4 ./debug_console.py diff --git a/panda/tests/spam_can.py b/panda/tests/spam_can.py new file mode 100644 index 0000000..3154cc0 --- /dev/null +++ b/panda/tests/spam_can.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 +import os +import random + +from panda import Panda + +def get_test_string(): + return b"test" + os.urandom(10) + +if __name__ == "__main__": + p = Panda() + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + print("Spamming all buses...") + while True: + at = random.randint(1, 2000) + st = get_test_string()[0:8] + bus = random.randint(0, 2) + p.can_send(at, st, bus) + # print("Sent message on bus: ", bus) diff --git a/panda/tests/standalone_test.py b/panda/tests/standalone_test.py new file mode 100644 index 0000000..7ec5559 --- /dev/null +++ b/panda/tests/standalone_test.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +import struct +import time + +from panda import Panda + +if __name__ == "__main__": + p = Panda() + print(p.get_serial()) + print(p.health()) + + t1 = time.time() + for _ in range(100): + p.get_serial() + t2 = time.time() + print("100 requests took %.2f ms" % ((t2 - t1) * 1000)) + + p.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + a = 0 + while True: + # flood + msg = b"\xaa" * 4 + struct.pack("I", a) + p.can_send(0xaa, msg, 0) + p.can_send(0xaa, msg, 1) + p.can_send(0xaa, msg, 4) + time.sleep(0.01) + + dat = p.can_recv() + if len(dat) > 0: + print(dat) + a += 1 diff --git a/panda/tests/test_rsa.c b/panda/tests/test_rsa.c new file mode 100644 index 
0000000..5c784e2 --- /dev/null +++ b/panda/tests/test_rsa.c @@ -0,0 +1,34 @@ +/* +gcc -DTEST_RSA test_rsa.c ../crypto/rsa.c ../crypto/sha.c && ./a.out +*/ + +#include <stdio.h> +#include <stdint.h> + +#define MAX_LEN 0x40000 +char buf[MAX_LEN]; + +#include "../crypto/sha.h" +#include "../crypto/rsa.h" +#include "../obj/cert.h" + +int main() { + FILE *f = fopen("../obj/panda.bin", "rb"); + int tlen = fread(buf, 1, MAX_LEN, f); + fclose(f); + printf("read %d\n", tlen); + uint32_t *_app_start = (uint32_t *)buf; + + int len = _app_start[0]; + char digest[SHA_DIGEST_SIZE]; + SHA_hash(&_app_start[1], len-4, digest); + printf("SHA hash done\n"); + + if (!RSA_verify(&rsa_key, ((void*)&_app_start[0]) + len, RSANUMBYTES, digest, SHA_DIGEST_SIZE)) { + printf("RSA fail\n"); + } else { + printf("RSA match!!!\n"); + } + + return 0; +} diff --git a/panda/tests/tucan_loopback.py b/panda/tests/tucan_loopback.py new file mode 100644 index 0000000..457facd --- /dev/null +++ b/panda/tests/tucan_loopback.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python3 + +import os +import time +import random +import argparse +from itertools import permutations + +from panda import Panda + +def get_test_string(): + return b"test" + os.urandom(10) + +def run_test(sleep_duration): + pandas = Panda.list() + print(pandas) + + if len(pandas) < 2: + raise Exception("Two pandas are needed for test") + + run_test_w_pandas(pandas, sleep_duration) + +def run_test_w_pandas(pandas, sleep_duration): + h = [Panda(x) for x in pandas] + print("H", h) + + for hh in h: + hh.set_safety_mode(Panda.SAFETY_ALLOUTPUT) + + # test both directions + for ho in permutations(list(range(len(h))), r=2): + print("***************** TESTING", ho) + + panda0, panda1 = h[ho[0]], h[ho[1]] + + # **** test health packet **** + print("health", ho[0], h[ho[0]].health()) + + # **** test can line loopback **** + # for bus, gmlan in [(0, None), (1, False), (2, False), (1, True), (2, True)]: + for bus, gmlan in [(0, None), (1, None)]: + print("\ntest can", bus) + # flush + cans_echo = panda0.can_recv() + cans_loop = panda1.can_recv() + + if gmlan is not None: + panda0.set_gmlan(gmlan, bus) + panda1.set_gmlan(gmlan, bus) + + # send the characters + # pick addresses high enough to not conflict with honda code + at = random.randint(1024, 2000) + st = get_test_string()[0:8] + panda0.can_send(at, st, bus) + time.sleep(0.1) + + # check for receive + cans_echo = panda0.can_recv() + cans_loop = panda1.can_recv() + + print("Bus", bus, "echo", cans_echo, "loop", cans_loop) + + assert len(cans_echo) == 1 + assert len(cans_loop) == 1 + + assert cans_echo[0][0] == at + assert cans_loop[0][0] == at + + assert cans_echo[0][2] == st + assert cans_loop[0][2] == st + + assert cans_echo[0][3] == 0x80 | bus + if cans_loop[0][3] != bus: + print("EXPECTED %d GOT %d" % (bus, cans_loop[0][3])) + assert cans_loop[0][3] == bus + + print("CAN pass", bus, ho) + time.sleep(sleep_duration) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("-n", type=int, help="Number of test iterations to run") + parser.add_argument("-sleep", type=int, help="Sleep time between tests", default=0) + args = parser.parse_args() + + if args.n is None: + while True: + run_test(sleep_duration=args.sleep) + else: + for _ in range(args.n): + run_test(sleep_duration=args.sleep) diff --git a/panda/tests/usbprotocol/test.sh b/panda/tests/usbprotocol/test.sh new file mode 100644 index 0000000..8e3886d --- /dev/null +++ b/panda/tests/usbprotocol/test.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -e + +# Loops over all
HW_TYPEs, see board/boards/board_declarations.h +for hw_type in {0..7}; do + echo "Testing HW_TYPE: $hw_type" + HW_TYPE=$hw_type python -m unittest discover . +done diff --git a/panda/tests/usbprotocol/test_comms.py b/panda/tests/usbprotocol/test_comms.py new file mode 100644 index 0000000..c08551b --- /dev/null +++ b/panda/tests/usbprotocol/test_comms.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +import random +import unittest + +from panda import Panda, DLC_TO_LEN, USBPACKET_MAX_SIZE, pack_can_buffer, unpack_can_buffer +from panda.tests.libpanda import libpanda_py + +lpp = libpanda_py.libpanda + +CHUNK_SIZE = USBPACKET_MAX_SIZE +TX_QUEUES = (lpp.tx1_q, lpp.tx2_q, lpp.tx3_q, lpp.txgmlan_q) + + +def unpackage_can_msg(pkt): + dat_len = DLC_TO_LEN[pkt[0].data_len_code] + dat = bytes(pkt[0].data[0:dat_len]) + return pkt[0].addr, 0, dat, pkt[0].bus + + +def random_can_messages(n, bus=None): + msgs = [] + for _ in range(n): + if bus is None: + bus = random.randint(0, 3) + address = random.randint(1, (1 << 29) - 1) + data = bytes([random.getrandbits(8) for _ in range(DLC_TO_LEN[random.randrange(0, len(DLC_TO_LEN))])]) + msgs.append((address, 0, data, bus)) + return msgs + + +class TestPandaComms(unittest.TestCase): + def setUp(self): + lpp.comms_can_reset() + + def test_tx_queues(self): + for bus in range(4): + message = (0x100, 0, b"test", bus) + + can_pkt_tx = libpanda_py.make_CANPacket(message[0], message[3], message[2]) + can_pkt_rx = libpanda_py.ffi.new('CANPacket_t *') + + assert lpp.can_push(TX_QUEUES[bus], can_pkt_tx), "CAN push failed" + assert lpp.can_pop(TX_QUEUES[bus], can_pkt_rx), "CAN pop failed" + + assert unpackage_can_msg(can_pkt_rx) == message + + def test_comms_reset_rx(self): + # store some test messages in the queue + test_msg = (0x100, 0, b"test", 0) + for _ in range(100): + can_pkt_tx = libpanda_py.make_CANPacket(test_msg[0], test_msg[3], test_msg[2]) + lpp.can_push(lpp.rx_q, can_pkt_tx) + + # read a small chunk such that we have some overflow + TINY_CHUNK_SIZE = 6 + dat = libpanda_py.ffi.new(f"uint8_t[{TINY_CHUNK_SIZE}]") + rx_len = lpp.comms_can_read(dat, TINY_CHUNK_SIZE) + assert rx_len == TINY_CHUNK_SIZE, "comms_can_read returned too little data" + + _, overflow = unpack_can_buffer(bytes(dat)) + assert len(overflow) > 0, "overflow buffer should not be empty" + + # reset the comms to clear the overflow buffer on the panda side + lpp.comms_can_reset() + + # read a large chunk, which should now contain valid messages + LARGE_CHUNK_SIZE = 512 + dat = libpanda_py.ffi.new(f"uint8_t[{LARGE_CHUNK_SIZE}]") + rx_len = lpp.comms_can_read(dat, LARGE_CHUNK_SIZE) + assert rx_len == LARGE_CHUNK_SIZE, "comms_can_read returned too little data" + + msgs, _ = unpack_can_buffer(bytes(dat)) + assert len(msgs) > 0, "message buffer should not be empty" + for m in msgs: + assert m == test_msg, "message buffer should contain valid test messages" + + def test_comms_reset_tx(self): + # store some test messages in the queue + test_msg = (0x100, 0, b"test", 0) + packed = pack_can_buffer([test_msg for _ in range(100)]) + + # write a small chunk such that we have some overflow + TINY_CHUNK_SIZE = 6 + lpp.comms_can_write(packed[0][:TINY_CHUNK_SIZE], TINY_CHUNK_SIZE) + + # reset the comms to clear the overflow buffer on the panda side + lpp.comms_can_reset() + + # write a full valid chunk, which should now contain valid messages + lpp.comms_can_write(packed[1], len(packed[1])) + + # read the messages from the queue and make sure they're valid + queue_msgs = [] + pkt = libpanda_py.ffi.new('CANPacket_t 
*') + while lpp.can_pop(TX_QUEUES[0], pkt): + queue_msgs.append(unpackage_can_msg(pkt)) + + assert len(queue_msgs) > 0, "message buffer should not be empty" + for m in queue_msgs: + assert m == test_msg, "message buffer should contain valid test messages" + + + def test_can_send_usb(self): + lpp.set_safety_hooks(Panda.SAFETY_ALLOUTPUT, 0) + + for bus in range(3): + with self.subTest(bus=bus): + for _ in range(100): + msgs = random_can_messages(200, bus=bus) + packed = pack_can_buffer(msgs) + + # Simulate USB bulk chunks + for buf in packed: + for i in range(0, len(buf), CHUNK_SIZE): + chunk_len = min(CHUNK_SIZE, len(buf) - i) + lpp.comms_can_write(buf[i:i+chunk_len], chunk_len) + + # Check that they ended up in the right buffers + queue_msgs = [] + pkt = libpanda_py.ffi.new('CANPacket_t *') + while lpp.can_pop(TX_QUEUES[bus], pkt): + queue_msgs.append(unpackage_can_msg(pkt)) + + self.assertEqual(len(queue_msgs), len(msgs)) + self.assertEqual(queue_msgs, msgs) + + def test_can_receive_usb(self): + msgs = random_can_messages(50000) + packets = [libpanda_py.make_CANPacket(m[0], m[3], m[2]) for m in msgs] + + rx_msgs = [] + overflow_buf = b"" + while len(packets) > 0: + # Push into queue + while lpp.can_slots_empty(lpp.rx_q) > 0 and len(packets) > 0: + lpp.can_push(lpp.rx_q, packets.pop(0)) + + # Simulate USB bulk IN chunks + MAX_TRANSFER_SIZE = 16384 + dat = libpanda_py.ffi.new(f"uint8_t[{CHUNK_SIZE}]") + while True: + buf = b"" + while len(buf) < MAX_TRANSFER_SIZE: + max_size = min(CHUNK_SIZE, MAX_TRANSFER_SIZE - len(buf)) + rx_len = lpp.comms_can_read(dat, max_size) + buf += bytes(dat[0:rx_len]) + if rx_len < max_size: + break + + if len(buf) == 0: + break + unpacked_msgs, overflow_buf = unpack_can_buffer(overflow_buf + buf) + rx_msgs.extend(unpacked_msgs) + + self.assertEqual(len(rx_msgs), len(msgs)) + self.assertEqual(rx_msgs, msgs) + + +if __name__ == "__main__": + unittest.main() diff --git a/panda/tests/usbprotocol/test_pandalib.py b/panda/tests/usbprotocol/test_pandalib.py new file mode 100644 index 0000000..c03f246 --- /dev/null +++ b/panda/tests/usbprotocol/test_pandalib.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +import random +import unittest + +from panda import pack_can_buffer, unpack_can_buffer, DLC_TO_LEN + +class PandaTestPackUnpack(unittest.TestCase): + def test_panda_lib_pack_unpack(self): + overflow_buf = b'' + + to_pack = [] + for _ in range(10000): + address = random.randint(1, (1 << 29) - 1) + data = bytes([random.getrandbits(8) for _ in range(DLC_TO_LEN[random.randrange(0, len(DLC_TO_LEN))])]) + to_pack.append((address, 0, data, 0)) + + packed = pack_can_buffer(to_pack) + unpacked = [] + for dat in packed: + msgs, overflow_buf = unpack_can_buffer(overflow_buf + dat) + unpacked.extend(msgs) + + self.assertEqual(unpacked, to_pack) + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/car/tests/.gitignore b/selfdrive/car/tests/.gitignore new file mode 100644 index 0000000..192fb09 --- /dev/null +++ b/selfdrive/car/tests/.gitignore @@ -0,0 +1 @@ +*.bz2 diff --git a/selfdrive/car/tests/big_cars_test.sh b/selfdrive/car/tests/big_cars_test.sh new file mode 100644 index 0000000..af45c9c --- /dev/null +++ b/selfdrive/car/tests/big_cars_test.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +SCRIPT_DIR=$(dirname "$0") +BASEDIR=$(realpath "$SCRIPT_DIR/../../../") +cd $BASEDIR + +MAX_EXAMPLES=300 +INTERNAL_SEG_CNT=300 +FILEREADER_CACHE=1 +INTERNAL_SEG_LIST=selfdrive/car/tests/test_models_segs.txt + +cd selfdrive/car/tests && pytest test_models.py test_car_interfaces.py \ No 
newline at end of file diff --git a/selfdrive/car/tests/routes.py b/selfdrive/car/tests/routes.py new file mode 100644 index 0000000..a95c13e --- /dev/null +++ b/selfdrive/car/tests/routes.py @@ -0,0 +1,299 @@ +#!/usr/bin/env python3 +from typing import NamedTuple + +from openpilot.selfdrive.car.chrysler.values import CAR as CHRYSLER +from openpilot.selfdrive.car.gm.values import CAR as GM +from openpilot.selfdrive.car.ford.values import CAR as FORD +from openpilot.selfdrive.car.honda.values import CAR as HONDA +from openpilot.selfdrive.car.hyundai.values import CAR as HYUNDAI +from openpilot.selfdrive.car.nissan.values import CAR as NISSAN +from openpilot.selfdrive.car.mazda.values import CAR as MAZDA +from openpilot.selfdrive.car.subaru.values import CAR as SUBARU +from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA +from openpilot.selfdrive.car.volkswagen.values import CAR as VOLKSWAGEN +from openpilot.selfdrive.car.tesla.values import CAR as TESLA +from openpilot.selfdrive.car.body.values import CAR as COMMA + +# TODO: add routes for these cars +non_tested_cars = [ + FORD.F_150_MK14, + GM.CADILLAC_ATS, + GM.HOLDEN_ASTRA, + GM.MALIBU, + HYUNDAI.GENESIS_G90, + HONDA.ODYSSEY_CHN, + VOLKSWAGEN.CRAFTER_MK2, # need a route from an ACC-equipped Crafter + SUBARU.FORESTER_HYBRID, +] + + +class CarTestRoute(NamedTuple): + route: str + car_model: str | None + segment: int | None = None + + +routes = [ + CarTestRoute("efdf9af95e71cd84|2022-05-13--19-03-31", COMMA.BODY), + + CarTestRoute("0c94aa1e1296d7c6|2021-05-05--19-48-37", CHRYSLER.JEEP_GRAND_CHEROKEE), + CarTestRoute("91dfedae61d7bd75|2021-05-22--20-07-52", CHRYSLER.JEEP_GRAND_CHEROKEE_2019), + CarTestRoute("420a8e183f1aed48|2020-03-05--07-15-29", CHRYSLER.PACIFICA_2017_HYBRID), + CarTestRoute("43a685a66291579b|2021-05-27--19-47-29", CHRYSLER.PACIFICA_2018), + CarTestRoute("378472f830ee7395|2021-05-28--07-38-43", CHRYSLER.PACIFICA_2018_HYBRID), + CarTestRoute("8190c7275a24557b|2020-01-29--08-33-58", CHRYSLER.PACIFICA_2019_HYBRID), + CarTestRoute("3d84727705fecd04|2021-05-25--08-38-56", CHRYSLER.PACIFICA_2020), + CarTestRoute("221c253375af4ee9|2022-06-15--18-38-24", CHRYSLER.RAM_1500), + CarTestRoute("8fb5eabf914632ae|2022-08-04--17-28-53", CHRYSLER.RAM_HD, segment=6), + CarTestRoute("3379c85aeedc8285|2023-12-07--17-49-39", CHRYSLER.DODGE_DURANGO), + + CarTestRoute("54827bf84c38b14f|2023-01-25--14-14-11", FORD.BRONCO_SPORT_MK1), + CarTestRoute("f8eaaccd2a90aef8|2023-05-04--15-10-09", FORD.ESCAPE_MK4), + CarTestRoute("62241b0c7fea4589|2022-09-01--15-32-49", FORD.EXPLORER_MK6), + CarTestRoute("e886087f430e7fe7|2023-06-16--23-06-36", FORD.FOCUS_MK4), + CarTestRoute("bd37e43731e5964b|2023-04-30--10-42-26", FORD.MAVERICK_MK1), + CarTestRoute("112e4d6e0cad05e1|2023-11-14--08-21-43", FORD.F_150_LIGHTNING_MK1), + CarTestRoute("83a4e056c7072678|2023-11-13--16-51-33", FORD.MUSTANG_MACH_E_MK1), + #TestRoute("f1b4c567731f4a1b|2018-04-30--10-15-35", FORD.FUSION), + + CarTestRoute("7cc2a8365b4dd8a9|2018-12-02--12-10-44", GM.ACADIA), + CarTestRoute("aa20e335f61ba898|2019-02-05--16-59-04", GM.BUICK_REGAL), + CarTestRoute("75a6bcb9b8b40373|2023-03-11--22-47-33", GM.BUICK_LACROSSE), + CarTestRoute("e746f59bc96fd789|2024-01-31--22-25-58", GM.EQUINOX), + CarTestRoute("ef8f2185104d862e|2023-02-09--18-37-13", GM.ESCALADE), + CarTestRoute("46460f0da08e621e|2021-10-26--07-21-46", GM.ESCALADE_ESV), + CarTestRoute("168f8b3be57f66ae|2023-09-12--21-44-42", GM.ESCALADE_ESV_2019), + CarTestRoute("c950e28c26b5b168|2018-05-30--22-03-41", GM.VOLT), + 
CarTestRoute("f08912a233c1584f|2022-08-11--18-02-41", GM.BOLT_EUV, segment=1), + CarTestRoute("555d4087cf86aa91|2022-12-02--12-15-07", GM.BOLT_EUV, segment=14), # Bolt EV + CarTestRoute("38aa7da107d5d252|2022-08-15--16-01-12", GM.SILVERADO), + CarTestRoute("5085c761395d1fe6|2023-04-07--18-20-06", GM.TRAILBLAZER), + + CarTestRoute("0e7a2ba168465df5|2020-10-18--14-14-22", HONDA.ACURA_RDX_3G), + CarTestRoute("a74b011b32b51b56|2020-07-26--17-09-36", HONDA.CIVIC), + CarTestRoute("a859a044a447c2b0|2020-03-03--18-42-45", HONDA.CRV_EU), + CarTestRoute("68aac44ad69f838e|2021-05-18--20-40-52", HONDA.CRV), + CarTestRoute("14fed2e5fa0aa1a5|2021-05-25--14-59-42", HONDA.CRV_HYBRID), + CarTestRoute("52f3e9ae60c0d886|2021-05-23--15-59-43", HONDA.FIT), + CarTestRoute("2c4292a5cd10536c|2021-08-19--21-32-15", HONDA.FREED), + CarTestRoute("03be5f2fd5c508d1|2020-04-19--18-44-15", HONDA.HRV), + CarTestRoute("320098ff6c5e4730|2023-04-13--17-47-46", HONDA.HRV_3G), + CarTestRoute("917b074700869333|2021-05-24--20-40-20", HONDA.ACURA_ILX), + CarTestRoute("08a3deb07573f157|2020-03-06--16-11-19", HONDA.ACCORD), # 1.5T + CarTestRoute("1da5847ac2488106|2021-05-24--19-31-50", HONDA.ACCORD), # 2.0T + CarTestRoute("085ac1d942c35910|2021-03-25--20-11-15", HONDA.ACCORD), # 2021 with new style HUD msgs + CarTestRoute("07585b0da3c88459|2021-05-26--18-52-04", HONDA.ACCORD), # hybrid + CarTestRoute("f29e2b57a55e7ad5|2021-03-24--20-52-38", HONDA.ACCORD), # hybrid, 2021 with new style HUD msgs + CarTestRoute("1ad763dd22ef1a0e|2020-02-29--18-37-03", HONDA.CRV_5G), + CarTestRoute("0a96f86fcfe35964|2020-02-05--07-25-51", HONDA.ODYSSEY), + CarTestRoute("d83f36766f8012a5|2020-02-05--18-42-21", HONDA.CIVIC_BOSCH_DIESEL), + CarTestRoute("f0890d16a07a236b|2021-05-25--17-27-22", HONDA.INSIGHT), + CarTestRoute("07d37d27996096b6|2020-03-04--21-57-27", HONDA.PILOT), + CarTestRoute("684e8f96bd491a0e|2021-11-03--11-08-42", HONDA.PILOT), # Passport + CarTestRoute("0a78dfbacc8504ef|2020-03-04--13-29-55", HONDA.CIVIC_BOSCH), + CarTestRoute("f34a60d68d83b1e5|2020-10-06--14-35-55", HONDA.ACURA_RDX), + CarTestRoute("54fd8451b3974762|2021-04-01--14-50-10", HONDA.RIDGELINE), + CarTestRoute("2d5808fae0b38ac6|2021-09-01--17-14-11", HONDA.HONDA_E), + CarTestRoute("f44aa96ace22f34a|2021-12-22--06-22-31", HONDA.CIVIC_2022), + + CarTestRoute("87d7f06ade479c2e|2023-09-11--23-30-11", HYUNDAI.AZERA_6TH_GEN), + CarTestRoute("66189dd8ec7b50e6|2023-09-20--07-02-12", HYUNDAI.AZERA_HEV_6TH_GEN), + CarTestRoute("6fe86b4e410e4c37|2020-07-22--16-27-13", HYUNDAI.HYUNDAI_GENESIS), + CarTestRoute("b5d6dc830ad63071|2022-12-12--21-28-25", HYUNDAI.GENESIS_GV60_EV_1ST_GEN, segment=12), + CarTestRoute("70c5bec28ec8e345|2020-08-08--12-22-23", HYUNDAI.GENESIS_G70), + CarTestRoute("ca4de5b12321bd98|2022-10-18--21-15-59", HYUNDAI.GENESIS_GV70_1ST_GEN), + CarTestRoute("6b301bf83f10aa90|2020-11-22--16-45-07", HYUNDAI.GENESIS_G80), + CarTestRoute("0bbe367c98fa1538|2023-09-16--00-16-49", HYUNDAI.CUSTIN_1ST_GEN), + CarTestRoute("f0709d2bc6ca451f|2022-10-15--08-13-54", HYUNDAI.SANTA_CRUZ_1ST_GEN), + CarTestRoute("4dbd55df87507948|2022-03-01--09-45-38", HYUNDAI.SANTA_FE), + CarTestRoute("bf43d9df2b660eb0|2021-09-23--14-16-37", HYUNDAI.SANTA_FE_2022), + CarTestRoute("37398f32561a23ad|2021-11-18--00-11-35", HYUNDAI.SANTA_FE_HEV_2022), + CarTestRoute("656ac0d830792fcc|2021-12-28--14-45-56", HYUNDAI.SANTA_FE_PHEV_2022, segment=1), + CarTestRoute("de59124955b921d8|2023-06-24--00-12-50", HYUNDAI.KIA_CARNIVAL_4TH_GEN), + CarTestRoute("409c9409979a8abc|2023-07-11--09-06-44", 
HYUNDAI.KIA_CARNIVAL_4TH_GEN), # Chinese model + CarTestRoute("e0e98335f3ebc58f|2021-03-07--16-38-29", HYUNDAI.KIA_CEED), + CarTestRoute("7653b2bce7bcfdaa|2020-03-04--15-34-32", HYUNDAI.KIA_OPTIMA_G4), + CarTestRoute("018654717bc93d7d|2022-09-19--23-11-10", HYUNDAI.KIA_OPTIMA_G4_FL, segment=0), + CarTestRoute("f9716670b2481438|2023-08-23--14-49-50", HYUNDAI.KIA_OPTIMA_H), + CarTestRoute("6a42c1197b2a8179|2023-09-21--10-23-44", HYUNDAI.KIA_OPTIMA_H_G4_FL), + CarTestRoute("c75a59efa0ecd502|2021-03-11--20-52-55", HYUNDAI.KIA_SELTOS), + CarTestRoute("5b7c365c50084530|2020-04-15--16-13-24", HYUNDAI.SONATA), + CarTestRoute("b2a38c712dcf90bd|2020-05-18--18-12-48", HYUNDAI.SONATA_LF), + CarTestRoute("c344fd2492c7a9d2|2023-12-11--09-03-23", HYUNDAI.STARIA_4TH_GEN), + CarTestRoute("fb3fd42f0baaa2f8|2022-03-30--15-25-05", HYUNDAI.TUCSON), + CarTestRoute("db68bbe12250812c|2022-12-05--00-54-12", HYUNDAI.TUCSON_4TH_GEN), # 2023 + CarTestRoute("36e10531feea61a4|2022-07-25--13-37-42", HYUNDAI.TUCSON_4TH_GEN), # hybrid + CarTestRoute("5875672fc1d4bf57|2020-07-23--21-33-28", HYUNDAI.KIA_SORENTO), + CarTestRoute("1d0d000db3370fd0|2023-01-04--22-28-42", HYUNDAI.KIA_SORENTO_4TH_GEN, segment=5), + CarTestRoute("fc19648042eb6896|2023-08-16--11-43-27", HYUNDAI.KIA_SORENTO_HEV_4TH_GEN, segment=14), + CarTestRoute("628935d7d3e5f4f7|2022-11-30--01-12-46", HYUNDAI.KIA_SORENTO_HEV_4TH_GEN), # plug-in hybrid + CarTestRoute("9c917ba0d42ffe78|2020-04-17--12-43-19", HYUNDAI.PALISADE), + CarTestRoute("05a8f0197fdac372|2022-10-19--14-14-09", HYUNDAI.IONIQ_5), # HDA2 + CarTestRoute("eb4eae1476647463|2023-08-26--18-07-04", HYUNDAI.IONIQ_6, segment=6), # HDA2 + CarTestRoute("3f29334d6134fcd4|2022-03-30--22-00-50", HYUNDAI.IONIQ_PHEV_2019), + CarTestRoute("fa8db5869167f821|2021-06-10--22-50-10", HYUNDAI.IONIQ_PHEV), + CarTestRoute("e1107f9d04dfb1e2|2023-09-05--22-32-12", HYUNDAI.IONIQ_PHEV), # openpilot longitudinal enabled + CarTestRoute("2c5cf2dd6102e5da|2020-12-17--16-06-44", HYUNDAI.IONIQ_EV_2020), + CarTestRoute("610ebb9faaad6b43|2020-06-13--15-28-36", HYUNDAI.IONIQ_EV_LTD), + CarTestRoute("2c5cf2dd6102e5da|2020-06-26--16-00-08", HYUNDAI.IONIQ), + CarTestRoute("012c95f06918eca4|2023-01-15--11-19-36", HYUNDAI.IONIQ), # openpilot longitudinal enabled + CarTestRoute("ab59fe909f626921|2021-10-18--18-34-28", HYUNDAI.IONIQ_HEV_2022), + CarTestRoute("22d955b2cd499c22|2020-08-10--19-58-21", HYUNDAI.KONA), + CarTestRoute("efc48acf44b1e64d|2021-05-28--21-05-04", HYUNDAI.KONA_EV), + CarTestRoute("f90d3cd06caeb6fa|2023-09-06--17-15-47", HYUNDAI.KONA_EV), # openpilot longitudinal enabled + CarTestRoute("ff973b941a69366f|2022-07-28--22-01-19", HYUNDAI.KONA_EV_2022, segment=11), + CarTestRoute("1618132d68afc876|2023-08-27--09-32-14", HYUNDAI.KONA_EV_2ND_GEN, segment=13), + CarTestRoute("49f3c13141b6bc87|2021-07-28--08-05-13", HYUNDAI.KONA_HEV), + CarTestRoute("5dddcbca6eb66c62|2020-07-26--13-24-19", HYUNDAI.KIA_STINGER), + CarTestRoute("5b50b883a4259afb|2022-11-09--15-00-42", HYUNDAI.KIA_STINGER_2022), + CarTestRoute("d624b3d19adce635|2020-08-01--14-59-12", HYUNDAI.VELOSTER), + CarTestRoute("d545129f3ca90f28|2022-10-19--09-22-54", HYUNDAI.KIA_EV6), # HDA2 + CarTestRoute("68d6a96e703c00c9|2022-09-10--16-09-39", HYUNDAI.KIA_EV6), # HDA1 + CarTestRoute("9b25e8c1484a1b67|2023-04-13--10-41-45", HYUNDAI.KIA_EV6), + CarTestRoute("007d5e4ad9f86d13|2021-09-30--15-09-23", HYUNDAI.KIA_K5_2021), + CarTestRoute("c58dfc9fc16590e0|2023-01-14--13-51-48", HYUNDAI.KIA_K5_HEV_2020), + CarTestRoute("78ad5150de133637|2023-09-13--16-15-57", 
HYUNDAI.KIA_K8_HEV_1ST_GEN), + CarTestRoute("50c6c9b85fd1ff03|2020-10-26--17-56-06", HYUNDAI.KIA_NIRO_EV), + CarTestRoute("b153671049a867b3|2023-04-05--10-00-30", HYUNDAI.KIA_NIRO_EV_2ND_GEN), + CarTestRoute("173219cf50acdd7b|2021-07-05--10-27-41", HYUNDAI.KIA_NIRO_PHEV), + CarTestRoute("23349923ba5c4e3b|2023-12-02--08-51-54", HYUNDAI.KIA_NIRO_PHEV_2022), + CarTestRoute("34a875f29f69841a|2021-07-29--13-02-09", HYUNDAI.KIA_NIRO_HEV_2021), + CarTestRoute("db04d2c63990e3ba|2023-02-08--16-52-39", HYUNDAI.KIA_NIRO_HEV_2ND_GEN), + CarTestRoute("50a2212c41f65c7b|2021-05-24--16-22-06", HYUNDAI.KIA_FORTE), + CarTestRoute("192283cdbb7a58c2|2022-10-15--01-43-18", HYUNDAI.KIA_SPORTAGE_5TH_GEN), + CarTestRoute("09559f1fcaed4704|2023-11-16--02-24-57", HYUNDAI.KIA_SPORTAGE_5TH_GEN), # openpilot longitudinal + CarTestRoute("b3537035ffe6a7d6|2022-10-17--15-23-49", HYUNDAI.KIA_SPORTAGE_5TH_GEN), # hybrid + CarTestRoute("c5ac319aa9583f83|2021-06-01--18-18-31", HYUNDAI.ELANTRA), + CarTestRoute("734ef96182ddf940|2022-10-02--16-41-44", HYUNDAI.ELANTRA_GT_I30), + CarTestRoute("82e9cdd3f43bf83e|2021-05-15--02-42-51", HYUNDAI.ELANTRA_2021), + CarTestRoute("715ac05b594e9c59|2021-06-20--16-21-07", HYUNDAI.ELANTRA_HEV_2021), + CarTestRoute("7120aa90bbc3add7|2021-08-02--07-12-31", HYUNDAI.SONATA_HYBRID), + CarTestRoute("715ac05b594e9c59|2021-10-27--23-24-56", HYUNDAI.GENESIS_G70_2020), + CarTestRoute("6b0d44d22df18134|2023-05-06--10-36-55", HYUNDAI.GENESIS_GV80), + + CarTestRoute("00c829b1b7613dea|2021-06-24--09-10-10", TOYOTA.ALPHARD_TSS2), + CarTestRoute("912119ebd02c7a42|2022-03-19--07-24-50", TOYOTA.ALPHARD_TSS2), # hybrid + CarTestRoute("000cf3730200c71c|2021-05-24--10-42-05", TOYOTA.AVALON), + CarTestRoute("0bb588106852abb7|2021-05-26--12-22-01", TOYOTA.AVALON_2019), + CarTestRoute("87bef2930af86592|2021-05-30--09-40-54", TOYOTA.AVALON_2019), # hybrid + CarTestRoute("e9966711cfb04ce3|2022-01-11--07-59-43", TOYOTA.AVALON_TSS2), + CarTestRoute("eca1080a91720a54|2022-03-17--13-32-29", TOYOTA.AVALON_TSS2), # hybrid + CarTestRoute("6cdecc4728d4af37|2020-02-23--15-44-18", TOYOTA.CAMRY), + CarTestRoute("2f37c007683e85ba|2023-09-02--14-39-44", TOYOTA.CAMRY), # openpilot longitudinal, with radar CAN filter + CarTestRoute("54034823d30962f5|2021-05-24--06-37-34", TOYOTA.CAMRY), # hybrid + CarTestRoute("3456ad0cd7281b24|2020-12-13--17-45-56", TOYOTA.CAMRY_TSS2), + CarTestRoute("ffccc77938ddbc44|2021-01-04--16-55-41", TOYOTA.CAMRY_TSS2), # hybrid + CarTestRoute("4e45c89c38e8ec4d|2021-05-02--02-49-28", TOYOTA.COROLLA), + CarTestRoute("5f5afb36036506e4|2019-05-14--02-09-54", TOYOTA.COROLLA_TSS2), + CarTestRoute("5ceff72287a5c86c|2019-10-19--10-59-02", TOYOTA.COROLLA_TSS2), # hybrid + CarTestRoute("d2525c22173da58b|2021-04-25--16-47-04", TOYOTA.PRIUS), + CarTestRoute("b14c5b4742e6fc85|2020-07-28--19-50-11", TOYOTA.RAV4), + CarTestRoute("32a7df20486b0f70|2020-02-06--16-06-50", TOYOTA.RAV4H), + CarTestRoute("cdf2f7de565d40ae|2019-04-25--03-53-41", TOYOTA.RAV4_TSS2), + CarTestRoute("a5c341bb250ca2f0|2022-05-18--16-05-17", TOYOTA.RAV4_TSS2_2022), + CarTestRoute("ad5a3fa719bc2f83|2023-10-17--19-48-42", TOYOTA.RAV4_TSS2_2023), + CarTestRoute("7e34a988419b5307|2019-12-18--19-13-30", TOYOTA.RAV4_TSS2), # hybrid + CarTestRoute("2475fb3eb2ffcc2e|2022-04-29--12-46-23", TOYOTA.RAV4_TSS2_2022), # hybrid + CarTestRoute("7a31f030957b9c85|2023-04-01--14-12-51", TOYOTA.LEXUS_ES), + CarTestRoute("37041c500fd30100|2020-12-30--12-17-24", TOYOTA.LEXUS_ES), # hybrid + CarTestRoute("e6a24be49a6cd46e|2019-10-29--10-52-42", TOYOTA.LEXUS_ES_TSS2), + 
CarTestRoute("f49e8041283f2939|2019-05-30--11-51-51", TOYOTA.LEXUS_ES_TSS2), # hybrid + CarTestRoute("da23c367491f53e2|2021-05-21--09-09-11", TOYOTA.LEXUS_CTH, segment=3), + CarTestRoute("32696cea52831b02|2021-11-19--18-13-30", TOYOTA.LEXUS_RC), + CarTestRoute("ab9b64a5e5960cba|2023-10-24--17-32-08", TOYOTA.LEXUS_GS_F), + CarTestRoute("886fcd8408d570e9|2020-01-29--02-18-55", TOYOTA.LEXUS_RX), + CarTestRoute("d27ad752e9b08d4f|2021-05-26--19-39-51", TOYOTA.LEXUS_RX), # hybrid + CarTestRoute("01b22eb2ed121565|2020-02-02--11-25-51", TOYOTA.LEXUS_RX_TSS2), + CarTestRoute("b74758c690a49668|2020-05-20--15-58-57", TOYOTA.LEXUS_RX_TSS2), # hybrid + CarTestRoute("964c09eb11ca8089|2020-11-03--22-04-00", TOYOTA.LEXUS_NX), + CarTestRoute("ec429c0f37564e3c|2020-02-01--17-28-12", TOYOTA.LEXUS_NX), # hybrid + CarTestRoute("3fd5305f8b6ca765|2021-04-28--19-26-49", TOYOTA.LEXUS_NX_TSS2), + CarTestRoute("09ae96064ed85a14|2022-06-09--12-22-31", TOYOTA.LEXUS_NX_TSS2), # hybrid + CarTestRoute("4765fbbf59e3cd88|2024-02-06--17-45-32", TOYOTA.LEXUS_LC_TSS2), + CarTestRoute("0a302ffddbb3e3d3|2020-02-08--16-19-08", TOYOTA.HIGHLANDER_TSS2), + CarTestRoute("437e4d2402abf524|2021-05-25--07-58-50", TOYOTA.HIGHLANDER_TSS2), # hybrid + CarTestRoute("3183cd9b021e89ce|2021-05-25--10-34-44", TOYOTA.HIGHLANDER), + CarTestRoute("80d16a262e33d57f|2021-05-23--20-01-43", TOYOTA.HIGHLANDER), # hybrid + CarTestRoute("eb6acd681135480d|2019-06-20--20-00-00", TOYOTA.SIENNA), + CarTestRoute("2e07163a1ba9a780|2019-08-25--13-15-13", TOYOTA.LEXUS_IS), + CarTestRoute("649bf2997ada6e3a|2023-08-08--18-04-22", TOYOTA.LEXUS_IS_TSS2), + CarTestRoute("0a0de17a1e6a2d15|2020-09-21--21-24-41", TOYOTA.PRIUS_TSS2), + CarTestRoute("9b36accae406390e|2021-03-30--10-41-38", TOYOTA.MIRAI), + CarTestRoute("cd9cff4b0b26c435|2021-05-13--15-12-39", TOYOTA.CHR), + CarTestRoute("57858ede0369a261|2021-05-18--20-34-20", TOYOTA.CHR), # hybrid + CarTestRoute("ea8fbe72b96a185c|2023-02-08--15-11-46", TOYOTA.CHR_TSS2), + CarTestRoute("ea8fbe72b96a185c|2023-02-22--09-20-34", TOYOTA.CHR_TSS2), # openpilot longitudinal, with smartDSU + CarTestRoute("6719965b0e1d1737|2023-02-09--22-44-05", TOYOTA.CHR_TSS2), # hybrid + CarTestRoute("6719965b0e1d1737|2023-08-29--06-40-05", TOYOTA.CHR_TSS2), # hybrid, openpilot longitudinal, radar disabled + CarTestRoute("14623aae37e549f3|2021-10-24--01-20-49", TOYOTA.PRIUS_V), + + CarTestRoute("202c40641158a6e5|2021-09-21--09-43-24", VOLKSWAGEN.ARTEON_MK1), + CarTestRoute("2c68dda277d887ac|2021-05-11--15-22-20", VOLKSWAGEN.ATLAS_MK1), + #CarTestRoute("ffcd23abbbd02219|2024-02-28--14-59-38", VOLKSWAGEN.CADDY_MK3), + CarTestRoute("cae14e88932eb364|2021-03-26--14-43-28", VOLKSWAGEN.GOLF_MK7), # Stock ACC + CarTestRoute("3cfdec54aa035f3f|2022-10-13--14-58-58", VOLKSWAGEN.GOLF_MK7), # openpilot longitudinal + CarTestRoute("58a7d3b707987d65|2021-03-25--17-26-37", VOLKSWAGEN.JETTA_MK7), + CarTestRoute("4d134e099430fba2|2021-03-26--00-26-06", VOLKSWAGEN.PASSAT_MK8), + CarTestRoute("3cfdec54aa035f3f|2022-07-19--23-45-10", VOLKSWAGEN.PASSAT_NMS), + CarTestRoute("0cd0b7f7e31a3853|2021-11-03--19-30-22", VOLKSWAGEN.POLO_MK6), + CarTestRoute("064d1816e448f8eb|2022-09-29--15-32-34", VOLKSWAGEN.SHARAN_MK2), + CarTestRoute("7d82b2f3a9115f1f|2021-10-21--15-39-42", VOLKSWAGEN.TAOS_MK1), + CarTestRoute("2744c89a8dda9a51|2021-07-24--21-28-06", VOLKSWAGEN.TCROSS_MK1), + CarTestRoute("2cef8a0b898f331a|2021-03-25--20-13-57", VOLKSWAGEN.TIGUAN_MK2), + CarTestRoute("a589dcc642fdb10a|2021-06-14--20-54-26", VOLKSWAGEN.TOURAN_MK2), + 
CarTestRoute("a459f4556782eba1|2021-09-19--09-48-00", VOLKSWAGEN.TRANSPORTER_T61), + CarTestRoute("0cd0b7f7e31a3853|2021-11-18--00-38-32", VOLKSWAGEN.TROC_MK1), + CarTestRoute("07667b885add75fd|2021-01-23--19-48-42", VOLKSWAGEN.AUDI_A3_MK3), + CarTestRoute("6c6b466346192818|2021-06-06--14-17-47", VOLKSWAGEN.AUDI_Q2_MK1), + CarTestRoute("0cd0b7f7e31a3853|2021-12-03--03-12-05", VOLKSWAGEN.AUDI_Q3_MK2), + CarTestRoute("8f205bdd11bcbb65|2021-03-26--01-00-17", VOLKSWAGEN.SEAT_ATECA_MK1), + CarTestRoute("fc6b6c9a3471c846|2021-05-27--13-39-56", VOLKSWAGEN.SEAT_LEON_MK3), + CarTestRoute("0bbe367c98fa1538|2023-03-04--17-46-11", VOLKSWAGEN.SKODA_FABIA_MK4), + CarTestRoute("12d6ae3057c04b0d|2021-09-15--00-04-07", VOLKSWAGEN.SKODA_KAMIQ_MK1), + CarTestRoute("12d6ae3057c04b0d|2021-09-04--21-21-21", VOLKSWAGEN.SKODA_KAROQ_MK1), + CarTestRoute("90434ff5d7c8d603|2021-03-15--12-07-31", VOLKSWAGEN.SKODA_KODIAQ_MK1), + CarTestRoute("66e5edc3a16459c5|2021-05-25--19-00-29", VOLKSWAGEN.SKODA_OCTAVIA_MK3), + CarTestRoute("026b6d18fba6417f|2021-03-26--09-17-04", VOLKSWAGEN.SKODA_SCALA_MK1), + CarTestRoute("b2e9858e29db492b|2021-03-26--16-58-42", VOLKSWAGEN.SKODA_SUPERB_MK3), + + CarTestRoute("3c8f0c502e119c1c|2020-06-30--12-58-02", SUBARU.ASCENT), + CarTestRoute("c321c6b697c5a5ff|2020-06-23--11-04-33", SUBARU.FORESTER), + CarTestRoute("791340bc01ed993d|2019-03-10--16-28-08", SUBARU.IMPREZA), + CarTestRoute("8bf7e79a3ce64055|2021-05-24--09-36-27", SUBARU.IMPREZA_2020), + CarTestRoute("8de015561e1ea4a0|2023-08-29--17-08-31", SUBARU.IMPREZA), # openpilot longitudinal + # CarTestRoute("c3d1ccb52f5f9d65|2023-07-22--01-23-20", SUBARU.OUTBACK, segment=9), # gen2 longitudinal, eyesight disabled + CarTestRoute("1bbe6bf2d62f58a8|2022-07-14--17-11-43", SUBARU.OUTBACK, segment=10), + CarTestRoute("c56e69bbc74b8fad|2022-08-18--09-43-51", SUBARU.LEGACY, segment=3), + CarTestRoute("f4e3a0c511a076f4|2022-08-04--16-16-48", SUBARU.CROSSTREK_HYBRID, segment=2), + CarTestRoute("7fd1e4f3a33c1673|2022-12-04--15-09-53", SUBARU.FORESTER_2022, segment=4), + CarTestRoute("f3b34c0d2632aa83|2023-07-23--20-43-25", SUBARU.OUTBACK_2023, segment=7), + CarTestRoute("99437cef6d5ff2ee|2023-03-13--21-21-38", SUBARU.ASCENT_2023, segment=7), + # Pre-global, dashcam + CarTestRoute("95441c38ae8c130e|2020-06-08--12-10-17", SUBARU.FORESTER_PREGLOBAL), + CarTestRoute("df5ca7660000fba8|2020-06-16--17-37-19", SUBARU.LEGACY_PREGLOBAL), + CarTestRoute("5ab784f361e19b78|2020-06-08--16-30-41", SUBARU.OUTBACK_PREGLOBAL), + CarTestRoute("e19eb5d5353b1ac1|2020-08-09--14-37-56", SUBARU.OUTBACK_PREGLOBAL_2018), + + CarTestRoute("fbbfa6af821552b9|2020-03-03--08-09-43", NISSAN.XTRAIL), + CarTestRoute("5b7c365c50084530|2020-03-25--22-10-13", NISSAN.LEAF), + CarTestRoute("22c3dcce2dd627eb|2020-12-30--16-38-48", NISSAN.LEAF_IC), + CarTestRoute("059ab9162e23198e|2020-05-30--09-41-01", NISSAN.ROGUE), + CarTestRoute("b72d3ec617c0a90f|2020-12-11--15-38-17", NISSAN.ALTIMA), + + CarTestRoute("32a319f057902bb3|2020-04-27--15-18-58", MAZDA.CX5), + CarTestRoute("10b5a4b380434151|2020-08-26--17-11-45", MAZDA.CX9), + CarTestRoute("74f1038827005090|2020-08-26--20-05-50", MAZDA.MAZDA3), + CarTestRoute("fb53c640f499b73d|2021-06-01--04-17-56", MAZDA.MAZDA6), + CarTestRoute("f6d5b1a9d7a1c92e|2021-07-08--06-56-59", MAZDA.CX9_2021), + CarTestRoute("a4af1602d8e668ac|2022-02-03--12-17-07", MAZDA.CX5_2022), + + CarTestRoute("6c14ee12b74823ce|2021-06-30--11-49-02", TESLA.AP1_MODELS), + CarTestRoute("bb50caf5f0945ab1|2021-06-19--17-20-18", TESLA.AP2_MODELS), + 
#CarTestRoute("66c1699b7697267d/2024-03-03--13-09-53", TESLA.MODELS_RAVEN), + + # Segments that test specific issues + # Controls mismatch due to interceptor threshold + CarTestRoute("cfb32f0fb91b173b|2022-04-06--14-54-45", HONDA.CIVIC, segment=21), + # Controls mismatch due to standstill threshold + CarTestRoute("bec2dcfde6a64235|2022-04-08--14-21-32", HONDA.CRV_HYBRID, segment=22), +] diff --git a/selfdrive/car/tests/test_can_fingerprint.py b/selfdrive/car/tests/test_can_fingerprint.py new file mode 100644 index 0000000..63621b4 --- /dev/null +++ b/selfdrive/car/tests/test_can_fingerprint.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 +from parameterized import parameterized +import unittest + +from cereal import log, messaging +from openpilot.selfdrive.car.car_helpers import FRAME_FINGERPRINT, can_fingerprint +from openpilot.selfdrive.car.fingerprints import _FINGERPRINTS as FINGERPRINTS + + +class TestCanFingerprint(unittest.TestCase): + @parameterized.expand(list(FINGERPRINTS.items())) + def test_can_fingerprint(self, car_model, fingerprints): + """Tests online fingerprinting function on offline fingerprints""" + + for fingerprint in fingerprints: # can have multiple fingerprints for each platform + can = messaging.new_message('can', 1) + can.can = [log.CanData(address=address, dat=b'\x00' * length, src=src) + for address, length in fingerprint.items() for src in (0, 1)] + + fingerprint_iter = iter([can]) + empty_can = messaging.new_message('can', 0) + car_fingerprint, finger = can_fingerprint(lambda: next(fingerprint_iter, empty_can)) # noqa: B023 + + self.assertEqual(car_fingerprint, car_model) + self.assertEqual(finger[0], fingerprint) + self.assertEqual(finger[1], fingerprint) + self.assertEqual(finger[2], {}) + + def test_timing(self): + # just pick any CAN fingerprinting car + car_model = 'CHEVROLET BOLT EUV 2022' + fingerprint = FINGERPRINTS[car_model][0] + + cases = [] + + # case 1 - one match, make sure we keep going for 100 frames + can = messaging.new_message('can', 1) + can.can = [log.CanData(address=address, dat=b'\x00' * length, src=src) + for address, length in fingerprint.items() for src in (0, 1)] + cases.append((FRAME_FINGERPRINT, car_model, can)) + + # case 2 - no matches, make sure we keep going for 100 frames + can = messaging.new_message('can', 1) + can.can = [log.CanData(address=1, dat=b'\x00' * 1, src=src) for src in (0, 1)] # uncommon address + cases.append((FRAME_FINGERPRINT, None, can)) + + # case 3 - multiple matches, make sure we keep going for 200 frames to try to eliminate some + can = messaging.new_message('can', 1) + can.can = [log.CanData(address=2016, dat=b'\x00' * 8, src=src) for src in (0, 1)] # common address + cases.append((FRAME_FINGERPRINT * 2, None, can)) + + for expected_frames, car_model, can in cases: + with self.subTest(expected_frames=expected_frames, car_model=car_model): + frames = 0 + + def test(): + nonlocal frames + frames += 1 + return can # noqa: B023 + + car_fingerprint, _ = can_fingerprint(test) + self.assertEqual(car_fingerprint, car_model) + self.assertEqual(frames, expected_frames + 2) # TODO: fix extra frames + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/car/tests/test_car_interfaces.py b/selfdrive/car/tests/test_car_interfaces.py index a454f61..02a8d60 100755 --- a/selfdrive/car/tests/test_car_interfaces.py +++ b/selfdrive/car/tests/test_car_interfaces.py @@ -22,7 +22,7 @@ from openpilot.selfdrive.test.fuzzy_generation import DrawType, FuzzyGenerator ALL_ECUS = list({ecu for ecus in 
FW_VERSIONS.values() for ecu in ecus.keys()}) -MAX_EXAMPLES = int(os.environ.get('MAX_EXAMPLES', '20')) +MAX_EXAMPLES = int(os.environ.get('MAX_EXAMPLES', '40')) def get_fuzzy_car_interface_args(draw: DrawType) -> dict: diff --git a/selfdrive/car/tests/test_docs.py b/selfdrive/car/tests/test_docs.py new file mode 100644 index 0000000..0ee35dd --- /dev/null +++ b/selfdrive/car/tests/test_docs.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 +from collections import defaultdict +import os +import re +import unittest + +from openpilot.common.basedir import BASEDIR +from openpilot.selfdrive.car.car_helpers import interfaces +from openpilot.selfdrive.car.docs import CARS_MD_OUT, CARS_MD_TEMPLATE, generate_cars_md, get_all_car_info +from openpilot.selfdrive.car.docs_definitions import Cable, Column, PartType, Star +from openpilot.selfdrive.car.honda.values import CAR as HONDA +from openpilot.selfdrive.car.values import PLATFORMS +from openpilot.selfdrive.debug.dump_car_info import dump_car_info +from openpilot.selfdrive.debug.print_docs_diff import print_car_info_diff + + +class TestCarDocs(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.all_cars = get_all_car_info() + + def test_generator(self): + generated_cars_md = generate_cars_md(self.all_cars, CARS_MD_TEMPLATE) + with open(CARS_MD_OUT) as f: + current_cars_md = f.read() + + self.assertEqual(generated_cars_md, current_cars_md, + "Run selfdrive/car/docs.py to update the compatibility documentation") + + def test_docs_diff(self): + dump_path = os.path.join(BASEDIR, "selfdrive", "car", "tests", "cars_dump") + dump_car_info(dump_path) + print_car_info_diff(dump_path) + os.remove(dump_path) + + def test_duplicate_years(self): + make_model_years = defaultdict(list) + for car in self.all_cars: + with self.subTest(car_info_name=car.name): + make_model = (car.make, car.model) + for year in car.year_list: + self.assertNotIn(year, make_model_years[make_model], f"{car.name}: Duplicate model year") + make_model_years[make_model].append(year) + + def test_missing_car_info(self): + all_car_info_platforms = [name for name, config in PLATFORMS.items()] + for platform in sorted(interfaces.keys()): + with self.subTest(platform=platform): + self.assertTrue(platform in all_car_info_platforms, f"Platform: {platform} doesn't have a CarInfo entry") + + def test_naming_conventions(self): + # Asserts market-standard car naming conventions by brand + for car in self.all_cars: + with self.subTest(car=car): + tokens = car.model.lower().split(" ") + if car.car_name == "hyundai": + self.assertNotIn("phev", tokens, "Use `Plug-in Hybrid`") + self.assertNotIn("hev", tokens, "Use `Hybrid`") + if "plug-in hybrid" in car.model.lower(): + self.assertIn("Plug-in Hybrid", car.model, "Use correct capitalization") + if car.make != "Kia": + self.assertNotIn("ev", tokens, "Use `Electric`") + elif car.car_name == "toyota": + if "rav4" in tokens: + self.assertIn("RAV4", car.model, "Use correct capitalization") + + def test_torque_star(self): + # Asserts brand-specific assumptions around steering torque star + for car in self.all_cars: + with self.subTest(car=car): + # honda sanity check, it's the definition of a no torque star + if car.car_fingerprint in (HONDA.ACCORD, HONDA.CIVIC, HONDA.CRV, HONDA.ODYSSEY, HONDA.PILOT): + self.assertEqual(car.row[Column.STEERING_TORQUE], Star.EMPTY, f"{car.name} has full torque star") + elif car.car_name in ("toyota", "hyundai"): + self.assertNotEqual(car.row[Column.STEERING_TORQUE], Star.EMPTY, f"{car.name} has no torque star") + + def 
test_year_format(self): + for car in self.all_cars: + with self.subTest(car=car): + self.assertIsNone(re.search(r"\d{4}-\d{4}", car.name), f"Format years correctly: {car.name}") + + def test_harnesses(self): + for car in self.all_cars: + with self.subTest(car=car): + if car.name == "comma body": + raise unittest.SkipTest + + car_part_type = [p.part_type for p in car.car_parts.all_parts()] + car_parts = list(car.car_parts.all_parts()) + self.assertTrue(len(car_parts) > 0, f"Need to specify car parts: {car.name}") + self.assertTrue(car_part_type.count(PartType.connector) == 1, f"Need to specify one harness connector: {car.name}") + self.assertTrue(car_part_type.count(PartType.mount) == 1, f"Need to specify one mount: {car.name}") + self.assertTrue(Cable.right_angle_obd_c_cable_1_5ft in car_parts, f"Need to specify a right angle OBD-C cable (1.5ft): {car.name}") + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/car/tests/test_fingerprints.py b/selfdrive/car/tests/test_fingerprints.py new file mode 100644 index 0000000..34f30bc --- /dev/null +++ b/selfdrive/car/tests/test_fingerprints.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 +import os +import sys + +from openpilot.common.basedir import BASEDIR + +# messages reserved for CAN based ignition (see can_ignition_hook function in panda/board/drivers/can) +# (addr, len) +CAN_IGNITION_MSGS = { + 'gm': [(0x1F1, 8), (0x160, 5)], + #'tesla' : [(0x348, 8)], +} + +def _get_fingerprints(): + # read all the folders in selfdrive/car and return a dict where: + # - keys are all the car names for which we have a fingerprint dict + # - values are dicts of fingerprints for each trim + fingerprints = {} + for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]: + car_name = car_folder.split('/')[-1] + try: + fingerprints[car_name] = __import__(f'selfdrive.car.{car_name}.values', fromlist=['FINGERPRINTS']).FINGERPRINTS + except (ImportError, OSError, AttributeError): + pass + + return fingerprints + + +def check_fingerprint_consistency(f1, f2): + # return false if it finds a fingerprint fully included in another + # max message worth checking is 1800, as above that they usually come too infrequently and are not + # usable for fingerprinting + + max_msg = 1800 + + is_f1_in_f2 = True + for k in f1: + if (k not in f2 or f1[k] != f2[k]) and k < max_msg: + is_f1_in_f2 = False + + is_f2_in_f1 = True + for k in f2: + if (k not in f1 or f2[k] != f1[k]) and k < max_msg: + is_f2_in_f1 = False + + return not is_f1_in_f2 and not is_f2_in_f1 + + +def check_can_ignition_conflicts(fingerprints, brands): + # loops through all the fingerprints and exits if CAN ignition dedicated messages + # are found in unexpected fingerprints + + for brand_can, msgs_can in CAN_IGNITION_MSGS.items(): + for i, f in enumerate(fingerprints): + for msg_can in msgs_can: + if brand_can != brands[i] and msg_can[0] in f and msg_can[1] == f[msg_can[0]]: + print("CAN ignition dedicated msg %d with len %d found in %s fingerprints!"
% (msg_can[0], msg_can[1], brands[i])) + print("TEST FAILED") + sys.exit(1) + + + +if __name__ == "__main__": + fingerprints = _get_fingerprints() + + fingerprints_flat: list[dict] = [] + car_names = [] + brand_names = [] + for brand in fingerprints: + for car in fingerprints[brand]: + fingerprints_flat += fingerprints[brand][car] + for _ in range(len(fingerprints[brand][car])): + car_names.append(car) + brand_names.append(brand) + + # first check if CAN ignition specific messages are unexpectedly included in other fingerprints + check_can_ignition_conflicts(fingerprints_flat, brand_names) + + valid = True + for idx1, f1 in enumerate(fingerprints_flat): + for idx2, f2 in enumerate(fingerprints_flat): + if idx1 < idx2 and not check_fingerprint_consistency(f1, f2): + valid = False + print(f"Those two fingerprints are inconsistent {car_names[idx1]} {car_names[idx2]}") + print("") + print(', '.join("%d: %d" % v for v in sorted(f1.items()))) + print("") + print(', '.join("%d: %d" % v for v in sorted(f2.items()))) + print("") + + print(f"Found {len(fingerprints_flat)} individual fingerprints") + if not valid or len(fingerprints_flat) == 0: + print("TEST FAILED") + sys.exit(1) + else: + print("TEST SUCCESSFUL") diff --git a/selfdrive/car/tests/test_fw_fingerprint.py b/selfdrive/car/tests/test_fw_fingerprint.py new file mode 100644 index 0000000..b9eadc8 --- /dev/null +++ b/selfdrive/car/tests/test_fw_fingerprint.py @@ -0,0 +1,313 @@ +#!/usr/bin/env python3 +import random +import time +import unittest +from collections import defaultdict +from parameterized import parameterized +from unittest import mock + +from cereal import car +from openpilot.selfdrive.car.car_helpers import interfaces +from openpilot.selfdrive.car.fingerprints import FW_VERSIONS +from openpilot.selfdrive.car.fw_versions import FW_QUERY_CONFIGS, FUZZY_EXCLUDE_ECUS, VERSIONS, build_fw_dict, \ + match_fw_to_car, get_brand_ecu_matches, get_fw_versions, get_present_ecus +from openpilot.selfdrive.car.vin import get_vin + +CarFw = car.CarParams.CarFw +Ecu = car.CarParams.Ecu + +ECU_NAME = {v: k for k, v in Ecu.schema.enumerants.items()} + + +class FakeSocket: + def receive(self, non_blocking=False): + pass + + def send(self, msg): + pass + + +class TestFwFingerprint(unittest.TestCase): + def assertFingerprints(self, candidates, expected): + candidates = list(candidates) + self.assertEqual(len(candidates), 1, f"got more than one candidate: {candidates}") + self.assertEqual(candidates[0], expected) + + @parameterized.expand([(b, c, e[c], n) for b, e in VERSIONS.items() for c in e for n in (True, False)]) + def test_exact_match(self, brand, car_model, ecus, test_non_essential): + config = FW_QUERY_CONFIGS[brand] + CP = car.CarParams.new_message() + for _ in range(100): + fw = [] + for ecu, fw_versions in ecus.items(): + # Assume non-essential ECUs apply to all cars, so we catch cases where Car A with + # missing ECUs won't match to Car B where only Car B has labeled non-essential ECUs + if ecu[0] in config.non_essential_ecus and test_non_essential: + continue + + ecu_name, addr, sub_addr = ecu + fw.append({"ecu": ecu_name, "fwVersion": random.choice(fw_versions), 'brand': brand, + "address": addr, "subAddress": 0 if sub_addr is None else sub_addr}) + CP.carFw = fw + _, matches = match_fw_to_car(CP.carFw, allow_fuzzy=False) + if not test_non_essential: + self.assertFingerprints(matches, car_model) + else: + # if we're removing ECUs we expect some match loss, but it shouldn't mismatch + if len(matches) != 0: + 
self.assertFingerprints(matches, car_model) + + @parameterized.expand([(b, c, e[c]) for b, e in VERSIONS.items() for c in e]) + def test_custom_fuzzy_match(self, brand, car_model, ecus): + # Assert brand-specific fuzzy fingerprinting function doesn't disagree with standard fuzzy function + config = FW_QUERY_CONFIGS[brand] + if config.match_fw_to_car_fuzzy is None: + raise unittest.SkipTest("Brand does not implement custom fuzzy fingerprinting function") + + CP = car.CarParams.new_message() + for _ in range(5): + fw = [] + for ecu, fw_versions in ecus.items(): + ecu_name, addr, sub_addr = ecu + fw.append({"ecu": ecu_name, "fwVersion": random.choice(fw_versions), 'brand': brand, + "address": addr, "subAddress": 0 if sub_addr is None else sub_addr}) + CP.carFw = fw + _, matches = match_fw_to_car(CP.carFw, allow_exact=False, log=False) + brand_matches = config.match_fw_to_car_fuzzy(build_fw_dict(CP.carFw), VERSIONS[brand]) + + # If both have matches, they must agree + if len(matches) == 1 and len(brand_matches) == 1: + self.assertEqual(matches, brand_matches) + + @parameterized.expand([(b, c, e[c]) for b, e in VERSIONS.items() for c in e]) + def test_fuzzy_match_ecu_count(self, brand, car_model, ecus): + # Asserts that fuzzy matching does not count matching FW, but ECU address keys + valid_ecus = [e for e in ecus if e[0] not in FUZZY_EXCLUDE_ECUS] + if not len(valid_ecus): + raise unittest.SkipTest("Car model has no compatible ECUs for fuzzy matching") + + fw = [] + for ecu in valid_ecus: + ecu_name, addr, sub_addr = ecu + for _ in range(5): + # Add multiple FW versions to simulate ECU returning to multiple queries in a brand + fw.append({"ecu": ecu_name, "fwVersion": random.choice(ecus[ecu]), 'brand': brand, + "address": addr, "subAddress": 0 if sub_addr is None else sub_addr}) + CP = car.CarParams.new_message(carFw=fw) + _, matches = match_fw_to_car(CP.carFw, allow_exact=False, log=False) + + # Assert no match if there are not enough unique ECUs + unique_ecus = {(f['address'], f['subAddress']) for f in fw} + if len(unique_ecus) < 2: + self.assertEqual(len(matches), 0, car_model) + # There won't always be a match due to shared FW, but if there is it should be correct + elif len(matches): + self.assertFingerprints(matches, car_model) + + def test_fw_version_lists(self): + for car_model, ecus in FW_VERSIONS.items(): + with self.subTest(car_model=car_model.value): + for ecu, ecu_fw in ecus.items(): + with self.subTest(ecu): + duplicates = {fw for fw in ecu_fw if ecu_fw.count(fw) > 1} + self.assertFalse(len(duplicates), f'{car_model}: Duplicate FW versions: Ecu.{ECU_NAME[ecu[0]]}, {duplicates}') + self.assertGreater(len(ecu_fw), 0, f'{car_model}: No FW versions: Ecu.{ECU_NAME[ecu[0]]}') + + def test_all_addrs_map_to_one_ecu(self): + for brand, cars in VERSIONS.items(): + addr_to_ecu = defaultdict(set) + for ecus in cars.values(): + for ecu_type, addr, sub_addr in ecus.keys(): + addr_to_ecu[(addr, sub_addr)].add(ecu_type) + ecus_for_addr = addr_to_ecu[(addr, sub_addr)] + ecu_strings = ", ".join([f'Ecu.{ECU_NAME[ecu]}' for ecu in ecus_for_addr]) + self.assertLessEqual(len(ecus_for_addr), 1, f"{brand} has multiple ECUs that map to one address: {ecu_strings} -> ({hex(addr)}, {sub_addr})") + + def test_data_collection_ecus(self): + # Asserts no extra ECUs are in the fingerprinting database + for brand, config in FW_QUERY_CONFIGS.items(): + for car_model, ecus in VERSIONS[brand].items(): + bad_ecus = set(ecus).intersection(config.extra_ecus) + with self.subTest(car_model=car_model.value): + 
self.assertFalse(len(bad_ecus), f'{car_model}: Fingerprints contain ECUs added for data collection: {bad_ecus}') + + def test_blacklisted_ecus(self): + blacklisted_addrs = (0x7c4, 0x7d0) # includes A/C ecu and an unknown ecu + for car_model, ecus in FW_VERSIONS.items(): + with self.subTest(car_model=car_model.value): + CP = interfaces[car_model][0].get_non_essential_params(car_model) + if CP.carName == 'subaru': + for ecu in ecus.keys(): + self.assertNotIn(ecu[1], blacklisted_addrs, f'{car_model}: Blacklisted ecu: (Ecu.{ECU_NAME[ecu[0]]}, {hex(ecu[1])})') + + elif CP.carName == "chrysler": + # Some HD trucks have a combined TCM and ECM + if CP.carFingerprint.startswith("RAM HD"): + for ecu in ecus.keys(): + self.assertNotEqual(ecu[0], Ecu.transmission, f"{car_model}: Blacklisted ecu: (Ecu.{ECU_NAME[ecu[0]]}, {hex(ecu[1])})") + + def test_missing_versions_and_configs(self): + brand_versions = set(VERSIONS.keys()) + brand_configs = set(FW_QUERY_CONFIGS.keys()) + if len(brand_configs - brand_versions): + with self.subTest(): + self.fail(f"Brands do not implement FW_VERSIONS: {brand_configs - brand_versions}") + + if len(brand_versions - brand_configs): + with self.subTest(): + self.fail(f"Brands do not implement FW_QUERY_CONFIG: {brand_versions - brand_configs}") + + # Ensure each brand has at least 1 ECU to query, and extra ECU retrieval + for brand, config in FW_QUERY_CONFIGS.items(): + self.assertEqual(len(config.get_all_ecus({}, include_extra_ecus=False)), 0) + self.assertEqual(config.get_all_ecus({}), set(config.extra_ecus)) + self.assertGreater(len(config.get_all_ecus(VERSIONS[brand])), 0) + + def test_fw_request_ecu_whitelist(self): + for brand, config in FW_QUERY_CONFIGS.items(): + with self.subTest(brand=brand): + whitelisted_ecus = {ecu for r in config.requests for ecu in r.whitelist_ecus} + brand_ecus = {fw[0] for car_fw in VERSIONS[brand].values() for fw in car_fw} + brand_ecus |= {ecu[0] for ecu in config.extra_ecus} + + # each ecu in brand's fw versions + extra ecus needs to be whitelisted at least once + ecus_not_whitelisted = brand_ecus - whitelisted_ecus + + ecu_strings = ", ".join([f'Ecu.{ECU_NAME[ecu]}' for ecu in ecus_not_whitelisted]) + self.assertFalse(len(whitelisted_ecus) and len(ecus_not_whitelisted), + f'{brand.title()}: ECUs not in any FW query whitelists: {ecu_strings}') + + def test_fw_requests(self): + # Asserts equal length request and response lists + for brand, config in FW_QUERY_CONFIGS.items(): + with self.subTest(brand=brand): + for request_obj in config.requests: + self.assertEqual(len(request_obj.request), len(request_obj.response)) + + # No request on the OBD port (bus 1, multiplexed) should be run on an aux panda + self.assertFalse(request_obj.auxiliary and request_obj.bus == 1 and request_obj.obd_multiplexing, + f"{brand.title()}: OBD multiplexed request is marked auxiliary: {request_obj}") + + def test_brand_ecu_matches(self): + empty_response = {brand: set() for brand in FW_QUERY_CONFIGS} + self.assertEqual(get_brand_ecu_matches(set()), empty_response) + + # we ignore bus + expected_response = empty_response | {'toyota': {(0x750, 0xf)}} + self.assertEqual(get_brand_ecu_matches({(0x758, 0xf, 99)}), expected_response) + + +class TestFwFingerprintTiming(unittest.TestCase): + N: int = 5 + TOL: float = 0.05 + + # for patched functions + current_obd_multiplexing: bool + total_time: float + + def fake_set_obd_multiplexing(self, _, obd_multiplexing): + """The 10Hz blocking params loop adds on average 50ms to the query time for each OBD multiplexing change""" 
+ if obd_multiplexing != self.current_obd_multiplexing: + self.current_obd_multiplexing = obd_multiplexing + self.total_time += 0.1 / 2 + + def fake_get_data(self, timeout): + self.total_time += timeout + return {} + + def _benchmark_brand(self, brand, num_pandas): + fake_socket = FakeSocket() + self.total_time = 0 + with (mock.patch("openpilot.selfdrive.car.fw_versions.set_obd_multiplexing", self.fake_set_obd_multiplexing), + mock.patch("openpilot.selfdrive.car.isotp_parallel_query.IsoTpParallelQuery.get_data", self.fake_get_data)): + for _ in range(self.N): + # Treat each brand as the most likely (aka, the first) brand with OBD multiplexing initially on + self.current_obd_multiplexing = True + + t = time.perf_counter() + get_fw_versions(fake_socket, fake_socket, brand, num_pandas=num_pandas) + self.total_time += time.perf_counter() - t + + return self.total_time / self.N + + def _assert_timing(self, avg_time, ref_time): + self.assertLess(avg_time, ref_time + self.TOL) + self.assertGreater(avg_time, ref_time - self.TOL, "Performance seems to have improved, update test refs.") + + def test_startup_timing(self): + # Tests worst-case VIN query time and typical present ECU query time + vin_ref_times = {'worst': 1.2, 'best': 0.6} # best assumes we go through all queries to get a match + present_ecu_ref_time = 0.75 + + def fake_get_ecu_addrs(*_, timeout): + self.total_time += timeout + return set() + + fake_socket = FakeSocket() + self.total_time = 0.0 + with (mock.patch("openpilot.selfdrive.car.fw_versions.set_obd_multiplexing", self.fake_set_obd_multiplexing), + mock.patch("openpilot.selfdrive.car.fw_versions.get_ecu_addrs", fake_get_ecu_addrs)): + for _ in range(self.N): + self.current_obd_multiplexing = True + get_present_ecus(fake_socket, fake_socket, num_pandas=2) + self._assert_timing(self.total_time / self.N, present_ecu_ref_time) + print(f'get_present_ecus, query time={self.total_time / self.N} seconds') + + for name, args in (('worst', {}), ('best', {'retry': 1})): + with self.subTest(name=name): + self.total_time = 0.0 + with (mock.patch("openpilot.selfdrive.car.isotp_parallel_query.IsoTpParallelQuery.get_data", self.fake_get_data)): + for _ in range(self.N): + get_vin(fake_socket, fake_socket, (0, 1), **args) + self._assert_timing(self.total_time / self.N, vin_ref_times[name]) + print(f'get_vin {name} case, query time={self.total_time / self.N} seconds') + + def test_fw_query_timing(self): + total_ref_time = {1: 8.4, 2: 9.3} + brand_ref_times = { + 1: { + 'gm': 1.0, + 'body': 0.1, + 'chrysler': 0.3, + 'ford': 1.5, + 'honda': 0.55, + 'hyundai': 1.05, + 'mazda': 0.1, + 'nissan': 0.8, + 'subaru': 0.45, + 'tesla': 0.3, + 'toyota': 1.6, + 'volkswagen': 0.65, + }, + 2: { + 'ford': 1.6, + 'hyundai': 1.85, + 'tesla': 0.3, + } + } + + total_times = {1: 0.0, 2: 0.0} + for num_pandas in (1, 2): + for brand, config in FW_QUERY_CONFIGS.items(): + with self.subTest(brand=brand, num_pandas=num_pandas): + avg_time = self._benchmark_brand(brand, num_pandas) + total_times[num_pandas] += avg_time + avg_time = round(avg_time, 2) + + ref_time = brand_ref_times[num_pandas].get(brand) + if ref_time is None: + # ref time should be same as 1 panda if no aux queries + ref_time = brand_ref_times[num_pandas - 1][brand] + + self._assert_timing(avg_time, ref_time) + print(f'{brand=}, {num_pandas=}, {len(config.requests)=}, avg FW query time={avg_time} seconds') + + for num_pandas in (1, 2): + with self.subTest(brand='all_brands', num_pandas=num_pandas): + total_time = round(total_times[num_pandas], 2) +
self._assert_timing(total_time, total_ref_time[num_pandas]) + print(f'all brands, total FW query time={total_time} seconds') + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/car/tests/test_lateral_limits.py b/selfdrive/car/tests/test_lateral_limits.py new file mode 100644 index 0000000..083cdd5 --- /dev/null +++ b/selfdrive/car/tests/test_lateral_limits.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +from collections import defaultdict +import importlib +from parameterized import parameterized_class +import sys +import unittest + +from openpilot.common.realtime import DT_CTRL +from openpilot.selfdrive.car.car_helpers import interfaces +from openpilot.selfdrive.car.fingerprints import all_known_cars +from openpilot.selfdrive.car.interfaces import get_torque_params +from openpilot.selfdrive.car.subaru.values import CAR as SUBARU + +CAR_MODELS = all_known_cars() + +# ISO 11270 - allowed up jerk is strictly lower than recommended limits +MAX_LAT_ACCEL = 3.0 # m/s^2 +MAX_LAT_JERK_UP = 2.5 # m/s^3 +MAX_LAT_JERK_DOWN = 5.0 # m/s^3 +MAX_LAT_JERK_UP_TOLERANCE = 0.5 # m/s^3 + +# jerk is measured over half a second +JERK_MEAS_T = 0.5 + +# TODO: put these cars within limits +ABOVE_LIMITS_CARS = [ + SUBARU.LEGACY, + SUBARU.OUTBACK, +] + +car_model_jerks: defaultdict[str, dict[str, float]] = defaultdict(dict) + + +@parameterized_class('car_model', [(c,) for c in sorted(CAR_MODELS)]) +class TestLateralLimits(unittest.TestCase): + car_model: str + + @classmethod + def setUpClass(cls): + CarInterface, _, _ = interfaces[cls.car_model] + CP = CarInterface.get_non_essential_params(cls.car_model) + + if CP.dashcamOnly: + raise unittest.SkipTest("Platform is behind dashcamOnly") + + # TODO: test all platforms + if CP.lateralTuning.which() != 'torque': + raise unittest.SkipTest + + if CP.notCar: + raise unittest.SkipTest + + if CP.carFingerprint in ABOVE_LIMITS_CARS: + raise unittest.SkipTest + + CarControllerParams = importlib.import_module(f'selfdrive.car.{CP.carName}.values').CarControllerParams + cls.control_params = CarControllerParams(CP) + cls.torque_params = get_torque_params(cls.car_model) + + @staticmethod + def calculate_0_5s_jerk(control_params, torque_params): + steer_step = control_params.STEER_STEP + max_lat_accel = torque_params['MAX_LAT_ACCEL_MEASURED'] + + # Steer up/down delta per 10ms frame, in percentage of max torque + steer_up_per_frame = control_params.STEER_DELTA_UP / control_params.STEER_MAX / steer_step + steer_down_per_frame = control_params.STEER_DELTA_DOWN / control_params.STEER_MAX / steer_step + + # Lateral acceleration reached in 0.5 seconds, clipping to max torque + accel_up_0_5_sec = min(steer_up_per_frame * JERK_MEAS_T / DT_CTRL, 1.0) * max_lat_accel + accel_down_0_5_sec = min(steer_down_per_frame * JERK_MEAS_T / DT_CTRL, 1.0) * max_lat_accel + + # Convert to m/s^3 + return accel_up_0_5_sec / JERK_MEAS_T, accel_down_0_5_sec / JERK_MEAS_T + + def test_jerk_limits(self): + up_jerk, down_jerk = self.calculate_0_5s_jerk(self.control_params, self.torque_params) + car_model_jerks[self.car_model] = {"up_jerk": up_jerk, "down_jerk": down_jerk} + self.assertLessEqual(up_jerk, MAX_LAT_JERK_UP + MAX_LAT_JERK_UP_TOLERANCE) + self.assertLessEqual(down_jerk, MAX_LAT_JERK_DOWN) + + def test_max_lateral_accel(self): + self.assertLessEqual(self.torque_params["MAX_LAT_ACCEL_MEASURED"], MAX_LAT_ACCEL) + + +if __name__ == "__main__": + result = unittest.main(exit=False) + + print(f"\n\n---- Lateral limit report ({len(CAR_MODELS)} cars) ----\n") + + max_car_model_len = 
max([len(car_model) for car_model in car_model_jerks]) + for car_model, _jerks in sorted(car_model_jerks.items(), key=lambda i: i[1]['up_jerk'], reverse=True): + violation = _jerks["up_jerk"] > MAX_LAT_JERK_UP + MAX_LAT_JERK_UP_TOLERANCE or \ + _jerks["down_jerk"] > MAX_LAT_JERK_DOWN + violation_str = " - VIOLATION" if violation else "" + + print(f"{car_model:{max_car_model_len}} - up jerk: {round(_jerks['up_jerk'], 2):5} " + + f"m/s^3, down jerk: {round(_jerks['down_jerk'], 2):5} m/s^3{violation_str}") + + # exit with test result + sys.exit(not result.result.wasSuccessful()) diff --git a/selfdrive/car/tests/test_models.py b/selfdrive/car/tests/test_models.py new file mode 100644 index 0000000..b7d20e5 --- /dev/null +++ b/selfdrive/car/tests/test_models.py @@ -0,0 +1,488 @@ +#!/usr/bin/env python3 +import capnp +import os +import importlib +import pytest +import random +import unittest +from collections import defaultdict, Counter +import hypothesis.strategies as st +from hypothesis import Phase, given, settings +from parameterized import parameterized_class + +from cereal import messaging, log, car +from openpilot.common.basedir import BASEDIR +from openpilot.common.params import Params +from openpilot.common.realtime import DT_CTRL +from openpilot.selfdrive.car import gen_empty_fingerprint +from openpilot.selfdrive.car.fingerprints import all_known_cars +from openpilot.selfdrive.car.car_helpers import FRAME_FINGERPRINT, interfaces +from openpilot.selfdrive.car.honda.values import CAR as HONDA, HondaFlags +from openpilot.selfdrive.car.tests.routes import non_tested_cars, routes, CarTestRoute +from openpilot.selfdrive.controls.controlsd import Controls +from openpilot.selfdrive.test.helpers import read_segment_list +from openpilot.system.hardware.hw import DEFAULT_DOWNLOAD_CACHE_ROOT +from openpilot.tools.lib.logreader import LogReader, internal_source, openpilotci_source +from openpilot.tools.lib.route import SegmentName + +from panda.tests.libpanda import libpanda_py + +EventName = car.CarEvent.EventName +PandaType = log.PandaState.PandaType +SafetyModel = car.CarParams.SafetyModel + +NUM_JOBS = int(os.environ.get("NUM_JOBS", "1")) +JOB_ID = int(os.environ.get("JOB_ID", "0")) +INTERNAL_SEG_LIST = os.environ.get("INTERNAL_SEG_LIST", "") +INTERNAL_SEG_CNT = int(os.environ.get("INTERNAL_SEG_CNT", "0")) +MAX_EXAMPLES = int(os.environ.get("MAX_EXAMPLES", "300")) +CI = os.environ.get("CI", None) is not None + + +def get_test_cases() -> list[tuple[str, CarTestRoute | None]]: + # build list of test cases + test_cases = [] + if not len(INTERNAL_SEG_LIST): + routes_by_car = defaultdict(set) + for r in routes: + routes_by_car[r.car_model].add(r) + + for i, c in enumerate(sorted(all_known_cars())): + if i % NUM_JOBS == JOB_ID: + test_cases.extend(sorted((c, r) for r in routes_by_car.get(c, (None,)))) + + else: + segment_list = read_segment_list(os.path.join(BASEDIR, INTERNAL_SEG_LIST)) + segment_list = random.sample(segment_list, INTERNAL_SEG_CNT or len(segment_list)) + for platform, segment in segment_list: + segment_name = SegmentName(segment) + test_cases.append((platform, CarTestRoute(segment_name.route_name.canonical_name, platform, + segment=segment_name.segment_num))) + return test_cases + + +@pytest.mark.slow +@pytest.mark.shared_download_cache +class TestCarModelBase(unittest.TestCase): + car_model: str | None = None + test_route: CarTestRoute | None = None + test_route_on_bucket: bool = True # whether the route is on the preserved CI bucket + + can_msgs: 
list[capnp.lib.capnp._DynamicStructReader] + fingerprint: dict[int, dict[int, int]] + elm_frame: int | None + car_safety_mode_frame: int | None + + @classmethod + def get_testing_data_from_logreader(cls, lr): + car_fw = [] + can_msgs = [] + cls.elm_frame = None + cls.car_safety_mode_frame = None + cls.fingerprint = gen_empty_fingerprint() + experimental_long = False + for msg in lr: + if msg.which() == "can": + can_msgs.append(msg) + if len(can_msgs) <= FRAME_FINGERPRINT: + for m in msg.can: + if m.src < 64: + cls.fingerprint[m.src][m.address] = len(m.dat) + + elif msg.which() == "carParams": + car_fw = msg.carParams.carFw + if msg.carParams.openpilotLongitudinalControl: + experimental_long = True + if cls.car_model is None and not cls.ci: + cls.car_model = msg.carParams.carFingerprint + + # Log which can frame the panda safety mode left ELM327, for CAN validity checks + elif msg.which() == 'pandaStates': + for ps in msg.pandaStates: + if cls.elm_frame is None and ps.safetyModel != SafetyModel.elm327: + cls.elm_frame = len(can_msgs) + if cls.car_safety_mode_frame is None and ps.safetyModel not in \ + (SafetyModel.elm327, SafetyModel.noOutput): + cls.car_safety_mode_frame = len(can_msgs) + + elif msg.which() == 'pandaStateDEPRECATED': + if cls.elm_frame is None and msg.pandaStateDEPRECATED.safetyModel != SafetyModel.elm327: + cls.elm_frame = len(can_msgs) + if cls.car_safety_mode_frame is None and msg.pandaStateDEPRECATED.safetyModel not in \ + (SafetyModel.elm327, SafetyModel.noOutput): + cls.car_safety_mode_frame = len(can_msgs) + + if len(can_msgs) > int(50 / DT_CTRL): + return car_fw, can_msgs, experimental_long + + raise Exception("no can data found") + + @classmethod + def get_testing_data(cls): + test_segs = (2, 1, 0) + if cls.test_route.segment is not None: + test_segs = (cls.test_route.segment,) + + is_internal = len(INTERNAL_SEG_LIST) + + for seg in test_segs: + segment_range = f"{cls.test_route.route}/{seg}" + + try: + lr = LogReader(segment_range, default_source=internal_source if is_internal else openpilotci_source) + return cls.get_testing_data_from_logreader(lr) + except Exception: + pass + + # Route is not in CI bucket, assume either user has access (private), or it is public + # test_route_on_ci_bucket will fail when running in CI + if not is_internal: + cls.test_route_on_bucket = False + + for seg in test_segs: + segment_range = f"{cls.test_route.route}/{seg}" + try: + lr = LogReader(segment_range) + return cls.get_testing_data_from_logreader(lr) + except Exception: + pass + + raise Exception(f"Route: {repr(cls.test_route.route)} with segments: {test_segs} not found or no CAN msgs found. 
Is it uploaded and public?") + + + @classmethod + def setUpClass(cls): + if cls.__name__ == 'TestCarModel' or cls.__name__.endswith('Base'): + raise unittest.SkipTest + + if 'FILTER' in os.environ: + if not cls.car_model.startswith(tuple(os.environ.get('FILTER').split(','))): + raise unittest.SkipTest + + if cls.test_route is None: + if cls.car_model in non_tested_cars: + print(f"Skipping tests for {cls.car_model}: missing route") + raise unittest.SkipTest + raise Exception(f"missing test route for {cls.car_model}") + + car_fw, can_msgs, experimental_long = cls.get_testing_data() + + # if relay is expected to be open in the route + cls.openpilot_enabled = cls.car_safety_mode_frame is not None + + cls.can_msgs = sorted(can_msgs, key=lambda msg: msg.logMonoTime) + + cls.CarInterface, cls.CarController, cls.CarState = interfaces[cls.car_model] + cls.CP = cls.CarInterface.get_params(cls.car_model, cls.fingerprint, car_fw, experimental_long, docs=False) + assert cls.CP + assert cls.CP.carFingerprint == cls.car_model + + os.environ["COMMA_CACHE"] = DEFAULT_DOWNLOAD_CACHE_ROOT + + @classmethod + def tearDownClass(cls): + del cls.can_msgs + + def setUp(self): + self.CI = self.CarInterface(self.CP.copy(), self.CarController, self.CarState) + assert self.CI + + Params().put_bool("OpenpilotEnabledToggle", self.openpilot_enabled) + + # TODO: check safetyModel is in release panda build + self.safety = libpanda_py.libpanda + + cfg = self.CP.safetyConfigs[-1] + set_status = self.safety.set_safety_hooks(cfg.safetyModel.raw, cfg.safetyParam) + self.assertEqual(0, set_status, f"failed to set safetyModel {cfg}") + self.safety.init_tests() + + def test_car_params(self): + if self.CP.dashcamOnly: + self.skipTest("no need to check carParams for dashcamOnly") + + # make sure car params are within a valid range + self.assertGreater(self.CP.mass, 1) + + if self.CP.steerControlType != car.CarParams.SteerControlType.angle: + tuning = self.CP.lateralTuning.which() + if tuning == 'pid': + self.assertTrue(len(self.CP.lateralTuning.pid.kpV)) + elif tuning == 'torque': + self.assertTrue(self.CP.lateralTuning.torque.kf > 0) + else: + raise Exception("unknown tuning") + + def test_car_interface(self): + # TODO: also check for checksum violations from can parser + can_invalid_cnt = 0 + can_valid = False + CC = car.CarControl.new_message() + + for i, msg in enumerate(self.can_msgs): + CS = self.CI.update(CC, (msg.as_builder().to_bytes(),)) + self.CI.apply(CC, msg.logMonoTime) + + if CS.canValid: + can_valid = True + + # wait max of 2s for low frequency msgs to be seen + if i > 200 or can_valid: + can_invalid_cnt += not CS.canValid + + self.assertEqual(can_invalid_cnt, 0) + + def test_radar_interface(self): + RadarInterface = importlib.import_module(f'selfdrive.car.{self.CP.carName}.radar_interface').RadarInterface + RI = RadarInterface(self.CP) + assert RI + + # Since OBD port is multiplexed to bus 1 (commonly radar bus) while fingerprinting, + # start parsing CAN messages after we've left ELM mode and can expect CAN traffic + error_cnt = 0 + for i, msg in enumerate(self.can_msgs[self.elm_frame:]): + rr = RI.update((msg.as_builder().to_bytes(),)) + if rr is not None and i > 50: + error_cnt += car.RadarData.Error.canError in rr.errors + self.assertEqual(error_cnt, 0) + + def test_panda_safety_rx_checks(self): + if self.CP.dashcamOnly: + self.skipTest("no need to check panda safety for dashcamOnly") + + start_ts = self.can_msgs[0].logMonoTime + + failed_addrs = Counter() + for can in self.can_msgs: + # update panda timer + t = 
(can.logMonoTime - start_ts) / 1e3 + self.safety.set_timer(int(t)) + + # run all msgs through the safety RX hook + for msg in can.can: + if msg.src >= 64: + continue + + to_send = libpanda_py.make_CANPacket(msg.address, msg.src % 4, msg.dat) + if self.safety.safety_rx_hook(to_send) != 1: + failed_addrs[hex(msg.address)] += 1 + + # ensure all msgs defined in the addr checks are valid + self.safety.safety_tick_current_safety_config() + if t > 1e6: + self.assertTrue(self.safety.safety_config_valid()) + + # Don't check relay malfunction on disabled routes (relay closed), + # or before fingerprinting is done (elm327 and noOutput) + if self.openpilot_enabled and t / 1e4 > self.car_safety_mode_frame: + self.assertFalse(self.safety.get_relay_malfunction()) + else: + self.safety.set_relay_malfunction(False) + + self.assertFalse(len(failed_addrs), f"panda safety RX check failed: {failed_addrs}") + + # ensure RX checks go invalid after small time with no traffic + self.safety.set_timer(int(t + (2*1e6))) + self.safety.safety_tick_current_safety_config() + self.assertFalse(self.safety.safety_config_valid()) + + def test_panda_safety_tx_cases(self, data=None): + """Asserts we can tx common messages""" + if self.CP.notCar: + self.skipTest("Skipping test for notCar") + + def test_car_controller(car_control): + now_nanos = 0 + msgs_sent = 0 + CI = self.CarInterface(self.CP, self.CarController, self.CarState) + for _ in range(round(10.0 / DT_CTRL)): # make sure we hit the slowest messages + CI.update(car_control, []) + _, sendcan = CI.apply(car_control, now_nanos) + + now_nanos += DT_CTRL * 1e9 + msgs_sent += len(sendcan) + for addr, _, dat, bus in sendcan: + to_send = libpanda_py.make_CANPacket(addr, bus % 4, dat) + self.assertTrue(self.safety.safety_tx_hook(to_send), (addr, dat, bus)) + + # Make sure we attempted to send messages + self.assertGreater(msgs_sent, 50) + + # Make sure we can send all messages while inactive + CC = car.CarControl.new_message() + test_car_controller(CC) + + # Test cancel + general messages (controls_allowed=False & cruise_engaged=True) + self.safety.set_cruise_engaged_prev(True) + CC = car.CarControl.new_message(cruiseControl={'cancel': True}) + test_car_controller(CC) + + # Test resume + general messages (controls_allowed=True & cruise_engaged=True) + self.safety.set_controls_allowed(True) + CC = car.CarControl.new_message(cruiseControl={'resume': True}) + test_car_controller(CC) + + # Skip stdout/stderr capture with pytest, causes elevated memory usage + @pytest.mark.nocapture + @settings(max_examples=MAX_EXAMPLES, deadline=None, + phases=(Phase.reuse, Phase.generate, Phase.shrink)) + @given(data=st.data()) + def test_panda_safety_carstate_fuzzy(self, data): + """ + For each example, pick a random CAN message on the bus and fuzz its data, + checking for panda state mismatches. + """ + + if self.CP.dashcamOnly: + self.skipTest("no need to check panda safety for dashcamOnly") + + valid_addrs = [(addr, bus, size) for bus, addrs in self.fingerprint.items() for addr, size in addrs.items()] + address, bus, size = data.draw(st.sampled_from(valid_addrs)) + + msg_strategy = st.binary(min_size=size, max_size=size) + msgs = data.draw(st.lists(msg_strategy, min_size=20)) + + CC = car.CarControl.new_message() + + for dat in msgs: + # due to panda updating state selectively, only edges are expected to match + # TODO: warm up CarState with real CAN messages to check edge of both sources + # (eg. 
toyota's gasPressed is the inverse of a signal being set) + prev_panda_gas = self.safety.get_gas_pressed_prev() + prev_panda_brake = self.safety.get_brake_pressed_prev() + prev_panda_regen_braking = self.safety.get_regen_braking_prev() + prev_panda_vehicle_moving = self.safety.get_vehicle_moving() + prev_panda_cruise_engaged = self.safety.get_cruise_engaged_prev() + prev_panda_acc_main_on = self.safety.get_acc_main_on() + + to_send = libpanda_py.make_CANPacket(address, bus, dat) + self.safety.safety_rx_hook(to_send) + + can = messaging.new_message('can', 1) + can.can = [log.CanData(address=address, dat=dat, src=bus)] + + CS = self.CI.update(CC, (can.to_bytes(),)) + + if self.safety.get_gas_pressed_prev() != prev_panda_gas: + self.assertEqual(CS.gasPressed, self.safety.get_gas_pressed_prev()) + + if self.safety.get_brake_pressed_prev() != prev_panda_brake: + # TODO: remove this exception once this mismatch is resolved + brake_pressed = CS.brakePressed + if CS.brakePressed and not self.safety.get_brake_pressed_prev(): + if self.CP.carFingerprint in (HONDA.PILOT, HONDA.RIDGELINE) and CS.brake > 0.05: + brake_pressed = False + + self.assertEqual(brake_pressed, self.safety.get_brake_pressed_prev()) + + if self.safety.get_regen_braking_prev() != prev_panda_regen_braking: + self.assertEqual(CS.regenBraking, self.safety.get_regen_braking_prev()) + + if self.safety.get_vehicle_moving() != prev_panda_vehicle_moving: + self.assertEqual(not CS.standstill, self.safety.get_vehicle_moving()) + + if not (self.CP.carName == "honda" and not (self.CP.flags & HondaFlags.BOSCH)): + if self.safety.get_cruise_engaged_prev() != prev_panda_cruise_engaged: + self.assertEqual(CS.cruiseState.enabled, self.safety.get_cruise_engaged_prev()) + + if self.CP.carName == "honda": + if self.safety.get_acc_main_on() != prev_panda_acc_main_on: + self.assertEqual(CS.cruiseState.available, self.safety.get_acc_main_on()) + + def test_panda_safety_carstate(self): + """ + Assert that panda safety matches openpilot's carState + """ + if self.CP.dashcamOnly: + self.skipTest("no need to check panda safety for dashcamOnly") + + CC = car.CarControl.new_message() + + # warm up pass, as initial states may be different + for can in self.can_msgs[:300]: + self.CI.update(CC, (can.as_builder().to_bytes(), )) + for msg in filter(lambda m: m.src in range(64), can.can): + to_send = libpanda_py.make_CANPacket(msg.address, msg.src % 4, msg.dat) + self.safety.safety_rx_hook(to_send) + + controls_allowed_prev = False + CS_prev = car.CarState.new_message() + checks = defaultdict(int) + controlsd = Controls(CI=self.CI) + controlsd.initialized = True + for idx, can in enumerate(self.can_msgs): + CS = self.CI.update(CC, (can.as_builder().to_bytes(), )) + for msg in filter(lambda m: m.src in range(64), can.can): + to_send = libpanda_py.make_CANPacket(msg.address, msg.src % 4, msg.dat) + ret = self.safety.safety_rx_hook(to_send) + self.assertEqual(1, ret, f"safety rx failed ({ret=}): {to_send}") + + # Skip first frame so CS_prev is properly initialized + if idx == 0: + CS_prev = CS + # Button may be left pressed in warm up period + if not self.CP.pcmCruise: + self.safety.set_controls_allowed(0) + continue + + # TODO: check rest of panda's carstate (steering, ACC main on, etc.) 
+ + checks['gasPressed'] += CS.gasPressed != self.safety.get_gas_pressed_prev() + checks['standstill'] += CS.standstill == self.safety.get_vehicle_moving() + + # TODO: remove this exception once this mismatch is resolved + brake_pressed = CS.brakePressed + if CS.brakePressed and not self.safety.get_brake_pressed_prev(): + if self.CP.carFingerprint in (HONDA.PILOT, HONDA.RIDGELINE) and CS.brake > 0.05: + brake_pressed = False + checks['brakePressed'] += brake_pressed != self.safety.get_brake_pressed_prev() + checks['regenBraking'] += CS.regenBraking != self.safety.get_regen_braking_prev() + + if self.CP.pcmCruise: + # On most pcmCruise cars, openpilot's state is always tied to the PCM's cruise state. + # On Honda Nidec, we always engage on the rising edge of the PCM cruise state, but + # openpilot brakes to zero even if the min ACC speed is non-zero (i.e. the PCM disengages). + if self.CP.carName == "honda" and not (self.CP.flags & HondaFlags.BOSCH): + # only the rising edges are expected to match + if CS.cruiseState.enabled and not CS_prev.cruiseState.enabled: + checks['controlsAllowed'] += not self.safety.get_controls_allowed() + else: + checks['controlsAllowed'] += not CS.cruiseState.enabled and self.safety.get_controls_allowed() + + # TODO: fix notCar mismatch + if not self.CP.notCar: + checks['cruiseState'] += CS.cruiseState.enabled != self.safety.get_cruise_engaged_prev() + else: + # Check for enable events on rising edge of controls allowed + controlsd.update_events(CS) + controlsd.CS_prev = CS + button_enable = (any(evt.enable for evt in CS.events) and + not any(evt == EventName.pedalPressed for evt in controlsd.events.names)) + mismatch = button_enable != (self.safety.get_controls_allowed() and not controls_allowed_prev) + checks['controlsAllowed'] += mismatch + controls_allowed_prev = self.safety.get_controls_allowed() + if button_enable and not mismatch: + self.safety.set_controls_allowed(False) + + if self.CP.carName == "honda": + checks['mainOn'] += CS.cruiseState.available != self.safety.get_acc_main_on() + + CS_prev = CS + + failed_checks = {k: v for k, v in checks.items() if v > 0} + self.assertFalse(len(failed_checks), f"panda safety doesn't agree with openpilot: {failed_checks}") + + @unittest.skipIf(not CI, "Accessing non CI-bucket routes is allowed only when not in CI") + def test_route_on_ci_bucket(self): + self.assertTrue(self.test_route_on_bucket, "Route not on CI bucket. 
" + + "This is fine to fail for WIP car ports, just let us know and we can upload your routes to the CI bucket.") + + +@parameterized_class(('car_model', 'test_route'), get_test_cases()) +@pytest.mark.xdist_group_class_property('test_route') +class TestCarModel(TestCarModelBase): + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/car/tests/test_models_segs.txt b/selfdrive/car/tests/test_models_segs.txt new file mode 100644 index 0000000..d805ce7 --- /dev/null +++ b/selfdrive/car/tests/test_models_segs.txt @@ -0,0 +1,3884 @@ +# HONDA ODYSSEY 2018 +0ffff9faf2699ebc|2023-06-15--14-19-40--3 +# HONDA ODYSSEY 2018 +0c21c391abd382af|2023-07-17--19-49-46--12 +# HONDA ODYSSEY 2018 +0c23034a3a1e7bf4|2023-07-22--12-15-36--37 +# HONDA ODYSSEY 2018 +1b83b62ce1e15374|2023-07-24--16-56-02--166 +# HONDA ODYSSEY 2018 +0494ab1688baf0d1|2023-07-12--12-59-45--18 +# HONDA ODYSSEY 2018 +10702b68bb2db4f7|2023-07-12--09-09-08--24 +# HONDA ODYSSEY 2018 +349a10da75da84b3|2023-07-12--05-20-10--17 +# HONDA ODYSSEY 2018 +3eb25233bf8777b2|2023-08-03--12-55-23--19 +# HONDA ODYSSEY 2018 +4480efadd4e5e9b0|2023-07-31--18-14-01--20 +# HONDA ODYSSEY 2018 +568f8a1ce998fe8c|2023-05-15--18-09-35--3 +# HONDA ODYSSEY 2018 +564e1c4005971631|2023-07-15--21-26-58--9 +# HONDA ODYSSEY 2018 +5a28fb7518234651|2023-07-16--10-05-04--20 +# HONDA ODYSSEY 2018 +5de06f504727ef4e|2023-05-16--13-44-26--76 +# HONDA ODYSSEY 2018 +6da2ceb7bf6a818c|2023-05-24--16-27-24--38 +# HONDA ODYSSEY 2018 +6d3b5e3ce824a6be|2023-07-21--09-19-25--2 +# HONDA ODYSSEY 2018 +7074dfeeffbb7f3b|2023-08-07--08-42-57--3 +# HONDA ODYSSEY 2018 +761e1503448b32d5|2023-07-04--15-25-15--9 +# HONDA ODYSSEY 2018 +76b6bcf9925e9293|2023-07-15--10-36-05--31 +# HONDA ODYSSEY 2018 +7a7eba6f9ce36254|2023-07-16--16-28-27--7 +# HONDA ODYSSEY 2018 +772b833c28f493b5|2023-06-04--10-39-28--23 +# HONDA ODYSSEY 2018 +8906faebfc695953|2023-07-01--10-35-06--1 +# HONDA ODYSSEY 2018 +89ae461c07c586a3|2023-07-21--12-55-44--4 +# HONDA ODYSSEY 2018 +8eb3d1f10274ac44|2023-05-10--05-26-26--28 +# HONDA ODYSSEY 2018 +9954025f3a2aec2d|2023-06-16--14-15-08--2 +# HONDA ODYSSEY 2018 +a3636d74d9353700|2023-08-05--11-59-10--56 +# HONDA ODYSSEY 2018 +a27fe4e91d6e73c8|2023-05-31--20-34-59--2 +# HONDA ODYSSEY 2018 +aa7176d108cb2f97|2023-05-17--11-01-55--1 +# HONDA ODYSSEY 2018 +b288b21f47092c68|2023-06-23--21-09-10--7 +# HONDA ODYSSEY 2018 +b95acf45b72e104d|2023-07-10--08-01-29--11 +# HONDA ODYSSEY 2018 +b83fc39362bdde40|2023-05-15--09-42-06--9 +# HONDA ODYSSEY 2018 +bdc4ffbeb0ae5239|2023-05-11--17-52-21--3 +# HONDA ODYSSEY 2018 +c57d8a1adaecd30e|2023-07-09--21-29-19--70 +# HONDA ODYSSEY 2018 +c7fa6fb6f4d42407|2023-07-30--21-31-47--3 +# HONDA ODYSSEY 2018 +d0d197a10408c839|2023-05-13--16-17-45--67 +# HONDA ODYSSEY 2018 +e1aa4eae17f7b626|2023-06-06--16-12-33--1 +# HONDA ODYSSEY 2018 +e462034ac9e0c71d|2023-07-28--16-31-10--37 +# HONDA ODYSSEY 2018 +e67fa35fcc6db9d5|2023-07-22--17-19-19--28 +# HONDA ODYSSEY 2018 +f30eee173ffa893d|2023-05-16--19-43-43--31 +# HONDA ODYSSEY 2018 +f77dffd74248543d|2023-06-05--21-15-01--39 +# HONDA ODYSSEY 2018 +fa0bfc645891d426|2023-06-19--15-41-01--10 +# SUBARU FORESTER 2019 +1589151a309c433c|2023-05-24--17-23-34--51 +# SUBARU FORESTER 2019 +2caa16bfdc10557f|2023-07-17--06-33-02--17 +# SUBARU FORESTER 2019 +382f4ac8109f707f|2023-06-02--17-38-42--2 +# SUBARU FORESTER 2019 +8fc4e96c0d82e152|2023-08-03--07-28-17--6 +# SUBARU FORESTER 2019 +9227902ca7eb7bba|2023-07-31--13-51-49--118 +# SUBARU FORESTER 2019 +9a3079fb5c491ea5|2023-07-24--06-10-15--22 +# SUBARU 
FORESTER 2019 +a447729c1d15ff89|2023-07-31--06-59-17--2 +# SUBARU FORESTER 2019 +a752a19628873b10|2023-06-23--06-07-53--7 +# SUBARU FORESTER 2019 +ace607260543d257|2023-06-25--16-30-06--38 +# SUBARU FORESTER 2019 +fe3d890194b380ae|2023-06-01--19-39-37--120 +# TOYOTA RAV4 2019 +2308c55920b6c000|2023-07-01--18-34-43--21 +# TOYOTA RAV4 2019 +23562c9692a54321|2023-06-26--20-58-17--5 +# TOYOTA RAV4 2019 +1df2464a11c3caa7|2023-07-27--11-55-45--2 +# TOYOTA RAV4 2019 +1da6142d55922568|2023-05-21--19-08-27--17 +# TOYOTA RAV4 2019 +1930f57e827a5277|2023-06-18--09-30-23--7 +# TOYOTA RAV4 2019 +1c05783affe6641f|2023-05-28--09-51-01--28 +# TOYOTA RAV4 2019 +12913b66ea1ba32c|2023-08-02--12-35-55--14 +# TOYOTA RAV4 2019 +17e30e35ce70aa27|2023-06-12--05-53-27--13 +# TOYOTA RAV4 2019 +0559b34634b98e40|2023-05-26--19-07-54--10 +# TOYOTA RAV4 2019 +1523b70b49465492|2023-07-07--15-35-52--26 +# TOYOTA RAV4 2019 +131b0198e811cbaf|2023-05-10--19-45-30--1 +# TOYOTA RAV4 2019 +1aa9f62e2da571ae|2023-06-06--16-15-53--12 +# TOYOTA RAV4 2019 +06c6a866f247ccd3|2023-07-11--17-20-33--1 +# TOYOTA RAV4 2019 +1ca69541b8d83867|2023-07-18--13-20-11--63 +# TOYOTA RAV4 2019 +1c063541db21a08a|2023-05-18--20-01-46--158 +# TOYOTA RAV4 2019 +222b4aa34efc150b|2023-07-06--07-11-01--18 +# TOYOTA RAV4 2019 +25c96345c4ae0f5f|2023-05-28--13-48-48--44 +# TOYOTA RAV4 2019 +183ed80ffd5de6df|2023-05-19--09-54-02--108 +# TOYOTA RAV4 2019 +0835a20d4c688b4b|2023-05-21--21-06-46--101 +# TOYOTA RAV4 2019 +294c4257370dcd7f|2023-07-16--22-03-45--55 +# TOYOTA RAV4 2019 +013d89e3119a61fa|2023-07-27--19-28-55--1 +# TOYOTA RAV4 2019 +0111bbc17b5fc829|2023-07-22--15-33-17--3 +# TOYOTA RAV4 2019 +27fbf8f57ad1ed35|2023-07-25--03-04-13--5 +# TOYOTA RAV4 2019 +11e905ad4a26f7cf|2023-07-19--17-16-34--2 +# TOYOTA RAV4 2019 +076e5dea58433d1b|2023-05-30--14-28-07--99 +# TOYOTA RAV4 2019 +2d76c29b8aa236f4|2023-05-10--20-14-29--5 +# TOYOTA RAV4 2019 +31a0911d03035419|2023-06-11--16-24-31--3 +# TOYOTA RAV4 2019 +33c48660a967984c|2023-07-15--12-09-46--27 +# TOYOTA RAV4 2019 +334cffe17908547f|2023-07-05--19-08-58--175 +# TOYOTA RAV4 2019 +3550e1127d042e0c|2023-05-20--07-41-42--17 +# TOYOTA RAV4 2019 +3d77767f8c063017|2023-07-26--08-07-41--3 +# TOYOTA RAV4 2019 +3ecc8a00642fb457|2023-07-07--15-00-35--1 +# TOYOTA RAV4 2019 +4031a71d2f2bbf41|2023-06-24--21-44-33--19 +# TOYOTA RAV4 2019 +3dc04fa00e9b54d4|2023-07-10--20-45-54--20 +# TOYOTA RAV4 2019 +41f0e37bcb089214|2023-05-15--16-06-13--7 +# TOYOTA RAV4 2019 +4501ab837e09a1c6|2023-07-29--10-06-44--27 +# TOYOTA RAV4 2019 +430cfb7ed591a94b|2023-05-11--09-51-06--4 +# TOYOTA RAV4 2019 +43d464c559ee7b80|2023-08-02--09-27-58--26 +# TOYOTA RAV4 2019 +45bd1475c3f373bf|2023-07-17--07-09-41--42 +# TOYOTA RAV4 2019 +4d059401f47cf0b3|2023-05-13--12-01-17--9 +# TOYOTA RAV4 2019 +59e0e393764bc46b|2023-07-02--13-33-24--40 +# TOYOTA RAV4 2019 +595291c68fb8e6ea|2023-06-08--09-48-19--28 +# TOYOTA RAV4 2019 +5a62b93c7f9af2c9|2023-06-14--20-12-16--5 +# TOYOTA RAV4 2019 +5a9c05e4b4e221ba|2023-06-30--07-08-38--33 +# TOYOTA RAV4 2019 +5ac3d4195844832a|2023-05-11--08-10-51--15 +# TOYOTA RAV4 2019 +5ce8412c1359aee9|2023-06-04--01-23-21--11 +# TOYOTA RAV4 2019 +628fd36bec15e0fb|2023-06-27--07-40-01--6 +# TOYOTA RAV4 2019 +66ec82e487f41877|2023-06-08--07-57-18--2 +# TOYOTA RAV4 2019 +643e2c33f76301ed|2023-07-13--11-28-53--1 +# TOYOTA RAV4 2019 +734696450d800618|2023-08-04--23-17-18--8 +# TOYOTA RAV4 2019 +7ca260f9516f6b11|2023-05-28--11-43-26--193 +# TOYOTA RAV4 2019 +80c09a7a12eeea03|2023-05-27--12-27-44--10 +# TOYOTA RAV4 2019 
+81ef68582345921f|2023-06-18--14-19-57--13 +# TOYOTA RAV4 2019 +7a6d59eb02cb9e4f|2023-08-04--07-49-50--2 +# TOYOTA RAV4 2019 +87d791b5f816cb1a|2023-07-17--12-47-30--6 +# TOYOTA RAV4 2019 +850119b3a36f6308|2023-05-28--16-46-06--10 +# TOYOTA RAV4 2019 +8a111c9611f505e8|2023-05-14--17-09-37--3 +# TOYOTA RAV4 2019 +8a7d3987605d80d7|2023-07-11--22-11-11--98 +# TOYOTA RAV4 2019 +904ef86637af3549|2023-06-29--21-07-54--12 +# TOYOTA RAV4 2019 +8bb58ef0f2d612b7|2023-07-24--05-57-12--18 +# TOYOTA RAV4 2019 +9145fe2119f27d86|2023-05-31--08-42-48--13 +# TOYOTA RAV4 2019 +9508cd0cab46a905|2023-06-14--16-43-42--10 +# TOYOTA RAV4 2019 +95768af0ae83525e|2023-07-21--15-19-36--83 +# TOYOTA RAV4 2019 +96a8957593166ea1|2023-06-19--12-15-04--6 +# TOYOTA RAV4 2019 +973311a7d6c7d753|2023-06-13--14-17-32--108 +# TOYOTA RAV4 2019 +9c52118646f93f97|2023-07-04--23-02-46--17 +# TOYOTA RAV4 2019 +9cbbc22dbfb7be88|2023-05-30--17-24-35--18 +# TOYOTA RAV4 2019 +9b6a2386fd03ae08|2023-07-13--12-22-52--3 +# TOYOTA RAV4 2019 +9a2bcd25135b821b|2023-07-08--10-55-13--13 +# TOYOTA RAV4 2019 +97799676cedffbe5|2023-06-14--18-24-55--19 +# TOYOTA RAV4 2019 +a08761c412401005|2023-05-23--19-01-36--12 +# TOYOTA RAV4 2019 +a27c12e2a2a70e31|2023-05-16--07-59-59--4 +# TOYOTA RAV4 2019 +9d5333455b91ee89|2023-07-07--19-32-53--2 +# TOYOTA RAV4 2019 +a158f766c4caa342|2023-05-13--12-27-56--97 +# TOYOTA RAV4 2019 +a8e906dda0b7438b|2023-06-08--11-06-31--14 +# TOYOTA RAV4 2019 +a919bd078978c0ab|2023-07-28--07-52-04--11 +# TOYOTA RAV4 2019 +a9aa5c6455c86571|2023-06-20--13-48-37--9 +# TOYOTA RAV4 2019 +ad00cbe5074c4fab|2023-07-26--15-06-11--76 +# TOYOTA RAV4 2019 +a7f1bfccea36b5eb|2023-07-14--15-30-38--5 +# TOYOTA RAV4 2019 +aeeec17f13cb4cef|2023-05-30--13-12-13--23 +# TOYOTA RAV4 2019 +af355970e532a9db|2023-06-11--15-19-23--60 +# TOYOTA RAV4 2019 +b303e820b96a73e9|2023-07-19--13-45-57--11 +# TOYOTA RAV4 2019 +b86cfd7190536fee|2023-06-04--19-32-09--1 +# TOYOTA RAV4 2019 +bc0ebfc25d121b5b|2023-07-08--17-31-57--3 +# TOYOTA RAV4 2019 +b740f7b3cb9bce43|2023-07-17--07-37-05--63 +# TOYOTA RAV4 2019 +c0a876c347ffbbe9|2023-05-11--08-40-32--7 +# TOYOTA RAV4 2019 +c98b94fb47ef8195|2023-07-21--16-10-23--62 +# TOYOTA RAV4 2019 +cea26252c5847413|2023-06-19--21-03-38--21 +# TOYOTA RAV4 2019 +ccc74bf50752879f|2023-06-16--15-04-07--14 +# TOYOTA RAV4 2019 +cd79e21eac267227|2023-05-28--19-05-31--24 +# TOYOTA RAV4 2019 +ce997a6490dec120|2023-05-24--10-21-17--28 +# TOYOTA RAV4 2019 +d3cd2946dfd5c271|2023-07-10--18-33-33--17 +# TOYOTA RAV4 2019 +d66e301c2c7277e1|2023-05-10--19-31-45--2 +# TOYOTA RAV4 2019 +dc49ffbba8a3b9dc|2023-06-08--18-18-49--24 +# TOYOTA RAV4 2019 +dc25f84f7b307cc9|2023-06-17--11-51-25--13 +# TOYOTA RAV4 2019 +df79933372901026|2023-07-06--18-03-06--16 +# TOYOTA RAV4 2019 +e15949cf8f8d7249|2023-07-31--22-54-31--10 +# TOYOTA RAV4 2019 +e1f6a3b4e19955a7|2023-07-09--17-04-05--15 +# TOYOTA RAV4 2019 +e461efc72ee56215|2023-07-01--11-14-35--138 +# TOYOTA RAV4 2019 +e87a90b4e7d2cbf6|2023-05-24--14-38-43--13 +# TOYOTA RAV4 2019 +e4bc15da1e30e125|2023-06-19--14-52-41--2 +# TOYOTA RAV4 2019 +e999ea6601c35fad|2023-06-06--21-56-30--1 +# TOYOTA RAV4 2019 +eb04174d0d305a32|2023-06-14--18-34-40--2 +# TOYOTA RAV4 2019 +e2fac89340529432|2023-05-12--21-16-54--33 +# TOYOTA RAV4 2019 +edb8ec2edf00089e|2023-07-11--14-36-05--31 +# TOYOTA RAV4 2019 +e9a117016fec049d|2023-07-23--10-19-20--2 +# TOYOTA RAV4 2019 +ed70a3fe1c252cfa|2023-07-03--11-18-29--6 +# TOYOTA RAV4 2019 +f0c3362708e50e2d|2023-07-29--17-46-22--5 +# TOYOTA RAV4 2019 +f3384252b57fdfe9|2023-08-08--01-57-06--18 
+# TOYOTA RAV4 2019 +ef69d3afc3c6db45|2023-07-04--12-23-05--8 +# TOYOTA RAV4 2019 +f3b7f7fe759fbfee|2023-07-09--18-08-17--1 +# TOYOTA RAV4 2019 +f43f8cf5f35d1c68|2023-05-15--12-02-55--19 +# TOYOTA RAV4 2019 +f9fcd2ad910e89db|2023-08-06--11-26-36--12 +# TOYOTA RAV4 2019 +f7fb88eece6ec686|2023-06-29--13-16-55--8 +# TOYOTA RAV4 2019 +fe70bc78933fe382|2023-06-26--13-08-03--243 +# TOYOTA RAV4 HYBRID 2017 +260738525c081f84|2023-06-07--15-56-35--13 +# TOYOTA RAV4 HYBRID 2017 +0fd770798a15a939|2023-06-29--07-57-15--19 +# TOYOTA RAV4 HYBRID 2017 +054808dce11e7aa0|2023-08-01--18-13-07--8 +# TOYOTA RAV4 HYBRID 2017 +0ae1be4d5f71ec38|2023-05-17--17-16-12--1 +# TOYOTA RAV4 HYBRID 2017 +39e928c1da9fa3d5|2023-06-15--11-58-12--24 +# TOYOTA RAV4 HYBRID 2017 +5c556eaf4a001e29|2023-06-06--08-53-51--17 +# TOYOTA RAV4 HYBRID 2017 +602b5e96b65d4762|2023-07-10--17-55-55--150 +# TOYOTA RAV4 HYBRID 2017 +694d657526334832|2023-06-11--18-18-58--9 +# TOYOTA RAV4 HYBRID 2017 +6967b97d12f377fc|2023-07-29--14-23-52--19 +# TOYOTA RAV4 HYBRID 2017 +731520c68c728363|2023-06-27--19-28-08--5 +# TOYOTA RAV4 HYBRID 2017 +76654637ecd7a147|2023-05-10--20-08-10--59 +# TOYOTA RAV4 HYBRID 2017 +71f1afdb4ec3b211|2023-05-12--09-32-27--15 +# TOYOTA RAV4 HYBRID 2017 +8db44088260c7e81|2023-08-02--19-23-22--5 +# TOYOTA RAV4 HYBRID 2017 +a809df548b49a68b|2023-07-09--18-05-37--9 +# TOYOTA RAV4 HYBRID 2017 +b4069565beef1091|2023-07-12--09-05-40--1 +# TOYOTA RAV4 HYBRID 2017 +c0ca616eaa24d73b|2023-07-21--17-29-37--65 +# TOYOTA RAV4 HYBRID 2017 +d243dcf7caee6052|2023-06-25--17-19-26--1 +# TOYOTA RAV4 HYBRID 2017 +d32dd4e182d86a39|2023-05-15--18-11-23--18 +# TOYOTA RAV4 HYBRID 2017 +fa0e4850a023f361|2023-06-14--07-46-06--19 +# HONDA ACCORD 2018 +0750f87f2b569f15|2023-07-21--22-10-13--6 +# HONDA ACCORD 2018 +2911ec305fa73239|2023-05-16--13-29-47--27 +# HONDA ACCORD 2018 +1ada25c684c79c9f|2023-06-23--19-30-03--7 +# HONDA ACCORD 2018 +03ae79f182fff0a3|2023-05-17--20-02-41--10 +# HONDA ACCORD 2018 +0c871d574e333eeb|2023-06-02--09-32-01--21 +# HONDA ACCORD 2018 +24207e2d7868334c|2023-06-29--17-12-39--9 +# HONDA ACCORD 2018 +1a74e8bc1353f9eb|2023-06-01--00-25-52--7 +# HONDA ACCORD 2018 +20761ff63dc0e2c1|2023-05-10--19-03-20--1 +# HONDA ACCORD 2018 +066f9f7cf3c7c459|2023-07-17--17-13-44--5 +# HONDA ACCORD 2018 +21c578cf98555ce9|2023-05-11--13-46-23--58 +# HONDA ACCORD 2018 +69689c771ec57486|2023-07-11--11-36-10--27 +# HONDA ACCORD 2018 +80e02967896fde0a|2023-07-23--12-04-28--20 +# HONDA ACCORD 2018 +abcb019b84d10680|2023-05-26--20-13-32--16 +# HONDA ACCORD 2018 +bbcdb91eec4efb11|2023-08-04--10-51-09--24 +# HONDA ACCORD 2018 +c40b5785213bcb5d|2023-07-07--12-22-36--15 +# HONDA ACCORD 2018 +c7d2ba6fdcf69eef|2023-07-28--14-53-45--55 +# HONDA ACCORD 2018 +e3ddeba86ad8eb59|2023-05-11--12-48-12--3 +# HONDA ACCORD 2018 +e84b9e1ed3e077d2|2023-06-15--17-59-02--30 +# HONDA ACCORD 2018 +e961d6edc8ee01d7|2023-05-26--15-23-15--10 +# HONDA ACCORD 2018 +f54b4ab5f24eb9d7|2023-07-31--10-38-00--22 +# HONDA ACCORD 2018 +f4696dfe648b7d20|2023-06-26--07-31-46--7 +# HONDA ACCORD 2018 +f68a86aa6ba710d5|2023-07-13--19-29-03--53 +# HYUNDAI SONATA 2020 +145908dabb729ad9|2023-05-14--09-50-20--3 +# HYUNDAI SONATA 2020 +053bf385cdad1079|2023-06-02--15-48-36--24 +# HYUNDAI SONATA 2020 +10b8dcf2fb80bf56|2023-08-06--02-07-08--86 +# HYUNDAI SONATA 2020 +026e1eb462c28560|2023-07-29--09-58-52--430 +# HYUNDAI SONATA 2020 +14118dc2aced6462|2023-05-15--21-05-33--13 +# HYUNDAI SONATA 2020 +12f39c1bf68b56eb|2023-07-27--17-00-21--1 +# HYUNDAI SONATA 2020 
+1edd7a8340016f2c|2023-05-26--10-01-21--1 +# HYUNDAI SONATA 2020 +310cf131d28b30d8|2023-06-08--10-57-46--24 +# HYUNDAI SONATA 2020 +3f796d94e84349fd|2023-06-10--14-30-13--5 +# HYUNDAI SONATA 2020 +40812bc457628b70|2023-07-01--12-06-20--38 +# HYUNDAI SONATA 2020 +4df896aa825bd9b3|2023-08-01--19-18-48--15 +# HYUNDAI SONATA 2020 +4a612c47eb83412a|2023-05-30--18-36-08--53 +# HYUNDAI SONATA 2020 +4960a9f1dd71aad1|2023-06-07--08-42-19--2 +# HYUNDAI SONATA 2020 +4b157737c2154617|2023-07-04--12-35-21--22 +# HYUNDAI SONATA 2020 +5c49984c8efb9c9c|2023-07-21--21-41-27--22 +# HYUNDAI SONATA 2020 +5b337c203559c300|2023-07-29--15-56-17--28 +# HYUNDAI SONATA 2020 +63f7cda2244f2338|2023-06-20--10-53-16--10 +# HYUNDAI SONATA 2020 +67cafbae8ee7f156|2023-05-15--21-35-53--57 +# HYUNDAI SONATA 2020 +67cc68f0290328c2|2023-07-25--01-33-25--14 +# HYUNDAI SONATA 2020 +700d37ccd12315cf|2023-06-01--16-13-43--68 +# HYUNDAI SONATA 2020 +7dd1f8f7f731715a|2023-05-29--20-33-34--28 +# HYUNDAI SONATA 2020 +7ef7d92481ec0ace|2023-05-20--11-58-25--6 +# HYUNDAI SONATA 2020 +89f56aec25cccf29|2023-05-31--16-32-32--12 +# HYUNDAI SONATA 2020 +895ddc214c42abea|2023-07-02--18-54-14--96 +# HYUNDAI SONATA 2020 +87aed74d45ea588f|2023-07-12--10-02-13--1 +# HYUNDAI SONATA 2020 +8d162f50f95b7128|2023-08-06--22-34-03--67 +# HYUNDAI SONATA 2020 +9ea4578ee2b1abcb|2023-05-26--07-04-55--18 +# HYUNDAI SONATA 2020 +a15bf814a6e3acc1|2023-07-26--14-52-00--4 +# HYUNDAI SONATA 2020 +a8078ee810ef83d6|2023-08-06--14-35-34--6 +# HYUNDAI SONATA 2020 +aaf2f8e95bfb437f|2023-08-05--23-38-18--38 +# HYUNDAI SONATA 2020 +ad68728bc42b1357|2023-07-10--22-22-02--27 +# HYUNDAI SONATA 2020 +b0af2fff166d80fe|2023-06-10--11-51-40--1 +# HYUNDAI SONATA 2020 +c86ea37c4a66ea6e|2023-07-27--12-27-23--22 +# HYUNDAI SONATA 2020 +cb7eee5264754a21|2023-08-07--09-30-49--3 +# HYUNDAI SONATA 2020 +c22d16eacdaf890d|2023-07-16--18-51-06--49 +# HYUNDAI SONATA 2020 +d9855ccd97606ca8|2023-05-28--12-12-29--1 +# HYUNDAI SONATA 2020 +d63936f7ce28d591|2023-05-24--16-36-34--63 +# HYUNDAI SONATA 2020 +d4958b8ee5ef5cb3|2023-06-02--16-39-43--2 +# HYUNDAI SONATA 2020 +e7369387e5ed2414|2023-05-11--08-24-27--74 +# HYUNDAI SONATA 2020 +e5904ea020532291|2023-07-22--12-16-02--17 +# HYUNDAI SONATA 2020 +e8b1f9ef35c46678|2023-07-27--17-16-47--5 +# HYUNDAI SONATA 2020 +ffce4a1d578827ee|2023-06-28--06-38-06--12 +# LEXUS RX 2020 +06a5dec26667d10e|2023-07-25--17-17-48--6 +# LEXUS RX 2020 +09ca5f224952b7f6|2023-07-19--19-40-11--12 +# LEXUS RX 2020 +08c881f7590e2420|2023-07-30--13-09-14--10 +# LEXUS RX 2020 +cc988c4ab06633fb|2023-05-12--21-16-33--41 +# TOYOTA PRIUS 2017 +167e954cba293640|2023-05-21--13-46-18--149 +# TOYOTA PRIUS 2017 +0c84cb73d1fb3a8b|2023-07-26--07-13-06--13 +# TOYOTA PRIUS 2017 +07387c2cc03c9b81|2023-06-22--16-56-50--5 +# TOYOTA PRIUS 2017 +0069f91b2663b892|2023-07-22--00-39-50--24 +# TOYOTA PRIUS 2017 +201dc5bb0067f5f2|2023-05-19--12-48-00--25 +# TOYOTA PRIUS 2017 +1e5629342e716c0a|2023-07-17--06-38-37--3 +# TOYOTA PRIUS 2017 +17c6857c5789af97|2023-07-21--19-51-17--6 +# TOYOTA PRIUS 2017 +232dd0fe26a73c53|2023-05-30--18-12-34--10 +# TOYOTA PRIUS 2017 +25d5c0772e79f4db|2023-08-03--13-54-36--17 +# TOYOTA PRIUS 2017 +0028c4ef0c8f6c30|2023-05-30--09-22-40--10 +# TOYOTA PRIUS 2017 +19e0a2420a8751bf|2023-07-16--09-41-33--9 +# TOYOTA PRIUS 2017 +1f5701894610dcd1|2023-05-13--09-02-34--6 +# TOYOTA PRIUS 2017 +15b5e12b452a992e|2023-06-13--00-02-42--12 +# TOYOTA PRIUS 2017 +0cb8c50b9ee4f5ea|2023-05-12--21-04-13--3 +# TOYOTA PRIUS 2017 +0ef0d6b0644cd7a6|2023-06-09--14-40-33--1 +# TOYOTA PRIUS 2017 
+21dfd2256ee0a693|2023-07-20--13-06-55--36 +# TOYOTA PRIUS 2017 +26805bfeb242559b|2023-05-14--17-59-27--12 +# TOYOTA PRIUS 2017 +2f8790e5ac2b13e0|2023-07-22--11-03-25--5 +# TOYOTA PRIUS 2017 +2f155ba65825554f|2023-07-02--11-55-59--1 +# TOYOTA PRIUS 2017 +2eab2aa4530e03ba|2023-05-13--08-48-32--3 +# TOYOTA PRIUS 2017 +2ff4529579d6e1dc|2023-06-25--21-06-34--17 +# TOYOTA PRIUS 2017 +34faeeba08c3cc97|2023-07-30--13-22-33--14 +# TOYOTA PRIUS 2017 +3684fea792cff931|2023-08-01--20-59-40--20 +# TOYOTA PRIUS 2017 +39d749cf71d12a8e|2023-08-04--22-54-18--1 +# TOYOTA PRIUS 2017 +3f559b9878a7ccad|2023-06-21--07-22-48--15 +# TOYOTA PRIUS 2017 +457cc6df3c5f2ee1|2023-07-09--12-28-54--22 +# TOYOTA PRIUS 2017 +46b651928e9cda0f|2023-07-12--17-19-44--3 +# TOYOTA PRIUS 2017 +451e03ec9ff02db4|2023-05-27--11-30-37--8 +# TOYOTA PRIUS 2017 +4ae18286107b36b0|2023-06-07--14-03-47--2 +# TOYOTA PRIUS 2017 +4617eff9ed63ea3f|2023-07-19--17-39-20--36 +# TOYOTA PRIUS 2017 +4e36531fa34978a9|2023-05-31--15-32-40--2 +# TOYOTA PRIUS 2017 +4aa5341890583ca0|2023-05-29--11-13-49--92 +# TOYOTA PRIUS 2017 +4f2e3cebc8f3c715|2023-06-01--11-51-40--21 +# TOYOTA PRIUS 2017 +4eb29c14f8b26ba3|2023-07-25--14-09-36--8 +# TOYOTA PRIUS 2017 +52f69098ec1f801b|2023-05-16--16-10-28--3 +# TOYOTA PRIUS 2017 +51898c9b8a83d1d9|2023-07-30--23-27-13--22 +# TOYOTA PRIUS 2017 +5548e173b952a76b|2023-07-28--20-16-50--14 +# TOYOTA PRIUS 2017 +55cbd4260394fa17|2023-06-25--13-33-59--11 +# TOYOTA PRIUS 2017 +553ad02d6e7fa60e|2023-07-31--17-43-57--7 +# TOYOTA PRIUS 2017 +5e5d69c771e74940|2023-06-07--12-21-34--13 +# TOYOTA PRIUS 2017 +5a466bb77d1d0050|2023-05-12--11-49-47--9 +# TOYOTA PRIUS 2017 +5f4043fdfdd41dd5|2023-05-15--16-56-34--31 +# TOYOTA PRIUS 2017 +6318c59fa2535f38|2023-06-04--08-04-19--1 +# TOYOTA PRIUS 2017 +6371ff697bb4c045|2023-08-07--03-18-49--28 +# TOYOTA PRIUS 2017 +667087bd361ef7b1|2023-06-09--13-28-53--15 +# TOYOTA PRIUS 2017 +675c0d0c13fea348|2023-07-30--21-42-40--14 +# TOYOTA PRIUS 2017 +6925b5e1f713c09a|2023-05-19--12-25-25--4 +# TOYOTA PRIUS 2017 +754d115c7e89ca90|2023-07-07--16-40-57--4 +# TOYOTA PRIUS 2017 +718c2708b8bc1078|2023-07-18--08-32-40--14 +# TOYOTA PRIUS 2017 +748cc5e00e00d2f8|2023-07-14--11-13-29--122 +# TOYOTA PRIUS 2017 +7897d61fcfd64f73|2023-08-04--22-00-59--19 +# TOYOTA PRIUS 2017 +771fcc78f0aa53f0|2023-07-05--15-37-38--35 +# TOYOTA PRIUS 2017 +7ff274e2d6e08599|2023-08-04--20-03-11--4 +# TOYOTA PRIUS 2017 +7efdefad37338f74|2023-06-21--11-35-24--6 +# TOYOTA PRIUS 2017 +8347883441d02d13|2023-07-31--09-57-00--16 +# TOYOTA PRIUS 2017 +823d9d53d4e86649|2023-07-18--11-02-27--10 +# TOYOTA PRIUS 2017 +88f9ec172917bb98|2023-07-14--05-46-09--87 +# TOYOTA PRIUS 2017 +8df89195cd79c215|2023-06-23--13-51-00--12 +# TOYOTA PRIUS 2017 +84e716ed147ae3d7|2023-07-23--13-26-35--3 +# TOYOTA PRIUS 2017 +8c753fb54c2ad8d8|2023-07-27--17-51-39--39 +# TOYOTA PRIUS 2017 +903ac8da1aa52026|2023-08-01--17-00-11--20 +# TOYOTA PRIUS 2017 +973137405a15dccb|2023-06-04--11-13-45--13 +# TOYOTA PRIUS 2017 +9ad0cccf33247e18|2023-06-22--20-29-34--2 +# TOYOTA PRIUS 2017 +982b88e8363dc93b|2023-06-04--11-23-29--34 +# TOYOTA PRIUS 2017 +a074a62144234949|2023-07-08--18-35-42--1 +# TOYOTA PRIUS 2017 +a1206dd4a0d4d083|2023-07-19--12-54-21--52 +# TOYOTA PRIUS 2017 +a0ca4450fae65c5e|2023-06-08--07-07-25--29 +# TOYOTA PRIUS 2017 +a3b7335f488f53c2|2023-05-14--12-06-21--6 +# TOYOTA PRIUS 2017 +9f913ea6594100bf|2023-06-09--10-21-06--2 +# TOYOTA PRIUS 2017 +a55e88a02483439c|2023-07-19--12-11-58--57 +# TOYOTA PRIUS 2017 +a750926c99e2b60b|2023-05-31--15-00-56--5 +# TOYOTA 
PRIUS 2017 +ac11bb2bc0f2a7f7|2023-08-06--16-12-30--23 +# TOYOTA PRIUS 2017 +b222271cbe10254d|2023-07-03--10-37-50--17 +# TOYOTA PRIUS 2017 +b54d8a1ef1c820d3|2023-07-05--10-17-32--7 +# TOYOTA PRIUS 2017 +afc64e738cb8a1be|2023-06-11--16-26-28--28 +# TOYOTA PRIUS 2017 +b0b65204fa492a96|2023-05-13--11-31-33--18 +# TOYOTA PRIUS 2017 +bb94f981f4431981|2023-07-03--03-15-39--2 +# TOYOTA PRIUS 2017 +ba01e1864cd4ac61|2023-07-31--19-28-56--4 +# TOYOTA PRIUS 2017 +b68db0665d641d74|2023-07-08--16-56-57--123 +# TOYOTA PRIUS 2017 +ba94ba717fccb2f0|2023-07-11--07-47-11--1 +# TOYOTA PRIUS 2017 +bb5708908e46bcba|2023-07-12--09-08-55--10 +# TOYOTA PRIUS 2017 +bef5f54503850405|2023-06-04--10-32-31--66 +# TOYOTA PRIUS 2017 +c0c02897f03b7c7b|2023-06-23--08-31-50--10 +# TOYOTA PRIUS 2017 +c155b927e07113ad|2023-07-14--20-14-18--103 +# TOYOTA PRIUS 2017 +c38f860d281bcc5e|2023-06-09--20-09-02--17 +# TOYOTA PRIUS 2017 +c67dbd0e8b4cc29d|2023-06-05--20-53-34--9 +# TOYOTA PRIUS 2017 +c02c848a64d5177c|2023-07-06--08-45-28--13 +# TOYOTA PRIUS 2017 +cc2b9c9f221f1812|2023-07-31--17-15-47--11 +# TOYOTA PRIUS 2017 +cd0e6154e0cb0484|2023-08-05--15-28-19--1 +# TOYOTA PRIUS 2017 +c848c39dfb2de30f|2023-06-23--22-44-03--1 +# TOYOTA PRIUS 2017 +d68f8916cb7adfdf|2023-06-21--06-37-56--11 +# TOYOTA PRIUS 2017 +d67f123a82aaf50e|2023-05-12--13-49-59--2 +# TOYOTA PRIUS 2017 +e057838a043251cf|2023-07-27--14-13-22--5 +# TOYOTA PRIUS 2017 +e15020c852627a0a|2023-07-26--20-49-08--4 +# TOYOTA PRIUS 2017 +e206ddb85e53cc0d|2023-07-31--11-28-07--10 +# TOYOTA PRIUS 2017 +dd311669c55d1ccf|2023-06-25--16-57-05--1 +# TOYOTA PRIUS 2017 +e31072835e1cdc73|2023-05-20--21-59-30--69 +# TOYOTA PRIUS 2017 +ea43e1e2911e37e5|2023-07-07--03-34-54--1 +# TOYOTA PRIUS 2017 +e8cb87d767f608dd|2023-07-14--19-23-30--16 +# TOYOTA PRIUS 2017 +f1c69863517027e8|2023-07-29--12-47-02--17 +# TOYOTA PRIUS 2017 +f4b656c4aaf63360|2023-06-28--18-52-46--99 +# TOYOTA PRIUS 2017 +f8046cede9c720b1|2023-08-06--18-46-48--4 +# TOYOTA PRIUS 2017 +f8e3dfca6df16c7e|2023-05-15--11-33-42--7 +# TOYOTA PRIUS 2017 +f921646c299a6d36|2023-07-01--17-16-09--119 +# TOYOTA PRIUS 2017 +ff634b088ec2db2d|2023-06-16--15-14-04--39 +# HONDA RIDGELINE 2017 +0ddc5f974851a022|2023-06-11--18-08-25--104 +# HONDA RIDGELINE 2017 +1551280de648df83|2023-06-27--19-23-19--5 +# HONDA RIDGELINE 2017 +24187316e568f2ec|2023-07-25--10-57-53--1 +# HONDA RIDGELINE 2017 +1ce42f7a69e4b11c|2023-07-03--13-09-36--28 +# HONDA RIDGELINE 2017 +263d65b0fd18f5c4|2023-07-18--16-42-57--44 +# HONDA RIDGELINE 2017 +19fe0c3f46444eeb|2023-06-12--20-07-52--1 +# HONDA RIDGELINE 2017 +3aa992e52b91bf8f|2023-07-28--15-22-10--5 +# HONDA RIDGELINE 2017 +6acf79c245a32eb3|2023-07-08--17-40-23--17 +# HONDA RIDGELINE 2017 +6e2f19b5263c9d65|2023-07-16--09-56-19--23 +# HONDA RIDGELINE 2017 +701b7d276e45a7bf|2023-07-31--15-17-26--28 +# HONDA RIDGELINE 2017 +715416e4439e555e|2023-08-03--14-53-14--1 +# HONDA RIDGELINE 2017 +732c29f8108b54a9|2023-07-30--10-59-07--35 +# HONDA RIDGELINE 2017 +758cfd09da38d125|2023-07-24--11-45-13--1 +# HONDA RIDGELINE 2017 +7fe0389b26adfb30|2023-05-12--13-55-02--3 +# HONDA RIDGELINE 2017 +7f498d0d7b378766|2023-06-08--15-18-05--47 +# HONDA RIDGELINE 2017 +805f226ab2a8b1d7|2023-07-05--14-03-54--10 +# HONDA RIDGELINE 2017 +9824e24dd1574abc|2023-06-09--19-55-12--14 +# HONDA RIDGELINE 2017 +9b29873dd03e9357|2023-05-30--10-10-24--4 +# HONDA RIDGELINE 2017 +9bdc08e34137d504|2023-05-28--13-07-50--10 +# HONDA RIDGELINE 2017 +a9648fa1431cf7a8|2023-05-13--18-40-53--33 +# HONDA RIDGELINE 2017 +aaa226a738d37659|2023-05-19--11-59-08--5 
+# HONDA RIDGELINE 2017 +a7252f38a01fc213|2023-06-20--16-17-58--2 +# HONDA RIDGELINE 2017 +adbbce5339634eae|2023-08-03--17-35-47--8 +# HONDA RIDGELINE 2017 +ac97f88de8b23b6b|2023-06-26--11-44-14--6 +# HONDA RIDGELINE 2017 +b3c88ee6c378a66e|2023-07-24--08-47-23--6 +# HONDA RIDGELINE 2017 +bf0fac55013ab937|2023-07-10--10-23-24--17 +# HONDA RIDGELINE 2017 +c5e0d17adaaefd82|2023-07-22--16-12-30--58 +# HONDA RIDGELINE 2017 +d169250d735e1346|2023-06-08--16-35-51--9 +# HONDA RIDGELINE 2017 +d084ea2309575a71|2023-05-18--17-25-07--14 +# HONDA RIDGELINE 2017 +d5dd26679a932af9|2023-07-04--09-39-08--18 +# HONDA RIDGELINE 2017 +e52c055a77cac84c|2023-06-05--17-27-17--17 +# HONDA RIDGELINE 2017 +e51512df42d845d0|2023-06-05--14-27-56--53 +# HONDA RIDGELINE 2017 +e4bcdeea7c6f1fd3|2023-07-10--11-47-50--17 +# HONDA RIDGELINE 2017 +f53658bd0c1ccf07|2023-07-28--19-28-13--12 +# TOYOTA RAV4 2019 +135e1e376668a393|2023-06-25--19-00-07--36 +# TOYOTA RAV4 2019 +0de4f23a490a8502|2023-07-19--17-54-03--7 +# TOYOTA RAV4 2019 +01a8bd5caab4a678|2023-06-15--10-36-38--2 +# TOYOTA RAV4 2019 +0dcc1daec4a26b89|2023-06-12--12-15-52--4 +# TOYOTA RAV4 2019 +180ea5df0f0b4db9|2023-07-18--22-50-24--6 +# TOYOTA RAV4 2019 +28de01246b5d6401|2023-06-19--16-24-29--8 +# TOYOTA RAV4 2019 +166ed2b12f634fb7|2023-06-05--19-15-04--13 +# TOYOTA RAV4 2019 +1f623af84e701b2a|2023-06-29--16-42-01--3 +# TOYOTA RAV4 2019 +1ccd70c07ff29d9f|2023-05-17--20-26-06--5 +# TOYOTA RAV4 2019 +1199c1d31bb3a214|2023-06-13--14-23-39--16 +# TOYOTA RAV4 2019 +24b7eef92acb725c|2023-06-25--14-09-20--9 +# TOYOTA RAV4 2019 +1ab6dacda17ac28a|2023-05-22--18-38-12--11 +# TOYOTA RAV4 2019 +1da1a0519b1669c3|2023-07-01--22-29-45--18 +# TOYOTA RAV4 2019 +0f8a92e2122ea309|2023-07-26--10-46-14--3 +# TOYOTA RAV4 2019 +0f60d680150676d0|2023-06-27--05-57-53--2 +# TOYOTA RAV4 2019 +166a8fc9c484866b|2023-07-26--08-14-07--12 +# TOYOTA RAV4 2019 +2a2249f498ddc22c|2023-07-04--13-41-52--3 +# TOYOTA RAV4 2019 +28947140cc79e4cf|2023-08-05--12-17-34--41 +# TOYOTA RAV4 2019 +028a208d08a11420|2023-07-22--15-41-34--60 +# TOYOTA RAV4 2019 +336f33d84fa8d5db|2023-07-29--16-59-09--6 +# TOYOTA RAV4 2019 +3ae010558239929d|2023-05-31--19-06-35--19 +# TOYOTA RAV4 2019 +395fe23cc0a51896|2023-06-25--10-15-45--48 +# TOYOTA RAV4 2019 +3f3a48346848df21|2023-07-26--16-15-13--1 +# TOYOTA RAV4 2019 +40f6907c6b554368|2023-06-17--05-51-38--66 +# TOYOTA RAV4 2019 +41f4ae6d3b79a9b1|2023-08-03--09-20-24--6 +# TOYOTA RAV4 2019 +45edf064d369bff9|2023-06-09--12-21-08--11 +# TOYOTA RAV4 2019 +4a389c94e0a137cc|2023-05-17--17-52-33--4 +# TOYOTA RAV4 2019 +4dfda86b67cad08f|2023-08-01--12-25-44--1 +# TOYOTA RAV4 2019 +4d0c6516a05013a1|2023-05-10--17-14-40--59 +# TOYOTA RAV4 2019 +4e4a768f20ae1e96|2023-06-06--10-43-02--36 +# TOYOTA RAV4 2019 +5175d97daef0d2ce|2023-07-02--12-18-55--8 +# TOYOTA RAV4 2019 +4a4e7f053b5730d9|2023-05-31--17-00-07--20 +# TOYOTA RAV4 2019 +523a062b170f5402|2023-05-24--15-37-42--23 +# TOYOTA RAV4 2019 +4857e763b605484d|2023-07-12--15-33-09--27 +# TOYOTA RAV4 2019 +601862be98f8db50|2023-08-06--01-03-26--37 +# TOYOTA RAV4 2019 +615671030be02e4e|2023-05-23--15-20-04--21 +# TOYOTA RAV4 2019 +5f70a3bed42b7932|2023-08-06--19-00-39--2 +# TOYOTA RAV4 2019 +628ea5913838ecc2|2023-05-23--21-38-48--3 +# TOYOTA RAV4 2019 +60093534d6d04cd2|2023-06-14--12-21-58--40 +# TOYOTA RAV4 2019 +6adb1b2f007ef2e0|2023-08-03--15-57-43--8 +# TOYOTA RAV4 2019 +672d2d359ba20275|2023-08-05--22-39-34--84 +# TOYOTA RAV4 2019 +666f3e6d7f7f02ab|2023-07-21--05-45-19--3 +# TOYOTA RAV4 2019 +6b0a418312ab1345|2023-08-07--06-41-16--1 
+# TOYOTA RAV4 2019 +6f74e02218e61a6d|2023-07-22--09-59-20--8 +# TOYOTA RAV4 2019 +715bdc3654d57eb0|2023-07-23--19-41-44--8 +# TOYOTA RAV4 2019 +74ef295ef2f668fd|2023-06-14--22-25-34--27 +# TOYOTA RAV4 2019 +82d5f13f2a583c11|2023-07-01--09-18-28--21 +# TOYOTA RAV4 2019 +8074c8663302e56c|2023-06-08--20-59-32--61 +# TOYOTA RAV4 2019 +8610459df75c5ac1|2023-05-20--10-16-13--14 +# TOYOTA RAV4 2019 +890069284dace3bf|2023-07-22--11-08-46--45 +# TOYOTA RAV4 2019 +8da230f47b0fef44|2023-06-09--20-19-37--27 +# TOYOTA RAV4 2019 +8aad58f199ab2bed|2023-06-08--13-17-02--3 +# TOYOTA RAV4 2019 +8e29ec646f62a1d2|2023-08-04--14-40-06--2 +# TOYOTA RAV4 2019 +84e20ddfaf4c56d8|2023-07-25--07-10-21--12 +# TOYOTA RAV4 2019 +8d99fb063bc66a26|2023-06-02--19-49-30--27 +# TOYOTA RAV4 2019 +91f940f4a03e5406|2023-08-02--12-44-05--3 +# TOYOTA RAV4 2019 +8efd4729c72a4da9|2023-06-02--16-40-33--8 +# TOYOTA RAV4 2019 +9837fffee4d677f5|2023-06-16--21-35-16--30 +# TOYOTA RAV4 2019 +96b6b95903a330a4|2023-07-08--15-45-40--19 +# TOYOTA RAV4 2019 +9d96018bfdd9ac61|2023-06-13--15-55-43--10 +# TOYOTA RAV4 2019 +9d3747d13e211f85|2023-07-04--17-51-45--1 +# TOYOTA RAV4 2019 +a28a6e3882c1cdf1|2023-05-19--12-34-50--19 +# TOYOTA RAV4 2019 +a61651a726f6bf4d|2023-06-23--12-49-05--47 +# TOYOTA RAV4 2019 +b18225558cb35a1e|2023-07-17--15-46-21--14 +# TOYOTA RAV4 2019 +add7bff9cfb270ba|2023-05-18--22-26-57--11 +# TOYOTA RAV4 2019 +b6508eacf6f008df|2023-05-21--17-31-08--3 +# TOYOTA RAV4 2019 +b97486b552c1f5e3|2023-07-30--15-17-31--26 +# TOYOTA RAV4 2019 +be98421ae126bf91|2023-06-02--16-52-56--13 +# TOYOTA RAV4 2019 +c49a105de4a54ea1|2023-07-07--21-29-40--199 +# TOYOTA RAV4 2019 +d05aaa4d42d7c441|2023-06-05--22-11-24--3 +# TOYOTA RAV4 2019 +ca9194f601494b20|2023-05-24--17-40-49--23 +# TOYOTA RAV4 2019 +d3d6397ff2e7052e|2023-05-29--15-57-39--24 +# TOYOTA RAV4 2019 +cca5f152e7f637f0|2023-05-22--11-01-03--43 +# TOYOTA RAV4 2019 +d3e27ee09a40d1fd|2023-08-02--18-21-22--7 +# TOYOTA RAV4 2019 +d935e25b7e6c725f|2023-07-15--07-36-42--45 +# TOYOTA RAV4 2019 +e1905174a6abbc3c|2023-07-18--07-13-51--2 +# TOYOTA RAV4 2019 +e7cd4ae3bee72b52|2023-07-05--20-45-50--29 +# TOYOTA RAV4 2019 +e7571e0b3674d663|2023-06-23--21-10-27--1 +# TOYOTA RAV4 2019 +e712b42b94578404|2023-05-24--11-21-11--2 +# TOYOTA RAV4 2019 +ebe4b1a7667f231a|2023-06-27--15-23-46--3 +# TOYOTA RAV4 2019 +f30a29d40aa4a4f9|2023-05-17--16-55-57--66 +# TOYOTA RAV4 2019 +f6695ef06f916f23|2023-06-09--17-44-36--14 +# TOYOTA RAV4 2019 +fbff49b354d8a801|2023-07-21--13-52-37--2 +# TOYOTA RAV4 2019 +fc6c359e0e92884f|2023-05-14--13-50-56--9 +# TOYOTA RAV4 2017 +204be5d78daf7006|2023-06-15--07-35-31--11 +# TOYOTA RAV4 2017 +1cbdc1fbae86b49f|2023-06-09--16-31-47--10 +# TOYOTA RAV4 2017 +0655a3f86baf711f|2023-05-17--18-09-14--5 +# TOYOTA RAV4 2017 +28ca2ff434583ddc|2023-06-03--10-49-10--3 +# TOYOTA RAV4 2017 +23940cdb2383e639|2023-07-07--13-06-17--19 +# TOYOTA RAV4 2017 +3932cd29d3fe4a4b|2023-07-27--13-26-25--1 +# TOYOTA RAV4 2017 +2e5de09806977462|2023-07-01--02-13-50--2 +# TOYOTA RAV4 2017 +4a0de7e463e259ef|2023-08-03--14-16-49--2 +# TOYOTA RAV4 2017 +5e9be1efcd6ca25c|2023-07-19--15-05-58--38 +# TOYOTA RAV4 2017 +5d76166971f7ee03|2023-05-22--21-51-32--24 +# TOYOTA RAV4 2017 +6c5718d5c35b6aa8|2023-07-14--23-33-06--40 +# TOYOTA RAV4 2017 +6cd3f6f236b89cbb|2023-06-04--20-29-13--1 +# TOYOTA RAV4 2017 +829a18f6611a0593|2023-07-19--17-06-18--25 +# TOYOTA RAV4 2017 +8ab2027f3c00b364|2023-07-03--15-21-24--114 +# TOYOTA RAV4 2017 +8bf48c046a7750ec|2023-06-08--13-30-25--2 +# TOYOTA RAV4 2017 
+98f88292bbae4624|2023-07-21--15-07-11--90 +# TOYOTA RAV4 2017 +a099e968ef197bc0|2023-05-24--06-36-30--2 +# TOYOTA RAV4 2017 +aa332893d92ff60a|2023-07-04--22-48-05--7 +# TOYOTA RAV4 2017 +ade7b7b6f44ba9ea|2023-08-04--16-52-21--34 +# TOYOTA RAV4 2017 +c62d83d70a8ae2b3|2023-06-06--21-39-21--43 +# TOYOTA RAV4 2017 +c9d0e3690e929c02|2023-07-06--20-22-25--25 +# TOYOTA RAV4 2017 +ceacc4d097850b24|2023-05-19--00-11-34--2 +# TOYOTA RAV4 2017 +c84e969db16a7821|2023-07-04--18-50-40--1 +# TOYOTA RAV4 2017 +dccc846a6d7f0198|2023-08-04--13-38-05--6 +# TOYOTA RAV4 2017 +e54de504cefdd500|2023-05-26--13-30-47--14 +# TOYOTA RAV4 2017 +fbc6e1316bac1a40|2023-07-30--18-48-20--18 +# CHRYSLER PACIFICA HYBRID 2018 +00d247a9bb1f9196|2023-06-25--00-50-52--16 +# CHRYSLER PACIFICA HYBRID 2018 +0f53b336851e1384|2023-07-06--20-48-35--28 +# CHRYSLER PACIFICA HYBRID 2018 +19657d9411ef4f1c|2023-06-19--15-37-12--16 +# CHRYSLER PACIFICA HYBRID 2018 +05a30afc5c891372|2023-07-29--13-14-28--1 +# CHRYSLER PACIFICA HYBRID 2018 +268be6212ca01164|2023-05-26--10-55-29--20 +# CHRYSLER PACIFICA HYBRID 2018 +2fc676162569f5f0|2023-05-27--21-30-31--6 +# CHRYSLER PACIFICA HYBRID 2018 +34a660c3c3aa6310|2023-06-09--16-51-25--7 +# CHRYSLER PACIFICA HYBRID 2018 +6480130cc87ac628|2023-07-30--19-17-09--13 +# CHRYSLER PACIFICA HYBRID 2018 +796aba999d046d7d|2023-07-12--14-19-33--5 +# CHRYSLER PACIFICA HYBRID 2018 +7620ad20d3cefc64|2023-05-19--08-49-37--5 +# CHRYSLER PACIFICA HYBRID 2018 +80d3ea3484ea9cfd|2023-05-27--15-01-03--101 +# CHRYSLER PACIFICA HYBRID 2018 +ba0a98377680c14d|2023-07-19--19-00-32--2 +# CHRYSLER PACIFICA HYBRID 2018 +f87ec6de63679cb6|2023-05-29--13-01-37--1 +# LEXUS NX 2018 +21b5c2189fb5972e|2023-07-12--12-15-44--4 +# LEXUS NX 2018 +22feb9142bcc73c9|2023-08-01--17-37-30--60 +# LEXUS NX 2018 +6bd8524bfd558e75|2023-06-30--14-15-54--4 +# LEXUS NX 2018 +83ae8304745fad7e|2023-05-17--20-16-52--148 +# JEEP GRAND CHEROKEE V6 2018 +15afba6e8f2a793d|2023-07-02--09-47-25--36 +# JEEP GRAND CHEROKEE V6 2018 +0448d4c7552b98c6|2023-07-15--14-01-58--1 +# JEEP GRAND CHEROKEE V6 2018 +044cbaf7278c5761|2023-05-15--18-58-25--11 +# JEEP GRAND CHEROKEE V6 2018 +326edad388d0a5e2|2023-07-01--19-19-08--2 +# JEEP GRAND CHEROKEE V6 2018 +37f25823e3b0d3e2|2023-07-08--15-07-03--8 +# JEEP GRAND CHEROKEE V6 2018 +5ec00346ae32f57f|2023-08-01--05-39-55--26 +# JEEP GRAND CHEROKEE V6 2018 +641d9bfd161cccd7|2023-06-29--17-52-45--7 +# JEEP GRAND CHEROKEE V6 2018 +900dfa83b4addfe6|2023-05-11--15-01-42--45 +# JEEP GRAND CHEROKEE V6 2018 +9db428338427dec2|2023-07-12--21-25-15--26 +# JEEP GRAND CHEROKEE V6 2018 +aed0854cefa55533|2023-07-07--21-01-07--4 +# JEEP GRAND CHEROKEE V6 2018 +c692ccc84342a478|2023-06-24--16-40-17--14 +# JEEP GRAND CHEROKEE V6 2018 +d50ada8ee55a5e74|2023-07-03--19-45-11--9 +# JEEP GRAND CHEROKEE V6 2018 +e56b1d3147b6a8f0|2023-07-30--12-51-42--38 +# KIA NIRO EV 2020 +05b1a2556c5bc638|2023-08-07--12-47-27--7 +# KIA NIRO EV 2020 +180ead4c4c126f9d|2023-06-08--20-02-58--6 +# KIA NIRO EV 2020 +36abc44c4f296c7a|2023-05-19--16-15-28--3 +# KIA NIRO EV 2020 +400c0f76b5a1aa90|2023-06-03--21-47-52--15 +# KIA NIRO EV 2020 +561e5f3991916e4b|2023-08-05--15-06-08--15 +# KIA NIRO EV 2020 +6d2b75c2e67bf314|2023-07-29--10-55-10--16 +# KIA NIRO EV 2020 +70a73b2fbd820de4|2023-07-31--16-50-14--17 +# KIA NIRO EV 2020 +89c7460182ce76eb|2023-06-07--22-19-13--63 +# KIA NIRO EV 2020 +9b052af2986bbe3c|2023-08-07--20-11-53--9 +# KIA NIRO EV 2020 +b382b3c7a6e6c4a3|2023-05-14--12-40-13--2 +# KIA NIRO EV 2020 +b576d2ff8a193b4a|2023-07-30--17-49-23--16 +# KIA NIRO EV 2020 
+b5cebe8c56d710c5|2023-05-10--20-26-44--26 +# KIA NIRO EV 2020 +c04b1653edb84818|2023-05-16--23-20-13--17 +# KIA NIRO EV 2020 +c8f86b163152d2c5|2023-05-28--10-13-22--2 +# KIA NIRO EV 2020 +d5f923604b1b2f75|2023-05-24--07-29-50--14 +# KIA NIRO EV 2020 +dbda33c67462b907|2023-05-13--12-03-22--21 +# KIA NIRO EV 2020 +e81862333a36d2f7|2023-07-13--09-59-47--19 +# KIA NIRO EV 2020 +fc4cfe20b331a575|2023-07-31--10-34-38--1 +# HONDA CIVIC 2016 +0d922f5798ae65ed|2023-08-01--14-56-54--14 +# HONDA CIVIC 2016 +20994bc5331f98c0|2023-07-26--19-09-06--4 +# HONDA CIVIC 2016 +123dce2cbb7370cd|2023-06-08--20-14-38--19 +# HONDA CIVIC 2016 +1230ef5ea53311d6|2023-05-16--22-44-05--2 +# HONDA CIVIC 2016 +242b33685ed4e03a|2023-08-02--09-29-10--7 +# HONDA CIVIC 2016 +35ca75966798efba|2023-05-23--18-42-57--18 +# HONDA CIVIC 2016 +3e1e2b62f3f2ac3d|2023-07-12--18-23-44--5 +# HONDA CIVIC 2016 +429b106d0b800755|2023-06-03--22-13-41--2 +# HONDA CIVIC 2016 +4a080fe908e25eb2|2023-05-12--20-21-48--18 +# HONDA CIVIC 2016 +52534ebb0e145937|2023-07-14--11-40-16--5 +# HONDA CIVIC 2016 +5cfa1ad8f35779d0|2023-05-10--13-31-39--7 +# HONDA CIVIC 2016 +5dfdb608ac9bc838|2023-05-22--16-58-36--43 +# HONDA CIVIC 2016 +5dedd0807e354b45|2023-08-03--16-29-27--32 +# HONDA CIVIC 2016 +675bf95ba0ce21b7|2023-06-25--12-39-50--1 +# HONDA CIVIC 2016 +7528525a29a9d9e6|2023-07-23--01-21-59--24 +# HONDA CIVIC 2016 +7496d76ba25de13f|2023-07-22--10-48-35--5 +# HONDA CIVIC 2016 +79b03b378a603ec8|2023-05-23--05-02-29--8 +# HONDA CIVIC 2016 +8d80c16a8485aaf3|2023-05-18--21-35-54--75 +# HONDA CIVIC 2016 +9098b6df85df4bab|2023-07-12--19-32-41--22 +# HONDA CIVIC 2016 +a182b0586ed5197f|2023-07-27--19-50-29--3 +# HONDA CIVIC 2016 +a3f025079c99dde7|2023-06-11--19-51-19--6 +# HONDA CIVIC 2016 +ae5111c66432dc8d|2023-07-24--14-16-50--4 +# HONDA CIVIC 2016 +b029c0f4cf2064e7|2023-07-19--07-02-46--37 +# HONDA CIVIC 2016 +b1c832ad56b6bc9d|2023-05-28--19-00-21--6 +# HONDA CIVIC 2016 +b7107213450829d2|2023-07-25--19-57-40--42 +# HONDA CIVIC 2016 +d313e54d1691a342|2023-06-20--16-22-23--37 +# HONDA CIVIC 2016 +dfae8f89bc63acf4|2023-05-21--18-14-46--6 +# VOLKSWAGEN GOLF 7TH GEN +1f032f5173c8ad99|2023-07-25--07-06-28--2 +# VOLKSWAGEN GOLF 7TH GEN +063f36154faa4921|2023-07-04--13-42-41--69 +# VOLKSWAGEN GOLF 7TH GEN +2cdded3a5da75b6a|2023-07-29--16-03-09--21 +# VOLKSWAGEN GOLF 7TH GEN +3a12e8c9983fb961|2023-06-29--16-29-48--21 +# VOLKSWAGEN GOLF 7TH GEN +3cfdec54aa035f3f|2023-07-30--16-55-43--25 +# VOLKSWAGEN GOLF 7TH GEN +434df3000c5d36ff|2023-08-07--08-16-23--15 +# VOLKSWAGEN GOLF 7TH GEN +4aa5d874f367adbc|2023-05-12--17-55-23--3 +# VOLKSWAGEN GOLF 7TH GEN +59658f79475526e4|2023-05-10--14-12-23--21 +# VOLKSWAGEN GOLF 7TH GEN +77db91ca5676dfd9|2023-05-11--12-32-36--18 +# VOLKSWAGEN GOLF 7TH GEN +7b6a4fdf7c484d0c|2023-06-02--20-28-47--1 +# VOLKSWAGEN GOLF 7TH GEN +8906f6f8c8052728|2023-07-30--17-49-47--16 +# VOLKSWAGEN GOLF 7TH GEN +8fe9ced03c94e256|2023-05-16--09-40-38--11 +# VOLKSWAGEN GOLF 7TH GEN +9806e7635acbfc4e|2023-07-30--08-52-18--4 +# VOLKSWAGEN GOLF 7TH GEN +9dfef26e8c734ff1|2023-07-21--21-17-53--12 +# VOLKSWAGEN GOLF 7TH GEN +a5ca9217d833ec9f|2023-06-03--12-07-06--1 +# VOLKSWAGEN GOLF 7TH GEN +a61c85c75dbf1c22|2023-07-03--10-14-01--9 +# VOLKSWAGEN GOLF 7TH GEN +ba232e71d028199a|2023-06-19--15-48-13--83 +# VOLKSWAGEN GOLF 7TH GEN +c2317d8d52490584|2023-05-20--15-01-59--4 +# VOLKSWAGEN GOLF 7TH GEN +dd154b34dc4d8c52|2023-07-03--20-13-03--63 +# VOLKSWAGEN GOLF 7TH GEN +e4221d4d60b6d43e|2023-05-11--18-57-30--33 +# VOLKSWAGEN GOLF 7TH GEN 
+e55f210ede806fde|2023-06-09--21-52-53--21 +# TOYOTA COROLLA TSS2 2019 +2526d4fd82f82868|2023-06-16--18-16-26--14 +# TOYOTA COROLLA TSS2 2019 +01febfaac7572f25|2023-06-06--19-08-52--2 +# TOYOTA COROLLA TSS2 2019 +0ba864f7f238934a|2023-07-07--15-45-46--3 +# TOYOTA COROLLA TSS2 2019 +13aa844538f5f3ea|2023-07-28--20-37-12--12 +# TOYOTA COROLLA TSS2 2019 +117fd309156bb38b|2023-07-08--19-26-59--3 +# TOYOTA COROLLA TSS2 2019 +08a1ee03695f6a19|2023-07-12--06-56-22--37 +# TOYOTA COROLLA TSS2 2019 +1febe902238e07e7|2023-07-04--11-40-53--19 +# TOYOTA COROLLA TSS2 2019 +0976603847a63139|2023-05-24--13-04-43--69 +# TOYOTA COROLLA TSS2 2019 +1d430ad170e95d66|2023-05-10--13-47-49--3 +# TOYOTA COROLLA TSS2 2019 +10f64136dcc9e6cf|2023-07-22--12-54-01--1 +# TOYOTA COROLLA TSS2 2019 +066f132fea612a43|2023-06-18--20-06-31--9 +# TOYOTA COROLLA TSS2 2019 +230649810f83e032|2023-07-24--22-23-09--21 +# TOYOTA COROLLA TSS2 2019 +13a296d002fe988d|2023-08-05--16-55-23--10 +# TOYOTA COROLLA TSS2 2019 +0ffb338268a7f57b|2023-07-08--17-15-25--71 +# TOYOTA COROLLA TSS2 2019 +2edc2c12fbb3cc34|2023-06-12--16-21-43--15 +# TOYOTA COROLLA TSS2 2019 +2653b4b309780e1a|2023-05-15--12-33-26--32 +# TOYOTA COROLLA TSS2 2019 +2ea9a9a47b4d658b|2023-08-03--17-08-37--18 +# TOYOTA COROLLA TSS2 2019 +2cd916ec939a0cdb|2023-07-29--20-09-03--29 +# TOYOTA COROLLA TSS2 2019 +36f5b57e4202d677|2023-07-19--16-07-41--20 +# TOYOTA COROLLA TSS2 2019 +375f6b7dcfa712bb|2023-07-29--00-25-41--3 +# TOYOTA COROLLA TSS2 2019 +3cb092b934c1321a|2023-07-12--14-19-10--1 +# TOYOTA COROLLA TSS2 2019 +42ff158c63048ad3|2023-06-19--12-40-18--12 +# TOYOTA COROLLA TSS2 2019 +3c755ae77207f5e6|2023-07-07--06-56-53--26 +# TOYOTA COROLLA TSS2 2019 +479477500279ec91|2023-07-15--15-08-01--13 +# TOYOTA COROLLA TSS2 2019 +4955b33f42f814ec|2023-06-19--18-19-33--10 +# TOYOTA COROLLA TSS2 2019 +4f871d414c69c6ba|2023-05-26--01-44-47--1 +# TOYOTA COROLLA TSS2 2019 +4ecfcac756318cbf|2023-05-26--09-11-43--16 +# TOYOTA COROLLA TSS2 2019 +51e32fb50a64250a|2023-07-07--20-50-11--51 +# TOYOTA COROLLA TSS2 2019 +48590e272587fc49|2023-06-02--08-36-54--48 +# TOYOTA COROLLA TSS2 2019 +4ccbc0d42d611d6e|2023-06-06--19-48-02--3 +# TOYOTA COROLLA TSS2 2019 +50c35b8d29fd7574|2023-07-12--19-36-54--2 +# TOYOTA COROLLA TSS2 2019 +51b1e5305ed717c2|2023-07-04--08-50-02--3 +# TOYOTA COROLLA TSS2 2019 +4c833174af1f11d9|2023-06-27--21-28-20--36 +# TOYOTA COROLLA TSS2 2019 +575626d3c2c61bc3|2023-07-18--17-06-03--46 +# TOYOTA COROLLA TSS2 2019 +59bbf0ec138e1bc4|2023-07-30--19-20-22--8 +# TOYOTA COROLLA TSS2 2019 +567e47e503b36408|2023-05-11--13-24-46--8 +# TOYOTA COROLLA TSS2 2019 +5d2be9f6c9bb6d59|2023-07-14--18-20-52--31 +# TOYOTA COROLLA TSS2 2019 +636f850472924d8b|2023-07-27--05-38-09--139 +# TOYOTA COROLLA TSS2 2019 +66235d23ccedf22b|2023-06-19--15-16-34--3 +# TOYOTA COROLLA TSS2 2019 +6c15cf37ba9e13af|2023-05-10--07-18-08--33 +# TOYOTA COROLLA TSS2 2019 +6439fb08910d24cb|2023-06-15--14-26-45--42 +# TOYOTA COROLLA TSS2 2019 +7472c18ac7ee970a|2023-07-30--01-05-28--2 +# TOYOTA COROLLA TSS2 2019 +7065b7a287ca6bfd|2023-06-21--16-08-50--4 +# TOYOTA COROLLA TSS2 2019 +7037f9c72bc6f964|2023-06-12--16-02-49--2 +# TOYOTA COROLLA TSS2 2019 +75e2a892240ca11a|2023-07-22--09-18-55--5 +# TOYOTA COROLLA TSS2 2019 +796a1c5804476c3f|2023-06-26--19-24-19--7 +# TOYOTA COROLLA TSS2 2019 +7848680077bfdcdc|2023-07-24--18-38-50--3 +# TOYOTA COROLLA TSS2 2019 +742af4a6e6eaa03a|2023-06-27--13-07-41--44 +# TOYOTA COROLLA TSS2 2019 +744a7144504b6e2e|2023-08-02--13-28-51--7 +# TOYOTA COROLLA TSS2 2019 
+7a2f01307609c158|2023-08-06--17-30-48--12 +# TOYOTA COROLLA TSS2 2019 +772e65c4f149b380|2023-05-20--04-14-17--1 +# TOYOTA COROLLA TSS2 2019 +86d675e9d797d210|2023-05-21--21-53-30--21 +# TOYOTA COROLLA TSS2 2019 +8730608053a8d3af|2023-06-26--17-05-48--18 +# TOYOTA COROLLA TSS2 2019 +89d53004921bd239|2023-07-22--15-00-53--38 +# TOYOTA COROLLA TSS2 2019 +90a0f4e5ad469fb7|2023-08-04--11-48-13--1 +# TOYOTA COROLLA TSS2 2019 +927ee6ce294e2f25|2023-07-29--08-22-53--1 +# TOYOTA COROLLA TSS2 2019 +9285a03a79895178|2023-07-04--23-14-40--16 +# TOYOTA COROLLA TSS2 2019 +9670ceb56ecf4d79|2023-07-10--07-55-49--14 +# TOYOTA COROLLA TSS2 2019 +99846616e03e1162|2023-07-30--12-59-13--2 +# TOYOTA COROLLA TSS2 2019 +98d3152d30060750|2023-05-20--15-57-05--17 +# TOYOTA COROLLA TSS2 2019 +990a1b5f50ca3d5c|2023-05-17--10-17-05--1 +# TOYOTA COROLLA TSS2 2019 +9afac539f787a200|2023-06-30--15-05-48--14 +# TOYOTA COROLLA TSS2 2019 +9a2505595b2a3554|2023-08-07--14-46-21--59 +# TOYOTA COROLLA TSS2 2019 +a1459329fb4cd970|2023-06-28--15-44-03--19 +# TOYOTA COROLLA TSS2 2019 +a2a0ccea32023010|2023-07-26--17-19-33--11 +# TOYOTA COROLLA TSS2 2019 +9f65d75230c64fb4|2023-06-19--12-28-55--95 +# TOYOTA COROLLA TSS2 2019 +a43409b966c53d15|2023-06-29--10-11-49--4 +# TOYOTA COROLLA TSS2 2019 +a4446d3a8ae1e324|2023-06-06--11-49-40--17 +# TOYOTA COROLLA TSS2 2019 +9d5c10b6f66f736a|2023-06-18--10-48-03--191 +# TOYOTA COROLLA TSS2 2019 +a2804476419fd4f3|2023-05-30--14-13-10--2 +# TOYOTA COROLLA TSS2 2019 +ac7fa8ccceee8fe4|2023-07-21--13-50-12--14 +# TOYOTA COROLLA TSS2 2019 +ad27b5fb54426e8a|2023-07-12--16-32-18--9 +# TOYOTA COROLLA TSS2 2019 +b46ac14e96fa047d|2023-05-26--13-10-23--9 +# TOYOTA COROLLA TSS2 2019 +b292d45117041581|2023-06-22--20-59-15--34 +# TOYOTA COROLLA TSS2 2019 +b135d4bbc0ace012|2023-07-28--08-46-43--5 +# TOYOTA COROLLA TSS2 2019 +b2a420a551c95f61|2023-06-10--13-52-14--2 +# TOYOTA COROLLA TSS2 2019 +b27aabf96cf81189|2023-07-20--17-59-53--9 +# TOYOTA COROLLA TSS2 2019 +ba2c31fbceccf0a2|2023-05-26--08-28-50--19 +# TOYOTA COROLLA TSS2 2019 +b8e73c4b855918a4|2023-06-23--22-09-47--5 +# TOYOTA COROLLA TSS2 2019 +b4cce6e306466c3a|2023-06-09--13-01-32--3 +# TOYOTA COROLLA TSS2 2019 +bc30537149678ec5|2023-07-16--09-33-33--4 +# TOYOTA COROLLA TSS2 2019 +bcb2e59018be5f0e|2023-05-22--14-41-32--295 +# TOYOTA COROLLA TSS2 2019 +bb6bc8e6ac2190dc|2023-05-31--16-10-00--53 +# TOYOTA COROLLA TSS2 2019 +b7440708a5e2c796|2023-06-21--19-37-36--9 +# TOYOTA COROLLA TSS2 2019 +c0023418edc8ed7b|2023-06-04--21-14-16--36 +# TOYOTA COROLLA TSS2 2019 +c33e7ea5e01511d3|2023-07-13--12-05-30--1 +# TOYOTA COROLLA TSS2 2019 +c9315a5b08fabfb4|2023-05-17--15-45-13--7 +# TOYOTA COROLLA TSS2 2019 +d02240bb0738ae9d|2023-07-04--19-59-49--37 +# TOYOTA COROLLA TSS2 2019 +d0bef3e100712fce|2023-05-23--07-58-05--3 +# TOYOTA COROLLA TSS2 2019 +d1830450c4c43463|2023-06-24--21-57-32--2 +# TOYOTA COROLLA TSS2 2019 +cf0bbb6c86836b73|2023-07-08--18-37-43--28 +# TOYOTA COROLLA TSS2 2019 +d2a3925946654cd3|2023-06-12--16-19-24--3 +# TOYOTA COROLLA TSS2 2019 +da803e391542cb10|2023-05-29--13-49-28--35 +# TOYOTA COROLLA TSS2 2019 +dcefbc5933b0e410|2023-06-14--19-00-09--3 +# TOYOTA COROLLA TSS2 2019 +e0601ce36770a765|2023-07-25--09-12-36--55 +# TOYOTA COROLLA TSS2 2019 +dfb993cc43ba69dc|2023-06-06--06-52-08--2 +# TOYOTA COROLLA TSS2 2019 +e5bbae6d1c740ba1|2023-06-19--12-57-23--14 +# TOYOTA COROLLA TSS2 2019 +e5bfe3f909216ef6|2023-07-25--18-03-22--7 +# TOYOTA COROLLA TSS2 2019 +e514e647fc5e28d0|2023-07-10--16-11-35--1 +# TOYOTA COROLLA TSS2 2019 
+e01d990b9fafc7c7|2023-06-26--04-53-20--24 +# TOYOTA COROLLA TSS2 2019 +ead14b5ba31cdb4d|2023-08-05--21-07-24--107 +# TOYOTA COROLLA TSS2 2019 +e9b1885ec511d838|2023-05-15--09-35-44--42 +# TOYOTA COROLLA TSS2 2019 +e305dea90c599b82|2023-06-20--19-39-01--6 +# TOYOTA COROLLA TSS2 2019 +e8d19bce0da0148f|2023-07-28--16-53-43--13 +# TOYOTA COROLLA TSS2 2019 +f1c4e2a016e4de93|2023-07-19--21-27-01--3 +# TOYOTA COROLLA TSS2 2019 +f1b3bb0340f9b560|2023-07-26--09-19-02--22 +# TOYOTA COROLLA TSS2 2019 +eb7ac6daa55e0823|2023-06-10--19-14-01--4 +# TOYOTA COROLLA TSS2 2019 +f5695afee48c4efe|2023-05-12--08-12-59--21 +# TOYOTA COROLLA TSS2 2019 +feee8fadcb368355|2023-07-22--21-12-52--25 +# TOYOTA COROLLA TSS2 2019 +f82bb7e095cdd66f|2023-05-10--05-19-59--9 +# TOYOTA COROLLA TSS2 2019 +fe18f736cb0d7813|2023-05-24--19-43-43--10 +# CHRYSLER PACIFICA HYBRID 2019 +0e99ed1796cf0796|2023-07-01--13-12-55--14 +# CHRYSLER PACIFICA HYBRID 2019 +1943b3d682b7f11d|2023-07-09--13-56-25--20 +# CHRYSLER PACIFICA HYBRID 2019 +120a432f63cb0de2|2023-06-24--13-48-30--7 +# CHRYSLER PACIFICA HYBRID 2019 +1a10807c60c127fd|2023-06-05--18-28-54--44 +# CHRYSLER PACIFICA HYBRID 2019 +2b964a897363fb8d|2023-07-07--16-03-35--35 +# CHRYSLER PACIFICA HYBRID 2019 +2f6f39e4b14298b8|2023-05-28--18-25-37--43 +# CHRYSLER PACIFICA HYBRID 2019 +35176353a674cbdc|2023-06-04--18-26-00--52 +# CHRYSLER PACIFICA HYBRID 2019 +37004be1e4d06893|2023-08-04--00-57-55--1 +# CHRYSLER PACIFICA HYBRID 2019 +3e5d2ea720b2ade6|2023-05-18--20-54-26--14 +# CHRYSLER PACIFICA HYBRID 2019 +4395faadf0a373f2|2023-05-13--16-36-01--12 +# CHRYSLER PACIFICA HYBRID 2019 +4b17c71e7787e92f|2023-05-16--21-51-11--12 +# CHRYSLER PACIFICA HYBRID 2019 +63b6ad8d485e56a3|2023-07-17--09-57-27--20 +# CHRYSLER PACIFICA HYBRID 2019 +644df7356b2e465d|2023-06-10--21-35-04--90 +# CHRYSLER PACIFICA HYBRID 2019 +6244bf059e173e99|2023-07-30--09-14-03--15 +# CHRYSLER PACIFICA HYBRID 2019 +6d3ff8fbb8417635|2023-07-11--18-41-27--2 +# CHRYSLER PACIFICA HYBRID 2019 +7251ba2e0f80add1|2023-05-21--20-49-01--94 +# CHRYSLER PACIFICA HYBRID 2019 +7494bdf484b714e7|2023-05-15--22-44-48--48 +# CHRYSLER PACIFICA HYBRID 2019 +86deeb65f10c3129|2023-06-08--09-04-59--1 +# CHRYSLER PACIFICA HYBRID 2019 +8b4a62384ce1dbda|2023-07-10--20-20-58--8 +# CHRYSLER PACIFICA HYBRID 2019 +91190f5999463dd8|2023-06-12--11-38-07--3 +# CHRYSLER PACIFICA HYBRID 2019 +94586e8449ef012b|2023-06-08--14-38-27--5 +# CHRYSLER PACIFICA HYBRID 2019 +a3b4d4b4cb18bdad|2023-06-11--16-20-07--62 +# CHRYSLER PACIFICA HYBRID 2019 +aadbd1a661fb4da3|2023-08-03--13-56-56--2 +# CHRYSLER PACIFICA HYBRID 2019 +acb4cd444864d759|2023-05-12--22-57-05--9 +# CHRYSLER PACIFICA HYBRID 2019 +b398a1af9eea479f|2023-06-11--19-48-36--36 +# CHRYSLER PACIFICA HYBRID 2019 +cf10bbff2c1d2c6b|2023-07-10--08-53-44--2 +# CHRYSLER PACIFICA HYBRID 2019 +d8c9e6bde1d2900f|2023-06-15--10-51-56--3 +# CHRYSLER PACIFICA HYBRID 2019 +de666a40633d13fe|2023-07-28--14-19-44--12 +# CHRYSLER PACIFICA HYBRID 2019 +e51816839229e7c6|2023-07-08--13-44-57--1 +# CHRYSLER PACIFICA HYBRID 2019 +ec62067436775681|2023-07-19--18-28-32--26 +# CHRYSLER PACIFICA HYBRID 2019 +ed624dae6fed0e6b|2023-07-16--09-21-32--35 +# CHRYSLER PACIFICA HYBRID 2019 +ef47e9b061fba593|2023-05-13--21-59-33--5 +# CHRYSLER PACIFICA HYBRID 2019 +ede4a914fcc6fb6d|2023-08-02--14-13-23--3 +# CHRYSLER PACIFICA HYBRID 2019 +f421e8c2744fed7f|2023-06-30--13-10-52--24 +# CHRYSLER PACIFICA HYBRID 2019 +f6f46c9092a35916|2023-07-15--16-38-00--3 +# CHRYSLER PACIFICA HYBRID 2019 +f800ed88a5f7fa19|2023-07-11--11-16-38--5 +# CHRYSLER 
PACIFICA HYBRID 2019 +fe0f36c1b6573659|2023-07-19--15-06-52--7 +# CHEVROLET BOLT EUV 2022 +2334b466b5ba9cab|2023-05-31--17-06-45--9 +# CHEVROLET BOLT EUV 2022 +276dfdea5c6a45e0|2023-05-16--11-15-03--5 +# CHEVROLET BOLT EUV 2022 +186ece45c5f7730a|2023-06-05--12-57-13--5 +# CHEVROLET BOLT EUV 2022 +0def4a390f6fe5c0|2023-06-07--20-34-57--34 +# CHEVROLET BOLT EUV 2022 +161c32b370ac8d0f|2023-07-23--09-03-44--2 +# CHEVROLET BOLT EUV 2022 +1829f1c500a2c163|2023-07-27--16-07-15--11 +# CHEVROLET BOLT EUV 2022 +0ff88cd04a49e0b8|2023-06-06--21-03-24--4 +# CHEVROLET BOLT EUV 2022 +1ad81494603f8450|2023-06-15--16-34-30--8 +# CHEVROLET BOLT EUV 2022 +10b00eb757360a96|2023-06-30--10-31-18--19 +# CHEVROLET BOLT EUV 2022 +135b91dcfd6b31b1|2023-06-17--21-01-23--3 +# CHEVROLET BOLT EUV 2022 +297302e607039e23|2023-06-06--20-28-40--9 +# CHEVROLET BOLT EUV 2022 +18dbdce81b2402b0|2023-08-07--07-34-34--7 +# CHEVROLET BOLT EUV 2022 +2b4118b61e62995e|2023-07-15--15-17-08--10 +# CHEVROLET BOLT EUV 2022 +2d6490f23b2f8592|2023-08-07--10-00-17--12 +# CHEVROLET BOLT EUV 2022 +2fd7f5f47f793c9c|2023-05-21--12-19-26--15 +# CHEVROLET BOLT EUV 2022 +3140e1c6195c880e|2023-05-10--00-08-11--8 +# CHEVROLET BOLT EUV 2022 +36abd7f7160a9a75|2023-07-05--14-20-25--13 +# CHEVROLET BOLT EUV 2022 +3b68b660a77fd80e|2023-07-16--01-08-43--11 +# CHEVROLET BOLT EUV 2022 +446478f0404c86d3|2023-07-30--11-24-22--14 +# CHEVROLET BOLT EUV 2022 +43f2df44eba11325|2023-07-02--18-10-34--1 +# CHEVROLET BOLT EUV 2022 +43d44eef740e247b|2023-07-30--16-49-25--7 +# CHEVROLET BOLT EUV 2022 +45bcfa6a1dab12b2|2023-07-03--16-44-59--1 +# CHEVROLET BOLT EUV 2022 +4fe3129b0fc9aed8|2023-06-24--12-06-07--2 +# CHEVROLET BOLT EUV 2022 +50d49a7246b883d5|2023-06-20--09-05-14--47 +# CHEVROLET BOLT EUV 2022 +4c85a75ef10f7a6a|2023-07-31--11-57-04--42 +# CHEVROLET BOLT EUV 2022 +555d4087cf86aa91|2023-05-12--15-16-07--34 +# CHEVROLET BOLT EUV 2022 +583b1612bef234ed|2023-07-22--14-24-44--10 +# CHEVROLET BOLT EUV 2022 +63a62077852a6194|2023-07-31--12-11-15--14 +# CHEVROLET BOLT EUV 2022 +63cd6284b07989a9|2023-07-07--19-48-05--7 +# CHEVROLET BOLT EUV 2022 +6b654cb3eaade30b|2023-08-01--08-12-37--16 +# CHEVROLET BOLT EUV 2022 +6afbe9126b0b488e|2023-07-29--15-32-54--1 +# CHEVROLET BOLT EUV 2022 +715792adb472e791|2023-08-07--16-02-30--19 +# CHEVROLET BOLT EUV 2022 +77360d8b4b0aa355|2023-06-09--17-54-25--24 +# CHEVROLET BOLT EUV 2022 +79c2ed37b4345620|2023-07-08--22-23-27--4 +# CHEVROLET BOLT EUV 2022 +80c623f72cae73bb|2023-06-01--21-46-15--3 +# CHEVROLET BOLT EUV 2022 +82a90d8103afed98|2023-06-12--07-52-31--23 +# CHEVROLET BOLT EUV 2022 +8490e1e599660163|2023-05-15--06-17-33--1 +# CHEVROLET BOLT EUV 2022 +894c916ba5d876c1|2023-06-06--06-55-07--15 +# CHEVROLET BOLT EUV 2022 +8ed03d21b3c15090|2023-06-22--22-10-54--33 +# CHEVROLET BOLT EUV 2022 +93fbf2642ce58ddf|2023-06-27--15-39-36--5 +# CHEVROLET BOLT EUV 2022 +988fdc2a62b72489|2023-06-29--22-43-31--15 +# CHEVROLET BOLT EUV 2022 +986c6ba7f7118db8|2023-07-03--15-12-47--7 +# CHEVROLET BOLT EUV 2022 +989d9a23ae4cbc76|2023-06-03--21-50-50--4 +# CHEVROLET BOLT EUV 2022 +9e27a5ad608f9b5d|2023-07-28--19-22-22--5 +# CHEVROLET BOLT EUV 2022 +9d07b85adb1c7959|2023-07-24--19-16-41--12 +# CHEVROLET BOLT EUV 2022 +a1c6fc5f9e1c0641|2023-07-26--18-20-50--2 +# CHEVROLET BOLT EUV 2022 +a40f0130fa3d0aaf|2023-07-25--10-57-51--89 +# CHEVROLET BOLT EUV 2022 +a8632896bc8c451f|2023-05-21--10-22-51--3 +# CHEVROLET BOLT EUV 2022 +aea39a07b9a7343f|2023-07-09--17-06-28--8 +# CHEVROLET BOLT EUV 2022 +b7709c9bd2bb88fd|2023-06-18--12-43-53--46 +# CHEVROLET BOLT 
EUV 2022 +b79a39df75584415|2023-07-04--22-01-28--21 +# CHEVROLET BOLT EUV 2022 +bd059400a5cde994|2023-07-04--16-14-42--4 +# CHEVROLET BOLT EUV 2022 +bf073985dfe87f7a|2023-08-04--14-12-05--11 +# CHEVROLET BOLT EUV 2022 +be4060e17a23ffdd|2023-05-20--14-36-48--3 +# CHEVROLET BOLT EUV 2022 +c401b4043ba9000a|2023-08-03--14-48-07--29 +# CHEVROLET BOLT EUV 2022 +b9a4ef3d6f3df97b|2023-08-03--05-45-23--35 +# CHEVROLET BOLT EUV 2022 +c6d15839fd14a490|2023-05-12--07-40-52--16 +# CHEVROLET BOLT EUV 2022 +d061de08f31aeed7|2023-05-30--14-22-41--75 +# CHEVROLET BOLT EUV 2022 +d1f071678ccb1920|2023-07-11--15-10-34--2 +# CHEVROLET BOLT EUV 2022 +d08af0cf79acfd74|2023-07-19--05-13-02--2 +# CHEVROLET BOLT EUV 2022 +d4dfed34943b6c7e|2023-07-20--08-10-21--20 +# CHEVROLET BOLT EUV 2022 +dbc1d04685612ecc|2023-05-15--17-57-58--3 +# CHEVROLET BOLT EUV 2022 +da4d62ee196295d4|2023-07-28--19-30-36--29 +# CHEVROLET BOLT EUV 2022 +da4f7891ae804737|2023-07-06--14-28-22--12 +# CHEVROLET BOLT EUV 2022 +dc50076c39d4257c|2023-06-01--20-16-39--1 +# CHEVROLET BOLT EUV 2022 +dda7599351b213bc|2023-06-13--05-36-42--5 +# CHEVROLET BOLT EUV 2022 +dc2696502990ee07|2023-08-05--06-27-00--8 +# CHEVROLET BOLT EUV 2022 +e340670ab48ba88a|2023-07-29--21-21-49--8 +# CHEVROLET BOLT EUV 2022 +ea4cb0f25db44a80|2023-06-14--05-18-10--19 +# CHEVROLET BOLT EUV 2022 +ed9dc2d827252019|2023-07-27--06-22-48--42 +# CHEVROLET BOLT EUV 2022 +f1ed193b05fe93b0|2023-05-11--18-47-03--3 +# CHEVROLET BOLT EUV 2022 +ef91b5c6792c0a7a|2023-07-31--20-36-50--1 +# CHEVROLET BOLT EUV 2022 +f89a0a966e54f7ff|2023-07-15--20-44-12--8 +# CHEVROLET BOLT EUV 2022 +fc55aed7a9efb49c|2023-06-08--08-01-19--23 +# TOYOTA CAMRY 2021 +093be653a841c345|2023-05-10--19-37-55--15 +# TOYOTA CAMRY 2021 +0a7615a819224eab|2023-05-18--17-00-34--181 +# TOYOTA CAMRY 2021 +1f9891d5a64661ef|2023-06-02--17-00-36--19 +# TOYOTA CAMRY 2021 +2307361bfeefee69|2023-06-08--17-48-28--70 +# TOYOTA CAMRY 2021 +06a5dbcee3ab12f1|2023-07-26--07-57-11--1 +# TOYOTA CAMRY 2021 +037c07a9b40d78a2|2023-05-18--22-31-50--21 +# TOYOTA CAMRY 2021 +3cf34a46abd8addd|2023-07-25--10-03-16--16 +# TOYOTA CAMRY 2021 +470698275bafef42|2023-07-31--06-34-13--8 +# TOYOTA CAMRY 2021 +5231cc88de0139ab|2023-06-14--13-33-32--7 +# TOYOTA CAMRY 2021 +528789e4391481cf|2023-07-27--15-16-03--16 +# TOYOTA CAMRY 2021 +552129078b970cd3|2023-07-25--09-41-30--20 +# TOYOTA CAMRY 2021 +5d4b81649a4bce56|2023-08-01--12-01-18--2 +# TOYOTA CAMRY 2021 +6f26453738f35634|2023-05-21--13-32-12--13 +# TOYOTA CAMRY 2021 +73088e067b2fb9d4|2023-05-12--08-24-31--25 +# TOYOTA CAMRY 2021 +8acd130e9e5d0cf8|2023-06-13--12-05-18--35 +# TOYOTA CAMRY 2021 +8c8ab5e2b14c51c9|2023-05-26--18-37-04--33 +# TOYOTA CAMRY 2021 +876a156a1191834d|2023-07-13--18-37-18--7 +# TOYOTA CAMRY 2021 +a092c3532573731f|2023-08-02--14-19-03--2 +# TOYOTA CAMRY 2021 +a7244a988251077d|2023-05-20--08-40-16--2 +# TOYOTA CAMRY 2021 +b7f21591ba459403|2023-06-02--18-32-59--20 +# TOYOTA CAMRY 2021 +b4f83330b2d59205|2023-05-16--18-20-35--9 +# TOYOTA CAMRY 2021 +bae56b301710cdf0|2023-05-30--07-44-31--23 +# TOYOTA CAMRY 2021 +c70d69b11bfb697d|2023-06-17--18-51-02--8 +# TOYOTA CAMRY 2021 +cc7c8c846c338570|2023-07-10--08-37-46--26 +# TOYOTA CAMRY 2021 +c4e9230919b74f75|2023-06-20--17-57-36--20 +# TOYOTA CAMRY 2021 +d45c4dcf8ea2f5d4|2023-07-18--12-10-14--71 +# TOYOTA CAMRY 2021 +d9d5a5d9f4ae2ad6|2023-05-27--10-56-51--7 +# TOYOTA CAMRY 2021 +f47661feccc29204|2023-05-13--19-57-58--11 +# TOYOTA CAMRY 2018 +043c1d59829f98e1|2023-05-15--16-39-19--18 +# TOYOTA CAMRY 2018 
+2f37fc0d2ebb8cfe|2023-07-28--10-38-40--40 +# TOYOTA CAMRY 2018 +67f3731c5f3afaa2|2023-06-05--20-19-00--15 +# TOYOTA CAMRY 2018 +8dd5cd20feef7daf|2023-05-31--15-48-11--2 +# TOYOTA CAMRY 2018 +8eb7ff2f26051984|2023-05-17--10-07-44--23 +# TOYOTA CAMRY 2018 +aae51b841b955820|2023-05-21--00-01-38--3 +# TOYOTA CAMRY 2018 +b00f418d2b66cfa2|2023-05-27--16-36-37--129 +# TOYOTA CAMRY 2018 +b711d49263f20146|2023-06-23--17-30-14--137 +# TOYOTA CAMRY 2018 +c5827e54d59bdcc2|2023-08-03--18-40-21--1 +# TOYOTA CAMRY 2018 +c6503f6534bf811a|2023-08-07--14-05-15--1 +# TOYOTA CAMRY 2018 +eddf8030d2c0fbd8|2023-05-17--07-20-36--18 +# TOYOTA CAMRY 2018 +ef6bd990e34920b1|2023-07-25--08-53-09--1 +# CHEVROLET SILVERADO 1500 2020 +1092d371df987f78|2023-07-11--08-18-42--1 +# CHEVROLET SILVERADO 1500 2020 +2a3958b34246e3b7|2023-07-09--16-52-48--23 +# CHEVROLET SILVERADO 1500 2020 +25a42ae6068112ed|2023-07-19--08-46-57--6 +# CHEVROLET SILVERADO 1500 2020 +34d4e668268e0cc8|2023-06-24--18-00-35--12 +# CHEVROLET SILVERADO 1500 2020 +38aa7da107d5d252|2023-06-03--23-00-13--7 +# CHEVROLET SILVERADO 1500 2020 +3c760901e7b30fe2|2023-07-24--10-35-18--2 +# CHEVROLET SILVERADO 1500 2020 +5085c761395d1fe6|2023-05-26--18-54-08--17 +# CHEVROLET SILVERADO 1500 2020 +602051db17fb6945|2023-06-11--14-11-45--16 +# CHEVROLET SILVERADO 1500 2020 +b83dd25fd69d185a|2023-07-09--11-11-25--13 +# CHEVROLET SILVERADO 1500 2020 +c5762c796866f52b|2023-06-01--11-09-58--40 +# CHEVROLET SILVERADO 1500 2020 +cc2c095689982e90|2023-06-16--15-19-12--15 +# CHEVROLET SILVERADO 1500 2020 +d89ba4ef9ca61255|2023-06-05--20-19-55--1 +# CHEVROLET SILVERADO 1500 2020 +f04339ee7d0a4f4f|2023-07-06--12-12-45--1 +# CHEVROLET SILVERADO 1500 2020 +f1a814b2d113041b|2023-06-14--20-39-56--46 +# CHEVROLET SILVERADO 1500 2020 +f4ff60e509c0efad|2023-07-17--08-45-55--10 +# CHEVROLET SILVERADO 1500 2020 +fe80a4e1cedec853|2023-07-26--19-16-06--9 +# CHRYSLER PACIFICA 2018 +2137b01aa0ca63f9|2023-06-19--16-31-25--12 +# CHRYSLER PACIFICA 2018 +0a3e89f78b1d0071|2023-07-04--13-44-33--37 +# CHRYSLER PACIFICA 2018 +2900033dd47fa445|2023-07-19--10-02-00--5 +# CHRYSLER PACIFICA 2018 +0d67197945512e16|2023-06-02--18-33-08--186 +# CHRYSLER PACIFICA 2018 +454c31a132f2c003|2023-06-24--19-40-37--208 +# CHRYSLER PACIFICA 2018 +53f2206925c37581|2023-05-20--11-40-22--4 +# CHRYSLER PACIFICA 2018 +6732a0b0508ffc0e|2023-05-11--09-02-51--1 +# CHRYSLER PACIFICA 2018 +8363f866866f25e7|2023-05-10--06-55-11--26 +# CHRYSLER PACIFICA 2018 +87caf137ebd1c6f6|2023-07-05--12-13-27--4 +# CHRYSLER PACIFICA 2018 +89e4d9d4b8ec175c|2023-08-07--13-07-13--128 +# CHRYSLER PACIFICA 2018 +8fc6a1b72c8b1357|2023-07-31--06-08-43--18 +# CHRYSLER PACIFICA 2018 +e13cd4efc12b3ed9|2023-06-02--23-27-54--10 +# HYUNDAI SANTA FE 2019 +124457b31149107e|2023-06-11--17-33-36--39 +# HYUNDAI SANTA FE 2019 +0b2b36cf5a0789db|2023-05-30--21-15-07--17 +# HYUNDAI SANTA FE 2019 +2febc4fb44b77feb|2023-06-19--10-18-04--12 +# HYUNDAI SANTA FE 2019 +45a8ffd448481ce2|2023-06-11--11-43-42--69 +# HYUNDAI SANTA FE 2019 +47ba5e6f0490d659|2023-05-19--13-11-38--21 +# HYUNDAI SANTA FE 2019 +6af67ee8ad45b46d|2023-06-09--12-43-30--3 +# HYUNDAI SANTA FE 2019 +7effea3e618a7eff|2023-07-02--14-51-45--20 +# HYUNDAI SANTA FE 2019 +9ebc35731309da40|2023-05-15--20-55-46--5 +# HYUNDAI SANTA FE 2019 +ac34a01eff94e7d2|2023-06-09--11-20-45--1 +# HYUNDAI SANTA FE 2019 +dae9868756e02256|2023-05-24--21-02-45--14 +# HYUNDAI SANTA FE 2019 +e8a6a6776c51a45c|2023-05-14--14-07-00--6 +# HYUNDAI SANTA FE 2019 +ea9d3b13a4799604|2023-05-19--19-43-27--31 +# HYUNDAI SANTA FE 2019 
+fddc8d2cbb48ec6f|2023-06-02--17-35-08--16 +# HONDA ACCORD 2018 +190b11018a714a28|2023-07-12--17-54-06--29 +# HONDA ACCORD 2018 +17a3b6806ff8e041|2023-05-19--16-04-06--24 +# HONDA ACCORD 2018 +0dacabc563f7e7d7|2023-06-04--21-45-33--4 +# HONDA ACCORD 2018 +10d6a69109431519|2023-05-28--14-21-19--19 +# HONDA ACCORD 2018 +05bcd2ee65199207|2023-07-19--18-22-21--6 +# HONDA ACCORD 2018 +0c676e287b251a20|2023-07-28--06-19-37--105 +# HONDA ACCORD 2018 +0793ebd391c355da|2023-07-20--06-35-32--19 +# HONDA ACCORD 2018 +005d4e7ceb089cfd|2023-07-13--07-22-49--18 +# HONDA ACCORD 2018 +214a25adc54636fc|2023-05-18--10-45-47--1 +# HONDA ACCORD 2018 +1ebe446c9f466f50|2023-05-16--03-37-24--1 +# HONDA ACCORD 2018 +0bbb933eda8ae17c|2023-07-01--20-27-36--179 +# HONDA ACCORD 2018 +2e08aac41ed464a6|2023-07-31--14-35-02--3 +# HONDA ACCORD 2018 +2ca8270fc59b50f3|2023-06-23--07-59-23--5 +# HONDA ACCORD 2018 +39ac3639fcffc102|2023-07-20--21-06-28--268 +# HONDA ACCORD 2018 +37b721607b521682|2023-07-20--15-09-59--25 +# HONDA ACCORD 2018 +3d151bec9fd04d09|2023-07-25--16-06-41--8 +# HONDA ACCORD 2018 +3b417dee7be1d8dd|2023-07-06--06-11-05--26 +# HONDA ACCORD 2018 +49c7b83d348197ed|2023-07-24--21-19-42--2 +# HONDA ACCORD 2018 +4ff67da1be2f4f43|2023-05-12--21-10-12--12 +# HONDA ACCORD 2018 +5e7e1a7c1d61b456|2023-06-19--19-36-24--60 +# HONDA ACCORD 2018 +69db8ee3bf677945|2023-07-24--21-17-32--74 +# HONDA ACCORD 2018 +73dbd685214c0e03|2023-06-01--12-47-34--67 +# HONDA ACCORD 2018 +7dde4489ff0a6c80|2023-06-07--20-10-09--6 +# HONDA ACCORD 2018 +82d1e3d3c754bf2a|2023-05-16--08-21-26--2 +# HONDA ACCORD 2018 +80d45cf838f8c716|2023-07-17--16-29-49--6 +# HONDA ACCORD 2018 +889163fc61871a68|2023-05-16--17-18-58--5 +# HONDA ACCORD 2018 +8b22d387ab08c5db|2023-07-15--21-35-09--2 +# HONDA ACCORD 2018 +8aaa7ccc2fc68822|2023-07-09--20-09-02--1 +# HONDA ACCORD 2018 +90d95c7fd3fa748a|2023-06-16--14-04-08--61 +# HONDA ACCORD 2018 +9d8f86b8b11b65c9|2023-06-30--19-33-48--156 +# HONDA ACCORD 2018 +a33e427bbd337300|2023-07-12--09-54-45--12 +# HONDA ACCORD 2018 +a3669d37d7fe28c2|2023-06-01--17-08-16--2 +# HONDA ACCORD 2018 +a85202e1dca5afaf|2023-06-21--11-46-10--20 +# HONDA ACCORD 2018 +a454b11d7910c242|2023-06-14--16-11-49--25 +# HONDA ACCORD 2018 +aca10b5acd89289d|2023-07-12--20-03-23--4 +# HONDA ACCORD 2018 +b3a1a214664b7d31|2023-07-28--16-21-31--23 +# HONDA ACCORD 2018 +b9028def8e733ee6|2023-05-16--18-34-52--7 +# HONDA ACCORD 2018 +bf887c016d27cca5|2023-07-31--12-34-34--1 +# HONDA ACCORD 2018 +c642bd29f2858d75|2023-06-27--08-21-19--9 +# HONDA ACCORD 2018 +c2143a4f3723576c|2023-06-21--12-51-06--16 +# HONDA ACCORD 2018 +cfb71d30183708f8|2023-07-30--12-40-33--6 +# HONDA ACCORD 2018 +d09c814b908913a2|2023-06-13--19-19-07--16 +# HONDA ACCORD 2018 +d37dc158651867ca|2023-06-03--16-03-15--2 +# HONDA ACCORD 2018 +d83010e83c7f87ea|2023-05-24--15-15-58--20 +# HONDA ACCORD 2018 +d8f44985fa82e7f5|2023-05-10--22-40-13--4 +# HONDA ACCORD 2018 +eb6f3ff143d0fac5|2023-05-23--10-51-44--7 +# HONDA ACCORD 2018 +ee1874709eb8b485|2023-05-11--17-10-40--183 +# HONDA ACCORD 2018 +ef6f577d770521e4|2023-05-10--07-44-14--32 +# TOYOTA COROLLA 2017 +00acf2e4c8c6b549|2023-07-01--20-25-46--8 +# TOYOTA COROLLA 2017 +053f594b05527a80|2023-07-26--01-47-44--33 +# TOYOTA COROLLA 2017 +16a9664d939d9efe|2023-06-24--20-10-27--5 +# TOYOTA COROLLA 2017 +364bb492aa33d7df|2023-06-06--07-47-54--23 +# TOYOTA COROLLA 2017 +39932c340e596ead|2023-06-29--21-47-38--11 +# TOYOTA COROLLA 2017 +3c3ea5134c90308a|2023-06-22--22-11-10--4 +# TOYOTA COROLLA 2017 
+5a596be432c5fe53|2023-06-11--17-10-36--38 +# TOYOTA COROLLA 2017 +728c59e1123e2cf7|2023-05-16--16-05-22--9 +# TOYOTA COROLLA 2017 +718042f01647e90e|2023-06-21--22-29-28--10 +# TOYOTA COROLLA 2017 +7d48b40fb8782bda|2023-08-05--13-16-29--23 +# TOYOTA COROLLA 2017 +7ec9410b593c7d5a|2023-06-29--17-29-56--19 +# TOYOTA COROLLA 2017 +869558002728e30b|2023-08-02--14-19-46--6 +# TOYOTA COROLLA 2017 +887e76d536c1841e|2023-07-06--07-17-09--12 +# TOYOTA COROLLA 2017 +993aa9f1c1e81499|2023-07-14--19-08-45--10 +# TOYOTA COROLLA 2017 +975cdde87029652a|2023-07-04--17-39-43--4 +# TOYOTA COROLLA 2017 +a152dad3c3368320|2023-05-19--14-16-10--47 +# TOYOTA COROLLA 2017 +a61d43139939b1ac|2023-05-29--11-20-51--3 +# TOYOTA COROLLA 2017 +adb85c50d47ac0a9|2023-05-14--20-29-11--1 +# TOYOTA COROLLA 2017 +b86a9aad74db6ec7|2023-07-14--16-25-56--7 +# TOYOTA COROLLA 2017 +b9fc453e3021b7ff|2023-05-23--15-16-18--3 +# TOYOTA COROLLA 2017 +bc7a33f454ca54e8|2023-06-13--22-23-55--10 +# TOYOTA COROLLA 2017 +d29979b337e5381c|2023-06-22--21-45-44--6 +# TOYOTA COROLLA 2017 +d3faefc00d1e3660|2023-06-02--19-59-19--30 +# TOYOTA COROLLA 2017 +d1e714006749bfa5|2023-06-02--14-50-02--2 +# TOYOTA COROLLA 2017 +cf700d069c05bba8|2023-07-13--16-25-32--1 +# TOYOTA COROLLA 2017 +d9abb1b68b7289ae|2023-07-21--18-47-15--1 +# TOYOTA COROLLA 2017 +d4c10d06c1bb7800|2023-07-16--21-41-26--54 +# TOYOTA COROLLA 2017 +d89badbe085c52c1|2023-05-13--22-32-35--16 +# TOYOTA COROLLA 2017 +ea53462ef36d4315|2023-05-13--10-56-42--8 +# TOYOTA COROLLA 2017 +e831c4e62fba9a0f|2023-07-16--18-27-58--109 +# TOYOTA COROLLA 2017 +f74a24ad37ed9773|2023-06-02--17-56-50--2 +# TOYOTA COROLLA 2017 +f467e0cd91910513|2023-07-12--14-53-24--7 +# TOYOTA COROLLA 2017 +fb0d04ba394c06f2|2023-05-21--17-58-49--9 +# NISSAN LEAF 2018 +0b17985326a570c4|2023-07-24--17-13-53--44 +# NISSAN LEAF 2018 +1dd434bd0ea8aaa4|2023-05-17--06-25-44--11 +# NISSAN LEAF 2018 +29a1b6293b68f2a9|2023-05-28--14-39-04--3 +# NISSAN LEAF 2018 +02cd1b196ec17045|2023-07-25--10-07-20--55 +# NISSAN LEAF 2018 +289d318fded3221a|2023-06-18--09-36-51--109 +# NISSAN LEAF 2018 +39bf496887ea52e5|2023-05-12--07-00-53--34 +# NISSAN LEAF 2018 +4a9d0cf499314bec|2023-05-25--08-01-49--6 +# NISSAN LEAF 2018 +4ed383d0670d4b71|2023-06-18--13-09-17--29 +# NISSAN LEAF 2018 +6b51011199473e01|2023-08-04--14-10-11--7 +# NISSAN LEAF 2018 +7605de164f235364|2023-07-10--16-02-26--2 +# NISSAN LEAF 2018 +9299dd7dadb9007e|2023-06-22--14-05-43--71 +# NISSAN LEAF 2018 +962c4e8ab4396060|2023-06-24--10-43-41--5 +# NISSAN LEAF 2018 +969dd380d52937c6|2023-07-18--21-02-34--1 +# NISSAN LEAF 2018 +ba9225dd390fe75e|2023-05-27--14-19-20--12 +# NISSAN LEAF 2018 +bc12abb1f8ec2243|2023-05-23--16-17-37--4 +# NISSAN LEAF 2018 +c0516da831df9eaf|2023-07-14--16-54-04--3 +# NISSAN LEAF 2018 +bd349df2aad3c6fc|2023-07-11--08-00-00--25 +# NISSAN LEAF 2018 +b9a45053d2ded48b|2023-07-27--05-17-46--16 +# NISSAN LEAF 2018 +d045889f89a2b9a5|2023-07-21--12-27-36--13 +# ACURA RDX 2020 +1943107bbdc1070e|2023-05-14--20-02-24--108 +# ACURA RDX 2020 +23846edb2f7988e3|2023-06-09--21-02-00--65 +# ACURA RDX 2020 +9d3a5a3049d6c50d|2023-05-23--14-03-48--4 +# ACURA RDX 2020 +f8d72ec3768bc1f1|2023-07-19--19-05-48--11 +# ACURA RDX 2020 +fd5000ecde671462|2023-06-06--18-03-06--19 +# TOYOTA HIGHLANDER 2020 +144a496ccff4a0b4|2023-07-25--16-51-08--13 +# TOYOTA HIGHLANDER 2020 +0d44dc6722628b26|2023-06-19--09-17-31--8 +# TOYOTA HIGHLANDER 2020 +29e4d80dc9a37da5|2023-07-14--08-52-21--6 +# TOYOTA HIGHLANDER 2020 +2378eb8397a2b782|2023-05-13--18-10-35--48 +# TOYOTA HIGHLANDER 2020 
+24bef3a2265d4af2|2023-06-05--08-39-30--4 +# TOYOTA HIGHLANDER 2020 +004e41c4761b3bf6|2023-05-11--16-02-55--15 +# TOYOTA HIGHLANDER 2020 +343e82c046c8f6dc|2023-05-30--16-07-55--21 +# TOYOTA HIGHLANDER 2020 +3ffc7b8fd385a504|2023-06-18--13-55-18--3 +# TOYOTA HIGHLANDER 2020 +41860f9bcb6f44b8|2023-07-09--12-52-21--24 +# TOYOTA HIGHLANDER 2020 +3d51bfc7ff880232|2023-05-11--17-34-52--13 +# TOYOTA HIGHLANDER 2020 +4d9d41e34fd9dd71|2023-06-18--13-21-37--15 +# TOYOTA HIGHLANDER 2020 +521a0922e3a04935|2023-05-13--18-45-20--44 +# TOYOTA HIGHLANDER 2020 +55c9f8b884d1db87|2023-06-24--09-58-10--4 +# TOYOTA HIGHLANDER 2020 +5c87f40f937ed46e|2023-06-17--23-40-13--16 +# TOYOTA HIGHLANDER 2020 +5fe239629c1a6cc6|2023-05-25--10-35-55--5 +# TOYOTA HIGHLANDER 2020 +597a8b0eb0c8853b|2023-07-12--12-38-41--2 +# TOYOTA HIGHLANDER 2020 +68863076c79b6e46|2023-07-18--18-27-05--2 +# TOYOTA HIGHLANDER 2020 +68dfa9ee803399d1|2023-06-16--16-12-34--62 +# TOYOTA HIGHLANDER 2020 +6c4eb3920653c33f|2023-06-11--17-26-48--38 +# TOYOTA HIGHLANDER 2020 +709398cb20ec2c8d|2023-07-31--11-04-29--1 +# TOYOTA HIGHLANDER 2020 +702527f5122b8600|2023-07-04--21-51-58--32 +# TOYOTA HIGHLANDER 2020 +71b0f13abdc777f0|2023-06-25--12-30-46--1 +# TOYOTA HIGHLANDER 2020 +75157fa04b7760e2|2023-06-18--14-13-08--8 +# TOYOTA HIGHLANDER 2020 +7332469da3c215b9|2023-06-30--16-06-09--23 +# TOYOTA HIGHLANDER 2020 +7a8dc1b623ec968b|2023-06-26--13-19-39--46 +# TOYOTA HIGHLANDER 2020 +7aead72b15132868|2023-08-07--15-32-55--22 +# TOYOTA HIGHLANDER 2020 +82d9c4c8a45c33d3|2023-07-16--10-29-19--6 +# TOYOTA HIGHLANDER 2020 +85f0cc9d2a7783e5|2023-07-06--15-06-13--2 +# TOYOTA HIGHLANDER 2020 +8527a913671bf82b|2023-07-29--11-52-54--36 +# TOYOTA HIGHLANDER 2020 +84050035bc8a4693|2023-06-14--09-20-16--27 +# TOYOTA HIGHLANDER 2020 +878a25353867b7e1|2023-06-04--19-13-19--5 +# TOYOTA HIGHLANDER 2020 +91eefbd00e06a375|2023-05-21--13-02-56--149 +# TOYOTA HIGHLANDER 2020 +8efd012fc8bc01ed|2023-05-13--13-33-26--16 +# TOYOTA HIGHLANDER 2020 +962482e363d8db53|2023-06-13--15-07-46--189 +# TOYOTA HIGHLANDER 2020 +9ef0941be9f8057d|2023-07-11--17-55-34--1 +# TOYOTA HIGHLANDER 2020 +a2c22070a4a86296|2023-05-17--20-50-51--1 +# TOYOTA HIGHLANDER 2020 +9dce1db48e723a7d|2023-05-22--20-05-34--4 +# TOYOTA HIGHLANDER 2020 +b39c8956c329f098|2023-07-01--15-32-06--8 +# TOYOTA HIGHLANDER 2020 +b67488601fc02963|2023-06-09--13-23-41--20 +# TOYOTA HIGHLANDER 2020 +b75bae9b9bde6210|2023-07-30--08-16-42--46 +# TOYOTA HIGHLANDER 2020 +ba14fa06ffd18df7|2023-06-10--21-16-42--6 +# TOYOTA HIGHLANDER 2020 +bf17b81eba9bf96e|2023-06-09--12-08-11--4 +# TOYOTA HIGHLANDER 2020 +b42bff5ee5d24c72|2023-08-07--17-03-15--29 +# TOYOTA HIGHLANDER 2020 +c006eb73d0b11b29|2023-07-30--12-03-27--62 +# TOYOTA HIGHLANDER 2020 +ce24faef3afe2803|2023-05-17--14-50-24--31 +# TOYOTA HIGHLANDER 2020 +d45a24d7a5a78a2f|2023-05-18--19-16-17--42 +# TOYOTA HIGHLANDER 2020 +d13b3d6d7d5529cf|2023-06-02--17-59-00--7 +# TOYOTA HIGHLANDER 2020 +e0001dc12a80b287|2023-07-05--08-10-47--31 +# TOYOTA HIGHLANDER 2020 +ec46c46b25ceac9c|2023-05-17--09-09-32--10 +# TOYOTA HIGHLANDER 2020 +eda8da0edd1c99c9|2023-07-23--13-03-07--9 +# TOYOTA HIGHLANDER 2020 +f2a06f696f9517b3|2023-07-06--13-58-37--7 +# TOYOTA HIGHLANDER 2020 +f5e4b280d1756282|2023-07-18--09-58-12--7 +# TOYOTA HIGHLANDER 2020 +f50cf090ee700251|2023-08-02--15-21-29--5 +# TOYOTA HIGHLANDER 2020 +f96a58a51695b2ce|2023-06-23--20-54-30--18 +# TOYOTA HIGHLANDER 2020 +f60c2e0e7fd95935|2023-05-15--10-15-08--2 +# TOYOTA HIGHLANDER 2020 +f9ea0ffa53d5759a|2023-06-28--10-59-17--5 +# TOYOTA 
HIGHLANDER 2020 +fd23445cb8537751|2023-07-30--13-32-59--17 +# TOYOTA COROLLA TSS2 2019 +09469776862f3ab8|2023-07-16--15-55-22--17 +# TOYOTA COROLLA TSS2 2019 +075b133b6181e058|2023-07-28--20-07-44--3 +# TOYOTA COROLLA TSS2 2019 +0fbfcf01e4882a83|2023-05-30--15-16-34--6 +# TOYOTA COROLLA TSS2 2019 +0e12554bfe8687d3|2023-07-04--11-36-31--2 +# TOYOTA COROLLA TSS2 2019 +0921b5e7de3ae0b5|2023-05-24--11-51-14--13 +# TOYOTA COROLLA TSS2 2019 +14a8b23f850f4d11|2023-06-12--15-23-21--34 +# TOYOTA COROLLA TSS2 2019 +0fe5135182d67910|2023-05-19--15-08-54--9 +# TOYOTA COROLLA TSS2 2019 +213b44717a9bc58e|2023-07-18--17-09-13--17 +# TOYOTA COROLLA TSS2 2019 +08d7726c5efb3caa|2023-08-01--09-27-21--20 +# TOYOTA COROLLA TSS2 2019 +182caf5dcf20ca16|2023-05-21--15-13-55--226 +# TOYOTA COROLLA TSS2 2019 +0a4445a705516261|2023-06-23--19-18-06--1 +# TOYOTA COROLLA TSS2 2019 +1d0e096df3cecedd|2023-05-31--17-44-20--25 +# TOYOTA COROLLA TSS2 2019 +21788d8d115dfff4|2023-06-02--21-11-27--18 +# TOYOTA COROLLA TSS2 2019 +2b967c9ee4146686|2023-07-04--12-18-13--76 +# TOYOTA COROLLA TSS2 2019 +2ddc8f97834a7e52|2023-06-17--13-56-27--150 +# TOYOTA COROLLA TSS2 2019 +376abd6a7d6c6fa0|2023-08-05--22-10-41--14 +# TOYOTA COROLLA TSS2 2019 +33833d235dea00c8|2023-05-10--15-37-53--27 +# TOYOTA COROLLA TSS2 2019 +39977e9eacdfebb6|2023-07-31--13-31-52--4 +# TOYOTA COROLLA TSS2 2019 +3eb4c34a2a663c37|2023-07-20--11-54-00--36 +# TOYOTA COROLLA TSS2 2019 +3d9c00d3509fd1a5|2023-05-17--12-46-23--1 +# TOYOTA COROLLA TSS2 2019 +3d2b038d1276f564|2023-07-09--12-35-09--14 +# TOYOTA COROLLA TSS2 2019 +421d8e266db99689|2023-08-07--21-14-05--17 +# TOYOTA COROLLA TSS2 2019 +4f50e44908cc46b8|2023-07-28--11-21-40--6 +# TOYOTA COROLLA TSS2 2019 +54a0066de4f6aa53|2023-05-11--07-56-38--15 +# TOYOTA COROLLA TSS2 2019 +60dcd78f23ffcb45|2023-05-31--20-57-42--216 +# TOYOTA COROLLA TSS2 2019 +669372fc0cdbad34|2023-06-22--16-48-24--69 +# TOYOTA COROLLA TSS2 2019 +6ca6cd014b18339b|2023-06-09--20-14-34--30 +# TOYOTA COROLLA TSS2 2019 +6c7b3c70cde5154a|2023-08-08--08-49-34--8 +# TOYOTA COROLLA TSS2 2019 +77bc3a1ef88b0f8c|2023-05-10--13-34-25--3 +# TOYOTA COROLLA TSS2 2019 +7fa981d6c0f04ab5|2023-05-11--07-24-47--15 +# TOYOTA COROLLA TSS2 2019 +80e58d853114e409|2023-06-21--18-02-42--11 +# TOYOTA COROLLA TSS2 2019 +8d23badbf0a9ea4b|2023-07-08--16-39-30--11 +# TOYOTA COROLLA TSS2 2019 +885386596e3eeb9d|2023-08-01--12-35-23--329 +# TOYOTA COROLLA TSS2 2019 +9d7fa9fa4e27966b|2023-07-09--06-25-37--20 +# TOYOTA COROLLA TSS2 2019 +a9e171cea6aa8261|2023-08-04--19-47-40--278 +# TOYOTA COROLLA TSS2 2019 +acd2094a4e29a830|2023-05-23--07-47-56--1 +# TOYOTA COROLLA TSS2 2019 +b4776f404cd487b0|2023-06-30--19-23-05--54 +# TOYOTA COROLLA TSS2 2019 +b0a7a22dea55a1cd|2023-05-28--12-39-18--29 +# TOYOTA COROLLA TSS2 2019 +b49892b434816269|2023-05-16--06-38-31--74 +# TOYOTA COROLLA TSS2 2019 +b41c77a268a0a16f|2023-06-11--11-51-19--2 +# TOYOTA COROLLA TSS2 2019 +bf8377a19ba31955|2023-06-10--16-34-28--28 +# TOYOTA COROLLA TSS2 2019 +c188b4341879aa18|2023-07-12--19-49-05--19 +# TOYOTA COROLLA TSS2 2019 +c1c8b3b7eaef25cc|2023-05-30--16-59-41--5 +# TOYOTA COROLLA TSS2 2019 +bb3106a9271328e3|2023-07-05--10-13-36--2 +# TOYOTA COROLLA TSS2 2019 +c7400d04331ac3ee|2023-05-26--18-06-31--74 +# TOYOTA COROLLA TSS2 2019 +c8aa3fdad5cec446|2023-05-16--12-07-55--12 +# TOYOTA COROLLA TSS2 2019 +cc379727a2ad9378|2023-07-22--19-16-50--38 +# TOYOTA COROLLA TSS2 2019 +cd34344665bc1a4a|2023-05-17--15-03-28--11 +# TOYOTA COROLLA TSS2 2019 +d20ec114453feac2|2023-05-16--17-39-43--7 +# TOYOTA COROLLA TSS2 
2019 +e0aaaef2be9cd155|2023-05-25--21-22-33--63 +# TOYOTA COROLLA TSS2 2019 +edf29b3d6d15be9a|2023-06-12--20-14-03--5 +# TOYOTA COROLLA TSS2 2019 +ecebb0b63434e2f2|2023-07-06--12-35-59--76 +# TOYOTA COROLLA TSS2 2019 +f4d4cc62cc517acd|2023-06-17--12-14-02--3 +# TOYOTA RAV4 2022 +1e0487c8a03931a1|2023-07-17--17-11-07--14 +# TOYOTA RAV4 2022 +1c16ee70deee2972|2023-07-30--07-37-27--18 +# TOYOTA RAV4 2022 +23e0360acaefab4d|2023-06-22--08-05-05--44 +# TOYOTA RAV4 2022 +34fd9fa52bd9065f|2023-07-24--03-59-34--30 +# TOYOTA RAV4 2022 +3ab818cfeff024c4|2023-07-01--13-12-38--75 +# TOYOTA RAV4 2022 +3af132f650bd6b72|2023-06-02--17-34-32--33 +# TOYOTA RAV4 2022 +4d13f4feeb6a3d94|2023-07-01--11-24-38--34 +# TOYOTA RAV4 2022 +6228706487b4b0ea|2023-06-10--21-21-52--23 +# TOYOTA RAV4 2022 +6a2f4f5164a97fcd|2023-08-06--13-16-57--9 +# TOYOTA RAV4 2022 +7ac1ec97ce175121|2023-06-20--17-31-53--11 +# TOYOTA RAV4 2022 +868f770ac624c5dd|2023-06-27--09-15-06--67 +# TOYOTA RAV4 2022 +a5c341bb250ca2f0|2023-06-29--20-25-11--20 +# TOYOTA RAV4 2022 +dfb11f122b6fa282|2023-06-12--18-21-56--11 +# TOYOTA RAV4 2022 +f0a34b105563fa46|2023-08-01--15-32-05--14 +# TOYOTA RAV4 2022 +fa92d93019ed70bd|2023-07-07--01-12-37--17 +# HYUNDAI SANTA FE HYBRID 2022 +129db7c75bce8445|2023-08-05--17-41-16--24 +# HYUNDAI SANTA FE HYBRID 2022 +34397231b0958def|2023-06-10--12-21-46--18 +# HYUNDAI SANTA FE HYBRID 2022 +826b9eb4ac54a1c2|2023-07-08--19-03-34--4 +# TOYOTA HIGHLANDER 2020 +15af9b43bd0c5c76|2023-05-31--09-50-40--92 +# TOYOTA HIGHLANDER 2020 +0bf0ea192bb9cb3c|2023-07-19--13-04-59--24 +# TOYOTA HIGHLANDER 2020 +1c7b969c6660a3dd|2023-05-17--08-13-27--12 +# TOYOTA HIGHLANDER 2020 +29320b5f71353a26|2023-05-14--14-02-25--172 +# TOYOTA HIGHLANDER 2020 +2194b752a88786ac|2023-06-28--09-49-00--9 +# TOYOTA HIGHLANDER 2020 +0e9cc31bf324bed7|2023-07-16--15-18-39--110 +# TOYOTA HIGHLANDER 2020 +27444eac005fc311|2023-08-06--10-13-53--30 +# TOYOTA HIGHLANDER 2020 +2d823e45e0ba7a60|2023-07-11--17-55-38--4 +# TOYOTA HIGHLANDER 2020 +2f831f2ea3eff892|2023-07-17--18-59-16--21 +# TOYOTA HIGHLANDER 2020 +2ee40dc08b5c8680|2023-06-30--18-47-36--52 +# TOYOTA HIGHLANDER 2020 +2f475f484c7e48a4|2023-08-02--09-30-22--2 +# TOYOTA HIGHLANDER 2020 +333605df930e96c4|2023-05-27--12-18-04--1 +# TOYOTA HIGHLANDER 2020 +339a81aefecd3618|2023-08-02--07-10-36--28 +# TOYOTA HIGHLANDER 2020 +368455a0f08f4fec|2023-06-12--16-57-54--4 +# TOYOTA HIGHLANDER 2020 +3dca779f9f65c640|2023-07-26--17-31-54--19 +# TOYOTA HIGHLANDER 2020 +4868775dfada5a32|2023-06-07--09-36-34--4 +# TOYOTA HIGHLANDER 2020 +4a90db58c3f57afa|2023-07-29--17-34-55--42 +# TOYOTA HIGHLANDER 2020 +4e4e4789d5691a95|2023-07-28--18-47-17--8 +# TOYOTA HIGHLANDER 2020 +5647a5fa25085f0d|2023-08-07--14-10-54--13 +# TOYOTA HIGHLANDER 2020 +5f040438a009d6cb|2023-06-22--16-24-51--4 +# TOYOTA HIGHLANDER 2020 +6f6f526b4932d88b|2023-07-02--16-24-30--58 +# TOYOTA HIGHLANDER 2020 +6d0254ba44b664b0|2023-07-08--14-38-16--6 +# TOYOTA HIGHLANDER 2020 +76ced8f2f99250a6|2023-06-02--10-26-47--6 +# TOYOTA HIGHLANDER 2020 +8c1b23fe1568c036|2023-05-19--08-29-02--17 +# TOYOTA HIGHLANDER 2020 +9179d81377e52625|2023-06-30--16-49-47--109 +# TOYOTA HIGHLANDER 2020 +a474346bf635c3de|2023-06-10--17-49-02--17 +# TOYOTA HIGHLANDER 2020 +a023b41cc944b482|2023-06-25--08-55-36--4 +# TOYOTA HIGHLANDER 2020 +b557c584db633c06|2023-08-02--18-49-35--2 +# TOYOTA HIGHLANDER 2020 +b60de7fd1272d92c|2023-06-18--09-59-29--18 +# TOYOTA HIGHLANDER 2020 +b5e1be1336e936f9|2023-08-07--17-15-34--11 +# TOYOTA HIGHLANDER 2020 
+caef64381b29fb3d|2023-07-19--17-27-45--3 +# TOYOTA HIGHLANDER 2020 +c21d5b4c2d8ac085|2023-06-29--07-51-21--1 +# TOYOTA HIGHLANDER 2020 +c7ae3dc86bba4525|2023-08-03--07-29-09--7 +# TOYOTA HIGHLANDER 2020 +ce3075423a164945|2023-05-27--10-28-48--4 +# TOYOTA HIGHLANDER 2020 +d173f04200e837a9|2023-05-13--12-34-10--4 +# TOYOTA HIGHLANDER 2020 +cf748e7031d411c8|2023-05-26--12-43-40--9 +# TOYOTA HIGHLANDER 2020 +d1fd12becb0bf3e2|2023-07-24--16-10-54--96 +# TOYOTA HIGHLANDER 2020 +d5d0b683a9a2ddf2|2023-07-24--18-36-30--9 +# TOYOTA HIGHLANDER 2020 +dc11867167a958d8|2023-06-09--18-49-38--50 +# TOYOTA HIGHLANDER 2020 +dc0f8f4f52442d48|2023-08-01--23-46-46--75 +# TOYOTA HIGHLANDER 2020 +df6ed1e430d585cc|2023-05-15--09-23-49--1 +# TOYOTA HIGHLANDER 2020 +e2ac4cd64bc32dd3|2023-07-31--20-24-01--27 +# TOYOTA HIGHLANDER 2020 +e3cf2794859fd8ea|2023-08-06--10-07-23--28 +# TOYOTA HIGHLANDER 2020 +e6f4a2f13554c77d|2023-05-19--14-17-11--3 +# TOYOTA HIGHLANDER 2020 +e01d0ee44be108b3|2023-05-16--12-17-48--9 +# TOYOTA HIGHLANDER 2020 +e8805c57d88ac65c|2023-06-26--15-03-36--47 +# TOYOTA HIGHLANDER 2020 +ea1b20c0286cb3e4|2023-07-12--09-14-26--34 +# TOYOTA HIGHLANDER 2020 +f20c2d8da930c25a|2023-07-02--16-03-24--33 +# TOYOTA HIGHLANDER 2020 +f57250843256cc1a|2023-05-24--18-01-00--2 +# TOYOTA HIGHLANDER 2020 +f5f17136be17692e|2023-05-30--14-50-39--6 +# TOYOTA HIGHLANDER 2020 +f7edb4b24e3b7321|2023-07-11--11-56-56--3 +# TOYOTA HIGHLANDER 2020 +ffe6475c53d1e78c|2023-07-11--17-56-32--55 +# TOYOTA HIGHLANDER 2020 +fe9f9a84fe26591c|2023-07-06--12-31-05--1 +# MAZDA CX-5 2022 +066b8ed7a80184fb|2023-06-13--23-33-37--62 +# MAZDA CX-5 2022 +1e11e59030d4757d|2023-07-17--12-16-44--1 +# MAZDA CX-5 2022 +05012d5566bf5275|2023-07-29--21-00-17--99 +# MAZDA CX-5 2022 +42ff4de626f8559b|2023-08-01--10-29-15--12 +# MAZDA CX-5 2022 +3b44bbb84058328a|2023-07-16--09-32-09--65 +# MAZDA CX-5 2022 +4ed535dd3ff18b2a|2023-06-05--16-36-49--3 +# MAZDA CX-5 2022 +76c17043c9df3064|2023-07-27--13-58-55--6 +# MAZDA CX-5 2022 +8c807f60018507d4|2023-07-26--09-20-15--16 +# MAZDA CX-5 2022 +8a9dac95d24c4e8d|2023-08-04--09-40-07--7 +# MAZDA CX-5 2022 +aa38b396675f5215|2023-06-28--08-47-57--8 +# MAZDA CX-5 2022 +b36f9b30a01c8521|2023-07-24--17-51-06--5 +# MAZDA CX-5 2022 +bdbbb819d8dc38ac|2023-06-09--07-48-08--63 +# MAZDA CX-5 2022 +c3ab14088c43bdf8|2023-06-30--21-42-12--1 +# MAZDA CX-5 2022 +c76c695d1b0485fe|2023-07-14--18-14-34--37 +# SUBARU IMPREZA SPORT 2020 +17250a2717728fb9|2023-07-20--07-38-29--18 +# SUBARU IMPREZA SPORT 2020 +22f2f49bcbecd829|2023-05-30--21-25-08--5 +# SUBARU IMPREZA SPORT 2020 +59755a6574d9ac67|2023-05-30--16-37-55--7 +# SUBARU IMPREZA SPORT 2020 +6024cc6844765678|2023-07-27--15-11-43--16 +# SUBARU IMPREZA SPORT 2020 +7be3f6b12cd49e4b|2023-06-20--13-50-03--93 +# SUBARU IMPREZA SPORT 2020 +d9df6f87e8feff94|2023-07-04--19-58-51--18 +# SUBARU IMPREZA SPORT 2020 +f900fccd457f495a|2023-05-26--20-18-12--36 +# HYUNDAI PALISADE 2020 +0cc137ccd450204c|2023-06-22--11-38-48--3 +# HYUNDAI PALISADE 2020 +1c5b0ef6f666062d|2023-06-02--08-57-58--2 +# HYUNDAI PALISADE 2020 +1336b7c61f4fba8b|2023-07-27--07-31-12--1 +# HYUNDAI PALISADE 2020 +2c63cbbbba450ba0|2023-06-18--23-59-14--24 +# HYUNDAI PALISADE 2020 +2f1f45af65b7b314|2023-05-30--19-30-46--12 +# HYUNDAI PALISADE 2020 +34ea9dfc9b0f9bbe|2023-05-21--12-38-47--7 +# HYUNDAI PALISADE 2020 +39070cb41aafdfc1|2023-07-05--23-15-41--2 +# HYUNDAI PALISADE 2020 +4c6b27a2c49c555e|2023-06-18--09-37-44--7 +# HYUNDAI PALISADE 2020 +4e5d75a1f3df3ae2|2023-06-09--02-25-39--74 +# HYUNDAI PALISADE 2020 
+562a098bb9c7b922|2023-07-12--16-46-17--1 +# HYUNDAI PALISADE 2020 +628935d7d3e5f4f7|2023-05-24--19-48-58--8 +# HYUNDAI PALISADE 2020 +67a711f176acbbbf|2023-08-03--09-05-39--8 +# HYUNDAI PALISADE 2020 +7272d111d521a7bc|2023-05-22--09-21-09--1 +# HYUNDAI PALISADE 2020 +7b5a53e99b76e8d9|2023-06-14--14-55-32--15 +# HYUNDAI PALISADE 2020 +829fc01ecb03f524|2023-08-06--12-29-40--71 +# HYUNDAI PALISADE 2020 +7a6617bb650ac5f3|2023-06-02--14-13-59--31 +# HYUNDAI PALISADE 2020 +967360f5aae838e5|2023-05-18--23-59-21--1 +# HYUNDAI PALISADE 2020 +98025ffdb4d0d3ea|2023-06-03--02-55-53--33 +# HYUNDAI PALISADE 2020 +9a22857aff5535d8|2023-06-20--17-40-57--4 +# HYUNDAI PALISADE 2020 +9e7951200bd89ca3|2023-06-07--20-31-01--3 +# HYUNDAI PALISADE 2020 +a6e13aa41cb46ac4|2023-07-18--10-56-20--7 +# HYUNDAI PALISADE 2020 +c5f915f8d3049fd4|2023-07-15--09-57-02--1 +# HYUNDAI PALISADE 2020 +c502b178b93bcc57|2023-06-08--16-22-33--12 +# HYUNDAI PALISADE 2020 +c7d0d7dde77d49ce|2023-07-15--17-08-23--35 +# HYUNDAI PALISADE 2020 +d3bd16e6fc80062c|2023-05-11--10-24-46--4 +# HYUNDAI PALISADE 2020 +e0c6f1daf613f27e|2023-06-19--17-55-55--15 +# HYUNDAI PALISADE 2020 +ebdfcd4184611cfd|2023-08-04--12-56-10--1 +# HYUNDAI PALISADE 2020 +f4132dc91ccfa36a|2023-05-21--06-05-05--2 +# HYUNDAI PALISADE 2020 +fb3784f688906f36|2023-05-12--17-22-59--32 +# HONDA CR-V 2017 +1dd5f02533643f14|2023-06-03--12-17-36--8 +# HONDA CR-V 2017 +008d92ddb9798681|2023-06-18--12-41-21--8 +# HONDA CR-V 2017 +1b7349c28512586d|2023-07-14--18-42-54--26 +# HONDA CR-V 2017 +272caf9f22b2c59b|2023-06-18--12-51-26--22 +# HONDA CR-V 2017 +1069cd145e996003|2023-05-17--22-53-14--7 +# HONDA CR-V 2017 +2d03366246293eae|2023-05-31--17-40-01--9 +# HONDA CR-V 2017 +365b09442064886e|2023-07-24--10-34-12--9 +# HONDA CR-V 2017 +3bf8dc045bc9e677|2023-07-27--19-14-11--8 +# HONDA CR-V 2017 +40c808560e92c311|2023-07-19--17-39-48--5 +# HONDA CR-V 2017 +43e1963f8d94b15a|2023-07-15--18-32-25--8 +# HONDA CR-V 2017 +45fa8a98dc3bc264|2023-07-11--08-22-54--7 +# HONDA CR-V 2017 +50b4e56518f71489|2023-07-31--05-28-30--14 +# HONDA CR-V 2017 +562834cee8a360c0|2023-07-08--08-56-37--53 +# HONDA CR-V 2017 +5a3959d85bbde304|2023-07-18--17-04-29--31 +# HONDA CR-V 2017 +6b185836ee698c26|2023-07-07--20-52-24--7 +# HONDA CR-V 2017 +84b8b497d5e13b6a|2023-07-30--13-30-24--17 +# HONDA CR-V 2017 +846e6ba1cb8c5f85|2023-07-04--11-51-25--35 +# HONDA CR-V 2017 +89583b784eacd386|2023-06-24--23-32-36--1 +# HONDA CR-V 2017 +8faf799674fa044b|2023-07-18--19-49-14--15 +# HONDA CR-V 2017 +9732dc4fef24cf7a|2023-08-04--18-18-13--9 +# HONDA CR-V 2017 +a3fc26a4da829bb9|2023-05-28--14-21-54--39 +# HONDA CR-V 2017 +a9a580bdb154bb4d|2023-06-29--16-21-57--17 +# HONDA CR-V 2017 +b4f79d4e97360586|2023-07-29--16-32-40--16 +# HONDA CR-V 2017 +c40202b70807d4aa|2023-07-01--16-09-28--15 +# HONDA CR-V 2017 +c3d7cef4d9450951|2023-07-29--11-39-57--72 +# HONDA CR-V 2017 +c390b60ad96d2ff2|2023-07-10--16-40-16--5 +# HONDA CR-V 2017 +d33bc6acf39eb0d5|2023-08-04--10-30-07--18 +# HONDA CR-V 2017 +d360e36bf258dcd8|2023-07-20--18-11-51--29 +# HONDA CR-V 2017 +d26e907f37fcd37d|2023-06-29--07-04-20--2 +# HONDA CR-V 2017 +dcb2ee34f8c4c2be|2023-05-14--16-35-57--7 +# HONDA CR-V 2017 +e22ac98f691884e1|2023-08-02--17-22-36--3 +# HONDA CR-V 2017 +e69f2a903ed82e49|2023-06-27--19-39-46--8 +# HONDA CR-V 2017 +e8a630b87b2b0b7c|2023-07-21--16-31-16--12 +# HONDA CR-V 2017 +f1d6758277a8e2c3|2023-06-23--17-38-10--2 +# HONDA CR-V 2017 +f369374ecb30d9ad|2023-06-02--13-22-03--1 +# HONDA CR-V 2017 +ffa76986ec8e871e|2023-06-08--17-30-25--31 +# TOYOTA SIENNA 
2018 +15141c10c47e840c|2023-08-06--19-46-46--2 +# TOYOTA SIENNA 2018 +033adba45e0f9e9e|2023-06-22--17-20-48--17 +# TOYOTA SIENNA 2018 +08aee1fd12dbcec9|2023-07-08--11-52-25--1 +# TOYOTA SIENNA 2018 +0401ca905fe1e3c9|2023-06-30--08-37-39--8 +# TOYOTA SIENNA 2018 +1bbce2b3aadea8fc|2023-07-04--21-48-31--12 +# TOYOTA SIENNA 2018 +13fbff4f559b022f|2023-07-28--12-53-45--76 +# TOYOTA SIENNA 2018 +336f30f83810c50a|2023-07-03--10-36-11--16 +# TOYOTA SIENNA 2018 +40a8bdeaeef71f2e|2023-07-12--09-25-54--4 +# TOYOTA SIENNA 2018 +4bd71e1011d03cb0|2023-07-31--15-09-23--8 +# TOYOTA SIENNA 2018 +5a2b996389025b8b|2023-07-09--20-07-13--168 +# TOYOTA SIENNA 2018 +66253de0fd139602|2023-08-03--17-10-18--12 +# TOYOTA SIENNA 2018 +6aa41b3f53b8dfbb|2023-07-16--11-13-32--15 +# TOYOTA SIENNA 2018 +6c4a3e1fb283f169|2023-06-15--16-59-16--2 +# TOYOTA SIENNA 2018 +78c139899b6152e7|2023-06-29--22-20-41--1 +# TOYOTA SIENNA 2018 +8624c343dda64ca1|2023-07-15--16-06-24--68 +# TOYOTA SIENNA 2018 +8c995b0a7884acaa|2023-06-01--12-29-57--40 +# TOYOTA SIENNA 2018 +96f32ba699f695d0|2023-08-02--13-17-17--9 +# TOYOTA SIENNA 2018 +a001f23b71be2971|2023-07-14--12-10-15--14 +# TOYOTA SIENNA 2018 +a5b340427848f1e3|2023-05-31--17-33-24--14 +# TOYOTA SIENNA 2018 +a8d3b0c452c29da7|2023-07-01--22-59-20--40 +# TOYOTA SIENNA 2018 +a7fde1302557afb0|2023-05-24--10-07-00--1 +# TOYOTA SIENNA 2018 +bd1e8f178842c492|2023-07-15--15-33-04--19 +# TOYOTA SIENNA 2018 +c793f2377f82673d|2023-06-10--15-19-02--5 +# TOYOTA SIENNA 2018 +ce728cfb301d0987|2023-07-23--16-23-48--38 +# TOYOTA SIENNA 2018 +dc352a2d23789b45|2023-07-19--14-12-21--106 +# TOYOTA SIENNA 2018 +e77aa024ac711986|2023-07-29--18-37-34--1 +# TOYOTA SIENNA 2018 +e7ada7e863132190|2023-06-07--11-54-54--25 +# TOYOTA SIENNA 2018 +e7831e2f6a8a1279|2023-07-18--20-20-56--10 +# TOYOTA SIENNA 2018 +efa518043b8f287e|2023-08-06--19-55-03--156 +# TOYOTA SIENNA 2018 +f258933135d07fc2|2023-07-08--12-05-44--5 +# TOYOTA SIENNA 2018 +f2a49af4f4758050|2023-08-06--08-52-58--22 +# TOYOTA SIENNA 2018 +ff4fdad9a9b183c2|2023-07-30--20-55-31--39 +# FORD EXPLORER 6TH GEN +0b91b433b9332780|2023-07-13--14-51-21--126 +# FORD EXPLORER 6TH GEN +4458bd61cf193dab|2023-07-28--12-22-12--435 +# FORD EXPLORER 6TH GEN +8e1bab39e558e773|2023-06-30--18-11-15--11 +# FORD EXPLORER 6TH GEN +f0709d2bc6ca451f|2023-05-24--12-29-22--31 +# FORD EXPLORER 6TH GEN +f166567f78fad32f|2023-06-23--22-48-43--23 +# TOYOTA HIGHLANDER 2017 +0a140f06ff5e5751|2023-06-22--17-41-43--18 +# TOYOTA HIGHLANDER 2017 +0e6f1a230ecf5dc9|2023-05-22--15-16-37--1 +# TOYOTA HIGHLANDER 2017 +1213650a719d1429|2023-08-06--16-21-48--16 +# TOYOTA HIGHLANDER 2017 +297518c3eb259e70|2023-05-15--13-27-34--2 +# TOYOTA HIGHLANDER 2017 +3a2ae9ef81244966|2023-07-20--15-56-17--65 +# TOYOTA HIGHLANDER 2017 +467419fc069b553e|2023-05-23--13-38-56--6 +# TOYOTA HIGHLANDER 2017 +4814b507cb35653c|2023-06-28--17-59-25--4 +# TOYOTA HIGHLANDER 2017 +4829a5573c5de536|2023-08-01--21-06-48--13 +# TOYOTA HIGHLANDER 2017 +4c486ce35e8dd3b4|2023-05-30--18-52-10--46 +# TOYOTA HIGHLANDER 2017 +6dabe682c1a8d437|2023-07-07--14-25-46--9 +# TOYOTA HIGHLANDER 2017 +7c8bb6201903618b|2023-07-08--08-18-26--22 +# TOYOTA HIGHLANDER 2017 +7bd25a6c7427cd3c|2023-08-06--10-56-19--20 +# TOYOTA HIGHLANDER 2017 +832d14f92ad84eb6|2023-05-17--10-51-58--18 +# TOYOTA HIGHLANDER 2017 +8a2f77d0f2d45965|2023-06-14--16-51-15--52 +# TOYOTA HIGHLANDER 2017 +8b9d7d2d35e6118c|2023-08-04--15-19-14--2 +# TOYOTA HIGHLANDER 2017 +92e13d6c269f512d|2023-05-13--19-58-04--1 +# TOYOTA HIGHLANDER 2017 
+9cea9d0468eb3e96|2023-06-22--22-20-21--6 +# TOYOTA HIGHLANDER 2017 +9a6357ba59f0bc3d|2023-06-10--09-52-06--4 +# TOYOTA HIGHLANDER 2017 +a6f5c094b5b7fb33|2023-06-16--07-29-15--1 +# TOYOTA HIGHLANDER 2017 +ad8f2bf6b31dce85|2023-07-29--17-39-45--16 +# TOYOTA HIGHLANDER 2017 +aa7ed46cc8131f89|2023-05-29--07-13-22--9 +# TOYOTA HIGHLANDER 2017 +d886ae895cef77ef|2023-08-02--12-36-12--19 +# TOYOTA HIGHLANDER 2017 +d481bf9699bde1ee|2023-07-06--01-52-18--2 +# TOYOTA HIGHLANDER 2017 +dca9566a6850e4bf|2023-05-14--17-12-55--5 +# TOYOTA HIGHLANDER 2017 +dfe7ead74bb47dc8|2023-08-07--10-26-51--6 +# TOYOTA HIGHLANDER 2017 +e521d97f331b2e34|2023-07-12--13-42-10--33 +# TOYOTA HIGHLANDER 2017 +f01b5b7d7f4c5309|2023-07-01--01-34-34--23 +# TOYOTA HIGHLANDER 2017 +fe99b3a8c9ab61b9|2023-06-18--14-21-37--5 +# TOYOTA PRIUS TSS2 2021 +10a42f77b05a401f|2023-06-23--17-30-35--8 +# TOYOTA PRIUS TSS2 2021 +1a35342c8a1b558e|2023-06-26--21-28-43--18 +# TOYOTA PRIUS TSS2 2021 +0377091bf159038d|2023-06-18--17-04-20--23 +# TOYOTA PRIUS TSS2 2021 +135321a6baf43ec3|2023-07-19--06-07-57--21 +# TOYOTA PRIUS TSS2 2021 +0e511ea80aaf4931|2023-06-05--05-54-38--3 +# TOYOTA PRIUS TSS2 2021 +2061c2bef508176e|2023-06-20--17-00-45--2 +# TOYOTA PRIUS TSS2 2021 +1128edc391bf47c1|2023-05-19--18-10-40--14 +# TOYOTA PRIUS TSS2 2021 +233ef9dcd57e63d9|2023-07-27--15-19-23--10 +# TOYOTA PRIUS TSS2 2021 +1531d4305c9ebbf0|2023-05-18--14-52-53--31 +# TOYOTA PRIUS TSS2 2021 +115345214029cb8b|2023-06-25--14-58-20--2 +# TOYOTA PRIUS TSS2 2021 +283695e2bd4b03db|2023-07-18--22-52-41--5 +# TOYOTA PRIUS TSS2 2021 +305006f2b633be2d|2023-07-16--12-48-12--4 +# TOYOTA PRIUS TSS2 2021 +39a1e6521e802f2f|2023-05-24--17-16-54--1 +# TOYOTA PRIUS TSS2 2021 +3aea2f5376c16768|2023-07-17--05-21-08--25 +# TOYOTA PRIUS TSS2 2021 +449ac093304ba731|2023-05-11--11-14-03--40 +# TOYOTA PRIUS TSS2 2021 +451be4fe585fdac0|2023-06-16--16-08-55--5 +# TOYOTA PRIUS TSS2 2021 +45937213b62ce5ed|2023-05-23--15-56-35--166 +# TOYOTA PRIUS TSS2 2021 +53ecc962c9d86d50|2023-07-30--10-26-25--2 +# TOYOTA PRIUS TSS2 2021 +5b12a3ea4e95c4a5|2023-06-22--17-16-55--37 +# TOYOTA PRIUS TSS2 2021 +656a02ec772a7a4d|2023-06-30--01-33-46--34 +# TOYOTA PRIUS TSS2 2021 +60f3f8039fafd263|2023-05-18--17-30-08--16 +# TOYOTA PRIUS TSS2 2021 +698cf41e2173752f|2023-06-30--12-01-37--12 +# TOYOTA PRIUS TSS2 2021 +67344e48576a3ac2|2023-07-21--14-44-32--18 +# TOYOTA PRIUS TSS2 2021 +6ba67d5a83d831a8|2023-06-06--18-34-35--1 +# TOYOTA PRIUS TSS2 2021 +714dd3f9e708f73c|2023-06-14--14-51-51--6 +# TOYOTA PRIUS TSS2 2021 +716a982c6d0c5547|2023-07-25--11-17-26--30 +# TOYOTA PRIUS TSS2 2021 +6e90fb55f908940a|2023-05-10--17-32-34--3 +# TOYOTA PRIUS TSS2 2021 +8550ea8a4e07f761|2023-06-24--06-29-29--4 +# TOYOTA PRIUS TSS2 2021 +868ec8b032712e8c|2023-05-17--10-32-22--36 +# TOYOTA PRIUS TSS2 2021 +9048498ebc1297b3|2023-05-22--20-36-01--6 +# TOYOTA PRIUS TSS2 2021 +91ec51ef841d0fe5|2023-08-01--09-03-49--4 +# TOYOTA PRIUS TSS2 2021 +a0adc8b389da9563|2023-07-16--17-39-14--1 +# TOYOTA PRIUS TSS2 2021 +a2e5f1439e2cdf9e|2023-05-26--08-13-31--13 +# TOYOTA PRIUS TSS2 2021 +a8fec02c41697f7c|2023-07-12--16-54-45--7 +# TOYOTA PRIUS TSS2 2021 +a883325ac00c65f9|2023-05-28--18-17-32--18 +# TOYOTA PRIUS TSS2 2021 +aaf71ff1348f4b42|2023-06-09--09-09-07--4 +# TOYOTA PRIUS TSS2 2021 +a74cb70b7a19e427|2023-06-11--13-12-21--62 +# TOYOTA PRIUS TSS2 2021 +ac1a320cbefd424a|2023-06-20--15-09-25--12 +# TOYOTA PRIUS TSS2 2021 +aedf9ebed7828264|2023-05-16--18-52-09--5 +# TOYOTA PRIUS TSS2 2021 +b8622cf559d979f9|2023-06-09--14-40-58--2 +# TOYOTA PRIUS 
TSS2 2021 +b4ce4d1721db5c94|2023-06-18--13-33-24--32 +# TOYOTA PRIUS TSS2 2021 +bf3057bccc689886|2023-05-10--14-25-33--4 +# TOYOTA PRIUS TSS2 2021 +c1a2ba00c8f7e9d8|2023-07-31--14-06-34--2 +# TOYOTA PRIUS TSS2 2021 +ba7f87150ae0bbeb|2023-06-21--13-33-28--12 +# TOYOTA PRIUS TSS2 2021 +c363f25afff8d58e|2023-05-16--14-35-12--8 +# TOYOTA PRIUS TSS2 2021 +c3834a2fe82b632b|2023-07-18--18-50-19--8 +# TOYOTA PRIUS TSS2 2021 +cbe57b408a885627|2023-07-09--11-00-32--5 +# TOYOTA PRIUS TSS2 2021 +ccda673780fb737f|2023-05-21--12-39-40--2 +# TOYOTA PRIUS TSS2 2021 +c4e58bbe953ef5c9|2023-07-27--10-23-32--13 +# TOYOTA PRIUS TSS2 2021 +cf8446aa9d2793e8|2023-06-04--22-02-12--14 +# TOYOTA PRIUS TSS2 2021 +d2bfce9a4caff67f|2023-07-12--15-21-04--55 +# TOYOTA PRIUS TSS2 2021 +d083dc54dae836f0|2023-05-23--05-01-18--24 +# TOYOTA PRIUS TSS2 2021 +d653e1fe9aced0bb|2023-05-19--16-19-28--10 +# TOYOTA PRIUS TSS2 2021 +d67fb9ab2b635dc9|2023-05-12--21-49-32--13 +# TOYOTA PRIUS TSS2 2021 +dd39308405abab68|2023-06-03--08-04-58--51 +# TOYOTA PRIUS TSS2 2021 +e65253687b3999e0|2023-07-02--08-32-02--2 +# TOYOTA PRIUS TSS2 2021 +e921feb7a05eb6b2|2023-08-06--14-08-10--53 +# TOYOTA PRIUS TSS2 2021 +ef4fae77e5d45d9d|2023-07-23--13-18-02--4 +# TOYOTA PRIUS TSS2 2021 +f0a29b63232a3fbd|2023-07-20--07-10-56--30 +# TOYOTA PRIUS TSS2 2021 +faf6d8bcab5ae3a1|2023-07-05--16-00-03--6 +# TOYOTA PRIUS TSS2 2021 +fe2feb102f0b3824|2023-07-31--15-55-42--88 +# TOYOTA PRIUS TSS2 2021 +fed47dc1d49625ca|2023-06-11--13-37-17--5 +# TOYOTA RAV4 2022 +170e10c2cb0432dd|2023-05-17--11-35-52--52 +# TOYOTA RAV4 2022 +0dbef6854d19f908|2023-05-14--16-48-10--34 +# TOYOTA RAV4 2022 +2475fb3eb2ffcc2e|2023-08-05--15-20-42--48 +# TOYOTA RAV4 2022 +08583cb7cd02965d|2023-07-13--14-00-59--23 +# TOYOTA RAV4 2022 +255d0a53069b33c7|2023-05-16--07-12-07--33 +# TOYOTA RAV4 2022 +3507bf7e7573d50b|2023-08-05--15-18-07--44 +# TOYOTA RAV4 2022 +4c7c30edb6dffa5f|2023-08-04--18-14-42--46 +# TOYOTA RAV4 2022 +4e9c8dcbfedefc48|2023-08-07--19-19-04--1 +# TOYOTA RAV4 2022 +58138d1ad389f4d0|2023-07-24--19-33-45--11 +# TOYOTA RAV4 2022 +5374d1774eac5197|2023-06-14--11-26-16--3 +# TOYOTA RAV4 2022 +558928e2034bcef7|2023-06-09--14-26-37--14 +# TOYOTA RAV4 2022 +5bf05f6701c98d32|2023-06-30--13-01-19--22 +# TOYOTA RAV4 2022 +617492c124a692ef|2023-07-20--14-19-10--6 +# TOYOTA RAV4 2022 +6bd14dfc84131769|2023-06-22--13-55-05--82 +# TOYOTA RAV4 2022 +6fe3baa907616d50|2023-05-22--17-36-04--9 +# TOYOTA RAV4 2022 +73f49c9e89a98ff9|2023-06-03--21-51-04--5 +# TOYOTA RAV4 2022 +77f07def1eec9d4e|2023-05-20--12-35-38--58 +# TOYOTA RAV4 2022 +773c6c0313a36828|2023-08-01--12-10-22--4 +# TOYOTA RAV4 2022 +825c2be1679633af|2023-06-08--14-27-04--24 +# TOYOTA RAV4 2022 +8547882d5a9638ed|2023-05-20--16-46-23--40 +# TOYOTA RAV4 2022 +8a6df5e317cdb0f1|2023-06-22--11-48-00--7 +# TOYOTA RAV4 2022 +8c3afba96b7a5db6|2023-05-20--15-33-13--12 +# TOYOTA RAV4 2022 +91a5f9bc5181e3ba|2023-07-17--13-12-48--3 +# TOYOTA RAV4 2022 +932fa1fa23f936dd|2023-07-03--14-05-29--29 +# TOYOTA RAV4 2022 +98b8395bda1abb3d|2023-07-15--15-39-42--1 +# TOYOTA RAV4 2022 +9af9456fb41cefdc|2023-06-08--16-48-49--25 +# TOYOTA RAV4 2022 +9f7a308d4490eb77|2023-05-11--16-59-21--3 +# TOYOTA RAV4 2022 +9fd2e1d2322620c1|2023-06-22--08-20-30--149 +# TOYOTA RAV4 2022 +bc1fe643a3929103|2023-07-17--19-45-52--8 +# TOYOTA RAV4 2022 +d8117083fe3d623d|2023-07-29--18-25-35--2 +# TOYOTA RAV4 2022 +dd2ff62df396c1fc|2023-05-21--18-40-13--120 +# TOYOTA RAV4 2022 +e16e4f4ef04a4ab3|2023-07-06--07-30-04--15 +# TOYOTA RAV4 2022 
+eee593d3239d7c3c|2023-07-20--07-09-40--2 +# TOYOTA RAV4 2022 +f5a63eb328699685|2023-07-02--19-58-47--1 +# TOYOTA RAV4 2022 +f2188129ca0904e2|2023-05-21--17-35-49--8 +# TOYOTA RAV4 2022 +f60772e6acef55c4|2023-06-22--20-18-39--47 +# TOYOTA RAV4 2022 +fa323424d809e22a|2023-05-13--20-15-51--24 +# TOYOTA CAMRY 2018 +06362e6a7f0b400b|2023-07-17--15-01-53--27 +# TOYOTA CAMRY 2018 +0066f60689e612ed|2023-07-26--10-32-30--15 +# TOYOTA CAMRY 2018 +098b070a8ea9766b|2023-06-16--15-23-49--11 +# TOYOTA CAMRY 2018 +02a10fdbba41d6c4|2023-05-11--05-10-43--4 +# TOYOTA CAMRY 2018 +22331374d2858074|2023-06-15--11-27-50--2 +# TOYOTA CAMRY 2018 +071f13e1cf591bd8|2023-06-09--21-07-21--30 +# TOYOTA CAMRY 2018 +1b07ad9af1629f61|2023-05-18--11-54-09--41 +# TOYOTA CAMRY 2018 +27f66d238e2daff2|2023-07-26--13-16-24--8 +# TOYOTA CAMRY 2018 +30d4248ac12fd57c|2023-05-25--11-28-46--3 +# TOYOTA CAMRY 2018 +2f37c007683e85ba|2023-05-10--18-35-29--12 +# TOYOTA CAMRY 2018 +36ad47227a8b6e0d|2023-07-11--15-03-35--16 +# TOYOTA CAMRY 2018 +3b69a15335041c72|2023-05-14--01-32-08--4 +# TOYOTA CAMRY 2018 +3cc72445faf2b595|2023-06-23--13-50-08--20 +# TOYOTA CAMRY 2018 +3f4d8c6e1329405c|2023-07-24--20-49-51--5 +# TOYOTA CAMRY 2018 +5a518984e4caef1d|2023-08-01--19-47-13--1 +# TOYOTA CAMRY 2018 +7ebcdd800f653ec4|2023-07-23--14-28-06--4 +# TOYOTA CAMRY 2018 +9a052d2d382b6896|2023-07-12--21-24-54--9 +# TOYOTA CAMRY 2018 +9c86ed5c7284733c|2023-07-29--17-35-37--8 +# TOYOTA CAMRY 2018 +a000cb5ab7ef5223|2023-05-31--21-24-48--10 +# TOYOTA CAMRY 2018 +b34be3bf345f0f5e|2023-07-12--23-12-39--124 +# TOYOTA CAMRY 2018 +b4057b71f56d1191|2023-07-31--16-16-57--4 +# TOYOTA CAMRY 2018 +b7b58a594926fa23|2023-07-27--23-28-50--4 +# TOYOTA CAMRY 2018 +ba0844983cdec847|2023-08-04--11-47-17--7 +# TOYOTA CAMRY 2018 +ba4a94169bf12f32|2023-05-11--16-50-56--4 +# TOYOTA CAMRY 2018 +be8ed458ac4879c8|2023-07-10--20-19-35--10 +# TOYOTA CAMRY 2018 +c2c9a0e4f71e20e6|2023-05-30--14-48-23--38 +# TOYOTA CAMRY 2018 +ce4328694fa2cca9|2023-05-13--02-19-59--4 +# TOYOTA CAMRY 2018 +cd42d2594be938f7|2023-07-25--07-06-01--16 +# TOYOTA CAMRY 2018 +cca80b7bda70c17a|2023-05-25--17-06-27--34 +# TOYOTA CAMRY 2018 +d76803d2dfa3a00e|2023-07-17--00-53-09--7 +# TOYOTA CAMRY 2018 +e27a03030a436e84|2023-06-20--09-23-41--117 +# TOYOTA CAMRY 2018 +ebc7fb7dfd3839d9|2023-06-14--06-06-07--10 +# TOYOTA CAMRY 2018 +ec7de7d250fdcd1d|2023-07-31--17-55-49--1 +# TOYOTA CAMRY 2018 +f71b4f559fb531ad|2023-05-10--16-47-20--31 +# AUDI A3 3RD GEN +200c952f826a6447|2023-07-02--11-55-50--7 +# AUDI A3 3RD GEN +359bcd3ea48092f6|2023-06-03--07-32-15--3 +# AUDI A3 3RD GEN +3ac7029f79711ca5|2023-06-30--07-41-35--5 +# AUDI A3 3RD GEN +76b48b05df10f05e|2023-07-08--16-05-11--4 +# AUDI A3 3RD GEN +a973e9182bc96a2c|2023-06-28--11-02-10--11 +# VOLKSWAGEN ATLAS 1ST GEN +16758e226aa2f747|2023-07-28--15-30-42--3 +# VOLKSWAGEN ATLAS 1ST GEN +16e8546287ecaada|2023-06-26--17-34-08--3 +# VOLKSWAGEN ATLAS 1ST GEN +2bbad62414dc6d9b|2023-08-05--18-02-38--9 +# VOLKSWAGEN ATLAS 1ST GEN +3174d252cec3bf6f|2023-07-04--09-24-03--11 +# VOLKSWAGEN ATLAS 1ST GEN +3737f89a2e0db063|2023-07-19--08-01-06--7 +# VOLKSWAGEN ATLAS 1ST GEN +4048de296d5207fb|2023-07-28--23-35-13--9 +# VOLKSWAGEN ATLAS 1ST GEN +58d4dfb7f4fee11d|2023-08-06--15-55-59--11 +# VOLKSWAGEN ATLAS 1ST GEN +793830b37089fe1d|2023-07-19--17-08-52--1 +# VOLKSWAGEN ATLAS 1ST GEN +d8e893cc7739f7d5|2023-06-20--09-35-18--10 +# VOLKSWAGEN ATLAS 1ST GEN +d7251ede344ff957|2023-07-21--16-32-02--61 +# VOLKSWAGEN ATLAS 1ST GEN +e415c44c77c17e15|2023-05-10--07-55-50--2 +# VOLKSWAGEN ATLAS 
1ST GEN +e80066d826c705a9|2023-08-07--15-23-13--7 +# VOLKSWAGEN ATLAS 1ST GEN +f4971592598fa9a3|2023-06-10--11-08-36--7 +# VOLKSWAGEN ATLAS 1ST GEN +ffdd12beb96d5b54|2023-07-30--19-09-47--62 +# LEXUS RX 2016 +1786eaa4cc844889|2023-06-30--13-53-39--91 +# LEXUS RX 2016 +0cf857b1b13bd7cd|2023-07-28--22-10-53--3 +# LEXUS RX 2016 +0229eb43387cef7a|2023-06-15--18-47-25--11 +# LEXUS RX 2016 +2a566b7f69eb2a77|2023-05-18--18-12-09--12 +# LEXUS RX 2016 +592ae47bcb1a6658|2023-06-28--03-11-52--3 +# LEXUS RX 2016 +62b4af07ac90a8e5|2023-07-30--18-53-41--116 +# LEXUS RX 2016 +80ea6c1ff7648289|2023-07-25--18-31-23--9 +# LEXUS RX 2016 +8b196fa003fe55c9|2023-07-06--13-59-02--1 +# LEXUS RX 2016 +b04a5e7ac1c97127|2023-06-11--21-03-22--14 +# LEXUS RX 2016 +bba0a7d99d07b4d0|2023-07-17--10-47-12--16 +# LEXUS RX 2016 +c318581fb51199e6|2023-06-21--09-08-17--47 +# LEXUS RX 2016 +c8d3dd7cfe3c2bef|2023-05-16--07-49-03--21 +# LEXUS RX 2016 +d93ff944f9757e06|2023-07-04--00-28-00--4 +# LEXUS RX 2016 +e3792885a81edc65|2023-05-10--08-26-43--30 +# LEXUS RX 2016 +ea93dc40fbf84c96|2023-06-10--18-26-40--5 +# LEXUS ES 2019 +24f960aeabdd0b45|2023-07-22--22-56-58--9 +# LEXUS ES 2019 +221780419969efb8|2023-06-03--14-00-10--24 +# LEXUS ES 2019 +43000f0e5e058b22|2023-08-03--16-55-17--56 +# LEXUS ES 2019 +47724a191e0f4d9e|2023-06-24--20-39-39--28 +# LEXUS ES 2019 +5aa51c7df8ce288d|2023-07-14--07-31-51--14 +# LEXUS ES 2019 +5c0038031497257b|2023-07-11--16-58-54--45 +# LEXUS ES 2019 +5ad3c8116ceedf0e|2023-06-09--10-48-08--4 +# LEXUS ES 2019 +6d867424dc48c01b|2023-07-21--20-19-27--19 +# LEXUS ES 2019 +89c10c27e1a41814|2023-06-05--21-45-17--47 +# LEXUS ES 2019 +b9c9b1d4e4277fec|2023-07-09--10-55-55--3 +# LEXUS ES 2019 +eebb9787e66670b1|2023-06-09--13-43-21--22 +# HYUNDAI SANTA FE 2022 +1ad7728ce54a161d|2023-06-27--16-42-41--38 +# HYUNDAI SANTA FE 2022 +2d394786f239cd0d|2023-07-16--12-34-58--17 +# HYUNDAI SANTA FE 2022 +305cf2a0bda0e51c|2023-07-20--17-07-34--1 +# HYUNDAI SANTA FE 2022 +2fcc9a43453b10de|2023-06-30--10-25-43--12 +# HYUNDAI SANTA FE 2022 +782fcdb3a42ca1fa|2023-05-26--02-02-12--1 +# HYUNDAI SANTA FE 2022 +7a6ba99ba49954c0|2023-06-01--11-46-54--55 +# HYUNDAI SANTA FE 2022 +cc2f3c4ab6d7758b|2023-07-17--10-14-06--18 +# HYUNDAI SANTA FE 2022 +e37432b18654a7cc|2023-06-29--15-51-04--50 +# TOYOTA CAMRY 2021 +080fffcdcdf847e2|2023-06-17--22-09-54--16 +# TOYOTA CAMRY 2021 +256b964e662f0e39|2023-08-02--15-53-58--33 +# TOYOTA CAMRY 2021 +2422760916ecd3ed|2023-07-30--00-10-14--43 +# TOYOTA CAMRY 2021 +017772face80c94f|2023-06-22--17-04-43--15 +# TOYOTA CAMRY 2021 +1916f6660fdef56b|2023-05-22--16-31-45--50 +# TOYOTA CAMRY 2021 +171516dbc8965a98|2023-08-08--11-12-03--7 +# TOYOTA CAMRY 2021 +289cbd8c4e814c92|2023-07-24--16-08-08--60 +# TOYOTA CAMRY 2021 +2ec957d01038234b|2023-07-19--18-44-13--28 +# TOYOTA CAMRY 2021 +3ba061cbb7a708c3|2023-07-11--11-36-38--4 +# TOYOTA CAMRY 2021 +3ff411160b72e90e|2023-06-11--17-08-09--13 +# TOYOTA CAMRY 2021 +52682dee0263bc9e|2023-07-11--21-46-17--2 +# TOYOTA CAMRY 2021 +51f386982b6e8a77|2023-07-08--20-11-27--27 +# TOYOTA CAMRY 2021 +542e88ffeabefde6|2023-07-13--15-57-13--4 +# TOYOTA CAMRY 2021 +893ae4414cc33177|2023-07-17--16-27-47--44 +# TOYOTA CAMRY 2021 +914f01e43165ff12|2023-06-16--15-13-48--15 +# TOYOTA CAMRY 2021 +b8c0e1ef3bae2ae8|2023-07-06--15-56-32--1 +# TOYOTA CAMRY 2021 +bfd59919af7a9183|2023-05-22--17-06-38--3 +# TOYOTA CAMRY 2021 +c152e86589f67fa8|2023-06-09--11-26-31--8 +# TOYOTA CAMRY 2021 +ec21b3b2491e1d5b|2023-05-21--20-10-40--54 +# TOYOTA CAMRY 2021 
+f56e1b6b8d3d4326|2023-06-16--16-40-59--19 +# LEXUS RX 2020 +14bae1f3b3778de2|2023-07-29--08-59-05--1 +# LEXUS RX 2020 +221bc8e55b068d8c|2023-07-22--14-09-28--5 +# LEXUS RX 2020 +284aa7812f549210|2023-07-17--13-43-52--15 +# LEXUS RX 2020 +02b50dc00776ca3c|2023-06-04--21-14-16--6 +# LEXUS RX 2020 +12f5389d0abdd2b0|2023-07-16--03-22-06--191 +# LEXUS RX 2020 +34091e43a35e6b0c|2023-07-28--15-25-20--9 +# LEXUS RX 2020 +4adbcf7b0056b7eb|2023-07-28--19-57-42--1 +# LEXUS RX 2020 +5706cbe0f8b82a15|2023-05-21--09-36-28--7 +# LEXUS RX 2020 +5eaac34b077defa9|2023-07-03--09-52-29--17 +# LEXUS RX 2020 +75b3f01a07557b7d|2023-06-14--18-57-50--7 +# LEXUS RX 2020 +b32bbd31c710d41c|2023-06-30--13-28-18--5 +# LEXUS RX 2020 +b0d686668fb49a0b|2023-07-08--14-38-02--30 +# LEXUS RX 2020 +c5c8fc431bbe68e5|2023-08-07--19-45-43--8 +# LEXUS RX 2020 +d61540bf6f3ff5fd|2023-05-26--11-18-58--4 +# TOYOTA AVALON 2022 +14f1b241a8c4e210|2023-05-14--17-14-36--75 +# TOYOTA AVALON 2022 +5cee358519141338|2023-07-31--14-50-56--15 +# TOYOTA AVALON 2022 +c417be6e8e4be4c6|2023-07-11--13-17-09--19 +# KIA EV6 2022 +2466dbded40ee302|2023-07-19--07-57-59--5 +# KIA EV6 2022 +150033b3d34ddfa3|2023-08-08--12-23-52--18 +# KIA EV6 2022 +21bbb80f61096b25|2023-05-21--05-23-18--24 +# KIA EV6 2022 +83cd83d9569526f1|2023-06-30--00-21-30--7 +# KIA EV6 2022 +879ccc014c30dbf9|2023-07-23--18-45-39--24 +# KIA EV6 2022 +b3a670efbd147ef7|2023-06-15--13-26-07--30 +# KIA EV6 2022 +cb06ac355b38798f|2023-06-11--20-29-07--30 +# KIA EV6 2022 +ff07367edd52b0d0|2023-08-06--01-47-15--51 +# CHRYSLER PACIFICA 2020 +12208e5acdc97eb3|2023-06-15--14-42-11--5 +# CHRYSLER PACIFICA 2020 +20665f9a424ada41|2023-07-31--17-11-31--8 +# CHRYSLER PACIFICA 2020 +1d59b9e6ca471788|2023-06-11--16-36-39--1 +# CHRYSLER PACIFICA 2020 +42ccfd732446a8e3|2023-05-18--08-53-58--1 +# CHRYSLER PACIFICA 2020 +5fc1717be83fc0da|2023-07-27--13-49-58--50 +# CHRYSLER PACIFICA 2020 +75564b71f403c136|2023-06-25--14-16-12--8 +# CHRYSLER PACIFICA 2020 +9cad19e0efce3650|2023-06-07--20-40-39--25 +# CHRYSLER PACIFICA 2020 +af014367f8fd3384|2023-06-04--08-57-55--5 +# CHRYSLER PACIFICA 2020 +c1feb10d20626c65|2023-06-11--15-14-18--64 +# CHRYSLER PACIFICA 2020 +f0b1085e8e45dc63|2023-05-31--17-25-38--7 +# LEXUS ES 2019 +1678ac13bc9877c8|2023-07-09--20-07-48--29 +# LEXUS ES 2019 +2844efe905d33fbd|2023-07-02--12-10-43--44 +# LEXUS ES 2019 +6ed275339186a130|2023-07-21--06-40-44--32 +# LEXUS ES 2019 +7a63be41f9cead58|2023-05-11--09-03-37--14 +# LEXUS ES 2019 +a0dc5a8ca1a5786a|2023-07-04--09-29-52--7 +# LEXUS ES 2019 +cf071db973993448|2023-07-29--18-58-05--13 +# LEXUS ES 2019 +db1c55738e630599|2023-06-09--21-03-38--3 +# LEXUS ES 2019 +ea4add78b3d29a5f|2023-05-25--06-41-18--50 +# TOYOTA AVALON 2019 +1cdab90d23115dc2|2023-05-27--21-27-46--9 +# TOYOTA AVALON 2019 +2aeb6689ee609d86|2023-05-25--17-51-03--14 +# TOYOTA AVALON 2019 +57ea94b88445973c|2023-05-24--18-27-06--7 +# TOYOTA AVALON 2019 +dfe129aad2e701b6|2023-05-11--21-08-54--3 +# TOYOTA AVALON 2019 +f02bf3c0f2ec038e|2023-07-17--12-52-56--4 +# TOYOTA AVALON 2019 +f7d1a944dc6e8789|2023-06-02--15-00-46--64 +# VOLKSWAGEN TIGUAN 2ND GEN +07ddd23c582b8e2a|2023-06-10--13-32-49--1 +# VOLKSWAGEN TIGUAN 2ND GEN +4aadfdcf6846d356|2023-07-10--20-22-59--1 +# VOLKSWAGEN TIGUAN 2ND GEN +4e144f64a2535890|2023-05-27--15-30-51--3 +# VOLKSWAGEN TIGUAN 2ND GEN +88b9fa8e179f87d8|2023-07-12--17-01-50--11 +# VOLKSWAGEN TIGUAN 2ND GEN +b08418a961021457|2023-05-15--10-51-25--4 +# VOLKSWAGEN TIGUAN 2ND GEN +c78c1972adfa10df|2023-05-20--13-11-13--8 +# VOLKSWAGEN TIGUAN 2ND GEN 
+d5eb604aa6ae46b7|2023-08-01--16-13-48--5 +# VOLKSWAGEN TIGUAN 2ND GEN +deb9a2546658c0ec|2023-08-07--14-53-50--1 +# VOLKSWAGEN TIGUAN 2ND GEN +f3dfef1528797a29|2023-05-25--20-36-07--8 +# HYUNDAI SONATA HYBRID 2021 +01d4ba35f8288743|2023-07-30--11-19-09--14 +# HYUNDAI SONATA HYBRID 2021 +1d08ecde44f58620|2023-06-08--19-12-14--2 +# HYUNDAI SONATA HYBRID 2021 +077309a26bf98013|2023-07-01--14-11-42--1 +# HYUNDAI SONATA HYBRID 2021 +2fdb3b00e818dfd7|2023-07-31--02-50-23--12 +# HYUNDAI SONATA HYBRID 2021 +49734429dda9bbe7|2023-07-10--14-53-29--38 +# HYUNDAI SONATA HYBRID 2021 +61ef897671a29f5c|2023-07-18--10-19-18--10 +# HYUNDAI SONATA HYBRID 2021 +6f322e987d684501|2023-07-27--17-11-53--14 +# HYUNDAI SONATA HYBRID 2021 +7a629f5c75f83fa7|2023-05-20--22-20-36--7 +# HYUNDAI SONATA HYBRID 2021 +a6c2adb491991166|2023-07-16--09-55-40--4 +# HYUNDAI SONATA HYBRID 2021 +b5374bdeea21f5ce|2023-07-15--19-31-57--13 +# HYUNDAI SONATA HYBRID 2021 +be44434f27731ff8|2023-07-02--08-00-08--240 +# HYUNDAI SONATA HYBRID 2021 +cc820fd0720354e1|2023-06-08--14-57-24--37 +# HYUNDAI SONATA HYBRID 2021 +df2433ef9f6dcaa5|2023-07-27--20-19-04--2 +# VOLKSWAGEN POLO 6TH GEN +0bbe367c98fa1538|2023-07-01--19-42-26--7 +# LEXUS NX 2020 +14f9b413ca9c2962|2023-06-30--12-09-53--5 +# LEXUS NX 2020 +436b3762e7406d9d|2023-05-28--19-54-45--18 +# LEXUS NX 2020 +9b42a9940cfd5fda|2023-06-17--19-17-40--53 +# LEXUS NX 2020 +c7a06a775d4a0b29|2023-05-16--10-36-19--1 +# LEXUS NX 2020 +e4b558dc139f76e1|2023-06-26--17-19-48--5 +# LEXUS NX 2018 +09d58cd27c493c29|2023-05-26--07-56-26--9 +# LEXUS NX 2018 +6179fc02b5ed871f|2023-06-26--15-38-35--4 +# LEXUS NX 2018 +b8dd7a23d04c522f|2023-06-15--15-43-11--33 +# HONDA CR-V HYBRID 2019 +11fa019479471a0a|2023-06-19--16-40-40--18 +# HONDA CR-V HYBRID 2019 +8f3f4683d7cfda74|2023-05-27--15-40-52--4 +# HONDA CR-V HYBRID 2019 +d6c98b1ebf9916b9|2023-07-20--09-04-29--20 +# SUBARU IMPREZA LIMITED 2019 +0744286ead2fbb96|2023-05-19--20-52-49--110 +# SUBARU IMPREZA LIMITED 2019 +283bc259f3c0fec9|2023-06-23--18-44-24--2 +# SUBARU IMPREZA LIMITED 2019 +45d40901dd17481d|2023-06-30--19-57-56--20 +# SUBARU IMPREZA LIMITED 2019 +5c3a4dc9bd0b2eaa|2023-05-16--16-43-44--22 +# SUBARU IMPREZA LIMITED 2019 +6f35adacf2529520|2023-08-01--09-20-18--12 +# SUBARU IMPREZA LIMITED 2019 +7a4ef9e9f44480ee|2023-06-17--09-29-32--3 +# SUBARU IMPREZA LIMITED 2019 +7d1607ec8698af97|2023-06-21--20-38-47--2 +# SUBARU IMPREZA LIMITED 2019 +8de015561e1ea4a0|2023-05-15--17-20-39--2 +# SUBARU IMPREZA LIMITED 2019 +9dedbd672d57fd88|2023-07-27--14-29-54--6 +# SUBARU IMPREZA LIMITED 2019 +d96660307a3000e2|2023-06-17--20-29-31--43 +# HYUNDAI IONIQ HYBRID 2017-2019 +0e13ee2b821302f4|2023-07-09--15-44-35--3 +# HYUNDAI IONIQ HYBRID 2017-2019 +8dc7b1ff3d3ce73d|2023-07-13--21-49-58--3 +# HYUNDAI GENESIS 2015-2016 +06353521b1f136dd|2023-05-12--19-40-35--48 +# HYUNDAI GENESIS 2015-2016 +61b3de3cce404316|2023-07-05--19-47-46--17 +# HYUNDAI GENESIS 2015-2016 +b6bb1d6e9e76d5c0|2023-06-26--06-28-25--4 +# KIA NIRO HYBRID 2021 +1ea7eb816544f6c0|2023-07-31--19-13-44--1 +# KIA NIRO HYBRID 2021 +92daf576a7eab465|2023-07-18--12-15-16--63 +# HONDA PILOT 2017 +11bfdda7cffeb882|2023-05-26--13-25-27--7 +# HONDA PILOT 2017 +1c8910b52e443f18|2023-05-21--20-31-29--44 +# HONDA PILOT 2017 +18bbbb7606cddb6a|2023-05-21--13-13-06--110 +# HONDA PILOT 2017 +30bdd852020fbd54|2023-06-07--09-05-51--22 +# HONDA PILOT 2017 +31e47fc2fc02c165|2023-06-02--13-31-29--13 +# HONDA PILOT 2017 +35b4e5307cce2504|2023-05-20--10-05-14--2 +# HONDA PILOT 2017 +34ff4137044da8fa|2023-05-10--22-40-43--12 
+# HONDA PILOT 2017 +695826b02fd8fe16|2023-06-21--13-30-12--9 +# HONDA PILOT 2017 +83ccc4c48741b67a|2023-05-25--15-12-07--15 +# HONDA PILOT 2017 +8a9bfee29c5a277a|2023-06-01--09-20-21--70 +# HONDA PILOT 2017 +9cb432f9376cfa89|2023-05-16--16-25-22--6 +# HONDA PILOT 2017 +9bac7575fac62395|2023-05-17--06-24-54--11 +# HONDA PILOT 2017 +ad1b9150af890d9e|2023-07-31--09-13-55--15 +# HONDA PILOT 2017 +b4f429568ea81f21|2023-06-17--11-05-51--112 +# HONDA PILOT 2017 +bd80a1c23664b3b7|2023-06-12--16-44-33--4 +# HONDA PILOT 2017 +d53a9253d19264e5|2023-07-03--00-53-05--3 +# HONDA PILOT 2017 +da7d4c7c00a25e08|2023-06-22--19-20-41--15 +# HONDA PILOT 2017 +d9c43a383a53494b|2023-07-25--09-03-56--2 +# HONDA PILOT 2017 +f4e5206d7968b785|2023-06-24--22-50-01--1 +# HONDA PILOT 2017 +fff9af2c280cebe7|2023-07-15--09-10-37--5 +# HONDA CIVIC (BOSCH) 2019 +220bd1ec52459628|2023-05-14--13-38-23--117 +# HONDA CIVIC (BOSCH) 2019 +1cea7c17bf903d1d|2023-06-05--17-05-26--41 +# HONDA CIVIC (BOSCH) 2019 +2872a60fdd6a85a5|2023-06-07--23-37-19--139 +# HONDA CIVIC (BOSCH) 2019 +5123f2e42bc5c2fb|2023-07-04--17-34-47--16 +# HONDA CIVIC (BOSCH) 2019 +4ea994afc80a04cf|2023-07-01--19-47-55--2 +# HONDA CIVIC (BOSCH) 2019 +8506a4cbf74a6519|2023-06-30--12-05-04--14 +# HONDA CIVIC (BOSCH) 2019 +8c63e6150f08c5cc|2023-07-30--20-05-40--62 +# HONDA CIVIC (BOSCH) 2019 +90d56ac785678835|2023-05-28--17-28-04--1 +# HONDA CIVIC (BOSCH) 2019 +930ad2d3aa9a7438|2023-07-10--15-57-07--5 +# HONDA CIVIC (BOSCH) 2019 +9c9e7b7cb9a2d507|2023-06-16--22-18-46--8 +# HONDA CIVIC (BOSCH) 2019 +a341d8f71913b7c6|2023-07-03--11-08-08--9 +# HONDA CIVIC (BOSCH) 2019 +a9a11ee77c895117|2023-05-31--20-56-14--1 +# HONDA CIVIC (BOSCH) 2019 +addf4f63ee4758f3|2023-06-12--13-17-03--10 +# HONDA CIVIC (BOSCH) 2019 +c6d9941315763063|2023-05-26--18-06-58--47 +# HONDA CIVIC (BOSCH) 2019 +ced18df2ecb5eccd|2023-06-26--16-09-36--15 +# HONDA CIVIC (BOSCH) 2019 +d4c8df785ce7f3c5|2023-07-14--15-46-41--12 +# HONDA CIVIC (BOSCH) 2019 +dbd8a2d9977de5f5|2023-06-14--07-23-48--17 +# HONDA CIVIC (BOSCH) 2019 +e4f4282867c6a6b9|2023-07-21--11-37-55--7 +# HONDA CIVIC (BOSCH) 2019 +ee87fe694c837822|2023-07-22--10-02-27--10 +# HONDA CIVIC (BOSCH) 2019 +f27a5518b5be94da|2023-07-21--18-09-53--4 +# HONDA CIVIC (BOSCH) 2019 +f1c58b2efb96589f|2023-07-20--09-38-55--107 +# HONDA CIVIC (BOSCH) 2019 +f99ab12043ad3219|2023-07-19--17-07-43--2 +# HYUNDAI KONA ELECTRIC 2019 +15e5d08be3f89aeb|2023-05-10--05-05-16--1 +# HYUNDAI KONA ELECTRIC 2019 +39ff090a42469a72|2023-05-15--16-18-09--6 +# HYUNDAI KONA ELECTRIC 2019 +c9508b688d2009f6|2023-05-23--12-24-07--21 +# LEXUS RX 2016 +1124e8ae07941ba9|2023-06-05--15-45-48--9 +# LEXUS RX 2016 +2daa07741d66050b|2023-05-21--15-59-39--3 +# LEXUS RX 2016 +4424ddad7f805c10|2023-05-14--13-50-39--147 +# LEXUS RX 2016 +42132726c7467377|2023-07-08--17-03-46--83 +# LEXUS RX 2016 +6ecec1e8fd327774|2023-07-26--10-46-54--13 +# LEXUS RX 2016 +77cb7310f7b0e1d1|2023-05-28--20-12-02--95 +# LEXUS RX 2016 +89ee528b1eb135ed|2023-05-12--20-05-25--5 +# LEXUS RX 2016 +9cd71f2c46d99533|2023-08-02--20-53-42--17 +# LEXUS RX 2016 +ca71d21f2c04ff8e|2023-07-17--08-16-56--83 +# LEXUS RX 2016 +f4c7fca20721d4c1|2023-07-25--18-15-24--18 +# LEXUS RX 2016 +f2defa60d751d031|2023-05-17--15-27-40--91 +# JEEP GRAND CHEROKEE 2019 +06d01b4518df02ff|2023-07-05--06-29-47--46 +# JEEP GRAND CHEROKEE 2019 +2d9b6425552c52c1|2023-05-14--18-18-54--20 +# JEEP GRAND CHEROKEE 2019 +2f123609bbddbe0c|2023-07-18--17-46-35--16 +# JEEP GRAND CHEROKEE 2019 +626791454319848b|2023-07-06--02-13-50--169 +# JEEP GRAND CHEROKEE 
2019 +76ec661ac0d18569|2023-08-05--16-58-53--42 +# JEEP GRAND CHEROKEE 2019 +e1dfba62a4e33f7b|2023-05-10--14-17-59--4 +# JEEP GRAND CHEROKEE 2019 +fdcd19f4434cea9d|2023-05-10--17-10-38--2 +# CHEVROLET VOLT PREMIER 2017 +20cb801701fc8237|2023-06-07--17-41-26--12 +# CHEVROLET VOLT PREMIER 2017 +08924d6a5ba4ab32|2023-07-04--06-56-44--30 +# CHEVROLET VOLT PREMIER 2017 +216776dc641516cc|2023-07-23--16-40-47--76 +# CHEVROLET VOLT PREMIER 2017 +255403b2bd0acb75|2023-05-12--08-44-56--19 +# CHEVROLET VOLT PREMIER 2017 +3189708ff522bc3a|2023-06-08--11-42-29--37 +# CHEVROLET VOLT PREMIER 2017 +3725c590ce069e78|2023-06-25--07-44-44--18 +# CHEVROLET VOLT PREMIER 2017 +3ddd64a934be4c23|2023-06-27--07-52-32--5 +# CHEVROLET VOLT PREMIER 2017 +4bd7be56a56eecaf|2023-05-10--18-17-26--7 +# CHEVROLET VOLT PREMIER 2017 +524ca9dc97ddd9a3|2023-07-25--21-32-28--5 +# CHEVROLET VOLT PREMIER 2017 +5617d19d0841b38c|2023-05-24--12-29-35--113 +# CHEVROLET VOLT PREMIER 2017 +67b9beb7b4d31cf8|2023-05-23--12-50-32--15 +# CHEVROLET VOLT PREMIER 2017 +73a06f3c204cb47a|2023-05-21--15-37-47--6 +# CHEVROLET VOLT PREMIER 2017 +90678d1470cad364|2023-06-03--14-10-34--15 +# CHEVROLET VOLT PREMIER 2017 +a3e8ad84037083ca|2023-06-02--17-10-25--13 +# CHEVROLET VOLT PREMIER 2017 +af9fc6047bf1dae2|2023-08-01--05-28-32--25 +# CHEVROLET VOLT PREMIER 2017 +b24f78641f62dd3c|2023-06-06--18-18-45--13 +# CHEVROLET VOLT PREMIER 2017 +c12c0f43a6c65515|2023-05-23--10-31-56--6 +# CHEVROLET VOLT PREMIER 2017 +d0aa4a8f195675a9|2023-06-20--19-35-14--33 +# CHEVROLET VOLT PREMIER 2017 +d44c5c17d2380d53|2023-07-27--20-23-18--9 +# CHEVROLET VOLT PREMIER 2017 +d2ccdcf33e62cfa0|2023-06-01--20-42-39--58 +# CHEVROLET VOLT PREMIER 2017 +dcaf04bb53a1f9df|2023-06-02--10-08-55--1 +# CHEVROLET VOLT PREMIER 2017 +e69bcbc2c84de4d2|2023-06-27--17-12-00--14 +# CHEVROLET VOLT PREMIER 2017 +e2b25f39da040caa|2023-07-23--12-59-47--49 +# CHEVROLET VOLT PREMIER 2017 +e4b91b0282619c79|2023-07-30--15-57-39--1 +# TOYOTA ALPHARD 2020 +09c50095f46f96cd|2023-05-21--10-27-44--20 +# TOYOTA ALPHARD 2020 +5ffe5d6678eaefb2|2023-07-04--17-18-33--96 +# HYUNDAI IONIQ PHEV 2020 +201c5aaced09a24a|2023-05-20--12-39-01--15 +# HYUNDAI IONIQ PHEV 2020 +4bcc377663bc541a|2023-05-24--14-59-59--219 +# HYUNDAI IONIQ PHEV 2020 +78fdfacf4a805561|2023-07-17--11-54-45--73 +# HYUNDAI IONIQ PHEV 2020 +93882bc7bea8b476|2023-07-30--19-56-22--7 +# HYUNDAI IONIQ PHEV 2020 +b28e9dbe4004e915|2023-06-11--19-55-28--2 +# HYUNDAI IONIQ PHEV 2020 +c447f3983314c453|2023-05-14--17-17-24--158 +# RAM 1500 5TH GEN +17fc16d840fe9d21|2023-05-18--19-28-17--11 +# VOLKSWAGEN ARTEON 1ST GEN +0662797832aac2eb|2023-07-13--09-33-52--73 +# VOLKSWAGEN ARTEON 1ST GEN +5ac586afbb236b5d|2023-07-17--22-52-19--5 +# VOLKSWAGEN ARTEON 1ST GEN +c34ef43981b6d888|2023-06-01--07-50-41--7 +# HONDA INSIGHT 2019 +0ac79efcae392b39|2023-06-29--10-57-45--8 +# HONDA INSIGHT 2019 +acdb227003c7aee1|2023-05-30--17-06-35--9 +# HONDA INSIGHT 2019 +baeb84fd6ad76707|2023-07-26--06-13-05--45 +# HONDA INSIGHT 2019 +ba9a88a070809873|2023-05-14--13-34-35--32 +# HONDA INSIGHT 2019 +bfde60a60008c36d|2023-05-24--16-51-16--24 +# HONDA INSIGHT 2019 +f7d92a003486dc37|2023-06-10--16-17-23--48 +# TOYOTA MIRAI 2021 +2611ca299b6b5939|2023-07-12--19-27-34--1 +# TOYOTA MIRAI 2021 +390a85b0f0d7a93b|2023-07-02--17-09-06--13 +# TOYOTA MIRAI 2021 +d2bd3cb8241c253c|2023-05-30--18-34-40--2 +# TOYOTA MIRAI 2021 +f4ee9d3ae1d656fb|2023-08-03--17-47-32--15 +# TOYOTA AVALON 2019 +1cf68ce8c78d6246|2023-07-02--22-42-04--9 +# TOYOTA AVALON 2019 
+3e1100294929fd5f|2023-06-19--20-05-45--6 +# TOYOTA AVALON 2019 +7879897f0af9ed09|2023-07-21--12-35-20--1 +# LEXUS NX 2020 +01f31237627e0cc3|2023-08-08--09-32-39--10 +# KIA STINGER GT2 2018 +2ba5946fb25bfc1f|2023-07-29--22-48-54--40 +# KIA STINGER GT2 2018 +5ccb50cda9fba8b7|2023-07-02--12-59-48--2 +# KIA STINGER GT2 2018 +724f06bcdb81bc8e|2023-07-01--08-24-30--25 +# KIA STINGER GT2 2018 +8efe830da0568792|2023-07-07--12-21-04--14 +# KIA STINGER GT2 2018 +b5768b7348113531|2023-07-29--20-29-19--1 +# SKODA KODIAQ 1ST GEN +2696aa4107f3a534|2023-06-12--17-35-02--1 +# SKODA KODIAQ 1ST GEN +2ca49a03c8084514|2023-08-04--07-24-41--13 +# SKODA KODIAQ 1ST GEN +89b596c5edcb6dba|2023-07-15--15-38-29--1 +# SKODA KODIAQ 1ST GEN +8a5da7c1d1783730|2023-06-08--17-33-34--2 +# SKODA KODIAQ 1ST GEN +cd4c3e2031f01e61|2023-07-03--11-19-29--1 +# SKODA KODIAQ 1ST GEN +dafb0cb9e28e0249|2023-05-31--15-55-44--1 +# HONDA HRV 2019 +09c457a021a641e7|2023-07-24--20-02-45--3 +# HONDA HRV 2019 +6e9442a1c18f2abd|2023-05-26--12-52-43--4 +# HONDA HRV 2019 +78edb6a9a06e891d|2023-06-25--19-42-26--2 +# HONDA HRV 2019 +7c72f43a732e77ce|2023-07-26--21-44-12--10 +# HONDA HRV 2019 +a9203d77468a98a3|2023-05-11--21-05-37--10 +# GENESIS G80 2017 +2dcea2872df6d423|2023-06-06--19-13-28--50 +# GENESIS G80 2017 +450774a9a3ffe4c8|2023-08-05--15-07-10--11 +# GENESIS G80 2017 +a99d0d0edbedad89|2023-07-04--21-21-06--4 +# VOLKSWAGEN PASSAT 8TH GEN +2d7d87433a67c925|2023-05-14--17-05-12--21 +# VOLKSWAGEN PASSAT 8TH GEN +37e3253d2ac7c823|2023-05-30--16-09-30--26 +# VOLKSWAGEN PASSAT 8TH GEN +40443218782e5709|2023-07-11--10-44-37--14 +# VOLKSWAGEN PASSAT 8TH GEN +5405cac0c9d58c3d|2023-06-13--18-00-38--5 +# VOLKSWAGEN PASSAT 8TH GEN +7e069ec96c2da3a2|2023-07-08--14-06-11--23 +# VOLKSWAGEN PASSAT 8TH GEN +fd81eb7cc97ce831|2023-05-13--14-22-35--55 +# AUDI Q3 2ND GEN +2a7eeed5627fe50f|2023-05-26--16-00-32--42 +# AUDI Q3 2ND GEN +cdc1627feb1c82d3|2023-07-23--16-49-24--6 +# TOYOTA HIGHLANDER 2017 +30f476405b31e063|2023-07-23--11-27-50--4 +# TOYOTA HIGHLANDER 2017 +335da7758ecaf9b8|2023-06-07--06-45-19--6 +# TOYOTA HIGHLANDER 2017 +3dfa699819714d88|2023-05-23--10-21-28--15 +# TOYOTA HIGHLANDER 2017 +443fc6ac84d80c7e|2023-06-09--10-38-02--5 +# TOYOTA HIGHLANDER 2017 +69310bd073c5381d|2023-05-25--11-21-15--7 +# TOYOTA HIGHLANDER 2017 +7ebef57bc42d0ffd|2023-05-15--09-33-53--32 +# TOYOTA HIGHLANDER 2017 +90e19871a5f0753d|2023-05-30--19-41-28--30 +# TOYOTA HIGHLANDER 2017 +a2153ee04e30ac3c|2023-05-28--15-46-53--20 +# TOYOTA HIGHLANDER 2017 +a68647f2ba7092a3|2023-06-09--11-50-44--12 +# TOYOTA HIGHLANDER 2017 +d4f786dae36655f1|2023-07-08--11-46-59--31 +# TOYOTA HIGHLANDER 2017 +eb3a11981a924f0f|2023-07-29--12-16-21--12 +# CHRYSLER PACIFICA HYBRID 2017 +328eb791948dc4c4|2023-07-28--20-13-00--12 +# CHRYSLER PACIFICA HYBRID 2017 +57409982aa4a348b|2023-07-17--19-19-22--13 +# CHRYSLER PACIFICA HYBRID 2017 +62bde7c16381d8b2|2023-05-21--20-35-27--17 +# CHRYSLER PACIFICA HYBRID 2017 +949ce31b8c38095a|2023-07-11--19-03-33--37 +# TOYOTA AVALON 2022 +301800decccd6669|2023-06-10--12-18-42--12 +# TOYOTA AVALON 2022 +caa859705cd01362|2023-07-20--08-41-27--1 +# SUBARU OUTBACK 6TH GEN +33f86ddb06a656dd|2023-07-30--19-36-09--38 +# SUBARU OUTBACK 6TH GEN +46009c83d4595279|2023-07-10--18-04-48--49 +# SUBARU OUTBACK 6TH GEN +781b0068b313214f|2023-07-24--09-46-42--1 +# SUBARU OUTBACK 6TH GEN +83ce0f0dcdc42a2d|2023-07-12--11-37-21--5 +# SUBARU OUTBACK 6TH GEN +8a0a42036c8cd285|2023-07-15--12-20-41--251 +# SUBARU OUTBACK 6TH GEN +a0a54ab26674dbb9|2023-05-10--17-52-42--11 +# SUBARU 
OUTBACK 6TH GEN +bad6ae3584ece5b5|2023-06-06--20-47-16--1 +# SUBARU OUTBACK 6TH GEN +c726b2698bd24c5e|2023-07-22--11-45-38--2 +# SUBARU OUTBACK 6TH GEN +db94c35b2fc49a1d|2023-06-29--15-34-53--3 +# SUBARU OUTBACK 6TH GEN +eac0312bb7d371ef|2023-06-23--09-53-10--3 +# VOLKSWAGEN PASSAT NMS +398d43ca7e0c8e34|2023-06-07--21-05-24--3 +# LEXUS IS 2018 +3776c9b10eb2ab37|2023-07-17--19-38-03--12 +# LEXUS IS 2018 +5d64b83564cf5d9a|2023-07-23--15-00-46--12 +# LEXUS IS 2018 +a52787c41eced8a8|2023-07-28--00-12-38--4 +# LEXUS IS 2018 +c8c2f75d1782de03|2023-06-26--23-05-18--17 +# KIA SORENTO 4TH GEN +3c492c4530c6ad7f|2023-07-25--12-52-04--8 +# KIA SORENTO 4TH GEN +fd067a3a06f7bb3f|2023-07-18--19-05-41--2 +# HONDA CIVIC 2022 +35c004509e3057f2|2023-05-22--06-15-04--15 +# HONDA CIVIC 2022 +5130484aa8069bad|2023-05-20--17-37-41--2 +# HONDA CIVIC 2022 +8fcafab4167b8c6c|2023-06-24--15-42-50--55 +# KIA OPTIMA 4TH GEN FACELIFT +3d96bd05b5513638|2023-06-09--16-14-40--8 +# KIA OPTIMA 4TH GEN FACELIFT +fec0d754d02e943e|2023-06-19--14-54-54--144 +# HYUNDAI ELANTRA 2021 +3ea622c3c0ec3055|2023-06-22--18-40-28--15 +# HYUNDAI ELANTRA 2021 +449474d0a79c14aa|2023-07-30--15-41-03--10 +# HYUNDAI ELANTRA 2021 +5c9daa3b70d6f706|2023-08-05--15-56-10--2 +# HYUNDAI ELANTRA 2021 +5a4405495d2750ef|2023-05-13--21-13-20--16 +# HYUNDAI ELANTRA 2021 +be0bb3add22aeed0|2023-07-12--16-05-59--8 +# HYUNDAI ELANTRA 2021 +d8cd6e5e3998d306|2023-06-26--20-55-03--7 +# SKODA OCTAVIA 3RD GEN +3fa7570f2fcace3d|2023-08-02--14-13-00--2 +# SKODA OCTAVIA 3RD GEN +f5392228324cda8d|2023-07-19--09-22-26--1 +# KIA CARNIVAL 4TH GEN +409c9409979a8abc|2023-07-30--17-05-09--41 +# HYUNDAI IONIQ HYBRID 2020-2022 +452f244b23a48e9b|2023-07-19--18-49-50--5 +# KIA STINGER 2022 +47d09720bb2ccdf9|2023-06-05--20-25-36--6 +# COMMA BODY +4ae6d7a23dc12ee9|2023-07-22--18-04-32--2 +# COMMA BODY +8fb3c185d81fc85e|2023-08-06--14-57-51--5 +# COMMA BODY +efdf9af95e71cd84|2023-06-28--17-25-55--12 +# TOYOTA C-HR 2018 +482ce47555b39e90|2023-07-14--18-51-41--7 +# TOYOTA C-HR 2018 +4ff60f3f7178368b|2023-05-11--08-32-46--13 +# TOYOTA C-HR 2018 +fdf1eae234f98180|2023-05-24--09-19-26--1 +# TOYOTA AVALON 2016 +4bc88417168295f0|2023-07-28--18-04-07--71 +# TOYOTA AVALON 2016 +fe56ef3e908ceadc|2023-08-01--13-17-58--29 +# HONDA FIT 2018 +48536bafa6cabe3e|2023-06-14--14-50-06--6 +# HONDA FIT 2018 +8085a4d3c9416184|2023-07-27--15-31-59--9 +# HONDA FIT 2018 +a6883439bbd3d975|2023-05-30--17-28-13--26 +# HONDA FIT 2018 +e56002a2b1eeed4d|2023-07-04--13-32-44--4 +# MAZDA CX-9 2021 +51ffb2084fa16e13|2023-06-27--18-32-54--1 +# MAZDA CX-9 2021 +629c4aced073d15a|2023-06-13--12-03-56--5 +# MAZDA CX-9 2021 +ab2ef769ce18b0b2|2023-05-10--16-48-57--21 +# MAZDA CX-9 2021 +f04622e57618f6b8|2023-06-08--17-14-27--32 +# FORD BRONCO SPORT 1ST GEN +54827bf84c38b14f|2023-05-10--22-15-37--2 +# HYUNDAI IONIQ 5 2022 +5608f42aaece1463|2023-07-08--12-56-57--10 +# HYUNDAI IONIQ 5 2022 +57f8e21bb63728ed|2023-07-17--17-56-04--20 +# HYUNDAI IONIQ 5 2022 +534e3360c3936247|2023-08-03--10-30-38--6 +# HYUNDAI IONIQ 5 2022 +6c5e2106ca45c09f|2023-05-29--20-39-58--57 +# HYUNDAI IONIQ 5 2022 +8014970252341f77|2023-08-01--19-53-30--21 +# HYUNDAI IONIQ 5 2022 +81817870e9d2dc12|2023-06-24--21-37-09--23 +# HYUNDAI IONIQ 5 2022 +8379b28e51ceb3b1|2023-05-13--21-53-54--5 +# HYUNDAI IONIQ 5 2022 +8a53aa70d88313f1|2023-07-14--16-06-11--34 +# HYUNDAI IONIQ 5 2022 +94fb5be27a4b67b0|2023-07-02--21-38-55--55 +# HYUNDAI IONIQ 5 2022 +9a8af592c6b6b617|2023-05-18--07-24-13--121 +# HYUNDAI IONIQ 5 2022 +b70b56b76a6217f2|2023-06-02--14-53-42--63 +# 
HYUNDAI IONIQ 5 2022 +cb8f2450d9bc2438|2023-07-07--14-26-13--22 +# HYUNDAI IONIQ 5 2022 +cdc004f72f0192de|2023-06-20--17-46-52--10 +# HYUNDAI IONIQ 5 2022 +d172a844d3da91a4|2023-05-22--16-14-11--4 +# HYUNDAI IONIQ 5 2022 +dab2ee943375ae1f|2023-07-29--13-07-45--11 +# HYUNDAI IONIQ 5 2022 +db2f545ea314b36d|2023-07-05--16-54-15--19 +# HYUNDAI IONIQ 5 2022 +e42931599f753d96|2023-05-24--20-20-26--13 +# HYUNDAI IONIQ 5 2022 +f2a5efadd1dbf33f|2023-05-22--17-54-44--3 +# GENESIS GV70 1ST GEN +573d0052724c316d|2023-07-04--11-33-32--1 +# GENESIS GV70 1ST GEN +5d327dec6ec07b6e|2023-06-16--15-56-30--5 +# GENESIS GV70 1ST GEN +60c76f368ee05a23|2023-06-07--13-34-57--22 +# GENESIS GV70 1ST GEN +ca4de5b12321bd98|2023-05-10--08-48-48--7 +# KIA SPORTAGE 5TH GEN +5780f2cf75339e9f|2023-05-26--23-35-00--8 +# KIA SPORTAGE 5TH GEN +ce72415f25bcbf6f|2023-07-15--10-04-48--6 +# VOLKSWAGEN JETTA 7TH GEN +632ceb5cc1d7e3f3|2023-07-11--17-49-31--53 +# VOLKSWAGEN JETTA 7TH GEN +cf3717f55f9ff6bb|2023-05-21--23-17-16--13 +# TOYOTA C-HR 2021 +6719965b0e1d1737|2023-07-29--17-09-47--4 +# SKODA SUPERB 3RD GEN +6a6d9303aecc1e97|2023-07-12--09-56-43--13 +# SKODA SUPERB 3RD GEN +a7adec1fdbb46a1c|2023-07-31--22-39-06--2 +# SKODA SUPERB 3RD GEN +cd047d573cc78c34|2023-06-29--13-39-09--7 +# SKODA SUPERB 3RD GEN +d22f3f42780f2874|2023-07-11--17-43-18--45 +# NISSAN ALTIMA 2020 +62b0735d47268ec7|2023-06-23--18-44-12--11 +# NISSAN ALTIMA 2020 +742442ec7a49776d|2023-07-07--09-12-19--43 +# NISSAN ALTIMA 2020 +78daf6341d75cffb|2023-06-19--16-00-54--12 +# NISSAN ALTIMA 2020 +894145c25ba0328a|2023-06-24--14-39-55--18 +# NISSAN ALTIMA 2020 +92511c3b877c7215|2023-06-18--14-47-27--26 +# KIA SORENTO GT LINE 2018 +6b111e8e45cc2d07|2023-06-07--22-09-58--6 +# KIA SORENTO GT LINE 2018 +b78a19b557b991ab|2023-05-10--20-56-53--33 +# HYUNDAI TUCSON 4TH GEN +7173fdb19bf459f5|2023-06-02--20-50-49--14 +# HYUNDAI TUCSON 4TH GEN +8426de28ebdc65a2|2023-05-27--14-51-28--13 +# HYUNDAI TUCSON 4TH GEN +f7f6cc634ee443a9|2023-05-23--20-33-04--2 +# HYUNDAI ELANTRA 2017 +734ef96182ddf940|2023-07-12--14-31-45--34 +# SUBARU ASCENT LIMITED 2019 +72df1a0f641d5d4f|2023-06-30--17-17-26--7 +# SUBARU ASCENT LIMITED 2019 +91192c3ff5b51b64|2023-08-06--15-20-27--3 +# SUBARU ASCENT LIMITED 2019 +9936b4bbf75a5fe4|2023-05-12--14-23-02--16 +# SUBARU ASCENT LIMITED 2019 +cb9e6a98fd925948|2023-06-07--09-49-57--32 +# TESLA AP2 MODEL S +7277bddf4748e615|2023-05-10--10-30-06--11 +# HYUNDAI ELANTRA HYBRID 2021 +786fc028c014be71|2023-06-13--10-49-03--2 +# HYUNDAI ELANTRA HYBRID 2021 +8d49af2b041f8c5c|2023-07-06--17-34-04--36 +# HYUNDAI ELANTRA HYBRID 2021 +9b1bd7e1e54c6a37|2023-05-26--08-29-15--1 +# HYUNDAI ELANTRA HYBRID 2021 +d66b12aeb4ac1750|2023-08-04--14-14-48--5 +# HYUNDAI ELANTRA HYBRID 2021 +e33e369bf0cac9f5|2023-06-08--21-38-37--1 +# HYUNDAI ELANTRA HYBRID 2021 +eb5a50f9e98f50b9|2023-06-22--14-06-36--106 +# HYUNDAI ELANTRA HYBRID 2021 +f22818fb0dbd850d|2023-06-07--16-37-27--6 +# HYUNDAI SONATA 2019 +7ae1c131629d96e5|2023-06-01--18-40-26--18 +# HYUNDAI SONATA 2019 +c1fc13c1b806e536|2023-05-24--19-57-19--17 +# HYUNDAI SONATA 2019 +e3500498d01af116|2023-07-19--13-08-43--85 +# HYUNDAI TUCSON 4TH GEN +8afcf4c5d5b6d061|2023-05-14--20-29-14--9 +# HYUNDAI TUCSON 4TH GEN +a1f50efb97003e30|2023-05-17--20-13-07--1 +# HYUNDAI TUCSON 4TH GEN +c5956193b4fde1a3|2023-07-06--20-47-58--24 +# GENESIS GV60 ELECTRIC 1ST GEN +94542b2d06f7a9a6|2023-07-21--11-23-43--28 +# GENESIS GV60 ELECTRIC 1ST GEN +c9ccc00f8d7e00d2|2023-05-18--16-29-54--33 +# GENESIS G70 2018 
+9238d5ce084d695e|2023-06-23--19-26-28--12 +# KIA SPORTAGE 5TH GEN +965a780ebeeed150|2023-07-25--07-16-08--40 +# KIA K5 2021 +9d4fa1c83653b90b|2023-07-10--07-30-25--14 +# ACURA RDX 2018 +a00bae406fe65d1c|2023-06-01--21-30-27--11 +# ACURA RDX 2018 +9f98fcb1aa88d319|2023-05-10--12-35-18--17 +# ACURA RDX 2018 +f41a6f453a4beb95|2023-06-25--07-00-46--23 +# FORD ESCAPE 4TH GEN +a4218e6416dfd978|2023-08-02--12-04-45--22 +# SEAT ATECA 1ST GEN +a4455cb22fa944d9|2023-05-19--16-54-51--6 +# VOLKSWAGEN TRANSPORTER T6.1 +a459f4556782eba1|2023-06-05--13-08-21--1 +# NISSAN ROGUE 2019 +ae2babeed1fe011f|2023-08-02--21-28-28--20 +# NISSAN ROGUE 2019 +b7287535e2a23992|2023-07-20--20-17-53--1 +# NISSAN ROGUE 2019 +dff9db8e8d8601f8|2023-05-14--18-00-16--30 +# KIA NIRO EV 2ND GEN +b153671049a867b3|2023-07-12--19-25-18--6 +# KIA NIRO HYBRID 2ND GEN +b1b094b91c2ceafd|2023-06-15--20-30-37--18 +# KIA NIRO HYBRID 2ND GEN +d250f6ef680e703d|2023-07-07--13-31-18--30 +# KIA NIRO HYBRID 2ND GEN +de8fc10be8a79842|2023-06-09--00-59-50--10 +# KIA NIRO HYBRID 2ND GEN +df54ba658aedc57c|2023-06-01--05-27-17--48 +# SEAT LEON 3RD GEN +aef9c04d6ec5cd57|2023-07-15--16-49-38--12 +# HYUNDAI SANTA CRUZ 1ST GEN +c00a40383605f531|2023-05-21--13-08-03--11 +# HONDA CR-V 2016 +bd96755604115d4c|2023-06-27--10-34-55--33 +# TOYOTA RAV4 2023 +c5e5740d799f017f|2023-08-08--13-23-41--7 +# FORD MAVERICK 1ST GEN +c72c220ce1de786f|2023-07-17--15-49-45--5 +# FORD MAVERICK 1ST GEN +c845bd8c366e6f3d|2023-06-29--19-03-50--12 +# CADILLAC ESCALADE 2017 +defd7f8491ad9254|2023-06-18--02-03-57--5 +# CADILLAC ESCALADE 2017 +ef8f2185104d862e|2023-07-29--18-02-25--18 +# TOYOTA C-HR 2021 +ea8fbe72b96a185c|2023-06-08--12-35-07--3 +# HYUNDAI KONA ELECTRIC 2022 +ecb854b94fdfa4d5|2023-05-27--00-04-01--45 +# KIA SORENTO HYBRID 4TH GEN +e8c813df1056766a|2023-05-26--19-01-03--34 +# NISSAN X-TRAIL 2017 +fa9224be239005c7|2023-07-30--14-28-52--18 +# NISSAN LEAF 2018 Instrument Cluster +fc10e0bf7dc6c892|2023-07-09--18-47-05--50 diff --git a/selfdrive/car/tests/test_platform_configs.py b/selfdrive/car/tests/test_platform_configs.py new file mode 100644 index 0000000..0b42a2b --- /dev/null +++ b/selfdrive/car/tests/test_platform_configs.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python3 + +import unittest + +from openpilot.selfdrive.car.values import PLATFORMS + + +class TestPlatformConfigs(unittest.TestCase): + def test_configs(self): + + for platform in PLATFORMS.values(): + with self.subTest(platform=str(platform)): + self.assertTrue(platform.config._frozen) + + if platform != "mock": + self.assertIn("pt", platform.config.dbc_dict) + self.assertTrue(len(platform.config.platform_str) > 0) + + self.assertIsNotNone(platform.config.specs) + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/controls/tests/__init__.py b/selfdrive/controls/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/selfdrive/controls/tests/test_alerts.py b/selfdrive/controls/tests/test_alerts.py new file mode 100644 index 0000000..7b4fba0 --- /dev/null +++ b/selfdrive/controls/tests/test_alerts.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python3 +import copy +import json +import os +import unittest +import random +from PIL import Image, ImageDraw, ImageFont + +from cereal import log, car +from cereal.messaging import SubMaster +from openpilot.common.basedir import BASEDIR +from openpilot.common.params import Params +from openpilot.selfdrive.controls.lib.events import Alert, EVENTS, ET +from openpilot.selfdrive.controls.lib.alertmanager import set_offroad_alert +from 
openpilot.selfdrive.test.process_replay.process_replay import CONFIGS + +AlertSize = log.ControlsState.AlertSize + +OFFROAD_ALERTS_PATH = os.path.join(BASEDIR, "selfdrive/controls/lib/alerts_offroad.json") + +# TODO: add callback alerts +ALERTS = [] +for event_types in EVENTS.values(): + for alert in event_types.values(): + ALERTS.append(alert) + + +class TestAlerts(unittest.TestCase): + + @classmethod + def setUpClass(cls): + with open(OFFROAD_ALERTS_PATH) as f: + cls.offroad_alerts = json.loads(f.read()) + + # Create fake objects for callback + cls.CS = car.CarState.new_message() + cls.CP = car.CarParams.new_message() + cfg = [c for c in CONFIGS if c.proc_name == 'controlsd'][0] + cls.sm = SubMaster(cfg.pubs) + + def test_events_defined(self): + # Ensure all events in capnp schema are defined in events.py + events = car.CarEvent.EventName.schema.enumerants + + for name, e in events.items(): + if not name.endswith("DEPRECATED"): + fail_msg = "%s @%d not in EVENTS" % (name, e) + self.assertTrue(e in EVENTS.keys(), msg=fail_msg) + + # ensure alert text doesn't exceed allowed width + def test_alert_text_length(self): + font_path = os.path.join(BASEDIR, "selfdrive/assets/fonts") + regular_font_path = os.path.join(font_path, "Inter-SemiBold.ttf") + bold_font_path = os.path.join(font_path, "Inter-Bold.ttf") + semibold_font_path = os.path.join(font_path, "Inter-SemiBold.ttf") + + max_text_width = 2160 - 300 # full screen width is usable, minus sidebar + draw = ImageDraw.Draw(Image.new('RGB', (0, 0))) + + fonts = { + AlertSize.small: [ImageFont.truetype(semibold_font_path, 74)], + AlertSize.mid: [ImageFont.truetype(bold_font_path, 88), + ImageFont.truetype(regular_font_path, 66)], + } + + for alert in ALERTS: + if not isinstance(alert, Alert): + alert = alert(self.CP, self.CS, self.sm, metric=False, soft_disable_time=100) + + # for full size alerts, both text fields wrap the text, + # so it's unlikely that they would go past the max width + if alert.alert_size in (AlertSize.none, AlertSize.full): + continue + + for i, txt in enumerate([alert.alert_text_1, alert.alert_text_2]): + if i >= len(fonts[alert.alert_size]): + break + + font = fonts[alert.alert_size][i] + left, _, right, _ = draw.textbbox((0, 0), txt, font) + width = right - left + msg = f"type: {alert.alert_type} msg: {txt}" + self.assertLessEqual(width, max_text_width, msg=msg) + + def test_alert_sanity_check(self): + for event_types in EVENTS.values(): + for event_type, a in event_types.items(): + # TODO: add callback alerts + if not isinstance(a, Alert): + continue + + if a.alert_size == AlertSize.none: + self.assertEqual(len(a.alert_text_1), 0) + self.assertEqual(len(a.alert_text_2), 0) + elif a.alert_size == AlertSize.small: + self.assertGreater(len(a.alert_text_1), 0) + self.assertEqual(len(a.alert_text_2), 0) + elif a.alert_size == AlertSize.mid: + self.assertGreater(len(a.alert_text_1), 0) + self.assertGreater(len(a.alert_text_2), 0) + else: + self.assertGreater(len(a.alert_text_1), 0) + + self.assertGreaterEqual(a.duration, 0.) + + if event_type not in (ET.WARNING, ET.PERMANENT, ET.PRE_ENABLE): + self.assertEqual(a.creation_delay, 0.) 
+ + def test_offroad_alerts(self): + params = Params() + for a in self.offroad_alerts: + # set the alert + alert = copy.copy(self.offroad_alerts[a]) + set_offroad_alert(a, True) + alert['extra'] = '' + self.assertTrue(json.dumps(alert) == params.get(a, encoding='utf8')) + + # then delete it + set_offroad_alert(a, False) + self.assertTrue(params.get(a) is None) + + def test_offroad_alerts_extra_text(self): + params = Params() + for i in range(50): + # set the alert + a = random.choice(list(self.offroad_alerts)) + alert = self.offroad_alerts[a] + set_offroad_alert(a, True, extra_text="a"*i) + + written_alert = json.loads(params.get(a, encoding='utf8')) + self.assertTrue("a"*i == written_alert['extra']) + self.assertTrue(alert["text"] == written_alert['text']) + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/controls/tests/test_cruise_speed.py b/selfdrive/controls/tests/test_cruise_speed.py new file mode 100644 index 0000000..76a2222 --- /dev/null +++ b/selfdrive/controls/tests/test_cruise_speed.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python3 +import itertools +import numpy as np +import unittest + +from parameterized import parameterized_class +from cereal import log +from openpilot.common.params import Params +from openpilot.selfdrive.controls.lib.drive_helpers import VCruiseHelper, V_CRUISE_MIN, V_CRUISE_MAX, V_CRUISE_INITIAL, IMPERIAL_INCREMENT +from cereal import car +from openpilot.common.conversions import Conversions as CV +from openpilot.selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver + +ButtonEvent = car.CarState.ButtonEvent +ButtonType = car.CarState.ButtonEvent.Type + + +def run_cruise_simulation(cruise, e2e, t_end=20.): + man = Maneuver( + '', + duration=t_end, + initial_speed=max(cruise - 1., 0.0), + lead_relevancy=True, + initial_distance_lead=100, + cruise_values=[cruise], + prob_lead_values=[0.0], + breakpoints=[0.], + e2e=e2e, + ) + valid, output = man.evaluate() + assert valid + return output[-1, 3] + + +@parameterized_class(("e2e", "personality", "speed"), itertools.product( + [True, False], # e2e + log.LongitudinalPersonality.schema.enumerants, # personality + [5,35])) # speed +class TestCruiseSpeed(unittest.TestCase): + def test_cruise_speed(self): + params = Params() + params.put("LongitudinalPersonality", str(self.personality)) + print(f'Testing {self.speed} m/s') + cruise_speed = float(self.speed) + + simulation_steady_state = run_cruise_simulation(cruise_speed, self.e2e) + self.assertAlmostEqual(simulation_steady_state, cruise_speed, delta=.01, msg=f'Did not reach {self.speed} m/s') + + +# TODO: test pcmCruise +@parameterized_class(('pcm_cruise',), [(False,)]) +class TestVCruiseHelper(unittest.TestCase): + def setUp(self): + self.CP = car.CarParams(pcmCruise=self.pcm_cruise) + self.v_cruise_helper = VCruiseHelper(self.CP) + self.reset_cruise_speed_state() + + def reset_cruise_speed_state(self): + # Two resets previous cruise speed + for _ in range(2): + self.v_cruise_helper.update_v_cruise(car.CarState(cruiseState={"available": False}), enabled=False, is_metric=False) + + def enable(self, v_ego, experimental_mode): + # Simulates user pressing set with a current speed + self.v_cruise_helper.initialize_v_cruise(car.CarState(vEgo=v_ego), experimental_mode) + + def test_adjust_speed(self): + """ + Asserts speed changes on falling edges of buttons. 
+ """ + + self.enable(V_CRUISE_INITIAL * CV.KPH_TO_MS, False) + + for btn in (ButtonType.accelCruise, ButtonType.decelCruise): + for pressed in (True, False): + CS = car.CarState(cruiseState={"available": True}) + CS.buttonEvents = [ButtonEvent(type=btn, pressed=pressed)] + + self.v_cruise_helper.update_v_cruise(CS, enabled=True, is_metric=False) + self.assertEqual(pressed, self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last) + + def test_rising_edge_enable(self): + """ + Some car interfaces may enable on rising edge of a button, + ensure we don't adjust speed if enabled changes mid-press. + """ + + # NOTE: enabled is always one frame behind the result from button press in controlsd + for enabled, pressed in ((False, False), + (False, True), + (True, False)): + CS = car.CarState(cruiseState={"available": True}) + CS.buttonEvents = [ButtonEvent(type=ButtonType.decelCruise, pressed=pressed)] + self.v_cruise_helper.update_v_cruise(CS, enabled=enabled, is_metric=False) + if pressed: + self.enable(V_CRUISE_INITIAL * CV.KPH_TO_MS, False) + + # Expected diff on enabling. Speed should not change on falling edge of pressed + self.assertEqual(not pressed, self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last) + + def test_resume_in_standstill(self): + """ + Asserts we don't increment set speed if user presses resume/accel to exit cruise standstill. + """ + + self.enable(0, False) + + for standstill in (True, False): + for pressed in (True, False): + CS = car.CarState(cruiseState={"available": True, "standstill": standstill}) + CS.buttonEvents = [ButtonEvent(type=ButtonType.accelCruise, pressed=pressed)] + self.v_cruise_helper.update_v_cruise(CS, enabled=True, is_metric=False) + + # speed should only update if not at standstill and button falling edge + should_equal = standstill or pressed + self.assertEqual(should_equal, self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last) + + def test_set_gas_pressed(self): + """ + Asserts pressing set while enabled with gas pressed sets + the speed to the maximum of vEgo and current cruise speed. + """ + + for v_ego in np.linspace(0, 100, 101): + self.reset_cruise_speed_state() + self.enable(V_CRUISE_INITIAL * CV.KPH_TO_MS, False) + + # first decrement speed, then perform gas pressed logic + expected_v_cruise_kph = self.v_cruise_helper.v_cruise_kph - IMPERIAL_INCREMENT + expected_v_cruise_kph = max(expected_v_cruise_kph, v_ego * CV.MS_TO_KPH) # clip to min of vEgo + expected_v_cruise_kph = float(np.clip(round(expected_v_cruise_kph, 1), V_CRUISE_MIN, V_CRUISE_MAX)) + + CS = car.CarState(vEgo=float(v_ego), gasPressed=True, cruiseState={"available": True}) + CS.buttonEvents = [ButtonEvent(type=ButtonType.decelCruise, pressed=False)] + self.v_cruise_helper.update_v_cruise(CS, enabled=True, is_metric=False) + + # TODO: fix skipping first run due to enabled on rising edge exception + if v_ego == 0.0: + continue + self.assertEqual(expected_v_cruise_kph, self.v_cruise_helper.v_cruise_kph) + + def test_initialize_v_cruise(self): + """ + Asserts allowed cruise speeds on enabling with SET. 
+ """ + + for experimental_mode in (True, False): + for v_ego in np.linspace(0, 100, 101): + self.reset_cruise_speed_state() + self.assertFalse(self.v_cruise_helper.v_cruise_initialized) + + self.enable(float(v_ego), experimental_mode) + self.assertTrue(V_CRUISE_INITIAL <= self.v_cruise_helper.v_cruise_kph <= V_CRUISE_MAX) + self.assertTrue(self.v_cruise_helper.v_cruise_initialized) + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/controls/tests/test_following_distance.py b/selfdrive/controls/tests/test_following_distance.py new file mode 100644 index 0000000..3b31632 --- /dev/null +++ b/selfdrive/controls/tests/test_following_distance.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +import unittest +import itertools +from parameterized import parameterized_class + +from openpilot.common.params import Params +from cereal import log + +from openpilot.selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import desired_follow_distance, get_T_FOLLOW +from openpilot.selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver + + +def run_following_distance_simulation(v_lead, t_end=100.0, e2e=False): + man = Maneuver( + '', + duration=t_end, + initial_speed=float(v_lead), + lead_relevancy=True, + initial_distance_lead=100, + speed_lead_values=[v_lead], + breakpoints=[0.], + e2e=e2e, + ) + valid, output = man.evaluate() + assert valid + return output[-1,2] - output[-1,1] + + +@parameterized_class(("e2e", "personality", "speed"), itertools.product( + [True, False], # e2e + [log.LongitudinalPersonality.relaxed, # personality + log.LongitudinalPersonality.standard, + log.LongitudinalPersonality.aggressive], + [0,10,35])) # speed +class TestFollowingDistance(unittest.TestCase): + def test_following_distance(self): + params = Params() + params.put("LongitudinalPersonality", str(self.personality)) + v_lead = float(self.speed) + simulation_steady_state = run_following_distance_simulation(v_lead, e2e=self.e2e) + correct_steady_state = desired_follow_distance(v_lead, v_lead, get_T_FOLLOW(self.personality)) + err_ratio = 0.2 if self.e2e else 0.1 + self.assertAlmostEqual(simulation_steady_state, correct_steady_state, delta=(err_ratio * correct_steady_state + .5)) + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/controls/tests/test_lateral_mpc.py b/selfdrive/controls/tests/test_lateral_mpc.py new file mode 100644 index 0000000..8c09f46 --- /dev/null +++ b/selfdrive/controls/tests/test_lateral_mpc.py @@ -0,0 +1,89 @@ +import unittest +import numpy as np +from openpilot.selfdrive.controls.lib.lateral_mpc_lib.lat_mpc import LateralMpc +from openpilot.selfdrive.controls.lib.drive_helpers import CAR_ROTATION_RADIUS +from openpilot.selfdrive.controls.lib.lateral_mpc_lib.lat_mpc import N as LAT_MPC_N + + +def run_mpc(lat_mpc=None, v_ref=30., x_init=0., y_init=0., psi_init=0., curvature_init=0., + lane_width=3.6, poly_shift=0.): + + if lat_mpc is None: + lat_mpc = LateralMpc() + lat_mpc.set_weights(1., .1, 0.0, .05, 800) + + y_pts = poly_shift * np.ones(LAT_MPC_N + 1) + heading_pts = np.zeros(LAT_MPC_N + 1) + curv_rate_pts = np.zeros(LAT_MPC_N + 1) + + x0 = np.array([x_init, y_init, psi_init, curvature_init]) + p = np.column_stack([v_ref * np.ones(LAT_MPC_N + 1), + CAR_ROTATION_RADIUS * np.ones(LAT_MPC_N + 1)]) + + # converge in no more than 10 iterations + for _ in range(10): + lat_mpc.run(x0, p, + y_pts, heading_pts, curv_rate_pts) + return lat_mpc.x_sol + + +class TestLateralMpc(unittest.TestCase): + + def _assert_null(self, sol, curvature=1e-6): + for i in 
range(len(sol)): + self.assertAlmostEqual(sol[0,i,1], 0., delta=curvature) + self.assertAlmostEqual(sol[0,i,2], 0., delta=curvature) + self.assertAlmostEqual(sol[0,i,3], 0., delta=curvature) + + def _assert_simmetry(self, sol, curvature=1e-6): + for i in range(len(sol)): + self.assertAlmostEqual(sol[0,i,1], -sol[1,i,1], delta=curvature) + self.assertAlmostEqual(sol[0,i,2], -sol[1,i,2], delta=curvature) + self.assertAlmostEqual(sol[0,i,3], -sol[1,i,3], delta=curvature) + self.assertAlmostEqual(sol[0,i,0], sol[1,i,0], delta=curvature) + + def test_straight(self): + sol = run_mpc() + self._assert_null(np.array([sol])) + + def test_y_symmetry(self): + sol = [] + for y_init in [-0.5, 0.5]: + sol.append(run_mpc(y_init=y_init)) + self._assert_simmetry(np.array(sol)) + + def test_poly_symmetry(self): + sol = [] + for poly_shift in [-1., 1.]: + sol.append(run_mpc(poly_shift=poly_shift)) + self._assert_simmetry(np.array(sol)) + + def test_curvature_symmetry(self): + sol = [] + for curvature_init in [-0.1, 0.1]: + sol.append(run_mpc(curvature_init=curvature_init)) + self._assert_simmetry(np.array(sol)) + + def test_psi_symmetry(self): + sol = [] + for psi_init in [-0.1, 0.1]: + sol.append(run_mpc(psi_init=psi_init)) + self._assert_simmetry(np.array(sol)) + + def test_no_overshoot(self): + y_init = 1. + sol = run_mpc(y_init=y_init) + for y in list(sol[:,1]): + self.assertGreaterEqual(y_init, abs(y)) + + def test_switch_convergence(self): + lat_mpc = LateralMpc() + sol = run_mpc(lat_mpc=lat_mpc, poly_shift=3.0, v_ref=7.0) + right_psi_deg = np.degrees(sol[:,2]) + sol = run_mpc(lat_mpc=lat_mpc, poly_shift=-3.0, v_ref=7.0) + left_psi_deg = np.degrees(sol[:,2]) + np.testing.assert_almost_equal(right_psi_deg, -left_psi_deg, decimal=3) + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/controls/tests/test_leads.py b/selfdrive/controls/tests/test_leads.py new file mode 100644 index 0000000..268d9c4 --- /dev/null +++ b/selfdrive/controls/tests/test_leads.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +import unittest + +import cereal.messaging as messaging + +from openpilot.selfdrive.test.process_replay import replay_process_with_name +from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA + + +class TestLeads(unittest.TestCase): + def test_radar_fault(self): + # if there's no radar-related can traffic, radard should either not respond or respond with an error + # this is tightly coupled with underlying car radar_interface implementation, but it's a good sanity check + def single_iter_pkg(): + # single iter package, with meaningless cans and empty carState/modelV2 + msgs = [] + for _ in range(5): + can = messaging.new_message("can", 1) + cs = messaging.new_message("carState") + msgs.append(can.as_reader()) + msgs.append(cs.as_reader()) + model = messaging.new_message("modelV2") + msgs.append(model.as_reader()) + + return msgs + + msgs = [m for _ in range(3) for m in single_iter_pkg()] + out = replay_process_with_name("radard", msgs, fingerprint=TOYOTA.COROLLA_TSS2) + states = [m for m in out if m.which() == "radarState"] + failures = [not state.valid and len(state.radarState.radarErrors) for state in states] + + self.assertTrue(len(states) == 0 or all(failures)) + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/controls/tests/test_startup.py b/selfdrive/controls/tests/test_startup.py new file mode 100644 index 0000000..34d14fb --- /dev/null +++ b/selfdrive/controls/tests/test_startup.py @@ -0,0 +1,120 @@ +import os +from parameterized import parameterized + 
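+# Startup-alert test: starts controlsd with fake pandaStates and either cached FW versions or raw
+# CAN fingerprint traffic, then asserts the first controlsState alert matches the expected startup
+# event (startupMaster / startupNoControl / startupNoFw / startupNoCar).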
+from cereal import log, car +import cereal.messaging as messaging +from openpilot.common.params import Params +from openpilot.selfdrive.boardd.boardd_api_impl import can_list_to_can_capnp +from openpilot.selfdrive.car.fingerprints import _FINGERPRINTS +from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA +from openpilot.selfdrive.car.mazda.values import CAR as MAZDA +from openpilot.selfdrive.controls.lib.events import EVENT_NAME +from openpilot.selfdrive.manager.process_config import managed_processes + +EventName = car.CarEvent.EventName +Ecu = car.CarParams.Ecu + +COROLLA_FW_VERSIONS = [ + (Ecu.engine, 0x7e0, None, b'\x0230ZC2000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00'), + (Ecu.abs, 0x7b0, None, b'F152602190\x00\x00\x00\x00\x00\x00'), + (Ecu.eps, 0x7a1, None, b'8965B02181\x00\x00\x00\x00\x00\x00'), + (Ecu.fwdRadar, 0x750, 0xf, b'8821F4702100\x00\x00\x00\x00'), + (Ecu.fwdCamera, 0x750, 0x6d, b'8646F0201101\x00\x00\x00\x00'), + (Ecu.dsu, 0x791, None, b'881510201100\x00\x00\x00\x00'), +] +COROLLA_FW_VERSIONS_FUZZY = COROLLA_FW_VERSIONS[:-1] + [(Ecu.dsu, 0x791, None, b'xxxxxx')] +COROLLA_FW_VERSIONS_NO_DSU = COROLLA_FW_VERSIONS[:-1] + +CX5_FW_VERSIONS = [ + (Ecu.engine, 0x7e0, None, b'PYNF-188K2-F\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'), + (Ecu.abs, 0x760, None, b'K123-437K2-E\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'), + (Ecu.eps, 0x730, None, b'KJ01-3210X-G-00\x00\x00\x00\x00\x00\x00\x00\x00\x00'), + (Ecu.fwdRadar, 0x764, None, b'K123-67XK2-F\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'), + (Ecu.fwdCamera, 0x706, None, b'B61L-67XK2-T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'), + (Ecu.transmission, 0x7e1, None, b'PYNC-21PS1-B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'), +] + + +@parameterized.expand([ + # TODO: test EventName.startup for release branches + + # officially supported car + (EventName.startupMaster, TOYOTA.COROLLA, COROLLA_FW_VERSIONS, "toyota"), + (EventName.startupMaster, TOYOTA.COROLLA, COROLLA_FW_VERSIONS, "toyota"), + + # dashcamOnly car + (EventName.startupNoControl, MAZDA.CX5, CX5_FW_VERSIONS, "mazda"), + (EventName.startupNoControl, MAZDA.CX5, CX5_FW_VERSIONS, "mazda"), + + # unrecognized car with no fw + (EventName.startupNoFw, None, None, ""), + (EventName.startupNoFw, None, None, ""), + + # unrecognized car + (EventName.startupNoCar, None, COROLLA_FW_VERSIONS[:1], "toyota"), + (EventName.startupNoCar, None, COROLLA_FW_VERSIONS[:1], "toyota"), + + # fuzzy match + (EventName.startupMaster, TOYOTA.COROLLA, COROLLA_FW_VERSIONS_FUZZY, "toyota"), + (EventName.startupMaster, TOYOTA.COROLLA, COROLLA_FW_VERSIONS_FUZZY, "toyota"), +]) +def test_startup_alert(expected_event, car_model, fw_versions, brand): + controls_sock = messaging.sub_sock("controlsState") + pm = messaging.PubMaster(['can', 'pandaStates']) + + params = Params() + params.put_bool("OpenpilotEnabledToggle", True) + + # Build capnn version of FW array + if fw_versions is not None: + car_fw = [] + cp = car.CarParams.new_message() + for ecu, addr, subaddress, version in fw_versions: + f = car.CarParams.CarFw.new_message() + f.ecu = ecu + f.address = addr + f.fwVersion = version + f.brand = brand + + if subaddress is not None: + f.subAddress = subaddress + + car_fw.append(f) + cp.carVin = "1" * 17 + cp.carFw = car_fw + params.put("CarParamsCache", cp.to_bytes()) + else: + os.environ['SKIP_FW_QUERY'] = '1' + + managed_processes['controlsd'].start() + + assert pm.wait_for_readers_to_update('can', 5) + pm.send('can', 
can_list_to_can_capnp([[0, 0, b"", 0]])) + + assert pm.wait_for_readers_to_update('pandaStates', 5) + msg = messaging.new_message('pandaStates', 1) + msg.pandaStates[0].pandaType = log.PandaState.PandaType.uno + pm.send('pandaStates', msg) + + # fingerprint + if (car_model is None) or (fw_versions is not None): + finger = {addr: 1 for addr in range(1, 100)} + else: + finger = _FINGERPRINTS[car_model][0] + + msgs = [[addr, 0, b'\x00'*length, 0] for addr, length in finger.items()] + for _ in range(1000): + # controlsd waits for boardd to echo back that it has changed the multiplexing mode + if not params.get_bool("ObdMultiplexingChanged"): + params.put_bool("ObdMultiplexingChanged", True) + + pm.send('can', can_list_to_can_capnp(msgs)) + assert pm.wait_for_readers_to_update('can', 5, dt=0.001), f"step: {_}" + + ctrls = messaging.drain_sock(controls_sock) + if len(ctrls): + event_name = ctrls[0].controlsState.alertType.split("/")[0] + assert EVENT_NAME[expected_event] == event_name, f"expected {EVENT_NAME[expected_event]} for '{car_model}', got {event_name}" + break + else: + raise Exception(f"failed to fingerprint {car_model}") diff --git a/selfdrive/controls/tests/test_state_machine.py b/selfdrive/controls/tests/test_state_machine.py new file mode 100644 index 0000000..d491117 --- /dev/null +++ b/selfdrive/controls/tests/test_state_machine.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 +import unittest + +from cereal import car, log +from openpilot.common.realtime import DT_CTRL +from openpilot.selfdrive.car.car_helpers import interfaces +from openpilot.selfdrive.controls.controlsd import Controls, SOFT_DISABLE_TIME +from openpilot.selfdrive.controls.lib.events import Events, ET, Alert, Priority, AlertSize, AlertStatus, VisualAlert, \ + AudibleAlert, EVENTS +from openpilot.selfdrive.car.mock.values import CAR as MOCK + +State = log.ControlsState.OpenpilotState + +# The event types that maintain the current state +MAINTAIN_STATES = {State.enabled: (None,), State.disabled: (None,), State.softDisabling: (ET.SOFT_DISABLE,), + State.preEnabled: (ET.PRE_ENABLE,), State.overriding: (ET.OVERRIDE_LATERAL, ET.OVERRIDE_LONGITUDINAL)} +ALL_STATES = tuple(State.schema.enumerants.values()) +# The event types checked in DISABLED section of state machine +ENABLE_EVENT_TYPES = (ET.ENABLE, ET.PRE_ENABLE, ET.OVERRIDE_LATERAL, ET.OVERRIDE_LONGITUDINAL) + + +def make_event(event_types): + event = {} + for ev in event_types: + event[ev] = Alert("", "", AlertStatus.normal, AlertSize.small, Priority.LOW, + VisualAlert.none, AudibleAlert.none, 1.) 
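+  # register the synthetic alert dict under event id 0; the function returns 0 so tests can add a
+  # single event that carries exactly the requested event types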
+ EVENTS[0] = event + return 0 + + +class TestStateMachine(unittest.TestCase): + + def setUp(self): + CarInterface, CarController, CarState = interfaces[MOCK.MOCK] + CP = CarInterface.get_non_essential_params(MOCK.MOCK) + CI = CarInterface(CP, CarController, CarState) + + self.controlsd = Controls(CI=CI) + self.controlsd.events = Events() + self.controlsd.soft_disable_timer = int(SOFT_DISABLE_TIME / DT_CTRL) + self.CS = car.CarState() + + def test_immediate_disable(self): + for state in ALL_STATES: + for et in MAINTAIN_STATES[state]: + self.controlsd.events.add(make_event([et, ET.IMMEDIATE_DISABLE])) + self.controlsd.state = state + self.controlsd.state_transition(self.CS) + self.assertEqual(State.disabled, self.controlsd.state) + self.controlsd.events.clear() + + def test_user_disable(self): + for state in ALL_STATES: + for et in MAINTAIN_STATES[state]: + self.controlsd.events.add(make_event([et, ET.USER_DISABLE])) + self.controlsd.state = state + self.controlsd.state_transition(self.CS) + self.assertEqual(State.disabled, self.controlsd.state) + self.controlsd.events.clear() + + def test_soft_disable(self): + for state in ALL_STATES: + if state == State.preEnabled: # preEnabled considers NO_ENTRY instead + continue + for et in MAINTAIN_STATES[state]: + self.controlsd.events.add(make_event([et, ET.SOFT_DISABLE])) + self.controlsd.state = state + self.controlsd.state_transition(self.CS) + self.assertEqual(self.controlsd.state, State.disabled if state == State.disabled else State.softDisabling) + self.controlsd.events.clear() + + def test_soft_disable_timer(self): + self.controlsd.state = State.enabled + self.controlsd.events.add(make_event([ET.SOFT_DISABLE])) + self.controlsd.state_transition(self.CS) + for _ in range(int(SOFT_DISABLE_TIME / DT_CTRL)): + self.assertEqual(self.controlsd.state, State.softDisabling) + self.controlsd.state_transition(self.CS) + + self.assertEqual(self.controlsd.state, State.disabled) + + def test_no_entry(self): + # Make sure noEntry keeps us disabled + for et in ENABLE_EVENT_TYPES: + self.controlsd.events.add(make_event([ET.NO_ENTRY, et])) + self.controlsd.state_transition(self.CS) + self.assertEqual(self.controlsd.state, State.disabled) + self.controlsd.events.clear() + + def test_no_entry_pre_enable(self): + # preEnabled with noEntry event + self.controlsd.state = State.preEnabled + self.controlsd.events.add(make_event([ET.NO_ENTRY, ET.PRE_ENABLE])) + self.controlsd.state_transition(self.CS) + self.assertEqual(self.controlsd.state, State.preEnabled) + + def test_maintain_states(self): + # Given current state's event type, we should maintain state + for state in ALL_STATES: + for et in MAINTAIN_STATES[state]: + self.controlsd.state = state + self.controlsd.events.add(make_event([et])) + self.controlsd.state_transition(self.CS) + self.assertEqual(self.controlsd.state, state) + self.controlsd.events.clear() + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/locationd/test/.gitignore b/selfdrive/locationd/test/.gitignore new file mode 100644 index 0000000..89f9ac0 --- /dev/null +++ b/selfdrive/locationd/test/.gitignore @@ -0,0 +1 @@ +out/ diff --git a/selfdrive/locationd/test/__init__.py b/selfdrive/locationd/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/selfdrive/locationd/test/test_calibrationd.py b/selfdrive/locationd/test/test_calibrationd.py new file mode 100644 index 0000000..e2db094 --- /dev/null +++ b/selfdrive/locationd/test/test_calibrationd.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +import random 
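+# Calibration test helpers: process_messages() below feeds synthetic cameraOdometry translations
+# that encode the target calibration via small-angle geometry (sketch, with calib = [roll, pitch, yaw]):
+#   trans = [v, sin(yaw) * v, -sin(pitch) * v]
+# so a zero calibration is a purely forward translation; the tests then exercise the reject and
+# auto-reset paths.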
+import unittest + +import numpy as np + +import cereal.messaging as messaging +from cereal import log +from openpilot.common.params import Params +from openpilot.selfdrive.locationd.calibrationd import Calibrator, INPUTS_NEEDED, INPUTS_WANTED, BLOCK_SIZE, MIN_SPEED_FILTER, \ + MAX_YAW_RATE_FILTER, SMOOTH_CYCLES, HEIGHT_INIT, MAX_ALLOWED_PITCH_SPREAD, MAX_ALLOWED_YAW_SPREAD + + +def process_messages(c, cam_odo_calib, cycles, + cam_odo_speed=MIN_SPEED_FILTER + 1, + carstate_speed=MIN_SPEED_FILTER + 1, + cam_odo_yr=0.0, + cam_odo_speed_std=1e-3, + cam_odo_height_std=1e-3): + old_rpy_weight_prev = 0.0 + for _ in range(cycles): + assert (old_rpy_weight_prev - c.old_rpy_weight < 1/SMOOTH_CYCLES + 1e-3) + old_rpy_weight_prev = c.old_rpy_weight + c.handle_v_ego(carstate_speed) + c.handle_cam_odom([cam_odo_speed, + np.sin(cam_odo_calib[2]) * cam_odo_speed, + -np.sin(cam_odo_calib[1]) * cam_odo_speed], + [0.0, 0.0, cam_odo_yr], + [0.0, 0.0, 0.0], + [cam_odo_speed_std, cam_odo_speed_std, cam_odo_speed_std], + [0.0, 0.0, HEIGHT_INIT.item()], + [cam_odo_height_std, cam_odo_height_std, cam_odo_height_std]) + +class TestCalibrationd(unittest.TestCase): + + def test_read_saved_params(self): + msg = messaging.new_message('liveCalibration') + msg.liveCalibration.validBlocks = random.randint(1, 10) + msg.liveCalibration.rpyCalib = [random.random() for _ in range(3)] + msg.liveCalibration.height = [random.random() for _ in range(1)] + Params().put("CalibrationParams", msg.to_bytes()) + c = Calibrator(param_put=True) + + np.testing.assert_allclose(msg.liveCalibration.rpyCalib, c.rpy) + np.testing.assert_allclose(msg.liveCalibration.height, c.height) + self.assertEqual(msg.liveCalibration.validBlocks, c.valid_blocks) + + + def test_calibration_basics(self): + c = Calibrator(param_put=False) + process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED) + self.assertEqual(c.valid_blocks, INPUTS_WANTED) + np.testing.assert_allclose(c.rpy, np.zeros(3)) + np.testing.assert_allclose(c.height, HEIGHT_INIT) + c.reset() + + + def test_calibration_low_speed_reject(self): + c = Calibrator(param_put=False) + process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_speed=MIN_SPEED_FILTER - 1) + process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, carstate_speed=MIN_SPEED_FILTER - 1) + self.assertEqual(c.valid_blocks, 0) + np.testing.assert_allclose(c.rpy, np.zeros(3)) + np.testing.assert_allclose(c.height, HEIGHT_INIT) + + + def test_calibration_yaw_rate_reject(self): + c = Calibrator(param_put=False) + process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_yr=MAX_YAW_RATE_FILTER) + self.assertEqual(c.valid_blocks, 0) + np.testing.assert_allclose(c.rpy, np.zeros(3)) + np.testing.assert_allclose(c.height, HEIGHT_INIT) + + + def test_calibration_speed_std_reject(self): + c = Calibrator(param_put=False) + process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_speed_std=1e3) + self.assertEqual(c.valid_blocks, INPUTS_NEEDED) + np.testing.assert_allclose(c.rpy, np.zeros(3)) + + + def test_calibration_speed_std_height_reject(self): + c = Calibrator(param_put=False) + process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_height_std=1e3) + self.assertEqual(c.valid_blocks, INPUTS_NEEDED) + np.testing.assert_allclose(c.rpy, np.zeros(3)) + + + def test_calibration_auto_reset(self): + c = Calibrator(param_put=False) + process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED) + self.assertEqual(c.valid_blocks, INPUTS_NEEDED) + 
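+    # baseline calibration at zero angles; a spread just under the limit (0.9x) below should keep
+    # the calibrated status, while the 1.1x cases must trigger a recalibration reset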
np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0], atol=1e-3) + process_messages(c, [0.0, MAX_ALLOWED_PITCH_SPREAD*0.9, MAX_ALLOWED_YAW_SPREAD*0.9], BLOCK_SIZE + 10) + self.assertEqual(c.valid_blocks, INPUTS_NEEDED + 1) + self.assertEqual(c.cal_status, log.LiveCalibrationData.Status.calibrated) + + c = Calibrator(param_put=False) + process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED) + self.assertEqual(c.valid_blocks, INPUTS_NEEDED) + np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0]) + process_messages(c, [0.0, MAX_ALLOWED_PITCH_SPREAD*1.1, 0.0], BLOCK_SIZE + 10) + self.assertEqual(c.valid_blocks, 1) + self.assertEqual(c.cal_status, log.LiveCalibrationData.Status.recalibrating) + np.testing.assert_allclose(c.rpy, [0.0, MAX_ALLOWED_PITCH_SPREAD*1.1, 0.0], atol=1e-2) + + c = Calibrator(param_put=False) + process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED) + self.assertEqual(c.valid_blocks, INPUTS_NEEDED) + np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0]) + process_messages(c, [0.0, 0.0, MAX_ALLOWED_YAW_SPREAD*1.1], BLOCK_SIZE + 10) + self.assertEqual(c.valid_blocks, 1) + self.assertEqual(c.cal_status, log.LiveCalibrationData.Status.recalibrating) + np.testing.assert_allclose(c.rpy, [0.0, 0.0, MAX_ALLOWED_YAW_SPREAD*1.1], atol=1e-2) + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/locationd/test/test_locationd.py b/selfdrive/locationd/test/test_locationd.py new file mode 100644 index 0000000..78de921 --- /dev/null +++ b/selfdrive/locationd/test/test_locationd.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 +import json +import random +import unittest +import time +import capnp + +import cereal.messaging as messaging +from cereal.services import SERVICE_LIST +from openpilot.common.params import Params +from openpilot.common.transformations.coordinates import ecef2geodetic + +from openpilot.selfdrive.manager.process_config import managed_processes + + +class TestLocationdProc(unittest.TestCase): + LLD_MSGS = ['gpsLocationExternal', 'cameraOdometry', 'carState', 'liveCalibration', + 'accelerometer', 'gyroscope', 'magnetometer'] + + def setUp(self): + random.seed(123489234) + + self.pm = messaging.PubMaster(self.LLD_MSGS) + + self.params = Params() + self.params.put_bool("UbloxAvailable", True) + managed_processes['locationd'].prepare() + managed_processes['locationd'].start() + + def tearDown(self): + managed_processes['locationd'].stop() + + def get_msg(self, name, t): + try: + msg = messaging.new_message(name) + except capnp.lib.capnp.KjException: + msg = messaging.new_message(name, 0) + + if name == "gpsLocationExternal": + msg.gpsLocationExternal.flags = 1 + msg.gpsLocationExternal.verticalAccuracy = 1.0 + msg.gpsLocationExternal.speedAccuracy = 1.0 + msg.gpsLocationExternal.bearingAccuracyDeg = 1.0 + msg.gpsLocationExternal.vNED = [0.0, 0.0, 0.0] + msg.gpsLocationExternal.latitude = float(self.lat) + msg.gpsLocationExternal.longitude = float(self.lon) + msg.gpsLocationExternal.unixTimestampMillis = t * 1e6 + msg.gpsLocationExternal.altitude = float(self.alt) + #if name == "gnssMeasurements": + # msg.gnssMeasurements.measTime = t + # msg.gnssMeasurements.positionECEF.value = [self.x , self.y, self.z] + # msg.gnssMeasurements.positionECEF.std = [0,0,0] + # msg.gnssMeasurements.positionECEF.valid = True + # msg.gnssMeasurements.velocityECEF.value = [] + # msg.gnssMeasurements.velocityECEF.std = [0,0,0] + # msg.gnssMeasurements.velocityECEF.valid = True + elif name == 'cameraOdometry': + msg.cameraOdometry.rot = [0.0, 0.0, 0.0] + 
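+      # a stationary, zero-noise pose is sufficient here; the test only verifies that the GPS fix
+      # ends up in the LastGPSPosition param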
msg.cameraOdometry.rotStd = [0.0, 0.0, 0.0] + msg.cameraOdometry.trans = [0.0, 0.0, 0.0] + msg.cameraOdometry.transStd = [0.0, 0.0, 0.0] + msg.logMonoTime = t + msg.valid = True + return msg + + def test_params_gps(self): + self.params.remove('LastGPSPosition') + + self.x = -2710700 + (random.random() * 1e5) + self.y = -4280600 + (random.random() * 1e5) + self.z = 3850300 + (random.random() * 1e5) + self.lat, self.lon, self.alt = ecef2geodetic([self.x, self.y, self.z]) + + # get fake messages at the correct frequency, listed in services.py + msgs = [] + for sec in range(65): + for name in self.LLD_MSGS: + for j in range(int(SERVICE_LIST[name].frequency)): + msgs.append(self.get_msg(name, int((sec + j / SERVICE_LIST[name].frequency) * 1e9))) + + for msg in sorted(msgs, key=lambda x: x.logMonoTime): + self.pm.send(msg.which(), msg) + if msg.which() == "cameraOdometry": + self.pm.wait_for_readers_to_update(msg.which(), 0.1, dt=0.005) + time.sleep(1) # wait for async params write + + lastGPS = json.loads(self.params.get('LastGPSPosition')) + self.assertAlmostEqual(lastGPS['latitude'], self.lat, places=3) + self.assertAlmostEqual(lastGPS['longitude'], self.lon, places=3) + self.assertAlmostEqual(lastGPS['altitude'], self.alt, places=3) + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/locationd/test/test_locationd_scenarios.py b/selfdrive/locationd/test/test_locationd_scenarios.py new file mode 100644 index 0000000..f48c83c --- /dev/null +++ b/selfdrive/locationd/test/test_locationd_scenarios.py @@ -0,0 +1,228 @@ +#!/usr/bin/env python3 +import pytest +import unittest +import numpy as np +from collections import defaultdict +from enum import Enum + +from openpilot.tools.lib.logreader import LogReader +from openpilot.selfdrive.test.process_replay.process_replay import replay_process_with_name + +TEST_ROUTE = "ff2bd20623fcaeaa|2023-09-05--10-14-54/4" +GPS_MESSAGES = ['gpsLocationExternal', 'gpsLocation'] +SELECT_COMPARE_FIELDS = { + 'yaw_rate': ['angularVelocityCalibrated', 'value', 2], + 'roll': ['orientationNED', 'value', 0], + 'gps_flag': ['gpsOK'], + 'inputs_flag': ['inputsOK'], + 'sensors_flag': ['sensorsOK'], +} +JUNK_IDX = 100 + + +class Scenario(Enum): + BASE = 'base' + GPS_OFF = 'gps_off' + GPS_OFF_MIDWAY = 'gps_off_midway' + GPS_ON_MIDWAY = 'gps_on_midway' + GPS_TUNNEL = 'gps_tunnel' + GYRO_OFF = 'gyro_off' + GYRO_SPIKE_MIDWAY = 'gyro_spike_midway' + ACCEL_OFF = 'accel_off' + ACCEL_SPIKE_MIDWAY = 'accel_spike_midway' + + +def get_select_fields_data(logs): + def get_nested_keys(msg, keys): + val = None + for key in keys: + val = getattr(msg if val is None else val, key) if isinstance(key, str) else val[key] + return val + llk = [x.liveLocationKalman for x in logs if x.which() == 'liveLocationKalman'] + data = defaultdict(list) + for msg in llk: + for key, fields in SELECT_COMPARE_FIELDS.items(): + data[key].append(get_nested_keys(msg, fields)) + for key in data: + data[key] = np.array(data[key][JUNK_IDX:], dtype=float) + return data + + +def run_scenarios(scenario, logs): + if scenario == Scenario.BASE: + pass + + elif scenario == Scenario.GPS_OFF: + logs = sorted([x for x in logs if x.which() not in GPS_MESSAGES], key=lambda x: x.logMonoTime) + + elif scenario == Scenario.GPS_OFF_MIDWAY: + non_gps = [x for x in logs if x.which() not in GPS_MESSAGES] + gps = [x for x in logs if x.which() in GPS_MESSAGES] + logs = sorted(non_gps + gps[: len(gps) // 2], key=lambda x: x.logMonoTime) + + elif scenario == Scenario.GPS_ON_MIDWAY: + non_gps = [x for x in logs if x.which() 
not in GPS_MESSAGES] + gps = [x for x in logs if x.which() in GPS_MESSAGES] + logs = sorted(non_gps + gps[len(gps) // 2:], key=lambda x: x.logMonoTime) + + elif scenario == Scenario.GPS_TUNNEL: + non_gps = [x for x in logs if x.which() not in GPS_MESSAGES] + gps = [x for x in logs if x.which() in GPS_MESSAGES] + logs = sorted(non_gps + gps[:len(gps) // 4] + gps[-len(gps) // 4:], key=lambda x: x.logMonoTime) + + elif scenario == Scenario.GYRO_OFF: + logs = sorted([x for x in logs if x.which() != 'gyroscope'], key=lambda x: x.logMonoTime) + + elif scenario == Scenario.GYRO_SPIKE_MIDWAY: + non_gyro = [x for x in logs if x.which() not in 'gyroscope'] + gyro = [x for x in logs if x.which() in 'gyroscope'] + temp = gyro[len(gyro) // 2].as_builder() + temp.gyroscope.gyroUncalibrated.v[0] += 3.0 + gyro[len(gyro) // 2] = temp.as_reader() + logs = sorted(non_gyro + gyro, key=lambda x: x.logMonoTime) + + elif scenario == Scenario.ACCEL_OFF: + logs = sorted([x for x in logs if x.which() != 'accelerometer'], key=lambda x: x.logMonoTime) + + elif scenario == Scenario.ACCEL_SPIKE_MIDWAY: + non_accel = [x for x in logs if x.which() not in 'accelerometer'] + accel = [x for x in logs if x.which() in 'accelerometer'] + temp = accel[len(accel) // 2].as_builder() + temp.accelerometer.acceleration.v[0] += 10.0 + accel[len(accel) // 2] = temp.as_reader() + logs = sorted(non_accel + accel, key=lambda x: x.logMonoTime) + + replayed_logs = replay_process_with_name(name='locationd', lr=logs) + return get_select_fields_data(logs), get_select_fields_data(replayed_logs) + + +@pytest.mark.xdist_group("test_locationd_scenarios") +@pytest.mark.shared_download_cache +class TestLocationdScenarios(unittest.TestCase): + """ + Test locationd with different scenarios. In all these scenarios, we expect the following: + - locationd kalman filter should never go unstable (we care mostly about yaw_rate, roll, gpsOK, inputsOK, sensorsOK) + - faulty values should be ignored, with appropriate flags set + """ + + @classmethod + def setUpClass(cls): + cls.logs = list(LogReader(TEST_ROUTE)) + + def test_base(self): + """ + Test: unchanged log + Expected Result: + - yaw_rate: unchanged + - roll: unchanged + """ + orig_data, replayed_data = run_scenarios(Scenario.BASE, self.logs) + self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) + self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))) + + def test_gps_off(self): + """ + Test: no GPS message for the entire segment + Expected Result: + - yaw_rate: unchanged + - roll: + - gpsOK: False + """ + orig_data, replayed_data = run_scenarios(Scenario.GPS_OFF, self.logs) + self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) + self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))) + self.assertTrue(np.all(replayed_data['gps_flag'] == 0.0)) + + def test_gps_off_midway(self): + """ + Test: no GPS message for the second half of the segment + Expected Result: + - yaw_rate: unchanged + - roll: + - gpsOK: True for the first half, False for the second half + """ + orig_data, replayed_data = run_scenarios(Scenario.GPS_OFF_MIDWAY, self.logs) + self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) + self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))) + self.assertTrue(np.diff(replayed_data['gps_flag'])[512] == -1.0) + + def test_gps_on_midway(self): + 
""" + Test: no GPS message for the first half of the segment + Expected Result: + - yaw_rate: unchanged + - roll: + - gpsOK: False for the first half, True for the second half + """ + orig_data, replayed_data = run_scenarios(Scenario.GPS_ON_MIDWAY, self.logs) + self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) + self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(1.5))) + self.assertTrue(np.diff(replayed_data['gps_flag'])[505] == 1.0) + + def test_gps_tunnel(self): + """ + Test: no GPS message for the middle section of the segment + Expected Result: + - yaw_rate: unchanged + - roll: + - gpsOK: False for the middle section, True for the rest + """ + orig_data, replayed_data = run_scenarios(Scenario.GPS_TUNNEL, self.logs) + self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) + self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))) + self.assertTrue(np.diff(replayed_data['gps_flag'])[213] == -1.0) + self.assertTrue(np.diff(replayed_data['gps_flag'])[805] == 1.0) + + def test_gyro_off(self): + """ + Test: no gyroscope message for the entire segment + Expected Result: + - yaw_rate: 0 + - roll: 0 + - sensorsOK: False + """ + _, replayed_data = run_scenarios(Scenario.GYRO_OFF, self.logs) + self.assertTrue(np.allclose(replayed_data['yaw_rate'], 0.0)) + self.assertTrue(np.allclose(replayed_data['roll'], 0.0)) + self.assertTrue(np.all(replayed_data['sensors_flag'] == 0.0)) + + def test_gyro_spikes(self): + """ + Test: a gyroscope spike in the middle of the segment + Expected Result: + - yaw_rate: unchanged + - roll: unchanged + - inputsOK: False for some time after the spike, True for the rest + """ + orig_data, replayed_data = run_scenarios(Scenario.GYRO_SPIKE_MIDWAY, self.logs) + self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) + self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))) + self.assertTrue(np.diff(replayed_data['inputs_flag'])[500] == -1.0) + self.assertTrue(np.diff(replayed_data['inputs_flag'])[694] == 1.0) + + def test_accel_off(self): + """ + Test: no accelerometer message for the entire segment + Expected Result: + - yaw_rate: 0 + - roll: 0 + - sensorsOK: False + """ + _, replayed_data = run_scenarios(Scenario.ACCEL_OFF, self.logs) + self.assertTrue(np.allclose(replayed_data['yaw_rate'], 0.0)) + self.assertTrue(np.allclose(replayed_data['roll'], 0.0)) + self.assertTrue(np.all(replayed_data['sensors_flag'] == 0.0)) + + def test_accel_spikes(self): + """ + ToDo: + Test: an accelerometer spike in the middle of the segment + Expected Result: Right now, the kalman filter is not robust to small spikes like it is to gyroscope spikes. 
+ """ + orig_data, replayed_data = run_scenarios(Scenario.ACCEL_SPIKE_MIDWAY, self.logs) + self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) + self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))) + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/test/.gitignore b/selfdrive/test/.gitignore new file mode 100644 index 0000000..5801faa --- /dev/null +++ b/selfdrive/test/.gitignore @@ -0,0 +1,9 @@ +out/ +docker_out/ + +process_replay/diff.txt +process_replay/model_diff.txt +valgrind_logs.txt + +*.bz2 +*.hevc diff --git a/selfdrive/test/ci_shell.sh b/selfdrive/test/ci_shell.sh new file mode 100644 index 0000000..a5ff714 --- /dev/null +++ b/selfdrive/test/ci_shell.sh @@ -0,0 +1,19 @@ +#!/bin/bash -e + +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)" +OP_ROOT="$DIR/../../" + +if [ -z "$BUILD" ]; then + docker pull ghcr.io/commaai/openpilot-base:latest +else + docker build --cache-from ghcr.io/commaai/openpilot-base:latest -t ghcr.io/commaai/openpilot-base:latest -f $OP_ROOT/Dockerfile.openpilot_base . +fi + +docker run \ + -it \ + --rm \ + --volume $OP_ROOT:$OP_ROOT \ + --workdir $PWD \ + --env PYTHONPATH=$OP_ROOT \ + ghcr.io/commaai/openpilot-base:latest \ + /bin/bash diff --git a/selfdrive/test/ciui.py b/selfdrive/test/ciui.py new file mode 100644 index 0000000..f3b0c1a --- /dev/null +++ b/selfdrive/test/ciui.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +import signal +import subprocess + +signal.signal(signal.SIGINT, signal.SIG_DFL) +signal.signal(signal.SIGTERM, signal.SIG_DFL) + +from PyQt5.QtCore import QTimer +from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel +from openpilot.selfdrive.ui.qt.python_helpers import set_main_window + +class Window(QWidget): + def __init__(self, parent=None): + super().__init__(parent) + + layout = QVBoxLayout() + self.setLayout(layout) + + self.l = QLabel("jenkins runner") + layout.addWidget(self.l) + layout.addStretch(1) + layout.setContentsMargins(20, 20, 20, 20) + + cmds = [ + "cat /etc/hostname", + "echo AGNOS v$(cat /VERSION)", + "uptime -p", + ] + self.labels = {} + for c in cmds: + self.labels[c] = QLabel(c) + layout.addWidget(self.labels[c]) + + self.setStyleSheet(""" + * { + color: white; + font-size: 55px; + background-color: black; + font-family: "JetBrains Mono"; + } + """) + + self.timer = QTimer() + self.timer.timeout.connect(self.update) + self.timer.start(10 * 1000) + self.update() + + def update(self): + for cmd, label in self.labels.items(): + out = subprocess.run(cmd, capture_output=True, + shell=True, check=False, encoding='utf8').stdout + label.setText(out.strip()) + +if __name__ == "__main__": + app = QApplication([]) + w = Window() + set_main_window(w) + app.exec_() diff --git a/selfdrive/test/cpp_harness.py b/selfdrive/test/cpp_harness.py new file mode 100644 index 0000000..f9d3e68 --- /dev/null +++ b/selfdrive/test/cpp_harness.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python3 +import subprocess +import sys + +from openpilot.common.prefix import OpenpilotPrefix + + +with OpenpilotPrefix(): + ret = subprocess.call(sys.argv[1:]) + +exit(ret) diff --git a/selfdrive/test/docker_build.sh b/selfdrive/test/docker_build.sh new file mode 100644 index 0000000..5f77ceb --- /dev/null +++ b/selfdrive/test/docker_build.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +set -e + +# To build sim and docs, you can run the following to mount the scons cache to the same place as in CI: +# mkdir -p .ci_cache/scons_cache +# 
sudo mount --bind /tmp/scons_cache/ .ci_cache/scons_cache + +SCRIPT_DIR=$(dirname "$0") +OPENPILOT_DIR=$SCRIPT_DIR/../../ +if [ -n "$TARGET_ARCHITECTURE" ]; then + PLATFORM="linux/$TARGET_ARCHITECTURE" + TAG_SUFFIX="-$TARGET_ARCHITECTURE" +else + PLATFORM="linux/$(uname -m)" + TAG_SUFFIX="" +fi + +source $SCRIPT_DIR/docker_common.sh $1 "$TAG_SUFFIX" + +DOCKER_BUILDKIT=1 docker buildx build --provenance false --pull --platform $PLATFORM --load --cache-to type=inline --cache-from type=registry,ref=$REMOTE_TAG -t $REMOTE_TAG -t $LOCAL_TAG -f $OPENPILOT_DIR/$DOCKER_FILE $OPENPILOT_DIR + +if [ -n "$PUSH_IMAGE" ]; then + docker push $REMOTE_TAG + docker tag $REMOTE_TAG $REMOTE_SHA_TAG + docker push $REMOTE_SHA_TAG +fi diff --git a/selfdrive/test/docker_common.sh b/selfdrive/test/docker_common.sh new file mode 100644 index 0000000..f8a4237 --- /dev/null +++ b/selfdrive/test/docker_common.sh @@ -0,0 +1,21 @@ +if [ "$1" = "base" ]; then + export DOCKER_IMAGE=openpilot-base + export DOCKER_FILE=Dockerfile.openpilot_base +elif [ "$1" = "sim" ]; then + export DOCKER_IMAGE=openpilot-sim + export DOCKER_FILE=tools/sim/Dockerfile.sim +elif [ "$1" = "prebuilt" ]; then + export DOCKER_IMAGE=openpilot-prebuilt + export DOCKER_FILE=Dockerfile.openpilot +else + echo "Invalid docker build image: '$1'" + exit 1 +fi + +export DOCKER_REGISTRY=ghcr.io/commaai +export COMMIT_SHA=$(git rev-parse HEAD) + +TAG_SUFFIX=$2 +LOCAL_TAG=$DOCKER_IMAGE$TAG_SUFFIX +REMOTE_TAG=$DOCKER_REGISTRY/$LOCAL_TAG +REMOTE_SHA_TAG=$DOCKER_REGISTRY/$LOCAL_TAG:$COMMIT_SHA diff --git a/selfdrive/test/docker_tag_multiarch.sh b/selfdrive/test/docker_tag_multiarch.sh new file mode 100644 index 0000000..c176180 --- /dev/null +++ b/selfdrive/test/docker_tag_multiarch.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -e + +if [ $# -lt 2 ]; then + echo "Usage: $0 ..." 
+ exit 1 +fi + +SCRIPT_DIR=$(dirname "$0") +ARCHS=("${@:2}") + +source $SCRIPT_DIR/docker_common.sh $1 + +MANIFEST_AMENDS="" +for ARCH in ${ARCHS[@]}; do + MANIFEST_AMENDS="$MANIFEST_AMENDS --amend $REMOTE_TAG-$ARCH:$COMMIT_SHA" +done + +docker manifest create $REMOTE_TAG $MANIFEST_AMENDS +docker manifest create $REMOTE_SHA_TAG $MANIFEST_AMENDS + +if [[ -n "$PUSH_IMAGE" ]]; then + docker manifest push $REMOTE_TAG + docker manifest push $REMOTE_SHA_TAG +fi diff --git a/selfdrive/test/fuzzy_generation.py b/selfdrive/test/fuzzy_generation.py index 28c70a0..26c35c0 100644 --- a/selfdrive/test/fuzzy_generation.py +++ b/selfdrive/test/fuzzy_generation.py @@ -1,6 +1,7 @@ import capnp import hypothesis.strategies as st -from typing import Any, Callable, Dict, List, Optional, Union +from typing import Any +from collections.abc import Callable from cereal import log @@ -12,7 +13,7 @@ class FuzzyGenerator: self.draw = draw self.real_floats = real_floats - def generate_native_type(self, field: str) -> st.SearchStrategy[Union[bool, int, float, str, bytes]]: + def generate_native_type(self, field: str) -> st.SearchStrategy[bool | int | float | str | bytes]: def floats(**kwargs) -> st.SearchStrategy[float]: allow_nan = not self.real_floats allow_infinity = not self.real_floats @@ -67,18 +68,18 @@ class FuzzyGenerator: else: return self.generate_struct(field.schema) - def generate_struct(self, schema: capnp.lib.capnp._StructSchema, event: Optional[str] = None) -> st.SearchStrategy[Dict[str, Any]]: - full_fill: List[str] = list(schema.non_union_fields) - single_fill: List[str] = [event] if event else [self.draw(st.sampled_from(schema.union_fields))] if schema.union_fields else [] + def generate_struct(self, schema: capnp.lib.capnp._StructSchema, event: str = None) -> st.SearchStrategy[dict[str, Any]]: + full_fill: list[str] = list(schema.non_union_fields) + single_fill: list[str] = [event] if event else [self.draw(st.sampled_from(schema.union_fields))] if schema.union_fields else [] return st.fixed_dictionaries({field: self.generate_field(schema.fields[field]) for field in full_fill + single_fill}) @classmethod - def get_random_msg(cls, draw: DrawType, struct: capnp.lib.capnp._StructModule, real_floats: bool = False) -> Dict[str, Any]: + def get_random_msg(cls, draw: DrawType, struct: capnp.lib.capnp._StructModule, real_floats: bool = False) -> dict[str, Any]: fg = cls(draw, real_floats=real_floats) - data: Dict[str, Any] = draw(fg.generate_struct(struct.schema)) + data: dict[str, Any] = draw(fg.generate_struct(struct.schema)) return data @classmethod - def get_random_event_msg(cls, draw: DrawType, events: List[str], real_floats: bool = False) -> List[Dict[str, Any]]: + def get_random_event_msg(cls, draw: DrawType, events: list[str], real_floats: bool = False) -> list[dict[str, Any]]: fg = cls(draw, real_floats=real_floats) return [draw(fg.generate_struct(log.Event.schema, e)) for e in sorted(events)] diff --git a/selfdrive/test/helpers.py b/selfdrive/test/helpers.py index 0e7912a..210a283 100644 --- a/selfdrive/test/helpers.py +++ b/selfdrive/test/helpers.py @@ -1,4 +1,6 @@ +import http.server import os +import threading import time from functools import wraps @@ -72,7 +74,29 @@ def noop(*args, **kwargs): def read_segment_list(segment_list_path): - with open(segment_list_path, "r") as f: + with open(segment_list_path) as f: seg_list = f.read().splitlines() return [(platform[2:], segment) for platform, segment in zip(seg_list[::2], seg_list[1::2], strict=True)] + + +def with_http_server(func, 
handler=http.server.BaseHTTPRequestHandler, setup=None): + @wraps(func) + def inner(*args, **kwargs): + host = '127.0.0.1' + server = http.server.HTTPServer((host, 0), handler) + port = server.server_port + t = threading.Thread(target=server.serve_forever) + t.start() + + if setup is not None: + setup(host, port) + + try: + return func(*args, f'http://{host}:{port}', **kwargs) + finally: + server.shutdown() + server.server_close() + t.join() + + return inner diff --git a/selfdrive/test/longitudinal_maneuvers/.gitignore b/selfdrive/test/longitudinal_maneuvers/.gitignore new file mode 100644 index 0000000..d42ab35 --- /dev/null +++ b/selfdrive/test/longitudinal_maneuvers/.gitignore @@ -0,0 +1 @@ +out/* diff --git a/selfdrive/test/longitudinal_maneuvers/__init__.py b/selfdrive/test/longitudinal_maneuvers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/selfdrive/test/longitudinal_maneuvers/maneuver.py b/selfdrive/test/longitudinal_maneuvers/maneuver.py new file mode 100644 index 0000000..000225a --- /dev/null +++ b/selfdrive/test/longitudinal_maneuvers/maneuver.py @@ -0,0 +1,71 @@ +import numpy as np +from openpilot.selfdrive.test.longitudinal_maneuvers.plant import Plant + + +class Maneuver: + def __init__(self, title, duration, **kwargs): + # Was tempted to make a builder class + self.distance_lead = kwargs.get("initial_distance_lead", 200.0) + self.speed = kwargs.get("initial_speed", 0.0) + self.lead_relevancy = kwargs.get("lead_relevancy", 0) + + self.breakpoints = kwargs.get("breakpoints", [0.0, duration]) + self.speed_lead_values = kwargs.get("speed_lead_values", [0.0 for i in range(len(self.breakpoints))]) + self.prob_lead_values = kwargs.get("prob_lead_values", [1.0 for i in range(len(self.breakpoints))]) + self.cruise_values = kwargs.get("cruise_values", [50.0 for i in range(len(self.breakpoints))]) + + self.only_lead2 = kwargs.get("only_lead2", False) + self.only_radar = kwargs.get("only_radar", False) + self.ensure_start = kwargs.get("ensure_start", False) + self.enabled = kwargs.get("enabled", True) + self.e2e = kwargs.get("e2e", False) + self.force_decel = kwargs.get("force_decel", False) + + self.duration = duration + self.title = title + + def evaluate(self): + plant = Plant( + lead_relevancy=self.lead_relevancy, + speed=self.speed, + distance_lead=self.distance_lead, + enabled=self.enabled, + only_lead2=self.only_lead2, + only_radar=self.only_radar, + e2e=self.e2e, + force_decel=self.force_decel, + ) + + valid = True + logs = [] + while plant.current_time < self.duration: + speed_lead = np.interp(plant.current_time, self.breakpoints, self.speed_lead_values) + prob = np.interp(plant.current_time, self.breakpoints, self.prob_lead_values) + cruise = np.interp(plant.current_time, self.breakpoints, self.cruise_values) + log = plant.step(speed_lead, prob, cruise) + + d_rel = log['distance_lead'] - log['distance'] if self.lead_relevancy else 200. + v_rel = speed_lead - log['speed'] if self.lead_relevancy else 0. 
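+      # with no relevant lead, fall back to the same 200 m / 0 m/s placeholder that plant.py
+      # reports for an absent lead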
+ log['d_rel'] = d_rel + log['v_rel'] = v_rel + logs.append(np.array([plant.current_time, + log['distance'], + log['distance_lead'], + log['speed'], + speed_lead, + log['acceleration']])) + + if d_rel < .4 and (self.only_radar or prob > 0.5): + print("Crashed!!!!") + valid = False + + if self.ensure_start and log['v_rel'] > 0 and log['speeds'][-1] <= 0.1: + print('LongitudinalPlanner not starting!') + valid = False + if self.force_decel and log['speed'] > 1e-1 and log['acceleration'] > -0.04: + print('Not stopping with force decel') + valid = False + + + print("maneuver end", valid) + return valid, np.array(logs) diff --git a/selfdrive/test/longitudinal_maneuvers/plant.py b/selfdrive/test/longitudinal_maneuvers/plant.py new file mode 100644 index 0000000..bb935fd --- /dev/null +++ b/selfdrive/test/longitudinal_maneuvers/plant.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python3 +import time +import numpy as np + +from cereal import log +import cereal.messaging as messaging +from openpilot.common.realtime import Ratekeeper, DT_MDL +from openpilot.selfdrive.controls.lib.longcontrol import LongCtrlState +from openpilot.selfdrive.modeld.constants import ModelConstants +from openpilot.selfdrive.controls.lib.longitudinal_planner import LongitudinalPlanner +from openpilot.selfdrive.controls.radard import _LEAD_ACCEL_TAU + + +class Plant: + messaging_initialized = False + + def __init__(self, lead_relevancy=False, speed=0.0, distance_lead=2.0, + enabled=True, only_lead2=False, only_radar=False, e2e=False, force_decel=False): + self.rate = 1. / DT_MDL + + if not Plant.messaging_initialized: + Plant.radar = messaging.pub_sock('radarState') + Plant.controls_state = messaging.pub_sock('controlsState') + Plant.car_state = messaging.pub_sock('carState') + Plant.plan = messaging.sub_sock('longitudinalPlan') + Plant.messaging_initialized = True + + self.v_lead_prev = 0.0 + + self.distance = 0. + self.speed = speed + self.acceleration = 0.0 + self.speeds = [] + + # lead car + self.lead_relevancy = lead_relevancy + self.distance_lead = distance_lead + self.enabled = enabled + self.only_lead2 = only_lead2 + self.only_radar = only_radar + self.e2e = e2e + self.force_decel = force_decel + + self.rk = Ratekeeper(self.rate, print_delay_threshold=100.0) + self.ts = 1. / self.rate + time.sleep(0.1) + self.sm = messaging.SubMaster(['longitudinalPlan']) + + from openpilot.selfdrive.car.honda.values import CAR + from openpilot.selfdrive.car.honda.interface import CarInterface + + self.planner = LongitudinalPlanner(CarInterface.get_non_essential_params(CAR.CIVIC), init_v=self.speed) + + @property + def current_time(self): + return float(self.rk.frame) / self.rate + + def step(self, v_lead=0.0, prob=1.0, v_cruise=50.): + # ******** publish a fake model going straight and fake calibration ******** + # note that this is worst case for MPC, since model will delay long mpc by one time step + radar = messaging.new_message('radarState') + control = messaging.new_message('controlsState') + car_state = messaging.new_message('carState') + model = messaging.new_message('modelV2') + a_lead = (v_lead - self.v_lead_prev)/self.ts + self.v_lead_prev = v_lead + + if self.lead_relevancy: + d_rel = np.maximum(0., self.distance_lead - self.distance) + v_rel = v_lead - self.speed + if self.only_radar: + status = True + elif prob > .5: + status = True + else: + status = False + else: + d_rel = 200. + v_rel = 0. 
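+      # no relevant lead: report a far-away placeholder and force the lead probability to zero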
+ prob = 0.0 + status = False + + lead = log.RadarState.LeadData.new_message() + lead.dRel = float(d_rel) + lead.yRel = float(0.0) + lead.vRel = float(v_rel) + lead.aRel = float(a_lead - self.acceleration) + lead.vLead = float(v_lead) + lead.vLeadK = float(v_lead) + lead.aLeadK = float(a_lead) + # TODO use real radard logic for this + lead.aLeadTau = float(_LEAD_ACCEL_TAU) + lead.status = status + lead.modelProb = float(prob) + if not self.only_lead2: + radar.radarState.leadOne = lead + radar.radarState.leadTwo = lead + + # Simulate model predicting slightly faster speed + # this is to ensure lead policy is effective when model + # does not predict slowdown in e2e mode + position = log.XYZTData.new_message() + position.x = [float(x) for x in (self.speed + 0.5) * np.array(ModelConstants.T_IDXS)] + model.modelV2.position = position + velocity = log.XYZTData.new_message() + velocity.x = [float(x) for x in (self.speed + 0.5) * np.ones_like(ModelConstants.T_IDXS)] + model.modelV2.velocity = velocity + acceleration = log.XYZTData.new_message() + acceleration.x = [float(x) for x in np.zeros_like(ModelConstants.T_IDXS)] + model.modelV2.acceleration = acceleration + + control.controlsState.longControlState = LongCtrlState.pid if self.enabled else LongCtrlState.off + control.controlsState.vCruise = float(v_cruise * 3.6) + control.controlsState.experimentalMode = self.e2e + control.controlsState.forceDecel = self.force_decel + car_state.carState.vEgo = float(self.speed) + car_state.carState.standstill = self.speed < 0.01 + + # ******** get controlsState messages for plotting *** + sm = {'radarState': radar.radarState, + 'carState': car_state.carState, + 'controlsState': control.controlsState, + 'modelV2': model.modelV2} + self.planner.update(sm) + self.speed = self.planner.v_desired_filter.x + self.acceleration = self.planner.a_desired + self.speeds = self.planner.v_desired_trajectory.tolist() + fcw = self.planner.fcw + self.distance_lead = self.distance_lead + v_lead * self.ts + + # ******** run the car ******** + #print(self.distance, speed) + if self.speed <= 0: + self.speed = 0 + self.acceleration = 0 + self.distance = self.distance + self.speed * self.ts + + # *** radar model *** + if self.lead_relevancy: + d_rel = np.maximum(0., self.distance_lead - self.distance) + v_rel = v_lead - self.speed + else: + d_rel = 200. + v_rel = 0. 
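+    # d_rel/v_rel are recomputed after integrating the ego position; they only feed the optional
+    # 5 Hz debug print below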
+ + # print at 5hz + # if (self.rk.frame % (self.rate // 5)) == 0: + # print("%2.2f sec %6.2f m %6.2f m/s %6.2f m/s2 lead_rel: %6.2f m %6.2f m/s" + # % (self.current_time, self.distance, self.speed, self.acceleration, d_rel, v_rel)) + + + # ******** update prevs ******** + self.rk.monitor_time() + + return { + "distance": self.distance, + "speed": self.speed, + "acceleration": self.acceleration, + "speeds": self.speeds, + "distance_lead": self.distance_lead, + "fcw": fcw, + } + +# simple engage in standalone mode +def plant_thread(): + plant = Plant() + while 1: + plant.step() + + +if __name__ == "__main__": + plant_thread() diff --git a/selfdrive/test/longitudinal_maneuvers/test_longitudinal.py b/selfdrive/test/longitudinal_maneuvers/test_longitudinal.py new file mode 100644 index 0000000..713b780 --- /dev/null +++ b/selfdrive/test/longitudinal_maneuvers/test_longitudinal.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +import itertools +import unittest +from parameterized import parameterized_class + +from openpilot.selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import STOP_DISTANCE +from openpilot.selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver + + +# TODO: make new FCW tests +def create_maneuvers(kwargs): + maneuvers = [ + Maneuver( + 'approach stopped car at 25m/s, initial distance: 120m', + duration=20., + initial_speed=25., + lead_relevancy=True, + initial_distance_lead=120., + speed_lead_values=[30., 0.], + breakpoints=[0., 1.], + **kwargs, + ), + Maneuver( + 'approach stopped car at 20m/s, initial distance 90m', + duration=20., + initial_speed=20., + lead_relevancy=True, + initial_distance_lead=90., + speed_lead_values=[20., 0.], + breakpoints=[0., 1.], + **kwargs, + ), + Maneuver( + 'steady state following a car at 20m/s, then lead decel to 0mph at 1m/s^2', + duration=50., + initial_speed=20., + lead_relevancy=True, + initial_distance_lead=35., + speed_lead_values=[20., 20., 0.], + breakpoints=[0., 15., 35.0], + **kwargs, + ), + Maneuver( + 'steady state following a car at 20m/s, then lead decel to 0mph at 2m/s^2', + duration=50., + initial_speed=20., + lead_relevancy=True, + initial_distance_lead=35., + speed_lead_values=[20., 20., 0.], + breakpoints=[0., 15., 25.0], + **kwargs, + ), + Maneuver( + 'steady state following a car at 20m/s, then lead decel to 0mph at 3m/s^2', + duration=50., + initial_speed=20., + lead_relevancy=True, + initial_distance_lead=35., + speed_lead_values=[20., 20., 0.], + breakpoints=[0., 15., 21.66], + **kwargs, + ), + Maneuver( + 'steady state following a car at 20m/s, then lead decel to 0mph at 3+m/s^2', + duration=40., + initial_speed=20., + lead_relevancy=True, + initial_distance_lead=35., + speed_lead_values=[20., 20., 0.], + prob_lead_values=[0., 1., 1.], + cruise_values=[20., 20., 20.], + breakpoints=[2., 2.01, 8.8], + **kwargs, + ), + Maneuver( + "approach stopped car at 20m/s, with prob_lead_values", + duration=30., + initial_speed=20., + lead_relevancy=True, + initial_distance_lead=120., + speed_lead_values=[0.0, 0., 0.], + prob_lead_values=[0.0, 0., 1.], + cruise_values=[20., 20., 20.], + breakpoints=[0.0, 2., 2.01], + **kwargs, + ), + Maneuver( + "approach slower cut-in car at 20m/s", + duration=20., + initial_speed=20., + lead_relevancy=True, + initial_distance_lead=50., + speed_lead_values=[15., 15.], + breakpoints=[1., 11.], + only_lead2=True, + **kwargs, + ), + Maneuver( + "stay stopped behind radar override lead", + duration=20., + initial_speed=0., + lead_relevancy=True, + initial_distance_lead=10., + 
speed_lead_values=[0., 0.], + prob_lead_values=[0., 0.], + breakpoints=[1., 11.], + only_radar=True, + **kwargs, + ), + Maneuver( + "NaN recovery", + duration=30., + initial_speed=15., + lead_relevancy=True, + initial_distance_lead=60., + speed_lead_values=[0., 0., 0.0], + breakpoints=[1., 1.01, 11.], + cruise_values=[float("nan"), 15., 15.], + **kwargs, + ), + Maneuver( + 'cruising at 25 m/s while disabled', + duration=20., + initial_speed=25., + lead_relevancy=False, + enabled=False, + **kwargs, + ), + ] + if not kwargs['force_decel']: + # controls relies on planner commanding to move for stock-ACC resume spamming + maneuvers.append(Maneuver( + "resume from a stop", + duration=20., + initial_speed=0., + lead_relevancy=True, + initial_distance_lead=STOP_DISTANCE, + speed_lead_values=[0., 0., 2.], + breakpoints=[1., 10., 15.], + ensure_start=True, + **kwargs, + )) + return maneuvers + + +@parameterized_class(("e2e", "force_decel"), itertools.product([True, False], repeat=2)) +class LongitudinalControl(unittest.TestCase): + e2e: bool + force_decel: bool + + def test_maneuver(self): + for maneuver in create_maneuvers({"e2e": self.e2e, "force_decel": self.force_decel}): + with self.subTest(title=maneuver.title, e2e=maneuver.e2e, force_decel=maneuver.force_decel): + print(maneuver.title, f'in {"e2e" if maneuver.e2e else "acc"} mode') + valid, _ = maneuver.evaluate() + self.assertTrue(valid) + + +if __name__ == "__main__": + unittest.main(failfast=True) diff --git a/selfdrive/test/loop_until_fail.sh b/selfdrive/test/loop_until_fail.sh new file mode 100644 index 0000000..b73009d --- /dev/null +++ b/selfdrive/test/loop_until_fail.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -e + +# Loop something forever until it fails, for verifying new tests + +while true; do + $@ +done diff --git a/selfdrive/test/process_replay/.gitignore b/selfdrive/test/process_replay/.gitignore new file mode 100644 index 0000000..63c37e6 --- /dev/null +++ b/selfdrive/test/process_replay/.gitignore @@ -0,0 +1,2 @@ +fakedata/ +debayer_diff.txt diff --git a/selfdrive/test/process_replay/README.md b/selfdrive/test/process_replay/README.md new file mode 100644 index 0000000..008a901 --- /dev/null +++ b/selfdrive/test/process_replay/README.md @@ -0,0 +1,126 @@ +# Process replay + +Process replay is a regression test designed to identify any changes in the output of a process. This test replays a segment through individual processes and compares the output to a known good replay. Each make is represented in the test with a segment. + +If the test fails, make sure that you didn't unintentionally change anything. If there are intentional changes, the reference logs will be updated. + +Use `test_processes.py` to run the test locally. +Use `FILEREADER_CACHE='1' test_processes.py` to cache log files. + +Currently the following processes are tested: + +* controlsd +* radard +* plannerd +* calibrationd +* dmonitoringd +* locationd +* paramsd +* ubloxd +* torqued + +### Usage +``` +Usage: test_processes.py [-h] [--whitelist-procs PROCS] [--whitelist-cars CARS] [--blacklist-procs PROCS] + [--blacklist-cars CARS] [--ignore-fields FIELDS] [--ignore-msgs MSGS] [--update-refs] [--upload-only] +Regression test to identify changes in a process's output +optional arguments: + -h, --help show this help message and exit + --whitelist-procs PROCS Whitelist given processes from the test (e.g. controlsd) + --whitelist-cars WHITELIST_CARS Whitelist given cars from the test (e.g. 
HONDA) + --blacklist-procs BLACKLIST_PROCS Blacklist given processes from the test (e.g. controlsd) + --blacklist-cars BLACKLIST_CARS Blacklist given cars from the test (e.g. HONDA) + --ignore-fields IGNORE_FIELDS Extra fields or msgs to ignore (e.g. carState.events) + --ignore-msgs IGNORE_MSGS Msgs to ignore (e.g. onroadEvents) + --update-refs Updates reference logs using current commit + --upload-only Skips testing processes and uploads logs from previous test run +``` + +## Forks + +openpilot forks can use this test with their own reference logs, by default `test_proccess.py` saves logs locally. + +To generate new logs: + +`./test_processes.py` + +Then, check in the new logs using git-lfs. Make sure to also update the `ref_commit` file to the current commit. + +## API + +Process replay test suite exposes programmatic APIs for simultaneously running processes or groups of processes on provided logs. + +```py +def replay_process_with_name(name: Union[str, Iterable[str]], lr: LogIterable, *args, **kwargs) -> List[capnp._DynamicStructReader]: + +def replay_process( + cfg: Union[ProcessConfig, Iterable[ProcessConfig]], lr: LogIterable, frs: Optional[Dict[str, Any]] = None, + fingerprint: Optional[str] = None, return_all_logs: bool = False, custom_params: Optional[Dict[str, Any]] = None, disable_progress: bool = False +) -> List[capnp._DynamicStructReader]: +``` + +Example usage: +```py +from openpilot.selfdrive.test.process_replay import replay_process_with_name +from openpilot.tools.lib.logreader import LogReader + +lr = LogReader(...) + +# provide a name of the process to replay +output_logs = replay_process_with_name('locationd', lr) + +# or list of names +output_logs = replay_process_with_name(['ubloxd', 'locationd'], lr) +``` + +Supported processes: +* controlsd +* radard +* plannerd +* calibrationd +* dmonitoringd +* locationd +* paramsd +* ubloxd +* torqued +* modeld +* dmonitoringmodeld + +Certain processes may require an initial state, which is usually supplied within `Params` and persisting from segment to segment (e.g CalibrationParams, LiveParameters). The `custom_params` is dictionary used to prepopulate `Params` with arbitrary values. The `get_custom_params_from_lr` helper is provided to fetch meaningful values from log files. + +```py +from openpilot.selfdrive.test.process_replay import get_custom_params_from_lr + +previous_segment_lr = LogReader(...) +current_segment_lr = LogReader(...) + +custom_params = get_custom_params_from_lr(previous_segment_lr, 'last') + +output_logs = replay_process_with_name('calibrationd', lr, custom_params=custom_params) +``` + +Replaying processes that use VisionIPC (e.g. modeld, dmonitoringmodeld) require additional `frs` dictionary with camera states as keys and `FrameReader` objects as values. + +```py +from openpilot.tools.lib.framereader import FrameReader + +frs = { + 'roadCameraState': FrameReader(...), + 'wideRoadCameraState': FrameReader(...), + 'driverCameraState': FrameReader(...), +} + +output_logs = replay_process_with_name(['modeld', 'dmonitoringmodeld'], lr, frs=frs) +``` + +To capture stdout/stderr of the replayed process, `captured_output_store` can be provided. + +```py +output_store = dict() +# pass dictionary by reference, it will be filled with standard outputs - even if process replay fails +output_logs = replay_process_with_name(['radard', 'plannerd'], lr, captured_output_store=output_store) + +# entries with captured output in format { 'out': '...', 'err': '...' 
} will be added to provided dictionary for each replayed process +print(output_store['radard']['out']) # radard stdout +print(output_store['radard']['err']) # radard stderr +``` diff --git a/selfdrive/test/process_replay/__init__.py b/selfdrive/test/process_replay/__init__.py new file mode 100644 index 0000000..b994277 --- /dev/null +++ b/selfdrive/test/process_replay/__init__.py @@ -0,0 +1,2 @@ +from openpilot.selfdrive.test.process_replay.process_replay import CONFIGS, get_process_config, get_custom_params_from_lr, \ + replay_process, replay_process_with_name # noqa: F401 diff --git a/selfdrive/test/process_replay/capture.py b/selfdrive/test/process_replay/capture.py new file mode 100644 index 0000000..90c279e --- /dev/null +++ b/selfdrive/test/process_replay/capture.py @@ -0,0 +1,59 @@ +import os +import sys + +from typing import no_type_check + +class FdRedirect: + def __init__(self, file_prefix: str, fd: int): + fname = os.path.join("/tmp", f"{file_prefix}.{fd}") + if os.path.exists(fname): + os.unlink(fname) + self.dest_fd = os.open(fname, os.O_WRONLY | os.O_CREAT) + self.dest_fname = fname + self.source_fd = fd + os.set_inheritable(self.dest_fd, True) + + def __del__(self): + os.close(self.dest_fd) + + def purge(self) -> None: + os.unlink(self.dest_fname) + + def read(self) -> bytes: + with open(self.dest_fname, "rb") as f: + return f.read() or b"" + + def link(self) -> None: + os.dup2(self.dest_fd, self.source_fd) + + +class ProcessOutputCapture: + def __init__(self, proc_name: str, prefix: str): + prefix = f"{proc_name}_{prefix}" + self.stdout_redirect = FdRedirect(prefix, 1) + self.stderr_redirect = FdRedirect(prefix, 2) + + def __del__(self): + self.stdout_redirect.purge() + self.stderr_redirect.purge() + + @no_type_check # ipython classes have incompatible signatures + def link_with_current_proc(self) -> None: + try: + # prevent ipykernel from redirecting stdout/stderr of python subprocesses + from ipykernel.iostream import OutStream + if isinstance(sys.stdout, OutStream): + sys.stdout = sys.__stdout__ + if isinstance(sys.stderr, OutStream): + sys.stderr = sys.__stderr__ + except ImportError: + pass + + # link stdout/stderr to the fifo + self.stdout_redirect.link() + self.stderr_redirect.link() + + def read_outerr(self) -> tuple[str, str]: + out_str = self.stdout_redirect.read().decode() + err_str = self.stderr_redirect.read().decode() + return out_str, err_str diff --git a/selfdrive/test/process_replay/compare_logs.py b/selfdrive/test/process_replay/compare_logs.py new file mode 100644 index 0000000..673f3b4 --- /dev/null +++ b/selfdrive/test/process_replay/compare_logs.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python3 +import sys +import math +import capnp +import numbers +import dictdiffer +from collections import Counter + +from openpilot.tools.lib.logreader import LogReader + +EPSILON = sys.float_info.epsilon + + +def remove_ignored_fields(msg, ignore): + msg = msg.as_builder() + for key in ignore: + attr = msg + keys = key.split(".") + if msg.which() != keys[0] and len(keys) > 1: + continue + + for k in keys[:-1]: + # indexing into list + if k.isdigit(): + attr = attr[int(k)] + else: + attr = getattr(attr, k) + + v = getattr(attr, keys[-1]) + if isinstance(v, bool): + val = False + elif isinstance(v, numbers.Number): + val = 0 + elif isinstance(v, (list, capnp.lib.capnp._DynamicListBuilder)): + val = [] + else: + raise NotImplementedError(f"Unknown type: {type(v)}") + setattr(attr, keys[-1], val) + return msg + + +def compare_logs(log1, log2, ignore_fields=None, 
ignore_msgs=None, tolerance=None,): + if ignore_fields is None: + ignore_fields = [] + if ignore_msgs is None: + ignore_msgs = [] + tolerance = EPSILON if tolerance is None else tolerance + + log1, log2 = ( + [m for m in log if m.which() not in ignore_msgs] + for log in (log1, log2) + ) + + if len(log1) != len(log2): + cnt1 = Counter(m.which() for m in log1) + cnt2 = Counter(m.which() for m in log2) + raise Exception(f"logs are not same length: {len(log1)} VS {len(log2)}\n\t\t{cnt1}\n\t\t{cnt2}") + + diff = [] + for msg1, msg2 in zip(log1, log2, strict=True): + if msg1.which() != msg2.which(): + raise Exception("msgs not aligned between logs") + + msg1 = remove_ignored_fields(msg1, ignore_fields) + msg2 = remove_ignored_fields(msg2, ignore_fields) + + if msg1.to_bytes() != msg2.to_bytes(): + msg1_dict = msg1.as_reader().to_dict(verbose=True) + msg2_dict = msg2.as_reader().to_dict(verbose=True) + + dd = dictdiffer.diff(msg1_dict, msg2_dict, ignore=ignore_fields) + + # Dictdiffer only supports relative tolerance, we also want to check for absolute + # TODO: add this to dictdiffer + def outside_tolerance(diff): + try: + if diff[0] == "change": + a, b = diff[2] + finite = math.isfinite(a) and math.isfinite(b) + if finite and isinstance(a, numbers.Number) and isinstance(b, numbers.Number): + return abs(a - b) > max(tolerance, tolerance * max(abs(a), abs(b))) + except TypeError: + pass + return True + + dd = list(filter(outside_tolerance, dd)) + + diff.extend(dd) + return diff + + +def format_process_diff(diff): + diff_short, diff_long = "", "" + + if isinstance(diff, str): + diff_short += f" {diff}\n" + diff_long += f"\t{diff}\n" + else: + cnt: dict[str, int] = {} + for d in diff: + diff_long += f"\t{str(d)}\n" + + k = str(d[1]) + cnt[k] = 1 if k not in cnt else cnt[k] + 1 + + for k, v in sorted(cnt.items()): + diff_short += f" {k}: {v}\n" + + return diff_short, diff_long + + +def format_diff(results, log_paths, ref_commit): + diff_short, diff_long = "", "" + diff_long += f"***** tested against commit {ref_commit} *****\n" + + failed = False + for segment, result in list(results.items()): + diff_short += f"***** results for segment {segment} *****\n" + diff_long += f"***** differences for segment {segment} *****\n" + + for proc, diff in list(result.items()): + diff_long += f"*** process: {proc} ***\n" + diff_long += f"\tref: {log_paths[segment][proc]['ref']}\n" + diff_long += f"\tnew: {log_paths[segment][proc]['new']}\n\n" + + diff_short += f" {proc}\n" + + if isinstance(diff, str) or len(diff): + diff_short += f" ref: {log_paths[segment][proc]['ref']}\n" + diff_short += f" new: {log_paths[segment][proc]['new']}\n\n" + failed = True + + proc_diff_short, proc_diff_long = format_process_diff(diff) + + diff_long += proc_diff_long + diff_short += proc_diff_short + + return diff_short, diff_long, failed + + +if __name__ == "__main__": + log1 = list(LogReader(sys.argv[1])) + log2 = list(LogReader(sys.argv[2])) + ignore_fields = sys.argv[3:] or ["logMonoTime", "controlsState.startMonoTime", "controlsState.cumLagMs"] + results = {"segment": {"proc": compare_logs(log1, log2, ignore_fields)}} + log_paths = {"segment": {"proc": {"ref": sys.argv[1], "new": sys.argv[2]}}} + diff_short, diff_long, failed = format_diff(results, log_paths, None) + + print(diff_long) + print(diff_short) diff --git a/selfdrive/test/process_replay/debayer_replay_ref_commit b/selfdrive/test/process_replay/debayer_replay_ref_commit new file mode 100644 index 0000000..551fc68 --- /dev/null +++ 
b/selfdrive/test/process_replay/debayer_replay_ref_commit @@ -0,0 +1 @@ +8f9ba7540b4549b4a57312129b8ff678d045f70f \ No newline at end of file diff --git a/selfdrive/test/process_replay/migration.py b/selfdrive/test/process_replay/migration.py new file mode 100644 index 0000000..ef74314 --- /dev/null +++ b/selfdrive/test/process_replay/migration.py @@ -0,0 +1,203 @@ +from collections import defaultdict + +from cereal import messaging +from openpilot.selfdrive.test.process_replay.vision_meta import meta_from_encode_index +from openpilot.selfdrive.car.toyota.values import EPS_SCALE +from openpilot.selfdrive.manager.process_config import managed_processes +from panda import Panda + + +def migrate_all(lr, old_logtime=False, manager_states=False, panda_states=False, camera_states=False): + msgs = migrate_sensorEvents(lr, old_logtime) + msgs = migrate_carParams(msgs, old_logtime) + if manager_states: + msgs = migrate_managerState(msgs) + if panda_states: + msgs = migrate_pandaStates(msgs) + msgs = migrate_peripheralState(msgs) + if camera_states: + msgs = migrate_cameraStates(msgs) + + return msgs + + +def migrate_managerState(lr): + all_msgs = [] + for msg in lr: + if msg.which() != "managerState": + all_msgs.append(msg) + continue + + new_msg = msg.as_builder() + new_msg.managerState.processes = [{'name': name, 'running': True} for name in managed_processes] + all_msgs.append(new_msg.as_reader()) + + return all_msgs + + +def migrate_pandaStates(lr): + all_msgs = [] + # TODO: safety param migration should be handled automatically + safety_param_migration = { + "TOYOTA PRIUS 2017": EPS_SCALE["TOYOTA PRIUS 2017"] | Panda.FLAG_TOYOTA_STOCK_LONGITUDINAL, + "TOYOTA RAV4 2017": EPS_SCALE["TOYOTA RAV4 2017"] | Panda.FLAG_TOYOTA_ALT_BRAKE | Panda.FLAG_TOYOTA_GAS_INTERCEPTOR, + "KIA EV6 2022": Panda.FLAG_HYUNDAI_EV_GAS | Panda.FLAG_HYUNDAI_CANFD_HDA2, + } + + # Migrate safety param base on carState + CP = next((m.carParams for m in lr if m.which() == 'carParams'), None) + assert CP is not None, "carParams message not found" + if CP.carFingerprint in safety_param_migration: + safety_param = safety_param_migration[CP.carFingerprint] + elif len(CP.safetyConfigs): + safety_param = CP.safetyConfigs[0].safetyParam + if CP.safetyConfigs[0].safetyParamDEPRECATED != 0: + safety_param = CP.safetyConfigs[0].safetyParamDEPRECATED + else: + safety_param = CP.safetyParamDEPRECATED + + for msg in lr: + if msg.which() == 'pandaStateDEPRECATED': + new_msg = messaging.new_message('pandaStates', 1) + new_msg.valid = msg.valid + new_msg.logMonoTime = msg.logMonoTime + new_msg.pandaStates[0] = msg.pandaStateDEPRECATED + new_msg.pandaStates[0].safetyParam = safety_param + all_msgs.append(new_msg.as_reader()) + elif msg.which() == 'pandaStates': + new_msg = msg.as_builder() + new_msg.pandaStates[-1].safetyParam = safety_param + all_msgs.append(new_msg.as_reader()) + else: + all_msgs.append(msg) + + return all_msgs + + +def migrate_peripheralState(lr): + if any(msg.which() == "peripheralState" for msg in lr): + return lr + + all_msg = [] + for msg in lr: + all_msg.append(msg) + if msg.which() not in ["pandaStates", "pandaStateDEPRECATED"]: + continue + + new_msg = messaging.new_message("peripheralState") + new_msg.valid = msg.valid + new_msg.logMonoTime = msg.logMonoTime + all_msg.append(new_msg.as_reader()) + + return all_msg + + +def migrate_cameraStates(lr): + all_msgs = [] + frame_to_encode_id = defaultdict(dict) + # just for encodeId fallback mechanism + min_frame_id = defaultdict(lambda: float('inf')) + + for msg in lr: + 
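# first pass: build a frameId -> encodeId (segmentId) mapping for each camera stream +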
if msg.which() not in ["roadEncodeIdx", "wideRoadEncodeIdx", "driverEncodeIdx"]: + continue + + encode_index = getattr(msg, msg.which()) + meta = meta_from_encode_index(msg.which()) + + assert encode_index.segmentId < 1200, f"Encoder index segmentId greater that 1200: {msg.which()} {encode_index.segmentId}" + frame_to_encode_id[meta.camera_state][encode_index.frameId] = encode_index.segmentId + + for msg in lr: + if msg.which() not in ["roadCameraState", "wideRoadCameraState", "driverCameraState"]: + all_msgs.append(msg) + continue + + camera_state = getattr(msg, msg.which()) + min_frame_id[msg.which()] = min(min_frame_id[msg.which()], camera_state.frameId) + + encode_id = frame_to_encode_id[msg.which()].get(camera_state.frameId) + if encode_id is None: + print(f"Missing encoded frame for camera feed {msg.which()} with frameId: {camera_state.frameId}") + if len(frame_to_encode_id[msg.which()]) != 0: + continue + + # fallback mechanism for logs without encodeIdx (e.g. logs from before 2022 with dcamera recording disabled) + # try to fake encode_id by subtracting lowest frameId + encode_id = camera_state.frameId - min_frame_id[msg.which()] + print(f"Faking encodeId to {encode_id} for camera feed {msg.which()} with frameId: {camera_state.frameId}") + + new_msg = messaging.new_message(msg.which()) + new_camera_state = getattr(new_msg, new_msg.which()) + new_camera_state.frameId = encode_id + new_camera_state.encodeId = encode_id + # timestampSof was added later so it might be missing on some old segments + if camera_state.timestampSof == 0 and camera_state.timestampEof > 25000000: + new_camera_state.timestampSof = camera_state.timestampEof - 18000000 + else: + new_camera_state.timestampSof = camera_state.timestampSof + new_camera_state.timestampEof = camera_state.timestampEof + new_msg.logMonoTime = msg.logMonoTime + new_msg.valid = msg.valid + + all_msgs.append(new_msg.as_reader()) + + return all_msgs + + +def migrate_carParams(lr, old_logtime=False): + all_msgs = [] + for msg in lr: + if msg.which() == 'carParams': + CP = messaging.new_message('carParams') + CP.valid = True + CP.carParams = msg.carParams.as_builder() + for car_fw in CP.carParams.carFw: + car_fw.brand = CP.carParams.carName + if old_logtime: + CP.logMonoTime = msg.logMonoTime + msg = CP.as_reader() + all_msgs.append(msg) + + return all_msgs + + +def migrate_sensorEvents(lr, old_logtime=False): + all_msgs = [] + for msg in lr: + if msg.which() != 'sensorEventsDEPRECATED': + all_msgs.append(msg) + continue + + # migrate to split sensor events + for evt in msg.sensorEventsDEPRECATED: + # build new message for each sensor type + sensor_service = '' + if evt.which() == 'acceleration': + sensor_service = 'accelerometer' + elif evt.which() == 'gyro' or evt.which() == 'gyroUncalibrated': + sensor_service = 'gyroscope' + elif evt.which() == 'light' or evt.which() == 'proximity': + sensor_service = 'lightSensor' + elif evt.which() == 'magnetic' or evt.which() == 'magneticUncalibrated': + sensor_service = 'magnetometer' + elif evt.which() == 'temperature': + sensor_service = 'temperatureSensor' + + m = messaging.new_message(sensor_service) + m.valid = True + if old_logtime: + m.logMonoTime = msg.logMonoTime + + m_dat = getattr(m, sensor_service) + m_dat.version = evt.version + m_dat.sensor = evt.sensor + m_dat.type = evt.type + m_dat.source = evt.source + if old_logtime: + m_dat.timestamp = evt.timestamp + setattr(m_dat, evt.which(), getattr(evt, evt.which())) + + all_msgs.append(m.as_reader()) + + return all_msgs diff --git 
a/selfdrive/test/process_replay/model_replay.py b/selfdrive/test/process_replay/model_replay.py new file mode 100644 index 0000000..97b7c7c --- /dev/null +++ b/selfdrive/test/process_replay/model_replay.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python3 +import os +import sys +import time +from collections import defaultdict +from typing import Any + +import cereal.messaging as messaging +from openpilot.common.params import Params +from openpilot.system.hardware import PC +from openpilot.selfdrive.manager.process_config import managed_processes +from openpilot.tools.lib.openpilotci import BASE_URL, get_url +from openpilot.selfdrive.test.process_replay.compare_logs import compare_logs, format_diff +from openpilot.selfdrive.test.process_replay.process_replay import get_process_config, replay_process +from openpilot.system.version import get_commit +from openpilot.tools.lib.framereader import FrameReader +from openpilot.tools.lib.logreader import LogReader +from openpilot.tools.lib.helpers import save_log + +TEST_ROUTE = "2f4452b03ccb98f0|2022-12-03--13-45-30" +SEGMENT = 6 +MAX_FRAMES = 100 if PC else 600 +NAV_FRAMES = 50 + +NO_NAV = "NO_NAV" in os.environ +NO_MODEL = "NO_MODEL" in os.environ +SEND_EXTRA_INPUTS = bool(int(os.getenv("SEND_EXTRA_INPUTS", "0"))) + + +def get_log_fn(ref_commit, test_route): + return f"{test_route}_model_tici_{ref_commit}.bz2" + + +def trim_logs_to_max_frames(logs, max_frames, frs_types, include_all_types): + all_msgs = [] + cam_state_counts = defaultdict(int) + # keep adding messages until cam states are equal to MAX_FRAMES + for msg in sorted(logs, key=lambda m: m.logMonoTime): + all_msgs.append(msg) + if msg.which() in frs_types: + cam_state_counts[msg.which()] += 1 + + if all(cam_state_counts[state] == max_frames for state in frs_types): + break + + if len(include_all_types) != 0: + other_msgs = [m for m in logs if m.which() in include_all_types] + all_msgs.extend(other_msgs) + + return all_msgs + + +def nav_model_replay(lr): + sm = messaging.SubMaster(['navModel', 'navThumbnail', 'mapRenderState']) + pm = messaging.PubMaster(['liveLocationKalman', 'navRoute']) + + nav = [m for m in lr if m.which() == 'navRoute'] + llk = [m for m in lr if m.which() == 'liveLocationKalman'] + assert len(nav) > 0 and len(llk) >= NAV_FRAMES and nav[0].logMonoTime < llk[-NAV_FRAMES].logMonoTime + + log_msgs = [] + try: + assert "MAPBOX_TOKEN" in os.environ + os.environ['MAP_RENDER_TEST_MODE'] = '1' + Params().put_bool('DmModelInitialized', True) + managed_processes['mapsd'].start() + managed_processes['navmodeld'].start() + + # setup position and route + for _ in range(10): + for s in (llk[-NAV_FRAMES], nav[0]): + pm.send(s.which(), s.as_builder().to_bytes()) + sm.update(1000) + if sm.updated['navModel']: + break + time.sleep(1) + + if not sm.updated['navModel']: + raise Exception("no navmodeld outputs, failed to initialize") + + # drain + time.sleep(2) + sm.update(0) + + # run replay + for n in range(len(llk) - NAV_FRAMES, len(llk)): + pm.send(llk[n].which(), llk[n].as_builder().to_bytes()) + m = messaging.recv_one(sm.sock['navThumbnail']) + assert m is not None, f"no navThumbnail, frame={n}" + log_msgs.append(m) + + m = messaging.recv_one(sm.sock['mapRenderState']) + assert m is not None, f"no mapRenderState, frame={n}" + log_msgs.append(m) + + m = messaging.recv_one(sm.sock['navModel']) + assert m is not None, f"no navModel response, frame={n}" + log_msgs.append(m) + finally: + managed_processes['mapsd'].stop() + managed_processes['navmodeld'].stop() + + return log_msgs + + +def 
model_replay(lr, frs): + # modeld is using frame pairs + modeld_logs = trim_logs_to_max_frames(lr, MAX_FRAMES, {"roadCameraState", "wideRoadCameraState"}, {"roadEncodeIdx", "wideRoadEncodeIdx", "carParams"}) + dmodeld_logs = trim_logs_to_max_frames(lr, MAX_FRAMES, {"driverCameraState"}, {"driverEncodeIdx", "carParams"}) + if not SEND_EXTRA_INPUTS: + modeld_logs = [msg for msg in modeld_logs if msg.which() not in ["liveCalibration",]] + dmodeld_logs = [msg for msg in dmodeld_logs if msg.which() not in ["liveCalibration",]] + # initial calibration + cal_msg = next(msg for msg in lr if msg.which() == "liveCalibration").as_builder() + cal_msg.logMonoTime = lr[0].logMonoTime + modeld_logs.insert(0, cal_msg.as_reader()) + dmodeld_logs.insert(0, cal_msg.as_reader()) + + modeld = get_process_config("modeld") + dmonitoringmodeld = get_process_config("dmonitoringmodeld") + + modeld_msgs = replay_process(modeld, modeld_logs, frs) + dmonitoringmodeld_msgs = replay_process(dmonitoringmodeld, dmodeld_logs, frs) + return modeld_msgs + dmonitoringmodeld_msgs + + +if __name__ == "__main__": + update = "--update" in sys.argv + replay_dir = os.path.dirname(os.path.abspath(__file__)) + ref_commit_fn = os.path.join(replay_dir, "model_replay_ref_commit") + + # load logs + lr = list(LogReader(get_url(TEST_ROUTE, SEGMENT))) + frs = { + 'roadCameraState': FrameReader(get_url(TEST_ROUTE, SEGMENT, log_type="fcamera"), readahead=True), + 'driverCameraState': FrameReader(get_url(TEST_ROUTE, SEGMENT, log_type="dcamera"), readahead=True), + 'wideRoadCameraState': FrameReader(get_url(TEST_ROUTE, SEGMENT, log_type="ecamera"), readahead=True) + } + + # Update tile refs + if update: + import urllib + import requests + import threading + import http.server + from openpilot.tools.lib.openpilotci import upload_bytes + os.environ['MAPS_HOST'] = 'http://localhost:5000' + + class HTTPRequestHandler(http.server.BaseHTTPRequestHandler): + def do_GET(self): + assert len(self.path) > 10 # Sanity check on path length + r = requests.get(f'https://api.mapbox.com{self.path}', timeout=30) + upload_bytes(r.content, urllib.parse.urlparse(self.path).path.lstrip('/')) + self.send_response(r.status_code) + self.send_header('Content-type','text/html') + self.end_headers() + self.wfile.write(r.content) + + server = http.server.HTTPServer(("127.0.0.1", 5000), HTTPRequestHandler) + thread = threading.Thread(None, server.serve_forever, daemon=True) + thread.start() + else: + os.environ['MAPS_HOST'] = BASE_URL.rstrip('/') + + log_msgs = [] + # run replays + if not NO_MODEL: + log_msgs += model_replay(lr, frs) + if not NO_NAV: + log_msgs += nav_model_replay(lr) + + # get diff + failed = False + if not update: + with open(ref_commit_fn) as f: + ref_commit = f.read().strip() + log_fn = get_log_fn(ref_commit, TEST_ROUTE) + try: + all_logs = list(LogReader(BASE_URL + log_fn)) + cmp_log = [] + + # logs are ordered based on type: modelV2, driverStateV2, nav messages (navThumbnail, mapRenderState, navModel) + if not NO_MODEL: + model_start_index = next(i for i, m in enumerate(all_logs) if m.which() in ("modelV2", "cameraOdometry")) + cmp_log += all_logs[model_start_index:model_start_index + MAX_FRAMES*2] + dmon_start_index = next(i for i, m in enumerate(all_logs) if m.which() == "driverStateV2") + cmp_log += all_logs[dmon_start_index:dmon_start_index + MAX_FRAMES] + if not NO_NAV: + nav_start_index = next(i for i, m in enumerate(all_logs) if m.which() in ["navThumbnail", "mapRenderState", "navModel"]) + nav_logs = all_logs[nav_start_index:nav_start_index + 
NAV_FRAMES*3] + cmp_log += nav_logs + + ignore = [ + 'logMonoTime', + 'modelV2.frameDropPerc', + 'modelV2.modelExecutionTime', + 'driverStateV2.modelExecutionTime', + 'driverStateV2.dspExecutionTime', + 'navModel.dspExecutionTime', + 'navModel.modelExecutionTime', + 'navThumbnail.timestampEof', + 'mapRenderState.locationMonoTime', + 'mapRenderState.renderTime', + ] + if PC: + ignore += [ + 'modelV2.laneLines.0.t', + 'modelV2.laneLines.1.t', + 'modelV2.laneLines.2.t', + 'modelV2.laneLines.3.t', + 'modelV2.roadEdges.0.t', + 'modelV2.roadEdges.1.t', + ] + # TODO this tolerance is absurdly large + tolerance = 2.0 if PC else None + results: Any = {TEST_ROUTE: {}} + log_paths: Any = {TEST_ROUTE: {"models": {'ref': BASE_URL + log_fn, 'new': log_fn}}} + results[TEST_ROUTE]["models"] = compare_logs(cmp_log, log_msgs, tolerance=tolerance, ignore_fields=ignore) + diff_short, diff_long, failed = format_diff(results, log_paths, ref_commit) + + print(diff_long) + print('-------------\n'*5) + print(diff_short) + with open("model_diff.txt", "w") as f: + f.write(diff_long) + except Exception as e: + print(str(e)) + failed = True + + # upload new refs + if (update or failed) and not PC: + from openpilot.tools.lib.openpilotci import upload_file + + print("Uploading new refs") + + new_commit = get_commit() + log_fn = get_log_fn(new_commit, TEST_ROUTE) + save_log(log_fn, log_msgs) + try: + upload_file(log_fn, os.path.basename(log_fn)) + except Exception as e: + print("failed to upload", e) + + with open(ref_commit_fn, 'w') as f: + f.write(str(new_commit)) + + print("\n\nNew ref commit: ", new_commit) + + sys.exit(int(failed)) diff --git a/selfdrive/test/process_replay/model_replay_ref_commit b/selfdrive/test/process_replay/model_replay_ref_commit new file mode 100644 index 0000000..786c2f2 --- /dev/null +++ b/selfdrive/test/process_replay/model_replay_ref_commit @@ -0,0 +1 @@ +e8b359a82316e6dfce3b6fb0fb9684431bfa0a1b diff --git a/selfdrive/test/process_replay/process_replay.py b/selfdrive/test/process_replay/process_replay.py new file mode 100644 index 0000000..5119be0 --- /dev/null +++ b/selfdrive/test/process_replay/process_replay.py @@ -0,0 +1,800 @@ +#!/usr/bin/env python3 +import os +import time +import copy +import json +import heapq +import signal +import platform +from collections import OrderedDict +from dataclasses import dataclass, field +from typing import Any +from collections.abc import Callable, Iterable +from tqdm import tqdm +import capnp + +import cereal.messaging as messaging +from cereal import car +from cereal.services import SERVICE_LIST +from cereal.visionipc import VisionIpcServer, get_endpoint_name as vipc_get_endpoint_name +from openpilot.common.params import Params +from openpilot.common.prefix import OpenpilotPrefix +from openpilot.common.timeout import Timeout +from openpilot.common.realtime import DT_CTRL +from panda.python import ALTERNATIVE_EXPERIENCE +from openpilot.selfdrive.car.car_helpers import get_car, interfaces +from openpilot.selfdrive.manager.process_config import managed_processes +from openpilot.selfdrive.test.process_replay.vision_meta import meta_from_camera_state, available_streams +from openpilot.selfdrive.test.process_replay.migration import migrate_all +from openpilot.selfdrive.test.process_replay.capture import ProcessOutputCapture +from openpilot.tools.lib.logreader import LogIterable +from openpilot.tools.lib.framereader import BaseFrameReader + +# Numpy gives different results based on CPU features after version 19 +NUMPY_TOLERANCE = 1e-7 +PROC_REPLAY_DIR = 
os.path.dirname(os.path.abspath(__file__)) +FAKEDATA = os.path.join(PROC_REPLAY_DIR, "fakedata/") + +class DummySocket: + def __init__(self): + self.data: list[bytes] = [] + + def receive(self, non_blocking: bool = False) -> bytes | None: + if non_blocking: + return None + + return self.data.pop() + + def send(self, data: bytes): + self.data.append(data) + +class LauncherWithCapture: + def __init__(self, capture: ProcessOutputCapture, launcher: Callable): + self.capture = capture + self.launcher = launcher + + def __call__(self, *args, **kwargs): + self.capture.link_with_current_proc() + self.launcher(*args, **kwargs) + + +class ReplayContext: + def __init__(self, cfg): + self.proc_name = cfg.proc_name + self.pubs = cfg.pubs + self.main_pub = cfg.main_pub + self.main_pub_drained = cfg.main_pub_drained + self.unlocked_pubs = cfg.unlocked_pubs + assert(len(self.pubs) != 0 or self.main_pub is not None) + + def __enter__(self): + self.open_context() + + return self + + def __exit__(self, exc_type, exc_obj, exc_tb): + self.close_context() + + def open_context(self): + messaging.toggle_fake_events(True) + messaging.set_fake_prefix(self.proc_name) + + if self.main_pub is None: + self.events = OrderedDict() + pubs_with_events = [pub for pub in self.pubs if pub not in self.unlocked_pubs] + for pub in pubs_with_events: + self.events[pub] = messaging.fake_event_handle(pub, enable=True) + else: + self.events = {self.main_pub: messaging.fake_event_handle(self.main_pub, enable=True)} + + def close_context(self): + del self.events + + messaging.toggle_fake_events(False) + messaging.delete_fake_prefix() + + @property + def all_recv_called_events(self): + return [man.recv_called_event for man in self.events.values()] + + @property + def all_recv_ready_events(self): + return [man.recv_ready_event for man in self.events.values()] + + def send_sync(self, pm, endpoint, dat): + self.events[endpoint].recv_called_event.wait() + self.events[endpoint].recv_called_event.clear() + pm.send(endpoint, dat) + self.events[endpoint].recv_ready_event.set() + + def unlock_sockets(self): + expected_sets = len(self.events) + while expected_sets > 0: + index = messaging.wait_for_one_event(self.all_recv_called_events) + self.all_recv_called_events[index].clear() + self.all_recv_ready_events[index].set() + expected_sets -= 1 + + def wait_for_recv_called(self): + messaging.wait_for_one_event(self.all_recv_called_events) + + def wait_for_next_recv(self, trigger_empty_recv): + index = messaging.wait_for_one_event(self.all_recv_called_events) + if self.main_pub is not None and self.main_pub_drained and trigger_empty_recv: + self.all_recv_called_events[index].clear() + self.all_recv_ready_events[index].set() + self.all_recv_called_events[index].wait() + + +@dataclass +class ProcessConfig: + proc_name: str + pubs: list[str] + subs: list[str] + ignore: list[str] + config_callback: Callable | None = None + init_callback: Callable | None = None + should_recv_callback: Callable | None = None + tolerance: float | None = None + processing_time: float = 0.001 + timeout: int = 30 + simulation: bool = True + main_pub: str | None = None + main_pub_drained: bool = True + vision_pubs: list[str] = field(default_factory=list) + ignore_alive_pubs: list[str] = field(default_factory=list) + unlocked_pubs: list[str] = field(default_factory=list) + + +class ProcessContainer: + def __init__(self, cfg: ProcessConfig): + self.prefix = OpenpilotPrefix(clean_dirs_on_exit=False) + self.cfg = copy.deepcopy(cfg) + self.process = 
copy.deepcopy(managed_processes[cfg.proc_name]) + self.msg_queue: list[capnp._DynamicStructReader] = [] + self.cnt = 0 + self.pm: messaging.PubMaster | None = None + self.sockets: list[messaging.SubSocket] | None = None + self.rc: ReplayContext | None = None + self.vipc_server: VisionIpcServer | None = None + self.environ_config: dict[str, Any] | None = None + self.capture: ProcessOutputCapture | None = None + + @property + def has_empty_queue(self) -> bool: + return len(self.msg_queue) == 0 + + @property + def pubs(self) -> list[str]: + return self.cfg.pubs + + @property + def subs(self) -> list[str]: + return self.cfg.subs + + def _clean_env(self): + for k in self.environ_config.keys(): + if k in os.environ: + del os.environ[k] + + for k in ["PROC_NAME", "SIMULATION"]: + if k in os.environ: + del os.environ[k] + + def _setup_env(self, params_config: dict[str, Any], environ_config: dict[str, Any]): + for k, v in environ_config.items(): + if len(v) != 0: + os.environ[k] = v + elif k in os.environ: + del os.environ[k] + + os.environ["PROC_NAME"] = self.cfg.proc_name + if self.cfg.simulation: + os.environ["SIMULATION"] = "1" + elif "SIMULATION" in os.environ: + del os.environ["SIMULATION"] + + params = Params() + for k, v in params_config.items(): + if isinstance(v, bool): + params.put_bool(k, v) + else: + params.put(k, v) + + self.environ_config = environ_config + + def _setup_vision_ipc(self, all_msgs: LogIterable, frs: dict[str, Any]): + assert len(self.cfg.vision_pubs) != 0 + + vipc_server = VisionIpcServer("camerad") + streams_metas = available_streams(all_msgs) + for meta in streams_metas: + if meta.camera_state in self.cfg.vision_pubs: + frame_size = (frs[meta.camera_state].w, frs[meta.camera_state].h) + vipc_server.create_buffers(meta.stream, 2, False, *frame_size) + vipc_server.start_listener() + + self.vipc_server = vipc_server + self.cfg.vision_pubs = [meta.camera_state for meta in streams_metas if meta.camera_state in self.cfg.vision_pubs] + + def _start_process(self): + if self.capture is not None: + self.process.launcher = LauncherWithCapture(self.capture, self.process.launcher) + self.process.prepare() + self.process.start() + + def start( + self, params_config: dict[str, Any], environ_config: dict[str, Any], + all_msgs: LogIterable, frs: dict[str, BaseFrameReader] | None, + fingerprint: str | None, capture_output: bool + ): + with self.prefix as p: + self._setup_env(params_config, environ_config) + + if self.cfg.config_callback is not None: + params = Params() + self.cfg.config_callback(params, self.cfg, all_msgs) + + self.rc = ReplayContext(self.cfg) + self.rc.open_context() + + self.pm = messaging.PubMaster(self.cfg.pubs) + self.sockets = [messaging.sub_sock(s, timeout=100) for s in self.cfg.subs] + + if len(self.cfg.vision_pubs) != 0: + assert frs is not None + self._setup_vision_ipc(all_msgs, frs) + assert self.vipc_server is not None + + if capture_output: + self.capture = ProcessOutputCapture(self.cfg.proc_name, p.prefix) + + self._start_process() + + if self.cfg.init_callback is not None: + self.cfg.init_callback(self.rc, self.pm, all_msgs, fingerprint) + + # wait for process to startup + with Timeout(10, error_msg=f"timed out waiting for process to start: {repr(self.cfg.proc_name)}"): + while not all(self.pm.all_readers_updated(s) for s in self.cfg.pubs if s not in self.cfg.ignore_alive_pubs): + time.sleep(0) + + def stop(self): + with self.prefix: + self.process.signal(signal.SIGKILL) + self.process.stop() + self.rc.close_context() + self.prefix.clean_dirs() + 
self._clean_env() + + def run_step(self, msg: capnp._DynamicStructReader, frs: dict[str, BaseFrameReader] | None) -> list[capnp._DynamicStructReader]: + assert self.rc and self.pm and self.sockets and self.process.proc + + output_msgs = [] + with self.prefix, Timeout(self.cfg.timeout, error_msg=f"timed out testing process {repr(self.cfg.proc_name)}"): + end_of_cycle = True + if self.cfg.should_recv_callback is not None: + end_of_cycle = self.cfg.should_recv_callback(msg, self.cfg, self.cnt) + + self.msg_queue.append(msg) + if end_of_cycle: + self.rc.wait_for_recv_called() + + # call recv to let sub-sockets reconnect, after we know the process is ready + if self.cnt == 0: + for s in self.sockets: + messaging.recv_one_or_none(s) + + # empty recv on drained pub indicates the end of messages, only do that if there're any + trigger_empty_recv = False + if self.cfg.main_pub and self.cfg.main_pub_drained: + trigger_empty_recv = next((True for m in self.msg_queue if m.which() == self.cfg.main_pub), False) + + for m in self.msg_queue: + self.pm.send(m.which(), m.as_builder()) + # send frames if needed + if self.vipc_server is not None and m.which() in self.cfg.vision_pubs: + camera_state = getattr(m, m.which()) + camera_meta = meta_from_camera_state(m.which()) + assert frs is not None + img = frs[m.which()].get(camera_state.frameId, pix_fmt="nv12")[0] + self.vipc_server.send(camera_meta.stream, img.flatten().tobytes(), + camera_state.frameId, camera_state.timestampSof, camera_state.timestampEof) + self.msg_queue = [] + + self.rc.unlock_sockets() + self.rc.wait_for_next_recv(trigger_empty_recv) + + for socket in self.sockets: + ms = messaging.drain_sock(socket) + for m in ms: + m = m.as_builder() + m.logMonoTime = msg.logMonoTime + int(self.cfg.processing_time * 1e9) + output_msgs.append(m.as_reader()) + self.cnt += 1 + assert self.process.proc.is_alive() + + return output_msgs + + +def controlsd_fingerprint_callback(rc, pm, msgs, fingerprint): + print("start fingerprinting") + params = Params() + canmsgs = [msg for msg in msgs if msg.which() == "can"][:300] + + # controlsd expects one arbitrary can and pandaState + rc.send_sync(pm, "can", messaging.new_message("can", 1)) + pm.send("pandaStates", messaging.new_message("pandaStates", 1)) + rc.send_sync(pm, "can", messaging.new_message("can", 1)) + rc.wait_for_next_recv(True) + + # fingerprinting is done, when CarParams is set + while params.get("CarParams") is None: + if len(canmsgs) == 0: + raise ValueError("Fingerprinting failed. Run out of can msgs") + + m = canmsgs.pop(0) + rc.send_sync(pm, "can", m.as_builder().to_bytes()) + rc.wait_for_next_recv(False) + + +def get_car_params_callback(rc, pm, msgs, fingerprint): + params = Params() + if fingerprint: + CarInterface, _, _ = interfaces[fingerprint] + CP = CarInterface.get_non_essential_params(fingerprint) + else: + can = DummySocket() + sendcan = DummySocket() + + canmsgs = [msg for msg in msgs if msg.which() == "can"] + has_cached_cp = params.get("CarParamsCache") is not None + assert len(canmsgs) != 0, "CAN messages are required for fingerprinting" + assert os.environ.get("SKIP_FW_QUERY", False) or has_cached_cp, \ + "CarParamsCache is required for fingerprinting. Make sure to keep carParams msgs in the logs." 
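+ # replay up to 300 logged CAN messages through the dummy sockets so get_car() can fingerprint the platform offline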
+ + for m in canmsgs[:300]: + can.send(m.as_builder().to_bytes()) + _, CP = get_car(can, sendcan, Params().get_bool("ExperimentalLongitudinalEnabled")) + params.put("CarParams", CP.to_bytes()) + return CP + + +def controlsd_rcv_callback(msg, cfg, frame): + # no sendcan until controlsd is initialized + if msg.which() != "can": + return False + + socks = [ + s for s in cfg.subs if + frame % int(SERVICE_LIST[msg.which()].frequency / SERVICE_LIST[s].frequency) == 0 + ] + if "sendcan" in socks and (frame - 1) < 2000: + socks.remove("sendcan") + return len(socks) > 0 + + +def calibration_rcv_callback(msg, cfg, frame): + # calibrationd publishes 1 calibrationData every 5 cameraOdometry packets. + # should_recv always true to increment frame + return (frame - 1) == 0 or msg.which() == 'cameraOdometry' + + +def torqued_rcv_callback(msg, cfg, frame): + # should_recv always true to increment frame + return (frame - 1) == 0 or msg.which() == 'liveLocationKalman' + + +def dmonitoringmodeld_rcv_callback(msg, cfg, frame): + return msg.which() == "driverCameraState" + + +class ModeldCameraSyncRcvCallback: + def __init__(self): + self.road_present = False + self.wide_road_present = False + self.is_dual_camera = True + + def __call__(self, msg, cfg, frame): + self.is_dual_camera = len(cfg.vision_pubs) == 2 + if msg.which() == "roadCameraState": + self.road_present = True + elif msg.which() == "wideRoadCameraState": + self.wide_road_present = True + + if self.road_present and self.wide_road_present: + self.road_present, self.wide_road_present = False, False + return True + elif self.road_present and not self.is_dual_camera: + self.road_present = False + return True + else: + return False + + +class MessageBasedRcvCallback: + def __init__(self, trigger_msg_type): + self.trigger_msg_type = trigger_msg_type + + def __call__(self, msg, cfg, frame): + return msg.which() == self.trigger_msg_type + + +class FrequencyBasedRcvCallback: + def __init__(self, trigger_msg_type): + self.trigger_msg_type = trigger_msg_type + + def __call__(self, msg, cfg, frame): + if msg.which() != self.trigger_msg_type: + return False + + resp_sockets = [ + s for s in cfg.subs + if frame % max(1, int(SERVICE_LIST[msg.which()].frequency / SERVICE_LIST[s].frequency)) == 0 + ] + return bool(len(resp_sockets)) + + +def controlsd_config_callback(params, cfg, lr): + controlsState = None + initialized = False + for msg in lr: + if msg.which() == "controlsState": + controlsState = msg.controlsState + if initialized: + break + elif msg.which() == "onroadEvents": + initialized = car.CarEvent.EventName.controlsInitializing not in [e.name for e in msg.onroadEvents] + + assert controlsState is not None and initialized, "controlsState never initialized" + params.put("ReplayControlsState", controlsState.as_builder().to_bytes()) + + +def locationd_config_pubsub_callback(params, cfg, lr): + ublox = params.get_bool("UbloxAvailable") + sub_keys = ({"gpsLocation", } if ublox else {"gpsLocationExternal", }) + + cfg.pubs = set(cfg.pubs) - sub_keys + + +CONFIGS = [ + ProcessConfig( + proc_name="controlsd", + pubs=[ + "can", "deviceState", "pandaStates", "peripheralState", "liveCalibration", "driverMonitoringState", + "longitudinalPlan", "liveLocationKalman", "liveParameters", "radarState", + "modelV2", "driverCameraState", "roadCameraState", "wideRoadCameraState", "managerState", + "testJoystick", "liveTorqueParameters", "accelerometer", "gyroscope" + ], + subs=["controlsState", "carState", "carControl", "sendcan", "onroadEvents", "carParams"], + 
ignore=["logMonoTime", "controlsState.startMonoTime", "controlsState.cumLagMs"], + config_callback=controlsd_config_callback, + init_callback=controlsd_fingerprint_callback, + should_recv_callback=controlsd_rcv_callback, + tolerance=NUMPY_TOLERANCE, + processing_time=0.004, + main_pub="can", + ), + ProcessConfig( + proc_name="radard", + pubs=["can", "carState", "modelV2"], + subs=["radarState", "liveTracks"], + ignore=["logMonoTime", "radarState.cumLagMs"], + init_callback=get_car_params_callback, + should_recv_callback=MessageBasedRcvCallback("can"), + main_pub="can", + ), + ProcessConfig( + proc_name="plannerd", + pubs=["modelV2", "carControl", "carState", "controlsState", "radarState"], + subs=["longitudinalPlan", "uiPlan"], + ignore=["logMonoTime", "longitudinalPlan.processingDelay", "longitudinalPlan.solverExecutionTime"], + init_callback=get_car_params_callback, + should_recv_callback=FrequencyBasedRcvCallback("modelV2"), + tolerance=NUMPY_TOLERANCE, + ), + ProcessConfig( + proc_name="calibrationd", + pubs=["carState", "cameraOdometry", "carParams"], + subs=["liveCalibration"], + ignore=["logMonoTime"], + should_recv_callback=calibration_rcv_callback, + ), + ProcessConfig( + proc_name="dmonitoringd", + pubs=["driverStateV2", "liveCalibration", "carState", "modelV2", "controlsState"], + subs=["driverMonitoringState"], + ignore=["logMonoTime"], + should_recv_callback=FrequencyBasedRcvCallback("driverStateV2"), + tolerance=NUMPY_TOLERANCE, + ), + ProcessConfig( + proc_name="locationd", + pubs=[ + "cameraOdometry", "accelerometer", "gyroscope", "gpsLocationExternal", + "liveCalibration", "carState", "gpsLocation" + ], + subs=["liveLocationKalman"], + ignore=["logMonoTime"], + config_callback=locationd_config_pubsub_callback, + tolerance=NUMPY_TOLERANCE, + ), + ProcessConfig( + proc_name="paramsd", + pubs=["liveLocationKalman", "carState"], + subs=["liveParameters"], + ignore=["logMonoTime"], + init_callback=get_car_params_callback, + should_recv_callback=FrequencyBasedRcvCallback("liveLocationKalman"), + tolerance=NUMPY_TOLERANCE, + processing_time=0.004, + ), + ProcessConfig( + proc_name="ubloxd", + pubs=["ubloxRaw"], + subs=["ubloxGnss", "gpsLocationExternal"], + ignore=["logMonoTime"], + ), + ProcessConfig( + proc_name="torqued", + pubs=["liveLocationKalman", "carState", "carControl"], + subs=["liveTorqueParameters"], + ignore=["logMonoTime"], + init_callback=get_car_params_callback, + should_recv_callback=torqued_rcv_callback, + tolerance=NUMPY_TOLERANCE, + ), + ProcessConfig( + proc_name="modeld", + pubs=["roadCameraState", "wideRoadCameraState", "liveCalibration", "driverMonitoringState"], + subs=["modelV2", "cameraOdometry"], + ignore=["logMonoTime", "modelV2.frameDropPerc", "modelV2.modelExecutionTime"], + should_recv_callback=ModeldCameraSyncRcvCallback(), + tolerance=NUMPY_TOLERANCE, + processing_time=0.020, + main_pub=vipc_get_endpoint_name("camerad", meta_from_camera_state("roadCameraState").stream), + main_pub_drained=False, + vision_pubs=["roadCameraState", "wideRoadCameraState"], + ignore_alive_pubs=["wideRoadCameraState"], + init_callback=get_car_params_callback, + ), + ProcessConfig( + proc_name="dmonitoringmodeld", + pubs=["liveCalibration", "driverCameraState"], + subs=["driverStateV2"], + ignore=["logMonoTime", "driverStateV2.modelExecutionTime", "driverStateV2.dspExecutionTime"], + should_recv_callback=dmonitoringmodeld_rcv_callback, + tolerance=NUMPY_TOLERANCE, + processing_time=0.020, + main_pub=vipc_get_endpoint_name("camerad", 
meta_from_camera_state("driverCameraState").stream), + main_pub_drained=False, + vision_pubs=["driverCameraState"], + ignore_alive_pubs=["driverCameraState"], + ), +] + + +def get_process_config(name: str) -> ProcessConfig: + try: + return copy.deepcopy(next(c for c in CONFIGS if c.proc_name == name)) + except StopIteration as ex: + raise Exception(f"Cannot find process config with name: {name}") from ex + + +def get_custom_params_from_lr(lr: LogIterable, initial_state: str = "first") -> dict[str, Any]: + """ + Use this to get custom params dict based on provided logs. + Useful when replaying following processes: calibrationd, paramsd, torqued + The params may be based on first or last message of given type (carParams, liveCalibration, liveParameters, liveTorqueParameters) in the logs. + """ + + car_params = [m for m in lr if m.which() == "carParams"] + live_calibration = [m for m in lr if m.which() == "liveCalibration"] + live_parameters = [m for m in lr if m.which() == "liveParameters"] + live_torque_parameters = [m for m in lr if m.which() == "liveTorqueParameters"] + + assert initial_state in ["first", "last"] + msg_index = 0 if initial_state == "first" else -1 + + assert len(car_params) > 0, "carParams required for initial state of liveParameters and CarParamsPrevRoute" + CP = car_params[msg_index].carParams + + custom_params = { + "CarParamsPrevRoute": CP.as_builder().to_bytes() + } + + if len(live_calibration) > 0: + custom_params["CalibrationParams"] = live_calibration[msg_index].as_builder().to_bytes() + if len(live_parameters) > 0: + lp_dict = live_parameters[msg_index].to_dict() + lp_dict["carFingerprint"] = CP.carFingerprint + custom_params["LiveParameters"] = json.dumps(lp_dict) + if len(live_torque_parameters) > 0: + custom_params["LiveTorqueParameters"] = live_torque_parameters[msg_index].as_builder().to_bytes() + + return custom_params + + +def replay_process_with_name(name: str | Iterable[str], lr: LogIterable, *args, **kwargs) -> list[capnp._DynamicStructReader]: + if isinstance(name, str): + cfgs = [get_process_config(name)] + elif isinstance(name, Iterable): + cfgs = [get_process_config(n) for n in name] + else: + raise ValueError("name must be str or collections of strings") + + return replay_process(cfgs, lr, *args, **kwargs) + + +def replay_process( + cfg: ProcessConfig | Iterable[ProcessConfig], lr: LogIterable, frs: dict[str, BaseFrameReader] = None, + fingerprint: str = None, return_all_logs: bool = False, custom_params: dict[str, Any] = None, + captured_output_store: dict[str, dict[str, str]] = None, disable_progress: bool = False +) -> list[capnp._DynamicStructReader]: + if isinstance(cfg, Iterable): + cfgs = list(cfg) + else: + cfgs = [cfg] + + all_msgs = migrate_all(lr, old_logtime=True, + manager_states=True, + panda_states=any("pandaStates" in cfg.pubs for cfg in cfgs), + camera_states=any(len(cfg.vision_pubs) != 0 for cfg in cfgs)) + process_logs = _replay_multi_process(cfgs, all_msgs, frs, fingerprint, custom_params, captured_output_store, disable_progress) + + if return_all_logs: + keys = {m.which() for m in process_logs} + modified_logs = [m for m in all_msgs if m.which() not in keys] + modified_logs.extend(process_logs) + modified_logs.sort(key=lambda m: int(m.logMonoTime)) + log_msgs = modified_logs + else: + log_msgs = process_logs + + return log_msgs + + +def _replay_multi_process( + cfgs: list[ProcessConfig], lr: LogIterable, frs: dict[str, BaseFrameReader] | None, fingerprint: str | None, + custom_params: dict[str, Any] | None, 
captured_output_store: dict[str, dict[str, str]] | None, disable_progress: bool +) -> list[capnp._DynamicStructReader]: + if fingerprint is not None: + params_config = generate_params_config(lr=lr, fingerprint=fingerprint, custom_params=custom_params) + env_config = generate_environ_config(fingerprint=fingerprint) + else: + CP = next((m.carParams for m in lr if m.which() == "carParams"), None) + params_config = generate_params_config(lr=lr, CP=CP, custom_params=custom_params) + env_config = generate_environ_config(CP=CP) + + # validate frs and vision pubs + all_vision_pubs = [pub for cfg in cfgs for pub in cfg.vision_pubs] + if len(all_vision_pubs) != 0: + assert frs is not None, "frs must be provided when replaying process using vision streams" + assert all(meta_from_camera_state(st) is not None for st in all_vision_pubs), \ + f"undefined vision stream spotted, probably misconfigured process: (vision pubs: {all_vision_pubs})" + required_vision_pubs = {m.camera_state for m in available_streams(lr)} & set(all_vision_pubs) + assert all(st in frs for st in required_vision_pubs), f"frs for this process must contain following vision streams: {required_vision_pubs}" + + all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime) + log_msgs = [] + try: + containers = [] + for cfg in cfgs: + container = ProcessContainer(cfg) + containers.append(container) + container.start(params_config, env_config, all_msgs, frs, fingerprint, captured_output_store is not None) + + all_pubs = {pub for container in containers for pub in container.pubs} + all_subs = {sub for container in containers for sub in container.subs} + lr_pubs = all_pubs - all_subs + pubs_to_containers = {pub: [container for container in containers if pub in container.pubs] for pub in all_pubs} + + pub_msgs = [msg for msg in all_msgs if msg.which() in lr_pubs] + # external queue for messages taken from logs; internal queue for messages generated by processes, which will be republished + external_pub_queue: list[capnp._DynamicStructReader] = pub_msgs.copy() + internal_pub_queue: list[capnp._DynamicStructReader] = [] + # heap for maintaining the order of messages generated by processes, where each element: (logMonoTime, index in internal_pub_queue) + internal_pub_index_heap: list[tuple[int, int]] = [] + + pbar = tqdm(total=len(external_pub_queue), disable=disable_progress) + while len(external_pub_queue) != 0 or (len(internal_pub_index_heap) != 0 and not all(c.has_empty_queue for c in containers)): + if len(internal_pub_index_heap) == 0 or (len(external_pub_queue) != 0 and external_pub_queue[0].logMonoTime < internal_pub_index_heap[0][0]): + msg = external_pub_queue.pop(0) + pbar.update(1) + else: + _, index = heapq.heappop(internal_pub_index_heap) + msg = internal_pub_queue[index] + + target_containers = pubs_to_containers[msg.which()] + for container in target_containers: + output_msgs = container.run_step(msg, frs) + for m in output_msgs: + if m.which() in all_pubs: + internal_pub_queue.append(m) + heapq.heappush(internal_pub_index_heap, (m.logMonoTime, len(internal_pub_queue) - 1)) + log_msgs.extend(output_msgs) + finally: + for container in containers: + container.stop() + if captured_output_store is not None: + assert container.capture is not None + out, err = container.capture.read_outerr() + captured_output_store[container.cfg.proc_name] = {"out": out, "err": err} + + return log_msgs + + +def generate_params_config(lr=None, CP=None, fingerprint=None, custom_params=None) -> dict[str, Any]: + params_dict = { + "OpenpilotEnabledToggle": True, 
+ "DisengageOnAccelerator": True, + "DisableLogging": False, + } + + if custom_params is not None: + params_dict.update(custom_params) + if lr is not None: + has_ublox = any(msg.which() == "ubloxGnss" for msg in lr) + params_dict["UbloxAvailable"] = has_ublox + is_rhd = next((msg.driverMonitoringState.isRHD for msg in lr if msg.which() == "driverMonitoringState"), False) + params_dict["IsRhdDetected"] = is_rhd + + if CP is not None: + if CP.alternativeExperience == ALTERNATIVE_EXPERIENCE.DISABLE_DISENGAGE_ON_GAS: + params_dict["DisengageOnAccelerator"] = False + + if fingerprint is None: + if CP.fingerprintSource == "fw": + params_dict["CarParamsCache"] = CP.as_builder().to_bytes() + + if CP.openpilotLongitudinalControl: + params_dict["ExperimentalLongitudinalEnabled"] = True + + if CP.notCar: + params_dict["JoystickDebugMode"] = True + + return params_dict + + +def generate_environ_config(CP=None, fingerprint=None, log_dir=None) -> dict[str, Any]: + environ_dict = {} + if platform.system() != "Darwin": + environ_dict["PARAMS_ROOT"] = "/dev/shm/params" + if log_dir is not None: + environ_dict["LOG_ROOT"] = log_dir + + environ_dict["REPLAY"] = "1" + + # Regen or python process + if CP is not None and fingerprint is None: + if CP.fingerprintSource == "fw": + environ_dict['SKIP_FW_QUERY'] = "" + environ_dict['FINGERPRINT'] = "" + else: + environ_dict['SKIP_FW_QUERY'] = "1" + environ_dict['FINGERPRINT'] = CP.carFingerprint + elif fingerprint is not None: + environ_dict['SKIP_FW_QUERY'] = "1" + environ_dict['FINGERPRINT'] = fingerprint + else: + environ_dict["SKIP_FW_QUERY"] = "" + environ_dict["FINGERPRINT"] = "" + + return environ_dict + + +def check_openpilot_enabled(msgs: LogIterable) -> bool: + cur_enabled_count = 0 + max_enabled_count = 0 + for msg in msgs: + if msg.which() == "carParams": + if msg.carParams.notCar: + return True + elif msg.which() == "controlsState": + if msg.controlsState.active: + cur_enabled_count += 1 + else: + cur_enabled_count = 0 + max_enabled_count = max(max_enabled_count, cur_enabled_count) + + return max_enabled_count > int(10. 
/ DT_CTRL) diff --git a/selfdrive/test/process_replay/ref_commit b/selfdrive/test/process_replay/ref_commit new file mode 100644 index 0000000..46d6842 --- /dev/null +++ b/selfdrive/test/process_replay/ref_commit @@ -0,0 +1 @@ +43efe1cf08cba8c86bc1ae8234b3d3d084a40e5d diff --git a/selfdrive/test/process_replay/regen.py b/selfdrive/test/process_replay/regen.py new file mode 100644 index 0000000..8e88220 --- /dev/null +++ b/selfdrive/test/process_replay/regen.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python3 +import os +import argparse +import time +import capnp +import numpy as np + +from typing import Any +from collections.abc import Iterable + +from openpilot.selfdrive.test.process_replay.process_replay import CONFIGS, FAKEDATA, ProcessConfig, replay_process, get_process_config, \ + check_openpilot_enabled, get_custom_params_from_lr +from openpilot.selfdrive.test.process_replay.vision_meta import DRIVER_FRAME_SIZES +from openpilot.selfdrive.test.update_ci_routes import upload_route +from openpilot.tools.lib.route import Route +from openpilot.tools.lib.framereader import FrameReader, BaseFrameReader, FrameType +from openpilot.tools.lib.logreader import LogReader, LogIterable +from openpilot.tools.lib.helpers import save_log + + +class DummyFrameReader(BaseFrameReader): + def __init__(self, w: int, h: int, frame_count: int, pix_val: int): + self.pix_val = pix_val + self.w, self.h = w, h + self.frame_count = frame_count + self.frame_type = FrameType.raw + + def get(self, idx, count=1, pix_fmt="yuv420p"): + if pix_fmt == "rgb24": + shape = (self.h, self.w, 3) + elif pix_fmt == "nv12" or pix_fmt == "yuv420p": + shape = (int((self.h * self.w) * 3 / 2),) + else: + raise NotImplementedError + + return [np.full(shape, self.pix_val, dtype=np.uint8) for _ in range(count)] + + @staticmethod + def zero_dcamera(): + return DummyFrameReader(*DRIVER_FRAME_SIZES["tici"], 1200, 0) + + +def regen_segment( + lr: LogIterable, frs: dict[str, Any] = None, + processes: Iterable[ProcessConfig] = CONFIGS, disable_tqdm: bool = False +) -> list[capnp._DynamicStructReader]: + all_msgs = sorted(lr, key=lambda m: m.logMonoTime) + custom_params = get_custom_params_from_lr(all_msgs) + + print("Replayed processes:", [p.proc_name for p in processes]) + print("\n\n", "*"*30, "\n\n", sep="") + + output_logs = replay_process(processes, all_msgs, frs, return_all_logs=True, custom_params=custom_params, disable_progress=disable_tqdm) + + return output_logs + + +def setup_data_readers( + route: str, sidx: int, use_route_meta: bool, + needs_driver_cam: bool = True, needs_road_cam: bool = True, dummy_driver_cam: bool = False +) -> tuple[LogReader, dict[str, Any]]: + if use_route_meta: + r = Route(route) + lr = LogReader(r.log_paths()[sidx]) + frs = {} + if needs_road_cam and len(r.camera_paths()) > sidx and r.camera_paths()[sidx] is not None: + frs['roadCameraState'] = FrameReader(r.camera_paths()[sidx]) + if needs_road_cam and len(r.ecamera_paths()) > sidx and r.ecamera_paths()[sidx] is not None: + frs['wideRoadCameraState'] = FrameReader(r.ecamera_paths()[sidx]) + if needs_driver_cam: + if dummy_driver_cam: + frs['driverCameraState'] = DummyFrameReader.zero_dcamera() + elif len(r.dcamera_paths()) > sidx and r.dcamera_paths()[sidx] is not None: + device_type = next(str(msg.initData.deviceType) for msg in lr if msg.which() == "initData") + assert device_type != "neo", "Driver camera not supported on neo segments. Use dummy dcamera." 
+ frs['driverCameraState'] = FrameReader(r.dcamera_paths()[sidx]) + else: + lr = LogReader(f"cd:/{route.replace('|', '/')}/{sidx}/rlog.bz2") + frs = {} + if needs_road_cam: + frs['roadCameraState'] = FrameReader(f"cd:/{route.replace('|', '/')}/{sidx}/fcamera.hevc") + if next((True for m in lr if m.which() == "wideRoadCameraState"), False): + frs['wideRoadCameraState'] = FrameReader(f"cd:/{route.replace('|', '/')}/{sidx}/ecamera.hevc") + if needs_driver_cam: + if dummy_driver_cam: + frs['driverCameraState'] = DummyFrameReader.zero_dcamera() + else: + device_type = next(str(msg.initData.deviceType) for msg in lr if msg.which() == "initData") + assert device_type != "neo", "Driver camera not supported on neo segments. Use dummy dcamera." + frs['driverCameraState'] = FrameReader(f"cd:/{route.replace('|', '/')}/{sidx}/dcamera.hevc") + + return lr, frs + + +def regen_and_save( + route: str, sidx: int, processes: str | Iterable[str] = "all", outdir: str = FAKEDATA, + upload: bool = False, use_route_meta: bool = False, disable_tqdm: bool = False, dummy_driver_cam: bool = False +) -> str: + if not isinstance(processes, str) and not hasattr(processes, "__iter__"): + raise ValueError("whitelist_proc must be a string or iterable") + + if processes != "all": + if isinstance(processes, str): + raise ValueError(f"Invalid value for processes: {processes}") + + replayed_processes = [] + for d in processes: + cfg = get_process_config(d) + replayed_processes.append(cfg) + else: + replayed_processes = CONFIGS + + all_vision_pubs = {pub for cfg in replayed_processes for pub in cfg.vision_pubs} + lr, frs = setup_data_readers(route, sidx, use_route_meta, + needs_driver_cam="driverCameraState" in all_vision_pubs, + needs_road_cam="roadCameraState" in all_vision_pubs or "wideRoadCameraState" in all_vision_pubs, + dummy_driver_cam=dummy_driver_cam) + output_logs = regen_segment(lr, frs, replayed_processes, disable_tqdm=disable_tqdm) + + log_dir = os.path.join(outdir, time.strftime("%Y-%m-%d--%H-%M-%S--0", time.gmtime())) + rel_log_dir = os.path.relpath(log_dir) + rpath = os.path.join(log_dir, "rlog.bz2") + + os.makedirs(log_dir) + save_log(rpath, output_logs, compress=True) + + print("\n\n", "*"*30, "\n\n", sep="") + print("New route:", rel_log_dir, "\n") + + if not check_openpilot_enabled(output_logs): + raise Exception("Route did not engage for long enough") + + if upload: + upload_route(rel_log_dir) + + return rel_log_dir + + +if __name__ == "__main__": + def comma_separated_list(string): + return string.split(",") + + all_procs = [p.proc_name for p in CONFIGS] + parser = argparse.ArgumentParser(description="Generate new segments from old ones") + parser.add_argument("--upload", action="store_true", help="Upload the new segment to the CI bucket") + parser.add_argument("--outdir", help="log output dir", default=FAKEDATA) + parser.add_argument("--dummy-dcamera", action='store_true', help="Use dummy blank driver camera") + parser.add_argument("--whitelist-procs", type=comma_separated_list, default=all_procs, + help="Comma-separated whitelist of processes to regen (e.g. controlsd,radard)") + parser.add_argument("--blacklist-procs", type=comma_separated_list, default=[], + help="Comma-separated blacklist of processes to regen (e.g. 
controlsd,radard)") + parser.add_argument("route", type=str, help="The source route") + parser.add_argument("seg", type=int, help="Segment in source route") + args = parser.parse_args() + + blacklist_set = set(args.blacklist_procs) + processes = [p for p in args.whitelist_procs if p not in blacklist_set] + regen_and_save(args.route, args.seg, processes=processes, upload=args.upload, outdir=args.outdir, dummy_driver_cam=args.dummy_dcamera) diff --git a/selfdrive/test/process_replay/regen_all.py b/selfdrive/test/process_replay/regen_all.py new file mode 100644 index 0000000..656a5b8 --- /dev/null +++ b/selfdrive/test/process_replay/regen_all.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +import argparse +import concurrent.futures +import os +import random +import traceback +from tqdm import tqdm + +from openpilot.common.prefix import OpenpilotPrefix +from openpilot.selfdrive.test.process_replay.regen import regen_and_save +from openpilot.selfdrive.test.process_replay.test_processes import FAKEDATA, source_segments as segments +from openpilot.tools.lib.route import SegmentName + + +def regen_job(segment, upload, disable_tqdm): + with OpenpilotPrefix(): + sn = SegmentName(segment[1]) + fake_dongle_id = 'regen' + ''.join(random.choice('0123456789ABCDEF') for _ in range(11)) + try: + relr = regen_and_save(sn.route_name.canonical_name, sn.segment_num, upload=upload, use_route_meta=False, + outdir=os.path.join(FAKEDATA, fake_dongle_id), disable_tqdm=disable_tqdm, dummy_driver_cam=True) + relr = '|'.join(relr.split('/')[-2:]) + return f' ("{segment[0]}", "{relr}"), ' + except Exception as e: + err = f" {segment} failed: {str(e)}" + err += traceback.format_exc() + err += "\n\n" + return err + + +if __name__ == "__main__": + all_cars = {car for car, _ in segments} + + parser = argparse.ArgumentParser(description="Generate new segments from old ones") + parser.add_argument("-j", "--jobs", type=int, default=1) + parser.add_argument("--no-upload", action="store_true") + parser.add_argument("--whitelist-cars", type=str, nargs="*", default=all_cars, + help="Whitelist given cars from the test (e.g. HONDA)") + parser.add_argument("--blacklist-cars", type=str, nargs="*", default=[], + help="Blacklist given cars from the test (e.g. 
HONDA)") + args = parser.parse_args() + + tested_cars = set(args.whitelist_cars) - set(args.blacklist_cars) + tested_cars = {c.upper() for c in tested_cars} + tested_segments = [(car, segment) for car, segment in segments if car in tested_cars] + + with concurrent.futures.ProcessPoolExecutor(max_workers=args.jobs) as pool: + p = pool.map(regen_job, tested_segments, [not args.no_upload] * len(tested_segments), [args.jobs > 1] * len(tested_segments)) + msg = "Copy these new segments into test_processes.py:" + for seg in tqdm(p, desc="Generating segments", total=len(tested_segments)): + msg += "\n" + str(seg) + print() + print() + print(msg) diff --git a/selfdrive/test/process_replay/test_debayer.py b/selfdrive/test/process_replay/test_debayer.py new file mode 100644 index 0000000..edf2cbd --- /dev/null +++ b/selfdrive/test/process_replay/test_debayer.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python3 +import os +import sys +import bz2 +import numpy as np + +import pyopencl as cl # install with `PYOPENCL_CL_PRETEND_VERSION=2.0 pip install pyopencl` + +from openpilot.system.hardware import PC, TICI +from openpilot.common.basedir import BASEDIR +from openpilot.tools.lib.openpilotci import BASE_URL +from openpilot.system.version import get_commit +from openpilot.system.camerad.snapshot.snapshot import yuv_to_rgb +from openpilot.tools.lib.logreader import LogReader +from openpilot.tools.lib.filereader import FileReader + +TEST_ROUTE = "8345e3b82948d454|2022-05-04--13-45-33/0" + +FRAME_WIDTH = 1928 +FRAME_HEIGHT = 1208 +FRAME_STRIDE = 2896 + +UV_WIDTH = FRAME_WIDTH // 2 +UV_HEIGHT = FRAME_HEIGHT // 2 +UV_SIZE = UV_WIDTH * UV_HEIGHT + + +def get_frame_fn(ref_commit, test_route, tici=True): + return f"{test_route}_debayer{'_tici' if tici else ''}_{ref_commit}.bz2" + + +def bzip_frames(frames): + data = b'' + for y, u, v in frames: + data += y.tobytes() + data += u.tobytes() + data += v.tobytes() + return bz2.compress(data) + + +def unbzip_frames(url): + with FileReader(url) as f: + dat = f.read() + + data = bz2.decompress(dat) + + res = [] + for y_start in range(0, len(data), FRAME_WIDTH * FRAME_HEIGHT + UV_SIZE * 2): + u_start = y_start + FRAME_WIDTH * FRAME_HEIGHT + v_start = u_start + UV_SIZE + + y = np.frombuffer(data[y_start: u_start], dtype=np.uint8).reshape((FRAME_HEIGHT, FRAME_WIDTH)) + u = np.frombuffer(data[u_start: v_start], dtype=np.uint8).reshape((UV_HEIGHT, UV_WIDTH)) + v = np.frombuffer(data[v_start: v_start + UV_SIZE], dtype=np.uint8).reshape((UV_HEIGHT, UV_WIDTH)) + + res.append((y, u, v)) + + return res + + +def init_kernels(frame_offset=0): + ctx = cl.create_some_context(interactive=False) + + with open(os.path.join(BASEDIR, 'system/camerad/cameras/real_debayer.cl')) as f: + build_args = ' -cl-fast-relaxed-math -cl-denorms-are-zero -cl-single-precision-constant' + \ + f' -DFRAME_STRIDE={FRAME_STRIDE} -DRGB_WIDTH={FRAME_WIDTH} -DRGB_HEIGHT={FRAME_HEIGHT} -DFRAME_OFFSET={frame_offset} -DCAM_NUM=0' + if PC: + build_args += ' -DHALF_AS_FLOAT=1 -cl-std=CL2.0' + debayer_prg = cl.Program(ctx, f.read()).build(options=build_args) + + return ctx, debayer_prg + +def debayer_frame(ctx, debayer_prg, data, rgb=False): + q = cl.CommandQueue(ctx) + + yuv_buff = np.empty(FRAME_WIDTH * FRAME_HEIGHT + UV_SIZE * 2, dtype=np.uint8) + + cam_g = cl.Buffer(ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=data) + yuv_g = cl.Buffer(ctx, cl.mem_flags.WRITE_ONLY, FRAME_WIDTH * FRAME_HEIGHT + UV_SIZE * 2) + + local_worksize = (20, 20) if TICI else (4, 4) + ev1 = debayer_prg.debayer10(q, (UV_WIDTH, 
UV_HEIGHT), local_worksize, cam_g, yuv_g) + cl.enqueue_copy(q, yuv_buff, yuv_g, wait_for=[ev1]).wait() + cl.enqueue_barrier(q) + + y = yuv_buff[:FRAME_WIDTH*FRAME_HEIGHT].reshape((FRAME_HEIGHT, FRAME_WIDTH)) + u = yuv_buff[FRAME_WIDTH*FRAME_HEIGHT:FRAME_WIDTH*FRAME_HEIGHT+UV_SIZE].reshape((UV_HEIGHT, UV_WIDTH)) + v = yuv_buff[FRAME_WIDTH*FRAME_HEIGHT+UV_SIZE:].reshape((UV_HEIGHT, UV_WIDTH)) + + if rgb: + return yuv_to_rgb(y, u, v) + else: + return y, u, v + + +def debayer_replay(lr): + ctx, debayer_prg = init_kernels() + + frames = [] + for m in lr: + if m.which() == 'roadCameraState': + cs = m.roadCameraState + if cs.image: + data = np.frombuffer(cs.image, dtype=np.uint8) + img = debayer_frame(ctx, debayer_prg, data) + + frames.append(img) + + return frames + + +if __name__ == "__main__": + update = "--update" in sys.argv + replay_dir = os.path.dirname(os.path.abspath(__file__)) + ref_commit_fn = os.path.join(replay_dir, "debayer_replay_ref_commit") + + # load logs + lr = list(LogReader(TEST_ROUTE)) + + # run replay + frames = debayer_replay(lr) + + # get diff + failed = False + diff = '' + yuv_i = ['y', 'u', 'v'] + if not update: + with open(ref_commit_fn) as f: + ref_commit = f.read().strip() + frame_fn = get_frame_fn(ref_commit, TEST_ROUTE, tici=TICI) + + try: + cmp_frames = unbzip_frames(BASE_URL + frame_fn) + + if len(frames) != len(cmp_frames): + failed = True + diff += 'amount of frames not equal\n' + + for i, (frame, cmp_frame) in enumerate(zip(frames, cmp_frames, strict=True)): + for j in range(3): + fr = frame[j] + cmp_f = cmp_frame[j] + if fr.shape != cmp_f.shape: + failed = True + diff += f'frame shapes not equal for ({i}, {yuv_i[j]})\n' + diff += f'{ref_commit}: {cmp_f.shape}\n' + diff += f'HEAD: {fr.shape}\n' + elif not np.array_equal(fr, cmp_f): + failed = True + if np.allclose(fr, cmp_f, atol=1): + diff += f'frames not equal for ({i}, {yuv_i[j]}), but are all close\n' + else: + diff += f'frames not equal for ({i}, {yuv_i[j]})\n' + + frame_diff = np.abs(np.subtract(fr, cmp_f)) + diff_len = len(np.nonzero(frame_diff)[0]) + if diff_len > 10000: + diff += f'different at a large amount of pixels ({diff_len})\n' + else: + diff += 'different at (frame, yuv, pixel, ref, HEAD):\n' + for k in zip(*np.nonzero(frame_diff), strict=True): + diff += f'{i}, {yuv_i[j]}, {k}, {cmp_f[k]}, {fr[k]}\n' + + if failed: + print(diff) + with open("debayer_diff.txt", "w") as f: + f.write(diff) + except Exception as e: + print(str(e)) + failed = True + + # upload new refs + if update or (failed and TICI): + from openpilot.tools.lib.openpilotci import upload_file + + print("Uploading new refs") + + frames_bzip = bzip_frames(frames) + + new_commit = get_commit() + frame_fn = os.path.join(replay_dir, get_frame_fn(new_commit, TEST_ROUTE, tici=TICI)) + with open(frame_fn, "wb") as f2: + f2.write(frames_bzip) + + try: + upload_file(frame_fn, os.path.basename(frame_fn)) + except Exception as e: + print("failed to upload", e) + + if update: + with open(ref_commit_fn, 'w') as f: + f.write(str(new_commit)) + + print("\nNew ref commit: ", new_commit) + + sys.exit(int(failed)) diff --git a/selfdrive/test/process_replay/test_fuzzy.py b/selfdrive/test/process_replay/test_fuzzy.py new file mode 100644 index 0000000..adff06f --- /dev/null +++ b/selfdrive/test/process_replay/test_fuzzy.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 +import copy +from hypothesis import given, HealthCheck, Phase, settings +import hypothesis.strategies as st +from parameterized import parameterized +import unittest + +from cereal import 
log +from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA +from openpilot.selfdrive.test.fuzzy_generation import FuzzyGenerator +import openpilot.selfdrive.test.process_replay.process_replay as pr + +# These processes currently fail because of unrealistic data breaking assumptions +# that openpilot makes causing error with NaN, inf, int size, array indexing ... +# TODO: Make each one testable +NOT_TESTED = ['controlsd', 'plannerd', 'calibrationd', 'dmonitoringd', 'paramsd', 'dmonitoringmodeld', 'modeld'] + +TEST_CASES = [(cfg.proc_name, copy.deepcopy(cfg)) for cfg in pr.CONFIGS if cfg.proc_name not in NOT_TESTED] + +class TestFuzzProcesses(unittest.TestCase): + + # TODO: make this faster and increase examples + @parameterized.expand(TEST_CASES) + @given(st.data()) + @settings(phases=[Phase.generate, Phase.target], max_examples=10, deadline=1000, suppress_health_check=[HealthCheck.too_slow, HealthCheck.data_too_large]) + def test_fuzz_process(self, proc_name, cfg, data): + msgs = FuzzyGenerator.get_random_event_msg(data.draw, events=cfg.pubs, real_floats=True) + lr = [log.Event.new_message(**m).as_reader() for m in msgs] + cfg.timeout = 5 + pr.replay_process(cfg, lr, fingerprint=TOYOTA.COROLLA_TSS2, disable_progress=True) + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/test/process_replay/test_processes.py b/selfdrive/test/process_replay/test_processes.py new file mode 100644 index 0000000..88e46ab --- /dev/null +++ b/selfdrive/test/process_replay/test_processes.py @@ -0,0 +1,231 @@ +#!/usr/bin/env python3 +import argparse +import concurrent.futures +import os +import sys +from collections import defaultdict +from tqdm import tqdm +from typing import Any + +from openpilot.selfdrive.car.car_helpers import interface_names +from openpilot.tools.lib.openpilotci import get_url, upload_file +from openpilot.selfdrive.test.process_replay.compare_logs import compare_logs, format_diff +from openpilot.selfdrive.test.process_replay.process_replay import CONFIGS, PROC_REPLAY_DIR, FAKEDATA, check_openpilot_enabled, replay_process +from openpilot.system.version import get_commit +from openpilot.tools.lib.filereader import FileReader +from openpilot.tools.lib.logreader import LogReader +from openpilot.tools.lib.helpers import save_log + +source_segments = [ + ("BODY", "937ccb7243511b65|2022-05-24--16-03-09--1"), # COMMA.BODY + ("HYUNDAI", "02c45f73a2e5c6e9|2021-01-01--19-08-22--1"), # HYUNDAI.SONATA + ("HYUNDAI2", "d545129f3ca90f28|2022-11-07--20-43-08--3"), # HYUNDAI.KIA_EV6 (+ QCOM GPS) + ("TOYOTA", "0982d79ebb0de295|2021-01-04--17-13-21--13"), # TOYOTA.PRIUS + ("TOYOTA2", "0982d79ebb0de295|2021-01-03--20-03-36--6"), # TOYOTA.RAV4 + ("TOYOTA3", "f7d7e3538cda1a2a|2021-08-16--08-55-34--6"), # TOYOTA.COROLLA_TSS2 + ("HONDA", "eb140f119469d9ab|2021-06-12--10-46-24--27"), # HONDA.CIVIC (NIDEC) + ("HONDA2", "7d2244f34d1bbcda|2021-06-25--12-25-37--26"), # HONDA.ACCORD (BOSCH) + ("CHRYSLER", "4deb27de11bee626|2021-02-20--11-28-55--8"), # CHRYSLER.PACIFICA_2018_HYBRID + ("RAM", "17fc16d840fe9d21|2023-04-26--13-28-44--5"), # CHRYSLER.RAM_1500 + ("SUBARU", "341dccd5359e3c97|2022-09-12--10-35-33--3"), # SUBARU.OUTBACK + ("GM", "0c58b6a25109da2b|2021-02-23--16-35-50--11"), # GM.VOLT + ("GM2", "376bf99325883932|2022-10-27--13-41-22--1"), # GM.BOLT_EUV + ("NISSAN", "35336926920f3571|2021-02-12--18-38-48--46"), # NISSAN.XTRAIL + ("VOLKSWAGEN", "de9592456ad7d144|2021-06-29--11-00-15--6"), # VOLKSWAGEN.GOLF + ("MAZDA", "bd6a637565e91581|2021-10-30--15-14-53--4"), # MAZDA.CX9_2021 + 
("FORD", "54827bf84c38b14f|2023-01-26--21-59-07--4"), # FORD.BRONCO_SPORT_MK1 + + # Enable when port is tested and dashcamOnly is no longer set + #("TESLA", "bb50caf5f0945ab1|2021-06-19--17-20-18--3"), # TESLA.AP2_MODELS + #("VOLKSWAGEN2", "3cfdec54aa035f3f|2022-07-19--23-45-10--2"), # VOLKSWAGEN.PASSAT_NMS +] + +segments = [ + ("BODY", "regen997DF2697CB|2023-10-30--23-14-29--0"), + ("HYUNDAI", "regen2A9D2A8E0B4|2023-10-30--23-13-34--0"), + ("HYUNDAI2", "regen6CA24BC3035|2023-10-30--23-14-28--0"), + ("TOYOTA", "regen5C019D76307|2023-10-30--23-13-31--0"), + ("TOYOTA2", "regen5DCADA88A96|2023-10-30--23-14-57--0"), + ("TOYOTA3", "regen7204CA3A498|2023-10-30--23-15-55--0"), + ("HONDA", "regen048F8FA0B24|2023-10-30--23-15-53--0"), + ("HONDA2", "regen7D2D3F82D5B|2023-10-30--23-15-55--0"), + ("CHRYSLER", "regen7125C42780C|2023-10-30--23-16-21--0"), + ("RAM", "regen2731F3213D2|2023-10-30--23-18-11--0"), + ("SUBARU", "regen86E4C1B4DDD|2023-10-30--23-18-14--0"), + ("GM", "regenF6393D64745|2023-10-30--23-17-18--0"), + ("GM2", "regen220F830C05B|2023-10-30--23-18-39--0"), + ("NISSAN", "regen4F671F7C435|2023-10-30--23-18-40--0"), + ("VOLKSWAGEN", "regen8BDFE7307A0|2023-10-30--23-19-36--0"), + ("MAZDA", "regen2E9F1A15FD5|2023-10-30--23-20-36--0"), + ("FORD", "regen6D39E54606E|2023-10-30--23-20-54--0"), +] + +# dashcamOnly makes don't need to be tested until a full port is done +excluded_interfaces = ["mock", "tesla"] + +BASE_URL = "https://commadataci.blob.core.windows.net/openpilotci/" +REF_COMMIT_FN = os.path.join(PROC_REPLAY_DIR, "ref_commit") +EXCLUDED_PROCS = {"modeld", "dmonitoringmodeld"} + + +def run_test_process(data): + segment, cfg, args, cur_log_fn, ref_log_path, lr_dat = data + res = None + if not args.upload_only: + lr = LogReader.from_bytes(lr_dat) + res, log_msgs = test_process(cfg, lr, segment, ref_log_path, cur_log_fn, args.ignore_fields, args.ignore_msgs) + # save logs so we can upload when updating refs + save_log(cur_log_fn, log_msgs) + + if args.update_refs or args.upload_only: + print(f'Uploading: {os.path.basename(cur_log_fn)}') + assert os.path.exists(cur_log_fn), f"Cannot find log to upload: {cur_log_fn}" + upload_file(cur_log_fn, os.path.basename(cur_log_fn)) + os.remove(cur_log_fn) + return (segment, cfg.proc_name, res) + + +def get_log_data(segment): + r, n = segment.rsplit("--", 1) + with FileReader(get_url(r, n)) as f: + return (segment, f.read()) + + +def test_process(cfg, lr, segment, ref_log_path, new_log_path, ignore_fields=None, ignore_msgs=None): + if ignore_fields is None: + ignore_fields = [] + if ignore_msgs is None: + ignore_msgs = [] + + ref_log_msgs = list(LogReader(ref_log_path)) + + try: + log_msgs = replay_process(cfg, lr, disable_progress=True) + except Exception as e: + raise Exception("failed on segment: " + segment) from e + + # check to make sure openpilot is engaged in the route + if cfg.proc_name == "controlsd": + if not check_openpilot_enabled(log_msgs): + # FIXME: these segments should work, but the replay enabling logic is too brittle + if segment not in ("regen6CA24BC3035|2023-10-30--23-14-28--0", "regen7D2D3F82D5B|2023-10-30--23-15-55--0"): + return f"Route did not enable at all or for long enough: {new_log_path}", log_msgs + + try: + return compare_logs(ref_log_msgs, log_msgs, ignore_fields + cfg.ignore, ignore_msgs, cfg.tolerance), log_msgs + except Exception as e: + return str(e), log_msgs + + +if __name__ == "__main__": + all_cars = {car for car, _ in segments} + all_procs = {cfg.proc_name for cfg in CONFIGS if cfg.proc_name not in 
EXCLUDED_PROCS} + + cpu_count = os.cpu_count() or 1 + + parser = argparse.ArgumentParser(description="Regression test to identify changes in a process's output") + parser.add_argument("--whitelist-procs", type=str, nargs="*", default=all_procs, + help="Whitelist given processes from the test (e.g. controlsd)") + parser.add_argument("--whitelist-cars", type=str, nargs="*", default=all_cars, + help="Whitelist given cars from the test (e.g. HONDA)") + parser.add_argument("--blacklist-procs", type=str, nargs="*", default=[], + help="Blacklist given processes from the test (e.g. controlsd)") + parser.add_argument("--blacklist-cars", type=str, nargs="*", default=[], + help="Blacklist given cars from the test (e.g. HONDA)") + parser.add_argument("--ignore-fields", type=str, nargs="*", default=[], + help="Extra fields or msgs to ignore (e.g. carState.events)") + parser.add_argument("--ignore-msgs", type=str, nargs="*", default=[], + help="Msgs to ignore (e.g. carEvents)") + parser.add_argument("--update-refs", action="store_true", + help="Updates reference logs using current commit") + parser.add_argument("--upload-only", action="store_true", + help="Skips testing processes and uploads logs from previous test run") + parser.add_argument("-j", "--jobs", type=int, default=max(cpu_count - 2, 1), + help="Max amount of parallel jobs") + args = parser.parse_args() + + tested_procs = set(args.whitelist_procs) - set(args.blacklist_procs) + tested_cars = set(args.whitelist_cars) - set(args.blacklist_cars) + tested_cars = {c.upper() for c in tested_cars} + + full_test = (tested_procs == all_procs) and (tested_cars == all_cars) and all(len(x) == 0 for x in (args.ignore_fields, args.ignore_msgs)) + upload = args.update_refs or args.upload_only + os.makedirs(os.path.dirname(FAKEDATA), exist_ok=True) + + if upload: + assert full_test, "Need to run full test when updating refs" + + try: + with open(REF_COMMIT_FN) as f: + ref_commit = f.read().strip() + except FileNotFoundError: + print("Couldn't find reference commit") + sys.exit(1) + + cur_commit = get_commit() + if not cur_commit: + raise Exception("Couldn't get current commit") + + print(f"***** testing against commit {ref_commit} *****") + + # check to make sure all car brands are tested + if full_test: + untested = (set(interface_names) - set(excluded_interfaces)) - {c.lower() for c in tested_cars} + assert len(untested) == 0, f"Cars missing routes: {str(untested)}" + + log_paths: defaultdict[str, dict[str, dict[str, str]]] = defaultdict(lambda: defaultdict(dict)) + with concurrent.futures.ProcessPoolExecutor(max_workers=args.jobs) as pool: + if not args.upload_only: + download_segments = [seg for car, seg in segments if car in tested_cars] + log_data: dict[str, LogReader] = {} + p1 = pool.map(get_log_data, download_segments) + for segment, lr in tqdm(p1, desc="Getting Logs", total=len(download_segments)): + log_data[segment] = lr + + pool_args: Any = [] + for car_brand, segment in segments: + if car_brand not in tested_cars: + continue + + for cfg in CONFIGS: + if cfg.proc_name not in tested_procs: + continue + + cur_log_fn = os.path.join(FAKEDATA, f"{segment}_{cfg.proc_name}_{cur_commit}.bz2") + if args.update_refs: # reference logs will not exist if routes were just regenerated + ref_log_path = get_url(*segment.rsplit("--", 1)) + else: + ref_log_fn = os.path.join(FAKEDATA, f"{segment}_{cfg.proc_name}_{ref_commit}.bz2") + ref_log_path = ref_log_fn if os.path.exists(ref_log_fn) else BASE_URL + os.path.basename(ref_log_fn) + + dat = None if args.upload_only 
else log_data[segment] + pool_args.append((segment, cfg, args, cur_log_fn, ref_log_path, dat)) + + log_paths[segment][cfg.proc_name]['ref'] = ref_log_path + log_paths[segment][cfg.proc_name]['new'] = cur_log_fn + + results: Any = defaultdict(dict) + p2 = pool.map(run_test_process, pool_args) + for (segment, proc, result) in tqdm(p2, desc="Running Tests", total=len(pool_args)): + if not args.upload_only: + results[segment][proc] = result + + diff_short, diff_long, failed = format_diff(results, log_paths, ref_commit) + if not upload: + with open(os.path.join(PROC_REPLAY_DIR, "diff.txt"), "w") as f: + f.write(diff_long) + print(diff_short) + + if failed: + print("TEST FAILED") + print("\n\nTo push the new reference logs for this commit run:") + print("./test_processes.py --upload-only") + else: + print("TEST SUCCEEDED") + + else: + with open(REF_COMMIT_FN, "w") as f: + f.write(cur_commit) + print(f"\n\nUpdated reference logs for commit: {cur_commit}") + + sys.exit(int(failed)) diff --git a/selfdrive/test/process_replay/test_regen.py b/selfdrive/test/process_replay/test_regen.py new file mode 100644 index 0000000..41d67ea --- /dev/null +++ b/selfdrive/test/process_replay/test_regen.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 + +import unittest + +from parameterized import parameterized + +from openpilot.selfdrive.test.process_replay.regen import regen_segment, DummyFrameReader +from openpilot.selfdrive.test.process_replay.process_replay import check_openpilot_enabled +from openpilot.tools.lib.openpilotci import get_url +from openpilot.tools.lib.logreader import LogReader +from openpilot.tools.lib.framereader import FrameReader + +TESTED_SEGMENTS = [ + ("PRIUS_C2", "0982d79ebb0de295|2021-01-04--17-13-21--13"), # TOYOTA PRIUS 2017: NEO, pandaStateDEPRECATED, no peripheralState, sensorEventsDEPRECATED + # Enable these once regen on CI becomes faster or use them for different tests running controlsd in isolation + # ("MAZDA_C3", "bd6a637565e91581|2021-10-30--15-14-53--4"), # MAZDA.CX9_2021: TICI, incomplete managerState + # ("FORD_C3", "54827bf84c38b14f|2023-01-26--21-59-07--4"), # FORD.BRONCO_SPORT_MK1: TICI +] + + +def ci_setup_data_readers(route, sidx): + lr = LogReader(get_url(route, sidx, "rlog")) + frs = { + 'roadCameraState': FrameReader(get_url(route, sidx, "fcamera")), + 'driverCameraState': DummyFrameReader.zero_dcamera() + } + if next((True for m in lr if m.which() == "wideRoadCameraState"), False): + frs["wideRoadCameraState"] = FrameReader(get_url(route, sidx, "ecamera")) + + return lr, frs + + +class TestRegen(unittest.TestCase): + @parameterized.expand(TESTED_SEGMENTS) + def test_engaged(self, case_name, segment): + route, sidx = segment.rsplit("--", 1) + lr, frs = ci_setup_data_readers(route, sidx) + output_logs = regen_segment(lr, frs, disable_tqdm=True) + + engaged = check_openpilot_enabled(output_logs) + self.assertTrue(engaged, f"openpilot not engaged in {case_name}") + + +if __name__=='__main__': + unittest.main() diff --git a/selfdrive/test/process_replay/vision_meta.py b/selfdrive/test/process_replay/vision_meta.py new file mode 100644 index 0000000..b3c3dc0 --- /dev/null +++ b/selfdrive/test/process_replay/vision_meta.py @@ -0,0 +1,43 @@ +from collections import namedtuple +from cereal.visionipc import VisionStreamType +from openpilot.common.realtime import DT_MDL, DT_DMON +from openpilot.common.transformations.camera import tici_f_frame_size, tici_d_frame_size, tici_e_frame_size, eon_f_frame_size, eon_d_frame_size + +VideoStreamMeta = namedtuple("VideoStreamMeta", 
["camera_state", "encode_index", "stream", "dt", "frame_sizes"]) +ROAD_CAMERA_FRAME_SIZES = {"tici": tici_f_frame_size, "tizi": tici_f_frame_size, "neo": eon_f_frame_size} +WIDE_ROAD_CAMERA_FRAME_SIZES = {"tici": tici_e_frame_size, "tizi": tici_e_frame_size} +DRIVER_FRAME_SIZES = {"tici": tici_d_frame_size, "tizi": tici_d_frame_size, "neo": eon_d_frame_size} +VIPC_STREAM_METADATA = [ + # metadata: (state_msg_type, encode_msg_type, stream_type, dt, frame_sizes) + ("roadCameraState", "roadEncodeIdx", VisionStreamType.VISION_STREAM_ROAD, DT_MDL, ROAD_CAMERA_FRAME_SIZES), + ("wideRoadCameraState", "wideRoadEncodeIdx", VisionStreamType.VISION_STREAM_WIDE_ROAD, DT_MDL, WIDE_ROAD_CAMERA_FRAME_SIZES), + ("driverCameraState", "driverEncodeIdx", VisionStreamType.VISION_STREAM_DRIVER, DT_DMON, DRIVER_FRAME_SIZES), +] + + +def meta_from_camera_state(state): + meta = next((VideoStreamMeta(*meta) for meta in VIPC_STREAM_METADATA if meta[0] == state), None) + return meta + + +def meta_from_encode_index(encode_index): + meta = next((VideoStreamMeta(*meta) for meta in VIPC_STREAM_METADATA if meta[1] == encode_index), None) + return meta + + +def meta_from_stream_type(stream_type): + meta = next((VideoStreamMeta(*meta) for meta in VIPC_STREAM_METADATA if meta[2] == stream_type), None) + return meta + + +def available_streams(lr=None): + if lr is None: + return [VideoStreamMeta(*meta) for meta in VIPC_STREAM_METADATA] + + result = [] + for meta in VIPC_STREAM_METADATA: + has_cam_state = next((True for m in lr if m.which() == meta[0]), False) + if has_cam_state: + result.append(VideoStreamMeta(*meta)) + + return result diff --git a/selfdrive/test/profiling/.gitignore b/selfdrive/test/profiling/.gitignore new file mode 100644 index 0000000..76acac7 --- /dev/null +++ b/selfdrive/test/profiling/.gitignore @@ -0,0 +1,2 @@ +cachegrind.out.* +*.prof diff --git a/selfdrive/test/profiling/__init__.py b/selfdrive/test/profiling/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/selfdrive/test/profiling/lib.py b/selfdrive/test/profiling/lib.py new file mode 100644 index 0000000..7f3b012 --- /dev/null +++ b/selfdrive/test/profiling/lib.py @@ -0,0 +1,91 @@ +from collections import defaultdict, deque +from cereal.services import SERVICE_LIST +import cereal.messaging as messaging +import capnp + + +class ReplayDone(Exception): + pass + + +class SubSocket(): + def __init__(self, msgs, trigger): + self.i = 0 + self.trigger = trigger + self.msgs = [m.as_builder().to_bytes() for m in msgs if m.which() == trigger] + self.max_i = len(self.msgs) - 1 + + def receive(self, non_blocking=False): + if non_blocking: + return None + + if self.i == self.max_i: + raise ReplayDone + + while True: + msg = self.msgs[self.i] + self.i += 1 + return msg + + +class PubSocket(): + def send(self, data): + pass + + +class SubMaster(messaging.SubMaster): + def __init__(self, msgs, trigger, services, check_averag_freq=False): + self.frame = 0 + self.data = {} + self.ignore_alive = [] + + self.alive = {s: True for s in services} + self.updated = {s: False for s in services} + self.rcv_time = {s: 0. 
for s in services} + self.rcv_frame = {s: 0 for s in services} + self.valid = {s: True for s in services} + self.freq_ok = {s: True for s in services} + self.recv_dts = {s: deque([0.0] * messaging.AVG_FREQ_HISTORY, maxlen=messaging.AVG_FREQ_HISTORY) for s in services} + self.logMonoTime = {} + self.sock = {} + self.freq = {} + self.check_average_freq = check_averag_freq + self.non_polled_services = [] + self.ignore_average_freq = [] + + # TODO: specify multiple triggers for service like plannerd that poll on more than one service + cur_msgs = [] + self.msgs = [] + msgs = [m for m in msgs if m.which() in services] + + for msg in msgs: + cur_msgs.append(msg) + if msg.which() == trigger: + self.msgs.append(cur_msgs) + cur_msgs = [] + + self.msgs = list(reversed(self.msgs)) + + for s in services: + self.freq[s] = SERVICE_LIST[s].frequency + try: + data = messaging.new_message(s) + except capnp.lib.capnp.KjException: + # lists + data = messaging.new_message(s, 0) + + self.data[s] = getattr(data, s) + self.logMonoTime[s] = 0 + self.sock[s] = SubSocket(msgs, s) + + def update(self, timeout=None): + if not len(self.msgs): + raise ReplayDone + + cur_msgs = self.msgs.pop() + self.update_msgs(cur_msgs[0].logMonoTime, self.msgs.pop()) + + +class PubMaster(messaging.PubMaster): + def __init__(self): + self.sock = defaultdict(PubSocket) diff --git a/selfdrive/test/profiling/profiler.py b/selfdrive/test/profiling/profiler.py new file mode 100644 index 0000000..6571825 --- /dev/null +++ b/selfdrive/test/profiling/profiler.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 +import os +import sys +import cProfile +import pprofile +import pyprof2calltree + +from openpilot.common.params import Params +from openpilot.tools.lib.logreader import LogReader +from openpilot.selfdrive.test.profiling.lib import SubMaster, PubMaster, SubSocket, ReplayDone +from openpilot.selfdrive.test.process_replay.process_replay import CONFIGS +from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA +from openpilot.selfdrive.car.honda.values import CAR as HONDA +from openpilot.selfdrive.car.volkswagen.values import CAR as VW + +BASE_URL = "https://commadataci.blob.core.windows.net/openpilotci/" + +CARS = { + 'toyota': ("0982d79ebb0de295|2021-01-03--20-03-36/6", TOYOTA.RAV4), + 'honda': ("0982d79ebb0de295|2021-01-08--10-13-10/6", HONDA.CIVIC), + "vw": ("ef895f46af5fd73f|2021-05-22--14-06-35/6", VW.AUDI_A3_MK3), +} + + +def get_inputs(msgs, process, fingerprint): + for config in CONFIGS: + if config.proc_name == process: + sub_socks = list(config.pubs) + trigger = sub_socks[0] + break + + # some procs block on CarParams + for msg in msgs: + if msg.which() == 'carParams': + m = msg.as_builder() + m.carParams.carFingerprint = fingerprint + Params().put("CarParams", m.carParams.copy().to_bytes()) + break + + sm = SubMaster(msgs, trigger, sub_socks) + pm = PubMaster() + if 'can' in sub_socks: + can_sock = SubSocket(msgs, 'can') + else: + can_sock = None + return sm, pm, can_sock + + +def profile(proc, func, car='toyota'): + segment, fingerprint = CARS[car] + segment = segment.replace('|', '/') + rlog_url = f"{BASE_URL}{segment}/rlog.bz2" + msgs = list(LogReader(rlog_url)) * int(os.getenv("LOOP", "1")) + + os.environ['FINGERPRINT'] = fingerprint + os.environ['SKIP_FW_QUERY'] = "1" + os.environ['REPLAY'] = "1" + + def run(sm, pm, can_sock): + try: + if can_sock is not None: + func(sm, pm, can_sock) + else: + func(sm, pm) + except ReplayDone: + pass + + # Statistical + sm, pm, can_sock = get_inputs(msgs, proc, fingerprint) + with 
pprofile.StatisticalProfile()(period=0.00001) as pr: + run(sm, pm, can_sock) + pr.dump_stats(f'cachegrind.out.{proc}_statistical') + + # Deterministic + sm, pm, can_sock = get_inputs(msgs, proc, fingerprint) + with cProfile.Profile() as pr: + run(sm, pm, can_sock) + pyprof2calltree.convert(pr.getstats(), f'cachegrind.out.{proc}_deterministic') + + +if __name__ == '__main__': + from openpilot.selfdrive.controls.controlsd import main as controlsd_thread + from openpilot.selfdrive.locationd.paramsd import main as paramsd_thread + from openpilot.selfdrive.controls.plannerd import main as plannerd_thread + + procs = { + 'controlsd': controlsd_thread, + 'paramsd': paramsd_thread, + 'plannerd': plannerd_thread, + } + + proc = sys.argv[1] + if proc not in procs: + print(f"{proc} not available") + sys.exit(0) + else: + profile(proc, procs[proc]) diff --git a/selfdrive/test/scons_build_test.sh b/selfdrive/test/scons_build_test.sh new file mode 100644 index 0000000..a3b33f7 --- /dev/null +++ b/selfdrive/test/scons_build_test.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +SCRIPT_DIR=$(dirname "$0") +BASEDIR=$(realpath "$SCRIPT_DIR/../../") +cd $BASEDIR + +# tests that our build system's dependencies are configured properly, +# needs a machine with lots of cores +scons --clean +scons --no-cache --random -j$(nproc) \ No newline at end of file diff --git a/selfdrive/test/setup_vsound.sh b/selfdrive/test/setup_vsound.sh new file mode 100644 index 0000000..a6601d0 --- /dev/null +++ b/selfdrive/test/setup_vsound.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +{ + #start pulseaudio daemon + sudo pulseaudio -D + + # create a virtual null audio and set it to default device + sudo pactl load-module module-null-sink sink_name=virtual_audio + sudo pactl set-default-sink virtual_audio +} > /dev/null 2>&1 diff --git a/selfdrive/test/setup_xvfb.sh b/selfdrive/test/setup_xvfb.sh new file mode 100644 index 0000000..692b84d --- /dev/null +++ b/selfdrive/test/setup_xvfb.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# Sets up a virtual display for running map renderer and simulator without an X11 display + +DISP_ID=99 +export DISPLAY=:$DISP_ID + +sudo Xvfb $DISPLAY -screen 0 2160x1080x24 2>/dev/null & + +# check for x11 socket for the specified display ID +while [ ! -S /tmp/.X11-unix/X$DISP_ID ] +do + echo "Waiting for Xvfb..." 
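+  # the X11 socket for display :$DISP_ID is not there yet; try again in a second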
+ sleep 1 +done + +touch ~/.Xauthority +export XDG_SESSION_TYPE="x11" +xset -q \ No newline at end of file diff --git a/selfdrive/test/test_onroad.py b/selfdrive/test/test_onroad.py index de8a442..4be9b8a 100755 --- a/selfdrive/test/test_onroad.py +++ b/selfdrive/test/test_onroad.py @@ -29,7 +29,7 @@ from openpilot.tools.lib.logreader import LogReader # Baseline CPU usage by process PROCS = { - "selfdrive.controls.controlsd": 41.0, + "selfdrive.controls.controlsd": 46.0, "./loggerd": 14.0, "./encoderd": 17.0, "./camerad": 14.5, @@ -424,4 +424,4 @@ class TestOnroad(unittest.TestCase): if __name__ == "__main__": - pytest.main() + unittest.main() diff --git a/selfdrive/test/test_updated.py b/selfdrive/test/test_updated.py new file mode 100644 index 0000000..dd79e03 --- /dev/null +++ b/selfdrive/test/test_updated.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python3 +import datetime +import os +import pytest +import time +import tempfile +import unittest +import shutil +import signal +import subprocess +import random + +from openpilot.common.basedir import BASEDIR +from openpilot.common.params import Params + + +@pytest.mark.tici +class TestUpdated(unittest.TestCase): + + def setUp(self): + self.updated_proc = None + + self.tmp_dir = tempfile.TemporaryDirectory() + org_dir = os.path.join(self.tmp_dir.name, "commaai") + + self.basedir = os.path.join(org_dir, "openpilot") + self.git_remote_dir = os.path.join(org_dir, "openpilot_remote") + self.staging_dir = os.path.join(org_dir, "safe_staging") + for d in [org_dir, self.basedir, self.git_remote_dir, self.staging_dir]: + os.mkdir(d) + + self.neos_version = os.path.join(org_dir, "neos_version") + self.neosupdate_dir = os.path.join(org_dir, "neosupdate") + with open(self.neos_version, "w") as f: + v = subprocess.check_output(r"bash -c 'source launch_env.sh && echo $REQUIRED_NEOS_VERSION'", + cwd=BASEDIR, shell=True, encoding='utf8').strip() + f.write(v) + + self.upper_dir = os.path.join(self.staging_dir, "upper") + self.merged_dir = os.path.join(self.staging_dir, "merged") + self.finalized_dir = os.path.join(self.staging_dir, "finalized") + + # setup local submodule remotes + submodules = subprocess.check_output("git submodule --quiet foreach 'echo $name'", + shell=True, cwd=BASEDIR, encoding='utf8').split() + for s in submodules: + sub_path = os.path.join(org_dir, s.split("_repo")[0]) + self._run(f"git clone {s} {sub_path}.git", cwd=BASEDIR) + + # setup two git repos, a remote and one we'll run updated in + self._run([ + f"git clone {BASEDIR} {self.git_remote_dir}", + f"git clone {self.git_remote_dir} {self.basedir}", + f"cd {self.basedir} && git submodule init && git submodule update", + f"cd {self.basedir} && scons -j{os.cpu_count()} cereal/ common/" + ]) + + self.params = Params(os.path.join(self.basedir, "persist/params")) + self.params.clear_all() + os.sync() + + def tearDown(self): + try: + if self.updated_proc is not None: + self.updated_proc.terminate() + self.updated_proc.wait(30) + except Exception as e: + print(e) + self.tmp_dir.cleanup() + + + # *** test helpers *** + + + def _run(self, cmd, cwd=None): + if not isinstance(cmd, list): + cmd = (cmd,) + + for c in cmd: + subprocess.check_output(c, cwd=cwd, shell=True) + + def _get_updated_proc(self): + os.environ["PYTHONPATH"] = self.basedir + os.environ["GIT_AUTHOR_NAME"] = "testy tester" + os.environ["GIT_COMMITTER_NAME"] = "testy tester" + os.environ["GIT_AUTHOR_EMAIL"] = "testy@tester.test" + os.environ["GIT_COMMITTER_EMAIL"] = "testy@tester.test" + os.environ["UPDATER_TEST_IP"] = "localhost" 
+ os.environ["UPDATER_LOCK_FILE"] = os.path.join(self.tmp_dir.name, "updater.lock") + os.environ["UPDATER_STAGING_ROOT"] = self.staging_dir + os.environ["UPDATER_NEOS_VERSION"] = self.neos_version + os.environ["UPDATER_NEOSUPDATE_DIR"] = self.neosupdate_dir + updated_path = os.path.join(self.basedir, "selfdrive/updated.py") + return subprocess.Popen(updated_path, env=os.environ) + + def _start_updater(self, offroad=True, nosleep=False): + self.params.put_bool("IsOffroad", offroad) + self.updated_proc = self._get_updated_proc() + if not nosleep: + time.sleep(1) + + def _update_now(self): + self.updated_proc.send_signal(signal.SIGHUP) + + # TODO: this should be implemented in params + def _read_param(self, key, timeout=1): + ret = None + start_time = time.monotonic() + while ret is None: + ret = self.params.get(key, encoding='utf8') + if time.monotonic() - start_time > timeout: + break + time.sleep(0.01) + return ret + + def _wait_for_update(self, timeout=30, clear_param=False): + if clear_param: + self.params.remove("LastUpdateTime") + + self._update_now() + t = self._read_param("LastUpdateTime", timeout=timeout) + if t is None: + raise Exception("timed out waiting for update to complete") + + def _make_commit(self): + all_dirs, all_files = [], [] + for root, dirs, files in os.walk(self.git_remote_dir): + if ".git" in root: + continue + for d in dirs: + all_dirs.append(os.path.join(root, d)) + for f in files: + all_files.append(os.path.join(root, f)) + + # make a new dir and some new files + new_dir = os.path.join(self.git_remote_dir, "this_is_a_new_dir") + os.mkdir(new_dir) + for _ in range(random.randrange(5, 30)): + for d in (new_dir, random.choice(all_dirs)): + with tempfile.NamedTemporaryFile(dir=d, delete=False) as f: + f.write(os.urandom(random.randrange(1, 1000000))) + + # modify some files + for f in random.sample(all_files, random.randrange(5, 50)): + with open(f, "w+") as ff: + txt = ff.readlines() + ff.seek(0) + for line in txt: + ff.write(line[::-1]) + + # remove some files + for f in random.sample(all_files, random.randrange(5, 50)): + os.remove(f) + + # remove some dirs + for d in random.sample(all_dirs, random.randrange(1, 10)): + shutil.rmtree(d) + + # commit the changes + self._run([ + "git add -A", + "git commit -m 'an update'", + ], cwd=self.git_remote_dir) + + def _check_update_state(self, update_available): + # make sure LastUpdateTime is recent + t = self._read_param("LastUpdateTime") + last_update_time = datetime.datetime.fromisoformat(t) + td = datetime.datetime.utcnow() - last_update_time + self.assertLess(td.total_seconds(), 10) + self.params.remove("LastUpdateTime") + + # wait a bit for the rest of the params to be written + time.sleep(0.1) + + # check params + update = self._read_param("UpdateAvailable") + self.assertEqual(update == "1", update_available, f"UpdateAvailable: {repr(update)}") + self.assertEqual(self._read_param("UpdateFailedCount"), "0") + + # TODO: check that the finalized update actually matches remote + # check the .overlay_init and .overlay_consistent flags + self.assertTrue(os.path.isfile(os.path.join(self.basedir, ".overlay_init"))) + self.assertEqual(os.path.isfile(os.path.join(self.finalized_dir, ".overlay_consistent")), update_available) + + + # *** test cases *** + + + # Run updated for 100 cycles with no update + def test_no_update(self): + self._start_updater() + for _ in range(100): + self._wait_for_update(clear_param=True) + self._check_update_state(False) + + # Let the updater run with no update for a cycle, then write an update + 
def test_update(self): + self._start_updater() + + # run for a cycle with no update + self._wait_for_update(clear_param=True) + self._check_update_state(False) + + # write an update to our remote + self._make_commit() + + # run for a cycle to get the update + self._wait_for_update(timeout=60, clear_param=True) + self._check_update_state(True) + + # run another cycle with no update + self._wait_for_update(clear_param=True) + self._check_update_state(True) + + # Let the updater run for 10 cycles, and write an update every cycle + @unittest.skip("need to make this faster") + def test_update_loop(self): + self._start_updater() + + # run for a cycle with no update + self._wait_for_update(clear_param=True) + for _ in range(10): + time.sleep(0.5) + self._make_commit() + self._wait_for_update(timeout=90, clear_param=True) + self._check_update_state(True) + + # Test overlay re-creation after tracking a new file in basedir's git + def test_overlay_reinit(self): + self._start_updater() + + overlay_init_fn = os.path.join(self.basedir, ".overlay_init") + + # run for a cycle with no update + self._wait_for_update(clear_param=True) + self.params.remove("LastUpdateTime") + first_mtime = os.path.getmtime(overlay_init_fn) + + # touch a file in the basedir + self._run("touch new_file && git add new_file", cwd=self.basedir) + + # run another cycle, should have a new mtime + self._wait_for_update(clear_param=True) + second_mtime = os.path.getmtime(overlay_init_fn) + self.assertTrue(first_mtime != second_mtime) + + # run another cycle, mtime should be same as last cycle + self._wait_for_update(clear_param=True) + new_mtime = os.path.getmtime(overlay_init_fn) + self.assertTrue(second_mtime == new_mtime) + + # Make sure updated exits if another instance is running + def test_multiple_instances(self): + # start updated and let it run for a cycle + self._start_updater() + time.sleep(1) + self._wait_for_update(clear_param=True) + + # start another instance + second_updated = self._get_updated_proc() + ret_code = second_updated.wait(timeout=5) + self.assertTrue(ret_code is not None) + + + # *** test cases with NEOS updates *** + + + # Run updated with no update, make sure it clears the old NEOS update + def test_clear_neos_cache(self): + # make the dir and some junk files + os.mkdir(self.neosupdate_dir) + for _ in range(15): + with tempfile.NamedTemporaryFile(dir=self.neosupdate_dir, delete=False) as f: + f.write(os.urandom(random.randrange(1, 1000000))) + + self._start_updater() + self._wait_for_update(clear_param=True) + self._check_update_state(False) + self.assertFalse(os.path.isdir(self.neosupdate_dir)) + + # Let the updater run with no update for a cycle, then write an update + @unittest.skip("TODO: only runs on device") + def test_update_with_neos_update(self): + # bump the NEOS version and commit it + self._run([ + "echo 'export REQUIRED_NEOS_VERSION=3' >> launch_env.sh", + "git -c user.name='testy' -c user.email='testy@tester.test' \ + commit -am 'a neos update'", + ], cwd=self.git_remote_dir) + + # run for a cycle to get the update + self._start_updater() + self._wait_for_update(timeout=60, clear_param=True) + self._check_update_state(True) + + # TODO: more comprehensive check + self.assertTrue(os.path.isdir(self.neosupdate_dir)) + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/test/test_valgrind_replay.py b/selfdrive/test/test_valgrind_replay.py new file mode 100644 index 0000000..75520df --- /dev/null +++ b/selfdrive/test/test_valgrind_replay.py @@ -0,0 +1,117 @@ +#!/usr/bin/env 
python3 +import os +import threading +import time +import unittest +import subprocess +import signal + +if "CI" in os.environ: + def tqdm(x): + return x +else: + from tqdm import tqdm # type: ignore + +import cereal.messaging as messaging +from collections import namedtuple +from openpilot.tools.lib.logreader import LogReader +from openpilot.tools.lib.openpilotci import get_url +from openpilot.common.basedir import BASEDIR + +ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'command', 'path', 'segment', 'wait_for_response']) + +CONFIGS = [ + ProcessConfig( + proc_name="ubloxd", + pub_sub={ + "ubloxRaw": ["ubloxGnss", "gpsLocationExternal"], + }, + ignore=[], + command="./ubloxd", + path="system/ubloxd", + segment="0375fdf7b1ce594d|2019-06-13--08-32-25--3", + wait_for_response=True + ), +] + + +class TestValgrind(unittest.TestCase): + def extract_leak_sizes(self, log): + if "All heap blocks were freed -- no leaks are possible" in log: + return (0,0,0) + + log = log.replace(",","") # fixes casting to int issue with large leaks + err_lost1 = log.split("definitely lost: ")[1] + err_lost2 = log.split("indirectly lost: ")[1] + err_lost3 = log.split("possibly lost: ")[1] + definitely_lost = int(err_lost1.split(" ")[0]) + indirectly_lost = int(err_lost2.split(" ")[0]) + possibly_lost = int(err_lost3.split(" ")[0]) + return (definitely_lost, indirectly_lost, possibly_lost) + + def valgrindlauncher(self, arg, cwd): + os.chdir(os.path.join(BASEDIR, cwd)) + # Run valgrind on a process + command = "valgrind --leak-check=full " + arg + p = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid) + + while not self.replay_done: + time.sleep(0.1) + + # Kill valgrind and extract leak output + os.killpg(os.getpgid(p.pid), signal.SIGINT) + _, err = p.communicate() + error_msg = str(err, encoding='utf-8') + with open(os.path.join(BASEDIR, "selfdrive/test/valgrind_logs.txt"), "a") as f: + f.write(error_msg) + f.write(5 * "\n") + definitely_lost, indirectly_lost, possibly_lost = self.extract_leak_sizes(error_msg) + if max(definitely_lost, indirectly_lost, possibly_lost) > 0: + self.leak = True + print("LEAKS from", arg, "\nDefinitely lost:", definitely_lost, "\nIndirectly lost", indirectly_lost, "\nPossibly lost", possibly_lost) + else: + self.leak = False + + def replay_process(self, config, logreader): + pub_sockets = list(config.pub_sub.keys()) # We dump data from logs here + sub_sockets = [s for _, sub in config.pub_sub.items() for s in sub] # We get responses here + pm = messaging.PubMaster(pub_sockets) + sm = messaging.SubMaster(sub_sockets) + + print("Sorting logs") + all_msgs = sorted(logreader, key=lambda msg: msg.logMonoTime) + pub_msgs = [msg for msg in all_msgs if msg.which() in list(config.pub_sub.keys())] + + thread = threading.Thread(target=self.valgrindlauncher, args=(config.command, config.path)) + thread.daemon = True + thread.start() + + while not all(pm.all_readers_updated(s) for s in config.pub_sub.keys()): + time.sleep(0) + + for msg in tqdm(pub_msgs): + pm.send(msg.which(), msg.as_builder()) + if config.wait_for_response: + sm.update(100) + + self.replay_done = True + + def test_config(self): + open(os.path.join(BASEDIR, "selfdrive/test/valgrind_logs.txt"), "w").close() + + for cfg in CONFIGS: + self.leak = None + self.replay_done = False + + r, n = cfg.segment.rsplit("--", 1) + lr = LogReader(get_url(r, n)) + self.replay_process(cfg, lr) + + while self.leak is None: + time.sleep(0.1) # Wait for the valgrind to finish + + 
self.assertFalse(self.leak) + + +if __name__ == "__main__": + unittest.main() diff --git a/selfdrive/test/update_ci_routes.py b/selfdrive/test/update_ci_routes.py new file mode 100644 index 0000000..a9f4494 --- /dev/null +++ b/selfdrive/test/update_ci_routes.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +import os +import re +import subprocess +import sys +from collections.abc import Iterable + +from tqdm import tqdm + +from openpilot.selfdrive.car.tests.routes import routes as test_car_models_routes +from openpilot.selfdrive.test.process_replay.test_processes import source_segments as replay_segments +from openpilot.tools.lib.azure_container import AzureContainer +from openpilot.tools.lib.openpilotcontainers import DataCIContainer, DataProdContainer, OpenpilotCIContainer + +SOURCES: list[AzureContainer] = [ + DataProdContainer, + DataCIContainer +] + +DEST = OpenpilotCIContainer + +def upload_route(path: str, exclude_patterns: Iterable[str] = None) -> None: + if exclude_patterns is None: + exclude_patterns = [r'dcamera\.hevc'] + + r, n = path.rsplit("--", 1) + r = '/'.join(r.split('/')[-2:]) # strip out anything extra in the path + destpath = f"{r}/{n}" + for file in os.listdir(path): + if any(re.search(pattern, file) for pattern in exclude_patterns): + continue + DEST.upload_file(os.path.join(path, file), f"{destpath}/{file}") + + +def sync_to_ci_public(route: str) -> bool: + dest_container, dest_key = DEST.get_client_and_key() + key_prefix = route.replace('|', '/') + dongle_id = key_prefix.split('/')[0] + + if next(dest_container.list_blob_names(name_starts_with=key_prefix), None) is not None: + return True + + print(f"Uploading {route}") + for source_container in SOURCES: + # assumes az login has been run + print(f"Trying {source_container.ACCOUNT}/{source_container.CONTAINER}") + _, source_key = source_container.get_client_and_key() + cmd = [ + "azcopy", + "copy", + f"{source_container.BASE_URL}{key_prefix}?{source_key}", + f"{DEST.BASE_URL}{dongle_id}?{dest_key}", + "--recursive=true", + "--overwrite=false", + "--exclude-pattern=*/dcamera.hevc", + ] + + try: + result = subprocess.call(cmd, stdout=subprocess.DEVNULL) + if result == 0: + print("Success") + return True + except subprocess.CalledProcessError: + print("Failed") + + return False + + +if __name__ == "__main__": + failed_routes = [] + + to_sync = sys.argv[1:] + + if not len(to_sync): + # sync routes from the car tests routes and process replay + to_sync.extend([rt.route for rt in test_car_models_routes]) + to_sync.extend([s[1].rsplit('--', 1)[0] for s in replay_segments]) + + for r in tqdm(to_sync): + if not sync_to_ci_public(r): + failed_routes.append(r) + + if len(failed_routes): + print("failed routes:", failed_routes) diff --git a/system/camerad/test/.gitignore b/system/camerad/test/.gitignore new file mode 100644 index 0000000..d67473e --- /dev/null +++ b/system/camerad/test/.gitignore @@ -0,0 +1,2 @@ +jpegs/ +test_ae_gray diff --git a/system/camerad/test/check_skips.py b/system/camerad/test/check_skips.py new file mode 100644 index 0000000..0814ce4 --- /dev/null +++ b/system/camerad/test/check_skips.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# type: ignore +import cereal.messaging as messaging + +all_sockets = ['roadCameraState', 'driverCameraState', 'wideRoadCameraState'] +prev_id = [None,None,None] +this_id = [None,None,None] +dt = [None,None,None] +num_skipped = [0,0,0] + +if __name__ == "__main__": + sm = messaging.SubMaster(all_sockets) + while True: + sm.update() + + for i in range(len(all_sockets)): + if not 
sm.updated[all_sockets[i]]: + continue + this_id[i] = sm[all_sockets[i]].frameId + if prev_id[i] is None: + prev_id[i] = this_id[i] + continue + dt[i] = this_id[i] - prev_id[i] + if dt[i] != 1: + num_skipped[i] += dt[i] - 1 + print(all_sockets[i] ,dt[i] - 1, num_skipped[i]) + prev_id[i] = this_id[i] diff --git a/system/camerad/test/get_thumbnails_for_segment.py b/system/camerad/test/get_thumbnails_for_segment.py new file mode 100644 index 0000000..21409f3 --- /dev/null +++ b/system/camerad/test/get_thumbnails_for_segment.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python3 +import argparse +import os +from tqdm import tqdm + +from openpilot.tools.lib.logreader import LogReader + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("route", help="The route name") + args = parser.parse_args() + + out_path = os.path.join("jpegs", f"{args.route.replace('|', '_').replace('/', '_')}") + os.makedirs(out_path, exist_ok=True) + + lr = LogReader(args.route) + + for msg in tqdm(lr): + if msg.which() == 'thumbnail': + with open(os.path.join(out_path, f"{msg.thumbnail.frameId}.jpg"), 'wb') as f: + f.write(msg.thumbnail.thumbnail) + elif msg.which() == 'navThumbnail': + with open(os.path.join(out_path, f"nav_{msg.navThumbnail.frameId}.jpg"), 'wb') as f: + f.write(msg.navThumbnail.thumbnail) diff --git a/system/camerad/test/stress_restart.sh b/system/camerad/test/stress_restart.sh new file mode 100644 index 0000000..0445dcb --- /dev/null +++ b/system/camerad/test/stress_restart.sh @@ -0,0 +1,9 @@ +#!/bin/sh +cd .. +while :; do + ./camerad & + pid="$!" + sleep 2 + kill -2 $pid + wait $pid +done diff --git a/system/camerad/test/test_ae_gray.cc b/system/camerad/test/test_ae_gray.cc new file mode 100644 index 0000000..06d7849 --- /dev/null +++ b/system/camerad/test/test_ae_gray.cc @@ -0,0 +1,83 @@ +#define CATCH_CONFIG_MAIN +#include "catch2/catch.hpp" + +#include + +#include +#include + +#include "common/util.h" +#include "system/camerad/cameras/camera_common.h" + +#define W 240 +#define H 160 + + +#define TONE_SPLITS 3 + +float gts[TONE_SPLITS * TONE_SPLITS * TONE_SPLITS * TONE_SPLITS] = { + 0.917969, 0.917969, 0.375000, 0.917969, 0.375000, 0.375000, 0.187500, 0.187500, 0.187500, 0.917969, + 0.375000, 0.375000, 0.187500, 0.187500, 0.187500, 0.187500, 0.187500, 0.187500, 0.093750, 0.093750, + 0.093750, 0.093750, 0.093750, 0.093750, 0.093750, 0.093750, 0.093750, 0.917969, 0.375000, 0.375000, + 0.187500, 0.187500, 0.187500, 0.187500, 0.187500, 0.187500, 0.093750, 0.093750, 0.093750, 0.093750, + 0.093750, 0.093750, 0.093750, 0.093750, 0.093750, 0.093750, 0.093750, 0.093750, 0.093750, 0.093750, + 0.093750, 0.093750, 0.093750, 0.093750, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000}; + + +TEST_CASE("camera.test_set_exposure_target") { + // set up fake camerabuf + CameraBuf cb = {}; + VisionBuf vb = {}; + uint8_t * fb_y = new uint8_t[W*H]; + vb.y = fb_y; + cb.cur_yuv_buf = &vb; + cb.rgb_width = W; + cb.rgb_height = H; + + printf("AE test patterns %dx%d\n", cb.rgb_width, cb.rgb_height); + + // mix of 5 tones + uint8_t l[5] = {0, 24, 48, 96, 235}; // 235 is yuv max + + bool passed = true; + float rtol = 0.05; + // generate pattern and calculate EV + int cnt = 0; + for (int i_0=0; i_0 rtol*evgt) { + passed = false; + } + + // report + printf("%d/%d/%d/%d/%d: ev %f, 
gt %f, err %f\n", h_0, h_1, h_2, h_3, h_4, ev, evgt, fabs(ev - evgt) / (evgt != 0 ? evgt : 0.00001f)); + cnt++; + } + } + } + } + assert(passed); + + delete[] fb_y; +} diff --git a/system/camerad/test/test_camerad.py b/system/camerad/test/test_camerad.py new file mode 100644 index 0000000..4081156 --- /dev/null +++ b/system/camerad/test/test_camerad.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +import pytest +import time +import numpy as np +from flaky import flaky +from collections import defaultdict + +import cereal.messaging as messaging +from cereal import log +from cereal.services import SERVICE_LIST +from openpilot.selfdrive.manager.process_config import managed_processes + +TEST_TIMESPAN = 30 +LAG_FRAME_TOLERANCE = {log.FrameData.ImageSensor.ar0231: 0.5, # ARs use synced pulses for frame starts + log.FrameData.ImageSensor.ox03c10: 1.1} # OXs react to out-of-sync at next frame +FRAME_DELTA_TOLERANCE = {log.FrameData.ImageSensor.ar0231: 1.0, + log.FrameData.ImageSensor.ox03c10: 1.0} + +CAMERAS = ('roadCameraState', 'driverCameraState', 'wideRoadCameraState') + +# TODO: this shouldn't be needed +@flaky(max_runs=3) +@pytest.mark.tici +class TestCamerad: + def setup_method(self): + # run camerad and record logs + managed_processes['camerad'].start() + time.sleep(3) + socks = {c: messaging.sub_sock(c, conflate=False, timeout=100) for c in CAMERAS} + + self.logs = defaultdict(list) + start_time = time.monotonic() + while time.monotonic()- start_time < TEST_TIMESPAN: + for cam, s in socks.items(): + self.logs[cam] += messaging.drain_sock(s) + time.sleep(0.2) + managed_processes['camerad'].stop() + + self.log_by_frame_id = defaultdict(list) + self.sensor_type = None + for cam, msgs in self.logs.items(): + if self.sensor_type is None: + self.sensor_type = getattr(msgs[0], msgs[0].which()).sensor.raw + expected_frames = SERVICE_LIST[cam].frequency * TEST_TIMESPAN + assert expected_frames*0.95 < len(msgs) < expected_frames*1.05, f"unexpected frame count {cam}: {expected_frames=}, got {len(msgs)}" + + dts = np.abs(np.diff([getattr(m, m.which()).timestampSof/1e6 for m in msgs]) - 1000/SERVICE_LIST[cam].frequency) + assert (dts < FRAME_DELTA_TOLERANCE[self.sensor_type]).all(), f"{cam} dts(ms) out of spec: max diff {dts.max()}, 99 percentile {np.percentile(dts, 99)}" + + for m in msgs: + self.log_by_frame_id[getattr(m, m.which()).frameId].append(m) + + # strip beginning and end + for _ in range(3): + mn, mx = min(self.log_by_frame_id.keys()), max(self.log_by_frame_id.keys()) + del self.log_by_frame_id[mn] + del self.log_by_frame_id[mx] + + def test_frame_skips(self): + skips = {} + frame_ids = self.log_by_frame_id.keys() + for frame_id in range(min(frame_ids), max(frame_ids)): + seen_cams = [msg.which() for msg in self.log_by_frame_id[frame_id]] + skip_cams = set(CAMERAS) - set(seen_cams) + if len(skip_cams): + skips[frame_id] = skip_cams + assert len(skips) == 0, f"Found frame skips, missing cameras for the following frames: {skips}" + + def test_frame_sync(self): + frame_times = {frame_id: [getattr(m, m.which()).timestampSof for m in msgs] for frame_id, msgs in self.log_by_frame_id.items()} + diffs = {frame_id: (max(ts) - min(ts))/1e6 for frame_id, ts in frame_times.items()} + + def get_desc(fid, diff): + cam_times = [(m.which(), getattr(m, m.which()).timestampSof/1e6) for m in self.log_by_frame_id[fid]] + return (diff, cam_times) + laggy_frames = {k: get_desc(k, v) for k, v in diffs.items() if v > LAG_FRAME_TOLERANCE[self.sensor_type]} + + def in_tol(diff): + return 50 - 
LAG_FRAME_TOLERANCE[self.sensor_type] < diff and diff < 50 + LAG_FRAME_TOLERANCE[self.sensor_type] + if len(laggy_frames) != 0 and all( in_tol(laggy_frames[lf][0]) for lf in laggy_frames): + print("TODO: handle camera out of sync") + else: + assert len(laggy_frames) == 0, f"Frames not synced properly: {laggy_frames=}" diff --git a/system/camerad/test/test_exposure.py b/system/camerad/test/test_exposure.py new file mode 100644 index 0000000..50467f9 --- /dev/null +++ b/system/camerad/test/test_exposure.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +import time +import unittest +import numpy as np + +from openpilot.selfdrive.test.helpers import with_processes, phone_only +from openpilot.system.camerad.snapshot.snapshot import get_snapshots + +TEST_TIME = 45 +REPEAT = 5 + +class TestCamerad(unittest.TestCase): + @classmethod + def setUpClass(cls): + pass + + def _numpy_rgb2gray(self, im): + ret = np.clip(im[:,:,2] * 0.114 + im[:,:,1] * 0.587 + im[:,:,0] * 0.299, 0, 255).astype(np.uint8) + return ret + + def _is_exposure_okay(self, i, med_mean=None): + if med_mean is None: + med_mean = np.array([[0.2,0.4],[0.2,0.6]]) + h, w = i.shape[:2] + i = i[h//10:9*h//10,w//10:9*w//10] + med_ex, mean_ex = med_mean + i = self._numpy_rgb2gray(i) + i_median = np.median(i) / 255. + i_mean = np.mean(i) / 255. + print([i_median, i_mean]) + return med_ex[0] < i_median < med_ex[1] and mean_ex[0] < i_mean < mean_ex[1] + + @phone_only + @with_processes(['camerad']) + def test_camera_operation(self): + passed = 0 + start = time.time() + while time.time() - start < TEST_TIME and passed < REPEAT: + rpic, dpic = get_snapshots(frame="roadCameraState", front_frame="driverCameraState") + wpic, _ = get_snapshots(frame="wideRoadCameraState") + + res = self._is_exposure_okay(rpic) + res = res and self._is_exposure_okay(dpic) + res = res and self._is_exposure_okay(wpic) + + if passed > 0 and not res: + passed = -passed # fails test if any failure after first sus + break + + passed += int(res) + time.sleep(2) + self.assertGreaterEqual(passed, REPEAT) + +if __name__ == "__main__": + unittest.main() diff --git a/tinygrad_repo/test/Dockerfile b/tinygrad_repo/test/Dockerfile new file mode 100644 index 0000000..22be7b4 --- /dev/null +++ b/tinygrad_repo/test/Dockerfile @@ -0,0 +1,12 @@ +FROM ubuntu:20.04 + +# Install python3.8, and pip3 +RUN apt-get update && apt-get install -y --no-install-recommends \ + python3.8 \ + python3-pip \ + && rm -rf /var/lib/apt/lists/* + +# Install python dependencies +COPY . ./tinygrad +WORKDIR tinygrad +RUN pip install -e . 
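The Dockerfile above only provisions Python 3.8 and installs tinygrad in editable mode; it defines no test entrypoint. A minimal smoke test for the resulting image might look like the sketch below. This is not part of the patch: the image tag, build invocation, and script name are assumptions for illustration, and the check itself only exercises a trivial Tensor op against NumPy.

#!/usr/bin/env python3
# smoke_test.py -- hypothetical check that the editable tinygrad install inside the image works.
# Assumed usage (names and paths are illustrative, not from the patch):
#   docker build -f tinygrad_repo/test/Dockerfile -t tinygrad-test tinygrad_repo
#   docker run --rm -i tinygrad-test python3 - < smoke_test.py
import numpy as np
from tinygrad.tensor import Tensor

if __name__ == "__main__":
  a = Tensor.ones(4, 4)
  out = (a + a).numpy()  # realizes the computation on the default backend
  np.testing.assert_allclose(out, np.full((4, 4), 2.0))
  print("tinygrad editable install OK")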
diff --git a/tinygrad_repo/test/__init__.py b/tinygrad_repo/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tinygrad_repo/test/external/dist/test_collectives.py b/tinygrad_repo/test/external/dist/test_collectives.py new file mode 100644 index 0000000..a707e1d --- /dev/null +++ b/tinygrad_repo/test/external/dist/test_collectives.py @@ -0,0 +1,62 @@ +from extra import dist +from tinygrad.jit import TinyJit +if __name__ == "__main__": + dist.preinit() + +from extra.dist import collectives +from tinygrad.helpers import CI, getenv +from tinygrad.tensor import Tensor +import numpy as np + +@TinyJit +def allreduce_jit(t:Tensor, cache_id=None) -> Tensor: + return collectives.allreduce(t, cache_id=cache_id).realize() + +SIZE = 2048 if not CI else 2 +SIZE_2 = 255 if not CI else 3 + +def run(): + # set a deterministic seed so that both ranks generate the same random tensor + Tensor.manual_seed(42) + + rank = getenv("RANK") + + # loop 3 times to make sure it works with the jit + for _ in range(3): + # create a tensor to send + t = Tensor.zeros(SIZE, SIZE) if rank != 0 else Tensor.ones(SIZE, SIZE) + t2 = allreduce_jit(t.contiguous().realize(), cache_id="test") + assert np.allclose(np.ones((SIZE, SIZE)), t2.numpy()), f"{t2.numpy()} wasn't ones" + + # reset jit + allreduce_jit.cnt = 0 + allreduce_jit.input_replace = {} + + # test uneven chunk sizes + for _ in range(3): + # create a tensor to send + t = Tensor.ones(SIZE_2, SIZE_2, SIZE_2) if rank == 0 else Tensor.zeros(SIZE_2, SIZE_2, SIZE_2) + t2 = allreduce_jit(t.contiguous().realize(), cache_id="test2") + assert np.allclose(np.ones((SIZE_2, SIZE_2, SIZE_2)), t2.numpy()), f"{t2.numpy()} wasn't ones" + + print(f"rank {rank} passed") + +if __name__ == "__main__": + if getenv("HIP"): + from tinygrad.runtime.ops_hip import HIP + devices = [f"hip:{i}" for i in range(HIP.device_count)] + else: + from tinygrad.runtime.ops_gpu import CL + devices = [f"gpu:{i}" for i in range(len(CL.devices))] if not CI else ["gpu:0", "gpu:0"] + world_size = len(devices) + + dist.init_oob(world_size) + + processes = [] + for rank, device in enumerate(devices): + processes.append(dist.spawn(rank, device, fn=run, args=())) + for p in processes: p.join() + + # exit with error code if any of the processes failed + for p in processes: + if p.exitcode != 0: exit(p.exitcode) diff --git a/tinygrad_repo/test/external/dist/test_world.py b/tinygrad_repo/test/external/dist/test_world.py new file mode 100644 index 0000000..28065e9 --- /dev/null +++ b/tinygrad_repo/test/external/dist/test_world.py @@ -0,0 +1,68 @@ +from extra import dist +from tinygrad.jit import TinyJit +if __name__ == "__main__": + dist.preinit() + +from extra.dist import world +from tinygrad.helpers import CI, getenv +from tinygrad.tensor import Tensor +import numpy as np + +@TinyJit +def send_jit(t, target_rank, cache_id=None) -> Tensor: + return world.send(t, target_rank, cache_id=cache_id).realize() + +@TinyJit +def recv_jit(t, target_rank, cache_id=None) -> Tensor: + return world.recv(t, target_rank, cache_id=cache_id).realize() + +SIZE = 2048 if not CI else 2 + +def run(): + # set a deterministic seed so that both ranks generate the same random tensor + Tensor.manual_seed(42) + + rank = getenv("RANK") + + # loop 3 times to make sure it works with the jit + for _ in range(3): + # create a tensor to send + t = Tensor.randn(SIZE, SIZE) + + # send to rank 1 + if rank == 0: + send_jit(t, 1, cache_id="test") + elif rank == 1: + t2 = Tensor.empty(SIZE, SIZE) + recv_jit(t2, 0, cache_id="test") + + # 
recv from rank 1 + if rank == 0: + t2 = Tensor.empty(SIZE, SIZE) + recv_jit(t2, 1, cache_id="test2") + elif rank == 1: + send_jit(t2, 0, cache_id="test2") + + # check that the received tensor is the same as the sent tensor + if rank == 0: + assert np.allclose(t.numpy(), t2.numpy()), f"{t2.numpy()} wasn't equal to {t.numpy()}" + + print(f"rank {rank} passed") + +if __name__ == "__main__": + if getenv("HIP"): + devices = ["hip:0", "hip:1"] + else: + devices = ["gpu:0", "gpu:1" if not CI else "gpu:0"] + world_size = len(devices) + + dist.init_oob(world_size) + + processes = [] + for rank, device in enumerate(devices): + processes.append(dist.spawn(rank, device, fn=run, args=())) + for p in processes: p.join() + + # exit with error code if any of the processes failed + for p in processes: + if p.exitcode != 0: exit(p.exitcode) diff --git a/tinygrad_repo/test/external/external_copy_benchmark.py b/tinygrad_repo/test/external/external_copy_benchmark.py new file mode 100644 index 0000000..9c1e61b --- /dev/null +++ b/tinygrad_repo/test/external/external_copy_benchmark.py @@ -0,0 +1,27 @@ +import unittest +from tinygrad.helpers import prod +from tinygrad.ops import Device +from tinygrad.tensor import Tensor +from tinygrad.helpers import GlobalCounters +from tinygrad.jit import CacheCollector + +class TestCopy(unittest.TestCase): + def test_add1(self): + pts = [] + for i in range(16384, 16384*256, 16384): + t = Tensor.randn(i).realize() + CacheCollector.start() + t.assign(t+1).realize() + fxn, args, _ = CacheCollector.finish()[0] + GlobalCounters.reset() + def run(): return fxn(args, force_wait=True) + ct = min([run() for _ in range(10)]) + mb = prod(t.shape)*t.dtype.itemsize*2*1e-6 + print(f"{mb*1e3:.2f} kB, {ct*1e3:.2f} ms, {mb/ct:.2f} MB/s") + pts.append((mb, mb/ct)) + from matplotlib import pyplot as plt + plt.plot([x[0] for x in pts], [x[1] for x in pts]) + plt.show() + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tinygrad_repo/test/external/external_llama_eval.py b/tinygrad_repo/test/external/external_llama_eval.py new file mode 100644 index 0000000..dfbbcba --- /dev/null +++ b/tinygrad_repo/test/external/external_llama_eval.py @@ -0,0 +1,102 @@ +from lm_eval.base import BaseLM +from lm_eval import evaluator, tasks +import torch, json, argparse + +from examples.llama import LLaMa +from tinygrad.tensor import Tensor +from tinygrad.ops import Device + +class LLaMaAdaptor(BaseLM): + def __init__( + self, + model_size="7B", + model_gen=1, + device="", + quantize=False, + batch_size=1, + max_batch_size=1, + do_sample=False, + temperature=1.0, + checkpoint_path="", + tokenizer_path="", + ): + super().__init__() + + if batch_size is None: + batch_size = 1 + self.do_sample = do_sample + self.temperature = temperature + self._device = device + + assert isinstance(model_gen, int) + assert isinstance(model_size, str) + assert isinstance(batch_size, int) + assert isinstance(checkpoint_path, str) + assert isinstance(tokenizer_path, str) + + self.llama = LLaMa.build(checkpoint_path, tokenizer_path, model_gen, model_size, quantize) + + @classmethod + def create_from_arg_string(cls, arg_string, additional_config=None): + kwargs = {el.split("=")[0]: el.split("=")[1] for el in arg_string.split(",")} + return cls(**kwargs, **additional_config) + + @property + def eot_token_id(self): + # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence* + return self.llama.tokenizer.eos_id() + + @property + def max_length(self): + return 1024 + + 
@property + def max_gen_toks(self): + return 256 + + @property + def batch_size(self): + return 1 + + @property + def device(self): + return self._device + + def tok_encode(self, string: str): + return [self.llama.tokenizer.bos_id()] + self.llama.tokenizer.encode(string) + + def tok_decode(self, tokens): + return self.llama.tokenizer.decode(tokens) + + def _model_call(self, inps): + Tensor.no_grad = True + return torch.Tensor(self.llama.model(Tensor(inps.numpy()), 0).numpy()) + + def greedy_until(self, requests): + continuations = [] + for request in requests: + prompt, until = request[0], request[1]['until'] + output = self.llama.greedy_until(prompt, until, max_length=128, temperature=0.0) + continuations.append(output[len(prompt):]) + return continuations + + def _model_generate(self, context, max_length, eos_token_id): + raise NotImplementedError() + +if __name__ == '__main__': + print(f"using {Device.DEFAULT} backend") + + parser = argparse.ArgumentParser(description='Run LLaMA evals in tinygrad', formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--size', type=str, default="7B", help="Size of model to use [7B, 13B, 30B, 65B] for Gen 1, [7B, 13B] for Gen 2") + parser.add_argument('--gen', type=int, default="1", help="Generation of the model to use [1, 2]") + parser.add_argument('--quantize', action='store_true', help="Quantize the weights to int8 in memory") + parser.add_argument('--eval', type=str, default="arc_easy", help="Run in evaluation mode") + parser.add_argument('--limit', type=int, default=None, help="Limit tests in eval") + parser.add_argument('--weights', type=str, default="./weights/LLaMa/", help="Location of the weights") + parser.add_argument('--tokenizer', type=str, default="./weights/LLaMa/tokenizer.model", help="Location of the tokenizer") + args = parser.parse_args() + + # run eval and exit + adaptor = LLaMaAdaptor(model_gen=args.gen, model_size=args.size, quantize=args.quantize, checkpoint_path=args.weights, tokenizer_path=args.tokenizer, device="cpu") + results = evaluator.evaluate(adaptor, tasks.get_task_dict(args.eval.split(",")), False, 0, args.limit) + print(json.dumps(results, indent=2)) diff --git a/tinygrad_repo/test/external/external_model_benchmark.py b/tinygrad_repo/test/external/external_model_benchmark.py new file mode 100644 index 0000000..b784f11 --- /dev/null +++ b/tinygrad_repo/test/external/external_model_benchmark.py @@ -0,0 +1,128 @@ +import csv, pathlib, time, numpy as np +from os import getenv +import torch +torch.set_num_threads(1) +import onnx +from onnx.helper import tensor_dtype_to_np_dtype +import onnxruntime as ort +from onnx2torch import convert +from extra.utils import download_file +from extra.onnx import get_run_onnx +from tinygrad.helpers import OSX, DEBUG +from tinygrad.tensor import Tensor +from tinygrad.ops import Device + +MODELS = { + "resnet50": "https://github.com/onnx/models/raw/main/vision/classification/resnet/model/resnet50-caffe2-v1-9.onnx", + "openpilot": "https://github.com/commaai/openpilot/raw/v0.9.4/selfdrive/modeld/models/supercombo.onnx", + "efficientnet": "https://github.com/onnx/models/raw/main/vision/classification/efficientnet-lite4/model/efficientnet-lite4-11.onnx", + "shufflenet": "https://github.com/onnx/models/raw/main/vision/classification/shufflenet/model/shufflenet-9.onnx", + "commavq": "https://huggingface.co/commaai/commavq-gpt2m/resolve/main/gpt2m.onnx", + + # broken in torch MPS + #"zfnet": 
"https://github.com/onnx/models/raw/main/vision/classification/zfnet-512/model/zfnet512-9.onnx", + # TypeError: BatchNormalization() got an unexpected keyword argument 'is_test' + #"densenet": "https://github.com/onnx/models/raw/main/vision/classification/densenet-121/model/densenet-3.onnx", + # AssertionError: only onnx version >= 10 supported for slice + #"bert": "https://github.com/onnx/models/raw/main/text/machine_comprehension/bert-squad/model/bertsquad-8.onnx", + # really slow + #"resnet18": "https://github.com/onnx/models/raw/main/vision/classification/resnet/model/resnet18-v2-7.onnx", +} + +CSV = {} +open_csv = None +torch.manual_seed(1) + +def benchmark(mnm, nm, fxn): + tms = [] + for _ in range(3): + st = time.perf_counter_ns() + ret = fxn() + tms.append(time.perf_counter_ns() - st) + print(f"{mnm:15s} {nm:25s} {min(tms)*1e-6:7.2f} ms") + CSV[nm] = min(tms)*1e-6 + return min(tms), ret + +#BASE = pathlib.Path(__file__).parents[2] / "weights" / "onnx" +BASE = pathlib.Path("/tmp/onnx") +def benchmark_model(m, validate_outs=False): + global open_csv, CSV + CSV = {"model": m} + + fn = BASE / MODELS[m].split("/")[-1] + download_file(MODELS[m], fn) + onnx_model = onnx.load(fn) + output_names = [out.name for out in onnx_model.graph.output] + excluded = {inp.name for inp in onnx_model.graph.initializer} + input_shapes = {inp.name:tuple(x.dim_value if x.dim_value != 0 else 1 for x in inp.type.tensor_type.shape.dim) for inp in onnx_model.graph.input if inp.name not in excluded} + input_types = {inp.name: tensor_dtype_to_np_dtype(inp.type.tensor_type.elem_type) for inp in onnx_model.graph.input if inp.name not in excluded} + #input_types = {k:v if v!=np.float16 else np.float32 for k,v in input_types.items()} # cast + np_inputs = {k:torch.randn(shp).numpy().astype(input_types[k]) for k,shp in input_shapes.items()} + assert len(input_shapes) < 30, f"too many input shapes {len(input_shapes)}" + + # print input names + if DEBUG >= 2: print([inp.name for inp in onnx_model.graph.input if inp.name not in excluded]) + + for device in ["METAL" if OSX else "GPU", "CLANG"]: # + (["CUDA"] if torch.cuda.is_available() else []): + Device.DEFAULT = device + inputs = {k:Tensor(inp) for k,inp in np_inputs.items()} + tinygrad_model = get_run_onnx(onnx_model) + benchmark(m, f"tinygrad_{device.lower()}_jitless", lambda: {k:v.numpy() for k,v in tinygrad_model(inputs).items()}) + + from tinygrad.jit import TinyJit + tinygrad_jitted_model = TinyJit(lambda **kwargs: {k:v.realize() for k,v in tinygrad_model(kwargs).items()}) + for _ in range(3): {k:v.numpy() for k,v in tinygrad_jitted_model(**inputs).items()} + benchmark(m, f"tinygrad_{device.lower()}_jit", lambda: {k:v.numpy() for k,v in tinygrad_jitted_model(**inputs).items()}) + del inputs, tinygrad_model, tinygrad_jitted_model + + try: + torch_model = convert(onnx_model) + torch_inputs = [torch.tensor(x) for x in np_inputs.values()] + benchmark(m, "torch_cpu", lambda: torch_model(*torch_inputs)) + + torch_device = "mps" if OSX else "cuda" + torch_mps_model = torch_model.to(torch_device) + torch_mps_inputs = [x.to(torch_device) for x in torch_inputs] + benchmark(m, f"torch_{torch_device}", lambda: torch_mps_model(*torch_mps_inputs)) + except Exception as e: print(f"{m:16s}onnx2torch {type(e).__name__:>25}") + + # bench onnxruntime + ort_options = ort.SessionOptions() + ort_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL + ort_options.log_severity_level = 3 # no warnings + for backend in ["CPU", "CUDA" if not OSX else "CoreML"]: # 
https://onnxruntime.ai/docs/execution-providers/ + provider = backend+"ExecutionProvider" + if provider not in ort.get_available_providers(): continue + ort_sess = ort.InferenceSession(str(fn), ort_options, [provider]) + benchmark(m, f"onnxruntime_{backend.lower()}", lambda: ort_sess.run(output_names, np_inputs)) + del ort_sess + + if validate_outs: + rtol, atol = 2e-3, 2e-3 # tolerance for fp16 models + inputs = {k:Tensor(inp) for k,inp in np_inputs.items()} + tinygrad_model = get_run_onnx(onnx_model) + tinygrad_out = tinygrad_model(inputs) + + ort_sess = ort.InferenceSession(str(fn), ort_options, ["CPUExecutionProvider"]) + onnx_out = ort_sess.run(output_names, np_inputs) + onnx_out = dict([*[(name,x) for name, x in zip(output_names, onnx_out)]]) + + assert_allclose(tinygrad_out, onnx_out, rtol=rtol, atol=atol) + print(f"{m:16s}outputs validated with rtol={rtol:.1e}, atol={atol:.1e}") + + if open_csv is None: + open_csv = csv.DictWriter(open('onnx_inference_speed.csv', 'w', newline=''), fieldnames=list(CSV.keys())) + open_csv.writeheader() + open_csv.writerow(CSV) + +def assert_allclose(tiny_out:dict, onnx_out:dict, rtol=1e-5, atol=1e-5): + assert len(tiny_out) == len(onnx_out) and tiny_out.keys() == onnx_out.keys() + for k in tiny_out.keys(): + tiny_v, onnx_v = tiny_out[k], onnx_out[k] + if tiny_v is None: assert tiny_v == onnx_v + else: np.testing.assert_allclose(tiny_v.numpy(), onnx_v, rtol=rtol, atol=atol, err_msg=f"For tensor '{k}' in {tiny_out.keys()}") + +if __name__ == "__main__": + if getenv("MODEL", "") != "": benchmark_model(getenv("MODEL", ""), True) + else: + for m in MODELS: benchmark_model(m, True) diff --git a/tinygrad_repo/test/external/external_multi_gpu.py b/tinygrad_repo/test/external/external_multi_gpu.py new file mode 100644 index 0000000..8edd67f --- /dev/null +++ b/tinygrad_repo/test/external/external_multi_gpu.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +# cd disassemblers/ && git clone --recursive github.com:geohot/cuda_ioctl_sniffer.git +# LD_PRELOAD=$PWD/disassemblers/cuda_ioctl_sniffer/out/sniff.so GPU=1 python3 test/external/external_multi_gpu.py +import numpy as np +from tinygrad.tensor import Tensor +from tinygrad.helpers import colored +from tinygrad.helpers import Timing +from tinygrad.runtime.ops_gpu import CL + +# TODO: support multidevice in cuda +device = 'gpu' + +if __name__ == "__main__": + sz = 1024*1024*256 # 1 GB + #sz = 1024*64 + + with Timing("CPU creation: ", on_exit=lambda x: f", {(sz*4*2)/x:.2f} GB/sec"): + c0 = Tensor.ones(sz, device="cpu").realize() + c1 = (Tensor.ones(sz, device="cpu")/2).realize() + + with Timing("CPU -> 0: ", on_exit=lambda x: f", {(sz*4)/x:.2f} GB/sec"): + a0 = c0.to(f'{device}:0').realize() + CL.synchronize() + with Timing("CPU -> 1: ", on_exit=lambda x: f", {(sz*4)/x:.2f} GB/sec"): + b1 = c1.to(f'{device}:1').realize() + CL.synchronize() + + # cross copy. this is going through the CPU + with Timing("0 -> CPU -> 1: ", on_exit=lambda x: f", {(sz*4)/x:.2f} GB/sec"): + a1 = a0.to(f'{device}:1').realize() + CL.synchronize() + with Timing("1 -> CPU -> 0: ", on_exit=lambda x: f", {(sz*4)/x:.2f} GB/sec"): + b0 = b1.to(f'{device}:0').realize() + CL.synchronize() + + # sum + with Timing("0+0 -> 0 (sum): ", on_exit=lambda x: f", {(sz*4)/x:.2f} GB/sec"): + ab0 = (a0 + b0).realize() + CL.synchronize() + with Timing("1+1 -> 1 (sum): ", on_exit=lambda x: f", {(sz*4)/x:.2f} GB/sec"): + ab1 = (a1 + b1).realize() + CL.synchronize() + + # cross device sum (does this work?) + # is this making a copy first? is that copy through the CPU? 
+ # the slowness comes from the *blocking* clprg call, is this pyopencl? + with Timing(colored("0+1 -> 0 (sum): ", "red"), on_exit=lambda x: f", {(sz*4)/x:.2f} GB/sec"): + abx0 = (a0 + b1).realize() + CL.synchronize() + + with Timing(colored("1+0 -> 1 (sum): ", "red"), on_exit=lambda x: f", {(sz*4)/x:.2f} GB/sec"): + abx1 = (b1 + a0).realize() + CL.synchronize() + + # copy back + # NOTE: half of this slowness is caused by allocating memory on the CPU + with Timing("0 -> CPU: ", on_exit=lambda x: f", {(sz*4)/x:.2f} GB/sec"): + cc0 = ab0.numpy() + with Timing("1 -> CPU: ", on_exit=lambda x: f", {(sz*4)/x:.2f} GB/sec"): + cc1 = ab1.numpy() + + # same + print("testing") + np.testing.assert_allclose(cc0, cc1) + + # devices + print(ab0) + print(ab1) + print(abx0) + print(abx1) diff --git a/tinygrad_repo/test/external/external_osx_profiling.py b/tinygrad_repo/test/external/external_osx_profiling.py new file mode 100644 index 0000000..6f9b215 --- /dev/null +++ b/tinygrad_repo/test/external/external_osx_profiling.py @@ -0,0 +1,41 @@ +from tinygrad.runtime.ops_gpu import CLProgram, CL, CLBuffer +from tinygrad.helpers import dtypes +import time + +N = 1000000 +a = CLBuffer(N, dtypes.float32) +b = CLBuffer(N, dtypes.float32) +c = CLBuffer(N, dtypes.float32) + +prg = CLProgram("test", """__kernel void test(__global float *a, __global float *b, __global float *c) { + int idx = get_global_id(0); + a[idx] = b[idx] + c[idx]; +}""") +prg.clprgs[0](CL.cl_queue[0], [N,], None, a._buf, b._buf, c._buf) +t1 = time.monotonic_ns() +e1 = prg.clprgs[0](CL.cl_queue[0], [N,], None, a._buf, b._buf, c._buf) +CL.synchronize() +t2 = time.monotonic_ns() +time.sleep(3) +t3 = time.monotonic_ns() +e2 = prg.clprgs[0](CL.cl_queue[0], [N,], None, a._buf, b._buf, c._buf) +CL.synchronize() +t4 = time.monotonic_ns() + +print(e1.profile.queued) +print(e1.profile.submit) +print(e1.profile.start) +print(e1.profile.end) + +print(e1, e2) +print(t2-t1, e1.profile.end - e1.profile.start) +print(t4-t3, e2.profile.end - e2.profile.start) +print(t3-t2, e2.profile.queued-e1.profile.end) +print((t3-t2) / (e2.profile.start-e1.profile.end), "ratio") + +print("ratio since boot", t1/e1.profile.start) + +print(e1.profile.start) +print(e1.profile.end) +print(e2.profile.start) +print(e2.profile.end) diff --git a/tinygrad_repo/test/external/external_test_allocator_on_models.py b/tinygrad_repo/test/external/external_test_allocator_on_models.py new file mode 100644 index 0000000..04911a6 --- /dev/null +++ b/tinygrad_repo/test/external/external_test_allocator_on_models.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python +import unittest, gc +import numpy as np +from tinygrad.tensor import Tensor +from tinygrad.nn.state import get_state_dict +from tinygrad.helpers import GlobalCounters +from tinygrad.runtime.lib import RawBuffer, LRUAllocator +from tinygrad.helpers import dtypes, prod +from tinygrad.ops import Device +from test.helpers import derandomize_model + +from examples.llama import Transformer + +ALLOCATED_DEV_BUFS = 0 +class FakeDeviceBuffer: + def __init__(self, sz, dt, device): + self.id = 1 + self.size = sz + self.dtype = dt + self.device = device + + global ALLOCATED_DEV_BUFS + ALLOCATED_DEV_BUFS += 1 +class FakeAllocator(LRUAllocator): + def _do_alloc(self, size, dtype, device, **kwargs): return FakeDeviceBuffer(size, dtype, device) + def _do_free(self, buf): + buf.id -= 1 + assert buf.id == 0, f"Free should be called once, but {buf.id}" + def __del__(self): # Fake allocator should clear all buffers after each test. 
+ for v in self.cached_buffers.values(): + for buf, _ in v: self._free_buffer(buf) + +FAKE_GLOBAL_ALLOCATOR = None +class FakeBuffer(RawBuffer): + def __init__(self, size, dtype, device='0'): + global FAKE_GLOBAL_ALLOCATOR + super().__init__(size, dtype, allocator=FAKE_GLOBAL_ALLOCATOR, **{'device': device}) + assert self._buf.size == size and self._buf.dtype == dtype and self._buf.device == device, "This allocator requires 100% match of dtype and size." + @classmethod + def fromCPU(cls, x:np.ndarray, **kwargs): return cls(prod(x.shape), dtypes.from_np(x.dtype), **kwargs) + def toCPU(self): return np.empty(self.size, dtype=self.dtype.np) +class FakeProgram: + def __init__(self, name:str, prg:str): pass + def __call__(self, *bufs, global_size, local_size, wait=False): pass + +def helper_test_correctness(gen, train): + from tinygrad.runtime.ops_gpu import CL, CLAllocator + old_alloc = CL.cl_allocator + CL.cl_allocator = CLAllocator(0) + no_alloc_result = train(*gen()).numpy() + Device[Device.DEFAULT].synchronize() + CL.cl_allocator = CLAllocator(512<<30) # Test cache correctness, so cache as much as possible, 512gb + for _ in range(4): + GlobalCounters.reset() + np.testing.assert_allclose(train(*gen()).numpy(), no_alloc_result, rtol=1e-3, atol=1e-5) + Device[Device.DEFAULT].synchronize() + assert len(CL.cl_allocator.cached_buffers) != 0, "Cache must be used" + CL.cl_allocator = old_alloc + +def __helper_test_alloc_count(gen, train): + was_alloc = ALLOCATED_DEV_BUFS + for _ in range(2): + train(*gen()) + return ALLOCATED_DEV_BUFS - was_alloc + +def helper_test_alloc_count(mm, gen, train): + global FAKE_GLOBAL_ALLOCATOR + backup_program = Device[Device.DEFAULT].runtime + backup_buffer = Device[Device.DEFAULT].buffer + Device[Device.DEFAULT].runtime = FakeProgram + Device[Device.DEFAULT].buffer = FakeBuffer + Device[Device.DEFAULT].method_cache.clear() + FAKE_GLOBAL_ALLOCATOR = FakeAllocator(16<<30) + new_allocs = __helper_test_alloc_count(gen, train) + Device[Device.DEFAULT].method_cache.clear() + FAKE_GLOBAL_ALLOCATOR = FakeAllocator(0) + old_allocs = __helper_test_alloc_count(gen, train) + print(f"{mm}: llama: old allocs count {old_allocs}, new allocs count {new_allocs}") + assert new_allocs < old_allocs, f"Hmm, doesn't cache work any more?" + Device[Device.DEFAULT].runtime = backup_program + Device[Device.DEFAULT].buffer = backup_buffer + FAKE_GLOBAL_ALLOCATOR = None + +def check_gc(): + if Device.DEFAULT == "GPU": + gc.collect() # Need to collect Tensors. 
+ from extra.introspection import print_objects + assert print_objects() == 0 + +class TestAllocators(unittest.TestCase): + @unittest.skipUnless(Device.DEFAULT == "GPU", "Not Implemented") + def test_lru_allocator_tiny_llama(self): + old_type = Tensor.default_type + Tensor.default_type = dtypes.float16 + + args_tiny = {"dim": 1024, "multiple_of": 256, "n_heads": 8, "n_layers": 8, "norm_eps": 1e-05, "vocab_size": 1000} + def __test(): + model = Transformer(**args_tiny) + derandomize_model(model) + def test(t): return model(t, 0).realize() + helper_test_correctness(lambda: (Tensor([[1,]]),), test) + __test() + Tensor.default_type = old_type + check_gc() + + @unittest.skipUnless(Device.DEFAULT == "GPU", "Not Implemented") + def test_lru_allocator_tiny_llama_alloc_counts(self): + args_tiny = {"dim": 1024, "multiple_of": 256, "n_heads": 8, "n_layers": 8, "norm_eps": 1e-05, "vocab_size": 1000} + def test_alloc_count(t): + model = Transformer(**args_tiny) + for v in get_state_dict(model).values(): v.assign(Tensor.empty(*v.shape, dtype=v.dtype)) + return model(t, 0).realize() + helper_test_alloc_count("llama", lambda: (Tensor([[2,]]),), test_alloc_count) + check_gc() + + @unittest.skip("huge for CI") + def test_stable_diffusion(self): + from examples.stable_diffusion import UNetModel + model = UNetModel() + derandomize_model(model) + def test(t, t2): return model(t, 801, t2).realize() + helper_test_correctness(lambda: (Tensor.randn(1, 4, 16, 16),Tensor.randn(1, 77, 768)), test) + +if __name__ == "__main__": + unittest.main() diff --git a/tinygrad_repo/test/external/external_test_embedding.py b/tinygrad_repo/test/external/external_test_embedding.py new file mode 100644 index 0000000..9d6bd7f --- /dev/null +++ b/tinygrad_repo/test/external/external_test_embedding.py @@ -0,0 +1,8 @@ +from tinygrad.tensor import Tensor +from tinygrad.nn import Embedding + +if __name__ == "__main__": + vocab_size = 50257 + dim = 128 + test = Embedding(vocab_size, dim) + ret = test(Tensor([[1,2,3]])).numpy() diff --git a/tinygrad_repo/test/external/external_test_gpu_ast.py b/tinygrad_repo/test/external/external_test_gpu_ast.py new file mode 100644 index 0000000..0c98552 --- /dev/null +++ b/tinygrad_repo/test/external/external_test_gpu_ast.py @@ -0,0 +1,208 @@ +#!/usr/bin/env python +import unittest +import numpy as np +from tinygrad.ops import LazyOp, ReduceOps, BinaryOps, UnaryOps, MovementOps +from tinygrad.shape.shapetracker import ShapeTracker, View, ZeroView +from tinygrad.runtime.ops_gpu import GPUBuffer, CLProgram, CLCodegen +#from tinygrad.runtime.ops_metal import MetalBuffer as GPUBuffer, MetalProgram as CLProgram, MetalCodegen as CLCodegen +from tinygrad.helpers import getenv +from extra.lib_test_ast import test_ast + +import platform +OSX = platform.system() == "Darwin" + +def compile_and_test_ast(ast, local_size=None): + k = CLCodegen(ast) + prg = k.codegen().build(CLProgram) + if local_size is not None: prg.local_size = local_size + for i in range(5): prg(prg.lower(k.bufs)) + if getenv("TEST", 0): test_ast(k) + +class TestAST(unittest.TestCase): + def test_conv_zeroview_ast(self): + buf0 = GPUBuffer(shape=ShapeTracker(shape=(1, 1, 3, 4), views=[View((1, 1, 3, 4), (2, 2, 2, 1), -3), ZeroView((1, 1, 1, 2), ((0, 1), (0, 1), (-1, 2), (-1, 3))), View((1, 1, 3, 4), (0, 0, 4, 1), 0)])) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(1, 1, 3, 4), views=[View((1, 1, 3, 4), (0, 0, 0, 0), 0)])) + op1 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + ast = LazyOp(UnaryOps.RELU, (op1,), None) + compile_and_test_ast(ast) + + 
def test_cifar_conv(self): + buf0 = GPUBuffer(shape=ShapeTracker(shape=(512, 1, 128, 32, 32, 64, 3, 3), views=[View((512, 64, 34, 34), (65536, 1024, 32, 1), -33), ZeroView((512, 64, 32, 32), ((0, 512), (0, 64), (-1, 33), (-1, 33))), View((512, 1, 128, 32, 32, 64, 3, 3), (73984, 73984, 0, 34, 1, 1156, 34, 1), 0)]), hostbuf=GPUBuffer(shape=(512, 64, 32, 32), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(512, 1, 128, 32, 32, 64, 3, 3), views=[View((512, 1, 128, 32, 32, 64, 3, 3), (0, 0, 576, 0, 0, 9, 3, 1), 0)]), hostbuf=GPUBuffer(shape=(128, 64, 3, 3), force_create=True)) + op0 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + ast = LazyOp(ReduceOps.SUM, (op0,), (512, 1, 128, 32, 32, 1, 1, 1)) + compile_and_test_ast(ast) + + def test_cifar_conv_backward(self): + buf0 = GPUBuffer(shape=ShapeTracker(shape=(256, 1, 512, 3, 3, 512, 8, 8), views=[View((256, 512, 10, 10), (64, 16384, 8, 1), -9), ZeroView((256, 512, 8, 8), ((0, 256), (0, 512), (-1, 9), (-1, 9))), View((256, 1, 512, 3, 3, 512, 8, 8), (51200, 51200, 0, 10, 1, 100, 10, 1), 0)]), hostbuf=GPUBuffer(shape=(512, 256, 8, 8), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(256, 1, 512, 3, 3, 512, 8, 8), views=[View((256, 1, 512, 3, 3, 512, 8, 8), (0, 0, 64, 0, 0, 32768, 8, 1), 0)]), hostbuf=GPUBuffer(shape=(512, 512, 8, 8), force_create=True)) + op0 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + op1 = LazyOp(ReduceOps.SUM, (op0,), (256, 1, 512, 3, 3, 1, 1, 1)) + ast = LazyOp(MovementOps.RESHAPE, (op1,), (256, 512, 3, 3)) + compile_and_test_ast(ast) + + def test_first_op_conv(self): + buf0 = GPUBuffer(shape=ShapeTracker(shape=(1, 64, 128, 8, 4, 3, 3, 3, 4), views=[View((1, 130, 258, 1, 12), (393216, 3072, 12, 12, 1), -3084), ZeroView((1, 128, 256, 1, 12), ((0, 1), (-1, 129), (-1, 257), (0, 1), (0, 12))), View((1, 64, 128, 8, 4, 3, 3, 3, 4), (0, 6192, 24, 0, 0, 3096, 12, 4, 1), 0)]), hostbuf=GPUBuffer(shape=(128, 768, 4), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(1, 64, 128, 8, 4, 3, 3, 3, 4), views=[View((1, 64, 128, 8, 4, 3, 3, 3, 4), (0, 0, 0, 432, 4, 144, 16, 48, 1), 0)]), hostbuf=GPUBuffer(shape=(8, 108, 4), force_create=True)) + op0 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + op1 = LazyOp(ReduceOps.SUM, (op0,), (1, 64, 128, 8, 4, 1, 1, 1, 1)) + buf2 = GPUBuffer(shape=ShapeTracker(shape=(1, 64, 128, 8, 4, 1, 1, 1, 1), views=[View((1, 64, 128, 8, 4, 1, 1, 1, 1), (0, 0, 0, 4, 1, 1, 1, 1, 1), 0)]), hostbuf=GPUBuffer(shape=(32,), force_create=True)) + op2 = LazyOp(BinaryOps.ADD, (op1,buf2,), None) + op3 = LazyOp(UnaryOps.RELU, (op2,), None) + buf3 = GPUBuffer(shape=ShapeTracker(shape=(1, 64, 128, 8, 4, 1, 1, 1, 1), views=[View((1, 64, 128, 8, 4, 1, 1, 1, 1), (0, 0, 0, 0, 0, 1, 1, 1, 1), 0)]), hostbuf=GPUBuffer(shape=(1,), backing=np.array([1.], dtype=np.float32))) + buf4 = GPUBuffer(shape=ShapeTracker(shape=(1, 64, 128, 8, 4, 1, 1, 1, 1), views=[View((1, 64, 128, 8, 4, 1, 1, 1, 1), (0, 0, 0, 0, 0, 1, 1, 1, 1), 0)]), hostbuf=GPUBuffer(shape=(1,), backing=np.array([1.], dtype=np.float32))) + op4 = LazyOp(UnaryOps.EXP, (op2,), None) + op5 = LazyOp(BinaryOps.SUB, (buf4,op4,), None) + op6 = LazyOp(UnaryOps.RELU, (op5,), None) + op7 = LazyOp(BinaryOps.MUL, (buf3,op6,), None) + op8 = LazyOp(BinaryOps.SUB, (op3,op7,), None) + ast = LazyOp(MovementOps.RESHAPE, (op8,), (64, 1024, 4)) + compile_and_test_ast(ast) + + def test_second_op_conv(self): + buf0 = GPUBuffer(shape=ShapeTracker(shape=(1, 64, 128, 8, 4, 1, 1, 3, 3), views=[View((1, 66, 130, 32, 1), (262144, 4096, 32, 1, 1), -4128), 
ZeroView((1, 64, 128, 32, 1), ((0, 1), (-1, 65), (-1, 129), (0, 32), (0, 1))), View((1, 64, 128, 8, 4, 1, 1, 3, 3), (266240, 4160, 32, 4, 1, 12480, 12480, 4160, 32), 0)]), hostbuf=GPUBuffer(shape=(64, 1024, 4), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(1, 64, 128, 8, 4, 1, 1, 3, 3), views=[View((1, 64, 128, 8, 4, 1, 1, 3, 3), (0, 0, 0, 36, 1, 0, 0, 12, 4), 0)]), hostbuf=GPUBuffer(shape=(8, 9, 4), force_create=True)) + op0 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + op1 = LazyOp(ReduceOps.SUM, (op0,), (1, 64, 128, 8, 4, 1, 1, 1, 1)) + buf2 = GPUBuffer(shape=ShapeTracker(shape=(1, 64, 128, 8, 4, 1, 1, 1, 1), views=[View((1, 64, 128, 8, 4, 1, 1, 1, 1), (0, 0, 0, 4, 1, 1, 1, 1, 1), 0)]), hostbuf=GPUBuffer(shape=(32,), force_create=True)) + op2 = LazyOp(BinaryOps.ADD, (op1,buf2,), None) + op3 = LazyOp(UnaryOps.RELU, (op2,), None) + buf3 = GPUBuffer(shape=ShapeTracker(shape=(1, 64, 128, 8, 4, 1, 1, 1, 1), views=[View((1, 64, 128, 8, 4, 1, 1, 1, 1), (0, 0, 0, 0, 0, 1, 1, 1, 1), 0)]), hostbuf=GPUBuffer(shape=(1,), backing=np.array([1.], dtype=np.float32))) + buf4 = GPUBuffer(shape=ShapeTracker(shape=(1, 64, 128, 8, 4, 1, 1, 1, 1), views=[View((1, 64, 128, 8, 4, 1, 1, 1, 1), (0, 0, 0, 0, 0, 1, 1, 1, 1), 0)]), hostbuf=GPUBuffer(shape=(1,), backing=np.array([1.], dtype=np.float32))) + op4 = LazyOp(UnaryOps.EXP, (op2,), None) + op5 = LazyOp(BinaryOps.SUB, (buf4,op4,), None) + op6 = LazyOp(UnaryOps.RELU, (op5,), None) + op7 = LazyOp(BinaryOps.MUL, (buf3,op6,), None) + op8 = LazyOp(BinaryOps.SUB, (op3,op7,), None) + ast = LazyOp(MovementOps.RESHAPE, (op8,), (64, 1024, 4)) + compile_and_test_ast(ast) + + def test_third_op_conv(self): + buf0 = GPUBuffer(shape=ShapeTracker(shape=(1, 64, 128, 4, 4, 1, 1, 8, 4), views=[View((1, 64, 128, 4, 4, 1, 1, 8, 4), (0, 4096, 32, 0, 0, 0, 0, 4, 1), 0)]), hostbuf=GPUBuffer(shape=(64, 1024, 4), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(1, 64, 128, 4, 4, 1, 1, 8, 4), views=[View((1, 64, 128, 4, 4, 1, 1, 8, 4), (0, 0, 0, 128, 4, 0, 0, 16, 1), 0)]), hostbuf=GPUBuffer(shape=(4, 32, 4), force_create=True)) + op0 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + op1 = LazyOp(ReduceOps.SUM, (op0,), (1, 64, 128, 4, 4, 1, 1, 1, 1)) + buf2 = GPUBuffer(shape=ShapeTracker(shape=(1, 64, 128, 4, 4, 1, 1, 1, 1), views=[View((1, 64, 128, 4, 4, 1, 1, 1, 1), (0, 0, 0, 4, 1, 1, 1, 1, 1), 0)]), hostbuf=GPUBuffer(shape=(16,), force_create=True)) + op2 = LazyOp(BinaryOps.ADD, (op1,buf2,), None) + ast = LazyOp(MovementOps.RESHAPE, (op2,), (64, 512, 4)) + compile_and_test_ast(ast) + + # VALIDHACKS=1 IMAGE=2 DEBUG=4 PYTHONPATH="." 
GPU=1 OPT=2 python3 test/external_test_gpu_ast.py TestAST.test_reduce_op + # 164 time 27.75 ms running re_S128_4 with [128] None count 4 runtime 1016.06 us 2.07 GFLOPS () -> (128, 1) + # 169 time 22.51 ms running matmul with [4, 16, 128] [4, 16, 16] count 5 runtime 110.08 us 19.06 GFLOPS ('-DMATMUL',) -> (128, 1) + def test_reduce_op(self): + buf0 = GPUBuffer(shape=ShapeTracker(shape=(1, 1, 1, 128, 4, 1, 1, 512, 4), views=[View((1, 1, 1, 128, 4, 1, 1, 512, 4), (0, 0, 0, 0, 0, 0, 0, 4, 1), 0)]), hostbuf=GPUBuffer(shape=(1, 512, 4), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(1, 1, 1, 128, 4, 1, 1, 512, 4), views=[View((1, 1, 1, 128, 4, 1, 1, 512, 4), (0, 0, 0, 8192, 4, 0, 0, 16, 1), 0)]), hostbuf=GPUBuffer(shape=(128, 2048, 4), force_create=True)) + op0 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + op1 = LazyOp(ReduceOps.SUM, (op0,), (1, 1, 1, 128, 4, 1, 1, 1, 1)) + buf2 = GPUBuffer(shape=ShapeTracker(shape=(1, 1, 1, 128, 4, 1, 1, 1, 1), views=[View((1, 1, 1, 128, 4, 1, 1, 1, 1), (512, 512, 512, 4, 1, 1, 1, 1, 1), 0)]), hostbuf=GPUBuffer(shape=(512,), force_create=True)) + op2 = LazyOp(BinaryOps.ADD, (op1,buf2,), None) + op3 = LazyOp(UnaryOps.RELU, (op2,), None) + ast = LazyOp(MovementOps.RESHAPE, (op3,), (1, 128, 4)) + compile_and_test_ast(ast) + + def test_alt_reduce_op(self): + buf0 = GPUBuffer(shape=ShapeTracker(shape=(1, 1, 1, 1, 1, 128, 4, 1, 1, 512, 4), views=[View((1, 1, 1, 1, 1, 128, 4, 1, 1, 512, 4), (0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 1), 0)]), hostbuf=GPUBuffer(shape=(1, 512, 4), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(1, 1, 1, 1, 1, 128, 4, 1, 1, 512, 4), views=[View((1, 1, 1, 1, 1, 128, 4, 1, 1, 512, 4), (0, 0, 0, 0, 0, 8192, 4, 0, 0, 16, 1), 0)]), hostbuf=GPUBuffer(shape=(128, 2048, 4), force_create=True)) + op0 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + op1 = LazyOp(ReduceOps.SUM, (op0,), (1, 1, 1, 1, 1, 128, 4, 1, 1, 1, 1)) + buf2 = GPUBuffer(shape=ShapeTracker(shape=(1, 1, 1, 1, 1, 128, 4, 1, 1, 1, 1), views=[View((1, 1, 1, 1, 1, 128, 4, 1, 1, 1, 1), (0, 0, 0, 0, 0, 4, 1, 0, 0, 0, 0), 0)]), hostbuf=GPUBuffer(shape=(512,), force_create=True)) + op2 = LazyOp(BinaryOps.ADD, (op1,buf2,), None) + buf3 = GPUBuffer(shape=ShapeTracker(shape=(1, 1, 1, 1, 1, 128, 4, 1, 1, 1, 1), views=[View((1, 1, 1, 1, 1, 128, 4, 1, 1, 1, 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), 0)]), hostbuf=GPUBuffer(shape=(1,), force_create=True)) + op3 = LazyOp(BinaryOps.MAX, (op2,buf3,), None) + ast = LazyOp(MovementOps.RESHAPE, (op3,), (1, 128, 4)) + compile_and_test_ast(ast) + + # re_S32_16_36_6 is fast + def test_1x1_36_6(self): # 36 <- 6 + buf0 = GPUBuffer(shape=ShapeTracker(shape=(1, 32, 64, 1, 1, 36, 4, 1, 1, 6, 4), views=[View((1, 32, 64, 1, 1, 36, 4, 1, 1, 6, 4), (0, 1536, 24, 0, 0, 0, 0, 0, 0, 4, 1), 0)]), hostbuf=GPUBuffer(shape=(32, 384, 4), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(1, 32, 64, 1, 1, 36, 4, 1, 1, 6, 4), views=[View((1, 32, 64, 1, 1, 36, 4, 1, 1, 6, 4), (0, 0, 0, 0, 0, 96, 4, 0, 0, 16, 1), 0)]), hostbuf=GPUBuffer(shape=(36, 24, 4), force_create=True)) + op0 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + op1 = LazyOp(ReduceOps.SUM, (op0,), (1, 32, 64, 1, 1, 36, 4, 1, 1, 1, 1)) + buf2 = GPUBuffer(shape=ShapeTracker(shape=(1, 32, 64, 1, 1, 36, 4, 1, 1, 1, 1), views=[View((1, 32, 64, 1, 1, 36, 4, 1, 1, 1, 1), (0, 0, 0, 0, 0, 4, 1, 0, 0, 0, 0), 0)]), hostbuf=GPUBuffer(shape=(144,), force_create=True)) + op2 = LazyOp(BinaryOps.ADD, (op1,buf2,), None) + buf3 = GPUBuffer(shape=ShapeTracker(shape=(1, 32, 64, 1, 1, 36, 4, 1, 1, 1, 
1), views=[View((1, 32, 64, 1, 1, 36, 4, 1, 1, 1, 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), 0)]), hostbuf=GPUBuffer(shape=(1,), force_create=True)) + op3 = LazyOp(BinaryOps.MAX, (op2,buf3,), None) + buf4 = GPUBuffer(shape=ShapeTracker(shape=(1, 32, 64, 1, 1, 36, 4, 1, 1, 1, 1), views=[View((1, 32, 64, 1, 1, 36, 4, 1, 1, 1, 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), 0)]), hostbuf=GPUBuffer(shape=(1,), backing=np.array([1.], dtype=np.float32))) + op4 = LazyOp(UnaryOps.EXP, (op2,), None) + op5 = LazyOp(BinaryOps.SUB, (buf4,op4,), None) + buf5 = GPUBuffer(shape=ShapeTracker(shape=(1, 32, 64, 1, 1, 36, 4, 1, 1, 1, 1), views=[View((1, 32, 64, 1, 1, 36, 4, 1, 1, 1, 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), 0)]), hostbuf=GPUBuffer(shape=(1,), force_create=True)) + op6 = LazyOp(BinaryOps.MAX, (op5,buf5,), None) + op7 = LazyOp(BinaryOps.SUB, (op3,op6,), None) + ast = LazyOp(MovementOps.RESHAPE, (op7,), (32, 2304, 4)) + compile_and_test_ast(ast, None if OSX else (16, 16, 4)) + + # re_S32_16_6_36 is slow + def test_1x1_6_36(self): # 6 <- 36 + buf0 = GPUBuffer(shape=ShapeTracker(shape=(1, 32, 64, 1, 1, 6, 4, 1, 1, 36, 4), views=[View((1, 32, 64, 1, 1, 6, 4, 1, 1, 36, 4), (0, 9216, 144, 0, 0, 0, 0, 0, 0, 4, 1), 0)]), hostbuf=GPUBuffer(shape=(32, 2304, 4), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(1, 32, 64, 1, 1, 6, 4, 1, 1, 36, 4), views=[View((1, 32, 64, 1, 1, 6, 4, 1, 1, 36, 4), (0, 0, 0, 0, 0, 576, 4, 0, 0, 16, 1), 0)]), hostbuf=GPUBuffer(shape=(6, 144, 4), force_create=True)) + op0 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + op1 = LazyOp(ReduceOps.SUM, (op0,), (1, 32, 64, 1, 1, 6, 4, 1, 1, 1, 1)) + buf2 = GPUBuffer(shape=ShapeTracker(shape=(1, 32, 64, 1, 1, 6, 4, 1, 1, 1, 1), views=[View((1, 32, 64, 1, 1, 6, 4, 1, 1, 1, 1), (0, 0, 0, 0, 0, 4, 1, 0, 0, 0, 0), 0)]), hostbuf=GPUBuffer(shape=(24,), force_create=True)) + op2 = LazyOp(BinaryOps.ADD, (op1,buf2,), None) + buf3 = GPUBuffer(shape=ShapeTracker(shape=(1, 32, 64, 1, 1, 6, 4, 1, 1, 1, 1), views=[View((1, 32, 64, 1, 1, 6, 4, 1, 1, 1, 1), (0, 1536, 24, 0, 0, 4, 1, 0, 0, 0, 0), 0)]), hostbuf=GPUBuffer(shape=(32, 384, 4), force_create=True)) + op3 = LazyOp(BinaryOps.ADD, (op2,buf3,), None) + ast = LazyOp(MovementOps.RESHAPE, (op3,), (32, 384, 4)) + compile_and_test_ast(ast, (6, 16, 4)) + + # re_S32_16_6_24 + def test_1x1_6_24(self): + buf0 = GPUBuffer(shape=ShapeTracker(shape=(1, 32, 64, 1, 1, 6, 4, 1, 1, 24, 4), views=[View((1, 32, 64, 1, 1, 6, 4, 1, 1, 24, 4), (0, 6144, 96, 0, 0, 0, 0, 0, 0, 4, 1), 0)]), hostbuf=GPUBuffer(shape=(32, 1536, 4), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(1, 32, 64, 1, 1, 6, 4, 1, 1, 24, 4), views=[View((1, 32, 64, 1, 1, 6, 4, 1, 1, 24, 4), (0, 0, 0, 0, 0, 384, 4, 0, 0, 16, 1), 0)]), hostbuf=GPUBuffer(shape=(6, 96, 4), force_create=True)) + op0 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + op1 = LazyOp(ReduceOps.SUM, (op0,), (1, 32, 64, 1, 1, 6, 4, 1, 1, 1, 1)) + #buf2 = GPUBuffer(shape=ShapeTracker(shape=(1, 32, 64, 1, 1, 6, 4, 1, 1, 1, 1), views=[View((1, 32, 64, 1, 1, 6, 4, 1, 1, 1, 1), (0, 0, 0, 0, 0, 4, 1, 0, 0, 0, 0), 0)]), hostbuf=GPUBuffer(shape=(24,), force_create=True)) + #op2 = LazyOp(BinaryOps.ADD, (op1,buf2,), None) + ast = LazyOp(MovementOps.RESHAPE, (op1,), (32, 384, 4)) + compile_and_test_ast(ast, (6, 4, 8)) + + def test_full_reduce_op(self): + buf0 = GPUBuffer(shape=ShapeTracker(shape=(1, 512), views=[View((1, 512), (512, 1), 0)]), hostbuf=GPUBuffer(shape=(1, 1, 1, 128, 4, 1, 1, 1, 1), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(1, 512), 
views=[View((1, 512), (0, 1), 0)]), hostbuf=GPUBuffer(shape=(512,), force_create=True)) + op0 = LazyOp(BinaryOps.ADD, (buf0,buf1,), None) + buf2 = GPUBuffer(shape=ShapeTracker(shape=(1, 512), views=[View((1, 512), (0, 0), 0)]), hostbuf=GPUBuffer(shape=(1,), backing=np.array([2.], dtype=np.float32))) + op1 = LazyOp(BinaryOps.POW, (op0,buf2,), None) + op2 = LazyOp(ReduceOps.SUM, (op1,), (1, 1)) + buf3 = GPUBuffer(shape=ShapeTracker(shape=(1, 1), views=[View((1, 1), (0, 0), 0)]), hostbuf=GPUBuffer(shape=(1,), backing=np.array([0.5], dtype=np.float32))) + op3 = LazyOp(BinaryOps.POW, (op2,buf3,), None) + buf4 = GPUBuffer(shape=ShapeTracker(shape=(1, 1), views=[View((1, 1), (0, 0), 0)]), hostbuf=GPUBuffer(shape=(1,), backing=np.array([1.e-12], dtype=np.float32))) + op4 = LazyOp(BinaryOps.SUB, (op3,buf4,), None) + op5 = LazyOp(UnaryOps.RELU, (op4,), None) + buf5 = GPUBuffer(shape=ShapeTracker(shape=(1, 1), views=[View((1, 1), (0, 0), 0)]), hostbuf=GPUBuffer(shape=(1,), backing=np.array([1.e-12], dtype=np.float32))) + op6 = LazyOp(BinaryOps.ADD, (op5,buf5,), None) + buf6 = GPUBuffer(shape=ShapeTracker(shape=(1, 1), views=[View((1, 1), (0, 0), 0)]), hostbuf=GPUBuffer(shape=(1,), backing=np.array([3.4e+38], dtype=np.float32))) + op7 = LazyOp(BinaryOps.SUB, (op3,buf6,), None) + op8 = LazyOp(UnaryOps.RELU, (op7,), None) + op9 = LazyOp(BinaryOps.SUB, (op6,op8,), None) + op10 = LazyOp(UnaryOps.RECIPROCAL, (op9,), None) + ast = LazyOp(MovementOps.RESHAPE, (op10,), (1, 1)) + compile_and_test_ast(ast) + + def test_1239_reduce(self): + buf0 = GPUBuffer(shape=ShapeTracker(shape=(1, 1, 1, 1239, 4, 1, 1, 64, 4), views=[View((1, 1, 1, 1239, 4, 1, 1, 64, 4), (0, 0, 0, 0, 0, 0, 0, 4, 1), 0)]), hostbuf=GPUBuffer(shape=(1, 64, 4), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(1, 1, 1, 1239, 4, 1, 1, 64, 4), views=[View((1, 1, 1, 1239, 4, 1, 1, 64, 4), (0, 0, 0, 1024, 4, 0, 0, 16, 1), 0)]), hostbuf=GPUBuffer(shape=(1239, 256, + 4), force_create=True)) + op0 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + op1 = LazyOp(ReduceOps.SUM, (op0,), (1, 1, 1, 1239, 4, 1, 1, 1, 1)) + ast = LazyOp(MovementOps.RESHAPE, (op1,), (1, 1, 1, 1, 4956)) + compile_and_test_ast(ast) + + def test_enet_first_conv_bs32(self): + buf0 = GPUBuffer(shape=ShapeTracker(shape=(8, 1, 32, 112, 112, 3, 3, 3), views=[View((8, 3, 225, 225), (150528, 50176, 224, 1), 0), ZeroView((8, 3, 224, 224), ((0, 8), (0, 3), (0, 225), (0, 225))), View((8, 1, 32, 112, 112, 3, 3, 3), (151875, 151875, 0, 450, 2, 50625, 225, 1), 0)]), hostbuf=GPUBuffer(shape=(8, 3, 224, 224), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(8, 1, 32, 112, 112, 3, 3, 3), views=[View((8, 1, 32, 112, 112, 3, 3, 3), (0, 0, 27, 0, 0, 9, 3, 1), 0)]), hostbuf=GPUBuffer(shape=(32, 3, 3, 3), force_create=True)) + op0 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + op1 = LazyOp(ReduceOps.SUM, (op0,), (8, 1, 32, 112, 112, 1, 1, 1)) + ast = LazyOp(MovementOps.RESHAPE, (op1,), (8, 32, 112, 112)) + compile_and_test_ast(ast) + + def test_enet_reduce_bs32(self): + buf0 = GPUBuffer(shape=ShapeTracker(shape=(3, 1, 32, 3, 3, 32, 112, 112), views=[View((3, 32, 225, 225), (50176, 150528, 224, 1), 0), ZeroView((3, 32, 224, 224), ((0, 3), (0, 32), (0, 225), (0, 225))), View((3, 1, 32, 3, 3, 32, 112, 112), (1620000, 1620000, 0, 225, 1, 50625, 450, 2), 0)]), hostbuf=GPUBuffer(shape=(32, 3, 224, 224), force_create=True)) + buf1 = GPUBuffer(shape=ShapeTracker(shape=(3, 1, 32, 3, 3, 32, 112, 112), views=[View((3, 1, 32, 3, 3, 32, 112, 112), (0, 12845056, 401408, 0, 0, 12544, 112, 
1), 0)]), hostbuf=GPUBuffer(shape=(1, 1, 32, 1, 1, 32, 112, 112), force_create=True)) + op0 = LazyOp(BinaryOps.MUL, (buf0,buf1,), None) + op1 = LazyOp(ReduceOps.SUM, (op0,), (3, 1, 32, 3, 3, 1, 1, 1)) + ast = LazyOp(MovementOps.RESHAPE, (op1,), (3, 32, 3, 3)) + compile_and_test_ast(ast) + + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/external/external_test_image.py b/tinygrad_repo/test/external/external_test_image.py new file mode 100644 index 0000000..3e246ee --- /dev/null +++ b/tinygrad_repo/test/external/external_test_image.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +import os +import unittest +import numpy as np +if 'IMAGE' not in os.environ: + os.environ['IMAGE'] = '2' +os.environ['GPU'] = '1' +os.environ['OPT'] = '2' +from tinygrad.tensor import Tensor +from tinygrad.nn import Conv2d +Tensor.no_grad = True + +class TestImage(unittest.TestCase): + def test_create_image(self): + t = Tensor.ones(128, 128, 1) + t = t.reshape(128, 32, 4) + 3 + t.realize() + np.testing.assert_array_equal(t.numpy(), np.ones((128,32,4))*4) + + def test_sum_image(self): + t1 = Tensor.ones(16, 16, 1).reshape(16, 4, 4) + 3 + t1.realize() + t1 = t1.sum() + t1.realize() + assert t1.numpy() == 16*4*4*4, f"got {t1.numpy()}" + + def test_add_image(self): + t1 = Tensor.ones(16, 16, 1).reshape(16, 4, 4) + 3 + t2 = Tensor.ones(16, 16, 1).reshape(16, 4, 4) + 4 + t1.realize() + t2.realize() + t3 = t1 + t2 + t3.realize() + np.testing.assert_array_equal(t3.numpy(), np.ones((16,4,4))*9) + + def test_padded_conv(self): + bs, in_chans, out_chans = 1,12,32 + tiny_conv = Conv2d(in_chans, out_chans, 3, bias=None, padding=1) + tiny_dat = Tensor.ones(bs, 12, 64, 128) + tiny_conv(tiny_dat).realize() + + def test_op_conv(self): + bs, in_chans, out_chans = 1,12,32 + tiny_conv = Conv2d(in_chans, out_chans, 3, bias=None, padding=1) + tiny_dconv = Conv2d(out_chans, out_chans, 1, bias=None, padding=0) + tiny_dat = Tensor.ones(bs, 12, 64, 128) + p2 = tiny_conv(tiny_dat).relu() + p2 = tiny_dconv(p2) + p2.realize() + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/external/external_test_jit_on_models.py b/tinygrad_repo/test/external/external_test_jit_on_models.py new file mode 100644 index 0000000..f03615b --- /dev/null +++ b/tinygrad_repo/test/external/external_test_jit_on_models.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +import unittest +import numpy as np +from tinygrad.tensor import Tensor +from tinygrad.jit import TinyJit, JIT_SUPPORTED_DEVICE +from tinygrad.helpers import dtypes, CI +from tinygrad.ops import Device +from test.helpers import derandomize_model + +from examples.llama import Transformer + +def helper_test_jitted_correctness(gen, train, train_jit): + nojit = train(*gen()).numpy() + for _ in range(5): jit = train_jit(*gen()).numpy() + np.testing.assert_allclose(nojit, jit, rtol=1e-3, atol=1e-5) + +@unittest.skipUnless(Device.DEFAULT in JIT_SUPPORTED_DEVICE, "needs JIT") +class TestJittedModels(unittest.TestCase): + def test_jitted_tiny_llama(self): + old_type = Tensor.default_type + Tensor.default_type = dtypes.float16 + + args_tiny = {"dim": 1024, "multiple_of": 256, "n_heads": 8, "n_layers": 8, "norm_eps": 1e-05, "vocab_size": 1000} + model = Transformer(**args_tiny) + derandomize_model(model) + def test(t): return model(t, 0).realize() + + @TinyJit + def test_jit(t): return model(t, 0).realize() + helper_test_jitted_correctness(lambda: (Tensor([[1,]]),), test, test_jit) + Tensor.default_type = old_type + + @unittest.skipUnless(not CI, "huge for CI") + def 
test_jitted_stable_diffusion(self): + from examples.stable_diffusion import UNetModel + model = UNetModel() + derandomize_model(model) + def test(t, t2): return model(t, 801, t2).realize() + + @TinyJit + def test_jit(t, t2): return model(t, 801, t2).realize() + helper_test_jitted_correctness(lambda: (Tensor.randn(1, 4, 16, 16),Tensor.randn(1, 77, 768)), test, test_jit) + +if __name__ == "__main__": + unittest.main() diff --git a/tinygrad_repo/test/external/external_test_onnx_backend.py b/tinygrad_repo/test/external/external_test_onnx_backend.py new file mode 100644 index 0000000..bb41463 --- /dev/null +++ b/tinygrad_repo/test/external/external_test_onnx_backend.py @@ -0,0 +1,208 @@ +import unittest +from onnx.backend.base import Backend, BackendRep +import onnx.backend.test +import numpy as np +from tinygrad.tensor import Tensor +from typing import Any, Tuple +from tinygrad.helpers import getenv, CI + +# pip3 install tabulate +pytest_plugins = 'onnx.backend.test.report', + +from extra.onnx import get_run_onnx + +class TinygradModel(BackendRep): + def __init__(self, run_onnx, input_names): + super().__init__() + self.fxn = run_onnx + self.input_names = input_names + + def run(self, inputs: Any, **kwargs: Any) -> Tuple[Any, ...]: + real_inputs = {k:v for k,v in zip(self.input_names, inputs)} + ret = self.fxn(real_inputs, debug=True) + return tuple(x.numpy() if isinstance(x, Tensor) else [i.numpy() for i in x] if isinstance(x, list) else np.array(x) for x in ret.values()) + +class TinygradBackend(Backend): + @classmethod + def prepare(cls, model, device): + input_all = [x.name for x in model.graph.input] + input_initializer = [x.name for x in model.graph.initializer] + net_feed_input = [x for x in input_all if x not in input_initializer] + print("prepare", cls, device, net_feed_input) + run_onnx = get_run_onnx(model) + return TinygradModel(run_onnx, net_feed_input) + + @classmethod + def supports_device(cls, device: str) -> bool: + return device == "CPU" + +backend_test = onnx.backend.test.BackendTest(TinygradBackend, __name__) + +# no support for reduce with multiply (needs llop) +backend_test.exclude('test_reduce_prod_*') + +# TODO figure out why it's returning wrong values, geohotstan's uneducated guess is it's due to imprecision from float64 (double) -> float32 +# see Type Constraints: https://onnx.ai/onnx/operators/onnx_aionnxpreviewtraining_Adam.html#type-constraints +backend_test.exclude('test_adam_multiple_cpu') +backend_test.exclude('test_nesterov_momentum_cpu') + +# we only support float32 +backend_test.exclude('uint8') +backend_test.exclude('uint16') +backend_test.exclude('uint32') +backend_test.exclude('uint64') +backend_test.exclude('int8') +backend_test.exclude('int16') +backend_test.exclude('float64') +backend_test.exclude('string') + +backend_test.exclude('test_pow_types_int*') +backend_test.exclude('test_cast_*') +backend_test.exclude('test_castlike_*') +backend_test.exclude('test_convinteger_*') +backend_test.exclude('test_matmulinteger_*') + +backend_test.exclude('test_reduce_log_sum_exp*') # dependent on actual float64 implementation for backends +backend_test.exclude('test_operator_add*') # dependent on float64 math. 
Without it values default to 0 or inf + +# we don't support indexes +# backend_test.exclude('test_argmax_*') # Needs more work: select_last_index +# backend_test.exclude('test_argmin_*') # Needs more work: select_last_index +backend_test.exclude('test_nonzero_*') + +# no support for mod +backend_test.exclude('test_mod_*') + +# no boolean ops (2d, 3d, 4d) +backend_test.exclude('test_bitshift_*') + +# no scatternd gathernd +backend_test.exclude('test_gathernd_*') +backend_test.exclude('test_scatternd_*') + +# no quantize +backend_test.exclude('test_dynamicquantizelinear_*') +backend_test.exclude('test_qlinearmatmul_*') +backend_test.exclude('test_qlinearconv_*') +backend_test.exclude('test_quantizelinear_*') + +# no rnn +backend_test.exclude('test_gru_*') +backend_test.exclude('test_rnn_*') +backend_test.exclude('test_lstm_*') +backend_test.exclude('test_simple_rnn_*') + +# no control flow +backend_test.exclude('test_if_*') +backend_test.exclude('test_loop*') +backend_test.exclude('test_range_float_type_positive_delta_expanded_cpu') # requires loop + +# unsupported (strange) ops +backend_test.exclude('test_bitwise_*') +backend_test.exclude('test_blackmanwindow_*') +backend_test.exclude('test_bernoulli_*') +backend_test.exclude('test_cumsum_*') +backend_test.exclude('test_det_*') + +backend_test.exclude('test_tril_zero_cpu') # TODO: zero array support +backend_test.exclude('test_triu_zero_cpu') # TODO: zero array support + +backend_test.exclude('test_col2im_*') +backend_test.exclude('test_hammingwindow_*') +backend_test.exclude('test_hannwindow_*') +backend_test.exclude('test_hardmax_*') +backend_test.exclude('test_gridsample_*') +backend_test.exclude('test_dft_*') +backend_test.exclude('test_einsum_*') +backend_test.exclude('test_strnorm_*') +backend_test.exclude('test_unique_*') +backend_test.exclude('test_sequence_*') +backend_test.exclude('test_nonmaxsuppression_*') +backend_test.exclude('test_reversesequence_*') +backend_test.exclude('test_roialign_*') +backend_test.exclude('test_top_k_*') +backend_test.exclude('test_tfidfvectorizer_*') +backend_test.exclude('test_stft_*') +backend_test.exclude('test_melweightmatrix_*') + +# more strange ops +backend_test.exclude('test_basic_deform_conv_*') +backend_test.exclude('test_deform_conv_*') +backend_test.exclude('test_lppool_*') +backend_test.exclude('test_depthtospace_*') +backend_test.exclude('test_spacetodepth_*') +backend_test.exclude('test_scan*') +backend_test.exclude('test_split_to_sequence_*') +backend_test.exclude('test_resize_downsample_scales_cubic_*') # unsure how to implement cubic +backend_test.exclude('test_resize_downsample_sizes_cubic_*') # unsure how to implement cubic +backend_test.exclude('test_resize_upsample_scales_cubic_*') # unsure how to implement cubic +backend_test.exclude('test_resize_upsample_sizes_cubic_*') # unsure how to implement cubic + +# rest of the failing tests +backend_test.exclude('test_averagepool_2d_dilations_cpu') # dilations != 1 not supported for avgpool +backend_test.exclude('test_convtranspose_autopad_same_cpu') # TODO geohotstan has no idea how this is done, autopad requires output_shape but output_shape requires pads from autopad +backend_test.exclude('test_optional_has_element_empty_optional_input_cpu') # Attempts to create Tensor from None +backend_test.exclude('test_range_int32_type_negative_delta_expanded_cpu') # AttributeProto.GRAPH not implemented +backend_test.exclude('test_reshape_allowzero_reordered_cpu') # reshaping to 0 shape 
+backend_test.exclude('test_resize_downsample_scales_linear_antialias_cpu') # antialias not implemented +backend_test.exclude('test_resize_downsample_sizes_linear_antialias_cpu') # antialias not implemented +backend_test.exclude('test_resize_tf_crop_and_resize_cpu') # unsure about fill value after clip +backend_test.exclude('test_operator_addconstant_cpu') # bad data type + +# issue 1556 https://github.com/tinygrad/tinygrad/issues/1556 +backend_test.exclude('test_isinf_cpu') +backend_test.exclude('test_isinf_negative_cpu') +backend_test.exclude('test_isinf_positive_cpu') +backend_test.exclude('test_isnan_cpu') + +# issue 1791 fast math messes with these https://github.com/tinygrad/tinygrad/issues/1791 +backend_test.exclude('test_resize_upsample_sizes_nearest_axes_2_3_cpu') +backend_test.exclude('test_resize_upsample_sizes_nearest_axes_3_2_cpu') +backend_test.exclude('test_resize_upsample_sizes_nearest_cpu') + +# issue 2067 potentially also a fastmath issue https://github.com/tinygrad/tinygrad/issues/2067 +if getenv('METAL'): + backend_test.exclude('test_maxpool_2d_pads_cpu') + backend_test.exclude('test_maxpool_2d_same_lower_cpu') + +# Don't know how to treat special TensorProto like TensorProto.FLOAT8E4M3FN +if getenv("CPU") or getenv("TORCH"): + backend_test.exclude('test_dequantizelinear_axis_cpu') + backend_test.exclude('test_dequantizelinear_cpu') + +# compiled backends cannot reshape to and from 0 +if getenv('LLVM') or getenv('GPU') or getenv('CLANG') or getenv('METAL') or getenv('CUDA'): + backend_test.exclude('test_slice_start_out_of_bounds_cpu') + backend_test.exclude('test_constantofshape_int_shape_zero_cpu') + +if getenv('GPU') or getenv('METAL'): + backend_test.exclude('test_mish_cpu') # weird inaccuracy + backend_test.exclude('test_mish_expanded_cpu') # weird inaccuracy + backend_test.exclude('test_eyelike_with_dtype_cpu') # backend does not support dtype: Double + +# Segfaults in CI +if (getenv('LLVM') or getenv('CUDA')) and CI: + backend_test.exclude('test_max_float16_cpu') + backend_test.exclude('test_min_float16_cpu') + +# disable model tests for now since they are slow +if not getenv("MODELTESTS"): + for x in backend_test.test_suite: + if 'OnnxBackendRealModelTest' in str(type(x)): + backend_test.exclude(str(x).split(" ")[0]) +else: + # model tests all pass! 
+ backend_test.include('test_resnet50') + backend_test.include('test_inception_v1') + backend_test.include('test_inception_v2') + backend_test.include('test_densenet121') + backend_test.include('test_shufflenet') + backend_test.include('test_squeezenet') + backend_test.include('test_bvlc_alexnet') + backend_test.include('test_zfnet512') + backend_test.include('test_vgg19') + +globals().update(backend_test.enable_report().test_cases) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/external/external_test_opt.py b/tinygrad_repo/test/external/external_test_opt.py new file mode 100644 index 0000000..1e3b01e --- /dev/null +++ b/tinygrad_repo/test/external/external_test_opt.py @@ -0,0 +1,392 @@ +#!/usr/bin/env python +import os + +import torch +if "OPT" not in os.environ: + os.environ["OPT"] = "2" + +import gc +import numpy as np + +import unittest +from tinygrad.tensor import Tensor, Device +from tinygrad import nn +from tinygrad.helpers import getenv +from tinygrad.nn import optim +from tinygrad.helpers import GlobalCounters +from tinygrad.lazy import PUSH_PERMUTES +from tinygrad.jit import CacheCollector + +class CLCache: + def __init__(self, allowed=None, strict=False, preclear=True): self.allowed, self.strict, self.preclear = allowed, strict, preclear + def __enter__(self): + if self.preclear: + gc.collect() + for x in [x for x in gc.get_objects() if isinstance(x, Tensor)]: + x.realize() + GlobalCounters.reset() + CacheCollector.start() + print("cache: entering") + def __exit__(self, type, value, traceback): + cache = CacheCollector.finish() + print(f"cache: exiting with size {len(cache)}", f"allowed {self.allowed}" if self.allowed is not None else "") + if self.allowed is not None: + assert len(cache) <= self.allowed and (not self.strict or len(cache) == self.allowed), f"used too many kernels! 
{len(cache)} > {self.allowed}" + +from models.convnext import ConvNeXt +from models.efficientnet import EfficientNet +from models.resnet import ResNet18 +from models.vit import ViT +from tinygrad.nn.state import get_parameters + +@unittest.skipUnless(Device.DEFAULT == "GPU", "Not Implemented") +class TestInferenceMinKernels(unittest.TestCase): + def setUp(self): + Tensor.training = False + + @unittest.skipIf(not PUSH_PERMUTES, "this test requires PUSH_PERMUTES") + def test_convnext(self): + model = ConvNeXt() + for p in get_parameters(model): p.assign(np.zeros(p.shape, dtype=p.dtype.np)) + img = Tensor.randn(1, 3, 224, 224) + with CLCache(129): + model(img).realize() + + def test_enet(self): + model = EfficientNet(getenv("ENET_NUM", 0), has_se=False) + for p in get_parameters(model): p.assign(np.zeros(p.shape, dtype=p.dtype.np)) + img = Tensor.randn(1, 3, 224, 224) + with CLCache(51): + model.forward(img).realize() + + def test_enet_se(self): + model = EfficientNet(getenv("ENET_NUM", 0), has_se=True) + for p in get_parameters(model): p.assign(np.zeros(p.shape, dtype=p.dtype.np)) + img = Tensor.randn(1, 3, 224, 224) + # TODO: this seems very high + with CLCache(115): + model.forward(img).realize() + + def test_resnet(self): + model = ResNet18() + for p in get_parameters(model): p.assign(np.zeros(p.shape, dtype=p.dtype.np)) + img = Tensor.randn(1, 3, 224, 224) + with CLCache(26): + model.forward(img).realize() + + def test_vit(self): + model = ViT(embed_dim=192, num_heads=3) + for p in get_parameters(model): p.assign(np.zeros(p.shape, dtype=p.dtype.np)) + img = Tensor.randn(1, 3, 224, 224) + with CLCache(222): # NOTE: this is way too high + out = model.forward(img) + assert len(CacheCollector.cache) == 0, "ViT prerealized?" + out.realize() + + def test_llama(self): + from examples.llama import Transformer + args_tiny = {"dim": 512, "multiple_of": 256, "n_heads": 8, "n_layers": 4, "norm_eps": 1e-05, "vocab_size": 1000} + model = Transformer(**args_tiny) + for p in get_parameters(model): p.assign(np.zeros(p.shape, dtype=p.dtype.np)) + with CLCache(85): + model(Tensor([[1,2,3,4]]), 0).realize() + +@unittest.skipUnless(Device.DEFAULT == "GPU", "Not Implemented") +class TestOptBinOp(unittest.TestCase): + def _test_no_binop_rerun(self, f1, f2=None, allowed=1): + a = Tensor.randn(16, 16) + b = Tensor.randn(16, 16) + with CLCache(): + c = f1(a, b) + if f2 is not None: d = f2(a, b) + c.realize() + if f2 is not None: d.realize() + assert len(CacheCollector.cache) == allowed, "binop was rerun!" 
+ if f2 is not None: np.testing.assert_allclose(c.numpy().ravel(), d.numpy().ravel(), rtol=1e-3, atol=1e-5) + + def test_no_binop_rerun(self): return self._test_no_binop_rerun(lambda a,b: a*b, lambda a,b: (a*b).reshape(16, 16, 1)) + def test_no_binop_rerun_alt(self): return self._test_no_binop_rerun(lambda a,b: (a*b).reshape(16, 16, 1), lambda a,b: a*b) + def test_no_binop_rerun_reduce_broadcast(self): return self._test_no_binop_rerun(lambda a,b: a.sum()+b, lambda a,b: a.sum().reshape(1,1)+b, allowed=2) + @unittest.skip("this test started failing with the new change, based movementop issue") + def test_no_binop_rerun_transposed(self): return self._test_no_binop_rerun(lambda a,b: (a.T*b.T).T, lambda a,b: a*b) + def test_no_binop_rerun_mid_reshape(self): return self._test_no_binop_rerun(lambda a,b: (a*b).reshape(256)+a.reshape(256)) + + # currently non working tests + #def test_no_binop_rerun_preshape(self): return self._test_no_binop_rerun(lambda a,b: a.reshape(16, 16, 1)*b.reshape(16, 16, 1), lambda a,b: a*b) + #def test_no_binop_rerun_reduce(self): return self._test_no_binop_rerun(lambda a,b: (a*b).sum(), lambda a,b: (a*b).reshape(16, 16, 1).sum()) + #def test_no_binop_rerun_reduce_alt(self): return self._test_no_binop_rerun(lambda a,b: a.sum(1)+b[0], lambda a,b: a.sum(1).reshape(1,16)+b[0]) + +@unittest.skipUnless(Device.DEFAULT == "GPU", "Not Implemented") +class TestOptReduceLoop(unittest.TestCase): + @unittest.skip("this is broken") + def test_loop_left(self): + a = Tensor.randn(16, 16) + b = Tensor.randn(16, 16) + with CLCache(): + t = a.sum(0) + b = t.reshape(16,1).expand(16,16).sum(0) + c = (t+b) + c.realize() + assert len(CacheCollector.cache) == 2, "loop left fusion broken" + + def test_loop_right(self): + a = Tensor.randn(16, 16) + b = Tensor.randn(16, 16) + with CLCache(): + t = a.sum(0) + b = t.reshape(16,1).expand(16,16).sum(0) + c = (b+t) + c.realize() + assert len(CacheCollector.cache) == 2, "loop right fusion broken" + +@unittest.skipUnless(Device.DEFAULT == "GPU", "Not Implemented") +class TestOptWChild(unittest.TestCase): + def test_unrealized_child(self): + a = Tensor.randn(16, 16) + b = Tensor.randn(16, 16) + with CLCache(): + c = (a*b).sum() + d = c+1 + e = c+2 + d.realize() + assert len(CacheCollector.cache) == 2, "don't fuse if you have children" + +@unittest.skipUnless(Device.DEFAULT == "GPU", "Not Implemented") +class TestOpt(unittest.TestCase): + def test_muladd(self): + a,b,c = [Tensor.ones(2,2) for _ in range(3)] + with CLCache(): + d = a * b + c + d.realize() + assert len(CacheCollector.cache) == 1, "optimizer didn't fold muladd" + np.testing.assert_allclose(d.numpy(), np.ones((2,2))*2, rtol=1e-5) + + def test_fold_reduce_elementwise(self): + img = Tensor.ones(32) + addme = Tensor.ones(1) + with CLCache(): + ret = img.sum() + addme + ret.realize() + assert len(CacheCollector.cache) == 1, "optimizer didn't fold reduce/elementwise" + assert ret.numpy()[0] == 33 + + def test_fold_batchnorm(self): + with Tensor.train(): + img = Tensor.ones(1,32,4,4) + bn = nn.BatchNorm2d(32, track_running_stats=False) + with CLCache(): + img_bn = bn(img).realize() + print(img_bn) + assert len(CacheCollector.cache) == 3, f"optimizer didn't fold batchnorm, got {len(CacheCollector.cache)}" + # Tensor.training = False + + def test_fold_conv_sgd(self): + with Tensor.train(): + img = Tensor.ones(2,3,4,4) + c1 = nn.Conv2d(3,32,3) + opt = optim.SGD(get_parameters(c1)) + with CLCache(): + opt.zero_grad() + c1(img).relu().sum().backward() + opt.step() + # TODO: this should be 4, but the sum 
output child stays around + # with pushing_permutes it can be 3 + # TODO: broken with optim fixes + assert len(CacheCollector.cache) in [4,5,6], f"optimizer didn't fold conv-backward SGD, got {len(CacheCollector.cache)}" + # Tensor.training = False + + def test_fold_2convs_sgd(self): + with Tensor.train(): + img = Tensor.ones(2,3,64,64) + c1 = nn.Conv2d(3,16,3,bias=False) + c2 = nn.Conv2d(16,32,3,bias=False) + opt = optim.SGD(get_parameters([c1, c2])) + with CLCache(allowed=9): + opt.zero_grad() + c2(c1(img).relu()).relu().sum().backward() + opt.step() + # Tensor.training = False + + def test_fold_4convs_sgd(self): + with Tensor.train(): + img = Tensor.ones(2,3,64,64) + c1 = nn.Conv2d(3,4,3,bias=False) + c2 = nn.Conv2d(4,8,3,bias=False) + c3 = nn.Conv2d(8,16,3,bias=False) + c4 = nn.Conv2d(16,32,3,bias=False) + opt = optim.SGD(get_parameters([c1, c2, c3, c4])) + with CLCache(allowed=19): + opt.zero_grad() + c4(c3(c2(c1(img).relu()).relu()).relu()).relu().sum().backward() + opt.step() + # Tensor.training = False + + def test_fold_conv_batchnorm_sgd(self): + with Tensor.train(): + img = Tensor.ones(1,3,4,4) + c1 = nn.Conv2d(3,32,3) + bn = nn.BatchNorm2d(32, track_running_stats=False) + opt = optim.SGD(get_parameters([c1, bn])) + with CLCache(allowed=18): # this is too high + img_bn = bn(c1(img)).elu().sum() + opt.zero_grad() + img_bn.backward() + opt.step() + # Tensor.training = False + + def test_fold_conv_batchnorm_notrain(self): + img = Tensor.ones(1,3,8,8) + c1 = nn.Conv2d(3,32,3) + bn = nn.BatchNorm2d(32, track_running_stats=False) + # precache the bn + img_conv = bn(c1(img)).relu().realize() + with CLCache(): + img_conv = bn(c1(img)).relu().realize() + assert len(CacheCollector.cache) == 1, f"optimizer didn't fold conv-batchnorm at test time, got {len(CacheCollector.cache)}" + + def test_fold_conv_batchnorm(self): + with Tensor.train(): + img = Tensor.ones(1,3,8,8) + c1 = nn.Conv2d(3,32,3) + bn = nn.BatchNorm2d(32, track_running_stats=False) + with CLCache(): + img_conv = bn(c1(img)).relu().realize() + print(img_conv) + assert len(CacheCollector.cache) == 4, f"optimizer didn't fold conv-batchnorm, got {len(CacheCollector.cache)}" + + def test_fold_conv_elu(self): + img = Tensor.ones(1,4,8,8) + c1 = nn.Conv2d(4, 4, kernel_size=3) + c2 = nn.Conv2d(4, 4, kernel_size=3) + with CLCache(): + img_conv = img.sequential([c1, Tensor.elu, c2, Tensor.elu]).realize() + print(img_conv) + assert len(CacheCollector.cache) == 2, "optimizer didn't fold conv/elu" + + def test_fold_conv_relu(self): + img = Tensor.ones(1,4,8,8) + c1 = nn.Conv2d(4, 4, kernel_size=3) + c2 = nn.Conv2d(4, 4, kernel_size=3) + with CLCache(): + img_conv = img.sequential([c1, Tensor.relu, c2, Tensor.relu]).realize() + print(img_conv) + assert len(CacheCollector.cache) == 2, "optimizer didn't fold conv/relu" + + def test_fold_conv_relu_nobias(self): + img = Tensor.ones(1,4,8,8) + c1 = nn.Conv2d(4, 4, kernel_size=3, bias=False) + c2 = nn.Conv2d(4, 4, kernel_size=3, bias=False) + with CLCache(): + img_conv = img.sequential([c1, Tensor.relu, c2, Tensor.relu]).realize() + print(img_conv) + assert len(CacheCollector.cache) == 2, "optimizer didn't fold conv/relu" + + def test_permute_was_pushed(self): + a = Tensor.randn(16, 16, 16) + with CLCache(): + c = a.sum(2) + d = c.permute(1,0).contiguous() + d.realize() + cache_len = len(CacheCollector.cache) + np.testing.assert_allclose(a.numpy().sum(2).transpose(1,0), d.numpy(), rtol=1e-3, atol=1e-5) + if PUSH_PERMUTES: assert cache_len == 1, "permute wasn't pushed!" 
+ + def test_permute_was_pushed_through_contract_reshape(self): + a = Tensor.randn(4, 4, 4, 4, 4) + with CLCache(): + c = a.sum(-1) + d = c.reshape(16,16).permute(1,0).contiguous() + d.realize() + cache_len = len(CacheCollector.cache) + np.testing.assert_allclose(a.numpy().sum(-1).reshape(16,16).transpose(1,0), d.numpy(), rtol=1e-3, atol=1e-5) + if PUSH_PERMUTES: assert cache_len == 1, "permute wasn't pushed!" + + def test_permute_was_pushed_through_contractw1s_reshape(self): + a = Tensor.randn(4, 4, 4, 4, 4) + with CLCache(): + c = a.sum(-1) + d = c.reshape(16,1,16).permute(2,1,0).contiguous() + d.realize() + cache_len = len(CacheCollector.cache) + np.testing.assert_allclose(a.numpy().sum(-1).reshape(16,1,16).transpose(2,1,0), d.numpy(), rtol=1e-3, atol=1e-5) + if PUSH_PERMUTES: assert cache_len == 1, "permute wasn't pushed!" + + # TODO: push permute through expansion reshape + @unittest.skip("expansion can't push expand permute yet") + @unittest.skipIf(not PUSH_PERMUTES, "this test requires PUSH_PERMUTES") + def test_permute_was_pushed_through_expand_reshape(self): + a = Tensor.randn(16, 16, 16) + with CLCache(): + c = a.sum(2) + d = c.reshape(4,4,4,4).permute(2,3,0,1).contiguous() + d.realize() + cache_len = len(CacheCollector.cache) + np.testing.assert_allclose(a.numpy().sum(2).transpose(1,0).reshape(4,4,4,4), d.numpy(), rtol=1e-3, atol=1e-5) + if PUSH_PERMUTES: assert cache_len == 1, "permute wasn't pushed!" + + @unittest.skipIf(PUSH_PERMUTES, "this test is broken with PUSH_PERMUTES") + def test_no_reduceop_rerun(self): + a = Tensor.randn(16, 16, 16) + with CLCache(): + c = a.sum(2) + d = a.sum(2).permute(1,0) + c.realize() + d.realize() + cache_len = len(CacheCollector.cache) + np.testing.assert_allclose(c.numpy().transpose(1,0), d.numpy(), rtol=1e-3, atol=1e-5) + assert cache_len == 1, "reduceop was rerun!" + + @unittest.skipIf(PUSH_PERMUTES, "this test is broken with PUSH_PERMUTES") + def test_no_reduceop_rerun_alt(self): + a = Tensor.randn(16, 16, 16) + with CLCache(): + c = a.sum(2).permute(1,0) + d = a.sum(2) + c.realize() + d.realize() + cache_len = len(CacheCollector.cache) + np.testing.assert_allclose(c.numpy(), d.numpy().transpose(1,0), rtol=1e-3, atol=1e-5) + assert cache_len == 1, "reduceop was rerun!" 
+ + def test_fold_with_contiguous(self): + a = Tensor.randn(16, 16, 16) + b = Tensor.randn(16, 16) + with CLCache(): + c = (a.sum(2).contiguous() + b).contiguous() + c.realize() + cache_len = len(CacheCollector.cache) + assert cache_len == 1, "contiguous wasn't folded" + + def _test_fold_expand_reduce_helper(self, n, m, axis, allowed): + b = torch.ones(n, m).sum(axis).reshape(n, 1).expand(n, m).sum(axis) + with CLCache(allowed=allowed): + a = Tensor.ones(n, m).sum(axis).reshape(n, 1).expand(n, m).sum(axis) + a.realize() + cache_len = len(CacheCollector.cache) + np.testing.assert_allclose(a.numpy(), b.numpy(), rtol=1e-3, atol=1e-5) + return cache_len + + def test_expand_reduce_is_folded_on_same_axis(self): + for axis in [0, 1]: + for n in [4, 8, 16]: + b = torch.ones(n, n).sum(axis).reshape(n, 1).expand(n, n).sum(axis) + with CLCache(allowed=2): + a = Tensor.ones(n, n).sum(axis).reshape(n, 1).expand(n, n).sum(axis) + a.realize() + cache_len = len(CacheCollector.cache) + np.testing.assert_allclose(a.numpy(), b.numpy(), rtol=1e-3, atol=1e-5) + return cache_len + + def test_expand_reduce_is_not_folded_on_different_axes(self): + axis1, axis2 = 0, 1 + for n in [4, 8, 16]: + b = torch.ones(n, n).sum(axis1).reshape(n, 1).expand(n, n).sum(axis2) + with CLCache(allowed=3): + a = Tensor.ones(n, n).sum(axis1).reshape(n, 1).expand(n, n).sum(axis2) + a.realize() + cache_len = len(CacheCollector.cache) + np.testing.assert_allclose(a.numpy(), b.numpy(), rtol=1e-3, atol=1e-5) + return cache_len + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/external/external_test_optim.py b/tinygrad_repo/test/external/external_test_optim.py new file mode 100644 index 0000000..2851f11 --- /dev/null +++ b/tinygrad_repo/test/external/external_test_optim.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +import unittest +import numpy as np +import tensorflow as tf +import tensorflow_addons as tfa +from tinygrad.tensor import Tensor +from tinygrad.nn.optim import LAMB + +np.random.seed(1337) +x_init = np.random.randn(1,4).astype(np.float32) +W_init = np.random.randn(4,4).astype(np.float32) +m_init = np.random.randn(1,4).astype(np.float32) + +class TinyNet: + def __init__(self): + self.x = Tensor(x_init.copy(), requires_grad=True) + self.W = Tensor(W_init.copy(), requires_grad=True) + self.m = Tensor(m_init.copy()) + + def forward(self): + out = self.x.matmul(self.W).relu() + out = out.log_softmax(1) + out = out.mul(self.m).add(self.m).sum() + return out + +class TinyNetTF: + def __init__(self): + self.x = tf.Variable(x_init.copy(), trainable=True) + self.W = tf.Variable(W_init.copy(), trainable=True) + self.m = tf.constant(m_init.copy()) + + def forward(self): + out = tf.matmul(self.x, self.W) + out = tf.nn.relu(out) + out = tf.nn.log_softmax(out, axis=1) + out = tf.multiply(out, self.m) + self.m + out = tf.reduce_sum(out) + return out + +def step(optim, steps=1, kwargs={}): + net = TinyNet() + optim = optim([net.x, net.W], **kwargs) + for _ in range(steps): + out = net.forward() + optim.zero_grad() + out.backward() + optim.step() + return net.x.detach().numpy(), net.W.detach().numpy() + +def step_tf(optim, steps=1, kwargs={}): + net = TinyNetTF() + optim = optim(**kwargs) + for _ in range(steps): + with tf.GradientTape() as tape: + out = net.forward() + grads = tape.gradient(out, [net.x, net.W]) + optim.apply_gradients(zip(grads, [net.x, net.W])) + return net.x.numpy(), net.W.numpy() + +class ExternalTestOptim(unittest.TestCase): + def _test_optim(self, tinygrad_optim, tensorflow_optim, steps, opts, 
atol, rtol): + for x,y in zip(step(tinygrad_optim, steps, kwargs=opts), + step_tf(tensorflow_optim, steps, kwargs=opts)): + np.testing.assert_allclose(x, y, atol=atol, rtol=rtol) + + def _test_lamb(self, steps, opts, atol, rtol): self._test_optim(LAMB, tfa.optimizers.LAMB, steps, opts, atol, rtol) + + def test_lamb(self): self._test_lamb(1, {'lr': 0.001}, 1e-5, 0) + def test_lamb_high_lr(self): self._test_lamb(1, {'lr': 10}, 1e-5, 1e-5) + + def test_multistep_lamb(self): self._test_lamb(10, {'lr': 0.001}, 1e-5, 0) + def test_multistep_lamb_high_lr(self): self._test_lamb(10, {'lr': 10}, 1e-5, 3e-4) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/external/external_test_speed_llama.py b/tinygrad_repo/test/external/external_test_speed_llama.py new file mode 100644 index 0000000..7635fc8 --- /dev/null +++ b/tinygrad_repo/test/external/external_test_speed_llama.py @@ -0,0 +1,57 @@ +# NOTE: this only tests the speed of the LLaMA codegen, it doesn't actually run the net +import unittest, time +import numpy as np +from examples.llama import Transformer, MODEL_PARAMS +from test.test_net_speed import start_profile, stop_profile +from tinygrad.tensor import Tensor +from tinygrad.ops import Device +from tinygrad.nn.state import get_state_dict +from tinygrad.ops import Compiled +from tinygrad.helpers import dtypes, prod +from tinygrad.runtime.lib import RawBuffer + +class FakeProgram: + def __init__(self, name:str, prg:str): pass + def __call__(self, *bufs, global_size, local_size, wait=False): pass + +class RawFakeBuffer(RawBuffer): + @classmethod + def fromCPU(cls, x:np.ndarray, **kwargs): return cls(prod(x.shape), dtypes.from_np(x.dtype), **kwargs) + def toCPU(self): return np.empty(self.size, dtype=self.dtype.np) + +class TestLLaMASpeed(unittest.TestCase): + @unittest.skipIf(not isinstance(Device[Device.DEFAULT], Compiled), "only test for compiled backends") + def test_llama_compile(self): + backup_program = Device[Device.DEFAULT].runtime + backup_buffer = Device[Device.DEFAULT].buffer + Device[Device.DEFAULT].runtime = FakeProgram + Device[Device.DEFAULT].buffer = RawFakeBuffer + + print("testing llama python run time") + model = Transformer(**MODEL_PARAMS["1"]["7B"]["args"]) + print("built model") + # assign fake tensors to the values + for v in get_state_dict(model).values(): v.assign(Tensor.empty(*v.shape, dtype=v.dtype)) + print("assigned empty tensors, doing warmup") + + def run_llama(st, empty_method_cache=True): + if empty_method_cache: Device[Device.DEFAULT].method_cache.clear() + tms = [time.perf_counter()] + for i in range(10): + model(Tensor([[2]]), i).realize() + tms.append(time.perf_counter()) + timings = [(tms[i+1]-tms[i])*1000 for i in range(len(tms)-1)] + print(f"{st:15s} mean runtime: {sum(timings)/len(timings):7.2f}ms, runs: ", ", ".join(f'{x:7.2f}' for x in timings)) + + run_llama("codegen") + run_llama("methodcache", False) + + pr = start_profile() + run_llama("profile") + stop_profile(pr, sort='time', frac=0.1) + + Device[Device.DEFAULT].runtime = backup_program + Device[Device.DEFAULT].buffer = backup_buffer + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/external/external_test_uops_graphing.py b/tinygrad_repo/test/external/external_test_uops_graphing.py new file mode 100644 index 0000000..85885a3 --- /dev/null +++ b/tinygrad_repo/test/external/external_test_uops_graphing.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +import unittest +from tinygrad.tensor import Tensor +from tinygrad.codegen.linearizer import 
Linearizer +from tinygrad.renderer.opencl import OpenCLRenderer +from tinygrad.graph import graph_uops +from tinygrad.nn import Conv2d + +class TestUopsGraph(unittest.TestCase): + def test_matmul(self): + N = 1024 + a = Tensor.rand(N,N) + b = Tensor.rand(N,N) + si = (a@b).lazydata.schedule()[-1] + lin = Linearizer(si.ast) + lin.hand_coded_optimizations() + print(lin.colored_shape()) + uops = lin.linearize().uops + graph_uops(uops) + for u in uops: print(u) + print(OpenCLRenderer("matmul", uops)[0]) + + def test_reduce(self): + a = Tensor.rand(1024*1024) + si = a.sum().lazydata.schedule()[-1] + lin = Linearizer(si.ast) + lin.hand_coded_optimizations() + uops = lin.linearize().uops + graph_uops(uops) + #print(OpenCLRenderer("reduce", uops)[0]) + + def test_conv(self): + x = Tensor.rand(1,3,16,16) + c = Conv2d(3, 16, (3,3)) + si = c(x).elu().lazydata.schedule()[-1] + lin = Linearizer(si.ast) + lin.hand_coded_optimizations() + uops = lin.linearize().uops + graph_uops(uops) + print(lin.colored_shape()) + print(OpenCLRenderer("conv", uops)[0]) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/external/external_test_yolo.py b/tinygrad_repo/test/external/external_test_yolo.py new file mode 100644 index 0000000..3631cab --- /dev/null +++ b/tinygrad_repo/test/external/external_test_yolo.py @@ -0,0 +1,36 @@ +import io +import unittest +from pathlib import Path + +import cv2 +import requests # type: ignore +import numpy as np + +from tinygrad.tensor import Tensor +from examples.yolov3 import Darknet, infer, show_labels +from extra.utils import fetch + +chicken_img = cv2.imread(str(Path(__file__).parent / 'efficientnet/Chicken.jpg')) +car_img = cv2.imread(str(Path(__file__).parent / 'efficientnet/car.jpg')) + +class TestYOLO(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.model = Darknet(fetch("https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg")) + print("Loading weights file (237MB). 
This might take a while…") + cls.model.load_weights("https://pjreddie.com/media/files/yolov3.weights") + + @classmethod + def tearDownClass(cls): + del cls.model + + def test_chicken(self): + labels = show_labels(infer(self.model, chicken_img), confidence=0.56) + self.assertEqual(labels, ["bird"]) + + def test_car(self): + labels = show_labels(infer(self.model, car_img)) + self.assertEqual(labels, ["car"]) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/external/external_test_yolov8.py b/tinygrad_repo/test/external/external_test_yolov8.py new file mode 100644 index 0000000..df5ae04 --- /dev/null +++ b/tinygrad_repo/test/external/external_test_yolov8.py @@ -0,0 +1,76 @@ +import numpy as np +from extra.utils import fetch, download_file, get_child +from examples.yolov8 import YOLOv8, get_variant_multiples, preprocess, postprocess, label_predictions +from pathlib import Path +import unittest +import io, cv2, os +import onnxruntime as ort +import ultralytics +from tinygrad.nn.state import safe_load, load_state_dict + +class TestYOLOv8(unittest.TestCase): + def test_all_load_weights(self): + for variant in ['n', 's', 'm', 'l', 'x']: + weights_location = Path(__file__).parents[2] / "weights" / f'yolov8{variant}.safetensors' + download_file(f'https://gitlab.com/r3sist/yolov8_weights/-/raw/master/yolov8{variant}.safetensors', weights_location) + + depth, width, ratio = get_variant_multiples(variant) + TinyYolov8 = YOLOv8(w=width, r=ratio, d=depth, num_classes=80) + state_dict = safe_load(weights_location) + load_state_dict(TinyYolov8, state_dict) + print(f'successfully loaded weights for yolov{variant}') + + def test_predictions(self): + test_image_urls = ['https://raw.githubusercontent.com/ultralytics/yolov5/master/data/images/bus.jpg', 'https://www.aljazeera.com/wp-content/uploads/2022/10/2022-04-28T192650Z_1186456067_UP1EI4S1I0P14_RTRMADP_3_SOCCER-ENGLAND-MUN-CHE-REPORT.jpg'] + variant = 'n' + weights_location = Path(__file__).parents[2] / "weights" / f'yolov8{variant}.safetensors' + depth, width, ratio = get_variant_multiples(variant) + TinyYolov8 = YOLOv8(w=width, r=ratio, d=depth, num_classes=80) + state_dict = safe_load(weights_location) + load_state_dict(TinyYolov8, state_dict) + + for i in range(len(test_image_urls)): + img_stream = io.BytesIO(fetch(test_image_urls[i])) + img = cv2.imdecode(np.frombuffer(img_stream.read(), np.uint8), 1) + test_image = preprocess([img]) + predictions = TinyYolov8(test_image) + post_predictions = postprocess(preds=predictions, img=test_image, orig_imgs=[img]) + labels = label_predictions(post_predictions) + assert labels == {5: 1, 0: 4, 11: 1} if i == 0 else labels == {0: 13, 29: 1, 32: 1} + + def test_forward_pass_torch_onnx(self): + variant = 'n' + weights_location_onnx = Path(__file__).parents[2] / "weights" / f'yolov8{variant}.onnx' + weights_location_pt = Path(__file__).parents[2] / "weights" / f'yolov8{variant}.pt' + weights_location = Path(__file__).parents[2] / "weights" / f'yolov8{variant}.safetensors' + + download_file(f'https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8{variant}.pt', weights_location_pt) + # the ultralytics export prints a lot of unneccesary things + if not weights_location_onnx.is_file(): + model = ultralytics.YOLO(model=weights_location_pt, task='Detect') + model.export(format="onnx",imgsz=[640, 480]) + + depth, width, ratio = get_variant_multiples(variant) + TinyYolov8 = YOLOv8(w=width, r=ratio, d=depth, num_classes=80) + state_dict = safe_load(weights_location) + 
load_state_dict(TinyYolov8, state_dict) + + image_location = [np.frombuffer(io.BytesIO(fetch('https://raw.githubusercontent.com/ultralytics/yolov5/master/data/images/bus.jpg')).read(), np.uint8)] + orig_image = [cv2.imdecode(image_location[0], 1)] + + input_image = preprocess(orig_image) + + onnx_session = ort.InferenceSession(weights_location_onnx) + onnx_input_name = onnx_session.get_inputs()[0].name + onnx_output_name = onnx_session.get_outputs()[0].name + onnx_output = onnx_session.run([onnx_output_name], {onnx_input_name: input_image.numpy()}) + + tiny_output = TinyYolov8(input_image) + + # currently rtol is 0.025 because there is a 1-2% difference in our predictions + # because of the zero padding in SPPF module (line 280) maxpooling layers rather than the -infinity in torch. + # This difference does not make a difference "visually". + np.testing.assert_allclose(onnx_output[0], tiny_output.numpy(), atol=5e-4, rtol=0.025) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/external/fuzz_shapetracker.py b/tinygrad_repo/test/external/fuzz_shapetracker.py new file mode 100644 index 0000000..cb012c2 --- /dev/null +++ b/tinygrad_repo/test/external/fuzz_shapetracker.py @@ -0,0 +1,61 @@ +import random +from tinygrad.helpers import DEBUG +from test.unit.test_shapetracker import CheckingShapeTracker +random.seed(42) + +def do_permute(st): + perm = list(range(0, len(st.shape))) + random.shuffle(perm) + perm = tuple(perm) + if DEBUG >= 1: print("st.permute(", perm, ")") + st.permute(perm) + +def do_pad(st): + c = random.randint(0, len(st.shape)-1) + pad = tuple((random.randint(0,2), random.randint(0,2)) if i==c else (0,0) for i in range(len(st.shape))) + if DEBUG >= 1: print("st.pad(", pad, ")") + st.pad(pad) + +def do_reshape_split_one(st): + c = random.randint(0, len(st.shape)-1) + poss = [n for n in [1,2,3,4,5] if st.shape[c]%n == 0] + spl = random.choice(poss) + shp = st.shape[0:c] + (st.shape[c]//spl, spl) + st.shape[c+1:] + if DEBUG >= 1: print("st.reshape(", shp, ")") + st.reshape(shp) + +def do_reshape_combine_two(st): + if len(st.shape) < 2: return + c = random.randint(0, len(st.shape)-2) + shp = st.shape[:c] + (st.shape[c] * st.shape[c+1], ) + st.shape[c+2:] + if DEBUG >= 1: print("st.reshape(", shp, ")") + st.reshape(shp) + +def do_shrink(st): + c = random.randint(0, len(st.shape)-1) + while 1: + shrink = tuple((random.randint(0,s), random.randint(0,s)) if i == c else (0,s) for i,s in enumerate(st.shape)) + if all(x<y for (x,y) in shrink): break + if DEBUG >= 1: print("st.shrink(", shrink, ")") + st.shrink(shrink) + +def do_stride(st): + c = random.randint(0, len(st.shape)-1) + stride = tuple(random.choice([-2,-1,2]) if i==c else 1 for i in range(len(st.shape))) + if DEBUG >= 1: print("st.stride(", stride, ")") + st.stride(stride) + +def do_expand(st): + c = [i for i,s in enumerate(st.shape) if s==1] + if len(c) == 0: return + c = random.choice(c) + expand = tuple(random.choice([2,3,4]) if i==c else s for i,s in enumerate(st.shape)) + if DEBUG >= 1: print("st.expand(", expand, ")") + st.expand(expand) + +if __name__ == "__main__": + ops = [do_permute, do_pad, do_shrink, do_reshape_split_one, do_reshape_combine_two, do_stride, do_expand] + for _ in range(200): + st = CheckingShapeTracker((random.randint(2, 10), random.randint(2, 10), random.randint(2, 10))) + for i in range(8): random.choice(ops)(st) + st.assert_same() diff --git a/tinygrad_repo/test/external/fuzz_symbolic.py b/tinygrad_repo/test/external/fuzz_symbolic.py new file mode 100644 index 0000000..290930d --- /dev/null +++
b/tinygrad_repo/test/external/fuzz_symbolic.py @@ -0,0 +1,69 @@ +import itertools +import random +from tinygrad.helpers import DEBUG +from tinygrad.shape.symbolic import Variable +random.seed(42) + +def add_v(expr, rng=None): + if rng is None: rng = random.randint(0,2) + return expr + v[rng], rng + +def div(expr, rng=None): + if rng is None: rng = random.randint(1,9) + return expr // rng, rng + +def mul(expr, rng=None): + if rng is None: rng = random.randint(-4,4) + return expr * rng, rng + +def mod(expr, rng=None): + if rng is None: rng = random.randint(1,9) + return expr % rng, rng + +def add_num(expr, rng=None): + if rng is None: rng = random.randint(-4,4) + return expr + rng, rng + +def lt(expr, rng=None): + if rng is None: rng = random.randint(-4,4) + return expr < rng, rng + +def ge(expr, rng=None): + if rng is None: rng = random.randint(-4,4) + return expr >= rng, rng + +def le(expr, rng=None): + if rng is None: rng = random.randint(-4,4) + return expr <= rng, rng + +def gt(expr, rng=None): + if rng is None: rng = random.randint(-4,4) + return expr > rng, rng + +if __name__ == "__main__": + ops = [add_v, div, mul, add_num, mod] + for _ in range(1000): + upper_bounds = [*list(range(1, 10)), 16, 32, 64, 128, 256] + u1 = Variable("v1", 0, random.choice(upper_bounds)) + u2 = Variable("v2", 0, random.choice(upper_bounds)) + u3 = Variable("v3", 0, random.choice(upper_bounds)) + v = [u1,u2,u3] + tape = [random.choice(ops) for _ in range(random.randint(2, 30))] + # 10% of the time, add one of lt, le, gt, ge + if random.random() < 0.1: tape.append(random.choice([lt, le, gt, ge])) + expr = Variable.num(0) + rngs = [] + for t in tape: + expr, rng = t(expr) + if DEBUG >= 1: print(t.__name__, rng) + rngs.append(rng) + if DEBUG >=1: print(expr) + space = list(itertools.product(range(u1.min, u1.max+1), range(u2.min, u2.max+1), range(u3.min, u3.max+1))) + volume = len(space) + for (v1, v2, v3) in random.sample(space, min(100, volume)): + v = [v1,v2,v3] + rn = 0 + for t,r in zip(tape, rngs): rn, _ = t(rn, r) + num = eval(expr.render()) + assert num == rn, f"mismatched {expr.render()} at {v1=} {v2=} {v3=} = {num} != {rn}" + if DEBUG >= 1: print(f"matched {expr.render()} at {v1=} {v2=} {v3=} = {num} == {rn}") diff --git a/tinygrad_repo/test/external/graph_batchnorm.py b/tinygrad_repo/test/external/graph_batchnorm.py new file mode 100644 index 0000000..59e3b79 --- /dev/null +++ b/tinygrad_repo/test/external/graph_batchnorm.py @@ -0,0 +1,61 @@ +import unittest +from tinygrad.nn.state import get_parameters +from tinygrad.tensor import Tensor +from tinygrad.nn import Conv2d, BatchNorm2d, optim + +def model_step(lm): + with Tensor.train(): + x = Tensor.ones(8,12,128,256, requires_grad=False) + optimizer = optim.SGD(get_parameters(lm), lr=0.001) + loss = lm.forward(x).sum() + optimizer.zero_grad() + loss.backward() + del x,loss + optimizer.step() + +class TestBatchnorm(unittest.TestCase): + def test_conv(self): + class LilModel: + def __init__(self): + self.c = Conv2d(12, 32, 3, padding=1, bias=False) + def forward(self, x): + return self.c(x).relu() + lm = LilModel() + model_step(lm) + + def test_two_conv(self): + class LilModel: + def __init__(self): + self.c = Conv2d(12, 32, 3, padding=1, bias=False) + self.c2 = Conv2d(32, 32, 3, padding=1, bias=False) + def forward(self, x): + return self.c2(self.c(x)).relu() + lm = LilModel() + model_step(lm) + + def test_two_conv_bn(self): + class LilModel: + def __init__(self): + self.c = Conv2d(12, 24, 3, padding=1, bias=False) + self.bn = BatchNorm2d(24, 
track_running_stats=False) + self.c2 = Conv2d(24, 32, 3, padding=1, bias=False) + self.bn2 = BatchNorm2d(32, track_running_stats=False) + def forward(self, x): + x = self.bn(self.c(x)).relu() + return self.bn2(self.c2(x)).relu() + lm = LilModel() + model_step(lm) + + def test_conv_bn(self): + class LilModel: + def __init__(self): + self.c = Conv2d(12, 32, 3, padding=1, bias=False) + self.bn = BatchNorm2d(32, track_running_stats=False) + def forward(self, x): + return self.bn(self.c(x)).relu() + lm = LilModel() + model_step(lm) + + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/external/test_example.py b/tinygrad_repo/test/external/test_example.py new file mode 100644 index 0000000..5af94e5 --- /dev/null +++ b/tinygrad_repo/test/external/test_example.py @@ -0,0 +1,74 @@ +import unittest +import numpy as np +from tinygrad.ops import Device +from tinygrad.tensor import Tensor +from tinygrad.helpers import getenv, CI + +def multidevice_test(fxn): + exclude_devices = getenv("EXCLUDE_DEVICES", "").split(",") + def ret(self): + for device in Device._buffers: + if device in ["DISK", "SHM", "FAKE"]: continue + if not CI: print(device) + if device in exclude_devices: + if not CI: print(f"WARNING: {device} test is excluded") + continue + with self.subTest(device=device): + try: + Device[device] + except Exception: + if not CI: print(f"WARNING: {device} test isn't running") + continue + fxn(self, device) + return ret + +class TestExample(unittest.TestCase): + @multidevice_test + def test_convert_to_cpu(self, device): + a = Tensor([[1,2],[3,4]], device=device) + assert a.numpy().shape == (2,2) + b = a.cpu() + assert b.numpy().shape == (2,2) + + @multidevice_test + def test_2_plus_3(self, device): + a = Tensor([2], device=device) + b = Tensor([3], device=device) + result = a + b + print(f"{a.numpy()} + {b.numpy()} = {result.numpy()}") + assert result.numpy()[0] == 5. 
+ + @multidevice_test + def test_example_readme(self, device): + x = Tensor.eye(3, device=device, requires_grad=True) + y = Tensor([[2.0,0,-2.0]], device=device, requires_grad=True) + z = y.matmul(x).sum() + z.backward() + + x.grad.numpy() # dz/dx + y.grad.numpy() # dz/dy + + assert x.grad.device == device + assert y.grad.device == device + + @multidevice_test + def test_example_matmul(self, device): + try: + Device[device] + except Exception: + print(f"WARNING: {device} test isn't running") + return + + x = Tensor.eye(64, device=device, requires_grad=True) + y = Tensor.eye(64, device=device, requires_grad=True) + z = y.matmul(x).sum() + z.backward() + + x.grad.numpy() # dz/dx + y.grad.numpy() # dz/dy + + assert x.grad.device == device + assert y.grad.device == device + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/extra/test_export_model.py b/tinygrad_repo/test/extra/test_export_model.py new file mode 100644 index 0000000..675e46d --- /dev/null +++ b/tinygrad_repo/test/extra/test_export_model.py @@ -0,0 +1,50 @@ +import unittest +from extra.export_model import export_model, EXPORT_SUPPORTED_DEVICE +from tinygrad.tensor import Tensor, Device +import json + +class MockMultiInputModel: + def forward(self, x1, x2, x3): + return x1 + x2 + x3 + +class MockMultiOutputModel: + def __call__(self, x1): + return x1 + 2.0, x1.pad(((0, 0), (0, 1))) + 1.0 + +# TODO: move compile_efficientnet tests here +@unittest.skipUnless(Device.DEFAULT in EXPORT_SUPPORTED_DEVICE, f"Model export is not supported on {Device.DEFAULT}") +class TextModelExport(unittest.TestCase): + def test_multi_input_model_export(self): + model = MockMultiInputModel() + inputs = [Tensor.rand(2,2), Tensor.rand(2,2), Tensor.rand(2,2)] + prg, inp_sizes, _, _ = export_model(model, "", *inputs) + prg = json.loads(prg) + + assert len(inputs) == len(prg["inputs"]) == len(inp_sizes), f"Model and exported inputs don't match: mdl={len(inputs)}, prg={len(prg['inputs'])}, inp_sizes={len(inp_sizes)}" + + for i in range(len(inputs)): + assert f"input{i}" in inp_sizes, f"input{i} not captured in inp_sizes" + assert f"input{i}" in prg["buffers"], f"input{i} not captured in exported buffers" + + for i, exported_input in enumerate(prg["inputs"]): + assert inputs[i].dtype.name == exported_input["dtype"], f"Model and exported input dtype don't match: mdl={inputs[i].dtype.name}, prg={exported_input['dtype']}" + + def test_multi_output_model_export(self): + model = MockMultiOutputModel() + input = Tensor.rand(2,2) + outputs = model(input) + prg, _, out_sizes, _ = export_model(model, "", input) + prg = json.loads(prg) + + assert len(outputs) == len(prg["outputs"]) == len(out_sizes), f"Model and exported outputs don't match: mdl={len(outputs)}, prg={len(prg['outputs'])}, inp_sizes={len(out_sizes)}" + + for i in range(len(outputs)): + assert f"output{i}" in out_sizes, f"output{i} not captured in out_sizes" + assert f"output{i}" in prg["buffers"], f"output{i} not captured in exported buffers" + + for i, exported_output in enumerate(prg["outputs"]): + assert outputs[i].dtype.name == exported_output["dtype"], f"Model and exported output dtype don't match: mdl={outputs[i].dtype.name}, prg={exported_output['dtype']}" + + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/extra/test_extra_helpers.py b/tinygrad_repo/test/extra/test_extra_helpers.py new file mode 100644 index 0000000..6832b97 --- /dev/null +++ b/tinygrad_repo/test/extra/test_extra_helpers.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +import os, 
cloudpickle, tempfile, unittest, subprocess +from extra.helpers import enable_early_exec, cross_process, _CloudpickleFunctionWrapper + +def normalize_line_endings(s): return s.replace(b'\r\n', b'\n') + +class TestEarlyExec(unittest.TestCase): + def setUp(self) -> None: + self.early_exec = enable_early_exec() + + def early_exec_py_file(self, file_content, exec_args): + with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as temp: + temp.write(file_content) + temp_path = temp.name + try: + output = self.early_exec((["python3", temp_path] + exec_args, None)) + return output + finally: + os.remove(temp_path) + + def test_enable_early_exec(self): + output = self.early_exec_py_file(b'print("Hello, world!")', []) + self.assertEqual(b"Hello, world!\n", normalize_line_endings(output)) + + def test_enable_early_exec_with_arg(self): + output = self.early_exec_py_file(b'import sys\nprint("Hello, " + sys.argv[1] + "!")', ["world"]) + self.assertEqual(b"Hello, world!\n", normalize_line_endings(output)) + + def test_enable_early_exec_process_exception(self): + with self.assertRaises(subprocess.CalledProcessError): + self.early_exec_py_file(b'raise Exception("Test exception")', []) + + def test_enable_early_exec_type_exception(self): + with self.assertRaises(TypeError): + self.early_exec((["python3"], "print('Hello, world!')")) + +class TestCrossProcess(unittest.TestCase): + + def test_cross_process(self): + def _iterate(): + for i in range(10): yield i + results = list(cross_process(_iterate)) + self.assertEqual(list(range(10)), results) + + def test_cross_process_exception(self): + def _iterate(): + for i in range(10): + if i == 5: raise ValueError("Test exception") + yield i + with self.assertRaises(ValueError): list(cross_process(_iterate)) + + def test_CloudpickleFunctionWrapper(self): + def add(x, y): return x + y + self.assertEqual(7, cloudpickle.loads(cloudpickle.dumps(_CloudpickleFunctionWrapper(add)))(3, 4)) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tinygrad_repo/test/extra/test_lr_scheduler.py b/tinygrad_repo/test/extra/test_lr_scheduler.py new file mode 100644 index 0000000..9aa9b86 --- /dev/null +++ b/tinygrad_repo/test/extra/test_lr_scheduler.py @@ -0,0 +1,107 @@ +import numpy as np +import torch +import unittest +from tinygrad.tensor import Tensor +from tinygrad.nn.state import get_parameters +from tinygrad.nn.optim import Adam +from extra.lr_scheduler import MultiStepLR, ReduceLROnPlateau, CosineAnnealingLR, OneCycleLR +from extra.training import train, evaluate +from extra.datasets import fetch_mnist +import pytest + +pytestmark = [pytest.mark.exclude_cuda, pytest.mark.exclude_gpu] + +np.random.seed(1337) +Tensor.manual_seed(1337) + +X_train, Y_train, X_test, Y_test = fetch_mnist() + +class TinyBobNet: + def __init__(self): + self.l1 = Tensor.scaled_uniform(784, 128) + self.l2 = Tensor.scaled_uniform(128, 10) + + def parameters(self): + return get_parameters(self) + + def forward(self, x): + return x.dot(self.l1).relu().dot(self.l2).log_softmax() + +def lr_scheduler_training(sched_fn=None, args=None): + model = TinyBobNet() + optim = Adam(model.parameters(), lr=0.01) + if sched_fn is not None: sched = sched_fn(optim, **args) + for _ in range(25): + train(model, X_train, Y_train, optim, 100) + if sched_fn is not None: + if isinstance(sched, ReduceLROnPlateau): + sched.step(evaluate(model, X_test, Y_test)) + else: + sched.step() + return evaluate(model, X_test, Y_test) + +def current_lr(optim): return optim.param_groups[0]['lr'] if 
hasattr(optim, 'param_groups') else optim.lr +def get_lrs(optim, sched, epochs, steps=1, accs=None): + lr = current_lr(optim) + if not isinstance(lr, float): lr = lr.numpy()[0] + lrs = [lr] + for e in range(epochs): + for _ in range(steps): + optim.step() + sched.step() if accs is None else sched.step(accs[e]) + lr = current_lr(optim) + if not isinstance(lr, float): lr = lr.numpy()[0] + lrs.append(lr) + return lrs + +class TestLrScheduler(unittest.TestCase): + def _test_lr_scheduler(self, tinygrad_sched, torch_sched, epochs, opts, atol, rtol): + accs = opts.pop('accs', None) + tinygrad_optim, torch_optim = Adam([], lr=0.01), torch.optim.Adam([torch.tensor([0.], requires_grad=True)], lr=0.01) + tinygrad_sched, torch_sched = tinygrad_sched(tinygrad_optim, **opts), torch_sched(torch_optim, **opts) + + tinygrad_lrs = get_lrs(tinygrad_optim, tinygrad_sched, epochs, accs=accs) + torch_lrs = get_lrs(torch_optim, torch_sched, epochs, accs=accs) + + np.testing.assert_allclose(tinygrad_lrs, torch_lrs, atol=atol, rtol=rtol) + + def _test_multisteplr(self, epochs, opts, atol, rtol): + self._test_lr_scheduler(MultiStepLR, torch.optim.lr_scheduler.MultiStepLR, epochs, opts, atol, rtol) + def _test_reducelronplateau(self, epochs, opts, atol, rtol): + opts['accs'] = np.random.randn(epochs) + self._test_lr_scheduler(ReduceLROnPlateau, torch.optim.lr_scheduler.ReduceLROnPlateau, epochs, opts, atol, rtol) + def _test_cosineannealinglr(self, epochs, opts, atol, rtol): + opts['T_max'] = epochs + self._test_lr_scheduler(CosineAnnealingLR, torch.optim.lr_scheduler.CosineAnnealingLR, epochs, opts, atol, rtol) + def _test_onecyclelr(self, epochs, opts, atol, rtol): + opts['total_steps'] = epochs + self._test_lr_scheduler(OneCycleLR, torch.optim.lr_scheduler.OneCycleLR, epochs, opts, atol, rtol) + + def test_multisteplr(self): self._test_multisteplr(10, {'milestones': [1, 2, 7]}, 1e-6, 1e-6) + def test_multisteplr_gamma(self): self._test_multisteplr(10, {'milestones': [1, 2, 7], 'gamma': 0.1337}, 1e-6, 1e-6) + + def test_reducelronplateau(self): self._test_reducelronplateau(100, {}, 1e-6, 1e-6) + def test_reducelronplateau_max(self): self._test_reducelronplateau(100, {'mode': 'max'}, 1e-6, 1e-6) + def test_reducelronplateau_factor(self): self._test_reducelronplateau(100, {'factor': 0.1337}, 1e-6, 1e-6) + def test_reducelronplateau_patience(self): self._test_reducelronplateau(100, {'patience': 3}, 1e-6, 1e-6) + def test_reducelronplateau_threshold(self): self._test_reducelronplateau(100, {'threshold': 1e-6}, 1e-6, 1e-6) + def test_reducelronplateau_threshold_mode(self): self._test_reducelronplateau(100, {'threshold_mode': 'abs'}, 1e-6, 1e-6) + + def test_cosineannealinglr(self): self._test_cosineannealinglr(100, {}, 1e-6, 1e-6) + def test_cosineannealinglr_eta_min(self): self._test_cosineannealinglr(100, {'eta_min': 0.001}, 1e-6, 1e-6) + + def test_onecyclelr(self): self._test_onecyclelr(1000, {'pct_start': 0.3, 'anneal_strategy': 'linear', + 'cycle_momentum': False, 'div_factor': 25.0, + 'final_div_factor': 10000.0, 'max_lr':1e-5}, 1e-6, 1e-6) + @unittest.skip("slow") + def test_training(self): + without = lr_scheduler_training() + sched_fns = [MultiStepLR, ReduceLROnPlateau, CosineAnnealingLR, OneCycleLR] + argss = [{'milestones': [5, 7, 10, 15], 'gamma': 0.5}, {'factor': 0.5, 'patience': 2}, {'T_max': 25, 'eta_min': 0.001}, + {'pct_start': 0.3, 'anneal_strategy': 'linear', 'cycle_momentum': False, 'div_factor': 25.0, 'final_div_factor': 10000.0, 'max_lr':1e-5, 'total_steps': 25}] + for sched_fn, args in 
zip(sched_fns, argss): + with_sched = lr_scheduler_training(sched_fn, args) + assert with_sched > without + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/extra/test_utils.py b/tinygrad_repo/test/extra/test_utils.py new file mode 100644 index 0000000..2c0b831 --- /dev/null +++ b/tinygrad_repo/test/extra/test_utils.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python +import io, unittest +import os +import tempfile +from unittest.mock import patch, MagicMock + +import torch +import numpy as np +from tinygrad.helpers import CI +from extra.utils import fetch, temp, download_file +from tinygrad.nn.state import torch_load +from PIL import Image + +@unittest.skipIf(CI, "no internet tests in CI") +class TestFetch(unittest.TestCase): + def test_fetch_bad_http(self): + self.assertRaises(AssertionError, fetch, 'http://httpstat.us/500') + self.assertRaises(AssertionError, fetch, 'http://httpstat.us/404') + self.assertRaises(AssertionError, fetch, 'http://httpstat.us/400') + + def test_fetch_small(self): + assert(len(fetch('https://google.com'))>0) + + def test_fetch_img(self): + img = fetch("https://media.istockphoto.com/photos/hen-picture-id831791190") + pimg = Image.open(io.BytesIO(img)) + assert pimg.size == (705, 1024) + +class TestFetchRelative(unittest.TestCase): + def setUp(self): + self.working_dir = os.getcwd() + self.tempdir = tempfile.TemporaryDirectory() + os.chdir(self.tempdir.name) + with open('test_file.txt', 'x') as f: + f.write("12345") + + def tearDown(self): + os.chdir(self.working_dir) + self.tempdir.cleanup() + + #test ./ + def test_fetch_relative_dotslash(self): + self.assertEqual(b'12345', fetch("./test_file.txt")) + + #test ../ + def test_fetch_relative_dotdotslash(self): + os.mkdir('test_file_path') + os.chdir('test_file_path') + self.assertEqual(b'12345', fetch("../test_file.txt")) + +class TestDownloadFile(unittest.TestCase): + def setUp(self): + from pathlib import Path + self.test_file = Path(temp("test_download_file/test_file.txt")) + + def tearDown(self): + os.remove(self.test_file) + os.removedirs(self.test_file.parent) + + @patch('requests.get') + def test_download_file_with_mkdir(self, mock_requests): + mock_response = MagicMock() + mock_response.iter_content.return_value = [b'1234', b'5678'] + mock_response.status_code = 200 + mock_response.headers = {'content-length': '8'} + mock_requests.return_value = mock_response + self.assertFalse(self.test_file.parent.exists()) + download_file("https://www.mock.com/fake.txt", self.test_file, skip_if_exists=False) + self.assertTrue(self.test_file.parent.exists()) + self.assertTrue(self.test_file.is_file()) + self.assertEqual('12345678', self.test_file.read_text()) + +class TestUtils(unittest.TestCase): + def test_fake_torch_load_zipped(self): self._test_fake_torch_load_zipped() + def test_fake_torch_load_zipped_float16(self): self._test_fake_torch_load_zipped(isfloat16=True) + def _test_fake_torch_load_zipped(self, isfloat16=False): + class LayerWithOffset(torch.nn.Module): + def __init__(self): + super(LayerWithOffset, self).__init__() + d = torch.randn(16) + self.param1 = torch.nn.Parameter( + d.as_strided([2, 2], [1, 2], storage_offset=5) + ) + self.param2 = torch.nn.Parameter( + d.as_strided([2, 2], [1, 2], storage_offset=4) + ) + + model = torch.nn.Sequential( + torch.nn.Linear(4, 8), + torch.nn.Linear(8, 3), + LayerWithOffset() + ) + if isfloat16: model = model.half() + + path = temp(f"test_load_{isfloat16}.pt") + torch.save(model.state_dict(), path) + model2 = torch_load(path) + + for name, a in 
model.state_dict().items(): + b = model2[name] + a, b = a.numpy(), b.numpy() + assert a.shape == b.shape + assert a.dtype == b.dtype + assert np.array_equal(a, b) +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/helpers.py b/tinygrad_repo/test/helpers.py new file mode 100644 index 0000000..a7e42fa --- /dev/null +++ b/tinygrad_repo/test/helpers.py @@ -0,0 +1,15 @@ +from tinygrad.ops import LazyOp, LoadOps +from tinygrad.nn.state import get_parameters + +# for speed +def derandomize(x): + if isinstance(x, LazyOp): + new_op = LoadOps.EMPTY if x.op == LoadOps.RAND else x.op + return LazyOp(new_op, tuple([derandomize(s) for s in x.src]), x.arg) + x.op = derandomize(x.op) + return x + +def derandomize_model(model): + for p in get_parameters(model): + p.lazydata = derandomize(p.lazydata) + p.realize() diff --git a/tinygrad_repo/test/models/efficientnet/Chicken.jpg b/tinygrad_repo/test/models/efficientnet/Chicken.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9dd4815dbaca705f6fab98300246667f1da0324e GIT binary patch literal 110248
zUFl0{B;$`WO_&-D(503)3o7j+JA5?f(-cc>*(tH(11bi6g{7pD+I#X1I6x_e$84xX zX-MjM3n&U1Ms*iqDJnISyc9asX z+|pYqCuv-5P`%QdQs}m&c%ZO8VU9Vc#jSKK^$rcV?xwVtO{3fX(JkfzO7MRSaFP7Y zKE?-G$}r7yWZSUWBE(@?I62Srq3Yo-;aGIT*?7A$*lEP1r#`tJo^=ZNK?D_dp&nhq zR~=7bT*b(70Oymz^{B{cF8qWJsNg}jD!$*egT6AH@IsHB9w@fn;|njal!cHp$25o2y~<{tgO2LQX`2hJGS))4 z0MxuOZD*NSs11eF4mF=^dr*QxeL=3UsWYBXCZ@BfxU__P!#~QpE>03Avz0CC#+H>O zF9bGR!93@^S2ajU1OT9_8GNcMth#%3acbJgBRp}F^{B{bN$r#anlEF*u9x1PU1LjW zdq(1}IR12lqQ7vm5XdfU3i@BP&G42yPZ3G_*0v7QaIca;ZNgzhlNm@KGW|L8_NUCi z@`8LsaHb=O?tr48vOk0x?(n_g5z60wba-uz`3m>`l>AP&JRziqC<{<(Mv%*eK!8UI zC+`noMF)h@fOklJL8xM~rD111^X6(Yb7OTYNWkNi49=xNl8~25xd4uMs?A*EH&*Is zHh_IoCe^5Rgq1Ak0QdaqF-z&fdRT985mQ=;fI>^49kgRw3XXr5j_O6NP>h8k#2-PL zGC1roqGT2eWZ&I&DMsLgccKWl34rpnq~AbDNp${wxb~#)DSO52fv%dN45iZIlA-kP zMncd|RD7k`Bf3y(exNEUL60rQ{%+||7&S1+n|s+uI1|rhST+9uRqi3D65aN2P$Yb> z9MXg$_p%YxZ7u`^lpR-^yroymijQIc0H`DL=A02Qo29ymgFAUzi|UNDzLbmA!9xJ@ z>E@PUYGstMv|7qhJx!A1P6b|OloTB0XNvcOi=+x>Um<=**UNE9TCY<1^>Uk%CX>Q* z{E>v9_rcYbT(~n6om49eY~j=N^Y5O z^h^$%hRwoKFrF9dN|o0M1Rq6ZAtkRDZqEFYMFjkeq>sgvd2VtU2uMK~I5{8If-Wea zN99G)*BqpLpmCgfQ2WX%HxL0*O*tJcs1zffc@@i)`y|0@f)2f|GLpT_r5Ua!-zCzP ze=2Ovb8g9p0rtE+6=7H$8cKoz@k(k&}b! zKfMhz&n0`t0cr*rSRq-@)`s7dhs~TQ`LJSaJCvOE8TF*^DW}ZQ!VH@ZJ1ZqCXmQ7{ zopH{(>!$?WLP$h)wo(>?zv2|+cPH5oB~b&}177W|l;t}~_Wrcyl0Knm+rD#g^kd3 z`7>V|6dMD!O)>+B&R2_#uz>C`CM{~l2dK|#bd%W-IIt*X+jR9Pl%6xd-AQW*DahJS zd@Y~Z^W08IC0ys8)N>r-xDCb@HQVQepn|n=-*;;6;zC7;u{l@~4tiEFr$GlN3hi8M zZse5F%TCoQn3p90qL&Y#C;3rrAr8acM!yc=rC|tnf;r7CaJX9w8!&DQaT}7e+c`by zmzM*Ak|Z~{M6BBG1pxl|oO$_I97qY)yB_O#n{C5*Dnq3A1dsEYWzIIjE*Iw~g*wH) zXi^TxlAQUD=7RBD6pJMmMNyzi+H<%MK0DGJ=Nl4{9E6MaSf@jj0m@gLf3~CQ0!eWzVyx)jiLD$Wbvd&cjfXytE=`B0J%!q%rc00fbbWTVZ59FXG(4L?txR zff!j&n8kJ1?5&z}#q1D;$}&J--U`9by+u8lHdh))SRh$2Tkze_BX+WSo>CL~+2WCqUOK4R@U7VUMQcEkJlkI9~%C8WGeDL9w?uOiON^Wros90pNL3 zMJqtHk|NVWGjLD%a- z9DU!FCnp?=8Q?VGKEiKhUp1DI64dA|r?J8Jp=#P(+##wd`NBh8dTb?vuLUJ1o)i9N zy$lw&vWBRJ;@84YU%EcrmGc=#3geF7<|w01Y@y^17+n=ZZMLNi#R?umrZ<(PJ)-8y zMjXg}4@@9V6uc<2~rHGf1#Ph@2mCtKIra%Yf{Aw&V* zon!91v=-efFZY)`DGBj8$LH@)G3C4=j39+lHSVsM%JwJ33E#-|{{V_J=G=HGu}B-0 zO8See=P=X7PdJ0T=M~3S5Ixh7P&E0RRa|eWHw8_9WT%Qs$mH;84Ak9wEWGrPH1_4cM^s*IAHZ6N&0)#+`1Wj4Y-(#g;L zoMp6T~kt5F+Lu%&M} z3IGG!(wHyG4H>|_#n>0T=a4z3pCd;l63aD~D7GG*4QU<4xwX`# z9E0-;j((&$2?nC~Lee#B3qfr!1rf$K3gmj5ZI&xQAFdTPUFKWnz2M?rk>q5Grz0w@LJgm< zR=fbMt>>O-b6#^~&~5GPpq|yG)gPTCVDp@QRnt~VMUOX8kLqk3CC13_$>xZTq@Gg_ zB@SFQVu&Fwr26uE(cUTbNO=R4AoimRTDN#4=N->#^m86p0lpMmjYx!~DSu~Dk7JWW z?{{SDO}ImIwH{KUkr;Aw-n!!<;I3}D_fRh1)Swhha=&z*KUzaetzwi0ebD^t(~1LP z>cy)cSdm4Sr!dplE*Xs{TE&yG0By)_=lE>Y7tp|i(5!Xv;{{YlFRp-wh z)HBKSP7SMSJI<}-r^R%SF}VJ8)0#laA(y?s->M~LwW%prin4g@PwPzC$ih3R)WdM6 zDO!qgoC8jVkTSb$U3yiq_Y{PeHi8M~%if!P&Jxd*TrHO(h|GrAMmWYj1xUa)fIFzk zS#xzC_F|6_M34v^dQsrHIZL!U>l1;rw4$&F9sNJd(x1Y%(6I+x6b|8p=jr_Dc>v{F zqW}va*$F}10)j^%SE0Ss9;jPu_fRi0wQ3SesrGhWtWuMlxvjw(1(Gm*AtvnBV+K$nAs8A!w4}+;XC)7Z^%Z zo#lAv1Da4is8yC0bSJ?J3C`310E+1WaNBSyVA|e&vg3+a`ckcoF1uqg<#XrEL^d0*ss6IAXa%^@8(%pDYJW~ z<)v#=XaOk+;Ey~}^uUpXEBRAyLrzjcurtW?qYPHIQx0|3aTP!%TPa3BV$Y|YcStj)a0^@FMN|Jfc7|5vx z$#9Tb8=ESV)Eh`kzAKq4+(5^EJ&ius45Qw`YlY2@vACv{A+#yNoyyvKoK$sHNNB=# zAT6A#yKR8;@^P_yGs(wpYBrvv2VDgd15MG)ohK#v0cD~Tu$+t^K}epxq1>kAnYQ#t z7XFyH^fKUNxUvTTW1Nou)I0T%(UPO2s9?zDPFuPuE=PeQ>~0Qla63@W*P7GHZQ7GZ z28Y56rYe*YG0J_-2{%%UnPh#=)2 z>7I;BM54b3VBelcYFX+g9&(s=t6&1Rx(B9G<2J@y#?y?Ez^+NJ?iK|JE~rh&Rt1d) zvRhGXfsS&VWK!F!JJmqeRFeCI@6(!T*o>t}M%*NRhyG}qZAg~gb9p?b#kCJGgrtRR zr5xuuz^A6voCU!|8Tq6J*xO$7-E%mlG3VH^% zQWTJ0%DeEc=g;t|Sh|6vWjXUg)(FlvzIC85^2T`YP91fp7fbn(!Cqwb9!_!gmkLMn 
zq@8)a%14X=P;)wqC1oGJfAa55f6X-Hq{%qsa-i3(WvyJg^0UW(^V)+t%Xe~`6$G63 zUKgyMN>p5V&tBcX&MElyILO@9*VKS=pyg|ao5D-yaP;RM^l;SUC}+$;&k7}a`O>YX zLWjLeLst;)hZ0EZi>SF8>@8}`$2q{_jTKSy)PpHDy47!3wpafEz48(XYjB)h)}aH4XXr6DOc0Aar1f<>m?T?mHNbx;sVT* zk>5YglN5lCR}A^}QPUj+e6Kb~mj<|HExEdU&JWQIJuyikWya4u9!6+4PDXCJD$fM) zwwt*HxKJKrJ@J!LER?u%oNT??RHS)Hai&L#(oYyKpF5N*NuG zYC~i<8BMs{gh0{~v?LX@MstkkKjtb%2fE9vS;iX!#08*y{%isDuS+w)vK+jZFVH_O8BH9#ip2f5fF3f=j7%EU1q~(=M6T z!Xh~=Q3Fjpg`^b}pppDvN)6QxCc~6#UU^fE$bB2p-9B8?f>&+2eXn;RO0wgL8`cQ~ zd73H!UFLE_o=z8sUw8t&iz75cE=_`JLK-fkAC{!0YIQ0q?f|JqNnw;VG?sn>i+*p4 z-T>MN?M#}GRr5}Yz!HQmOYQ+7UVryfJMyfoWyc66L!l&ulZ^31K<`Ca)rE;sFH2H+ zSIl!qhFmHoYqOQC#lQzJQmzOifr5SMZf($wak3Vq&QxTrztAnF)&lqZ zrG8%Z)u2xNshHN}K9wzK2~ix3QigDYz*}tETaJKG(A~~`g?Y`g=;GmcG1j$&1z{QO zgIo2Hp1gF>scW+bY&rA?Fm7_0VBEfsO>^qM{umPEdT_W zZ5ytENk|>R;C>aEYBL`dT405{Nhz5D4z{+6K^X$2XypZ=MocX0BBu5w3M#^s2V>mT zf)Q|nk-#Yk&A9Q#!ZJz4NVS>5iJn3Z;D}>elu%chETPo z032u1nR&?pDj<0YGk8+SUgU(Q+Z7`V&*nwBq=4e9wz;>74*(Inlg>QxUX{QDblc`= zxl|LW8Wl6;Jl}!8<|@D)>Y1t4IlL&DYlB6umwIERE+ip8V^%Y{JahELR*sy!k# zo-PV_KWXRj8$@{A&AdwSHh1_IZBl#C8PDvy&?JFHVb zG~>xqQS=OO;XVRT;p_nK{(94mt$4{RuB#3<1r^zJGe}ZPjH4-6kQhJlMnkAP*(xb_ zSd+qeX{{>DJ{Rs5Q{T2Y{{WhurkT#NQ>Fg^sWqc%)>E<7u#Drkk*SDY;4+wd$|j9z zWw^USc|Ex8_*BgUj=~Zea-`D`1&ygCusZ?gPZpO+bf-&Wg|e)+*}=nUBObJ>*O1I5 zHTcg9BXXs9MD8PlQ9;rcUhCd5S}B(MYH=<+qOA7xs_k7vh5<=rhjw^a5k2HNg*c*n zckkY-l@%_z1C-mPchzTH^{*-p01ZdfRX7VKTQDb;kH>Lf4B;gDP@gt-l2RP};Y2>N zr7SBsJ;Ck%cAzDclpE$KQsANz?=B8-2p#yYHdg_JF_@8*HXK??f>uD=*cvVhxA2^7 ztOJsyV##$SDk@gd#xeZrN*cq!F-PmgM1|T z*5?;k91pD|1(VJ(pRE@aN#^UmKL8XlGCQhsf}%;qFXvni3Tjtf*~%Q54nmwM2v;4z z9jZb-P4~)RzQshj9$HIem4XfiKdl(uP<|fiMc2=8y+@BF3jmCajDz2?uEDFF{H~d8 zmjp`0XG3Ht?>r~if6Yk$0LpF`1$a~dELJ%%oFHK;+nkRo`$wO5WO3wQ#S>n~z&#uuAJ{+zKg+tlw=|QjXt0e=0fJn{UWUHD{M8czW?k zSxVHU6Uu#QGHP+!#DqjOL>^I78pRkuE){SOmmK$|#-SIl?u~}11DvAhx{WfOP6pP% z=bj0ospV_N)|64M4G5*X!BS9!;PyEbd*uLF6zeK(0bj4`m~mSH)c|{7(&MQBa*;Ji z;Gi3>wubW7({2KDfzNSHKClzWUeo6r@UXS0T2luAgN}Kn89=ezEVR8U9o!*%rlVyk z%9o!!1I=;uKps`0u$v$kwF^!u!k0ikfYa2q!N$s+tVBi6br$BJNF^##K&fAxZLn5v zT`h?xg}-dO#an>Bva%I`5<7A0PDK5};YJ#g9c!ZH-XSHO$VSje?w&`#)}Qdv-ZjP4 zq)Wpsv>j>T$jQ%R&2+|Wji!)ia`c@#C0E(2;>jpg(MNi^3Ma@7{!J>rYpDr_3N(zy-#Hd=1ZZc6s z3vx2AmkRUGrAx-}0t!Iu7WYK#GL#Le#=+m-n3_?@QRUV_I;J+&IFt-FQ@DDjlFE#( z!#KLFx|!uP`>80}GtWHckAhI)Zs@qDFG>5mPCkWU8<+EWezhN{RojkGm2NgEVLwTU zPBxbtPU543vj_1tlk57S4);}Bs+$6(Ed4$)yd{=Wkb51u%~fX9+)paD>C3Q=Vd-@- zxj9MsfzB#5_3pjHTV2f>#oYbep*Dot84%kem>ZPjD*ZMAL)aJ{^8jDlen!s=t2@MP{ zBztGdxlqzugog_)B@&cv%7OOssHs>>Y?>}0EmW|gmiHaN_dU3(wxSKLySq72i)U)o z6tWMXJ*uHr*Bsv|wz!3&^XWpnoSgPK{VFb?p6RmDkU#LhzScJ2JDxM&wKHd193+Nf z*SgY^B}q!~;mOBpkE`9ZSlLI++hC!Y?x}6^@{&EV%{0L4TK7Ua{GopsB}+(AB#$L| z*BD97*E4(tvmMtIgn;6YxL0p#l!h`wA}|TDDq)A*Z99vKNjUC&zO^L`PUh>*8%=_h zM37Yto8=bqtdDWVdUEyplC z_Zl^{uC<9Pz3if0j@|~>7lHo(ar})w*j(Frjn}f^wHRx2(4l=dgZ1|X4uA?9KS1KvqJEbjzv=opFj_m^{ z1m}t>l9|P}1vcm93*2C&Zdi5Vc|0pLL!d3lDf66^IMheVmzP{haG;->q2!6={w2|S zWUc3|g=lPetKFYpYB{WN8BT{ASzy~D+tOXMx6^}txuf!f=@Lt((Se>vvXw+H%z>U>RW0#Dmm}XCQ#zs6vLZr zcSB8S$qYP@UQpbjxRQAGsOaACLGGF?7bfaY_9TTkqLNdOsj25pEvVFDm8mV^x0x_<58$q~FTQ#-z zqo@7K9)6yu-?eEvW8WJABPBHIo15~c)Fj4^=}PnX`Fee7SHyLW>?Ja!f|RE*FHGpg{{XTPy+Cc_ zoZ_e8t~R(^ZjBxgh23&W3Q15_PXe5mRdB#omO+b^52rMiq13YrP{)Lo9^_>9^r<}~ z$C>_K6)UJ**0Sd%T`raU8Q!%+(HA~iOJ@q?E04moO*=+@H{`5WQ<%B?M+>1^Km9mt z&$AinK?+s@?njr>tYc3AWT9e$Lt|?#eSPo)Yqi9Z>^1=#Ngp;prFHa;Dn>Zo4nm=t z@8SC4rXO}ZR@_b!tPpca6s`inNI=#f!jW%ii_8MFNNw!nAygCY5xx_GTU~AUh*L)P zmb9KT-yBpeUhCy7I4Bu636Y;XsC>ES{P(J5eNk(PUJ_=@z|@i&*y$_B9Q$I1gH{0J z3&KlasRy-+kd@@GBa%Sx^r@futF=c|Oaivd+q^ZEWQBT;)p6KXmn61Q*x7`rD{vhD 
z0G_mmNo*inFx~7Hz?$}aGyqCS9>SV!K()K1$mCCKa4*BoNj#o=eQ1$uT_xL8Zl!4| zhT_-Q{{TAk-YmNu+g)O+{;3LePPR&vGDb7^s+-g`7j3MpbQ0}1^-D;0-SS*kLR1Fs z-Nj=y^#{Z%$jY6g1u!l4zF1!$LsA-bQLwrOrlba(eAyV|Uq@p9M4L1cZe`77>Elsux&<|nX zicYUS)?FsLpjg>&LDTWprj;c-K+hh3jbgg|mvOn_RO_cBvMo#)z;Mrk*dH!<{{RhE zVt8>?r_0;>EeV1gWS0^!p`80uqE?HaM4u6j{3gg&b>WmGY+e96fE*J^ncJEyf^&`T z)damla24X@jGX6#ObT+I4JEs~tqB+PDC0O8AIhJ78?U`K*ElGI-<&cQON4rQ{uH>W zh~*N%1ibIW!idY#I zS~9{y2sy#ykMp%H?ZRp2o2Z3_02RFB9Q)L4qRci-mo#9nK}qmXqMVP=)l#{bxn6Ef z#|U`-TB=ond>pSn%_ufhrKxH;Dj@TLP_ndcT{h$`U$>4vP_In!jwz=L z@RDtXfm4RusaOR0a%edt1X)JCfE~c0$+t8Bqy-Mx9$(<5ye`S9BOg>eWu?WOuL~m} zd18|2t-wuqU~;9SKY*1FyTSTYg*e;FA|~K=PD{SC>Ai}2 zCF^pWZAFMlNF}wPr72Q^k_avW56g-cZA>x3HjI;hvi)!PZ~O(&YTpMcdd8{5R_uJS z+Sc;s&h{ec_orP;(0vEe_U&V=^j3=3w~b9DaIJA~XjABm3A44?31uXF%W**gYU~?c zJMk+vr3^%Ppb&A-f3ko708d}wmXEG9n$DuunBfZ!*zhb!_fS{}g7q`RX$twy^&XK+_`~?7_!#b!_<=W0QZs8Lb4+8K2a#dwZTqRd zpd8jT|b;fbaZRNRpLfV{+?o(h7{grJv&Pc~6kwH_)9tL^-s_pz({{RPd ze+;N-E2fipscwCpVC;Uj`y}+aU?`HU$j&(Osc#9kviND^8Bnq=ZKY|!Po^_WOBfhT zFUM{TsUbU#510AnMTpJu%H(IbQLB9kN>1Q$`Tqc(bnuSDAvWuSa8@=SX)9JTfx>t_ zsWC<8Uff7iNScP5}pj$of#y+>5RTeqQTdJ=r8FMDE9@ zZuGib`9f18{N0xGdt{duqECMQRA+?aecUBE;F~vv1nPrpPzy=n;9z8>KyUyR=bU9U=343i#PK*F z5)M_LUn&8F7iZNsZ!+9wIcZ8(1_uP~{x7XdHOA@Iv~mK3xFKo_PSM!lk<;NVszc}*BrE0~UPJNkQ#u6+bG&B+i6s1~y2xz<9)ilX3Cqu#~$@$iMAFianal5x`mf)YDB%M0N{46)5pG0 z&=slRcz1HL{T9(ozcLvO1d^{#)vjn-aBD@DU)0qW!tAd_bc+lz6qdjo53OHdfV2gb z5(|i3CDE3MXoAvQZ3+2C4;iPN&8Wt`qX>7Xc6cg;?#9`7DLu!QDGha#f>CwL&ai1a zM43+oB@L}azg>~{29nNc;97l}c1LgUZ-;58auFYEeT%lxeu^rb{Q4uRrm9&5cezo+( zl2|PoqG4>T4?=N7?OqZGd}BO!s7i{A?mfbICUO5>Gytbmk9x63sqM=1sd8%K~+zJ zwTtAmq@Zq49A`BV6SH!lohh;gyWCcY@)x)hf(Iv)pZwFTdD?x~1F%Y7%(`VEJ5r@& z=LbGjCIBECCmj1X9H^GvQ?6xhB!T1)6iZmrZ)BGpyzsosbgj%Gq7i}I;F@vr08Nv? z0!Uh^cx44h+MamLINgIbPwcyF4igVaO8x2-8#^~(;PVJ31KS- z1HD{y`B~g$9&c*}V63V_n{1^j18;J9s~{~slb~$@Fzv`lX~W%E<0Fdj9*CHamc?&I zWcb__9#0sq5pph^-(!NYTq30mAcK*d(&abGX~VEX%Sup!72CFbK8MbPm;w?V?xEzV zA)*46-<~^qRXVuZ*iAM&8=!g4jnB5Fs2t6Cf7- z*StITQBF9HPUWvT$MyPC47qai;uf-nnJhGqm1pq!`O`Hqj%YTSbrqL$DSaHOSWYEaoSi5&9af~vQ#4`iEf#1lZNa(D4`F13)U)K? zdfjupTI5F5e`cPim~~l7oJui(+iC#(sZP+@-~ro;jdPr8j7=cg`R+ee`jgW?4yfq* zMw>^ejjf16BX|I9{5wtU>)-0Px|68&cCplVd4KfUquK3nn{E_$Vlijl3;xD6Bjh3L8?<1aX|HNl*s_Ri8?k&tW~g{{Z29Rq?<0IHA`*A!*%F zQ0?Ld5wX|>!QM-|?oGd<@zae5bkr9eOK#RWjT#o!ahTM`l=LMy(%wLKC}FgX+twC7 zVnRtAkSK~;cZb~IA6_s1RbTk%{vK*RBWP5*ZjU2GkVqWj?2yZCH~MpK{enGDPc+9+ zEpl{bhtcnkZ0%{1Q;^=wlCiXtkU`PSA#k|kmGEE3Km9ax zoeQI%RiRYAdf4)AF!BePtphncdn$8qT!>R0ivIwQ8bfRF*3!~arq&J!P$M9N!5*}! 
zWMLOyii}zoz+!F48dXham zVB^Y+ZA3?ufXl2=<*6$rFC+8jhlXhUMb_W!w%0ibNd@E;9>R@$@!2iT10B%S!sUQa zhtP6zXbGoa2Pub0cVT|ou8pM$4#{a)Cp`0AqN|J!QiF]wcFh|VYY_PRrxV83RmpAaE0G?Zrw1zy>&RZPKj$84eTC4nVqDnJ}y zZXc~jnd89RxcgiJza5m`(uAoX9A#PKy&US8uWRKx!g5N>5-T#~rJc>X5^z(TZuF;V zpls3x1IliCjjcFG&3&3PA*7bU+y-}d^{ob)Asa+&C8~o`bg)U!=68#P&MlIlWV6#tZtT4E{Ix|*-}6Rf^+_7y;03X zW!5eU$2^d1I7E7SQSpmtakr8?0DD!NPmzS#q$@Uo6M;`#Q;Sqkow(z@TG;MvZnn)O z-^&*ZExtLCbWq!#06ujiqO$cW#a;gZ#1;DJW}0=^V|Xkn)qs+4IjzS`U=}MG>S@U) z%c$B(>z#@l1i;)!NB-8#yLlW#>%x`C}-xh-o96d!fdMnk|-bnZAvIk%_QrKfMo zPdUlODYJT`1?^z$?io}^sO+%Y&!FRt-1?938p%4k z1{;#N^VwH<523Vg0WJib9OoGHteRRzixde2Ucgc=PnlIW0V7TFI#& z+%BuNFkm(vNLlaXYj&%v02Ozvm}l8~ep78^6(w2EYN}P%hUHf(D)WvMSelnsvA1YF z&!M0jssi8w@6F#TNx5lDNg*jxyY}t%sR*k91$X8#d*yykZ89URB%~~)_TvW>4Q{1& zmFkK6-6-xha;Ke8R#VTFW?Hn@dAA9+)*eR)Xxq$rJJqx~*yFWRUr-4k@}%i?6LQZ(Gb>Ht{p>`xlF`-w4=RA$R54uhU!2NRBc^xz0~Yy!j$5(xmm~8 z&ZhqW<%Y|WyvpEsTaGp2AO)={B#e1r`hO~ls?>oPLf6%TZ*(~L83Y9G8Oipjx_X=4 z!2sH_1;=zZF4$$MX(HxJCgUwIpjzti0-x>X%ph zBA11Yh<}lQJC9G&pfyPYhMVN{EhpA@`mVQ&i3|8$uRU zxx?S->qgT9xh^LYZ&a^-;-xJmL;_Ao^y81`Qsi-RizVEi%687-d=1ObBaDBZl-AtY zH2aVWDMhKr-C1eF$>d|Vu%wqedA?GcAr8q|zhLemN=nk;#^5~Yn8^V;*N z54FNb2RQVpIyZT8o@9x2NJT*fh+U{_jv%YoL9v}JFtB)Q za9l{RZHU8Ic|4FoBeEyB^%G6sWW#}Myu9VXxDp`3ki=z#+ubTk9506xwX`SY1de&Y zJepIdYpl`DimB7+l@zUX(#8Wzn&Jr_U@k?il5g9LUdpiQtxbQ>`iYi|h&N zCBG%f>vX#L1=lx#u%(g4>?~*BG(--(er5cXe-gSx^c{7oQ&hXmC2``{xxXJoxfcLI zY&w>S81&|(VP|!O7WNC zYIyWKeJT=4HWytDBad>W7;(H{DIM|Ky%_~F1;9uyE?=fK?`@0%7l_vxaWnTI%sJ@PADvH2h#`q#Z1Ko&5BosZDawdH4AZ@ zbR}ei_lP{_(uUP+#t;gTrnvxJC?zRVQXW&eN%^sk{>HNXMw!x#C!F(qs3%)O_sA|3 zq?{)QvHsOt`4?WpvU#pOk#3epg)75K)>0STdG@P?1tD)dtA9?kf>oQbM?YaovPz0Z zJu2l&!Uk5~rVk)R+Ctd7js2L-B};K`B?%bsny}~>@$OWfrtPc*uKxghDD*J`+Xh3? zcRHkusEpRfL%=0s+M)3!B&*+g=~HoF65MnQ3}i(?j(Y!e^y-#E?#qik~rqFzOZeWn3`fU*;MJ0 zNClC#DZ$9cYRLRcn|4DqjvIndyy*+$a+99qk1wr1BW%Qn5CxZ1W!#!*m8Ds53}+Y^ z_x|;w>J{an+m&FhsL62dh2?5wmrHAP2>_guf2C%+j=5k#DxXtWat8~{eNM`i;h}qZ z$@CQh-Fdr-LdRZxQG7eoa#;aDj@aY;{{RhGD7Br#1xQC7lgl>`&j1qR7V<+0H7496TBb?mdl^Y6Z zL#ZI_0A%F%AJU^6Qd%!`w^SCJju(M-LL6CHT8`dEdmhv=)b1>yswX6=NOzFpl$5PW z1HV7#rARwb+}kxZ4NE!s$P<3UiV2gA-P(MSy>6m_R<+f)`-MtSFoY)H`L z)7_gAn>N;4L#K96JW|@ubGc|d z1DZaLKshM*nnk_S+RLX(5Q5&_-23EJqKJY41r{bPz)oE|WjGslpDdp~YS78sw+NQG z+f#1SZzx)durN|T!T8hdb;4Mj?e|lNwk4MoKp=R9U!^)-gyUwrR}?W|ql5T= z&2n>bh2Sofmk6pTTCk(RP>+oIr4leyGxHWowvw@&oa^WIR&#fVe8IzWb8K)yzo)W4(bjuC+S_f^O==cx^OG^g6BsmHQq-2g+@Q(v z2JXWdAY}Pb+I2({v`X0ka3}WfjQtZj#+&X3$Hn1+Gp8>$e{N07Yzid7`EqdHZn5e26TBfO@Z!c?Y^7OY|10C1wN*w1>j!vh}ugOy{h){Im}xAI)`VlQi( zdVBuLC#szeTpdEaZK^MJ`#qvO6ed0&G2N2*`4Jdx?4)hV%JPAc{{W(2fH$RMk1>j! 
zgdL*%ubhMNV5VWmTaKh-<~;2uHQJ`#sj+XUMQp=)8}izK<2e-y$gcd9wzz|39X1b& z*;QFYHO}GMfV=~b=TQ?lhY&(o7da`BY)Y621xYFg z2Lpjp)U7rbQZZcpRGKC2j8fx=1xh0xU=vL5yG{5`8{7|7th=yqHp^vRox4;$M7i2I zLdq?Iw;@e^u#}AL2LtBsL`K^M=7HOVKGW~14iJ)7Pk;HRCOb!&Ht6o_1qAu=WGO*t zDg zBONFn`(Q~z2~(Nl%eNIbMHW@Fr`&hkD_>w}2rZ<5PT$Y`=B-rZ?OTSH)3H7J17c0z zsWxT{VQBk6D&x>+`&9n`PQ(nw6|YHXJiOU`aC|QG{wsEiepyBe6OJoUo}=mlHlQzN<#~@M1q*c4Z9)Ivg#3z)|k^rqpF?{&ghl)HpZo5)r|R!KaSaq0Qg?InRB6!}SQ z*W`(XAu9QlrG4Cb;*W~3wf5|WYlkB%VkEpW9eKrPoy4Bq>TWuLe3i*0EjYs0xJyIF zXyg&bJiX`&x~<1(OBUP-DRF&|Hx%qDy=tp_JuEK6FDNupx55Wjgd_skcKxXen26 z53XuTnYOc%_mFI?ZH~a+&B#KDN_+ZL>@tu=k!;1&Hp^v0Dr^*?Ir9GiKMI*;N0!OQ z$?A{eLGZV)X~0m?5#guVO0B#+23n_IA4^u5iu zX{*KZEu!lpG-&d?I8tB~{_(&Q^H*5WQ$mH$Orn8vFfcmR;hgQ*Lhj_2{|~qHzaY#Y@`_Y-06Li4aPQ`)Rrwr zX!~Pw8M((Vyq^hAC@Hss+e(s?o&v&hF_JS)RCJc71dnfrXCHk1lmwIppt4B`78bF$ zHbFY)PxQ~M7uWUoPIVo+!Ko$eQY65WCNgC0Z-&&AD}`r`$dDe*0_2bASp^H!|(~!)R zu;>v0Bpm$Xf$Q3(A+3-%0efk=7Qzsf3GcqRi`$;rrX|!R2KG$J5Im^W##>D%{{T+w zTyl7&QfiVpmn}%mt#ux!JUpdQ-f=|Z%OCinhM-8k)VQ6?O5BY*XekW7kQ6dXMh9Nb>es+nkr!Q$RbANZdywp48B- zZq71OfVd5%Vqk#t1H@2KUU9V4T$QYAuiZ9H?AzrQS{_1x&h;o~0HpEF7R^b6)iUE_ zVvbPLW-#1TgZxKnr_;sAG^ubDzefA5g{u;CW0) zLBg%N%T)s}k56I2aTSB$#e==xR?QmJ#Bb>z6WkQ2O;v(NZwH&#S(%9(-3 z@S=Nbu@c%0XHp$kB<8tMRS*{?Gds@++8~S>3h~2i(h(VU zLNe+CyiVVTYL(OHkB?Pp>De+5p;ngB2_h&7IY3T3-lrPc85lK6;+en(NpIIaOL2%8HeQ+!arCb*PUR>`6urJ*cNi z=zw%1(+aX`cso?)0SWF6ES~7NS+sL>pOFZe3MT}EjEZXD-4omB{kt%|5qV+f?bmfTi>K)=}0`ZjTA7@ER{=P^4-19A(FMg)DvH{?k}&T5&EQ z`lvuXIYIYZ0@VxzxPhJyf3-aN7n}s<@onAJ-JK!DC2C5y1Dut2`p^}%nA#63)vrr? z>Qxq6KuJnep8I(1+Ne6DgLxrWXTz|x;9;d9O28+Po&gk!5P3|C0daziEz_GRWul@6 zI6jp#<5{*&$02HwlJkwWckT+>c+Y=ouWD1C$32*wcnbKb{dRH&JfhS#8GM1opxD{XZ&U?XbEQSpmCBf}N-oQgFN+ z;N!JLO5z?%cqn(g?F}jcH(wE0-^Sp4sWBTm=s6xZ9g$oc$`IRSP7*VWRD~$+yvWE# z@NXNO7+Q|(V|SkvYaT*1rJ)u5w<$^AyaV34B=t&jY<*U5x3CJlB@hp9dXr#q7YVnt z6JW7FJODzR+NAT}JW&QVxLmIWmbiMTc}}$RA7(?LMQX~`>Fr2k+xQ0wZpI3X{eX!H zQ=3qGU?@i9yCl z7#*?a^zTx!)3(5;!%e}mnYY@KlBEpn&N&``O6(NzxR(^}!ZHFog>F{x+es{_4cQ&O zqPsgAZzw|>f?POGtr>1vb)2Uzk_jO>{Af5H8f+6$zc=MtEk6th_#h;XIUVX&7aSBp zaB#Rr@fkyFXl*$FfJI3}^5e>MXM|S6Y4DsR0uzOOF;ek9&9LP)yRcZ>N?l4+MhF~_ zN;n(kErrcFTC;aZAg2mQ9OjnujQ3APEs6$u+!pv!!hZHhsOpyhEEI{Cz0hs$)RNSa zq4QwikyPsZy2<9s39_#`wjhqT5{#tq4{i-X%){Axb+g$fZfq?sWUC+?k`MHz;)TcY zF1h}S3u4qmpEAyIM79STuYOcrr|jsRDk3EB40x#VFfw-T8T@gY9;O|vB!wxVQ^z%I zFwAVnI0AhEAQTDJj;8B_6hKFr1bhtKAN5(c0#}zlVSiFN zDr($|q7vXr6cnVF6gCi4g&Y+px2VBuNT-eg;n-a80przb`uy-1w;VJKn`B>%a&6kc zr@}SforPkBtfJ`KWrI#dVb30M{o7LIeC{onZLL@t3Cd35oy7d0sZ_0QnmW3BIBf%- z!18dY8jL%b`f*ZSyMdp>>f>+My_HPr-^H&|^&(_UvcYx@focduWFRfMK#)>iZ2YR) zl(q8m2Ym7>jYHD$c*0nt4Zd^V?6WOKN<9vV)eVcqEf_t%zP44@qjvoh)o!75+d=h* zS9Ox#t@Y}p$h1ndSlGBhwuHFrJ}mbi`JDNgaIGx>8b%70&l;Rm6jbz8b+bGHPslhV z*dMnx7|&(-t=C_~-nk~VPeo4^-BEiI(BK+L0C8^F0^3Edn|eQ{5M;RI2uqc8TxiTU zZ*{2U4?ihNQCeHWhhb1yEjSakNGJ+OB$|P)iM23-#tS{IAQ5wsdw%QhFM@qCbzK|g z^+t`^?rhL5E-}Zo`=_T)FA$+$ZPrKMRC~4?ceU~JVPGA=Zcajihb1ZpZfdR3-Fv8X{N=2;XyvDmb8g@1UoCuP_$Q%tZ7^+Y zAjxC3(i>Dfd$68J1!sziKT%(})FQYiZzuK|DDL&ZEeW}v@ZAYv^ zC8subaK9NoFZ>PFI!?63P&YvD!tgLhq4aVNfu=XF_o;A=qOT@b1&ElMJEKc}nkiQF zwYMI;1>_`?z5Y?Yol#(PYXH10-WSk>y?x0gEAC?ByG()Rjl@cAOtiJkWJ9J0Ob% z#^WkJ9hoqc=~8i$Pj4~(znxgRb4zP~m_}4xRF{&@;HXQ&8dz~IB`8J@1pfdk)aa}Q z(+-Ou1e4T?5fb{6+LW9e1KbZcZK*nTZE_BxG}sYPode z_BRMUZMSJ@^j|~$2i5{DvfjTCg#@K4{Hsxpa0XUQs77yKtu5PF2{AC+p+KkBh-{wf z5yvSBsL-j)Q3WTtAW<;Lnb=i$))v&^)a_92XrVFel3_fmpR1sy1=RpXGx*cOXAzXP z!bnMHBBZ$Pa!z^Uy(`7;ltDc5gfB6Y8Mv%)LQmnJT4H%HoMTAB<>y-yiB$x-JV z3d?%9owo>WMb;Kojfm)RZVkRFNhEoKeQPhK9m=n@xw0L+Kv8g^2qh}PIOHEAPnL^a z5a!vp3dK4P<3+%hhsYG%(WF8OF_)gfwD;!(lJpS@SANxs`)sJBE5 
z_Y?rM2AxA;N*Op$J*c{PM$_(?v+ta#C;-JCBHRZn10JdV)VhFL6U;3c3U+kBP?t9R z*v32V6@xbU)(hS32dw6M(w%t^RAqNE`XSqWE3nfyVRmxQql>?^6x;p5-LA4M8`MJ(1EsE%8M!;=!TEwthWTc(>1L!>I=vv{0+lz}7!LrHP;vHHD01gSo zNkthSR8g=O0||v4YEyn9vK!ia_8(tbL*h5Tbh7+;Le+LaZM7DZq!i?lp8V0!H15Un zkroBL5S**qtp&6kAeCnzlfu!6feXZu? zG?W06ct4kX)a@hlVx4T9Dc55uX~ITwJNCt0w7CGa(ad+MfRSSl$8kX$3Pdm0uwvW42xEp+mSCJHZ7 zDz~g>psB=^kU2aWi)^n3?g-LaMYu{SZUvA1-yrctfBE#9l!`iG}vreLC>8our@wQjlopvIaCdQ_0(&7UeZosj5C!X*Eog7Vfib90o6gV~?lqzqIee z{{T$QzWoklXxGg+(%tZ`43f<4Hs^?Tz8c71+0wR@3cy|zkPpfLF%=)ljm|BzeMckT z9lo8^oo!pAfsoaWt{`)kzZnGZ4T!KjpsSC=FIcoA55&+q+eWjX?&Zhl%Z|jR8+B>` zscyt=Wscr|%@1%eBhNd8%1Ga~k(IhY@gozQ1wwRV^Q+I$^RqWg}SolVg5Q zC8GHKf@RCI^#1^+(sX{Noo0gjOerOmNJ?FqJ!?U6$E7R)5Vs47QOEw;5{8g4k_67P z*9M-Dxr5F5N%!yf?dSjs*;A}Ol7f0_Hx}*$8(<4@w0E%mPD<(L!Bf=iKyfz(!Bv_hI5A&jg{f*D8uoyY>UZ!;SG1kGs;kpoZ&D@^PVXzfvi$4#ozV@d zFRZ)YIKE4XaR&z^stfV|0Mwp?T7Dty{b2Z1w`x0AO7*jH?l(CMvaxJ8)JalgsuEn~ z3`lL(-Kq!G#HSg6Sa5+BXQ$+IZq?z+6Q{bIy0%NG^(ExJXq)b z^hW$u_;W+lI&7MCV{X^q*pEUz!e76moVHvgz}%6Ve<{Lv zIPOC;t!KhgRxqp|^Yp0-2D@$hFK*3}mi4EeDp^B$UQf3l=kuw76V5N2Y(`qG}lCoN$Dd#2bWj1`v&xybwXpe!{)H-wCaz z*DHHsTsyXw(~RfPQIyt|*$8=Gn>9hsl(nfLDZ+n*W6SGS%3%{o2p=9-I><77Q?vdu`z9e(o*ule4OVrm}%q=^I?93 z^d~_>BqPamUZls6(||hO++#WZRV@_nX|t7|UHE4ogj`#1vT^hz!HY4Lf{*{PehLZCQN;{c>oioO`c04pLKAxe34oRX18C zq8{2?$8olwYt9#hS@cNfP?Wl`K;+{loG$K@Bv#1%P#ye_vD`4EZYfeh&uS6Z?44_U z7cqL#5l+pJ;M%0*{uP|{R^h-=`inA8qN~iXlowJ~2n6sl0IbRh16`0+YCDpCt3q-a zQouvIP;!&O8UFw=PZu@aih0!*7~rcu^DRi0^1@Q0o(|sTvYkp``3tO}X61g!f`u@b z$VftPoO!6{{{S@%`GIf=yJLC60K8Z{8E!~wI0$fW7#IOw)N5WYnP4|Qh-vFPsR3=3 zHc9P|l=)KUdx)~**^32*_9QgHLyeRWa7pfYs=Jwa2plE&c5RPj2Xso`xVD7jxZ^qf zKMERV?8!-%#EUHp+Laax!3)mpWbuLR{(01OHBfWQK~G%T{{X74ciHPq#~Vu3jrs4K zb5=TdI3NO*;M^#PvWuuzN=YZbWB&jYTd0Mu7rZXTPTQ7u5>$3^oC*?3Uqs8`$t5zh z)G1fVjKht?n}>11p)T0ZoGS){(^Wc)wnNHoOVam^KJ;O5VQn_N1UQaC{{TJeMw~Kc zkr~P-sQNy;N_$0IWCOuO%r&*7_@v-t<{9rwbS0z}Tk!W2b)j&1C}^ke@z1>*69aM* z-@;MqIm&6k;9e1sl1b!@ccJQ@=VUH2GkmREP1ukVkWT~yKt>$zD&)i3>D#5{_~}MA zB#a!6^*ch#keR<>WaEn+Yhkq^A*oGbP0C2|`tnJg6p2OMnTe!6x0&Tv_2oqLpvQdbdzyf>E%GW1EAk?Vmm=IRIxAqU8E*J) z6(2)`_|OtL#9a1D3>=lD{l~*-qyVnRK2;S|2HU2lXyGOGJ<&x@0r!9>pE|C(0O#Fs zv(X^$#mP>T=K~oe^O_Cpu_nn)c;f#6vK&>pZmA@pTMr`x^``A?rEan{9mP*H??-msn`Nnrcgyq}9%<95$`wt>m&+q%sr=!hDidgeP&v0X_RvPP5d~ zP(tBPiCi#!J=fEp!;Y0mC8*Tu>`XzSw>HbaWUT5Z#YL}CYFnM&*Ldo+@^h}0v1QW` zU2Z%!@^+* z<#L;=zZso9x$EZQ%F~@i(sB#Sk8~GTobdwEtYt`fVJT8bAf)5dik#>kiPUM!q|_T5 z{Wj!}-F->Zu4b1Lere^tAP#A8AP{)BYadi0sc*V2@uY>yQk5k^OU~WO!bt+Ow`o+)1+E(%d~=cXzy4R(`b|SAv{>`>R8zzB8jYYjQId{Z*wc(qX1b5~mxL z8q)9yQqmd96yr{O$_<63ND5E^&7!NChLRfh2v{}~dmQK3`g?X+-kYp&W^L`9i#U&S zoR41iQF_Oy5bxSB&C(3On*|A5rdv0aPLPEuLn>2iLr)|OjPK3~<2~xBRYL^QwmH1E z?Qh!tifU=4u6EgEw_q)_TkVUYuAJzr)zjf?sy`6@Ot{OsSnLaW-K6Z8aJ6!r-p%eh zpsbeLE~j=*4%H7l){)cw3kS0Ra|k%4;)dy+rOir1Y< z*ZTLR_B%EH$E9@S_$D^4vsrC$QsBB;5>%N=YD&t~Rx+)~+#E_&-b&lSMt{^=G*H!e zk`cAwu-K8c&KGGm=RT__l9bZwZ(|(i$TH$>Y#g+Gj{y;M&sINihHci{jJE8n5sOil zub3%iw5tmL0CpU0U4|5#`BpEj(3svy85hX!{{WjS9ZWfE+%i{4oO%N&FkQGVON5z; zdDMVHTtXC;mHDAtl!XuBaUk|qD%0@etp5PwYP>g-;K|Ri^fdqaZL0?i-*4zg)+<#lI#fsWlA(hd}Ho2sNJqW=IMqGin zC{lMG4l21(Q0>?#S73()xuxvV@0Uhk$5IeZ2_SbF^!;i2eL_M*Zl|;j4f=&Zjz0d0 z4ppsI)EG+S0c+*5EsA=B8*h zM{UOnkF9(b)#!mPZO73fZxJ7D#im0kLc3!XVP>e~kXBt+rpW`ym9}t8$!sVsAq=N> za!)ignv|v2OaMa8(`q3@!EgP`An{XC);wb9f^IMr{3gfxrfxR_Z1E7%_C`m_0(d8nravG_*G(~NVk|}eye;N*J)oH8z@tnPpOSeaWI{dpA|r32f3>Ju{gVH zSJQoApwk^c<|FJCn9*9SS#C|ZUrIp<#^Zu{tA9_hvG)fH@<-#nzG1{;mC?SJb;3Ig zGJ`1rTyeU#jXk%H#e82=R9PWvu#mde(P#rLw1nhxuPTr+lo6=E3Q%>TIZh13ocb!g z6l4_SbpY5U?xgDLDs5p>5S-`u`}KfUFjCV}>voeGYEtgfLFYUX?Oskr5WTFq{nV@JxJq<3>Wf80 
zpZltX)g*?fT~W44w7I`>8%ZiT!iSw?+uBL6RgVkI2`f@ed@O_`D%;5)+KZ%w$nBo1 zl_$d&^j43;YAPL49N|mOapmV(Wn)7@vWl2usP;(X zg}XKb3XGQBK~j)%27d6Oqn6IG7YWkKjxwMwR^}9>j|_zjsU&%E{HTf8FNFJy+mwlU3A%DBFG9wskO{#5TvXhG2ii_wHAj9H05!?H2IMBxwfsi z%`@SR#aTYs@~BEGQXWZ8kS*m3t6%M_T-U>8WThm0zPa?P6*=H9MhQ3cS`gqq(l)%3 zq!Hiwe>#`jf1+@X>S8I431zu11hhtSeChb;obh0sik;F}G$%Jjbtax`RWhe1G8+zH zZ&q+KifX&0<~DzCqWaVLZO|JopakBk{%YrRo~E@CZ*`Te2<1F(BzcTfJ7^@Plpj_1 z{{YAT0D)B1x&f9Z`GaqdqE_f#E}cns{gAb8Z3`$-7&!XWy>w&?S$1%xEH-+-|$4}uqRq1HOWI(zu;>P3LBvptlE@na;#yC;_LZq1h z1$=oU3r&i9>4{_(KI3E@{c6EZzy++DYg>CK_McMM;U&OH?hbtFR-U}Iye{~6b@@&B z5mO>CWz?&Wm>e9?^%{YDQ?HiTMG+LsEr{3~Ksiz7qF-CNyH6>S@-JnnF5rmZpsXtg zKT+phY{L9uH@Mi4ZhUmkGt#J^P=GyN)Yz8R>EYINhsqS zXP)%*!sH~qhXs4M$^F{0K~^vioewLCZIdB`lp_7d4XLuSNhI=5_$m^&fn?l_ztJdl z8kf0iZNh)u?@&^?*EUH9(}cJz$d>5@p-9Ndpr&KUxm|OvHjJW(7R9JEHe_-;e<}si z5Nw+a#mXp+WqCPSXyX~k^E3rZ+}v=SU}$bpOHRDhH8PJ84JZQONjOm*vOLaD>ru6m zTyA${^fSmBV`(9$k$!FLp%Z+z*&S;0sh2g)I|Ti@av485m}rG8Cm?`G7#tq`k7|YS zm@Mb;7Q662pk(TvtD?~(!=;g$02~yK2r`O;z%G08U)-&CU+Cy{8dAp`4&H*? z3t@AzWxG`9%}BEbbeQoGxDw+*)Tio|WD(_`p{XG(E@5NGo7sMO>wPP%=~{^pNJKsG zO}}2=sw!rzzv;L4+@dOdqC!@NjA;sBp+x6`0Uh)5sA7nu)8VLL3^qRbRbHm*-D5|U zj;LUKFE}9B-`M1Rion*kko&F2wYzak&MY?EV#m(4lZ2#^_lYDA#+e;D3G)Dcg*rYi z$kF_#xrdFeZffegg%#Q5NM&ul(`w_xN?$;AAa_g3K4e$QeMs4-k@Fr6gHK{F zxLkL*xIUH(E9ux>9{0ZG$Dgm=5ooBksdkx{sPisyr@?A9(Uu!oR`!`%d{iX=0Jt5; z2aG3*rdB#ZHp5^tFZJN7olK+3(VpKzdwmtgwd%W)ZT4t#E`j+jDpkH4$pas1LXhBP zWQ=_6yx;(hs|q8y2C7lU;};)qzbc_LY~JU6({^OXO|~5H7)mxa32DryacnI(K8i|$ zhvGY>Kz$ML>!^(MVv2vq7{T=m@!$B!_)6U)S^of=q94hrCgZSwf3ZYbCqwlIsIH-{ zDW>ihHj1{YR^6;4>fzx288vyQ)9PxZ*-paa?fzHAL6;UBuDkptf2QWM)R!3d?PGVE z!%71&;W9!}J>5{xyjDo6A8qYOYBq`2oe zQqW?y@=zK8EDVovOv|)4E27|niK4?=lnF=!j^l$>$p%OXx4llc7_iHZa;=<`ll`cQ z25)$A;SVc`ZGl2pNe(S)Dj85X=lWFHeo*c<VOq>vlqlaH0nQk1BD*v|yp%%7I;%#dpLl?MYTne9a9E zPA=Va&dcY~PFZyNQ*w&)sX%Q9Bh;Mvesy-ys9ht#aJW)*6lPfmblOyyj5oDpmk91W zzbe%!Xt8d~UeR=&tpdpr)GbWJSTcwyM1&LUPtu&Pf;Km43uWk?BuO)FFc&|ycC)^I z(RKLCaj4;0Ax9j03d%K^-x)0!U!tE4I!*~#*EPuJq}q;T`>c7Yc(9zDn>I|6e0Q+l0&5x9&eX85j)62TVU4Bk{VCrEsuXg2% z)A27h!zHGk`$%km7Bio%Zu&O&HpDNKA6|S*BlP@Ym82`}a#|@kAz*+-Pd*nLVLE02 z?&V6`{xc&5j~47(`mExV+IODG=FHN3!pNil0PcDv)dRx1w(5I-?pIwA9hAK>2k?dZ zI>@QX{oJK2=bR{<3S_i|U<7i%Q`9LBFK}A0dF5UG>xsuY=sQsBi4U~u3+=b-m3dc^ z9{s(QXVe;x#c^1if%6{g$*9d%iLV5e5mje5Y2jmmb!4Tr7cvi)cpr^qwRDlRo13cR z$PU8IaJfNlD#9FWt!HX^`=jepdIbzc-YSLEYRJN{w@Hp6r?#y{!GKS8JlG}vfE4lBAXvkY&o@AgK_xDoB zl*F_l)u#bCNC5MSn2ue|zjRQV-H?;tH&{XUr&87fLVKvWVB z)6S$RUfuvo0>>-*@n~w_ZIJJVQg$g^jN<^*!x&CGCxE%fbYEJITv=Q36cvy{&OZ_9TC_A&<}VJ*>VB2! z8l56m)BtW5L%tUJ@2NH9`pG-;$GAUr=dGY_?`s=%>S;t2*YKX=(>+E`ap! 
zRkxzLh~S?4YK+$2^bp11~$w>D3NhEgeqiw)3Dl$`)C+{Gff58d6V&K)@rOf4A17s_PkhT`aWTg4$m6D3SbL zxu`=?XM3hgN))_b9SSXi60m+#+cf_G%{b+yan}aHE#I2*-&4y{my_K2RNkKf{8vs# z2`Po8B>kb}fZ-)79nET*J8aO8r;Wsc!gfpfZS1IJE8SOm9&)6`xg`=pI}sJbpP3$h zMuj7K$xE;8lCK&^S6SnzCRDp@i0tKL4-rWv9lW;zRl4W7tlHjJ z(4WHJUBT0xccyByn?}hSyXbtR{`dPXT*K2yvNxkO`)i&hCg`Zw1|2Eh+=jL&?Bk4~ zdHKA-9s7Gr&;!G2-}tV-srs!|(^N?mcG_AT*AGM9{{XsoYpc|!7>hbA8>E|ikyo`V zPl&^*Lb2lKC1eDtB!U8vN$!$G8K_mt&|Mhg+Tm!OIjF%SK3(nw!q@C_IO5!@Yp8U) zGu%*NhM#f6`^}2uB=^s3cKm7}#AIM}BJMdh{{U>My-m|GO(WX&;FK?7>4Y2Hh&xth@kxLepNrCvB54Rq}zAT30&6lU+KIH3Sn2|X$loH_F zwPD9jRsR5TO1!+Q9-f`G4jvo4t+&&FG4)w&uJOYHA zZ7v{iao8Q%`lg^Yx>s9a83Vtd{{X^%NmxPlH&z!=Y6(`h?&(Fr@uwz5VQOtZd6lWG zA>^Ln#4QKdZ52_Zg^zHOA?9zd?mxn=NJ+CK9G>3F*?bpVwC=y^u8`@ry+eZ0I_n-Y zA!{i^UOx4^Qunc-H#~nAP*c)YW zQ4YM15u)~jmk@4CyEfg(>lc2TlBpwZxOSSxt;M3{--?+=_KXY^(Epxu0U48 z9Z=j3XgPX|Poe(+<$sxM{WtKlSDw;S#z-g)9~509>sE*Txux&6`AL&F# z)O2;QlNrL^Iy0p8IvU2ZrZU1U*>T6NU$SIs8>j5o5*sQ}#s(__>RKC0=S7A0UcJ!l z)jE3BAYR|ubMsf&9gQ``H-HJx%gFCR>C;H&+?=n&PmNs?G`&sjV8zM`as$a$I7sv3 zy;+0aadn2`-s{v@FLX-08_{9URE z)xpo&jE+Li3X*3k$`HwLjt{1OwIdB^yGK8Ia&`-Y5xF(ggbk` zu$G+KO7Kqa;mHS|^Uk!&Xm*UPT8160uvK5E3VP)tON!x_6MzQ=2kAp<6uuz1SFV%R zzFJ#5@Re3|9fn+&5ZZiIBRu)>{{S_bYkDzpBp=m%nbCg`Bc`XkZn7}Z_Bqg9FSL~- z!dIW49%IU|y>2H&@#wN2i~j%;{Ak^^**vCn?Ljt6eiXEoDI=AG+z($`wRDwqo2+sd z=f_rcIjNAbDQ-HEmhL3HK^u-U^uYWp=^X;0rJm}V9_!niuWO$YQ)Pc@Ml+0O1Xh7e zV{>^^5xCFUv zg)yg+3b0yvr54)=1aVO&m^a-^%_$`tcq$F7sALW@KhLEO=&o)PEg)Q!ztj=gP+!b* z>QmZ@hMPG_koOo>*HA}q#UY{;t^WZ0d8AcPTwtil!;1(!1YxA~=AgPE3`2@OndE&1 zWqP)SBiwv8_f?9W%_NQjm%C_fWS2hByE~1t-(s~S1NhVMX>W;vfuyQ!S$p0{x_DkC zsr4X6I7f0p1F8-S%GH#ku2%ziqO_sFoEkUYoXQ}s@^Y=;Lo1C{!_ z(~qUDa1wmSAm*YR@=8Rq@B${b+?~Ku!i!+?pejNsT+Ah~kl}6?M=lFfjx;csXXe23 zKHse~Sv9!8O9O59DJV*91-A-Me2#d|Gf|({?HHPSzT2l@x${vBx!b z(c$r48|tU@C=9ZUU2*0vqia-5a_Mn>rlALXIOtYCd~?air@e2{(#K6o3FL+MkHU>2 z802R5`X*kdwAt^Fqd68D5RVNg!Ui&Y$u*x(s!0^Qn=h^PT^5>V4UHTlPK;`sZ%rVWmlEdGt>WTh}}7*6N$|2y>Z6?4T>@fKRm5>nwXvPeZ)Ixw zTk#&9dpV~hA;e8V!kZ`VA7TA!hpI_u`S@R<{{Rd+&JA*i#Qy-r!B&S-)1f^i zhONzJ&fUG2?Dz0X{B6Yp2TXO8{hG+7c0m{ck3G8pc=c6oW}ta`PfuEAIP1%dh4=uH z11VC-QWSpjNhldlVc!`!s^+^^8yjKg-!@;iy(b+zn(D}HcY&W!I6qACi1et}+B7>>wbtc<%Kr}RWeXW815?aFD?(MJ8rlQN+;}V~= z0#u@r*!mHU!?i|-crv;-_s6x5AK6dUkLDNfNESYXc0X0CVrp6&x?HW{rfm}; zw6&$Py)fKQ&V?mIx^u>RaDM0#(&iTrzt_28FY8Cf)uJ3h(7Xc(kR~ph_ML%b;`l0V`a2rvShSAaHS8#)qf0Gyqn=pi^15xBeA+ziSki{>>g)4dF#e zQP1Fzd*l06)r|mQT%&7t*$yEP?va60>X9X5l&M=neR=QwsaEt#GLgy+x!97|k04xH zaw?VyZKUzrfR%qbkFTO+v76ydQFSFvGe}{=n2!uM3CCDqv^c_)Fnf2enEK04Y;r80 ztU8`a!EMdZo#oxJ-f6b}U=PfE>Kmu3Hb}8q0WRF(beEs8$C8p#rw{=Lzd8Q^n)(-| zYn^)%osQwzL$eTuQ2ycritX-CooqT`xyrCoUiUmLiyIBH!iXJABfDQ!qfayaupN{^|Sv|K2Pnjy4?j4HVO#Z0!^!72+sH>N!Jugz|ij5Wp1 zD!BOo0Q#(RpKM#*bUTL;<>WuiRnn#4u-OT;Ta+Z5s@7XeiXegpGmpZegGZul!U+~B zkEX6EQV|{H5|i@e)WcYCJ0lpuYaUj4lFWA&!OwUhOj-5%l}Y=m25?1iVm46vjS z1DWmpsrVRo2NXGB{0D`|zx4ywGc?tSn`~#DWpdV87(TuK077e4=xSp$`-!+2S|3Hm z*2s4YDwWZWnzgC)yVofzcqOKk;#RIez|Y}VZCg-x7hkHs3A!Sg+9;}j<09b`N!1Z- zUIm@k(zka6Y)^m0{{Tv_O-|`J7b*Jwq--wmT78qA2Td&(OMS0YPCJ?2ofCW`Fjhj$j^$YP-#HBP80Ngnz&>qBl)Sja-a|Y6|Uxmj~eQTLg-!0ciIswu< zta<)9WaVUbs$F#xq-EFJ-4&|Eq|9X}YU)mh={%S!L+eBSBaQRDK|trrJRi=hHQfsN z$b10CKsmo7KSh{-5&r-fwB1KamX1ORvHpc=%+Rkvo>AZyoIAc#<}=)S{uSEY3Rnni zgDd&8>UaYyrVYT{Y_w$QC84FhhEIk&pEQb&mWJMA-B+Qhb1)DDkGDdUwjM*f6`h#i zC{%ohXt%0%rNo=v7ug}H#5CzDPoN|8s7d5-eh>ta!kfC=l4G`%D-RU{eqZNB(MKzu z1=1SZo1qj~-tip@NIQtmPh;y>x)wMb@T6gf-zkfuZ69ZlprrzLWGD^=XqsUb!Y(#v zYxzt^lerMsg0egRTtdc>-YH7~C0wu9qo>tYN8(x_A85@Al znLJT7FBSnaG&uomYg8g9?5JOGmcxnz%RaOv6GN_0)a=|`e?$=Ua0)yirq+|5{2B_h 
zazZ4;dz>Tt1%bP&3f)3t%pY~c45XiN`SVoTlgSt*MNZd7ZjB?@CEJ32+Z{m}%3K_! zUyW62C3W_@37INGtz|l<*-E##gdpPw(24>|wuf282P}l-sN0EPHl(z$o;Oa zz-&T=($-Mz(lFSas959-6rWttlw$Ikw-;m*<6_6N~c$@WC( zLbxLYV77G)B#?;iE_*N8T_0c5=sk;bNIUHzMPEdx<`^7ozJA!Jm$GpNE&VRV$^;a!j+dQ&gYq9h_iq_JRqUpPo-7PV}_GV@7VZwv4 zY$)#BpF9o-9)yo0PwMhIswUOP$r<)MXWc_Ke+Zo5a*H*liMr|dr&VdS!(F|p3w74K zP_neBeEt?YjzIw9&Y?6-avF#HomKZ*!10mA(lr_vNIXdSAr5K#mAWIyxyPS2*Kttc z-{6qLm;pl_%1%M^^&C{ieI$;wE&#Xs5USPExWl{g5rR9fwI6|9Xr8P80H)7K=R>$V zE_JRH%{?W3iE=CJLUHOmGSSU`jC>2K`OcN61X;`lr|ehdN5;x;`0lc(aKL$MhtP|! zollFOh<>B=SEx5=F6tSuZH_5QZ3;hdfjDJH8dq&NjKZVcgoEFoI)-mhPRE8sIWs(zPl;mp4hS@4D z;DqCW{(DtYwq{D~oUGQp<<2Dw+#)Cvk_wVJ0GyhJl1{-0){BI_yTEL6*jP`P#%ZU& z%W!hI=t(vq086$u^Mz?|Jbc5w1oLr{o$vrR8L}BRSud@}P}8az`HyM^(e5~0@T@mq z?64-%rd(2ALK3n#8OsS7oqqzjBeIl9LuBks% z?@PD7^J3il?JN{JoD-AVBg;Ph>vhtg4HpYj(qq7N)nB$IO!=#bvh0u)RUOoYD}Z|t zIovx{fHWTJa!1=7`lrRUhS-RN%4T?RN#h#;Lb2R|=Sy>JyvGq>7pV6C0JZ>?%5lJz z;SKBn1Lc$3j%lZ`kV5T9^5GJ(r^B%LaWXTUtIO$F-&pkdeecT8HEKQ2Ce}+!#ex0u z*5JU~fsaAxD$!S&f#Oic0ye11BG$4Oot>uK|)HTgaWgkG41ci=|&oy z8}gR+xu9-MMx^1iAbsF)aC@9ku770nMS@!DTeDHp_VpgeBzH8X%0g-J=IJ47lHJc) z+1f%Ew7v;B?lVtEQsI<|TXAc0yT|bFr!1nx9}b}oB?IMN0YS&F<3dR2+;hq$(GWPo zg7}Ej_SWt4WWtjT*EuB~B61EtT8FHBR?C1)0bp;%mon37@uD!}hzdX%IQ0B$^J7E9 zNGg>rhbvZfh)RmKp*T~uIa5j>e~%;UR`!Ot;SVG~^rh5x5Saoit(fE;s`Ur^)9}Cn z$`QN_BUoehPQ|RP#*g;zQbo;a!YgQKunKkN%(%;N6)i*N8@*|onqwqsxg$2`DqBWG zal)ot1wgMJ{MKhITO}dL_@OQq34p6wW65c!5K^tgVE+J0o~HrL7Dv;?W3@?4(&gA; z_L7BD+5%Wj3pfw({B!I}_%$e?d~U(0H=ga5-P4Iww@VhfHM~ zIU?6dmL)Fdb~B~By-IU&KaOa)*c#g&$~R7@ZA9VH9!j#>n(3$~=!jPPb;an=+d(KT zs~O|Z(A0U9F!+vNLF&H@3$*W;f!VdeRkFj>y+x<*M%{UzA#4MHR&p^@0-dr);>5mx z>2HeF`aY&~?M>cG_g5RI9ayl!l*E@7)RFgyPam16YO2Pz?Kod=bU#<=QxPmK1go>A zz9eimxJtOO;;^)j;ZGPG(kV4IZrbh&vURO|hK zPpou^pAnpXBuj77Nx30@EM}Ase_zJ4rc-Y#6fqERg!KnV(6IK_L#-?mjC}1MBA;KS zMe2Gpn%ra}*zjAi$#~A3cGTmEz$LX{61M&ivG~f?)EgmAEHkVYC?Z-GzCbTUsTY#gy zKRU$$NLLGvjWrd_;1t$8Bq@J+4B*IeMf=M{@Z`jpCR;QJRaL7W34c2l|yyKy!(ouyd z`BNqC@PP_Ng`+l8_Lz#91@$4ls#ZWL^2e@zlr=kESaO+_%>@Tcv%N8`iAc_Q{{WgA zSMJ);!ak0#A!O6~Y`}Op@^+OPnbPp!JA_7|J3zvA3(0|RQW3$x!gGvOyHKn?0S)A; zvmQi%j4lQL0I91s-E|@cnYJ|vkjk8yFT-spFG)};Tg%!qTvAeiR;-)^jQLg_qw3G5 z)!i{B%^QDGzhu9MJsNFOqz}!K;UNLHB-jpexPzm*OZM$s5w|0>;VKA8LV?(O=a0kl zt*2Y-4-SlwRNqQ`9i@j>6*IIDIQ>^jIuzTQR@T({k`k!NW%#c&>Tto8}g6{~EHM~oW_YEcA+$>6l; z1IUbyKZ!nhm# zkPiL7Dv_KLKnAAteO5r?$t+8Kct7`1x|9;OKN3N5eLJtIZ}b} z>VuZY4K}A^46Ok6+!{iN?VQ)&U&8HE%>?mhXm}$1t_A-9d#}tt;{t`T>vYpKwk~FV z#~-rdkNW3ye^6U)@89f9tHVS4ln^&71p9eb@6{bX+Ltl*S(n3)idA|>_*&z&!Ex*0 z?QY*M0@*PoiH$k2(6QY4A46GpTInAu!sd@E`(NOP$BHI2n(RD!{{U6jNgW?@9LgI9oBavzvmHlei2FjyUzDMGJ#$QG%V`h`!xxZKSpp zw>igRGgG>C7UG$8F)knk39FFPhzM3tR7fd3laqm;Zh5VXr|1JLEUj8boIn<8s(Qu* zF#N+i5erC?|mSBh#d(O?o9T2ToPr4kVZWkjJtAYgy9BefVyKPXZy zh!>M=6d5N|IR^ZH_kDTPWL}`@GM7X?wy}*JlY+t*mbb+*(*$%@9f3O+I$FmEnvKm--&c zwslN7E=EcU7m<~AKK0hlEpVE7+(p9dH%hOFfv22O)R2{>DH!wOfs&=FR>Zj@I{kY? 
zmiYxQ5bCqQSG801>hKtLl!@GS@RCvO?kSMNX+cKL{-cLuw?I`De!@KViydV<#!ICA z(^kD4mOAA61)_XAb4^PxlXdf7a=n)wWG&S%VnPSa+v!>bH6h9MUs&qBLmOPs4-3nB zg(<;8(oeq?=TJMkCLXqHY*iFEsO!l@~Dd43THCxI9rEFe-bQ#qJpz6 zLHgnZTmACXEh|Y|q~rt0qV##qJTEeE7H{!yNT=4ddSRxVztK3fmrrgmivmZ9;qY1h;-)xvxlr3x2jUSj23WP9@!%ZQ{y_ez}SVl zPCc42E$M}&O9e$F6!}u7nTN}Jg%31jfrNv-IJmBa0FuxJNh8Q}{{R&Y zA#jMe@TBQ%S_gD}(}3LA=xJkiM?U`m=4rYo-L^t%05t7tBWB0o&yyR&$O=8hGfz$C zK8Q+ronTk;?MXu_Dne2~IODb|Qjh>|WK*h17AbS6COD?l%S2=a5jwLIRs_)36!x0=|3dTC+(};>H7sar=4_zjc3w zs6)CEj@ULg{ZdCw^zPfCQ7=&@z~qTgT>YUfqktpF4sAnO{{VWo5>0ng)#R>$kq7f( ze%C8((9?zXm*5*%+DLfAnmZbJnmrMw>a2vU8(r!=Udcw}$M zcL&gpNLoFj5toC;5M`@S+b$w%5?-}5V+hQcpD~6oTLbq||KO^l=Qq1?cjsqhYl9AY@U5!Tdeu)#~y{v7|ud(_d-%=SR z-G;lBbuU)kKd4F6aqVf2umavhPn3{(D@sTqJpTX>0OJ*m(N9qsFu3_|!sP9WMl?Cu z?0tXkq3jx4Q!x_h685V^{VOrj>{%lpXoyY$-i+=8zCheL^r_8J%rLfUV0Sm4brLo* zUkT?cvi|_-7pv9KxgQZ$g~Ywd^Ikh5BZAnn_PspT;7$TNm3f*vHkS*8u}yXR`w}CFSZxKsp2{Mmq5$M4K1HCd zC0LM!4TwsI1GjOG)h5@D!zuWvL-|p1tc*S4ZD}b{{v~?SrjQYkkA{V<;2~#h&!R&7 zL@8-V!b+2lJ?VfP(~cB_XaUGW_lC`$D})q%qdchgPngAA^eQoX`l6!I_Z#+ApQsv% znGCj)@oDzI_c`T2_E7c5rEOY8HJUdCWul-g94e`*wH#>bd`p}5E!>AQlmqMZK9yx; zjRPS|H_=t?U9M7;yoTe2D{0A>6e7R0Fi2}Vta=1#9J{#{QGiW7WYTAwb zZzP z4U&`dWA9P=7ODvaN(jI=2(rvpEQYT7EpjdHq&7m52LPW=)D+sClV!Ii%e$*6s+XX|4^j6{yn}H>~!v9rd_2qvarW_WdfqH&KgNl()BSfZ-+ei|f>r zpN#s{8Ur8`+#gz^YP!*okPZ_PgNvwJ9Ob_ig@t8f1Y@4n@{=6qf74yUrpPx$8iXqa9SE6C(Ha5bm{1P zYPK-($@?us;qx6EjKi?~`!AW7rMTh7CH^#2)lTYIA87po z>^q$~x7#Acjc;vH8gB?jPEtpq{`JY?z}mk#HEP$sQup^n*4;A5i}N&7gMvWq?mt={ zAREpK;0(FIA{ex>Hq*wbprLl*Wi~>wQ-O~pvUvBw!Z|m zRBE9jFyO1z{^}Czt*y`;OTGQYQ|mP=l2^3Kk2RFWpPHAPZ3Lxz@Hz7KrBF-1A!t+* zV$M#uHpbKQr7H;8j1ND0w$L<-ZY!FDng$lF+QOFtaLxfZ?VMGrnnr>>(AsDQLhCH{ z%@$PRo^hNHBkNW=DFHTWvAK=C+!pu!AR1T!2ujDVolFOSn=mw+*?xY-!`n8LoumRg zpIY;!#n6&48w1Mrs}Mq)4Wv3xCqABdp=u@ao2Qu-y{x99+nQ*2No^^}s{M4kg@Hgk z-9~Cg+Jek>)Y5XK@sB#hI*zQzsW#bFTEl6;N*z=jVl!p5sSXwVr##V8${rDJExu5_ zv3C~C1p`WOm?K~pk01eBE-FTelk?V(A^&EL6 z)uGn|X;9#}ge9N=J%Dj%>#){s)EZcw%?=kXckSQyU%Fqyy=#v}(Sj`2u%D>8{-_UJ z>j`=?OOHxXxmcyZMXpOOawP3Z+~)816_pTus=K1VW@&_Ta!0j}7WgTP<+}}>svYO4 zQLL9It@hRh_f9sK8)?UhOUihqK`!n&`9hBUhrZ%G>rbQ51IG6qujd?oiPu$47~K8| zOQ{mvBGl{$*(VBRSsOq*xZR#Re>#(74ZGVQ_fnJUZ+Drex7jkZ{r1&!x81GR)w0tL z5SCU(P}{l6LWiLQjQJ7oRm%M~NZ#2X2IKb$P1bgnxY&;2MKh+g4w=0{xJP-1qd2eK zK7+XrgZwKVO7bJ=?@vjoQq{od+z2C)+!RG+vXFT~;8&J?1*Laq=0~odby_IUNq_Bv|@*LlrD>c)Opnh-K9j!2GezkP6C9s5R{K3jt*#sKWMN{v6nvTK1-QNAOXib z(NWApY!U2=!D&RiCAWfG;j_R3yxPJD0XSF`^KGVDG73?g_8f|vhCu6GDZDL_4VIWv zS#1e%L+=rj{{V_PkB=vmeLN(bp~tPOQ>Su?z&RxI+qeAFLUKq7Vw#K$s^QfuvoWK~ zLY!92XK%Rj{VP!ER)-rEWu|}ua=Fi|wR6wTmdYOrX|xq3O7c1Tsve&8s*0_Rvr!Td z(5wEfQ}Wp;W!D;MNF{Bn=Eg^HpD)&#hOoKVC{bG7z$)DCmzeC;CC8&$;w+S<*Opa* z=62^8^ffsFCAVoRMZ_G10ega*L$VPHIxa4!C2p-#hwr!9eLqRB3FszQ?ljm86y`z`RU1j!PMRhF{nlPo5jqMit`=OY6 zuHScgK)lnbBRNkvC+S(nupI%o9DA>Z{wL72^-gp{H!Cv4a^s|6{JasyNBC-~{{Ylw zw{6xRBP{#be?_>*+}lr*pn?eCXNndLWzribp>{Hi;W*T(3^-DiHW9(d#b}*BIdhtO zcT*9x+V)1!t(#KW2uTW01P?6MuS+wC+|;ZDO4X<>wHySr0mrchjFzC1y}T{gtdaaS z%2R-(^PF>4s=Y*la+zgDY(f-PnTV+?bHGXzRr0I*nuL>72K%^EQ8)p@%_~kg%S0>y zuK3Ri9<>8kE%MZtKbxg=s|#qD@lgb<5Pn_-Rca=)(C%&Q;E_67YT1m~DGO6zqyyPU zK6Ud{nxu_&nl5`R#r??SBY1B$He&|h(0C|i&E7+|mui^PsrQ}Ia+!^XetA-Qqewe0 zA-QS77j#)sax{yN8e4Z#NTy5q# z2}R)Lt2NT!A=epoqOIvyBOKuQes$_+oSxx%rzqP`TZ0i4rvM4b!cuw1e4oyx9Z=U( zmb;bbNCzcpM}mK8NN54M!d3XzRYvBvvCX<_CvLeR6We9EYi?0Wb`&S(_c-*e8j^nv zwh2sNE?+vI>W$P_wJ%bwI&Bh*k>Vh@pbrIIzrAbU4kB~+)H~?5zMge8gs&>iWLlHi6s}{D-FRjH0OWDK z!td6iyFy_pSn%8m894qSkNKzxYMMpbSI-)iaO{GA>Y&?UY$)(?!Qc<2L`hgl+=Qmp z4$C18SEyTlGCUUox8R>{d7x`GhL0#hR#ZAbq;yTSSBP)6ON#|0W8S-^*IW=(rkkZ2 
zpkoOk)tvz{(k4xerM9oU0puu;Ecs@l>uJlKgsQHDselakFxeYR@D*@!M1Sobj zIj6KSw;hsn`nv{);H!&iX+8{a7L_%RUq7Wz(Nws*%!Add{NriDB6Ya`0J}V3WBJvy z!3Ckg9i>$Hu6sx)OZ&~Yg6)(5sd>cyr>XM(QJ0!VHadG>~brQQO9N$gY3{RqaF$jNL=~xQ zuE|qv1cH>Dk54L?(rI_)$rUwreUA%vHk58WD6$mUD%;=J@Tr#5hQ7*npk9;{&5_2R zv9|eb4kVDSa6h$5In5XWRCM8KjnOQp7a$xWv&!;Ko`#n1}SAOwIK=$3s-z+ zH4|3mhi>&!EN^ARUyMB>{Jm+_xvZcy5iA*sbdr@iqcPR2l6-<1Joca|9341wv62Ve zFWmnCfOTF6L{bZ6M$`MM(bns3C{~%G97Ld$t;zo8;etJGk`caQb_zl zsnbQ*3@Nu%g`~OQV+0)h+Y z%)`HA*XcJ=bvhS$)0g&MygHSKP5n#D0oCDM)70r8HH!w;_gF zM%NR`7{+VlzP_g)F?BCbLR@g0_RCWZp&>yfV;*1UrDuhqty58H%x+fH%XFUrm3`yi z>GYr`Wyhi(t4rKCLkf#2hQg3B=~2ELn~{>M(`keoEDL?)rF)ig0p$7l{&c9`PbxXo zc5+_cFYZQ=v?0`n$8nsRYsYXrCtXiquv@Q8IG|Fm5k*+Y?Vs?|?T`>i8BGY!0WvMq zyfmfEl_Y?8{#7MR`wXXSH!b6JFDR_uKc3hX%l0FTDCzMQMExK~J}94<`UFY=MEZV(-g z=xu3M4tNJXo@qQBc~*m0T<;Hc?t;~yXm>Z8qY#E9Ta=K7k{wU($QzP9j(b#fHljnU zs&A_;b<&Se+3pYvqC7UzmGZ10F9X-`tK=0RMjUdj6uOH`M)I`T(a-8Q45Cbu65>Ke zTKn1K^r2r@bNsQ&W?e*=marntH_;m%h2=Q&QRK{C3f6v`KzDg(nLN;3#?n<=?rYri>O4)MQ+$si|1j8iI4FRl-q@6hgN- zkUtT{SE^$Ho1MzF^z;(cRW=qlgzxB$`r{Vpog}9iki%Ho?oLzetoN)Bt#0gqw;sRv zTTKR!^Kgs1y1gz~{PEW*iz*z#fK8%ril?QQn@Pzn zkK!I*g-YqMkU%M#Iuh?Q(*sRMmdk-)2H>eXRk$CN4)`^%bhSO42vYK}F`iJLHnpWp zw&5F^NC^q^3HnyAPQiVlM(sAbDq$@tqJ;$DoF8s0)p6OjT=94+Mo?*}=w-zA;W<6) zJ=EmdNhu6<&D1q@K_Qg|8WscI0Yuwz0g5d9j77E*Y1IxWPC0m=2lIcn1 z2VAYJxQ`QZ5*7{;e2qp--QIsy#>hpALfPy#x%T8bw6hvv8B2q){{ZAX>kiZOhe$4e z7iF6~a9W~eM3rKez?#5QXXh&&l!4nH+NYVNx}1RiREC`n^X3W1r=GW6t?m>lZDA*F z4?m4tblRZdae}N*r`Q5h>K!_WHwP!jR@JZOD+BMJQ}63hy0a|no(iwm(s)Fco>!|J zU$v{ll7N*cgS*hs+GJ(Mz$o@Oj!G_e-k7b*Lr$Z^7|wjMJN}d=svQfRj3z1_aBbN^ z*jY@wrVtLoouHHFp7oq*XMJgLU8IvXDB9y_*=V>fLw~Ljl>_1``xZ$#`!i0CG53iz^hZWeh$$`VwtX5)(lwr!RCcYZNq(ec;=Gi` z+}UqLgD!KWm{FdT?h1+!l1>MH1$ixzINrkcS{#G#UQI37l9wG|f=KkHCWJUCJvUHv z54c@S{{T+DwYQ2xS?x1pg5|brfwU_rN&f(8`gu{*l$diQ z?xCAd7}W)Fv|b=iOH1+BZCNU9!~&q7V^CFkbK@!2Ok{9qR_mn}hFG=<3QI~_k`Eqc zlS>Pj-1lEgd^FG-Fa)h`sYSEOWA=KMz*)+dlgRd~RShSIau?8A&qzjTh})GeD^D4 zULJYJ5*9)SV_Gc@KznQt2)#C&B?_F8%mTK z^uei(9ugXEs&y5@M~iZ#FHJEP(%KeCT0tGL<@~9-$#y#ZmCky>@@Pv<-&=0swX&V4 z!1~ZsTAO5(Z)I1dao@Q#6>^Csr@$!jP)^h8J?d$(2e_W-y6U6ZC~ne$bh4mH$<92D zK-Dw}cKcmdW2rVsZEto}q9ln5b7p-{J?foY(%sJCIpN*cOU)~-+qle($XZm|Nhb&H z)D1;WLrRgQZ;K~}IaK$q-?Mckv#Qsb-VATIX)wzz1aL@k#+2b#pz-tWNvL?WzYZuv zi4q52>~Oz%{uK1Hsk9g=hv5;l5`ArT%I$2K2E%2sPi{1ap0l%bK}w21v)gJfJ?P^)|uVmrtoNd)|-1YrFL{#5k!+vz+<@;~3`ikD1ua^08) z#aSpFAdb6S{Xae&RLE_`x#;hb$0NFUBp*fAe_hrRvR2B@%5n+_C9|E~EA>}JdWF(v zZF@=1k%ON@LTs~? 
zB}SL%kQ%d1Nly1#4iXPLkAKVQO+J_?x#>tg=vz-kkqT}ke27sP{{ZFwaYDySwA?89 zV`#xnAt@1jH03Ey+=5O&3OKge2qMr0*Yu0C3uVvV;bSE@^QCXNJFuhQQMG2Z-QuLJ zZ-cQ(AQCu9#U>WSAA2QJRuJr!TDr3hxsc|gdstrWpdU=}kH)oa5!NGb3eihmxi79X zoze!igFTkZfI6hCpF`$7M-_aQVriVxh^?<<;B!wX$4iD3iMGj6(77P^$U?Z?{@^`F zWBJueA!4&dZ9shnFWcqI(RYa9Xy!Sn*O8cje3V+u+MNC5Uly*y!-?9VRg z6$@u~h|yXP2N}p7K=!Gf8l1}S6!5G%O(%&Vmgxi4zNcK`w)}WZ%!+{K#5#eo4j=ZC zPbv5It6&yaCW{NXS8IP-u7*R=?$R5UkWTUr{QH5x&yl8`<`!NU$wpTGuEMy+F`8(2 zcsrU(f`~tdl|$;&HISEgT0g@Yb2g^f!R0u<8?I0t*Uo*h#}b9`j~lvG+XvT!ST3g{ z9P_+d>+KG^&|d8t$0pycmvF51bWgNIX~x=0yflRENav4As@5@=!u-4I4K1yxBo5^U zOwnySFemK$KvvPm-v0oKG^Mf$ z><_8Ool&*8uF`v^A&dfqST1;pQr%LJPIxDZ&8ow%a;5-o%dhK<=A#0nAfz5Q9(Nz9 z?ewUsnL`IEGN1#V5FPIHr39AB5aMvZl<+*cpj}j5%8v2g1&Xd(XHWz)5;UnOK}g4c zKOt4E3JGFu!2~S!No?Q~HF2lhV=Z0?xHPkp2k?)ll@VSfu{?!m(Q4yAh%a`Ae`h}> zExhAtDciMN5%_ue)NLH)t_0%hYf!6<7hQf`<)zfJ+hfKgaz|wQXV37dNoVn+@Y|9V zep&amW70b%x8OPx;sxO(jANhWMQC)VaR=2!{w9keeMcSms!`YimH-&+vFFV>)f5G+ zC`X&P4^%g#rKLB9kap*8Gm64A>AWi9xlJ|Sl+V`gMsCijXerpV0JIM*4&T6+z5+_w;w ztP%o7YIU_YHdFIxWvwm;g%&th9<*A39N`#f%~vl(YFl=ph{IBr!t$n^8B3@=(tfmz zkZg$$F+BUGcUi0&a(C?Br08Zt8gA>%nB)Hd*-HK-`vPP%G|_{0zI}h+S-!R zdwioCi1ngmpxFB!7uNj|O3IpHJuj8xkdiuV#djeD&%}r+DttWU_ z#9s1Y{l0fhHkJgsQO`1WXti~ zZ5jL7eL1RS7m?dZS-gPwjd9^@ z^$O}*wBFh^`YQQi)Q+)oaFdj&!{^_crRbW6^8x%-o>uqVhZ4I3@DWsy}5H09%0F^Y$ zVJaC`I00EsJ)H*rU8-KG`FmdV0Ej)a<8c zCn|qXiI0VjCnpNdYiTNxBTZ`kJ-Qo+WvI^}junQ+!cV#g8TCP`Cx+3w83qpt9XlgK zolP&<8tH9YKIPN>Q>WvI7YF7ru$ZemM1U5p`7OxGXQ?QOnU5m& z$JJ9SzK581T{N>IDj$xeISGGv#2+fcNANf?_k8<~LC&*Bs)E5CR(6~pZr4>h$OIE_ zxd?ke%+<|Z5e*lmMQrV7W9KMWKQ8R~XB^Yoo z;cx;Dx3|nT`-30<02QIa7teArf_;TkDy5Q@rHso6xIaZ(jy4xKUhH_k-CmtD)cDcv z5^QEe3uO`5kC1*LHJ@{<%o|rk>R*>%NiI z_F*Lv9DK%7k~?k%BRKNU6|HJ;xJVw}%RiB!&2*jb>b{{q8V$zjqHWC2g|XB!lqe{3 zk=)jA*4k;70&SIL(`tccIaY`NF0k$7~AO48g%hNZ8|Gt+12m z02!u4g84);L7!wdF^2XmG|*ael5^kbif!Z)gQ;^*)eX+HH!-D0Z6T!;nN;EieaFGgdM70jVc50Ap^!4Bp%;E{OZ%BRqgWK82Yj~;ZiMaXftndWWivh z?_PnMy4r^C4;U$k z+IHKNA8v}~tkg|FdMJJo`Q1`UBqu$EX<8d)sUR~T@}wxDeBd@bDPE~+SXwH`{k{YC z_{}97Q=T_tp2sAb)ahVsObuvSPM1a@U;%Q|s~uQwM2SgyT#1gjNkaE+^Rw3P?T4<8M9a8kl@m z7L`5I8oTJLSvxxrx`WV;pRVDl;!b6WD~__2I1h9Jcp+Z774!c9Sn2MKzY~vc_T86)i|l%Di%sj&oGG!=h=wb@`FivbFO${_8cu`wfJd?Ur5d zg#CY7MHNuK=BhY$&`IGf7UMS?n{TBU+D1-2e1&Ox9Z4<1kC9fjC%hM0G1q4sTC8OK^%%gVq65&R~qUpX8jX~9|LOFY|? 
z*G=_4+RHu@Q7+1LsM*&GUyo?{vq|J^m^yPb`+*m2M;$Ze@eU%l&SQOwB!;;@Q-@D zXp^+RY2|V4+vXg1Ol#}Ry%_}sULueP9rN|64y=HPceEy42ywDZ#gO&8Q6q9nP*seN zUwZjsq9?YvmWwV!A#1Ktwxzm6k!HCG1;EilfKWel;~>?~NYntR404y!BXp7ra=uZ% zFy$&=T(v7IQr>PlwjoMA$9np=rDJfT4n5b^ohe7Qm7&Abs(&s@)PF z?u6AVoLF1jl@8zD8T+~Bi+2jS9k&at@{jM1)5ZoV5dIWZeY`_t*wEK_vE#_n_oZ*J z+(2!>PYk*IE)Pl-uDi;dbqM)(-;@h`({rp>iI$1TQXFkxBC*2L4Moz@IOB%(o74qGIJ{2eEgfNMa?`%-jQ;N=JLR{_6mz3<4hnOtjV7Z2k|CFW zRF2dAY6kBjJ;-2(Hf=sb$bB(Vuxl-ifJzt4Ul2M}(8syBw<^H36Hn@!YGy@KQRFE* zw<8#@RB4p8tR;rUV9l{4j#6$Vs#Uj9JeP*%l&%$@>*qu1x-+SrW5Sezd;ps_U=(?;igIF|SOv9^lb^aV{p$v* zlWfAvDb->O4IxWkFGBZgWCwSYkTaa-vmU0D&f?Ar@T&4{vQtAsRVlE%1+)L$ztfdOcB=9N= zPzFbn**_B;_IXu6*Ha`*5}~O@M4Y65yC$e|)E^UTyC)kl7imSk9O?Y2H#>VqC@n~r z6{RF6yJ~qJAo^|HM^rG7*f;YzDy`T$vkUy&D zo}lT>D{LF)ps>!`&zwuvYRhp8i(lxP2 z+<|lH^gz(`G#f3aSlA`W@ik)A9W5)IlA-}VSP2bhKi-;~R*~)xaI;#cWxL-S;vPbC z^ha^}Z(+1(JG`5~B5aEs_{(h=0a6y#tHN^bd7K#*Ut(rj=AwU)Xw0q>s5nL+Iod_VH3=l^BQhjFK~Ca zz)FXqzz6fL>JvdN1Z^#`X}9X5<&~{trtZ#1vbi?yfa?45Z*#4WN>Y?kQnA=6AMHN1 z`VsJFN;G;zF^)Ec_^;RcteTfnR#x8y=HF~Cj(jb2!bRfgY_aN?0o$`l1-A+_;$cAN zK1#v;D^xSIf*z|>PbML>V43k}lZ z8+f5ir79mT?0x?L&c0#libEAiD$@}pp;x7>t<{yOBzD{UvsI2tmxK#tM@N|B;lki8 zZRZ}p=dCj7Ky`|ao=co8#(Z?FAOr*)eA)cPG$paPDTcez0P3;|O5>2VdE|E$*3{hC zq3SAw0!rSk?!KoaAqrCY2g|pmIC9)7-CVofxhk)$FTrsoC6Y-To^mJ&oA*%CHtoN< zR!ffgl3T;0oMax~jS6Zui#TBeQudbJ+^lX}WyVv5#-1%>DauYW^`dY{Cxj#CZqQnQ zF78|(g&~OTEWMAAk1TrqDdrnW8W8eM))Cz=zhyZkvl??BzQ}P7r+}mAe_FZGS4hT| zNx?x$en)j%x~HZk++B7AWT-5XIsX2Xx>35#86`VSDGnAKty?n=XiYmV;O)Jp6bA1e zLiQCT4#Pa-ocq_%-5;iS6vKZdY27=gh}pZjK$?m*re>d!KIw0Pb-6mhUySkskj6%M zN%F^YR=~k*@T}1^ekT>gez>|n>UqeQEwLCoz>d(ux#1xB4k#I2VgnQMyLN44vxPq8 za*y_VV@`ND!-Z)H@duuK={xU^Q(@kLSYDY~-?L7OA-L?zKq16{l0xy^dkP>$?zp>k z&^0$JNyNItPljG|dc%KpBhx3(-m?C;(!_v}w3^R|Q#(u)gy03x{Vz1G+Q5Cs6ruur zQ~X08p{z!-G6u(H%K)yM#BvqB%5hSJc%?xbmG_n2qG61+)(QqtGi5zuilsECfLc;X z11Hbx=~|~xw1$FtPSVr{n^{e?m4~FpQh`E&Nb=pE=~^W-NbH)C{%y-vA_CZx8P@_- zfPDM@S^H2e^5pDho@GlZNFBXBt2)-Jwvq~us03svD0jt#AT+G3rwIw< zgY8vW*j_Fbef2}`v8Pcn7w8k%eg*;~qC zC43C`%>hrOl5Pf2Fc2_o6R%A573W)t9l0v;G82Uyk}BP#Q#pXPMEuXN1*YLyT{(UW zj|HhDIP!*1Jd7VPNw1^uEZ}ZR5$(5{$(IHH029TK%C#{t7@i<8l(-5(@tS zl{nUx60LJj9YY#G%4u|Z^CL^@`&H62>t#rEWl7`ksEujB##OVVw06yS-@^LR_%8T} z5-k?AN|?76w57G6D;ccb7=b%GD})~n*vZ6>7`n54ZR?W}Ah^Yy&$KkG;O^x|(-l|N zCOftLAz8^j)QuKfHQCC*el=HYEhHSYzT|e`{{VWa()iFw3}*K1zD8|0X&|rL zCcJ9vHSp-#q^#r~N>8n9npJFd4I9Q&`!3DHvTe+|qEtwW{{Y-b2eJIB!013AqGEH2 z!cR1op49g%phzf=gOGhl^Qj1)TFV78_mLR|Yw7^0c4F8{0+hnkFg);0E~(ZA{m?YU z&2cM~{dBfD5%_a}Q*oXuP{Ho5NA;~2rRLukhukd#MSG9pHnJ1Ywx|~fEiL6KiReLC zIr&^5_VzVpb1`*&qXe9HSKp%3V?ueD(l)lY8vt%rNh8R5)_vDcXr?=l9gy8!7&PBR z?bOeWITCcn(uUUI3m6y!iusd5aGs>yV6jtp^9K4An?YYue6z|W*49TLo&lh`hemJA zS%W1RtmvBv1CqBOThfWnOk;A6{i;a>aar5CW$?MuZB^+g<%n2>^*8DmYAV1CNvyW^fISdJHj zpJP>-C?4tD+bH+nb;`G$9E`*{vxG)idg}(-^yW~t?Ey$BB&hmhcjr?TH69&427MRP zKf<1nDmt<`96YRPx%BL*WZN;-{W908wOpf5zFa0hYjy&1m!h!Z!;VNe>=N3DCkKKs zGgye%cxTWjz|vPP5A4MT0_8IhE{ zmr#_cK`G@y1YjH=m=5&?EUk&I0D?(YtuA9(1Gn5dOOcK&tM6A>ho!p98cfBoU%a*y z^9>TA87`$nCnvtpq0gBl@knGoBnID?D%aDnx=Lp^g5UJ5OT?F6>HPatw1(~qbg7k{ z*i%b%A#2^`fRqk>s5dw^Q0rrrB}p3{P#n{a=q?V1jd|5K9V=&+BNn7X*rX^VIH!^c z&z?T=e+rhOmF5=6P0Ox7MIWk7;SKUP%1;SnuWTBpNa|Q<4;dF!9q29LNS7H4+EBl7 zjl({No+@WcmKQPJFCVx*qxvZNKQv^w3@C$1^%>1f-P=jtH1x)i8*&on?cz8*Ew$$d zW(Xrafg+gKv{tc*d@LV+Q%T4onOkQ zBJS**@}L#^6I|CMXNlsl4X+%3`Ct30jcrVBhsR*cfor|6a=x^F9{wLR>$Yf?<-}?- zgK3ntI5MXc?;#&S>tCi_AtPz%<7-8((T}iSiS?AvhftJ*d%54#t5;^|9kWbcmm39DGq${&YukuFLYTZ>_1k@WQZYn1Vq9k)?a>LI>nsoR2^ zF}5THtf#O)=jT&m9fO5gtF8_{VwE>}U%GjFfL?HZwdS0t_^NN+2%CTH5|y}tu%1Q% 
z$rQIz2WHm`l@(x%Cm%(A8!la9)sd}@NRME%hg0^c&wbP8>B0My5f5K*bVf-~|o`c`*HZ3uEmvW5sM7Xl@flyKNAKIk#npV@0BR9B1^)8Ug zH-^S<>vhi?oJMVmmYF|hNG+T==>r^l)J+UtEATR);t!}GGbAeL#QS{7(i@2Ibfs!a zwvGt$=j)m}v&N@&-NKaAV0!?#<$izDwyDlYy4)U2Hkwdu3~nVrWbx_6T>czU3aPwP zz((}F1Llw)MOgZqcCfv0R4ZH1Zi_CES19F4PyYZ9FRgtZEY|G;>t>0TV{h4Wlds)C z)tYm~vAFD5X>lY1gPq`?yMf1Q9cbCgXU#sWBc_;D)kPQ=a^e z4JDAd{$}7LyKy&9EeX@BU8#i2bq$z^A*o4uKfFLZ1N1($OidThE3V+CqAjepmWu#2 zZ(u0w@b(fjz)AP-?_P0eJ68*Et+zq?&dgigzei3bfZ!e*NFh5?IP5(9s)5$vwjIsk zM(PuY+4Ws+i8*Vt#D8uOw!*NLV;{--A6?5Pzqw&B;vS`i|a)z|zQCYkKnValhy!sz8qL17K4%XD>- zj1rDV<56|=(Gz{BxXvzxbtKe94z`pS&LxDX;oWfxi-3wjypQRX^ClS{{RM39b2vEu7^mf$@ff z@0dR7@#&j%T~DOe)B6GtpAZERl5zh4H8DY>GOm>W0J%@GrQ`=8Bp8i2|iV< zYEcgzrMxe!w0fY;HcwC$I+bsDzK0z_L2#ePnv(I%n~3bNO3fAGR=QDb%fNyoNLwpO zRTa zV?pXp6;%71$W?B)>Dg!@mN)jX1#5gtbZ$>eI;D5D!f2+;DZ?rm>^3Wsk@O~q(Y1jr zMUs9d;HNr4kFKR;kn%7>RoWxYis}@mUI8ixa2W!foU%*;wv8H+rknA!*e=DGD{0eu$*z`f4wsmD_-60Uof@h&n@8r*rG~} ze2_|uVM-YUaB5#okg?Y8q+=HHr@UvJOlMM+ETIST$gL)pa^~$RxMJAr1*>a{3paz3 zr(DK;%@6_mDP}VsWrFgbvVO*jv>Yw9Aw%4x)6=ta9e&7Rf>%3ww{A|V>2=lUi3Z^b z_mrG}+M~zmj8&UUt@&Dp-vPiYL^YdBT;AbP9RaT;i9#xat*K51f$iMm1KzZHr?d;V zg`sILc3tecPi9gq@oX2g74I&1V<*cQ&*xu0b+=M6?{pyLMD;$f9>Tz!T}r$^aMEzz zWeRo2SOd8O(y|VZ#aqEy_e@g!>+DEl%`hP|VDC5P= zmW3xyhce({8t5Ba7s-#$btqIe{$%n9aevSb_^QF_CPz8kFcn);(&L5R8&gL{#&*WO zqUtA|2x8{S^vp05j3mc#*A9NZ5Y1+tK=`GeXyW{X`meM90E5j2^3+!W#?U^-Jw`o+<{7 z>@Pkr!TptzvbS!Y*1~M|So@r1R*#B}D+QbWqt!Mz(QX!44~CkX9D>{Ly1$iesCR4x zsX6ROu9zvz_9EH>H6#=t(`T*@^%lpfBSr=!{jg+iCkb^fWhGviIOe6yf+!3RJR6Up zMRg@ObOt}nGy18`X>oGh@eyLTUG2?1W_{(ku{?z`;?$=QJ@Des)KaM7WVEl89D**Q zX({&UWR7k96phbH{{Yi{baX+tvn1W3IUKTZ1F{+l8T8AIq=Ekc$v(B|>Sc8xkGl>% z!RPcUDwD&Wh>4C`Z;nC&(ob47MW0&qYgJ2b)u~_?p0{}TN@bv7X$eDL-CrPpd$Fwpm7?yok8nG)}Ca$ew8_UxdJ z$w|+D%+;nzF5pV4O&Ms&;W#d{AtKz}A=!6I3M+70rN7mJex$C^3YyC5Dps0Y@tKWn*F_tq9lZ;K+W9czB=A*M>u z%qD<#OUFA|Ydn+h=sfC9DxI<5tB*}+)9SS?ri;HP)72(^DLykgh0s`*OGiqyROL9_ zfa_ZUFS^=DCD(kCJiz?@C>Fy#Oofc{-@5x-M(J7&GL5Vax&Huio*iZQxQ*3rmS|3@ zYLd)k3qm|)ZUstkCn^Wuy(!f76Uf-z*mAyK__3xLwHC4#*`<84{8fLcrNZIVhP2L? zGQa87PHHr%t(+yM_k^gApd|hkrf5Ap=KlaO!w4-Kr<)52vhBA|wN{tZJsz{GLTSi1 zw}f=9`K_aQJZCCbzpY#Pi>Ij{M`*w_ZCY1btp=N9zq(g<8MWPwF=$F%|OlSR*`qRDq58xJ|PWy zC0@TuzG<~vt_i?GQjg3O0xk%ZZFRz+;?(9i^XJN~)Xr&WAxy_Nmn&9H=tfJ3Ne2T3 zC!T(uRLyjuj5i4r$+UtHqV1;c+?SnqX9R@y8LX#NqI871{3xm3Uh57L^4}?T3#*L> z&_Gzo2N?eVim1t3EKl}T*_*Z5O<4NH6T>YSO}iZ9cg^z;Y|$rP-a}deU0`##OU->H zw_->Q2b)?)%eh>Re2p<*Yg}P=tXrI_4%`HNx!IAWH0xsok;c!hQRsB38o|6P%l;YH z$8s_kyN%kxX??eD7a$o}`2q&)0rc~$WgQoo0xqj{JtwF_NesQwo|K!UFUni7Usysw zDjsyD{*ZVDZg^0NLT zaG6%UG>A?m_u?z}fT%6Re5n-FbVas#1rEw)lXnFZe~imHSO`cQ4=j7qwMw0>;Und@ z8#G;diPf~}t3sP{Q;j7pl1Myb-mg6cRVjAIEkC6C7Fix8b4ulhU-i?P-t~2wg{4fm zREE+{3Q^{x^{swL{$0S@#ILma7p9p_Nwkdeex8e%I-XQ#W?W;kyfU}80qQ*QR)@p< zwv)}0MN2onCL1>CU8-OqEk_Yj;c>hxgyR7Aq-bEZ<ywI+raPk{-IjM}w_(ydg|xtpQ|Z(Vz(!KmrKIt9TNKcea%#m`L@ zuKHc3b=A4A7UK>(PpLz?+bQH^RWgRWLtNl^a``{uF0g3zhiT6^Cm&T?I!n^)Ub547 zRZ5H!`eP|gtssI3^5gKM>+6g}>^_U_ip@=oF|tfu6LkLoPRrBp__6mWXRrq}6xALv zv$YP~XkMgTGTRMdey=K%n z%JF3v5|gy$N+n%~ziujm%A-@NmxFP9COQ*JbyYbs{uZq6>KKhM%&BR_wl<;Rler*( zdH3X1hf}C*ZY**KNYt@46VC+N$i~NaieUK86}ZMQGNbb!WBAj0ReN0Rx5}W&7RPy! 
zZK=Cl$tis@AKl!7tby-ZH5Df78=~WPmfBN|2qvp+t~K{AjICKRjm}8f#56P!F~AJavD$W5S_#n_sIkB?OzpFRmnYk zuO|ZwJufv9lgLe8ptRdsas*Q98*MG82`j+$$MY2-(lQSf&$=t7e6w|}7xzRgC&i$@#ehhU8Y(Ah?1_39Weigh_ zR|eDD*;;k|Wv`^#4s4wrV|&1e?0xD|8ctP%ws1?#SFL0+@wP)EsYC9itQkiRM z;u5rDk%9tVm;2vw*+Uuc#>XMpHUxFz#*be@pXp38!>yy<>Ij4T$l|$5=RLbN`GT-lR*34GQV)>!AK6tM z1=Gva2PNs3?XAg-?36-Oh{?~0q@BxIQ0;(|k?mF5s*>lCY?Y+c*24C(aP9@{c~THG zTJ#2>vRm1VS#jAB=9CoVD3_F?k&hyV`*YaQu%8mqO`IWhEkw@~s$)19{j>Y2jcw9} z>W-`F$jmsV&1TgyyvTc!n{Y05 zk0o1+WA60dr|Xp=gv!3WkmJofepWx)RtKjXd76@i@`nh?%KOLQ=RmfDPHja%{$!aq z>zR(0&YqiT!X25$npg3#x3R+m*C_ibswS~E0S zbw|Snqv>yirm{r;jqF&%$nO0(}KI}@VnKHob=nER{oZ0 zdwQcj#75dG02fq1!QP|JNzZ!q6X{ybq0PpmvnBwrk*e`T#J4*mJ%AgjT#=3i0qR6Wi$<%|Eymf5YljntY#U>Eo`;eC_ z>@EeYvUc#`%UGfAe7R4N)#6Fd{fO`yic~S2zwm3|~J@P>b8KZA-;$J0OuTk#y z32J0)aG_wyNaT&jf^ccJxqyIADft6Mh&NUNSI1)5g#8^BWya8?Cwr1mO4?A)eeq7o zC}<@J*MjY&AzeYEE!QnLiIQ)2Ng`8$Qd@LyQ#tOEPu=2?_?veG^IlEMg@Hr8Kh$Ll zl&jO$zEJ8)!rGG?#ufoo?XwpSZmgPa$+l^Rd^K1r~JgK@}NHLpQgA!cas`(ys zvCYn0g?NrU7E%;F{Ho8URymLpe3d0gZJXT=CHWz?)YHX=%7TwLJG)~ZLap?bZ;^ln zI0-Em!9)-dCTMZJ**qw%S;^a{{S*87V39~NOlzusJN4qQKc?28Ho|e2~&tT4iWY5 z{OXgZYA7Reaf=!l$$Hsr2vH!fYiGs|M) z>&;^bLXtDP*Yc^N#+wEDfOME|a?nCqT(yC?I^;LjS0EA!@QK_zwB(}_Q!vptV zq#esXgVMDQo}q@63kRl2F{k-jf* zrDz=X9|NNBFv#7d$+by!V=0XW4(=`Lu75@M!K>3-60HI~3@mldG7Q(n68Cuc1Ur~B} zqb)A&hiQa|7KC&2sNjBp)w(m92cDLEgjFCpGSq`xLtlqKw)7 zk#tH5urM2ReLpYCtuz{lna^qe0OVrS`d?e$;%PISJB8cMkLmcjzoJ@R1;`>(E<^h| zf`B;~$3NbwTFkB9W((uqvW{INsQ0ki#{U4iU+FH9#p$B2cA&yi%a0VaNZ_RQsJH8l zm^H`g`Y)qn>r-6>ZwDjYSM3d^m2=fsOF&16mL)h8K*%F`^zG&P)tob6YyDT!RXs`u zj>m(N)oZ$0qUKt9k7=K2E01u7ww87$10?4M@FJ`F)X~iF&vorf)fGPA;hCk%Mp4zY|tVZS7^( z*1r@s;<648A1|#%bQYRfUJS!0IbTck%m$l5e62Z74~dNq=8Nim);t}}TPMbC*1+r* zkOBO~OZ-FAT^!}@7;^dApHeZ`I$QISm-=?@;IbjN5)|Sxj04S6I%{*}CdyW;D!b6yh}ii#2<nhRml6C0wg%KXj|ww%(-GL+N7yR=y_Dnm0BZcDg*b zU8m~3S!dFAr7g$alH6njjEt#4BlR`&=fsUw@23>8^5u+y8VYu z^m^&kW?{K-ddYHUNJ3#523u@QPmID6KJt`M4>{!^yH9^)2dKI~KJ9a{ zX{|#sy)CUFJiMg)dZvJ|v0?(lb=hwlPF&T0;#f!;6@J8X^h;Ew+Qw+g|g>+Fs< zIx)T%-u|GaMV{SoiLGPmeM%(SoO)ZVz%Qjuq92E}AmsThBq)P|l16*f3u=eWZj|Rf z%F9szZk!>fu@(j0&VRxw_<`sQ-lxgZuR&HWIZy?068S^8LGw#*fJS=-5!i~b z)Ym=&+i>zQqk1b^WKDZy>@WIZR105ECSLS~iX7A!+QQF#L#)ME+EjtUf)(Tv<7X-E zl^-$M0<&5%Jh6i&;a)$I$=xGQ^4{Kk2}Zq4=|!(n>x)(QNlQ(-Z0N5pY$LTRhjos3 zr+Pesw*r-RP$UE8Q&V+3Er>DMxIFR*{>4&s`f7-Y-T6)rV%L9#Zn_nySJN)AX^lg3 zTee*%cl#P|kraX9u^G}l8J^8IBYSG^urDUHFM?fHCY7l%=l=kLabw!R_xm2}4*1>C z<5szS<0AtfpA@0{^pqfdBrK(`>;-{pR2 z)d3rPCk+1pbm+O=l&e^RjoZ}HT^Jq0_#+>W=}M4}2X)7^ZWzi<>bq;Hk0-||4RBJT zJD`)rGdSHDY2gg!dMb6PVyUU}ij>#c!j9D|iutFisdm)dD!kLS=XWU|d)5n8!}d%9 z971^|9Fl!14xE7NIYQ4;ALMHD3i|UcINOO@UBLwfJ%_30q$944jyq0NMQuJdxmT@m zsir`m9brHO9EAEBuGc<8HsElpQ(LmJ{{ROXE;K}5`s1xFDVb?bZMt!%la$Jy zufFm}m~46a=9pwL#d*taY__k0pAWy~Yei(@@i61ipZhIeiEg;*JuM#bWrIBzJM>1r z(;=pnlDC$2p({UA+uEY^m>&1|uIB6S$1#wzXzUWdOZ*ILcDh$HskZ%A4z9Sh`%m_I zP&iWlVlsUJtwso9a{vyzg3jmG=8i(!V3o=L0E*3j)*pwilWH6EyTb0*_|p?ITy~HH zapPwJ3P0}TpTewNJiyoZjN}zl>i+;0@VLM0_FTgFaQLiNx7Yga@vCGCd%o@kf@ zSn}{WM7BZU*bAqv>ceG)E<4(RnZo|qEwuSTR@06$aqZrwKn?RA5btfQ?odLlG44`+ z-F}r46R6JC)Xqw=*p-jvMm|Q83}Xw#xSO;jH&*qAiEis}fG#%+d%;02Z8>L;;^(yw zEoIwxiznM0j&9;Gzcn4qTC_*)7#9mhqEHE2FA|qZQavfQPZNdil_MBWnYK!GYxsJC zBW!8F^Y>Q(L(tp#LRJ3&XzhwyCf~+NKz7FEXOz9Xn%7_T4=eBvulYH8p7^4k#Su4j()U4z87tVPf%Q zZf?#`b!)yEHDk9E8E-hTgzs71@|EMT_4BWkz9Z;viN)k)VBJfkIvI_I75R~WGi~u0 zX=%?N#ovm{G`f=K*s74Zb&#Zqs4b!+3QAS9g0FupROp*^sSOj#9Szv#toqNOsJL!ZrU>y zhvO(DWR6psp=ugi8&Edl>82kjC9)_p|ghd8DP3unSx+><=Un zh07R7nHD;okpBRR^1;uDv%vEo+O0B%+TnhwMGKqwyYiP8SkU9oTi?5M5>&6B1dl9( 
zdwEj*ATgv6Q26z35YXr>=Pr02R~*z3tU?pE*fLDn=AEJs@qGD)1Q8$Ip2k|4#_o$+ z!#_zcp&xe*!gH`K&d!*(jZhgmZzOJe1X~p8(>4~=MeRitH+gH!UU4+#m*j+n^*djB z7mb8~b=j~m7h3#OmWPG362H~{$a_Aod5vV_=@84tXLbly@ud7?6T_X2f zr@m-Q{-t5<{q?fvSLke~*3Rx*yhL&7-ItF4tgNlHr&Q$(=F{D(Lgu|h4EPC$CI0AV zvu92T`;7H-e0*Z}>-ewa#Vbj~mAy+>($^Yo5pINo>gLtFj!9Y5xA@JOhO)C-X}F&e zh)%G%_LB&c7}F0aDn(HD_n)96ogb2XfF%a2i1-!)*&2ap)I7P8vlAydEG*|aab(lh zw&dwAhY?@!$AH8L2w?S$i8w`_%|fr>H%8p}ws>g>yECzQ#JZjjqeed4swWXK0)hIt zSiWx-#6P>to3#Cc`V2qvLEr@ z0hq~y1L34M-5&Kla163r)G5WSwnp&a4@w1{xp{`yy}+U0!=6V`^uGSE#K&g(OQgdr z3G@K%vG{34ol=6C!ZDTFJg+DYgd0=8UF`Z*_^+fYZG2Nc*3^M`bmSB1jS^K;@{Jhk zAjTFVC5~6OYBXXRiMlQdh3 zeEf*}MzR_@U}?x!MY^fX(vihUTZ9=uCiruO^Fz;5MM%E9!FHN;CE5faio22Ov(W67 zTH>%^t{5P-IJix%t}Cn-cQEhwCncGZmfUh_(tqH<`3tdY#rSn2*d~Y$EmL>MWkQc` zT&)9K5v(LHb8r^_To7kYQ_eJQLT`JFzbfJDz1)PsOtUJ2pRSEx_lg(Y0p1>r?N0e< zEquawhVpZ4d1y=YTB8PZuIwQXSVyw z!|Y$2^!`67!%}vY0nk?Kx6cb$A|_G~|LkV=1&9?jh3v7!SxxoNkUBx^Ar#TY1?dcT zJ`cVF;PPfyZRXOmBGIlAP66Kf<6sPtDf1K6sE!2*o}du&`cdDsK^l{^XB&4ZRN)Q>yoEn%R#HEloW@@$-@}I4j(9ns)t=iHCW9QpZ(Up{q04mBUU*Z*eZ+<4mFx$+PHDc@rzAVrI5>yICC^jr314z>2;5{k z71gktug+zNZ?ibmq~v49bXi?Jcj;`cUpaQEEp-ebj{dw68Zqa7)xz3I<`-K%`3KS) z+Fv=@(^53TnnjhQ)4)|ttWD4_N0Y%|Yo$>z0|nUjz0CA#e}S%omRi1-wTJv84Yq?2 z{2)R~Cm0u1+oRKj7BeMMxB5Hv4WFlC;7^_n{+)V|gc{*I<=D(Pw;4#R#Dv$kc$AwL z>Ni2M7-5c*7#X23&CW;V*H=uTQ9InsDk>zP;{9nZ0~N06%hL6?;=)I&5vo~Jz1`@S z`J(p$c?1Fjw=cfgd^o}H_b3;T$+4vxx;*8m&8^eX>R~D!}XaButRy#PN4V{epx2FQ&&(z<5agm2F@Miu!^DjNQR*7sWreU z#iNF?LG*zwDGZFv;aXZDvP=?B2P&~z&u|`+foV%wfb!|!-qge-60zKP$`AOugwWTY zBTCCd1{vrss!`hq7h++pQryR`w}wT|Q(CC;0>CkGkleKss zEMJ75Tjn~p;DzX0ocnvIL~Q35^R6OYz5%2NrZQmQ^}>isx;5bO3ES&ST9Uza2hZ9Qwwx zpWrXj(7vh`KKqNg;L}VxBl}gOJ-RtGHuWI`a?(r7`6c0p}qt7nWhL~vYHsIE-NoavG;{evk05}aM{!2IHjCOHjI3hDxu7A z??J=y!p2JgFu3uJJWw_m8r{VFJuWt@!$whUtiSw}3Gsum3eg8PBBKrcI&A^3p#2ak zueT=gAs42`rT`+0*22?*X+6vZKMR-AqDXn39ZmKy1qsF?Wp&@`vp>fHmz0;u$?B%4 z=7c{c7L6Y=2n@V6V1<=m9q{`G`q$D*~o6G*50q?^^U0u(bnVn9Exx#p% zkRCiZ0&9RHRQ`$V0Zu6cxDddm_!Q@Z8UtAS`F(dEpc57RR3{Gz2A4gKJhzuMz9%*;;tehVyOw{?WMCo+*CKk0i>G6uLP+>Mll5UPVY(UvQTWsU LXEZ^2H}}5)XT`zU literal 0 HcmV?d00001 diff --git a/tinygrad_repo/test/models/efficientnet/imagenet1000_clsidx_to_labels.txt b/tinygrad_repo/test/models/efficientnet/imagenet1000_clsidx_to_labels.txt new file mode 100644 index 0000000..2e3ae32 --- /dev/null +++ b/tinygrad_repo/test/models/efficientnet/imagenet1000_clsidx_to_labels.txt @@ -0,0 +1,1000 @@ +{0: 'tench, Tinca tinca', + 1: 'goldfish, Carassius auratus', + 2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias', + 3: 'tiger shark, Galeocerdo cuvieri', + 4: 'hammerhead, hammerhead shark', + 5: 'electric ray, crampfish, numbfish, torpedo', + 6: 'stingray', + 7: 'cock', + 8: 'hen', + 9: 'ostrich, Struthio camelus', + 10: 'brambling, Fringilla montifringilla', + 11: 'goldfinch, Carduelis carduelis', + 12: 'house finch, linnet, Carpodacus mexicanus', + 13: 'junco, snowbird', + 14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea', + 15: 'robin, American robin, Turdus migratorius', + 16: 'bulbul', + 17: 'jay', + 18: 'magpie', + 19: 'chickadee', + 20: 'water ouzel, dipper', + 21: 'kite', + 22: 'bald eagle, American eagle, Haliaeetus leucocephalus', + 23: 'vulture', + 24: 'great grey owl, great gray owl, Strix nebulosa', + 25: 'European fire salamander, Salamandra salamandra', + 26: 'common newt, Triturus vulgaris', + 27: 'eft', + 28: 'spotted salamander, Ambystoma maculatum', + 29: 'axolotl, mud puppy, Ambystoma mexicanum', + 30: 'bullfrog, Rana catesbeiana', + 31: 'tree frog, 
tree-frog', + 32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui', + 33: 'loggerhead, loggerhead turtle, Caretta caretta', + 34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea', + 35: 'mud turtle', + 36: 'terrapin', + 37: 'box turtle, box tortoise', + 38: 'banded gecko', + 39: 'common iguana, iguana, Iguana iguana', + 40: 'American chameleon, anole, Anolis carolinensis', + 41: 'whiptail, whiptail lizard', + 42: 'agama', + 43: 'frilled lizard, Chlamydosaurus kingi', + 44: 'alligator lizard', + 45: 'Gila monster, Heloderma suspectum', + 46: 'green lizard, Lacerta viridis', + 47: 'African chameleon, Chamaeleo chamaeleon', + 48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis', + 49: 'African crocodile, Nile crocodile, Crocodylus niloticus', + 50: 'American alligator, Alligator mississipiensis', + 51: 'triceratops', + 52: 'thunder snake, worm snake, Carphophis amoenus', + 53: 'ringneck snake, ring-necked snake, ring snake', + 54: 'hognose snake, puff adder, sand viper', + 55: 'green snake, grass snake', + 56: 'king snake, kingsnake', + 57: 'garter snake, grass snake', + 58: 'water snake', + 59: 'vine snake', + 60: 'night snake, Hypsiglena torquata', + 61: 'boa constrictor, Constrictor constrictor', + 62: 'rock python, rock snake, Python sebae', + 63: 'Indian cobra, Naja naja', + 64: 'green mamba', + 65: 'sea snake', + 66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus', + 67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus', + 68: 'sidewinder, horned rattlesnake, Crotalus cerastes', + 69: 'trilobite', + 70: 'harvestman, daddy longlegs, Phalangium opilio', + 71: 'scorpion', + 72: 'black and gold garden spider, Argiope aurantia', + 73: 'barn spider, Araneus cavaticus', + 74: 'garden spider, Aranea diademata', + 75: 'black widow, Latrodectus mactans', + 76: 'tarantula', + 77: 'wolf spider, hunting spider', + 78: 'tick', + 79: 'centipede', + 80: 'black grouse', + 81: 'ptarmigan', + 82: 'ruffed grouse, partridge, Bonasa umbellus', + 83: 'prairie chicken, prairie grouse, prairie fowl', + 84: 'peacock', + 85: 'quail', + 86: 'partridge', + 87: 'African grey, African gray, Psittacus erithacus', + 88: 'macaw', + 89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita', + 90: 'lorikeet', + 91: 'coucal', + 92: 'bee eater', + 93: 'hornbill', + 94: 'hummingbird', + 95: 'jacamar', + 96: 'toucan', + 97: 'drake', + 98: 'red-breasted merganser, Mergus serrator', + 99: 'goose', + 100: 'black swan, Cygnus atratus', + 101: 'tusker', + 102: 'echidna, spiny anteater, anteater', + 103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus', + 104: 'wallaby, brush kangaroo', + 105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus', + 106: 'wombat', + 107: 'jellyfish', + 108: 'sea anemone, anemone', + 109: 'brain coral', + 110: 'flatworm, platyhelminth', + 111: 'nematode, nematode worm, roundworm', + 112: 'conch', + 113: 'snail', + 114: 'slug', + 115: 'sea slug, nudibranch', + 116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore', + 117: 'chambered nautilus, pearly nautilus, nautilus', + 118: 'Dungeness crab, Cancer magister', + 119: 'rock crab, Cancer irroratus', + 120: 'fiddler crab', + 121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica', + 122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus', + 123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish', 
+ 124: 'crayfish, crawfish, crawdad, crawdaddy', + 125: 'hermit crab', + 126: 'isopod', + 127: 'white stork, Ciconia ciconia', + 128: 'black stork, Ciconia nigra', + 129: 'spoonbill', + 130: 'flamingo', + 131: 'little blue heron, Egretta caerulea', + 132: 'American egret, great white heron, Egretta albus', + 133: 'bittern', + 134: 'crane', + 135: 'limpkin, Aramus pictus', + 136: 'European gallinule, Porphyrio porphyrio', + 137: 'American coot, marsh hen, mud hen, water hen, Fulica americana', + 138: 'bustard', + 139: 'ruddy turnstone, Arenaria interpres', + 140: 'red-backed sandpiper, dunlin, Erolia alpina', + 141: 'redshank, Tringa totanus', + 142: 'dowitcher', + 143: 'oystercatcher, oyster catcher', + 144: 'pelican', + 145: 'king penguin, Aptenodytes patagonica', + 146: 'albatross, mollymawk', + 147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus', + 148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca', + 149: 'dugong, Dugong dugon', + 150: 'sea lion', + 151: 'Chihuahua', + 152: 'Japanese spaniel', + 153: 'Maltese dog, Maltese terrier, Maltese', + 154: 'Pekinese, Pekingese, Peke', + 155: 'Shih-Tzu', + 156: 'Blenheim spaniel', + 157: 'papillon', + 158: 'toy terrier', + 159: 'Rhodesian ridgeback', + 160: 'Afghan hound, Afghan', + 161: 'basset, basset hound', + 162: 'beagle', + 163: 'bloodhound, sleuthhound', + 164: 'bluetick', + 165: 'black-and-tan coonhound', + 166: 'Walker hound, Walker foxhound', + 167: 'English foxhound', + 168: 'redbone', + 169: 'borzoi, Russian wolfhound', + 170: 'Irish wolfhound', + 171: 'Italian greyhound', + 172: 'whippet', + 173: 'Ibizan hound, Ibizan Podenco', + 174: 'Norwegian elkhound, elkhound', + 175: 'otterhound, otter hound', + 176: 'Saluki, gazelle hound', + 177: 'Scottish deerhound, deerhound', + 178: 'Weimaraner', + 179: 'Staffordshire bullterrier, Staffordshire bull terrier', + 180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier', + 181: 'Bedlington terrier', + 182: 'Border terrier', + 183: 'Kerry blue terrier', + 184: 'Irish terrier', + 185: 'Norfolk terrier', + 186: 'Norwich terrier', + 187: 'Yorkshire terrier', + 188: 'wire-haired fox terrier', + 189: 'Lakeland terrier', + 190: 'Sealyham terrier, Sealyham', + 191: 'Airedale, Airedale terrier', + 192: 'cairn, cairn terrier', + 193: 'Australian terrier', + 194: 'Dandie Dinmont, Dandie Dinmont terrier', + 195: 'Boston bull, Boston terrier', + 196: 'miniature schnauzer', + 197: 'giant schnauzer', + 198: 'standard schnauzer', + 199: 'Scotch terrier, Scottish terrier, Scottie', + 200: 'Tibetan terrier, chrysanthemum dog', + 201: 'silky terrier, Sydney silky', + 202: 'soft-coated wheaten terrier', + 203: 'West Highland white terrier', + 204: 'Lhasa, Lhasa apso', + 205: 'flat-coated retriever', + 206: 'curly-coated retriever', + 207: 'golden retriever', + 208: 'Labrador retriever', + 209: 'Chesapeake Bay retriever', + 210: 'German short-haired pointer', + 211: 'vizsla, Hungarian pointer', + 212: 'English setter', + 213: 'Irish setter, red setter', + 214: 'Gordon setter', + 215: 'Brittany spaniel', + 216: 'clumber, clumber spaniel', + 217: 'English springer, English springer spaniel', + 218: 'Welsh springer spaniel', + 219: 'cocker spaniel, English cocker spaniel, cocker', + 220: 'Sussex spaniel', + 221: 'Irish water spaniel', + 222: 'kuvasz', + 223: 'schipperke', + 224: 'groenendael', + 225: 'malinois', + 226: 'briard', + 227: 'kelpie', + 228: 'komondor', + 229: 'Old English sheepdog, bobtail', + 230: 
'Shetland sheepdog, Shetland sheep dog, Shetland', + 231: 'collie', + 232: 'Border collie', + 233: 'Bouvier des Flandres, Bouviers des Flandres', + 234: 'Rottweiler', + 235: 'German shepherd, German shepherd dog, German police dog, alsatian', + 236: 'Doberman, Doberman pinscher', + 237: 'miniature pinscher', + 238: 'Greater Swiss Mountain dog', + 239: 'Bernese mountain dog', + 240: 'Appenzeller', + 241: 'EntleBucher', + 242: 'boxer', + 243: 'bull mastiff', + 244: 'Tibetan mastiff', + 245: 'French bulldog', + 246: 'Great Dane', + 247: 'Saint Bernard, St Bernard', + 248: 'Eskimo dog, husky', + 249: 'malamute, malemute, Alaskan malamute', + 250: 'Siberian husky', + 251: 'dalmatian, coach dog, carriage dog', + 252: 'affenpinscher, monkey pinscher, monkey dog', + 253: 'basenji', + 254: 'pug, pug-dog', + 255: 'Leonberg', + 256: 'Newfoundland, Newfoundland dog', + 257: 'Great Pyrenees', + 258: 'Samoyed, Samoyede', + 259: 'Pomeranian', + 260: 'chow, chow chow', + 261: 'keeshond', + 262: 'Brabancon griffon', + 263: 'Pembroke, Pembroke Welsh corgi', + 264: 'Cardigan, Cardigan Welsh corgi', + 265: 'toy poodle', + 266: 'miniature poodle', + 267: 'standard poodle', + 268: 'Mexican hairless', + 269: 'timber wolf, grey wolf, gray wolf, Canis lupus', + 270: 'white wolf, Arctic wolf, Canis lupus tundrarum', + 271: 'red wolf, maned wolf, Canis rufus, Canis niger', + 272: 'coyote, prairie wolf, brush wolf, Canis latrans', + 273: 'dingo, warrigal, warragal, Canis dingo', + 274: 'dhole, Cuon alpinus', + 275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus', + 276: 'hyena, hyaena', + 277: 'red fox, Vulpes vulpes', + 278: 'kit fox, Vulpes macrotis', + 279: 'Arctic fox, white fox, Alopex lagopus', + 280: 'grey fox, gray fox, Urocyon cinereoargenteus', + 281: 'tabby, tabby cat', + 282: 'tiger cat', + 283: 'Persian cat', + 284: 'Siamese cat, Siamese', + 285: 'Egyptian cat', + 286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor', + 287: 'lynx, catamount', + 288: 'leopard, Panthera pardus', + 289: 'snow leopard, ounce, Panthera uncia', + 290: 'jaguar, panther, Panthera onca, Felis onca', + 291: 'lion, king of beasts, Panthera leo', + 292: 'tiger, Panthera tigris', + 293: 'cheetah, chetah, Acinonyx jubatus', + 294: 'brown bear, bruin, Ursus arctos', + 295: 'American black bear, black bear, Ursus americanus, Euarctos americanus', + 296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus', + 297: 'sloth bear, Melursus ursinus, Ursus ursinus', + 298: 'mongoose', + 299: 'meerkat, mierkat', + 300: 'tiger beetle', + 301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle', + 302: 'ground beetle, carabid beetle', + 303: 'long-horned beetle, longicorn, longicorn beetle', + 304: 'leaf beetle, chrysomelid', + 305: 'dung beetle', + 306: 'rhinoceros beetle', + 307: 'weevil', + 308: 'fly', + 309: 'bee', + 310: 'ant, emmet, pismire', + 311: 'grasshopper, hopper', + 312: 'cricket', + 313: 'walking stick, walkingstick, stick insect', + 314: 'cockroach, roach', + 315: 'mantis, mantid', + 316: 'cicada, cicala', + 317: 'leafhopper', + 318: 'lacewing, lacewing fly', + 319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", + 320: 'damselfly', + 321: 'admiral', + 322: 'ringlet, ringlet butterfly', + 323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus', + 324: 'cabbage butterfly', + 325: 'sulphur butterfly, sulfur butterfly', + 326: 'lycaenid, lycaenid butterfly', + 327: 'starfish, sea 
star', + 328: 'sea urchin', + 329: 'sea cucumber, holothurian', + 330: 'wood rabbit, cottontail, cottontail rabbit', + 331: 'hare', + 332: 'Angora, Angora rabbit', + 333: 'hamster', + 334: 'porcupine, hedgehog', + 335: 'fox squirrel, eastern fox squirrel, Sciurus niger', + 336: 'marmot', + 337: 'beaver', + 338: 'guinea pig, Cavia cobaya', + 339: 'sorrel', + 340: 'zebra', + 341: 'hog, pig, grunter, squealer, Sus scrofa', + 342: 'wild boar, boar, Sus scrofa', + 343: 'warthog', + 344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius', + 345: 'ox', + 346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis', + 347: 'bison', + 348: 'ram, tup', + 349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis', + 350: 'ibex, Capra ibex', + 351: 'hartebeest', + 352: 'impala, Aepyceros melampus', + 353: 'gazelle', + 354: 'Arabian camel, dromedary, Camelus dromedarius', + 355: 'llama', + 356: 'weasel', + 357: 'mink', + 358: 'polecat, fitch, foulmart, foumart, Mustela putorius', + 359: 'black-footed ferret, ferret, Mustela nigripes', + 360: 'otter', + 361: 'skunk, polecat, wood pussy', + 362: 'badger', + 363: 'armadillo', + 364: 'three-toed sloth, ai, Bradypus tridactylus', + 365: 'orangutan, orang, orangutang, Pongo pygmaeus', + 366: 'gorilla, Gorilla gorilla', + 367: 'chimpanzee, chimp, Pan troglodytes', + 368: 'gibbon, Hylobates lar', + 369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus', + 370: 'guenon, guenon monkey', + 371: 'patas, hussar monkey, Erythrocebus patas', + 372: 'baboon', + 373: 'macaque', + 374: 'langur', + 375: 'colobus, colobus monkey', + 376: 'proboscis monkey, Nasalis larvatus', + 377: 'marmoset', + 378: 'capuchin, ringtail, Cebus capucinus', + 379: 'howler monkey, howler', + 380: 'titi, titi monkey', + 381: 'spider monkey, Ateles geoffroyi', + 382: 'squirrel monkey, Saimiri sciureus', + 383: 'Madagascar cat, ring-tailed lemur, Lemur catta', + 384: 'indri, indris, Indri indri, Indri brevicaudatus', + 385: 'Indian elephant, Elephas maximus', + 386: 'African elephant, Loxodonta africana', + 387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens', + 388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca', + 389: 'barracouta, snoek', + 390: 'eel', + 391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch', + 392: 'rock beauty, Holocanthus tricolor', + 393: 'anemone fish', + 394: 'sturgeon', + 395: 'gar, garfish, garpike, billfish, Lepisosteus osseus', + 396: 'lionfish', + 397: 'puffer, pufferfish, blowfish, globefish', + 398: 'abacus', + 399: 'abaya', + 400: "academic gown, academic robe, judge's robe", + 401: 'accordion, piano accordion, squeeze box', + 402: 'acoustic guitar', + 403: 'aircraft carrier, carrier, flattop, attack aircraft carrier', + 404: 'airliner', + 405: 'airship, dirigible', + 406: 'altar', + 407: 'ambulance', + 408: 'amphibian, amphibious vehicle', + 409: 'analog clock', + 410: 'apiary, bee house', + 411: 'apron', + 412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin', + 413: 'assault rifle, assault gun', + 414: 'backpack, back pack, knapsack, packsack, rucksack, haversack', + 415: 'bakery, bakeshop, bakehouse', + 416: 'balance beam, beam', + 417: 'balloon', + 418: 'ballpoint, ballpoint pen, ballpen, Biro', + 419: 'Band Aid', + 420: 'banjo', + 421: 'bannister, banister, balustrade, balusters, handrail', + 422: 'barbell', + 423: 'barber chair', + 424: 'barbershop', + 425: 'barn', + 
426: 'barometer', + 427: 'barrel, cask', + 428: 'barrow, garden cart, lawn cart, wheelbarrow', + 429: 'baseball', + 430: 'basketball', + 431: 'bassinet', + 432: 'bassoon', + 433: 'bathing cap, swimming cap', + 434: 'bath towel', + 435: 'bathtub, bathing tub, bath, tub', + 436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon', + 437: 'beacon, lighthouse, beacon light, pharos', + 438: 'beaker', + 439: 'bearskin, busby, shako', + 440: 'beer bottle', + 441: 'beer glass', + 442: 'bell cote, bell cot', + 443: 'bib', + 444: 'bicycle-built-for-two, tandem bicycle, tandem', + 445: 'bikini, two-piece', + 446: 'binder, ring-binder', + 447: 'binoculars, field glasses, opera glasses', + 448: 'birdhouse', + 449: 'boathouse', + 450: 'bobsled, bobsleigh, bob', + 451: 'bolo tie, bolo, bola tie, bola', + 452: 'bonnet, poke bonnet', + 453: 'bookcase', + 454: 'bookshop, bookstore, bookstall', + 455: 'bottlecap', + 456: 'bow', + 457: 'bow tie, bow-tie, bowtie', + 458: 'brass, memorial tablet, plaque', + 459: 'brassiere, bra, bandeau', + 460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty', + 461: 'breastplate, aegis, egis', + 462: 'broom', + 463: 'bucket, pail', + 464: 'buckle', + 465: 'bulletproof vest', + 466: 'bullet train, bullet', + 467: 'butcher shop, meat market', + 468: 'cab, hack, taxi, taxicab', + 469: 'caldron, cauldron', + 470: 'candle, taper, wax light', + 471: 'cannon', + 472: 'canoe', + 473: 'can opener, tin opener', + 474: 'cardigan', + 475: 'car mirror', + 476: 'carousel, carrousel, merry-go-round, roundabout, whirligig', + 477: "carpenter's kit, tool kit", + 478: 'carton', + 479: 'car wheel', + 480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM', + 481: 'cassette', + 482: 'cassette player', + 483: 'castle', + 484: 'catamaran', + 485: 'CD player', + 486: 'cello, violoncello', + 487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone', + 488: 'chain', + 489: 'chainlink fence', + 490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour', + 491: 'chain saw, chainsaw', + 492: 'chest', + 493: 'chiffonier, commode', + 494: 'chime, bell, gong', + 495: 'china cabinet, china closet', + 496: 'Christmas stocking', + 497: 'church, church building', + 498: 'cinema, movie theater, movie theatre, movie house, picture palace', + 499: 'cleaver, meat cleaver, chopper', + 500: 'cliff dwelling', + 501: 'cloak', + 502: 'clog, geta, patten, sabot', + 503: 'cocktail shaker', + 504: 'coffee mug', + 505: 'coffeepot', + 506: 'coil, spiral, volute, whorl, helix', + 507: 'combination lock', + 508: 'computer keyboard, keypad', + 509: 'confectionery, confectionary, candy store', + 510: 'container ship, containership, container vessel', + 511: 'convertible', + 512: 'corkscrew, bottle screw', + 513: 'cornet, horn, trumpet, trump', + 514: 'cowboy boot', + 515: 'cowboy hat, ten-gallon hat', + 516: 'cradle', + 517: 'crane', + 518: 'crash helmet', + 519: 'crate', + 520: 'crib, cot', + 521: 'Crock Pot', + 522: 'croquet ball', + 523: 'crutch', + 524: 'cuirass', + 525: 'dam, dike, dyke', + 526: 'desk', + 527: 'desktop computer', + 528: 'dial telephone, dial phone', + 529: 'diaper, nappy, napkin', + 530: 'digital clock', + 531: 'digital watch', + 532: 'dining table, board', + 533: 'dishrag, dishcloth', + 534: 'dishwasher, dish washer, dishwashing machine', + 535: 'disk brake, disc brake', + 536: 'dock, dockage, docking facility', + 537: 'dogsled, dog sled, dog 
sleigh', + 538: 'dome', + 539: 'doormat, welcome mat', + 540: 'drilling platform, offshore rig', + 541: 'drum, membranophone, tympan', + 542: 'drumstick', + 543: 'dumbbell', + 544: 'Dutch oven', + 545: 'electric fan, blower', + 546: 'electric guitar', + 547: 'electric locomotive', + 548: 'entertainment center', + 549: 'envelope', + 550: 'espresso maker', + 551: 'face powder', + 552: 'feather boa, boa', + 553: 'file, file cabinet, filing cabinet', + 554: 'fireboat', + 555: 'fire engine, fire truck', + 556: 'fire screen, fireguard', + 557: 'flagpole, flagstaff', + 558: 'flute, transverse flute', + 559: 'folding chair', + 560: 'football helmet', + 561: 'forklift', + 562: 'fountain', + 563: 'fountain pen', + 564: 'four-poster', + 565: 'freight car', + 566: 'French horn, horn', + 567: 'frying pan, frypan, skillet', + 568: 'fur coat', + 569: 'garbage truck, dustcart', + 570: 'gasmask, respirator, gas helmet', + 571: 'gas pump, gasoline pump, petrol pump, island dispenser', + 572: 'goblet', + 573: 'go-kart', + 574: 'golf ball', + 575: 'golfcart, golf cart', + 576: 'gondola', + 577: 'gong, tam-tam', + 578: 'gown', + 579: 'grand piano, grand', + 580: 'greenhouse, nursery, glasshouse', + 581: 'grille, radiator grille', + 582: 'grocery store, grocery, food market, market', + 583: 'guillotine', + 584: 'hair slide', + 585: 'hair spray', + 586: 'half track', + 587: 'hammer', + 588: 'hamper', + 589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier', + 590: 'hand-held computer, hand-held microcomputer', + 591: 'handkerchief, hankie, hanky, hankey', + 592: 'hard disc, hard disk, fixed disk', + 593: 'harmonica, mouth organ, harp, mouth harp', + 594: 'harp', + 595: 'harvester, reaper', + 596: 'hatchet', + 597: 'holster', + 598: 'home theater, home theatre', + 599: 'honeycomb', + 600: 'hook, claw', + 601: 'hoopskirt, crinoline', + 602: 'horizontal bar, high bar', + 603: 'horse cart, horse-cart', + 604: 'hourglass', + 605: 'iPod', + 606: 'iron, smoothing iron', + 607: "jack-o'-lantern", + 608: 'jean, blue jean, denim', + 609: 'jeep, landrover', + 610: 'jersey, T-shirt, tee shirt', + 611: 'jigsaw puzzle', + 612: 'jinrikisha, ricksha, rickshaw', + 613: 'joystick', + 614: 'kimono', + 615: 'knee pad', + 616: 'knot', + 617: 'lab coat, laboratory coat', + 618: 'ladle', + 619: 'lampshade, lamp shade', + 620: 'laptop, laptop computer', + 621: 'lawn mower, mower', + 622: 'lens cap, lens cover', + 623: 'letter opener, paper knife, paperknife', + 624: 'library', + 625: 'lifeboat', + 626: 'lighter, light, igniter, ignitor', + 627: 'limousine, limo', + 628: 'liner, ocean liner', + 629: 'lipstick, lip rouge', + 630: 'Loafer', + 631: 'lotion', + 632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system', + 633: "loupe, jeweler's loupe", + 634: 'lumbermill, sawmill', + 635: 'magnetic compass', + 636: 'mailbag, postbag', + 637: 'mailbox, letter box', + 638: 'maillot', + 639: 'maillot, tank suit', + 640: 'manhole cover', + 641: 'maraca', + 642: 'marimba, xylophone', + 643: 'mask', + 644: 'matchstick', + 645: 'maypole', + 646: 'maze, labyrinth', + 647: 'measuring cup', + 648: 'medicine chest, medicine cabinet', + 649: 'megalith, megalithic structure', + 650: 'microphone, mike', + 651: 'microwave, microwave oven', + 652: 'military uniform', + 653: 'milk can', + 654: 'minibus', + 655: 'miniskirt, mini', + 656: 'minivan', + 657: 'missile', + 658: 'mitten', + 659: 'mixing bowl', + 660: 'mobile home, manufactured home', + 661: 'Model T', + 662: 'modem', + 663: 'monastery', + 664: 'monitor', + 665: 
'moped', + 666: 'mortar', + 667: 'mortarboard', + 668: 'mosque', + 669: 'mosquito net', + 670: 'motor scooter, scooter', + 671: 'mountain bike, all-terrain bike, off-roader', + 672: 'mountain tent', + 673: 'mouse, computer mouse', + 674: 'mousetrap', + 675: 'moving van', + 676: 'muzzle', + 677: 'nail', + 678: 'neck brace', + 679: 'necklace', + 680: 'nipple', + 681: 'notebook, notebook computer', + 682: 'obelisk', + 683: 'oboe, hautboy, hautbois', + 684: 'ocarina, sweet potato', + 685: 'odometer, hodometer, mileometer, milometer', + 686: 'oil filter', + 687: 'organ, pipe organ', + 688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO', + 689: 'overskirt', + 690: 'oxcart', + 691: 'oxygen mask', + 692: 'packet', + 693: 'paddle, boat paddle', + 694: 'paddlewheel, paddle wheel', + 695: 'padlock', + 696: 'paintbrush', + 697: "pajama, pyjama, pj's, jammies", + 698: 'palace', + 699: 'panpipe, pandean pipe, syrinx', + 700: 'paper towel', + 701: 'parachute, chute', + 702: 'parallel bars, bars', + 703: 'park bench', + 704: 'parking meter', + 705: 'passenger car, coach, carriage', + 706: 'patio, terrace', + 707: 'pay-phone, pay-station', + 708: 'pedestal, plinth, footstall', + 709: 'pencil box, pencil case', + 710: 'pencil sharpener', + 711: 'perfume, essence', + 712: 'Petri dish', + 713: 'photocopier', + 714: 'pick, plectrum, plectron', + 715: 'pickelhaube', + 716: 'picket fence, paling', + 717: 'pickup, pickup truck', + 718: 'pier', + 719: 'piggy bank, penny bank', + 720: 'pill bottle', + 721: 'pillow', + 722: 'ping-pong ball', + 723: 'pinwheel', + 724: 'pirate, pirate ship', + 725: 'pitcher, ewer', + 726: "plane, carpenter's plane, woodworking plane", + 727: 'planetarium', + 728: 'plastic bag', + 729: 'plate rack', + 730: 'plow, plough', + 731: "plunger, plumber's helper", + 732: 'Polaroid camera, Polaroid Land camera', + 733: 'pole', + 734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria', + 735: 'poncho', + 736: 'pool table, billiard table, snooker table', + 737: 'pop bottle, soda bottle', + 738: 'pot, flowerpot', + 739: "potter's wheel", + 740: 'power drill', + 741: 'prayer rug, prayer mat', + 742: 'printer', + 743: 'prison, prison house', + 744: 'projectile, missile', + 745: 'projector', + 746: 'puck, hockey puck', + 747: 'punching bag, punch bag, punching ball, punchball', + 748: 'purse', + 749: 'quill, quill pen', + 750: 'quilt, comforter, comfort, puff', + 751: 'racer, race car, racing car', + 752: 'racket, racquet', + 753: 'radiator', + 754: 'radio, wireless', + 755: 'radio telescope, radio reflector', + 756: 'rain barrel', + 757: 'recreational vehicle, RV, R.V.', + 758: 'reel', + 759: 'reflex camera', + 760: 'refrigerator, icebox', + 761: 'remote control, remote', + 762: 'restaurant, eating house, eating place, eatery', + 763: 'revolver, six-gun, six-shooter', + 764: 'rifle', + 765: 'rocking chair, rocker', + 766: 'rotisserie', + 767: 'rubber eraser, rubber, pencil eraser', + 768: 'rugby ball', + 769: 'rule, ruler', + 770: 'running shoe', + 771: 'safe', + 772: 'safety pin', + 773: 'saltshaker, salt shaker', + 774: 'sandal', + 775: 'sarong', + 776: 'sax, saxophone', + 777: 'scabbard', + 778: 'scale, weighing machine', + 779: 'school bus', + 780: 'schooner', + 781: 'scoreboard', + 782: 'screen, CRT screen', + 783: 'screw', + 784: 'screwdriver', + 785: 'seat belt, seatbelt', + 786: 'sewing machine', + 787: 'shield, buckler', + 788: 'shoe shop, shoe-shop, shoe store', + 789: 'shoji', + 790: 'shopping basket', + 791: 'shopping cart', + 792: 'shovel', + 793: 'shower 
cap', + 794: 'shower curtain', + 795: 'ski', + 796: 'ski mask', + 797: 'sleeping bag', + 798: 'slide rule, slipstick', + 799: 'sliding door', + 800: 'slot, one-armed bandit', + 801: 'snorkel', + 802: 'snowmobile', + 803: 'snowplow, snowplough', + 804: 'soap dispenser', + 805: 'soccer ball', + 806: 'sock', + 807: 'solar dish, solar collector, solar furnace', + 808: 'sombrero', + 809: 'soup bowl', + 810: 'space bar', + 811: 'space heater', + 812: 'space shuttle', + 813: 'spatula', + 814: 'speedboat', + 815: "spider web, spider's web", + 816: 'spindle', + 817: 'sports car, sport car', + 818: 'spotlight, spot', + 819: 'stage', + 820: 'steam locomotive', + 821: 'steel arch bridge', + 822: 'steel drum', + 823: 'stethoscope', + 824: 'stole', + 825: 'stone wall', + 826: 'stopwatch, stop watch', + 827: 'stove', + 828: 'strainer', + 829: 'streetcar, tram, tramcar, trolley, trolley car', + 830: 'stretcher', + 831: 'studio couch, day bed', + 832: 'stupa, tope', + 833: 'submarine, pigboat, sub, U-boat', + 834: 'suit, suit of clothes', + 835: 'sundial', + 836: 'sunglass', + 837: 'sunglasses, dark glasses, shades', + 838: 'sunscreen, sunblock, sun blocker', + 839: 'suspension bridge', + 840: 'swab, swob, mop', + 841: 'sweatshirt', + 842: 'swimming trunks, bathing trunks', + 843: 'swing', + 844: 'switch, electric switch, electrical switch', + 845: 'syringe', + 846: 'table lamp', + 847: 'tank, army tank, armored combat vehicle, armoured combat vehicle', + 848: 'tape player', + 849: 'teapot', + 850: 'teddy, teddy bear', + 851: 'television, television system', + 852: 'tennis ball', + 853: 'thatch, thatched roof', + 854: 'theater curtain, theatre curtain', + 855: 'thimble', + 856: 'thresher, thrasher, threshing machine', + 857: 'throne', + 858: 'tile roof', + 859: 'toaster', + 860: 'tobacco shop, tobacconist shop, tobacconist', + 861: 'toilet seat', + 862: 'torch', + 863: 'totem pole', + 864: 'tow truck, tow car, wrecker', + 865: 'toyshop', + 866: 'tractor', + 867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi', + 868: 'tray', + 869: 'trench coat', + 870: 'tricycle, trike, velocipede', + 871: 'trimaran', + 872: 'tripod', + 873: 'triumphal arch', + 874: 'trolleybus, trolley coach, trackless trolley', + 875: 'trombone', + 876: 'tub, vat', + 877: 'turnstile', + 878: 'typewriter keyboard', + 879: 'umbrella', + 880: 'unicycle, monocycle', + 881: 'upright, upright piano', + 882: 'vacuum, vacuum cleaner', + 883: 'vase', + 884: 'vault', + 885: 'velvet', + 886: 'vending machine', + 887: 'vestment', + 888: 'viaduct', + 889: 'violin, fiddle', + 890: 'volleyball', + 891: 'waffle iron', + 892: 'wall clock', + 893: 'wallet, billfold, notecase, pocketbook', + 894: 'wardrobe, closet, press', + 895: 'warplane, military plane', + 896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin', + 897: 'washer, automatic washer, washing machine', + 898: 'water bottle', + 899: 'water jug', + 900: 'water tower', + 901: 'whiskey jug', + 902: 'whistle', + 903: 'wig', + 904: 'window screen', + 905: 'window shade', + 906: 'Windsor tie', + 907: 'wine bottle', + 908: 'wing', + 909: 'wok', + 910: 'wooden spoon', + 911: 'wool, woolen, woollen', + 912: 'worm fence, snake fence, snake-rail fence, Virginia fence', + 913: 'wreck', + 914: 'yawl', + 915: 'yurt', + 916: 'web site, website, internet site, site', + 917: 'comic book', + 918: 'crossword puzzle, crossword', + 919: 'street sign', + 920: 'traffic light, traffic signal, stoplight', + 921: 'book jacket, dust cover, dust jacket, dust wrapper', + 922: 'menu', + 
923: 'plate', + 924: 'guacamole', + 925: 'consomme', + 926: 'hot pot, hotpot', + 927: 'trifle', + 928: 'ice cream, icecream', + 929: 'ice lolly, lolly, lollipop, popsicle', + 930: 'French loaf', + 931: 'bagel, beigel', + 932: 'pretzel', + 933: 'cheeseburger', + 934: 'hotdog, hot dog, red hot', + 935: 'mashed potato', + 936: 'head cabbage', + 937: 'broccoli', + 938: 'cauliflower', + 939: 'zucchini, courgette', + 940: 'spaghetti squash', + 941: 'acorn squash', + 942: 'butternut squash', + 943: 'cucumber, cuke', + 944: 'artichoke, globe artichoke', + 945: 'bell pepper', + 946: 'cardoon', + 947: 'mushroom', + 948: 'Granny Smith', + 949: 'strawberry', + 950: 'orange', + 951: 'lemon', + 952: 'fig', + 953: 'pineapple, ananas', + 954: 'banana', + 955: 'jackfruit, jak, jack', + 956: 'custard apple', + 957: 'pomegranate', + 958: 'hay', + 959: 'carbonara', + 960: 'chocolate sauce, chocolate syrup', + 961: 'dough', + 962: 'meat loaf, meatloaf', + 963: 'pizza, pizza pie', + 964: 'potpie', + 965: 'burrito', + 966: 'red wine', + 967: 'espresso', + 968: 'cup', + 969: 'eggnog', + 970: 'alp', + 971: 'bubble', + 972: 'cliff, drop, drop-off', + 973: 'coral reef', + 974: 'geyser', + 975: 'lakeside, lakeshore', + 976: 'promontory, headland, head, foreland', + 977: 'sandbar, sand bar', + 978: 'seashore, coast, seacoast, sea-coast', + 979: 'valley, vale', + 980: 'volcano', + 981: 'ballplayer, baseball player', + 982: 'groom, bridegroom', + 983: 'scuba diver', + 984: 'rapeseed', + 985: 'daisy', + 986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", + 987: 'corn', + 988: 'acorn', + 989: 'hip, rose hip, rosehip', + 990: 'buckeye, horse chestnut, conker', + 991: 'coral fungus', + 992: 'agaric', + 993: 'gyromitra', + 994: 'stinkhorn, carrion fungus', + 995: 'earthstar', + 996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa', + 997: 'bolete', + 998: 'ear, spike, capitulum', + 999: 'toilet tissue, toilet paper, bathroom tissue'} \ No newline at end of file diff --git a/tinygrad_repo/test/models/test_bert.py b/tinygrad_repo/test/models/test_bert.py new file mode 100644 index 0000000..86588e0 --- /dev/null +++ b/tinygrad_repo/test/models/test_bert.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +import unittest +import numpy as np +from tinygrad.tensor import Tensor +from tinygrad.ops import Device +import torch + +def get_question_samp(bsz, seq_len, vocab_size, seed): + np.random.seed(seed) + in_ids= np.random.randint(vocab_size, size=(bsz, seq_len)) + mask = np.random.choice([True, False], size=(bsz, seq_len)) + seg_ids = np.random.randint(1, size=(bsz, seq_len)) + return in_ids, mask, seg_ids + +def set_equal_weights(mdl, torch_mdl): + from tinygrad.nn.state import get_state_dict + state, torch_state = get_state_dict(mdl), torch_mdl.state_dict() + assert len(state) == len(torch_state) + for k, v in state.items(): + assert k in torch_state + torch_state[k].copy_(torch.from_numpy(v.numpy())) + torch_mdl.eval() + +class TestBert(unittest.TestCase): + def test_questions(self): + from models.bert import BertForQuestionAnswering + from transformers import BertForQuestionAnswering as TorchBertForQuestionAnswering + from transformers import BertConfig + + # small + config = { + 'vocab_size':24, 'hidden_size':2, 'num_hidden_layers':2, 'num_attention_heads':2, + 'intermediate_size':32, 'hidden_dropout_prob':0.1, 'attention_probs_dropout_prob':0.1, + 'max_position_embeddings':512, 'type_vocab_size':2 + } + + # Create in tinygrad + Tensor.manual_seed(1337) + mdl 
= BertForQuestionAnswering(**config) + + # Create in torch + with torch.no_grad(): + torch_mdl = TorchBertForQuestionAnswering(BertConfig(**config)) + + set_equal_weights(mdl, torch_mdl) + + seeds = (1337, 3141) + bsz, seq_len = 1, 16 + for _, seed in enumerate(seeds): + in_ids, mask, seg_ids = get_question_samp(bsz, seq_len, config['vocab_size'], seed) + out = mdl(Tensor(in_ids), Tensor(mask), Tensor(seg_ids)) + torch_out = torch_mdl.forward(torch.from_numpy(in_ids).long(), torch.from_numpy(mask), torch.from_numpy(seg_ids).long())[:2] + torch_out = torch.cat(torch_out).unsqueeze(2) + np.testing.assert_allclose(out.numpy(), torch_out.detach().numpy(), atol=5e-4, rtol=5e-4) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/models/test_efficientnet.py b/tinygrad_repo/test/models/test_efficientnet.py new file mode 100644 index 0000000..76108a5 --- /dev/null +++ b/tinygrad_repo/test/models/test_efficientnet.py @@ -0,0 +1,115 @@ +import ast +import pathlib +import sys +import unittest + +import numpy as np +from PIL import Image + +from tinygrad.helpers import getenv +from tinygrad.tensor import Tensor +from models.efficientnet import EfficientNet +from models.vit import ViT +from models.resnet import ResNet50 + +def _load_labels(): + labels_filename = pathlib.Path(__file__).parent / 'efficientnet/imagenet1000_clsidx_to_labels.txt' + return ast.literal_eval(labels_filename.read_text()) + +_LABELS = _load_labels() + +def preprocess(img, new=False): + # preprocess image + aspect_ratio = img.size[0] / img.size[1] + img = img.resize((int(224*max(aspect_ratio,1.0)), int(224*max(1.0/aspect_ratio,1.0)))) + + img = np.array(img) + y0, x0 =(np.asarray(img.shape)[:2] - 224) // 2 + img = img[y0: y0 + 224, x0: x0 + 224] + + # low level preprocess + if new: + img = img.astype(np.float32) + img -= [127.0, 127.0, 127.0] + img /= [128.0, 128.0, 128.0] + img = img[None] + else: + img = np.moveaxis(img, [2, 0, 1], [0, 1, 2]) + img = img.astype(np.float32)[:3].reshape(1, 3, 224, 224) + img /= 255.0 + img -= np.array([0.485, 0.456, 0.406]).reshape((1, -1, 1, 1)) + img /= np.array([0.229, 0.224, 0.225]).reshape((1, -1, 1, 1)) + return img + + +def _infer(model: EfficientNet, img, bs=1): + Tensor.training = False + img = preprocess(img) + # run the net + if bs > 1: img = img.repeat(bs, axis=0) + out = model.forward(Tensor(img)).cpu() + return _LABELS[np.argmax(out.numpy()[0])] + +chicken_img = Image.open(pathlib.Path(__file__).parent / 'efficientnet/Chicken.jpg') +car_img = Image.open(pathlib.Path(__file__).parent / 'efficientnet/car.jpg') + +class TestEfficientNet(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.model = EfficientNet(number=getenv("NUM")) + cls.model.load_from_pretrained() + + @classmethod + def tearDownClass(cls): + del cls.model + + def test_chicken(self): + label = _infer(self.model, chicken_img) + self.assertEqual(label, "hen") + + def test_chicken_bigbatch(self): + label = _infer(self.model, chicken_img, 2) + self.assertEqual(label, "hen") + + def test_car(self): + label = _infer(self.model, car_img) + self.assertEqual(label, "sports car, sport car") + +class TestViT(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.model = ViT() + cls.model.load_from_pretrained() + + @classmethod + def tearDownClass(cls): + del cls.model + + def test_chicken(self): + label = _infer(self.model, chicken_img) + self.assertEqual(label, "cock") + + def test_car(self): + label = _infer(self.model, car_img) + self.assertEqual(label, "racer, race car, racing 
car") + +class TestResNet(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.model = ResNet50() + cls.model.load_from_pretrained() + + @classmethod + def tearDownClass(cls): + del cls.model + + def test_chicken(self): + label = _infer(self.model, chicken_img) + self.assertEqual(label, "hen") + + def test_car(self): + label = _infer(self.model, car_img) + self.assertEqual(label, "sports car, sport car") + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/models/test_end2end.py b/tinygrad_repo/test/models/test_end2end.py new file mode 100644 index 0000000..8ea00b0 --- /dev/null +++ b/tinygrad_repo/test/models/test_end2end.py @@ -0,0 +1,165 @@ +import torch +from torch import nn +import unittest +import numpy as np +from tinygrad.nn.state import get_parameters, get_state_dict +from tinygrad.nn import optim, Linear, Conv2d, BatchNorm2d +from tinygrad.tensor import Tensor +from extra.datasets import fetch_mnist +from tinygrad.helpers import CI + +def compare_tiny_torch(model, model_torch, X, Y): + with Tensor.train(): + model_torch.train() + model_state_dict = get_state_dict(model) + for k,v in model_torch.named_parameters(): + if not CI: print(f"initting {k} from torch") + model_state_dict[k].assign(Tensor(v.detach().numpy())).realize() + + optimizer = optim.SGD(get_parameters(model), lr=0.001) + optimizer_torch = torch.optim.SGD(model_torch.parameters(), lr=0.001) + + Xt = torch.Tensor(X.numpy()) + np.testing.assert_allclose(X.numpy(), Xt.detach().numpy()) + + out = model(X) + loss = (out * Y).mean() + if not CI: print(loss.realize().numpy()) + + out_torch = model_torch(torch.Tensor(X.numpy())) + loss_torch = (out_torch * torch.Tensor(Y.numpy())).mean() + if not CI: print(loss_torch.detach().numpy()) + + # assert losses match + np.testing.assert_allclose(loss.realize().numpy(), loss_torch.detach().numpy(), atol=1e-4) + + # zero and backward + optimizer.zero_grad() + loss.backward() + optimizer_torch.zero_grad() + loss_torch.backward() + + for k,v in list(model_torch.named_parameters())[::-1]: + g = model_state_dict[k].grad.numpy() + gt = v.grad.detach().numpy() + if not CI: print("testing grads", k) + np.testing.assert_allclose(g, gt, atol=1e-3, err_msg=f'grad mismatch {k}') + + # take the steps + optimizer.step() + optimizer_torch.step() + + # assert weights match (they don't!) 
+ for k,v in model_torch.named_parameters(): + if not CI: print("testing weight", k) + np.testing.assert_allclose(model_state_dict[k].numpy(), v.detach().numpy(), atol=1e-3, err_msg=f'weight mismatch {k}') + +def get_mnist_data(): + X_train, Y_train, X_test, Y_test = fetch_mnist() + BS = 32 + num_classes = 10 + X = Tensor(X_test[0:BS].astype(np.float32)) + Y = np.zeros((BS, num_classes), np.float32) + Y[range(BS),Y_test[0:BS]] = -1.0*num_classes + return X, Tensor(Y) + +class TestEnd2End(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.X, cls.Y = get_mnist_data() + + def setUp(self): + torch.manual_seed(123) + + def test_linear_mnist(self): + class LinTiny: + def __init__(self, has_batchnorm=False): + self.l1 = Linear(784, 128) + self.l2 = Linear(128, 10) + self.bn1 = BatchNorm2d(128) if has_batchnorm else lambda x: x + def __call__(self, x): + return self.l2(self.l1(x)).relu().log_softmax(-1) + class LinTorch(nn.Module): + def __init__(self, has_batchnorm=False): + super().__init__() + self.l1 = nn.Linear(784, 128) + self.l2 = nn.Linear(128, 10) + def forward(self, x): + return self.l2(self.l1(x)).relu().log_softmax(-1) + compare_tiny_torch(LinTiny(), LinTorch(), self.X, self.Y) + + def test_bn_mnist(self): + class LinTiny: + def __init__(self): + self.l1 = Linear(784, 128) + self.l2 = Linear(128, 10) + self.bn1 = BatchNorm2d(128) + def __call__(self, x): + return self.l2(self.bn1(self.l1(x).reshape(x.shape[0], -1, 1, 1)).reshape(x.shape[0], -1).relu()).log_softmax(-1) + class LinTorch(nn.Module): + def __init__(self): + super().__init__() + self.l1 = nn.Linear(784, 128) + self.l2 = nn.Linear(128, 10) + self.bn1 = nn.BatchNorm2d(128) + def forward(self, x): + return self.l2(self.bn1(self.l1(x).reshape(x.shape[0], -1, 1, 1)).reshape(x.shape[0], -1).relu()).log_softmax(-1) + compare_tiny_torch(LinTiny(), LinTorch(), self.X, self.Y) + + def test_bn_alone(self): + np.random.seed(1337) + X = Tensor(np.random.randn(32, 10, 1, 1).astype(np.float32)) + Y = Tensor(np.random.randn(32, 10, 1, 1).astype(np.float32)) + compare_tiny_torch(BatchNorm2d(10), nn.BatchNorm2d(10), X, Y) + + def test_bn_linear(self): + BS, K = 2, 1 + eps = 0 + X = Tensor([1,0]).reshape(BS, K, 1, 1) + Y = Tensor([-1,0]).reshape(BS, K, 1, 1) + class LinTiny: + def __init__(self): + self.l1 = Conv2d(K, K, 1, bias=False) + self.bn1 = BatchNorm2d(K, affine=False, track_running_stats=False, eps=eps) + def __call__(self, x): return self.bn1(self.l1(x)) + class LinTorch(nn.Module): + def __init__(self): + super().__init__() + self.l1 = nn.Conv2d(K, K, 1, bias=False) + self.bn1 = nn.BatchNorm2d(K, affine=False, track_running_stats=False, eps=eps) + def forward(self, x): return self.bn1(self.l1(x)) + model_torch = LinTorch() + with torch.no_grad(): + model_torch.l1.weight[:] = 1. 
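+ # pin the torch conv weight to a known value; compare_tiny_torch copies the torch
+ # parameters into the tinygrad model, so both models start from this deterministic state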
+ compare_tiny_torch(LinTiny(), model_torch, X, Y) + + def test_conv_mnist(self): + class LinTiny: + def __init__(self, has_batchnorm=False): + self.c1 = Conv2d(1, 8, 3, stride=2) + self.c2 = Conv2d(8, 16, 3, stride=2) + self.l1 = Linear(16*6*6, 10) + if has_batchnorm: + self.bn1, self.bn2 = BatchNorm2d(8), BatchNorm2d(16) + else: + self.bn1, self.bn2 = lambda x: x, lambda x: x + def __call__(self, x): + return self.l1(self.bn2(self.c2(self.bn1(self.c1(x)).relu())).relu().reshape(x.shape[0], -1)).log_softmax(-1) + class LinTorch(nn.Module): + def __init__(self, has_batchnorm=False): + super().__init__() + self.c1 = nn.Conv2d(1, 8, 3, stride=2) + self.c2 = nn.Conv2d(8, 16, 3, stride=2) + self.l1 = nn.Linear(16*6*6, 10) + if has_batchnorm: + self.bn1, self.bn2 = nn.BatchNorm2d(8), nn.BatchNorm2d(16) + else: + self.bn1, self.bn2 = lambda x: x, lambda x: x + def forward(self, x): + return self.l1(self.bn2(self.c2(self.bn1(self.c1(x)).relu())).relu().reshape(x.shape[0], -1)).log_softmax(-1) + for has_batchnorm in [False, True]: + with self.subTest(has_batchnorm=has_batchnorm): + compare_tiny_torch(LinTiny(has_batchnorm), LinTorch(has_batchnorm), self.X.reshape((-1, 1, 28, 28)), self.Y) + +if __name__ == "__main__": + unittest.main() diff --git a/tinygrad_repo/test/models/test_mnist.py b/tinygrad_repo/test/models/test_mnist.py new file mode 100644 index 0000000..f3f37c3 --- /dev/null +++ b/tinygrad_repo/test/models/test_mnist.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +import unittest +import numpy as np +from tinygrad.nn.state import get_parameters +from tinygrad.tensor import Tensor, Device +from tinygrad.nn import optim, BatchNorm2d +from extra.training import train, evaluate +from extra.datasets import fetch_mnist +import pytest + +pytestmark = [pytest.mark.exclude_gpu, pytest.mark.exclude_clang] + +# load the mnist dataset +X_train, Y_train, X_test, Y_test = fetch_mnist() + +# create a model +class TinyBobNet: + def __init__(self): + self.l1 = Tensor.scaled_uniform(784, 128) + self.l2 = Tensor.scaled_uniform(128, 10) + + def parameters(self): + return get_parameters(self) + + def forward(self, x): + return x.dot(self.l1).relu().dot(self.l2).log_softmax() + +# create a model with a conv layer +class TinyConvNet: + def __init__(self, has_batchnorm=False): + # https://keras.io/examples/vision/mnist_convnet/ + conv = 3 + #inter_chan, out_chan = 32, 64 + inter_chan, out_chan = 8, 16 # for speed + self.c1 = Tensor.scaled_uniform(inter_chan,1,conv,conv) + self.c2 = Tensor.scaled_uniform(out_chan,inter_chan,conv,conv) + self.l1 = Tensor.scaled_uniform(out_chan*5*5, 10) + if has_batchnorm: + self.bn1 = BatchNorm2d(inter_chan) + self.bn2 = BatchNorm2d(out_chan) + else: + self.bn1, self.bn2 = lambda x: x, lambda x: x + + def parameters(self): + return get_parameters(self) + + def forward(self, x:Tensor): + x = x.reshape(shape=(-1, 1, 28, 28)) # hacks + x = self.bn1(x.conv2d(self.c1)).relu().max_pool2d() + x = self.bn2(x.conv2d(self.c2)).relu().max_pool2d() + x = x.reshape(shape=[x.shape[0], -1]) + return x.dot(self.l1).log_softmax() + +class TestMNIST(unittest.TestCase): + def test_sgd_onestep(self): + np.random.seed(1337) + model = TinyBobNet() + optimizer = optim.SGD(model.parameters(), lr=0.001) + train(model, X_train, Y_train, optimizer, BS=69, steps=1) + for p in model.parameters(): p.realize() + + def test_sgd_threestep(self): + np.random.seed(1337) + model = TinyBobNet() + optimizer = optim.SGD(model.parameters(), lr=0.001) + train(model, X_train, Y_train, optimizer, BS=69, steps=3) + + def 
test_sgd_sixstep(self): + np.random.seed(1337) + model = TinyBobNet() + optimizer = optim.SGD(model.parameters(), lr=0.001) + train(model, X_train, Y_train, optimizer, BS=69, steps=6, noloss=True) + + def test_adam_onestep(self): + np.random.seed(1337) + model = TinyBobNet() + optimizer = optim.Adam(model.parameters(), lr=0.001) + train(model, X_train, Y_train, optimizer, BS=69, steps=1) + for p in model.parameters(): p.realize() + + def test_adam_threestep(self): + np.random.seed(1337) + model = TinyBobNet() + optimizer = optim.Adam(model.parameters(), lr=0.001) + train(model, X_train, Y_train, optimizer, BS=69, steps=3) + + def test_conv_onestep(self): + np.random.seed(1337) + model = TinyConvNet() + optimizer = optim.SGD(model.parameters(), lr=0.001) + train(model, X_train, Y_train, optimizer, BS=69, steps=1, noloss=True) + for p in model.parameters(): p.realize() + + def test_conv(self): + np.random.seed(1337) + model = TinyConvNet() + optimizer = optim.Adam(model.parameters(), lr=0.001) + train(model, X_train, Y_train, optimizer, steps=100) + assert evaluate(model, X_test, Y_test) > 0.93 # torch gets 0.9415 sometimes + + def test_conv_with_bn(self): + np.random.seed(1337) + model = TinyConvNet(has_batchnorm=True) + optimizer = optim.AdamW(model.parameters(), lr=0.003) + train(model, X_train, Y_train, optimizer, steps=200) + assert evaluate(model, X_test, Y_test) > 0.94 + + def test_sgd(self): + np.random.seed(1337) + model = TinyBobNet() + optimizer = optim.SGD(model.parameters(), lr=0.001) + train(model, X_train, Y_train, optimizer, steps=600) + assert evaluate(model, X_test, Y_test) > 0.94 # CPU gets 0.9494 sometimes + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/models/test_onnx.py b/tinygrad_repo/test/models/test_onnx.py new file mode 100644 index 0000000..feffdab --- /dev/null +++ b/tinygrad_repo/test/models/test_onnx.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python +import os +import time +import io +import unittest +import numpy as np +import onnx +from extra.utils import fetch, temp +from extra.onnx import get_run_onnx +from tinygrad.tensor import Tensor +from tinygrad.helpers import CI +import pytest + +pytestmark = [pytest.mark.exclude_gpu, pytest.mark.exclude_clang] + +def run_onnx_torch(onnx_model, inputs): + import torch + from onnx2torch import convert + torch_model = convert(onnx_model).float() + with torch.no_grad(): + torch_out = torch_model(*[torch.tensor(x) for x in inputs.values()]) + return torch_out + +OPENPILOT_MODEL = "https://github.com/commaai/openpilot/raw/v0.9.4/selfdrive/modeld/models/supercombo.onnx" + +np.random.seed(1337) + +class TestOnnxModel(unittest.TestCase): + def test_benchmark_openpilot_model(self): + dat = fetch(OPENPILOT_MODEL) + onnx_model = onnx.load(io.BytesIO(dat)) + run_onnx = get_run_onnx(onnx_model) + def get_inputs(): + np_inputs = { + "input_imgs": np.random.randn(*(1, 12, 128, 256)), + "big_input_imgs": np.random.randn(*(1, 12, 128, 256)), + "desire": np.zeros((1, 100, 8)), + "traffic_convention": np.array([[1., 0.]]), + "nav_features": np.zeros((1, 256)), + "features_buffer": np.zeros((1, 99, 128)), + } + inputs = {k:Tensor(v.astype(np.float32), requires_grad=False) for k,v in np_inputs.items()} + return inputs + + for _ in range(7): + inputs = get_inputs() + st = time.monotonic() + tinygrad_out = run_onnx(inputs)['outputs'] + mt = time.monotonic() + tinygrad_out.realize() + mt2 = time.monotonic() + tinygrad_out = tinygrad_out.numpy() + et = time.monotonic() + if not CI: print(f"ran openpilot model in 
{(et-st)*1000.0:.2f} ms, waited {(mt2-mt)*1000.0:.2f} ms for realize, {(et-mt2)*1000.0:.2f} ms for GPU queue") + + if not CI: + import cProfile + import pstats + inputs = get_inputs() + pr = cProfile.Profile(timer=time.perf_counter_ns, timeunit=1e-6) + pr.enable() + tinygrad_out = run_onnx(inputs)['outputs'] + tinygrad_out.realize() + tinygrad_out = tinygrad_out.numpy() + if not CI: + pr.disable() + stats = pstats.Stats(pr) + stats.dump_stats(temp("net.prof")) + os.system(f"flameprof {temp('net.prof')} > {temp('prof.svg')}") + ps = stats.sort_stats(pstats.SortKey.TIME) + ps.print_stats(30) + + def test_openpilot_model(self): + dat = fetch(OPENPILOT_MODEL) + onnx_model = onnx.load(io.BytesIO(dat)) + run_onnx = get_run_onnx(onnx_model) + print("got run_onnx") + inputs = { + "input_imgs": np.random.randn(*(1, 12, 128, 256)), + "big_input_imgs": np.random.randn(*(1, 12, 128, 256)), + "desire": np.zeros((1, 100, 8)), + "traffic_convention": np.array([[1., 0.]]), + "nav_features": np.zeros((1, 256)), + "features_buffer": np.zeros((1, 99, 128)), + } + inputs = {k:v.astype(np.float32) for k,v in inputs.items()} + + st = time.monotonic() + print("****** run onnx ******") + tinygrad_out = run_onnx(inputs)['outputs'] + mt = time.monotonic() + print("****** realize ******") + tinygrad_out.realize() + mt2 = time.monotonic() + tinygrad_out = tinygrad_out.numpy() + et = time.monotonic() + print(f"ran openpilot model in {(et-st)*1000.0:.2f} ms, waited {(mt2-mt)*1000.0:.2f} ms for realize, {(et-mt2)*1000.0:.2f} ms for GPU queue") + + Tensor.no_grad = True + torch_out = run_onnx_torch(onnx_model, inputs).numpy() + Tensor.no_grad = False + print(tinygrad_out, torch_out) + np.testing.assert_allclose(torch_out, tinygrad_out, atol=1e-4, rtol=1e-2) + + def test_efficientnet(self): + dat = fetch("https://github.com/onnx/models/raw/main/vision/classification/efficientnet-lite4/model/efficientnet-lite4-11.onnx") + input_name, input_new = "images:0", True + self._test_model(dat, input_name, input_new) + + def test_shufflenet(self): + dat = fetch("https://github.com/onnx/models/raw/main/vision/classification/shufflenet/model/shufflenet-9.onnx") + print(f"shufflenet downloaded : {len(dat)/1e6:.2f} MB") + input_name, input_new = "gpu_0/data_0", False + self._test_model(dat, input_name, input_new) + + @unittest.skip("test is very slow") + def test_resnet(self): + # NOTE: many onnx models can't be run right now due to max pool with strides != kernel_size + dat = fetch("https://github.com/onnx/models/raw/main/vision/classification/resnet/model/resnet18-v2-7.onnx") + print(f"resnet downloaded : {len(dat)/1e6:.2f} MB") + input_name, input_new = "data", False + self._test_model(dat, input_name, input_new) + + def _test_model(self, dat, input_name, input_new, debug=False): + onnx_model = onnx.load(io.BytesIO(dat)) + print("onnx loaded") + from test.models.test_efficientnet import chicken_img, car_img, preprocess, _LABELS + run_onnx = get_run_onnx(onnx_model) + + def run(img): + inputs = {input_name: preprocess(img, new=input_new)} + tinygrad_out = list(run_onnx(inputs, debug=debug).values())[0].numpy() + return tinygrad_out.argmax() + + cls = run(chicken_img) + print(cls, _LABELS[cls]) + assert _LABELS[cls] == "hen" or _LABELS[cls] == "cock" + cls = run(car_img) + print(cls, _LABELS[cls]) + assert "car" in _LABELS[cls] or _LABELS[cls] == "convertible" + +if __name__ == "__main__": + unittest.main() diff --git a/tinygrad_repo/test/models/test_real_world.py b/tinygrad_repo/test/models/test_real_world.py new file mode 100644 
index 0000000..31c8102 --- /dev/null +++ b/tinygrad_repo/test/models/test_real_world.py @@ -0,0 +1,100 @@ +import unittest, time +import numpy as np +from tinygrad.tensor import Tensor +from tinygrad.nn import optim +from tinygrad.nn.state import get_parameters +from tinygrad.jit import TinyJit, JIT_SUPPORTED_DEVICE +from tinygrad.ops import Device, GlobalCounters +from tinygrad.helpers import CI, dtypes, getenv, prod +from test.helpers import derandomize_model + +from examples.gpt2 import Transformer as GPT2Transformer, MODEL_PARAMS as GPT2_MODEL_PARAMS +from examples.hlb_cifar10 import SpeedyResNet +from examples.llama import Transformer as LLaMaTransformer, MODEL_PARAMS as LLAMA_MODEL_PARAMS +from examples.stable_diffusion import UNetModel + +def helper_test(nm, gen, train, max_memory_allowed, max_kernels_allowed, all_jitted=False): + tms = [] + for _ in range(4): + GlobalCounters.reset() + GlobalCounters.mem_used = 0 + Device[Device.DEFAULT].synchronize() + st = time.perf_counter_ns() + train(*gen()) + Device[Device.DEFAULT].synchronize() + tms.append(time.perf_counter_ns() - st) + + kernels_used = len(train.jit_cache) if hasattr(train, "jit_cache") else None + print(f"{nm}: used {GlobalCounters.mem_used/1e9:.2f} GB and {kernels_used} kernels in {min(tms)/1e6:.2f} ms") + assert GlobalCounters.mem_used/1e9 < max_memory_allowed, f"{nm} used more than {max_memory_allowed:.2f} GB" + assert not kernels_used or kernels_used <= max_kernels_allowed, f"{nm} used more than {max_kernels_allowed} kernels" + if all_jitted: + assert kernels_used > 0 and kernels_used == GlobalCounters.kernel_count, f"only {kernels_used} out of {GlobalCounters.kernel_count} were jitted" + +class TestRealWorld(unittest.TestCase): + def setUp(self): + self.old_type = Tensor.default_type + np.random.seed(2002) + + def tearDown(self): + Tensor.default_type = self.old_type + + @unittest.skipUnless(not CI, "too big for CI") + def test_stable_diffusion(self): + model = UNetModel() + derandomize_model(model) + @TinyJit + def test(t, t2): return model(t, 801, t2).realize() + helper_test("test_sd", lambda: (Tensor.randn(1, 4, 64, 64),Tensor.randn(1, 77, 768)), test, 18.0, 967) + + @unittest.skipUnless(Device.DEFAULT in JIT_SUPPORTED_DEVICE and Device.DEFAULT not in ["LLVM"], "needs JIT, too long on CI LLVM") + def test_llama(self): + Tensor.default_type = dtypes.float16 + + args_tiny = {"dim": 1024, "multiple_of": 256, "n_heads": 8, "n_layers": 8, "norm_eps": 1e-05, "vocab_size": 1000} + model = LLaMaTransformer(**(args_tiny if CI else LLAMA_MODEL_PARAMS["1"]["7B"]["args"])) + derandomize_model(model) + @TinyJit + def test(t): return model(t, 0).realize() + # NOTE: only test one pass, not testing the dynamic shape autoregressive part + helper_test("test_llama", lambda: (Tensor([[1,]]),), test, 0.22 if CI else 13.5, 126 if CI else 486, all_jitted=True) + + @unittest.skipUnless(Device.DEFAULT in JIT_SUPPORTED_DEVICE and (Device.DEFAULT not in ["LLVM"] or not CI), "needs JIT, too long on CI LLVM") + def test_gpt2(self): + Tensor.default_type = dtypes.float16 + + args_tiny = {"dim": 1024, "n_heads": 8, "n_layers": 8, "norm_eps": 1e-5, "vocab_size": 1000} + model = GPT2Transformer(**(args_tiny if CI else GPT2_MODEL_PARAMS["gpt2-medium"])) + derandomize_model(model) + @TinyJit + def test(t): return model(t, 0).realize() + helper_test("test_gpt2", lambda: (Tensor([[1,]]),), test, 0.21 if CI else 0.9, 129 if CI else 369, all_jitted=True) + + @unittest.skipUnless(Device.DEFAULT in JIT_SUPPORTED_DEVICE and (Device.DEFAULT not in ["LLVM", 
"CLANG"] or not CI), "needs JIT, too long on CI LLVM and CLANG") + def test_train_cifar(self): + # TODO: with default device + #old_default = Device.DEFAULT + #Device.DEFAULT = "FAKE" + #Device['fake'].codegen = Device[old_default].codegen + + with Tensor.train(): + model = SpeedyResNet(Tensor.ones((12,3,2,2))) + optimizer = optim.SGD(get_parameters(model), lr=0.01, momentum=0.8, nesterov=True, weight_decay=0.15) + + BS = 32 if CI else 512 + + @TinyJit + def train(X): + out = model(X) + loss = out.mean() + optimizer.zero_grad() + loss.backward() + optimizer.step() + + helper_test("train_cifar", lambda: (Tensor.randn(BS, 3, 32, 32),), train, (1.0/48)*BS, 154) # it's 154 on metal + + # reset device + #Device.DEFAULT = old_default + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/models/test_rnnt.py b/tinygrad_repo/test/models/test_rnnt.py new file mode 100644 index 0000000..51a934b --- /dev/null +++ b/tinygrad_repo/test/models/test_rnnt.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +import unittest +import numpy as np +from tinygrad.tensor import Tensor +from models.rnnt import LSTM +import torch + +class TestRNNT(unittest.TestCase): + def test_lstm(self): + BS, SQ, IS, HS, L = 2, 20, 40, 128, 2 + + # create in torch + with torch.no_grad(): + torch_layer = torch.nn.LSTM(IS, HS, L) + + # create in tinygrad + layer = LSTM(IS, HS, L, 0.0) + + # copy weights + with torch.no_grad(): + layer.cells[0].weights_ih.assign(Tensor(torch_layer.weight_ih_l0.numpy())) + layer.cells[0].weights_hh.assign(Tensor(torch_layer.weight_hh_l0.numpy())) + layer.cells[0].bias_ih.assign(Tensor(torch_layer.bias_ih_l0.numpy())) + layer.cells[0].bias_hh.assign(Tensor(torch_layer.bias_hh_l0.numpy())) + layer.cells[1].weights_ih.assign(Tensor(torch_layer.weight_ih_l1.numpy())) + layer.cells[1].weights_hh.assign(Tensor(torch_layer.weight_hh_l1.numpy())) + layer.cells[1].bias_ih.assign(Tensor(torch_layer.bias_ih_l1.numpy())) + layer.cells[1].bias_hh.assign(Tensor(torch_layer.bias_hh_l1.numpy())) + + # test initial hidden + for _ in range(3): + x = Tensor.randn(SQ, BS, IS) + z, hc = layer(x, None) + torch_x = torch.tensor(x.numpy()) + torch_z, torch_hc = torch_layer(torch_x) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-3, rtol=5e-3) + + # test passing hidden + for _ in range(3): + x = Tensor.randn(SQ, BS, IS) + z, hc = layer(x, hc) + torch_x = torch.tensor(x.numpy()) + torch_z, torch_hc = torch_layer(torch_x, torch_hc) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-3, rtol=5e-3) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/models/test_train.py b/tinygrad_repo/test/models/test_train.py new file mode 100644 index 0000000..b987ee3 --- /dev/null +++ b/tinygrad_repo/test/models/test_train.py @@ -0,0 +1,83 @@ +import unittest +import time +import numpy as np +from tinygrad.nn.state import get_parameters +from tinygrad.nn import optim +from tinygrad.tensor import Device +from tinygrad.helpers import getenv +from extra.training import train +from models.convnext import ConvNeXt +from models.efficientnet import EfficientNet +from models.transformer import Transformer +from models.vit import ViT +from models.resnet import ResNet18 +import pytest + +pytestmark = [pytest.mark.exclude_gpu, pytest.mark.exclude_clang] + +BS = getenv("BS", 2) + +def train_one_step(model,X,Y): + params = get_parameters(model) + pcount = 0 + for p in params: + pcount += np.prod(p.shape) + optimizer = optim.SGD(params, lr=0.001) + 
print("stepping %r with %.1fM params bs %d" % (type(model), pcount/1e6, BS)) + st = time.time() + train(model, X, Y, optimizer, steps=1, BS=BS) + et = time.time()-st + print("done in %.2f ms" % (et*1000.)) + +def check_gc(): + if Device.DEFAULT == "GPU": + from extra.introspection import print_objects + assert print_objects() == 0 + +class TestTrain(unittest.TestCase): + def test_convnext(self): + model = ConvNeXt(depths=[1], dims=[16]) + X = np.zeros((BS,3,224,224), dtype=np.float32) + Y = np.zeros((BS), dtype=np.int32) + train_one_step(model,X,Y) + check_gc() + + def test_efficientnet(self): + model = EfficientNet(0) + X = np.zeros((BS,3,224,224), dtype=np.float32) + Y = np.zeros((BS), dtype=np.int32) + train_one_step(model,X,Y) + check_gc() + + @unittest.skipIf(Device.DEFAULT == "WEBGPU", "too many buffers for webgpu") + def test_vit(self): + model = ViT() + X = np.zeros((BS,3,224,224), dtype=np.float32) + Y = np.zeros((BS,), dtype=np.int32) + train_one_step(model,X,Y) + check_gc() + + def test_transformer(self): + # this should be small GPT-2, but the param count is wrong + # (real ff_dim is 768*4) + model = Transformer(syms=10, maxlen=6, layers=12, embed_dim=768, num_heads=12, ff_dim=768//4) + X = np.zeros((BS,6), dtype=np.float32) + Y = np.zeros((BS,6), dtype=np.int32) + train_one_step(model,X,Y) + check_gc() + + def test_resnet(self): + X = np.zeros((BS, 3, 224, 224), dtype=np.float32) + Y = np.zeros((BS), dtype=np.int32) + for resnet_v in [ResNet18]: + model = resnet_v() + model.load_from_pretrained() + train_one_step(model, X, Y) + check_gc() + + def test_bert(self): + # TODO: write this + pass + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/models/test_waifu2x.py b/tinygrad_repo/test/models/test_waifu2x.py new file mode 100644 index 0000000..0b34ae0 --- /dev/null +++ b/tinygrad_repo/test/models/test_waifu2x.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +import pathlib +import unittest +import numpy as np +from tinygrad.tensor import Tensor +from tinygrad.ops import Device + +class TestVGG7(unittest.TestCase): + def test_vgg7(self): + from examples.vgg7_helpers.waifu2x import Vgg7, image_load + + # Create in tinygrad + Tensor.manual_seed(1337) + mdl = Vgg7() + mdl.load_from_pretrained() + + # Scale up an image + test_x = image_load(pathlib.Path(__file__).parent / 'waifu2x/input.png') + test_y = image_load(pathlib.Path(__file__).parent / 'waifu2x/output.png') + scaled = mdl.forward_tiled(test_x, 156) + scaled = np.fmax(0, np.fmin(1, scaled)) + np.testing.assert_allclose(scaled, test_y, atol=5e-3, rtol=5e-3) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/models/test_whisper.py b/tinygrad_repo/test/models/test_whisper.py new file mode 100644 index 0000000..b68dea5 --- /dev/null +++ b/tinygrad_repo/test/models/test_whisper.py @@ -0,0 +1,25 @@ +import unittest +import pathlib +from tinygrad.ops import Device +from examples.whisper import init_whisper, transcribe_file + +@unittest.skipUnless(Device.DEFAULT == "METAL", "Some non-metal backends spend too long trying to allocate a 20GB array") +class TestWhisper(unittest.TestCase): + @classmethod + def setUpClass(cls): + model, enc = init_whisper("tiny.en") + cls.model = model + cls.enc = enc + + @classmethod + def tearDownClass(cls): + del cls.model + del cls.enc + + def test_transcribe_file(self): + # Audio generated with the command on MacOS: + # say "Could you please let me out of the box?" 
--file-format=WAVE --data-format=LEUI8@16000 -o test + # We use the WAVE type because it's easier to decode in CI test environments + filename = str(pathlib.Path(__file__).parent / "whisper/test.wav") + transcription = transcribe_file(self.model, self.enc, filename) + self.assertEqual("<|startoftranscript|><|notimestamps|> Could you please let me out of the box?<|endoftext|>", transcription) diff --git a/tinygrad_repo/test/models/waifu2x/input.png b/tinygrad_repo/test/models/waifu2x/input.png new file mode 100644 index 0000000000000000000000000000000000000000..9ae415a9531a21ecc587b503ecc2e893386bf242 GIT binary patch literal 7321 zcmeHKc{r5a`yWI0oivqYnk31X%{G(WC~MY|5MwrjS*+6zca-Z*TAQ{a)AadavvE`|q8(W}fFc=e|GZzCZWpoadZmr=2^b#Mg;~ zKp-g_YrG3^ZEo zO51pQyXv-4rFRl)-1kV$TkhLw6N}QS;h%bIOvcq(`?fPvA z1Lx2xwY!y#Ado;D&C=4z#?tc7#DR2AM;$e`ZgNsgaCE)vq9r{E9XgVa>#?#@4qScE zX~T2zOSr7sPckQU)wk%R#s=QHm3Ox^?odUUbXhEPUU2B$P|+LMrdOGYd+*E-d^BnO zV~o`w#eY(YYI$b|KFK)M!?k_9X7}9~_jtKt@eMG{qpagal963q#^A^XyJ_w3f9x+a zzwVEMLghDS6MFBgzk@kdc`H26@Saw=ZRAcPf=k?;G%t6f=+|DM9s}7ZOlHpJRJlVV zs=FhycG;)!P8ya4;6^0E({{Cbp1!^}F68Sl;;`c9404%k3M|D++dNium6V%vT#C>n zZi`YmH}~U*w5sT2FY)S9-{C!GD(gsHGT%fRsDT-0HR_72#oz1?KAnd0^ z5#Lo?9nu>76*TO>JA3`WfGh;zI(xIN#n0_c*d|5TpVz}+>C%hS4_Bp#;`pOPEboG zn+(;}(bYl1t$4IhG|WUCYQ!c{a4z`mKOul8W0*ge%fcZLVPRo9VHh1In~FeTu~-BW zjXDh#twQM~geg?V}12PGztEc;eykF=XCWRYDV3WA2INcjQ>^FzjXa02L6%pzv}uwqf7j+;}n?z?1I98!xBN6;|v_M zg#GMx;6dM)zh~-7jsq=XENc%A2&A)d`6s|DHVp+DMYuK&Rw85K3KFX{52>~*0!?eV zR(rUXO#1S!9JI1oB$J?FG_F5%d9&yqBfSO$lK5eH zFn-nM-h(Hy1E7!*84^gw++ygix)*!u#?tHZN3X&&CtgjCFnk-bb6)mEeXOaAs$W>F zZ<|?|%&}3lk`r75e|Xr=SGJLF4iXd=kc0>!|MtKNKJ%hVXcM0WkyeeL>p#$D5_fW) ze?V-4M1K7%XydAsSNnLwa^02inGaq2;yf8!y8BKYzRHLBv1A!FQnT+>`pWp~Pj(3= z6=an|AR?y>(xxK$Baa4$AG99^y_L@F)HrUl&+bsnwVM6y>YryPCs|`T#f1XtsgCOM zA8`rZ8VY1Gc}rlNXyKc;PbuFYY%bmi-bA&tGg5U7zr?NY3EJslhp&1gCmB#NHNI5} zY~7mH{Q8a`YUqREQ~z|Hk&c2njNcP z5Bpe&=a-kGwUs244y`hml@$^=boW@KYn;%7#pt&#Au!p2#Pu$M;2a^t)SUD76rB-6 zja1OF8OwaxK~|P8(i#a#PljSWWQBtC*N4`4cZww<-3C51cgQu4Cxp*{3?B;%&r3W( zsF?_Nd)(E|?X(x_@Zh(9Icgp6q=`pDNIYJa8MN`q6WNU$H^w&+@XmNyp^4CK85tQ> zUOHYnYeH*OPPJ(45Ul74Ix;YlIapRxuI6)7v66Y9cdzs`$714sKmbL66Rxn8v9yto zUbGuDfk?`W3eA5Vzx-igNm5!`UOWLLD-?6^pt#>vNP*@<3#WF$Wc=+n!qDtqJ>ABe zA^nGi>GUfBtE*lrQ<}0`MUpD=7`2bbRzJM*#*xpJ7m71aOiY|!So$W_EbH7Dj>B}#Zela#R zOzKusliRlmZz`?xG!5i}$hTim|6qVd-yRr%E-fvc#&U!k`W<0M8()7oVy%8ds^>#o&Jg_@b=CTnB~n1sRLuu+g$Z} zwmJD=oGuBp2QC1jW0!6g1@A0TNNWb%<6f)+LaFY*u}6DLLEAau(q_^q+y3|xI7;jc z_hiv%F*xldqokl9wzO1BK3PcmfS|KtaJ844ySuB0M{?7kwarpd`4w~U^_^^A>S}ZS z3*}*`OGr7h?1vuON`PvBQnO2`SNBZwjFJkHIs%Hz%a6T#7x02%0My=G)}@rc9c67y z`JkHodq*xNFyPPv${!7Sp?(irPsIO#1uvhvmTw|zTR~Ef=ABOkY~bqAzKSa4^i<1i+GC-+qZ8Q z6^<+K%`Aw)%L8aFlqO0A;rd=F`J((dX@+0;J>sbz>#dj4Q{H_Ra<=%yIT0oF=H9Ig zdy0V0S=!AR4vb%}tep}6Ec4SXsExXoNW8j=uc2D*SorRbjam9K!D{Qn*C~AUL9}3E z+ii~2Yl=jsN0?*PwOb@K)q!*Yu2)5+amHE)+12cC--*hv*_89;QJDt*gZIFq_LmMj zzX6R@>aH<+uz7?eTl`vLGSWKLs))E|`uNjHxhF`{v9;~@sb$@_l{zX9?b#m7fA0(} zS!@2*wN5@c-l(w`6?*|bB-^os8&Ns539M-u6>%T2 zy?%;iIDBPlZF4BHc@Cng+B&X`ARnw`NacSLJ@S@;NPwk1dn;tt^0?;V)6W$X1;Q}SwLB!@j-WEb~_3e2#Ew7tRU>jDA-&{UN75s0;ppF7(p{`$OZb@uA#EHOQ= z+btIx58M~L*itr>rt)Z9dFu7Pv(6VD&Q2{}f_A-)-DKwNtpf7g)NCpuCU*y0__06d z#m=N@D^6$zYYHM>fM`44+u=4Nk8rZOn-$u}OT19s3*{?4&O!)adYnxmM!_i(z=u(tM=o<+9Ek z(G=Zi;{F|B!ed)mw()uO5(_V%oTI-PO3rOwJhlHJSX{yFr%>9x6hs$nPAJ1={5WXyN#WTFL z)Ab_-5acbf>7$AG*OO3XtTzdKPHgHS`@;jU(cJsF1=mvUOlDP4*K1>uWU}1q@D_Rmn>Lvtw-(Y{FC4>_-&>NVxN=}eWJgR-=&VzYt7cq z6?H6j&d7q)#HR^$_Xqp$R86=)Al7S6Fq9Vt#nYNySzyV)i{BCptYD4D9Zj%hO83vo 
z6qV-hQ#a&{9*(6wsCj;fpSez2{!EOh_k^$O-lM!5uIk0zvlig@HWJ64%7D&YzT8Zy zSbZ8)@s5vp-sji_N#~A(Rii#Rw@7oC89$Js7z3)7K6ufm;oG8!ckP}V&3DrEK;ClEqqR!c_HK>` z<%)M-EEk<`Ue(*zl>pMo+&ca;Z_9d3Zh)ro8S%!v@|4eYhS8RN5zuJ#+SwiUviqka zdy6esBNcY$##-jx4pze#_=5K`;}6^mf6nw&(9ZRM(R|CmE(nz)E@!^bg(5I1ePL!N zdai>ezKYbMrEX_FAowIdD{&nE`X#JE3gg~Lp1b|Rr|Gay>}RF!Ug@ilpvASdeaEm9 z`)?}m)*s`&oLM@7G7(GGihiFObp3D>Uqz!#tYgFwAT=9U!IGdh*=ZX z==lese}TA$CM?XVc^RHJrLaGt`QChP(zbPj)g_G-LFf2EB%TOOkHa{slu8KLKKKHn zw&iB*5OjT_(fID^Ni(5yA2;7%Hmp9IrWBHz$Iq#8t4nJeD>P}l%%<;O{a&>J@}Sph z`oIBmX1FUC{H7yOzI{$cab1M0@2I$;vJ-b{ciJBjeBLQd(w3m6QQ@&SF#d330I6*r8Ud+gX-!V#BSIjiPF z>HNyd%G!>G_>{nbfYmY!%3m>tIacasuCB-5ZNse>s|@Nv=L-S4EUByWo(pxqw71yP zGvxa8<&uE~qP@;nUzNwX9$A4!rP4=oOSbw@*S~Lhez9{YBLDjJ40=!WF{_O?Z{Ezp zP*dNJK_H2OYE9pzZ&6L=XBD$JtGx`=0!k%#nMqnl7-u6LKCXQf{C#s@{ngoVvCCas z^8(B4+}$&I`5TyE|KL!R81!I4cW38`ikT>rJl+NiJ%RSA2t7Hdg7Y=mLfPprktdb= zJ3BS@-G_EbbiI6eDI0Xb{jv^DKwVN&GHG&ha&6B0&k>0GtUR%kB5~J>!UIk*fPMdX w=OsuM57MxnceH7K(l-tyED45G`G447&r>=+o7*xDoDx7bRy*-!7QV6n1K)blkpKVy literal 0 HcmV?d00001 diff --git a/tinygrad_repo/test/models/waifu2x/output.png b/tinygrad_repo/test/models/waifu2x/output.png new file mode 100644 index 0000000000000000000000000000000000000000..b105a2e2cf668f6a055ec1a560dea68f6dcd1582 GIT binary patch literal 14906 zcmbVT^LHh@)342^wr$(mVr$zrwzh4%-MY87-EM7bYumT&efRq>ygy`4a-zx1$Gh$8^?$z}C3ef29O^ae307KO} zTX7r!gRG^7L1G;k#)pK#-8YE`heWz-7z+Z2@zOT?S1DM_W*h`gyo&q34t7Ym#zB!> z|BvSE!^8Hnkusv}2meOaCWrGH486R144QqN3o~88a9ts#@d}~}x{WKVdcMCAIF2At zujO+Q$W3%zL1!CAKq4*bV#X4y!sF4W`yUHsL7wb8s#<}CEc&y72E)iA#{;~(CJ`Gg z(M#?36Bu3(99|#IxO&Z-4%b8fn~zh__4emQd>|djCmVsipIcHodd(WG7P)EmNjJi& z&|HGZ{Z_I;?tg2H4Q9>^o2Um>hL@Wd}73Ut=#8Yj$56VcISy0r7WKHLM5 zxKsYtIDbC20{dgL8~eKJyI1VPviKgJ`3aT8E`sDNS=24?JHO{{G0(@3SMU}EIF=43 z{Fn!bp+;kR{E~VQD@_@%h5SwQeY7+GGEEf$OA%3_NG>v?tO}A5Nx<{@ zO8JpsC_Tn*yFb+*wc)<9%zj<4kr8QL{{B6YIGv~T`Lmk!#xB1xOtmCU6QOJhpGUC9c6c9#w- z=xjb9o_3N!^5;9-s2+}-NhA7c;!K|z8$OP@ld{LW@371)XmOo{w%6?Oe%GUIG*g=* z)sg*O=A6((PtuM_jxKH0`Y8xD0nW>K3WoN*>m&kY1A*9Swf6K)4N_u;H<>*;5=E=o z2M0TW$}GtA>J{%g_L2GXX_oIa-*e+W4^~dACho{%AKWaCSwTkkk`&fvfptH4B@%|% z1KUm-uh3k$)W!R$%{W0MxUrQt@w`(?8Krk0+U|~(bBf7V$D&Ug0%juJfW-biW#iSK z`%b_r-;H@1eAkzxgN!Fcoche#q3Q1)?rksk7%oLraFRNB(TXv#w!h?n3yt&lyWe{a zW7-GiU56WP)n8T7yBI}#eFS{Ke8gE2Qkyoub`D;V{0;h_s_o9akNnnIfzzH=ZdQpK zSTrfpK!>=6P03`JSt*q?cv)$r@R{T=5)sj%VIc#! 
zJ7BUo0Gy^0Atvqvy5i;bFsIkgx3E`FhJCWJG_O@>7FQ)v7sY5pl~uHS&N5-gzQJ>r z-wwd=-MFfotGmTTK+FDT914YXOcz5c`L%C9_=@ak2^vAD2t)y<)RR>qRPp>iWe3)g z!mvLt99$$U!{W=AC;$rxB~jQ9=Amf?{KbVukH7;#>^ z#;!nR;l^)aeex8)7YLUMA$liD zv56+;??HEt^W~P?e1j-uQ+^5jOWiB>ty=)il=9il<;@~^+Ojw{6~a*!eZa~^t8J?; z;<6DylObI$3#EiMp_H}~UfK%Z4V{Z~fPPTV-Tk@s`qWT;XNeaIU00b*>!d~IuFli- zZ)%ij%YP{Pf9e&w<`QhuVlZdt$^H!4QX)`|$mtQFQ+0o>B0i7vxGWqHLByH`4b2w* z5n0_7{o?+)#Q8JzS9d<;^{<*8TOLSVz`p{utS!LPkglwSeN0kCy-MBo`9rzaux91r zS+`6Tuwt7oSEgHq8IsIK&91@*2}P_u4Mc6Q_L{%yXnywXR{?%%q5Q@8LH0c(@4Z9f z^2CNKCbb_pae3J8=|=@mvtl8EbYH0I&sw5lXE%P9Nrjy5a0Lu60qz~s%L>0!l= z#=e)!Wpmn6f7NW*a_25B#Y>xYvF%v5>B^?GY&qoWV#;0G{u7gCz8f@81etIQdufu} zEN-e!Ld#kS6uSBew9aVW1vnViArY^~;QReroiT`a52*Aw;3zqr!(}g|>BMKdjpKLn~C44sYhC#q&#`Bcw)_u7^}NPM#sh@_Oppyhm5Z$Z^tdtIrt*F5)wL z!AXp=X%NA7N%Rt6$uJH6kGbXzYc4&&udQ-TQ9G>h`}z4KH%&=p>#`DB_T=b8-WXFm z2iCX@aoV9$Hyf8C#CrY~@YHCD%eA|_(=zk1JmxeXS>CA5a=F?c8aDeGcvk;uOF!&D z;KxSWa2;IM==Wqruq1&a@LeROyEuR9{ZpKMtM0XQDVvbciZ!P$ zpskgT7T{5)>N1rVag;tPhB=7x@0_0PVue47 zDEgHJM@>UrQBjc#$Z+e^1t6iK$~(jTvz2t)vJ}dy*452v3u@-nTJ|~Zu|OTx(I8l2 zRWxU`ot5wZJPhK1a!Et6+>v_w>a`nXeQn_8 z>{|bs*%Cl=N;mg+xn>Q(;4+2sX9{Pl%^h{O-y3TCl?Xo)9H0ZTk%dpT~Scwn2WA7&QjbcTM)ffT@4B%eG@HDdib?@@b{Rt zS<9*mp%#5QZ$jFl3*l(GHUS`woq7r$F59@Xl;H^4lM2mBiGdLkQczOaeU+N827K0+ z-L0*X56LZi3_c2Dlj4GHrKKj zEq?V{WMt&!bFo|*zSaS6T^JZCMoEA={ClD@vuPuq7-TJ_+!v-xo@wdf;Qk)dMZh$F zp1dKUWK#joI%Oi2Zdy=}D9pr&M;RsJuCg4%u|~mG3apnjb&LYxxXb^50WhO(TBdGZ zLK`#MO`Gr!R`2HL@9v!8{NaL8sbgI6=rQuxH!6)oz&Au%lGeng8a+n7btyg=DwI(i zFstqGlXhzsqW%k)oUC99MYsKyL@MHV_oDg&1NUZwEt9l6ene}{`!4PHPpewPZYc>4 zyCg>)Q|kJc>C&<<$*9UdwA85iM-1!NE}wP(aidX_Zc7Y{pFIG^zr?`4Imb|ibnurL z|BR@yM6J{)H_3_G)uYz5M~hzUuRpb1jbd-~tgb0>bc=&EA)ytE#=XzvOCeSQ^kijH z95nw?edrm+sLBz2`Vd+Uo?kk9)e~NXRP^r0&?@x<`Xg zw2(3HO+a>XPxCs*P;tiyZU#Mv=F&)=2MAQ?XT;S7cG{KHNw@)8oC3W5XBbbLp}rj0 zGB)dx(*hFkvito(W4)*ds4Y%m$p$Q}qG7s0>@;fG z9~rPMtHIzUymcCOtv&$58jb3e{~ed=2ehl+*xEE;{8}N2Xb-3**U9s^Wi7h02e<5E zKcp3j|8v*awB;}6nzkO9AiHTSz{#^~)6eX4m%Aq9G8BU%rl3uy%T^4=(48c}wdr|? zEA~8Ax`NNieI;O2g;EVCEwBM6*B!xCqxUVy#~aDwO=#Aaz08|~*e@w!mQ+cG!PaVm zCyh}^Baq?hjRb3|@-6EuCJiHn6tIhr4ms-SDWvP}oT30NkvOlke!z-U ze!WRM_Qv{dwn%R{)6)nC(|kiH2^G4n#B~rs+I607zg_Zd@o2r5SoKH3SuJQ%YI-GN zndPgYZ#~&55&GDz9fsPG!i_$HZek6|Woajgu?I!SSa^|T@41!Y6qfRy05fWbRE6NK zWqgzV<}OqT9Ei5Y3A8bCC!}RlDf3`E+6!luS0)&Qeosdjx2!xV1Ilsi+A3pFUL$I< z^LhN3rS=qyGu8ECLS#>uxt z2FUjXOQLTM`kKGgsFwlR%121_;+*`r?&N8qHc^;*d&5ROVK^wyiqPb5Xw+##m~Ep4 z__E<#Y?tbLZx?lqfx+4cFto>mP{en;e7BGQ9E=dAugj((?d*r5=0(wW#v~=!G4n;G zyC@s8n2+MnMi87R_X~(dOR(lXgmYJdP>vrCChDP8^^t?<3~nW1<4#`i)yT^?J=;W3 zYQO%*Ut!KWq~t2XfHZ@Yik%~%vUZ3p=Zc6t)ox7J4v_yy*1f=Y>(4jBv_w7xM-uiW zh(ZojQvBNCE?Nmm3pXMhJUiI=%^iy{?PEPy%Bes58lVy|xQc2D9v;IcEWk{C^c{gO zf*T+8!Uaf8ypJwT!2oqVMBp9z=p?)n$pevSSIUA|aOGPRY$ufNrNZG(A7kcsJC$kM z)$o@2jPSn^Icyy-ZySPd(gxY3^!2mAGm2&q!(}+h0x5tWc03NmfzD39cAyj&%GPOZ{+fiw6 zQTKEK(FN?HSJgT z)ct{j{N6q1_Z73ST%uuY?QcAX^9WUqk>TUy>1@{0_yv`!5fefMAs0NA`$ARt4u#8W zTtA5CdTz*xI2ilA0QY_jfo0IEvsE$;l=$bag`GiTYqx{djXpW8aZD&^vLAf~OM`qK zB6+b(z2f)hws?n@_3eo0vE%z<>8o(7(Id*jK2<;_l>wPF9aV8!=W5iVS6P}?iKJvp z)LK_6hmL+aa6-MXXpY&ff*+!?FklczLZUh5+wQOE+%edL?J{igEsMui7cr&ODf7Jy z;vvXk+ZFXSi&xrNE;60um^Gkd{=fKz-{R9AmmoxZR{iZ4=lfpyPiA>01)Ku5{4Kj7 z%p|*(WHuEWuQ`O0Q!*zN+4q&Pum#W$1_&Mwk(r-umIW`aZ}7vgmi%JPN#O7JoR#{p z74Ag*m?wCr$0r^idCAap+p~&EG1ak}m2jMR&m=ydD1jNY5Uj)Tewc>oy9N;&qpZPI@XRNF8Stbw%-;SY~!- z5`y*jHe3p1(ltfq4NSgr`1J>BQa2^9+=Kavq^()}sy`I>0OnA_{zkaZb*ESG63P=! 
z)263LoQ?+^UFE4!u!_7kB!oabZU$ZY>&r(*!{_u%9$fZJjm?j`nuZ{BN=wg(|L7Si z0}*w?pZ4~C7tzPiA2VwtG`x*M=o4!y!orZwF#TIg{fW%Y%$(N8;P~&1!Nw>mcwC(O zIT+5+hBu9!hjx=9w}4*c$=?D~V?*|t<)^2J_dF4!7KhZB*=sJh%(8ASGn)@AQ^^+iE+4 zk)ffmsD$6YrHp<3K3Q{1!jqIcC?i-UHK)ciwoSP*%WL7}el6-Wx&n0MmTU64kU-oq ztD%WLj6sp(pgO|uJa6Q5*zxgGw-5c~Eim(L{>@^jSkDasV&UdqUfn?kFg>F+nm&i) zhG}wTRN^Me0P8pu&*ho&RwiXlgm~7YU+(>0^Ed|5Eck-J(5<2|<*A7DvS{lqL za{k)X8-qMK!xWgCv^HP)k64V`R&MixCyIpV)w=6A6a3AbV*7kp@P+*WQAB*S&@t4~ zV0di_%dZVY59_vRt@pdCo`D0h|JJ8ntbXGNs|f)Ky(cMohzu(x8tq43VLh*taesI+I759*X@N1MW0e# zDg96@XZks{jERw4+aFDE8+GeXXf@ra(Z1_-sM==^521`_wq$hY`kNYIlYi+K`vuE5 z2ondot}-+-R68JFJ}_eZd0T&GuBb(uP!@@lsM+8JS`C&X(6t>`1^7bpFumN_rjZJ&AaDDp8~Q^Nw0eb#mDV<=Q=n zJeV7K_44pL*)wBS@Cq`vzicFV1k6_2j5BPXC!CFqK954w-n4({C1j(~drD>W#cH3q zQbJ?+k2EN~{<{2yiXNeMaP$Wzp(vLT>~N$FXrRpyn0r5=1FOE2qJ3}fBL)-tpM`3z znIHY%^aZNvLHjNz?=<5sO3Y{Q@7weEShWFd`?O}Uh4d7?W@ID^X=^rU3BTf(725!D z;o2F5KaNdL5%(0);1HI;|EMZl-fkxq(<6uA#|xG1_h6kOTukqWDPf8bOE!$G)+h(K{8u~bdI)`6;YwE6*}BT+TZi; zSqpZN!rshEnki^R(;nXko#DLZ&JO1Iq{Ls0MqUt!6E~D$ScMKhm~PgN9JRXKzYRWP z8Gi~7qzqA1wXV?4wvJQa5kLKG-}QUJOKfB&NUOPHUWQ&)+}PpFo$@Kh?ir`egZUjH zSFtgK+Fv02^5nlF*-jD9BZ=TLzjWJ)y`ih*Fn;Hem>tkVB@_nhu+`?^dAnYGI9#{V zl$H6q&xO9ec8}`h0P3u7Ba1*oHwcDPwWdoEwK~0XBROEQa?_jxP!sP*dN$Snsrz$u z6Y;93sH-RRD_+=!qM|UY4(yl^RQ4yr1PgDVEIN7$yFqbY^l@(IFI73=Pm#q{ZrR;3Y=BK(=2mR+ti%3-SAwx9oj z;dbL$%qo7N&P+0IgTX|9Wi!@^aAtz!A))s!A?HG)weX ziTR+)>NzeVg&-e$<*UX71`FK6{d|GA-ear%vP*7imB*3qdRyM{YvHuJj-U8OTrw-m z(!Rr;rTxHZ0c#XCtAkj^rQ3b%MSlIx3CxRZ@kqlcw3Z-CjFQ?h&bes88W9^$Rcl)Mgbq zYK9YT#G|FYx2By3{DyB|tBw_wE2fRin0*@5F=S1OS;{B5(R~N8Jx>)UP66fK%O^fs z4A??100>CV3hIkeJ65V^368$URgW#ja2wlU*y7BO;O>X`#=msG5*m=5X-hS~v3&YJ zmcM(|Fnp(c3qbV|6Mu(lH5v0#G3;^M(r8ODY=Jo%*4~A*)t$%fA`X#X3B0axZV)RT zF2JBdDrxIV!|Qfmnv!}S+eL(Z)#aN?FzJrQ>$V0mjPmuZ}rSY+8+zE`-k z6oMF?8oI^Nu;?6&j-~o%Uba+9ty>I#N2lR{Y}ui`Gj#O$=-H$>IU| z$QzbL(8)3sf&q){gUhxT>jqllZ^yYlU_bJ!6;Nq3PyVX~Cz(Lf){M0i=j|}jKfxo{ z(sv}rnzMJrdzh*H`RbhaCTKfF4Z#=L&!o17z>!D@Z$`ST*%iNhYhMeyjWdcDa<_A9 zoV$&$>UfUYhpvan+SBv2%fl|?z~U4+z1MM#;MK1MvYQWt7|_2OHG`eGgy7d^!2N+( z(xNqKQ#G#7fz^K;BR3(eJU`=uI4H+95Z7o_P5_os9G@P$`_(4bLUpxwPg0MA?%TQ! z`mcN{w1=id6UpuRJD8b)#GbO_~7+$5%B_!YZ}hcD!rjNbYgjAP8lJnm>%f zVqB>hkHH;EBz~q{NW3}YN~X;AdQ}v~0^=a#r(VW!I4WvBXr|;3oYIH5S-bVYJ|D_? z8p1+vZHikGtO~t`W~}e&jQ+ymfxv@QQOm{!9&qJuKB1d~=x9>j2vPZXy+Tu|sp@@Q;p}fn>(iw-5%xV<(GWomEBtgqfMU3^lE^d%8-b zF>VHY!F|M&Hj=u(^q|<7h5=3Fj$|cu&qBk}PmC2kpEzAcSdwV-L;w3|CI0)PW53(% ztkhi1!RJ?(;U_eeIsx1jE`uj!w>CZRaN1*ruCn(jhDAru!J)DuEMqY610SoaSCgwq zjKtE9BT#W#(^~IFjKVI}s~=$093)-UkzrIQ__}TNc`{{KCSyhPH*ImsX7{@HF@fav zYNHu=gRVM|QWjyE`K(V@eAoD~zdm#8Ij(>x8O1}UODeDtx@H2;NX3e5#O~L-m{4MJ zOPb&eFA?!3Rt<_(Wr|5v6b{FdGUB<8GraS=tk@^q{R?HG?j3Sf`h7aW)Zq-G*CLE_ z+UexPn`0ONtA3;LJ#zlq?H%wsOz=%%`2K=E{p#X7MTU`1fJiFt<0;-@j>a2*tP2E! 
zy|!fwP*)&dS{g_`2gk-Cp{kNJJ8^ zPN<|gBUzizMLU8ke^nMT%wl3L0nD=%MD0mCE9Rq~bLO3>!8*EL-)j5BQ<%{p->%x8NlQc2kHpgYiQ zV+FL0bz1k~_VS}OqDAs((dx^QHM;t`b-e#1MMBVdL(bY`=T{dW+hZI{+Z#rWwcbE7 z(`^Q_JA}19G($UZO@Cr0@i;-`=l7g%%hxip-HlhIdf+ehPPW6Z1}B+hIZSC)u9KHU z$~Dx-GOjCS>5UjB_K~=;wXBTd(sJ7}Zu{LPU4GHPus$b6*Dp%lOF=wxk#$f19yF62 z2aj@yvQGc{(pqn(mKrT~3PuPnypclC2mIRScr$mu?Ni7an}!z!VbRl7+fNP1-S^S< zd<}RLO8u__mU`g3ghz8!hs7%FO_!NWVefFysGG3}uR@|k=26u09<1NLRJAOgc|Fm=(Ca zXnws{PQIM>$sWR7u*|sZu85_I1Q?>D3k@xN6K`@{x->ml8&z7j+TbP6-!O zQl!E(>$l8;)dphNgQ&pkKDs@JOJ_g60!#FMp&ji4QNykUZolWUGjDhDZRQHUJ_Lrw z3`l?@HJ~TcpyszTAn5r;fEX+(J7$ob9uoB`$0W`NQ!pIiQ;g}|-0$f|JM=uv1#c69 z7pUYldR?^QoLd?0M7x#JwgR{vdkCPZk3H5neT2=tJ9+GwcHWX-e&U1mi%aB^wqi`j zTEU=*PEfuO(?jsb#WQI4bLur#=Nk5)+C5%LKHcWy3y)Pb{OxE@7km`83~sq;LQY>H z%pEoh3Zsv+06u>5d0Z`A4mk%L+$(9wJ%^1sZn9%0d>q@25_Nbvas>``W<8EF)kAHv8NSb? zuLn^dv-5mzPqqQo*)^0%;@ZA{d_QFJS>YU_JS=B2>Q6ANj42KL&WflY<7{9`gmp_e zu<@<&7$|j%5(u+7z+AkcTv>mYlm>x-ov3~fsefC2BQMU~n=Ulw@LbCPm+RfLayJ^KaQ0@N)WSSSK_p;xwGhB2s`^ zB18LutdjQ$dgf{JVRi*HD3fgR=ScOF*3C1!NvyRLqxGD(#7%U?tWKmbWrhYUEe{)v zb9eAwz+(Nhbj+}eXST)Gv+hcz{B3kFB*lRBtaZKL&XQEv*Hxd>X-6+M1{ZDR5~I>~ z<;9G#E!{lKwF~j#p!IPN?beX=&($HAYR`%|9+wcAPDdOa6C%cK34A~+IfU(xiifCh z?&RSWyuP*y9U6zdrUNmuNU{4AC4ubtzF_3mJ}6WvA3JW;H1c4%8!mv0_|bd5Op|)> z?o=(*sp|aAeM*tNm0U&hos(asqQYjo^Rx@cYwW~I;MYm(y?5Y?d?r$Q_B_*6miDifc*0TOD3kM8#uRcMiTJJ z?tI_$eb4+!r)z>GC!YLPbn0;sR=;p!>y@(Ks~SbG5lm|>gDHnL&vwmO<=TI+dA&>) zjJxS;L&%}WI`Q__lol?4>B^+}f$5&FLaBEjQ*r5Z+frzsvWitT`!(l=nruE$$ZrP^0E^2Nc!jPv?Ev7Xy&i;n~>V z#cm{k|ghxjStYUI2yV=+gwJz4F_+}2SEzRhQ)1Yi z8*xk{ZeAC+dfN{-&wlo@?7h7-==BBSDS3%?IKalv7T<`~KAp^P#3X_6dag*pw#41b z-L*-rWIEY@?P0n_4V|r*+RSwev+RATt%JMlsq7u2!e!-HGyb(B zM#&Z}F+K>`1$sXh*O&9$*eL^&_-lt-TQYYPs(#A1701xNMev?3%-+s zsW8C7bh&OlbMe6SltGO&`B^F=V1|N>E-9i59fye`eXr}nZ?iDc)sqQ_oV%ncYb_WJ z`_biq_XbxG-qMRBf>BtTN0Tfk!tHOa(Py5A0W#UhY3C`yBmiXw^m5_Axzvuvp>@#|`I{ z)`&cQi-2ZpK;&2NxguVhm)|!voB}J5t&3qgu6Wk_S*1WG^!IXq!Dn@x_(Q#ksFWWH zDPA+-T7K=ti7(%Y5Ao^^5V(E$5cq zFJjC#-NWToccEj>4xKPr+?iSQJl)x%pt*pHCL_PWSj&HS7e98Hq*W{Pj+?7Wu`XG< zPMSf-n^QlWfPD7Id(0>Jz*egxQ;U9=5en=$rxw1iPp_%}NsV&11hf(+7oA~e-u1Zz zBzlcdWZXyr$!?SSzo$+v`!&`|-_-Aj)yqFBd)yPF5T=a16BRWkbqH1}cD4D=TQ;1! 
z$hA}Oik7zFLW_$6_}+}yJ5Dy|gkIR@t~(jkz0bOo+E1M#+;SAZRK z5BxsYsQ&=Gg2(;JDPuk8)omL+ZnwW7Nfh!&{6zSC57_$+D zhk*IO<}LG49t>A|J^IGDW!dvOdE4@n0C|RqDbxPsux+zkCb@zPJc-m;loJW)y7 z%-h?pCCNp?8u!J(B}y3SWmb0;k)j9wxkqt&)gP@~Gr>ZyQ4{eau=CUaAf*srTy1UR#j9s``l{&-vY(fL`}DIwo^gzJrV5&){fn&!#fb%{mp z)1=pu3<|e7ph+&=Z&VZ)dO`ktD;ATHOilo`@x56ovHxnr?m|c3`F>_OhHbc~UuDz2 zYRGCKey95d=EBKEew{;*A4cJiJDYZN8sOwHKtksB6M!2iQC3xFtTG-;fn68Lp0hFQ&UUzU`S{>E-G9SiQNFrd4aHUi8VDRHR1Zcm!7+9T85D|H5>rucmB=eK0 z5M)_mE4eU@uN~ROYlVW2Z2fv%W51}UWqh1vEXE!>7e9;DYNLm#WmuY~Y8$H>qpFg6 z8+gQ#xcOaq6-i*E{MdwAtZ>vyUYiav>q%=+>|Ts>)~Ch7H~IQJkWu=9`#5Peoe`z{ zay|3%8@hjltvRa#J8}MM-Q`k|rjH6Pe<2X%dPWIdNq<3PXQhB&yX$ol`&ADO@(kCD z71g=zyn;*%iU92VtNqh?9JPx%dhL0;G@mjI{R@qj#GP){v^T`n#zXDfu=zb96IZB& z$&ROXr%4D1;V)7-+1o9&#&RjzZn?xZ<$kKz{F8`fk)eJ1d#8I(AMJ9q+=VVr&ZDsX zO1opV!-e4j{gk2X=w`0v$PhhycL}cfOYD@yf9O!^D5j=g-ofFQm?!wXZ5c$?a8%5@ zii@t4Bzp1P*gOj#ku@u>mpHHw21=sz!3W*W~)h2C)FWE4kzaN>4 zJkUBG%AHu-=ziWZGh^qHXHb^L;05znzrl)T&+@bG=(fN$ZE|A-R|uc$)NDn7DM^d7 z`0d(de3_ur*t<$nluW*Scak1r-Ky#PjLD9a%A!LwG5*XaLNo*0Be%IiG4Jokh)CI@ zP2A}d+FJpe)oZ>f;V39ci#hB;3Dd?Jt&EC6$+;}9@y^H?)BonU;*MP0o>~ekiYxyP;~0GN)AyyF2E%}@uv3pnoJ7v=6lG>^ zx1iyQe$7~_c+$jw#d?!Qj%wD%xdtH8bomiWP~dVS7ub|Nw<1j?>*n`yqG2n` zQt;|FrHxT0*jK+dF;{496LhUtsqOKF#Z8W$X3WIKncA#`_F(~6hjcOmHZ~*IXka+s zF~;G0+Xo5T%w}5R#!9*j3#Wu+GTn-{YVTXH0G!qcuUoz_lc*57-+t8U zBtzKeoAdp46)AnYa{2~*tioYwb<=ZUx8*$;iG9HtF3DLxOt`%i_6V87F0Ub(m4bxHckYh*k<+h#Z2Wdx?$M}uarTTg#=S`EE7qe$x8Ic!eA}=$ zUGGjAB?=vI8Xwp($%J;(ectuqx)mtLd#><3$8fdYqH5amJL;N!KmECjQE0%7sYUtz z&sIv|=b_5RLPEH^lz_XUQ8mrqCfded*IIp{Ol6^%8F?Z5PC|J5-?>eD&*FTjT8BQ@J*?JU7>j$ zPnGU@*}Aoq39(Hcl?6Nm&7-6(B47j;Yiu?sM^G{pC5&ZL@UFXT&?b7j`(IFU7d`K=`Rj$x1{TnC}uM z+W2ec-|NdqD^|x5m)7>m|196hEZjCk?p-AC1Ngk>onf#PM$NA(X)HKdlCNq=SK%TT z>or)e-E4CBbn~hy;4(}Fq4yWwU-2Dmmd}jei0h|rx-F4=3Z;v%dhE~igmjf4%F&!c z&2+hynBQpOU1)L|hylEk1pk#YAVt!h4Y|F|dMskx-Kte^^2dPPb7(3WUNF9O?mw{j zA9sqpPGbdFi@v3-$}hJE4Y-n{3;W|aeQ7_oY4(<>plcHDl85jd2<@6NRc*HIfQ}5z z?07`?QivF~EFP`EP@~xSrD=}WEpJu$F&)tuf`0&fNfY>|IHMfQ=!1UJlzKf~_XBVC zfaCJu=#AaKr1XF+XK3uOH;y|Ey*^{pr81{TCvIrox?MjArpp)XC)aZtZV15v#{{U~h{z>I&PwRoN6B75Mu&F4nbhTP5&wCO! 
z@S&HvP)wA)s@e=;Te3tNh$)G6sVqYX308L<@M42w_q=gb!%99oS}0Al{A#IGXUxyWakV07|h0R zsB6-R0A@l8m%#L~E3Eh(*X;R?HM1~R29~4N!#@69t~9O@{i^xGQzyHry4lG$kRg8N z(BiOuRVtO|vMFoHPi%Fmd+XYVwZ`n_2#qqcMlBTfS?2X*N5C-1Mn3>7u$_JVAqJ40 zKj$o9cr8jm*9c9m+CH-we#aYm&(sXh4QpQJp`zV7cqeG}OU4b^YfCSBi5>v7O&N`v=yuER;?= z`IklSSznBSVCnG~ZEF*}@-Z460nZ}M5vV=DUXjk{)w*897PSPAuZPdKeFw(5CX(3D z3%pEAf%B-KX>c1`DaoFwsK=BjunLHVW4_b$IVe%tl(0h0@7PhYIF_jF-DfzPs+vw9CF!uD)DqraCOO1pfF+?w(^(B_t{3cba&d+y;M$%Y68)FDs+|gPvTz1s`4l6{+u3<$i#txd3#22mMpkRHX1ilUkC0yLw&k%OvQz zyC08m3*P_G?4S3Rh|zu1ZNRNwx1OP-qORv|^^^w)z*n#l*5@`>TTy42NK2(Zy*m^12}9+f~r$%{0)4PoC1Y^q}H2`jaPGRO{r6c`9LX zT0~6{BUz{1NJ`USfA9P2nqg@^k_lT6ztN&f>T3OdG*!{(#q51W_gc8-l{$GA&{{G9a{)b=v(?9?7Z@>K?`Ql^w$bpX>_{f2e z9QeqAj~w{OfsY*c$bpX>_{f2e9QeqAj~w{_6$hFd{0&cI`1l{oM-Kdd%7IKKola5G z>5N|L^*_{azRhI$NhOnsWG0s{6mscgJQ_=+GPztfol3-$sWh*$nKU~wK9=} zOpXGPXf%-)K`Kn7bH!3IPu+=Bwou4t646K`md<8#IcUX2jC*-sC>JuBB$cOgdD9XI zrfEC9OQpD)p3(p{NL|)d^fDe7*=#zUh(_ay3_QptV=-QIbyZ5aM2v>e#e6yz2?nCc zOrccBW)gA8sxRx&!a+z2t7zoC*`BFKT zN<_kmOuo`pE|@x#)RRlYRP~8oQA~4l8Ca;sQXyn1BAC?-5F=Qkbuf>* zGKNbLwVf|hi4@qoEKzqrwa$p3IiJs^wxP!O*EYd>xI%bzz#y()nUh zQ$uVtW2ossf310tr|sczEX5!dGVw?>o-UNTDn<1pnab+hNLXV6rG#2rE|&^<247^M z8jHYfGfH%ufnfNVFH8q6(A?GJv?@+B{TNGVmdeFEUEpO=yp2Yghcp_7Fge93O)T{s zp7C))lSpih#bauq2(*l+LT`%}y4`M%FQg`?bLFny!I80vsp;9d*;)Q(CdY;c`+K`93}{LfheH7$x42wxx2vPW z>2$W++Z_%^yREIYwaxDI1Q>yQxqD!AdVXbNd-vez=}KJBO!G zfzILX`oiRJPcaefbT-#NfAaZf{5*bE-$b=uUogsQVboZz@kr3`^U&CqX1h0(stkXtfk8|j7Jzlx5l8gsnrLap^4eGoxP*0mv=AD_V;$y<|hV9nW)Fs()j%8Vcgt}&tPEiE0}^`quXL*80lQ{LIw&a9_2YM{+awSQ;IUc2gWI4`&A} zg$3r}&ua?PG@wctk$kDr-8(R37TojY;L6m=D!ie3aXLeRAc8|8<3 zD;e6z)gB52Dc#-GDhj$>GITA%P&TBY4RMiwD0;_-lH7=Pnk;psnac_n^|GY+hr&CQ z6qF0rf|If{Iv_^N(E${w{k-sjh{F!_gtVj~o`)+I^{z^#3LCmAyeLyj#bOaH#bQPg zu?*wLJk*IHL*scC6VNtc4Q?zRiA7N%sdx%~jxsN1(2a#urih9w<k9j-*DbktC|At| zV`gP03zdvF+|v~eh$qQps9cRlBYl~8KAH=6RlTw7aMaTstwysYTeZ6s@QfBx;Y20T zlL}?>)p$5s=n6!msbV}7gi~HuHkt6a5~+kc7>wkC?g*A*$REn5V!>cO9_{b(`+7!8 z+45kaG>IPWpDM9{daBb5?$AK77sH{eni1oYNd_y*90~b4eVvFI#Hq*aWR1H$u6C!p z)7##`RCjw~Ax|KhWlS?&)t*Yv@c8)9*wp;o#LWES!raWv+}!LqYksu1ySJypDrYbw zn$#Yr)8TZqwX{;2n;M&2nj0IMn_C(i8c?+DZOtuCue;O7vc#CF42+IU%r7l0ZtU)E z?dz_3=K6_eU|E#XDp{`zDk4_>CxWS`s&j3*g$tVmx=maE@vwZ zdiJ!g?kOrAam>3`o2|92-GPD8&Qn`cQ%ie?qovIq!1#%#D}zJ*BeSa;yQqir+uQ4l z%d4xav%U58wZ*BCzDhBd413)jj@IUuhUaz74QPwTx<t&H(0NB3&Jyotm28Iy%2NKfk67O& zR~nmIY}hks59G4L(b3_sJE+3e{Onn?$J^d&4`jPasocQQ=IX-A(ezJ7gq za(KA6y)xEQ?X6^j9+%VE2F1Gi`sdFvBCw`xQ0#DaI9<$%4s0eD8m-;d(%Ss|X_L$6 zwzvB;)xNG`-@^9J*7niW-Tj+aw|8$|U7Q^5ZLZCa^_5HcRKVr5w>CCDfBfVbOl@dt zZfUc(w>u^2jTH5`I^0+k&i1yJwwAglPa4|29!ER69EmwN4R4RmuWw(zdj0zK{q6bT z{{GhT%t$u^KN<2mZJJQ^^$iVDopy)A*?~5ZmhxgCdZ0|((ZROn#>V=`pV!%$)ox$3 zz(SZ<-q_kZeQ|gH`t{x2>-(G2gWb)Qg~`D##6l#{>9V&nL77R-Ev>e8=(Bg9!s_PE{_)kz`}@0>*hS~Z`&(;Evtxag0v=Dm z+rgA?pv6p8SgbifBc)NLR?)7^L-@={I_#Pnbq%dfw-+lcU8?qu%&x3$?j4<8+`M@4 z;`Zk96h5!aPczr@wA$NYhjbl-S69!BVE#L#`cYD7Q`WZrq#0SuFdxRS_G}nJA?)Q! z&%oHs;@Z~U@%iP|_4W1D#pxj&nVXcE5)Z;^=J3-ej~_jH!Z^ZiyQ70u>kpz!WT{Az zNt2<+(2-Pt1!7|Xx;q1y8JOe!BNMaG-`zhvJ~=r#K009aEzeJn4fY@$vEne7;W-R% zZh_^7E5@+!TNd?u6(em{)5OxemeEltS9|)0Mkl6b=H?fdOj(?to0*yz9UkcGt`zgw zoW|`?+cfJ`q`m2ZOd&L_r4^0pvh-R_MWVwbu4OG$WrGZ0gbZVb@L=o@Oa*kWECm#! 
zWhqGAqP|5}lr3$Gxo~A+ka*=s1n0$v0J1m z;=iFTy7HBOp%^C>FA^Kv=z5G&DP*gl;oqxx+z#Vt3cUDvQET7R7qwq3^44)xRSPyc zMg;{8PthwmBeL63uG~#4Meu(rRr3q{ z5YHcqMe&y&MTP>we=OD)s)O=Ch-a&+pMCJ~(z^1O9&4ojv!@TYJdpaw=MPu>_j^U_ z|6f#IyGLFB`Q6y0HCUzgskz|c8rBM6g!K)plUL@KkMS_gOOa#11ag5s@Wcr55p`&x z=|O*5bt<}+2a)GS1JIcTDXfY6WyNxr8fexA_nFuFOm|uLQ2}4;7sAG>re+Bbfo)u( z`l&`FlKA5ph-T7pgZs&M!Iw>8GGPS+qRFPU+GnoPtrRr^AV!^+TVnd8I}uORlEF3P zzEm=n&Lp@R6e^uEPO|P6UoiHP381G3j({mj79bh{yCADNv>ItD)>|%baZGccN(F~; zAx2#$Ps{mEXquXfF>N73TFv4{E-$8};F1;&4NZb9^1$ufO&=uz1X$)xCjm7jVkvN~ zHWakf@K}OK0HEMhz)0=@`Qk1ONGgUE4;wwnu)oVg%)I=3_(YGW>D)*== zpf%&I3-O}^2|hLUxVoN6Flp#7=DqldOF<7)@fb|9W){{N&7qkPrN!w?QX`pSo}{y> zB-doq3@6{325>J|@XI)>vosP0Fbqsd0ka7p6*ZB|G%JMEF?cjENsj@}l2HS%=CXNM z%LD=N1KkD0dteGZ)nv;gVJg#rYTz@~(H=UQzy|@H71QwaaC=jEMx5yx*C3~p3_vD} z`sH3|q-ao%Iy6c-AX>U0j#<(=hTfqjsGt^-__|Ta?kGs0{6V-yed%l&tx^DjE#R@^ zNtW~IMbto{lrJ((X;YF1D9NjnA~%jN8rWZRc`ZdO2owCaQthhtpm2wVjdL!~9M`-D zS62?B!C5SCAC(R$0R$+}18dK2x3>%SkWJAA%v@IaN#wdIA`X>i4X z-fEGF5eDwmL#Uw+FGbcmCJyGV(-$uf&u#1<9v+=vUB9?{ z_2S}SZDDSrui9P7U=x8MVtQi|db}8K*lt*%vJ)LbD)Dw`A4ViC1IcW~7VjBb+&(-! zIJ(4Kesh0wyuL6uHPqW(PKSM>g8h!w=fdgKiyhhnx7i&Ez(6yC;R~!z)lxduWU-pDVV388Ys0Dg(6H*JpbhYb$eOy#?8Qc%1a7tsRpZe*h~RgB5tz?P53?M|+!1 zo}3+vPynyppYI)-m|NaBIKTbs>sL3|7sm&?YYUV8r5w&y(C0Rj!1zFx!ETY18{oh? zZg;e{;>oqCnJvwpTyHvVKD@V6KV3a+R(+uPqeFtv4jdiDDCH6F|D)!x$F>~L49kP0}OTkQ76 zx`u}OX1l`$e(Gi7M49q&aN1@S{|&Gk>7;-$67!0x~o^#@o_kuZRn&jUN19hlzut*wnsorUf~raH5I zu)A||^Wyr&?bYeQ_QvYWKzFqibF*IB8XkRt$6MckzX0}Zz{hAb7>t6ecG6Xb)W)1> zu4@YwyVCLM(%$ac+5t_vxV*rM-(H*?8S3v!cpTnN`?D_|J*&rgepcVYxV3wLSVO)* zEXFDYAM`Q*Sf6bT^{#wZ!k?bl*ygS`LxO#Q7zq&XN zTu_chGueovvC-CHtH%$v+i|>skX_6KEPt1rI_AEOxeDmrnkXkc;lb_QxsmzHuU?(( z9o^jD9BeEvuB=RUWr)?JJk70cPdoltOM82BU6VwNz187i_5$*{0F5Oh8lOLD43*O^ zf8WOL;>h&*yVu8iM=xH#Jl0DSIe2Gp2d-$s5o~z&_)%-35O5|Ywl*h+R$qN{v$ubF|K|2!V`F<~ZMv^i z?(I%{ot~iI)q=O_Y;A66WQoup8;edJa9c|Qa;u>p2)!ws3p+!@TN~3OOE+J?I@mdX z^A_o_xwE@DKUl$O&jsABK-j~mwG+0eZ)^jI&>IjS2~jAq4jMqvpLM3=Zf|*cdwyi< z?5j7Yd&hTQy}H;3qS;*@>+S08uSDGhb2@EJ&7cD<4KN-d3VZ-kpr=+tslMUK=Z~H7 z1bEo=#^Tu6?w4Pl@1NYh{qp8u=ip#>V|t+4KUhuq+`fpvy_t{(ENA7kfgIq(8iK7z z`Fcv-)6XBP;=ZOt0E{u+@U%k3II=XrL<=xrw(dp67^7v5yP){!G zMfyRc5faay)d5|hZHPy~a%MMR84?+PzOFqO^>rqCr>93pmQJqD4vsGF-@Lp$#gE=! zod#L&Ekrs!Ubn5`*>j+IXXPp-(H=YoEYsdXA+T+w*%(ZKY#l43qo9vK_Q-k`JuIl z_yKqc@K=-hZd~-S*`>9OO(IjrAPxJwyW1czlf+2M1eL=el3*|ZM}+U5Kd-9; zlmgfTE`wDHn4n-Kpu_PT5zOA9aq!LMwGE()z1{6C0xrw*ib;a+5qJ#;(9zONQtSF- zF0?XyHbcm*hIMoXk<8I#9?!lPkYajveraWGePeTDZB42OK$ zNGG5GLGoadIPXN#bl7cvPe%Z$im{mQ>h2yI8=C}rU0PmTTv=FNn46uPR`7GOyaWn>0Y^kgF(IOpK38YE*Vh_s^Ee!V`e3j=AC9@N(O+Tw^|+S z?;alN?;GQ>Z>X;_kss(Q6ALL4xyfg-G*ii3ARkJEnN`7<-xu{_{KRaLpcA_+-j?qO z#)6SB!D!6eH1SMwAfQ|7hIXmDTJEhB`-+vGd{;T&RmfK`Kf&m6sS^1}Diut{Lm7W6 z>dQbP=8J^`ac|li^XI*MkqU!S=VRdf_!lLmJd^lz-QsA zL=O8umWw2_fkG&e3MZqtFVwrqA&#>b` zF`#jrE~-gGTW!zeVkw$M94JPk)5$`NNlz2Aahzd%1Zd-A(Qo6|$P=Vx@Y|5i#qj>} z)JA16LG8g$R7ccf_#nquEWlGD+63$IH}J}5oFiG?j1bdjPa-Hd~2>ZYM)8qpQL(At)h%vXBAi%v03 zaQllXrZtz~l#1_akgii3vO+mvv*wWDjPX%%Qw1g(pO*)@zPPUPUrDy1yFA1}K1f!v zz)8}9kS4@A6^wiULd61>2=^gGEqGgAYfxS*AtrP9A%tZmTwDk%V? 
zyp_x#cco!r97{a<2GTd*7TkWoqR>U0_PL<;t24fkr6^2Gp zQF9AK-*yg;DZ~|47pKQZ271W$B84Logb#SOG)lge{`A^}vT95rg~2r2e4{U1_!h}R zwSRPKc429mMiX&4Jv%==+TUK8ofzrE?E+;XS;glyUau313JjqJ#aPpQDOE#QRm!C` zaS>L~tjfC<6F|D!wF6;@weWh9ug%u+ph~JOngD2(_ ziK|Go~jsRJA z5%P+ZScJ!?rptzrn@zlin#dIDBohWZ&(>Q1w87Dt7~D92_3aKv9mnUUC5K*OS|(>taXQ?lW?N)<`|`~ne|)>Y zNRtaOS8LPrI(OIP)Xeho?Bvw+)JP>{tAG0VaZ?8ZESk&}E7fW_t0tSthO6uH6B-m4 z48R%*fLj0-VMw=UrgtvB{QWmK+p}XMy=jk}>X!Hr`33XyWCYI64ClSgb zH@e>L@{;<;7^34y^6&%B;WW2&WT&6=HGZ6%?;mNlUttEyWksc&Xis|k{!Z=!N;l6pY(k`yA$XqyD ztVTMCwnkGavN`)_H#Qfi#)rBqnI6tn+#Ua0juKGrCmFS;x5CgV(@#=V5qyw9gH{>@8vm9~Nt2KNMk>W-uq%cvZCuhCtRaX`&EGzk5(Z1eW6YoPT>l*zE(r64r8WTqbVx%c7AX79sA+KahGj9}k!cx~%WXjhvG|ar`jiOOjJ*HpE3>;>< zQaO<>gCMX5Rzn*3_xR;q1a`uD$T?ghi1_V2g0e(qu@MA8H>NT6JbBWf@CION5jtpKN{~v@hnkc=1gdAOE8;L@;(!Sm zQ{Du-FkUydws-gTDSKFg8;W^NA!>R_fybUFP1s2S5Fx4O&z?Sc@|Xgq2H2-yfXD+! z^4Dq?Z8C(iMdjsB5Jg+r*dd9>B*b5wV-_osXJKXnd%j{KIv!;>30!*$U1+L6SA+7H^ z!ZorYN-~uiEP$H$T3b7rNkOv*fTW}8xuvyj%`19zGs>7QW)ngK z?JZ3pixv~a1=?%aV37sYt0!@dEdk1visRCN397l9d&iJf3fcXumrBOkUc*`&9q1`z z!3D|20=<3y%&=D6HDpQ6vbbx|-a4`jTWKzyQd)W4(%e&Y&>}v~09=qA zf|b+=%KP}y7hjMTFCbK{g$IllaY_o1F|BprfT^_vKPP4u$UYZDE3)_Zccdzw9Bh#} zHa6JHpv6LB!oy^+7z^$zbX4%D88$)6@aSoS&8;!3^o>q0t}F5J`0VQT?(W6S_2uai z+as6?aG*+FZX!xB)J^WBKz7XidWOdU(~Li_!Ok_m6uxY2A~NQRfNoTV=Qnm3t(%+c z>lZJsh%p_4;X`w5sIOWgPeFc*65NfyV)k#S%PmAh37RxE6F5;EhIlCUUgOi}?Lolh z+`#-6*`ycu_cvq`UZ0&Y1GeDl^jIIc;~9yBfP%l+9JnfWAZ0?LO3}n(2NrH72t^e# z(FkNT)jwyOB*@Mr#P1b>{wExoC}vEh;F%@ejUfcIaYQ;rXIaKdM& z@Ck6^l0tHE%K-V?G=aoWvonU4VF(i;qm2seDyZ00-{$u_9P#df-oEjbgX8_36GN1= z+M~T~B4)UpJ=Jmn+Y8%Aej4|gfH%)r{p3v2L|V*e`qdsBws|zWalS&OzMk%(#l6Gb zo#UHVFV4@{F|f*;Zfpb|dC?o^MJ^NQn+pJtK) z%%~g2iUnG-cAvY=m8GQwkxveH4zBNC-JD-sU!NcCtgn%oI?{_uNQHxF7zRpFdWvEL zGCma=AW|S6Fjm^rq^VI~*XH%MJ7ZNwYHaQJWN+{C{`Kt@_W0#7!%yS^G^0f3sGoU? znP7)tkGE^Xr@Q!=3fz zrG?1>GA=WTFfag~4fluvvb!llEG%G0gT{y{sRF|vwl;bF4tuza**dgve7?VPc6YD2 zdUJicD==zmqzCLQ85E$?(azElVSEq4J9r3W4ww{WafH^YLY4sQs2(#0*0=hOun?BC8Dt0CS)?op^V2K$~9(XQJoYJlX!f z?tz8#ufM*#dHL11_ebmVWBt`!q?7O%<81UX5Ntve4YY-=guunGqxZB9uM{_lYH^6e zWGc&ll-JtU?9LC342-PY{QifpUcG)xP6-;&en#u`0(uX%-YQ#|N6t1_b<;kXNX@$JKBkI#@H&t2DQH4 zu8j6RdC5gVOEFo6Ao`3!N*)xm4zA$BTQ)CnAl#P;*j(@KUf$l^KL7TwKfSv--JR>k z0f>9;u6UXKRzpPT`g^<5elQz55`ZYVQxF_ug$In#kSQvbbOx}d+3$fljc=??c+R$l zmiXks+40HU@Bj4fYHwwt6arR>`8(s?{aeruOX2!mK5QgSz1?CJTduioC>y}a6+=_@5kIq+nG z$j9b4w#i%{?@9T<6-cvabNaQjG!+l}$m|E41%(srDb~p9hZXu&G8q#aMAYN3M<>s} z{{HKiXIoR{NFd;9Y4H}u=CP=Eww9*`yE6V(rnL>s4ZDrKXURw>C_5mFi-`=rO8=Q^ zcopI`+nMBZ2m@f+c$}S;^}FwXcz3fg7_~PvJ6-NzX<~V6|LAaMWxSG#cv>48S_nWV zbEPi8l6U|i3cv?tMvkR!?Gxj5m_57{Ym=ZH8D?*wXXEAffBNC|&S0eZX+t}3bMLfF zKWI<&<)WST#)ei0J5tGgEaVd5fJaIC4!}yyWtJXO1)L%fLYS~Sn8ygd5uXb%@|PAb zzWMV{-(SsU+n+se>+q*~rU;Dh?C-A64VM#sM^k+>*=1p~zdT85lap1?lA+h6D8Xz^ zz93kjb{=EN34?}LX+wXq*>8t%=8V(p4E{@DCEUs}er zH-Gr)+morR^EvpNFF!gvx45>+-lEx&Y9`>o9<_D&BIKEBLzK1!!xc>wq=?)_YtA4V z;fnE4nTXO7kx0N7>zUhm@%^8Ff4empLZ8>$qunFpbE_tEU}m^G<7fO@+d9DW8Llu0 z6B|d+DoQNzfqU_q%_E(@5IiT6%xp0-P#Y(hjZqTAuD|>1pT9hw&AZ7#v4zmSqYEpG zOB)-DQ=>g8z+UnYxSu(|+JFzJT9zO6pm5a>1xtkdA>4o!S-Aq@C2c&ho`wCpKmP5f zw+AyNKdRFkEA{jaP0i24nw6Qc-W=WmN{Wk-rntzEMnZK4jao1Q*XE!$_KAa9!WLwT z0t$%24p+D`wZ(w`^!52lUy5`if1I__KRP}&zk;Ozyq}G;m(4iiG{x8)I73V{3%@po zB3>6UB+VCOA*)K`OqV?I?wP&2-~aTdH%AKtxiFb^Ne)wBGx@~KlC02?o&wVk_Iha? 
z=^dgDhlNifTd+OATJ7M^KCcqr*pn9wgxOiu+3B=(gbRZ!=WoCN!@H}^Nfu+6P5x{+ z?CKq!m|a}P#T)8DXChV+s4_QDHFw*U zzx(R;aCy9k*calCQw4A-Cl;2LKwGi&a!e4WHkltRd5s+7${Iu^l4k<+&YX8rT_7xl z_8`LP3(a0UdWE1p-&q)EgA{oOq}gN(K(KR*%b=Jz5%`b+j9p{fW0lJFq;*)U7(vER zBSp^5U?LHfY%7h{VQ;bdGu;zgmv^u3ZjN`>m!@Qka56z6UFaSdnV6cInl|1v+oVa? zBN&CPOi~PO)S^e-Vmx7D34g{4LXa3Flc`Cx#paGzMizF@ugMTSJlNY@hP~_>6Yc_* z!?xVPp}~Ft3vzDc{4n9L{uO?;%t)+XGrcte#&DCmH*yAcx4Xi{{)xq{gQLTJ{NE!I zP-n&lwbhp`&fUa**_~U}4oJdz2vFtTS|*-ib!xI2%{V+n4xuaJX1=m`{mF9Q@Z{Vw zWo@&TZ8V}VGSt~YO#vGMb~3?286Ikwj0OlutMM#?(g@muB%Vv0pyqUxLsq+~X<>~E zgTJ^x*^oB?v9zMLpsR}a1zwn4Q$_-!;snkiyeu%zLWm6$%m!$~mp+OCqO}9upk}mR zxS7@-!zPNR5>;doH%*szW*(UuWI>TD0VoFoO=4F6fJPChe^{H#LFWuCR|Ea51aXNC zkw;)z%n?3^lu;!}NOZK=LIm_{Z^=V>GciOGaE9+*5a>|Y1SoYUcEcP(&$6GPninr&o( z+Jl9@(FMS?%iC9P-`<{Vud*?hz(r3fn=N(q435ZD?5_5(?={8JLhdR4A|I8R$s#r} zW_hc;lXz2+-zEnIL%5~ImFk~fJ2=03ar@%#&CBEUxzRpC4ufo@t+3B;d~$k{1hg`S zkt_^Nc{LbA0|A4GRJyA?_J~OURrZ+5s0~R|yJvBzkVcg_r7%SG)pdLs72?^o}=)u_S7P>3_Wq@C0G0=@l|?b&N+hyD3AmD?C8Ls7!2VTwB2c zMtzV%PY_IUPS%a=04)M*)zR5t_sYquZ@&NL#m*c7yJ}YsmxTzXLSYB1Q$y7Z_zQW2 zTuE+sO?7}Zq4JoNOd0%HRH77OAb^X95+D|hQQ!}Fo$lQ9-tG5){=?g|l>yR$l#50% z3_oD&@NffDDX9=Js|d>l6(pU4&?mk!%3(gv^stb#QWVeeZB*X|gXJP=dC}zmUq80!QRCTWS+r+=9I_f{iy|q4}~l z9M0JA#^t*||K+=@)xMZVI{^|Xk%1{})BW9*$^LwZ@v{+im6fHF3ou}OZe||9gtj%a zY+#4@VMMW{HSHCG=Ll67kMDo@^A9gKM=Pn2Lccjk56*Bh%ii|Hogjd=s&4R>?A4%V^Xi!+ba&+u@!4?B|l3FM27a02KG;)E4R)3H#s@4E$U-6e@d z#xEOlrY6RJW*SMpaByI=&{Aq*Gb{5++jK0uvZFqYVw;aHFby z(i1~EJj5zGK?FdEqEkr_E{Z{!lTM7MrX?X&D*inD8KSI43d==E263EpRsm5=3aL{o zLZo`oNm54ItkiAkJLyw}pU`$%Hmo>9Nh|uHKvm*ED)r|_ewc9ULj?-yn!`kNM2Tpl z>`Z`5)<7-1rL|#2w?y%0#V9(9x(RPRgmX>ctlDi(6zS^&3uBbq4J zM^>lmP$mDN$kb4v4x<`tz{KWENSB?)1pKgEst-kGj0qf@=&quRR@7K^m`YTORS|WG zu2p1eD1=M}%4JaGl4z(2S`srRo~Xd04j1`{$aKCI!GuqWipq;qJ*LRq6fwU-LH+lE@rR23;n}Ldin{-Y zY78}De5T<%==Fyxcx7%;Vsx#cVw_%*pd*?}UL{YJLugDMEgOeP+#ySxJXS7{IEk_6 zl)O~(ciBs-qiwLi#ZacK2q}4(XP($SNjsDttAyVKyE)l@iyvg2tf$m#&Z*;6Ft(5K zO!jS_187JC(~(+59k52K6NE6R$t&kZ)kCf?WT`o)R9cgap3y0W)JKD8KChCg7$?zW zQydnUOQtpH3w%L#uG&IFnI`1jDm|Ef@{#0NI#bBzF~Ze>EXV%=FH8#m+&vS4>++TPC-{mjqMJ%Q=zM7TU%o@QGjN} zon)DFAX*EMpevFn^bZeDuWs(&5M{l+Bq#NFZ)bI~pM9n}Ue7 z5S(TDkO&@UlLC51@kdOMcI-(dJIYfgV1P3AdIb#;LTP-~>`xVY$L3ZxcTex$ef#B$ zi}Rzc)#Zh;E&^BqwHCXA_`jx&4jP0dC`WiIi_gS%Fz+!Y4b&b&f1&cRT4*+L!eFK{ zFtxm~bI1nEZ{OWrU7Tza9vttIcpVN1ySGI9-Uc7$y?jARPYk6*Jw4W2&Fq32`0%d~u81#sPkp%BxB530v z_=o&)oH7F~BlzWHN-YahVC!5;)>C&dt_{f(WDs!Nin23yHbKJYlo3rHqZGQF z*P6Lty8HlQ(Q1JPOynATje4gNQKt5lmDh5hms8kP_(}$DOy2x)g-nVQjb08bf!x%ob)EHTY4@a^I zLNHB~O{IKe6)0QQkFii>o4hxC5Q>%`Y#q)f+l^IEkzIxPW6UZUJ~B)m_#`G%NzJfQ zgDK*TDQ2K7+eDT_EgeY*zzM4n6aow)qm3fF%@|kzU|X3rYs{Vpsy`#HiC_xz6iWj8 z~Mb(&k)E`Vy zler}e2;0ioS=68+ON)Axp{JTqIf!-{4#o(wtRNBmP#(-6Sy~hs7KnGFD5dvgJzz1I z%@eYLFuQ6+?;h+S^u5uS#)Ob6H-^K5kz!G{9v?n4&z5n#;*6s~GCcG1ss_b`q= z*ez21Txx6~EfxbC4c zObm<6LzNckzkhtV!t~7;B|jtcpVj$ymoVNn`-l1-h--Mv2_QOs54**(`(zg=312@l zZp0f#&@j?^C#I@@)Sw6b>a{Ejz0_7M4Mmy63M>x}a$p0AD#98tbs!fs0WmHl=44(l zNW_IP46I?&G+?)m-l!g{sD@ghE4<=@+b*x2}(%oxP+dr_m62}Mf}f>SBkKuHn?DKj{N449f=7RC~c z=c+?fi|dB5BrC}z_k2N;JGh_{i?*GT37nGt5wiWG_EJS*D; zt&2I(YVVAfM^@PT{`T$N)#>5p;tXd#kR!sTj8eIKaC8I_*{#!>Nf~iFhz7T?=^S0{ zLUV$afEo~B5Kf4yl{=2E>fm4lU$Sdr>*nk4fB5$PVsDi#+1=gjtz`HrJ$)p-O_01@ z0!#I9FR2?2JBF2=lXE@9#R0I0eQ-pAAPFH$Knn^odF-vuNMUgG^4*_)`s3SEHlOFQ z6~*Lg&*0GDD7&Qx`YI{lHJdh~+qKyrQw0l7$p)6juE9UNRqlbH3O8VxE7do1@b;&_ z{q)_7t*QP@FqmXE^bTTxju8c#BDpCSB0GT;F(4k|K=f1_l9U$!E+(856H`gw=p*8t z1lcjeY)!Uk_r%Vdzx-eS_QT7K;Zn52;ZGIHePa_GlFDw|an6=ahutUL0Fe2))M1%JRy703;K1ZDMdB-?GP(37sMVSm&o&XR9+iv2pdozyIGq z-mZ3e8^8GUNk?{YdU#+52_BG+;?B+PzxeLo 
+ np.testing.assert_allclose(r2.numpy(), np.where(out.numpy() > 0, out.numpy(), (np.exp(out.numpy()) - 1)), atol=1e-5) + Tensor.no_grad = False + + @unittest.skipIf(Device.DEFAULT != "TORCH", "Takes too long to compile for Compiled backends") + def test_two_overlapping_binops_no_rerun_wino(self): + Tensor.no_grad = True + old_wino = Tensor.wino + Tensor.wino = True + x = Tensor.randn(1,4,16,16) + w = Tensor.randn(6,4,3,3) + out = x.conv2d(w, padding=(1,1)) + r1, r2 = out.relu(), out.elu() + np.testing.assert_allclose(r1.numpy(), np.maximum(out.numpy(), 0)) + np.testing.assert_allclose(r2.numpy(), np.where(out.numpy() > 0, out.numpy(), (np.exp(out.numpy()) - 1)), atol=1e-5) + Tensor.wino = old_wino + Tensor.no_grad = False + + def test_first_three(self): + Tensor.no_grad = True + x = Tensor.rand(1,12,128,256) + + w = Tensor.rand(32,12,3,3) + x = x.conv2d(w, stride=(2,2), padding=(1,1)).elu() + + w = Tensor.rand(32,1,3,3) + x = x.conv2d(w, padding=(1,1), groups=32).elu() + + w = Tensor.rand(16,32,1,1) + x = x.conv2d(w).elu() + + x = x.numpy() + print(x.shape) + Tensor.no_grad = False + + def test_elu(self): + Tensor.no_grad = True + x = Tensor.rand(1,12,128,256) + + w = Tensor.rand(32,12,3,3) + x = x.conv2d(w, stride=(2,2), padding=(1,1)) + + x = x.elu() + + w = 
Tensor.rand(32,1,3,3) + x = x.conv2d(w, padding=(1,1), groups=32) + out = x.numpy() + Tensor.no_grad = False + + def test_reduce_relu(self): + Tensor.no_grad = True + x = Tensor.rand(1,12,128,256) + x = x.sum(keepdim=True).relu() + out = x.numpy() + Tensor.no_grad = False + + def test_bias(self): + Tensor.no_grad = True + from tinygrad.nn import Conv2d + x = Tensor.rand(1,12,128,256) + c = Conv2d(12, 32, 3) + x = c(x).relu() + w = Tensor.uniform(32, 1, 3, 3) + x = x.conv2d(w, groups=32) + out = x.numpy() + Tensor.no_grad = False + + def test_multiadd(self): + w = Tensor.rand(32) + x = Tensor.rand(32).relu() + (w+x).numpy() + + def test_reorder(self): + x = Tensor.rand(1,12,128,256) + w = Tensor.rand(12,12,3,3) + x = x.conv2d(w, padding=(1,1)) + print(x.shape) + x = x.reshape((1, 12, 256, 128)) + x += 1 + x += 1 + x = x.reshape((1, 12, 128, 256)) + x.numpy() + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tinygrad_repo/test/test_conv_shapetracker.py b/tinygrad_repo/test/test_conv_shapetracker.py new file mode 100644 index 0000000..9a4642f --- /dev/null +++ b/tinygrad_repo/test/test_conv_shapetracker.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python +import unittest +from tinygrad.tensor import Tensor, Device +from tinygrad.nn import Conv2d +from tinygrad.jit import CacheCollector +import pytest + +pytestmark = pytest.mark.webgpu + +#@unittest.skipUnless(Device.DEFAULT == "GPU", "Only GPU supports cache") +@unittest.skip("with JIT changes, you only get the raw buffer") +class TestConvShapetracker(unittest.TestCase): + def test_conv_3x3_one_view(self): + inp = Tensor.randn(1,16,10,10).realize() + conv = Conv2d(16, 32, (3,3)) + conv(inp).realize() + CacheCollector.start() + conv(inp).realize() + test = CacheCollector.finish() + assert len(test) == 1, f"conv should only have one kernel {[x[0].name for x in test]}" + print(test[0][0].prg) + for arg in test[0][1]: + print(arg.st) + assert len(arg.st.views) == 1 + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/test_custom_function.py b/tinygrad_repo/test/test_custom_function.py new file mode 100644 index 0000000..7d4cca0 --- /dev/null +++ b/tinygrad_repo/test/test_custom_function.py @@ -0,0 +1,107 @@ +# this is an example of how you can write terrible DSP compute breaking ops like warpPerspective +# here we use a CUSTOM op to write atan2 + +import unittest +import numpy as np +from typing import Optional, Tuple +from tinygrad.helpers import prod, dtypes + +# *** first, we implement the atan2 op at the lowest level *** +# `atan2_gpu` for GPUBuffers and `atan2_cpu` for CPUBuffers +from tinygrad.lazy import LazyBuffer, create_lazybuffer +from tinygrad.ops import ASTRunner, Device +from tinygrad.shape.shapetracker import ShapeTracker +import pytest + +pytestmark = pytest.mark.webgpu + +# we don't always have GPU support, so the type signature is the abstract CompiledBuffer instead of GPUBuffer +def atan2_gpu(ret:LazyBuffer, a:LazyBuffer, b:LazyBuffer): + assert a.device == "GPU" and b.device == "GPU", "gpu function requires GPUBuffers" + assert a.dtype == b.dtype and a.dtype == dtypes.float32, "gpu function only supports float32" + ret.realized = Device[ret.device].buffer(prod(ret.shape), ret.dtype) + ASTRunner("atan2_gpu", """ + __kernel void atan2_gpu(global float *c, global float *a, global float *b) { + int idx = get_global_id(0); + c[idx] = atan2(a[idx], b[idx]); + }""", global_size=[prod(ret.shape)]).build(Device[ret.device].compiler, Device[ret.device].runtime).exec([ret.realized, 
a.realized, b.realized]) + return ret.realized + +def atan2_cpu(ret:LazyBuffer, a:LazyBuffer, b:LazyBuffer): + return Device[ret.device].from_underlying(np.arctan2(a.realized._buf, b.realized._buf)) + +# *** second, we write the ATan2 mlop *** +# NOTE: The derivative of atan2 doesn't need a custom op! https://www.liquisearch.com/atan2/derivative +# In general, it is also optional to write a backward function, just your backward pass won't work without it + +from tinygrad.ops import LazyOp, LoadOps, BinaryOps, UnaryOps +from tinygrad.lazy import LazyBuffer +from tinygrad.tensor import Function + +class ATan2(Function): + def forward(self, a:LazyBuffer, b:LazyBuffer) -> LazyBuffer: + assert prod(a.shape) == prod(b.shape) and a.device == b.device, "shape or device mismatch" + self.a, self.b = a, b + ast = LazyOp(LoadOps.CUSTOM, (a.contiguous(), b.contiguous()), {"GPU": atan2_gpu, "CPU": atan2_cpu}[a.device]) + return create_lazybuffer(a.device, ShapeTracker.from_shape(a.shape), LoadOps, ast, max(a.dtype, b.dtype)) + def backward(self, grad_output:LazyBuffer) -> Tuple[Optional[LazyBuffer], Optional[LazyBuffer]]: + denom = (self.a.e(BinaryOps.MUL, self.a)).e(BinaryOps.ADD, self.b.e(BinaryOps.MUL, self.b)) + return grad_output.e(BinaryOps.MUL, self.b.e(BinaryOps.DIV, denom)) if self.needs_input_grad[0] else None, \ + grad_output.e(BinaryOps.MUL, self.a.const(0).e(BinaryOps.SUB, self.a).e(BinaryOps.DIV, denom)) if self.needs_input_grad[1] else None + +# *** third, we use our lovely new mlop in some tests *** + +from tinygrad.tensor import Tensor + +@unittest.skipUnless(Device.DEFAULT in ["CPU", "GPU"], "atan2 is only implemented for CPU and GPU") +class TestCustomFunction(unittest.TestCase): + def test_atan2_forward(self): + # create some random Tensors, permute them just because we can + a = Tensor.randn(4,4,requires_grad=True).permute(1,0) + b = Tensor.randn(4,4,requires_grad=True).permute(1,0) + + # run the forward pass. note: up until the .numpy(), it's all lazy + c = ATan2.apply(a, b) + print(c.numpy()) + + # check the forward pass (in numpy) + np.testing.assert_allclose(c.numpy(), np.arctan2(a.numpy(), b.numpy()), atol=1e-5) + + # fun fact, this never actually calls forward, so it works in all the backends + def test_atan2_backward(self): + # have to go forward before we can go backward + a = Tensor.randn(4,4,requires_grad=True).permute(1,0) + b = Tensor.randn(4,4,requires_grad=True).permute(1,0) + c = ATan2.apply(a, b) + + # run the backward pass + c.mean().backward() + assert a.grad is not None and b.grad is not None, "tinygrad didn't compute gradients" + print(a.grad.numpy()) + print(b.grad.numpy()) + + # check the backward pass (in torch) + import torch + ta, tb = torch.tensor(a.numpy(), requires_grad=True), torch.tensor(b.numpy(), requires_grad=True) + tc = torch.atan2(ta, tb) + tc.mean().backward() + assert ta.grad is not None and tb.grad is not None, "torch didn't compute gradients" + np.testing.assert_allclose(a.grad.numpy(), ta.grad.numpy(), atol=1e-5) + np.testing.assert_allclose(b.grad.numpy(), tb.grad.numpy(), atol=1e-5) + + def test_atan2_jit(self): + # custom ops even work in the JIT! 
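The closed-form gradients hard-coded in ATan2.backward above, d/da atan2(a, b) = b / (a^2 + b^2) and d/db atan2(a, b) = -a / (a^2 + b^2), can be sanity-checked against central finite differences with plain numpy. A minimal sketch; the sample point and tolerance below are illustrative and not taken from the patch:

    import numpy as np

    a, b, eps = 0.7, -1.3, 1e-6
    denom = a * a + b * b
    # central finite differences of atan2 at (a, b)
    num_da = (np.arctan2(a + eps, b) - np.arctan2(a - eps, b)) / (2 * eps)
    num_db = (np.arctan2(a, b + eps) - np.arctan2(a, b - eps)) / (2 * eps)
    # compare against the closed forms used by ATan2.backward
    assert abs(num_da - b / denom) < 1e-5
    assert abs(num_db - (-a) / denom) < 1e-5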
+ from tinygrad.jit import TinyJit + + @TinyJit + def jitted_atan2(a:Tensor, b:Tensor) -> Tensor: + return ATan2.apply(a, b).realize() + + for _ in range(5): + a = Tensor.randn(4,4,requires_grad=True).permute(1,0) + b = Tensor.randn(4,4,requires_grad=True).permute(1,0) + c = jitted_atan2(a, b) + np.testing.assert_allclose(c.numpy(), np.arctan2(a.numpy(), b.numpy()), atol=1e-5) + +if __name__ == "__main__": + unittest.main() diff --git a/tinygrad_repo/test/test_dtype.py b/tinygrad_repo/test/test_dtype.py new file mode 100644 index 0000000..8b55153 --- /dev/null +++ b/tinygrad_repo/test/test_dtype.py @@ -0,0 +1,182 @@ +import unittest +import numpy as np +from tinygrad.helpers import CI, DTYPES_DICT, getenv, DType, DEBUG, ImageDType, PtrDType +from tinygrad.ops import Device +from tinygrad.tensor import Tensor, dtypes +from typing import Any, List +from extra.utils import OSX, temp + +def is_dtype_supported(dtype: DType): + # for GPU, cl_khr_fp16 isn't supported (except now we don't need it!) + # for LLVM, it segfaults because it can't link to the casting function + if dtype == dtypes.half: return not (CI and Device.DEFAULT in ["GPU", "LLVM"]) and Device.DEFAULT != "WEBGPU" and getenv("CUDACPU") != 1 + if dtype == dtypes.bfloat16: return False # numpy doesn't support bf16, tested separately in TestBFloat16DType + if dtype == dtypes.float64: return Device.DEFAULT not in ["WEBGPU", "METAL"] and not OSX + if dtype in [dtypes.int8, dtypes.uint8]: return Device.DEFAULT not in ["WEBGPU"] + if dtype in [dtypes.int16, dtypes.uint16]: return Device.DEFAULT not in ["WEBGPU", "TORCH"] + if dtype == dtypes.uint32: return Device.DEFAULT not in ["TORCH"] + if dtype in [dtypes.int64, dtypes.uint64]: return Device.DEFAULT not in ["WEBGPU", "TORCH"] + if dtype == dtypes.bool: + # host-shareablity is a requirement for storage buffers, but 'bool' type is not host-shareable + if Device.DEFAULT == "WEBGPU": return False + # TODO remove triton from here once internal casting is fixed. 
CAST of fp32s between 0-1 is broken in triton + if getenv("TRITON") == 1: return False + return True + +def get_available_cast_dtypes(dtype: DType) -> List[DType]: return [v for k, v in DTYPES_DICT.items() if v != dtype and is_dtype_supported(v) and not k.startswith("_")] # dont cast internal dtypes + +def _test_to_np(a:Tensor, np_dtype, target): + if DEBUG >= 2: print(a) + na = a.numpy() + if DEBUG >= 2: print(na, na.dtype, a.lazydata.realized) + try: + assert na.dtype == np_dtype + np.testing.assert_allclose(na, target) + except AssertionError as e: + raise AssertionError(f"\ntensor {a.numpy()} does not match target {target} with np_dtype {np_dtype}") from e + +def _assert_eq(tensor:Tensor, target_dtype:DType, target): + if DEBUG >= 2: print(tensor.numpy()) + try: + assert tensor.dtype == target_dtype + np.testing.assert_allclose(tensor.numpy(), target) + except AssertionError as e: + raise AssertionError(f"\ntensor {tensor.numpy()} dtype {tensor.dtype} does not match target {target} with dtype {target_dtype}") from e + +def _test_op(fxn, target_dtype:DType, target): _assert_eq(fxn(), target_dtype, target) +def _test_cast(a:Tensor, target_dtype:DType): _test_op(lambda: a.cast(target_dtype), target_dtype, a.numpy().astype(target_dtype.np).tolist()) +def _test_bitcast(a:Tensor, target_dtype:DType, target): _test_op(lambda: a.bitcast(target_dtype), target_dtype, target) + +class TestDType(unittest.TestCase): + DTYPE: Any = None + DATA: Any = None + @classmethod + def setUpClass(cls): + if not is_dtype_supported(cls.DTYPE): raise unittest.SkipTest("dtype not supported") + cls.DATA = np.random.randint(0, 100, size=10, dtype=cls.DTYPE.np).tolist() if dtypes.is_int(cls.DTYPE) else np.random.choice([True, False], size=10).tolist() if cls.DTYPE == dtypes.bool else np.random.uniform(0, 1, size=10).tolist() + def setUp(self): + if self.DTYPE is None: raise unittest.SkipTest("base class") + + def test_to_np(self): _test_to_np(Tensor(self.DATA, dtype=self.DTYPE), self.DTYPE.np, np.array(self.DATA, dtype=self.DTYPE.np)) + + def test_casts_to(self): list(map( + lambda dtype: _test_cast(Tensor(self.DATA, dtype=dtype), self.DTYPE), + get_available_cast_dtypes(self.DTYPE) + )) + def test_casts_from(self): list(map( + lambda dtype: _test_cast(Tensor(self.DATA, dtype=self.DTYPE), dtype), + get_available_cast_dtypes(self.DTYPE) + )) + + def test_upcast_ops(self): list(map( + lambda dtype: _test_ops(a_dtype=self.DTYPE, b_dtype=dtype, target_dtype=dtype) if dtype.sz > self.DTYPE.sz else None, + get_available_cast_dtypes(self.DTYPE) + )) + def test_upcast_to_ops(self): list(map( + lambda dtype: _test_ops(a_dtype=dtype, b_dtype=self.DTYPE, target_dtype=self.DTYPE) if dtype.sz < self.DTYPE.sz else None, + get_available_cast_dtypes(self.DTYPE) + )) + +def _test_ops(a_dtype:DType, b_dtype:DType, target_dtype:DType): + if not is_dtype_supported(a_dtype) or not is_dtype_supported(b_dtype): raise unittest.SkipTest("dtype not supported") + _assert_eq(Tensor([1,2,3,4], dtype=a_dtype)+Tensor([1,2,3,4], dtype=b_dtype), target_dtype, [2,4,6,8]) + _assert_eq(Tensor([1,2,3,4], dtype=a_dtype)*Tensor([1,2,3,4], dtype=b_dtype), target_dtype, [1,4,9,16]) + _assert_eq(Tensor([[1,2],[3,4]], dtype=a_dtype)@Tensor.eye(2, dtype=b_dtype), target_dtype, [[1,2],[3,4]]) + _assert_eq(Tensor([1,1,1,1], dtype=a_dtype)+Tensor.ones((4,4), dtype=b_dtype), target_dtype, 2*Tensor.ones(4,4).numpy()) + +class TestBFloat16DType(unittest.TestCase): + def setUp(self): + if not is_dtype_supported(dtypes.bfloat16): raise unittest.SkipTest("bfloat16 not 
supported") + def test_bf16_to_float(self): + with self.assertRaises(AssertionError): + _test_cast(Tensor([100000], dtype=dtypes.bfloat16), dtypes.float32, [100000]) + + def test_float_to_bf16(self): + with self.assertRaises(AssertionError): + _test_cast(Tensor([100000], dtype=dtypes.float32), dtypes.bfloat16, [100000]) + + # torch.tensor([10000, -1, -1000, -10000, 20]).type(torch.bfloat16) + + def test_bf16(self): + t = Tensor([10000, -1, -1000, -10000, 20]).cast(dtypes.bfloat16) + t.realize() + back = t.cast(dtypes.float32) + assert tuple(back.numpy().tolist()) == (9984., -1, -1000, -9984, 20) + + def test_bf16_disk_write_read(self): + t = Tensor([10000, -1, -1000, -10000, 20]).cast(dtypes.float32) + t.to(f"disk:{temp('f32')}").realize() + + # hack to "cast" f32 -> bf16 + dat = open(temp('f32'), "rb").read() + adat = b''.join([dat[i+2:i+4] for i in range(0, len(dat), 4)]) + with open(temp('bf16'), "wb") as f: f.write(adat) + + t = Tensor.empty(5, dtype=dtypes.bfloat16, device=f"disk:{temp('bf16')}").llvm().realize() + back = t.cast(dtypes.float32) + assert tuple(back.numpy().tolist()) == (9984., -1, -1000, -9984, 20) + +class TestHalfDtype(TestDType): DTYPE = dtypes.half + +class TestFloatDType(TestDType): DTYPE = dtypes.float + +class TestDoubleDtype(TestDType): DTYPE = dtypes.double + +class TestInt8Dtype(TestDType): + DTYPE = dtypes.int8 + @unittest.skipIf(getenv("CUDA",0)==1 or getenv("PTX", 0)==1, "cuda saturation works differently") + def test_int8_to_uint8_negative(self): _test_op(lambda: Tensor([-1, -2, -3, -4], dtype=dtypes.int8).cast(dtypes.uint8), dtypes.uint8, [255, 254, 253, 252]) + +class TestUint8Dtype(TestDType): + DTYPE = dtypes.uint8 + @unittest.skipIf(getenv("CUDA",0)==1 or getenv("PTX", 0)==1, "cuda saturation works differently") + def test_uint8_to_int8_overflow(self): _test_op(lambda: Tensor([255, 254, 253, 252], dtype=dtypes.uint8).cast(dtypes.int8), dtypes.int8, [-1, -2, -3, -4]) + +@unittest.skipIf(Device.DEFAULT not in {"CPU", "TORCH"}, "only bitcast in CPU and TORCH") +class TestBitCast(unittest.TestCase): + def test_float32_bitcast_to_int32(self): _test_bitcast(Tensor([1,2,3,4], dtype=dtypes.float32), dtypes.int32, [1065353216, 1073741824, 1077936128, 1082130432]) + @unittest.skipIf(Device.DEFAULT == "TORCH", "no uint32 in torch") + def test_float32_bitcast_to_uint32(self): _test_bitcast(Tensor([1,2,3,4], dtype=dtypes.float32), dtypes.uint32, [1065353216, 1073741824, 1077936128, 1082130432]) + def test_int32_bitcast_to_float32(self): _test_bitcast(Tensor([1065353216, 1073741824, 1077936128, 1082130432], dtype=dtypes.int32), dtypes.float32, [1.0, 2.0, 3.0, 4.0]) + + # NOTE: these are the same as normal casts + def test_int8_bitcast_to_uint8(self): _test_bitcast(Tensor([-1, -2, -3, -4], dtype=dtypes.int8), dtypes.uint8, [255, 254, 253, 252]) + def test_uint8_bitcast_to_int8(self): _test_bitcast(Tensor([255, 254, 253, 252], dtype=dtypes.uint8), dtypes.int8, [-1, -2, -3, -4]) + @unittest.skipIf(Device.DEFAULT == "TORCH", "no uint64 in torch") + def test_int64_bitcast_to_uint64(self): _test_bitcast(Tensor([-1, -2, -3, -4], dtype=dtypes.int64), dtypes.uint64, [18446744073709551615, 18446744073709551614, 18446744073709551613, 18446744073709551612]) + @unittest.skipIf(Device.DEFAULT == "TORCH", "no uint64 in torch") + def test_uint64_bitcast_to_int64(self): _test_bitcast(Tensor([18446744073709551615, 18446744073709551614, 18446744073709551613, 18446744073709551612], dtype=dtypes.uint64), dtypes.int64, [-1, -2, -3, -4]) + + def test_shape_change_bitcast(self): + with 
self.assertRaises(AssertionError): + _test_bitcast(Tensor([100000], dtype=dtypes.float32), dtypes.uint8, [100000]) + +class TestInt16Dtype(TestDType): DTYPE = dtypes.int16 +class TestUint16Dtype(TestDType): DTYPE = dtypes.uint16 + +class TestInt32Dtype(TestDType): DTYPE = dtypes.int32 +class TestUint32Dtype(TestDType): DTYPE = dtypes.uint32 + +class TestInt64Dtype(TestDType): DTYPE = dtypes.int64 +class TestUint64Dtype(TestDType): DTYPE = dtypes.uint64 + +class TestBoolDtype(TestDType): DTYPE = dtypes.bool + +class TestEqStrDType(unittest.TestCase): + def test_image_ne(self): + assert dtypes.float == dtypes.float32, "float doesn't match?" + assert dtypes.imagef((1,2,4)) != dtypes.imageh((1,2,4)), "different image dtype doesn't match" + assert dtypes.imageh((1,2,4)) != dtypes.imageh((1,4,2)), "different shape doesn't match" + assert dtypes.imageh((1,2,4)) == dtypes.imageh((1,2,4)), "same shape matches" + assert isinstance(dtypes.imageh((1,2,4)), ImageDType) + def test_ptr_ne(self): + # TODO: is this the wrong behavior? + assert PtrDType(dtypes.float32) == dtypes.float32 + #assert PtrDType(dtypes.float32) == PtrDType(dtypes.float32) + #assert PtrDType(dtypes.float32) != dtypes.float32 + def test_strs(self): + self.assertEqual(str(dtypes.imagef((1,2,4))), "dtypes.imagef((1, 2, 4))") + self.assertEqual(str(PtrDType(dtypes.float32)), "ptr.dtypes.float") + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/test_gc.py b/tinygrad_repo/test/test_gc.py new file mode 100644 index 0000000..49773dd --- /dev/null +++ b/tinygrad_repo/test/test_gc.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python +import gc +import unittest +import numpy as np +from tinygrad.tensor import Tensor + +def tensors_allocated(): + return sum([isinstance(x, Tensor) for x in gc.get_objects()]) + +class TestGC(unittest.TestCase): + + def test_gc(self): + a = Tensor.zeros(4, 4, requires_grad=True) + b = Tensor.zeros(4, 4, requires_grad=True) + (a*b).mean().backward() + assert(tensors_allocated() > 0) + del a,b + assert(tensors_allocated() == 0) + + def test_gc_complex(self): + a = Tensor(np.zeros((4, 4), dtype=np.float32), requires_grad=True) + b = Tensor(np.zeros((4, 4), dtype=np.float32), requires_grad=True) + assert(tensors_allocated() == 2) + (a*b).mean().backward() + assert(tensors_allocated() == 4) + del b + assert(tensors_allocated() == 2) + b = Tensor(np.zeros((4, 4), dtype=np.float32), requires_grad=True) + print(tensors_allocated()) + (a*b).mean().backward() + print(tensors_allocated()) + assert(tensors_allocated() == 4) + del b + assert(tensors_allocated() == 2) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/test_jit.py b/tinygrad_repo/test/test_jit.py new file mode 100644 index 0000000..bb2bafe --- /dev/null +++ b/tinygrad_repo/test/test_jit.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python +import unittest +import numpy as np +from tinygrad.tensor import Tensor, Device +from tinygrad.jit import TinyJit, JIT_SUPPORTED_DEVICE +import pytest + +pytestmark = pytest.mark.webgpu + +# NOTE: METAL fails, might be platform and optimization options dependent. 
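The tests below all follow the same TinyJit pattern: decorate a function that returns realized Tensors, then call it repeatedly with inputs of a fixed shape so that, after a couple of warm-up calls, replays come from the captured jit_cache. A self-contained minimal sketch of that pattern (the function name mul_add is illustrative, not part of the patch):

    from tinygrad.tensor import Tensor
    from tinygrad.jit import TinyJit

    @TinyJit
    def mul_add(a, b):
      # the jitted function must return realized Tensors, and input shapes must stay fixed across calls
      return (a * b + b).realize()

    for _ in range(5):
      out = mul_add(Tensor.randn(10, 10), Tensor.randn(10, 10))
    # a single fused elementwise kernel should be captured, as test_simple_jit below asserts for a+b
    assert len(mul_add.jit_cache) == 1

Calling mul_add afterwards with a different input shape would trip the same assertion that test_jit_shape_mismatch exercises.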
+@unittest.skipUnless(Device.DEFAULT in JIT_SUPPORTED_DEVICE and Device.DEFAULT not in ["METAL", "WEBGPU"], f"no JIT on {Device.DEFAULT}") +class TestJit(unittest.TestCase): + def test_simple_jit(self): + @TinyJit + def add(a, b): return (a+b).realize() + for _ in range(5): + a = Tensor.randn(10, 10) + b = Tensor.randn(10, 10) + c = add(a, b) + np.testing.assert_allclose(c.numpy(), a.numpy()+b.numpy(), atol=1e-4, rtol=1e-5) + assert len(add.jit_cache) == 1 + + def test_jit_multiple_outputs(self): + @TinyJit + def f(a, b): return (a+b).realize(), (a-b).realize(), (a*b).realize() + for _ in range(5): + a = Tensor.randn(10, 10) + b = Tensor.randn(10, 10) + c, d, e = f(a, b) + np.testing.assert_allclose(c.numpy(), a.numpy()+b.numpy(), atol=1e-4, rtol=1e-5) + np.testing.assert_allclose(d.numpy(), a.numpy()-b.numpy(), atol=1e-4, rtol=1e-5) + np.testing.assert_allclose(e.numpy(), a.numpy()*b.numpy(), atol=1e-4, rtol=1e-5) + assert len(f.jit_cache) == 3 + + def test_nothing_jitted(self): + @TinyJit + def add(a, b): return a+b + with self.assertRaises(AssertionError): + for _ in range(5): + a = Tensor.randn(10, 10) + b = Tensor.randn(10, 10) + c = add(a, b) + + def test_jit_shape_mismatch(self): + @TinyJit + def add(a, b): return (a+b).realize() + for _ in range(5): + a = Tensor.randn(10, 10) + b = Tensor.randn(10, 10) + c = add(a, b) + bad = Tensor.randn(20, 20) + with self.assertRaises(AssertionError): + add(a, bad) + + def test_jit_shape_views_mismatch(self): + @TinyJit + def add(a): return (a+1).realize() + with self.assertRaises(AssertionError): + for i in range(1,5): + # a has an offset that the kernel doesn't know about + a = Tensor.randn(10, 10).realize()[:, i:i+2] + add(a) + + def test_jit_duplicate_fail(self): + # the jit doesn't support duplicate arguments + @TinyJit + def add(a, b): return (a+b).realize() + a = Tensor.randn(10, 10) + with self.assertRaises(AssertionError): + add(a, a) + + def test_kwargs_jit(self): + @TinyJit + def add_kwargs(first, second): return (first+second).realize() + for _ in range(5): + a = Tensor.randn(10, 10) + b = Tensor.randn(10, 10) + c = add_kwargs(first=a, second=b) + np.testing.assert_allclose(c.numpy(), a.numpy()+b.numpy(), atol=1e-4, rtol=1e-5) + assert len(add_kwargs.jit_cache) == 1 + + def test_array_jit(self): + @TinyJit + def add_array(a, arr): return (a+arr[0]).realize() + for i in range(5): + a = Tensor.randn(10, 10) + b = Tensor.randn(10, 10) + a.realize(), b.realize() + c = add_array(a, [b]) + if i >= 2: + # should fail once jitted since jit can't handle arrays + np.testing.assert_allclose(np.any(np.not_equal(c.numpy(),a.numpy()+b.numpy())), True, atol=1e-4, rtol=1e-5) + else: + np.testing.assert_allclose(c.numpy(), a.numpy()+b.numpy(), atol=1e-4, rtol=1e-5) + assert len(add_array.jit_cache) == 1 + + def test_method_jit(self): + class Fun: + def __init__(self): + self.a = Tensor.randn(10, 10) + @TinyJit + def __call__(self, b:Tensor) -> Tensor: + return (self.a+b).realize() + fun = Fun() + for _ in range(5): + b = Tensor.randn(10, 10) + c = fun(b) + np.testing.assert_allclose(c.numpy(), fun.a.numpy()+b.numpy(), atol=1e-4, rtol=1e-5) + assert len(fun.__call__.func.__self__.jit_cache) == 1 + + def test_jit_size1_input(self): + @TinyJit + def f(a, b): return (a+b).realize() + a = Tensor([1, 2, 3]) + for i in range(5): + np.testing.assert_allclose(f(a, Tensor([i])).numpy(), (a+i).numpy(), atol=1e-4, rtol=1e-5) + assert len(f.jit_cache) == 1 + + def test_jit_output_non_tensor_fail(self): + @TinyJit + def f(a, b, i): return (a+b).realize(), i + 
output1, output2 = [], [] + expect1, expect2 = [], [] + for i in range(5): + a = Tensor.randn(10, 10) + b = Tensor.randn(10, 10) + o1, o2 = f(a, b, i) + output1.append(o1.numpy().copy()) + output2.append(o2) + expect1.append(a.numpy().copy()+b.numpy().copy()) + expect2.append(i) + np.testing.assert_allclose(output1, expect1, atol=1e-4, rtol=1e-5) + # the jit only works with Tensor outputs + assert output2 != expect2 + assert len(f.jit_cache) == 1 + + @unittest.skip("random isn't working in JIT") + def test_jit_random_regen(self): + def f(a, b): + rn = Tensor.randn(*a.shape) + return ((a+b)*rn).realize() + a = Tensor.randn(10, 10) + b = Tensor.randn(10, 10) + + Tensor._seed = 1234 + jf = TinyJit(f) + res = set() + for _ in range(5): + o1 = jf(a, b) + res.add(o1.numpy()[0][0]) + assert len(res) == 5, "All values should be different, rand works in jit." + + Tensor._seed = 1234 + jf2 = TinyJit(f) + res2 = set() + for _ in range(5): + o1 = jf2(a, b) + res2.add(o1.numpy()[0][0]) + assert len(res2) == 5, "All values should be different, rand works in jit." + assert res == res2, "Jit rand is not reproducible with the same seed" + + Tensor._seed = 3421 + jf3 = TinyJit(f) + res3 = set() + for _ in range(5): + o1 = jf3(a, b) + res3.add(o1.numpy()[0][0]) + assert len(res3) == 5, "All values should be different, rand works in jit." + assert res3 != res2, "Jit rand is diff with diff seeds" + + def test_jit_realization_and_sampling(self): + w = Tensor.eye(5) + + @TinyJit + def foo (x): return w.dot(x).realize() + + arg = [ + Tensor([1,2,3,4,5]), + Tensor([1,3,3,4,6]), + Tensor([1,2,5,4,7]), + Tensor([0,2,3,1,0]), + ] + + Y = [foo(e).numpy() for e in arg] + + foo(Tensor([7,7,7,7,7])) + want = [[1., 2., 3., 4., 5.], + [1., 3., 3., 4., 6.], + [1., 2., 5., 4., 7.], + [0., 2., 3., 1., 0.]] + np.testing.assert_allclose(want, Y) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tinygrad_repo/test/test_kernel_cache.py b/tinygrad_repo/test/test_kernel_cache.py new file mode 100644 index 0000000..82a38f5 --- /dev/null +++ b/tinygrad_repo/test/test_kernel_cache.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +import unittest +import secrets +import string +from tinygrad.tensor import Tensor +from tinygrad.ops import Device +from tinygrad.helpers import diskcache + +def generate_random_string(length=16): + alphabet = string.ascii_letters + string.digits + return ''.join(secrets.choice(alphabet) for _ in range(length)) + +compile_call_count = 0 + +@diskcache +def helper_test_compile(prg:str) -> bytes: + global compile_call_count + compile_call_count += 1 + return prg.encode() + +class TestKernelCache(unittest.TestCase): + def test_compile_cache(self): + prg1 = generate_random_string(64) + "a" + prg2 = generate_random_string(64) + "b" + cold_compile_res = helper_test_compile(prg1) + warm_compile_res = helper_test_compile(prg1) + assert cold_compile_res == warm_compile_res == prg1.encode() + assert compile_call_count == 1 + + prg2_res = helper_test_compile(prg2) + assert prg2_res == prg2.encode() + assert compile_call_count == 2 + + def test_kernel_cache_in_action(self): + if Device.DEFAULT not in ["CLANG"]: + self.skipTest("No custom kernel cache is implemented") + + a = Tensor.rand(4,4) + b = Tensor.rand(4,4) + x = a + b + x.realize() + + orig_compile_func = Device['CLANG'].compiler + Device['CLANG'].compiler = None # making it not callable + + a1 = Tensor.rand(4,4) + b1 = Tensor.rand(4,4) + x1 = a1 + b1 + x1.realize() # Same kernel should be from cache. 
+ + Device['CLANG'].compiler = orig_compile_func + +if __name__ == "__main__": + unittest.main() diff --git a/tinygrad_repo/test/test_lazybuffer.py b/tinygrad_repo/test/test_lazybuffer.py new file mode 100644 index 0000000..c8209ca --- /dev/null +++ b/tinygrad_repo/test/test_lazybuffer.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python +import numpy as np +import unittest +from tinygrad.lazy import LazyBuffer +from tinygrad.ops import Device +from tinygrad.tensor import Tensor +from tinygrad.shape.symbolic import Variable +from tinygrad.jit import CacheCollector + +class TestLazyBuffer(unittest.TestCase): + def test_fromcpu_buffer_sharing(self): + a = np.arange(8) + assert LazyBuffer.fromCPU(a).realized._buf is a + + def test_fromcpu_shape_tracker(self): + def helper(a: np.ndarray): + print(a.shape, a.strides, a.flags.c_contiguous) + b = LazyBuffer.fromCPU(a) + #assert b.st.contiguous == a.flags.c_contiguous + assert b.st.shape == a.shape + np.testing.assert_equal(a, Tensor(b).numpy()) + + for ndims in range(1, 4): + a = np.random.randn(*(4,)*ndims).astype(np.float32) + for stride in [-2, 1, 2]: + for start in [0, 1]: + helper(a[(slice(start, None, stride),)*ndims]) + + def test_shuffle_pad_ops_cmpeq(self): + y = Tensor([1]).cat(Tensor([1]) == 0).numpy() + z = Tensor([1, 0]).numpy() + np.testing.assert_allclose(y, z) + + def test_shuffle_pad_ops_div(self): + y = Tensor([1]).cat(Tensor([1]).div(Tensor([2.0]))).numpy() + z = Tensor([1, 0.5]).numpy() + np.testing.assert_allclose(y, z) + + def test_shuffle_pad_ops_log(self): + y = Tensor([1]).cat(Tensor([1]).log()).numpy() + z = Tensor([1, 0]).numpy() + np.testing.assert_allclose(y, z) + + def test_shuffle_pad_ops_exp(self): + y = Tensor([1]).cat(Tensor([1]).exp()).numpy() + z = Tensor([1, np.e]).numpy() + np.testing.assert_allclose(y, z) + + @unittest.skipUnless(Device.DEFAULT in ["METAL", "CUDA", "GPU"], "Only GPU backends supports cache") + def test_children_count(self): + a = Tensor.ones(8,8,8) + d1 = a.sum((0)) + d2 = a.sum((0)).reshape(32,2) + assert len(d1.lazydata.op.src[0].children) == 1 + in1 = d1.reshape(16,4) + d3 = in1.reshape(8,8) + assert len(d3.lazydata.op.src[0].children) == 2 + + CacheCollector.start() + l = Tensor.ones(8,8) + r = Tensor.ones(8,8) + dd = d1 + l + dd.realize() + de = d3 + r + de.realize() + cache = CacheCollector.finish() + assert len(cache) == 3 + assert cache[0][0].name.startswith("r_") # Reduce should not merged 2 times. 
+ assert cache[1][0].name.startswith("E_") + assert cache[2][0].name.startswith("E_") + +if __name__ == "__main__": + unittest.main() diff --git a/tinygrad_repo/test/test_lazyop.py b/tinygrad_repo/test/test_lazyop.py new file mode 100644 index 0000000..271b91c --- /dev/null +++ b/tinygrad_repo/test/test_lazyop.py @@ -0,0 +1,21 @@ +import unittest +from tinygrad.tensor import Tensor + +# stuff needed to unpack a kernel +from tinygrad.ops import LazyOp, TernaryOps, BinaryOps, UnaryOps, ReduceOps, BufferOps, MemBuffer, ConstBuffer +from tinygrad.helpers import dtypes +from tinygrad.shape.shapetracker import ShapeTracker +from tinygrad.shape.view import View +from tinygrad.shape.symbolic import Variable +inf, nan = float('inf'), float('nan') + +class TestLazyOp(unittest.TestCase): + def test_lazyop_str(self): + t = Tensor.rand(10) + Tensor.rand(10) + s = t.lazydata.schedule() + ast = s[-1].ast + ast_remade = eval(str(ast)) + self.assertEqual(ast, ast_remade) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/test_linearizer.py b/tinygrad_repo/test/test_linearizer.py new file mode 100644 index 0000000..dd4e897 --- /dev/null +++ b/tinygrad_repo/test/test_linearizer.py @@ -0,0 +1,492 @@ +import numpy as np +import unittest, os + +from tinygrad.codegen.kernel import Opt, OptOps, tensor_cores +from tinygrad.codegen.linearizer import Linearizer, UOps +from tinygrad.ops import Compiled, Device, LoadOps +from tinygrad.tensor import Tensor +from tinygrad.jit import CacheCollector +from tinygrad.realize import run_schedule +from tinygrad.helpers import dtypes, prod + +class TestLinearizer(unittest.TestCase): + def test_arg_dedup(self): + if not isinstance(Device[Device.DEFAULT], Compiled): + self.skipTest("Only Compiled supports cache") + a, b = Tensor.randn(4), Tensor.randn(4) + np_a, np_b = a.numpy(), b.numpy() + CacheCollector.start() + c = ((a.shrink(((0, 2),)) - a.shrink(((2, 4),))) - (b.shrink(((0, 2),)) - b.shrink(((2, 4),)))).realize() + rawbufs = CacheCollector.finish()[0][1] + assert len(rawbufs) == 3 and set(rawbufs[1:]) == {a.lazydata.realized, b.lazydata.realized} + np_c = (np_a[:2] - np_a[2:]) - (np_b[:2] - np_b[2:]) + np.testing.assert_allclose(np_c, c.numpy(), atol=1e-4, rtol=1e-4) + + def test_load_dedup(self): + # for different leaves in the AST, the same loads may occur. + + if not isinstance(Device[Device.DEFAULT], Compiled): + self.skipTest("Only Compiled uses linearizer") + + a = Tensor.randn(4).realize() + # these are of size 3 to avoid float4 coalesce + r = a[:-1] + a[1:] + + k = Linearizer(r.lazydata.schedule()[-1].ast) + k.upcast() + k.linearize() + num_loads = len([uop for uop in k.uops if uop.uop == UOps.LOAD]) + assert num_loads <= 4, "more load uops than needed" + assert num_loads >= 4, "unexpected number of uops, maybe this test needs updating?" + + def test_upcast_cse(self): + # when upcasting, within a subtree, there may be common expressions. 
+ + if not isinstance(Device[Device.DEFAULT], Compiled): + self.skipTest("Only Compiled uses linearizer") + + a, b = Tensor.randn(1).realize(), Tensor.randn(1).realize() + r = a.expand([2]) + b.expand([2]) + + k = Linearizer(r.lazydata.schedule()[-1].ast) + k.upcast() + k.linearize() + num_ops = len([uop for uop in k.uops if uop.uop == UOps.ALU]) + assert num_ops <= 1, "more alu uops than needed" + + def test_zero_fold(self): + if not isinstance(Device[Device.DEFAULT], Compiled): + self.skipTest("Only Compiled uses linearizer") + + a, b = Tensor.randn(1).realize(), Tensor.randn(1).realize() + r = Tensor.stack([a, b]) + + k = Linearizer(r.lazydata.schedule()[-1].ast) + k.upcast() + k.linearize() + num_ops = len([uop for uop in k.uops if uop.uop == UOps.ALU]) + assert num_ops == 0, "more alu uops than needed" + + @unittest.skip("constant folding not supported yet") + def test_constant_fold(self): + if not isinstance(Device[Device.DEFAULT], Compiled): + self.skipTest("Only Compiled uses linearizer") + + a, b = Tensor(2), Tensor(3) + r = a * b + + k = Linearizer(r.lazydata.schedule()[-1][0]) + k.linearize() + num_ops = len([uop for uop in k.uops if uop.uop in [UOps.LOAD, UOps.ALU]]) + assert num_ops <= 0, "more load or alu uops than needed" + + def test_tensor_cores(self): + if not isinstance(Device[Device.DEFAULT], Compiled): + self.skipTest("Only Compiled uses linearizer") + if Device.DEFAULT not in tensor_cores: + self.skipTest("No tensor cores for device") + + for tc in tensor_cores[Device.DEFAULT]: + if tc.arch is not None and tc.arch != os.uname().machine: continue + a, b = Tensor.rand(tc.dims[0], tc.dims[2], dtype=tc.dtype_in), Tensor.rand(tc.dims[2], tc.dims[1], dtype=tc.dtype_in) + np_a, np_b = a.numpy(), b.numpy() + if tc.dtype_out != tc.dtype_in: + r = (a.reshape(tc.dims[0], 1, tc.dims[2]) * b.permute(1,0).reshape(1, tc.dims[1], tc.dims[2])).cast(tc.dtype_out).sum(axis=2) + else: + r = a @ b + realized_ast, _ = helper_realized_ast(r) + k = Linearizer(realized_ast) + k.apply_tensor_cores(1) + k.linearize() + assert len([uop for uop in k.uops if uop.uop == UOps.WMMA]) == 1, "tensor core not triggered" + np_c = np_a @ np_b + np.testing.assert_allclose(np_c, r.numpy(), atol=5e-3, rtol=1e-4) + + def test_limit_dims_to_max_5d_global(self): + t = Tensor.rand(3, 4, 5, 6, 7).pad(((1, 1), (1, 1), (1, 1), (1, 1), (1, 1))) + 1 + sched = [si for si in t.lazydata.schedule() if si.ast.op not in LoadOps] + assert len(sched) == 1 + lin = Linearizer(sched[0].ast) + assert lin.full_shape[:lin.global_dims] == (5, 6, 7, 8, 9) + lin.limit_dims_to_max(global_max=[16, 16, 16], local_max=[16, 16, 16]) + +def helper_realized_ast(r:Tensor): + s = r.lazydata.schedule() + run_schedule(s[:-1]) # run all kernels except the last one + # now all input LazyBuffers buffers in s[-1] should be realized + output_buffer = Device[s[-1].out.device].buffer(prod((s if isinstance(s, int) else s.max for s in s[-1].out.shape)), s[-1].out.dtype, **s[-1].out._device_extra_args()) # allocate an output buffer + return s[-1].ast, [output_buffer] + [l.realized for l in s[-1].inputs] + +class TestFloat4(unittest.TestCase): + def setUp(self): + if not isinstance(Device[Device.DEFAULT], Compiled) or not Device[Device.DEFAULT].linearizer_opts.supports_float4: + self.skipTest("Device does not support float4") + + @staticmethod + def count_float4(k): + return (len([uop for uop in k.uops if uop.uop == UOps.LOAD and uop.dtype == dtypes._float4]), + len([uop for uop in k.uops if uop.uop == UOps.STORE and len(uop.vin) == 3 and uop.vin[2].dtype 
== dtypes._float4])) + + # TODO: express opts below as auto opts + + def test_float4_basic(self): + a = Tensor.rand(2, 8).realize() + b = Tensor.rand(2, 8).realize() + c = a + b + + s = c.lazydata.schedule()[0] + k = Linearizer(s.ast) + k.hand_coded_optimizations() + k.linearize() + + assert TestFloat4.count_float4(k) == (2, 1) + + def test_float4_multidim(self): + a = Tensor.rand(2, 8).realize() + b = Tensor.rand(2, 8).realize() + c = a + b + + s = c.lazydata.schedule()[0] + k = Linearizer(s.ast) + k.shift_to(0, 4) # float4 dimension + k.shift_to(0, 2, insert_before=k.shape_len-1) + k.upcast() + k.upcast() + k.local_dims += 1 + k.linearize() + + assert TestFloat4.count_float4(k) == (4, 2) + + def test_float4_unaligned_load(self): + a = Tensor.rand(9).realize().shrink(((1, 9),)) + b = Tensor.rand(9).realize().shrink(((1, 9),)) + c = a + b + + s = c.lazydata.schedule()[0] + k = Linearizer(s.ast) + k.hand_coded_optimizations() # implicit trigger float4 dim + k.linearize() + + assert TestFloat4.count_float4(k) == (0, 1) + + def test_float4_multidim_unaligned_load(self): + a = Tensor.rand(2, 9).realize().shrink(((0, 2), (1, 9),)) + b = Tensor.rand(2, 9).realize().shrink(((0, 2), (1, 9),)) + c = a + b + + s = c.lazydata.schedule()[0] + k = Linearizer(s.ast) + k.shift_to(len(k.full_unupcasted_shape)-1, 4) # manual trigger float4 dim + k.upcast() + k.shift_to(len(k.full_unupcasted_shape)-1, 2, insert_before=k.shape_len-1) + k.upcast() + k.local_dims += 1 + k.linearize() + + assert TestFloat4.count_float4(k) == (0, 2) + + def test_float4_sometimes_unaligned(self): + a = Tensor.rand(1, 1, 8).realize() + b = Tensor.rand(1, 1, 5).realize().shrink(((0, 1), (0, 1), (1, 5))) + c = a.conv2d(b) + # only the first and last conv dot products are aligned in a, and b is never aligned, so no + # float4 should be emitted (the reduce axis of size 4 is the float4 axis here) + + s = c.lazydata.schedule()[0] + k = Linearizer(s.ast) + k.upcast() + k.linearize() + + assert TestFloat4.count_float4(k) == (0, 0) + + def test_float4_multidim_sometimes_unaligned(self): + a = Tensor.rand(1, 1, 7).realize() + b = Tensor.rand(1, 1, 5).realize().shrink(((0, 1), (0, 1), (1, 5))) + c = a.conv2d(b) + # the first conv dot product is aligned in a. If we upcast the output and reduce + # dimension, then we could do float4 for only that one set of loads, but we currently + # don't. + + s = c.lazydata.schedule()[0] + k = Linearizer(s.ast) + k.upcast() + k.upcast() + k.linearize() + + assert TestFloat4.count_float4(k) == (0, 1) + + def test_float4_noncontiguous(self): + a = Tensor.rand(4, 2).realize() + b = Tensor.rand(4, 2).realize() + c = a + b + + # we will upcast the top axis of sz 4. they should not be coalesced into float4, + # since the top axis is not contiguous. + + s = c.lazydata.schedule()[0] + k = Linearizer(s.ast) + k.shift_to(0, 4, top=True) # top axes are float4 axes + k.upcast() + k.linearize() + + assert TestFloat4.count_float4(k) == (0, 0) + + def test_float4_expand(self): + a = Tensor.rand(9).realize().shrink(((1, 9),)) + b = Tensor.rand(2).realize().reshape((2, 1)).expand((2,4)).reshape((8,)) + c = a + b + + # we will upcast the top axis of sz 4. they should not be coalesced into float4, + # since the top axis is not contiguous. 
+ + s = c.lazydata.schedule()[0] + k = Linearizer(s.ast) + k.shift_to(0, 4) # float4 axis + k.upcast() + k.linearize() + + assert TestFloat4.count_float4(k) == (0, 1) + + def test_float4_heterogeneous(self): + a = Tensor.rand(8).realize() + b = Tensor.rand(9).realize().shrink(((1, 9),)) + c = a + b + + # should float4 b but not a + + s = c.lazydata.schedule()[0] + k = Linearizer(s.ast) + k.shift_to(0, 4) # float4 axis + k.upcast() + k.linearize() + + assert TestFloat4.count_float4(k) == (1, 1) + +class TestHandCodedOpts(unittest.TestCase): + def setUp(self): + if not isinstance(Device[Device.DEFAULT], Compiled): + self.skipTest("Device does not use linearizer") + + def test_masked_upcast(self): + layer_1 = Tensor.cat(*[Tensor.rand(5) for _ in range(4)]) + layer_2 = Tensor.cat(layer_1.unsqueeze(0), Tensor.rand(6, 20)) + + s = layer_2.lazydata.schedule()[-1] + k = Linearizer(s.ast) + k.hand_coded_optimizations() + assert len(k.bufs) == 6 # make sure all ops are done in one kernel + # masked upcast should upcast masked axis of size 7 + # masked upcast should not upcast large (20) last axis + # float4/other hcopt shouldn't upcast last axis, since we already have 7 upcast, and the last axis is not very contiguous + assert k.upcasted == 1 and k.full_shape[-1] == 7 + + def test_masked_upcast_wino(self): + monster = Tensor.stack([Tensor.stack([Tensor.rand(16) for _ in range(6)]) for _ in range(6)]) + + s = monster.lazydata.schedule()[-1] + k = Linearizer(s.ast) + k.hand_coded_optimizations() + assert len(k.bufs) == 37 # make sure all ops are done in one kernel + # should upcast the two Tensor.stacks + assert k.upcasted >= 2 and k.full_shape[k.shape_len-k.upcasted:k.shape_len].count(6) == 2 + + def test_masked_upcast_wino_full(self): + old_wino = Tensor.wino + Tensor.wino = True + x,w = Tensor.rand(1,4,9,9, requires_grad=True).realize(), Tensor.rand(4,4,3,3, requires_grad=True).realize() + out = Tensor.conv2d(x,w, padding=1) + upcasts = [] + # collect upcasts of tile transform kernels + for i, si in enumerate(out.lazydata.schedule()): + k = Linearizer(si.ast) + k.hand_coded_optimizations() + if k.reduceop is not None: continue # not a tile transform kernel (there is a gemm reduce kernel) + if len(k.bufs) < 100: continue # not a tile transform kernel (there's a permute kernel at the end) + upcasts.append(tuple(k.full_shape[k.shape_len - k.upcasted:k.shape_len])) + assert len(upcasts) == 3 # 3 transformation matrices + assert upcasts.count((6, 6)) == 2 and upcasts.count((4, 4)) == 1 + + out.mean().backward() + for si in x.grad.lazydata.schedule() + w.grad.lazydata.schedule(): + k = Linearizer(si.ast) + k.hand_coded_optimizations() + k.linearize() + if len(k.bufs) < 20: continue # not a tile transform kernel + # heuristic number to make sure that at least some upcasts but not too many upcasts are being done + assert 6 <= prod(k.full_shape[k.shape_len - k.upcasted:k.shape_len]) <= 49 + + Tensor.wino = old_wino + + def test_masked_upcast_many(self): + layer_1 = Tensor.cat(Tensor.rand(3, 4), Tensor.rand(4, 4)) + layer_2 = Tensor.cat(layer_1.unsqueeze(0), Tensor.rand(6, 7, 4)) + layer_3 = Tensor.cat(layer_2.unsqueeze(0), Tensor.rand(6, 7, 7, 4)) + + s = layer_3.lazydata.schedule()[-1] + k = Linearizer(s.ast) + k.hand_coded_optimizations() + assert len(k.bufs) == 5 # make sure all ops are done in one kernel + # check that we don't do too many upcasts + assert prod(k.full_shape[k.shape_len-k.upcasted:k.shape_len]) <= 49 + +def helper_linearizer_opt(r:Tensor, opts=[], apply_tc=False): + wanna_output = None + 
realized_ast, real_bufs = helper_realized_ast(r) + + def check_opt(opts, create_k, to_prg): + k = create_k() + if apply_tc: + k.apply_tensor_cores(1, opts) + else: + for opt in opts: + k.apply_opt(opt) + prg = to_prg(k) + real_bufs[0] = real_bufs[0].fromCPU(np.zeros((real_bufs[0].size, ), dtype=real_bufs[0].dtype.np)) # Zero to check that all values are filled + prg.exec(real_bufs, force_wait=True) + np.testing.assert_allclose(wanna_output, real_bufs[0].toCPU(), atol=1e-4, rtol=1e-4) + + # Get baseline, which is not optimized at all. + k = Linearizer(realized_ast) + prg = Device[Device.DEFAULT].to_program(k) + prg.exec(real_bufs, force_wait=True) + wanna_output = real_bufs[0].toCPU().copy() + + # Check correctness of handcoded optimiztions. + k = Linearizer(realized_ast) + k.hand_coded_optimizations() + prg = Device[Device.DEFAULT].to_program(k) + real_bufs[0] = real_bufs[0].fromCPU(np.zeros((real_bufs[0].size, ), dtype=real_bufs[0].dtype.np)) # Zero to check that all values are filled + prg.exec(real_bufs, force_wait=True) + np.testing.assert_allclose(wanna_output, real_bufs[0].toCPU(), atol=1e-4, rtol=1e-4) + for x in opts: # Check custom transformations if any. + check_opt(x, lambda: Linearizer(realized_ast), Device[Device.DEFAULT].to_program) + +class TestLinearizerOpts(unittest.TestCase): + def test_local_and_grouped_reduce(self): + if not isinstance(Device[Device.DEFAULT], Compiled) or not Device[Device.DEFAULT].linearizer_opts.has_local or not Device[Device.DEFAULT].linearizer_opts.has_shared: + self.skipTest("Only Compiled uses linearizer with locals and shared") + + N = 128 + Tensor.manual_seed(1882) + a = Tensor.rand(4, 4, N, N) + b = Tensor.rand(4, 4, N) + r = (b.sqrt() + ((a+1).sum(axis=3).exp())) + helper_linearizer_opt(r, [ + [Opt(OptOps.LOCAL, 0, 2)], + [Opt(OptOps.LOCAL, 0, 8)], + [Opt(OptOps.LOCAL, 0, 16)], # Checking how it works with locals + [Opt(OptOps.GROUPTOP, 0, 2)], + [Opt(OptOps.GROUPTOP, 0, 32)], + [Opt(OptOps.GROUPTOP, 0, 64)], # Checking how it works with grouped reduce + [Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.GROUPTOP, 0, 2)], + [Opt(OptOps.LOCAL, 0, 16), Opt(OptOps.GROUPTOP, 0, 16)], + [Opt(OptOps.LOCAL, 0, 32), Opt(OptOps.GROUPTOP, 0, 2)], + [Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.GROUPTOP, 0, 64)], # Checking how it works with locals + grouped reduce + [Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.GROUPTOP, 0, 2), Opt(OptOps.UPCAST, 0, 8), Opt(OptOps.UNROLL, 1, 4)], # Checking how it works with locals + grouped reduce + upcasts + ]) + + def test_upcasts(self): + if not isinstance(Device[Device.DEFAULT], Compiled): + self.skipTest("Only Compiled uses linearizer") + + N = 16 + Tensor.manual_seed(1772) + a = Tensor.rand(N, N) + b = Tensor.rand(N, N) + r = (a+b).sqrt() * ((a+1).exp()) + helper_linearizer_opt(r, [ + [Opt(OptOps.UPCAST, 0, 2)], + [Opt(OptOps.UPCAST, 0, 4)], + [Opt(OptOps.UPCAST, 0, 8)], # Checking how it works with upcasts + ]) + + def test_full_upcast(self): + if not isinstance(Device[Device.DEFAULT], Compiled): + self.skipTest("Only Compiled uses linearizer") + + Tensor.manual_seed(1772) + a = Tensor.rand(4) + b = Tensor.rand(4) + r = (a+b).sqrt() * ((a+1).exp()) + helper_linearizer_opt(r, [ + [Opt(OptOps.UPCAST, 0, 4)], # Checking how it works with upcasts + ]) + + def test_matmul(self): + if not isinstance(Device[Device.DEFAULT], Compiled) or not Device[Device.DEFAULT].linearizer_opts.has_local or not Device[Device.DEFAULT].linearizer_opts.has_shared: + self.skipTest("Only Compiled uses linearizer with locals and shared") + + N = 128 + 
Tensor.manual_seed(1552) + a = Tensor.rand(N, N) + b = Tensor.rand(N, N) + r = a@b + helper_linearizer_opt(r, [ + [Opt(OptOps.UPCAST, 0, 2)], + [Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UPCAST, 1, 4)], # Checking how it works with upcasts + [Opt(OptOps.LOCAL, 0, 2)], + [Opt(OptOps.LOCAL, 1, 32)], + [Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 1, 4)], + [Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 1, 32)], + [Opt(OptOps.LOCAL, 0, 16), Opt(OptOps.LOCAL, 1, 8)], # Checking how it works with locals + [Opt(OptOps.GROUPTOP, 0, 2)], + [Opt(OptOps.GROUPTOP, 0, 32)], + [Opt(OptOps.GROUPTOP, 0, 32), Opt(OptOps.UNROLL, 0, 4)], # Checking how it works with grouped_reduce + [Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.LOCAL, 1, 2), Opt(OptOps.GROUPTOP, 0, 32)], + [Opt(OptOps.LOCAL, 0, 8), Opt(OptOps.GROUPTOP, 0, 32)], + [Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 0, 8), Opt(OptOps.GROUPTOP, 0, 4)], # Checking how it works with local+grouped_reduce + [Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.GROUPTOP, 0, 8), Opt(OptOps.UNROLL, 0, 4), Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UPCAST, 1, 2)], # Checking all together + [Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.GROUPTOP, 0, 8), Opt(OptOps.UNROLL, 0, 4), Opt(OptOps.UPCAST, 0, 8)], # Full global upcast + local + ]) + + def test_double_reduce(self): + if not isinstance(Device[Device.DEFAULT], Compiled) or not Device[Device.DEFAULT].linearizer_opts.has_local or not Device[Device.DEFAULT].linearizer_opts.has_shared: + self.skipTest("Only Compiled uses linearizer with locals and shared") + + N = 128 + Tensor.manual_seed(1552) + a = Tensor.rand(8, N, 8, N) + r = a.sum(axis=(1,3)) + helper_linearizer_opt(r, [ + # openCL / GPU=1 is 256 max threads + [Opt(OptOps.GROUPTOP, 0, 2)], [Opt(OptOps.GROUPTOP, 0, 32)], + [Opt(OptOps.GROUPTOP, 1, 2)], [Opt(OptOps.GROUPTOP, 1, 32)], # Checking how it works with 1 grouped_reduce. + [Opt(OptOps.GROUPTOP, 0, 2), Opt(OptOps.GROUPTOP, 1, 2)], + [Opt(OptOps.GROUPTOP, 0, 16), Opt(OptOps.GROUPTOP, 1, 2)], + [Opt(OptOps.GROUPTOP, 0, 4), Opt(OptOps.GROUPTOP, 1, 64)], # Checking how it works with 2 grouped_reduces. + [Opt(OptOps.GROUPTOP, 0, 16), Opt(OptOps.GROUPTOP, 1, 2), Opt(OptOps.UNROLL, 0, 4)], + [Opt(OptOps.GROUPTOP, 0, 2), Opt(OptOps.GROUPTOP, 1, 32), Opt(OptOps.UNROLL, 2, 4)], # Checking how it works with 2 grouped_reduces + upcasts. + [Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 1, 4), Opt(OptOps.GROUPTOP, 0, 4), Opt(OptOps.GROUPTOP, 1, 4)], + [Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 1, 4), Opt(OptOps.GROUPTOP, 0, 2), Opt(OptOps.GROUPTOP, 1, 32), Opt(OptOps.UNROLL, 1, 4)], # Checking how it works with 2 grouped_reduces + upcasts + locals. + [Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.LOCAL, 1, 2), Opt(OptOps.GROUPTOP, 0, 8), Opt(OptOps.GROUPTOP, 1, 4), Opt(OptOps.UPCAST, 0, 2)], + [Opt(OptOps.LOCAL, 0, 2), Opt(OptOps.LOCAL, 1, 2), Opt(OptOps.GROUPTOP, 0, 8), Opt(OptOps.GROUPTOP, 1, 4), Opt(OptOps.UPCAST, 0, 2), Opt(OptOps.UNROLL, 0, 4), Opt(OptOps.UNROLL, 1, 4)], # Checking how it works with 2 grouped_reduces + upcasts + locals. 
+ [Opt(OptOps.LOCAL, 0, 4), Opt(OptOps.LOCAL, 1, 4), Opt(OptOps.GROUPTOP, 0, 4), Opt(OptOps.GROUPTOP, 1, 4), Opt(OptOps.UPCAST, 0, 2), Opt(OptOps.UPCAST, 0, 2)], # No globals + ]) + + def test_tensor_core_opts(self): + if not isinstance(Device[Device.DEFAULT], Compiled) or not Device[Device.DEFAULT].linearizer_opts.has_local: + self.skipTest("Only Compiled uses linearizer with locals") + if Device.DEFAULT not in tensor_cores: + self.skipTest("No tensor cores for device") + + N = 128 + Tensor.manual_seed(1552) + a = Tensor.rand(N, N) + b = Tensor.rand(N, N) + r = a@b + helper_linearizer_opt(r, [ + [Opt(OptOps.UPCAST, 0, 4)], + [Opt(OptOps.UPCAST, 1, 4)], + [Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UPCAST, 1, 4)], # check upcasts + [Opt(OptOps.UNROLL, 0, 2)], # check last unroll + [Opt(OptOps.LASTLOCAL, 0, 4)], # check last local + [Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UNROLL, 0, 2)], # check combo of last unroll and last local + [Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UPCAST, 1, 4), Opt(OptOps.UNROLL, 0, 2)], + [Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UPCAST, 1, 4), Opt(OptOps.UNROLL, 0, 4)], + [Opt(OptOps.UPCAST, 0, 4), Opt(OptOps.UPCAST, 1, 4), Opt(OptOps.UNROLL, 0, 4), Opt(OptOps.LASTLOCAL, 0, 2)], + # [Opt(OptOps.GROUP, 0, 2)] # doesn't work because group_for_reduce dims become early locals (conflicting with TC) + ], apply_tc=True) + + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/test_linearizer_failures.py b/tinygrad_repo/test/test_linearizer_failures.py new file mode 100644 index 0000000..71b20ab --- /dev/null +++ b/tinygrad_repo/test/test_linearizer_failures.py @@ -0,0 +1,21 @@ +import unittest +from tinygrad.codegen.linearizer import Linearizer +from tinygrad.ops import Device + +# stuff needed to unpack a kernel +from tinygrad.ops import LazyOp, TernaryOps, BinaryOps, UnaryOps, ReduceOps, BufferOps, MemBuffer, ConstBuffer +from tinygrad.helpers import dtypes +from tinygrad.shape.shapetracker import ShapeTracker +from tinygrad.shape.view import View +from tinygrad.shape.symbolic import Variable +inf, nan = float('inf'), float('nan') + +class TestLinearizerFailures(unittest.TestCase): + @unittest.skip("this is currently failing") + def test_failure_1(self): + ast = LazyOp(op=BinaryOps.ADD, src=(LazyOp(op=BinaryOps.ADD, src=(LazyOp(op=ReduceOps.SUM, src=(LazyOp(op=BufferOps.MEM, src=(), arg=MemBuffer(idx=1, dtype=dtypes.float, st=ShapeTracker(views=(View(shape=(32, 16, 16), strides=(16, 1, 0), offset=0, mask=None, contiguous=False),)))),), arg=(32, 16, 1)), LazyOp(op=BufferOps.MEM, src=(), arg=MemBuffer(idx=2, dtype=dtypes.float, st=ShapeTracker(views=(View(shape=(32, 16, 1), strides=(0, 1, 0), offset=0, mask=None, contiguous=False),))))), arg=None), LazyOp(op=BufferOps.MEM, src=(), arg=MemBuffer(idx=1, dtype=dtypes.float, st=ShapeTracker(views=(View(shape=(32, 16, 1), strides=(16, 1, 0), offset=0, mask=None, contiguous=True),))))), arg=None) + lin = Linearizer(ast) + prg = Device[Device.DEFAULT].to_program(lin) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/test_net_speed.py b/tinygrad_repo/test/test_net_speed.py new file mode 100644 index 0000000..69675b1 --- /dev/null +++ b/tinygrad_repo/test/test_net_speed.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python +import time +import cProfile +import pstats +import unittest +import torch +from tinygrad.tensor import Tensor, Device +import pytest + +pytestmark = [pytest.mark.exclude_cuda, pytest.mark.exclude_gpu, pytest.mark.exclude_clang] + +def start_profile(): + import time + pr = 
cProfile.Profile(timer=lambda: int(time.time()*1e9), timeunit=1e-6) + pr.enable() + return pr + +def stop_profile(pr, sort='cumtime', frac=0.2): + pr.disable() + ps = pstats.Stats(pr) + ps.strip_dirs() + ps.sort_stats(sort) + ps.print_stats(frac) + +class TestConvSpeed(unittest.TestCase): + + def test_mnist(self): + # https://keras.io/examples/vision/mnist_convnet/ + conv = 3 + inter_chan, out_chan = 32, 64 + + # ****** torch baseline ******* + + torch.backends.mkldnn.enabled = False + + conv = 3 + inter_chan, out_chan = 32, 64 + c1 = torch.randn(inter_chan,1,conv,conv, requires_grad=True) + c2 = torch.randn(out_chan,inter_chan,conv,conv, requires_grad=True) + l1 = torch.randn(out_chan*5*5, 10, requires_grad=True) + + c2d = torch.nn.functional.conv2d + mp = torch.nn.MaxPool2d((2,2)) + lsm = torch.nn.LogSoftmax(dim=1) + + cnt = 5 + fpt, bpt = 0.0, 0.0 + for i in range(cnt): + et0 = time.time() + x = torch.randn(128, 1, 28, 28, requires_grad=True) + x = mp(c2d(x,c1).relu()) + x = mp(c2d(x,c2).relu()) + x = x.reshape(x.shape[0], -1) + out = lsm(x.matmul(l1)) + out = out.mean() + et1 = time.time() + out.backward() + et2 = time.time() + fpt += (et1-et0) + bpt += (et2-et1) + + fpt_baseline = (fpt*1000/cnt) + bpt_baseline = (bpt*1000/cnt) + print("torch forward pass: %.3f ms" % fpt_baseline) + print("torch backward pass: %.3f ms" % bpt_baseline) + + # ****** tinygrad compare ******* + + c1 = Tensor(c1.detach().numpy(), requires_grad=True) + c2 = Tensor(c2.detach().numpy(), requires_grad=True) + l1 = Tensor(l1.detach().numpy(), requires_grad=True) + + cnt = 5 + fpt, bpt = 0.0, 0.0 + for i in range(1+cnt): + et0 = time.time() + x = Tensor.randn(128, 1, 28, 28) + x = x.conv2d(c1).relu().avg_pool2d() + x = x.conv2d(c2).relu().max_pool2d() + x = x.reshape(shape=(x.shape[0], -1)) + out = x.dot(l1).log_softmax() + out = out.mean() + out.realize() + et1 = time.time() + out.backward() + [x.grad.realize() for x in [c1, c2, l1]] + et2 = time.time() + if i == 0: + pr = start_profile() + else: + fpt += (et1-et0) + bpt += (et2-et1) + + stop_profile(pr, sort='time') + fpt = (fpt*1000/cnt) + bpt = (bpt*1000/cnt) + print("forward pass: %.3f ms, %.2fx off baseline %.3f ms" % (fpt, fpt/fpt_baseline, fpt_baseline)) + print("backward pass: %.3f ms, %.2fx off baseline %.3f ms" % (bpt, bpt/bpt_baseline, bpt_baseline)) + + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/test_nn.py b/tinygrad_repo/test/test_nn.py new file mode 100644 index 0000000..7770864 --- /dev/null +++ b/tinygrad_repo/test/test_nn.py @@ -0,0 +1,339 @@ +#!/usr/bin/env python +import unittest +import numpy as np +from extra.utils import WINDOWS +from tinygrad.helpers import CI +from tinygrad.jit import TinyJit +from tinygrad.tensor import Tensor, Device +from tinygrad.nn import BatchNorm2d, Conv1d, ConvTranspose1d, Conv2d, ConvTranspose2d, Linear, GroupNorm, LayerNorm, LayerNorm2d, Embedding, InstanceNorm +import torch +import pytest + +pytestmark = [pytest.mark.exclude_cuda] + +class TestNN(unittest.TestCase): + def test_sparse_cat_cross_entropy(self): + input = torch.randn(3, 5) + target = torch.empty(3, dtype=torch.long).random_(5) + loss_fun = torch.nn.CrossEntropyLoss(reduction='mean') + loss = loss_fun(input, target) + + input_tiny = Tensor(input.detach().numpy()) + taret_tiny = Tensor(target.detach().numpy()) + loss_tiny = input_tiny.sparse_categorical_crossentropy(taret_tiny) + + np.testing.assert_allclose(loss_tiny.numpy(), loss.detach().numpy(), atol=1e-5, rtol=1e-6) + + def test_batchnorm2d(self, 
training=False): + szs = [4, 8, 16, 32] + for sz in szs: + # create in tinygrad + Tensor.training = training + bn = BatchNorm2d(sz, eps=1e-5, track_running_stats=training) + bn.weight = Tensor.randn(sz) + bn.bias = Tensor.randn(sz) + bn.running_mean = Tensor.randn(sz) + bn.running_var = Tensor.randn(sz) + bn.running_var.numpy()[bn.running_var.numpy() < 0] = 0 + + # create in torch + with torch.no_grad(): + tbn = torch.nn.BatchNorm2d(sz).eval() + tbn.training = training + tbn.weight[:] = torch.tensor(bn.weight.numpy()) + tbn.bias[:] = torch.tensor(bn.bias.numpy()) + tbn.running_mean[:] = torch.tensor(bn.running_mean.numpy()) + tbn.running_var[:] = torch.tensor(bn.running_var.numpy()) + + np.testing.assert_allclose(bn.running_mean.numpy(), tbn.running_mean.detach().numpy(), rtol=1e-5, atol=1e-6) + np.testing.assert_allclose(bn.running_var.numpy(), tbn.running_var.detach().numpy(), rtol=1e-5, atol=1e-6) + + # trial + inn = Tensor.randn(2, sz, 3, 3) + + # in tinygrad + outt = bn(inn) + + # in torch + toutt = tbn(torch.tensor(inn.numpy())) + + # close + np.testing.assert_allclose(outt.numpy(), toutt.detach().numpy(), rtol=5e-4, atol=1e-6) + + np.testing.assert_allclose(bn.running_mean.numpy(), tbn.running_mean.detach().numpy(), rtol=1e-5, atol=1e-6) + + np.testing.assert_allclose(bn.running_var.numpy(), tbn.running_var.detach().numpy(), rtol=1e-5, atol=1e-6) + + def test_batchnorm2d_training(self): + self.test_batchnorm2d(True) + + def test_linear(self): + def _test_linear(x): + + # create in tinygrad + model = Linear(in_dim, out_dim) + z = model(x) + + # create in torch + with torch.no_grad(): + torch_layer = torch.nn.Linear(in_dim, out_dim).eval() + torch_layer.weight[:] = torch.tensor(model.weight.numpy(), dtype=torch.float32) + torch_layer.bias[:] = torch.tensor(model.bias.numpy(), dtype=torch.float32) + torch_x = torch.tensor(x.numpy(), dtype=torch.float32) + torch_z = torch_layer(torch_x) + + # test + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-4, rtol=1e-5) + + BS, T, in_dim, out_dim = 4, 2, 8, 16 + _test_linear(Tensor.randn(BS, in_dim)) + _test_linear(Tensor.randn(BS, T, in_dim)) # test with more dims + + def test_conv1d(self): + BS, C1, W = 4, 16, 224//4 + C2, K, S, P = 64, 7, 2, 1 + + # create in tinygrad + layer = Conv1d(C1, C2, kernel_size=K, stride=S, padding=P) + + # create in torch + with torch.no_grad(): + torch_layer = torch.nn.Conv1d(C1, C2, kernel_size=K, stride=S, padding=P).eval() + torch_layer.weight[:] = torch.tensor(layer.weight.numpy(), dtype=torch.float32) + torch_layer.bias[:] = torch.tensor(layer.bias.numpy(), dtype=torch.float32) + + # test + x = Tensor.uniform(BS, C1, W) + z = layer(x) + torch_x = torch.tensor(x.numpy()) + torch_z = torch_layer(torch_x) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-4, rtol=1e-5) + + def test_conv2d(self): + BS, C1, H, W = 4, 16, 224//4, 224//4 + C2, K, S, P = 64, 7, 2, 1 + + # create in tinygrad + layer = Conv2d(C1, C2, kernel_size=K, stride=S, padding=P) + + # create in torch + with torch.no_grad(): + torch_layer = torch.nn.Conv2d(C1, C2, kernel_size=K, stride=S, padding=P).eval() + torch_layer.weight[:] = torch.tensor(layer.weight.numpy(), dtype=torch.float32) + torch_layer.bias[:] = torch.tensor(layer.bias.numpy(), dtype=torch.float32) + + # test + x = Tensor.uniform(BS, C1, H, W) + z = layer(x) + torch_x = torch.tensor(x.numpy()) + torch_z = torch_layer(torch_x) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-4, rtol=1e-5) + + 
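# Illustrative sketch: the layer tests above all follow one parity-check pattern:
# build a tinygrad layer, copy its parameters into the equivalent torch layer under
# torch.no_grad(), run both on the same input, and compare outputs with
# np.testing.assert_allclose. A minimal standalone version of that pattern
# (function name, shapes, and tolerances here are illustrative assumptions):

import numpy as np
import torch
from tinygrad.tensor import Tensor
from tinygrad.nn import Conv2d

def check_conv2d_parity(bs=2, cin=4, cout=8, k=3, atol=5e-4, rtol=1e-5):
  # tinygrad layer with randomly initialized parameters
  layer = Conv2d(cin, cout, kernel_size=k)
  # mirror its weight and bias into the matching torch layer
  with torch.no_grad():
    torch_layer = torch.nn.Conv2d(cin, cout, kernel_size=k).eval()
    torch_layer.weight[:] = torch.tensor(layer.weight.numpy(), dtype=torch.float32)
    torch_layer.bias[:] = torch.tensor(layer.bias.numpy(), dtype=torch.float32)
  # same input through both implementations, then compare the outputs
  x = Tensor.uniform(bs, cin, 16, 16)
  z = layer(x)
  torch_z = torch_layer(torch.tensor(x.numpy()))
  np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=atol, rtol=rtol)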
@unittest.skipIf(Device.DEFAULT != "TORCH", "Takes too long to compile for Compiled backends") + def test_conv2d_winograd(self): + BS, C1, H, W = 2, 8, 16, 16 + C2, K, S, P = 8, 3, 1, 1 + + old_wino = Tensor.wino + Tensor.wino = True + + # create in tinygrad + layer = Conv2d(C1, C2, kernel_size=K, stride=S, padding=P) + layer.weight.requires_grad = True + layer.bias.requires_grad = True + + # create in torch + torch_layer = torch.nn.Conv2d(C1, C2, kernel_size=K, stride=S, padding=P).eval() + torch_layer.weight = torch.nn.Parameter(torch.tensor(layer.weight.numpy(), dtype=torch.float32)) + torch_layer.bias = torch.nn.Parameter(torch.tensor(layer.bias.numpy(), dtype=torch.float32)) + + # test + x = Tensor.uniform(BS, C1, H, W, requires_grad=True) + z = layer(x) + torch_x = torch.tensor(x.numpy(), requires_grad=True) + torch_z = torch_layer(torch_x) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-4, rtol=1e-5) + + m = z.mean() + m.backward() + gw = layer.weight.grad.realize() + gb = layer.bias.grad.realize() + gx = x.grad.realize() + + torch_z.mean().backward() + np.testing.assert_allclose(gw.numpy(), torch_layer.weight.grad.numpy(), atol=5e-4, rtol=1e-5) + np.testing.assert_allclose(gb.numpy(), torch_layer.bias.grad.numpy(), atol=5e-4, rtol=1e-5) + np.testing.assert_allclose(gx.numpy(), torch_x.grad.numpy(), atol=5e-4, rtol=1e-5) + + Tensor.wino = old_wino + + @unittest.skipIf(CI and (WINDOWS or Device.DEFAULT == "WEBGPU"), "runs out of memory in CI") + def test_conv_transpose1d(self): + BS, C1, W = 4, 16, 224//4 + C2, K, S, P = 64, 7, 2, 1 + + # create in tinygrad + layer = ConvTranspose1d(C1, C2, kernel_size=K, stride=S, padding=P) + + # create in torch + with torch.no_grad(): + torch_layer = torch.nn.ConvTranspose1d(C1, C2, kernel_size=K, stride=S, padding=P).eval() + torch_layer.weight[:] = torch.tensor(layer.weight.numpy(), dtype=torch.float32) + torch_layer.bias[:] = torch.tensor(layer.bias.numpy(), dtype=torch.float32) + + # test + x = Tensor.uniform(BS, C1, W) + z = layer(x) + torch_x = torch.tensor(x.numpy()) + torch_z = torch_layer(torch_x) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-4, rtol=1e-5) + + @unittest.skipIf(CI and (WINDOWS or Device.DEFAULT == "WEBGPU"), "runs out of memory in CI") + def test_conv_transpose2d(self): + BS, C1, H, W = 4, 16, 224//4, 224//4 + C2, K, S, P = 64, 7, 2, 1 + + # create in tinygrad + layer = ConvTranspose2d(C1, C2, kernel_size=K, stride=S, padding=P) + + # create in torch + with torch.no_grad(): + torch_layer = torch.nn.ConvTranspose2d(C1, C2, kernel_size=K, stride=S, padding=P).eval() + torch_layer.weight[:] = torch.tensor(layer.weight.numpy(), dtype=torch.float32) + torch_layer.bias[:] = torch.tensor(layer.bias.numpy(), dtype=torch.float32) + + # test + x = Tensor.uniform(BS, C1, H, W) + z = layer(x) + torch_x = torch.tensor(x.numpy()) + torch_z = torch_layer(torch_x) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-4, rtol=1e-5) + + def test_groupnorm(self): + BS, H, W, C, G = 20, 10, 10, 6, 3 + + # create in tinygrad + layer = GroupNorm(G, C) + + # create in torch + with torch.no_grad(): + torch_layer = torch.nn.GroupNorm(G, C).eval() + torch_layer.weight[:] = torch.tensor(layer.weight.numpy(), dtype=torch.float32) + torch_layer.bias[:] = torch.tensor(layer.bias.numpy(), dtype=torch.float32) + + # test + x = Tensor.randn(BS, C, H, W) + z = layer(x) + torch_x = torch.tensor(x.numpy()) + torch_z = torch_layer(torch_x) + np.testing.assert_allclose(z.numpy(), 
torch_z.detach().numpy(), atol=5e-3, rtol=5e-3) + + def test_layernorm(self): + N, C, H, W = 20, 5, 10, 10 + + # create in tinygrad + layer = LayerNorm([H, W]) + + # create in torch + with torch.no_grad(): + torch_layer = torch.nn.LayerNorm([H, W]).eval() + torch_layer.weight[:] = torch.tensor(layer.weight.numpy(), dtype=torch.float32) + torch_layer.bias[:] = torch.tensor(layer.bias.numpy(), dtype=torch.float32) + + # test + x = Tensor.randn(N, C, H, W) + z = layer(x) + torch_x = torch.tensor(x.numpy()) + torch_z = torch_layer(torch_x) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-3, rtol=5e-3) + + def test_layernorm_2d(self): + N, C, H, W = 20, 5, 10, 10 + + # create in tinygrad + layer = LayerNorm2d(C) + + # create in torch + with torch.no_grad(): + torch_layer = torch.nn.LayerNorm([C]).eval() + torch_layer.weight[:] = torch.tensor(layer.weight.numpy(), dtype=torch.float32) + torch_layer.bias[:] = torch.tensor(layer.bias.numpy(), dtype=torch.float32) + + # test + x = Tensor.randn(N, C, H, W) + z = layer(x) + torch_x = torch.tensor(x.numpy()) + torch_z = torch_layer(torch_x.permute(0,2,3,1)).permute(0,3,1,2) + + def test_instancenorm_2d(self): + N, C, H, W = 20, 5, 10, 10 + + # create in tinygrad + layer = InstanceNorm(C) + + # create in torch + with torch.no_grad(): + torch_layer = torch.nn.InstanceNorm2d(C, affine=True).eval() + torch_layer.weight[:] = torch.tensor(layer.weight.numpy(), dtype=torch.float32) + torch_layer.bias[:] = torch.tensor(layer.bias.numpy(), dtype=torch.float32) + + # test + x = Tensor.randn(N, C, H, W) + z = layer(x) + torch_x = torch.tensor(x.numpy()) + torch_z = torch_layer(torch_x) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-3, rtol=5e-3) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-3, rtol=5e-3) + + def test_instancenorm_3d(self): + N, C, D, H, W = 20, 5, 3, 10, 10 + + # create in tinygrad + layer = InstanceNorm(C) + + # create in torch + with torch.no_grad(): + torch_layer = torch.nn.InstanceNorm3d(C, affine=True).eval() + torch_layer.weight[:] = torch.tensor(layer.weight.numpy(), dtype=torch.float32) + torch_layer.bias[:] = torch.tensor(layer.bias.numpy(), dtype=torch.float32) + + # test + x = Tensor.randn(N, C, D, H, W) + z = layer(x) + torch_x = torch.tensor(x.numpy()) + torch_z = torch_layer(torch_x) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-3, rtol=5e-3) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-3, rtol=5e-3) + + def test_embedding(self): + B, T, C, VS = 4, 10, 20, 28 + + # create in tinygrad + layer = Embedding(VS, C) + + with torch.no_grad(): + torch_layer = torch.nn.Embedding(VS, C).eval() + torch_layer.weight[:] = torch.tensor(layer.weight.numpy(), dtype=torch.float32) + + # test + x = Tensor(np.random.randint(0, VS, (B, T)).astype(np.float32)) + z = layer(x) + torch_x = torch.tensor(x.numpy().astype(np.int32)) + torch_z = torch_layer(torch_x) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=1e-8, rtol=1e-8) + + # test with jit enabled + @TinyJit + def layer_jit(x): + return layer(x).realize() + + for _ in range(3): + x = Tensor(np.random.randint(0, VS, (B, T)).astype(np.float32)) + z = layer_jit(x) + torch_x = torch.tensor(x.numpy().astype(np.int32)) + torch_z = torch_layer(torch_x) + np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=1e-8, rtol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/test_ops.py 
b/tinygrad_repo/test/test_ops.py new file mode 100644 index 0000000..edac7ab --- /dev/null +++ b/tinygrad_repo/test/test_ops.py @@ -0,0 +1,1245 @@ +import torch +import time +import math +import numpy as np +import unittest +from tinygrad.tensor import Tensor +from tinygrad.helpers import getenv, IMAGE, DEBUG, CI, dtypes, Context, NOOPT +from tinygrad.ops import Device + +if CI: + import warnings + warnings.filterwarnings("ignore", message="Non-empty compiler output encountered") + +FORWARD_ONLY = getenv("FORWARD_ONLY", 0) +PRINT_TENSORS = getenv("PRINT_TENSORS", 0) +def helper_test_op(shps, torch_fxn, tinygrad_fxn=None, atol=1e-6, rtol=1e-3, grad_atol=1e-4, grad_rtol=1e-3, forward_only=False, vals=None, a=-0.5, b=3): + if tinygrad_fxn is None: tinygrad_fxn = torch_fxn + ts, tst = prepare_test_op(a, b, shps, vals, forward_only) + + st = time.monotonic() + out = torch_fxn(*ts) + torch_fp = time.monotonic() - st + + st = time.monotonic() + ret = tinygrad_fxn(*tst).realize() + tinygrad_fp = time.monotonic() - st + + def compare(s, x,y,atol,rtol): + if PRINT_TENSORS: print(s, x, y) + assert x.shape == y.shape, f"shape mismatch: tinygrad={x.shape} | torch={y.shape}" + try: + np.testing.assert_allclose(x,y, atol=atol, rtol=rtol) + except Exception: + raise Exception(f"{s} failed shape {x.shape}") + + if DEBUG >= 6: + np.set_printoptions(linewidth=200, suppress=True) + print(ret.numpy()) + print(out.detach().numpy()) + compare("forward pass", ret.numpy(), out.detach().numpy(), atol=atol, rtol=rtol) + + torch_fbp, tinygrad_fbp = np.nan, np.nan + if not forward_only and not FORWARD_ONLY: + st = time.monotonic() + (out+1).square().mean().backward() + torch_fbp = time.monotonic() - st + + st = time.monotonic() + (ret+1).square().mean().backward() + for tt in tst: tt.grad.realize() + tinygrad_fbp = time.monotonic() - st + + for i, (t, tt) in enumerate(zip(ts, tst)): + compare(f"backward pass tensor {i}", tt.grad.numpy(), t.grad.detach().numpy(), atol=grad_atol, rtol=grad_rtol) + + if not CI: print("\ntesting %40r torch/tinygrad fp: %.2f / %.2f ms bp: %.2f / %.2f ms " % (shps, torch_fp*1000, tinygrad_fp*1000, torch_fbp*1000, tinygrad_fbp*1000), end="") + +def prepare_test_op(a, b, shps, vals, forward_only=False): + torch.manual_seed(0) + np.random.seed(0) + if shps is None: ts = [torch.tensor(x, requires_grad=(not forward_only)) for x in vals] + else: ts = [torch.tensor((np.random.random(size=x) + a) * b, requires_grad=(not forward_only), dtype=torch.float32) for x in shps] + tst = [Tensor(x.detach().numpy(), requires_grad=(not forward_only and not FORWARD_ONLY)) for x in ts] + return ts, tst + +class TestOps(unittest.TestCase): + + def helper_test_exception(self, shps, torch_fxn, tinygrad_fxn, expected, exact=False, vals=None, a=-0.5, b=3): + ts, tst = prepare_test_op(a, b, shps, vals) + with self.assertRaises(expected) as torch_cm: + torch_fxn(*ts) + with self.assertRaises(expected) as tinygrad_cm: + tinygrad_fxn(*tst) + if exact: self.assertEqual(str(torch_cm.exception), str(tinygrad_cm.exception)) + if not CI: print("\ntesting %40r torch/tinygrad exception: %s / %s" % (shps, torch_cm.exception, tinygrad_cm.exception), end="") + + def test_full_like(self): + a = Tensor([[1,2,3],[4,5,6]]) + b = torch.tensor([[1,2,3],[4,5,6]]) + helper_test_op([], lambda: torch.full_like(b, 4), lambda: Tensor.full_like(a, 4), forward_only=True) + def test_full(self): + helper_test_op([], lambda: torch.full((45,65), 4), lambda: Tensor.full((45,65), 4), forward_only=True) + def test_zeros(self): + helper_test_op([], 
lambda: torch.zeros(45,65), lambda: Tensor.zeros(45,65), forward_only=True) + helper_test_op([], lambda: torch.zeros([45,65]), lambda: Tensor.zeros([45,65]), forward_only=True) + helper_test_op([], lambda: torch.zeros([]), lambda: Tensor.zeros([]), forward_only=True) + def test_zeros_like(self): + a = Tensor([[1,2,3],[4,5,6]]) + b = torch.tensor([[1,2,3],[4,5,6]]) + helper_test_op([], lambda: torch.zeros_like(b), lambda: Tensor.zeros_like(a), forward_only=True) + def test_empty_0(self): + helper_test_op([], lambda: torch.empty(45,65)*0/0, lambda: Tensor.empty(45,65)*0/0, forward_only=True) + def test_ones(self): + helper_test_op([], lambda: torch.ones(45,65), lambda: Tensor.ones(45,65), forward_only=True) + helper_test_op([], lambda: torch.ones([45,65]), lambda: Tensor.ones([45,65]), forward_only=True) + helper_test_op([], lambda: torch.ones([]), lambda: Tensor.ones([]), forward_only=True) + def test_ones_like(self): + a = Tensor([[1,2,3],[4,5,6]]) + b = torch.tensor([[1,2,3],[4,5,6]]) + helper_test_op([], lambda: torch.ones_like(b), lambda: Tensor.ones_like(a), forward_only=True) + def test_eye(self): + helper_test_op([], lambda: torch.eye(10), lambda: Tensor.eye(10), forward_only=True) + helper_test_op([], lambda: torch.eye(1), lambda: Tensor.eye(1), forward_only=True) + + def test_chunk(self): + tor = torch.arange(13).repeat(8, 1).chunk(6, 1) + ten = Tensor.arange(13).repeat((8, 1)).chunk(6, 1) + assert len(tor) == len(ten) + for i in range(len(tor)): + helper_test_op([], lambda: tor[i], lambda: ten[i], forward_only=True) + + tor = torch.arange(13).repeat(8, 1).chunk(6, 0) + ten = Tensor.arange(13).repeat((8, 1)).chunk(6, 0) + assert len(tor) == len(ten) + for i in range(len(tor)): + helper_test_op([], lambda: tor[i], lambda: ten[i], forward_only=True) + + tor = torch.arange(13).repeat(8, 1).chunk(3, -1) + ten = Tensor.arange(13).repeat((8, 1)).chunk(3, -1) + assert len(tor) == len(ten) + for i in range(len(tor)): + helper_test_op([], lambda: tor[i], lambda: ten[i], forward_only=True) + + tor = torch.arange(13).repeat(8, 3, 3).chunk(3, -2) + ten = Tensor.arange(13).repeat((8, 3, 3)).chunk(3, -2) + assert len(tor) == len(ten) + for i in range(len(tor)): + helper_test_op([], lambda: tor[i], lambda: ten[i], forward_only=True) + + def test_arange(self): + helper_test_op([], lambda: torch.arange(10), lambda: Tensor.arange(10), forward_only=True) + helper_test_op([], lambda: torch.arange(5, 10, 3), lambda: Tensor.arange(5, 10, 3), forward_only=True) + helper_test_op([], lambda: torch.arange(10, 5, -3), lambda: Tensor.arange(10, 5, -3), forward_only=True) + helper_test_op([], lambda: torch.arange(11, 5, -3), lambda: Tensor.arange(11, 5, -3), forward_only=True) + def test_arange_simple(self): + helper_test_op([], lambda: torch.arange(10), lambda: Tensor.arange(10), forward_only=True) + def test_arange_big(self): + helper_test_op([], lambda: torch.arange(256), lambda: Tensor.arange(256), forward_only=True) + + def test_where(self): + helper_test_op( + [(100,)], + lambda x: torch.where(x > 0.5, 4, 2), + lambda x: (x > 0.5).where(4, 2), forward_only=True) + + for shps in [[(8,),(1,),(1,)], [(10,10),(10,),(10,)], [(100,)]*3, [(10,10)]*3]: + helper_test_op( + shps, + lambda x, a, b: torch.where(x > 0.5, a, b), + lambda x, a, b: (x > 0.5).where(a, b), forward_only=True) + + def test_where_permute(self): + helper_test_op( + [(5, 5)], + lambda x: torch.where(x > 0.5, 4, 2).permute((1, 0)), + lambda x: (x > 0.5).where(4, 2).permute((1, 0)), forward_only=True) + + def _test_cmp(self, fxn, reverse=True): 
+ for shps in [[(3, 4, 5), (3, 4, 5)], [(3, 4, 5), (5,)], [(5,), (3, 4, 5)]]: + helper_test_op(shps, fxn, fxn, forward_only=True) + helper_test_op(None, fxn, fxn, forward_only=True, vals=[[0.,1,2], [2.,1,0]]) + helper_test_op(None, lambda x,y: fxn(x,2), lambda x,y: fxn(x,2), forward_only=True, vals=[[0.,1,2], [2.,1,0]]) + helper_test_op(None, fxn, fxn, forward_only=True, vals=[[True, True, False], [False,True,False]]) + if reverse: helper_test_op(None, lambda x,y: fxn(2,y), lambda x,y: fxn(2,y), forward_only=True, vals=[[0.,1,2], [2.,1,0]]) + + def test_cmp_eq(self): self._test_cmp(lambda x,y: x==y, reverse=False) + def test_cmp_gt(self): self._test_cmp(lambda x,y: x>y) + def test_cmp_ge(self): self._test_cmp(lambda x,y: x>=y) + def test_cmp_lt(self): self._test_cmp(lambda x,y: x<y) + + @unittest.skipIf(IMAGE>0, "no 1d dot for images") + def test_dot_1d(self): + helper_test_op([(65), (65)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4) + helper_test_op([(65), (65,45)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4) + helper_test_op([(45,65), (65)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4) + helper_test_op([(32,45,65), (65)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4) + helper_test_op([(65), (32,65,45)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4) + self.helper_test_exception([(4), (1,2)], lambda x, y: x.matmul(y), Tensor.dot, expected=(RuntimeError, AssertionError)) + self.helper_test_exception([(2,1), (4)], lambda x, y: x.matmul(y), Tensor.dot, expected=(RuntimeError, AssertionError)) + self.helper_test_exception([(1), (4)], lambda x, y: x.matmul(y), Tensor.dot, expected=(RuntimeError, AssertionError)) + def test_dot(self): + helper_test_op([(45,65), (65,100)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4) + helper_test_op([(32,45,65), (32,65,100)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4) + self.helper_test_exception([(2, 4), (1, 3)], lambda x, y: x.matmul(y), Tensor.dot, expected=(RuntimeError, AssertionError)) + self.helper_test_exception([(2, 1), (4, 3)], lambda x, y: x.matmul(y), Tensor.dot, expected=(RuntimeError, AssertionError)) + with self.assertRaises(AssertionError): + a = Tensor(3.14) + a.matmul(a) + def test_simple_cumsum(self): + helper_test_op([(1024)], lambda x: torch.cumsum(x, dim=0), lambda x: Tensor.cumsum(x, axis=0), atol=1e-6) + def test_cumsum(self): + helper_test_op([(20)], lambda x: torch.cumsum(x, dim=0), lambda x: Tensor.cumsum(x, axis=0), atol=1e-6) + helper_test_op([(20,30)], lambda x: torch.cumsum(x, dim=0), lambda x: Tensor.cumsum(x, axis=0), atol=1e-6) + helper_test_op([(20,30)], lambda x: torch.cumsum(x, dim=1), lambda x: Tensor.cumsum(x, axis=1), atol=1e-6) + helper_test_op([(20,30,40)], lambda x: torch.cumsum(x, dim=2), lambda x: Tensor.cumsum(x, axis=2), atol=1e-6) + helper_test_op([(20,30,40)], lambda x: torch.cumsum(x, dim=-1), lambda x: Tensor.cumsum(x, axis=-1), atol=1e-6) + + def test_argmax(self): + self.assertEqual(torch.Tensor([2,2]).argmax().numpy(), Tensor([2,2]).argmax().numpy()) # check if returns first index for same max + helper_test_op([(10,20)], lambda x: x.argmax(), lambda x: x.argmax(), forward_only=True) + helper_test_op([(10,20)], lambda x: x.argmax(0, False), lambda x: x.argmax(0, False), forward_only=True) + helper_test_op([(10,20)], lambda x: x.argmax(1, False), lambda x: x.argmax(1, False), forward_only=True) + helper_test_op([(10,20)], lambda x: x.argmax(1, True), lambda x: x.argmax(1, True), forward_only=True) + def test_argmin(self): + self.assertEqual(torch.Tensor([2, 2]).argmin().numpy(), Tensor([2, 2]).argmin().numpy()) +
helper_test_op([(10,20)], lambda x: x.argmin(), lambda x: x.argmin(), forward_only=True) + helper_test_op([(10,20)], lambda x: x.argmin(0, False), lambda x: x.argmin(0, False), forward_only=True) + helper_test_op([(10,20)], lambda x: x.argmin(1, False), lambda x: x.argmin(1, False), forward_only=True) + helper_test_op([(10,20)], lambda x: x.argmin(1, True), lambda x: x.argmin(1, True), forward_only=True) + + def test_matmul_simple(self): + helper_test_op([(4), (4,4)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4) + def test_matmul(self): + helper_test_op([(64), (64,99)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4) + + @unittest.skipIf(IMAGE>0, "no batched matmul on images") + def test_matmul_batched(self): + helper_test_op([(3), (1,3,3,5)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4) + + @unittest.skipIf(IMAGE>0, "no batched matmul on images") + def test_matmul_batched_vector(self): + helper_test_op([(4,3), (1,3,3,5)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4) + def test_small_gemm(self): + helper_test_op([(8,8), (8,8)], lambda x,y: x.matmul(y), lambda x,y: x@y, atol=1e-3) + def test_small_gemm_eye(self): + helper_test_op(None, lambda x,y: x.matmul(y), lambda x,y: x@y, atol=1e-3, vals=[np.eye(8).astype(np.float32), np.eye(8).astype(np.float32)]) + def test_gemm(self): + helper_test_op([(64,64), (64,64)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-3) + def test_big_gemm(self): + helper_test_op([(256,256), (256,256)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-3) + def test_broadcastdot(self): + helper_test_op([(10,45,65), (65,45)], lambda x,y: x @ y, Tensor.dot, atol=1e-4) + with self.assertRaises(AssertionError): + a = Tensor(3.14) + b = Tensor.ones(3,3) + a @ b + def test_multidot(self): + helper_test_op([(10,45,65), (10,65,45)], lambda x,y: x @ y, Tensor.dot, atol=1e-4) + helper_test_op([(3,3,45,65), (3,3,65,45)], lambda x,y: x @ y, Tensor.dot, atol=1e-4) + def test_sum_simple(self): + helper_test_op(None, lambda x: x.sum(), Tensor.sum, vals=[[1.,1.]]) + def test_sum_full(self): + helper_test_op([(16384)], lambda x: x.sum(), lambda x: x.sum()) + def test_sum_small_full(self): + helper_test_op([(45,5)], lambda x: x.sum(), Tensor.sum) + def test_sum_relu(self): + helper_test_op([(3,4,5)], lambda x: x.relu().sum().relu(), lambda x: x.relu().sum().relu()) + def test_sum(self): + helper_test_op([(45,3)], lambda x: x.sum(), Tensor.sum) + helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=3), lambda x: Tensor.sum(x, axis=3)) + helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=(1,3)), lambda x: Tensor.sum(x, axis=(1,3))) + helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=(0,2)), lambda x: Tensor.sum(x, axis=(0,2))) + helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=(1,2)), lambda x: Tensor.sum(x, axis=(1,2))) + helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=1), lambda x: Tensor.sum(x, axis=1)) + helper_test_op([()], lambda x: x.sum(), Tensor.sum) + def test_min(self): + helper_test_op([(3,3)], lambda x: x.min(), Tensor.min) + helper_test_op([(45,3)], lambda x: x.min(), Tensor.min) + helper_test_op([(45,3)], lambda x: x.min().mul(0.5), lambda x: Tensor.min(x).mul(0.5)) + helper_test_op([()], lambda x: x.min(), Tensor.min) + def test_max(self): + helper_test_op([(45,3)], lambda x: x.max(), Tensor.max) + helper_test_op([(45,3)], lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5)) + helper_test_op(None, lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5), + vals=[ + [[1.0,1.0,0.0,1.0]], + ]) + helper_test_op([(3,4,5,6)], lambda x: x.max(axis=1)[0], lambda x: 
Tensor.max(x, axis=1)) + helper_test_op([()], lambda x: x.max(), Tensor.max) + def test_mean(self): + helper_test_op([(3,4,5,6)], lambda x: x.mean()) + helper_test_op([()], lambda x: x.mean()) + def test_mean_axis(self): + helper_test_op([(3,4,5,6)], lambda x: x.mean(axis=(1,2)), lambda x: Tensor.mean(x, axis=(1,2))) + def test_std(self): + helper_test_op([(45, 65, 85)], lambda x: torch.std(x), lambda x: Tensor.std(x)) + helper_test_op([(45, 65, 85)], lambda x: torch.std(x, dim=None, correction=0), lambda x: Tensor.std(x, correction=0)) + helper_test_op([(45, 65, 85)], lambda x: torch.std(x, dim=None, correction=5), lambda x: Tensor.std(x, correction=5)) + def test_std_axis(self): + helper_test_op([(45, 65, 85)], lambda x: torch.std(x, dim=0), lambda x: Tensor.std(x, axis=0)) + helper_test_op([(45, 65, 85)], lambda x: torch.std(x, dim=2), lambda x: Tensor.std(x, axis=2)) + helper_test_op([(45, 65, 85)], lambda x: torch.std(x, dim=[1, 2]), lambda x: Tensor.std(x, axis=[1, 2])) + helper_test_op([(45, 65, 85)], lambda x: torch.std(x, dim=None), lambda x: Tensor.std(x, axis=None)) + helper_test_op([(45, 65, 85)], lambda x: torch.std(x, correction=0, dim=0), lambda x: Tensor.std(x, axis=0, correction=0)) + helper_test_op([(45, 65, 85)], lambda x: torch.std(x, correction=0, dim=2), lambda x: Tensor.std(x, axis=2, correction=0)) + helper_test_op([(45, 65, 85)], lambda x: torch.std(x, correction=0, dim=[1, 2]), lambda x: Tensor.std(x, axis=[1, 2], correction=0)) + helper_test_op([(45, 65, 85)], lambda x: torch.std(x, correction=0, dim=None), lambda x: Tensor.std(x, axis=None, correction=0)) + def test_std_keepdim(self): + helper_test_op([(45, 65, 85)], lambda x: torch.std(x, dim=None, keepdim=True), lambda x: Tensor.std(x, keepdim=True)) + helper_test_op([(45, 65, 85)], lambda x: torch.std(x, dim=0, keepdim=True, correction=0), lambda x: Tensor.std(x, keepdim=True, correction=0, axis=0)) + def test_log_softmax(self): + helper_test_op([(45,65)], lambda x: torch.nn.LogSoftmax(dim=1)(x), Tensor.log_softmax, atol=1e-7, grad_atol=1e-7) + helper_test_op([()], lambda x: torch.nn.LogSoftmax(dim=0)(x), Tensor.log_softmax, atol=1e-7, grad_atol=1e-7) + def test_log_softmax_other_axis(self): + helper_test_op([(10,10,10)], lambda x: x.log_softmax(0), lambda x: x.log_softmax(0), atol=1e-7, grad_atol=1e-7) + helper_test_op([(10,10,10)], lambda x: x.log_softmax(1), lambda x: x.log_softmax(1), atol=1e-7, grad_atol=1e-7) + helper_test_op([(10,10,10)], lambda x: x.log_softmax(2), lambda x: x.log_softmax(2), atol=1e-7, grad_atol=1e-7) + def test_tanh(self): + helper_test_op([(45,65)], lambda x: x.tanh(), Tensor.tanh, atol=1e-6, grad_atol=1e-6) + helper_test_op([(45,65)], lambda x: x.tanh(), Tensor.tanh, atol=1e-6, grad_atol=1e-6, a=-100) + helper_test_op([()], lambda x: x.tanh(), Tensor.tanh, atol=1e-6, grad_atol=1e-6) + def test_hardtanh(self): + for val in range(10, 30, 5): + helper_test_op([(45,65)], lambda x: torch.nn.functional.hardtanh(x,-val, val), lambda x: x.hardtanh(-val, val), atol=1e-6, grad_atol=1e-6) + helper_test_op([()], lambda x: torch.nn.functional.hardtanh(x,-val, val), lambda x: x.hardtanh(-val, val), atol=1e-6, grad_atol=1e-6) + def test_topo_sort(self): + helper_test_op([(45,65)], lambda x: (x+x)*x, lambda x: x.add(x).mul(x), atol=1e-6, grad_atol=1e-6) + helper_test_op([()], lambda x: (x+x)*x, lambda x: x.add(x).mul(x), atol=1e-6, grad_atol=1e-6) + + def test_scalar_mul(self): + helper_test_op([(45,65)], lambda x: x*2, lambda x: x*2) + helper_test_op([()], lambda x: x*2, lambda x: x*2) + def 
test_scalar_rmul(self): + helper_test_op([(45,65)], lambda x: 2*x, lambda x: 2*x) + helper_test_op([()], lambda x: 2*x, lambda x: 2*x) + def test_scalar_sub(self): + helper_test_op([(45,65)], lambda x: x-2, lambda x: x-2) + helper_test_op([()], lambda x: x-2, lambda x: x-2) + def test_scalar_rsub(self): + helper_test_op([(45,65)], lambda x: 2-x, lambda x: 2-x) + helper_test_op([()], lambda x: 2-x, lambda x: 2-x) + def test_flip_eye_crash(self): + helper_test_op([], lambda: (torch.eye(10)@torch.eye(10).flip(0)), + lambda: (Tensor.eye(10)@Tensor.eye(10).flip(0)), forward_only=True) + + @unittest.skipIf(Device.DEFAULT == "WEBGPU", "this test uses more than 8 bufs passing the WEBGPU limit") #TODO: remove after #1461 + def test_broadcast_full(self): + for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul), + (torch.div, Tensor.div)]: #, (torch.pow, Tensor.pow)]: + for shapes in [((5,13,24,16), (5,1,24,1)), ((1,3,1,7,1), (2,1,5,1,8))]: + with self.subTest(op=torch_op.__name__, shapes=shapes): + helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0) + + def test_broadcast_simple(self): + helper_test_op([(45,65), (45,1)], lambda x,y: x/y, lambda x,y: x/y) + helper_test_op([(45,65), ()], lambda x,y: x/y, lambda x,y: x/y) + + @unittest.skipIf(Device.DEFAULT == "WEBGPU", "this test uses more than 8 bufs passing the WEBGPU limit") #TODO: remove after #1461 + def test_broadcast_partial(self): + for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul), + (torch.div, Tensor.div)]: #, (torch.pow, Tensor.pow)]: + for shapes in [((1,32,32,32), (1,32,1,1)), ((5,13,24,16,2), (1,13,24,1,1)), + ((4,1), (4,5)), ((1,4), (5,4))]: + with self.subTest(op=torch_op.__name__, shapes=shapes): + # NOTE: ANE backwards? 
+ helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0) + + def test_slice_in_bounds_1dim(self): + helper_test_op([(3)], lambda x: x[1:3], lambda x: x[1:3]) + helper_test_op([(3)], lambda x: x[0:2], lambda x: x[0:2]) + helper_test_op([(3)], lambda x: x[-2:2], lambda x: x[-2:2]) + + def test_slice_on_0dim_tensor(self): + helper_test_op([()], lambda x: x[None], lambda x: x[None]) + + with self.assertRaises(IndexError): + a = Tensor(3.14) + a[0] + + def test_slice_int_indexing(self): + helper_test_op([(3)], lambda x: x[1], lambda x: x[1]) + helper_test_op([(3)], lambda x: x[-2], lambda x: x[-2]) + helper_test_op([(10,10)], lambda x: x[1], lambda x: x[1]) + helper_test_op([(3,3,3)], lambda x: x[1,1,1], lambda x: x[1,1,1]) + + def test_slice_in_bounds_multidim(self): + helper_test_op([(3,3,3)], lambda x: x[1:2], lambda x: x[1:2]) + helper_test_op([(3,3,3)], lambda x: x[1:2, 2], lambda x: x[1:2, 2]) + helper_test_op([(3,3,3)], lambda x: x[1:2, 1:2], lambda x: x[1:2, 1:2]) + helper_test_op([(3,3,3)], lambda x: x[1:2, 1:2, 0:-1], lambda x: x[1:2, 1:2, 0:-1]) + + def test_slice_with_none(self): + helper_test_op([(3,3,3)], lambda x: x[None], lambda x: x[None]) + helper_test_op([(3,3,3)], lambda x: x[1:2, None], lambda x: x[1:2, None]) + helper_test_op([(3,3,3)], lambda x: x[1:2, None, 1:2], lambda x: x[1:2, None, 1:2]) + helper_test_op([(3,3,3)], lambda x: x[1:2, 1:2, None, -1], lambda x: x[1:2, 1:2, None, -1]) + + def test_slice_one_endpoint_out_of_bounds(self): + helper_test_op([(3,3,3)], lambda x: x[0:4], lambda x: x[0:4]) + helper_test_op([(3,3,3)], lambda x: x[-6:4], lambda x: x[-6:4]) + helper_test_op([(3,3,3)], lambda x: x[1:50], lambda x: x[1:50]) + helper_test_op([(3,3,3)], lambda x: x[1:50, 1:2, -1], lambda x: x[1:50, 1:2, -1]) + + def test_slice_stride_gt_one(self): + helper_test_op([(7,5,10)], lambda x: x[::2, ::3, ::4], lambda x: x[::2, ::3, ::4]) + helper_test_op([(7,5,10)], lambda x: x[1:5:2, ::3, ::4], lambda x: x[1:5:2, ::3, ::4]) + helper_test_op([(7,5,10)], lambda x: x[1:5:2, 3, ::4], lambda x: x[1:5:2, 3, ::4]) + helper_test_op([(7,5,10)], lambda x: x[1:5:2, None, None, 3, None, ::4], lambda x: x[1:5:2, None, None, 3, None, ::4]) + + def test_slice_negative_strides(self): + # Torch doesn't support slicing with negative steps + a = np.random.randn(10, 10, 10).astype(np.float32) + t = Tensor(a) + np.testing.assert_allclose(a[::-1], t[::-1].numpy()) + np.testing.assert_allclose(a[::-2], t[::-2].numpy()) + np.testing.assert_allclose(a[:, 2:0:-1], t[:, 2:0:-1].numpy()) + np.testing.assert_allclose(a[:, 2:0:-1, 3:1:-2], t[:, 2:0:-1, 3:1:-2].numpy()) + np.testing.assert_allclose(a[4:0:-3, 2:0:-1, -1:-5:-2], t[4:0:-3, 2:0:-1, -1:-5:-2].numpy()) + + @unittest.skip("No suppport for tensors with 0s in shape") + def test_slice_both_endpoints_out_of_bounds(self): + helper_test_op([(3,3,3)], lambda x: x[5:10], lambda x: x[5:10], forward_only=True) + helper_test_op([(3,3,3)], lambda x: x[-15:-7], lambda x: x[-15:-7], forward_only=True) + + @unittest.skip("No suppport for tensors with 0s in shape") + def test_slice_start_gt_end(self): + helper_test_op([(3,3,3)], lambda x: x[-2:2], lambda x: x[-2:2], forward_only=True) + helper_test_op([(3,3,3)], lambda x: x[-2:-5], lambda x: x[-2:-5], forward_only=True) + + @unittest.skip("No suppport for tensors with 0s in shape") + def test_slice_empty(self): + helper_test_op([(10,10)], lambda x: x[1:1], lambda x: x[1:1], forward_only=True) + + @unittest.skip("No suppport for tensors with 0s in shape") + def 
test_slice_zero_in_shape(self): + helper_test_op([(10,10)], lambda x: x[1:1], lambda x: x[1:1]) # x.shape = (0, 10) + helper_test_op([(3,3,3)], lambda x: x[-2:-5], lambda x: x[-2:-5]) # x.shape = (0, 3, 3) + + def test_slice_errors(self): + a = Tensor.ones(4, 3) + with self.assertRaises(IndexError): + a[1, 77, 77, 77] # IndexError: (finds too many indices before the out of bounds) + a[1, 77] # IndexError: (out of bounds). + a[0, -77] + a[..., ...] # IndexError: only single ellipsis + + def test_slice_ellipsis(self): + helper_test_op([(3,3,3,3)], lambda x: x[..., 0], lambda x: x[..., 0]) + helper_test_op([(3,3,3,3)], lambda x: x[0, ...], lambda x: x[0, ...]) + helper_test_op([(3,3,3,3)], lambda x: x[0, ..., 0], lambda x: x[0, ..., 0]) + helper_test_op([(3,3,3,3)], lambda x: x[0:3, ..., 2:3], lambda x: x[0:3, ..., 2:3]) + helper_test_op([(3,3,3,3)], lambda x: x[None, 0:3, ..., 0, None], lambda x: x[None, 0:3, ..., 0, None]) + + def test_pad2d(self): + helper_test_op([(3,3,3,3)], lambda x: torch.nn.functional.pad(x, (1,2,3,4)), lambda x: x.pad2d(padding=(1,2,3,4))) + helper_test_op([(3,3,3,3)], lambda x: torch.nn.functional.pad(x, (-1,2,-3,4)), lambda x: x.pad2d(padding=(-1,2,-3,4))) + helper_test_op([(3,3,3,3)], lambda x: torch.nn.functional.pad(x, (1,2,3,4), value=5), lambda x: x.pad2d(padding=(1,2,3,4),value=5)) + helper_test_op([(3,3,3,3)], lambda x: torch.nn.functional.pad(x, (-1,2,-3,4), value=5), lambda x: x.pad2d(padding=(-1,2,-3,4),value=5)) + def test_pad(self): + helper_test_op([(3,3)], lambda x: torch.nn.functional.pad(x, (1,2,3,4)),lambda x: x.pad(((3,4),(1,2)))) + helper_test_op([(3,3)], lambda x: torch.nn.functional.pad(x, (1,2,3,4), value=5), lambda x: x.pad(((3,4), (1,2)), value=5)) + helper_test_op([(3,3)], lambda x: torch.nn.functional.pad(x, (1,2,3,4), value=float("inf")), lambda x: x.pad(((3,4), (1,2)), value=float("inf"))) + helper_test_op([(3,3)], lambda x: torch.nn.functional.pad(x, (1,2,3,4), value=float("-inf")), lambda x: x.pad(((3,4), (1,2)), value=float("-inf"))) + + def test_transpose(self): + helper_test_op([(3,3,3)], lambda x: x.transpose(1,2), lambda x: x.transpose(1,2)) + helper_test_op([(3,3,3)], lambda x: x.transpose(0,2), lambda x: x.transpose(0,2)) + helper_test_op([(1,2,3,4)], lambda x: x.movedim((3,0,2,1),(0,1,2,3)), lambda x: x.permute(order=(3,0,2,1))) + helper_test_op([(3,4,5,6)], lambda x: x.movedim((3,2,1,0),(0,1,2,3)), lambda x: x.permute(order=(3,2,1,0))) + helper_test_op([()], lambda x: x.permute(()), lambda x: x.permute(())) + + def test_reshape(self): + helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,3,6,6)), lambda x: x.reshape(shape=(-1,3,6,6))) + helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,1,6,6)), lambda x: x.reshape(shape=(-1,1,6,6))) + helper_test_op([()], lambda x: torch.reshape(x, []), lambda x: x.reshape([])) + helper_test_op([(1,)], lambda x: torch.reshape(x, []), lambda x: x.reshape([])) + helper_test_op([()], lambda x: torch.reshape(x, [1]), lambda x: x.reshape([1])) + + with self.assertRaises(AssertionError): + x = Tensor.ones((4,3,6,6)) + x.reshape([]) + + def test_flip(self): + helper_test_op([(4,3,6,6)], lambda x: torch.flip(x, (0,)), lambda x: x.flip(axis=(0,))) + helper_test_op([(4,3,6,6)], lambda x: torch.flip(x, (0,1)), lambda x: x.flip(axis=(0,1))) + helper_test_op([(4,3,6,6)], lambda x: torch.flip(x, (0,1,3)), lambda x: x.flip(axis=(0,1,3))) + helper_test_op([(4,3,6,6)], lambda x: torch.flip(x, (3,)), lambda x: x.flip(axis=(3,))) + helper_test_op([(4,3,6,6)], lambda x: torch.flip(x, 
(0,1,3)).flip((0,)), lambda x: x.flip(axis=(0,1,3)).flip(0)) + helper_test_op([(4,3,6,6)], lambda x: torch.flip(x, (3,)), lambda x: x.flip(axis=(-1,))) + helper_test_op([()], lambda x: torch.flip(x, ()), lambda x: x.flip(axis=())) + helper_test_op([(1,)], lambda x: torch.flip(x, ()), lambda x: x.flip(axis=())) + helper_test_op([(4, 3, 6, 6)], lambda x: torch.flip(x, ()), lambda x: x.flip(axis=())) + + def test_squeeze(self): + helper_test_op([(1,3,6,6)], lambda x: torch.squeeze(x, 0), lambda x: x.squeeze(dim=0)) + helper_test_op([(4,3,1,6)], lambda x: torch.squeeze(x, 1), lambda x: x.squeeze(dim=1)) + helper_test_op([(4,3,6,6)], lambda x: torch.squeeze(x, 3), lambda x: x.squeeze(dim=3)) + self.helper_test_exception([(4,3,6,6)], lambda x: torch.squeeze(x, 50), lambda x: x.squeeze(dim=50), expected=IndexError, exact=True) + self.helper_test_exception([(4,3,6,6)], lambda x: torch.squeeze(x, -50), lambda x: x.squeeze(dim=-50), expected=IndexError, exact=True) + helper_test_op([(4,3,6,1)], lambda x: torch.squeeze(x, -1), lambda x: x.squeeze(dim=-1)) + helper_test_op([(4,3,6,6)], lambda x: torch.squeeze(x), lambda x: x.squeeze()) + helper_test_op([(1,3,6,6)], lambda x: torch.squeeze(x), lambda x: x.squeeze()) + helper_test_op([(2,3,1)], lambda x: torch.squeeze(x), lambda x: x.squeeze()) + helper_test_op([()], lambda x: torch.squeeze(x, -1), lambda x: x.squeeze(dim=-1)) + helper_test_op([()], lambda x: torch.squeeze(x, 0), lambda x: x.squeeze(dim=0)) + self.helper_test_exception([()], lambda x: torch.squeeze(x, 10), lambda x: x.squeeze(dim=10), expected=IndexError, exact=True) + helper_test_op([()], lambda x: torch.squeeze(x), lambda x: x.squeeze()) + + def test_unsqueeze(self): + helper_test_op([(4,3,6,6)], lambda x: torch.unsqueeze(x, 0), lambda x: x.unsqueeze(dim=0)) + helper_test_op([(4,3,6,6)], lambda x: torch.unsqueeze(x, 4), lambda x: x.unsqueeze(dim=4)) + helper_test_op([(4,3,6,6)], lambda x: torch.unsqueeze(x, -1), lambda x: x.unsqueeze(dim=-1)) + helper_test_op([(4,3,6,6)], lambda x: torch.unsqueeze(x, -3), lambda x: x.unsqueeze(dim=-3)) + helper_test_op([()], lambda x: torch.unsqueeze(x, 0), lambda x: x.unsqueeze(dim=0)) + + def test_flatten(self): + for axis in range(3): + helper_test_op([(4,3,6,6)], lambda x: torch.flatten(x, start_dim=axis), lambda x: x.flatten(axis)) + helper_test_op([()], lambda x: x.flatten(), lambda x: x.flatten()) + helper_test_op([(1,)], lambda x: x.flatten(), lambda x: x.flatten()) + + def test_detach(self): + helper_test_op([(4,3,6,6)], lambda x: x.detach(), lambda x: x.detach(), forward_only=True) + helper_test_op([()], lambda x: x.detach(), lambda x: x.detach(), forward_only=True) + + def test_expand(self): + arg = (4,3,2,6) + helper_test_op([(4,3,1,6)], lambda x: x.expand(arg), lambda x: x.expand(shape=arg)) + helper_test_op([()], lambda x: x.expand([]), lambda x: x.expand(shape=[])) + + @unittest.skip("very slow") + def test_sd_big_conv(self): + # internal shape (1, 1, 512, 62, 62, 512, 3, 3) overflows a int + helper_test_op([(1,256,64,64), (512,256,3,3)], + lambda x,w: torch.nn.functional.conv2d(x, w), + lambda x,w: x.conv2d(w), atol=1e-2) + + @unittest.skip("slow") + def test_large_bs_conv(self): + # large batch size can cause OpenCL image to exceed max image height on macOS + # (or cause the conv kernel to overflow short sampling coords) + helper_test_op([(4096,3,3,3), (1,3,3,3)], + lambda x,w: torch.nn.functional.conv2d(x, w), + lambda x,w: x.conv2d(w), atol=1e-4, rtol=1e-2) + + @unittest.skip("slow") + def test_large_ic_conv(self): + # large input 
channel count can cause OpenCL image to exceed max image width on macOS + helper_test_op([(1,2048,3,3), (1,2048,3,3)], + lambda x,w: torch.nn.functional.conv2d(x, w), + lambda x,w: x.conv2d(w), atol=1e-4) + + def test_biased_conv2d(self): + C = 8 + helper_test_op([(1,C,5,5), (C,C,1,1), (C,)], + lambda x,w,b: torch.nn.functional.conv2d(torch.nn.functional.conv2d(x,w,b).relu(),w,b), + lambda x,w,b: Tensor.conv2d(x,w,b).relu().conv2d(w,b), atol=1e-4) + + def test_simple_conv2d(self): + helper_test_op([(1,4,9,9), (4,4,3,3)], + lambda x,w: torch.nn.functional.conv2d(x,w).relu(), + lambda x,w: Tensor.conv2d(x,w).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_simple_conv2d_noopt(self): + # useful with IMAGE enabled + with Context(NOOPT=1): + self.test_simple_conv2d() + + @unittest.skipIf(IMAGE>0, "no conv3d on images") + def test_simple_conv3d(self): + helper_test_op([(1,4,9,9,9), (4,4,3,3,3)], + lambda x,w: torch.nn.functional.conv3d(x,w).relu(), + lambda x,w: Tensor.conv2d(x,w).relu(), atol=1e-4, grad_rtol=1e-5) + + @unittest.skipIf(IMAGE>0, "no conv3d on images") + def test_padded_conv3d(self): + helper_test_op([(1,4,9,9,9), (4,4,3,3,3)], + lambda x,w: torch.nn.functional.conv3d(x,w,padding=1).relu(), + lambda x,w: Tensor.conv2d(x,w,padding=[1,1,1,1,1,1]).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_simple_conv2d_m4(self): + helper_test_op([(1,16,18,18), (16,16,3,3)], + lambda x,w: torch.nn.functional.conv2d(x,w).relu(), + lambda x,w: Tensor.conv2d(x,w).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_simple_conv2d_1x1(self): + helper_test_op([(1,4,9,9), (4,4,1,1)], + lambda x,w: torch.nn.functional.conv2d(x,w).relu(), + lambda x,w: Tensor.conv2d(x,w).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_simple_conv2d_1x1_m4(self): + helper_test_op([(1,16,32,32), (16,16,1,1)], + lambda x,w: torch.nn.functional.conv2d(x,w).relu(), + lambda x,w: Tensor.conv2d(x,w).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_nested_conv2d(self): + helper_test_op([(1,32,9,9), (32,32,3,3), (32,32,3,3)], + lambda x,w1,w2: torch.nn.functional.conv2d(torch.nn.functional.conv2d(x,w1).relu(), w2).relu(), + lambda x,w1,w2: x.conv2d(w1).relu().conv2d(w2).relu(), atol=1e-4, grad_rtol=1e-5) + + # expect reduce nodes == 3 + def test_simple_conv2d_nhwc(self): + # weights (from tf): filter_height x filter_width x in_channels x out_channels + helper_test_op([(2,9,9,10), (3,3,10,20)], + lambda x,w: torch.nn.functional.conv2d(x.permute(0,3,1,2),w.permute(3,2,0,1)).relu(), + lambda x,w: Tensor.conv2d(x.permute(0,3,1,2),w.permute(3,2,0,1)).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_simple_conv2d_batched(self): + helper_test_op([(2,4,9,9), (4,4,3,3)], + lambda x,w: torch.nn.functional.conv2d(x,w).relu(), + lambda x,w: Tensor.conv2d(x,w).relu(), atol=1e-4, grad_rtol=1e-5) + + # conv transpose + + def test_simple_conv_transpose2d(self): + helper_test_op([(2,4,9,9), (4,4,3,3)], + lambda x,w: torch.nn.functional.conv_transpose2d(x,w).relu(), + lambda x,w: Tensor.conv_transpose2d(x,w).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_bias_conv_transpose2d(self): + helper_test_op([(2,4,9,9), (4,4,3,3), (4,)], + lambda x,w,b: torch.nn.functional.conv_transpose2d(x,w,b).relu(), + lambda x,w,b: Tensor.conv_transpose2d(x,w,b).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_grouped_conv_transpose2d(self): + helper_test_op([(2,4,9,9), (4,4,3,3)], + lambda x,w: torch.nn.functional.conv_transpose2d(x,w,groups=2).relu(), + lambda x,w: Tensor.conv_transpose2d(x,w,groups=2).relu(), atol=1e-4, grad_rtol=1e-5) + + def 
test_padded_conv_transpose2d(self): + for padding in [(1,2), (2,1), 2, 1, 0]: + helper_test_op([(2,4,9,9), (4,4,3,3)], + lambda x,w: torch.nn.functional.conv_transpose2d(x,w,padding=padding).relu(), + lambda x,w: Tensor.conv_transpose2d(x,w,padding=padding).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_dilated_conv_transpose2d(self): + for dilation in [(1,2), (2,1), 2, 1]: + helper_test_op([(2,4,9,9), (4,4,3,3)], + lambda x,w: torch.nn.functional.conv_transpose2d(x,w,dilation=dilation).relu(), + lambda x,w: Tensor.conv_transpose2d(x,w,dilation=dilation).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_strided_conv_transpose2d(self): + for stride in [(2,1), (1,2), 1]: + helper_test_op([(2,4,4,5), (4,4,3,3)], + lambda x,w: torch.nn.functional.conv_transpose2d(x,w, stride=stride).relu(), + lambda x,w: Tensor.conv_transpose2d(x,w,stride=stride).relu(), atol=1e-4, grad_rtol=1e-5) + + @unittest.skipIf(Device.DEFAULT == "METAL" and CI, "broken in METAL CI") + def test_output_padded_conv_transpose2d(self): + for output_padding, stride in [((1,1), (2,3)), ((2,1), (3,2))]: + helper_test_op([(2,4,6,5), (4,4,3,3),(4,)], + lambda x,w,b: torch.nn.functional.conv_transpose2d(x,w,b,output_padding=output_padding,stride=stride).relu(), + lambda x,w,b: Tensor.conv_transpose2d(x,w,b,output_padding=output_padding,stride=stride).relu(), atol=1e-4, grad_rtol=1e-5) + + @unittest.skipIf(IMAGE>0, "no conv3d on images") + def test_simple_conv_transpose3d(self): + helper_test_op([(2,4,9,9,9), (4,4,3,3,3)], + lambda x,w: torch.nn.functional.conv_transpose3d(x,w).relu(), + lambda x,w: Tensor.conv_transpose2d(x,w).relu(), atol=1e-4, grad_rtol=1e-5) + + @unittest.skipIf((IMAGE>0), "no conv1d on images") + def test_conv1d(self): + for bs in [1,8]: + for cin in [1,3]: + for H in [1,2,5]: + for groups in [1,3] if cin == 3 and H == 5 else [1]: + with self.subTest(batch_size=bs, channels=cin, groups=groups, height=H): + helper_test_op([(bs,cin,11), (6,cin//groups,H)], + lambda x,w: torch.nn.functional.conv1d(x,w,groups=groups).relu(), + lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-4, grad_rtol=1e-5) + + @unittest.skipIf(IMAGE>0, "no conv1d on images") + def test_simple_padding_conv1d(self): + bs = 6 + cin = 2 + groups = 1 + H = 5 + p = (1,1) + helper_test_op([(bs,cin,11), (6,cin//groups,H)], + lambda x,w: torch.nn.functional.conv1d(torch.nn.functional.pad(x, p),w).relu(), + lambda x,w: Tensor.conv2d(x,w,padding=p).relu(), atol=1e-4) + + @unittest.skipIf(IMAGE>0, "no conv1d on images") + def test_strided_conv1d_simple(self): + bs, H = 2, 3 + helper_test_op([(bs,1,5), (1,1,H)], + lambda x,w: torch.nn.functional.conv1d(x,w,stride=2).relu(), + lambda x,w: Tensor.conv2d(x,w,stride=2).relu(), atol=1e-4) + + @unittest.skipIf(IMAGE>0, "no conv1d on images") + def test_asymmetric_padding_conv1d(self): + for p in [(0,1), (2,1), (2,0)]: + with self.subTest(padding := p): + for n in [3,4]: + for k in [2]: + helper_test_op([(1,1,n), (1,1,k)], + lambda x,w: torch.nn.functional.conv1d(torch.nn.functional.pad(x, p),w).relu(), + lambda x,w: Tensor.conv2d(x,w,padding=p).relu(), atol=1e-4) + helper_test_op([(1,1,n), (1,1,k)], + lambda x,w: torch.nn.functional.conv1d(torch.nn.functional.pad(x, p),w).relu(), + lambda x,w: Tensor.conv2d(x,w,padding=p).relu(), atol=1e-4) + + def _test_conv2d(self, bs=1, cin=1): + for H in [1,2,3]: + for W in [1,2,3,5]: + for groups in [1,3] if cin == 3 and H == 3 and W == 3 else [1]: + with self.subTest(batch_size=bs, channels=cin, groups=groups, height=H, width=W): + helper_test_op([(bs,cin,11,7), 
(6,cin//groups,H,W)], + lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(), + lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-4, grad_rtol=1e-5) + def test_conv2d(self): self._test_conv2d(bs=1, cin=3) + def test_conv2d_bs_4_cin_3(self): self._test_conv2d(bs=4, cin=3) + def test_conv2d_bs_1_cin_1(self): self._test_conv2d(bs=1, cin=1) + def test_conv2d_bs_4_cin_1(self): self._test_conv2d(bs=4, cin=1) + + def test_large_input_conv2d(self): + bs = 4 + cin = 16 + groups = 1 + H = 5 + W = 2 + helper_test_op([(bs,cin,64,64), (6,cin//groups,H,W)], + lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(), + # needed to relax tolerance on NVIDIA + lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-3, grad_rtol=1e-5) + + def test_simple_grouped_conv2d(self): + bs = 1 + groups = 2 + rcout = 1 + cin = 2 + helper_test_op([(bs,groups*cin,1,1), (groups*rcout,cin,1,1)], + lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(), + lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_medium_grouped_conv2d(self): + bs = 1 + groups = 2 + rcout = 2 + cin = 2 + helper_test_op([(bs,groups*cin,1,1), (groups*rcout,cin,1,1)], + lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(), + lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_depthwise_conv2d(self): + bs = 1 + groups = 32 + rcout = 1 + cin = 1 + helper_test_op([(bs,groups*cin,32,32), (groups*rcout,cin,1,1)], + lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(), + lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_grouped_conv2d(self): + bs = 4 + groups = 5 + rcout = 7 + cin = 3 + helper_test_op([(bs,groups*cin,5,5), (groups*rcout,cin,3,3)], + lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(), + lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_fancy_conv2d(self): + bs = 2 + cin = 3 + cout = 1 + groups = 3 + H,W = 3,3 + helper_test_op([(bs,cin,11,28), (groups*cout,cin//groups,H,W)], + lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(), + lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-4, grad_rtol=1e-5) + + def test_strided_conv2d_simple(self): + bs,H,W = 2,3,1 + helper_test_op([(bs,1,5,1), (1,1,H,W)], + lambda x,w: torch.nn.functional.conv2d(x,w,stride=2).relu(), + lambda x,w: Tensor.conv2d(x,w,stride=2).relu(), atol=1e-4) + + def test_strided_conv2d(self): + bs = 4 + cin = 3 + H,W = 3,3 + with self.subTest(stride := 2): + helper_test_op([(bs,cin,11,28), (4,cin,H,W)], + lambda x,w: torch.nn.functional.conv2d(x,w,stride=2).relu(), + lambda x,w: Tensor.conv2d(x,w,stride=stride).relu(), atol=1e-4) + with self.subTest(stride := (2,1)): + helper_test_op([(bs,cin,11,28), (4,cin,H,W)], + lambda x,w: torch.nn.functional.conv2d(x,w,stride=stride).relu(), + lambda x,w: Tensor.conv2d(x,w,stride=(2,1)).relu(), atol=1e-4) + + def test_negative_padding_conv2d(self): + n,k = 10, 3 + helper_test_op([(1,1,n,n), (1,1,k,k)], + lambda x,w: torch.nn.functional.conv2d(x[:, :, 1:-1, 1:-1],w).relu(), + lambda x,w: Tensor.conv2d(x,w,padding=-1).relu(), atol=1e-4) + helper_test_op([(1,1,n,n), (1,1,k,k)], + lambda x,w: torch.nn.functional.conv2d(x[:, :, 1:, 1:],w).relu(), + lambda x,w: Tensor.conv2d(x,w,padding=(-1,0,-1,0)).relu(), atol=1e-4) + + def test_simple_padding_conv2d(self): + p = (1,1,1,1) + helper_test_op(None, + lambda x,w: torch.nn.functional.conv2d(torch.nn.functional.pad(x, 
p),w).relu(), + lambda x,w: Tensor.conv2d(x,w,padding=p).relu(), atol=1e-4, vals=[[[[[2.,3.]]]], [[[[1.]]]]]) + + def test_asymmetric_padding_conv2d(self): + for p in [(0,1,0,1), (2,1,2,1), (2,0,2,1)]: + with self.subTest(padding := p): + for n in [3,4]: + for k in [2]: + helper_test_op([(1,1,n,n), (1,1,k,k)], + lambda x,w: torch.nn.functional.conv2d(torch.nn.functional.pad(x, p),w).relu(), + lambda x,w: Tensor.conv2d(x,w,padding=p).relu(), atol=1e-4) + helper_test_op([(1,1,n,n), (1,1,k,k)], + lambda x,w: torch.nn.functional.conv2d(torch.nn.functional.pad(x, p),w).relu(), + lambda x,w: Tensor.conv2d(x,w,padding=p).relu(), atol=1e-4) + + @unittest.skipIf(Device.DEFAULT == "METAL" and CI, "broken in METAL CI") + def test_padded_conv2d_p21(self): + bs,cin,H,W,padding = 4, 3, 3, 3, (2,1) + helper_test_op([(bs,cin,11,28), (4,cin,H,W)], + lambda x,w: torch.nn.functional.conv2d(x,w,padding=padding).relu(), + lambda x,w: Tensor.conv2d(x,w,padding=padding).relu(), atol=1e-4) + + @unittest.skipIf(Device.DEFAULT == "METAL" and CI, "broken in METAL CI") + def test_padded_conv2d_p22(self): + bs,cin,H,W,padding = 4, 3, 3, 3, (2,2) + helper_test_op([(bs,cin,11,28), (4,cin,H,W)], + lambda x,w: torch.nn.functional.conv2d(x,w,padding=padding).relu(), + lambda x,w: Tensor.conv2d(x,w,padding=padding).relu(), atol=1e-4) + + def test_padded_conv2d_1x1(self): + bs,cin,H,W,padding = 4, 3, 1, 1, 2 + helper_test_op([(bs,cin,11,28), (4,cin,H,W)], + lambda x,w: torch.nn.functional.conv2d(x,w,padding=padding).relu(), + lambda x,w: Tensor.conv2d(x,w,padding=padding).relu(), atol=1e-4) + + def test_padded_conv2d_bs1(self): + bs,cin,H,W,padding = 1, 3, 3, 3, 1 + helper_test_op([(bs,cin,11,28), (4,cin,H,W)], + lambda x,w: torch.nn.functional.conv2d(x,w,padding=padding).relu(), + lambda x,w: Tensor.conv2d(x,w,padding=padding).relu(), atol=1e-4) + + def test_padding_add(self): + helper_test_op([(64,64), (60,60)], + lambda x,w: x+torch.nn.functional.pad(w, (2,2,2,2)), + lambda x,w: x+w.pad2d((2,2,2,2))) + + def test_dilated_conv2d(self): + bs = 4 + cin = 3 + H,W = 3,3 + for d in [2, (2,1)]: + with self.subTest(dilation := d): + helper_test_op([(bs,cin,11,28), (4,cin,H,W)], + lambda x,w: torch.nn.functional.conv2d(x,w,dilation=dilation).relu(), + lambda x,w: Tensor.conv2d(x,w,dilation=dilation).relu(), atol=1e-4) + + def test_maxpool2d_simple(self): + ksz = (2,2) + helper_test_op([(1,1,2,3)], + lambda x: torch.nn.functional.max_pool2d(x, kernel_size=ksz), + lambda x: Tensor.max_pool2d(x, kernel_size=ksz)) + + def test_maxpool2d(self): + for ksz in [(2,2), (3,3), 2, 3, (3,2), (5,5), (5,1)]: + with self.subTest(kernel_size=ksz): + helper_test_op([(32,2,110,28)], + lambda x: torch.nn.functional.max_pool2d(x, kernel_size=ksz), + lambda x: Tensor.max_pool2d(x, kernel_size=ksz)) + + def test_maxpool2d_bigger_stride(self): + for stride in [(2,3), (3,2), 2, 3]: + with self.subTest(stride=stride): + helper_test_op([(32,2,110,28)], + lambda x: torch.nn.functional.max_pool2d(x, kernel_size=(2,2), stride=stride), + lambda x: Tensor.max_pool2d(x, kernel_size=(2,2), stride=stride)) + + @unittest.skipIf(Device.DEFAULT == "CUDA", "CUDA fails on this") + def test_maxpool2d_unit_stride(self): + helper_test_op([(32,2,110,28)], + lambda x: torch.nn.functional.max_pool2d(x, kernel_size=(5,5), stride=1), + lambda x: Tensor.max_pool2d(x, kernel_size=(5,5), stride=1)) + + def test_maxpool2d_smaller_stride(self): + for stride in [(2,3), (3,2), 2, 3]: + with self.subTest(stride=stride): + helper_test_op([(32,2,110,28)], + lambda x: 
torch.nn.functional.max_pool2d(x, kernel_size=(5,5), stride=stride), + lambda x: Tensor.max_pool2d(x, kernel_size=(5,5), stride=stride)) + + def test_maxpool2d_dilation(self): + for dilation in [(2, 3), (3, 2), 2, 3]: + helper_test_op([(32,2,110,28)], + lambda x: torch.nn.functional.max_pool2d(x, kernel_size=(5,5), dilation=dilation), + lambda x: Tensor.max_pool2d(x, kernel_size=(5,5), dilation=dilation)) + + def test_avgpool2d(self): + shape = (32,2,111,28) + for ksz in [(2,2), (3,3), (3,2), (5,5), (5,1)]: + with self.subTest(kernel_size=ksz): + helper_test_op([shape], + lambda x: torch.nn.functional.avg_pool2d(x, kernel_size=ksz), + lambda x: Tensor.avg_pool2d(x, kernel_size=ksz), rtol=1e-5) + + def test_global_avgpool2d(self): + helper_test_op([(32,2,111,28)], + lambda x: torch.nn.functional.avg_pool2d(x, kernel_size=(111,28)), + lambda x: Tensor.avg_pool2d(x, kernel_size=(111,28)), rtol=1e-5) + + def test_cat(self): + for dim in range(-2, 3): + helper_test_op([(45,65,9), (45,65,9), (45,65,9)], lambda x,y,z: torch.cat((x,y,z), dim), lambda x,y,z: x.cat(y, z, dim=dim)) + + with self.assertRaises(AssertionError): + a = Tensor(3.14) + a.cat(a) + + def test_multicat(self): + for dim in range(-1, 2): + helper_test_op([(45,65), (45,65), (45,65)], lambda x,y,z: torch.cat((x,y,z), dim), lambda x,y,z: x.cat(y, z, dim=dim)) + + def test_stack(self): + x = Tensor.randn(45, 65, 3) + + for dim in range(-1, 3): + helper_test_op([(45, 65, 3), (45, 65, 3), (45, 65, 3)], lambda x, y, z: torch.stack((x, y, z), dim=dim), lambda x, y, z: Tensor.stack([x, y, z], dim=dim)) + + with self.assertRaises(IndexError): + Tensor.stack([x], dim=77) + + a = Tensor(3.14) + np.testing.assert_allclose(Tensor.stack([a, a]).numpy(), Tensor([3.14, 3.14]).numpy()) + + def test_repeat(self): + x = Tensor.randn(4, 6, 3) + base_repeats = [2, 4, 3] + + for reps in [[], [4], [2, 1], [3, 2, 2]]: + repeats = base_repeats + reps + helper_test_op([(4, 6, 3)], lambda x: x.repeat(*repeats), lambda x: x.repeat(repeats)) + helper_test_op([()], lambda x: x.repeat(*repeats), lambda x: x.repeat(repeats)) + + with self.assertRaises(AssertionError): + x.repeat((2, 4)) + + with self.assertRaises(AssertionError): + x.repeat((2, 0, 4)) + + def test_clip(self): + helper_test_op([(45,65)], lambda x: x.clip(-2.3, 1.2), lambda x: x.clip(-2.3, 1.2)) + + def test_matvecmat(self): + helper_test_op([(1,128), (128,128), (128,128)], lambda x,y,z: (x@y).relu()@z, atol=1e-4) + + def test_matvec(self): + helper_test_op([(1,128), (128,128)], lambda x,y: (x@y).relu(), atol=1e-4) + + # this was the failure in llama early realizing freqs_cis + def test_double_slice(self): + helper_test_op([(4,4)], lambda x: x[:, 1:2][1:2]) + helper_test_op([(4,4)], lambda x: x[1:3][1:2]) + helper_test_op([(4,4)], lambda x: x[:, 1:2][0:1]) + helper_test_op([(4,4)], lambda x: x[:, 1:2][:, 0:1]) + + @unittest.skip("this test is broken #862") + def test_max_inf(self): + n = Tensor([1, float("nan")]).max().numpy() + assert math.isnan(n.item()), f"{n.item()} is not nan" + + def test_inf_where(self): + x = Tensor.full((3, 3), float("inf")) + n = (x < 0).where(x, 1).numpy() + assert np.all(n == 1.) 
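# Illustrative sketch: the masking idiom exercised in test_where and test_inf_where
# above, (cond).where(a, b), mirrors np.where(cond, a, b). A minimal standalone
# parity check (function name and shapes are assumptions for illustration):

import numpy as np
from tinygrad.tensor import Tensor

def check_where_parity():
  a = np.random.randn(8, 8).astype(np.float32)
  t = Tensor(a)
  # numpy reference: pick 4 where the condition holds, else 2
  expected = np.where(a > 0.5, 4.0, 2.0)
  # tinygrad: the comparison yields a mask tensor, .where selects per element
  got = (t > 0.5).where(4, 2).numpy()
  np.testing.assert_allclose(got, expected)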
+ + def _get_index_randoms(self): + # indices cannot have gradient + # TODO currently does not support IndexError for out of bounds idx values + a = torch.randint(low=-1, high=1, size=(2,1,1,1,1,1), dtype=torch.int64, requires_grad=False) + b = torch.randint(high=1, size=(1,3,1,1,1,1), dtype=torch.int64, requires_grad=False) + c = torch.randint(low=-5, high=5, size=(1,1,4,1,1,1), dtype=torch.int64, requires_grad=False) + d = torch.randint(high=4, size=(2,1,1,5,1,1), dtype=torch.int64, requires_grad=False) + e = torch.randint(high=1, size=(1,1,1,1,6,1), dtype=torch.int64, requires_grad=False) + i, j, k, o, p = [Tensor(tor.detach().numpy().astype(np.int32), dtype=dtypes.int32, requires_grad=False) for tor in [a,b,c,d,e]] + return a,b,c,d,e,i,j,k,o,p + + def test_slice_fancy_indexing_no_dim_collapse(self): + a,b,c,d,e,i,j,k,o,p = self._get_index_randoms() + # no dim collapse from int or dim injection from None + helper_test_op([(2,5,6,5,3,4)], lambda x: x[a,b,c,d,e], lambda x: x[i,j,k,o,p]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[:,b,c,d,:], lambda x: x[:,j,k,o,:]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[a,b,...], lambda x: x[i,j,...]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[a,...,e], lambda x: x[i,...,p]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[...,c,:,e], lambda x: x[...,k,:,p]) + + def test_slice_fancy_indexing_dim_collapse_int(self): + a,b,c,d,e,i,j,k,o,p = self._get_index_randoms() + # dim collapse from int + helper_test_op([(2,5,6,5,3,4)], lambda x: x[1,b,c,d,e], lambda x: x[1,j,k,o,p]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[a,b,3,d,e], lambda x: x[i,j,3,o,p]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[1,b,2,d,2], lambda x: x[1,j,2,o,2]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[a,2,2,2,e], lambda x: x[i,2,2,2,p]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[1,:,3:11:2,d,0:2], lambda x: x[1,:,3:11:2,o,0:2]) + + def test_slice_fancy_indexing_dim_inject_none(self): + a,b,c,d,e,i,j,k,o,p = self._get_index_randoms() + # dim injection from None + helper_test_op([(2,5,6,5,3,4)], lambda x: x[None,b,c,d,e], lambda x: x[None,j,k,o,p]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[a,b,c,d,None], lambda x: x[i,j,k,o,None]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[a,b,None,d,e], lambda x: x[i,j,None,o,p]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[None,b,c,d,None], lambda x: x[None,j,k,o,None]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[a,:,None,d,e], lambda x: x[i,:,None,o,p]) + + def test_slice_fancy_indexing_dim_inject_and_collapse(self): + a,b,c,d,e,i,j,k,o,p = self._get_index_randoms() + # dim injection and collapse + helper_test_op([(2,5,6,5,3,4)], lambda x: x[1,b,None,d,1], lambda x: x[1,j,None,o,1]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[None,b,2,d,None], lambda x: x[None,j,2,o,None]) + helper_test_op([(2,5,6,5,3,4)], lambda x: x[...,1,d,None], lambda x: x[...,1,o,None]) + + def test_slice_fancy_indexing_with_idx(self): + # indexing using idx with different dim + helper_test_op([(2,3)], lambda x: x[torch.tensor([[0,0,0],[0,0,0]]), torch.tensor(1)], lambda x: x[Tensor([[0,0,0],[0,0,0]]), Tensor(1)]) + helper_test_op([(2,3)], lambda x: x[torch.tensor([1]), torch.tensor([[0,0,0],[0,0,0]])], lambda x: x[Tensor([1]), Tensor([[0,0,0],[0,0,0]])]) + + def test_gather(self): + # indices cannot have gradient + # indices cannot be negative (torch gather) + b = torch.randint(3, size=[3,4,5], dtype=torch.int64, requires_grad=False) + a = Tensor(b.detach().numpy().astype(np.int32), dtype=dtypes.int32, requires_grad=False) + 
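+ # [editor's note — illustrative only, not part of the original patch]
+ # torch.gather semantics for dim=0: out[i][j][k] = x[index[i][j][k]][j][k].
+ # The index tensor must have the same rank as x, with index.size(d) <= x.size(d)
+ # for every d != dim, which is what the two exception cases below exercise.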
helper_test_op([(4,5,6)], lambda x: x.gather(index=b, dim=0), lambda x: x.gather(idx=a, dim=0)) + helper_test_op([(4,5,6)], lambda x: x.gather(index=b, dim=1), lambda x: x.gather(idx=a, dim=1)) + helper_test_op([(4,5,6)], lambda x: x.gather(index=b, dim=2), lambda x: x.gather(idx=a, dim=2)) + helper_test_op([(3,4,5)], lambda x: x.gather(index=b, dim=0), lambda x: x.gather(idx=a, dim=0)) + self.helper_test_exception([(4,5,6)], lambda x: x.gather(index=torch.tensor([1], dtype=torch.int64), dim=0), lambda x: x.gather(idx=Tensor([1], dtype=dtypes.int32), dim=0), expected=(RuntimeError, AssertionError)) + self.helper_test_exception([(2,1,1)], lambda x: x.gather(index=b, dim=0), lambda x: x.gather(idx=a, dim=0), expected=(RuntimeError, AssertionError)) + + def test_scaled_product_attention(self): + helper_test_op([(32,8,16,64), (32,8,16,64), (32,8,16,64)], lambda x,y,z: torch.nn.functional.scaled_dot_product_attention(x,y,z), lambda x,y,z: Tensor.scaled_dot_product_attention(x,y,z)) + helper_test_op([(32,8,16,64), (32,8,16,64), (32,8,16,64), (32,8,16,16)], lambda x,y,z,m: torch.nn.functional.scaled_dot_product_attention(x,y,z,attn_mask=m), lambda x,y,z,m: Tensor.scaled_dot_product_attention(x,y,z,attn_mask=m)) + helper_test_op([(32,8,16,64), (32,8,16,64), (32,8,16,64)], lambda x,y,z: torch.nn.functional.scaled_dot_product_attention(x,y,z,is_causal=True), lambda x,y,z: Tensor.scaled_dot_product_attention(x,y,z,is_causal=True)) + + def test_binary_crossentropy(self): + helper_test_op([(32,10), (32,10)], lambda x,y: torch.nn.functional.binary_cross_entropy(x.sigmoid(),torch.clip(y,0,1)), lambda x,y: x.sigmoid().binary_crossentropy(y.clip(0,1))) + helper_test_op([(32,10), (32,10)], lambda x,y: torch.nn.functional.binary_cross_entropy_with_logits(x,torch.clip(y,0,1)), lambda x,y: x.binary_crossentropy_logits(y.clip(0,1))) + helper_test_op([(32,10), (32,10)], lambda x,y: torch.nn.functional.binary_cross_entropy_with_logits(x,torch.clip(y,0,1)), lambda x,y: x.sigmoid().binary_crossentropy(y.clip(0,1))) + helper_test_op([(32,10), (32,10)], lambda x,y: torch.nn.functional.binary_cross_entropy(x.sigmoid(),torch.clip(y,0,1)), lambda x,y: x.binary_crossentropy_logits(y.clip(0,1))) + +if __name__ == '__main__': + np.random.seed(1337) + unittest.main(verbosity=2) diff --git a/tinygrad_repo/test/test_optim.py b/tinygrad_repo/test/test_optim.py new file mode 100644 index 0000000..df1e53f --- /dev/null +++ b/tinygrad_repo/test/test_optim.py @@ -0,0 +1,98 @@ +import numpy as np +import torch +import unittest +from tinygrad.tensor import Tensor +from tinygrad.nn.optim import Adam, SGD, AdamW +import pytest + +pytestmark = pytest.mark.exclude_cuda + +np.random.seed(1337) +x_init = np.random.randn(1,4).astype(np.float32) +W_init = np.random.randn(4,4).astype(np.float32) +m_init = np.random.randn(1,4).astype(np.float32) + +class TinyNet: + def __init__(self, tensor): + self.x = tensor(x_init.copy(), requires_grad=True) + self.W = tensor(W_init.copy(), requires_grad=True) + self.m = tensor(m_init.copy()) + + def forward(self): + out = self.x.matmul(self.W).relu() + # print(out.detach().numpy()) + out = out.log_softmax(1) + out = out.mul(self.m).add(self.m).sum() + return out + +def step(tensor, optim, steps=1, kwargs={}): + net = TinyNet(tensor) + optim = optim([net.x, net.W], **kwargs) + for _ in range(steps): + out = net.forward() + optim.zero_grad() + out.backward() + optim.step() + return net.x.detach().numpy(), net.W.detach().numpy() + +class TestOptim(unittest.TestCase): + + def _test_optim(self, tinygrad_optim, 
torch_optim, steps, opts, atol, rtol): + for x,y in zip(step(Tensor, tinygrad_optim, steps, kwargs=opts), + step(torch.tensor, torch_optim, steps, kwargs=opts)): + np.testing.assert_allclose(x, y, atol=atol, rtol=rtol) + + def _test_sgd(self, steps, opts, atol, rtol): self._test_optim(SGD, torch.optim.SGD, steps, opts, atol, rtol) + def _test_adam(self, steps, opts, atol, rtol): self._test_optim(Adam, torch.optim.Adam, steps, opts, atol, rtol) + def _test_adamw(self, steps, opts, atol, rtol): self._test_optim(AdamW, torch.optim.AdamW, steps, opts, atol, rtol) + + def test_sgd(self): self._test_sgd(1, {'lr': 0.001}, 1e-6, 0) + def test_sgd_high_lr(self): self._test_sgd(1, {'lr': 10}, 1e-6, 1e-5) + def test_sgd_wd(self): self._test_sgd(1, {'lr': 0.001, 'weight_decay': 0.1}, 1e-6, 0) + def test_sgd_high_lr_wd(self): self._test_sgd(1, {'lr': 10, 'weight_decay': 0.1}, 1e-6, 1e-5) + + def test_multistep_sgd(self): self._test_sgd(10, {'lr': 0.001}, 1e-6, 0) + def test_multistep_sgd_high_lr(self): self._test_sgd(10, {'lr': 10}, 1e-6, 3e-4) + def test_multistep_sgd_wd(self): self._test_sgd(10, {'lr': 0.001, 'weight_decay': 0.1}, 1e-6, 0) + def test_multistep_sgd_high_lr_wd(self): self._test_sgd(10, {'lr': 9, 'weight_decay': 0.1}, 1e-6, 3e-4) + + def test_multistep_sgd_momentum(self): self._test_sgd(10, {'lr': 0.001, 'momentum': 0.9}, 1e-6, 0) + def test_multistep_sgd_high_lr_momentum(self): self._test_sgd(10, {'lr': 10, 'momentum': 0.9}, 1e-5, 3e-4) + def test_multistep_sgd_momentum_wd(self): self._test_sgd(10, {'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.1}, 1e-6, 0) + def test_multistep_sgd_high_lr_momentum_wd(self): self._test_sgd(10, {'lr': 10, 'momentum': 0.9, 'weight_decay': 0.1}, 1e-5, 3e-4) + + def test_multistep_sgd_nesterov_momentum(self): self._test_sgd(10, {'lr': 0.001, 'momentum': 0.9, 'nesterov': True}, 1e-5, 0) + def test_multistep_sgd_high_lr_nesterov_momentum(self): self._test_sgd(10, {'lr': 10, 'momentum': 0.9, 'nesterov': True}, 1e-5, 3e-4) + def test_multistep_sgd_nesterov_momentum_wd(self): self._test_sgd(10, {'lr': 0.001, 'momentum': 0.9, 'nesterov': True, 'weight_decay': 0.1}, 1e-5, 0) + def test_multistep_sgd_high_lr_nesterov_momentum_wd(self): self._test_sgd(10, {'lr': 9, 'momentum': 0.9, 'nesterov': True, 'weight_decay': 0.1}, 1e-5, 3e-4) + + def test_adam(self): self._test_adam(1, {'lr': 0.001}, 1e-5, 0) + def test_adam_high_lr(self): self._test_adam(1, {'lr': 10}, 1e-4, 1e-4) + def test_adamw(self): self._test_adamw(1, {'lr': 0.001}, 1e-5, 0) + def test_adamw_high_lr(self): self._test_adamw(1, {'lr': 10}, 1e-4, 1e-4) + + def test_multistep_adam(self): self._test_adam(10, {'lr': 0.001}, 1e-5, 0) + def test_multistep_adam_high_lr(self): self._test_adam(10, {'lr': 10}, 2e-4, 5e-4) + + def test_multistep_adamw(self): self._test_adamw(10, {'lr': 0.001}, 1e-5, 0) + def test_multistep_adamw_high_lr(self): self._test_adamw(10, {'lr': 10}, 5e-4, 2e-3) + + def test_duped_weights(self): + for Opt in [Adam, AdamW, SGD]: + losses = [] + for i in range(2): + w = Tensor(x_init.copy()) + opt = Opt([w], lr=0.1) if i == 0 else Opt([w, w], lr=0.1) + + loss = None + for _ in range(3): + loss = w.sum() + opt.zero_grad() + loss.backward() + opt.step() + losses.append(loss.numpy()) + + np.testing.assert_allclose(losses[0], losses[1], atol=1e-4, rtol=0) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tinygrad_repo/test/test_randomness.py b/tinygrad_repo/test/test_randomness.py new file mode 100644 index 0000000..cf1b018 --- /dev/null +++ 
b/tinygrad_repo/test/test_randomness.py @@ -0,0 +1,115 @@ +import math +import unittest +import numpy as np +import torch +from tinygrad.tensor import Tensor +import tinygrad.nn as nn +import pytest +from tinygrad.helpers import dtypes +from functools import partial + +pytestmark = pytest.mark.webgpu + +# https://gist.github.com/devries/11405101 +def ksprob(a): + fac, total, termbf = 2.0, 0.0, 0.0 + a2 = -2.0 * a * a + for j in range(1, 101): + term = fac * math.exp(a2 * j * j) + total += term + if math.fabs(term) <= 0.001 * termbf or math.fabs(term) <= 1e-8 * total: + return total + fac = -fac + termbf = math.fabs(term) + return 1.0 + +def kstest(l1, l2): + n1, n2 = len(l1), len(l2) + l1.sort() + l2.sort() + j1, j2, d, fn1, fn2 = 0, 0, 0.0, 0.0, 0.0 + while j1 < n1 and j2 < n2: + d1, d2 = l1[j1], l2[j2] + if d1 <= d2: + fn1 = (float(j1) + 1.0) / float(n1) + j1 += 1 + if d2 <= d1: + fn2 = (float(j2) + 1.0) / float(n2) + j2 += 1 + dtemp = math.fabs(fn2 - fn1) + if dtemp > d: + d = dtemp + ne = float(n1 * n2) / float(n1 + n2) + nesq = math.sqrt(ne) + prob = ksprob((nesq + 0.12 + 0.11 / nesq) * d) + return prob + +def equal_distribution(tiny_func, torch_func=None, numpy_func=None, shape=(20, 23), alpha=0.05): + Tensor.manual_seed(1337) + torch.manual_seed(1337) + np.random.seed(1337) + assert not (torch_func is None and numpy_func is None), "no function to compare with" + x = tiny_func(*shape).numpy().flatten() + if numpy_func is not None: y = numpy_func(shape).flatten() + if torch_func is not None: z = torch_func(shape).numpy().flatten() + return (numpy_func is None or kstest(x, y) >= alpha) and (torch_func is None or kstest(x, z) >= alpha) + +def normal_test(func, shape=(20, 23), alpha=0.05): return equal_distribution(func, numpy_func=lambda x: np.random.randn(*x), shape=shape, alpha=alpha) + +class TestRandomness(unittest.TestCase): + def test_rand(self): + self.assertFalse(normal_test(Tensor.rand)) + self.assertTrue(equal_distribution(Tensor.rand, torch.rand, lambda x: np.random.rand(*x))) + + def test_randn(self): + self.assertTrue(normal_test(Tensor.randn)) + self.assertTrue(equal_distribution(Tensor.randn, torch.randn, lambda x: np.random.randn(*x))) + + def test_normal(self): + self.assertTrue(normal_test(Tensor.normal)) + self.assertTrue(equal_distribution(Tensor.normal, lambda x: torch.nn.init.normal_(torch.empty(x), mean=0, std=1), lambda x: np.random.normal(loc=0, scale=1, size=x))) + + def test_uniform(self): + self.assertFalse(normal_test(Tensor.uniform)) + self.assertTrue(equal_distribution(Tensor.uniform, lambda x: torch.nn.init.uniform_(torch.empty(x)), lambda x: np.random.uniform(size=x))) + self.assertTrue(equal_distribution(partial(Tensor.uniform, low=-100, high=100, dtype=dtypes.int32), numpy_func=lambda x: np.random.randint(low=-100, high=100, size=x))) + + def test_scaled_uniform(self): + self.assertFalse(normal_test(Tensor.scaled_uniform)) + self.assertTrue(equal_distribution(Tensor.scaled_uniform, lambda x: torch.nn.init.uniform_(torch.empty(x), a=-1, b=1) / math.sqrt(math.prod(x)), lambda x: np.random.uniform(-1, 1, size=x) / math.sqrt(math.prod(x)))) + + def test_glorot_uniform(self): + self.assertFalse(normal_test(Tensor.glorot_uniform)) + self.assertTrue(equal_distribution(Tensor.glorot_uniform, lambda x: torch.nn.init.xavier_uniform_(torch.empty(x)), lambda x: np.random.uniform(-1, 1, size=x) * math.sqrt(6 / (x[0] + math.prod(x[1:]))))) + + def test_kaiming_uniform(self): + Tensor.manual_seed(1337) + torch.manual_seed(1337) + np.random.seed(1337) + for shape in 
[(128, 64, 3, 3), (20, 24)]: + self.assertTrue(equal_distribution(Tensor.kaiming_uniform, lambda x: torch.nn.init.kaiming_uniform_(torch.empty(x)), shape=shape)) + + def test_kaiming_normal(self): + Tensor.manual_seed(1337) + torch.manual_seed(1337) + np.random.seed(1337) + for shape in [(128, 64, 3, 3), (20, 24)]: + self.assertTrue(equal_distribution(Tensor.kaiming_normal, lambda x: torch.nn.init.kaiming_normal_(torch.empty(x)), shape=shape)) + + def test_conv2d_init(self): + params = (128, 256, (3,3)) + assert equal_distribution(lambda *_: nn.Conv2d(*params).weight, lambda _: torch.nn.Conv2d(*params).weight.detach()) + assert equal_distribution(lambda *_: nn.Conv2d(*params).bias, lambda _: torch.nn.Conv2d(*params).bias.detach()) + + def test_linear_init(self): + params = (64, 64) + assert equal_distribution(lambda *_: nn.Linear(*params).weight, lambda _: torch.nn.Linear(*params).weight.detach()) + assert equal_distribution(lambda *_: nn.Linear(*params).bias, lambda _: torch.nn.Linear(*params).bias.detach()) + + def test_bn_init(self): + params = (64,) + assert equal_distribution(lambda *_: nn.BatchNorm2d(*params).weight, lambda _: torch.nn.BatchNorm2d(*params).weight.detach()) + assert equal_distribution(lambda *_: nn.BatchNorm2d(*params).bias, lambda _: torch.nn.BatchNorm2d(*params).bias.detach()) + +if __name__ == "__main__": + unittest.main() diff --git a/tinygrad_repo/test/test_schedule.py b/tinygrad_repo/test/test_schedule.py new file mode 100644 index 0000000..32af9ea --- /dev/null +++ b/tinygrad_repo/test/test_schedule.py @@ -0,0 +1,335 @@ +# this will be the new test_ops for the next level +# schedule confirms the right things are capable of fusing +# NOTE: this has overlap with external_test_opt.py + +import unittest +from typing import List, Optional +from tinygrad.tensor import Tensor +from tinygrad.ops import LoadOps, Device, Compiled +from tinygrad.helpers import DEBUG, dtypes +from tinygrad.codegen.linearizer import Linearizer +from tinygrad.graph import log_schedule_item, print_tree +from tinygrad import nn + +def check_schedule(t:Tensor, allowed:int, to_prerealize:Optional[List[Tensor]]=None, filter_loadops=True): + seen = set() + if to_prerealize: + for pre in to_prerealize: + for s in pre.lazydata.schedule(seen.copy()): + log_schedule_item(s) + seen.add(s.out) + sched = t.lazydata.schedule(seen) + for s in sched: log_schedule_item(s) + if filter_loadops: sched = [s for s in sched if s.ast.op not in LoadOps] + if len(sched) != allowed: print(f"SCHEDULE ISSUE, expecting {allowed} got {len(sched)}") + if len(sched) != allowed or DEBUG >= 3: + for i, s in enumerate(sched): + print("op", i) + print_tree(s.ast) + assert len(sched) == allowed + # test the (non loadops) ops linearize + for s in sched: + if s.ast.op in LoadOps: continue + l = Linearizer(s.ast) + l.hand_coded_optimizations() + l.linearize() + +class TestSchedule(unittest.TestCase): + def test_basic_binop_fusion(self): + a = Tensor.empty(10) + b = Tensor.empty(10) + c = Tensor.empty(10) + d = a+b+c + check_schedule(d, 1) + + def test_basic_binop_fusion_deep(self): + a = Tensor.empty(10) + b = Tensor.empty(10) + c = Tensor.empty(10) + d = Tensor.empty(10) + e = a+b+c+d + check_schedule(e, 1) + + def test_mulacc_fusion(self): + a = Tensor.empty(10) + b = Tensor.empty(10) + c = (a*b).sum() + check_schedule(c, 1) + + def test_mulacc_relu_fusion(self): + a = Tensor.empty(10) + b = Tensor.empty(10) + c = (a*b).sum().relu() + check_schedule(c, 1) + + def test_binop_reshape_fusion(self): + a = Tensor.empty(10) + b = 
Tensor.empty(10) + c = Tensor.empty(5,2) + d = (a+b).reshape(5,2)+c + check_schedule(d, 1) + + def test_binop_permute_fusion(self): + a = Tensor.empty(2,5) + b = Tensor.empty(2,5) + c = Tensor.empty(5,2) + d = (a+b).permute(1,0)+c + check_schedule(d, 1) + + @unittest.skipIf(not isinstance(Device[Device.DEFAULT], Compiled) or Device.DEFAULT == "LLVM", "only test for compiled backends") + def test_constants_are_embedded(self): + a = Tensor.empty(3,3) * 2 + check_schedule(a, 2, filter_loadops=False) + + def test_binop_elu_fusion(self): + a = Tensor.empty(10) + b = a.elu() + check_schedule(b, 1) + + def test_binop_reshape_reduce_fusion(self): + a = Tensor.empty(100) + b = Tensor.empty(100) + c = (a+b).reshape(10, 10).sum(axis=0, keepdim=True) + check_schedule(c, 1) + + def test_reduce_reshape_binop_fusion(self): + a = Tensor.empty(10,10) + b = Tensor.empty(10) + c = a.sum(axis=0) + b + check_schedule(c, 1) + + @unittest.skip("not pushing permutes through reduces") + def test_reduce_permute_binop_fusion(self): + a = Tensor.empty(10,10,10) + b = Tensor.empty(10,10,1) + c = a.sum(axis=0, keepdim=True).permute(2,1,0) + b + check_schedule(c, 1) + + def test_binop_early_reshape_reduce_fusion(self): + a = Tensor.empty(100) + b = Tensor.empty(100) + c = Tensor.empty(10,10) + d = ((a+b).reshape(10,10) + c).sum(axis=0) + check_schedule(d, 1) + + def test_diamond_folded(self): + a = Tensor.empty(10) + b = Tensor.empty(10) + c = Tensor.empty(10) + d = Tensor.empty(10) + ab = a+b + e = (ab+c) + (ab+d) + check_schedule(e, 1) + + def test_cache_binaryop(self): + a = Tensor.empty(10) + b = Tensor.empty(10) + c = a+b + d = a+b + check_schedule(d, 0, [c]) + + @unittest.skip("failing in old lazy") + def test_cache_binaryop_reshaped(self): + a = Tensor.empty(10) + b = Tensor.empty(10) + c = a+b + d = a.reshape(10,1)+b.reshape(10,1) + check_schedule(d, 0, [c]) + + def test_cache_binaryop_transpose(self): + a = Tensor.empty(10,10) + b = Tensor.empty(10,10) + c = (a.T*b.T).T #.contiguous() + d = a*b + check_schedule(d, 0, [c]) + + def test_cache_two_reduceops(self): + a = Tensor.empty(10) + b = a.sum() + c = a.sum() + bc = b+c + check_schedule(bc, 1) + + def test_fold_double_unary(self): + y = Tensor.empty(2) + out = y.sum(keepdim=True).sqrt().__neg__() + check_schedule(out, 1) + + #@unittest.skip("may want to reconsider this") + def test_fold_batchnorm(self): + with Tensor.train(): + img = Tensor.empty(1,32,4,4) + bn = nn.BatchNorm2d(32, track_running_stats=False) + out = bn(img) + check_schedule(out, 3) + + def test_fold_conv_relu(self): + c1 = nn.Conv2d(3,16,3) + + # run + img = Tensor.ones(2,3,64,64) + out = c1(img).relu() + check_schedule(out, 1, [c1.weight, c1.bias]) + + def test_fold_conv_elu(self): + c1 = nn.Conv2d(3,16,3) + + # run + img = Tensor.rand(2,3,64,64) + out = c1(img).elu() + check_schedule(out, 1, [c1.weight, c1.bias]) + + def test_two_sum(self): + img = Tensor.empty(64,64) + x = (img.sum(0) + img.sum(1)) + out = x.relu() + del x # is 3 without this + check_schedule(out, 2) + + @unittest.skip("failing in old lazy") + def test_push_permute_through_reshape(self): + a = Tensor.empty(16,16) + b = Tensor.empty(16,16) + c = (a+b).reshape(4,4,4,4).permute(2,3,0,1).contiguous() + check_schedule(c, 1) + + @unittest.skip("failing in old lazy") + def test_push_permute_through_reshape_alt(self): + a = Tensor.empty(4,4,4,4) + b = Tensor.empty(4,4,4,4) + c = (a+b).reshape(16,16).permute(1,0).contiguous() + check_schedule(c, 1) + + def test_no_binop_rerun(self): + a = Tensor.empty(16) + b = Tensor.empty(16) + c 
= a+b + d = (a+b).reshape(16,1) + check_schedule(d, 0, [c]) + + def test_multi_permute_should_collapse(self): + a = Tensor.empty(4,4,4,4) + b = Tensor.empty(16) + c = a.sum((0,1)).cast(dtypes.float16).permute(1,0).reshape(4,4,1).permute(1,0,2).reshape(16) + b + check_schedule(c, 1) + + @unittest.skip("failing in old lazy") + def test_fancy_reshape_fusion(self): + a = Tensor.empty(10) + b = Tensor.empty(10) + c = a+b + d = a.reshape(10,1)+b.reshape(10,1) + out = c.sum() + d.sum() + check_schedule(out, 1) + + # NOTE: for this to pass, LazyViews must be children of LazyBuffers so the (a+b) runs first + @unittest.skip("not real world") + def test_children_dont_push(self): + a = Tensor.empty(10, 10, 1) + b = Tensor.empty(10, 10, 1) + d = (a+b).expand(10, 10, 10) + e = (a+b).permute(2,1,0) + f = d+e + check_schedule(f, 2) + + def test_dont_fuse_binops_with_children(self): + a = Tensor.empty(10) + b = Tensor.empty(10) + c = Tensor.empty(10) + keep_me = a+b + e = keep_me.sum() # give keep_me a child (NOTE: BinaryOps won't be a child since it will instant fuse) + d = keep_me+c + check_schedule(d, 2) + check_schedule(keep_me, 0, [d]) + + @unittest.skip("failing in old lazy") + def test_permute_breaks_fusion(self): + a = Tensor.empty(10, 10, 10) + b = Tensor.empty(10, 10) + c = (a.sum(axis=2) + b).permute(1,0) + d = c.permute(1,0) + check_schedule(d, 1) + + def test_some_permute_fusion(self): + a = Tensor.empty(8192, 16) + b = Tensor.empty(1, 16) + d = (a.T + b.expand(8192, 16).T) + c = a + b.expand(8192, 16) + e = d.T + check_schedule(c, 1) + check_schedule(e, 1) + + # this is the failing case in openpilot...it's very simple like this + @unittest.skip("failing in old lazy") + def test_image_conv_fusion(self): + from tinygrad.features.image import image_conv2d + w1 = Tensor.empty(16, 16, 1, 1) + b1 = Tensor.empty(16) + w2 = Tensor.empty(16, 16, 1, 1) + b2 = Tensor.empty(16) + w3 = Tensor.empty(16, 16, 1, 1) + b3 = Tensor.empty(16) + + x = Tensor.empty(1, 16, 32, 32) + x = base = image_conv2d(x, w1, b1) + x = image_conv2d(x, w2, b2) + base + x = image_conv2d(x, w3, b3) + + # NOOP, 3 convs, contiguous + check_schedule(x, 5) + + def test_image_conv_fusion_minimal(self): + b1 = Tensor.empty(16) + b2 = Tensor.empty(16) + def p(x): return x.permute(1,0).contiguous().reshape(32,16,1).expand(32,16,16).sum(axis=2).permute(1,0) + + x = Tensor.empty(16, 32) + x = base = p(x) + b1.reshape(16,1) + x = p(x) + x = x + b2.reshape(16,1) + x = x + base + del base + x = p(x) + check_schedule(x, 4) + + def test_image_conv_fusion_more_minimal(self): + b1 = Tensor.empty(16) + def p(x): return x.permute(1,0).contiguous().reshape(32,16,1).expand(32,16,16).sum(axis=2).permute(1,0) + + x = Tensor.empty(16, 32) + x = base = p(x) + b1.reshape(16,1) + x = p(x) + del base + check_schedule(x, 3) + + def test_resnet_block(self): + from models.resnet import BasicBlock + Tensor.training = False + bb = BasicBlock(64,64) + + x = Tensor.empty(1, 64, 32, 32) + out = bb(x) + check_schedule(out, 4) + + def test_contiguous_while_contiguous(self): + x = Tensor.empty(1, 64, 32, 32) + out = x.contiguous() + check_schedule(out, 1, filter_loadops=False) + + def test_contiguous_while_not_contiguous(self): + x = Tensor.empty(1, 64, 32, 32) + out = x.permute(0,2,3,1).contiguous() + check_schedule(out, 2, filter_loadops=False) + + def test_double_from(self): + x = Tensor([1,2,3,4]) + out = x.to('cpu') + check_schedule(out, 0, filter_loadops=False) + + def test_pow_const_tensor(self): + x = Tensor([1,2,3,4]) + out = x ** Tensor(2) + 
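+ # [editor's note — illustrative only, not part of the original patch]
+ # ** with a 0-d constant tensor is purely elementwise, so the whole expression is
+ # expected to lower to a single fused kernel (the schedule length of 1 checked below).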
check_schedule(out, 1) + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/tinygrad_repo/test/test_search.py b/tinygrad_repo/test/test_search.py new file mode 100644 index 0000000..400c04a --- /dev/null +++ b/tinygrad_repo/test/test_search.py @@ -0,0 +1,19 @@ +import unittest + +from tinygrad.codegen.linearizer import Linearizer +from tinygrad.features.search import time_linearizer +from tinygrad.ops import Compiled, Device, LoadOps +from tinygrad.tensor import Tensor + +class TestTimeLinearizer(unittest.TestCase): + def setUp(self) -> None: + if not isinstance(Device[Device.DEFAULT], Compiled): raise unittest.SkipTest("only test for compiled backends") + + def test_reasonable_time(self): + si = [si for si in Tensor([1,2,3,4]).add(1).lazydata.schedule() if si.ast.op not in LoadOps][0] + rawbufs = [Device[Device.DEFAULT].buffer(si.out.st.size(), si.out.dtype)] + [Device[Device.DEFAULT].buffer(x.st.size(), x.dtype) for x in si.inputs] + tm = time_linearizer(Linearizer(si.ast), rawbufs, allow_test_size=False, cnt=10) + assert tm > 0 and tm != float('inf') + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/test_specific_conv.py b/tinygrad_repo/test/test_specific_conv.py new file mode 100644 index 0000000..ac28e6c --- /dev/null +++ b/tinygrad_repo/test/test_specific_conv.py @@ -0,0 +1,57 @@ +import unittest +from tinygrad.tensor import Tensor +from tinygrad.helpers import dtypes +from tinygrad.ops import Device +import pytest +# similar to test/external/external_test_gpu_ast.py, but universal + +pytestmark = pytest.mark.exclude_cuda + +class TestSpecific(unittest.TestCase): + # from openpilot + + # 1x1 6 <- 24 + def test_1x1_6_24(self): + x = Tensor.randn(1, 24*4, 32, 64) + w = Tensor.randn(6*4, 24*4, 1, 1) + x.conv2d(w).permute(0,2,3,1).reshape(32, 384, 4).contiguous().realize() + + def test_vec_mul(self): + # this forces it to be an image... + x = Tensor.ones(1, 512, 4).contiguous().reshape(1, 2048) + w = Tensor.randn(2048, 512) + (x @ w).reshape(1, 128, 4).contiguous().realize() + + @unittest.skipIf(Device.DEFAULT in ["LLVM", "WEBGPU"], "Broken on LLVM and webgpu") + def test_big_vec_mul(self): + # from LLaMA + # 0 buffer<4096, dtypes.float> [View((1024, 1, 1, 4), (4, 0, 0, 1), 0, None)] + # 1 buffer<4096, dtypes.float> [View((1024, 1024, 4, 4), (0, 4, 1, 0), 0, None)] + # 2 buffer<16777216, dtypes.half> [View((1024, 1024, 4, 4), (16384, 4, 1, 4096), 0, None)] + x = Tensor.randn(4096).realize() + w = Tensor.randn(4096, 4096, device='cpu').cast(dtypes.float16).to(Device.DEFAULT).realize() + (x @ w.T).realize() + + # from https://dl.acm.org/doi/pdf/10.1145/3495243.3517020 + + # ~260 GFLOPS on Adreno 640, should be 260*(720/890)*(596/710) = 176.5 on downclocked 630 + # we get 170 + def test_1x1_28_28(self): + x = Tensor.randn(1, 256, 28, 28) + w = Tensor.randn(256, 256, 1, 1) + x.conv2d(w).permute(0,2,3,1).reshape(28, 28*256//4, 4).contiguous().realize() + + # 132 GFLOPS on Adreno 640, should be 132*(720/890)*(596/710) = 90 on downclocked 630 + # gets 54 with broken opt, 74 without opt, and 146 if we pad and opt 3! 
+ def test_3x3_28_28_stride_2(self): + x = Tensor.randn(1, 288, 36, 36) + w = Tensor.randn(384, 288, 3, 3) + x.conv2d(w, stride=2).permute(0,2,3,1).reshape(17, 17*384//4, 4).contiguous().realize() + + def test_3x3_28_28_stride_2_padded(self): + x = Tensor.randn(1, 288, 36, 36) + w = Tensor.randn(384, 288, 3, 3) + x.conv2d(w, stride=2, padding=1).permute(0,2,3,1).reshape(18, 18*384//4, 4).contiguous().realize() + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tinygrad_repo/test/test_speed_v_torch.py b/tinygrad_repo/test/test_speed_v_torch.py new file mode 100644 index 0000000..0537d82 --- /dev/null +++ b/tinygrad_repo/test/test_speed_v_torch.py @@ -0,0 +1,288 @@ +import os +os.environ["NVIDIA_TF32_OVERRIDE"] = "0" +os.environ["MKL_NUM_THREADS"] = "1" +os.environ["NUMEXPR_NUM_THREADS"] = "1" +os.environ["OMP_NUM_THREADS"] = "1" +import unittest +import torch +torch.set_num_threads(1) +import time +import numpy as np +np.set_printoptions(linewidth=160) +from tinygrad.ops import Device +from tinygrad.helpers import GlobalCounters +from tinygrad.tensor import Tensor +from tinygrad.nn import Conv2d +from tinygrad.helpers import colored, getenv, CI +from tinygrad.jit import TinyJit +import pytest + +pytestmark = [pytest.mark.exclude_cuda, pytest.mark.exclude_gpu, pytest.mark.exclude_clang] + +IN_CHANS = [int(x) for x in getenv("IN_CHANS", "4,16,64").split(",")] + +torch_dt = torch.float16 if getenv("HALF", 0) else torch.float32 +torch_device = torch.device('mps' if getenv("MPS", 0) else ('cuda' if getenv("TORCHCUDA", 0) else 'cpu')) +if str(torch_device) == "mps": + import torch.mps + sync = lambda: torch.mps.synchronize() +elif str(torch_device) == "cuda": + import torch.cuda + sync = lambda: torch.cuda.synchronize() +else: + sync = lambda: None + +def colorize_float(x): + ret = f"{x:7.2f}x" + if x < 0.75: + return colored(ret, 'green') + elif x > 1.15: + return colored(ret, 'red') + else: + return colored(ret, 'yellow') + +save_ops, save_mem = 0, 0 +CNT = getenv("CNT", 8) +def helper_test_speed(f1, *args): + global save_ops, save_mem + ets = [] + ret = None + cache_defeat = np.zeros((2048,2048)) + for i in range(CNT): + del ret + + # operation cache defeats + args = [(x+1).realize() if isinstance(x, Tensor) else (None if x is None else (x+1)) for x in args] + + # force syncing + [x.numpy() if isinstance(x, Tensor) or str(torch_device) == "cpu" else x.cpu().numpy() for x in args if x is not None] + + # clear 32MB global memory cache (CPU and global memory only) + cache_defeat += 1 + + # manual pre sync + if isinstance(args[0], Tensor): Device[args[0].device].synchronize() + else: sync() + + GlobalCounters.global_ops = 0 + GlobalCounters.global_mem = 0 + st = time.perf_counter() + ret = f1(*args) + if isinstance(ret, Tensor): Device[ret.device].synchronize() + else: sync() + et = (time.perf_counter() - st) * 1000 + if i >= 1: ets.append(et) + if GlobalCounters.global_ops: + save_ops, save_mem = GlobalCounters.global_ops, GlobalCounters.global_mem + return ret.numpy() if isinstance(ret, Tensor) else ret.cpu().numpy(), np.min(ets) + +def helper_test_generic_square(name, N, f1, f2, onearg=False): + torch.manual_seed(0) + torch_a = (torch.rand(N, N, dtype=torch_dt) - 0.5).to(torch_device) + torch_b = (torch.rand(N, N, dtype=torch_dt) - 0.5).to(torch_device) if not onearg else None + + tiny_a = Tensor(torch_a.cpu().numpy()) + tiny_b = Tensor(torch_b.cpu().numpy()) if not onearg else None + + helper_test_generic(f"{name:30s} {N:5d}x{N:5d}", f1, (torch_a, 
torch_b), TinyJit(lambda a,b:f2(a,b).realize()), (tiny_a, tiny_b)) + +def helper_test_matvec(name, N, M): + torch.manual_seed(0) + torch_a = (torch.rand(N, dtype=torch_dt) - 0.5).to(torch_device) + torch_b = (torch.rand(N, M, dtype=torch_dt) - 0.5).to(torch_device) + + tiny_a = Tensor(torch_a.cpu().numpy()) + tiny_b = Tensor(torch_b.cpu().numpy()) + + helper_test_generic(f"{name:30s} {N:5d}x{M:5d}", lambda a,b: a@b, (torch_a, torch_b), TinyJit(lambda a,b:(a@b).realize()), (tiny_a, tiny_b)) + +prefix = None +def helper_test_generic(name, f1, f1_args, f2, f2_args): + global prefix + with torch.no_grad(): + val_torch, et_torch = helper_test_speed(f1, *f1_args) + val_tinygrad, et_tinygrad = helper_test_speed(f2, *f2_args) + + desc = "faster" if et_torch > et_tinygrad else "slower" + flops = save_ops*1e-6 + mem = save_mem*1e-6 + print(("\r" if not CI else "")+f"{name:42s} {et_torch:7.2f} ms ({flops/et_torch:8.2f} GFLOPS {mem/et_torch:8.2f} GB/s) in torch, {et_tinygrad:7.2f} ms ({flops/et_tinygrad:8.2f} GFLOPS {mem/et_tinygrad:8.2f} GB/s) in tinygrad, {colorize_float(et_tinygrad/et_torch)} {desc} {flops:10.2f} MOPS {mem:8.2f} MB") + np.testing.assert_allclose(val_tinygrad, val_torch, atol=1e-3, rtol=1e-3) + +def helper_test_conv(bs, in_chans, out_chans, kernel_size, img_size_y, img_size_x): + torch.manual_seed(0) + torch_dat = torch.rand(bs, in_chans, img_size_y, img_size_x, dtype=torch_dt).to(torch_device) + torch_conv = torch.nn.Conv2d(in_chans, out_chans, kernel_size, bias=None, dtype=torch_dt).to(torch_device) + + tiny_dat = Tensor(torch_dat.cpu().numpy()) + tiny_conv = Conv2d(in_chans, out_chans, kernel_size, bias=None) + tiny_conv.weight = Tensor(torch_conv.weight.detach().cpu().numpy()) + + def f1(torch_dat): return torch_conv(torch_dat) + def f2(tiny_dat): return tiny_conv(tiny_dat).realize() + helper_test_generic(f"conv bs:{bs:3d} chans:{in_chans:3d} -> {out_chans:3d} k:{kernel_size}", f1, (torch_dat,), TinyJit(f2), (tiny_dat,)) + +@unittest.skipIf(getenv("BIG") == 0, "no big tests") +class TestBigSpeed(unittest.TestCase): + def test_add(self): + def f(a, b): return a+b + helper_test_generic_square('add', 8192, f, f) + def test_exp(self): + def f(a, b): return a.exp() + helper_test_generic_square('exp', 8192, f, f, onearg=True) + def test_gemm_2048(self): + def f(a, b): return a @ b + helper_test_generic_square('gemm', 2048, f, f) + def test_gemm_4096(self): + def f(a, b): return a @ b + helper_test_generic_square('gemm', 4096, f, f) + def test_large_conv_1x1(self): helper_test_conv(bs=32, in_chans=128, out_chans=128, kernel_size=1, img_size_y=128, img_size_x=128) + def test_large_conv_3x3(self): helper_test_conv(bs=4, in_chans=128, out_chans=128, kernel_size=3, img_size_y=130, img_size_x=130) + def test_large_conv_5x5(self): helper_test_conv(bs=4, in_chans=128, out_chans=128, kernel_size=5, img_size_y=132, img_size_x=132) + def test_matvec_4096_16384(self): helper_test_matvec('matvec_4096_16384', 4096, 16384) + def test_matvec_16384_4096(self): helper_test_matvec('matvec_16384_4096', 16384, 4096) + +@unittest.skipIf(getenv("BIG") == 1, "only big tests") +class TestSpeed(unittest.TestCase): + def test_sub(self): + def f(a, b): return a-b + helper_test_generic_square('sub', 4096, f, f) + + @unittest.skipIf(CI and Device.DEFAULT == "WEBGPU", "breaking on webgpu CI") + def test_pow(self): + def f(a, b): return a.pow(b) + helper_test_generic_square('pow', 2048, f, f) + + def test_sum(self): + def f(a, b): return a.sum() + helper_test_generic_square('sum', 2048, f, f, onearg=True) + 
helper_test_generic_square('sum', 4096, f, f, onearg=True) + + def test_partial_sum(self): + R = 256 + def f(a, b): return a.reshape(int(4096//R), int(4096*R)).sum(axis=1) + helper_test_generic_square('partial_sum', 4096, f, f, onearg=True) + + @unittest.skip("not really used in models") + def test_cumsum(self): + def f0(a, b): return a.cumsum(axis=0) + def f1(a, b): return a.cumsum(axis=1) + helper_test_generic_square('cumsum_0', 256, f0, f0, onearg=True) + helper_test_generic_square('cumsum_1', 256, f1, f1, onearg=True) + + def test_cat(self): + helper_test_generic_square('cat_0', 256, lambda x,y: torch.cat((x,y),dim=0), lambda x,y: x.cat(y,dim=0)) + helper_test_generic_square('cat_1', 256, lambda x,y: torch.cat((x,y),dim=1), lambda x,y: x.cat(y,dim=1)) + + def test_array_packing(self): + N = 2048 + def f(a, b): return a.reshape(N, N // 32, 32).permute(1,0,2).contiguous() + helper_test_generic_square('array_packing', N, f, f, onearg=True) + + def test_permute(self): + for N in [1024, 4096]: + # this is a 64MB tensor, M1 L1 cache is 128kB + # to fit easily in L1, rotations should be 128x128 chunks. 128x128 is also the AMX size + def f(a, b): return a.permute(1,0).contiguous() + helper_test_generic_square('permute', N, f, f, onearg=True) + + def test_double_permute(self): + N = 64 + torch.manual_seed(0) + torch_a = (torch.rand(N, N, N, N, dtype=torch_dt) - 0.5).to(torch_device) + tiny_a = Tensor(torch_a.cpu().numpy()) + def f(a): return a.permute(1,0,3,2).contiguous() + helper_test_generic(f"double_permute {tiny_a.shape}", f, (torch_a,), TinyJit(lambda a: f(a).realize()), (tiny_a,)) + + def test_neg(self): + def f(a, b): return -a + helper_test_generic_square('neg', 4096, f, f, onearg=True) + + def test_exp(self): + def f(a, b): return a.exp() + helper_test_generic_square('exp', 2048, f, f, onearg=True) + + def test_relu(self): + def f(a, b): return a.relu() + helper_test_generic_square('relu', 4096, f, f, onearg=True) + + def test_max(self): + def f(a, b): return a.max() + helper_test_generic_square('max', 4096, f, f, onearg=True) + + def test_mul_sum(self): + def f(a, b): return (a*b).sum() + helper_test_generic_square('mul_sum', 4096, f, f) + + def test_add(self): + for N in [1, 1024, 4096]: + def f(a, b): return a + b + helper_test_generic_square('add', N, f, f) + + def test_add_constant(self): + def f(a, b): return a+2.0 + helper_test_generic_square('add_constant', 4096, f, f, onearg=True) + + def test_add_sq(self): + def f(a, b): return a*a + b*b + helper_test_generic_square('add_sq', 4096, f, f) + + def test_gemm(self): + def f(a, b): return a @ b + helper_test_generic_square('gemm', 1024, f, f) + + def test_gemm_small(self): + def f(a, b): return a @ b + helper_test_generic_square('gemm', 256, f, f) + + def test_gemm_unrolled(self): + N = 512 + def f1(a, b): return a@b.T + def f2(a, b): return (a.reshape(N, 1, N).expand(N, N, N) * b.reshape(1, N, N).expand(N, N, N)).sum(axis=2) + helper_test_generic_square('gemm_unrolled', N, f1, f2) + + def test_gemm_unrolled_permute_l(self): + N = 512 + def f1(a, b): return a.T@b.T + def f2(a, b): return (a.permute(1,0).reshape(N, 1, N).expand(N, N, N) * b.reshape(1, N, N).expand(N, N, N)).sum(axis=2) + helper_test_generic_square('gemm_unrolled_permute_l', N, f1, f2) + + def test_gemm_unrolled_permute_r(self): + N = 512 + def f1(a, b): return a@b + def f2(a, b): return (a.reshape(N, 1, N).expand(N, N, N) * b.permute(1,0).reshape(1, N, N).expand(N, N, N)).sum(axis=2) + helper_test_generic_square('gemm_unrolled_permute_r', N, f1, f2) + + def 
test_gemm_unrolled_permute_lr(self): + N = 512 + def f1(a, b): return a.T@b + def f2(a, b): return (a.permute(1,0).reshape(N, 1, N).expand(N, N, N) * b.permute(1,0).reshape(1, N, N).expand(N, N, N)).sum(axis=2) + helper_test_generic_square('gemm_unrolled_permute_lr', N, f1, f2) + + def test_matvec_1024_1024(self): helper_test_matvec('matvec_1024_1024', 1024, 1024) + def test_matvec_1024_4096(self): helper_test_matvec('matvec_1024_4096', 1024, 4096) + def test_matvec_4096_1024(self): helper_test_matvec('matvec_4096_1024', 4096, 1024) + def test_matvec_4096_4096(self): helper_test_matvec('matvec_4096_4096', 4096, 4096) + + def test_openpilot_conv2d(self): + bs, in_chans, out_chans = 1,12,32 + torch.manual_seed(0) + torch_dat = torch.rand(bs, 64, 128, 12, dtype=torch_dt).to(torch_device) + torch_conv = torch.nn.Conv2d(in_chans, out_chans, 3, bias=None, padding=1, dtype=torch_dt).to(torch_device) + + tiny_dat = Tensor(torch_dat.cpu().numpy()) + tiny_conv = Conv2d(in_chans, out_chans, 3, bias=None, padding=1) + tiny_conv.weight = Tensor(torch_conv.weight.detach().cpu().numpy()) + + def f1(torch_dat): return torch_conv(torch_dat.permute(0,3,1,2)) + def f2(tiny_dat): return tiny_conv(tiny_dat.permute(0,3,1,2)).realize() + helper_test_generic(f"conv bs:{bs:3d} chans:{in_chans:3d} -> {out_chans:3d} k:3", f1, (torch_dat,), TinyJit(f2), (tiny_dat,)) + + def test_conv2d(self): + for bs in [32]: + for in_chans in IN_CHANS: + for out_chans in [32]: + helper_test_conv(bs, in_chans, out_chans, 3, 34, 34) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/test_symbolic_jit.py b/tinygrad_repo/test/test_symbolic_jit.py new file mode 100644 index 0000000..f2f0fd8 --- /dev/null +++ b/tinygrad_repo/test/test_symbolic_jit.py @@ -0,0 +1,181 @@ +import unittest +from tinygrad.jit import TinyJit +from tinygrad.helpers import getenv +from tinygrad.shape.symbolic import Variable +from tinygrad.tensor import Tensor, Device +import numpy as np + +@unittest.skipIf(getenv("ARM64") or getenv("PTX"), "ARM64 and PTX are not supported") +@unittest.skipUnless(Device.DEFAULT in ["GPU", "METAL", "CLANG", "CUDA", "LLVM"], f"{Device.DEFAULT} is not supported") +class TestSymbolicJit(unittest.TestCase): + def test_plus1(self): + def f(a): return (a+1).realize() + jf = TinyJit(f) + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + a = Tensor.rand(3, i) + symbolic = jf(a.reshape(3, vi)).reshape(3, i).numpy() + expected = f(a).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + assert len(jf.jit_cache) == 1 + + def test_reshape_inside_plus1(self): + def f(a, jit=False, jit_ctx=None): + if jit: a = a.reshape(3, Variable("i", 1, 10).bind(a.shape[1])) + return (a+1).realize() + jf = TinyJit(f) + for i in range(1, 5): + vi = Variable("i", 1, 10) + a = Tensor.rand(3, i) + symbolic = jf(a, jit=True, jit_ctx={vi: i}).reshape(3, i).numpy() + expected = f(a).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + assert len(jf.jit_cache) == 1 + + def test_add(self): + def f(a, b): return (a+b).realize() + jf = TinyJit(f) + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + a = Tensor.rand(3, i) + b = Tensor.rand(3, i) + symbolic = jf(a.reshape(3, vi), b.reshape(3, vi)).reshape(3, i).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + assert len(jf.jit_cache) == 1 + + def test_matmul(self): + def f(a, b): return (a@b).realize() + jf = TinyJit(f) + for i in range(1, 5): + vi = 
Variable("i", 1, 10).bind(i) + a = Tensor.rand(3, i) + b = Tensor.rand(i, 5) + symbolic = jf(a.reshape(3, vi), b.reshape(vi, 5)).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + assert len(jf.jit_cache) == 1 + + def test_mixed_with_no_symbol_kernel(self): + def f(a, b): + s = (a@b).realize() + s = (s+s).realize() # this one does not have symbols in input + return s + jf = TinyJit(f) + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + a = Tensor.rand(3, i) + b = Tensor.rand(i, 5) + symbolic = jf(a.reshape(3, vi), b.reshape(vi, 5)).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + assert len(jf.jit_cache) == 2 + + def test_attention(self): + def f(q, k, v): return Tensor.scaled_dot_product_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)).realize() + jf = TinyJit(f) + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + q = Tensor.rand(2, 1, 4, 8) + k = Tensor.rand(2, i, 4, 8) + v = Tensor.rand(2, i, 4, 8) + symbolic = jf(q, k.reshape(2, vi, 4, 8), v.reshape(2, vi, 4, 8)).reshape(2, 4, 1, 8).numpy() + expected = f(q, k, v).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + assert len(jf.jit_cache) == 6 + + def test_cat_dim0(self): + def f(a, b): return a.cat(b, dim=0).realize() + jf = TinyJit(f) + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + a = Tensor.rand(i, 3) + b = Tensor.rand(2, 3) + symbolic = jf(a.reshape(vi, 3), b).reshape(i+2, 3).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + assert len(jf.jit_cache) == 1 + + def test_cat_dim1(self): + def f(a, b): return a.cat(b, dim=1).realize() + jf = TinyJit(f) + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + a = Tensor.rand(3, i) + b = Tensor.rand(3, 2) + symbolic = jf(a.reshape(3, vi), b).reshape(3, i+2).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + assert len(jf.jit_cache) == 1 + + def test_cat_dim0_two_vars(self): + def f(a, b): return a.cat(b, dim=0).realize() + jf = TinyJit(f) + for i in range(1, 5): + for j in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + vj = Variable("j", 1, 10).bind(j) + a = Tensor.rand(i, 3) + b = Tensor.rand(j, 3) + symbolic = jf(a.reshape(vi, 3), b.reshape(vj, 3)).reshape(i+j, 3).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + assert len(jf.jit_cache) == 1 + + def test_cat_dim1_two_vars(self): + def f(a, b): return a.cat(b, dim=1).realize() + jf = TinyJit(f) + for i in range(1, 5): + for j in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + vj = Variable("j", 1, 10).bind(j) + a = Tensor.rand(3, i) + b = Tensor.rand(3, j) + symbolic = jf(a.reshape(3, vi), b.reshape(3, vj)).reshape(3, i+j).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + assert len(jf.jit_cache) == 1 + + def test_two_vars_plus1(self): + def f(a, b): return (a@b+1).realize() + jf = TinyJit(f) + for i in range(1, 5): + for j in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + vj = Variable("j", 1, 10).bind(j) + a = Tensor.rand(i, 3) + b = Tensor.rand(3, j) + symbolic = jf(a.reshape(vi, 3), b.reshape(3, vj)).reshape(i, j).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + assert len(jf.jit_cache) == 1 + + def 
test_jit_symbolic_shape_mismatch(self): + @TinyJit + def add(a, b): return (a+b).realize() + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + a = Tensor.rand(3, i).reshape(3, vi) + b = Tensor.rand(3, i).reshape(3, vi) + c = add(a, b) + vi2 = Variable("i", 1, 10).bind(7) + a = Tensor.rand(3, 7).reshape(3, vi2) + bad = Tensor.rand(4, 7).reshape(4, vi2) + with self.assertRaises(AssertionError): + add(a, bad) + + def test_shrink(self): + # shrink is a movement, so we pair it with a simple function to test the JIT interaction + def f(a): return (a+1).realize() + jf = TinyJit(f) + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + a = Tensor.rand(7, 11) + symbolic = a.shrink(((3,5),(vi,vi+2))) + symbolic = jf(symbolic).numpy() + expected = f(a.shrink(((3,5),(i,i+2)))).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + assert len(jf.jit_cache) == 1 + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tinygrad_repo/test/test_symbolic_ops.py b/tinygrad_repo/test/test_symbolic_ops.py new file mode 100644 index 0000000..c446899 --- /dev/null +++ b/tinygrad_repo/test/test_symbolic_ops.py @@ -0,0 +1,124 @@ +import unittest +from tinygrad.jit import JIT_SUPPORTED_DEVICE +from tinygrad.shape.symbolic import Variable +from tinygrad.helpers import getenv +from tinygrad.tensor import Tensor, Device +import numpy as np + +@unittest.skipIf(getenv("ARM64") or getenv("PTX"), "ARM64 and PTX are not supported") +@unittest.skipUnless(Device.DEFAULT in JIT_SUPPORTED_DEVICE and Device.DEFAULT not in ["HIP", "WEBGPU"], f"{Device.DEFAULT} is not supported") +class TestSymbolicOps(unittest.TestCase): + def test_plus1(self): + def f(a): return (a+1).realize() + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + a = Tensor.rand(3, i) + symbolic = f(a.reshape(3, vi)).reshape(3, i).numpy() + expected = f(a).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + + def test_add(self): + def f(a, b): return (a+b).realize() + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + a = Tensor.rand(3, i) + b = Tensor.rand(3, i) + symbolic = f(a.reshape(3, vi), b.reshape(3, vi)).reshape(3, i).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + + def test_matmul(self): + def f(a, b): return (a@b).realize() + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + a = Tensor.rand(3, i) + b = Tensor.rand(i, 5) + symbolic = f(a.reshape(3, vi), b.reshape(vi, 5)).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + + def test_attention(self, dropout_p=0.0): + def f(q, k, v): return Tensor.scaled_dot_product_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), dropout_p=dropout_p).realize() + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + q = Tensor.rand(2, 1, 4, 8) + k = Tensor.rand(2, i, 4, 8) + v = Tensor.rand(2, i, 4, 8) + symbolic = f(q, k.reshape(2, vi, 4, 8), v.reshape(2, vi, 4, 8)).reshape(2, 4, 1, 8).numpy() + expected = f(q, k, v).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + + def test_attention_training(self): + with Tensor.train(): + self.test_attention(dropout_p=0.0) + with self.assertRaises(AssertionError): + # symbolic shape dropout is not supported + self.test_attention(dropout_p=0.5) + + def test_cat_dim0(self): + def f(a, b): return a.cat(b, dim=0).realize() + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) 
+ a = Tensor.rand(i, 3) + b = Tensor.rand(2, 3) + symbolic = f(a.reshape(vi, 3), b).reshape(i+2, 3).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + + def test_cat_dim1(self): + def f(a, b): return a.cat(b, dim=1).realize() + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + a = Tensor.rand(3, i) + b = Tensor.rand(3, 2) + symbolic = f(a.reshape(3, vi), b).reshape(3, i+2).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + + def test_cat_dim0_two_vars(self): + def f(a, b): return a.cat(b, dim=0).realize() + for i in range(1, 5): + for j in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + vj = Variable("j", 1, 10).bind(j) + a = Tensor.rand(i, 3) + b = Tensor.rand(j, 3) + symbolic = f(a.reshape(vi, 3), b.reshape(vj, 3)).reshape(i+j, 3).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + + def test_cat_dim1_two_vars(self): + def f(a, b): return a.cat(b, dim=1).realize() + for i in range(1, 5): + for j in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + vj = Variable("j", 1, 10).bind(j) + a = Tensor.rand(3, i) + b = Tensor.rand(3, j) + symbolic = f(a.reshape(3, vi), b.reshape(3, vj)).reshape(3, i+j).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + + def test_two_vars_plus1(self): + def f(a, b): return (a@b+1).realize() + for i in range(1, 5): + for j in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + vj = Variable("j", 1, 10).bind(j) + a = Tensor.rand(i, 3) + b = Tensor.rand(3, j) + symbolic = f(a.reshape(vi, 3), b.reshape(3, vj)).reshape(i, j).numpy() + expected = f(a, b).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + + def test_shrink(self): + for i in range(1, 5): + vi = Variable("i", 1, 10).bind(i) + a = Tensor.rand(7, 11) + symbolic = a.shrink(((3,5),(vi,vi+2))) + symbolic = symbolic.numpy() + expected = a.shrink(((3,5),(i,i+2))).numpy() + np.testing.assert_allclose(symbolic, expected, atol=1e-6, rtol=1e-6) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tinygrad_repo/test/test_symbolic_shapetracker.py b/tinygrad_repo/test/test_symbolic_shapetracker.py new file mode 100644 index 0000000..1e0b4fe --- /dev/null +++ b/tinygrad_repo/test/test_symbolic_shapetracker.py @@ -0,0 +1,173 @@ +import unittest +from tinygrad.shape.shapetracker import ShapeTracker, View +from tinygrad.shape.symbolic import Variable +from tinygrad.tensor import Tensor + +class TestSymbolic(unittest.TestCase): + def test_symbolic_st(self): + x = Variable("x", 1, 100) + st = ShapeTracker.from_shape((x, 3)) + assert st.shape == (x, 3) + assert st.real_strides() == (3, 1) + + def test_expr_idxs(self): + x = Variable("x", 1, 100) + st = ShapeTracker.from_shape((x, 3)) + idxs = [Variable("x", 0, 100), Variable("y", 0, 100)] + e1, e2 = st.expr_idxs(idxs) + assert e1.render() == "((x*3)+y)" + assert e2.render() == "1" + st = st.permute((1, 0)) + e1, e2 = st.expr_idxs(idxs) + assert e1.render() == "((y*3)+x)" + assert e2.render() == "1" + + def test_cat_dim0_strides(self): + i = Variable("i", 1, 5).bind(3) + j = Variable("j", 1, 5).bind(3) + k = Variable("k", 1, 5).bind(3) + t = Tensor.rand(3, 4).reshape(i, 4).cat(Tensor.rand(3, 4).reshape(j, 4), dim=0).cat(Tensor.rand(3, 4).reshape(k, 4), dim=0) + st = t.lazydata.st + assert st.shape == (i+j+k, 4) + assert st.real_strides() == (4, 1) + t = Tensor.rand(3, 
3).reshape(i, 3).cat(Tensor.rand(3, 3).reshape(i, 3), dim=0).cat(Tensor.rand(3, 3), dim=0) + st = t.lazydata.st + assert st.shape == (2*i+3, 3) + assert st.real_strides() == (3, 1) + + def test_cat_dim1_strides(self): + i = Variable("i", 1, 5).bind(4) + j = Variable("j", 1, 5).bind(4) + k = Variable("k", 1, 5).bind(4) + t = Tensor.rand(3, 4).reshape(3, i).cat(Tensor.rand(3, 4).reshape(3, j), dim=1).cat(Tensor.rand(3, 4).reshape(3, k), dim=1) + st = t.lazydata.st + assert st.shape == (3, i+j+k) + assert st.real_strides() == (i+j+k, 1) + +class TestSymbolicVarVals(unittest.TestCase): + def test_var_vals_empty(self): + assert ShapeTracker.from_shape((3, 4, 5)).var_vals == {} + + def test_var_vals_shape(self): + x = Variable("x", 1, 100).bind(3) + assert ShapeTracker.from_shape((x, 3)).var_vals == {Variable("x", 1, 100): 3} + + def test_var_vals_offset(self): + x = Variable("x", 1, 100).bind(3) + st = ShapeTracker.from_shape((4, 3)).shrink(((x, x+1), (0, 3))) + assert st.real_offset() == x * 3 + assert st.var_vals == {Variable("x", 1, 100): 3} + + def test_var_vals_mask(self): + x = Variable("x", 1, 100).bind(3) + view = View.create(shape=(3,4), strides=(4,1), offset=0, mask=((0, x), (0, 4))) + st = ShapeTracker(views=(view,)) + assert st.var_vals == {Variable("x", 1, 100): 3} + + def test_var_vals_complex(self): + x = Variable("x", 1, 100).bind(3) + y = Variable("y", 1, 100).bind(4) + z = Variable("z", 1, 100).bind(5) + st = ShapeTracker.from_shape((x, 5, y)).shrink(((0, x), (z, z+1), (0, 3))) + assert st.real_offset() == y * z + assert st.var_vals == {Variable("x", 1, 100): 3, Variable("y", 1, 100):4, Variable("z", 1, 100): 5} + + def test_shrink_reshape(self): + x = Variable("x", 1, 100).bind(3) + st = ShapeTracker.from_shape((10, 10, 10)).shrink(((x, x+3), (3, 7), (2, 5))) + st = st.reshape((3*4*3,)) + assert st.var_vals == {Variable("x", 1, 100): 3} + +class TestShapeTrackerUnbind(unittest.TestCase): + def test_view_unbind(self): + v = Variable("v", 1, 100) + bv = Variable("v", 1, 100).bind(3) + assert View.create(shape=(bv, 4)).unbind() == View.create(shape=(v, 4)) + + def test_reshape_unbind(self): + v = Variable("v", 1, 100) + bv = Variable("v", 1, 100).bind(3) + t = Tensor.rand(3, 4).reshape(bv, 4) + assert t.lazydata.st.unbind() == ShapeTracker((View.create(shape=(v, 4)),)) + + def test_shrink_unbind(self): + v = Variable("v", 1, 100) + bv = Variable("v", 1, 100).bind(2) + t = Tensor.rand(3, 4).shrink(((bv, bv+1), (0, 4))) + assert t.lazydata.st.unbind() == ShapeTracker((View.create(shape=(1, 4), offset=4*v),)) + +class TestSymbolicReshape(unittest.TestCase): + def test_reshape_into_symbols_simple(self): + for i in range(1, 6): + vi = Variable("i", 1, 5).bind(i) + t = Tensor.rand(i, 4).reshape(vi, 4) + assert t.shape == (vi, 4) + t = Tensor.rand(i, 6).reshape(vi, 2, 3) + assert t.shape == (vi, 2, 3) + + def test_reshape_symbols_reshape_ints(self): + for i in range(1, 6): + vi = Variable("i", 1, 5).bind(i) + t = Tensor.rand(i, 4).reshape(vi, 4) + assert t.shape == (vi, 4) + t = t.reshape(i, 4) + assert t.shape == (i, 4) + + def test_reshape_into_symbols_bad_shape(self): + vi = Variable("i", 1, 10).bind(4) + with self.assertRaises(AssertionError): + t = Tensor.rand(4, 6).reshape(vi, 6).reshape(1, 77) # reshape to a different size new shape through symbolic shape + with self.assertRaises(AssertionError): + t = Tensor.rand(3, 4).reshape(3, (vi+1)) # reshape into non-Variable Node + + def test_two_symbol_reshape(self): + for i in range(1, 6): + for j in range(1, 6): + vi = Variable("i", 
1, 5).bind(i) + vj = Variable("j", 1, 5).bind(j) + t = Tensor.rand(i, j).reshape(vi, vj) + assert t.shape == (vi, vj) + # NOTE: this is currently not allowed + # t = t.reshape(1, vi*vj) + # assert t.shape == (1, vi*vj) + t = t.reshape(vj, vi) + assert t.shape == (vj, vi) + +class TestSymbolicExpand(unittest.TestCase): + def test_expand_into_symbols(self): + # TODO: enfore expand only into bound variables + vi = Variable("i", 1, 5) + vj = Variable("j", 1, 5) + a = Tensor([[1], [2], [3]]).expand((3, vi)) + assert a.shape == (3, vi) + a = a.reshape(3, vi, 1).expand((3, vi, vj)) + assert a.shape == (3, vi, vj) + + def test_plus_expands_constant(self): + for i in range(1, 6): + vi = Variable("i", 1, 5).bind(i) + a = Tensor.rand(3, i).reshape(3, vi) + a = a + 1 + assert a.shape == (3, vi) + +class TestSymbolicShrink(unittest.TestCase): + def test_shrink_symbols(self): + vi = Variable("i", 1, 5) + t = Tensor.rand(3, 5).shrink(((0, 2), (vi, vi+1))) + assert t.shape == (2, 1) + +class TestSymbolicShapeExpr(unittest.TestCase): + def test_symbolic_expr_idxs(self): + # taken from symbolic shape llama + i = Variable("i", 1, 120) + gidx0 = Variable("gidx0", 0, i) + lidx1 = Variable("lidx1", 0, 7) + idx = (gidx0, lidx1, Variable.num(1)) + shape = (i+1, 8, 4) + strides = (1, (i*4)+4, i+1) + st = ShapeTracker((View.create(shape, strides), )) + idx, valid = st.expr_idxs(idx) + assert idx.render() == "((lidx1*((i*4)+4))+1+gidx0+i)" + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tinygrad_repo/test/test_tensor.py b/tinygrad_repo/test/test_tensor.py new file mode 100644 index 0000000..bb81e02 --- /dev/null +++ b/tinygrad_repo/test/test_tensor.py @@ -0,0 +1,266 @@ +import numpy as np +import torch +import struct +import unittest, copy +import mmap +from tinygrad.tensor import Tensor, Device +from tinygrad.helpers import dtypes +from extra.gradcheck import numerical_jacobian, jacobian, gradcheck +from extra.utils import temp + +x_init = np.random.randn(1,3).astype(np.float32) +U_init = np.random.randn(3,3).astype(np.float32) +V_init = np.random.randn(3,3).astype(np.float32) +W_init = np.random.randn(3,3).astype(np.float32) +m_init = np.random.randn(1,3).astype(np.float32) + +class TestTinygrad(unittest.TestCase): + def test_zerodim_initialization(self): + a = Tensor(55) + b = Tensor(3.14) + + self.assertEqual(a.shape, ()) + self.assertEqual(b.shape, ()) + + def test_plus_equals(self): + a = Tensor.randn(10,10) + b = Tensor.randn(10,10) + c = a + b + val1 = c.numpy() + a += b + val2 = a.numpy() + np.testing.assert_allclose(val1, val2) + + def test_backward_pass(self): + def test_tinygrad(): + x = Tensor(x_init, requires_grad=True) + W = Tensor(W_init, requires_grad=True) + m = Tensor(m_init) + out = x.dot(W).relu() + out = out.log_softmax() + out = out.mul(m).add(m).sum() + out.backward() + return out.numpy(), x.grad.numpy(), W.grad.numpy() + + def test_pytorch(): + x = torch.tensor(x_init, requires_grad=True) + W = torch.tensor(W_init, requires_grad=True) + m = torch.tensor(m_init) + out = x.matmul(W).relu() + out = torch.nn.functional.log_softmax(out, dim=1) + out = out.mul(m).add(m).sum() + out.backward() + return out.detach().numpy(), x.grad, W.grad + + for x,y in zip(test_tinygrad(), test_pytorch()): + np.testing.assert_allclose(x, y, atol=1e-5) + + @unittest.skipIf(Device.DEFAULT == "WEBGPU", "this test uses more than 8 bufs which breaks webgpu") #TODO: remove after #1461 + def test_backward_pass_diamond_model(self): + def test_tinygrad(): + u = Tensor(U_init, 
requires_grad=True) + v = Tensor(V_init, requires_grad=True) + w = Tensor(W_init, requires_grad=True) + x = u.mul(v).relu() + y = u.mul(w).relu() + out = x.add(y).mul(y).relu() + out = out.log_softmax() + out = out.sum() + out.backward() + return out.numpy(), u.grad.numpy(), v.grad.numpy(), w.grad.numpy() + + def test_pytorch(): + u = torch.tensor(U_init, requires_grad=True) + v = torch.tensor(V_init, requires_grad=True) + w = torch.tensor(W_init, requires_grad=True) + x = u.mul(v).relu() + y = u.mul(w).relu() + out = x.add(y).mul(y).relu() + out = torch.nn.functional.log_softmax(out, dim=1) + out = out.sum() + out.backward() + return out.detach().numpy(), u.grad, v.grad, w.grad + + for x,y in zip(test_tinygrad(), test_pytorch()): + np.testing.assert_allclose(x, y, atol=1e-5) + + def test_nograd(self): + x = Tensor(x_init, requires_grad=False) + m = Tensor(m_init, requires_grad=False) + W = Tensor(W_init, requires_grad=True) + tmp = x.mul(m) + mm = tmp.matmul(W) + out = mm.relu() + out = out.sum() + out.backward() + assert x.grad is None + assert m.grad is None + assert tmp.grad is None + assert mm.grad is not None + assert W.grad is not None + + def test_dropout(self): + with Tensor.train(): + n, rate = 1_000_000, 0.1 + w = Tensor.ones(n).dropout(rate) + non_zeros = np.count_nonzero(w.numpy()) + expected = n * (1 - rate) + np.testing.assert_allclose(non_zeros, expected, rtol=2e-3) + + def test_jacobian(self): + W = np.random.RandomState(42069).random((10, 5)).astype(np.float32) + x = np.random.RandomState(69420).random((1, 10)).astype(np.float32) + + torch_x = torch.tensor(x, requires_grad=True) + torch_W = torch.tensor(W, requires_grad=True) + torch_func = lambda x: torch.nn.functional.log_softmax(x.matmul(torch_W).relu(), dim=1) + PJ = torch.autograd.functional.jacobian(torch_func, torch_x).squeeze().numpy() + + tiny_x = Tensor(x, requires_grad=True) + tiny_W = Tensor(W, requires_grad=True) + tiny_func = lambda x: x.dot(tiny_W).relu().log_softmax() + J = jacobian(tiny_func, tiny_x) + NJ = numerical_jacobian(tiny_func, tiny_x) + + np.testing.assert_allclose(PJ, J, atol = 1e-5) + np.testing.assert_allclose(PJ, NJ, atol = 1e-3) + + def test_gradcheck(self): + W = np.random.RandomState(1337).random((10, 5)).astype(np.float32) + x = np.random.RandomState(7331).random((1, 10)).astype(np.float32) + + tiny_x = Tensor(x, requires_grad=True) + tiny_W = Tensor(W, requires_grad=True) + tiny_func = lambda x: x.dot(tiny_W).relu().log_softmax() + + self.assertTrue(gradcheck(tiny_func, tiny_x, eps = 1e-3)) + + # coarse approx. 
since a "big" eps and the non-linearities of the model + self.assertFalse(gradcheck(tiny_func, tiny_x, eps = 1e-5)) + + def test_random_fns_are_deterministic_with_seed(self): + for random_fn in [Tensor.randn, Tensor.normal, Tensor.uniform, Tensor.scaled_uniform, Tensor.glorot_uniform, Tensor.kaiming_normal]: + with self.subTest(msg=f"Tensor.{random_fn.__name__}"): + Tensor.manual_seed(1337) + a = random_fn(10,10).realize() + Tensor.manual_seed(1337) + b = random_fn(10,10).realize() + np.testing.assert_allclose(a.numpy(), b.numpy()) + + def test_randn_isnt_inf_on_zero(self): + # simulate failure case of rand handing a zero to randn + original_rand, Tensor.rand = Tensor.rand, Tensor.zeros + try: self.assertNotIn(np.inf, Tensor.randn(16).numpy()) + except: raise + finally: Tensor.rand = original_rand + + def test_zeros_like_has_same_dtype(self): + for datatype in [dtypes.float16, dtypes.float32, dtypes.int8, dtypes.int32, dtypes.int64, dtypes.uint8]: + a = Tensor([1, 2, 3], dtype=datatype) + b = Tensor.zeros_like(a) + assert a.dtype == b.dtype, f"a.dtype and b.dtype should be {datatype}" + assert a.shape == b.shape, f"shape mismatch (Tensor.zeros_like){a.shape} != (torch){b.shape}" + + a = Tensor([1, 2, 3]) + b = Tensor.zeros_like(a, dtype=dtypes.int8) + assert a.dtype != b.dtype and a.dtype == dtypes.float32 and b.dtype == dtypes.int8, "a.dtype should be float and b.dtype should be char" + assert a.shape == b.shape, f"shape mismatch (Tensor.zeros_like){a.shape} != (torch){b.shape}" + + def test_ones_like_has_same_dtype_and_shape(self): + for datatype in [dtypes.float16, dtypes.float32, dtypes.int8, dtypes.int32, dtypes.int64, dtypes.uint8]: + a = Tensor([1, 2, 3], dtype=datatype) + b = Tensor.ones_like(a) + assert a.dtype == b.dtype, f"a.dtype and b.dtype should be {datatype}" + assert a.shape == b.shape, f"shape mismatch (Tensor.ones_like){a.shape} != (torch){b.shape}" + + a = Tensor([1, 2, 3]) + b = Tensor.ones_like(a, dtype=dtypes.int8) + assert a.dtype != b.dtype and a.dtype == dtypes.float32 and b.dtype == dtypes.int8, "a.dtype should be float and b.dtype should be char" + assert a.shape == b.shape, f"shape mismatch (Tensor.ones_like){a.shape} != (torch){b.shape}" + + def test_ndim(self): + assert Tensor.randn(1).ndim == 1 + assert Tensor.randn(2,2,2).ndim == 3 + assert Tensor.randn(1,1,1,1,1,1).ndim == 6 + + def test_argfix(self): + self.assertEqual(Tensor.zeros().shape, ()) + self.assertEqual(Tensor.ones().shape, ()) + + self.assertEqual(Tensor.zeros([]).shape, ()) + self.assertEqual(Tensor.ones([]).shape, ()) + + self.assertEqual(Tensor.zeros(tuple()).shape, ()) + self.assertEqual(Tensor.ones(tuple()).shape, ()) + + self.assertEqual(Tensor.zeros(1).shape, (1,)) + self.assertEqual(Tensor.ones(1).shape, (1,)) + + self.assertEqual(Tensor.zeros(1,10,20).shape, (1,10,20)) + self.assertEqual(Tensor.ones(1,10,20).shape, (1,10,20)) + + self.assertEqual(Tensor.zeros([1]).shape, (1,)) + self.assertEqual(Tensor.ones([1]).shape, (1,)) + + self.assertEqual(Tensor.zeros([10,20,40]).shape, (10,20,40)) + self.assertEqual(Tensor.ones([10,20,40]).shape, (10,20,40)) + + def test_numel(self): + assert Tensor.randn(10, 10).numel() == 100 + assert Tensor.randn(1,2,5).numel() == 10 + assert Tensor.randn(1,1,1,1,1,1).numel() == 1 + assert Tensor([]).numel() == 0 + # assert Tensor.randn(1,0,2,5) == 0 # TODO: fix empty tensors + + def test_element_size(self): + for _, dtype in dtypes.fields().items(): + assert dtype.itemsize == Tensor.randn(3, dtype=dtype).element_size(), f"Tensor.element_size() not matching 
Tensor.dtype.itemsize for {dtype}" + + def test_deepwalk_ctx_check(self): + layer = Tensor.uniform(1, 1, requires_grad=True) + x = Tensor.randn(1, 1, 1) + x.dot(layer).mean().backward() + x = Tensor.randn(1, 1, 1) + x.dot(layer).mean().backward() + + def test_zerosized_tensors(self): + Tensor([]).realize() + Tensor([]).numpy() + + def test_tensor_ndarray_dtype(self): + arr = np.array([1]) # where dtype is implicitly int64 + assert Tensor(arr).dtype == dtypes.int64 + assert Tensor(arr, dtype=dtypes.float32).dtype == dtypes.float32 # check if ndarray correctly casts to Tensor dtype + assert Tensor(arr, dtype=dtypes.float64).dtype == dtypes.float64 # check that it works for something else + + def test_tensor_list_dtype(self): + arr = [1] + assert Tensor(arr).dtype == Tensor.default_type + assert Tensor(arr, dtype=dtypes.float32).dtype == dtypes.float32 + assert Tensor(arr, dtype=dtypes.float64).dtype == dtypes.float64 + + def test_tensor_copy(self): + x = copy.deepcopy(Tensor.ones((3,3,3))) + np.testing.assert_allclose(x.numpy(), np.ones((3,3,3))) + + def test_copy_from_disk(self): + t = Tensor.randn(30, device="CPU").to(f"disk:{temp('test_copy_from_disk')}") + a = t[10:20] + dev = a.to(Device.DEFAULT) + np.testing.assert_allclose(a.numpy(), dev.numpy()) + + # Regression test for https://github.com/tinygrad/tinygrad/issues/1751 + def test_copy_from_numpy_unaligned(self): + # 2**15 is the minimum for repro + arr = np.random.randn(2**15).astype(dtypes.float.np) + fn = temp('test_copy_from_numpy_unaligned') + with open(fn, 'wb') as f: f.write(b't' + arr.tobytes()) + with open(fn, "a+b") as f: memview = memoryview(mmap.mmap(f.fileno(), arr.nbytes + 1)) + ua_arr = np.frombuffer(memview[1:], dtype=arr.dtype, count=arr.shape[0]) + np.testing.assert_allclose(arr, ua_arr) + assert not ua_arr.flags.aligned + # force device copy - to() is opt'd away - Tensor(dev)/1 is ignored + np.testing.assert_allclose(ua_arr, (Tensor(ua_arr)/Tensor(1)).numpy()) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/test_uops.py b/tinygrad_repo/test/test_uops.py new file mode 100644 index 0000000..a18523b --- /dev/null +++ b/tinygrad_repo/test/test_uops.py @@ -0,0 +1,99 @@ +from typing import Optional, Tuple, Any, List +import unittest, math +import numpy as np +from tinygrad.helpers import dtypes, getenv, DType, PtrDType +from tinygrad.tensor import Device +from tinygrad.ops import UnaryOps, BinaryOps, TernaryOps, ASTRunner, Compiled +from tinygrad.codegen.linearizer import UOps, UOp + +def _uops_to_prg(uops): + src, runtime_args = Device[Device.DEFAULT].renderer("test", uops) + return ASTRunner("test", src, + [1] if Device[Device.DEFAULT].linearizer_opts.has_local else None, [1] if Device[Device.DEFAULT].linearizer_opts.has_local else None, + runtime_args=runtime_args).build(Device[Device.DEFAULT].compiler, Device[Device.DEFAULT].runtime) + +def uop(uops:List[UOp], uop:UOps, dtype:Optional[DType], vin:Tuple[UOp, ...], arg:Any=None) -> UOp: + uops.append(UOp(uop, dtype, tuple(vin), arg, len(uops))) + return uops[-1] + +def _test_single_value(vals, op, dtype): + uops = [] + buf_store = uop(uops, UOps.DEFINE_GLOBAL, PtrDType(dtype), (), ('data0', dtype)) + buf_loads = [uop(uops, UOps.DEFINE_GLOBAL, PtrDType(dtype), (), (f'data{i+1}', dtype)) for i in range(len(vals))] + loads = (uop(uops, UOps.LOAD, dtype, [buf_loads[i], uop(uops, UOps.CONST, dtypes.int32, (), 0)]) for i in range(len(vals))) + alu = uop(uops, UOps.ALU, dtype, loads, op) + uop(uops, UOps.STORE, None, (buf_store, uop(uops, 
UOps.CONST, dtypes.int32, (), 0), alu)) + buf = Device[Device.DEFAULT].buffer(1, dtype) + buf2 = [Device[Device.DEFAULT].buffer.fromCPU(np.array([a], dtype=dtype.np)) for a in vals] + prg = _uops_to_prg(uops) + prg([buf]+buf2) + return buf.toCPU()[0] + +def _test_single_value_const(vals, op, dtype): + uops = [] + buf_store = uop(uops, UOps.DEFINE_GLOBAL, PtrDType(dtype), (), ('data0', dtype)) + loads = (uop(uops, UOps.CONST, dtype, [], a) for a in vals) + alu = uop(uops, UOps.ALU, dtype, loads, op) + uop(uops, UOps.STORE, None, (buf_store, uop(uops, UOps.CONST, dtypes.int32, (), 0), alu)) + buf = Device[Device.DEFAULT].buffer(1, dtype) + prg = _uops_to_prg(uops) + prg([buf]) + return buf.toCPU()[0] + +class TestUOps(unittest.TestCase): + def _equal(self, v1, v2): + if not (math.isnan(v1) and math.isnan(v2)): self.assertAlmostEqual(v1, v2, places=5) + + def _test_uop_fxn(self, bop, fxn, dt=dtypes.float32): + for f in [_test_single_value, _test_single_value_const]: + for a in [-2.0, 0.0, 1.0]: + self._equal(f([a], bop, dt), fxn(a)) + + def _test_bop_fxn(self, bop, fxn, dt=dtypes.float32, no_b_zero=False): + for f in [_test_single_value, _test_single_value_const]: + for a in [-2.0, 0.0, 1.0]: + for b in [-3.0, 1.0] + ([] if no_b_zero else [0.0]): + self._equal(f([a,b], bop, dt), fxn(a,b)) + + def _test_top_fxn(self, bop, fxn, dt=dtypes.float32): + for f in [_test_single_value, _test_single_value_const]: + for a in [-2.0, 0, 1]: + for b in [-3.0, 3.0]: + for c in [-4.0, 4.0]: + self._equal(f([a,b,c], bop, dt), fxn(a,b,c)) + +@unittest.skipIf(not isinstance(Device[Device.DEFAULT], Compiled), "only test for compiled backends") +class TestFloatUOps(TestUOps): + def test_neg(self): self._test_uop_fxn(UnaryOps.NEG, lambda a: -a) + def test_exp2(self): self._test_uop_fxn(UnaryOps.EXP2, lambda a: np.exp2(a)) + def test_log2(self): self._test_uop_fxn(UnaryOps.LOG2, lambda a: math.log2(a) if a > 0 else float('-inf' if a==0 else 'nan')) + def test_sin(self): self._test_uop_fxn(UnaryOps.SIN, lambda a: math.sin(a)) + def test_sqrt(self): self._test_uop_fxn(UnaryOps.SQRT, lambda a: math.sqrt(a) if a >= 0 else float('nan')) + # this is not on most backends + #def test_recip(self): self._test_uop_fxn(UnaryOps.RECIP, lambda a: 1.0/a if a != 0 else float('inf')) + + def test_add(self): self._test_bop_fxn(BinaryOps.ADD, lambda a,b: a+b) + def test_sub(self): self._test_bop_fxn(BinaryOps.SUB, lambda a,b: a-b) + def test_mul(self): self._test_bop_fxn(BinaryOps.MUL, lambda a,b: a*b) + def test_div(self): self._test_bop_fxn(BinaryOps.DIV, lambda a,b: a/b if b != 0 else a*float('inf')) + def test_max(self): self._test_bop_fxn(BinaryOps.MAX, lambda a,b: max(a,b)) + def test_cmplt(self): self._test_bop_fxn(BinaryOps.CMPLT, lambda a,b: float(a setTimeout(resolve, time)); +} + +function cleanup(err) { + res.kill(); + if(err != null) { + console.error(err); + process.exit(1); + } +} + +async function waitForText(selector, text) { + let n = 0; + let ready = false; + while (n < 10) { + const res = await (await selector.getProperty("textContent")).jsonValue(); + console.log(`waiting for text ${text} got ${res}`); + if(res == text) { + ready = true; + break + } + await timeout(2000); + n += 1 + } + return ready; +} + +puppeteer.launch({ headless: false, args: ["--enable-unsafe-webgpu"]}).then(async browser => { + const page = await browser.newPage(); + page.on("console", message => console.log(`message from console ${message.text()}`)) + .on("pageerror", ({ message }) => console.log(`error from page ${message}`)) + + const 
res = await page.goto("http://localhost:8000/examples/index.html"); + if(res.status() != 200) throw new Error("Failed to load page"); + const textSelector = await page.waitForSelector("#result"); + const buttonSelector = await page.waitForSelector("input[type=button]"); + const ready = await waitForText(textSelector, "ready"); + if(!ready) throw new Error("Failed to load page"); + await buttonSelector.evaluate(e => e.click()); + const done = await waitForText(textSelector, "hen"); + if(!done) throw new Error("failed to get hen"); + browser.close(); + cleanup(null); +}).catch(err => { + cleanup(err); +}); \ No newline at end of file diff --git a/tinygrad_repo/test/test_winograd.py b/tinygrad_repo/test/test_winograd.py new file mode 100644 index 0000000..297bff8 --- /dev/null +++ b/tinygrad_repo/test/test_winograd.py @@ -0,0 +1,40 @@ +import unittest +from tinygrad.helpers import Timing, CI +from tinygrad.tensor import Tensor +from tinygrad.ops import LoadOps +from tinygrad.codegen.linearizer import Linearizer +from test.test_net_speed import start_profile, stop_profile + +class TestWinograd(unittest.TestCase): + def setUp(self): + self.old = Tensor.wino + Tensor.wino = 1 + def tearDown(self): Tensor.wino = self.old + + def test_speed(self): + x = Tensor.empty(1,4,9,9) + w = Tensor.empty(4,4,3,3) + + with Timing("running conv: "): + out = Tensor.conv2d(x, w) + + with Timing("scheduling: "): + sched = out.lazydata.schedule() + + for i,s in enumerate(sched): + if s.ast.op in LoadOps: continue + ops = s.ast.get_lazyops() + with Timing(f"linearize {i} with {len(ops):4d} ops: "): + l = Linearizer(s.ast) + l.hand_coded_optimizations() + l.linearize() + + def test_profile(self): + x,w = Tensor.rand(1,4,9,9).realize(), Tensor.rand(4,4,3,3).realize() + if not CI: pr = start_profile() + out = Tensor.conv2d(x,w).realize() + if not CI: stop_profile(pr, sort='time') + out.numpy() + +if __name__ == '__main__': + unittest.main(verbosity=2) \ No newline at end of file diff --git a/tinygrad_repo/test/unit/test_disk_cache.py b/tinygrad_repo/test/unit/test_disk_cache.py new file mode 100644 index 0000000..29b9b52 --- /dev/null +++ b/tinygrad_repo/test/unit/test_disk_cache.py @@ -0,0 +1,66 @@ +import unittest +import pickle +from tinygrad.helpers import diskcache_get, diskcache_put + +def remote_get(table,q,k): q.put(diskcache_get(table, k)) +def remote_put(table,k,v): diskcache_put(table, k, v) + +class DiskCache(unittest.TestCase): + def test_putget(self): + table = "test_putget" + diskcache_put(table, "hello", "world") + self.assertEqual(diskcache_get(table, "hello"), "world") + diskcache_put(table, "hello", "world2") + self.assertEqual(diskcache_get(table, "hello"), "world2") + + def test_putcomplex(self): + table = "test_putcomplex" + diskcache_put(table, "k", ("complex", 123, "object")) + ret = diskcache_get(table, "k") + self.assertEqual(ret, ("complex", 123, "object")) + + def test_getotherprocess(self): + table = "test_getotherprocess" + from multiprocessing import Process, Queue + diskcache_put(table, "k", "getme") + q = Queue() + p = Process(target=remote_get, args=(table,q,"k")) + p.start() + p.join() + self.assertEqual(q.get(), "getme") + + def test_putotherprocess(self): + table = "test_putotherprocess" + from multiprocessing import Process + p = Process(target=remote_put, args=(table,"k", "remote")) + p.start() + p.join() + self.assertEqual(diskcache_get(table, "k"), "remote") + + def test_no_table(self): + self.assertIsNone(diskcache_get("faketable", "k")) + + def test_ret(self): + table = 
"test_ret" + self.assertEqual(diskcache_put(table, "key", ("vvs",)), ("vvs",)) + + def test_non_str_key(self): + table = "test_non_str_key" + diskcache_put(table, 4, 5) + self.assertEqual(diskcache_get(table, 4), 5) + self.assertEqual(diskcache_get(table, "4"), 5) + + def test_dict_key(self): + table = "test_dict_key" + fancy_key = {"hello": "world", "goodbye": 7, "good": True, "pkl": pickle.dumps("cat")} + fancy_key2 = {"hello": "world", "goodbye": 8, "good": True, "pkl": pickle.dumps("cat")} + fancy_key3 = {"hello": "world", "goodbye": 8, "good": True, "pkl": pickle.dumps("dog")} + diskcache_put(table, fancy_key, 5) + self.assertEqual(diskcache_get(table, fancy_key), 5) + diskcache_put(table, fancy_key2, 8) + self.assertEqual(diskcache_get(table, fancy_key2), 8) + self.assertEqual(diskcache_get(table, fancy_key), 5) + self.assertEqual(diskcache_get(table, fancy_key3), None) + +if __name__ == "__main__": + unittest.main() diff --git a/tinygrad_repo/test/unit/test_disk_tensor.py b/tinygrad_repo/test/unit/test_disk_tensor.py new file mode 100644 index 0000000..d77a31f --- /dev/null +++ b/tinygrad_repo/test/unit/test_disk_tensor.py @@ -0,0 +1,150 @@ +import pathlib +import unittest +import numpy as np +from tinygrad.tensor import Tensor, Device +from tinygrad.nn.state import safe_load, safe_save, get_state_dict, torch_load +from tinygrad.helpers import dtypes +from tinygrad.runtime.ops_disk import RawDiskBuffer +from tinygrad.helpers import Timing +from extra.utils import fetch_as_file, temp + +def compare_weights_both(url): + import torch + fn = fetch_as_file(url) + tg_weights = get_state_dict(torch_load(fn)) + torch_weights = get_state_dict(torch.load(fn), tensor_type=torch.Tensor) + assert list(tg_weights.keys()) == list(torch_weights.keys()) + for k in tg_weights: + np.testing.assert_equal(tg_weights[k].numpy(), torch_weights[k].numpy(), err_msg=f"mismatch at {k}, {tg_weights[k].shape}") + print(f"compared {len(tg_weights)} weights") + +class TestTorchLoad(unittest.TestCase): + # pytorch pkl format + def test_load_enet(self): compare_weights_both("https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth") + # pytorch zip format + def test_load_enet_alt(self): compare_weights_both("https://download.pytorch.org/models/efficientnet_b0_rwightman-3dd342df.pth") + # pytorch zip format + def test_load_convnext(self): compare_weights_both('https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth') + # TODO: support pytorch tar format with minimal lines + #def test_load_resnet(self): compare_weights_both('https://download.pytorch.org/models/resnet50-19c8e357.pth') + +test_fn = pathlib.Path(__file__).parents[2] / "weights/LLaMA/7B/consolidated.00.pth" +#test_size = test_fn.stat().st_size +test_size = 1024*1024*1024*2 + +# sudo su -c 'sync; echo 1 > /proc/sys/vm/drop_caches' && python3 test/unit/test_disk_tensor.py TestRawDiskBuffer.test_readinto_read_speed +@unittest.skipIf(not test_fn.exists(), "download LLaMA weights for read in speed tests") +class TestRawDiskBuffer(unittest.TestCase): + def test_readinto_read_speed(self): + tst = np.empty(test_size, np.uint8) + with open(test_fn, "rb") as f: + with Timing("copy in ", lambda et_ns: f" {test_size/et_ns:.2f} GB/s"): + f.readinto(tst) + + def test_mmap_read_speed(self): + db = RawDiskBuffer(test_size, dtype=dtypes.uint8, device=test_fn) + tst = np.empty(test_size, np.uint8) + with Timing("copy in ", lambda et_ns: f" {test_size/et_ns:.2f} GB/s"): + np.copyto(tst, db.toCPU()) 
+@unittest.skipIf(Device.DEFAULT == "WEBGPU", "webgpu doesn't support uint8 datatype") +class TestSafetensors(unittest.TestCase): + def test_real_safetensors(self): + import torch + from safetensors.torch import save_file + torch.manual_seed(1337) + tensors = { + "weight1": torch.randn((16, 16)), + "weight2": torch.arange(0, 17, dtype=torch.uint8), + "weight3": torch.arange(0, 17, dtype=torch.int32).reshape(17,1,1), + "weight4": torch.arange(0, 2, dtype=torch.uint8), + } + save_file(tensors, temp("model.safetensors")) + + ret = safe_load(temp("model.safetensors")) + for k,v in tensors.items(): np.testing.assert_array_equal(ret[k].numpy(), v.numpy()) + safe_save(ret, temp("model.safetensors_alt")) + with open(temp("model.safetensors"), "rb") as f: + with open(temp("model.safetensors_alt"), "rb") as g: + assert f.read() == g.read() + ret2 = safe_load(temp("model.safetensors_alt")) + for k,v in tensors.items(): np.testing.assert_array_equal(ret2[k].numpy(), v.numpy()) + + def test_efficientnet_safetensors(self): + from models.efficientnet import EfficientNet + model = EfficientNet(0) + state_dict = get_state_dict(model) + safe_save(state_dict, temp("eff0")) + state_dict_loaded = safe_load(temp("eff0")) + assert sorted(list(state_dict_loaded.keys())) == sorted(list(state_dict.keys())) + for k,v in state_dict.items(): + np.testing.assert_array_equal(v.numpy(), state_dict_loaded[k].numpy()) + + # load with the real safetensors + from safetensors import safe_open + with safe_open(temp("eff0"), framework="pt", device="cpu") as f: + assert sorted(list(f.keys())) == sorted(list(state_dict.keys())) + for k in f.keys(): + np.testing.assert_array_equal(f.get_tensor(k).numpy(), state_dict[k].numpy()) + + def test_huggingface_enet_safetensors(self): + # test a real file + fn = fetch_as_file("https://huggingface.co/timm/mobilenetv3_small_075.lamb_in1k/resolve/main/model.safetensors") + state_dict = safe_load(fn) + assert len(state_dict.keys()) == 244 + assert 'blocks.2.2.se.conv_reduce.weight' in state_dict + assert state_dict['blocks.0.0.bn1.num_batches_tracked'].numpy() == 276570 + assert state_dict['blocks.2.0.bn2.num_batches_tracked'].numpy() == 276570 + + def test_metadata(self): + metadata = {"hello": "world"} + safe_save({}, temp('metadata.safetensors'), metadata) + import struct + with open(temp('metadata.safetensors'), 'rb') as f: + dat = f.read() + sz = struct.unpack(">Q", dat[0:8])[0] + import json + assert json.loads(dat[8:8+sz])['__metadata__']['hello'] == 'world' + +def helper_test_disk_tensor(fn, data, np_fxn, tinygrad_fxn=None): + if tinygrad_fxn is None: tinygrad_fxn = np_fxn + pathlib.Path(temp(fn)).unlink(missing_ok=True) + tinygrad_tensor = Tensor(data, device="CPU").to(f"disk:{temp(fn)}") + numpy_arr = np.array(data) + tinygrad_fxn(tinygrad_tensor) + np_fxn(numpy_arr) + np.testing.assert_allclose(tinygrad_tensor.numpy(), numpy_arr) + +class TestDiskTensor(unittest.TestCase): + def test_empty(self): + pathlib.Path(temp("dt1")).unlink(missing_ok=True) + Tensor.empty(100, 100, device=f"disk:{temp('dt1')}") + + def test_write_ones(self): + pathlib.Path(temp("dt2")).unlink(missing_ok=True) + + out = Tensor.ones(10, 10, device="CPU") + outdisk = out.to(f"disk:{temp('dt2')}") + print(outdisk) + outdisk.realize() + del out, outdisk + + # test file + with open(temp("dt2"), "rb") as f: + assert f.read() == b"\x00\x00\x80\x3F" * 100 + + # test load alt + reloaded = Tensor.empty(10, 10, device=f"disk:{temp('dt2')}") + out = reloaded.numpy() + assert np.all(out == 1.) 
+ + def test_assign_slice(self): + def assign(x,s,y): x[s] = y + helper_test_disk_tensor("dt3", [0,1,2,3], lambda x: assign(x, slice(0,2), [13, 12])) + helper_test_disk_tensor("dt4", [[0,1,2,3],[4,5,6,7]], lambda x: assign(x, slice(0,1), [[13, 12, 11, 10]])) + + def test_reshape(self): + helper_test_disk_tensor("dt5", [1,2,3,4,5], lambda x: x.reshape((1,5))) + helper_test_disk_tensor("dt6", [1,2,3,4], lambda x: x.reshape((2,2))) + +if __name__ == "__main__": + unittest.main() diff --git a/tinygrad_repo/test/unit/test_flopcounter.py b/tinygrad_repo/test/unit/test_flopcounter.py new file mode 100644 index 0000000..22f91ee --- /dev/null +++ b/tinygrad_repo/test/unit/test_flopcounter.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +import unittest +from tinygrad.ops import LazyOp, BinaryOps, ReduceOps, get_lazyop_info, BufferOps, MemBuffer +from tinygrad.shape.shapetracker import ShapeTracker +from tinygrad.helpers import dtypes + +class TestFlopCounter(unittest.TestCase): + def setUp(self): + self.buf0 = LazyOp(BufferOps.MEM, (), MemBuffer(1, dtypes.float32, ShapeTracker.from_shape((4,)))) + self.buf1 = LazyOp(BufferOps.MEM, (), MemBuffer(2, dtypes.float32, ShapeTracker.from_shape((4,)))) + + def test_flops_add(self): + op0 = LazyOp(BinaryOps.ADD, (self.buf0,self.buf1,), None) + info = get_lazyop_info(op0) + self.assertEqual(info.flops, 4) + + def test_flops_add_twice(self): + op0 = LazyOp(BinaryOps.ADD, (self.buf0,self.buf1,), None) + op1 = LazyOp(BinaryOps.ADD, (op0,self.buf1,), None) + info = get_lazyop_info(op1) + self.assertEqual(info.flops, 8) + + def test_flops_add_self(self): + op0 = LazyOp(BinaryOps.ADD, (self.buf0,self.buf1,), None) + op1 = LazyOp(BinaryOps.ADD, (op0,op0,), None) + info = get_lazyop_info(op1) + self.assertEqual(info.flops, 8) + + def test_flops_add_roundabout_self(self): + op0 = LazyOp(BinaryOps.ADD, (self.buf0,self.buf1,), None) + op1 = LazyOp(BinaryOps.ADD, (op0,self.buf1,), None) + op2 = LazyOp(BinaryOps.ADD, (op0,op1,), None) + info = get_lazyop_info(op2) + self.assertEqual(info.flops, 12) + + def test_flops_red(self): + op0 = LazyOp(BinaryOps.MUL, (self.buf0,self.buf1,), None) + op1 = LazyOp(ReduceOps.SUM, (op0,), (1,)) + op2 = LazyOp(BinaryOps.ADD, (op1, op1,), None) + info = get_lazyop_info(op2) + self.assertEqual(info.flops, 9) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/unit/test_helpers.py b/tinygrad_repo/test/unit/test_helpers.py new file mode 100644 index 0000000..60d00c7 --- /dev/null +++ b/tinygrad_repo/test/unit/test_helpers.py @@ -0,0 +1,142 @@ +import unittest +import numpy as np +from tinygrad.helpers import Context, ContextVar, DType, dtypes, merge_dicts, strip_parens, prod +from tinygrad.shape.symbolic import Variable, NumNode + +VARIABLE = ContextVar("VARIABLE", 0) + +class TestContextVars(unittest.TestCase): + # Ensuring that the test does not modify variables outside the tests. + ctx = Context() + def setUp(self): TestContextVars.ctx.__enter__() + def tearDown(self): TestContextVars.ctx.__exit__() + + def test_initial_value_is_set(self): + _TMP = ContextVar("_TMP", 5) + self.assertEqual(_TMP.value, 5) + + def test_multiple_creation_ignored(self): + _TMP2 = ContextVar("_TMP2", 1) + _TMP2 = ContextVar("_TMP2", 2) + self.assertEqual(_TMP2.value, 1) + + def test_new_var_inside_context(self): + # Creating a _new_ variable inside a context should not have any effect on its scope (?) 
+ with Context(VARIABLE=1): + _TMP3 = ContextVar("_TMP3", 1) + _TMP3 = ContextVar("_TMP3", 2) + self.assertEqual(_TMP3.value, 1) + + def test_value_accross_modules(self): + # Mocking module import by invoking the code but not in our globals(). + exec('from tinygrad.helpers import ContextVar;C = ContextVar("C", 13)', {}) # pylint:disable=exec-used + # It should not matter that the first creation was in another module. + C = ContextVar("C", 0) + self.assertEqual(C.value, 13) + + def test_assignment_across_modules(self): + B = ContextVar("B", 1) + # local assignment + B.value = 2 + self.assertEqual(B.value, 2) + # Assignment in another module. + exec('from tinygrad.helpers import ContextVar;B = ContextVar("B", 0);B.value = 3;', {}) # pylint:disable=exec-used + # Assignment in another module should affect this one as well. + self.assertEqual(B.value, 3) + + def test_context_assignment(self): + with Context(VARIABLE=1): + self.assertEqual(VARIABLE.value, 1) + self.assertEqual(VARIABLE.value, 0) + + def test_unknown_param_to_context(self): + with self.assertRaises(KeyError): + with Context(SOMETHING_ELSE=1): + pass + + def test_inside_context_assignment(self): + with Context(VARIABLE=4): + # What you can and cannot do inside a context. + # 1. This type of statement has no effect. + VARIABLE = ContextVar("VARIABLE", 0) + self.assertTrue(VARIABLE >= 4, "ContextVars inside contextmanager may not set a new value") + + # 2. The call syntax however has a local effect. + VARIABLE.value = 13 + self.assertTrue(VARIABLE.value == 13, "Call syntax however works inside a contextmanager.") + + # Related to 2. above. Note that VARIABLE is back to 0 again as expected. + self.assertEqual(VARIABLE.value, 0) + + def test_new_var_inside_context_other_module(self): + with Context(VARIABLE=1): + _NEW2 = ContextVar("_NEW2", 0) + _NEW2 = ContextVar("_NEW2", 1) + self.assertEqual(_NEW2.value, 0) + + code = """\ +from tinygrad.helpers import Context, ContextVar +with Context(VARIABLE=1): + _NEW3 = ContextVar("_NEW3", 0)""" + exec(code, {}) # pylint:disable=exec-used + # While _NEW3 was created in an outside scope it should still work the same as above. + _NEW3 = ContextVar("_NEW3", 1) + self.assertEqual(_NEW3.value, 0) + + def test_nested_context(self): + with Context(VARIABLE=1): + with Context(VARIABLE=2): + with Context(VARIABLE=3): + self.assertEqual(VARIABLE.value, 3) + self.assertEqual(VARIABLE.value, 2) + self.assertEqual(VARIABLE.value, 1) + self.assertEqual(VARIABLE.value, 0) + + def test_decorator(self): + @Context(VARIABLE=1, DEBUG=4) + def test(): + self.assertEqual(VARIABLE.value, 1) + + self.assertEqual(VARIABLE.value, 0) + test() + self.assertEqual(VARIABLE.value, 0) + + def test_context_exit_reverts_updated_values(self): + D = ContextVar("D", 1) + D.value = 2 + with Context(D=3): + ... + assert D.value == 2, f"Expected D to be 2, but was {D.value}. Indicates that Context.__exit__ did not restore to the correct value." 
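
The ContextVar tests above pin down the intended semantics: the first creation of a given name wins, Context(...) temporarily overrides a variable by keyword (raising KeyError for unknown names), and the previous value is restored when the context exits. Below is a minimal sketch of that usage, assuming the helpers behave as asserted above; MYVAR is a made-up variable name, not one defined by tinygrad.

from tinygrad.helpers import Context, ContextVar

MYVAR = ContextVar("MYVAR", 0)   # made-up name; 0 is the default value
assert MYVAR.value == 0          # default until overridden

with Context(MYVAR=2):           # keyword must name an existing ContextVar, else KeyError
  assert MYVAR.value == 2        # the override is visible inside the context
assert MYVAR.value == 0          # restored once the context exits
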
+ +class TestMergeDicts(unittest.TestCase): + def test_merge_dicts(self): + a = {"a": 1, "b": 2} + b = {"a": 1, "c": 3} + c = {} + d = {"a": 2, "b": 2} + assert merge_dicts([a, b]) == {"a": 1, "b": 2, "c": 3} + assert merge_dicts([a, c]) == a + assert merge_dicts([a, b, c]) == {"a": 1, "b": 2, "c": 3} + with self.assertRaises(AssertionError): + merge_dicts([a, d]) + +class TestDtypes(unittest.TestCase): + def test_dtypes_fields(self): + fields = dtypes.fields() + self.assertTrue(all(isinstance(value, DType) for value in fields.values())) + self.assertTrue(all(issubclass(value.np, np.generic) for value in fields.values() if value.np is not None)) + +class TestStripParens(unittest.TestCase): + def test_simple(self): self.assertEqual("1+2", strip_parens("(1+2)")) + def test_nested(self): self.assertEqual("1+(2+3)", strip_parens("(1+(2+3))")) + def test_casted_no_strip(self): self.assertEqual("(int)(1+2)", strip_parens("(int)(1+2)")) + +class TestProd(unittest.TestCase): + def test_empty(self): self.assertEqual(1, prod(tuple())) + def test_ints(self): self.assertEqual(30, prod((2, 3, 5))) + def test_variable(self): self.assertEqual("(a*12)", prod((Variable("a", 1, 5), 3, 4)).render()) + def test_variable_order(self): self.assertEqual("(a*12)", prod((3, 4, Variable("a", 1, 5))).render()) + def test_num_nodes(self): self.assertEqual(NumNode(6), prod((NumNode(2), NumNode(3)))) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tinygrad_repo/test/unit/test_shapetracker.py b/tinygrad_repo/test/unit/test_shapetracker.py new file mode 100644 index 0000000..3786725 --- /dev/null +++ b/tinygrad_repo/test/unit/test_shapetracker.py @@ -0,0 +1,663 @@ +#!/usr/bin/env python +import unittest +import numpy as np +from tinygrad.helpers import prod, DEBUG +from tinygrad.shape.shapetracker import ShapeTracker, View, get_contraction +from tinygrad.shape.symbolic import Variable +from itertools import product + +def shapetracker_getitem(st, val): + locals = {"idx": val, "valid": 1} + idx, valid = st.expr_node() + exec(f"valid={valid.render()};idx={idx.render()}", None, locals) + return locals["idx"] if locals["valid"] else -1 + +class CheckingShapeTracker: + def __init__(self, shape): + self.st = ShapeTracker.from_shape(shape) + self.t = np.arange(prod(shape), dtype=np.int32).reshape(shape) + + @property + def shape(self): + return self.t.shape + + def simplify(self): + self.st = self.st.simplify() + return self + + def reshape(self, new_shape): + self.st = self.st.reshape(new_shape) + self.t = self.t.reshape(new_shape) + return self + + def permute(self, axis): + self.st = self.st.permute(axis) + self.t = np.transpose(self.t, axis) + return self + + def expand(self, new_shape): + self.st = self.st.expand(new_shape) + self.t = np.broadcast_to(self.t, new_shape) + return self + + def flip(self, axis): + self.st = self.st.stride(tuple(-1 if i in axis else 1 for i in range(len(self.shape)))) + self.t = np.flip(self.t, axis) + return self + + def shrink(self, arg): + self.st = self.st.shrink(arg) + self.t = self.t[tuple([slice(x[0], x[1]) for x in arg])] + return self + + def pad(self, arg): + self.st = self.st.pad(arg) + self.t = np.pad(self.t, arg, constant_values=-1) + return self + + def stride(self, arg): + self.st = self.st.stride(arg) + self.t = self.t[tuple([slice(None, None, x) for x in arg])] + return self + + def __getitem__(self, val): + return self.t.flatten()[val] + + @property + def views(self): return self.st.views + + @property + def contiguous(self): return 
self.st.contiguous + + def assert_same(self): + x = [shapetracker_getitem(self.st, i) for i in range(prod(self.st.shape))] + y = [self[i] for i in range(prod(self.shape))] + idx, valid = self.st.expr_node() + if DEBUG >= 1: print(x, y, self.st.shape, self.shape, idx.render(), valid.render(), self.st) + assert self.st.shape == self.shape + assert x == y, f"mismatch shapetracker:{x} real:{y}" + +class TestRealIssues(unittest.TestCase): + def test_reshape_doesnt_multiview(self): + self.st = ShapeTracker((View.create((256, 256, 2, 2, 2, 2, 2, 256, 8, 2), (0, 8, 0, 4, 0, 0, 2, 16384, 2048, 1), 0, None),)) + self.st.reshape((128, 2, 256, 2, 2, 2, 2, 2, 256, 8, 2)) + assert len(self.st.views) == 1 + +class TestRealDoesntSimplify(unittest.TestCase): + def tearDown(self): + st = self.st.real_strides() + print(st) + self.st = self.st.simplify() + assert len(self.st.views) != 1 + assert None in st + + def test_1(self): + self.st = ShapeTracker(( + View.create((8, 3, 1, 2, 11, 1), (33, 11, 0, 0, 1, 0), 0, None), + View.create((8, 6, 11), (66, 11, 1), 0, None))) + assert self.st.real_strides() == (33, None, 1) + + def test_2(self): + self.st = ShapeTracker(( + View.create((2, 2, 4, 3, 3), (72, 9, 18, -3, -1), 8, None), + View.create((4, 4, 3, 3), (36, 9, 3, 1), 0, None))) + assert self.st.real_strides() == (None, 18, -3, -1) + +class TestRealStrides(unittest.TestCase): + def test_1(self): + self.st = ShapeTracker(( + View.create((2048,), (1,), 0, ((0, 512),)), + View.create((16, 32, 4), (128, 4, 1), 0, None))) + st = self.st.real_strides() + print(self.st, st) + assert st == (None, 4, 1) + +class TestRealSimplifies(unittest.TestCase): + def tearDown(self): + st = self.st.real_strides() + self.st = self.st.simplify() + assert len(self.st.views) == 1 + print(self.st.views[-1].strides, st) + assert self.st.views[-1].strides == st + + def test_1(self): + self.st = ShapeTracker(( + View.create((1, 3, 2, 11, 4, 28), (0, 308, 0, 28, 0, 1), 0, None), + View.create((1, 3, 2, 11, 26, 1, 1, 3), (0, 2464, 0, 112, 1, 0, 0, 29), 0, None))) + + def test_2(self): + self.st = ShapeTracker(( + View.create((8, 3, 3, 11, 2, 28), (924, 308, 0, 28, 0, 1), 0, None), + View.create((8, 1, 6, 10, 28, 3, 2, 1), (5544, 0, 0, 56, 1, 1848, 672, 0), 0, None))) + +class TestIndexExpressions2d(unittest.TestCase): + + def setUp(self): + shapes = [(30, 5), (15, 10), (15, 1), (5, 10), (5, 1)] # Make sure dim0 is a multiple of 5, one of the tests divides this dimension by 5 + offsets = [0, 1, 15, 28, 10000] + self.sts = [ShapeTracker((View.create(base_shape, offset=offset),)) for base_shape in shapes for offset in offsets] + self.offset = [Variable.num(offset) for base_shape in shapes for offset in offsets] + self.shapes = [shape for shape in shapes for offset in offsets] + self.node_exprs = [] + self.idxs_exprs = [] + + def tearDown(self): + for st, offset, shape, node_expr, idxs_expr in zip(self.sts, self.offset, self.shapes, self.node_exprs, self.idxs_exprs): + numel = prod(shape) + assert node_expr(self.default_idx(st.shape)) == st.expr_node()[0] + assert node_expr(self.default_idx(st.shape)) == st.expr_node(None)[0] + assert node_expr(self.default_idx(st.shape)) == st.expr_node('idx')[0] + self.check_bounds(node_expr(self.default_idx(st.shape)), offset, numel) + for idx in [(0, numel-1), (7, 203), (2, 5), (0, 0), (numel, numel), (0, numel), (0, numel+1), (numel+100, numel+100)]: + idx = Variable("idx", idx[0], idx[1]) + assert node_expr(idx) == st.expr_node(idx)[0] + self.check_bounds(node_expr(idx), offset, numel) + + assert 
idxs_expr(self.default_idxs(st.shape)) == st.expr_idxs()[0] + assert idxs_expr(self.default_idxs(st.shape)) == st.expr_idxs(None)[0] + self.check_bounds(idxs_expr(self.default_idxs(st.shape)), offset, numel) + idx0s = [(0,0), (0, min(1, st.shape[0]-1)), (0, st.shape[0]-1), (min(3, st.shape[0]-1), min(6, st.shape[0]-1)), (st.shape[0]-1, st.shape[0]-1)] + idx1s = [(0,0), (0, min(1, st.shape[1]-1)), (0, st.shape[1]-1), (min(3, st.shape[1]-1), min(6, st.shape[1]-1)), (st.shape[1]-1, st.shape[1]-1)] + idx2s = [(0,0), (0, min(1, st.shape[2]-1)), (0, st.shape[2]-1), (min(3, st.shape[2]-1), min(6, st.shape[2]-1)), (st.shape[2]-1, st.shape[2]-1)] if len(st.shape) == 3 else [None for _ in idx0s] + for idx0, idx1, idx2 in product(idx0s, idx1s, idx2s): + idxs = [Variable(f"idx{i}", idx[0], idx[1]) for i, idx in enumerate((idx0, idx1, idx2)) if idx is not None] + assert idxs_expr(idxs) == st.expr_idxs(idxs)[0] + self.check_bounds(idxs_expr(idxs), offset, numel) + + def default_idx(self, shape): + return Variable("idx", 0, prod(shape)-1) + + def default_idxs(self, shape): + return [Variable(f"idx{i}", 0, d-1) for i,d in enumerate(shape)] + + def check_bounds(self, expr, offset, numel): + assert expr.min >= offset + assert expr.max <= offset + numel - 1 + + def test_noop(self): + for st, base_shape, offset in zip(self.sts, self.shapes, self.offset): + self.node_exprs.append(lambda idx, base_shape=base_shape, offset=offset: idx%prod(base_shape) + offset) + self.idxs_exprs.append(lambda idxs, base_shape=base_shape, offset=offset: idxs[0]*base_shape[1] + idxs[1] + offset) + + def test_permute(self): + new_st = [] + for st, base_shape, offset in zip(self.sts, self.shapes, self.offset): + st = st.permute((1, 0)) + self.node_exprs.append(lambda idx, base_shape=base_shape, offset=offset: idx%base_shape[0]*base_shape[1] + idx//base_shape[0]%base_shape[1] + offset) + self.idxs_exprs.append(lambda idxs, base_shape=base_shape, offset=offset: idxs[0] + idxs[1]*base_shape[1] + offset) + new_st.append(st) + self.sts = new_st + + def test_reshape(self): + new_st = [] + for st, base_shape, offset in zip(self.sts, self.shapes, self.offset): + st = st.reshape((base_shape[0], 1, base_shape[1])) + self.node_exprs.append(lambda idx, base_shape=base_shape, offset=offset: idx%prod(base_shape) + offset) + self.idxs_exprs.append(lambda idxs, base_shape=base_shape, offset=offset: idxs[0]*base_shape[1] + idxs[2] + offset) + new_st.append(st) + self.sts = new_st + + def test_reshape_expand(self): + new_st = [] + for st, base_shape, offset in zip(self.sts, self.shapes, self.offset): + st = st.reshape((base_shape[0], 1, base_shape[1])) + st = st.expand((base_shape[0], base_shape[1], base_shape[1])) + self.node_exprs.append(lambda idx, base_shape=base_shape, offset=offset: idx//(base_shape[1]*base_shape[1])%base_shape[0]*base_shape[1] + idx%base_shape[1] + offset) + self.idxs_exprs.append(lambda idxs, base_shape=base_shape, offset=offset: idxs[0]*base_shape[1] + idxs[2] + offset) + new_st.append(st) + self.sts = new_st + + def test_permute_reshape_1(self): # This tests multiple views + new_st = [] + for st, base_shape, offset in zip(self.sts, self.shapes, self.offset): + st = st.permute((1, 0)) + st = st.reshape((base_shape[0]//5, 1, base_shape[1]*5)) + self.node_exprs.append(lambda idx, base_shape=base_shape, offset=offset: idx%prod(base_shape)%base_shape[0]*base_shape[1] + idx//base_shape[0]%base_shape[1] + offset) + self.idxs_exprs.append(lambda idxs, base_shape=base_shape, offset=offset: 
(idxs[0]*(base_shape[1]*5)+idxs[2])%base_shape[0]*base_shape[1] + (idxs[0]*(base_shape[1]*5)+idxs[2])//base_shape[0] + offset) + new_st.append(st) + self.sts = new_st + + def test_permute_reshape_2(self): + new_st = [] + for st, base_shape, offset in zip(self.sts, self.shapes, self.offset): + st = st.permute((1, 0)) + st = st.reshape((1, base_shape[0]//5, base_shape[1]*5)) + self.node_exprs.append(lambda idx, base_shape=base_shape, offset=offset: idx%prod(base_shape)%base_shape[0]*base_shape[1] + idx//base_shape[0]%base_shape[1] + offset) + self.idxs_exprs.append(lambda idxs, base_shape=base_shape, offset=offset: (idxs[1]*(base_shape[1]*5)+idxs[2])%base_shape[0]*base_shape[1] + (idxs[1]*(base_shape[1]*5)+idxs[2])//base_shape[0] + offset) + new_st.append(st) + self.sts = new_st + +class TestSimplifyingShapeTracker(unittest.TestCase): + def setUp(self): + self.st = CheckingShapeTracker((1, 10)) + + def tearDown(self): + self.st.assert_same() + + # multiview simplify + def test_expand_contract_simple(self): + self.st = self.st.expand((10, 10)) + self.st = self.st.reshape((100,)) + print(self.st.views) + assert(len(self.st.views) == 2) + self.st = self.st.reshape((10, 10)) + print(self.st.views) + + self.st = self.st.simplify() + print(self.st.views) + assert(len(self.st.views) == 1) + + # multiview simplify + def test_expand_contract_different_shape(self): + self.st.expand((10, 10)) + self.st.reshape((100,)) + print(self.st.views) + assert(len(self.st.views) == 2) + self.st.reshape((2, 5, 2, 5)) + print(self.st.views) + + self.st = self.st.simplify() + print(self.st.views) + assert(len(self.st.views) == 1) + + # multiview simplify + def test_expand_contract_still_complex(self): + self.st.expand((10, 10)) + self.st.reshape((100,)) + print(self.st.views) + assert(len(self.st.views) == 2) + self.st.reshape((5, 20)) + + self.st = self.st.simplify() + print(self.st.views) + assert(len(self.st.views) == 2) + +# Tensor.zeros(2, 4).permute(1,0).reshape(2, 4) +# (d1*4 + d0%4), d1=x//4, d0=x%4 = ((x//4)*4) + (x%4)%4 + +class TestComplexShapeTracker(unittest.TestCase): + def test_add_1s(self): + self.st = CheckingShapeTracker((4, 4)) + self.st.permute((1,0)) + self.st.reshape((1,4,1,4,1)) + assert not self.st.contiguous + self.st.permute((0,3,2,1,4)) + assert self.st.contiguous + + def test_permute_1s_simple(self): + self.st = CheckingShapeTracker((1, 16, 9,9)) + self.st.permute((1,0,2,3)) + assert self.st.contiguous + self.st = CheckingShapeTracker((2, 16, 9,9)) + self.st.permute((1,0,2,3)) + assert not self.st.contiguous + + def test_remove_1s_simple(self): + self.st = CheckingShapeTracker((1, 16, 1, 1)) + self.st.reshape((16,)) + assert self.st.contiguous + + def test_remove_1s(self): + self.st = CheckingShapeTracker((1, 4, 1, 4, 1)) + self.st.permute((0,3,2,1,4)) + self.st.reshape((4,4)) + assert not self.st.contiguous + self.st.permute((1,0)) + assert self.st.contiguous + + def test_permute_reshape(self): + self.st = CheckingShapeTracker((4, 4)) + self.st.permute((1,0)) + self.st.reshape((2, 2, 2, 2)) + # TODO: should also be tested by test_super_complex + assert len(self.st.views) == 1 + + def test_factorize_split(self): + self.st = CheckingShapeTracker((4, 4)) + self.st.permute((1,0)) + self.st.reshape((2, 2, 2, 2)) + self.st.permute((2,3,0,1)) + assert self.st.contiguous + + def test_factorize_combine(self): + self.st = CheckingShapeTracker((4, 4, 4)) + self.st.permute((2, 0, 1)) + self.st.reshape((4, 16)) + self.st.permute((1, 0)) + assert self.st.contiguous + + def 
test_factorize_combine_add_ones(self): + self.st = CheckingShapeTracker((4, 4, 4)) + self.st.permute((2, 0, 1)) + self.st.reshape((4, 16, 1, 1)) + self.st.permute((1, 0, 2, 3)) + assert self.st.contiguous + + def test_fancy_factorize(self): + self.st = CheckingShapeTracker((32, 3, 3, 1)) + self.st.reshape((8, 4, 3, 3)) + assert len(self.st.views) == 1 + + def test_super_complex_2_fail(self): + self.st = CheckingShapeTracker((4, 4, 4)) + self.st.permute((2, 0, 1)) + self.st.reshape((16, 4)) + assert len(self.st.views) != 1 + + def test_work(self): + self.st = CheckingShapeTracker((64, 1024, 4)) + self.st.reshape((1, 64, 128, 32)) + self.st.permute((0, 3, 1, 2)) + self.st.reshape((1, 32, 1, 64, 128)) + self.st.permute((0, 3, 4, 1, 2)) + assert self.st.contiguous + + def test_work2(self): + self.st = CheckingShapeTracker((64, 1024, 4)) + self.st.reshape((1, 64, 128, 32)) + self.st.permute((0, 3, 1, 2)) + self.st.reshape((1, 1, 32, 64, 128)) + self.st.permute((0, 3, 4, 1, 2)) + self.st.reshape((64, 1024, 4)) + print(self.st.views) + assert self.st.contiguous + +class TestSingleShapeTracker(unittest.TestCase): + def setUp(self): + self.st = CheckingShapeTracker((7,4)) + + def tearDown(self): + self.st.assert_same() + + def test_reshape(self): + self.st.reshape((7,1,4)) + assert self.st.contiguous + + def test_permute(self): + self.st.permute((1,0)) + assert not self.st.contiguous + + def test_shrink(self): + self.st.shrink(((1,2), (0,4))) + assert not self.st.contiguous + + def test_double_permute(self): + self.st.permute((1,0)) + self.st.permute((1,0)) + assert self.st.contiguous + + def test_reshape_permute(self): + self.st.reshape((7,1,4)) + self.st.permute((0,1,2)) + assert self.st.contiguous + + def test_reshape_permute_yes(self): + self.st.reshape((7,1,4)) + self.st.permute((0,2,1)) + assert self.st.contiguous + + def test_reshape_permute_no(self): + self.st.reshape((4,7)) + self.st.permute((1,0)) + assert not self.st.contiguous + +class TestShapeTrackerFuzzFailures(unittest.TestCase): + def setUp(self): + self.st = CheckingShapeTracker((3,3,3)) + def tearDown(self): + self.st.assert_same() + @unittest.skip("simplify doesn't work in this case") + def test_case_1(self): + self.st.shrink(((1, 2), (1, 3), (1, 3))) + self.st.reshape((1, 4)) + self.st.shrink(((0, 1), (1, 3))) + print(self.st.st) + self.st = self.st.simplify() + print(self.st.st) + def test_case_2(self): + self.st.stride( (1, 1, -2) ) + self.st.reshape( (3, 6) ) + self.st.shrink( ((1, 2), (1, 5)) ) + self.st.stride( (1, -1) ) + def test_case_3(self): + self.st.shrink( ((0, 2), (0, 2), (0, 1)) ) + self.st.permute( (1, 0, 2) ) + self.st.reshape( (4,) ) + self.st.shrink( ((0, 3),) ) + self.st.stride( (-1,) ) + def test_case_4(self): + self.st.reshape( (3, 3, 3, 1) ) + self.st.pad( ((0, 0), (0, 0), (0, 0), (1, 1)) ) + self.st.shrink( ((0, 2), (1, 2), (0, 2), (0, 1)) ) + self.st.expand( (2, 1, 2, 3) ) + +class TestMaskedShapeTracker(unittest.TestCase): + def test_pad_1x1(self): + self.st = CheckingShapeTracker((1,1)) + self.st.pad(((1,1), (1,1))) + self.st.assert_same() + + def test_pad_2x2(self): + self.st = CheckingShapeTracker((2,2)) + self.st.pad(((1,1), (1,1))) + self.st.assert_same() + +class TestShapeTracker(unittest.TestCase): + def setUp(self): + self.st = CheckingShapeTracker((7,4)) + self.apply = lambda fxn: [fxn(x) for x in [self.st]] + + def tearDown(self): + self.st.assert_same() + + def test_noop(self): + pass + + def test_simple_split(self): + self.test_permute() + self.apply(lambda x: x.reshape((prod(self.st.shape), 
))) + + def test_simple_pad(self): + self.st.pad(((1,1), (1,1))) + + def test_pad_shrink(self): + self.st.pad(((1,1), (1,1))) + self.st.shrink(((0,4), (0,4))) + + def test_pad_one_sided(self): + self.st.pad(((0,1), (0,0))) + + def test_pad_reshape(self): + self.st.pad(((0,1), (0,0))) + self.st.reshape((8*4,)) + + def test_pad_pad(self): + self.st.pad(((1,1), (1,1))) + self.st.pad(((1,1), (1,1))) + + def test_pad_permute(self): + self.st.pad(((1,1), (2,2))) + self.st.permute((1,0)) + + def test_pad_expand(self): + self.st.reshape((7,4,1)) + self.st.pad(((1,1), (1,1), (0,0))) + self.st.expand((9,6,4)) + + def test_pad_expand_alt(self): + self.st.pad(((1,1), (1,1))) + self.st.reshape((9,6,1)) + self.st.expand((9,6,4)) + + def test_pad_stride(self): + self.st.pad(((1,4), (1,3))) + self.st.stride((2,2)) + + def test_pad_stride_neg(self): + self.st.pad(((1,2), (1,0))) + self.st.stride((-1,-1)) + + def test_pad_stride_both(self): + self.st.pad(((1,2), (1,0))) + self.st.stride((-2,-2)) + + def test_shrink_pad(self): + self.st.shrink(((0,4), (0,4))) + self.st.pad(((1,1), (1,1))) + + def test_reshape(self): + new_shape = self.st.shape[::-1] + self.apply(lambda x: x.reshape(new_shape)) + + def test_permute(self): + if len(self.st.shape) == 2: self.apply(lambda x: x.permute((1,0))) + elif len(self.st.shape) == 3: self.apply(lambda x: x.permute((2,0,1))) + + def test_reshape_with_1(self): + new_shape = (self.st.shape[0], 1, self.st.shape[1]) + self.apply(lambda x: x.reshape(new_shape)) + + def test_expand(self): + self.test_reshape_with_1() + new_shape = list(self.st.shape) + new_shape[1] = 2 + self.apply(lambda x: x.expand(tuple(new_shape))) + + def test_flip_0(self): + self.apply(lambda x: x.flip((0,))) + + def test_flip_1(self): + self.apply(lambda x: x.flip((1,))) + + def test_flip_01(self): + self.apply(lambda x: x.flip((0,1))) + + def test_slice_0(self): + self.apply(lambda x: x.shrink(((1, x.shape[0]), (0, x.shape[1])))) + + def test_slice_1(self): + self.apply(lambda x: x.shrink(((0, x.shape[0]), (1, x.shape[1])))) + + def test_slice_1c1(self): + self.apply(lambda x: x.shrink(((0, 1), (0, 1)))) + + def test_slice_1c2(self): + self.apply(lambda x: x.shrink(((1, 2), (1, 2)))) + + def test_double_permute(self): + self.apply(lambda x: x.permute((1, 0))) + self.apply(lambda x: x.permute((1, 0))) + + def test_slice_permute(self): + self.apply(lambda x: x.shrink(((0, 2), (2, 4)))) + self.apply(lambda x: x.permute((1, 0))) + + def test_slice_expand(self): + self.apply(lambda x: x.shrink(((0, 2), (3, 4)))) + self.apply(lambda x: x.expand((2, 10))) + + def test_double_stride(self): + self.apply(lambda x: x.stride((1, 2))) + self.apply(lambda x: x.stride((2, 1))) + + def test_stride(self): self.apply(lambda x: x.stride((2,1))) + def test_stride_int(self): self.apply(lambda x: x.stride((1,2))) + def test_stride_2(self): self.apply(lambda x: x.stride((2,2))) + def test_stride_n(self): self.apply(lambda x: x.stride((-2,1))) + def test_stride_int_n(self): self.apply(lambda x: x.stride((-1,2))) + def test_stride_2_n(self): self.apply(lambda x: x.stride((-2,-2))) + + def test_reshape_then_permute(self): + self.test_reshape() + self.test_permute() + + def test_reshape_then_expand(self): + self.test_reshape() + self.test_expand() + + def test_permute_then_reshape(self): + self.test_permute() + self.test_reshape() + + def test_expand_then_reshape(self): + self.test_expand() + self.test_reshape() + + def test_combo(self): + self.test_permute() + self.test_reshape() + self.test_slice_1() + self.test_expand() + 
self.test_permute() + +class TestGetContraction(unittest.TestCase): + def test_contraction(self): + r = get_contraction((1,2,3,4), (2,3,4)) + self.assertEqual(r, [[0, 1], [2], [3]]) + + r = get_contraction((2,1,3,4), (2,3,4)) + self.assertEqual(r, [[0], [1, 2], [3]]) + + r = get_contraction((1,2,3,1,4), (1,2,3,4)) + self.assertEqual(r, [[0], [1], [2], [3, 4]]) + + r = get_contraction((1,2,3,1,4,1,1), (2,3,4)) + self.assertEqual(r, [[0, 1], [2], [3, 4, 5, 6]]) + + r = get_contraction((1,2,3,4), (1,2,3*4)) + self.assertEqual(r, [[0], [1], [2, 3]]) + + r = get_contraction((1,2,3,4), (2,1,3,4)) + self.assertEqual(r, [[0, 1], [], [2], [3]]) + + r = get_contraction((1,2,3,4), (1,1,2*3*4,1)) + self.assertEqual(r, [[0], [], [1,2,3], []]) + + r = get_contraction((2,1,3,4), (1,2,3,4)) + self.assertEqual(r, [[], [0], [1, 2], [3]]) + + r = get_contraction((1,2,3,4), (2*3*4,1,1,1)) + self.assertEqual(r, [[0, 1, 2, 3], [], [], []]) + + r = get_contraction((4,4,4,4), (16,1,16)) + self.assertEqual(r, [[0, 1], [], [2, 3]]) + + r = get_contraction((1,2,3,4,1,1,1), (2,3,4)) + self.assertEqual(r, [[0, 1], [2], [3, 4, 5, 6]]) + + r = get_contraction((1,2,3,4), (1,2,3,4,1)) + self.assertEqual(r, [[0], [1], [2], [3], []]) + + r = get_contraction((14,1,384,14,1,1,1,1), (1,14,384,14)) + self.assertEqual(r, [[], [0], [1,2], [3,4,5,6,7]]) + + r = get_contraction((14,1,384,1,14,1,1,1,1), (1,14,384,14)) + self.assertEqual(r, [[], [0], [1,2], [3,4,5,6,7,8]]) + + r = get_contraction((512, 512), (1, 1, 512, 1, 1, 1, 1, 512)) + self.assertEqual(r, [[], [], [0], [], [], [], [], [1]]) + + r = get_contraction((1,2,3,4), (1,2,6,2)) + self.assertEqual(r, None) + + def test_contraction_ones(self): + r = get_contraction((1,), (1,1,1)) + self.assertEqual(r, [[0], [], []]) + + r = get_contraction((1,1), (1,1,1)) + self.assertEqual(r, [[0], [1], []]) + + r = get_contraction((1,1,1,1), (1,)) + self.assertEqual(r, [[0,1,2,3]]) + + r = get_contraction((1,1,1,1), (1,1)) + self.assertEqual(r, [[0], [1,2,3]]) + + r = get_contraction((1,1,1,1), (1,1,1)) + self.assertEqual(r, [[0], [1], [2,3]]) + + r = get_contraction((1,1,1,1), (1,1,1,1)) + self.assertEqual(r, [[0], [1], [2], [3]]) + +if __name__ == '__main__': + unittest.main() diff --git a/tinygrad_repo/test/unit/test_shm_tensor.py b/tinygrad_repo/test/unit/test_shm_tensor.py new file mode 100644 index 0000000..7708066 --- /dev/null +++ b/tinygrad_repo/test/unit/test_shm_tensor.py @@ -0,0 +1,39 @@ +import unittest +import multiprocessing.shared_memory as shared_memory +from tinygrad.helpers import CI +from tinygrad.runtime.ops_shm import RawShmBuffer +from tinygrad.tensor import Tensor, Device +import numpy as np + +class TestRawShmBuffer(unittest.TestCase): + def test_e2e(self): + t = Tensor.randn(2, 2, 2).realize() + + # copy to shm + shm_name = (s := shared_memory.SharedMemory(create=True, size=t.nbytes())).name + s.close() + t_shm = t.to(f"shm:{shm_name}").realize() + + # copy from shm + t2 = t_shm.to(Device.DEFAULT).realize() + + assert np.allclose(t.numpy(), t2.numpy()) + s.unlink() + + @unittest.skipIf(CI, "CI doesn't like big shared memory") + def test_e2e_big(self): + t = Tensor.randn(2048, 2048, 8).realize() + + # copy to shm + shm_name = (s := shared_memory.SharedMemory(create=True, size=t.nbytes())).name + s.close() + t_shm = t.to(f"shm:{shm_name}").realize() + + # copy from shm + t2 = t_shm.to(Device.DEFAULT).realize() + + assert np.allclose(t.numpy(), t2.numpy()) + s.unlink() + +if __name__ == "__main__": + unittest.main() diff --git 
a/tinygrad_repo/test/unit/test_symbolic.py b/tinygrad_repo/test/unit/test_symbolic.py new file mode 100644 index 0000000..4f97a93 --- /dev/null +++ b/tinygrad_repo/test/unit/test_symbolic.py @@ -0,0 +1,448 @@ +#!/usr/bin/env python +import unittest +from tinygrad.shape.symbolic import Node, MulNode, SumNode, Variable, NumNode, LtNode, sym_render, sym_infer, create_rednode + +class TestSymbolic(unittest.TestCase): + def helper_test_variable(self, v, n, m, s): + self.assertEqual(v.render(), s) + self.assertEqual(v.min, n) + self.assertEqual(v.max, m) + + def test_ge(self): + self.helper_test_variable(Variable("a", 3, 8)>=77, 0, 0, "0") + self.helper_test_variable(Variable("a", 3, 8)>=9, 0, 0, "0") + self.helper_test_variable(Variable("a", 3, 8)>=8, 0, 1, "((a*-1)<-7)") + self.helper_test_variable(Variable("a", 3, 8)>=4, 0, 1, "((a*-1)<-3)") + self.helper_test_variable(Variable("a", 3, 8)>=3, 1, 1, "1") + self.helper_test_variable(Variable("a", 3, 8)>=2, 1, 1, "1") + + def test_lt(self): + self.helper_test_variable(Variable("a", 3, 8)<77, 1, 1, "1") + self.helper_test_variable(Variable("a", 3, 8)<9, 1, 1, "1") + self.helper_test_variable(Variable("a", 3, 8)<8, 0, 1, "(a<8)") + self.helper_test_variable(Variable("a", 3, 8)<4, 0, 1, "(a<4)") + self.helper_test_variable(Variable("a", 3, 8)<3, 0, 0, "0") + self.helper_test_variable(Variable("a", 3, 8)<2, 0, 0, "0") + + def test_ge_divides(self): + expr = (Variable("idx", 0, 511)*4 + Variable("FLOAT4_INDEX", 0, 3)) < 512 + self.helper_test_variable(expr, 0, 1, "(idx<128)") + + def test_ge_divides_and(self): + expr = Variable.ands([(Variable("idx1", 0, 511)*4 + Variable("FLOAT4_INDEX", 0, 3)) < 512, + (Variable("idx2", 0, 511)*4 + Variable("FLOAT4_INDEX", 0, 3)) < 512]) + self.helper_test_variable(expr, 0, 1, "((idx1<128) and (idx2<128))") + expr = Variable.ands([(Variable("idx1", 0, 511)*4 + Variable("FLOAT4_INDEX", 0, 3)) < 512, + (Variable("idx2", 0, 511)*4 + Variable("FLOAT8_INDEX", 0, 7)) < 512]) + self.helper_test_variable(expr//4, 0, 1, "((((FLOAT8_INDEX//4)+idx2)<128) and ((idx1//4)<32))") + + def test_lt_factors(self): + expr = Variable.ands([(Variable("idx1", 0, 511)*4 + Variable("FLOAT4_INDEX", 0, 256)) < 512]) + self.helper_test_variable(expr, 0, 1, "(((idx1*4)+FLOAT4_INDEX)<512)") + + def test_div_becomes_num(self): + assert isinstance(Variable("a", 2, 3)//2, NumNode) + + def test_var_becomes_num(self): + assert isinstance(Variable("a", 2, 2), NumNode) + + def test_equality(self): + idx1 = Variable("idx1", 0, 3) + idx2 = Variable("idx2", 0, 3) + assert idx1 == idx1 + assert idx1 != idx2 + assert idx1*4 == idx1*4 + assert idx1*4 != idx1*3 + assert idx1*4 != idx1+4 + assert idx1*4 != idx2*4 + assert idx1+idx2 == idx1+idx2 + assert idx1+idx2 == idx2+idx1 + assert idx1+idx2 != idx2 + + def test_factorize(self): + a = Variable("a", 0, 8) + self.helper_test_variable(a*2+a*3, 0, 8*5, "(a*5)") + + def test_factorize_no_mul(self): + a = Variable("a", 0, 8) + self.helper_test_variable(a+a*3, 0, 8*4, "(a*4)") + + def test_neg(self): + self.helper_test_variable(-Variable("a", 0, 8), -8, 0, "(a*-1)") + + def test_add_1(self): + self.helper_test_variable(Variable("a", 0, 8)+1, 1, 9, "(1+a)") + + def test_add_num_1(self): + self.helper_test_variable(Variable("a", 0, 8)+Variable.num(1), 1, 9, "(1+a)") + + def test_sub_1(self): + self.helper_test_variable(Variable("a", 0, 8)-1, -1, 7, "(-1+a)") + + def test_sub_num_1(self): + self.helper_test_variable(Variable("a", 0, 8)-Variable.num(1), -1, 7, "(-1+a)") + + def test_mul_0(self): + 
self.helper_test_variable(Variable("a", 0, 8)*0, 0, 0, "0") + + def test_mul_1(self): + self.helper_test_variable(Variable("a", 0, 8)*1, 0, 8, "a") + + def test_mul_neg_1(self): + self.helper_test_variable((Variable("a", 0, 2)*-1)//3, -1, 0, "((((a*-1)+3)//3)+-1)") + + def test_mul_2(self): + self.helper_test_variable(Variable("a", 0, 8)*2, 0, 16, "(a*2)") + + def test_div_1(self): + self.helper_test_variable(Variable("a", 0, 8)//1, 0, 8, "a") + + def test_mod_1(self): + self.helper_test_variable(Variable("a", 0, 8)%1, 0, 0, "0") + + def test_add_min_max(self): + self.helper_test_variable(Variable("a", 0, 8) * 2 + 12, 12, 16+12, "((a*2)+12)") + + def test_div_min_max(self): + self.helper_test_variable(Variable("a", 0, 7) // 2, 0, 3, "(a//2)") + + def test_div_neg_min_max(self): + self.helper_test_variable(Variable("a", 0, 7) // -2, -3, 0, "((a//2)*-1)") + + def test_sum_div_min_max(self): + self.helper_test_variable(Variable.sum([Variable("a", 0, 7), Variable("b", 0, 3)]) // 2, 0, 5, "((a+b)//2)") + + def test_sum_div_factor(self): + self.helper_test_variable(Variable.sum([Variable("a", 0, 7)*4, Variable("b", 0, 3)*4]) // 2, 0, 20, "((a*2)+(b*2))") + + def test_sum_div_some_factor(self): + self.helper_test_variable(Variable.sum([Variable("a", 0, 7)*5, Variable("b", 0, 3)*4]) // 2, 0, 23, "(((a*5)//2)+(b*2))") + + def test_sum_div_some_partial_factor(self): + self.helper_test_variable(Variable.sum([Variable("a", 0, 7)*6, Variable("b", 0, 7)*6]) // 16, 0, 5, "(((a*3)+(b*3))//8)") + self.helper_test_variable(Variable.sum([Variable.num(16), Variable("a", 0, 7)*6, Variable("b", 0, 7)*6]) // 16, 1, 6, "((((a*3)+(b*3))//8)+1)") + + def test_sum_div_no_factor(self): + self.helper_test_variable(Variable.sum([Variable("a", 0, 7)*5, Variable("b", 0, 3)*5]) // 2, 0, 25, "(((a*5)+(b*5))//2)") + + def test_mod_factor(self): + # NOTE: even though the mod max is 50, it can't know this without knowing about the mul + self.helper_test_variable(Variable.sum([Variable("a", 0, 7)*100, Variable("b", 0, 3)*50]) % 100, 0, 99, "((b*50)%100)") + + def test_mod_to_sub(self): + # This is mod reduction + self.helper_test_variable((1+Variable("a",1,2))%2, 0, 1, (Variable("a",1,2)-1).render()) + + def test_sum_div_const(self): + self.helper_test_variable(Variable.sum([Variable("a", 0, 7)*4, Variable.num(3)]) // 4, 0, 7, "a") + + def test_sum_div_const_big(self): + self.helper_test_variable(Variable.sum([Variable("a", 0, 7)*4, Variable.num(3)]) // 16, 0, 1, "(a//4)") + + def test_sum_lt_fold(self): + self.helper_test_variable(Variable.sum([Variable("a", 0, 7) * 4, Variable("b", 0, 3)]) < 16, 0, 1, "(a<4)") + self.helper_test_variable(Variable.sum([Variable("a", 0, 7) * 4, Variable("b", 0, 4)]) < 16, 0, 1, "(((a*4)+b)<16)") + + def test_mod_mul(self): + self.helper_test_variable((Variable("a", 0, 5)*10)%9, 0, 5, "a") + + def test_mod_mod(self): + self.helper_test_variable((Variable("a", 0, 31)%12)%4, 0, 3, "(a%4)") + self.helper_test_variable(((4*Variable("a", 0, 31)) % 12) % 4, 0, 0, "0") + self.helper_test_variable((Variable("a", 0, 31) % 4) % 12, 0, 3, "(a%4)") + + def test_mul_mul(self): + self.helper_test_variable((Variable("a", 0, 5)*10)*9, 0, 5*10*9, "(a*90)") + + def test_mul_lt(self): + self.helper_test_variable((Variable("a", 0, 5)*4)<13, 0, 1, "(a<4)") + self.helper_test_variable((Variable("a", 0, 5)*4)<16, 0, 1, "(a<4)") + self.helper_test_variable((Variable("a", 0, 5)*4)>11, 0, 1, "((a*-1)<-2)") + self.helper_test_variable((Variable("a", 0, 5)*4)>12, 0, 1, "((a*-1)<-3)") + + def test_div_div(self): + 
self.helper_test_variable((Variable("a", 0, 1800)//10)//9, 0, 20, "(a//90)") + + def test_distribute_mul(self): + self.helper_test_variable(Variable.sum([Variable("a", 0, 3), Variable("b", 0, 5)])*3, 0, 24, "((a*3)+(b*3))") + + def test_mod_mul_sum(self): + self.helper_test_variable(Variable.sum([Variable("b", 0, 2), Variable("a", 0, 5)*10])%9, 0, 7, "(a+b)") + + def test_sum_0(self): + self.helper_test_variable(Variable.sum([Variable("a", 0, 7)]), 0, 7, "a") + + def test_mod_remove(self): + self.helper_test_variable(Variable("a", 0, 6)%100, 0, 6, "a") + + def test_big_mod(self): + # NOTE: we no longer support negative variables + #self.helper_test_variable(Variable("a", -20, 20)%10, -9, 9, "(a%10)") + #self.helper_test_variable(Variable("a", -20, 0)%10, -9, 0, "(a%10)") + #self.helper_test_variable(Variable("a", -20, 1)%10, -9, 1, "(a%10)") + self.helper_test_variable(Variable("a", 0, 20)%10, 0, 9, "(a%10)") + #self.helper_test_variable(Variable("a", -1, 20)%10, -1, 9, "(a%10)") + + def test_gt_remove(self): + self.helper_test_variable(Variable("a", 0, 6) >= 25, 0, 0, "0") + + def test_lt_remove(self): + self.helper_test_variable(Variable("a", 0, 6) < -3, 0, 0, "0") + self.helper_test_variable(Variable("a", 0, 6) < 3, 0, 1, "(a<3)") + self.helper_test_variable(Variable("a", 0, 6) < 8, 1, 1, "1") + + def test_lt_sum_remove(self): + self.helper_test_variable((Variable("a", 0, 6) + 2) < 3, 0, 1, "(a<1)") + + def test_and_fold(self): + self.helper_test_variable(Variable.ands([Variable.num(0), Variable("a", 0, 1)]), 0, 0, "0") + + def test_and_remove(self): + self.helper_test_variable(Variable.ands([Variable.num(1), Variable("a", 0, 1)]), 0, 1, "a") + + def test_mod_factor_negative(self): + self.helper_test_variable(Variable.sum([Variable.num(-29), Variable("a", 0, 10), Variable("b", 0, 10)*28]) % 28, 0, 27, "((27+a)%28)") + self.helper_test_variable(Variable.sum([Variable.num(-29), Variable("a", 0, 100), Variable("b", 0, 10)*28]) % 28, 0, 27, "((27+a)%28)") + + def test_sum_combine_num(self): + self.helper_test_variable(Variable.sum([Variable.num(29), Variable("a", 0, 10), Variable.num(-23)]), 6, 16, "(6+a)") + + def test_sum_num_hoisted_and_factors_cancel_out(self): + self.helper_test_variable(Variable.sum([Variable("a", 0, 1) * -4 + 1, Variable("a", 0, 1) * 4]), 1, 1, "1") + + def test_div_factor(self): + self.helper_test_variable(Variable.sum([Variable.num(-40), Variable("a", 0, 10)*2, Variable("b", 0, 10)*40]) // 40, -1, 9, "(-1+b)") + + def test_mul_div(self): + self.helper_test_variable((Variable("a", 0, 10)*4)//4, 0, 10, "a") + + def test_mul_div_factor_mul(self): + self.helper_test_variable((Variable("a", 0, 10)*8)//4, 0, 20, "(a*2)") + + def test_mul_div_factor_div(self): + self.helper_test_variable((Variable("a", 0, 10)*4)//8, 0, 5, "(a//2)") + + def test_div_remove(self): + self.helper_test_variable(Variable.sum([Variable("idx0", 0, 127)*4, Variable("idx2", 0, 3)])//4, 0, 127, "idx0") + + def test_div_numerator_negative(self): + self.helper_test_variable((Variable("idx", 0, 9)*-10)//11, -9, 0, "((((idx*-10)+99)//11)+-9)") + + def test_div_into_mod(self): + self.helper_test_variable((Variable("idx", 0, 16)*4)%8//4, 0, 1, "(idx%2)") + +class TestSymbolicNumeric(unittest.TestCase): + def helper_test_numeric(self, f): + # TODO: why are the negative tests broken? 
(even if we did support negative variables) + #MIN, MAX = -10, 10 + MIN, MAX = 0, 10 + # one number + for i in range(MIN, MAX): + v = f(Variable.num(i)) + #print(i, f(i), v.min, v.max) + self.assertEqual(v.min, v.max) + self.assertEqual(v.min, f(i)) + for kmin in range(MIN, MAX): + for kmax in range(MIN, MAX): + if kmin > kmax: continue + v = f(Variable("tmp", kmin, kmax)) + values = [f(rv) for rv in range(kmin, kmax+1)] + # the min and max may not be exact + self.assertLessEqual(v.min, min(values)) + self.assertGreaterEqual(v.max, max(values)) + + def test_mod_4(self): self.helper_test_numeric(lambda x: (x%4)) + def test_div_4(self): self.helper_test_numeric(lambda x: (x//4)) + def test_plus_1_div_2(self): self.helper_test_numeric(lambda x: (x+1)//2) + def test_plus_1_mod_2(self): self.helper_test_numeric(lambda x: (x+1)%2) + def test_times_2(self): self.helper_test_numeric(lambda x: x*2) + def test_times_2_plus_3(self): self.helper_test_numeric(lambda x: x*2 + 3) + def test_times_2_plus_3_mod_4(self): self.helper_test_numeric(lambda x: (x*2 + 3)%4) + def test_times_2_plus_3_div_4(self): self.helper_test_numeric(lambda x: (x*2 + 3)//4) + def test_times_2_plus_3_div_4_mod_4(self): self.helper_test_numeric(lambda x: ((x*2 + 3)//4)%4) + +class TestSymbolicVars(unittest.TestCase): + def test_simple(self): + z = NumNode(0) + a = Variable("a", 0, 10) + b = Variable("b", 0, 10) + c = Variable("c", 0, 10) + assert z.vars() == z.vars() == [] + assert a.vars() == a.vars() == [a] + m = MulNode(a, 3) + assert m.vars() == [a] + s = SumNode([a, b, c]) + assert s.vars() == [a, b, c] + + def test_compound(self): + a = Variable("a", 0, 10) + b = Variable("b", 0, 10) + c = Variable("c", 0, 10) + assert (a + b * c).vars() == [a, b, c] + assert (a % 3 + b // 5).vars() == [a, b] + assert (a + b + c - a).vars() == [b, c] + +class TestSymbolicMinMax(unittest.TestCase): + def test_min_max_known(self): + a = Variable("a", 1, 8) + assert max(1, a) == max(a, 1) == a + assert min(1, a) == min(a, 1) == 1 + +class TestSymRender(unittest.TestCase): + def test_sym_render(self): + a = Variable("a", 1, 8) + b = Variable("b", 1, 10) + assert sym_render(a) == "a" + assert sym_render(1) == "1" + assert sym_render(a+1) == "(1+a)" + assert sym_render(a*b) == "(a*b)" + +class TestSymInfer(unittest.TestCase): + def test_sym_infer(self): + a = Variable("a", 0, 10) + b = Variable("b", 0, 10) + c = Variable("c", 0, 10) + var_vals = {a: 2, b: 3, c: 4} + assert sym_infer(5, var_vals) == 5 + assert sym_infer(a, var_vals) == 2 + assert sym_infer(b, var_vals) == 3 + assert sym_infer(a+b, var_vals) == 5 + assert sym_infer(a-b, var_vals) == -1 + assert sym_infer(a+b+c, var_vals) == 9 + assert sym_infer(a*b, var_vals) == 6 + assert sym_infer(a*b+c, var_vals) == 10 + +class TestSymbolicSymbolicOps(unittest.TestCase): + def test_node_divmod_node(self): + i = Variable("i", 1, 10) + idx0 = Variable("idx0", 0, i*3-1) + assert NumNode(0) // (Variable("i", 1, 10)*128) == 0 + assert NumNode(0) % (Variable("i", 1, 10)*128) == 0 + assert NumNode(127) // (Variable("i", 1, 10)*128) == 0 + assert NumNode(127) % (Variable("i", 1, 10)*128) == 127 + assert 127 // (Variable("i", 1, 10)*128) == 0 + assert 127 % (Variable("i", 1, 10)*128) == 127 + assert NumNode(128) // (Variable("i", 1, 10)*128 + 128) == 0 + assert NumNode(128) % (Variable("i", 1, 10)*128 + 128) == 128 + assert 128 // (Variable("i", 1, 10)*128 + 128) == 0 + assert 128 % (Variable("i", 1, 10)*128 + 128) == 128 + assert 0 // (Variable("i", 1, 10)*128) == 0 + assert 0 % (Variable("i", 1, 
10)*128) == 0 + assert idx0 // (i*3) == 0 + assert idx0 % (i*3) == idx0 + assert i // i == 1 + assert i % i == 0 + assert 128 // NumNode(4) == 32 + assert 128 % NumNode(4) == 0 + assert NumNode(128) // NumNode(4) == 32 + assert NumNode(128) % NumNode(4) == 0 + + def test_mulnode_divmod_node(self): + i = Variable("i", 1, 10) + idx0 = Variable("idx0", 0, 31) + assert (idx0*(i*4+4)) // (i+1) == (idx0*4) + assert (idx0*(i*4+4)) % (i+1) == 0 + assert (idx0*i) % i == 0 + + def test_sumnode_divmod_sumnode(self): + i = Variable("i", 1, 10) + idx0 = Variable("idx0", 0, 7) + idx1 = Variable("idx1", 0, 3) + idx2 = Variable("idx2", 0, i) + assert (idx0*(i*4+4)+idx1*(i+1)+idx2) // (i+1) == idx0*4+idx1 + assert (idx0*(i*4+4)+idx1*(i+1)+idx2) % (i+1) == idx2 + assert (i+1) // (i*128+128) == 0 + assert (i+1) % (i*128+128) == (i+1) + assert (i+1+idx2) // (i+1) == 1 + assert (i+1+idx2) % (i+1) == idx2 + assert (idx0*(i*4+4)+i+1+idx2) // (i+1) == idx0*4+1 + assert (idx0*(i*4+4)+i+1+idx2) % (i+1) == idx2 + assert (i*128+128)*2 // (i*128+128) == 2 + assert (i*128+128)*2 % (i*128+128) == 0 + + def test_sumnode_divmod_sumnode_complex(self): + i = Variable("i", 1, 1024) + gidx0 = Variable("gidx0", 0, i) + lidx1 = Variable("lidx1", 0, 7) + ridx2 = Variable("ridx1", 0, 31) + assert ((i*128+128)*2 + gidx0*128 + lidx1*(i*512+512) + ridx2*4) // (i*128+128) == 2 + lidx1*4 + assert ((i*128+128)*2 + gidx0*128 + lidx1*(i*512+512) + ridx2*4) % (i*128+128) == gidx0*128 + ridx2*4 + assert ((gidx0*128+i*128+ridx2*4+129)) // (i*128+128) == 1 + assert ((gidx0*128+i*128+ridx2*4+129)) % (i*128+128) == gidx0*128 + ridx2*4 + 1 + assert (ridx2*(i*4+4)+1+i+gidx0) // (i*128+128) == 0 + assert (ridx2*(i*4+4)+1+i+gidx0) % (i*128+128) == (ridx2*(i*4+4)+1+i+gidx0) + + def test_node_lt_node(self): + a = Variable("a", 1, 5) + b = Variable("b", 6, 9) + c = Variable("c", 1, 10) + d = Variable("d", 5, 10) + # if the value is always the same, it folds to num + assert (a < b) == 1 + assert (b < a) == 0 + assert (d < a) == 0 + # if it remains as a LtNode, bool is always true and (min, max) == (0, 1) + assert isinstance((a < c), LtNode) and (a < c).min == 0 and (a < c).max == 1 + assert a < c + assert isinstance((a > c), LtNode) and (a > c).min == 0 and (a > c).max == 1 + # same when comparing with a constant + assert a < 3 and (a < 3).min == 0 and (a < 3).max == 1 + assert a > 3 and (a > 3).min == 0 and (a > 3).max == 1 + + def test_num_node_mul_node(self): + a = Variable("a", 1, 5) + b = NumNode(2) * a + assert b == a * 2 + assert isinstance(b, MulNode) + b = NumNode(1) * a + assert b == a + assert isinstance(b, Variable) + b = NumNode(0) * a + assert b == 0 + assert isinstance(b, NumNode) + + def test_num_node_expand(self): + a = NumNode(42) + assert a.expand() == [a] + + def test_variable_expand(self): + a = Variable("a", 5, 7) + assert a.expand() == [a] + + def test_variable_expand_expr_none(self): + a = Variable(None, 5, 7) + assert a.expand() == [NumNode(5), NumNode(6), NumNode(7)] + + def test_mul_node_expand(self): + a = Variable(None, 5, 7) + m = MulNode(a, 3) + assert m.expand() == [NumNode(15), NumNode(18), NumNode(21)] + + b = Variable("b", 1, 3) + n = MulNode(b, 3) + assert n.expand() == [Variable("b", 1, 3)*3] + + def test_sum_node_expand(self): + a = Variable(None, 1, 3) + b = Variable("b", 5, 7) + + s1 = create_rednode(SumNode, [a, b]) + assert s1.expand() == [Variable.sum([NumNode(i),b]) for i in range(1,4)] + + def test_multi_expand(self): + a = Variable("a", 1, 3) + b = Variable("b", 14, 17) + s1 = create_rednode(SumNode, [a, 
b]) + # expand increments earlier variables faster than later variables (as specified in the argument) + # this behavior was just copied from before, no idea why this should be true + assert s1.expand((a, b)) == [NumNode(x + y) for x in range(b.min, b.max + 1) for y in range(a.min, a.max + 1)] + + def test_substitute(self): + a = Variable(None, 1, 3) + b = a + 1 + c = b.substitute({a: NumNode(1)}) + assert c == NumNode(2) + + +if __name__ == '__main__': + unittest.main()
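
A note on the TestGetContraction cases added earlier in this patch: get_contraction(old_shape, new_shape) returns one sub-list per output axis, listing the input axes that collapse into it (or None when no grouping exists). The snippet below is a hedged reading of that contract, checked only against the expected values in the tests themselves; check_contraction is an illustrative helper, not part of tinygrad.

  from math import prod

  def check_contraction(old_shape, new_shape, groups):
      # one group per output axis; the grouped input sizes multiply to that axis,
      # and an empty group corresponds to a size-1 output axis (empty product is 1)
      assert len(groups) == len(new_shape)
      for axes, new_dim in zip(groups, new_shape):
          assert prod(old_shape[i] for i in axes) == new_dim

  # mirrors expected values from test_contraction above
  check_contraction((1, 2, 3, 4), (2, 3, 4), [[0, 1], [2], [3]])
  check_contraction((4, 4, 4, 4), (16, 1, 16), [[0, 1], [], [2, 3]])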
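For readers unfamiliar with the pattern test_shm_tensor.py exercises, here is a standard-library-only sketch of the same shared-memory round trip (write into a SharedMemory block, read it back, then close and unlink). It deliberately avoids the tinygrad Tensor.to("shm:<name>") path; shapes and names are illustrative.

  import numpy as np
  from multiprocessing import shared_memory

  src = np.random.randn(2, 2, 2).astype(np.float32)
  shm = shared_memory.SharedMemory(create=True, size=src.nbytes)
  try:
      # "copy to shm": view the shared block as an ndarray and write into it
      np.ndarray(src.shape, dtype=src.dtype, buffer=shm.buf)[:] = src
      # "copy from shm": a second view over the same buffer reads the data back
      back = np.ndarray(src.shape, dtype=src.dtype, buffer=shm.buf).copy()
      assert np.allclose(src, back)
  finally:
      shm.close()    # drop this process's mapping
      shm.unlink()   # free the segment, as the tests do with s.unlink()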
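Finally, a minimal usage sketch of the tinygrad.shape.symbolic API that test_symbolic.py drives, assuming the same module and version the test imports; the rendered string, bounds, and inferred value below simply restate behaviour asserted in the tests (test_add_min_max and TestSymInfer).

  from tinygrad.shape.symbolic import Variable, sym_render, sym_infer

  a = Variable("a", 0, 8)                    # integer variable with inclusive bounds
  expr = a * 2 + 12                          # arithmetic folds into a node tree
  print(expr.render(), expr.min, expr.max)   # ((a*2)+12) 12 28
  print(sym_render(expr))                    # same rendering via the helper
  print(sym_infer(expr, {a: 3}))             # 18: substitute a value and evaluate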