wip
This commit is contained in:
66
selfdrive/modeld/models/README.md
Normal file
66
selfdrive/modeld/models/README.md
Normal file
@@ -0,0 +1,66 @@
|
||||
## Neural networks in openpilot
|
||||
To view the architecture of the ONNX networks, you can use [netron](https://netron.app/)
|
||||
|
||||
## Supercombo
|
||||
### Supercombo input format (Full size: 799906 x float32)
|
||||
* **image stream**
|
||||
* Two consecutive images (256 * 512 * 3 in RGB) recorded at 20 Hz : 393216 = 2 * 6 * 128 * 256
|
||||
* Each 256 * 512 image is represented in YUV420 with 6 channels : 6 * 128 * 256
|
||||
* Channels 0,1,2,3 represent the full-res Y channel and are represented in numpy as Y[::2, ::2], Y[::2, 1::2], Y[1::2, ::2], and Y[1::2, 1::2]
|
||||
* Channel 4 represents the half-res U channel
|
||||
* Channel 5 represents the half-res V channel
|
||||
* **wide image stream**
|
||||
* Two consecutive images (256 * 512 * 3 in RGB) recorded at 20 Hz : 393216 = 2 * 6 * 128 * 256
|
||||
* Each 256 * 512 image is represented in YUV420 with 6 channels : 6 * 128 * 256
|
||||
* Channels 0,1,2,3 represent the full-res Y channel and are represented in numpy as Y[::2, ::2], Y[::2, 1::2], Y[1::2, ::2], and Y[1::2, 1::2]
|
||||
* Channel 4 represents the half-res U channel
|
||||
* Channel 5 represents the half-res V channel
|
||||
* **desire**
|
||||
* one-hot encoded buffer to command model to execute certain actions, bit needs to be sent for the past 5 seconds (at 20FPS) : 100 * 8
|
||||
* **traffic convention**
|
||||
* one-hot encoded vector to tell model whether traffic is right-hand or left-hand traffic : 2
|
||||
* **feature buffer**
|
||||
* A buffer of intermediate features that gets appended to the current feature to form a 5 seconds temporal context (at 20FPS) : 99 * 512
|
||||
* **nav features**
|
||||
* 1 * 150
|
||||
* **nav instructions**
|
||||
* 1 * 256
|
||||
|
||||
|
||||
### Supercombo output format (Full size: XXX x float32)
|
||||
Read [here](https://github.com/commaai/openpilot/blob/90af436a121164a51da9fa48d093c29f738adf6a/selfdrive/modeld/models/driving.h#L236) for more.
|
||||
|
||||
|
||||
## Driver Monitoring Model
|
||||
* .onnx model can be run with onnx runtimes
|
||||
* .dlc file is a pre-quantized model and only runs on qualcomm DSPs
|
||||
|
||||
### input format
|
||||
* single image (W = 1440, H = 960) luminance channel (Y) from the planar YUV420 format:
|
||||
* full input size is 1440 * 960 = 1382400
|
||||
* normalized ranging from 0.0 to 1.0 in float32 (onnx runner) or ranging from 0 to 255 in uint8 (snpe runner)
|
||||
* camera calibration angles (roll, pitch, yaw) from liveCalibration: 3 x float32 inputs
|
||||
|
||||
### output format
|
||||
* 84 x float32 outputs = 2 + 41 * 2 ([parsing example](https://github.com/commaai/openpilot/blob/22ce4e17ba0d3bfcf37f8255a4dd1dc683fe0c38/selfdrive/modeld/models/dmonitoring.cc#L33))
|
||||
* for each person in the front seats (2 * 41)
|
||||
* face pose: 12 = 6 + 6
|
||||
* face orientation [pitch, yaw, roll] in camera frame: 3
|
||||
* face position [dx, dy] relative to image center: 2
|
||||
* normalized face size: 1
|
||||
* standard deviations for above outputs: 6
|
||||
* face visible probability: 1
|
||||
* eyes: 20 = (8 + 1) + (8 + 1) + 1 + 1
|
||||
* eye position and size, and their standard deviations: 8
|
||||
* eye visible probability: 1
|
||||
* eye closed probability: 1
|
||||
* wearing sunglasses probability: 1
|
||||
* face occluded probability: 1
|
||||
* touching wheel probability: 1
|
||||
* paying attention probability: 1
|
||||
* (deprecated) distracted probabilities: 2
|
||||
* using phone probability: 1
|
||||
* distracted probability: 1
|
||||
* common outputs: 2
|
||||
* poor camera vision probability: 1
|
||||
* left hand drive probability: 1
|
||||
72
selfdrive/modeld/models/commonmodel.cc
Normal file
72
selfdrive/modeld/models/commonmodel.cc
Normal file
@@ -0,0 +1,72 @@
|
||||
#include "selfdrive/modeld/models/commonmodel.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <cassert>
|
||||
#include <cmath>
|
||||
#include <cstring>
|
||||
|
||||
#include "common/clutil.h"
|
||||
#include "common/mat.h"
|
||||
#include "common/timing.h"
|
||||
|
||||
// Allocates the host-side frame history buffer and the GPU-side buffers used
// to warp and pack a camera frame into the model's input layout.
ModelFrame::ModelFrame(cl_device_id device_id, cl_context context) {
  // Host buffer holding two consecutive packed frames (buf_size = 2 * MODEL_FRAME_SIZE).
  input_frames = std::make_unique<float[]>(buf_size);

  q = CL_CHECK_ERR(clCreateCommandQueue(context, device_id, 0, &err));
  // Planar YUV420 intermediates at model resolution: full-res Y, half-res U and V.
  y_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, MODEL_WIDTH * MODEL_HEIGHT, NULL, &err));
  u_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, (MODEL_WIDTH / 2) * (MODEL_HEIGHT / 2), NULL, &err));
  v_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, (MODEL_WIDTH / 2) * (MODEL_HEIGHT / 2), NULL, &err));
  // One packed float frame, the unit read back into input_frames by prepare().
  net_input_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, MODEL_FRAME_SIZE * sizeof(float), NULL, &err));

  transform_init(&transform, context, device_id);
  loadyuv_init(&loadyuv, context, device_id, MODEL_WIDTH, MODEL_HEIGHT);
}
|
||||
|
||||
// Warps the camera frame `yuv_cl` (via `projection`) into model-resolution YUV
// planes on the GPU, then packs it for the network.
//
// If `output` is nullptr: the packed frame is read back to the host, appended
// to the two-frame history, and a pointer to the history (previous frame
// followed by current frame) is returned.
// Otherwise: the packed frame is written into `*output` on the GPU and
// nullptr is returned.
float* ModelFrame::prepare(cl_mem yuv_cl, int frame_width, int frame_height, int frame_stride, int frame_uv_offset, const mat3 &projection, cl_mem *output) {
  transform_queue(&this->transform, q,
                  yuv_cl, frame_width, frame_height, frame_stride, frame_uv_offset,
                  y_cl, u_cl, v_cl, MODEL_WIDTH, MODEL_HEIGHT, projection);

  if (output == nullptr) {  // nullptr throughout (was a NULL/nullptr mix)
    loadyuv_queue(&loadyuv, q, y_cl, u_cl, v_cl, net_input_cl);

    // Maintain a two-frame history: shift the previous frame into slot 0,
    // then block-read the new frame into slot 1.
    std::memmove(&input_frames[0], &input_frames[MODEL_FRAME_SIZE], sizeof(float) * MODEL_FRAME_SIZE);
    CL_CHECK(clEnqueueReadBuffer(q, net_input_cl, CL_TRUE, 0, MODEL_FRAME_SIZE * sizeof(float), &input_frames[MODEL_FRAME_SIZE], 0, nullptr, nullptr));
    clFinish(q);
    return &input_frames[0];
  } else {
    loadyuv_queue(&loadyuv, q, y_cl, u_cl, v_cl, *output, true);
    // NOTE: Since thneed is using a different command queue, this clFinish is needed to ensure the image is ready.
    clFinish(q);
    return nullptr;
  }
}
|
||||
|
||||
// Releases GPU resources in reverse order of their creation in the constructor.
ModelFrame::~ModelFrame() {
  transform_destroy(&transform);
  loadyuv_destroy(&loadyuv);
  CL_CHECK(clReleaseMemObject(net_input_cl));
  CL_CHECK(clReleaseMemObject(v_cl));
  CL_CHECK(clReleaseMemObject(u_cl));
  CL_CHECK(clReleaseMemObject(y_cl));
  CL_CHECK(clReleaseCommandQueue(q));
}
|
||||
|
||||
// Numerically-stable softmax over `len` elements.
// Writes expf(input[i] - max) / sum into output[i]; the outputs sum to 1.
// A no-op when len == 0 (the original dereferenced max_element's end
// iterator on an empty range, which is undefined behavior).
void softmax(const float* input, float* output, size_t len) {
  if (len == 0) return;

  // Subtract the max so expf cannot overflow for large inputs.
  const float max_val = *std::max_element(input, input + len);
  float denominator = 0;
  for (size_t i = 0; i < len; i++) {  // size_t: no signed/unsigned mix with len
    const float v_exp = expf(input[i] - max_val);
    denominator += v_exp;
    output[i] = v_exp;
  }

  // One division, then multiply through by the reciprocal.
  const float inv_denominator = 1.0f / denominator;
  for (size_t i = 0; i < len; i++) {
    output[i] *= inv_denominator;
  }
}
|
||||
|
||||
// Logistic sigmoid: maps any real input into (0, 1), with sigmoid(0) == 0.5.
float sigmoid(float input) {
  const float neg_exp = expf(-input);
  return 1 / (1 + neg_exp);
}
|
||||
47
selfdrive/modeld/models/commonmodel.h
Normal file
47
selfdrive/modeld/models/commonmodel.h
Normal file
@@ -0,0 +1,47 @@
|
||||
#pragma once
|
||||
|
||||
#include <cfloat>
#include <cstdlib>

#include <array>
#include <memory>

#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
#ifdef __APPLE__
#include <OpenCL/cl.h>
#else
#include <CL/cl.h>
#endif

#include "common/mat.h"
#include "cereal/messaging/messaging.h"
#include "selfdrive/modeld/transforms/loadyuv.h"
#include "selfdrive/modeld/transforms/transform.h"
|
||||
|
||||
// Evaluated once per translation unit at load time: enables publishing raw
// model output when the SEND_RAW_PRED environment variable is set.
const bool send_raw_pred = getenv("SEND_RAW_PRED") != NULL;
|
||||
|
||||
// Numerically-stable softmax over `len` elements; writes probabilities summing to 1 into `output`.
void softmax(const float* input, float* output, size_t len);
// Logistic function 1 / (1 + e^-x), mapping any real input into (0, 1).
float sigmoid(float input);
|
||||
|
||||
// Wraps a std::array in a non-owning kj::ArrayPtr view (no copy); the array
// must outlive the returned pointer.
template<class T, size_t size>
constexpr const kj::ArrayPtr<const T> to_kj_array_ptr(const std::array<T, size> &arr) {
  return kj::ArrayPtr(arr.data(), arr.size());
}
|
||||
|
||||
class ModelFrame {
|
||||
public:
|
||||
ModelFrame(cl_device_id device_id, cl_context context);
|
||||
~ModelFrame();
|
||||
float* prepare(cl_mem yuv_cl, int width, int height, int frame_stride, int frame_uv_offset, const mat3& transform, cl_mem *output);
|
||||
|
||||
const int MODEL_WIDTH = 512;
|
||||
const int MODEL_HEIGHT = 256;
|
||||
const int MODEL_FRAME_SIZE = MODEL_WIDTH * MODEL_HEIGHT * 3 / 2;
|
||||
const int buf_size = MODEL_FRAME_SIZE * 2;
|
||||
|
||||
private:
|
||||
Transform transform;
|
||||
LoadYUVState loadyuv;
|
||||
cl_command_queue q;
|
||||
cl_mem y_cl, u_cl, v_cl, net_input_cl;
|
||||
std::unique_ptr<float[]> input_frames;
|
||||
};
|
||||
File diff suppressed because it is too large
Load Diff
Binary file not shown.
2
selfdrive/modeld/models/dmonitoring_model.current
Normal file
2
selfdrive/modeld/models/dmonitoring_model.current
Normal file
@@ -0,0 +1,2 @@
|
||||
60abed33-9f25-4e34-9937-aaf918d41dfc
|
||||
50887963963a3022e85ac0c94b0801aef955a608
|
||||
BIN
selfdrive/modeld/models/dmonitoring_model.onnx
Normal file
BIN
selfdrive/modeld/models/dmonitoring_model.onnx
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
47416
selfdrive/modeld/models/navmodel.onnx
Normal file
47416
selfdrive/modeld/models/navmodel.onnx
Normal file
File diff suppressed because it is too large
Load Diff
Binary file not shown.
Binary file not shown.
Reference in New Issue
Block a user