openpilot v0.9.6 release
date: 2024-01-12T10:13:37 master commit: ba792d576a49a0899b88a753fa1c52956bedf9e6
5
rednose/.gitignore
vendored
Normal file
@@ -0,0 +1,5 @@
# Cython intermediates
*_pyx.cpp
*_pyx.h
*_pyx_api.h
*.os
17
rednose/SConscript
Normal file
@@ -0,0 +1,17 @@
Import('env', 'envCython', 'common')

cc_sources = [
  "helpers/ekf_load.cc",
  "helpers/ekf_sym.cc",
]
libs = ["dl"]
if common != "":
  # for SWAGLOG support
  libs += [common, 'zmq']

ekf_objects = env.SharedObject(cc_sources)
rednose = env.Library("helpers/ekf_sym", ekf_objects, LIBS=libs)
rednose_python = envCython.Program("helpers/ekf_sym_pyx.so", ["helpers/ekf_sym_pyx.pyx", ekf_objects],
                                   LIBS=libs + envCython["LIBS"])

Export('rednose', 'rednose_python')
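This SConscript builds the EKF helper objects into a static library plus a Cython extension and exports both. A downstream SConscript would typically consume the exported targets along these lines (a minimal sketch; the consumer program name and sources are invented and not part of this commit):

  Import('env', 'rednose')

  # link the exported helper library (and libdl for dlopen) into a consumer binary
  env.Program('ekf_consumer_test', ['ekf_consumer_test.cc'], LIBS=['dl', rednose])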
0
rednose/__init__.py
Normal file
33
rednose/helpers/__init__.py
Normal file
@@ -0,0 +1,33 @@
import os
import platform
from cffi import FFI

TEMPLATE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'templates'))


def write_code(folder, name, code, header):
  if not os.path.exists(folder):
    os.mkdir(folder)

  open(os.path.join(folder, f"{name}.cpp"), 'w', encoding='utf-8').write(code)
  open(os.path.join(folder, f"{name}.h"), 'w', encoding='utf-8').write(header)


def load_code(folder, name):
  shared_ext = "dylib" if platform.system() == "Darwin" else "so"
  shared_fn = os.path.join(folder, f"lib{name}.{shared_ext}")
  header_fn = os.path.join(folder, f"{name}.h")

  with open(header_fn, encoding='utf-8') as f:
    header = f.read()

  # void function declarations are the only thing cffi can parse here
  header = "\n".join([line for line in header.split("\n") if line.startswith("void ")])

  ffi = FFI()
  ffi.cdef(header)
  return (ffi, ffi.dlopen(shared_fn))


class KalmanError(Exception):
  pass
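write_code() and load_code() are the two halves of the code-generation round trip: the generator writes {name}.cpp / {name}.h into a folder, and once that source is compiled into lib{name}.so (or .dylib on macOS), load_code() dlopens it and exposes the void functions declared in the header through cffi. A rough usage sketch (the folder and filter name are placeholders):

  from rednose.helpers import load_code

  # expects generated/liblive.so and generated/live.h to exist already
  ffi, lib = load_code("generated", "live")
  # every exported symbol is prefixed with the filter name, e.g. lib.live_predict(...)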
22
rednose/helpers/chi2_lookup.py
Normal file
@@ -0,0 +1,22 @@
import os

import numpy as np


def gen_chi2_ppf_lookup(max_dim=200):
  from scipy.stats import chi2
  table = np.zeros((max_dim, 98))
  for dim in range(1, max_dim):
    table[dim] = chi2.ppf(np.arange(.01, .99, .01), dim)

  np.save('chi2_lookup_table', table)


def chi2_ppf(p, dim):
  table = np.load(os.path.dirname(os.path.realpath(__file__)) + '/chi2_lookup_table.npy')
  result = np.interp(p, np.arange(.01, .99, .01), table[dim])
  return result


if __name__ == "__main__":
  gen_chi2_ppf_lookup()
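chi2_ppf() interpolates a precomputed table of chi-squared quantiles so outlier gating does not need scipy at runtime. For example, a 95% gate on a 3-dimensional innovation could look like this (the numbers are illustrative only):

  import numpy as np
  from rednose.helpers.chi2_lookup import chi2_ppf

  thresh = chi2_ppf(0.95, 3)                     # chi-squared 95th percentile, 3 degrees of freedom
  y = np.array([0.5, -0.2, 0.1])                 # innovation
  S = np.eye(3) * 0.25                           # innovation covariance
  maha_dist = float(y.T @ np.linalg.inv(S) @ y)  # squared Mahalanobis distance
  is_outlier = maha_dist > thresh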
BIN
rednose/helpers/chi2_lookup_table.npy
Normal file
Binary file not shown.
42
rednose/helpers/ekf.h
Normal file
@@ -0,0 +1,42 @@
#pragma once

#include <iostream>
#include <cassert>
#include <string>
#include <vector>
#include <deque>
#include <unordered_map>
#include <map>
#include <cmath>

#include <eigen3/Eigen/Dense>

typedef void (*extra_routine_t)(double *, double *);

struct EKF {
  std::string name;
  std::vector<int> kinds;
  std::vector<int> feature_kinds;

  void (*f_fun)(double *, double, double *);
  void (*F_fun)(double *, double, double *);
  void (*err_fun)(double *, double *, double *);
  void (*inv_err_fun)(double *, double *, double *);
  void (*H_mod_fun)(double *, double *);
  void (*predict)(double *, double *, double *, double);
  std::unordered_map<int, void (*)(double *, double *, double *)> hs = {};
  std::unordered_map<int, void (*)(double *, double *, double *)> Hs = {};
  std::unordered_map<int, void (*)(double *, double *, double *, double *, double *)> updates = {};
  std::unordered_map<int, void (*)(double *, double *, double *)> Hes = {};
  std::unordered_map<std::string, void (*)(double)> sets = {};
  std::unordered_map<std::string, extra_routine_t> extra_routines = {};
};

#define ekf_lib_init(ekf) \
extern "C" void* ekf_get() { \
  return (void*) &ekf; \
} \
extern void __attribute__((weak)) ekf_register(const EKF* ptr); \
static void __attribute__((constructor)) do_ekf_init_ ## ekf(void) { \
  if (ekf_register) ekf_register(&ekf); \
}
39
rednose/helpers/ekf_load.cc
Normal file
@@ -0,0 +1,39 @@
#include "ekf_load.h"
#include <dlfcn.h>

std::vector<const EKF*>& ekf_get_all() {
  static std::vector<const EKF*> vec;
  return vec;
}

void ekf_register(const EKF* ekf) {
  ekf_get_all().push_back(ekf);
}

const EKF* ekf_lookup(const std::string& ekf_name) {
  for (const auto& ekfi : ekf_get_all()) {
    if (ekf_name == ekfi->name) {
      return ekfi;
    }
  }
  return NULL;
}

void ekf_load_and_register(const std::string& ekf_directory, const std::string& ekf_name) {
  if (ekf_lookup(ekf_name)) {
    return;
  }

#ifdef __APPLE__
  std::string dylib_ext = ".dylib";
#else
  std::string dylib_ext = ".so";
#endif
  std::string ekf_path = ekf_directory + "/lib" + ekf_name + dylib_ext;
  void* handle = dlopen(ekf_path.c_str(), RTLD_NOW);
  assert(handle);
  void* (*ekf_get)() = (void*(*)())dlsym(handle, "ekf_get");
  assert(ekf_get != NULL);
  const EKF* ekf = (const EKF*)ekf_get();
  ekf_register(ekf);
}
9
rednose/helpers/ekf_load.h
Normal file
@@ -0,0 +1,9 @@
#include <vector>
#include <string>

#include "ekf.h"

std::vector<const EKF*>& ekf_get_all();
const EKF* ekf_lookup(const std::string& ekf_name);
void ekf_register(const EKF* ekf);
void ekf_load_and_register(const std::string& ekf_directory, const std::string& ekf_name);
223
rednose/helpers/ekf_sym.cc
Normal file
@@ -0,0 +1,223 @@
|
||||
#include "ekf_sym.h"
|
||||
#include "logger/logger.h"
|
||||
|
||||
using namespace EKFS;
|
||||
using namespace Eigen;
|
||||
|
||||
EKFSym::EKFSym(std::string name, Map<MatrixXdr> Q, Map<VectorXd> x_initial, Map<MatrixXdr> P_initial, int dim_main,
|
||||
int dim_main_err, int N, int dim_augment, int dim_augment_err, std::vector<int> maha_test_kinds,
|
||||
std::vector<int> quaternion_idxs, std::vector<std::string> global_vars, double max_rewind_age)
|
||||
{
|
||||
// TODO: add logger
|
||||
this->ekf = ekf_lookup(name);
|
||||
assert(this->ekf);
|
||||
|
||||
this->msckf = N > 0;
|
||||
this->N = N;
|
||||
this->dim_augment = dim_augment;
|
||||
this->dim_augment_err = dim_augment_err;
|
||||
this->dim_main = dim_main;
|
||||
this->dim_main_err = dim_main_err;
|
||||
|
||||
this->dim_x = x_initial.rows();
|
||||
this->dim_err = P_initial.rows();
|
||||
|
||||
assert(dim_main + dim_augment * N == dim_x);
|
||||
assert(dim_main_err + dim_augment_err * N == this->dim_err);
|
||||
assert(Q.rows() == P_initial.rows() && Q.cols() == P_initial.cols());
|
||||
|
||||
// kinds that should get mahalanobis distance
|
||||
// tested for outlier rejection
|
||||
this->maha_test_kinds = maha_test_kinds;
|
||||
|
||||
// quaternions need normalization
|
||||
this->quaternion_idxs = quaternion_idxs;
|
||||
|
||||
this->global_vars = global_vars;
|
||||
|
||||
// Process noise
|
||||
this->Q = Q;
|
||||
|
||||
this->max_rewind_age = max_rewind_age;
|
||||
this->init_state(x_initial, P_initial, NAN);
|
||||
}
|
||||
|
||||
void EKFSym::init_state(Map<VectorXd> state, Map<MatrixXdr> covs, double init_filter_time) {
|
||||
this->x = state;
|
||||
this->P = covs;
|
||||
this->filter_time = init_filter_time;
|
||||
this->augment_times = VectorXd::Zero(this->N);
|
||||
this->reset_rewind();
|
||||
}
|
||||
|
||||
VectorXd EKFSym::state() {
|
||||
return this->x;
|
||||
}
|
||||
|
||||
MatrixXdr EKFSym::covs() {
|
||||
return this->P;
|
||||
}
|
||||
|
||||
void EKFSym::set_filter_time(double t) {
|
||||
this->filter_time = t;
|
||||
}
|
||||
|
||||
double EKFSym::get_filter_time() {
|
||||
return this->filter_time;
|
||||
}
|
||||
|
||||
void EKFSym::normalize_quaternions() {
|
||||
for(std::size_t i = 0; i < this->quaternion_idxs.size(); ++i) {
|
||||
this->normalize_slice(this->quaternion_idxs[i], this->quaternion_idxs[i] + 4);
|
||||
}
|
||||
}
|
||||
|
||||
void EKFSym::normalize_slice(int slice_start, int slice_end_ex) {
|
||||
this->x.block(slice_start, 0, slice_end_ex - slice_start, this->x.cols()).normalize();
|
||||
}
|
||||
|
||||
void EKFSym::set_global(std::string global_var, double val) {
|
||||
this->ekf->sets.at(global_var)(val);
|
||||
}
|
||||
|
||||
std::optional<Estimate> EKFSym::predict_and_update_batch(double t, int kind, std::vector<Map<VectorXd>> z_map,
|
||||
std::vector<Map<MatrixXdr>> R_map, std::vector<std::vector<double>> extra_args, bool augment)
|
||||
{
|
||||
// TODO handle rewinding at this level
|
||||
|
||||
std::deque<Observation> rewound;
|
||||
if (!std::isnan(this->filter_time) && t < this->filter_time) {
|
||||
if (this->rewind_t.empty() || t < this->rewind_t.front() || t < this->rewind_t.back() - this->max_rewind_age) {
|
||||
LOGD("observation too old at %f with filter at %f, ignoring!", t, this->filter_time);
|
||||
return std::nullopt;
|
||||
}
|
||||
rewound = this->rewind(t);
|
||||
}
|
||||
|
||||
Observation obs;
|
||||
obs.t = t;
|
||||
obs.kind = kind;
|
||||
obs.extra_args = extra_args;
|
||||
for (Map<VectorXd> zi : z_map) {
|
||||
obs.z.push_back(zi);
|
||||
}
|
||||
for (Map<MatrixXdr> Ri : R_map) {
|
||||
obs.R.push_back(Ri);
|
||||
}
|
||||
|
||||
std::optional<Estimate> res = std::make_optional(this->predict_and_update_batch(obs, augment));
|
||||
|
||||
// optional fast forward
|
||||
while (!rewound.empty()) {
|
||||
this->predict_and_update_batch(rewound.front(), false);
|
||||
rewound.pop_front();
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
void EKFSym::reset_rewind() {
|
||||
this->rewind_obscache.clear();
|
||||
this->rewind_t.clear();
|
||||
this->rewind_states.clear();
|
||||
}
|
||||
|
||||
std::deque<Observation> EKFSym::rewind(double t) {
|
||||
std::deque<Observation> rewound;
|
||||
|
||||
// rewind observations until t is after previous observation
|
||||
while (this->rewind_t.back() > t) {
|
||||
rewound.push_front(this->rewind_obscache.back());
|
||||
this->rewind_t.pop_back();
|
||||
this->rewind_states.pop_back();
|
||||
this->rewind_obscache.pop_back();
|
||||
}
|
||||
|
||||
// set the state to the time right before that
|
||||
this->filter_time = this->rewind_t.back();
|
||||
this->x = this->rewind_states.back().first;
|
||||
this->P = this->rewind_states.back().second;
|
||||
|
||||
return rewound;
|
||||
}
|
||||
|
||||
void EKFSym::checkpoint(Observation& obs) {
|
||||
// push to rewinder
|
||||
this->rewind_t.push_back(this->filter_time);
|
||||
this->rewind_states.push_back(std::make_pair(this->x, this->P));
|
||||
this->rewind_obscache.push_back(obs);
|
||||
|
||||
// only keep a certain number around
|
||||
if (this->rewind_t.size() > REWIND_TO_KEEP) {
|
||||
this->rewind_t.pop_front();
|
||||
this->rewind_states.pop_front();
|
||||
this->rewind_obscache.pop_front();
|
||||
}
|
||||
}
|
||||
|
||||
Estimate EKFSym::predict_and_update_batch(Observation& obs, bool augment) {
|
||||
assert(obs.z.size() == obs.R.size());
|
||||
assert(obs.z.size() == obs.extra_args.size());
|
||||
|
||||
this->predict(obs.t);
|
||||
|
||||
Estimate res;
|
||||
res.t = obs.t;
|
||||
res.kind = obs.kind;
|
||||
res.z = obs.z;
|
||||
res.extra_args = obs.extra_args;
|
||||
res.xk1 = this->x;
|
||||
res.Pk1 = this->P;
|
||||
|
||||
// update batch
|
||||
std::vector<VectorXd> y;
|
||||
for (int i = 0; i < obs.z.size(); i++) {
|
||||
assert(obs.z[i].rows() == obs.R[i].rows());
|
||||
assert(obs.z[i].rows() == obs.R[i].cols());
|
||||
|
||||
// update state
|
||||
y.push_back(this->update(obs.kind, obs.z[i], obs.R[i], obs.extra_args[i]));
|
||||
}
|
||||
|
||||
res.xk = this->x;
|
||||
res.Pk = this->P;
|
||||
res.y = y;
|
||||
|
||||
assert(!augment); // TODO
|
||||
// if (augment) {
|
||||
// this->augment();
|
||||
// }
|
||||
|
||||
this->checkpoint(obs);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
void EKFSym::predict(double t) {
|
||||
// initialize time
|
||||
if (std::isnan(this->filter_time)) {
|
||||
this->filter_time = t;
|
||||
}
|
||||
|
||||
// predict
|
||||
double dt = t - this->filter_time;
|
||||
assert(dt >= 0.0);
|
||||
|
||||
this->ekf->predict(this->x.data(), this->P.data(), this->Q.data(), dt);
|
||||
this->normalize_quaternions();
|
||||
this->filter_time = t;
|
||||
}
|
||||
|
||||
VectorXd EKFSym::update(int kind, VectorXd z, MatrixXdr R, std::vector<double> extra_args) {
|
||||
this->ekf->updates.at(kind)(this->x.data(), this->P.data(), z.data(), R.data(), extra_args.data());
|
||||
this->normalize_quaternions();
|
||||
|
||||
if (this->msckf && std::find(this->feature_track_kinds.begin(), this->feature_track_kinds.end(), kind) != this->feature_track_kinds.end()) {
|
||||
return z.head(z.rows() - extra_args.size());
|
||||
}
|
||||
return z;
|
||||
}
|
||||
|
||||
extra_routine_t EKFSym::get_extra_routine(const std::string& routine) {
|
||||
return this->ekf->extra_routines.at(routine);
|
||||
}
|
||||
113
rednose/helpers/ekf_sym.h
Normal file
@@ -0,0 +1,113 @@
|
||||
#pragma once
|
||||
|
||||
#include <iostream>
|
||||
#include <cassert>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <deque>
|
||||
#include <unordered_map>
|
||||
#include <map>
|
||||
#include <cmath>
|
||||
#include <optional>
|
||||
|
||||
#include <eigen3/Eigen/Dense>
|
||||
|
||||
#include "ekf.h"
|
||||
#include "ekf_load.h"
|
||||
|
||||
#define REWIND_TO_KEEP 512
|
||||
|
||||
namespace EKFS {
|
||||
|
||||
typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> MatrixXdr;
|
||||
|
||||
typedef struct Observation {
|
||||
double t;
|
||||
int kind;
|
||||
std::vector<Eigen::VectorXd> z;
|
||||
std::vector<MatrixXdr> R;
|
||||
std::vector<std::vector<double>> extra_args;
|
||||
} Observation;
|
||||
|
||||
typedef struct Estimate {
|
||||
Eigen::VectorXd xk1;
|
||||
Eigen::VectorXd xk;
|
||||
MatrixXdr Pk1;
|
||||
MatrixXdr Pk;
|
||||
double t;
|
||||
int kind;
|
||||
std::vector<Eigen::VectorXd> y;
|
||||
std::vector<Eigen::VectorXd> z;
|
||||
std::vector<std::vector<double>> extra_args;
|
||||
} Estimate;
|
||||
|
||||
class EKFSym {
|
||||
public:
|
||||
EKFSym(std::string name, Eigen::Map<MatrixXdr> Q, Eigen::Map<Eigen::VectorXd> x_initial,
|
||||
Eigen::Map<MatrixXdr> P_initial, int dim_main, int dim_main_err, int N = 0, int dim_augment = 0,
|
||||
int dim_augment_err = 0, std::vector<int> maha_test_kinds = std::vector<int>(),
|
||||
std::vector<int> quaternion_idxs = std::vector<int>(),
|
||||
std::vector<std::string> global_vars = std::vector<std::string>(), double max_rewind_age = 1.0);
|
||||
void init_state(Eigen::Map<Eigen::VectorXd> state, Eigen::Map<MatrixXdr> covs, double filter_time);
|
||||
|
||||
Eigen::VectorXd state();
|
||||
MatrixXdr covs();
|
||||
void set_filter_time(double t);
|
||||
double get_filter_time();
|
||||
void normalize_quaternions();
|
||||
void normalize_slice(int slice_start, int slice_end_ex);
|
||||
void set_global(std::string global_var, double val);
|
||||
void reset_rewind();
|
||||
|
||||
void predict(double t);
|
||||
std::optional<Estimate> predict_and_update_batch(double t, int kind, std::vector<Eigen::Map<Eigen::VectorXd>> z,
|
||||
std::vector<Eigen::Map<MatrixXdr>> R, std::vector<std::vector<double>> extra_args = {{}}, bool augment = false);
|
||||
|
||||
extra_routine_t get_extra_routine(const std::string& routine);
|
||||
|
||||
private:
|
||||
std::deque<Observation> rewind(double t);
|
||||
void checkpoint(Observation& obs);
|
||||
|
||||
Estimate predict_and_update_batch(Observation& obs, bool augment);
|
||||
Eigen::VectorXd update(int kind, Eigen::VectorXd z, MatrixXdr R, std::vector<double> extra_args);
|
||||
|
||||
// struct with linked sympy-generated functions
|
||||
const EKF *ekf = NULL;
|
||||
|
||||
Eigen::VectorXd x; // state
|
||||
MatrixXdr P; // covs
|
||||
|
||||
bool msckf;
|
||||
int N;
|
||||
int dim_augment;
|
||||
int dim_augment_err;
|
||||
int dim_main;
|
||||
int dim_main_err;
|
||||
|
||||
// state
|
||||
int dim_x;
|
||||
int dim_err;
|
||||
|
||||
double filter_time;
|
||||
|
||||
std::vector<int> maha_test_kinds;
|
||||
std::vector<int> quaternion_idxs;
|
||||
|
||||
std::vector<std::string> global_vars;
|
||||
|
||||
// process noise
|
||||
MatrixXdr Q;
|
||||
|
||||
// rewind stuff
|
||||
double max_rewind_age;
|
||||
std::deque<double> rewind_t;
|
||||
std::deque<std::pair<Eigen::VectorXd, MatrixXdr>> rewind_states;
|
||||
std::deque<Observation> rewind_obscache;
|
||||
|
||||
Eigen::VectorXd augment_times;
|
||||
|
||||
std::vector<int> feature_track_kinds;
|
||||
};
|
||||
|
||||
}
|
||||
687
rednose/helpers/ekf_sym.py
Normal file
@@ -0,0 +1,687 @@
|
||||
import os
|
||||
import logging
|
||||
from bisect import bisect_right
|
||||
|
||||
import numpy as np
|
||||
import sympy as sp
|
||||
from numpy import dot
|
||||
|
||||
from rednose.helpers.sympy_helpers import sympy_into_c
|
||||
from rednose.helpers import TEMPLATE_DIR, load_code
|
||||
from rednose.helpers.chi2_lookup import chi2_ppf
|
||||
|
||||
|
||||
def solve(a, b):
|
||||
if a.shape[0] == 1 and a.shape[1] == 1:
|
||||
return b / a[0][0]
|
||||
else:
|
||||
return np.linalg.solve(a, b)
|
||||
|
||||
|
||||
def null(H, eps=1e-12):
|
||||
_, s, vh = np.linalg.svd(H)
|
||||
padding = max(0, np.shape(H)[1] - np.shape(s)[0])
|
||||
null_mask = np.concatenate(((s <= eps), np.ones((padding,), dtype=bool)), axis=0)
|
||||
null_space = np.compress(null_mask, vh, axis=0)
|
||||
return np.transpose(null_space)
|
||||
|
||||
|
||||
def gen_code(folder, name, f_sym, dt_sym, x_sym, obs_eqs, dim_x, dim_err, eskf_params=None, msckf_params=None, # pylint: disable=dangerous-default-value
|
||||
maha_test_kinds=[], quaternion_idxs=[], global_vars=None, extra_routines=[]):
|
||||
# optional state transition matrix, H modifier
|
||||
# and err_function if an error-state kalman filter (ESKF)
|
||||
# is desired. Best described in "Quaternion kinematics
|
||||
# for the error-state Kalman filter" by Joan Sola
|
||||
|
||||
if eskf_params:
|
||||
err_eqs = eskf_params[0]
|
||||
inv_err_eqs = eskf_params[1]
|
||||
H_mod_sym = eskf_params[2]
|
||||
f_err_sym = eskf_params[3]
|
||||
x_err_sym = eskf_params[4]
|
||||
else:
|
||||
nom_x = sp.MatrixSymbol('nom_x', dim_x, 1)
|
||||
true_x = sp.MatrixSymbol('true_x', dim_x, 1)
|
||||
delta_x = sp.MatrixSymbol('delta_x', dim_x, 1)
|
||||
err_function_sym = sp.Matrix(nom_x + delta_x)
|
||||
inv_err_function_sym = sp.Matrix(true_x - nom_x)
|
||||
err_eqs = [err_function_sym, nom_x, delta_x]
|
||||
inv_err_eqs = [inv_err_function_sym, nom_x, true_x]
|
||||
|
||||
H_mod_sym = sp.Matrix(np.eye(dim_x))
|
||||
f_err_sym = f_sym
|
||||
x_err_sym = x_sym
|
||||
|
||||
# This configures the multi-state augmentation
|
||||
# needed for EKF-SLAM with MSCKF (Mourikis et al 2007)
|
||||
if msckf_params:
|
||||
msckf = True
|
||||
dim_main = msckf_params[0] # size of the main state
|
||||
dim_augment = msckf_params[1] # size of one augment state chunk
|
||||
dim_main_err = msckf_params[2]
|
||||
dim_augment_err = msckf_params[3]
|
||||
N = msckf_params[4]
|
||||
feature_track_kinds = msckf_params[5]
|
||||
assert dim_main + dim_augment * N == dim_x
|
||||
assert dim_main_err + dim_augment_err * N == dim_err
|
||||
else:
|
||||
msckf = False
|
||||
dim_main = dim_x
|
||||
dim_augment = 0
|
||||
dim_main_err = dim_err
|
||||
dim_augment_err = 0
|
||||
N = 0
|
||||
|
||||
# linearize with jacobians
|
||||
F_sym = f_err_sym.jacobian(x_err_sym)
|
||||
|
||||
if eskf_params:
|
||||
for sym in x_err_sym:
|
||||
F_sym = F_sym.subs(sym, 0)
|
||||
|
||||
assert dt_sym in F_sym.free_symbols
|
||||
|
||||
for i in range(len(obs_eqs)):
|
||||
obs_eqs[i].append(obs_eqs[i][0].jacobian(x_sym))
|
||||
if msckf and obs_eqs[i][1] in feature_track_kinds:
|
||||
obs_eqs[i].append(obs_eqs[i][0].jacobian(obs_eqs[i][2]))
|
||||
else:
|
||||
obs_eqs[i].append(None)
|
||||
|
||||
# collect sympy functions
|
||||
sympy_functions = []
|
||||
|
||||
# extra routines
|
||||
sympy_functions += extra_routines
|
||||
|
||||
# error functions
|
||||
sympy_functions.append(('err_fun', err_eqs[0], [err_eqs[1], err_eqs[2]]))
|
||||
sympy_functions.append(('inv_err_fun', inv_err_eqs[0], [inv_err_eqs[1], inv_err_eqs[2]]))
|
||||
|
||||
# H modifier for ESKF updates
|
||||
sympy_functions.append(('H_mod_fun', H_mod_sym, [x_sym]))
|
||||
|
||||
# state propagation function
|
||||
sympy_functions.append(('f_fun', f_sym, [x_sym, dt_sym]))
|
||||
sympy_functions.append(('F_fun', F_sym, [x_sym, dt_sym]))
|
||||
|
||||
# observation functions
|
||||
for h_sym, kind, ea_sym, H_sym, He_sym in obs_eqs:
|
||||
sympy_functions.append(('h_%d' % kind, h_sym, [x_sym, ea_sym]))
|
||||
sympy_functions.append(('H_%d' % kind, H_sym, [x_sym, ea_sym]))
|
||||
if msckf and kind in feature_track_kinds:
|
||||
sympy_functions.append(('He_%d' % kind, He_sym, [x_sym, ea_sym]))
|
||||
|
||||
# Generate and wrap all the C code
|
||||
sympy_header, code = sympy_into_c(sympy_functions, global_vars)
|
||||
|
||||
header = "#pragma once\n"
|
||||
header += "#include \"rednose/helpers/ekf.h\"\n"
|
||||
header += "extern \"C\" {\n"
|
||||
|
||||
pre_code = f"#include \"{name}.h\"\n"
|
||||
pre_code += "\nnamespace {\n"
|
||||
pre_code += "#define DIM %d\n" % dim_x
|
||||
pre_code += "#define EDIM %d\n" % dim_err
|
||||
pre_code += "#define MEDIM %d\n" % dim_main_err
|
||||
pre_code += "typedef void (*Hfun)(double *, double *, double *);\n"
|
||||
|
||||
if global_vars is not None:
|
||||
for var in global_vars:
|
||||
pre_code += f"\ndouble {var.name};\n"
|
||||
pre_code += f"\nvoid set_{var.name}(double x){{ {var.name} = x;}}\n"
|
||||
|
||||
post_code = "\n}\n" # namespace
|
||||
post_code += "extern \"C\" {\n\n"
|
||||
|
||||
for h_sym, kind, ea_sym, H_sym, He_sym in obs_eqs:
|
||||
if msckf and kind in feature_track_kinds:
|
||||
He_str = 'He_%d' % kind
|
||||
# ea_dim = ea_sym.shape[0]
|
||||
else:
|
||||
He_str = 'NULL'
|
||||
# ea_dim = 1 # not really dim of ea but makes c function work
|
||||
maha_thresh = chi2_ppf(0.95, int(h_sym.shape[0])) # mahalanobis distance for outlier detection
|
||||
maha_test = kind in maha_test_kinds
|
||||
|
||||
pre_code += f"const static double MAHA_THRESH_{kind} = {maha_thresh};\n"
|
||||
|
||||
header += f"void {name}_update_{kind}(double *in_x, double *in_P, double *in_z, double *in_R, double *in_ea);\n"
|
||||
post_code += f"void {name}_update_{kind}(double *in_x, double *in_P, double *in_z, double *in_R, double *in_ea) {{\n"
|
||||
post_code += f" update<{h_sym.shape[0]}, 3, {int(maha_test)}>(in_x, in_P, h_{kind}, H_{kind}, {He_str}, in_z, in_R, in_ea, MAHA_THRESH_{kind});\n"
|
||||
post_code += "}\n"
|
||||
|
||||
# For ffi loading of specific functions
|
||||
for line in sympy_header.split("\n"):
|
||||
if line.startswith("void "): # sympy functions
|
||||
func_call = line[5: line.index(')') + 1]
|
||||
header += f"void {name}_{func_call};\n"
|
||||
post_code += f"void {name}_{func_call} {{\n"
|
||||
post_code += f" {func_call.replace('double *', '').replace('double', '')};\n"
|
||||
post_code += "}\n"
|
||||
header += f"void {name}_predict(double *in_x, double *in_P, double *in_Q, double dt);\n"
|
||||
post_code += f"void {name}_predict(double *in_x, double *in_P, double *in_Q, double dt) {{\n"
|
||||
post_code += " predict(in_x, in_P, in_Q, dt);\n"
|
||||
post_code += "}\n"
|
||||
if global_vars is not None:
|
||||
for var in global_vars:
|
||||
header += f"void {name}_set_{var.name}(double x);\n"
|
||||
post_code += f"void {name}_set_{var.name}(double x) {{\n"
|
||||
post_code += f" set_{var.name}(x);\n"
|
||||
post_code += "}\n"
|
||||
|
||||
post_code += "}\n\n" # extern c
|
||||
|
||||
funcs = ['f_fun', 'F_fun', 'err_fun', 'inv_err_fun', 'H_mod_fun', 'predict']
|
||||
func_lists = {
|
||||
'h': [kind for _, kind, _, _, _ in obs_eqs],
|
||||
'H': [kind for _, kind, _, _, _ in obs_eqs],
|
||||
'update': [kind for _, kind, _, _, _ in obs_eqs],
|
||||
'He': [kind for _, kind, _, _, _ in obs_eqs if msckf and kind in feature_track_kinds],
|
||||
'set': [var.name for var in global_vars] if global_vars is not None else [],
|
||||
}
|
||||
func_extra = [x[0] for x in extra_routines]
|
||||
|
||||
# For dynamic loading of specific functions
|
||||
post_code += f"const EKF {name} = {{\n"
|
||||
post_code += f" .name = \"{name}\",\n"
|
||||
post_code += f" .kinds = {{ {', '.join([str(kind) for _, kind, _, _, _ in obs_eqs])} }},\n"
|
||||
post_code += f" .feature_kinds = {{ {', '.join([str(kind) for _, kind, _, _, _ in obs_eqs if msckf and kind in feature_track_kinds])} }},\n"
|
||||
for func in funcs:
|
||||
post_code += f" .{func} = {name}_{func},\n"
|
||||
for group, kinds in func_lists.items():
|
||||
post_code += f" .{group}s = {{\n"
|
||||
for kind in kinds:
|
||||
str_kind = f"\"{kind}\"" if isinstance(kind, str) else kind
|
||||
post_code += f" {{ {str_kind}, {name}_{group}_{kind} }},\n"
|
||||
post_code += " },\n"
|
||||
post_code += " .extra_routines = {\n"
|
||||
for f in func_extra:
|
||||
post_code += f" {{ \"{f}\", {name}_{f} }},\n"
|
||||
post_code += " },\n"
|
||||
post_code += "};\n\n"
|
||||
post_code += f"ekf_lib_init({name})\n"
|
||||
|
||||
# merge code blocks
|
||||
header += "}"
|
||||
code = "\n".join([pre_code, code, open(os.path.join(TEMPLATE_DIR, "ekf_c.c"), encoding='utf-8').read(), post_code])
|
||||
|
||||
# write to file
|
||||
if not os.path.exists(folder):
|
||||
os.mkdir(folder)
|
||||
|
||||
open(os.path.join(folder, f"{name}.h"), 'w', encoding='utf-8').write(header) # header is used for ffi import
|
||||
open(os.path.join(folder, f"{name}.cpp"), 'w', encoding='utf-8').write(code)
|
||||
|
||||
|
||||
class EKF_sym():
|
||||
def __init__(self, folder, name, Q, x_initial, P_initial, dim_main, dim_main_err, # pylint: disable=dangerous-default-value
|
||||
N=0, dim_augment=0, dim_augment_err=0, maha_test_kinds=[], quaternion_idxs=[], global_vars=None, max_rewind_age=1.0, logger=logging):
|
||||
"""Generates process function and all observation functions for the kalman filter."""
|
||||
self.msckf = N > 0
|
||||
self.N = N
|
||||
self.dim_augment = dim_augment
|
||||
self.dim_augment_err = dim_augment_err
|
||||
self.dim_main = dim_main
|
||||
self.dim_main_err = dim_main_err
|
||||
|
||||
self.logger = logger
|
||||
|
||||
# state
|
||||
x_initial = x_initial.reshape((-1, 1))
|
||||
self.dim_x = x_initial.shape[0]
|
||||
self.dim_err = P_initial.shape[0]
|
||||
assert dim_main + dim_augment * N == self.dim_x
|
||||
assert dim_main_err + dim_augment_err * N == self.dim_err
|
||||
assert Q.shape == P_initial.shape
|
||||
|
||||
# kinds that should get mahalanobis distance
|
||||
# tested for outlier rejection
|
||||
self.maha_test_kinds = maha_test_kinds
|
||||
|
||||
# quaternions need normalization
|
||||
self.quaternion_idxs = quaternion_idxs
|
||||
|
||||
# process noise
|
||||
self.Q = Q
|
||||
|
||||
# rewind stuff
|
||||
self.max_rewind_age = max_rewind_age
|
||||
self.rewind_t = []
|
||||
self.rewind_states = []
|
||||
self.rewind_obscache = []
|
||||
self.init_state(x_initial, P_initial, None)
|
||||
|
||||
ffi, lib = load_code(folder, name)
|
||||
kinds, self.feature_track_kinds = [], []
|
||||
for func in dir(lib):
|
||||
if func[:len(name) + 3] == f'{name}_h_':
|
||||
kinds.append(int(func[len(name) + 3:]))
|
||||
if func[:len(name) + 4] == f'{name}_He_':
|
||||
self.feature_track_kinds.append(int(func[len(name) + 4:]))
|
||||
|
||||
# wrap all the sympy functions
|
||||
def wrap_1lists(func_name):
|
||||
func = eval(f"lib.{name}_{func_name}", {"lib": lib}) # pylint: disable=eval-used
|
||||
|
||||
def ret(lst1, out):
|
||||
func(ffi.cast("double *", lst1.ctypes.data),
|
||||
ffi.cast("double *", out.ctypes.data))
|
||||
return ret
|
||||
|
||||
def wrap_2lists(func_name):
|
||||
func = eval(f"lib.{name}_{func_name}", {"lib": lib}) # pylint: disable=eval-used
|
||||
|
||||
def ret(lst1, lst2, out):
|
||||
func(ffi.cast("double *", lst1.ctypes.data),
|
||||
ffi.cast("double *", lst2.ctypes.data),
|
||||
ffi.cast("double *", out.ctypes.data))
|
||||
return ret
|
||||
|
||||
def wrap_1list_1float(func_name):
|
||||
func = eval(f"lib.{name}_{func_name}", {"lib": lib}) # pylint: disable=eval-used
|
||||
|
||||
def ret(lst1, fl, out):
|
||||
func(ffi.cast("double *", lst1.ctypes.data),
|
||||
ffi.cast("double", fl),
|
||||
ffi.cast("double *", out.ctypes.data))
|
||||
return ret
|
||||
|
||||
self.f = wrap_1list_1float("f_fun")
|
||||
self.F = wrap_1list_1float("F_fun")
|
||||
|
||||
self.err_function = wrap_2lists("err_fun")
|
||||
self.inv_err_function = wrap_2lists("inv_err_fun")
|
||||
self.H_mod = wrap_1lists("H_mod_fun")
|
||||
|
||||
self.hs, self.Hs, self.Hes = {}, {}, {}
|
||||
for kind in kinds:
|
||||
self.hs[kind] = wrap_2lists(f"h_{kind}")
|
||||
self.Hs[kind] = wrap_2lists(f"H_{kind}")
|
||||
if self.msckf and kind in self.feature_track_kinds:
|
||||
self.Hes[kind] = wrap_2lists(f"He_{kind}")
|
||||
|
||||
self.set_globals = {}
|
||||
if global_vars is not None:
|
||||
for global_var in global_vars:
|
||||
self.set_globals[global_var] = getattr(lib, f"{name}_set_{global_var}")
|
||||
|
||||
# wrap the C++ predict function
|
||||
def _predict_blas(x, P, dt):
|
||||
func = eval(f"lib.{name}_predict", {"lib": lib}) # pylint: disable=eval-used
|
||||
func(ffi.cast("double *", x.ctypes.data),
|
||||
ffi.cast("double *", P.ctypes.data),
|
||||
ffi.cast("double *", self.Q.ctypes.data),
|
||||
ffi.cast("double", dt))
|
||||
return x, P
|
||||
|
||||
# wrap the C++ update function
|
||||
def fun_wrapper(f, kind):
|
||||
f = eval(f"lib.{name}_{f}", {"lib": lib}) # pylint: disable=eval-used
|
||||
|
||||
def _update_inner_blas(x, P, z, R, extra_args):
|
||||
f(ffi.cast("double *", x.ctypes.data),
|
||||
ffi.cast("double *", P.ctypes.data),
|
||||
ffi.cast("double *", z.ctypes.data),
|
||||
ffi.cast("double *", R.ctypes.data),
|
||||
ffi.cast("double *", extra_args.ctypes.data))
|
||||
if self.msckf and kind in self.feature_track_kinds:
|
||||
y = z[:-len(extra_args)]
|
||||
else:
|
||||
y = z
|
||||
return x, P, y
|
||||
return _update_inner_blas
|
||||
|
||||
self._updates = {}
|
||||
for kind in kinds:
|
||||
self._updates[kind] = fun_wrapper("update_%d" % kind, kind)
|
||||
|
||||
def _update_blas(x, P, kind, z, R, extra_args=[]): # pylint: disable=dangerous-default-value
|
||||
return self._updates[kind](x, P, z, R, extra_args)
|
||||
|
||||
# assign the functions
|
||||
self._predict = _predict_blas
|
||||
# self._predict = self._predict_python
|
||||
self._update = _update_blas
|
||||
# self._update = self._update_python
|
||||
|
||||
def init_state(self, state, covs, filter_time):
|
||||
self.x = np.array(state.reshape((-1, 1))).astype(np.float64)
|
||||
self.P = np.array(covs).astype(np.float64)
|
||||
self.filter_time = filter_time
|
||||
self.augment_times = [0] * self.N
|
||||
self.rewind_obscache = []
|
||||
self.rewind_t = []
|
||||
self.rewind_states = []
|
||||
|
||||
def reset_rewind(self):
|
||||
self.rewind_obscache = []
|
||||
self.rewind_t = []
|
||||
self.rewind_states = []
|
||||
|
||||
def augment(self):
|
||||
# TODO this is not a generalized way of doing this and implies that the augmented states
|
||||
# are simply the first (dim_augment_state) elements of the main state.
|
||||
assert self.msckf
|
||||
d1 = self.dim_main
|
||||
d2 = self.dim_main_err
|
||||
d3 = self.dim_augment
|
||||
d4 = self.dim_augment_err
|
||||
|
||||
# push through augmented states
|
||||
self.x[d1:-d3] = self.x[d1 + d3:]
|
||||
self.x[-d3:] = self.x[:d3]
|
||||
assert self.x.shape == (self.dim_x, 1)
|
||||
|
||||
# push through augmented covs
|
||||
assert self.P.shape == (self.dim_err, self.dim_err)
|
||||
P_reduced = self.P
|
||||
P_reduced = np.delete(P_reduced, np.s_[d2:d2 + d4], axis=1)
|
||||
P_reduced = np.delete(P_reduced, np.s_[d2:d2 + d4], axis=0)
|
||||
assert P_reduced.shape == (self.dim_err - d4, self.dim_err - d4)
|
||||
to_mult = np.zeros((self.dim_err, self.dim_err - d4))
|
||||
to_mult[:-d4, :] = np.eye(self.dim_err - d4)
|
||||
to_mult[-d4:, :d4] = np.eye(d4)
|
||||
self.P = to_mult.dot(P_reduced.dot(to_mult.T))
|
||||
self.augment_times = self.augment_times[1:]
|
||||
self.augment_times.append(self.filter_time)
|
||||
assert self.P.shape == (self.dim_err, self.dim_err)
|
||||
|
||||
def state(self):
|
||||
return np.array(self.x).flatten()
|
||||
|
||||
def covs(self):
|
||||
return self.P
|
||||
|
||||
def set_filter_time(self, t):
|
||||
self.filter_time = t
|
||||
|
||||
def get_filter_time(self):
|
||||
return self.filter_time
|
||||
|
||||
def normalize_quaternions(self):
|
||||
for idx in self.quaternion_idxs:
|
||||
self.normalize_slice(idx, idx+4)
|
||||
|
||||
def normalize_slice(self, slice_start, slice_end_ex):
|
||||
self.x[slice_start:slice_end_ex] /= np.linalg.norm(self.x[slice_start:slice_end_ex])
|
||||
|
||||
def get_augment_times(self):
|
||||
return self.augment_times
|
||||
|
||||
def set_global(self, global_var, val):
|
||||
self.set_globals[global_var](val)
|
||||
|
||||
def rewind(self, t):
|
||||
# find where we are rewinding to
|
||||
idx = bisect_right(self.rewind_t, t)
|
||||
assert self.rewind_t[idx - 1] <= t
|
||||
assert self.rewind_t[idx] > t # must be true, or rewind wouldn't be called
|
||||
|
||||
# set the state to the time right before that
|
||||
self.filter_time = self.rewind_t[idx - 1]
|
||||
self.x[:] = self.rewind_states[idx - 1][0]
|
||||
self.P[:] = self.rewind_states[idx - 1][1]
|
||||
|
||||
# return the observations we rewound over for fast forwarding
|
||||
ret = self.rewind_obscache[idx:]
|
||||
|
||||
# throw away the old future
|
||||
# TODO: is this making a copy?
|
||||
self.rewind_t = self.rewind_t[:idx]
|
||||
self.rewind_states = self.rewind_states[:idx]
|
||||
self.rewind_obscache = self.rewind_obscache[:idx]
|
||||
|
||||
return ret
|
||||
|
||||
def checkpoint(self, obs):
|
||||
# push to rewinder
|
||||
self.rewind_t.append(self.filter_time)
|
||||
self.rewind_states.append((np.copy(self.x), np.copy(self.P)))
|
||||
self.rewind_obscache.append(obs)
|
||||
|
||||
# only keep a certain number around
|
||||
REWIND_TO_KEEP = 512
|
||||
self.rewind_t = self.rewind_t[-REWIND_TO_KEEP:]
|
||||
self.rewind_states = self.rewind_states[-REWIND_TO_KEEP:]
|
||||
self.rewind_obscache = self.rewind_obscache[-REWIND_TO_KEEP:]
|
||||
|
||||
def predict(self, t):
|
||||
# initialize time
|
||||
if self.filter_time is None:
|
||||
self.filter_time = t
|
||||
|
||||
# predict
|
||||
dt = t - self.filter_time
|
||||
assert dt >= 0
|
||||
self.x, self.P = self._predict(self.x, self.P, dt)
|
||||
self.normalize_quaternions()
|
||||
self.filter_time = t
|
||||
|
||||
def predict_and_update_batch(self, t, kind, z, R, extra_args=[[]], augment=False): # pylint: disable=dangerous-default-value
|
||||
# TODO handle rewinding at this level"
|
||||
|
||||
# rewind
|
||||
if self.filter_time is not None and t < self.filter_time:
|
||||
if len(self.rewind_t) == 0 or t < self.rewind_t[0] or t < self.rewind_t[-1] - self.max_rewind_age:
|
||||
self.logger.error("observation too old at %.3f with filter at %.3f, ignoring" % (t, self.filter_time))
|
||||
return None
|
||||
rewound = self.rewind(t)
|
||||
else:
|
||||
rewound = []
|
||||
|
||||
ret = self._predict_and_update_batch(t, kind, z, R, extra_args, augment)
|
||||
|
||||
# optional fast forward
|
||||
for r in rewound:
|
||||
self._predict_and_update_batch(*r)
|
||||
|
||||
return ret
|
||||
|
||||
def _predict_and_update_batch(self, t, kind, z, R, extra_args, augment=False):
|
||||
"""The main kalman filter function
|
||||
Predicts the state and then updates a batch of observations
|
||||
dim_x: dimensionality of the state space
|
||||
dim_z: dimensionality of the observation and depends on kind
|
||||
n: number of observations
|
||||
Args:
|
||||
t (float): Time of observation
|
||||
kind (int): Type of observation
|
||||
z (vec [n,dim_z]): Measurements
|
||||
R (mat [n,dim_z, dim_z]): Measurement Noise
|
||||
extra_args (list, [n]): Values used in H computations
|
||||
"""
|
||||
assert z.shape[0] == R.shape[0]
|
||||
assert z.shape[1] == R.shape[1]
|
||||
assert z.shape[1] == R.shape[2]
|
||||
|
||||
# initialize time
|
||||
if self.filter_time is None:
|
||||
self.filter_time = t
|
||||
|
||||
# predict
|
||||
dt = t - self.filter_time
|
||||
assert dt >= 0
|
||||
self.x, self.P = self._predict(self.x, self.P, dt)
|
||||
self.filter_time = t
|
||||
xk_km1, Pk_km1 = np.copy(self.x).flatten(), np.copy(self.P)
|
||||
|
||||
# update batch
|
||||
y = []
|
||||
for i in range(len(z)):
|
||||
# these are from the user, so we canonicalize them
|
||||
z_i = np.array(z[i], dtype=np.float64, order='F')
|
||||
R_i = np.array(R[i], dtype=np.float64, order='F')
|
||||
extra_args_i = np.array(extra_args[i], dtype=np.float64, order='F')
|
||||
# update
|
||||
self.x, self.P, y_i = self._update(self.x, self.P, kind, z_i, R_i, extra_args=extra_args_i)
|
||||
self.normalize_quaternions()
|
||||
y.append(y_i)
|
||||
xk_k, Pk_k = np.copy(self.x).flatten(), np.copy(self.P)
|
||||
|
||||
if augment:
|
||||
self.augment()
|
||||
|
||||
# checkpoint
|
||||
self.checkpoint((t, kind, z, R, extra_args))
|
||||
|
||||
return xk_km1, xk_k, Pk_km1, Pk_k, t, kind, y, z, extra_args
|
||||
|
||||
def _predict_python(self, x, P, dt):
|
||||
x_new = np.zeros(x.shape, dtype=np.float64)
|
||||
self.f(x, dt, x_new)
|
||||
|
||||
F = np.zeros(P.shape, dtype=np.float64)
|
||||
self.F(x, dt, F)
|
||||
|
||||
if not self.msckf:
|
||||
P = dot(dot(F, P), F.T)
|
||||
else:
|
||||
# Update the predicted state covariance:
|
||||
# Pk+1|k = | F*Pii*FT + Q*dt   F*Pij |
#          | PijT*FT           Pjj   |
|
||||
# Where F is the jacobian of the main state
|
||||
# predict function, Pii is the main state's
|
||||
# covariance and Q its process noise. Pij
|
||||
# is the covariance between the augmented
|
||||
# states and the main state.
|
||||
#
|
||||
d2 = self.dim_main_err # known at compile time
|
||||
F_curr = F[:d2, :d2]
|
||||
P[:d2, :d2] = (F_curr.dot(P[:d2, :d2])).dot(F_curr.T)
|
||||
P[:d2, d2:] = F_curr.dot(P[:d2, d2:])
|
||||
P[d2:, :d2] = P[d2:, :d2].dot(F_curr.T)
|
||||
|
||||
P += dt * self.Q
|
||||
return x_new, P
|
||||
|
||||
def _update_python(self, x, P, kind, z, R, extra_args=[]): # pylint: disable=dangerous-default-value
|
||||
# init vars
|
||||
z = z.reshape((-1, 1))
|
||||
h = np.zeros(z.shape, dtype=np.float64)
|
||||
H = np.zeros((z.shape[0], self.dim_x), dtype=np.float64)
|
||||
|
||||
# C functions
|
||||
self.hs[kind](x, extra_args, h)
|
||||
self.Hs[kind](x, extra_args, H)
|
||||
|
||||
# y is the "loss"
|
||||
y = z - h
|
||||
|
||||
# *** same above this line ***
|
||||
|
||||
if self.msckf and kind in self.Hes:
|
||||
# Do some algebraic magic to decorrelate
|
||||
He = np.zeros((z.shape[0], len(extra_args)), dtype=np.float64)
|
||||
self.Hes[kind](x, extra_args, He)
|
||||
|
||||
# TODO: Don't call a function here, do projection locally
|
||||
A = null(He.T)
|
||||
|
||||
y = A.T.dot(y)
|
||||
H = A.T.dot(H)
|
||||
R = A.T.dot(R.dot(A))
|
||||
|
||||
# TODO If nullspace isn't the dimension we want
|
||||
if A.shape[1] + He.shape[1] != A.shape[0]:
|
||||
self.logger.warning('Warning: null space projection failed, measurement ignored')
|
||||
return x, P, np.zeros(A.shape[0] - He.shape[1])
|
||||
|
||||
# if using eskf
|
||||
H_mod = np.zeros((x.shape[0], P.shape[0]), dtype=np.float64)
|
||||
self.H_mod(x, H_mod)
|
||||
H = H.dot(H_mod)
|
||||
|
||||
# Do Mahalanobis distance test
|
||||
# currently just runs on msckf observations
|
||||
# could run on anything if needed
|
||||
if self.msckf and kind in self.maha_test_kinds:
|
||||
a = np.linalg.inv(H.dot(P).dot(H.T) + R)
|
||||
maha_dist = y.T.dot(a.dot(y))
|
||||
if maha_dist > chi2_ppf(0.95, y.shape[0]):
|
||||
R = 10e16 * R
|
||||
|
||||
# *** same below this line ***
|
||||
|
||||
# Outlier resilient weighting as described in:
|
||||
# "A Kalman Filter for Robust Outlier Detection - Jo-Anne Ting, ..."
|
||||
weight = 1 # (1.5)/(1 + np.sum(y**2)/np.sum(R))
|
||||
|
||||
S = dot(dot(H, P), H.T) + R / weight
|
||||
K = solve(S, dot(H, P.T)).T
|
||||
I_KH = np.eye(P.shape[0]) - dot(K, H)
|
||||
|
||||
# update actual state
|
||||
delta_x = dot(K, y)
|
||||
P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)
|
||||
|
||||
# inject observed error into state
|
||||
x_new = np.zeros(x.shape, dtype=np.float64)
|
||||
self.err_function(x, delta_x, x_new)
|
||||
return x_new, P, y.flatten()
|
||||
|
||||
def maha_test(self, x, P, kind, z, R, extra_args=[], maha_thresh=0.95): # pylint: disable=dangerous-default-value
|
||||
# init vars
|
||||
z = z.reshape((-1, 1))
|
||||
h = np.zeros(z.shape, dtype=np.float64)
|
||||
H = np.zeros((z.shape[0], self.dim_x), dtype=np.float64)
|
||||
|
||||
# C functions
|
||||
self.hs[kind](x, extra_args, h)
|
||||
self.Hs[kind](x, extra_args, H)
|
||||
|
||||
# y is the "loss"
|
||||
y = z - h
|
||||
|
||||
# if using eskf
|
||||
H_mod = np.zeros((x.shape[0], P.shape[0]), dtype=np.float64)
|
||||
self.H_mod(x, H_mod)
|
||||
H = H.dot(H_mod)
|
||||
|
||||
a = np.linalg.inv(H.dot(P).dot(H.T) + R)
|
||||
maha_dist = y.T.dot(a.dot(y))
|
||||
if maha_dist > chi2_ppf(maha_thresh, y.shape[0]):
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def rts_smooth(self, estimates, norm_quats=False):
|
||||
'''
|
||||
Returns rts smoothed results of
|
||||
kalman filter estimates
|
||||
If the kalman state is augmented with
|
||||
old states only the main state is smoothed
|
||||
'''
|
||||
xk_n = estimates[-1][0]
|
||||
Pk_n = estimates[-1][2]
|
||||
Fk_1 = np.zeros(Pk_n.shape, dtype=np.float64)
|
||||
|
||||
states_smoothed = [xk_n]
|
||||
covs_smoothed = [Pk_n]
|
||||
for k in range(len(estimates) - 2, -1, -1):
|
||||
xk1_n = xk_n
|
||||
if norm_quats:
|
||||
xk1_n[3:7] /= np.linalg.norm(xk1_n[3:7])
|
||||
Pk1_n = Pk_n
|
||||
|
||||
xk1_k, _, Pk1_k, _, t2, _, _, _, _ = estimates[k + 1]
|
||||
_, xk_k, _, Pk_k, t1, _, _, _, _ = estimates[k]
|
||||
dt = t2 - t1
|
||||
self.F(xk_k, dt, Fk_1)
|
||||
|
||||
d1 = self.dim_main
|
||||
d2 = self.dim_main_err
|
||||
Ck = np.linalg.solve(Pk1_k[:d2, :d2], Fk_1[:d2, :d2].dot(Pk_k[:d2, :d2].T)).T
|
||||
xk_n = xk_k
|
||||
delta_x = np.zeros((Pk_n.shape[0], 1), dtype=np.float64)
|
||||
self.inv_err_function(xk1_k, xk1_n, delta_x)
|
||||
delta_x[:d2] = Ck.dot(delta_x[:d2])
|
||||
x_new = np.zeros((xk_n.shape[0], 1), dtype=np.float64)
|
||||
self.err_function(xk_k, delta_x, x_new)
|
||||
xk_n[:d1] = x_new[:d1, 0]
|
||||
Pk_n = Pk_k
|
||||
Pk_n[:d2, :d2] = Pk_k[:d2, :d2] + Ck.dot(Pk1_n[:d2, :d2] - Pk1_k[:d2, :d2]).dot(Ck.T)
|
||||
states_smoothed.append(xk_n)
|
||||
covs_smoothed.append(Pk_n)
|
||||
|
||||
return np.flipud(np.vstack(states_smoothed)), np.stack(covs_smoothed, 0)[::-1]
|
||||
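The intended workflow for this module is: describe the model symbolically, call gen_code() once to emit the C sources, compile them (the SConscript above handles that in this repo), then construct EKF_sym against the generated folder. A minimal sketch, where the two-state constant-velocity model, the observation kind 3, and the "demo"/"generated" names are all invented for illustration:

  import numpy as np
  import sympy as sp
  from rednose.helpers.ekf_sym import gen_code, EKF_sym

  dim_x = 2
  dt = sp.Symbol('dt')
  x = sp.MatrixSymbol('x', dim_x, 1)
  state = sp.Matrix(x)
  f_sym = sp.Matrix([state[0] + dt * state[1], state[1]])  # constant-velocity process model
  obs_eqs = [[sp.Matrix([state[0]]), 3, None]]             # observe position only; kind 3 is arbitrary

  gen_code("generated", "demo", f_sym, dt, x, obs_eqs, dim_x, dim_x)

  # once the generated sources are compiled into generated/libdemo.so:
  # kf = EKF_sym("generated", "demo", np.diag([0.1, 0.1]), np.zeros(dim_x), np.eye(dim_x), dim_x, dim_x)
  # kf.predict_and_update_batch(0.1, 3, np.array([[1.0]]), np.array([[[0.5]]]))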
195
rednose/helpers/ekf_sym_pyx.pyx
Normal file
@@ -0,0 +1,195 @@
|
||||
# cython: language_level=3
|
||||
# cython: profile=True
|
||||
# distutils: language = c++
|
||||
|
||||
cimport cython
|
||||
|
||||
from libcpp.string cimport string
|
||||
from libcpp.vector cimport vector
|
||||
from libcpp cimport bool
|
||||
cimport numpy as np
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
cdef extern from "<optional>" namespace "std" nogil:
|
||||
cdef cppclass optional[T]:
|
||||
ctypedef T value_type
|
||||
bool has_value()
|
||||
T& value()
|
||||
|
||||
cdef extern from "rednose/helpers/ekf_load.h":
|
||||
cdef void ekf_load_and_register(string directory, string name)
|
||||
|
||||
cdef extern from "rednose/helpers/ekf_sym.h" namespace "EKFS":
|
||||
cdef cppclass MapVectorXd "Eigen::Map<Eigen::VectorXd>":
|
||||
MapVectorXd(double*, int)
|
||||
|
||||
cdef cppclass MapMatrixXdr "Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> >":
|
||||
MapMatrixXdr(double*, int, int)
|
||||
|
||||
cdef cppclass VectorXd "Eigen::VectorXd":
|
||||
VectorXd()
|
||||
double* data()
|
||||
int rows()
|
||||
|
||||
cdef cppclass MatrixXdr "Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>":
|
||||
MatrixXdr()
|
||||
double* data()
|
||||
int rows()
|
||||
int cols()
|
||||
|
||||
ctypedef struct Estimate:
|
||||
VectorXd xk1
|
||||
VectorXd xk
|
||||
MatrixXdr Pk1
|
||||
MatrixXdr Pk
|
||||
double t
|
||||
int kind
|
||||
vector[VectorXd] y
|
||||
vector[VectorXd] z
|
||||
vector[vector[double]] extra_args
|
||||
|
||||
cdef cppclass EKFSym:
|
||||
EKFSym(string name, MapMatrixXdr Q, MapVectorXd x_initial, MapMatrixXdr P_initial, int dim_main,
|
||||
int dim_main_err, int N, int dim_augment, int dim_augment_err, vector[int] maha_test_kinds,
|
||||
vector[int] quaternion_idxs, vector[string] global_vars, double max_rewind_age)
|
||||
void init_state(MapVectorXd state, MapMatrixXdr covs, double filter_time)
|
||||
|
||||
VectorXd state()
|
||||
MatrixXdr covs()
|
||||
void set_filter_time(double t)
|
||||
double get_filter_time()
|
||||
void set_global(string name, double val)
|
||||
void reset_rewind()
|
||||
|
||||
void predict(double t)
|
||||
optional[Estimate] predict_and_update_batch(double t, int kind, vector[MapVectorXd] z, vector[MapMatrixXdr] R,
|
||||
vector[vector[double]] extra_args, bool augment)
|
||||
|
||||
# Functions like `numpy_to_matrix` are not possible: Cython requires a default
# constructor for return variable types, which Eigen::Map does not provide
|
||||
|
||||
@cython.wraparound(False)
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[np.float64_t, ndim=2, mode="c"] matrix_to_numpy(MatrixXdr arr):
|
||||
cdef double[:,:] mem_view = <double[:arr.rows(),:arr.cols()]>arr.data()
|
||||
return np.copy(np.asarray(mem_view, dtype=np.double, order="C"))
|
||||
|
||||
@cython.wraparound(False)
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[np.float64_t, ndim=1, mode="c"] vector_to_numpy(VectorXd arr):
|
||||
cdef double[:] mem_view = <double[:arr.rows()]>arr.data()
|
||||
return np.copy(np.asarray(mem_view, dtype=np.double, order="C"))
|
||||
|
||||
cdef class EKF_sym_pyx:
|
||||
cdef EKFSym* ekf
|
||||
def __cinit__(self, str gen_dir, str name, np.ndarray[np.float64_t, ndim=2] Q,
|
||||
np.ndarray[np.float64_t, ndim=1] x_initial, np.ndarray[np.float64_t, ndim=2] P_initial, int dim_main,
|
||||
int dim_main_err, int N=0, int dim_augment=0, int dim_augment_err=0, list maha_test_kinds=[],
|
||||
list quaternion_idxs=[], list global_vars=[], double max_rewind_age=1.0, logger=None):
|
||||
# TODO logger
|
||||
ekf_load_and_register(gen_dir.encode('utf8'), name.encode('utf8'))
|
||||
|
||||
cdef np.ndarray[np.float64_t, ndim=2, mode='c'] Q_b = np.ascontiguousarray(Q, dtype=np.double)
|
||||
cdef np.ndarray[np.float64_t, ndim=1, mode='c'] x_initial_b = np.ascontiguousarray(x_initial, dtype=np.double)
|
||||
cdef np.ndarray[np.float64_t, ndim=2, mode='c'] P_initial_b = np.ascontiguousarray(P_initial, dtype=np.double)
|
||||
self.ekf = new EKFSym(
|
||||
name.encode('utf8'),
|
||||
MapMatrixXdr(<double*> Q_b.data, Q.shape[0], Q.shape[1]),
|
||||
MapVectorXd(<double*> x_initial_b.data, x_initial.shape[0]),
|
||||
MapMatrixXdr(<double*> P_initial_b.data, P_initial.shape[0], P_initial.shape[1]),
|
||||
dim_main,
|
||||
dim_main_err,
|
||||
N,
|
||||
dim_augment,
|
||||
dim_augment_err,
|
||||
maha_test_kinds,
|
||||
quaternion_idxs,
|
||||
[x.encode('utf8') for x in global_vars],
|
||||
max_rewind_age
|
||||
)
|
||||
|
||||
def init_state(self, np.ndarray[np.float64_t, ndim=1] state, np.ndarray[np.float64_t, ndim=2] covs, filter_time):
|
||||
cdef np.ndarray[np.float64_t, ndim=1, mode='c'] state_b = np.ascontiguousarray(state, dtype=np.double)
|
||||
cdef np.ndarray[np.float64_t, ndim=2, mode='c'] covs_b = np.ascontiguousarray(covs, dtype=np.double)
|
||||
self.ekf.init_state(
|
||||
MapVectorXd(<double*> state_b.data, state.shape[0]),
|
||||
MapMatrixXdr(<double*> covs_b.data, covs.shape[0], covs.shape[1]),
|
||||
np.nan if filter_time is None else filter_time
|
||||
)
|
||||
|
||||
def state(self):
|
||||
cdef np.ndarray res = vector_to_numpy(self.ekf.state())
|
||||
return res
|
||||
|
||||
def covs(self):
|
||||
return matrix_to_numpy(self.ekf.covs())
|
||||
|
||||
def set_filter_time(self, double t):
|
||||
self.ekf.set_filter_time(t)
|
||||
|
||||
def get_filter_time(self):
|
||||
return self.ekf.get_filter_time()
|
||||
|
||||
def set_global(self, str global_var, double val):
|
||||
self.ekf.set_global(global_var.encode('utf8'), val)
|
||||
|
||||
def reset_rewind(self):
|
||||
self.ekf.reset_rewind()
|
||||
|
||||
def predict(self, double t):
|
||||
self.ekf.predict(t)
|
||||
|
||||
def predict_and_update_batch(self, double t, int kind, z, R, extra_args=[[]], bool augment=False):
|
||||
cdef vector[MapVectorXd] z_map
|
||||
cdef np.ndarray[np.float64_t, ndim=1, mode='c'] zi_b
|
||||
for zi in z:
|
||||
zi_b = np.ascontiguousarray(zi, dtype=np.double)
|
||||
z_map.push_back(MapVectorXd(<double*> zi_b.data, zi.shape[0]))
|
||||
|
||||
cdef vector[MapMatrixXdr] R_map
|
||||
cdef np.ndarray[np.float64_t, ndim=2, mode='c'] Ri_b
|
||||
for Ri in R:
|
||||
Ri_b = np.ascontiguousarray(Ri, dtype=np.double)
|
||||
R_map.push_back(MapMatrixXdr(<double*> Ri_b.data, Ri.shape[0], Ri.shape[1]))
|
||||
|
||||
cdef vector[vector[double]] extra_args_map
|
||||
cdef vector[double] args_map
|
||||
for args in extra_args:
|
||||
args_map.clear()
|
||||
for a in args:
|
||||
args_map.push_back(a)
|
||||
extra_args_map.push_back(args_map)
|
||||
|
||||
cdef optional[Estimate] res = self.ekf.predict_and_update_batch(t, kind, z_map, R_map, extra_args_map, augment)
|
||||
if not res.has_value():
|
||||
return None
|
||||
|
||||
cdef VectorXd tmpvec
|
||||
return (
|
||||
vector_to_numpy(res.value().xk1),
|
||||
vector_to_numpy(res.value().xk),
|
||||
matrix_to_numpy(res.value().Pk1),
|
||||
matrix_to_numpy(res.value().Pk),
|
||||
res.value().t,
|
||||
res.value().kind,
|
||||
[vector_to_numpy(tmpvec) for tmpvec in res.value().y],
|
||||
z, # TODO: take return values?
|
||||
extra_args,
|
||||
)
|
||||
|
||||
def augment(self):
|
||||
raise NotImplementedError() # TODO
|
||||
|
||||
def get_augment_times(self):
|
||||
raise NotImplementedError() # TODO
|
||||
|
||||
def rts_smooth(self, estimates, norm_quats=False):
|
||||
raise NotImplementedError() # TODO
|
||||
|
||||
def maha_test(self, x, P, kind, z, R, extra_args=[], maha_thresh=0.95):
|
||||
raise NotImplementedError() # TODO
|
||||
|
||||
def __dealloc__(self):
|
||||
del self.ekf
|
||||
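EKF_sym_pyx mirrors the pure-Python EKF_sym API, so callers can be written against either backend; predict_and_update_batch() returns the same 9-tuple (xk_km1, xk_k, Pk_km1, Pk_k, t, kind, y, z, extra_args) or None when the observation is too old. The call pattern, with placeholder dimensions and a made-up "demo" filter that must already be generated and compiled:

  import numpy as np
  from rednose.helpers.ekf_sym_pyx import EKF_sym_pyx

  kf = EKF_sym_pyx("generated", "demo", np.diag([0.1, 0.1]), np.zeros(2), np.eye(2), 2, 2)
  r = kf.predict_and_update_batch(0.1, 3, [np.array([1.0])], [np.eye(1) * 0.5])
  if r is not None:
    xk_km1, xk_k, Pk_km1, Pk_k, t, kind, y, z, extra_args = r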
52
rednose/helpers/kalmanfilter.py
Normal file
@@ -0,0 +1,52 @@
from typing import Any, Dict

import numpy as np


class KalmanFilter:
  name = "<name>"
  initial_x = np.zeros((0, 0))
  initial_P_diag = np.zeros((0, 0))
  Q = np.zeros((0, 0))
  obs_noise: Dict[int, Any] = {}

  # Should be initialized when instantiating a KalmanFilter implementation
  filter = None  # noqa: A003

  @property
  def x(self):
    return self.filter.state()

  @property
  def t(self):
    return self.filter.get_filter_time()

  @property
  def P(self):
    return self.filter.covs()

  def init_state(self, state, covs_diag=None, covs=None, filter_time=None):
    if covs_diag is not None:
      P = np.diag(covs_diag)
    elif covs is not None:
      P = covs
    else:
      P = self.filter.covs()
    self.filter.init_state(state, P, filter_time)

  def get_R(self, kind, n):
    obs_noise = self.obs_noise[kind]
    dim = obs_noise.shape[0]
    R = np.zeros((n, dim, dim))
    for i in range(n):
      R[i, :, :] = obs_noise
    return R

  def predict_and_observe(self, t, kind, data, R=None):
    if len(data) > 0:
      data = np.atleast_2d(data)

    if R is None:
      R = self.get_R(kind, len(data))

    self.filter.predict_and_update_batch(t, kind, data, R)
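Concrete filters are expected to subclass KalmanFilter, fill in the class attributes, and attach a generated filter object. A hedged sketch of that pattern (the kind value, dimensions, and noise numbers are placeholders, not values from this commit):

  import numpy as np
  from rednose.helpers.kalmanfilter import KalmanFilter
  from rednose.helpers.ekf_sym_pyx import EKF_sym_pyx

  KIND_POS = 3  # placeholder observation kind

  class DemoFilter(KalmanFilter):
    name = "demo"
    initial_x = np.zeros(2)
    initial_P_diag = np.array([1.0, 1.0])
    Q = np.diag([0.1, 0.1])
    obs_noise = {KIND_POS: np.atleast_2d(0.5)}

    def __init__(self, generated_dir):
      self.filter = EKF_sym_pyx(generated_dir, self.name, self.Q, self.initial_x,
                                np.diag(self.initial_P_diag), 2, 2)

  # kf = DemoFilter("generated")
  # kf.predict_and_observe(0.1, KIND_POS, [[1.0]])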
154
rednose/helpers/sympy_helpers.py
Normal file
@@ -0,0 +1,154 @@
|
||||
import sympy as sp
|
||||
import numpy as np
|
||||
|
||||
# TODO: remove code duplication with openpilot.common.orientation
|
||||
def quat2rot(quats):
|
||||
quats = np.array(quats)
|
||||
input_shape = quats.shape
|
||||
quats = np.atleast_2d(quats)
|
||||
Rs = np.zeros((quats.shape[0], 3, 3))
|
||||
q0 = quats[:, 0]
|
||||
q1 = quats[:, 1]
|
||||
q2 = quats[:, 2]
|
||||
q3 = quats[:, 3]
|
||||
Rs[:, 0, 0] = q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3
|
||||
Rs[:, 0, 1] = 2 * (q1 * q2 - q0 * q3)
|
||||
Rs[:, 0, 2] = 2 * (q0 * q2 + q1 * q3)
|
||||
Rs[:, 1, 0] = 2 * (q1 * q2 + q0 * q3)
|
||||
Rs[:, 1, 1] = q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3
|
||||
Rs[:, 1, 2] = 2 * (q2 * q3 - q0 * q1)
|
||||
Rs[:, 2, 0] = 2 * (q1 * q3 - q0 * q2)
|
||||
Rs[:, 2, 1] = 2 * (q0 * q1 + q2 * q3)
|
||||
Rs[:, 2, 2] = q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3
|
||||
|
||||
if len(input_shape) < 2:
|
||||
return Rs[0]
|
||||
else:
|
||||
return Rs
|
||||
|
||||
|
||||
def euler2quat(eulers):
|
||||
eulers = np.array(eulers)
|
||||
if len(eulers.shape) > 1:
|
||||
output_shape = (-1,4)
|
||||
else:
|
||||
output_shape = (4,)
|
||||
eulers = np.atleast_2d(eulers)
|
||||
gamma, theta, psi = eulers[:,0], eulers[:,1], eulers[:,2]
|
||||
|
||||
q0 = np.cos(gamma / 2) * np.cos(theta / 2) * np.cos(psi / 2) + \
|
||||
np.sin(gamma / 2) * np.sin(theta / 2) * np.sin(psi / 2)
|
||||
q1 = np.sin(gamma / 2) * np.cos(theta / 2) * np.cos(psi / 2) - \
|
||||
np.cos(gamma / 2) * np.sin(theta / 2) * np.sin(psi / 2)
|
||||
q2 = np.cos(gamma / 2) * np.sin(theta / 2) * np.cos(psi / 2) + \
|
||||
np.sin(gamma / 2) * np.cos(theta / 2) * np.sin(psi / 2)
|
||||
q3 = np.cos(gamma / 2) * np.cos(theta / 2) * np.sin(psi / 2) - \
|
||||
np.sin(gamma / 2) * np.sin(theta / 2) * np.cos(psi / 2)
|
||||
|
||||
quats = np.array([q0, q1, q2, q3]).T
|
||||
for i in range(len(quats)):
|
||||
if quats[i,0] < 0: # pylint: disable=unsubscriptable-object
|
||||
quats[i] = -quats[i] # pylint: disable=unsupported-assignment-operation,unsubscriptable-object
|
||||
return quats.reshape(output_shape)
|
||||
|
||||
|
||||
def euler2rot(eulers):
|
||||
return quat2rot(euler2quat(eulers))
|
||||
|
||||
rotations_from_quats = quat2rot
|
||||
|
||||
|
||||
def cross(x):
|
||||
ret = sp.Matrix(np.zeros((3, 3)))
|
||||
ret[0, 1], ret[0, 2] = -x[2], x[1]
|
||||
ret[1, 0], ret[1, 2] = x[2], -x[0]
|
||||
ret[2, 0], ret[2, 1] = -x[1], x[0]
|
||||
return ret
|
||||
|
||||
|
||||
def rot_matrix(roll, pitch, yaw):
|
||||
cr, sr = np.cos(roll), np.sin(roll)
|
||||
cp, sp = np.cos(pitch), np.sin(pitch)
|
||||
cy, sy = np.cos(yaw), np.sin(yaw)
|
||||
rr = np.array([[1,0,0],[0, cr,-sr],[0, sr, cr]])
|
||||
rp = np.array([[cp,0,sp],[0, 1,0],[-sp, 0, cp]])
|
||||
ry = np.array([[cy,-sy,0],[sy, cy,0],[0, 0, 1]])
|
||||
return ry.dot(rp.dot(rr))
|
||||
|
||||
|
||||
def euler_rotate(roll, pitch, yaw):
  # make symbolic rotation matrix from eulers
  matrix_roll = sp.Matrix([[1, 0, 0],
                           [0, sp.cos(roll), -sp.sin(roll)],
                           [0, sp.sin(roll), sp.cos(roll)]])
  matrix_pitch = sp.Matrix([[sp.cos(pitch), 0, sp.sin(pitch)],
                            [0, 1, 0],
                            [-sp.sin(pitch), 0, sp.cos(pitch)]])
  matrix_yaw = sp.Matrix([[sp.cos(yaw), -sp.sin(yaw), 0],
                          [sp.sin(yaw), sp.cos(yaw), 0],
                          [0, 0, 1]])
  return matrix_yaw * matrix_pitch * matrix_roll


def quat_rotate(q0, q1, q2, q3):
  # make symbolic rotation matrix from quat
  return sp.Matrix([[q0**2 + q1**2 - q2**2 - q3**2, 2 * (q1 * q2 + q0 * q3), 2 * (q1 * q3 - q0 * q2)],
                    [2 * (q1 * q2 - q0 * q3), q0**2 - q1**2 + q2**2 - q3**2, 2 * (q2 * q3 + q0 * q1)],
                    [2 * (q1 * q3 + q0 * q2), 2 * (q2 * q3 - q0 * q1), q0**2 - q1**2 - q2**2 + q3**2]]).T


def quat_matrix_l(p):
  return sp.Matrix([[p[0], -p[1], -p[2], -p[3]],
                    [p[1],  p[0], -p[3],  p[2]],
                    [p[2],  p[3],  p[0], -p[1]],
                    [p[3], -p[2],  p[1],  p[0]]])


def quat_matrix_r(p):
  return sp.Matrix([[p[0], -p[1], -p[2], -p[3]],
                    [p[1],  p[0],  p[3], -p[2]],
                    [p[2], -p[3],  p[0],  p[1]],
                    [p[3],  p[2], -p[1],  p[0]]])
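The symbolic helpers line up with the numeric ones; a quick sketch (import path again assumed) substituting concrete angles into the sympy matrices:

import numpy as np

from rednose.helpers.sympy_helpers import euler_rotate, quat_rotate, euler2quat, rot_matrix  # path assumed

r, p, y = 0.1, -0.2, 0.3
R_num = rot_matrix(r, p, y)

# symbolic Rz*Ry*Rx evaluated at concrete angles matches the numeric helper
R_sym = np.array(euler_rotate(r, p, y).tolist(), dtype=float)
assert np.allclose(R_sym, R_num)

# quat_rotate builds the same matrix as quat2rot, just symbolically
q = [float(v) for v in euler2quat([r, p, y])]
R_quat = np.array(quat_rotate(*q).tolist(), dtype=float)
assert np.allclose(R_quat, R_num)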
def sympy_into_c(sympy_functions, global_vars=None):
  from sympy.utilities import codegen
  routines = []
  for name, expr, args in sympy_functions:
    r = codegen.make_routine(name, expr, language="C99", global_vars=global_vars)

    # sympy's argument ordering is broken for functions with output arguments,
    # so rebuild the argument list by hand
    nargs = []

    # reorder the input arguments
    for aa in args:
      if aa is None:
        nargs.append(codegen.InputArgument(sp.Symbol('unused'), dimensions=[1, 1]))
        continue
      found = False
      for a in r.arguments:
        if str(aa.name) == str(a.name):
          nargs.append(a)
          found = True
          break
      if not found:
        # [1, 1] is a hack for Matrices
        nargs.append(codegen.InputArgument(aa, dimensions=[1, 1]))

    # add the output arguments
    for a in r.arguments:
      if type(a) == codegen.OutputArgument:
        nargs.append(a)

    # assert len(r.arguments) == len(args)+1
    r.arguments = nargs

    # add routine to list
    routines.append(r)

  [(_, c_code), (_, c_header)] = codegen.get_code_generator('C', 'ekf', 'C99').write(routines, "ekf")

  # strip preprocessor lines; only the plain declarations/definitions are needed downstream
  c_header = '\n'.join(x for x in c_header.split("\n") if len(x) > 0 and x[0] != '#')
  c_code = '\n'.join(x for x in c_code.split("\n") if len(x) > 0 and x[0] != '#')

  return c_header, c_code
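A minimal sketch of what sympy_into_c consumes and produces; the routine name below is made up for illustration and the import path is assumed:

import sympy as sp

from rednose.helpers.sympy_helpers import sympy_into_c  # path assumed

x, y = sp.symbols('x y')
h = sp.Matrix([x + y, x * y])                 # a small vector-valued function

# each entry is (routine name, sympy expression, argument order)
c_header, c_code = sympy_into_c([("h_fun", h, [x, y])])

# c_header holds the C declarations and c_code the C99 bodies; rednose writes these
# out, compiles them into a shared library, and loads the functions at runtime
print(c_header)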
20
rednose/logger/logger.h
Normal file
20
rednose/logger/logger.h
Normal file
@@ -0,0 +1,20 @@
#pragma once

#ifdef SWAGLOG
#include SWAGLOG
#else

#define CLOUDLOG_DEBUG 10
#define CLOUDLOG_INFO 20
#define CLOUDLOG_WARNING 30
#define CLOUDLOG_ERROR 40
#define CLOUDLOG_CRITICAL 50

#define cloudlog(lvl, fmt, ...) printf(fmt "\n", ## __VA_ARGS__)

#define LOGD(fmt, ...) cloudlog(CLOUDLOG_DEBUG, fmt, ## __VA_ARGS__)
#define LOG(fmt, ...) cloudlog(CLOUDLOG_INFO, fmt, ## __VA_ARGS__)
#define LOGW(fmt, ...) cloudlog(CLOUDLOG_WARNING, fmt, ## __VA_ARGS__)
#define LOGE(fmt, ...) cloudlog(CLOUDLOG_ERROR, fmt, ## __VA_ARGS__)

#endif
52
rednose/templates/compute_pos.c
Normal file
52
rednose/templates/compute_pos.c
Normal file
@@ -0,0 +1,52 @@
#include <eigen3/Eigen/QR>
#include <eigen3/Eigen/Dense>
#include <iostream>

typedef Eigen::Matrix<double, KDIM*2, 3, Eigen::RowMajor> R3M;
typedef Eigen::Matrix<double, KDIM*2, 1> R1M;
typedef Eigen::Matrix<double, 3, 1> O1M;
typedef Eigen::Matrix<double, 3, 3, Eigen::RowMajor> M3D;

void gauss_newton(double *in_x, double *in_poses, double *in_img_positions) {

  double res[KDIM*2] = {0};
  double jac[KDIM*6] = {0};

  O1M x(in_x);
  O1M delta;
  int counter = 0;
  // iterate until the step is small or the cap is hit; "counter == 0" forces at least one pass
  while ((delta.squaredNorm() > 0.0001 and counter < 30) or counter == 0){
    res_fun(in_x, in_poses, in_img_positions, res);
    jac_fun(in_x, in_poses, in_img_positions, jac);
    R1M E(res); R3M J(jac);
    delta = (J.transpose()*J).inverse() * J.transpose() * E;
    x = x - delta;
    memcpy(in_x, x.data(), 3 * sizeof(double));
    counter = counter + 1;
  }
}


void compute_pos(double *to_c, double *poses, double *img_positions, double *param, double *pos) {
  // initial guess: last observed image position plus an initial inverse depth of 0.1
  param[0] = img_positions[KDIM*2-2];
  param[1] = img_positions[KDIM*2-1];
  param[2] = 0.1;
  gauss_newton(param, poses, img_positions);

  Eigen::Quaterniond q;
  q.w() = poses[KDIM*7-4];
  q.x() = poses[KDIM*7-3];
  q.y() = poses[KDIM*7-2];
  q.z() = poses[KDIM*7-1];
  M3D RC(to_c);
  Eigen::Matrix3d R = q.normalized().toRotationMatrix();
  Eigen::Matrix3d rot = R * RC.transpose();

  // un-project (u/rho, v/rho, 1/rho) in the last camera frame, then rotate and offset into ECEF
  pos[0] = param[0]/param[2];
  pos[1] = param[1]/param[2];
  pos[2] = 1.0/param[2];
  O1M ecef_offset(poses + KDIM*7-7);
  O1M ecef_output(pos);
  ecef_output = rot*ecef_output + ecef_offset;
  memcpy(pos, ecef_output.data(), 3 * sizeof(double));
}
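gauss_newton() above is a plain, undamped Gauss-Newton loop over sympy-generated residual and Jacobian functions. The same iteration in numpy, with a made-up linear least-squares problem standing in for the reprojection residuals:

import numpy as np

def gauss_newton(x, res_fun, jac_fun, tol=1e-4, max_iters=30):
  # same stopping rule as the template: stop when the step is small or the cap is hit
  x = np.array(x, dtype=float)
  for _ in range(max_iters):
    r = res_fun(x)                              # residuals, shape (m,)
    J = jac_fun(x)                              # Jacobian, shape (m, n)
    delta = np.linalg.solve(J.T @ J, J.T @ r)   # the template forms (J^T J)^-1 explicitly
    x -= delta
    if delta @ delta <= tol:
      break
  return x

# made-up example: fit a line y = a + b*t to three points
A = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])
b = np.array([1.0, 2.0, 2.9])
params = gauss_newton(np.zeros(2), lambda x: A @ x - b, lambda x: A)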
123
rednose/templates/ekf_c.c
Normal file
123
rednose/templates/ekf_c.c
Normal file
@@ -0,0 +1,123 @@
#include <eigen3/Eigen/Dense>
#include <iostream>

typedef Eigen::Matrix<double, DIM, DIM, Eigen::RowMajor> DDM;
typedef Eigen::Matrix<double, EDIM, EDIM, Eigen::RowMajor> EEM;
typedef Eigen::Matrix<double, DIM, EDIM, Eigen::RowMajor> DEM;

void predict(double *in_x, double *in_P, double *in_Q, double dt) {
  typedef Eigen::Matrix<double, MEDIM, MEDIM, Eigen::RowMajor> RRM;

  double nx[DIM] = {0};
  double in_F[EDIM*EDIM] = {0};

  // functions from sympy
  f_fun(in_x, dt, nx);
  F_fun(in_x, dt, in_F);

  EEM F(in_F);
  EEM P(in_P);
  EEM Q(in_Q);

  RRM F_main = F.topLeftCorner(MEDIM, MEDIM);
  P.topLeftCorner(MEDIM, MEDIM) = (F_main * P.topLeftCorner(MEDIM, MEDIM)) * F_main.transpose();
  P.topRightCorner(MEDIM, EDIM - MEDIM) = F_main * P.topRightCorner(MEDIM, EDIM - MEDIM);
  P.bottomLeftCorner(EDIM - MEDIM, MEDIM) = P.bottomLeftCorner(EDIM - MEDIM, MEDIM) * F_main.transpose();

  P = P + dt*Q;

  // copy out state
  memcpy(in_x, nx, DIM * sizeof(double));
  memcpy(in_P, P.data(), EDIM * EDIM * sizeof(double));
}

// note: the extra_args dimension is only correct when null-space projecting, otherwise it is 1
template <int ZDIM, int EADIM, bool MAHA_TEST>
void update(double *in_x, double *in_P, Hfun h_fun, Hfun H_fun, Hfun Hea_fun, double *in_z, double *in_R, double *in_ea, double MAHA_THRESHOLD) {
  typedef Eigen::Matrix<double, ZDIM, ZDIM, Eigen::RowMajor> ZZM;
  typedef Eigen::Matrix<double, ZDIM, DIM, Eigen::RowMajor> ZDM;
  typedef Eigen::Matrix<double, Eigen::Dynamic, EDIM, Eigen::RowMajor> XEM;
  //typedef Eigen::Matrix<double, EDIM, ZDIM, Eigen::RowMajor> EZM;
  typedef Eigen::Matrix<double, Eigen::Dynamic, 1> X1M;
  typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> XXM;

  double in_hx[ZDIM] = {0};
  double in_H[ZDIM * DIM] = {0};
  double in_H_mod[EDIM * DIM] = {0};
  double delta_x[EDIM] = {0};
  double x_new[DIM] = {0};

  // state x, P
  Eigen::Matrix<double, ZDIM, 1> z(in_z);
  EEM P(in_P);
  ZZM pre_R(in_R);

  // functions from sympy
  h_fun(in_x, in_ea, in_hx);
  H_fun(in_x, in_ea, in_H);
  ZDM pre_H(in_H);

  // get y (y = z - hx)
  Eigen::Matrix<double, ZDIM, 1> pre_y(in_hx); pre_y = z - pre_y;
  X1M y; XXM H; XXM R;
  if (Hea_fun){
    // project innovation, Jacobian and noise onto the null space of the extra-args Jacobian
    typedef Eigen::Matrix<double, ZDIM, EADIM, Eigen::RowMajor> ZAM;
    double in_Hea[ZDIM * EADIM] = {0};
    Hea_fun(in_x, in_ea, in_Hea);
    ZAM Hea(in_Hea);
    XXM A = Hea.transpose().fullPivLu().kernel();

    y = A.transpose() * pre_y;
    H = A.transpose() * pre_H;
    R = A.transpose() * pre_R * A;
  } else {
    y = pre_y;
    H = pre_H;
    R = pre_R;
  }
  // get modified H
  H_mod_fun(in_x, in_H_mod);
  DEM H_mod(in_H_mod);
  XEM H_err = H * H_mod;

  // do Mahalanobis distance test
  if (MAHA_TEST){
    XXM a = (H_err * P * H_err.transpose() + R).inverse();
    double maha_dist = y.transpose() * a * y;
    if (maha_dist > MAHA_THRESHOLD){
      R = 1.0e16 * R;
    }
  }

  // outlier resilient weighting
  double weight = 1;//(1.5)/(1 + y.squaredNorm()/R.sum());

  // kalman gains and I_KH
  XXM S = ((H_err * P) * H_err.transpose()) + R/weight;
  XEM KT = S.fullPivLu().solve(H_err * P.transpose());
  //EZM K = KT.transpose(); TODO: WHY DOES THIS NOT COMPILE?
  //EZM K = S.fullPivLu().solve(H_err * P.transpose()).transpose();
  //std::cout << "Here is the matrix rot:\n" << K << std::endl;
  EEM I_KH = Eigen::Matrix<double, EDIM, EDIM>::Identity() - (KT.transpose() * H_err);

  // update state by injecting dx
  Eigen::Matrix<double, EDIM, 1> dx(delta_x);
  dx = (KT.transpose() * y);
  memcpy(delta_x, dx.data(), EDIM * sizeof(double));
  err_fun(in_x, delta_x, x_new);
  Eigen::Matrix<double, DIM, 1> x(x_new);

  // update cov
  P = ((I_KH * P) * I_KH.transpose()) + ((KT.transpose() * R) * KT);

  // copy out state
  memcpy(in_x, x.data(), DIM * sizeof(double));
  memcpy(in_P, P.data(), EDIM * EDIM * sizeof(double));
  memcpy(in_z, y.data(), y.rows() * sizeof(double));
}
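For readers less fluent in Eigen, the core math of predict() and update() transcribed to numpy; this sketch drops the error-state handling (H_mod/err_fun), the null-space projection, and the blocked covariance propagation, and is illustrative only:

import numpy as np

def predict(x, P, Q, F, f, dt):
  # x <- f(x, dt), P <- F P F^T + dt*Q
  return f(x, dt), F @ P @ F.T + dt * Q

def update(x, P, z, R, h, H, maha_threshold=None):
  y = z - h(x)                              # innovation
  S = H @ P @ H.T + R                       # innovation covariance
  if maha_threshold is not None:
    # Mahalanobis gating: inflate R to effectively reject outliers, as the template does
    if y @ np.linalg.solve(S, y) > maha_threshold:
      R = 1e16 * R
      S = H @ P @ H.T + R
  K = P @ H.T @ np.linalg.inv(S)            # Kalman gain
  x = x + K @ y
  I_KH = np.eye(P.shape[0]) - K @ H
  P = I_KH @ P @ I_KH.T + K @ R @ K.T       # Joseph-form covariance update, as above
  return x, P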
56
rednose/templates/feature_handler.c
Normal file
56
rednose/templates/feature_handler.c
Normal file
@@ -0,0 +1,56 @@
// reject tracks whose frame-to-frame image motion changes too abruptly
bool sane(double track [K + 1][5]) {
  double diffs_x [K-1];
  double diffs_y [K-1];
  int i;
  for (i = 0; i < K-1; i++) {
    diffs_x[i] = fabs(track[i+2][2] - track[i+1][2]);
    diffs_y[i] = fabs(track[i+2][3] - track[i+1][3]);
  }
  for (i = 1; i < K-1; i++) {
    if (((diffs_x[i] > 0.05 or diffs_x[i-1] > 0.05) and
         (diffs_x[i] > 2*diffs_x[i-1] or
          diffs_x[i] < .5*diffs_x[i-1])) or
        ((diffs_y[i] > 0.05 or diffs_y[i-1] > 0.05) and
         (diffs_y[i] > 2*diffs_y[i-1] or
          diffs_y[i] < .5*diffs_y[i-1]))){
      return false;
    }
  }
  return true;
}

void merge_features(double *tracks, double *features, long long *empty_idxs) {
  double feature_arr [3000][5];
  memcpy(feature_arr, features, 3000 * 5 * sizeof(double));
  double track_arr [6000][K + 1][5];
  memcpy(track_arr, tracks, (K+1) * 6000 * 5 * sizeof(double));
  int match;
  int empty_idx = 0;
  int idx;
  for (int i = 0; i < 3000; i++) {
    match = feature_arr[i][4];
    if (track_arr[match][0][1] == match and track_arr[match][0][2] == 0){
      // append this feature to its existing track
      track_arr[match][0][0] = track_arr[match][0][0] + 1;
      track_arr[match][0][1] = feature_arr[i][1];
      track_arr[match][0][2] = 1;
      idx = track_arr[match][0][0];
      memcpy(track_arr[match][idx], feature_arr[i], 5 * sizeof(double));
      if (idx == K){
        // label complete
        track_arr[match][0][3] = 1;
        if (sane(track_arr[match])){
          // label valid
          track_arr[match][0][4] = 1;
        }
      }
    } else {
      // gen new track with this feature
      track_arr[empty_idxs[empty_idx]][0][0] = 1;
      track_arr[empty_idxs[empty_idx]][0][1] = feature_arr[i][1];
      track_arr[empty_idxs[empty_idx]][0][2] = 1;
      memcpy(track_arr[empty_idxs[empty_idx]][1], feature_arr[i], 5 * sizeof(double));
      empty_idx = empty_idx + 1;
    }
  }
  memcpy(tracks, track_arr, (K+1) * 6000 * 5 * sizeof(double));
}