Custom driving personality profiles
Added toggles to customize the driving personality profiles.
@@ -56,26 +56,46 @@ T_DIFFS = np.diff(T_IDXS, prepend=[0.])
 COMFORT_BRAKE = 2.5
 STOP_DISTANCE = 6.0

-def get_jerk_factor(personality=log.LongitudinalPersonality.standard):
-  if personality==log.LongitudinalPersonality.relaxed:
-    return 1.0
-  elif personality==log.LongitudinalPersonality.standard:
-    return 1.0
-  elif personality==log.LongitudinalPersonality.aggressive:
-    return 0.5
+def get_jerk_factor(custom_personalities, aggressive_jerk, standard_jerk, relaxed_jerk, personality=log.LongitudinalPersonality.standard):
+  if custom_personalities:
+    if personality==log.LongitudinalPersonality.relaxed:
+      return relaxed_jerk
+    elif personality==log.LongitudinalPersonality.standard:
+      return standard_jerk
+    elif personality==log.LongitudinalPersonality.aggressive:
+      return aggressive_jerk
+    else:
+      raise NotImplementedError("Longitudinal personality not supported")
   else:
-    raise NotImplementedError("Longitudinal personality not supported")
+    if personality==log.LongitudinalPersonality.relaxed:
+      return 1.0
+    elif personality==log.LongitudinalPersonality.standard:
+      return 1.0
+    elif personality==log.LongitudinalPersonality.aggressive:
+      return 0.5
+    else:
+      raise NotImplementedError("Longitudinal personality not supported")

-def get_T_FOLLOW(personality=log.LongitudinalPersonality.standard):
-  if personality==log.LongitudinalPersonality.relaxed:
-    return 1.75
-  elif personality==log.LongitudinalPersonality.standard:
-    return 1.45
-  elif personality==log.LongitudinalPersonality.aggressive:
-    return 1.25
+def get_T_FOLLOW(custom_personalities=False, aggressive_follow=1.25, standard_follow=1.45, relaxed_follow=1.75, personality=log.LongitudinalPersonality.standard):
+  if custom_personalities:
+    if personality==log.LongitudinalPersonality.relaxed:
+      return relaxed_follow
+    elif personality==log.LongitudinalPersonality.standard:
+      return standard_follow
+    elif personality==log.LongitudinalPersonality.aggressive:
+      return aggressive_follow
+    else:
+      raise NotImplementedError("Longitudinal personality not supported")
   else:
-    raise NotImplementedError("Longitudinal personality not supported")
+    if personality==log.LongitudinalPersonality.relaxed:
+      return 1.75
+    elif personality==log.LongitudinalPersonality.standard:
+      return 1.45
+    elif personality==log.LongitudinalPersonality.aggressive:
+      return 1.25
+    else:
+      raise NotImplementedError("Longitudinal personality not supported")

 def get_stopped_equivalence_factor(v_lead):
   return (v_lead**2) / (2 * COMFORT_BRAKE)
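
Both helpers now gate on custom_personalities and fall back to the stock values otherwise. A minimal usage sketch, assuming an openpilot checkout where cereal is importable and both helpers are in scope:

from cereal import log

# Stock path: custom_personalities defaults to False, so the hardcoded
# follow times (1.75 / 1.45 / 1.25 s) are returned.
t_follow = get_T_FOLLOW(personality=log.LongitudinalPersonality.relaxed)  # 1.75

# Custom path: the user-tuned value is returned instead.
t_follow = get_T_FOLLOW(custom_personalities=True, relaxed_follow=2.0,
                        personality=log.LongitudinalPersonality.relaxed)  # 2.0

# Note the asymmetry in the diff: get_jerk_factor declares no defaults for its
# first four parameters, so callers must always pass them explicitly.
jerk = get_jerk_factor(False, 0.5, 1.0, 1.0,
                       personality=log.LongitudinalPersonality.aggressive)  # 0.5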
@@ -272,8 +292,8 @@ class LongitudinalMpc:
     for i in range(N):
       self.solver.cost_set(i, 'Zl', Zl)

-  def set_weights(self, prev_accel_constraint=True, personality=log.LongitudinalPersonality.standard):
-    jerk_factor = get_jerk_factor(personality)
+  def set_weights(self, custom_personalities, aggressive_jerk, standard_jerk, relaxed_jerk, prev_accel_constraint=True, personality=log.LongitudinalPersonality.standard):
+    jerk_factor = get_jerk_factor(custom_personalities, aggressive_jerk, standard_jerk, relaxed_jerk, personality)
     if self.mode == 'acc':
       a_change_cost = A_CHANGE_COST if prev_accel_constraint else 0
       cost_weights = [X_EGO_OBSTACLE_COST, X_EGO_COST, V_EGO_COST, A_EGO_COST, jerk_factor * a_change_cost, jerk_factor * J_EGO_COST]
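
The jerk factor scales only the acceleration-change and jerk terms of the cost vector, so the aggressive profile (0.5 stock) halves those penalties and lets the solver change acceleration more freely. A toy illustration with placeholder constants (the real A_CHANGE_COST and J_EGO_COST are defined elsewhere in the file and do not appear in this diff):

# Placeholder values for illustration only, not openpilot's actual constants.
A_CHANGE_COST, J_EGO_COST = 200.0, 5.0

for jerk_factor in (1.0, 0.5):  # standard/relaxed vs. aggressive stock factors
  # Smaller factor -> smaller penalty on acceleration changes -> snappier response.
  print(jerk_factor, jerk_factor * A_CHANGE_COST, jerk_factor * J_EGO_COST)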
@@ -331,8 +351,8 @@ class LongitudinalMpc:
     self.cruise_min_a = min_a
     self.max_a = max_a

-  def update(self, radarstate, v_cruise, x, v, a, j, aggressive_acceleration, personality=log.LongitudinalPersonality.standard):
-    t_follow = get_T_FOLLOW(personality)
+  def update(self, radarstate, v_cruise, x, v, a, j, aggressive_acceleration, custom_personalities, aggressive_follow, standard_follow, relaxed_follow, personality=log.LongitudinalPersonality.standard):
+    t_follow = get_T_FOLLOW(custom_personalities, aggressive_follow, standard_follow, relaxed_follow, personality)
     self.t_follow = t_follow
     v_ego = self.x0[1]
     self.status = radarstate.leadOne.status or radarstate.leadTwo.status
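
For a sense of scale on the follow times: using the STOP_DISTANCE of 6.0 m from the first hunk and a simplified constant-time-gap model (the MPC's actual safe-distance constraint also includes a v**2 / (2 * COMFORT_BRAKE) braking term), the steady-state gap at highway speed works out roughly as follows:

STOP_DISTANCE = 6.0  # m, from the first hunk

def approx_follow_distance(v_ego, t_follow):
  # Simplified estimate; ignores the braking-distance term the MPC also uses.
  return STOP_DISTANCE + v_ego * t_follow

v_ego = 30.0  # m/s, about 108 km/h
for name, t_follow in (("aggressive", 1.25), ("standard", 1.45), ("relaxed", 1.75)):
  print(f"{name}: {approx_follow_distance(v_ego, t_follow):.1f} m")
# aggressive: 43.5 m, standard: 49.5 m, relaxed: 58.5 m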
@@ -135,11 +135,12 @@ class LongitudinalPlanner:
     accel_limits_turns[0] = min(accel_limits_turns[0], self.a_desired + 0.05)
     accel_limits_turns[1] = max(accel_limits_turns[1], self.a_desired - 0.05)

-    self.mpc.set_weights(prev_accel_constraint, personality=self.personality)
+    self.mpc.set_weights(frogpilot_planner.custom_personalities, frogpilot_planner.aggressive_jerk, frogpilot_planner.standard_jerk, frogpilot_planner.relaxed_jerk, prev_accel_constraint, personality=self.personality)
     self.mpc.set_accel_limits(accel_limits_turns[0], accel_limits_turns[1])
     self.mpc.set_cur_state(self.v_desired_filter.x, self.a_desired)
     x, v, a, j = self.parse_model(sm['modelV2'], self.v_model_error)
-    self.mpc.update(sm['radarState'], v_cruise, x, v, a, j, frogpilot_planner.aggressive_acceleration, personality=self.personality)
+    self.mpc.update(sm['radarState'], frogpilot_planner.v_cruise, x, v, a, j, frogpilot_planner.aggressive_acceleration,
+                    frogpilot_planner.custom_personalities, frogpilot_planner.aggressive_follow, frogpilot_planner.standard_follow, frogpilot_planner.relaxed_follow, personality=self.personality)

     self.v_desired_trajectory_full = np.interp(ModelConstants.T_IDXS, T_IDXS_MPC, self.mpc.v_solution)
     self.a_desired_trajectory_full = np.interp(ModelConstants.T_IDXS, T_IDXS_MPC, self.mpc.a_solution)
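
All of the new tunables flow into the planner through the frogpilot_planner object rather than being hardcoded. A hypothetical stand-in that lists the attributes these call sites read (the real object is populated from FrogPilot's UI toggles; the defaults below mirror the stock values visible in the diff):

from dataclasses import dataclass

@dataclass
class FrogPilotPlannerStub:  # hypothetical stand-in for frogpilot_planner
  custom_personalities: bool = False
  aggressive_jerk: float = 0.5
  standard_jerk: float = 1.0
  relaxed_jerk: float = 1.0
  aggressive_follow: float = 1.25
  standard_follow: float = 1.45
  relaxed_follow: float = 1.75
  aggressive_acceleration: bool = False
  v_cruise: float = 0.0  # m/s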