I'm running Ubuntu 22.04 with the newest stable Drake install. I am using differential evolution (DE) to optimize gaits in my controller. Bounds of my parameters
# Differential-evolution search space.
# Rows 0-5: gait-generator inputs; rows 6-14: PID gains per servo group.
bounds = np.array([
    [0.0005,  0.005],     # servos 1 & 2: input parameter 1
    [1,       3],         # servos 1 & 2: input parameter 2
    [0.0001,  np.pi / 6], # servo 3: input parameter 1
    [1,       3],         # servo 3: input parameter 2
    [0.00001, np.pi / 6], # servo 4: input parameter 1
    [0.0001,  np.pi / 2], # servo 3: input parameter 3
    [0,       50000],     # servos 1 & 2: kp
    [0,       2],         # servos 1 & 2: ki
    [0,       2],         # servos 1 & 2: kd
    [0,       40],        # servo 3: kp
    [0,       2],         # servo 3: ki
    [0,       2],         # servo 3: kd
    [0,       40],        # servo 4: kp
    [0,       2],         # servo 4: ki
    [0,       2],         # servo 4: kd
])
DE optimizes my controller inputs and PID gains. I have a function that updates my target state based on some joint inputs:
# set control
# BUG FIX (review): `ComputeControl` is called as a bound method, so Python
# already supplies `self` implicitly. The original explicit `self` argument
# shifted every positional parameter by one (the instance landed in
# `elapsed_time`) and, combined with `current_state=` below, raises
# "got multiple values for argument 'current_state'".
self.control_signal[:] = self.ComputeControl(elapsed_time,
                                             current_state=self.current_state,
                                             desired_state=self.target_state,
                                             gain_matrix=self.gain_matrix
                                             )
This is part of the code in my controller class:
def ComputeControl(self, elapsed_time, current_state, desired_state, gain_matrix,
                   feedforward=None):
    """Compute PID feedback for the four actuated joints.

    Parameters
    ----------
    elapsed_time : float
        Time since the start of the episode (not used by the feedback law).
    current_state, desired_state : array-like
        Full state vectors. Joint positions are read at indices 7 (torso
        base roll), 8 (torso pitch), 9 (left leg) and 11 (right leg); the
        matching velocities at indices -5, -4, -3 and -1.
    gain_matrix : array-like
        Unused here; gains are taken from the instance attributes
        (self.*_kp / *_ki / *_kd).
    feedforward : array-like or None
        Optional feedforward command added to the feedback term.

    Returns
    -------
    numpy.ndarray, shape (4,)
        [torso_base, torso, left_leg, right_leg] commands, NaN-sanitized
        and saturated to [-1, 1] beyond a +/-1000 threshold.
    """
    # NOTE(review): the ki term scales the *instantaneous* error by
    # control_period; it is not an accumulated integral — confirm intended.
    torso_base_err = desired_state[7] - current_state[7]
    torso_base_feedback = (self.torso_base_kp * torso_base_err +
                           self.torso_base_ki * self.control_period * torso_base_err +
                           self.torso_base_kd * (desired_state[-5] - current_state[-5]))

    torso_err = desired_state[8] - current_state[8]
    torso_feedback = (self.torso_kp * torso_err +
                      self.torso_ki * self.control_period * torso_err +
                      self.torso_kd * (desired_state[-4] - current_state[-4]))

    left_leg_err = desired_state[9] - current_state[9]
    left_leg_feedback = (self.leg_kp * left_leg_err +
                         self.leg_ki * self.control_period * left_leg_err +
                         self.leg_kd * (desired_state[-3] - current_state[-3]))

    right_leg_err = desired_state[11] - current_state[11]
    right_leg_feedback = (self.leg_kp * right_leg_err +
                          self.leg_ki * self.control_period * right_leg_err +
                          self.leg_kd * (desired_state[-1] - current_state[-1]))

    feedback_input = np.array([torso_base_feedback, torso_feedback,
                               left_leg_feedback, right_leg_feedback])

    # prevent sim dynamics from crashing
    # NOTE(review): this guard reads self.current_state, not the
    # current_state parameter — confirm both always refer to the same data.
    if self.current_state[6] < 0.08:
        feedback_input = np.zeros_like(feedback_input)

    if np.isnan(feedback_input).any():
        feedback_input = np.nan_to_num(feedback_input, nan=0.0)
        print("NaN IS HERE!!! BEWARE!")
        print("NaN feedback_input=", feedback_input)

    # Saturate runaway commands.
    # BUG FIX: the original first mask used np.abs(...) > 1000, which also
    # captured large *negative* values and set them to +1; the subsequent
    # `< -1000` mask could then never fire, so large negative feedback was
    # clamped to +1 instead of -1 — a sign flip fed straight to the plant.
    feedback_input[feedback_input > 1000] = 1
    feedback_input[feedback_input < -1000] = -1
    # print("feedback_input=", feedback_input)

    if feedforward is None:
        return feedback_input
    else:
        return feedback_input + feedforward
I am working on a bipedal walker that has two prismatic joints (one in each leg), a rolling revolute joint at the torso, and a pitching revolute joint at the same torso. Unfortunately, I cannot post the code online because it is part of work that has not yet been published, but I can send it to someone if anyone has an idea on how to fix the error. I have checked my URDF and my controller. Initially I thought my gains were too high for my robot (the robot was really light) so I increased the mass of the robot, and I am still running into an issue:
---------------------------------------------------------------------------
RemoteTraceback Traceback (most recent call last)
RemoteTraceback:
"""
Traceback (most recent call last):
File "/usr/lib/python3.10/multiprocessing/pool.py", line 125, in worker
result = (True, func(*args, **kwds))
File "/usr/lib/python3.10/multiprocessing/pool.py", line 48, in mapstar
return list(map(*args))
File "/home/naomio/drake_gym_env/lib/python3.10/site-packages/scipy/_lib/_util.py", line 657, in __call__
return self.f(x, *self.args)
File "/tmp/ipykernel_1108179/1426189393.py", line 2, in objective
return -run_sim_get_cost(theta = theta, mass = 0.435, friction = 0.9, simtime_in_secs = 30)
File "/tmp/ipykernel_1108179/489585047.py", line 3, in run_sim_get_cost
x, y, z = simulate(leg_amplitude = theta[0],
File "/tmp/ipykernel_1108179/2577454022.py", line 73, in simulate
simulator.AdvanceTo(simulation_time_step*(idx+1))
RuntimeError: The Hessian of the momentum cost along the search direction is NaN. The typical root cause for this failure is usually outside the solver, when there are not enough checks to catch it earlier. In this case, a previous (valid) simulation result led to the generation of NaN values in a controller, that are then fed as actuation through MultibodyPlant's ports. If you don't believe this is the root cause of your problem, please contact the Drake developers and/or open a Drake issue with a minimal reproduction example to help debug your problem.
"""
The above exception was the direct cause of the following exception:
RuntimeError Traceback (most recent call last)
Cell In[24], line 16
11 print('------------------------------------------------------------------------------------------------------')
13 return result.x
---> 16 best_theta = optimize_simulation()
18 print("Parameter names: leg amp/retract, leg freq, torso rolling amp, torso rolling freq, torso pitch amp, torso rolling offset")
19 print("Optimized Parameters:", best_theta)
Cell In[24], line 6, in optimize_simulation()
4 def optimize_simulation():
5 num_cores = multiprocessing.cpu_count()-2
----> 6 result = differential_evolution(objective, bounds, strategy='best1bin', maxiter=1000, popsize=20, tol=1e-6, workers=num_cores)
7 print('------------------------------------------------------------------------------------------------------')
8 print(f"Optimization Status: {result.message}")
File ~/drake_gym_env/lib/python3.10/site-packages/scipy/_lib/_util.py:440, in _transition_to_rng.<locals>.decorator.<locals>.wrapper(*args, **kwargs)
433 message = (
434 "The NumPy global RNG was seeded by calling "
435 f"`np.random.seed`. Beginning in {end_version}, this "
436 "function will no longer use the global RNG."
437 ) + cmn_msg
438 warnings.warn(message, FutureWarning, stacklevel=2)
--> 440 return fun(*args, **kwargs)
File ~/drake_gym_env/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py:501, in differential_evolution(func, bounds, args, strategy, maxiter, popsize, tol, mutation, recombination, rng, callback, disp, polish, init, atol, updating, workers, constraints, x0, integrality, vectorized)
484 # using a context manager means that any created Pool objects are
485 # cleared up.
486 with DifferentialEvolutionSolver(func, bounds, args=args,
487 strategy=strategy,
488 maxiter=maxiter,
(...)
499 integrality=integrality,
500 vectorized=vectorized) as solver:
--> 501 ret = solver.solve()
503 return ret
File ~/drake_gym_env/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py:1176, in DifferentialEvolutionSolver.solve(self)
1171 self.feasible, self.constraint_violation = (
1172 self._calculate_population_feasibilities(self.population))
1174 # only work out population energies for feasible solutions
1175 self.population_energies[self.feasible] = (
-> 1176 self._calculate_population_energies(
1177 self.population[self.feasible]))
1179 self._promote_lowest_energy()
1181 # do the optimization.
File ~/drake_gym_env/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py:1338, in DifferentialEvolutionSolver._calculate_population_energies(self, population)
1335 parameters_pop = self._scale_parameters(population)
1336 try:
1337 calc_energies = list(
-> 1338 self._mapwrapper(self.func, parameters_pop[0:S])
1339 )
1340 calc_energies = np.squeeze(calc_energies)
1341 except (TypeError, ValueError) as e:
1342 # wrong number of arguments for _mapwrapper
1343 # or wrong length returned from the mapper
File ~/drake_gym_env/lib/python3.10/site-packages/scipy/_lib/_util.py:727, in MapWrapper.__call__(self, func, iterable)
724 def __call__(self, func, iterable):
725 # only accept one iterable because that's all Pool.map accepts
726 try:
--> 727 return self._mapfunc(func, iterable)
728 except TypeError as e:
729 # wrong number of arguments
730 raise TypeError("The map-like callable must be of the"
731 " form f(func, iterable)") from e
File /usr/lib/python3.10/multiprocessing/pool.py:367, in Pool.map(self, func, iterable, chunksize)
362 def map(self, func, iterable, chunksize=None):
363 '''
364 Apply `func` to each element in `iterable`, collecting the results
365 in a list that is returned.
366 '''
--> 367 return self._map_async(func, iterable, mapstar, chunksize).get()
File /usr/lib/python3.10/multiprocessing/pool.py:774, in ApplyResult.get(self, timeout)
772 return self._value
773 else:
--> 774 raise self._value
RuntimeError: The Hessian of the momentum cost along the search direction is NaN. The typical root cause for this failure is usually outside the solver, when there are not enough checks to catch it earlier. In this case, a previous (valid) simulation result led to the generation of NaN values in a controller, that are then fed as actuation through MultibodyPlant's ports. If you don't believe this is the root cause of your problem, please contact the Drake developers and/or open a Drake issue with a minimal reproduction example to help debug your problem.