The first thing you need to do is to have a copy of the ROSject we mentioned above. Click on the link to get an automatic copy. You should now see a ROSject called BuoyancyBot on your list of ROSjects, something like the image below:
BuoyancyBot simulation on ROSDS
After clicking on the Open button to open the ROSject, you should have the environment like the one in the image below:
BuoyancyBot opened on ROSDS
Finding the code
After opening the ROSject, you can see the code on the ~/simulation_ws/src folder. There you will find the folders buoyancy_tests_pkg and spawn_robot_tools. It is worth mentioning that the plugin we created is called fluid_resitance.cpp, which has the content below. Bear in mind that you can find that code on the ROSject:
#include "ros/callback_queue.h"
#include "ros/ros.h"
#include "ros/subscribe_options.h"
#include "std_msgs/Float32.h"
#include <functional>
#include <gazebo/common/common.hh>
#include <gazebo/gazebo.hh>
#include <gazebo/msgs/msgs.hh>
#include <gazebo/physics/physics.hh>
#include <gazebo/transport/transport.hh>
#include <ignition/math/Vector3.hh>
#include <thread>
namespace gazebo {

/// \brief Model plugin that simulates fluid resistance (drag): at a fixed
/// rate it applies a force opposite and proportional to the model's relative
/// linear velocity on a configurable link. The resistance index can be
/// changed at runtime by publishing a std_msgs/Float32 on a ROS topic.
class FluidResitance : public ModelPlugin {
public:
  /// \brief Gazebo entry point, called once when the plugin is loaded.
  /// Reads the optional SDF parameters fluid_resitanceTopicName,
  /// NameLinkToApplyResitance and rate, hooks the world-update event and
  /// starts a ROS node with a subscriber on its own callback queue.
  /// \param _parent Pointer to the model this plugin is attached to.
  /// \param _sdf Pointer to the plugin's SDF element.
  void Load(physics::ModelPtr _parent, sdf::ElementPtr _sdf) {
    if (_sdf->HasElement("fluid_resitanceTopicName")) {
      this->fluid_resitanceTopicName =
          _sdf->Get<std::string>("fluid_resitanceTopicName");
    } else {
      ROS_WARN("No Topic Given name given, setting default name %s",
               this->fluid_resitanceTopicName.c_str());
    }
    if (_sdf->HasElement("NameLinkToApplyResitance")) {
      this->NameLinkToApplyResitance =
          _sdf->Get<std::string>("NameLinkToApplyResitance");
    } else {
      ROS_WARN("No NameLinkToApplyResitance Given name given, setting default "
               "name %s",
               this->NameLinkToApplyResitance.c_str());
    }
    if (_sdf->HasElement("rate")) {
      this->rate = _sdf->Get<double>("rate");
    } else {
      ROS_WARN("No rate Given name given, setting default "
               "name %f",
               this->rate);
    }

    // Store the pointer to the model and the link the drag force acts on.
    this->model = _parent;
    this->world = this->model->GetWorld();
    this->link_to_apply_resitance =
        this->model->GetLink(this->NameLinkToApplyResitance);

    // Listen to the update event. This event is broadcast every
    // simulation iteration.
    this->updateConnection = event::Events::ConnectWorldUpdateBegin(
        std::bind(&FluidResitance::OnUpdate, this));

    // Initialize ros, if it has not already been initialized.
    if (!ros::isInitialized()) {
      int argc = 0;
      char **argv = NULL;
      ros::init(argc, argv, "model_mas_controler_rosnode",
                ros::init_options::NoSigintHandler);
    }

    // Create our ROS node. This acts in a similar manner to
    // the Gazebo node.
    this->rosNode.reset(new ros::NodeHandle("model_mas_controler_rosnode"));

#if (GAZEBO_MAJOR_VERSION >= 8)
    this->last_time = this->world->SimTime().Float();
#else
    this->last_time = this->world->GetSimTime().Float();
#endif

    // Subscribe on a custom callback queue so incoming messages are
    // processed by our own helper thread, not the global ROS queue.
    ros::SubscribeOptions so = ros::SubscribeOptions::create<std_msgs::Float32>(
        this->fluid_resitanceTopicName, 1,
        boost::bind(&FluidResitance::OnRosMsg, this, _1), ros::VoidPtr(),
        &this->rosQueue);
    this->rosSub = this->rosNode->subscribe(so);

    // Spin up the queue helper thread.
    this->rosQueueThread =
        std::thread(std::bind(&FluidResitance::QueueThread, this));

    ROS_WARN("Loaded FluidResitance Plugin with parent...%s, With Fluid "
             "Resitance = %f "
             "Started ",
             this->model->GetName().c_str(), this->fluid_resitance_index);
  }

  /// \brief Called by the world update start event; throttles the drag
  /// application to the configured rate using simulation time.
public:
  void OnUpdate() {
    float period = 1.0 / this->rate;
    // Get simulator time
#if (GAZEBO_MAJOR_VERSION >= 8)
    float current_time = this->world->SimTime().Float();
#else
    float current_time = this->world->GetSimTime().Float();
#endif
    float dt = current_time - this->last_time;
    if (dt <= period) {
      ROS_DEBUG(">>>>>>>>>>TimePassed = %f, TimePeriod =%f ", dt, period);
      return;
    } else {
      this->last_time = current_time;
      this->ApplyResitance();
    }
  }

public:
  /// \brief Update the fluid resistance index (drag coefficient).
  void SetResitance(const double &_force) {
    this->fluid_resitance_index = _force;
    ROS_WARN("model_fluid_resitance_index changed >> %f",
             this->fluid_resitance_index);
  }

  /// \brief Cache the model's linear velocity expressed in the model frame.
  void UpdateLinearVel() {
#if (GAZEBO_MAJOR_VERSION >= 8)
    this->now_lin_vel = this->model->RelativeLinearVel();
#else
    this->now_lin_vel = this->model->GetRelativeLinearVel();
#endif
  }

  /// \brief Apply force = -index * velocity on the configured link, plus the
  /// torque compensating for the force not acting at the centre of gravity.
  void ApplyResitance() {
    this->UpdateLinearVel();
#if (GAZEBO_MAJOR_VERSION >= 8)
    ignition::math::Vector3d force, torque;
    // BUG FIX: ignition::math::Vector3d exposes X()/Y()/Z() accessors, not
    // the public .x/.y/.z members the old gazebo::math::Vector3 had; the
    // original code did not compile for Gazebo >= 8.
    ROS_WARN("LinearSpeed = [%f,%f,%f] ", this->now_lin_vel.X(),
             this->now_lin_vel.Y(), this->now_lin_vel.Z());
    force.X(-1.0 * this->fluid_resitance_index * this->now_lin_vel.X());
    force.Y(-1.0 * this->fluid_resitance_index * this->now_lin_vel.Y());
    force.Z(-1.0 * this->fluid_resitance_index * this->now_lin_vel.Z());
#else
    math::Vector3 force, torque;
    ROS_WARN("LinearSpeed = [%f,%f,%f] ", this->now_lin_vel.x,
             this->now_lin_vel.y, this->now_lin_vel.z);
    force.x = -1.0 * this->fluid_resitance_index * this->now_lin_vel.x;
    force.y = -1.0 * this->fluid_resitance_index * this->now_lin_vel.y;
    force.z = -1.0 * this->fluid_resitance_index * this->now_lin_vel.z;
#endif
    this->link_to_apply_resitance->AddRelativeForce(force);
#if (GAZEBO_MAJOR_VERSION >= 8)
    this->link_to_apply_resitance->AddRelativeTorque(
        torque -
        this->link_to_apply_resitance->GetInertial()->CoG().Cross(force));
    ROS_WARN("FluidResitanceApplying = [%f,%f,%f] ", force.X(), force.Y(),
             force.Z());
#else
    this->link_to_apply_resitance->AddRelativeTorque(
        torque -
        this->link_to_apply_resitance->GetInertial()->GetCoG().Cross(force));
    ROS_WARN("FluidResitanceApplying = [%f,%f,%f] ", force.x, force.y,
             force.z);
#endif
  }

public:
  /// \brief ROS subscriber callback: receives a new resistance index.
  void OnRosMsg(const std_msgs::Float32ConstPtr &_msg) {
    this->SetResitance(_msg->data);
  }

  /// \brief ROS helper function that processes messages on the custom queue.
private:
  void QueueThread() {
    static const double timeout = 0.01;
    while (this->rosNode->ok()) {
      this->rosQueue.callAvailable(ros::WallDuration(timeout));
    }
  }

private:
  /// \brief Pointer to the model.
  physics::ModelPtr model;
  /// \brief Pointer to the update event connection.
  event::ConnectionPtr updateConnection;
  /// \brief Drag coefficient applied against the linear velocity.
  double fluid_resitance_index = 1.0;
  /// \brief A node used for ROS transport.
  std::unique_ptr<ros::NodeHandle> rosNode;
  /// \brief A ROS subscriber.
  ros::Subscriber rosSub;
  /// \brief A ROS callback queue that helps process messages.
  ros::CallbackQueue rosQueue;
  /// \brief A thread that keeps running the rosQueue.
  std::thread rosQueueThread;
  /// \brief Link the resistance force is applied to.
  physics::LinkPtr link_to_apply_resitance;
  /// \brief Topic on which the resistance index is received.
  std::string fluid_resitanceTopicName = "fluid_resitance";
  /// \brief Name of the link the resistance is applied to.
  std::string NameLinkToApplyResitance = "base_link";
  /// \brief Last sampled model linear velocity (model frame).
#if (GAZEBO_MAJOR_VERSION >= 8)
  ignition::math::Vector3d now_lin_vel;
#else
  math::Vector3 now_lin_vel;
#endif
  /// \brief Rate (Hz) at which the resistance force is applied.
  double rate = 1.0;
  /// \brief Simulation time of the last force application.
  float last_time = 0.0;
  /// \brief The parent World.
  physics::WorldPtr world;
};

// Register this plugin with the simulator.
GZ_REGISTER_MODEL_PLUGIN(FluidResitance)
} // namespace gazebo
Compile the plugin
Once you have the ROSject, be it on ROSDS or on your own computer, you can easily compile it with:
cd ~/simulation_ws/
catkin_make
Everything should have compiled without any problems. If compiling on your own computer raises errors, please remember to run the command source /opt/ros/kinetic/setup.bash before calling catkin_make.
Using the plugin
The plugin we compiled previously has to be loaded by gazebo. We do that on the file
~/simulation_ws/src/buoyancy_tests_pkg/buoyancy_tests_pkg/urdf/simple_floating_sphere_buoyancy_control.urdf, by using the instructions below:
With everything in place, we can now run the simulation. To launch the Simple Fluid Machine, we launch the simulation that has the mass control, the fluid resistance and the propulsion plugins enabled:
If you have applied a movement and you change the FluidResistance, you should see a variation in the time the robot takes to stop. The higher the friction, the faster it will stop its movement.
Air Dragging in Gazebo on ROSDS
There is more to it, but have a look at the HectorPluginsAerodinamics, on which this code is based.
Change the Mass or buoyancy
By changing the mass, it will change the buoyancy of the model. This is to simulate the water intake of submarines or the buoyancy system that an air flying machine like a blimp could have.
rostopic pub /model_mass std_msgs/Float32 "data: 0.0"
Mass_0 = 7.23822947387 –> Neutral buoyancy, because it is the mass of the object
Mass > Mass_0 –> It will sink
Mass < Mass_0 –> It will rise
This mass you can find it in the URDF of the model: buoyancy_tests_pkg/urdf/simple_floating_sphere_buoyancy_control.urdf
So, that is the post for today. Remember that we also have a live version of this post on our YouTube channel, as can be seen on the link below. If you liked the content of this post or video, please consider subscribing to our channel and pressing the bell for a new video every single day:
In this video, we’ll see how to create and launch a custom world in Gazebo. We’ll build the outline of a room that you’ll hopefully complete (in case you want to have some fun)!
If you are a ROS beginner and want to learn ROS basics fast, we recommend you take any of the following courses on Robot Ignite Academy:
Links mentioned in the video and other useful links:
– Robot Ignite Academy, the place to learn to program robots using only a web browser
– ROS Development Studio (ROSDS), a powerful online tool for pushing your ROS learning in a practical way (the environment used in the video for demonstration)
In this video, we are going to show how to create a launch file to spawn a URDF robot model in a given gazebo world.
Up to the end of the video, you will be able to spawn any robot model you may have described in URDF in Gazebo.
Step 1. Create a project in ROS Development Studio(ROSDS)
ROSDS helps you follow our tutorial at a fast pace without having to set up an environment locally. If you don't have an account yet, you can create a free account here. Let's create a new project and call it launch_urdf_model_in_gazebo.
Step 2. Create a package
Let’s create a ROS package for our code by using the following command.
cd ~/catkin_ws/src
catkin_create_pkg my_robot_urdf rospy
Then we create a m2wr.urdf file under the my_robot_urdf/urdf folder.
You can see that the launch file tries to find the description file from the my_robot_urdf package.
Now we have to compile it with the following command
cd ~/catkin_ws
catkin_make
source devel/setup.bash
Now you can launch an empty world simulation from Simulations->Empty, then use the command roslaunch my_robot_urdf spawn_urdf.launch to spawn the robot!
If you are interested in this topic and want to learn more about URDF, please check our Robot Creation with URDF course.
In this new ROS Project you are going to learn Step-by-Step how to create a moving cube and that it learns to move using OpenAI environment.
This second video covers the basics of Reinforcement Learning and how to connect to the various systems of the robot to get the state, perform actions and calculate rewards.
If you didn’t follow up, please check the link below for the last post.
[irp posts=”9744″ name=”[ROS Projects] OpenAI with Moving Cube Robot in Gazebo Step-by-Step Part1″]
Step 1. Clone the simulation
In order to make sure we have the same project, please run the following commands:
cd ~/simulation_ws/src
git clone https://bitbucket.org/theconstructcore/moving_cube.git
NOTICE: please delete the previous code if you have problems to compile the code
In the /moving_cube/moving_cube_description/urdf/moving_cube.urdf file, please uncomment the following part. We’ll need this part to publish the odom topic.
To train the robot, let’s create a package for training under the catkin_ws/src
cd ~/catkin_ws/src
catkin_create_pkg my_moving_cube_training_pkg rospy
Then we’ll create a script folder inside the package and put a file called cube_rl_utils.py inside it with the following content
#!/usr/bin/env python
import time
import rospy
import math
import copy
import numpy
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point
from tf.transformations import euler_from_quaternion
class CubeRLUtils(object):
    """ROS helper for the moving-cube RL task.

    Subscribes to the cube's joint states and odometry, publishes wheel
    velocity commands, and provides the RL primitives: state extraction,
    done-check and reward computation.
    """

    def __init__(self):
        # Wait for sensor data before wiring up subscribers/publishers.
        self.check_all_sensors_ready()
        rospy.Subscriber("/moving_cube/joint_states", JointState, self.joints_callback)
        rospy.Subscriber("/moving_cube/odom", Odometry, self.odom_callback)
        self._roll_vel_pub = rospy.Publisher('/moving_cube/inertia_wheel_roll_joint_velocity_controller/command', Float64, queue_size=1)
        self.check_publishers_connection()

    def check_all_sensors_ready(self):
        """Block until one joint_states and one odom message have arrived."""
        self.disk_joints_data = None
        while self.disk_joints_data is None and not rospy.is_shutdown():
            try:
                self.disk_joints_data = rospy.wait_for_message("/moving_cube/joint_states", JointState, timeout=1.0)
                rospy.loginfo("Current moving_cube/joint_states READY=>"+str(self.disk_joints_data))
            except Exception:
                rospy.logerr("Current moving_cube/joint_states not ready yet, retrying for getting joint_states")

        self.cube_odom_data = None
        # BUG FIX: the original loop tested disk_joints_data (already set by
        # the loop above), so the odom wait never actually ran.
        while self.cube_odom_data is None and not rospy.is_shutdown():
            try:
                self.cube_odom_data = rospy.wait_for_message("/moving_cube/odom", Odometry, timeout=1.0)
                rospy.loginfo("Current /moving_cube/odom READY=>" + str(self.cube_odom_data))
            except Exception:
                rospy.logerr("Current /moving_cube/odom not ready yet, retrying for getting odom")
        rospy.loginfo("ALL SENSORS READY")

    def check_publishers_connection(self):
        """
        Checks that all the publishers are working
        :return:
        """
        rate = rospy.Rate(10)  # 10hz
        while (self._roll_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown()):
            rospy.loginfo("No susbribers to _roll_vel_pub yet so we wait and try again")
            try:
                rate.sleep()
            except rospy.ROSInterruptException:
                # This is to avoid error when world is rested, time when backwards.
                pass
        rospy.loginfo("_base_pub Publisher Connected")
        rospy.loginfo("All Publishers READY")

    def joints_callback(self, data):
        # Cache the latest JointState message.
        self.joints = data

    def odom_callback(self, data):
        # Cache the latest Odometry message.
        self.odom = data

    # Reinforcement Learning Utility Code

    def move_joints(self, roll_speed):
        """Publish a velocity command (rad/s) for the inertia wheel joint."""
        joint_speed_value = Float64()
        joint_speed_value.data = roll_speed
        rospy.loginfo("Single Disk Roll Velocity>>"+str(joint_speed_value))
        self._roll_vel_pub.publish(joint_speed_value)

    def get_cube_state(self):
        """Return [wheel_speed, distance_from_origin, roll, pitch, yaw],
        each rounded to one decimal, from the cached sensor data."""
        # We convert from quaternions to euler
        orientation_list = [self.odom.pose.pose.orientation.x,
                            self.odom.pose.pose.orientation.y,
                            self.odom.pose.pose.orientation.z,
                            self.odom.pose.pose.orientation.w]
        roll, pitch, yaw = euler_from_quaternion(orientation_list)

        # We get the distance from the origin
        start_position = Point()
        start_position.x = 0.0
        start_position.y = 0.0
        start_position.z = 0.0
        distance = self.get_distance_from_point(start_position,
                                                self.odom.pose.pose.position)
        cube_state = [
            round(self.joints.velocity[0], 1),
            round(distance, 1),
            round(roll, 1),
            round(pitch, 1),
            round(yaw, 1)
        ]
        return cube_state

    def observation_checks(self, cube_state):
        """Return True (episode done) when the cube strays too far from origin."""
        # MAximum distance to travel permited in meters from origin
        max_distance = 2.0
        if (cube_state[1] > max_distance):
            rospy.logerr("Cube Too Far==>"+str(cube_state[1]))
            done = True
        else:
            rospy.loginfo("Cube NOT Too Far==>"+str(cube_state[1]))
            done = False
        return done

    def get_distance_from_point(self, pstart, p_end):
        """
        Given a Vector3 Object, get distance from current position
        :param p_end:
        :return:
        """
        a = numpy.array((pstart.x, pstart.y, pstart.z))
        b = numpy.array((p_end.x, p_end.y, p_end.z))
        distance = numpy.linalg.norm(a - b)
        return distance

    def get_reward_for_observations(self, state):
        """Reward = 10 * distance travelled - |wheel speed|."""
        # We reward it for lower speeds and distance traveled
        speed = state[0]
        distance = state[1]

        # Positive Reinforcement
        reward_distance = distance * 10.0
        # Negative Reinforcement for magnitude of speed
        reward_for_efective_movement = -1 * abs(speed)

        reward = reward_distance + reward_for_efective_movement
        rospy.loginfo("Reward_distance="+str(reward_distance))
        rospy.loginfo("Reward_for_efective_movement= "+str(reward_for_efective_movement))
        return reward
def cube_rl_systems_test():
    """Smoke test: roll the wheel forward, backward and to a stop, then
    log the resulting state, done flag and reward."""
    rospy.init_node('cube_rl_systems_test_node', anonymous=True, log_level=rospy.INFO)
    cube_rl_utils_object = CubeRLUtils()
    # Same command sequence as before, expressed as a loop.
    for label, speed in (("80", 80.0), ("-80", -80.0), ("0.0", 0.0)):
        rospy.loginfo("Moving to Speed==>" + label)
        cube_rl_utils_object.move_joints(roll_speed=speed)
        time.sleep(2)
    cube_state = cube_rl_utils_object.get_cube_state()
    done = cube_rl_utils_object.observation_checks(cube_state)
    reward = cube_rl_utils_object.get_reward_for_observations(cube_state)
    rospy.loginfo("Done==>" + str(done))
    rospy.loginfo("Reward==>" + str(reward))


if __name__ == "__main__":
    cube_rl_systems_test()
In this post, we’ll focus on the cube_rl_systems_test() function. The function uses the class to move the cube, get the observation, calculate reward and check if it’s done. To run it, you have to run the simulation first. Please go to Simulations->Select launch file-> main.launch
NOTICE: You have to unpause the simulation by clicking the arrow key in the simulation window
Then you can run the following command to run the script
cd ~/catkin_ws/src/my_moving_cube_training_pkg/script
chmod +x cube_rl_utils.py
cd ~/catkin_ws
source devel/setup.bash
rosrun my_moving_cube_training_pkg cube_rl_utils.py
You should see the cube moving around and the reward and the done state is calculated.
Edit by: Tony Huang
[irp posts=”9976″ name=”ROS Projects OpenAI with Moving Cube Robot in Gazebo Step-by-Step Part3″]
Got a suggestion for the next steps of this project? We would love to hear them in the comments below :).
Part 2
Step 1. Environment setup
We’ll start by explaining how to create the gym environment for training. You can create a file called monoped_env.py under the my_hopper_training/src directory with the following content
#!/usr/bin/env python
'''
By Miguel Angel Rodriguez <duckfrost@theconstructsim.com>
Visit our website at ec2-54-246-60-98.eu-west-1.compute.amazonaws.com
'''
import gym
import rospy
import numpy as np
import time
from gym import utils, spaces
from geometry_msgs.msg import Pose
from gym.utils import seeding
from gym.envs.registration import register
from gazebo_connection import GazeboConnection
from joint_publisher import JointPub
from monoped_state import MonopedState
from controllers_connection import ControllersConnection
# register the training environment in the gym as an available one,
# so it can later be instantiated with gym.make('Monoped-v0').
reg = register(
    id='Monoped-v0',
    entry_point='monoped_env:MonopedEnv',
    timestep_limit=50,
)
class MonopedEnv(gym.Env):
    """OpenAI Gym environment for training a monoped in Gazebo.

    Reads all training parameters from the ROS parameter server, manages
    the simulator through GazeboConnection (pause/unpause/reset/gravity),
    and exposes the gym API (_seed/_reset/_step) on top of the monoped
    state object and joint publisher.
    """

    def __init__(self):
        # We assume that a ROS node has already been created
        # before initialising the environment

        # gets training parameters from param server
        self.desired_pose = Pose()
        self.desired_pose.position.x = rospy.get_param("/desired_pose/x")
        self.desired_pose.position.y = rospy.get_param("/desired_pose/y")
        self.desired_pose.position.z = rospy.get_param("/desired_pose/z")
        self.running_step = rospy.get_param("/running_step")
        self.max_incl = rospy.get_param("/max_incl")
        self.max_height = rospy.get_param("/max_height")
        self.min_height = rospy.get_param("/min_height")
        self.joint_increment_value = rospy.get_param("/joint_increment_value")
        self.done_reward = rospy.get_param("/done_reward")
        self.alive_reward = rospy.get_param("/alive_reward")
        self.desired_force = rospy.get_param("/desired_force")
        self.desired_yaw = rospy.get_param("/desired_yaw")
        # Reward-term weights for the monoped state object.
        self.weight_r1 = rospy.get_param("/weight_r1")
        self.weight_r2 = rospy.get_param("/weight_r2")
        self.weight_r3 = rospy.get_param("/weight_r3")
        self.weight_r4 = rospy.get_param("/weight_r4")
        self.weight_r5 = rospy.get_param("/weight_r5")

        # stablishes connection with simulator
        self.gazebo = GazeboConnection()
        self.controllers_object = ControllersConnection(namespace="monoped")
        self.monoped_state_object = MonopedState(max_height=self.max_height,
                                                 min_height=self.min_height,
                                                 abs_max_roll=self.max_incl,
                                                 abs_max_pitch=self.max_incl,
                                                 joint_increment_value=self.joint_increment_value,
                                                 done_reward=self.done_reward,
                                                 alive_reward=self.alive_reward,
                                                 desired_force=self.desired_force,
                                                 desired_yaw=self.desired_yaw,
                                                 weight_r1=self.weight_r1,
                                                 weight_r2=self.weight_r2,
                                                 weight_r3=self.weight_r3,
                                                 weight_r4=self.weight_r4,
                                                 weight_r5=self.weight_r5
                                                 )
        self.monoped_state_object.set_desired_world_point(self.desired_pose.position.x,
                                                          self.desired_pose.position.y,
                                                          self.desired_pose.position.z)
        self.monoped_joint_pubisher_object = JointPub()

        """
        For this version, we consider 6 actions
        1-2) Increment/Decrement haa_joint
        3-4) Increment/Decrement hfe_joint
        5-6) Increment/Decrement kfe_joint
        """
        self.action_space = spaces.Discrete(6)
        self.reward_range = (-np.inf, np.inf)
        self._seed()

    # A function to initialize the random generator
    def _seed(self, seed=None):
        """Seed the environment's random generator; returns [seed]."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    # Resets the state of the environment and returns an initial observation.
    def _reset(self):
        """Reset the simulation and return the initial (discretized) state.

        NOTE: the pause/reset/gravity sequence below is order-critical.
        """
        # 0st: We pause the Simulator
        rospy.logdebug("Pausing SIM...")
        self.gazebo.pauseSim()

        # 1st: resets the simulation to initial values
        rospy.logdebug("Reset SIM...")
        self.gazebo.resetSim()

        # 2nd: We Set the gravity to 0.0 so that we dont fall when reseting joints
        # It also UNPAUSES the simulation
        rospy.logdebug("Remove Gravity...")
        self.gazebo.change_gravity(0.0, 0.0, 0.0)

        # EXTRA: Reset JoinStateControlers because sim reset doesnt reset TFs, generating time problems
        rospy.logdebug("reset_monoped_joint_controllers...")
        self.controllers_object.reset_monoped_joint_controllers()

        # 3rd: resets the robot to initial conditions
        rospy.logdebug("set_init_pose...")
        self.monoped_joint_pubisher_object.set_init_pose()

        # 5th: Check all subscribers work.
        # Get the state of the Robot defined by its RPY orientation, distance from
        # desired point, contact force and JointState of the three joints
        rospy.logdebug("check_all_systems_ready...")
        self.monoped_state_object.check_all_systems_ready()
        rospy.logdebug("get_observations...")
        observation = self.monoped_state_object.get_observations()

        # 6th: We restore the gravity to original
        rospy.logdebug("Restore Gravity...")
        self.gazebo.change_gravity(0.0, 0.0, -9.81)

        # 7th: pauses simulation
        rospy.logdebug("Pause SIM...")
        self.gazebo.pauseSim()

        # Get the State Discrete Stringuified version of the observations
        state = self.get_state(observation)

        return state

    def _step(self, action):
        """Apply one action, let the sim run for running_step seconds, and
        return (state, reward, done, info)."""
        # Given the action selected by the learning algorithm,
        # we perform the corresponding movement of the robot

        # 1st, decide which action corresponsd to which joint is incremented
        next_action_position = self.monoped_state_object.get_action_to_position(action)

        # We move it to that pos
        self.gazebo.unpauseSim()
        self.monoped_joint_pubisher_object.move_joints(next_action_position)
        # Then we send the command to the robot and let it go
        # for running_step seconds
        time.sleep(self.running_step)
        self.gazebo.pauseSim()

        # We now process the latest data saved in the class state to calculate
        # the state and the rewards. This way we guarantee that they work
        # with the same exact data.
        # Generate State based on observations
        observation = self.monoped_state_object.get_observations()

        # finally we get an evaluation based on what happened in the sim
        reward, done = self.monoped_state_object.process_data()

        # Get the State Discrete Stringuified version of the observations
        state = self.get_state(observation)

        return state, reward, done, {}

    def get_state(self, observation):
        """
        We retrieve the Stringuified-Discrete version of the given observation
        :return: state
        """
        return self.monoped_state_object.get_state_as_string(observation)
This script creates the gym environment with the following parts:
1.Register the training environment
You will always need to do this step to inform the gym package that a new training environment is created.
2.Load the desired pose
Parameters related to the training is loaded at this step.
3.Connect with the gazebo
In order to connect the gym environment with gazebo simulation, we create a script called gazebo_connection.py in the same directory with the following content
#!/usr/bin/env python
import rospy
from std_srvs.srv import Empty
from gazebo_msgs.msg import ODEPhysics
from gazebo_msgs.srv import SetPhysicsProperties, SetPhysicsPropertiesRequest
from std_msgs.msg import Float64
from geometry_msgs.msg import Vector3
class GazeboConnection():
    """Wrapper around the Gazebo ROS services used during training:
    pause/unpause physics, reset simulation/world, and setting physics
    properties (used here to switch gravity on and off)."""

    def __init__(self):
        self.unpause = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
        self.pause = rospy.ServiceProxy('/gazebo/pause_physics', Empty)
        self.reset_proxy = rospy.ServiceProxy('/gazebo/reset_simulation', Empty)
        # BUG FIX: resetWorld() used to call reset_proxy, which points at
        # /gazebo/reset_simulation; it needs its own /gazebo/reset_world proxy.
        self.reset_world_proxy = rospy.ServiceProxy('/gazebo/reset_world', Empty)

        # Setup the Gravity Control system
        service_name = '/gazebo/set_physics_properties'
        rospy.logdebug("Waiting for service " + str(service_name))
        rospy.wait_for_service(service_name)
        rospy.logdebug("Service Found " + str(service_name))
        self.set_physics = rospy.ServiceProxy(service_name, SetPhysicsProperties)
        self.init_values()
        # We always pause the simulation, important for legged robots learning
        self.pauseSim()

    def pauseSim(self):
        """Pause physics (blocks until the service is available)."""
        rospy.wait_for_service('/gazebo/pause_physics')
        try:
            # NOTE: the Python-2-only "except X, e" syntax has been replaced
            # with the version that also works on Python 3.
            self.pause()
        except rospy.ServiceException:
            print("/gazebo/pause_physics service call failed")

    def unpauseSim(self):
        """Resume physics (blocks until the service is available)."""
        rospy.wait_for_service('/gazebo/unpause_physics')
        try:
            self.unpause()
        except rospy.ServiceException:
            print("/gazebo/unpause_physics service call failed")

    def resetSim(self):
        """Reset the whole simulation (time and models)."""
        rospy.wait_for_service('/gazebo/reset_simulation')
        try:
            self.reset_proxy()
        except rospy.ServiceException:
            print("/gazebo/reset_simulation service call failed")

    def resetWorld(self):
        """Reset only the world (model poses), not simulation time."""
        rospy.wait_for_service('/gazebo/reset_world')
        try:
            self.reset_world_proxy()
        except rospy.ServiceException:
            print("/gazebo/reset_world service call failed")

    def init_values(self):
        """Reset the simulation and initialise the physics-properties request
        (zero gravity by default; restored later via change_gravity)."""
        rospy.wait_for_service('/gazebo/reset_simulation')
        try:
            # reset_proxy.call()
            self.reset_proxy()
        except rospy.ServiceException:
            print("/gazebo/reset_simulation service call failed")

        self._time_step = Float64(0.001)
        self._max_update_rate = Float64(1000.0)

        self._gravity = Vector3()
        self._gravity.x = 0.0
        self._gravity.y = 0.0
        self._gravity.z = 0.0

        # ODE solver configuration passed along with every gravity update.
        self._ode_config = ODEPhysics()
        self._ode_config.auto_disable_bodies = False
        self._ode_config.sor_pgs_precon_iters = 0
        self._ode_config.sor_pgs_iters = 50
        self._ode_config.sor_pgs_w = 1.3
        self._ode_config.sor_pgs_rms_error_tol = 0.0
        self._ode_config.contact_surface_layer = 0.001
        self._ode_config.contact_max_correcting_vel = 0.0
        self._ode_config.cfm = 0.0
        self._ode_config.erp = 0.2
        self._ode_config.max_contacts = 20

        self.update_gravity_call()

    def update_gravity_call(self):
        """Push the current gravity vector and physics config to Gazebo.
        Pauses the sim during the update and unpauses afterwards."""
        self.pauseSim()

        set_physics_request = SetPhysicsPropertiesRequest()
        set_physics_request.time_step = self._time_step.data
        set_physics_request.max_update_rate = self._max_update_rate.data
        set_physics_request.gravity = self._gravity
        set_physics_request.ode_config = self._ode_config

        rospy.logdebug(str(set_physics_request.gravity))
        result = self.set_physics(set_physics_request)
        rospy.logdebug("Gravity Update Result==" + str(result.success) + ",message==" + str(result.status_message))

        self.unpauseSim()

    def change_gravity(self, x, y, z):
        """Set gravity to (x, y, z) m/s^2 and apply it to the simulation."""
        self._gravity.x = x
        self._gravity.y = y
        self._gravity.z = z
        self.update_gravity_call()
This script is responsible to reset, pause and unpause the simulation for the training script.
4.Connect with the controller
Since TF doesn’t like that we reset the environment and it will generate some problems. We have to manually reset the controller with the controller_connection.py script. We won’t go into detail here. If you want to learn more about the controller, please check our controller 101 course.
#!/usr/bin/env python
'''
By Miguel Angel Rodriguez <duckfrost@theconstructsim.com>
Visit our website at ec2-54-246-60-98.eu-west-1.compute.amazonaws.com
'''
import gym
import rospy
import numpy as np
import time
from gym import utils, spaces
from geometry_msgs.msg import Pose
from gym.utils import seeding
from gym.envs.registration import register
from gazebo_connection import GazeboConnection
from joint_publisher import JointPub
from monoped_state import MonopedState
from controllers_connection import ControllersConnection
# register the training environment in the gym as an available one,
# so it can later be instantiated with gym.make('Monoped-v0').
reg = register(
    id='Monoped-v0',
    entry_point='monoped_env:MonopedEnv',
    timestep_limit=50,
)
class MonopedEnv(gym.Env):
def __init__(self):
# We assume that a ROS node has already been created
# before initialising the environment
# gets training parameters from param server
self.desired_pose = Pose()
self.desired_pose.position.x = rospy.get_param("/desired_pose/x")
self.desired_pose.position.y = rospy.get_param("/desired_pose/y")
self.desired_pose.position.z = rospy.get_param("/desired_pose/z")
self.running_step = rospy.get_param("/running_step")
self.max_incl = rospy.get_param("/max_incl")
self.max_height = rospy.get_param("/max_height")
self.min_height = rospy.get_param("/min_height")
self.joint_increment_value = rospy.get_param("/joint_increment_value")
self.done_reward = rospy.get_param("/done_reward")
self.alive_reward = rospy.get_param("/alive_reward")
self.desired_force = rospy.get_param("/desired_force")
self.desired_yaw = rospy.get_param("/desired_yaw")
self.weight_r1 = rospy.get_param("/weight_r1")
self.weight_r2 = rospy.get_param("/weight_r2")
self.weight_r3 = rospy.get_param("/weight_r3")
self.weight_r4 = rospy.get_param("/weight_r4")
self.weight_r5 = rospy.get_param("/weight_r5")
# stablishes connection with simulator
self.gazebo = GazeboConnection()
self.controllers_object = ControllersConnection(namespace="monoped")
self.monoped_state_object = MonopedState( max_height=self.max_height,
min_height=self.min_height,
abs_max_roll=self.max_incl,
abs_max_pitch=self.max_incl,
joint_increment_value=self.joint_increment_value,
done_reward=self.done_reward,
alive_reward=self.alive_reward,
desired_force=self.desired_force,
desired_yaw=self.desired_yaw,
weight_r1=self.weight_r1,
weight_r2=self.weight_r2,
weight_r3=self.weight_r3,
weight_r4=self.weight_r4,
weight_r5=self.weight_r5
)
self.monoped_state_object.set_desired_world_point(self.desired_pose.position.x,
self.desired_pose.position.y,
self.desired_pose.position.z)
self.monoped_joint_pubisher_object = JointPub()
"""
For this version, we consider 6 actions
1-2) Increment/Decrement haa_joint
3-4) Increment/Decrement hfe_joint
5-6) Increment/Decrement kfe_joint
"""
self.action_space = spaces.Discrete(6)
self.reward_range = (-np.inf, np.inf)
self._seed()
# A function to initialize the random generator
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
# Resets the state of the environment and returns an initial observation.
def _reset(self):
# 0st: We pause the Simulator
rospy.logdebug("Pausing SIM...")
self.gazebo.pauseSim()
# 1st: resets the simulation to initial values
rospy.logdebug("Reset SIM...")
self.gazebo.resetSim()
# 2nd: We Set the gravity to 0.0 so that we dont fall when reseting joints
# It also UNPAUSES the simulation
rospy.logdebug("Remove Gravity...")
self.gazebo.change_gravity(0.0, 0.0, 0.0)
# EXTRA: Reset JoinStateControlers because sim reset doesnt reset TFs, generating time problems
rospy.logdebug("reset_monoped_joint_controllers...")
self.controllers_object.reset_monoped_joint_controllers()
# 3rd: resets the robot to initial conditions
rospy.logdebug("set_init_pose...")
self.monoped_joint_pubisher_object.set_init_pose()
# 5th: Check all subscribers work.
# Get the state of the Robot defined by its RPY orientation, distance from
# desired point, contact force and JointState of the three joints
rospy.logdebug("check_all_systems_ready...")
self.monoped_state_object.check_all_systems_ready()
rospy.logdebug("get_observations...")
observation = self.monoped_state_object.get_observations()
# 6th: We restore the gravity to original
rospy.logdebug("Restore Gravity...")
self.gazebo.change_gravity(0.0, 0.0, -9.81)
# 7th: pauses simulation
rospy.logdebug("Pause SIM...")
self.gazebo.pauseSim()
# Get the State Discrete Stringuified version of the observations
state = self.get_state(observation)
return state
def _step(self, action):
    """Apply one discrete action, let the sim run, and score the result.

    :param action: integer in [0, 5] selecting which of the three joints
        to increment or decrement (see the action-space comment in __init__)
    :return: (state, reward, done, info) tuple in the gym step convention
    """
    # Given the action selected by the learning algorithm,
    # we perform the corresponding movement of the robot
    # 1st, decide which action corresponds to which joint increment
    next_action_position = self.monoped_state_object.get_action_to_position(action)
    # We move it to that position: unpause, publish, wait, pause again so the
    # world only advances for exactly running_step seconds
    self.gazebo.unpauseSim()
    self.monoped_joint_pubisher_object.move_joints(next_action_position)
    # Then we send the command to the robot and let it go
    # for running_step seconds
    time.sleep(self.running_step)
    self.gazebo.pauseSim()
    # We now process the latest data saved in the class state to calculate
    # the state and the rewards. This way we guarantee that they work
    # with the same exact data.
    # Generate the state based on the observations
    observation = self.monoped_state_object.get_observations()
    # finally we get an evaluation based on what happened in the sim
    reward, done = self.monoped_state_object.process_data()
    # Get the stringified discrete version of the observations
    state = self.get_state(observation)
    return state, reward, done, {}
def get_state(self, observation):
    """
    We retrieve the Stringuified-Discrete version of the given observation
    :return: state
    """
    state_object = self.monoped_state_object
    return state_object.get_state_as_string(observation)
5. Generate the state object
The state object that holds the current state is then generated by the following monoped_state.py script:
#!/usr/bin/env python
'''
By Miguel Angel Rodriguez <duckfrost@theconstructsim.com>
Visit our website at ec2-54-246-60-98.eu-west-1.compute.amazonaws.com
'''
import gym
import rospy
import numpy as np
import time
from gym import utils, spaces
from geometry_msgs.msg import Pose
from gym.utils import seeding
from gym.envs.registration import register
from gazebo_connection import GazeboConnection
from joint_publisher import JointPub
from monoped_state import MonopedState
from controllers_connection import ControllersConnection
# Register the training environment in gym as an available one,
# so it can later be instantiated with gym.make('Monoped-v0').
reg = register(
    id='Monoped-v0',
    entry_point='monoped_env:MonopedEnv',
    timestep_limit=50,  # episode is cut off after 50 steps
    )
class MonopedEnv(gym.Env):
    """Gym environment wrapping the Gazebo monoped (hopper) simulation.

    Reads all training parameters from the ROS parameter server, talks to
    Gazebo through GazeboConnection, and exposes the standard gym
    reset/step interface with a discrete 6-way action space.
    """

    def __init__(self):
        # We assume that a ROS node has already been created
        # before initialising the environment

        # gets training parameters from the ROS param server
        self.desired_pose = Pose()
        self.desired_pose.position.x = rospy.get_param("/desired_pose/x")
        self.desired_pose.position.y = rospy.get_param("/desired_pose/y")
        self.desired_pose.position.z = rospy.get_param("/desired_pose/z")
        # seconds the sim runs per step; episode-termination limits
        self.running_step = rospy.get_param("/running_step")
        self.max_incl = rospy.get_param("/max_incl")
        self.max_height = rospy.get_param("/max_height")
        self.min_height = rospy.get_param("/min_height")
        # radians added/subtracted per discrete action
        self.joint_increment_value = rospy.get_param("/joint_increment_value")
        # reward shaping parameters
        self.done_reward = rospy.get_param("/done_reward")
        self.alive_reward = rospy.get_param("/alive_reward")
        self.desired_force = rospy.get_param("/desired_force")
        self.desired_yaw = rospy.get_param("/desired_yaw")
        self.weight_r1 = rospy.get_param("/weight_r1")
        self.weight_r2 = rospy.get_param("/weight_r2")
        self.weight_r3 = rospy.get_param("/weight_r3")
        self.weight_r4 = rospy.get_param("/weight_r4")
        self.weight_r5 = rospy.get_param("/weight_r5")

        # establishes the connection with the simulator and controllers
        self.gazebo = GazeboConnection()
        self.controllers_object = ControllersConnection(namespace="monoped")

        # state object: turns raw sensor data into observations and rewards
        self.monoped_state_object = MonopedState(max_height=self.max_height,
                                                 min_height=self.min_height,
                                                 abs_max_roll=self.max_incl,
                                                 abs_max_pitch=self.max_incl,
                                                 joint_increment_value=self.joint_increment_value,
                                                 done_reward=self.done_reward,
                                                 alive_reward=self.alive_reward,
                                                 desired_force=self.desired_force,
                                                 desired_yaw=self.desired_yaw,
                                                 weight_r1=self.weight_r1,
                                                 weight_r2=self.weight_r2,
                                                 weight_r3=self.weight_r3,
                                                 weight_r4=self.weight_r4,
                                                 weight_r5=self.weight_r5
                                                 )
        self.monoped_state_object.set_desired_world_point(self.desired_pose.position.x,
                                                          self.desired_pose.position.y,
                                                          self.desired_pose.position.z)

        # publisher object that sends position commands to the joints
        self.monoped_joint_pubisher_object = JointPub()
        """
        For this version, we consider 6 actions
        1-2) Increment/Decrement haa_joint
        3-4) Increment/Decrement hfe_joint
        5-6) Increment/Decrement kfe_joint
        """
        self.action_space = spaces.Discrete(6)
        self.reward_range = (-np.inf, np.inf)
        self._seed()

    # A function to initialize the random generator
    def _seed(self, seed=None):
        """Seed the RNG; returns the seed actually used in a list."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    # Resets the state of the environment and returns an initial observation.
    def _reset(self):
        """Reset the sim (joints reset under zero gravity) and return the
        initial discretised state."""
        # 0th: pause the simulator so nothing moves while we reset
        rospy.logdebug("Pausing SIM...")
        self.gazebo.pauseSim()
        # 1st: resets the simulation to initial values
        rospy.logdebug("Reset SIM...")
        self.gazebo.resetSim()
        # 2nd: set gravity to 0.0 so that we don't fall when resetting joints.
        # NOTE: change_gravity also UNPAUSES the simulation.
        rospy.logdebug("Remove Gravity...")
        self.gazebo.change_gravity(0.0, 0.0, 0.0)
        # EXTRA: reset joint-state controllers, because a sim reset does not
        # reset TFs, which otherwise generates time problems
        rospy.logdebug("reset_monoped_joint_controllers...")
        self.controllers_object.reset_monoped_joint_controllers()
        # 3rd: move the robot back to its initial joint configuration
        rospy.logdebug("set_init_pose...")
        self.monoped_joint_pubisher_object.set_init_pose()
        # 5th: check all subscribers work.
        # Get the state of the robot defined by its RPY orientation, distance from
        # the desired point, contact force and JointState of the three joints
        rospy.logdebug("check_all_systems_ready...")
        self.monoped_state_object.check_all_systems_ready()
        rospy.logdebug("get_observations...")
        observation = self.monoped_state_object.get_observations()
        # 6th: restore gravity to its original value
        rospy.logdebug("Restore Gravity...")
        self.gazebo.change_gravity(0.0, 0.0, -9.81)
        # 7th: pause the simulation again; stepping happens in _step
        rospy.logdebug("Pause SIM...")
        self.gazebo.pauseSim()
        # Get the stringified discrete version of the observations
        state = self.get_state(observation)
        return state

    def _step(self, action):
        """Apply one discrete action, run the sim for running_step seconds,
        and return (state, reward, done, info)."""
        # Given the action selected by the learning algorithm,
        # we perform the corresponding movement of the robot
        # 1st, decide which action corresponds to which joint increment
        next_action_position = self.monoped_state_object.get_action_to_position(action)
        # We move it to that position
        self.gazebo.unpauseSim()
        self.monoped_joint_pubisher_object.move_joints(next_action_position)
        # Then we send the command to the robot and let it go
        # for running_step seconds
        time.sleep(self.running_step)
        self.gazebo.pauseSim()
        # We now process the latest data saved in the class state to calculate
        # the state and the rewards. This way we guarantee that they work
        # with the same exact data.
        # Generate the state based on the observations
        observation = self.monoped_state_object.get_observations()
        # finally we get an evaluation based on what happened in the sim
        reward, done = self.monoped_state_object.process_data()
        # Get the stringified discrete version of the observations
        state = self.get_state(observation)
        return state, reward, done, {}

    def get_state(self, observation):
        """
        We retrieve the Stringuified-Discrete version of the given observation
        :return: state
        """
        return self.monoped_state_object.get_state_as_string(observation)
6. Reset
Set the environment back to its initial observation. This includes steps such as pausing the simulation, resetting the controllers, and so on, as described before.
7. Step
In this part, an action is decided based on the current state and then published with the following joint_publisher.py script. After that, the reward is calculated based on the new observation.
#!/usr/bin/env python
import rospy
import math
from std_msgs.msg import String
from std_msgs.msg import Float64
class JointPub(object):
    """Publishes position commands for the monoped's three joint
    position controllers.

    Publishers are kept in self.publishers_array in the order
    [haa, hfe, kfe], which is also the order move_joints() expects
    its values in.
    """

    def __init__(self):
        self.publishers_array = []
        self._haa_joint_pub = rospy.Publisher('/monoped/haa_joint_position_controller/command', Float64, queue_size=1)
        self._hfe_joint_pub = rospy.Publisher('/monoped/hfe_joint_position_controller/command', Float64, queue_size=1)
        self._kfe_joint_pub = rospy.Publisher('/monoped/kfe_joint_position_controller/command', Float64, queue_size=1)
        self.publishers_array.append(self._haa_joint_pub)
        self.publishers_array.append(self._hfe_joint_pub)
        self.publishers_array.append(self._kfe_joint_pub)
        # Initial joint configuration used by set_init_pose()
        self.init_pos = [0.0, 0.0, 0.0]

    def set_init_pose(self):
        """
        Sets joints to initial position [0,0,0]
        :return:
        """
        self.check_publishers_connection()
        self.move_joints(self.init_pos)

    def _wait_for_subscribers(self, publisher, name, rate):
        """Block until `publisher` has at least one subscriber.

        :param publisher: rospy.Publisher to wait on
        :param name: publisher name used in log messages
        :param rate: rospy.Rate used to poll between checks
        """
        while publisher.get_num_connections() == 0:
            rospy.logdebug("No subscribers to %s yet so we wait and try again" % name)
            try:
                rate.sleep()
            except rospy.ROSInterruptException:
                # Avoid an error when the world is reset and sim time jumps backwards.
                pass
        rospy.logdebug("%s Publisher Connected" % name)

    def check_publishers_connection(self):
        """
        Checks that all the publishers are working: blocks until every
        command topic has a subscriber (the joint controllers).
        :return:
        """
        rate = rospy.Rate(10)  # 10hz
        # Refactored: the original three copy-pasted wait loops now share
        # one helper.
        for publisher, name in ((self._haa_joint_pub, "_haa_joint_pub"),
                                (self._hfe_joint_pub, "_hfe_joint_pub"),
                                (self._kfe_joint_pub, "_kfe_joint_pub")):
            self._wait_for_subscribers(publisher, name, rate)
        rospy.logdebug("All Publishers READY")

    def joint_mono_des_callback(self, msg):
        """Forward a desired joint-state message to move_joints()."""
        rospy.logdebug(str(msg.joint_state.position))
        self.move_joints(msg.joint_state.position)

    def move_joints(self, joints_array):
        """Publish one position command per joint.

        :param joints_array: iterable of target positions ordered as
            [haa, hfe, kfe], matching self.publishers_array
        """
        # zip() replaces the original manual index counter; extra values in
        # a longer joints_array are ignored, same as before for 3 values.
        for publisher_object, position in zip(self.publishers_array, joints_array):
            joint_value = Float64()
            joint_value.data = position
            rospy.logdebug("JointsPos>>" + str(joint_value))
            publisher_object.publish(joint_value)

    def start_loop(self, rate_value=2.0):
        """Alternate the kfe joint between +1.6 and -1.6 rad until shutdown.

        :param rate_value: publishing frequency in Hz
        """
        rospy.logdebug("Start Loop")
        pos1 = [0.0, 0.0, 1.6]
        pos2 = [0.0, 0.0, -1.6]
        position = "pos1"
        rate = rospy.Rate(rate_value)
        while not rospy.is_shutdown():
            if position == "pos1":
                self.move_joints(pos1)
                position = "pos2"
            else:
                self.move_joints(pos2)
                position = "pos1"
            rate.sleep()

    def start_sinus_loop(self, rate_value=2.0):
        """Drive the hfe joint (index 1) with a sinusoidal command until
        shutdown.

        :param rate_value: publishing frequency in Hz
        """
        rospy.logdebug("Start Loop")
        w = 0.0
        x = 2.0 * math.sin(w)
        # pos_x = [0.0, 0.0, x]
        # pos_x = [x, 0.0, 0.0]
        pos_x = [0.0, x, 0.0]
        rate = rospy.Rate(rate_value)
        while not rospy.is_shutdown():
            self.move_joints(pos_x)
            w += 0.05
            x = 2.0 * math.sin(w)
            pos_x = [0.0, x, 0.0]
            rate.sleep()
if __name__ == "__main__":
    # Manual test entry point: publishes a sinusoidal command to the hfe
    # joint at 50 Hz. The square-wave test loop is left commented out.
    rospy.init_node('joint_publisher_node')
    joint_publisher = JointPub()
    rate_value = 50.0
    #joint_publisher.start_loop(rate_value)
    joint_publisher.start_sinus_loop(rate_value)
Step 2. Training
Now you have all the code you need to start training. Let’s run the simulation first. Go to Simulations -> Select launch file -> my_legged_robot_sims->main.launch to launch the hopper robot simulation in Gazebo.
Then you can run the training with
roslaunch my_hopper_training main.launch
The algorithm now works and starts to train the robot to perform the task we want; however, it still requires a lot of tuning to work properly.
You can have multiple instances and train robots with different parameters in parallel with our paid program in ROSDS. Please check it here if you are interested.