diff --git a/docs/faq/index.html b/docs/faq/index.html index bd6df190..a000b021 100644 --- a/docs/faq/index.html +++ b/docs/faq/index.html @@ -613,6 +613,13 @@ Robot Pool (only in C++) + + +
To enable graphics in your code, you need to do the following:
We have not implemented this feature in Python
yet. One can emulate the RobotPool
behavior or create a custom parallel robot loader.
On magnum_contexts.cpp you can find an example showcasing the use of many worlds with camera sensors in parallel. The main takeaway is that we need to pre-allocate OpenGL contexts so that each thread can take one and use it to render their worlds.
-Below you can find an example showcasing the use of many worlds with camera sensors in parallel.
+ +robot = rd.Robot("arm.urdf", "arm", False)
+robot.fix_to_world()
+
+def test():
+ pid = os.getpid()
+ ii = pid%15
+
+ # create the controller
+ pdcontrol = rd.PDControl([0.0, 1.0, -1.5, 1.0], False)
+
+ # clone the robot
+ grobot = robot.clone()
+
+ # add the controller to the robot
+ grobot.add_controller(pdcontrol, 1.)
+ pdcontrol.set_pd(200., 20.)
+
+ # create the simulation object
+ simu = rd.RobotDARTSimu(0.001)
+
+ # set the graphics
+ graphics = rd.gui.WindowlessGraphics()
+ simu.set_graphics(graphics)
+
+ graphics.look_at([0.4 * ii, 3.5 - ii * 0.1, 2.], [0., 0., 0.25], [0., 0., 1.])
+
+ # add the robot and the floor
+ simu.add_robot(grobot)
+ simu.add_checkerboard_floor()
+
+ # run
+ simu.run(20.)
+
+ # save last frame for visualization purposes
+ img = graphics.image()
+ rd.gui.save_png_image('camera-' + str(ii) + '.png', img)
+
+# helper function to run in parallel
+def runInParallel(N):
+ proc = []
+ for i in range(N):
+ # rd.gui.run_with_gl_context accepts 2 parameters:
+ # (func, wait_time_in_ms)
+ # the func needs to be of the following format: void(), aka no return, no arguments
+ p = Process(target=rd.gui.run_with_gl_context, args=(test, 20))
+ p.start()
+ proc.append(p)
+ for p in proc:
+ p.join()
+
+print('Running parallel evaluations')
+N = 15
+start = timer()
+runInParallel(N)
+end = timer()
+print('Time:', end-start)
+
In C++ you are also able to pre-allocate a custom number of OpenGL contexts so that you can take advantage of stronger GPUs.
You need to use waf
to build RobotDART, but when installing the library a CMake module is installed. Thus it is possible to use RobotDART in your code using CMake. You can find a complete example at cmake/example. In short the CMake would look like this:
cmake_minimum_required(VERSION 3.10 FATAL_ERROR)
-project(robot_dart_example)
-# we ask for Magnum because we want to build the graphics
-find_package(RobotDART REQUIRED OPTIONAL_COMPONENTS Magnum)
-
-add_executable(robot_dart_example example.cpp)
-
-target_link_libraries(robot_dart_example
- RobotDART::Simu
-)
-
-if(RobotDART_Magnum_FOUND)
- add_executable(robot_dart_example_graphics example.cpp)
- target_link_libraries(robot_dart_example_graphics
- RobotDART::Simu
- RobotDART::Magnum
- )
-endif()
+cmake_minimum_required(VERSION 3.10 FATAL_ERROR)
+project(robot_dart_example)
+# we ask for Magnum because we want to build the graphics
+find_package(RobotDART REQUIRED OPTIONAL_COMPONENTS Magnum)
+
+add_executable(robot_dart_example example.cpp)
+
+target_link_libraries(robot_dart_example
+ RobotDART::Simu
+)
+
+if(RobotDART_Magnum_FOUND)
+ add_executable(robot_dart_example_graphics example.cpp)
+ target_link_libraries(robot_dart_example_graphics
+ RobotDART::Simu
+ RobotDART::Magnum
+ )
+endif()
Where can I find complete working examples for either c++ or python?¶
diff --git a/docs/install/index.html b/docs/install/index.html
index 4883bf4e..faac02fd 100644
--- a/docs/install/index.html
+++ b/docs/install/index.html
@@ -424,15 +424,8 @@
-
-
- Installing Boost and Eigen3
-
-
-
-
- -
-
- Installing DART
+
+ Installing system-wide packages
@@ -596,15 +589,8 @@
-
-
- Installing Boost and Eigen3
-
-
-
-
- -
-
- Installing DART
+
+ Installing system-wide packages
@@ -687,126 +673,78 @@ Manual Installation
display: none;
}
+
Manual Installation of RobotDART¶
+For the quick installation manual, see the quick installation page.
Dependencies¶
Required¶
- Ubuntu (it should work on versions >= 14.04) or OSX
-- Eigen3 (needed by DART)
-- Boost (needed by DART)
+- Eigen3
- DART, http://dartsim.github.io/
Optional¶
+- Boost (for unit tests)
- Magnum (for graphics), https://github.com/mosra/magnum
Installation of the dependencies¶
-Installing Boost and Eigen3¶
-For Ubuntu-based distributions we should use the following commands to install Eigen3 and Boost:
+Note: The following instructions are high-level and assume some experience with building/installing software.
+Installing system-wide packages¶
+For Ubuntu-based distributions (>=20.04) we should use the following commands:
sudo apt-get update
-sudo apt-get install libeigen3-dev libboost-filesystem-dev libboost-system-dev libboost-regex-dev
+sudo apt-get install build-essential cmake pkg-config git libboost-regex-dev libboost-system-dev libboost-test-dev pybind11-dev
+sudo apt-get install libdart-all-dev
For OSX with brew:
-
-Installing DART¶
-In order to use RobotDART, you need to install DART (from source).
-For Ubuntu systems, please follow the detailed installation instructions on the DART documentation website. Make sure that you don't forget to add the PPAs as detailed here. What is more, you can enable the -DART_ENABLE_SIMD
flag in the CMake configuration. In addition, you need the following optional dependency: DART Parsers. Lastly, it is recommended to use either the master
branch or v6.12.1
tag (and not the one provided in DART's documentation). In short you should do the following:
-Ubuntu <= 14.04 only
-
-For all Ubuntu distributions
-sudo apt-add-repository ppa:dartsim/ppa
-sudo apt-get update
-
-sudo apt-get install build-essential cmake pkg-config git
-sudo apt-get install libeigen3-dev libassimp-dev libccd-dev libfcl-dev libboost-regex-dev libboost-system-dev
-
-sudo apt-get install libtinyxml-dev libtinyxml2-dev
-sudo apt-get install liburdfdom-dev liburdfdom-headers-dev
-
-cd /path/to/tmp/folder
-git clone git://github.com/dartsim/dart.git
-cd dart
-git checkout v6.12.1
-
-mkdir build
-cd build
-cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr ..
-make -j4
-sudo make install
-
-If you want to install DART somewhere else than /usr/local
, you should use the -DCMAKE_INSTALL_PREFIX=/path/to/install/folder
cmake argument.
-For OSX systems with homebrew, please follow the detailed installation instructions on the DART documentation website. You need to follow the same procedure as for Ubuntu systems. In short you should do the following:
-brew install eigen
-brew install assimp
-brew install libccd
-brew install dartsim/dart/fcl04
-brew install boost
-
-brew install tinyxml
-brew install tinyxml2
-brew install urdfdom
-
-cd /path/to/tmp/folder
-git clone git://github.com/dartsim/dart.git
-cd dart
-git checkout v6.12.1
-
-mkdir build
-cd build
-cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr ..
-make -j4
-sudo make install
+
Installing Magnum¶
Magnum depends on Corrade and we are going to use a few plugins and extras from the library. We are also going to use Glfw and Glx for the back-end. Follow the instructions below:
-#installation of Glfw and OpenAL
-# Ubuntu
-sudo apt-get install libglfw3-dev libglfw3 libopenal-dev libassimp-dev
-# Mac OSX
-brew install glfw3 openal-soft assimp
-
-# installation of Corrade
-cd /path/to/tmp/folder
-git clone https://github.com/mosra/corrade.git
-cd corrade
-mkdir build && cd build
-cmake -DCMAKE_BUILD_TYPE=Release ..
-make -j
-sudo make install
-
-# installation of Magnum
-cd /path/to/tmp/folder
-git clone https://github.com/mosra/magnum.git
-cd magnum
-mkdir build && cd build
-# Ubuntu
-cmake -DCMAKE_BUILD_TYPE=Release -DWITH_AUDIO=ON -DWITH_DEBUGTOOLS=ON -DWITH_GL=ON -DWITH_MESHTOOLS=ON -DWITH_PRIMITIVES=ON -DWITH_SCENEGRAPH=ON -DWITH_SHADERS=ON -DWITH_TEXT=ON -DWITH_TEXTURETOOLS=ON -DWITH_TRADE=ON -DWITH_GLFWAPPLICATION=ON -DWITH_WINDOWLESSEGLAPPLICATION=ON -DWITH_OPENGLTESTER=ON -DWITH_ANYAUDIOIMPORTER=ON -DWITH_ANYIMAGECONVERTER=ON -DWITH_ANYIMAGEIMPORTER=ON -DWITH_ANYSCENEIMPORTER=ON -DWITH_MAGNUMFONT=ON -DWITH_OBJIMPORTER=ON -DWITH_TGAIMPORTER=ON -DWITH_WAVAUDIOIMPORTER=ON -DTARGET_HEADLESS=ON .. # this will enable almost all features of Magnum that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)
-# Mac OSX
-cmake -DCMAKE_BUILD_TYPE=Release -DWITH_AUDIO=ON -DWITH_DEBUGTOOLS=ON -DWITH_GL=ON -DWITH_MESHTOOLS=ON -DWITH_PRIMITIVES=ON -DWITH_SCENEGRAPH=ON -DWITH_SHADERS=ON -DWITH_TEXT=ON -DWITH_TEXTURETOOLS=ON -DWITH_TRADE=ON -DWITH_GLFWAPPLICATION=ON -DWITH_WINDOWLESSCGLAPPLICATION=ON -DWITH_OPENGLTESTER=ON -DWITH_ANYAUDIOIMPORTER=ON -DWITH_ANYIMAGECONVERTER=ON -DWITH_ANYIMAGEIMPORTER=ON -DWITH_ANYSCENEIMPORTER=ON -DWITH_MAGNUMFONT=ON -DWITH_OBJIMPORTER=ON -DWITH_TGAIMPORTER=ON -DWITH_WAVAUDIOIMPORTER=ON .. # this will enable almost all features of Magnum that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)
-make -j
-sudo make install
-
-# installation of Magnum Plugins
-cd /path/to/tmp/folder
-git clone https://github.com/mosra/magnum-plugins.git
-cd magnum-plugins
-mkdir build && cd build
-cmake -DCMAKE_BUILD_TYPE=Release -DWITH_ASSIMPIMPORTER=ON -DWITH_DDSIMPORTER=ON -DWITH_JPEGIMPORTER=ON -DWITH_OPENGEXIMPORTER=ON -DWITH_PNGIMPORTER=ON -DWITH_TINYGLTFIMPORTER=ON -DWITH_STBTRUETYPEFONT=ON .. # this will enable quite a few Magnum Plugins that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)
-make -j
-sudo make install
-
-# installation of Magnum DART Integration (DART needs to be installed) and Eigen Integration
-cd /path/to/tmp/folder
-git clone https://github.com/mosra/magnum-integration.git
-cd magnum-integration
-mkdir build && cd build
-cmake -DCMAKE_BUILD_TYPE=Release -DWITH_DART=ON -DWITH_EIGEN=ON ..
-make -j
-sudo make install
+#installation of Glfw and OpenAL
+# Ubuntu
+sudo apt-get install libglfw3-dev libglfw3 libassimp-dev libopenal-dev libsdl2-dev libopenexr-dev libdevil-dev libpng-dev libfaad-dev libfreetype6-dev
+# Mac OSX
+brew install glfw3 openal-soft assimp
+
+# installation of Corrade
+cd /path/to/tmp/folder
+git clone https://github.com/mosra/corrade.git
+cd corrade
+mkdir build && cd build
+cmake -DCMAKE_BUILD_TYPE=Release ..
+make -j
+sudo make install
+
+# installation of Magnum
+cd /path/to/tmp/folder
+git clone https://github.com/mosra/magnum.git
+cd magnum
+mkdir build && cd build
+# Ubuntu
+cmake -DCMAKE_BUILD_TYPE=Release -DWITH_AUDIO=ON -DWITH_DEBUGTOOLS=ON -DWITH_GL=ON -DWITH_MESHTOOLS=ON -DWITH_PRIMITIVES=ON -DWITH_SCENEGRAPH=ON -DWITH_SHADERS=ON -DWITH_TEXT=ON -DWITH_TEXTURETOOLS=ON -DWITH_TRADE=ON -DWITH_GLFWAPPLICATION=ON -DWITH_WINDOWLESSEGLAPPLICATION=ON -DWITH_OPENGLTESTER=ON -DWITH_ANYAUDIOIMPORTER=ON -DWITH_ANYIMAGECONVERTER=ON -DWITH_ANYIMAGEIMPORTER=ON -DWITH_ANYSCENEIMPORTER=ON -DWITH_MAGNUMFONT=ON -DWITH_OBJIMPORTER=ON -DWITH_TGAIMPORTER=ON -DWITH_WAVAUDIOIMPORTER=ON -DTARGET_EGL=ON .. # this will enable almost all features of Magnum that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)
+# Mac OSX
+cmake -DCMAKE_BUILD_TYPE=Release -DWITH_AUDIO=ON -DWITH_DEBUGTOOLS=ON -DWITH_GL=ON -DWITH_MESHTOOLS=ON -DWITH_PRIMITIVES=ON -DWITH_SCENEGRAPH=ON -DWITH_SHADERS=ON -DWITH_TEXT=ON -DWITH_TEXTURETOOLS=ON -DWITH_TRADE=ON -DWITH_GLFWAPPLICATION=ON -DWITH_WINDOWLESSCGLAPPLICATION=ON -DWITH_OPENGLTESTER=ON -DWITH_ANYAUDIOIMPORTER=ON -DWITH_ANYIMAGECONVERTER=ON -DWITH_ANYIMAGEIMPORTER=ON -DWITH_ANYSCENEIMPORTER=ON -DWITH_MAGNUMFONT=ON -DWITH_OBJIMPORTER=ON -DWITH_TGAIMPORTER=ON -DWITH_WAVAUDIOIMPORTER=ON .. # this will enable almost all features of Magnum that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)
+make -j
+sudo make install
+
+# installation of Magnum Plugins
+cd /path/to/tmp/folder
+git clone https://github.com/mosra/magnum-plugins.git
+cd magnum-plugins
+mkdir build && cd build
+cmake -DCMAKE_BUILD_TYPE=Release -DWITH_ASSIMPIMPORTER=ON -DWITH_DDSIMPORTER=ON -DWITH_JPEGIMPORTER=ON -DWITH_OPENGEXIMPORTER=ON -DWITH_PNGIMPORTER=ON -DWITH_TINYGLTFIMPORTER=ON -DWITH_STBTRUETYPEFONT=ON .. # this will enable quite a few Magnum Plugins that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)
+make -j
+sudo make install
+
+# installation of Magnum DART Integration (DART needs to be installed) and Eigen Integration
+cd /path/to/tmp/folder
+git clone https://github.com/mosra/magnum-integration.git
+cd magnum-integration
+mkdir build && cd build
+cmake -DCMAKE_BUILD_TYPE=Release -DWITH_DART=ON -DWITH_EIGEN=ON ..
+make -j
+sudo make install
Compilation and running the examples¶
The compilation of the library is straight-forward:
@@ -832,17 +770,7 @@ Installing the libraryIn short, with --prefix
you can change the directory where the library will be installed and if --shared
is present a shared library will be created instead of a static one.
Compiling the python bindings¶
For the python bindings of robot_dart, we need numpy
to be installed, pybind11
and the python bindings of DART (dartpy).
-For numpy
one can install it with pip
or standard packages. For pybind11
please follow the instructions on the dart website (focus on the pybind11 part, for the other parts follow our instructions above).
-For the python bindings of DART, do:
-cd dart
-
-mkdir build
-cd build
-cmake -DDART_BUILD_DARTPY=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr ..
-make -j4
-sudo make install
-sudo make install-dartpy # for DART >= v6.12.0, we do not need this
-
+For numpy
one can install it with pip
or standard packages. dartpy
should be installed via the packages above. If not, please see the installation instructions on the main DART website.
Then the compilation of robot_dart is almost identical as before:
- retrieve the code, for instance with
git clone https://github.com/resibots/robot_dart.git
@@ -850,7 +778,7 @@ Compiling the python bindings
- Ubuntu 20.04
+ Ubuntu >=20.04
@@ -481,7 +481,7 @@
-
- Ubuntu 20.04
+ Ubuntu >=20.04
@@ -525,22 +525,37 @@ Installation
display: none;
}
+
Scripts for Quick Installation of RobotDART¶
-In this page we provide standalone scripts for installing RobotDART for Ubuntu
(20.04) and OSX
. The scripts will install all the required dependencies and RobotDART. Notably, all dependencies that need to be compiled by source and RobotDART will be installed inside the /opt
folder. This way, one can be rest assured that their system will be clean.
-Ubuntu 20.04¶
-To quickly install RobotDART on Ubuntu 20.04
, we just need to run ./scripts/install_ubuntu.sh
from the root of the repo. In more detail:
+In this page we provide standalone scripts for installing RobotDART for Ubuntu
(>=20.04) and OSX
. The scripts will install all the required dependencies and RobotDART. Notably, all dependencies that need to be compiled by source and RobotDART will be installed inside the /opt
folder. This way, one can rest assured that their system will be clean.
+Ubuntu >=20.04¶
+To quickly install RobotDART on Ubuntu >=20.04
, we just need to run ./scripts/install_ubuntu.sh
from the root of the repo. In more detail:
git clone https://github.com/resibots/robot_dart.git
cd robot_dart
./scripts/install_ubuntu.sh
-This will install everything needed! Once the script is successfully executed, one should add the following to their ~/.bashrc
or ~/.zshrc
file:
+This will install everything needed! Once the script is successfully executed, one should add the following to their ~/.bashrc
or ~/.zshrc
file (you may need to swap the python version to yours1):
export PATH=/opt/magnum/bin:$PATH
-export LD_LIBRARY_PATH=/opt/dart/lib:/opt/magnum/lib:/opt/robot_dart/lib:$LD_LIBRARY_PATH
-export PYTHONPATH=/opt/dart/lib/python3/dist-packages:/opt/robot_dart/lib/python3.8/site-packages:$PYTHONPATH
+export LD_LIBRARY_PATH=/opt/magnum/lib:/opt/robot_dart/lib:$LD_LIBRARY_PATH
+export PYTHONPATH=/opt/robot_dart/lib/python3.10/site-packages:$PYTHONPATH
+
+
OSX¶
Coming soon
+
+
+
+-
+
You can run python --version
to see your version. We only keep the major.minor (ignoring the patch version) ↩
+
+
+
diff --git a/docs/search/search_index.json b/docs/search/search_index.json
index 1ae75267..5e739c46 100644
--- a/docs/search/search_index.json
+++ b/docs/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":""},{"location":"#robotdart","title":"RobotDART","text":"RobotDART is a C++11 robot simulator (with optional Python bindings) built on top of the DART physics engine. The RobotDART simulator is intended to be used by Robotics and Machine Learning researchers who want to write controllers or test learning algorithms without the delays and overhead that usually comes with other simulators (e.g., Gazebo, Coppelia-sim).
For this reason, the simulator runs headless by default, and there is the possibility of rendering the scene (e.g., through a camera sensor) without opening a graphics window. All RobotDART's code is thread-safe (including graphics and camera sensors), and thus enables its users to use their code in parallel jobs in multicore computers.
In a few words, RobotDART combines:
- a physics engine (DART)
- an optional graphics engine (Magnum)
- a few sensor classes (IMU, force/torque sensors, cameras, etc.)
- a curated URDF library
- ... and a few useful features to make the life of roboticists/researchers easier
"},{"location":"#main-features","title":"Main Features","text":" - Modern C++ code that makes it easy to develop environments and applications
- Fast and reliable simulation of robotic mechanisms and their interactions (through the DART physics engine)
- A structured
Robot
class that enables a unified creation and access to all important values: in RobotDART you can load any robot description file (URDF, SDF, SKEL, and MuJoCo files) with the same command, and all robot measurements can be queried without using any DART code - A generic
RobotControl
class that enables fast prototyping of any type of controller - A generic
Sensor
class that allows the creation of any kind of sensor - A growing list of already implemented sensors, that includes 6-axis
ForceTorque
, IMU
, RGB
, and RGB-D
sensors - A simulation class (
RobotDARTSimu
) that handles multiple robots and sensors, and allows for step-by-step simulation - A growing list of supported robots along with edited and optimized models to be used with RobotDART (see the robots page for details and examples):
- PAL Talos humanoid
- Franka Emika Panda
- KUKA LBR Iiwa (14kg version)
- IIT iCub humanoid (without hands)
- Unitree A1 quadruped robot
- Dynamixel-based 6-legged robot
- A simple arm for educational purposes
- and you can use any URDF
- A custom graphical interface built on top of Magnum that allows generic customization
- Support for windowless OpenGL context creation (even in parallel threads!) to allow for camera sensor usage even in parallel jobs running on clusters
- Support for video recording in simulation time (i.e., not affected by delays of simulator and/or graphics) for visualization or debugging purposes
- Full-featured Python bindings for fast prototyping
- RobotDART runs on any Linux distribution and Mac OS
"},{"location":"#what-robotdart-is-not","title":"What RobotDART is not","text":" - RobotDART is primarily intended to be non-interactive (run a simulation, record/view the result),
- Interaction is limited to changing the view and your own code. No GUI for adding objects or interactively build an environment,
- RobotDART is not optimized for wheeled robots,
- RobotDART is not optimized for simulating complex (e.g., mountain-like) terrains.
"},{"location":"faq/","title":"FAQ","text":""},{"location":"faq/#frequently-asked-questions","title":"Frequently Asked Questions","text":"This pages provides a user guide of the library through Frequently Asked Questions (FAQ).
"},{"location":"faq/#what-is-a-minimal-working-example-of-robotdart","title":"What is a minimal working example of RobotDART?","text":"You can find a minimal working example at hello_world.cpp. This example is creating a world where a hexapod robot is placed just above a floor and left to fall. The robot has no actuation, and there is the simplest graphics configuration. Let's split it down.
- We first include the appropriate files:
C++Python
#include <robot_dart/robot_dart_simu.hpp>\n#ifdef GRAPHIC\n#include <robot_dart/gui/magnum/graphics.hpp>\n#endif\n
import RobotDART as rd\n
- We then load our hexapod robot:
C++Python auto robot = std::make_shared<robot_dart::Robot>(\"pexod.urdf\");\n
robot = rd.Robot(\"pexod.urdf\");\n
- We need to place it above the floor to avoid collision (we can use RobotDART's helpers ;)):
C++Python robot->set_base_pose(robot_dart::make_tf({0., 0., 0.2}));\n
robot.set_base_pose([0., 0., 0., 0., 0., 0.2])\n
- We can now create the simulation object and add the robot and the floor:
C++Python robot_dart::RobotDARTSimu simu(0.001); // dt=0.001, 1KHz simulation\nsimu.add_floor();\nsimu.add_robot(robot);\n
simu = rd.RobotDARTSimu(0.001); # dt=0.001, 1KHz simulation\nsimu.add_floor();\nsimu.add_robot(robot);\n
- If needed or wanted, we can add a graphics component to visualize the scene:
C++Python auto graphics = std::make_shared<robot_dart::gui::magnum::Graphics>();\nsimu.set_graphics(graphics);\ngraphics->look_at({0.5, 3., 0.75}, {0.5, 0., 0.2});\n
graphics = rd.gui.Graphics()\nsimu.set_graphics(graphics)\ngraphics.look_at([0.5, 3., 0.75], [0.5, 0., 0.2])\n
- Once everything is configured, we can run our simulation for a few seconds:
C++Python simu.run(10.);\n
simu.run(10.)\n
- Here's how it looks:
"},{"location":"faq/#what-robots-are-supported-in-robotdart","title":"What robots are supported in RobotDART?","text":"RobotDART supports any robot that can be described by a URDF, SDF, SKEL or MJCF file. Nevertheless, we have a curated list of robots with edited and optimized models to be used with RobotDART (see the robots page for details and examples).
"},{"location":"faq/#how-can-i-load-my-own-urdfsdfskelmjcf-file","title":"How can I load my own URDF/SDF/SKEL/MJCF file?","text":"See the robots page for details.
"},{"location":"faq/#how-do-i-enable-graphics-in-my-code","title":"How do I enable graphics in my code?","text":"To enable graphics in your code, you need to do the following:
- Install Magnum. See the installation page for details.
- Create and set a graphics object in the simulation object. Here's an example:
C++Python auto graphics = std::make_shared<robot_dart::gui::magnum::Graphics>();\nsimu.set_graphics(graphics);\ngraphics->look_at({0.5, 3., 0.75}, {0.5, 0., 0.2});\n
graphics = rd.gui.Graphics()\nsimu.set_graphics(graphics)\ngraphics.look_at([0.5, 3., 0.75], [0.5, 0., 0.2])\n
"},{"location":"faq/#i-want-to-have-multiple-camera-sensors-is-it-possible","title":"I want to have multiple camera sensors. Is it possible?","text":"Having multiple camera sensors is indeed possible. We can add as many cameras as we wish along the main camera defined in How do I record a video:
C++Python // Add camera\nauto camera = std::make_shared<robot_dart::sensor::Camera>(graphics->magnum_app(), 256, 256);\n
# Add camera\ncamera = rd.sensor.Camera(graphics.magnum_app(), 32, 32)\n
"},{"location":"faq/#how-do-i-record-a-video","title":"How do I record a video?","text":"In order to record a video of what the main or any other camera \"sees\", you need to call the function record_video(path)
of the graphics class:
C++Python graphics->record_video(\"talos_dancing.mp4\");\n
graphics.record_video(\"talos_dancing.mp4\")\n
Or the camera class:
C++Python // cameras can also record video\ncamera->record_video(\"video-camera.mp4\");\n
# cameras can also record video\ncamera.record_video(\"video-camera.mp4\")\n
"},{"location":"faq/#how-can-i-position-a-camera-to-the-environment","title":"How can I position a camera to the environment?","text":"In order to position a camera inside the world, we need to use the lookAt
method of the camera/graphics object:
C++Python // set the position of the camera, and the position where the main camera is looking at\nEigen::Vector3d cam_pos = {-0.5, -3., 0.75};\nEigen::Vector3d cam_looks_at = {0.5, 0., 0.2};\ncamera->look_at(cam_pos, cam_looks_at);\n
# set the position of the camera, and the position where the main camera is looking at\ncam_pos = [-0.5, -3., 0.75]\ncam_looks_at = [0.5, 0., 0.2]\ncamera.look_at(cam_pos, cam_looks_at)\n
"},{"location":"faq/#how-can-i-attach-a-camera-to-a-moving-link","title":"How can I attach a camera to a moving link?","text":"Cameras can be easily attached to a moving link:
C++Python Eigen::Isometry3d tf;\ntf = Eigen::AngleAxisd(3.14, Eigen::Vector3d{1., 0., 0.});\ntf *= Eigen::AngleAxisd(1.57, Eigen::Vector3d{0., 0., 1.});\ntf.translation() = Eigen::Vector3d(0., 0., 0.1);\ncamera->attach_to_body(robot->body_node(\"iiwa_link_ee\"), tf); // cameras are looking towards -z by default\n
tf = dartpy.math.Isometry3()\nrot = dartpy.math.AngleAxis(3.14, [1., 0., 0.])\nrot = rot.multiply( dartpy.math.AngleAxis(1.57, [0., 0., 1.])).to_rotation_matrix()\ntf.set_translation([0., 0., 0.1])\ncamera.attach_to_body(robot.body_node(\"iiwa_link_ee\"), tf) # cameras are looking towards -z by default\n
"},{"location":"faq/#how-can-i-manipulate-the-camera-object","title":"How can I manipulate the camera object?","text":"Every camera has its own parameters, i.e a Near plane, a far plane, a Field Of View (FOV), a width and a height (that define the aspect ratio), you can manipulate each one separately:
C++Python camera->camera().set_far_plane(5.f);\ncamera->camera().set_near_plane(0.01f);\ncamera->camera().set_fov(60.0f);\n
camera.camera().set_far_plane(5.)\ncamera.camera().set_near_plane(0.01)\ncamera.camera().set_fov(60.0)\n
or all at once:
C++Python camera->camera().set_camera_params(5., // far plane\n0.01f, // near plane\n60.0f, // field of view\n600, // width\n400 // height\n);\n
camera.camera().set_camera_params(5., #far plane\n0.01, #near plane\n60.0, # field of view\n600, # width\n400) #height\n
You can find a complete example at cameras.cpp.
"},{"location":"faq/#how-can-i-interact-with-the-camera","title":"How can I interact with the camera?","text":"We can move translate the cameras with the WASD
keys, zoom in and out using the mouse wheel
and rotate the camera with holding the left mouse key
and moving the mouse.
"},{"location":"faq/#what-do-the-numbers-in-the-status-bar-mean","title":"What do the numbers in the status bar mean?","text":"The status bar looks like this:
Where simulation time gives us the total simulated time (in seconds), wall time gives us the total time (in seconds) that has passed in real-time once we have started simulating. The next number X.Xx gives us the real-time factor: for example, 1.1x means that the simulation runs 1.1 times faster than real-time, whereas 0.7x means that the simulation runs slower than real-time. The value it: XX ms reports the time it took the last iteration (in milliseconds). The last part gives us whether the simulation tries to adhere to real-time or not. sync means that RobotDART will slow down the simulation in order for it to be in real-time, whereas no-sync means that RobotDART will try to run the simulation as fast as possible.
"},{"location":"faq/#how-can-i-alter-the-graphics-scene-eg-change-lighting-conditions","title":"How can I alter the graphics scene (e.g., change lighting conditions)?","text":"When creating a graphics object, you can pass a GraphicsConfiguration
object that changes the default values:
C++Python robot_dart::gui::magnum::GraphicsConfiguration configuration;\n// We can change the width/height of the window (or camera image dimensions)\nconfiguration.width = 1280;\nconfiguration.height = 960;\nconfiguration.title = \"Graphics Tutorial\"; // We can set a title for our window\n// We can change the configuration for shadows\nconfiguration.shadowed = true;\nconfiguration.transparent_shadows = true;\nconfiguration.shadow_map_size = 1024;\n// We can also alter some specifications for the lighting\nconfiguration.max_lights = 3; // maximum number of lights for our scene [default=3]\nconfiguration.specular_strength = 0.25; // strength of the specular component\n// Some extra configuration for the main camera\nconfiguration.draw_main_camera = true;\nconfiguration.draw_debug = true;\nconfiguration.draw_text = true;\n// We can also change the background color [default=black]\nconfiguration.bg_color = Eigen::Vector4d{1.0, 1.0, 1.0, 1.0};\n// Create the graphics object with the configuration as parameter\nauto graphics = std::make_shared<robot_dart::gui::magnum::Graphics>(configuration);\n
configuration = rd.gui.GraphicsConfiguration()\n# We can change the width/height of the window (or camera, dimensions)\nconfiguration.width = 1280\nconfiguration.height = 960\nconfiguration.title = \"Graphics Tutorial\" # We can set a title for our window\n# We can change the configuration for shadows\nconfiguration.shadowed = True\nconfiguration.transparent_shadows = True\nconfiguration.shadow_map_size = 1024\n# We can also alter some specifications for the lighting\nconfiguration.max_lights = 3 # maximum number of lights for our scene\nconfiguration.specular_strength = 0.25 # strength og the specular component\n# Some extra configuration for the main camera\nconfiguration.draw_main_camera = True\nconfiguration.draw_debug = True\nconfiguration.draw_text = True\n# We can also change the background color [default = black]\nconfiguration.bg_color = [1., 1., 1., 1.]\n# create the graphics object with the configuration as a parameter\ngraphics = rd.gui.Graphics(configuration)\n
You can disable or enable shadows on the fly as well:
C++Python // Disable shadows\ngraphics->enable_shadows(false, false);\nsimu.run(1.);\n// Enable shadows only for non-transparent objects\ngraphics->enable_shadows(true, false);\nsimu.run(1.);\n// Enable shadows for transparent objects as well\ngraphics->enable_shadows(true, true);\nsimu.run(1.);\n
# Disable shadows\ngraphics.enable_shadows(False, False)\nsimu.run(1.)\n# Enable shadows only for non-transparent objects\ngraphics.enable_shadows(True, False)\nsimu.run(1.)\n# Enable shadows for transparent objects as well\ngraphics.enable_shadows(True, True)\nsimu.run(1.)\n
You can also add your own lights. The application by default creates 2 light sources and the maximum number of lights is 3 (you can change this once before the creation of the graphics object via the GraphicsConfiguration
object). So usually before you add your lights, you have to clear the default lights:
C++Python // Clear Lights\ngraphics->clear_lights();\n
# Clear Lights\ngraphics.clear_lights()\n
Then you must create a custom light material:
C++Python // Create Light material\nMagnum::Float shininess = 1000.f;\nMagnum::Color4 white = {1.f, 1.f, 1.f, 1.f};\n// ambient, diffuse, specular\nauto custom_material = robot_dart::gui::magnum::gs::Material(white, white, white, shininess);\n
# Clear Light material\nshininess = 1000.\nwhite = magnum.Color4(1., 1., 1., 1.)\n# ambient, diffuse, specular\ncustom_material = rd.gui.Material(white, white, white, shininess)\n
Now you can add on ore more of the following lights:
Point Light:
C++Python // Create point light\nMagnum::Vector3 position = {0.f, 0.f, 2.f};\nMagnum::Float intensity = 1.f;\nMagnum::Vector3 attenuation_terms = {1.f, 0.f, 0.f};\nauto point_light = robot_dart::gui::magnum::gs::create_point_light(position, custom_material, intensity, attenuation_terms);\ngraphics->add_light(point_light);\n
# Create point light\nposition = magnum.Vector3(0., 0., 2.)\nintensity = 1.\nattenuation_terms = magnum.Vector3(1., 0., 0.)\npoint_light = rd.gui.create_point_light(position, custom_material, intensity, attenuation_terms)\ngraphics.add_light(point_light)\n
Spot Light:
C++Python // Create spot light\nMagnum::Vector3 position = {0.f, 0.f, 1.f};\nMagnum::Vector3 direction = {-1.f, -1.f, -1.f};\nMagnum::Float intensity = 1.f;\nMagnum::Vector3 attenuation_terms = {1.f, 0.f, 0.f};\nMagnum::Float spot_exponent = M_PI;\nMagnum::Float spot_cut_off = M_PI / 8;\nauto spot_light = robot_dart::gui::magnum::gs::create_spot_light(position, custom_material, direction, spot_exponent, spot_cut_off, intensity, attenuation_terms);\ngraphics->add_light(spot_light);\n
# Create spot light\nposition = magnum.Vector3(0., 0., 1.)\ndirection = magnum.Vector3(-1, -1, -1)\nintensity = 1.\nattenuation_terms = magnum.Vector3(1., 0., 0.)\nspot_exponent = np.pi\nspot_cut_off = np.pi / 8\nspot_light = rd.gui.create_spot_light(position, custom_material, direction, spot_exponent, spot_cut_off, intensity, attenuation_terms)\ngraphics.add_light(spot_light)\n
Directional Light:
C++Python // Create directional light\nMagnum::Vector3 direction = {-1.f, -1.f, -1.f};\nauto directional_light = robot_dart::gui::magnum::gs::create_directional_light(direction, custom_material);\ngraphics->add_light(directional_light);\n
# Create directional light\ndirection = magnum.Vector3(-1, -1, -1)\ndirectional_light = rd.gui.create_directional_light(direction, custom_material)\ngraphics.add_light(directional_light)\n
"},{"location":"faq/#i-want-to-visualize-a-target-configuration-of-my-robot-is-this-possible","title":"I want to visualize a target configuration of my robot, is this possible?","text":"Yes this is possible. RobotDART gives the ability to create a clone of your robot that has no physics, no contacts, just visuals:
C++Python // Add a ghost robot; only visuals, no dynamics, no collision\nauto ghost = robot->clone_ghost();\nsimu.add_robot(ghost);\n
# Add a ghost robot; only visuals, no dynamics, no collision\nghost = robot.clone_ghost()\nsimu.add_robot(ghost)\n
"},{"location":"faq/#how-can-i-control-my-robot","title":"How can I control my robot ?","text":"PD control
C++Python // add a PD-controller to the arm\n// set desired positions\nEigen::VectorXd ctrl = robot_dart::make_vector({0., M_PI / 4., 0., -M_PI / 4., 0., M_PI / 2., 0., 0.});\n// add the controller to the robot\nauto controller = std::make_shared<robot_dart::control::PDControl>(ctrl);\nrobot->add_controller(controller);\ncontroller->set_pd(300., 50.);\n
# add a PD-controller to the arm\n# set desired positions\nctrl = [0., np.pi / 4., 0., -np.pi / 4., 0., np.pi / 2., 0., 0.]\n# add the controller to the robot\ncontroller = rd.PDControl(ctrl)\nrobot.add_controller(controller)\ncontroller.set_pd(300., 50.)\n
Simple control
C++Python auto controller1 = std::make_shared<robot_dart::control::SimpleControl>(ctrl);\nrobot->add_controller(controller1);\n
controller1 = rd.SimpleControl(ctrl)\nrobot.add_controller(controller1)\n
Robot control
C++Python class MyController : public robot_dart::control::RobotControl {\npublic:\nMyController() : robot_dart::control::RobotControl() {}\nMyController(const Eigen::VectorXd& ctrl, bool full_control) : robot_dart::control::RobotControl(ctrl, full_control) {}\nMyController(const Eigen::VectorXd& ctrl, const std::vector<std::string>& dof_names) : robot_dart::control::RobotControl(ctrl, dof_names) {}\nvoid configure() override\n{\n_active = true;\n}\nEigen::VectorXd calculate(double) override\n{\nauto robot = _robot.lock();\nEigen::VectorXd cmd = 100. * (_ctrl - robot->positions(_controllable_dofs));\nreturn cmd;\n}\nstd::shared_ptr<robot_dart::control::RobotControl> clone() const override\n{\nreturn std::make_shared<MyController>(*this);\n}\n};\n
class MyController(rd.RobotControl):\ndef __init__(self, ctrl, full_control):\nrd.RobotControl.__init__(self, ctrl, full_control)\ndef __init__(self, ctrl, controllable_dofs):\nrd.RobotControl.__init__(self, ctrl, controllable_dofs)\ndef configure(self):\nself._active = True\ndef calculate(self, t):\ntarget = np.array([self._ctrl])\ncmd = 100*(target-self.robot().positions(self._controllable_dofs))\nreturn cmd[0]\n# TO-DO: This is NOT working at the moment!\ndef clone(self):\nreturn MyController(self._ctrl, self._controllable_dofs)\n
"},{"location":"faq/#is-there-a-way-to-control-the-simulation-timestep","title":"Is there a way to control the simulation timestep?","text":"When creating a RobotDARTSimu object you choose the simulation timestep:
C++Python // choose time step of 0.001 seconds\nrobot_dart::RobotDARTSimu simu(0.001);\n
# choose time step of 0.001 seconds\nsimu = rd.RobotDARTSimu(0.001)\n
which can later be modified by:
C++Python // set timestep to 0.005 and update control frequency(bool)\nsimu.set_timestep(0.005, true);\n
# set timestep to 0.005 and update control frequency(bool)\nsimu.set_timestep(0.005, True)\n
"},{"location":"faq/#i-want-to-simulate-a-mars-environment-is-it-possible-to-change-the-gravitational-force-of-the-simulation-environment","title":"I want to simulate a mars environment, is it possible to change the gravitational force of the simulation environment?","text":"Yes you can modify the gravitational forces 3-dimensional vector of the simulation:
C++Python // Set gravitational force of mars\nEigen::Vector3d mars_gravity = {0., 0., -3.721};\nsimu.set_gravity(mars_gravity);\n
# set gravitational force of mars\nmars_gravity = [0., 0., -3.721]\nsimu.set_gravity(mars_gravity)\n
"},{"location":"faq/#which-collision-detectors-are-available-what-are-their-differences-how-can-i-choose-between-them","title":"Which collision detectors are available? What are their differences? How can I choose between them?","text":"DART FCL ODE Bullet Support only basic shapes Full-featured collision detector fully integrated by DART External collision detector of ODE External collision detector of Bullet This is building along with DART This is a required dependency of DART Needs an external library Needs an external library Very fast for small scenes Accurate detailed collisions, but not very fast Fast collision detection (the integration is not complete) Fast and accurate collision detection (works well for wheels as well) We can easily select one of the available collision detectors using the simulator object:
C++Python simu.set_collision_detector(\"fcl\"); // collision_detector can be \"dart\", \"fcl\", \"ode\" or \"bullet\" (case does not matter)\n
simu.set_collision_detector(\"fcl\") # collision_detector can be \"dart\", \"fcl\", \"ode\" or \"bullet\" (case does not matter)\n
"},{"location":"faq/#my-robot-does-not-self-collide-how-can-i-change-this","title":"My robot does not self-collide. How can I change this?","text":"One possible cause may be the fact that self collision is disabled, you can check and change this:
C++Python if (!robot->self_colliding()) {\nstd::cout << \"Self collision is not enabled\" << std::endl;\n// set self collisions to true and adjacent collisions to false\nrobot->set_self_collision(true, false);\n}\n
if(not robot.self_colliding()):\nprint(\"Self collision is not enabled\")\n# set self collisions to true and adjacent collisions to false\nrobot.set_self_collision(True, False)\n
"},{"location":"faq/#how-can-i-compute-kinematicdynamic-properties-of-my-robot-eg-jacobians-mass-matrix","title":"How can I compute kinematic/dynamic properties of my robot (e.g., Jacobians, Mass Matrix)?","text":"Kinematic Properties:
C++Python // Get Joint Positions(Angles)\nauto joint_positions = robot->positions();\n// Get Joint Velocities\nauto joint_vels = robot->velocities();\n// Get Joint Accelerations\nauto joint_accs = robot->accelerations();\n// Get link_name(str) Transformation matrix with respect to the world frame.\nauto eef_tf = robot->body_pose(link_name);\n// Get translation vector from transformation matrix\nauto eef_pos = eef_tf.translation();\n// Get rotation matrix from tranformation matrix\nauto eef_rot = eef_tf.rotation();\n// Get link_name 6d pose vector [logmap(eef_tf.linear()), eef_tf.translation()]\nauto eef_pose_vec = robot->body_pose_vec(link_name);\n// Get link_name 6d velocity vector [angular, cartesian]\nauto eef_vel = robot->body_velocity(link_name);\n// Get link_name 6d acceleration vector [angular, cartesian]\nauto eef_acc = robot->body_acceleration(link_name);\n// Jacobian targeting the origin of link_name(str)\nauto jacobian = robot->jacobian(link_name);\n// Jacobian time derivative\nauto jacobian_deriv = robot->jacobian_deriv(link_name);\n// Center of Mass Jacobian\nauto com_jacobian = robot->com_jacobian();\n// Center of Mass Jacobian Time Derivative\nauto com_jacobian_deriv = robot->com_jacobian_deriv();\n
# Get Joint Positions(Angles)\njoint_positions = robot.positions()\n# Get Joint Velocities\njoint_vels = robot.velocities()\n# Get Joint Accelerations\njoint_accs = robot.accelerations()\n# Get link_name(str) Transformation matrix with respect to the world frame.\neef_tf = robot.body_pose(link_name)\n# Get translation vector from transformation matrix\neef_pos = eef_tf.translation()\n# Get rotation matrix from tranformation matrix\neef_rot = eef_tf.rotation()\n# Get link_name 6d pose vector [logmap(eef_tf.linear()), eef_tf.translation()]\neef_pose_vec = robot.body_pose_vec(link_name)\n# Get link_name 6d velocity vector [angular, cartesian]\neef_vel = robot.body_velocity(link_name)\n# Get link_name 6d acceleration vector [angular, cartesian]\neef_acc = robot.body_acceleration(link_name)\n# Jacobian targeting the origin of link_name(str)\njacobian = robot.jacobian(link_name)\n# Jacobian time derivative\njacobian_deriv = robot.jacobian_deriv(link_name)\n# Center of Mass Jacobian\ncom_jacobian = robot.com_jacobian()\n# Center of Mass Jacobian Time Derivative\ncom_jacobian_deriv = robot.com_jacobian_deriv()\n
Dynamic Properties:
C++Python // Get Joint Forces\nauto joint_forces = robot->forces();\n// Get link's mass\nauto eef_mass = robot->body_mass(link_name);\n// Mass Matrix of robot\nauto mass_matrix = robot->mass_matrix();\n// Inverse of Mass Matrix\nauto inv_mass_matrix = robot->inv_mass_matrix();\n// Augmented Mass matrix\nauto aug_mass_matrix = robot->aug_mass_matrix();\n// Inverse of Augmented Mass matrix\nauto inv_aug_mass_matrix = robot->inv_aug_mass_matrix();\n// Coriolis Force vector\nauto coriolis = robot->coriolis_forces();\n// Gravity Force vector\nauto gravity = robot->gravity_forces();\n// Combined vector of Coriolis Force and Gravity Force\nauto coriolis_gravity = robot->coriolis_gravity_forces();\n// Constraint Force Vector\nauto constraint_forces = robot->constraint_forces();\n
# Get Joint Forces\njoint_forces = robot.forces()\n# Get link's mass\neef_mass = robot.body_mass(link_name)\n# Mass Matrix of robot\nmass_matrix = robot.mass_matrix()\n# Inverse of Mass Matrix\ninv_mass_matrix = robot.inv_mass_matrix()\n# Augmented Mass matrix\naug_mass_matrix = robot.aug_mass_matrix()\n# Inverse of Augmented Mass matrix\ninv_aug_mass_matrix = robot.inv_aug_mass_matrix()\n# Coriolis Force vector\ncoriolis = robot.coriolis_forces()\n# Gravity Force vector\ngravity = robot.gravity_forces()\n# Combined vector of Coriolis Force and Gravity Force\ncoriolis_gravity = robot.coriolis_gravity_forces()\n# NOT IMPLEMENTED\n# # Constraint Force Vector\n# constraint_forces = robot.constraint_forces()\n
"},{"location":"faq/#is-there-a-way-to-change-the-joint-properties-eg-actuation-friction","title":"Is there a way to change the joint properties (e.g., actuation, friction)?","text":"There are 6 types of actuators available, you can set the same actuator to multiple joints at once, or you can set each actuator separately:
C++Python // Set all DoFs to same actuator\nrobot->set_actuator_types(\"servo\"); // actuator types can be \"servo\", \"torque\", \"velocity\", \"passive\", \"locked\", \"mimic\"\n// You can also set actuator types separately\nrobot->set_actuator_type(\"torque\", \"iiwa_joint_5\");\n
# Set all DoFs to same actuator\n# actuator types can be \"servo\", \"torque\", \"velocity\", \"passive\", \"locked\", \"mimic\"\nrobot.set_actuator_types(\"servo\")\n# You can also set actuator types separately\nrobot.set_actuator_type(\"torque\", \"iiwa_joint_5\")\n
To enable position and velocity limits for the actuators:
C++Python // Enforce joint position and velocity limits\nrobot->set_position_enforced(true);\n
# Enforce joint position and velocity limits\nrobot.set_position_enforced(True)\n
Every DOF's limits (position, velocity, acceleration, force) can be modified:
C++Python // Modify Position Limits\nEigen::VectorXd pos_upper_lims(7);\npos_upper_lims << 2.096, 2.096, 2.096, 2.096, 2.096, 2.096, 2.096;\nrobot->set_position_upper_limits(pos_upper_lims, dof_names);\nrobot->set_position_lower_limits(-pos_upper_lims, dof_names);\n// Modify Velocity Limits\nEigen::VectorXd vel_upper_lims(7);\nvel_upper_lims << 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5;\nrobot->set_velocity_upper_limits(vel_upper_lims, dof_names);\nrobot->set_velocity_lower_limits(-vel_upper_lims, dof_names);\n// Modify Force Limits\nEigen::VectorXd force_upper_lims(7);\nforce_upper_lims << 150, 150, 150, 150, 150, 150, 150;\nrobot->set_force_upper_limits(force_upper_lims, dof_names);\nrobot->set_force_lower_limits(-force_upper_lims, dof_names);\n// Modify Acceleration Limits\nEigen::VectorXd acc_upper_lims(7);\nacc_upper_lims << 1500, 1500, 1500, 1500, 1500, 1500, 1500;\nrobot->set_acceleration_upper_limits(acc_upper_lims, dof_names);\nrobot->set_acceleration_lower_limits(-acc_upper_lims, dof_names);\n
# Modify Position Limits\npos_upper_lims = np.array([2.096, 2.096, 2.096, 2.096, 2.096, 2.096, 2.096])\nrobot.set_position_upper_limits(pos_upper_lims, dof_names)\nrobot.set_position_lower_limits(-1*pos_upper_lims, dof_names)\n# Modify Velocity Limits\nvel_upper_lims = np.array([1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5])\nrobot.set_velocity_upper_limits(vel_upper_lims, dof_names)\nrobot.set_velocity_lower_limits(-1*vel_upper_lims, dof_names)\n# Modify Force Limits\nforce_upper_lims = np.array([150, 150, 150, 150, 150, 150, 150])\nrobot.set_force_upper_limits(force_upper_lims, dof_names)\nrobot.set_force_lower_limits(-1*force_upper_lims, dof_names)\n# Modify Acceleration Limits\nacc_upper_lims = np.array([1500, 1500, 1500, 1500, 1500, 1500, 1500])\nrobot.set_acceleration_upper_limits(acc_upper_lims, dof_names)\nrobot.set_acceleration_lower_limits(-1*acc_upper_lims, dof_names)\n
You can also modify the damping coefficients, coulomb frictions and spring stiffness of every DOF:
C++Python // Modify Damping Coefficients\nstd::vector<double> damps = {0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4};\nrobot->set_damping_coeffs(damps, dof_names);\n// Modify Coulomb Frictions\nstd::vector<double> cfrictions = {0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001};\nrobot->set_coulomb_coeffs(cfrictions, dof_names);\n// Modify Spring Stiffness\nstd::vector<double> stiffnesses = {0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001};\nrobot->set_spring_stiffnesses(stiffnesses, dof_names);\n
# Modify Damping Coefficients\ndamps = [0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4]\nrobot.set_damping_coeffs(damps, dof_names)\n# Modify Coulomb Frictions\ncfrictions = [0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001]\nrobot.set_coulomb_coeffs(cfrictions, dof_names)\n# Modify Spring Stiffness\nstiffnesses = [0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001]\nrobot.set_spring_stiffnesses(stiffnesses, dof_names)\n
"},{"location":"faq/#what-are-the-supported-sensors-how-can-i-use-an-imu","title":"What are the supported sensors? How can I use an IMU?","text":"Sensors in RobotDART can be added only through the simulator object. All of the sensors can be added without being attached to any body or joint but some of them can operate only when attached to something (e.g. ForceTorque
sensors).
"},{"location":"faq/#torque-sensor","title":"Torque sensor","text":"Torque sensors can be added to every joint of the robot:
C++Python // Add torque sensors to the robot\nint ct = 0;\nstd::vector<std::shared_ptr<robot_dart::sensor::Torque>> tq_sensors(robot->num_dofs());\nfor (const auto& joint : robot->dof_names())\ntq_sensors[ct++] = simu.add_sensor<robot_dart::sensor::Torque>(robot, joint, 1000);\n
# Add torque sensors to the robot\ntq_sensors = np.empty(robot.num_dofs(), dtype=rd.sensor.Torque)\nct = 0\nfor joint in robot.dof_names():\nsimu.add_sensor(rd.sensor.Torque(robot, joint, 1000))\ntq_sensors[ct] = simu.sensors()[-1]\nct += 1\n
Torque sensors measure the torque \\(\\tau \\in \\rm I\\!R^n\\) of the attached joint (where \\(n\\) is the DOFs of the joint):
C++Python // vector that contains the torque measurement for every joint (scalar)\nEigen::VectorXd torques_measure(robot->num_dofs());\nfor (const auto& tq_sens : tq_sensors)\ntorques_measure.block<1, 1>(ct++, 0) = tq_sens->torques();\n
# vector that contains the torque measurement for every joint (scalar)\ntorques_measure = np.empty(robot.num_dofs())\nct = 0\nfor tq_sens in tq_sensors:\ntorques_measure[ct] = tq_sens.torques()\nct += 1\n
"},{"location":"faq/#force-torque-sensor","title":"Force-Torque sensor","text":"Force-Torque sensors can be added to every joint of the robot:
C++Python // Add force-torque sensors to the robot\nct = 0;\nstd::vector<std::shared_ptr<robot_dart::sensor::ForceTorque>> f_tq_sensors(robot->num_dofs());\nfor (const auto& joint : robot->dof_names())\nf_tq_sensors[ct++] = simu.add_sensor<robot_dart::sensor::ForceTorque>(robot, joint, 1000, \"parent_to_child\");\n
# Add force-torque sensors to the robot\nf_tq_sensors = np.empty(robot.num_dofs(), dtype=rd.sensor.ForceTorque)\nct = 0\nfor joint in robot.dof_names():\nsimu.add_sensor(rd.sensor.ForceTorque(\nrobot, joint, 1000, \"parent_to_child\"))\nf_tq_sensors[ct] = simu.sensors()[-1]\nprint(f_tq_sensors)\nct += 1\n
Force-Torque sensors measure the force \(\boldsymbol{F} \in \rm I\!R^3\), the torque \(\boldsymbol{\tau} \in \rm I\!R^3\) and the wrench \(\boldsymbol{\mathcal{F}} =\begin{bmatrix} \tau, F\end{bmatrix}\in \rm I\!R^6\) of the attached joint:
C++Python // matrix that contains the torque measurement for every joint (3d vector)\nEigen::MatrixXd ft_torques_measure(robot->num_dofs(), 3);\n// matrix that contains the force measurement for every joint (3d vector)\nEigen::MatrixXd ft_forces_measure(robot->num_dofs(), 3);\n// matrix that contains the wrench measurement for every joint (6d vector)[torque, force]\nEigen::MatrixXd ft_wrench_measure(robot->num_dofs(), 6);\nct = 0;\nfor (const auto& f_tq_sens : f_tq_sensors) {\nft_torques_measure.block<1, 3>(ct, 0) = f_tq_sens->torque();\nft_forces_measure.block<1, 3>(ct, 0) = f_tq_sens->force();\nft_wrench_measure.block<1, 6>(ct, 0) = f_tq_sens->wrench();\nct++;\n}\n
# matrix that contains the torque measurement for every joint (3d vector)\nft_torques_measure = np.empty([robot.num_dofs(), 3])\n# matrix that contains the force measurement for every joint (3d vector)\nft_forces_measure = np.empty([robot.num_dofs(), 3])\n# matrix that contains the wrench measurement for every joint (6d vector)[torque, force]\nft_wrench_measure = np.empty([robot.num_dofs(), 6])\nct = 0\nfor f_tq_sens in f_tq_sensors:\nft_torques_measure[ct, :] = f_tq_sens.torque()\nft_forces_measure[ct, :] = f_tq_sens.force()\nft_wrench_measure[ct, :] = f_tq_sens.wrench()\nct += 1\n
"},{"location":"faq/#imu-sensor","title":"IMU sensor","text":"IMU sensors can be added to every link of the robot:
C++Python // Add IMU sensors to the robot\nct = 0;\nstd::vector<std::shared_ptr<robot_dart::sensor::IMU>> imu_sensors(robot->num_bodies());\nfor (const auto& body_node : robot->body_names()) {\n// _imu(std::make_shared<sensor::IMU>(sensor::IMUConfig(body_node(\"head\"), frequency))),\nimu_sensors[ct++] = simu.add_sensor<robot_dart::sensor::IMU>(robot_dart::sensor::IMUConfig(robot->body_node(body_node), 1000));\n}\n
# Add IMU sensors to the robot\nct = 0\nimu_sensors = np.empty(robot.num_bodies(), dtype=rd.sensor.IMU)\nfor body_node in robot.body_names():\nsimu.add_sensor(rd.sensor.IMU(\nrd.sensor.IMUConfig(robot.body_node(body_node), 1000)))\nimu_sensors[ct] = simu.sensors()[-1]\nct += 1\n
IMU sensors measure the angular position vector \\(\\boldsymbol{\\theta} \\in \\rm I\\!R^3\\), the angular velocity \\(\\boldsymbol{\\omega} \\in \\rm I\\!R^3\\) and the linear acceleration \\(\\boldsymbol{\\alpha} \\in \\rm I\\!R^3\\) of the attached link:
C++Python Eigen::MatrixXd imu_angular_positions_measure(robot->num_bodies(), 3);\nEigen::MatrixXd imu_angular_velocities_measure(robot->num_bodies(), 3);\nEigen::MatrixXd imu_linear_acceleration_measure(robot->num_bodies(), 3);\nct = 0;\nfor (const auto& imu_sens : imu_sensors) {\nimu_angular_positions_measure.block<1, 3>(ct, 0) = imu_sens->angular_position_vec();\nimu_angular_velocities_measure.block<1, 3>(ct, 0) = imu_sens->angular_velocity();\nimu_linear_acceleration_measure.block<1, 3>(ct, 0) = imu_sens->linear_acceleration();\nct++;\n}\n
imu_angular_positions_measure = np.empty([robot.num_bodies(), 3])\nimu_angular_velocities_measure = np.empty([robot.num_bodies(), 3])\nimu_linear_acceleration_measure = np.empty([robot.num_bodies(), 3])\nct = 0\nfor imu_sens in imu_sensors:\nimu_angular_positions_measure[ct,:] = imu_sens.angular_position_vec()\nimu_angular_velocities_measure[ct, :] = imu_sens.angular_velocity()\nimu_linear_acceleration_measure[ct,:] = imu_sens.linear_acceleration()\nct += 1\n
"},{"location":"faq/#rgb-sensor","title":"RGB sensor","text":"Any camera can be used as an RGB sensor:
C++Python // a nested std::vector (w*h*3) of the last image taken can be retrieved\nauto rgb_image = camera->image();\n
# a nested array (w*h*3) of the last image taken can be retrieved\nrgb_image = camera.image()\n
We can easily save the image and/or transform it to grayscale:
C++Python // a nested std::vector (w*h*3) of the last image taken can be retrieved\nauto rgb_image = camera->image();\n// we can also save them to png\nrobot_dart::gui::save_png_image(\"camera-small.png\", rgb_image);\n// or the gray-scaled version\nauto gray_image = robot_dart::gui::convert_rgb_to_gray(rgb_image);\nrobot_dart::gui::save_png_image(\"camera-gray.png\", gray_image);\n
# a nested array (w*h*3) of the last image taken can be retrieved\nrgb_image = camera.image()\n# we can also save them to png\nrd.gui.save_png_image(\"camera-small.png\", rgb_image)\n# or the gray-scaled version\ngray_image = rd.gui.convert_rgb_to_gray(rgb_image)\nrd.gui.save_png_image(\"camera-gray.png\", gray_image)\n
"},{"location":"faq/#rgb_d-sensor","title":"RGB_D sensor","text":"Any camera can also be configured to also record depth:
C++Python camera->camera().record(true, true); // cameras are recording color images by default, enable depth images as well for this example\n
# cameras are recording color images by default, enable depth images as well for this example\ncamera.camera().record(True, True)\n
We can then read the RGB and depth images:
C++Python // get the depth image from a camera\n// with a version for visualization or bigger differences in the output\nauto rgb_d_image = camera->depth_image();\n// and the raw values that can be used along with the camera parameters to transform the image to point-cloud\nauto rgb_d_image_raw = camera->raw_depth_image();\n
# get the depth image from a camera\n# with a version for visualization or bigger differences in the output\nrgb_d_image = camera.depth_image()\n# and the raw values that can be used along with the camera parameters to transform the image to point-cloud\nrgb_d_image_raw = camera.raw_depth_image()\n
We can save the depth images as well:
C++Python robot_dart::gui::save_png_image(\"camera-depth.png\", rgb_d_image);\nrobot_dart::gui::save_png_image(\"camera-depth-raw.png\", rgb_d_image_raw);\n
rd.gui.save_png_image(\"camera-depth.png\", rgb_d_image)\nrd.gui.save_png_image(\"camera-depth-raw.png\", rgb_d_image_raw)\n
"},{"location":"faq/#how-can-i-spawn-multiple-robots-in-parallel","title":"How can I spawn multiple robots in parallel?","text":""},{"location":"faq/#robot-pool-only-in-c","title":"Robot Pool (only in C++)","text":"The best way to do so is to create a Robot pool. With a robot pool you:
- Minimize the overhead of loading robots (it happens only once!) or cloning robots (it never happens)
- Make sure that your robots are \"clean\" once released from each thread
- Focus on the important stuff rather than handling robots and threads
Let's see a more practical example:
- First we need to include the proper header:
C++ #include <robot_dart/robot_pool.hpp>\n
- Then we create a
creator
function and the pool object:
C++ namespace pool {\n// This function should load one robot: here we load Talos\ninline std::shared_ptr<robot_dart::Robot> robot_creator()\n{\nreturn std::make_shared<robot_dart::robots::Talos>();\n}\n// To create the object we need to pass the robot_creator function and the number of maximum parallel threads\nrobot_dart::RobotPool robot_pool(robot_creator, NUM_THREADS);\n} // namespace pool\n
The creator
function is the function responsible for loading your robot. This should basically look like a standalone code to load or create a robot.
- Next, we create a few threads that utilize the robots (in your code you might be using OpenMP or TBB):
C++ // for the example, we run NUM_THREADS threads of eval_robot()\nstd::vector<std::thread> threads(NUM_THREADS * 2); // *2 to see some reuse\nfor (size_t i = 0; i < threads.size(); ++i)\nthreads[i] = std::thread(eval_robot, i); // eval_robot is the function that uses the robot\n
- An example evaluation function:
C++ inline void eval_robot(int i)\n{\n// We get one available robot\nauto robot = pool::robot_pool.get_robot();\nstd::cout << \"Robot \" << i << \" got [\" << robot->skeleton() << \"]\" << std::endl;\n/// --- some robot_dart code ---\nsimulate_robot(robot);\n// --- do something with the result\nstd::cout << \"End of simulation \" << i << std::endl;\n// CRITICAL : free your robot !\npool::robot_pool.free_robot(robot);\nstd::cout << \"Robot \" << i << \" freed!\" << std::endl;\n}\n
"},{"location":"faq/#i-need-to-simulate-many-worlds-with-camera-sensors-in-parallel-how-can-i-do-this","title":"I need to simulate many worlds with camera sensors in parallel. How can I do this?","text":"On magnum_contexts.cpp you can find an example showcasing the use of many worlds with camera sensors in parallel. The main takeaway is that we need to pre-allocate OpenGL contexts so that each thread can take one and use it to render their worlds.
C++ // Load robot from URDF\nauto global_robot = std::make_shared<robot_dart::robots::Iiwa>();\nstd::vector<std::thread> workers;\n// Set maximum number of parallel GL contexts (this is GPU-dependent)\nrobot_dart::gui::magnum::GlobalData::instance()->set_max_contexts(4);\n// We want 15 parallel simulations with different GL context each\nsize_t N_workers = 15;\nstd::mutex mutex;\nsize_t id = 0;\n// Launch the workers\nfor (size_t i = 0; i < N_workers; i++) {\nworkers.push_back(std::thread([&] {\nmutex.lock();\nsize_t index = id++;\nmutex.unlock();\n// Get the GL context -- this is a blocking call\n// will wait until one GL context is available\n// get_gl_context(gl_context); // this call will not sleep between failed queries\nget_gl_context_with_sleep(gl_context, 20); // this call will sleep 20ms between each failed query\n// Do the simulation\nauto robot = global_robot->clone();\nrobot_dart::RobotDARTSimu simu(0.001);\nEigen::VectorXd ctrl = robot_dart::make_vector({0., M_PI / 3., 0., -M_PI / 4., 0., 0., 0.});\nauto controller = std::make_shared<robot_dart::control::PDControl>(ctrl);\nrobot->add_controller(controller);\ncontroller->set_pd(300., 50.);\n// Magnum graphics\nrobot_dart::gui::magnum::GraphicsConfiguration configuration = robot_dart::gui::magnum::WindowlessGraphics::default_configuration();\nconfiguration.width = 1024;\nconfiguration.height = 768;\nauto graphics = std::make_shared<robot_dart::gui::magnum::WindowlessGraphics>(configuration);\nsimu.set_graphics(graphics);\n// Position the camera differently for each thread to visualize the difference\ngraphics->look_at({0.4 * index, 3.5 - index * 0.1, 2.}, {0., 0., 0.25});\n// record images from main camera/graphics\n// graphics->set_recording(true); // WindowlessGLApplication records images by default\nsimu.add_robot(robot);\nsimu.run(6);\n// Save the image for verification\nrobot_dart::gui::save_png_image(\"camera_\" + std::to_string(index) + \".png\",\ngraphics->image());\n// Release the GL context for 
another thread to use\nrelease_gl_context(gl_context);\n}));\n}\n// Wait for all the workers\nfor (size_t i = 0; i < workers.size(); i++) {\nworkers[i].join();\n}\n
"},{"location":"faq/#i-do-not-know-how-to-use-waf-how-can-i-detect-robotdart-from-cmake","title":"I do not know how to use waf. How can I detect RobotDART from CMake?","text":"You need to use waf
to build RobotDART, but when installing the library a CMake module is installed. Thus it is possible to use RobotDART in your code using CMake. You can find a complete example at cmake/example. In short the CMake would look like this:
cmake_minimum_required(VERSION 3.10 FATAL_ERROR)\nproject(robot_dart_example)\n# we ask for Magnum because we want to build the graphics\nfind_package(RobotDART REQUIRED OPTIONAL_COMPONENTS Magnum)\nadd_executable(robot_dart_example example.cpp)\ntarget_link_libraries(robot_dart_example\nRobotDART::Simu\n)\nif(RobotDART_Magnum_FOUND)\nadd_executable(robot_dart_example_graphics example.cpp)\ntarget_link_libraries(robot_dart_example_graphics\nRobotDART::Simu\nRobotDART::Magnum\n)\nendif()\n
"},{"location":"faq/#where-can-i-find-complete-working-examples-for-either-c-or-python","title":"Where can I find complete working examples for either c++ or python?","text":"C++ examples
Python examples
"},{"location":"install/","title":"Manual Installation","text":""},{"location":"install/#manual-installation-of-robotdart","title":"Manual Installation of RobotDART","text":""},{"location":"install/#dependencies","title":"Dependencies","text":""},{"location":"install/#required","title":"Required","text":" - Ubuntu (it should work on versions >= 14.04) or OSX
- Eigen3 (needed by DART)
- Boost (needed by DART)
- DART, http://dartsim.github.io/
"},{"location":"install/#optional","title":"Optional","text":" - Magnum (for graphics), https://github.com/mosra/magnum
"},{"location":"install/#installation-of-the-dependencies","title":"Installation of the dependencies","text":""},{"location":"install/#installing-boost-and-eigen3","title":"Installing Boost and Eigen3","text":"For Ubuntu-based distributions we should use the following commands to install Eigen3 and Boost:
sudo apt-get update\nsudo apt-get install libeigen3-dev libboost-filesystem-dev libboost-system-dev libboost-regex-dev\n
For OSX with brew:
brew install eigen3\nbrew install boost\n
"},{"location":"install/#installing-dart","title":"Installing DART","text":"In order to use RobotDART, you need to install DART (from source).
For Ubuntu systems, please follow the detailed installation instructions on the DART documentation website. Make sure that you don't forget to add the PPAs as detailed here. What is more, you can enable the -DART_ENABLE_SIMD
flag in the CMake configuration. In addition, you need the following optional dependency: DART Parsers. Lastly, it is recommended to use either the master
branch or v6.12.1
tag (and not the one provided in DART's documentation). In short you should do the following:
Ubuntu <= 14.04 only
sudo apt-add-repository ppa:libccd-debs/ppa\nsudo apt-add-repository ppa:fcl-debs/ppa\n
For all Ubuntu distributions
sudo apt-add-repository ppa:dartsim/ppa\nsudo apt-get update\n\nsudo apt-get install build-essential cmake pkg-config git\nsudo apt-get install libeigen3-dev libassimp-dev libccd-dev libfcl-dev libboost-regex-dev libboost-system-dev\n\nsudo apt-get install libtinyxml-dev libtinyxml2-dev\nsudo apt-get install liburdfdom-dev liburdfdom-headers-dev\n\ncd /path/to/tmp/folder\ngit clone git://github.com/dartsim/dart.git\ncd dart\ngit checkout v6.12.1\n\nmkdir build\ncd build\ncmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr ..\nmake -j4\nsudo make install\n
If you want to install DART somewhere else than /usr/local
, you should use the -DCMAKE_INSTALL_PREFIX=/path/to/install/folder
cmake argument.
For OSX systems with homebrew, please follow the detailed installation instructions on the DART documentation website. You need to follow the same procedure as for Ubuntu systems. In short you should do the following:
brew install eigen\nbrew install assimp\nbrew install libccd\nbrew install dartsim/dart/fcl04\nbrew install boost\n\nbrew install tinyxml\nbrew install tinyxml2\nbrew install urdfdom\n\ncd /path/to/tmp/folder\ngit clone git://github.com/dartsim/dart.git\ncd dart\ngit checkout v6.12.1\n\nmkdir build\ncd build\ncmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr ..\nmake -j4\nsudo make install\n
"},{"location":"install/#installing-magnum","title":"Installing Magnum","text":"Magnum depends on Corrade and we are going to use a few plugins and extras from the library. We are also going to use Glfw and Glx for the back-end. Follow the instructions below:
#installation of Glfw and OpenAL\n# Ubuntu\nsudo apt-get install libglfw3-dev libglfw3 libopenal-dev libassimp-dev\n# Mac OSX\nbrew install glfw3 openal-soft assimp\n\n# installation of Corrade\ncd /path/to/tmp/folder\ngit clone https://github.com/mosra/corrade.git\ncd corrade\nmkdir build && cd build\ncmake -DCMAKE_BUILD_TYPE=Release ..\nmake -j\nsudo make install\n\n# installation of Magnum\ncd /path/to/tmp/folder\ngit clone https://github.com/mosra/magnum.git\ncd magnum\nmkdir build && cd build\n# Ubuntu\ncmake -DCMAKE_BUILD_TYPE=Release -DWITH_AUDIO=ON -DWITH_DEBUGTOOLS=ON -DWITH_GL=ON -DWITH_MESHTOOLS=ON -DWITH_PRIMITIVES=ON -DWITH_SCENEGRAPH=ON -DWITH_SHADERS=ON -DWITH_TEXT=ON -DWITH_TEXTURETOOLS=ON -DWITH_TRADE=ON -DWITH_GLFWAPPLICATION=ON -DWITH_WINDOWLESSEGLAPPLICATION=ON -DWITH_OPENGLTESTER=ON -DWITH_ANYAUDIOIMPORTER=ON -DWITH_ANYIMAGECONVERTER=ON -DWITH_ANYIMAGEIMPORTER=ON -DWITH_ANYSCENEIMPORTER=ON -DWITH_MAGNUMFONT=ON -DWITH_OBJIMPORTER=ON -DWITH_TGAIMPORTER=ON -DWITH_WAVAUDIOIMPORTER=ON -DTARGET_HEADLESS=ON .. # this will enable almost all features of Magnum that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)\n# Mac OSX\ncmake -DCMAKE_BUILD_TYPE=Release -DWITH_AUDIO=ON -DWITH_DEBUGTOOLS=ON -DWITH_GL=ON -DWITH_MESHTOOLS=ON -DWITH_PRIMITIVES=ON -DWITH_SCENEGRAPH=ON -DWITH_SHADERS=ON -DWITH_TEXT=ON -DWITH_TEXTURETOOLS=ON -DWITH_TRADE=ON -DWITH_GLFWAPPLICATION=ON -DWITH_WINDOWLESSCGLAPPLICATION=ON -DWITH_OPENGLTESTER=ON -DWITH_ANYAUDIOIMPORTER=ON -DWITH_ANYIMAGECONVERTER=ON -DWITH_ANYIMAGEIMPORTER=ON -DWITH_ANYSCENEIMPORTER=ON -DWITH_MAGNUMFONT=ON -DWITH_OBJIMPORTER=ON -DWITH_TGAIMPORTER=ON -DWITH_WAVAUDIOIMPORTER=ON .. 
# this will enable almost all features of Magnum that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)\nmake -j\nsudo make install\n\n# installation of Magnum Plugins\ncd /path/to/tmp/folder\ngit clone https://github.com/mosra/magnum-plugins.git\ncd magnum-plugins\nmkdir build && cd build\ncmake -DCMAKE_BUILD_TYPE=Release -DWITH_ASSIMPIMPORTER=ON -DWITH_DDSIMPORTER=ON -DWITH_JPEGIMPORTER=ON -DWITH_OPENGEXIMPORTER=ON -DWITH_PNGIMPORTER=ON -DWITH_TINYGLTFIMPORTER=ON -DWITH_STBTRUETYPEFONT=ON .. # this will enable quite a few Magnum Plugins that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)\nmake -j\nsudo make install\n\n# installation of Magnum DART Integration (DART needs to be installed) and Eigen Integration\ncd /path/to/tmp/folder\ngit clone https://github.com/mosra/magnum-integration.git\ncd magnum-integration\nmkdir build && cd build\ncmake -DCMAKE_BUILD_TYPE=Release -DWITH_DART=ON -DWITH_EIGEN=ON ..\nmake -j\nsudo make install\n
"},{"location":"install/#compilation-and-running-the-examples","title":"Compilation and running the examples","text":"The compilation of the library is straight-forward:
- retrieve the code, for instance with
git clone https://github.com/resibots/robot_dart.git
cd /path/to/repo/root
./waf configure
./waf
To build the examples, execute this: ./waf examples
Now you can run the examples. For example, to run the arm example you need to type the following: ./build/arm
(or ./build/arm_plain
to run it without graphics).
"},{"location":"install/#installing-the-library","title":"Installing the library","text":"To install the library (assuming that you have already compiled it), you need only to run:
./waf install
By default the library will be installed in /usr/local/lib
(for this maybe sudo ./waf install
might be needed) and a static library will be generated. You can change the defaults as follows:
./waf configure --prefix=/path/to/install/dir --shared
./waf install
In short, with --prefix
you can change the directory where the library will be installed and if --shared
is present a shared library will be created instead of a static one.
"},{"location":"install/#compiling-the-python-bindings","title":"Compiling the python bindings","text":"For the python bindings of robot_dart, we need numpy
to be installed, pybind11
and the python bindings of DART (dartpy).
For numpy
one can install it with pip
or standard packages. For pybind11
please follow the instructions on the dart website (focus on the pybind11 part, for the other parts follow our instructions above).
For the python bindings of DART, do:
cd dart\n\nmkdir build\ncd build\ncmake -DDART_BUILD_DARTPY=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr ..\nmake -j4\nsudo make install\nsudo make install-dartpy # for DART >= v6.12.0, we do not need this\n
Then the compilation of robot_dart is almost identical as before:
- retrieve the code, for instance with
git clone https://github.com/resibots/robot_dart.git
cd /path/to/repo/root
./waf configure --python
(--python
enables the python bindings) ./waf
- Install the library (including the python bindings) as before (no change is needed)
- Depending on your installation directory you might need to update your
PYTHONPATH
, e.g. export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python3.8/site-packages/
To run the python examples (for the python examples you need to have enabled the graphics, that is, install Magnum library), run:
cd /path/to/repo/root
python src/python/example.py
or python src/python/example_parallel.py
"},{"location":"install/#common-issues-with-python-bindings","title":"Common Issues with Python bindings","text":"One of the most common issues with the python bindings is the fact that DART bindings might be compiled and installed for python 3 and the robot_dart ones for python 2. To force the usage of python 3 for robot_dart, you use python3 ./waf
instead of just ./waf
in all the commands above.
"},{"location":"quick_install/","title":"Installation","text":""},{"location":"quick_install/#scripts-for-quick-installation-of-robotdart","title":"Scripts for Quick Installation of RobotDART","text":"In this page we provide standalone scripts for installing RobotDART for Ubuntu
(20.04) and OSX
. The scripts will install all the required dependencies and RobotDART. Notably, all dependencies that need to be compiled by source and RobotDART will be installed inside the /opt
folder. This way, one can rest assured that their system will be clean.
"},{"location":"quick_install/#ubuntu-2004","title":"Ubuntu 20.04","text":"To quickly install RobotDART on Ubuntu 20.04
, we just need to run ./scripts/install_ubuntu.sh
from the root of the repo. In more detail:
git clone https://github.com/resibots/robot_dart.git
cd robot_dart
./scripts/install_ubuntu.sh
This will install everything needed! Once the script is successfully executed, one should add the following to their ~/.bashrc
or ~/.zshrc
file:
export PATH=/opt/magnum/bin:$PATH\nexport LD_LIBRARY_PATH=/opt/dart/lib:/opt/magnum/lib:/opt/robot_dart/lib:$LD_LIBRARY_PATH\nexport PYTHONPATH=/opt/dart/lib/python3/dist-packages:/opt/robot_dart/lib/python3.8/site-packages:$PYTHONPATH\n
"},{"location":"quick_install/#osx","title":"OSX","text":"Coming soon
"},{"location":"robots/","title":"Supported robots","text":""},{"location":"robots/#supported-robots","title":"Supported robots","text":"Every robot is defined as a URDF, which will be installed in $PREFIX/share/utheque
. All robots have pre-defined \"robot classes\" that define sensors and other properties; for your custom/new robots, you will have to add the sensors/properties via the generic robot class (or create a new robot class).
The URDF files are loaded using the following rules (see utheque::path()
):
- First check in the current directory
- If not found, check in
current_directory/utheque
- If not found, check in
$ROBOT_DART_PATH/utheque
- If not found, check in the robot dart installation path/robots (e.g.,
/usr/share/utheque
or $HOME/share/utheque
) - Otherwise, report failure
utheque
is a separate header-only library that gets installed together with RobotDART (or even alone), that can be used in libraries that do not want to interfere with RobotDART and use the curated URDF files.
"},{"location":"robots/#talos-pal-robotics","title":"Talos (PAL Robotics)","text":"Talos is a humanoid robot made by PAL Robotics.
- Datasheet: [pdf]
- 32 degrees of freedom (6 for each leg, 7 for each arm, 2 for the waist, 2 for the neck, 1 for each gripper)
- 175 cm / 95 kg
- IMU in the torso
- Torque sensors in all joints except head, wrist and gripper (22 torque sensors total)
- 1 force/torque sensor in each ankle
- 1 force/torque sensor in each wrist
We have two URDF files:
robots/talos/talos.urdf
: - accurate (simplified but made of polygons) collision meshes
- mimic joints for the gripper
- Not compatible with the DART collision detector (you need to use FCL collision detector - shipped with DART)
- URDF: [talos.urdf]
- Example: [talos.cpp]
Load Talos
C++ auto robot = std::make_shared<robot_dart::robots::Talos>();\n
Python robot = rd.Talos()\n
robot/talos/talos_fast.urdf
: - no collision except for the feet, which are approximated by boxes
- grippers are fixed (no movement is allowed)
- compatible with the DART collision detector
- URDF: [talos_fast.urdf]
- Example: [talos_fast.cpp]
talos_fast.urdf
is faster because it makes it possible to use the DART collision detector (and has much simpler collision shapes). You should prefer it except if you want to use the grippers (e.g., for manipulation) or are working on self-collisions.
Load Talos Fast
C++ // load talos fast\nauto robot = std::make_shared<robot_dart::robots::TalosFastCollision>();\n// Set actuator types to VELOCITY (for speed)\nrobot->set_actuator_types(\"velocity\");\ndouble dt = 0.001;\nrobot_dart::RobotDARTSimu simu(dt);\n// we can use the DART (fast) collision detector\nsimu.set_collision_detector(\"dart\");\n
Python robot = rd.TalosFastCollision()\n
Please note that the mesh files (.glb) require assimp 5.x (and not assimp4.x usually shipped with ROS). If you cannot load the URDF, please check your assimp version.
"},{"location":"robots/#panda-franka-emika","title":"Panda (Franka Emika)","text":"The Franka is a modern manipulator made by Franka Emika Panda. It is commonly found in many robotics labs.
- Datasheet: [pdf]
- 7 degrees of freedom
- Can be controlled in torque
- 18 kg
- workspace: 855 mm (horizontal), 1190 mm (vertical)
- URDF: [franka.urdf]
- Example: [franka.cpp] The URDF includes the gripper.
Load Franka
C++ auto robot = std::make_shared<robot_dart::robots::Franka>();\n
Python robot = rd.Franka()\n
"},{"location":"robots/#lbr-iiwa-kuka","title":"LBR iiwa (KUKA)","text":"The LBR iiwa is manufactured by KUKA. It is similar to the Panda and is also very common in robotics labs.
- Datasheet: [pdf]
- We implement the 14 kg version
- 29.5 kg
- 7 degrees of freedom
- URDF: [iiwa.urdf]
- Example: [iiwa.cpp]
Load Iiwa
C++ auto robot = std::make_shared<robot_dart::robots::Iiwa>();\n
Python robot = rd.Iiwa()\n
"},{"location":"robots/#icub-iit","title":"iCub (IIT)","text":"The iCub is an open source humanoid robot made by the Instituto Italiano di Tecnologia. There are currently 42 iCubs in the world, and many versions.
- Datasheet (rev 2.3) [pdf]
- 6 force/torque sensors (upper arms, upper legs, ankles)
- IMU in the head
- We do not simulate the skin
- We do not simulate the hands
- Our model is close to the Inria's iCub, but it has not been checked in detail.
- URDF: [icub.urdf]
- Example [icub.cpp]
Please note that the mesh files (.glb) require assimp 5.x (and not assimp4.x usually shipped with ROS). If you cannot load the URDF, please check your assimp version.
Load iCub
C++ auto robot = std::make_shared<robot_dart::robots::ICub>();\n// Set actuator types to VELOCITY motors so that they stay in position without any controller\nrobot->set_actuator_types(\"velocity\");\nrobot_dart::RobotDARTSimu simu(0.001);\nsimu.set_collision_detector(\"fcl\");\n
Python robot = rd.ICub()\n# Set actuator types to VELOCITY motors so that they stay in position without any controller\nrobot.set_actuator_types(\"velocity\")\nsimu = rd.RobotDARTSimu(0.001)\nsimu.set_collision_detector(\"fcl\")\n
Print IMU sensor measurements
C++ std::cout << \"Angular Position: \" << robot->imu().angular_position_vec().transpose().format(fmt) << std::endl;\nstd::cout << \"Angular Velocity: \" << robot->imu().angular_velocity().transpose().format(fmt) << std::endl;\nstd::cout << \"Linear Acceleration: \" << robot->imu().linear_acceleration().transpose().format(fmt) << std::endl;\nstd::cout << \"=================================\" << std::endl;\n
Python print(\"Angular Position: \", robot.imu().angular_position_vec().transpose())\nprint(\"Angular Velocity: \", robot.imu().angular_velocity().transpose())\nprint(\"Linear Acceleration: \", robot.imu().linear_acceleration().transpose())\nprint(\"=================================\" )\n
Print Force-Torque sensor measurements
C++ std::cout << \"FT ( force): \" << robot->ft_foot_left().force().transpose().format(fmt) << std::endl;\nstd::cout << \"FT (torque): \" << robot->ft_foot_left().torque().transpose().format(fmt) << std::endl;\nstd::cout << \"=================================\" << std::endl;\n
Python print(\"FT ( force): \", robot.ft_foot_left().force().transpose())\nprint(\"FT (torque): \", robot.ft_foot_left().torque().transpose())\nprint(\"=================================\")\n
"},{"location":"robots/#unitree-a1","title":"Unitree A1","text":"A1 is a quadruped robot made by the Unitree Robotics.
- IMU in the torso
- We do not simulate the foot pressure sensors (yet)
- One can easily add a depth camera on the head
- URDF: [a1.urdf]
- Example [a1.cpp]
Load A1
C++ auto robot = std::make_shared<robot_dart::robots::A1>();\n
Python robot = rd.A1()\n
Print IMU sensor measurements
C++ std::cout << \"Angular Position: \" << robot->imu().angular_position_vec().transpose().format(fmt) << std::endl;\nstd::cout << \"Angular Velocity: \" << robot->imu().angular_velocity().transpose().format(fmt) << std::endl;\nstd::cout << \"Linear Acceleration: \" << robot->imu().linear_acceleration().transpose().format(fmt) << std::endl;\nstd::cout << \"=================================\" << std::endl;\n
Python print( \"Angular Position: \", robot.imu().angular_position_vec().transpose())\nprint( \"Angular Velocity: \", robot.imu().angular_velocity().transpose())\nprint( \"Linear Acceleration: \", robot.imu().linear_acceleration().transpose())\nprint( \"=================================\")\n
Add a depth camera on the head
How can I attach a camera to a moving link?
Please note that the mesh files (.glb) require assimp 5.x (and not assimp4.x usually shipped with ROS). If you cannot load the URDF, please check your assimp version.
"},{"location":"robots/#dynamixel-based-hexapod-robot-inria-and-others","title":"Dynamixel-based hexapod robot (Inria and others)","text":"This hexapod is a simple 6-legged robot based on dynamixel actuators. It is similar to the robot used in the paper `Robots that can adapt like animals' (Cully et al., 2015).
- 6 legs, 3 degrees of freedom for each leg (18 degrees of freedom)
- simple URDF (no meshes)
- URDF: [pexod.urdf]
- Example: [hexapod.cpp]
Load Hexapod
C++ auto robot = std::make_shared<robot_dart::robots::Hexapod>();\n
Python robot = rd.Hexapod()\n
Load Pexod
C++ auto robot = std::make_shared<robot_dart::Robot>(\"pexod.urdf\");\n
Python robot = rd.Robot(\"pexod.urdf\");\n
"},{"location":"robots/#simple-arm","title":"Simple arm","text":" - A simple arm for educational or debugging purposes
- 5 degrees of freedom
- simple URDF (no meshes)
- URDF: [arm.urdf]
- Example: [arm.cpp]
Load Simple Arm
C++ auto robot = std::make_shared<robot_dart::robots::Arm>();\n
Python robot = rd.Arm()\n
"},{"location":"robots/#loading-custom-robots","title":"Loading Custom Robots","text":"RobotDART gives you the ability to load custom robots that are defined in URDF, SDF, SKEL or MJCF files. For example, you can load a urdf model using:
Load custom Robot
C++ auto your_robot = std::make_shared<robot_dart::Robot>(\"path/to/model.urdf\");\n
Python your_robot = robot_dart.Robot(\"path/to/model.urdf\")\n
Load custom Robot with packages (e.g STL, DAE meshes)
C++ std::vector<std::pair<std::string, std::string>> your_model_packages = ('model', 'path/to/model/dir');\nauto your_robot = std::make_shared<robot_dart::Robot>(\"path/to/model.urdf\", your_model_packages, \"packages\");\n
Python your_model_packages = [(\"model\", \"path/to/model/dir\")]\nyour_robot = robot_dart.Robot(\"path/to/model.urdf\", your_model_packages)\n
"}]}
\ No newline at end of file
+{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":""},{"location":"#robotdart","title":"RobotDART","text":"RobotDART is a C++11 robot simulator (with optional Python bindings) built on top of the DART physics engine. The RobotDART simulator is intended to be used by Robotics and Machine Learning researchers who want to write controllers or test learning algorithms without the delays and overhead that usually comes with other simulators (e.g., Gazebo, Coppelia-sim).
For this reason, the simulator runs headless by default, and there is the possibility of rendering the scene (e.g., through a camera sensor) without opening a graphics window. All RobotDART's code is thread-safe (including graphics and camera sensors), and thus enables its users to use their code in parallel jobs in multicore computers.
In a few words, RobotDART combines:
- a physics engine (DART)
- an optional graphics engine (Magnum)
- a few sensor classes (IMU, force/torque sensors, cameras, etc.)
- a curated URDF library
- ... and a few useful features to make the life of roboticists/researchers easier
"},{"location":"#main-features","title":"Main Features","text":" - Modern C++ code that makes it easy to develop environments and applications
- Fast and reliable simulation of robotic mechanisms and their interactions (through the DART physics engine)
- A structured
Robot
class that enables a unified creation and access to all important values: in RobotDART you can load any robot description file (URDF, SDF, SKEL, and MuJoCo files) with the same command, and all robot measurements can be queried without using any DART code - A generic
RobotControl
class that enables fast prototyping of any type of controller - A generic
Sensor
class that allows the creation of any kind of sensor - A growing list of already implemented sensors, that includes 6-axis
ForceTorque
, IMU
, RGB
, and RGB-D
sensors - A simulation class (
RobotDARTSimu
) that handles multiple robots and sensors, and allows for step-by-step simulation - A growing list of supported robots along with edited and optimized models to be used with RobotDART (see the robots page for details and examples):
- PAL Talos humanoid
- Franka Emika Panda
- KUKA LBR Iiwa (14kg version)
- IIT iCub humanoid (without hands)
- Unitree A1 quadruped robot
- Dynamixel-based 6-legged robot
- A simple arm for educational purposes
- and you can use any URDF
- A custom graphical interface built on top of Magnum that allows generic customization
- Support for windowless OpenGL context creation (even in parallel threads!) to allow for camera sensor usage even in parallel jobs running on clusters
- Support for video recording in simulation time (i.e., not affected by delays of simulator and/or graphics) for visualization or debugging purposes
- Full-featured Python bindings for fast prototyping
- RobotDART runs on any Linux distribution and Mac OS
"},{"location":"#what-robotdart-is-not","title":"What RobotDART is not","text":" - RobotDART is primarily intended to be non-interactive (run a simulation, record/view the result),
- Interaction is limited to changing the view and your own code. No GUI for adding objects or interactively build an environment,
- RobotDART is not optimized for wheeled robots,
- RobotDART is not optimized for simulating complex (e.g., mountain-like) terrains.
"},{"location":"faq/","title":"FAQ","text":""},{"location":"faq/#frequently-asked-questions","title":"Frequently Asked Questions","text":"This pages provides a user guide of the library through Frequently Asked Questions (FAQ).
"},{"location":"faq/#what-is-a-minimal-working-example-of-robotdart","title":"What is a minimal working example of RobotDART?","text":"You can find a minimal working example at hello_world.cpp. This example is creating a world where a hexapod robot is placed just above a floor and left to fall. The robot has no actuation, and there is the simplest graphics configuration. Let's split it down.
- We first include the appropriate files:
C++Python #include <robot_dart/robot_dart_simu.hpp>\n#ifdef GRAPHIC\n#include <robot_dart/gui/magnum/graphics.hpp>\n#endif\n
import RobotDART as rd\n
- We then load our hexapod robot:
C++Python auto robot = std::make_shared<robot_dart::Robot>(\"pexod.urdf\");\n
robot = rd.Robot(\"pexod.urdf\");\n
- We need to place it above the floor to avoid collision (we can use RobotDART's helpers ;)):
C++Python robot->set_base_pose(robot_dart::make_tf({0., 0., 0.2}));\n
robot.set_base_pose([0., 0., 0., 0., 0., 0.2])\n
- We can now create the simulation object and add the robot and the floor:
C++Python robot_dart::RobotDARTSimu simu(0.001); // dt=0.001, 1KHz simulation\nsimu.add_floor();\nsimu.add_robot(robot);\n
simu = rd.RobotDARTSimu(0.001); # dt=0.001, 1KHz simulation\nsimu.add_floor();\nsimu.add_robot(robot);\n
- If needed or wanted, we can add a graphics component to visualize the scene:
C++Python auto graphics = std::make_shared<robot_dart::gui::magnum::Graphics>();\nsimu.set_graphics(graphics);\ngraphics->look_at({0.5, 3., 0.75}, {0.5, 0., 0.2});\n
graphics = rd.gui.Graphics()\nsimu.set_graphics(graphics)\ngraphics.look_at([0.5, 3., 0.75], [0.5, 0., 0.2])\n
- Once everything is configured, we can run our simulation for a few seconds:
C++Python simu.run(10.);\n
simu.run(10.)\n
- Here's how it looks:
"},{"location":"faq/#what-robots-are-supported-in-robotdart","title":"What robots are supported in RobotDART?","text":"RobotDART supports any robot that can be described by a URDF, SDF, SKEL or MJCF file. Nevertheless, we have a curated list of robots with edited and optimized models to be used with RobotDART (see the robots page for details and examples).
"},{"location":"faq/#how-can-i-load-my-own-urdfsdfskelmjcf-file","title":"How can I load my own URDF/SDF/SKEL/MJCF file?","text":"See the robots page for details.
"},{"location":"faq/#how-do-i-enable-graphics-in-my-code","title":"How do I enable graphics in my code?","text":"To enable graphics in your code, you need to do the following:
- Install Magnum. See the installation page for details.
- Create and set a graphics object in the simulation object. Here's an example:
C++Python auto graphics = std::make_shared<robot_dart::gui::magnum::Graphics>();\nsimu.set_graphics(graphics);\ngraphics->look_at({0.5, 3., 0.75}, {0.5, 0., 0.2});\n
graphics = rd.gui.Graphics()\nsimu.set_graphics(graphics)\ngraphics.look_at([0.5, 3., 0.75], [0.5, 0., 0.2])\n
"},{"location":"faq/#i-want-to-have-multiple-camera-sensors-is-it-possible","title":"I want to have multiple camera sensors. Is it possible?","text":"Having multiple camera sensors is indeed possible. We can add as many cameras as we wish along the main camera defined in How do I record a video:
C++Python // Add camera\nauto camera = std::make_shared<robot_dart::sensor::Camera>(graphics->magnum_app(), 256, 256);\n
# Add camera\ncamera = rd.sensor.Camera(graphics.magnum_app(), 32, 32)\n
"},{"location":"faq/#how-do-i-record-a-video","title":"How do I record a video?","text":"In order to record a video of what the main or any other camera \"sees\", you need to call the function record_video(path)
of the graphics class:
C++Python graphics->record_video(\"talos_dancing.mp4\");\n
graphics.record_video(\"talos_dancing.mp4\")\n
Or the camera class:
C++Python // cameras can also record video\ncamera->record_video(\"video-camera.mp4\");\n
# cameras can also record video\ncamera.record_video(\"video-camera.mp4\")\n
"},{"location":"faq/#how-can-i-position-a-camera-to-the-environment","title":"How can I position a camera to the environment?","text":"In order to position a camera inside the world, we need to use the lookAt
method of the camera/graphics object:
C++Python // set the position of the camera, and the position where the main camera is looking at\nEigen::Vector3d cam_pos = {-0.5, -3., 0.75};\nEigen::Vector3d cam_looks_at = {0.5, 0., 0.2};\ncamera->look_at(cam_pos, cam_looks_at);\n
# set the position of the camera, and the position where the main camera is looking at\ncam_pos = [-0.5, -3., 0.75]\ncam_looks_at = [0.5, 0., 0.2]\ncamera.look_at(cam_pos, cam_looks_at)\n
"},{"location":"faq/#how-can-i-attach-a-camera-to-a-moving-link","title":"How can I attach a camera to a moving link?","text":"Cameras can be easily attached to a moving link:
C++Python Eigen::Isometry3d tf;\ntf = Eigen::AngleAxisd(3.14, Eigen::Vector3d{1., 0., 0.});\ntf *= Eigen::AngleAxisd(1.57, Eigen::Vector3d{0., 0., 1.});\ntf.translation() = Eigen::Vector3d(0., 0., 0.1);\ncamera->attach_to_body(robot->body_node(\"iiwa_link_ee\"), tf); // cameras are looking towards -z by default\n
tf = dartpy.math.Isometry3()\nrot = dartpy.math.AngleAxis(3.14, [1., 0., 0.])\nrot = rot.multiply( dartpy.math.AngleAxis(1.57, [0., 0., 1.])).to_rotation_matrix()\ntf.set_translation([0., 0., 0.1])\ncamera.attach_to_body(robot.body_node(\"iiwa_link_ee\"), tf) # cameras are looking towards -z by default\n
"},{"location":"faq/#how-can-i-manipulate-the-camera-object","title":"How can I manipulate the camera object?","text":"Every camera has its own parameters, i.e a Near plane, a far plane, a Field Of View (FOV), a width and a height (that define the aspect ratio), you can manipulate each one separately:
C++Python camera->camera().set_far_plane(5.f);\ncamera->camera().set_near_plane(0.01f);\ncamera->camera().set_fov(60.0f);\n
camera.camera().set_far_plane(5.)\ncamera.camera().set_near_plane(0.01)\ncamera.camera().set_fov(60.0)\n
or all at once:
C++Python camera->camera().set_camera_params(5., // far plane\n0.01f, // near plane\n60.0f, // field of view\n600, // width\n400 // height\n);\n
camera.camera().set_camera_params(5., #far plane\n0.01, #near plane\n60.0, # field of view\n600, # width\n400) #height\n
You can find a complete example at cameras.cpp.
"},{"location":"faq/#how-can-i-interact-with-the-camera","title":"How can I interact with the camera?","text":"We can move translate the cameras with the WASD
keys, zoom in and out using the mouse wheel
and rotate the camera with holding the left mouse key
and moving the mouse.
"},{"location":"faq/#what-do-the-numbers-in-the-status-bar-mean","title":"What do the numbers in the status bar mean?","text":"The status bar looks like this:
Where simulation time gives us the total simulated time (in seconds), wall time gives us the total time (in seconds) that has passed in real-time once we have started simulating. The next number X.Xx gives us the real-time factor: for example, 1.1x means that the simulation runs 1.1 times faster than real-time, whereas 0.7x means that the simulation runs slower than real-time. The value it: XX ms reports the time it took the last iteration (in milliseconds). The last part gives us whether the simulation tries to adhere to real-time or not. sync means that RobotDART will slow down the simulation in order for it to be in real-time, whereas no-sync means that RobotDART will try to run the simulation as fast as possible.
"},{"location":"faq/#how-can-i-alter-the-graphics-scene-eg-change-lighting-conditions","title":"How can I alter the graphics scene (e.g., change lighting conditions)?","text":"When creating a graphics object, you can pass a GraphicsConfiguration
object that changes the default values:
C++Python robot_dart::gui::magnum::GraphicsConfiguration configuration;\n// We can change the width/height of the window (or camera image dimensions)\nconfiguration.width = 1280;\nconfiguration.height = 960;\nconfiguration.title = \"Graphics Tutorial\"; // We can set a title for our window\n// We can change the configuration for shadows\nconfiguration.shadowed = true;\nconfiguration.transparent_shadows = true;\nconfiguration.shadow_map_size = 1024;\n// We can also alter some specifications for the lighting\nconfiguration.max_lights = 3; // maximum number of lights for our scene [default=3]\nconfiguration.specular_strength = 0.25; // strength of the specular component\n// Some extra configuration for the main camera\nconfiguration.draw_main_camera = true;\nconfiguration.draw_debug = true;\nconfiguration.draw_text = true;\n// We can also change the background color [default=black]\nconfiguration.bg_color = Eigen::Vector4d{1.0, 1.0, 1.0, 1.0};\n// Create the graphics object with the configuration as parameter\nauto graphics = std::make_shared<robot_dart::gui::magnum::Graphics>(configuration);\n
configuration = rd.gui.GraphicsConfiguration()\n# We can change the width/height of the window (or camera, dimensions)\nconfiguration.width = 1280\nconfiguration.height = 960\nconfiguration.title = \"Graphics Tutorial\" # We can set a title for our window\n# We can change the configuration for shadows\nconfiguration.shadowed = True\nconfiguration.transparent_shadows = True\nconfiguration.shadow_map_size = 1024\n# We can also alter some specifications for the lighting\nconfiguration.max_lights = 3 # maximum number of lights for our scene\nconfiguration.specular_strength = 0.25 # strength og the specular component\n# Some extra configuration for the main camera\nconfiguration.draw_main_camera = True\nconfiguration.draw_debug = True\nconfiguration.draw_text = True\n# We can also change the background color [default = black]\nconfiguration.bg_color = [1., 1., 1., 1.]\n# create the graphics object with the configuration as a parameter\ngraphics = rd.gui.Graphics(configuration)\n
You can disable or enable shadows on the fly as well:
C++Python // Disable shadows\ngraphics->enable_shadows(false, false);\nsimu.run(1.);\n// Enable shadows only for non-transparent objects\ngraphics->enable_shadows(true, false);\nsimu.run(1.);\n// Enable shadows for transparent objects as well\ngraphics->enable_shadows(true, true);\nsimu.run(1.);\n
# Disable shadows\ngraphics.enable_shadows(False, False)\nsimu.run(1.)\n# Enable shadows only for non-transparent objects\ngraphics.enable_shadows(True, False)\nsimu.run(1.)\n# Enable shadows for transparent objects as well\ngraphics.enable_shadows(True, True)\nsimu.run(1.)\n
You can also add your own lights. The application by default creates 2 light sources and the maximum number of lights is 3 (you can change this once before the creation of the graphics object via the GraphicsConfiguration
object). So usually before you add your lights, you have to clear the default lights:
C++Python // Clear Lights\ngraphics->clear_lights();\n
# Clear Lights\ngraphics.clear_lights()\n
Then you must create a custom light material:
C++Python // Create Light material\nMagnum::Float shininess = 1000.f;\nMagnum::Color4 white = {1.f, 1.f, 1.f, 1.f};\n// ambient, diffuse, specular\nauto custom_material = robot_dart::gui::magnum::gs::Material(white, white, white, shininess);\n
# Clear Light material\nshininess = 1000.\nwhite = magnum.Color4(1., 1., 1., 1.)\n# ambient, diffuse, specular\ncustom_material = rd.gui.Material(white, white, white, shininess)\n
Now you can add one or more of the following lights:
Point Light:
C++Python // Create point light\nMagnum::Vector3 position = {0.f, 0.f, 2.f};\nMagnum::Float intensity = 1.f;\nMagnum::Vector3 attenuation_terms = {1.f, 0.f, 0.f};\nauto point_light = robot_dart::gui::magnum::gs::create_point_light(position, custom_material, intensity, attenuation_terms);\ngraphics->add_light(point_light);\n
# Create point light\nposition = magnum.Vector3(0., 0., 2.)\nintensity = 1.\nattenuation_terms = magnum.Vector3(1., 0., 0.)\npoint_light = rd.gui.create_point_light(position, custom_material, intensity, attenuation_terms)\ngraphics.add_light(point_light)\n
Spot Light:
C++Python // Create spot light\nMagnum::Vector3 position = {0.f, 0.f, 1.f};\nMagnum::Vector3 direction = {-1.f, -1.f, -1.f};\nMagnum::Float intensity = 1.f;\nMagnum::Vector3 attenuation_terms = {1.f, 0.f, 0.f};\nMagnum::Float spot_exponent = M_PI;\nMagnum::Float spot_cut_off = M_PI / 8;\nauto spot_light = robot_dart::gui::magnum::gs::create_spot_light(position, custom_material, direction, spot_exponent, spot_cut_off, intensity, attenuation_terms);\ngraphics->add_light(spot_light);\n
# Create spot light\nposition = magnum.Vector3(0., 0., 1.)\ndirection = magnum.Vector3(-1, -1, -1)\nintensity = 1.\nattenuation_terms = magnum.Vector3(1., 0., 0.)\nspot_exponent = np.pi\nspot_cut_off = np.pi / 8\nspot_light = rd.gui.create_spot_light(position, custom_material, direction, spot_exponent, spot_cut_off, intensity, attenuation_terms)\ngraphics.add_light(spot_light)\n
Directional Light:
C++Python // Create directional light\nMagnum::Vector3 direction = {-1.f, -1.f, -1.f};\nauto directional_light = robot_dart::gui::magnum::gs::create_directional_light(direction, custom_material);\ngraphics->add_light(directional_light);\n
# Create directional light\ndirection = magnum.Vector3(-1, -1, -1)\ndirectional_light = rd.gui.create_directional_light(direction, custom_material)\ngraphics.add_light(directional_light)\n
"},{"location":"faq/#i-want-to-visualize-a-target-configuration-of-my-robot-is-this-possible","title":"I want to visualize a target configuration of my robot, is this possible?","text":"Yes this is possible. RobotDART gives the ability to create a clone of your robot that has no physics, no contacts, just visuals:
C++Python // Add a ghost robot; only visuals, no dynamics, no collision\nauto ghost = robot->clone_ghost();\nsimu.add_robot(ghost);\n
# Add a ghost robot; only visuals, no dynamics, no collision\nghost = robot.clone_ghost()\nsimu.add_robot(ghost)\n
"},{"location":"faq/#how-can-i-control-my-robot","title":"How can I control my robot ?","text":"PD control
C++Python // add a PD-controller to the arm\n// set desired positions\nEigen::VectorXd ctrl = robot_dart::make_vector({0., M_PI / 4., 0., -M_PI / 4., 0., M_PI / 2., 0., 0.});\n// add the controller to the robot\nauto controller = std::make_shared<robot_dart::control::PDControl>(ctrl);\nrobot->add_controller(controller);\ncontroller->set_pd(300., 50.);\n
# add a PD-controller to the arm\n# set desired positions\nctrl = [0., np.pi / 4., 0., -np.pi / 4., 0., np.pi / 2., 0., 0.]\n# add the controller to the robot\ncontroller = rd.PDControl(ctrl)\nrobot.add_controller(controller)\ncontroller.set_pd(300., 50.)\n
Simple control
C++Python auto controller1 = std::make_shared<robot_dart::control::SimpleControl>(ctrl);\nrobot->add_controller(controller1);\n
controller1 = rd.SimpleControl(ctrl)\nrobot.add_controller(controller1)\n
Robot control
C++Python class MyController : public robot_dart::control::RobotControl {\npublic:\nMyController() : robot_dart::control::RobotControl() {}\nMyController(const Eigen::VectorXd& ctrl, bool full_control) : robot_dart::control::RobotControl(ctrl, full_control) {}\nMyController(const Eigen::VectorXd& ctrl, const std::vector<std::string>& dof_names) : robot_dart::control::RobotControl(ctrl, dof_names) {}\nvoid configure() override\n{\n_active = true;\n}\nEigen::VectorXd calculate(double) override\n{\nauto robot = _robot.lock();\nEigen::VectorXd cmd = 100. * (_ctrl - robot->positions(_controllable_dofs));\nreturn cmd;\n}\nstd::shared_ptr<robot_dart::control::RobotControl> clone() const override\n{\nreturn std::make_shared<MyController>(*this);\n}\n};\n
class MyController(rd.RobotControl):\ndef __init__(self, ctrl, full_control):\nrd.RobotControl.__init__(self, ctrl, full_control)\ndef __init__(self, ctrl, controllable_dofs):\nrd.RobotControl.__init__(self, ctrl, controllable_dofs)\ndef configure(self):\nself._active = True\ndef calculate(self, t):\ntarget = np.array([self._ctrl])\ncmd = 100*(target-self.robot().positions(self._controllable_dofs))\nreturn cmd[0]\n# TO-DO: This is NOT working at the moment!\ndef clone(self):\nreturn MyController(self._ctrl, self._controllable_dofs)\n
"},{"location":"faq/#is-there-a-way-to-control-the-simulation-timestep","title":"Is there a way to control the simulation timestep?","text":"When creating a RobotDARTSimu object you choose the simulation timestep:
C++Python // choose time step of 0.001 seconds\nrobot_dart::RobotDARTSimu simu(0.001);\n
# choose time step of 0.001 seconds\nsimu = rd.RobotDARTSimu(0.001)\n
which can later be modified by:
C++Python // set timestep to 0.005 and update control frequency(bool)\nsimu.set_timestep(0.005, true);\n
# set timestep to 0.005 and update control frequency(bool)\nsimu.set_timestep(0.005, True)\n
"},{"location":"faq/#i-want-to-simulate-a-mars-environment-is-it-possible-to-change-the-gravitational-force-of-the-simulation-environment","title":"I want to simulate a mars environment, is it possible to change the gravitational force of the simulation environment?","text":"Yes you can modify the gravitational forces 3-dimensional vector of the simulation:
C++Python // Set gravitational force of mars\nEigen::Vector3d mars_gravity = {0., 0., -3.721};\nsimu.set_gravity(mars_gravity);\n
# set gravitational force of mars\nmars_gravity = [0., 0., -3.721]\nsimu.set_gravity(mars_gravity)\n
"},{"location":"faq/#which-collision-detectors-are-available-what-are-their-differences-how-can-i-choose-between-them","title":"Which collision detectors are available? What are their differences? How can I choose between them?","text":"DART FCL ODE Bullet Support only basic shapes Full-featured collision detector fully integrated by DART External collision detector of ODE External collision detector of Bullet This is building along with DART This is a required dependency of DART Needs an external library Needs an external library Very fast for small scenes Accurate detailed collisions, but not very fast Fast collision detection (the integration is not complete) Fast and accurate collision detection (works well for wheels as well) We can easily select one of the available collision detectors using the simulator object:
C++Python simu.set_collision_detector(\"fcl\"); // collision_detector can be \"dart\", \"fcl\", \"ode\" or \"bullet\" (case does not matter)\n
simu.set_collision_detector(\"fcl\") # collision_detector can be \"dart\", \"fcl\", \"ode\" or \"bullet\" (case does not matter)\n
"},{"location":"faq/#my-robot-does-not-self-collide-how-can-i-change-this","title":"My robot does not self-collide. How can I change this?","text":"One possible cause may be the fact that self collision is disabled, you can check and change this:
C++Python if (!robot->self_colliding()) {\nstd::cout << \"Self collision is not enabled\" << std::endl;\n// set self collisions to true and adjacent collisions to false\nrobot->set_self_collision(true, false);\n}\n
if(not robot.self_colliding()):\nprint(\"Self collision is not enabled\")\n# set self collisions to true and adjacent collisions to false\nrobot.set_self_collision(True, False)\n
"},{"location":"faq/#how-can-i-compute-kinematicdynamic-properties-of-my-robot-eg-jacobians-mass-matrix","title":"How can I compute kinematic/dynamic properties of my robot (e.g., Jacobians, Mass Matrix)?","text":"Kinematic Properties:
C++Python // Get Joint Positions(Angles)\nauto joint_positions = robot->positions();\n// Get Joint Velocities\nauto joint_vels = robot->velocities();\n// Get Joint Accelerations\nauto joint_accs = robot->accelerations();\n// Get link_name(str) Transformation matrix with respect to the world frame.\nauto eef_tf = robot->body_pose(link_name);\n// Get translation vector from transformation matrix\nauto eef_pos = eef_tf.translation();\n// Get rotation matrix from tranformation matrix\nauto eef_rot = eef_tf.rotation();\n// Get link_name 6d pose vector [logmap(eef_tf.linear()), eef_tf.translation()]\nauto eef_pose_vec = robot->body_pose_vec(link_name);\n// Get link_name 6d velocity vector [angular, cartesian]\nauto eef_vel = robot->body_velocity(link_name);\n// Get link_name 6d acceleration vector [angular, cartesian]\nauto eef_acc = robot->body_acceleration(link_name);\n// Jacobian targeting the origin of link_name(str)\nauto jacobian = robot->jacobian(link_name);\n// Jacobian time derivative\nauto jacobian_deriv = robot->jacobian_deriv(link_name);\n// Center of Mass Jacobian\nauto com_jacobian = robot->com_jacobian();\n// Center of Mass Jacobian Time Derivative\nauto com_jacobian_deriv = robot->com_jacobian_deriv();\n
# Get Joint Positions(Angles)\njoint_positions = robot.positions()\n# Get Joint Velocities\njoint_vels = robot.velocities()\n# Get Joint Accelerations\njoint_accs = robot.accelerations()\n# Get link_name(str) Transformation matrix with respect to the world frame.\neef_tf = robot.body_pose(link_name)\n# Get translation vector from transformation matrix\neef_pos = eef_tf.translation()\n# Get rotation matrix from tranformation matrix\neef_rot = eef_tf.rotation()\n# Get link_name 6d pose vector [logmap(eef_tf.linear()), eef_tf.translation()]\neef_pose_vec = robot.body_pose_vec(link_name)\n# Get link_name 6d velocity vector [angular, cartesian]\neef_vel = robot.body_velocity(link_name)\n# Get link_name 6d acceleration vector [angular, cartesian]\neef_acc = robot.body_acceleration(link_name)\n# Jacobian targeting the origin of link_name(str)\njacobian = robot.jacobian(link_name)\n# Jacobian time derivative\njacobian_deriv = robot.jacobian_deriv(link_name)\n# Center of Mass Jacobian\ncom_jacobian = robot.com_jacobian()\n# Center of Mass Jacobian Time Derivative\ncom_jacobian_deriv = robot.com_jacobian_deriv()\n
Dynamic Properties:
C++Python // Get Joint Forces\nauto joint_forces = robot->forces();\n// Get link's mass\nauto eef_mass = robot->body_mass(link_name);\n// Mass Matrix of robot\nauto mass_matrix = robot->mass_matrix();\n// Inverse of Mass Matrix\nauto inv_mass_matrix = robot->inv_mass_matrix();\n// Augmented Mass matrix\nauto aug_mass_matrix = robot->aug_mass_matrix();\n// Inverse of Augmented Mass matrix\nauto inv_aug_mass_matrix = robot->inv_aug_mass_matrix();\n// Coriolis Force vector\nauto coriolis = robot->coriolis_forces();\n// Gravity Force vector\nauto gravity = robot->gravity_forces();\n// Combined vector of Coriolis Force and Gravity Force\nauto coriolis_gravity = robot->coriolis_gravity_forces();\n// Constraint Force Vector\nauto constraint_forces = robot->constraint_forces();\n
# Get Joint Forces\njoint_forces = robot.forces()\n# Get link's mass\neef_mass = robot.body_mass(link_name)\n# Mass Matrix of robot\nmass_matrix = robot.mass_matrix()\n# Inverse of Mass Matrix\ninv_mass_matrix = robot.inv_mass_matrix()\n# Augmented Mass matrix\naug_mass_matrix = robot.aug_mass_matrix()\n# Inverse of Augmented Mass matrix\ninv_aug_mass_matrix = robot.inv_aug_mass_matrix()\n# Coriolis Force vector\ncoriolis = robot.coriolis_forces()\n# Gravity Force vector\ngravity = robot.gravity_forces()\n# Combined vector of Coriolis Force and Gravity Force\ncoriolis_gravity = robot.coriolis_gravity_forces()\n# NOT IMPLEMENTED\n# # Constraint Force Vector\n# constraint_forces = robot.constraint_forces()\n
"},{"location":"faq/#is-there-a-way-to-change-the-joint-properties-eg-actuation-friction","title":"Is there a way to change the joint properties (e.g., actuation, friction)?","text":"There are 6 types of actuators available, you can set the same actuator to multiple joints at once, or you can set each sensor separately:
C++Python // Set all DoFs to same actuator\nrobot->set_actuator_types(\"servo\"); // actuator types can be \"servo\", \"torque\", \"velocity\", \"passive\", \"locked\", \"mimic\"\n// You can also set actuator types separately\nrobot->set_actuator_type(\"torque\", \"iiwa_joint_5\");\n
# Set all DoFs to same actuator\n# actuator types can be \"servo\", \"torque\", \"velocity\", \"passive\", \"locked\", \"mimic\"\nrobot.set_actuator_types(\"servo\")\n# You can also set actuator types separately\nrobot.set_actuator_type(\"torque\", \"iiwa_joint_5\")\n
To enable position and velocity limits for the actuators:
C++Python // Enforce joint position and velocity limits\nrobot->set_position_enforced(true);\n
# Enforce joint position and velocity limits\nrobot.set_position_enforced(True)\n
Every DOF's limits (position, velocity, acceleration, force) can be modified:
C++Python // Modify Position Limits\nEigen::VectorXd pos_upper_lims(7);\npos_upper_lims << 2.096, 2.096, 2.096, 2.096, 2.096, 2.096, 2.096;\nrobot->set_position_upper_limits(pos_upper_lims, dof_names);\nrobot->set_position_lower_limits(-pos_upper_lims, dof_names);\n// Modify Velocity Limits\nEigen::VectorXd vel_upper_lims(7);\nvel_upper_lims << 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5;\nrobot->set_velocity_upper_limits(vel_upper_lims, dof_names);\nrobot->set_velocity_lower_limits(-vel_upper_lims, dof_names);\n// Modify Force Limits\nEigen::VectorXd force_upper_lims(7);\nforce_upper_lims << 150, 150, 150, 150, 150, 150, 150;\nrobot->set_force_upper_limits(force_upper_lims, dof_names);\nrobot->set_force_lower_limits(-force_upper_lims, dof_names);\n// Modify Acceleration Limits\nEigen::VectorXd acc_upper_lims(7);\nacc_upper_lims << 1500, 1500, 1500, 1500, 1500, 1500, 1500;\nrobot->set_acceleration_upper_limits(acc_upper_lims, dof_names);\nrobot->set_acceleration_lower_limits(-acc_upper_lims, dof_names);\n
# Modify Position Limits\npos_upper_lims = np.array([2.096, 2.096, 2.096, 2.096, 2.096, 2.096, 2.096])\nrobot.set_position_upper_limits(pos_upper_lims, dof_names)\nrobot.set_position_lower_limits(-1*pos_upper_lims, dof_names)\n# Modify Velocity Limits\nvel_upper_lims = np.array([1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5])\nrobot.set_velocity_upper_limits(vel_upper_lims, dof_names)\nrobot.set_velocity_lower_limits(-1*vel_upper_lims, dof_names)\n# Modify Force Limits\nforce_upper_lims = np.array([150, 150, 150, 150, 150, 150, 150])\nrobot.set_force_upper_limits(force_upper_lims, dof_names)\nrobot.set_force_lower_limits(-1*force_upper_lims, dof_names)\n# Modify Acceleration Limits\nacc_upper_lims = np.array([1500, 1500, 1500, 1500, 1500, 1500, 1500])\nrobot.set_acceleration_upper_limits(acc_upper_lims, dof_names)\nrobot.set_acceleration_lower_limits(-1*acc_upper_lims, dof_names)\n
You can also modify the damping coefficients, coulomb frictions and spring stiffness of every DOF:
C++Python // Modify Damping Coefficients\nstd::vector<double> damps = {0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4};\nrobot->set_damping_coeffs(damps, dof_names);\n// Modify Coulomb Frictions\nstd::vector<double> cfrictions = {0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001};\nrobot->set_coulomb_coeffs(cfrictions, dof_names);\n// Modify Spring Stiffness\nstd::vector<double> stiffnesses = {0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001};\nrobot->set_spring_stiffnesses(stiffnesses, dof_names);\n
# Modify Damping Coefficients\ndamps = [0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4]\nrobot.set_damping_coeffs(damps, dof_names)\n# Modify Coulomb Frictions\ncfrictions = [0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001]\nrobot.set_coulomb_coeffs(cfrictions, dof_names)\n# Modify Spring Stiffness\nstiffnesses = [0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001]\nrobot.set_spring_stiffnesses(stiffnesses, dof_names)\n
"},{"location":"faq/#what-are-the-supported-sensors-how-can-i-use-an-imu","title":"What are the supported sensors? How can I use an IMU?","text":"Sensors in RobotDART can be added only through the simulator object. All of the sensors can be added without being attached to any body or joint but some of them can operate only when attached to something (e.g. ForceTorque
sensors).
"},{"location":"faq/#torque-sensor","title":"Torque sensor","text":"Torque sensors can be added to every joint of the robot:
C++Python // Add torque sensors to the robot\nint ct = 0;\nstd::vector<std::shared_ptr<robot_dart::sensor::Torque>> tq_sensors(robot->num_dofs());\nfor (const auto& joint : robot->dof_names())\ntq_sensors[ct++] = simu.add_sensor<robot_dart::sensor::Torque>(robot, joint, 1000);\n
# Add torque sensors to the robot\ntq_sensors = np.empty(robot.num_dofs(), dtype=rd.sensor.Torque)\nct = 0\nfor joint in robot.dof_names():\nsimu.add_sensor(rd.sensor.Torque(robot, joint, 1000))\ntq_sensors[ct] = simu.sensors()[-1]\nct += 1\n
Torque sensors measure the torque \\(\\tau \\in \\rm I\\!R^n\\) of the attached joint (where \\(n\\) is the DOFs of the joint):
C++Python // vector that contains the torque measurement for every joint (scalar)\nEigen::VectorXd torques_measure(robot->num_dofs());\nfor (const auto& tq_sens : tq_sensors)\ntorques_measure.block<1, 1>(ct++, 0) = tq_sens->torques();\n
# vector that contains the torque measurement for every joint (scalar)\ntorques_measure = np.empty(robot.num_dofs())\nct = 0\nfor tq_sens in tq_sensors:\ntorques_measure[ct] = tq_sens.torques()\nct += 1\n
"},{"location":"faq/#force-torque-sensor","title":"Force-Torque sensor","text":"Force-Torque sensors can be added to every joint of the robot:
C++Python // Add force-torque sensors to the robot\nct = 0;\nstd::vector<std::shared_ptr<robot_dart::sensor::ForceTorque>> f_tq_sensors(robot->num_dofs());\nfor (const auto& joint : robot->dof_names())\nf_tq_sensors[ct++] = simu.add_sensor<robot_dart::sensor::ForceTorque>(robot, joint, 1000, \"parent_to_child\");\n
# Add force-torque sensors to the robot\nf_tq_sensors = np.empty(robot.num_dofs(), dtype=rd.sensor.ForceTorque)\nct = 0\nfor joint in robot.dof_names():\nsimu.add_sensor(rd.sensor.ForceTorque(\nrobot, joint, 1000, \"parent_to_child\"))\nf_tq_sensors[ct] = simu.sensors()[-1]\nprint(f_tq_sensors)\nct += 1\n
Force-Torque sensors measure the force \\(\\boldsymbol{F} \\in \\rm I\\!R^3\\), the torque \\(\\boldsymbol{\\tau} \\in \\rm I\\!R^3\\) and the wrench \\(\\boldsymbol{\\mathcal{F}} =\\begin{bmatrix} \\tau, F\\end{bmatrix}\\in \\rm I\\!R^6\\) of the attached joint:
C++Python // matrix that contains the torque measurement for every joint (3d vector)\nEigen::MatrixXd ft_torques_measure(robot->num_dofs(), 3);\n// matrix that contains the force measurement for every joint (3d vector)\nEigen::MatrixXd ft_forces_measure(robot->num_dofs(), 3);\n// matrix that contains the wrench measurement for every joint (6d vector)[torque, force]\nEigen::MatrixXd ft_wrench_measure(robot->num_dofs(), 6);\nct = 0;\nfor (const auto& f_tq_sens : f_tq_sensors) {\nft_torques_measure.block<1, 3>(ct, 0) = f_tq_sens->torque();\nft_forces_measure.block<1, 3>(ct, 0) = f_tq_sens->force();\nft_wrench_measure.block<1, 6>(ct, 0) = f_tq_sens->wrench();\nct++;\n}\n
# matrix that contains the torque measurement for every joint (3d vector)\nft_torques_measure = np.empty([robot.num_dofs(), 3])\n# matrix that contains the force measurement for every joint (3d vector)\nft_forces_measure = np.empty([robot.num_dofs(), 3])\n# matrix that contains the wrench measurement for every joint (6d vector)[torque, force]\nft_wrench_measure = np.empty([robot.num_dofs(), 6])\nct = 0\nfor f_tq_sens in f_tq_sensors:\nft_torques_measure[ct, :] = f_tq_sens.torque()\nft_forces_measure[ct, :] = f_tq_sens.force()\nft_wrench_measure[ct, :] = f_tq_sens.wrench()\nct += 1\n
"},{"location":"faq/#imu-sensor","title":"IMU sensor","text":"IMU sensors can be added to every link of the robot:
C++Python // Add IMU sensors to the robot\nct = 0;\nstd::vector<std::shared_ptr<robot_dart::sensor::IMU>> imu_sensors(robot->num_bodies());\nfor (const auto& body_node : robot->body_names()) {\n// _imu(std::make_shared<sensor::IMU>(sensor::IMUConfig(body_node(\"head\"), frequency))),\nimu_sensors[ct++] = simu.add_sensor<robot_dart::sensor::IMU>(robot_dart::sensor::IMUConfig(robot->body_node(body_node), 1000));\n}\n
# Add IMU sensors to the robot\nct = 0\nimu_sensors = np.empty(robot.num_bodies(), dtype=rd.sensor.IMU)\nfor body_node in robot.body_names():\nsimu.add_sensor(rd.sensor.IMU(\nrd.sensor.IMUConfig(robot.body_node(body_node), 1000)))\nimu_sensors[ct] = simu.sensors()[-1]\nct += 1\n
IMU sensors measure the angular position vector \\(\\boldsymbol{\\theta} \\in \\rm I\\!R^3\\), the angular velocity \\(\\boldsymbol{\\omega} \\in \\rm I\\!R^3\\) and the linear acceleration \\(\\boldsymbol{\\alpha} \\in \\rm I\\!R^3\\) of the attached link:
C++Python Eigen::MatrixXd imu_angular_positions_measure(robot->num_bodies(), 3);\nEigen::MatrixXd imu_angular_velocities_measure(robot->num_bodies(), 3);\nEigen::MatrixXd imu_linear_acceleration_measure(robot->num_bodies(), 3);\nct = 0;\nfor (const auto& imu_sens : imu_sensors) {\nimu_angular_positions_measure.block<1, 3>(ct, 0) = imu_sens->angular_position_vec();\nimu_angular_velocities_measure.block<1, 3>(ct, 0) = imu_sens->angular_velocity();\nimu_linear_acceleration_measure.block<1, 3>(ct, 0) = imu_sens->linear_acceleration();\nct++;\n}\n
imu_angular_positions_measure = np.empty([robot.num_bodies(), 3])\nimu_angular_velocities_measure = np.empty([robot.num_bodies(), 3])\nimu_linear_acceleration_measure = np.empty([robot.num_bodies(), 3])\nct = 0\nfor imu_sens in imu_sensors:\nimu_angular_positions_measure[ct,:] = imu_sens.angular_position_vec()\nimu_angular_velocities_measure[ct, :] = imu_sens.angular_velocity()\nimu_linear_acceleration_measure[ct,:] = imu_sens.linear_acceleration()\nct += 1\n
"},{"location":"faq/#rgb-sensor","title":"RGB sensor","text":"Any camera can be used as an RGB sensor:
C++Python // a nested std::vector (w*h*3) of the last image taken can be retrieved\nauto rgb_image = camera->image();\n
# a nested array (w*h*3) of the last image taken can be retrieved\nrgb_image = camera.image()\n
We can easily save the image and/or transform it to grayscale:
C++Python // a nested std::vector (w*h*3) of the last image taken can be retrieved\nauto rgb_image = camera->image();\n
# a nested array (w*h*3) of the last image taken can be retrieved\nrgb_image = camera.image()\n
"},{"location":"faq/#rgb_d-sensor","title":"RGB_D sensor","text":"Any camera can also be configured to also record depth:
C++Python camera->camera().record(true, true); // cameras are recording color images by default, enable depth images as well for this example\n
# cameras are recording color images by default, enable depth images as well for this example\ncamera.camera().record(True, True)\n
We can then read the RGB and depth images:
C++Python // get the depth image from a camera\n// with a version for visualization or bigger differences in the output\nauto rgb_d_image = camera->depth_image();\n// and the raw values that can be used along with the camera parameters to transform the image to point-cloud\nauto rgb_d_image_raw = camera->raw_depth_image();\n
# get the depth image from a camera\n# with a version for visualization or bigger differences in the output\nrgb_d_image = camera.depth_image()\n# and the raw values that can be used along with the camera parameters to transform the image to point-cloud\nrgb_d_image_raw = camera.raw_depth_image()\n
We can save the depth images as well:
C++Python robot_dart::gui::save_png_image(\"camera-depth.png\", rgb_d_image);\nrobot_dart::gui::save_png_image(\"camera-depth-raw.png\", rgb_d_image_raw);\n
rd.gui.save_png_image(\"camera-depth.png\", rgb_d_image)\nrd.gui.save_png_image(\"camera-depth-raw.png\", rgb_d_image_raw)\n
"},{"location":"faq/#how-can-i-spawn-multiple-robots-in-parallel","title":"How can I spawn multiple robots in parallel?","text":""},{"location":"faq/#robot-pool-only-in-c","title":"Robot Pool (only in C++)","text":"The best way to do so is to create a Robot pool. With a robot pool you:
- Minimize the overhead of loading robots (it happens only once!) or cloning robots (it never happens)
- Make sure that your robots are \"clean\" once released from each thread
- Focus on the important stuff rather than handling robots and threads
Let's see a more practical example:
- First we need to include the proper header:
C++ #include <robot_dart/robot_pool.hpp>\n
- Then we create a
creator
function and the pool object:
C++ namespace pool {\n// This function should load one robot: here we load Talos\ninline std::shared_ptr<robot_dart::Robot> robot_creator()\n{\nreturn std::make_shared<robot_dart::robots::Talos>();\n}\n// To create the object we need to pass the robot_creator function and the number of maximum parallel threads\nrobot_dart::RobotPool robot_pool(robot_creator, NUM_THREADS);\n} // namespace pool\n
The creator
function is the function responsible for loading your robot. This should basically look like standalone code to load or create a robot.
- Next, we create a few threads that utilize the robots (in your code you might be using OpenMP or TBB):
C++ // for the example, we run NUM_THREADS threads of eval_robot()\nstd::vector<std::thread> threads(NUM_THREADS * 2); // *2 to see some reuse\nfor (size_t i = 0; i < threads.size(); ++i)\nthreads[i] = std::thread(eval_robot, i); // eval_robot is the function that uses the robot\n
- An example evaluation function:
C++ inline void eval_robot(int i)\n{\n// We get one available robot\nauto robot = pool::robot_pool.get_robot();\nstd::cout << \"Robot \" << i << \" got [\" << robot->skeleton() << \"]\" << std::endl;\n/// --- some robot_dart code ---\nsimulate_robot(robot);\n// --- do something with the result\nstd::cout << \"End of simulation \" << i << std::endl;\n// CRITICAL : free your robot !\npool::robot_pool.free_robot(robot);\nstd::cout << \"Robot \" << i << \" freed!\" << std::endl;\n}\n
"},{"location":"faq/#python","title":"Python","text":"We have not implemented this feature in Python
yet. One can emulate the RobotPool
behavior or create a custom parallel robot loader.
"},{"location":"faq/#i-need-to-simulate-many-worlds-with-camera-sensors-in-parallel-how-can-i-do-this","title":"I need to simulate many worlds with camera sensors in parallel. How can I do this?","text":"Below you can find an example showcasing the use of many worlds with camera sensors in parallel.
C++Python // Load robot from URDF\nauto global_robot = std::make_shared<robot_dart::robots::Iiwa>();\nstd::vector<std::thread> workers;\n// Set maximum number of parallel GL contexts (this is GPU-dependent)\nrobot_dart::gui::magnum::GlobalData::instance()->set_max_contexts(4);\n// We want 15 parallel simulations with different GL context each\nsize_t N_workers = 15;\nstd::mutex mutex;\nsize_t id = 0;\n// Launch the workers\nfor (size_t i = 0; i < N_workers; i++) {\nworkers.push_back(std::thread([&] {\nmutex.lock();\nsize_t index = id++;\nmutex.unlock();\n// Get the GL context -- this is a blocking call\n// will wait until one GL context is available\n// get_gl_context(gl_context); // this call will not sleep between failed queries\nget_gl_context_with_sleep(gl_context, 20); // this call will sleep 20ms between each failed query\n// Do the simulation\nauto robot = global_robot->clone();\nrobot_dart::RobotDARTSimu simu(0.001);\nEigen::VectorXd ctrl = robot_dart::make_vector({0., M_PI / 3., 0., -M_PI / 4., 0., 0., 0.});\nauto controller = std::make_shared<robot_dart::control::PDControl>(ctrl);\nrobot->add_controller(controller);\ncontroller->set_pd(300., 50.);\n// Magnum graphics\nrobot_dart::gui::magnum::GraphicsConfiguration configuration = robot_dart::gui::magnum::WindowlessGraphics::default_configuration();\nconfiguration.width = 1024;\nconfiguration.height = 768;\nauto graphics = std::make_shared<robot_dart::gui::magnum::WindowlessGraphics>(configuration);\nsimu.set_graphics(graphics);\n// Position the camera differently for each thread to visualize the difference\ngraphics->look_at({0.4 * index, 3.5 - index * 0.1, 2.}, {0., 0., 0.25});\n// record images from main camera/graphics\n// graphics->set_recording(true); // WindowlessGLApplication records images by default\nsimu.add_robot(robot);\nsimu.run(6);\n// Save the image for verification\nrobot_dart::gui::save_png_image(\"camera_\" + std::to_string(index) + \".png\",\ngraphics->image());\n// Release the GL 
context for another thread to use\nrelease_gl_context(gl_context);\n}));\n}\n// Wait for all the workers\nfor (size_t i = 0; i < workers.size(); i++) {\nworkers[i].join();\n}\n
robot = rd.Robot(\"arm.urdf\", \"arm\", False)\nrobot.fix_to_world()\ndef test():\npid = os.getpid()\nii = pid%15\n# create the controller\npdcontrol = rd.PDControl([0.0, 1.0, -1.5, 1.0], False)\n# clone the robot\ngrobot = robot.clone()\n# add the controller to the robot\ngrobot.add_controller(pdcontrol, 1.)\npdcontrol.set_pd(200., 20.)\n# create the simulation object\nsimu = rd.RobotDARTSimu(0.001)\n# set the graphics\ngraphics = rd.gui.WindowlessGraphics()\nsimu.set_graphics(graphics)\ngraphics.look_at([0.4 * ii, 3.5 - ii * 0.1, 2.], [0., 0., 0.25], [0., 0., 1.])\n# add the robot and the floor\nsimu.add_robot(grobot)\nsimu.add_checkerboard_floor()\n# run\nsimu.run(20.)\n# save last frame for visualization purposes\nimg = graphics.image()\nrd.gui.save_png_image('camera-' + str(ii) + '.png', img)\n# helper function to run in parallel\ndef runInParallel(N):\nproc = []\nfor i in range(N):\n# rd.gui.run_with_gl_context accepts 2 parameters:\n# (func, wait_time_in_ms)\n# the func needs to be of the following format: void(), aka no return, no arguments\np = Process(target=rd.gui.run_with_gl_context, args=(test, 20))\np.start()\nproc.append(p)\nfor p in proc:\np.join()\nprint('Running parallel evaluations')\nN = 15\nstart = timer()\nrunInParallel(N)\nend = timer()\nprint('Time:', end-start)\n
In C++ you are also able to pre-allocate a custom number of OpenGL contexts so that you can take advantage of stronger GPUs.
"},{"location":"faq/#i-do-not-know-how-to-use-waf-how-can-i-detect-robotdart-from-cmake","title":"I do not know how to use waf. How can I detect RobotDART from CMake?","text":"You need to use waf
to build RobotDART, but when installing the library a CMake module is installed. Thus it is possible to use RobotDART in your code using CMake. You can find a complete example at cmake/example. In short the CMake would look like this:
cmake_minimum_required(VERSION 3.10 FATAL_ERROR)\nproject(robot_dart_example)\n# we ask for Magnum because we want to build the graphics\nfind_package(RobotDART REQUIRED OPTIONAL_COMPONENTS Magnum)\nadd_executable(robot_dart_example example.cpp)\ntarget_link_libraries(robot_dart_example\nRobotDART::Simu\n)\nif(RobotDART_Magnum_FOUND)\nadd_executable(robot_dart_example_graphics example.cpp)\ntarget_link_libraries(robot_dart_example_graphics\nRobotDART::Simu\nRobotDART::Magnum\n)\nendif()\n
"},{"location":"faq/#where-can-i-find-complete-working-examples-for-either-c-or-python","title":"Where can I find complete working examples for either c++ or python?","text":"C++ examples
Python examples
"},{"location":"install/","title":"Manual Installation","text":""},{"location":"install/#manual-installation-of-robotdart","title":"Manual Installation of RobotDART","text":"For the quick installation manual, see the quick installation page.
"},{"location":"install/#dependencies","title":"Dependencies","text":""},{"location":"install/#required","title":"Required","text":" - Ubuntu (it should work on versions >= 14.04) or OSX
- Eigen3
- DART, http://dartsim.github.io/
"},{"location":"install/#optional","title":"Optional","text":" - Boost (for unit tests)
- Magnum (for graphics), https://github.com/mosra/magnum
"},{"location":"install/#installation-of-the-dependencies","title":"Installation of the dependencies","text":"Note: The following instructions are high-level and assume people with some experience in building/installing software.
"},{"location":"install/#installing-system-wide-packages","title":"Installing system-wide packages","text":"For Ubuntu-based distributions (>=20.04) we should use the following commands:
sudo apt-get update\nsudo apt-get install build-essential cmake pkg-config git libboost-regex-dev libboost-system-dev libboost-test-dev pybind11-dev\nsudo apt-get install libdart-all-dev\n
For OSX with brew:
brew install dartsim\n
"},{"location":"install/#installing-magnum","title":"Installing Magnum","text":"Magnum depends on Corrade and we are going to use a few plugins and extras from the library. We are also going to use Glfw and Glx for the back-end. Follow the instrutions below:
#installation of Glfw and OpenAL\n# Ubuntu\nsudo apt-get install libglfw3-dev libglfw3 libassimp-dev libopenal-dev libglfw3-dev libsdl2-dev libopenexr-dev libdevil-dev libpng-dev libfaad-dev libfreetype6-dev\n# Mac OSX\nbrew install glfw3 openal-soft assimp\n\n# installation of Corrade\ncd /path/to/tmp/folder\ngit clone https://github.com/mosra/corrade.git\ncd corrade\nmkdir build && cd build\ncmake -DCMAKE_BUILD_TYPE=Release ..\nmake -j\nsudo make install\n\n# installation of Magnum\ncd /path/to/tmp/folder\ngit clone https://github.com/mosra/magnum.git\ncd magnum\nmkdir build && cd build\n# Ubuntu\ncmake -DCMAKE_BUILD_TYPE=Release -DWITH_AUDIO=ON -DWITH_DEBUGTOOLS=ON -DWITH_GL=ON -DWITH_MESHTOOLS=ON -DWITH_PRIMITIVES=ON -DWITH_SCENEGRAPH=ON -DWITH_SHADERS=ON -DWITH_TEXT=ON -DWITH_TEXTURETOOLS=ON -DWITH_TRADE=ON -DWITH_GLFWAPPLICATION=ON -DWITH_WINDOWLESSEGLAPPLICATION=ON -DWITH_OPENGLTESTER=ON -DWITH_ANYAUDIOIMPORTER=ON -DWITH_ANYIMAGECONVERTER=ON -DWITH_ANYIMAGEIMPORTER=ON -DWITH_ANYSCENEIMPORTER=ON -DWITH_MAGNUMFONT=ON -DWITH_OBJIMPORTER=ON -DWITH_TGAIMPORTER=ON -DWITH_WAVAUDIOIMPORTER=ON -DTARGET_EGL=ON .. # this will enable almost all features of Magnum that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)\n# Mac OSX\ncmake -DCMAKE_BUILD_TYPE=Release -DWITH_AUDIO=ON -DWITH_DEBUGTOOLS=ON -DWITH_GL=ON -DWITH_MESHTOOLS=ON -DWITH_PRIMITIVES=ON -DWITH_SCENEGRAPH=ON -DWITH_SHADERS=ON -DWITH_TEXT=ON -DWITH_TEXTURETOOLS=ON -DWITH_TRADE=ON -DWITH_GLFWAPPLICATION=ON -DWITH_WINDOWLESSCGLAPPLICATION=ON -DWITH_OPENGLTESTER=ON -DWITH_ANYAUDIOIMPORTER=ON -DWITH_ANYIMAGECONVERTER=ON -DWITH_ANYIMAGEIMPORTER=ON -DWITH_ANYSCENEIMPORTER=ON -DWITH_MAGNUMFONT=ON -DWITH_OBJIMPORTER=ON -DWITH_TGAIMPORTER=ON -DWITH_WAVAUDIOIMPORTER=ON .. 
# this will enable almost all features of Magnum that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)\nmake -j\nsudo make install\n\n# installation of Magnum Plugins\ncd /path/to/tmp/folder\ngit clone https://github.com/mosra/magnum-plugins.git\ncd magnum-plugins\nmkdir build && cd build\ncmake -DCMAKE_BUILD_TYPE=Release -DWITH_ASSIMPIMPORTER=ON -DWITH_DDSIMPORTER=ON -DWITH_JPEGIMPORTER=ON -DWITH_OPENGEXIMPORTER=ON -DWITH_PNGIMPORTER=ON -DWITH_TINYGLTFIMPORTER=ON -DWITH_STBTRUETYPEFONT=ON .. # this will enable quite a few Magnum Plugins that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)\nmake -j\nsudo make install\n\n# installation of Magnum DART Integration (DART needs to be installed) and Eigen Integration\ncd /path/to/tmp/folder\ngit clone https://github.com/mosra/magnum-integration.git\ncd magnum-integration\nmkdir build && cd build\ncmake -DCMAKE_BUILD_TYPE=Release -DWITH_DART=ON -DWITH_EIGEN=ON ..\nmake -j\nsudo make install\n
"},{"location":"install/#compilation-and-running-the-examples","title":"Compilation and running the examples","text":"The compilation of the library is straight-forward:
- retrieve the code, for instance with
git clone https://github.com/resibots/robot_dart.git
cd /path/to/repo/root
./waf configure
./waf
To build the examples, execute this: ./waf examples
Now you can run the examples. For example, to run the arm example you need to type the following: ./build/arm
(or ./build/arm_plain
to run it without graphics).
"},{"location":"install/#installing-the-library","title":"Installing the library","text":"To install the library (assuming that you have already compiled it), you need only to run:
./waf install
By default the library will be installed in /usr/local/lib
(for this maybe sudo ./waf install
might be needed) and a static library will be generated. You can change the defaults as follows:
./waf configure --prefix=/path/to/install/dir --shared
./waf install
In short, with --prefix
you can change the directory where the library will be installed and if --shared
is present a shared library will be created instead of a static one.
"},{"location":"install/#compiling-the-python-bindings","title":"Compiling the python bindings","text":"For the python bindings of robot_dart, we need numpy
to be installed, pybind11
and the python bindings of DART (dartpy).
For numpy
one can install it with pip
or standard packages. dartpy
should be installed via the packages above. If not, please see the installation instructions on the main DART website.
Then the compilation of robot_dart is almost identical as before:
- retrieve the code, for instance with
git clone https://github.com/resibots/robot_dart.git
cd /path/to/repo/root
./waf configure --python
(--python
enables the python bindings) ./waf
- Install the library (including the python bindings) as before (no change is needed)
- Depending on your installation directory you might need to update your
PYTHONPATH
, e.g. export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python3.10/site-packages/
To run the python examples (for the python examples you need to have enabled the graphics, that is, install Magnum library), run:
cd /path/to/repo/root
python src/python/example.py
or python src/python/example_parallel.py
"},{"location":"install/#common-issues-with-python-bindings","title":"Common Issues with Python bindings","text":"One of the most common issue with the python bindings is the fact that DART bindings might be compiled and installed for python 3 and the robot_dart ones for python 2. To force the usage of python 3 for robot_dart, you use python3 ./waf
instead of just ./waf
in all the commands above.
"},{"location":"quick_install/","title":"Installation","text":""},{"location":"quick_install/#scripts-for-quick-installation-of-robotdart","title":"Scripts for Quick Installation of RobotDART","text":"In this page we provide standalone scripts for installing RobotDART for Ubuntu
(>=20.04) and OSX
. The scripts will install all the required dependencies and RobotDART. Notably, all dependencies that need to be compiled by source and RobotDART will be installed inside the /opt
folder. This way, one can rest assured that their system will be clean.
"},{"location":"quick_install/#ubuntu-2004","title":"Ubuntu >=20.04","text":"To quickly install RobotDART on Ubuntu >=20.04
, we just need to run ./scripts/install_ubuntu.sh
from the root of the repo. In more detail:
git clone https://github.com/resibots/robot_dart.git
cd robot_dart
./scripts/install_ubuntu.sh
This will install everything needed! Once the script is successfully executed, one should add the following to their ~/.bashrc
or ~/.zshrc
file (you may need to swap the python version to yours1):
export PATH=/opt/magnum/bin:$PATH\nexport LD_LIBRARY_PATH=/opt/magnum/lib:/opt/robot_dart/lib:$LD_LIBRARY_PATH\nexport PYTHONPATH=/opt/robot_dart/lib/python3.10/site-packages:$PYTHONPATH\n
"},{"location":"quick_install/#osx","title":"OSX","text":"Coming soon
-
You can run python --version
to see your version. We only keep the major.minor (ignoring the patch version)\u00a0\u21a9
"},{"location":"robots/","title":"Supported robots","text":""},{"location":"robots/#supported-robots","title":"Supported robots","text":"Every robot is defined as a URDF, which will be installed in $PREFIX/share/utheque
. All robots have pre-defined \"robot classes\" that define sensors and other properties; for your custom/new robots, you will have to add the sensors/properties via the generic robot class (or create a new robot class).
The URDF files are loaded using the following rules (see utheque::path()
):
- First check in the current directory
- If not found, check in
current_directory/utheque
- If not found, check in
$ROBOT_DART_PATH/utheque
- If not found, check in the robot dart installation path/robots (e.g.,
/usr/share/utheque
or $HOME/share/utheque
) - Otherwise, report failure
utheque
is a separate header-only library that gets installed together with RobotDART (or even alone), that can be used in libraries that do not want to interfere with RobotDART and use the curated URDF files.
"},{"location":"robots/#talos-pal-robotics","title":"Talos (PAL Robotics)","text":"Talos is a humanoid robot made by PAL Robotics.
- Datasheet: [pdf]
- 32 degrees of freedom (6 for each leg, 7 for each arm, 2 for the waist, 2 for the neck, 1 for each gripper)
- 175 cm / 95 kg
- IMU in the torso
- Torque sensors in all joints except head, wrist and gripper (22 torque sensors total)
- 1 force/torque sensor in each ankle
- 1 force/torque sensor in each wrist
We have two URDF files:
robots/talos/talos.urdf
: - accurate (simplified but made of polygons) collision meshes
- mimic joints for the gripper
 - Not compatible with the DART collision detector (you need to use FCL collision detector - shipped with DART)
- URDF: [talos.urdf]
- Example: [talos.cpp]
Load Talos
C++ auto robot = std::make_shared<robot_dart::robots::Talos>();\n
Python robot = rd.Talos()\n
robot/talos/talos_fast.urdf
: - no collision except for the feet, which are approximated by boxes
- grippers are fixed (no movement is allowed)
- compatible with the DART collision detector
- URDF: [talos_fast.urdf]
- Example: [talos_fast.cpp]
talos_fast.urdf
 is faster because it makes it possible to use the DART collision detector (and has much simpler collision shapes). You should prefer it except if you want to use the grippers (e.g., for manipulation) or are working on self-collisions.
Load Talos Fast
C++ // load talos fast\nauto robot = std::make_shared<robot_dart::robots::TalosFastCollision>();\n// Set actuator types to VELOCITY (for speed)\nrobot->set_actuator_types(\"velocity\");\ndouble dt = 0.001;\nrobot_dart::RobotDARTSimu simu(dt);\n// we can use the DART (fast) collision detector\nsimu.set_collision_detector(\"dart\");\n
Python robot = rd.TalosFastCollision()\n
Please note that the mesh files (.glb) require assimp 5.x (and not assimp4.x usually shipped with ROS). If you cannot load the URDF, please check your assimp version.
"},{"location":"robots/#panda-franka-emika","title":"Panda (Franka Emika)","text":"The Panda is a modern manipulator made by Franka Emika. It is commonly found in many robotics labs.
- Datasheet: [pdf]
- 7 degrees of freedom
- Can be controlled in torque
- 18 kg
- workspace: 855 mm (horizontal), 1190 mm (vertical)
- URDF: [franka.urdf]
- Example: [franka.cpp] The URDF includes the gripper.
Load Franka
C++ auto robot = std::make_shared<robot_dart::robots::Franka>();\n
Python robot = rd.Franka()\n
"},{"location":"robots/#lbr-iiwa-kuka","title":"LBR iiwa (KUKA)","text":"The LBR iiwa is manufactured by KUKA. It is similar to the Panda and is also very common in robotics labs.
- Datasheet: [pdf]
- We implement the 14 kg version
- 29.5 kg
- 7 degrees of freedom
- URDF: [iiwa.urdf]
- Example: [iiwa.cpp]
Load Iiwa
C++ auto robot = std::make_shared<robot_dart::robots::Iiwa>();\n
Python robot = rd.Iiwa()\n
"},{"location":"robots/#icub-iit","title":"iCub (IIT)","text":"The iCub is an open source humanoid robot made by the Istituto Italiano di Tecnologia. There are currently 42 iCubs in the world, and many versions.
- Datasheet (rev 2.3) [pdf]
- 6 force/torque sensors (upper arms, upper legs, ankles)
- IMU in the head
 - We do not simulate the skin
- We do not simulate the hands
 - Our model is close to Inria's iCub, but it has not been checked in detail.
- URDF: [icub.urdf]
- Example [icub.cpp]
Please note that the mesh files (.glb) require assimp 5.x (and not assimp4.x usually shipped with ROS). If you cannot load the URDF, please check your assimp version.
Load iCub
C++ auto robot = std::make_shared<robot_dart::robots::ICub>();\n// Set actuator types to VELOCITY motors so that they stay in position without any controller\nrobot->set_actuator_types(\"velocity\");\nrobot_dart::RobotDARTSimu simu(0.001);\nsimu.set_collision_detector(\"fcl\");\n
Python robot = rd.ICub()\n# Set actuator types to VELOCITY motors so that they stay in position without any controller\nrobot.set_actuator_types(\"velocity\")\nsimu = rd.RobotDARTSimu(0.001)\nsimu.set_collision_detector(\"fcl\")\n
Print IMU sensor measurements
C++ std::cout << \"Angular Position: \" << robot->imu().angular_position_vec().transpose().format(fmt) << std::endl;\nstd::cout << \"Angular Velocity: \" << robot->imu().angular_velocity().transpose().format(fmt) << std::endl;\nstd::cout << \"Linear Acceleration: \" << robot->imu().linear_acceleration().transpose().format(fmt) << std::endl;\nstd::cout << \"=================================\" << std::endl;\n
Python print(\"Angular Position: \", robot.imu().angular_position_vec().transpose())\nprint(\"Angular Velocity: \", robot.imu().angular_velocity().transpose())\nprint(\"Linear Acceleration: \", robot.imu().linear_acceleration().transpose())\nprint(\"=================================\" )\n
Print Force-Torque sensor measurements
C++ std::cout << \"FT ( force): \" << robot->ft_foot_left().force().transpose().format(fmt) << std::endl;\nstd::cout << \"FT (torque): \" << robot->ft_foot_left().torque().transpose().format(fmt) << std::endl;\nstd::cout << \"=================================\" << std::endl;\n
Python print(\"FT ( force): \", robot.ft_foot_left().force().transpose())\nprint(\"FT (torque): \", robot.ft_foot_left().torque().transpose())\nprint(\"=================================\")\n
"},{"location":"robots/#unitree-a1","title":"Unitree A1","text":"A1 is a quadruped robot made by Unitree Robotics.
- IMU in the torso
- We do not simulate the foot pressure sensors (yet)
- One can easily add a depth camera on the head
- URDF: [a1.urdf]
- Example [a1.cpp]
Load A1
C++ auto robot = std::make_shared<robot_dart::robots::A1>();\n
Python robot = rd.A1()\n
Print IMU sensor measurements
C++ std::cout << \"Angular Position: \" << robot->imu().angular_position_vec().transpose().format(fmt) << std::endl;\nstd::cout << \"Angular Velocity: \" << robot->imu().angular_velocity().transpose().format(fmt) << std::endl;\nstd::cout << \"Linear Acceleration: \" << robot->imu().linear_acceleration().transpose().format(fmt) << std::endl;\nstd::cout << \"=================================\" << std::endl;\n
Python print( \"Angular Position: \", robot.imu().angular_position_vec().transpose())\nprint( \"Angular Velocity: \", robot.imu().angular_velocity().transpose())\nprint( \"Linear Acceleration: \", robot.imu().linear_acceleration().transpose())\nprint( \"=================================\")\n
Add a depth camera on the head
How can I attach a camera to a moving link?
Please note that the mesh files (.glb) require assimp 5.x (and not assimp4.x usually shipped with ROS). If you cannot load the URDF, please check your assimp version.
"},{"location":"robots/#dynamixel-based-hexapod-robot-inria-and-others","title":"Dynamixel-based hexapod robot (Inria and others)","text":"This hexapod is a simple 6-legged robot based on dynamixel actuators. It is similar to the robot used in the paper `Robots that can adapt like animals' (Cully et al., 2015).
- 6 legs, 3 degrees of freedom for each leg (18 degrees of freedom)
- simple URDF (no meshes)
- URDF: [pexod.urdf]
- Example: [hexapod.cpp]
Load Hexapod
C++ auto robot = std::make_shared<robot_dart::robots::Hexapod>();\n
Python robot = rd.Hexapod()\n
Load Pexod
C++ auto robot = std::make_shared<robot_dart::Robot>(\"pexod.urdf\");\n
Python robot = rd.Robot(\"pexod.urdf\");\n
"},{"location":"robots/#simple-arm","title":"Simple arm","text":" - A simple arm for educational or debugging purposes
- 5 degrees of freedom
- simple URDF (no meshes)
- URDF: [arm.urdf]
- Example: [arm.cpp]
Load Simple Arm
C++ auto robot = std::make_shared<robot_dart::robots::Arm>();\n
Python robot = rd.Arm()\n
"},{"location":"robots/#loading-custom-robots","title":"Loading Custom Robots","text":"RobotDART gives you the ability to load custom robots that are defined in URDF, SDF, SKEL or MJCF files. For example, you can load a urdf model using:
Load custom Robot
C++ auto your_robot = std::make_shared<robot_dart::Robot>(\"path/to/model.urdf\");\n
Python your_robot = robot_dart.Robot(\"path/to/model.urdf\")\n
Load custom Robot with packages (e.g STL, DAE meshes)
C++ std::vector<std::pair<std::string, std::string>> your_model_packages = ('model', 'path/to/model/dir');\nauto your_robot = std::make_shared<robot_dart::Robot>(\"path/to/model.urdf\", your_model_packages, \"packages\");\n
Python your_model_packages = [(\"model\", \"path/to/model/dir\")]\nyour_robot = robot_dart.Robot(\"path/to/model.urdf\", your_model_packages)\n
"}]}
\ No newline at end of file
diff --git a/docs/sitemap.xml.gz b/docs/sitemap.xml.gz
index 158d71d2..832bb56e 100644
Binary files a/docs/sitemap.xml.gz and b/docs/sitemap.xml.gz differ
diff --git a/scripts/install_osx.sh b/scripts/install_osx.sh
index 564d7e6e..d9003f92 100755
--- a/scripts/install_osx.sh
+++ b/scripts/install_osx.sh
@@ -1,18 +1,6 @@
set -x
-# sudo apt-add-repository -y ppa:dartsim/ppa
-# sudo apt update
-# sudo apt install -y build-essential cmake pkg-config git
-# sudo apt install -y libeigen3-dev libccd-dev libfcl-dev libboost-regex-dev libboost-system-dev libbullet-dev libode-dev liboctomap-dev
-# sudo apt install -y libtinyxml-dev libtinyxml2-dev
-# sudo apt install -y liburdfdom-dev liburdfdom-headers-dev python3-pip python3-numpy
-# sudo apt install -y libxi-dev libxmu-dev freeglut3-dev libopenscenegraph-dev
-# sudo apt install -y libassimp-dev pybind11-dev
-# sudo apt install -y libopenal-dev libglfw3-dev libsdl2-dev libopenexr-dev
-# sudo apt install -y libdevil-dev libpng-dev libfaad-dev libfreetype6-dev libglm-dev
-# sudo apt install -y python-is-python3
-
-brew install cmake eigen fcl tinyxml tinyxml2 urdfdom assimp boost numpy
+brew install cmake eigen fcl tinyxml tinyxml2 urdfdom assimp boost numpy fmt
python -m pip install numpy pytest
# Remove previous attempts
@@ -69,7 +57,7 @@ cd ../..
git clone --depth 1 https://github.com/mosra/magnum-integration.git
cd magnum-integration
mkdir -p build && cd build
-cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/magnum -DMAGNUM_WITH_DART=ON -DMAGNUM_WITH_EIGEN=ON -DMAGNUM_WITH_BULLET=ON -DMAGNUM_WITH_GLM=ON ..
+cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/magnum -DMAGNUM_WITH_DART=ON -DMAGNUM_WITH_EIGEN=ON -DMAGNUM_WITH_BULLET=ON -DMAGNUM_WITH_GLM=OFF ..
make -j
sudo make install
cd ../..
@@ -78,16 +66,16 @@ pwd
export PATH=/opt/magnum/bin:$PATH
export LD_LIBRARY_PATH=/opt/magnum/lib:$LD_LIBRARY_PATH
-# sudo ln -s /usr/bin/python3 /usr/bin/python
pwd
cd ../
pwd
rm -rf temp_robot_dart
cd ..
pwd
+
# RobotDART
./waf configure --prefix /opt/robot_dart --python --dart /opt/dart --corrade_install_dir /opt/magnum --magnum_install_dir /opt/magnum --magnum_plugins_install_dir /opt/magnum --magnum_integration_install_dir /opt/magnum
./waf -j8
./waf examples -j8
-#sudo ./waf install
+sudo ./waf install
diff --git a/scripts/install_ubuntu.sh b/scripts/install_ubuntu.sh
index 56e8482c..6706a1c8 100755
--- a/scripts/install_ubuntu.sh
+++ b/scripts/install_ubuntu.sh
@@ -1,38 +1,42 @@
+CLEAN=0
+
+CLEAN=${4:-$CLEAN}
+
sudo apt-add-repository -y ppa:dartsim/ppa
sudo apt update
sudo apt install -y build-essential cmake pkg-config git
-sudo apt install -y libeigen3-dev libccd-dev libfcl-dev libboost-regex-dev libboost-system-dev libbullet-dev libode-dev liboctomap-dev
-sudo apt install -y libtinyxml-dev libtinyxml2-dev
-sudo apt install -y liburdfdom-dev liburdfdom-headers-dev python3-pip python3-numpy
+sudo apt install -y libboost-regex-dev libboost-system-dev libboost-test-dev
+sudo apt install -y libdart-all-dev
sudo apt install -y libxi-dev libxmu-dev freeglut3-dev libopenscenegraph-dev
sudo apt install -y libassimp-dev pybind11-dev
sudo apt install -y libopenal-dev libglfw3-dev libsdl2-dev libopenexr-dev
sudo apt install -y libdevil-dev libpng-dev libfaad-dev libfreetype6-dev libglm-dev
-sudo apt install -y python-is-python3
+sudo apt install -y python3-pip python3-numpy python-is-python3
-# Remove previous attempts
-rm -rf /opt/magnum
-rm -rf /opt/dart
-rm -rf /opt/robot_dart
+if [ $CLEAN -ne 0 ]; then
+ echo "-- Cleaning.."
+ # Remove previous attempts
+ sudo rm -rf /opt/magnum
+ # rm -rf /opt/dart
+ sudo rm -rf /opt/robot_dart
+fi
# Make temp folder
mkdir -p temp_robot_dart
cd temp_robot_dart
-# DART
-git clone https://github.com/dartsim/dart.git
-cd dart && git checkout v6.12.1
-mkdir build && cd build
-cmake -DDART_BUILD_DARTPY=ON -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/dart ..
-make -j8
-sudo make install
-
-export LD_LIBRARY_PATH=/opt/dart/lib:$LD_LIBRARY_PATH
-export PYTHONPATH=/opt/dart/lib/python3/dist-packages:$PYTHONPATH
+if [ $CLEAN -ne 0 ]; then
+ rm -rf corrade
+ rm -rf magnum
+ rm -rf magnum-plugins
+ rm -rf magnum-integration
+fi
# Magnum related
-cd ../..
+if [ ! -d "corrade" ]
+then
git clone https://github.com/mosra/corrade.git
+fi
cd corrade
mkdir build && cd build
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/magnum ..
@@ -40,22 +44,32 @@ make -j8
sudo make install
cd ../..
+if [ ! -d "magnum" ]
+then
git clone https://github.com/mosra/magnum.git
+fi
cd magnum
mkdir build && cd build
-cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/magnum -DMAGNUM_WITH_AUDIO=ON -DMAGNUM_WITH_DEBUGTOOLS=ON -DMAGNUM_WITH_GL=ON -DMAGNUM_WITH_MESHTOOLS=ON -DMAGNUM_WITH_PRIMITIVES=ON -DMAGNUM_WITH_SCENEGRAPH=ON -DMAGNUM_WITH_SHADERS=ON -DMAGNUM_WITH_TEXT=ON -DMAGNUM_WITH_TEXTURETOOLS=ON -DMAGNUM_WITH_TRADE=ON -DMAGNUM_WITH_GLFWAPPLICATION=ON -DMAGNUM_WITH_WINDOWLESSGLXAPPLICATION=ON -DMAGNUM_WITH_WINDOWLESSEGLAPPLICATION=ON -DMAGNUM_WITH_OPENGLTESTER=ON -DMAGNUM_WITH_ANYAUDIOIMPORTER=ON -DMAGNUM_WITH_ANYIMAGECONVERTER=ON -DMAGNUM_WITH_ANYIMAGEIMPORTER=ON -DMAGNUM_WITH_ANYSCENEIMPORTER=ON -DMAGNUM_WITH_MAGNUMFONT=ON -DMAGNUM_WITH_OBJIMPORTER=ON -DMAGNUM_WITH_TGAIMPORTER=ON -DMAGNUM_WITH_WAVAUDIOIMPORTER=ON -DMAGNUM_WITH_GL_INFO=ON -DMAGNUM_WITH_AL_INFO=ON ..
+cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/magnum -DMAGNUM_WITH_AUDIO=ON -DMAGNUM_WITH_DEBUGTOOLS=ON -DMAGNUM_WITH_GL=ON -DMAGNUM_WITH_MESHTOOLS=ON -DMAGNUM_WITH_PRIMITIVES=ON -DMAGNUM_WITH_SCENEGRAPH=ON -DMAGNUM_WITH_SHADERS=ON -DMAGNUM_WITH_TEXT=ON -DMAGNUM_WITH_TEXTURETOOLS=ON -DMAGNUM_WITH_TRADE=ON -DMAGNUM_WITH_GLFWAPPLICATION=ON -DMAGNUM_WITH_WINDOWLESSGLXAPPLICATION=ON -DMAGNUM_WITH_WINDOWLESSEGLAPPLICATION=ON -DMAGNUM_WITH_OPENGLTESTER=ON -DMAGNUM_WITH_ANYAUDIOIMPORTER=ON -DMAGNUM_WITH_ANYIMAGECONVERTER=ON -DMAGNUM_WITH_ANYIMAGEIMPORTER=ON -DMAGNUM_WITH_ANYSCENEIMPORTER=ON -DMAGNUM_WITH_MAGNUMFONT=ON -DMAGNUM_WITH_OBJIMPORTER=ON -DMAGNUM_WITH_TGAIMPORTER=ON -DMAGNUM_WITH_WAVAUDIOIMPORTER=ON -DMAGNUM_WITH_GL_INFO=ON -DMAGNUM_WITH_AL_INFO=ON -DMAGNUM_TARGET_EGL=ON ..
make -j8
sudo make install
cd ../..
+if [ ! -d "magnum-plugins" ]
+then
git clone https://github.com/mosra/magnum-plugins.git
+fi
cd magnum-plugins
mkdir build && cd build
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/magnum -DMAGNUM_WITH_ASSIMPIMPORTER=ON -DMAGNUM_WITH_DDSIMPORTER=ON -DMAGNUM_WITH_JPEGIMPORTER=ON -DMAGNUM_WITH_OPENGEXIMPORTER=ON -DMAGNUM_WITH_PNGIMPORTER=ON -DMAGNUM_WITH_TINYGLTFIMPORTER=ON -DMAGNUM_WITH_STBTRUETYPEFONT=ON ..
make -j
sudo make install
+cd ../..
+if [ ! -d "magnum-integration" ]
+then
git clone https://github.com/mosra/magnum-integration.git
+fi
cd magnum-integration
mkdir build && cd build
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/magnum -DMAGNUM_WITH_DART=ON -DMAGNUM_WITH_EIGEN=ON -DMAGNUM_WITH_BULLET=ON -DMAGNUM_WITH_GLM=ON ..
@@ -65,14 +79,13 @@ sudo make install
export PATH=/opt/magnum/bin:$PATH
export LD_LIBRARY_PATH=/opt/magnum/lib:$LD_LIBRARY_PATH
-# sudo ln -s /usr/bin/python3 /usr/bin/python
-
cd ../../..
-rm -rf temp_robot_dart
+if [ $CLEAN -ne 0 ]; then
+ rm -rf temp_robot_dart
+fi
# RobotDART
-./waf configure --prefix /opt/robot_dart --python --dart /opt/dart --corrade_install_dir /opt/magnum --magnum_install_dir /opt/magnum --magnum_plugins_install_dir /opt/magnum --magnum_integration_install_dir /opt/magnum
+./waf configure --prefix /opt/robot_dart --python --corrade_install_dir /opt/magnum --magnum_install_dir /opt/magnum --magnum_plugins_install_dir /opt/magnum --magnum_integration_install_dir /opt/magnum
./waf -j8
./waf examples -j8
sudo ./waf install
-
diff --git a/src/docs/docs/faq.md b/src/docs/docs/faq.md
index 7db45045..195e30f9 100644
--- a/src/docs/docs/faq.md
+++ b/src/docs/docs/faq.md
@@ -70,7 +70,7 @@ See the [robots page](robots.md) for details.
To enable graphics in your code, you need to do the following:
-- Install [Magnum](http://magnum.graphics). See the [installation page](install.md) for details.
+- Install [Magnum](http://magnum.graphics). See the [installation page](quick_install.md) for details.
- Create and set a graphics object in the simulation object. Here's an example:
=== "C++"
@@ -355,7 +355,6 @@ We can save the depth images as well:
## **How can I spawn multiple robots in parallel?**
-
### Robot Pool (only in C++)
The best way to do so is to create a Robot pool. With a robot pool you:
@@ -388,12 +387,20 @@ The `creator` function is the function responsible for loading your robot. This
=== "C++"
{{ROBOT_POOL_EVAL}}
+### Python
+
+We have not implemented this feature in `Python` yet. One can emulate the `RobotPool` behavior or create a custom parallel robot loader.
+
## **I need to simulate many worlds with camera sensors in parallel. How can I do this?**
-On [magnum_contexts.cpp](https://github.com/resibots/robot_dart/blob/master/src/examples/magnum_contexts.cpp) you can find an example showcasing the use of many worlds with camera sensors in parallel. The main takeaway is that we need to pre-allocate OpenGL contexts so that each thread can take one and use it to render their worlds.
+Below you can find an example showcasing the use of many worlds with camera sensors in parallel.
=== "C++"
{{CAMERAS_PARALLEL}}
+=== "Python"
+{{CAMERAS_PARALLEL_PYTHON}}
+
+In C++ you are also able to pre-allocate a custom number of OpenGL contexts so that you can take advantage of stronger GPUs.
## **I do not know how to use waf. How can I detect RobotDART from CMake?**
diff --git a/src/docs/docs/install.md b/src/docs/docs/install.md
index ca46be67..0304de8a 100644
--- a/src/docs/docs/install.md
+++ b/src/docs/docs/install.md
@@ -8,99 +8,40 @@
display: none;
}
+
## Manual Installation of RobotDART
+For the quick installation manual, see the [quick installation page](quick_install.md).
+
### Dependencies
#### Required
- Ubuntu (it should work on versions >= 14.04) or OSX
-- Eigen3 (needed by DART)
-- Boost (needed by DART)
+- Eigen3
- DART, http://dartsim.github.io/
#### Optional
+- Boost (for unit tests)
- Magnum (for graphics), https://github.com/mosra/magnum
### Installation of the dependencies
-#### Installing Boost and Eigen3
-
-For Ubuntu-based distributions we should use the following commands to install Eigen3 and Boost:
-
-```bash
-sudo apt-get update
-sudo apt-get install libeigen3-dev libboost-filesystem-dev libboost-system-dev libboost-regex-dev
-```
-
-For OSX with brew:
-
-```bash
-brew install eigen3
-brew install boost
-```
-
-#### Installing DART
-
-In order to use RobotDART, you need to install [DART](http://dartsim.github.io/) (from source).
+**Note:** The following instructions are high-level and assume people with some experience in building/installing software.
-For **Ubuntu systems**, please follow the detailed installation instructions on the [DART documentation website](http://dartsim.github.io/install_dart_on_ubuntu.html#install-required-dependencies). Make sure that you don't forget to add the PPAs as detailed [here](http://dartsim.github.io/install_dart_on_ubuntu.html#adding-personal-package-archives-ppas-for-dart-and-dependencies). What is more, you can enable the `-DART_ENABLE_SIMD` flag in the CMake configuration. In addition, you need the following optional dependency: **DART Parsers**. Lastly, it is recommended to use either the `master` branch or `v6.12.1` tag (and not the one provided in DART's documentation). In short you should do the following:
-
-**Ubuntu <= 14.04 only**
-
-```bash
-sudo apt-add-repository ppa:libccd-debs/ppa
-sudo apt-add-repository ppa:fcl-debs/ppa
-```
+#### Installing system-wide packages
-**For all Ubuntu distributions**
+For Ubuntu-based distributions (>=20.04) we should use the following commands:
```bash
-sudo apt-add-repository ppa:dartsim/ppa
sudo apt-get update
-
-sudo apt-get install build-essential cmake pkg-config git
-sudo apt-get install libeigen3-dev libassimp-dev libccd-dev libfcl-dev libboost-regex-dev libboost-system-dev
-
-sudo apt-get install libtinyxml-dev libtinyxml2-dev
-sudo apt-get install liburdfdom-dev liburdfdom-headers-dev
-
-cd /path/to/tmp/folder
-git clone git://github.com/dartsim/dart.git
-cd dart
-git checkout v6.12.1
-
-mkdir build
-cd build
-cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr ..
-make -j4
-sudo make install
+sudo apt-get install build-essential cmake pkg-config git libboost-regex-dev libboost-system-dev libboost-test-dev pybind11-dev
+sudo apt-get install libdart-all-dev
```
-If you want to install DART somewhere else than `/usr/local`, you should use the `-DCMAKE_INSTALL_PREFIX=/path/to/install/folder` cmake argument.
-
-For **OSX systems** with homebrew, please follow the detailed installation instructions on the [DART documentation website](http://dartsim.github.io/install_dart_on_mac.html#install-from-source-using-homebrew). You need to follow the same procedure as for Ubuntu systems. In short you should do the following:
+For OSX with brew:
```bash
-brew install eigen
-brew install assimp
-brew install libccd
-brew install dartsim/dart/fcl04
-brew install boost
-
-brew install tinyxml
-brew install tinyxml2
-brew install urdfdom
-
-cd /path/to/tmp/folder
-git clone git://github.com/dartsim/dart.git
-cd dart
-git checkout v6.12.1
-
-mkdir build
-cd build
-cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr ..
-make -j4
-sudo make install
+brew install dartsim
```
#### Installing Magnum
@@ -110,7 +51,7 @@ Magnum depends on [Corrade](https://github.com/mosra/corrade) and we are going t
```bash
#installation of Glfw and OpenAL
# Ubuntu
-sudo apt-get install libglfw3-dev libglfw3 libopenal-dev libassimp-dev
+sudo apt-get install libglfw3-dev libglfw3 libassimp-dev libopenal-dev libglfw3-dev libsdl2-dev libopenexr-dev libdevil-dev libpng-dev libfaad-dev libfreetype6-dev
# Mac OSX
brew install glfw3 openal-soft assimp
@@ -129,7 +70,7 @@ git clone https://github.com/mosra/magnum.git
cd magnum
mkdir build && cd build
# Ubuntu
-cmake -DCMAKE_BUILD_TYPE=Release -DWITH_AUDIO=ON -DWITH_DEBUGTOOLS=ON -DWITH_GL=ON -DWITH_MESHTOOLS=ON -DWITH_PRIMITIVES=ON -DWITH_SCENEGRAPH=ON -DWITH_SHADERS=ON -DWITH_TEXT=ON -DWITH_TEXTURETOOLS=ON -DWITH_TRADE=ON -DWITH_GLFWAPPLICATION=ON -DWITH_WINDOWLESSEGLAPPLICATION=ON -DWITH_OPENGLTESTER=ON -DWITH_ANYAUDIOIMPORTER=ON -DWITH_ANYIMAGECONVERTER=ON -DWITH_ANYIMAGEIMPORTER=ON -DWITH_ANYSCENEIMPORTER=ON -DWITH_MAGNUMFONT=ON -DWITH_OBJIMPORTER=ON -DWITH_TGAIMPORTER=ON -DWITH_WAVAUDIOIMPORTER=ON -DTARGET_HEADLESS=ON .. # this will enable almost all features of Magnum that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)
+cmake -DCMAKE_BUILD_TYPE=Release -DWITH_AUDIO=ON -DWITH_DEBUGTOOLS=ON -DWITH_GL=ON -DWITH_MESHTOOLS=ON -DWITH_PRIMITIVES=ON -DWITH_SCENEGRAPH=ON -DWITH_SHADERS=ON -DWITH_TEXT=ON -DWITH_TEXTURETOOLS=ON -DWITH_TRADE=ON -DWITH_GLFWAPPLICATION=ON -DWITH_WINDOWLESSEGLAPPLICATION=ON -DWITH_OPENGLTESTER=ON -DWITH_ANYAUDIOIMPORTER=ON -DWITH_ANYIMAGECONVERTER=ON -DWITH_ANYIMAGEIMPORTER=ON -DWITH_ANYSCENEIMPORTER=ON -DWITH_MAGNUMFONT=ON -DWITH_OBJIMPORTER=ON -DWITH_TGAIMPORTER=ON -DWITH_WAVAUDIOIMPORTER=ON -DTARGET_EGL=ON .. # this will enable almost all features of Magnum that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)
# Mac OSX
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_AUDIO=ON -DWITH_DEBUGTOOLS=ON -DWITH_GL=ON -DWITH_MESHTOOLS=ON -DWITH_PRIMITIVES=ON -DWITH_SCENEGRAPH=ON -DWITH_SHADERS=ON -DWITH_TEXT=ON -DWITH_TEXTURETOOLS=ON -DWITH_TRADE=ON -DWITH_GLFWAPPLICATION=ON -DWITH_WINDOWLESSCGLAPPLICATION=ON -DWITH_OPENGLTESTER=ON -DWITH_ANYAUDIOIMPORTER=ON -DWITH_ANYIMAGECONVERTER=ON -DWITH_ANYIMAGEIMPORTER=ON -DWITH_ANYSCENEIMPORTER=ON -DWITH_MAGNUMFONT=ON -DWITH_OBJIMPORTER=ON -DWITH_TGAIMPORTER=ON -DWITH_WAVAUDIOIMPORTER=ON .. # this will enable almost all features of Magnum that are not necessarily needed for robot_dart (please refer to the documentation of Magnum for more details on selecting only the ones that you need)
make -j
@@ -185,20 +126,7 @@ In short, with `--prefix` you can change the directory where the library will be
For the python bindings of robot_dart, we need `numpy`, `pybind11` and the python bindings of DART (dartpy) to be installed.
-For `numpy` one can install it with `pip` or standard packages. For `pybind11` please follow the instructions on the [dart website](http://dartsim.github.io/install_dartpy_on_ubuntu.html#install-dartpy-from-source) (focus on the pybind11 part, for the other parts follow our instructions above).
-
-For the python bindings of DART, do:
-
-```bash
-cd dart
-
-mkdir build
-cd build
-cmake -DDART_BUILD_DARTPY=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr ..
-make -j4
-sudo make install
-sudo make install-dartpy # for DART >= v6.12.0, we do not need this
-```
+For `numpy` one can install it with `pip` or standard packages. `dartpy` should be installed via the packages above. If not, please see the installation instructions on the main DART website.
Then the compilation of robot_dart is almost identical as before:
@@ -207,7 +135,7 @@ Then the compilation of robot_dart is almost identical as before:
- `./waf configure --python` (`--python` enables the python bindings)
- `./waf`
- Install the library (including the python bindings) as before (no change is needed)
-- Depending on your installation directory you might need to update your `PYTHONPATH`, e.g. `export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python3.8/site-packages/`
+- Depending on your installation directory you might need to update your `PYTHONPATH`, e.g. `export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python3.10/site-packages/`
To run the python examples (for the python examples you need to have enabled the graphics, that is, install Magnum library), run:
diff --git a/src/docs/docs/quick_install.md b/src/docs/docs/quick_install.md
index baf8ddb0..6ce8d143 100644
--- a/src/docs/docs/quick_install.md
+++ b/src/docs/docs/quick_install.md
@@ -8,25 +8,35 @@
display: none;
}
+
## Scripts for Quick Installation of RobotDART
-In this page we provide standalone scripts for installing RobotDART for `Ubuntu` (20.04) and `OSX`. The scripts will install all the required dependencies and RobotDART. Notably, all dependencies that need to be compiled by source and RobotDART will be installed inside the `/opt` folder. This way, one can be rest assured that their system will be clean.
+In this page we provide standalone scripts for installing RobotDART for `Ubuntu` (>=20.04) and `OSX`. The scripts will install all the required dependencies and RobotDART. Notably, all dependencies that need to be compiled from source and RobotDART will be installed inside the `/opt` folder. This way, one can rest assured that their system will be clean.
-### Ubuntu 20.04
+### Ubuntu >=20.04
-To quickly install RobotDART on `Ubuntu 20.04`, we just need to run `./scripts/install_ubuntu.sh` from the root of the repo. In more detail:
+To quickly install RobotDART on `Ubuntu >=20.04`, we just need to run `./scripts/install_ubuntu.sh` from the root of the repo. In more detail:
- `git clone https://github.com/resibots/robot_dart.git`
- `cd robot_dart`
- `./scripts/install_ubuntu.sh`
-This will install everything needed! Once the script is successfully executed, one should add the following to their `~/.bashrc` or `~/.zshrc` file:
+This will install everything needed! Once the script is successfully executed, one should add the following to their `~/.bashrc` or `~/.zshrc` file (you may need to swap the python version to yours[^1]):
```bash
export PATH=/opt/magnum/bin:$PATH
+export LD_LIBRARY_PATH=/opt/magnum/lib:/opt/robot_dart/lib:$LD_LIBRARY_PATH
+export PYTHONPATH=/opt/robot_dart/lib/python3.10/site-packages:$PYTHONPATH
+```
+
+
### OSX
**Coming soon**
+
+
+[^1]: You can run `python --version` to see your version. We only keep the major.minor (ignoring the patch version).
diff --git a/src/docs/include/macros.py b/src/docs/include/macros.py
index 02cdedfa..9377cfc6 100644
--- a/src/docs/include/macros.py
+++ b/src/docs/include/macros.py
@@ -1,5 +1,5 @@
def define_env(env):
- variables = {'IIWA': '\t```cpp\n\tauto robot = std::make_shared();\n\t```', 'ROBOT_GHOST': '\t```cpp\n\t// Add a ghost robot; only visuals, no dynamics, no collision\n\tauto ghost = robot->clone_ghost();\n\tsimu.add_robot(ghost);\n\t```', 'SET_COLLISION_DETECTOR': '\t```cpp\n\tsimu.set_collision_detector("fcl"); // collision_detector can be "dart", "fcl", "ode" or "bullet" (case does not matter)\n\t```', 'SELF_COLLISIONS': '\t```cpp\n\tif (!robot->self_colliding()) {\n\t std::cout << "Self collision is not enabled" << std::endl;\n\t // set self collisions to true and adjacent collisions to false\n\t robot->set_self_collision(true, false);\n\t}\n\t```', 'SIMPLE_CONTROL': '\t```cpp\n\tauto controller1 = std::make_shared(ctrl);\n\trobot->add_controller(controller1);\n\t```', 'CAMERA_SENSOR_RGBD_RECORD_DEPTH': '\t```cpp\n\tcamera->camera().record(true, true); // cameras are recording color images by default, enable depth images as well for this example\n\t```', 'TORQUE_SENSOR': '\t```cpp\n\t// Add torque sensors to the robot\n\tint ct = 0;\n\tstd::vector> tq_sensors(robot->num_dofs());\n\tfor (const auto& joint : robot->dof_names())\n\t tq_sensors[ct++] = simu.add_sensor(robot, joint, 1000);\n\t```', 'FORCE_TORQUE_SENSOR': '\t```cpp\n\t// Add force-torque sensors to the robot\n\tct = 0;\n\tstd::vector> f_tq_sensors(robot->num_dofs());\n\tfor (const auto& joint : robot->dof_names())\n\t f_tq_sensors[ct++] = simu.add_sensor(robot, joint, 1000, "parent_to_child");\n\t```', 'IMU_SENSOR': '\t```cpp\n\t// Add IMU sensors to the robot\n\tct = 0;\n\tstd::vector> imu_sensors(robot->num_bodies());\n\tfor (const auto& body_node : robot->body_names()) {\n\t // _imu(std::make_shared(sensor::IMUConfig(body_node("head"), frequency))),\n\t imu_sensors[ct++] = simu.add_sensor(robot_dart::sensor::IMUConfig(robot->body_node(body_node), 1000));\n\t}\n\t```', 'TORQUE_MEASUREMENT': '\t```cpp\n\t// vector that contains the torque measurement for every joint (scalar)\n\tEigen::VectorXd 
torques_measure(robot->num_dofs());\n\tfor (const auto& tq_sens : tq_sensors)\n\t torques_measure.block<1, 1>(ct++, 0) = tq_sens->torques();\n\t```', 'FORCE_TORQUE_MEASUREMENT': '\t```cpp\n\t// matrix that contains the torque measurement for every joint (3d vector)\n\tEigen::MatrixXd ft_torques_measure(robot->num_dofs(), 3);\n\t// matrix that contains the force measurement for every joint (3d vector)\n\tEigen::MatrixXd ft_forces_measure(robot->num_dofs(), 3);\n\t// matrix that contains the wrench measurement for every joint (6d vector)[torque, force]\n\tEigen::MatrixXd ft_wrench_measure(robot->num_dofs(), 6);\n\tct = 0;\n\tfor (const auto& f_tq_sens : f_tq_sensors) {\n\t ft_torques_measure.block<1, 3>(ct, 0) = f_tq_sens->torque();\n\t ft_forces_measure.block<1, 3>(ct, 0) = f_tq_sens->force();\n\t ft_wrench_measure.block<1, 6>(ct, 0) = f_tq_sens->wrench();\n\t ct++;\n\t}\n\t```', 'IMU_MEASUREMENT': '\t```cpp\n\tEigen::MatrixXd imu_angular_positions_measure(robot->num_bodies(), 3);\n\tEigen::MatrixXd imu_angular_velocities_measure(robot->num_bodies(), 3);\n\tEigen::MatrixXd imu_linear_acceleration_measure(robot->num_bodies(), 3);\n\tct = 0;\n\tfor (const auto& imu_sens : imu_sensors) {\n\t imu_angular_positions_measure.block<1, 3>(ct, 0) = imu_sens->angular_position_vec();\n\t imu_angular_velocities_measure.block<1, 3>(ct, 0) = imu_sens->angular_velocity();\n\t imu_linear_acceleration_measure.block<1, 3>(ct, 0) = imu_sens->linear_acceleration();\n\t ct++;\n\t}\n\t```', 'RGB_SENSOR': '\t```cpp\n\t// a nested std::vector (w*h*3) of the last image taken can be retrieved\n\tauto rgb_image = camera->image();\n\t```', 'RGB_SENSOR_MEASURE': '\t```cpp\n\t// we can also save them to png\n\trobot_dart::gui::save_png_image("camera-small.png", rgb_image);\n\t// convert an rgb image to grayscale (useful in some cases)\n\tauto gray_image = robot_dart::gui::convert_rgb_to_grayscale(rgb_image);\n\trobot_dart::gui::save_png_image("camera-gray.png", gray_image);\n\t```', 
'RGB_D_SENSOR': '\t```cpp\n\t// get the depth image from a camera\n\t// with a version for visualization or bigger differences in the output\n\tauto rgb_d_image = camera->depth_image();\n\t// and the raw values that can be used along with the camera parameters to transform the image to point-cloud\n\tauto rgb_d_image_raw = camera->raw_depth_image();\n\t```', 'RGB_D_SENSOR_MEASURE': '\t```cpp\n\trobot_dart::gui::save_png_image("camera-depth.png", rgb_d_image);\n\trobot_dart::gui::save_png_image("camera-depth-raw.png", rgb_d_image_raw);\n\t```', 'ROBOT_CONTROL': '\t```cpp\n\tclass MyController : public robot_dart::control::RobotControl {\n\tpublic:\n\t MyController() : robot_dart::control::RobotControl() {}\n\t\n\t MyController(const Eigen::VectorXd& ctrl, bool full_control) : robot_dart::control::RobotControl(ctrl, full_control) {}\n\t MyController(const Eigen::VectorXd& ctrl, const std::vector& dof_names) : robot_dart::control::RobotControl(ctrl, dof_names) {}\n\t\n\t void configure() override\n\t {\n\t _active = true;\n\t }\n\t\n\t Eigen::VectorXd calculate(double) override\n\t {\n\t auto robot = _robot.lock();\n\t Eigen::VectorXd cmd = 100. 
* (_ctrl - robot->positions(_controllable_dofs));\n\t return cmd;\n\t }\n\t std::shared_ptr clone() const override\n\t {\n\t return std::make_shared(*this);\n\t }\n\t};\n\t```', 'SIMPLE_ARM': '\t```cpp\n\tauto robot = std::make_shared();\n\t```', 'KINEMATICS': '\t```cpp\n\t// Get Joint Positions(Angles)\n\tauto joint_positions = robot->positions();\n\t\n\t// Get Joint Velocities\n\tauto joint_vels = robot->velocities();\n\t\n\t// Get Joint Accelerations\n\tauto joint_accs = robot->accelerations();\n\t\n\t// Get link_name(str) Transformation matrix with respect to the world frame.\n\tauto eef_tf = robot->body_pose(link_name);\n\t\n\t// Get translation vector from transformation matrix\n\tauto eef_pos = eef_tf.translation();\n\t\n\t// Get rotation matrix from tranformation matrix\n\tauto eef_rot = eef_tf.rotation();\n\t\n\t// Get link_name 6d pose vector [logmap(eef_tf.linear()), eef_tf.translation()]\n\tauto eef_pose_vec = robot->body_pose_vec(link_name);\n\t\n\t// Get link_name 6d velocity vector [angular, cartesian]\n\tauto eef_vel = robot->body_velocity(link_name);\n\t\n\t// Get link_name 6d acceleration vector [angular, cartesian]\n\tauto eef_acc = robot->body_acceleration(link_name);\n\t\n\t// Jacobian targeting the origin of link_name(str)\n\tauto jacobian = robot->jacobian(link_name);\n\t\n\t// Jacobian time derivative\n\tauto jacobian_deriv = robot->jacobian_deriv(link_name);\n\t\n\t// Center of Mass Jacobian\n\tauto com_jacobian = robot->com_jacobian();\n\t\n\t// Center of Mass Jacobian Time Derivative\n\tauto com_jacobian_deriv = robot->com_jacobian_deriv();\n\t```', 'DYNAMICS': "\t```cpp\n\t// Get Joint Forces\n\tauto joint_forces = robot->forces();\n\t\n\t// Get link's mass\n\tauto eef_mass = robot->body_mass(link_name);\n\t\n\t// Mass Matrix of robot\n\tauto mass_matrix = robot->mass_matrix();\n\t\n\t// Inverse of Mass Matrix\n\tauto inv_mass_matrix = robot->inv_mass_matrix();\n\t\n\t// Augmented Mass matrix\n\tauto aug_mass_matrix = 
robot->aug_mass_matrix();\n\t\n\t// Inverse of Augmented Mass matrix\n\tauto inv_aug_mass_matrix = robot->inv_aug_mass_matrix();\n\t\n\t// Coriolis Force vector\n\tauto coriolis = robot->coriolis_forces();\n\t\n\t// Gravity Force vector\n\tauto gravity = robot->gravity_forces();\n\t\n\t// Combined vector of Coriolis Force and Gravity Force\n\tauto coriolis_gravity = robot->coriolis_gravity_forces();\n\t\n\t// Constraint Force Vector\n\tauto constraint_forces = robot->constraint_forces();\n\t```", 'FRANKA': '\t```cpp\n\tauto robot = std::make_shared();\n\t```', 'PD_CONTROL': '\t```cpp\n\t// add a PD-controller to the arm\n\t// set desired positions\n\tEigen::VectorXd ctrl = robot_dart::make_vector({0., M_PI / 4., 0., -M_PI / 4., 0., M_PI / 2., 0., 0.});\n\t\n\t// add the controller to the robot\n\tauto controller = std::make_shared(ctrl);\n\trobot->add_controller(controller);\n\tcontroller->set_pd(300., 50.);\n\t```', 'ADD_NEW_CAMERA': '\t```cpp\n\t// Add camera\n\tauto camera = std::make_shared(graphics->magnum_app(), 256, 256);\n\t```', 'MANIPULATE_CAM_SEP': '\t```cpp\n\tcamera->camera().set_far_plane(5.f);\n\tcamera->camera().set_near_plane(0.01f);\n\tcamera->camera().set_fov(60.0f);\n\t```', 'MANIPULATE_CAM': '\t```cpp\n\tcamera->camera().set_camera_params(5., // far plane\n\t 0.01f, // near plane\n\t 60.0f, // field of view\n\t 600, // width\n\t 400 // height\n\t);\n\t```', 'RECORD_VIDEO_CAMERA': '\t```cpp\n\t// cameras can also record video\n\tcamera->record_video("video-camera.mp4");\n\t```', 'CAM_POSITION': '\t```cpp\n\t// set the position of the camera, and the position where the main camera is looking at\n\tEigen::Vector3d cam_pos = {-0.5, -3., 0.75};\n\tEigen::Vector3d cam_looks_at = {0.5, 0., 0.2};\n\tcamera->look_at(cam_pos, cam_looks_at);\n\t```', 'CAM_ATTACH': '\t```cpp\n\tEigen::Isometry3d tf;\n\ttf = Eigen::AngleAxisd(3.14, Eigen::Vector3d{1., 0., 0.});\n\ttf *= Eigen::AngleAxisd(1.57, Eigen::Vector3d{0., 0., 1.});\n\ttf.translation() = 
Eigen::Vector3d(0., 0., 0.1);\n\tcamera->attach_to_body(robot->body_node("iiwa_link_ee"), tf); // cameras are looking towards -z by default\n\t```', 'HEXAPOD': '\t```cpp\n\tauto robot = std::make_shared();\n\t```', 'HELLO_WORLD_INCLUDE': '\t```cpp\n\t#include \n\t\n\t#ifdef GRAPHIC\n\t#include \n\t#endif\n\t```', 'HELLO_WORLD_ROBOT_CREATION': '\t```cpp\n\tauto robot = std::make_shared("pexod.urdf");\n\t```', 'HELLO_WORLD_ROBOT_PLACING': '\t```cpp\n\trobot->set_base_pose(robot_dart::make_tf({0., 0., 0.2}));\n\t```', 'HELLO_WORLD_ROBOT_SIMU': '\t```cpp\n\trobot_dart::RobotDARTSimu simu(0.001); // dt=0.001, 1KHz simulation\n\tsimu.add_floor();\n\tsimu.add_robot(robot);\n\t```', 'HELLO_WORLD_ROBOT_GRAPHIC': '\t```cpp\n\tauto graphics = std::make_shared();\n\tsimu.set_graphics(graphics);\n\tgraphics->look_at({0.5, 3., 0.75}, {0.5, 0., 0.2});\n\t```', 'HELLO_WORLD_ROBOT_RUN': '\t```cpp\n\tsimu.run(10.);\n\t```', 'INIT_SIMU': '\t```cpp\n\t// choose time step of 0.001 seconds\n\trobot_dart::RobotDARTSimu simu(0.001);\n\t```', 'MODIFY_SIMU_DT': '\t```cpp\n\t// set timestep to 0.005 and update control frequency(bool)\n\tsimu.set_timestep(0.005, true);\n\t```', 'SIMU_GRAVITY': '\t```cpp\n\t// Set gravitational force of mars\n\tEigen::Vector3d mars_gravity = {0., 0., -3.721};\n\tsimu.set_gravity(mars_gravity);\n\t```', 'GRAPHICS_PARAMS': '\t```cpp\n\trobot_dart::gui::magnum::GraphicsConfiguration configuration;\n\t// We can change the width/height of the window (or camera image dimensions)\n\tconfiguration.width = 1280;\n\tconfiguration.height = 960;\n\tconfiguration.title = "Graphics Tutorial"; // We can set a title for our window\n\t\n\t// We can change the configuration for shadows\n\tconfiguration.shadowed = true;\n\tconfiguration.transparent_shadows = true;\n\tconfiguration.shadow_map_size = 1024;\n\t\n\t// We can also alter some specifications for the lighting\n\tconfiguration.max_lights = 3; // maximum number of lights for our scene 
[default=3]\n\tconfiguration.specular_strength = 0.25; // strength of the specular component\n\t\n\t// Some extra configuration for the main camera\n\tconfiguration.draw_main_camera = true;\n\tconfiguration.draw_debug = true;\n\tconfiguration.draw_text = true;\n\t\n\t// We can also change the background color [default=black]\n\tconfiguration.bg_color = Eigen::Vector4d{1.0, 1.0, 1.0, 1.0};\n\t\n\t// Create the graphics object with the configuration as parameter\n\tauto graphics = std::make_shared(configuration);\n\t```', 'SHADOWS_GRAPHICS': '\t```cpp\n\t// Disable shadows\n\tgraphics->enable_shadows(false, false);\n\tsimu.run(1.);\n\t// Enable shadows only for non-transparent objects\n\tgraphics->enable_shadows(true, false);\n\tsimu.run(1.);\n\t// Enable shadows for transparent objects as well\n\tgraphics->enable_shadows(true, true);\n\tsimu.run(1.);\n\t```', 'CLR_LIGHT': '\t```cpp\n\t// Clear Lights\n\tgraphics->clear_lights();\n\t```', 'LIGHT_MATERIAL': '\t```cpp\n\t// Create Light material\n\tMagnum::Float shininess = 1000.f;\n\tMagnum::Color4 white = {1.f, 1.f, 1.f, 1.f};\n\t\n\t// ambient, diffuse, specular\n\tauto custom_material = robot_dart::gui::magnum::gs::Material(white, white, white, shininess);\n\t```', 'POINT_LIGHT': '\t```cpp\n\t// Create point light\n\tMagnum::Vector3 position = {0.f, 0.f, 2.f};\n\tMagnum::Float intensity = 1.f;\n\tMagnum::Vector3 attenuation_terms = {1.f, 0.f, 0.f};\n\tauto point_light = robot_dart::gui::magnum::gs::create_point_light(position, custom_material, intensity, attenuation_terms);\n\tgraphics->add_light(point_light);\n\t```', 'DIRECTIONAL_LIGHT': '\t```cpp\n\t// Create directional light\n\tMagnum::Vector3 direction = {-1.f, -1.f, -1.f};\n\tauto directional_light = robot_dart::gui::magnum::gs::create_directional_light(direction, custom_material);\n\tgraphics->add_light(directional_light);\n\t```', 'SPOT_LIGHT': '\t```cpp\n\t// Create spot light\n\tMagnum::Vector3 position = {0.f, 0.f, 1.f};\n\tMagnum::Vector3 direction = 
{-1.f, -1.f, -1.f};\n\tMagnum::Float intensity = 1.f;\n\tMagnum::Vector3 attenuation_terms = {1.f, 0.f, 0.f};\n\tMagnum::Float spot_exponent = M_PI;\n\tMagnum::Float spot_cut_off = M_PI / 8;\n\tauto spot_light = robot_dart::gui::magnum::gs::create_spot_light(position, custom_material, direction, spot_exponent, spot_cut_off, intensity, attenuation_terms);\n\tgraphics->add_light(spot_light);\n\t```', 'ROBOT_POOL_INCLUDE': '\t```cpp\n\t#include \n\t```', 'ROBOT_POOL_GLOBAL_NAMESPACE': '\t```cpp\n\tnamespace pool {\n\t // This function should load one robot: here we load Talos\n\t inline std::shared_ptr robot_creator()\n\t {\n\t return std::make_shared();\n\t }\n\t\n\t // To create the object we need to pass the robot_creator function and the number of maximum parallel threads\n\t robot_dart::RobotPool robot_pool(robot_creator, NUM_THREADS);\n\t} // namespace pool\n\t```', 'ROBOT_POOL_EVAL': '\t```cpp\n\tinline void eval_robot(int i)\n\t{\n\t // We get one available robot\n\t auto robot = pool::robot_pool.get_robot();\n\t std::cout << "Robot " << i << " got [" << robot->skeleton() << "]" << std::endl;\n\t\n\t /// --- some robot_dart code ---\n\t simulate_robot(robot);\n\t // --- do something with the result\n\t\n\t std::cout << "End of simulation " << i << std::endl;\n\t\n\t // CRITICAL : free your robot !\n\t pool::robot_pool.free_robot(robot);\n\t\n\t std::cout << "Robot " << i << " freed!" 
<< std::endl;\n\t}\n\t```', 'ROBOT_POOL_CREATE_THREADS': '\t```cpp\n\t// for the example, we run NUM_THREADS threads of eval_robot()\n\tstd::vector threads(NUM_THREADS * 2); // *2 to see some reuse\n\tfor (size_t i = 0; i < threads.size(); ++i)\n\t threads[i] = std::thread(eval_robot, i); // eval_robot is the function that uses the robot\n\t```', 'TALOS_FAST': '\t```cpp\n\t// load talos fast\n\tauto robot = std::make_shared();\n\t\n\t// Set actuator types to VELOCITY (for speed)\n\trobot->set_actuator_types("velocity");\n\t\n\tdouble dt = 0.001;\n\trobot_dart::RobotDARTSimu simu(dt);\n\t// we can use the DART (fast) collision detector\n\tsimu.set_collision_detector("dart");\n\t```', 'CAMERAS_PARALLEL': '\t```cpp\n\t// Load robot from URDF\n\tauto global_robot = std::make_shared();\n\t\n\tstd::vector workers;\n\t\n\t// Set maximum number of parallel GL contexts (this is GPU-dependent)\n\trobot_dart::gui::magnum::GlobalData::instance()->set_max_contexts(4);\n\t\n\t// We want 15 parallel simulations with different GL context each\n\tsize_t N_workers = 15;\n\tstd::mutex mutex;\n\tsize_t id = 0;\n\t// Launch the workers\n\tfor (size_t i = 0; i < N_workers; i++) {\n\t workers.push_back(std::thread([&] {\n\t mutex.lock();\n\t size_t index = id++;\n\t mutex.unlock();\n\t\n\t // Get the GL context -- this is a blocking call\n\t // will wait until one GL context is available\n\t // get_gl_context(gl_context); // this call will not sleep between failed queries\n\t get_gl_context_with_sleep(gl_context, 20); // this call will sleep 20ms between each failed query\n\t\n\t // Do the simulation\n\t auto robot = global_robot->clone();\n\t\n\t robot_dart::RobotDARTSimu simu(0.001);\n\t\n\t Eigen::VectorXd ctrl = robot_dart::make_vector({0., M_PI / 3., 0., -M_PI / 4., 0., 0., 0.});\n\t\n\t auto controller = std::make_shared(ctrl);\n\t robot->add_controller(controller);\n\t controller->set_pd(300., 50.);\n\t\n\t // Magnum graphics\n\t robot_dart::gui::magnum::GraphicsConfiguration 
configuration = robot_dart::gui::magnum::WindowlessGraphics::default_configuration();\n\t\n\t configuration.width = 1024;\n\t configuration.height = 768;\n\t auto graphics = std::make_shared(configuration);\n\t simu.set_graphics(graphics);\n\t // Position the camera differently for each thread to visualize the difference\n\t graphics->look_at({0.4 * index, 3.5 - index * 0.1, 2.}, {0., 0., 0.25});\n\t // record images from main camera/graphics\n\t // graphics->set_recording(true); // WindowlessGLApplication records images by default\n\t\n\t simu.add_robot(robot);\n\t simu.run(6);\n\t\n\t // Save the image for verification\n\t robot_dart::gui::save_png_image("camera_" + std::to_string(index) + ".png",\n\t graphics->image());\n\t\n\t // Release the GL context for another thread to use\n\t release_gl_context(gl_context);\n\t }));\n\t}\n\t\n\t// Wait for all the workers\n\tfor (size_t i = 0; i < workers.size(); i++) {\n\t workers[i].join();\n\t}\n\t```', 'LOAD_IICUB': '\t```cpp\n\tauto robot = std::make_shared();\n\t// Set actuator types to VELOCITY motors so that they stay in position without any controller\n\trobot->set_actuator_types("velocity");\n\trobot_dart::RobotDARTSimu simu(0.001);\n\tsimu.set_collision_detector("fcl");\n\t```', 'ICUB_PRINT_IMU': '\t```cpp\n\tstd::cout << "Angular Position: " << robot->imu().angular_position_vec().transpose().format(fmt) << std::endl;\n\tstd::cout << "Angular Velocity: " << robot->imu().angular_velocity().transpose().format(fmt) << std::endl;\n\tstd::cout << "Linear Acceleration: " << robot->imu().linear_acceleration().transpose().format(fmt) << std::endl;\n\tstd::cout << "=================================" << std::endl;\n\t```', 'ICUB_PRINT_FT': '\t```cpp\n\tstd::cout << "FT ( force): " << robot->ft_foot_left().force().transpose().format(fmt) << std::endl;\n\tstd::cout << "FT (torque): " << robot->ft_foot_left().torque().transpose().format(fmt) << std::endl;\n\tstd::cout << "=================================" << 
std::endl;\n\t```', 'SET_ACTUATOR': '\t```cpp\n\t// Set all DoFs to same actuator\n\trobot->set_actuator_types("servo"); // actuator types can be "servo", "torque", "velocity", "passive", "locked", "mimic"\n\t// You can also set actuator types separately\n\trobot->set_actuator_type("torque", "iiwa_joint_5");\n\t```', 'POSITIONS_ENFORCED': '\t```cpp\n\t// Εnforce joint position and velocity limits\n\trobot->set_position_enforced(true);\n\t```', 'MODIFY_LIMITS': '\t```cpp\n\t// Modify Position Limits\n\tEigen::VectorXd pos_upper_lims(7);\n\tpos_upper_lims << 2.096, 2.096, 2.096, 2.096, 2.096, 2.096, 2.096;\n\trobot->set_position_upper_limits(pos_upper_lims, dof_names);\n\trobot->set_position_lower_limits(-pos_upper_lims, dof_names);\n\t\n\t// Modify Velocity Limits\n\t\n\tEigen::VectorXd vel_upper_lims(7);\n\tvel_upper_lims << 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5;\n\trobot->set_velocity_upper_limits(vel_upper_lims, dof_names);\n\trobot->set_velocity_lower_limits(-vel_upper_lims, dof_names);\n\t\n\t// Modify Force Limits\n\t\n\tEigen::VectorXd force_upper_lims(7);\n\tforce_upper_lims << 150, 150, 150, 150, 150, 150, 150;\n\trobot->set_force_upper_limits(force_upper_lims, dof_names);\n\trobot->set_force_lower_limits(-force_upper_lims, dof_names);\n\t\n\t// Modify Acceleration Limits\n\tEigen::VectorXd acc_upper_lims(7);\n\tacc_upper_lims << 1500, 1500, 1500, 1500, 1500, 1500, 1500;\n\trobot->set_acceleration_upper_limits(acc_upper_lims, dof_names);\n\trobot->set_acceleration_lower_limits(-acc_upper_lims, dof_names);\n\t```', 'MODIFY_COEFFS': '\t```cpp\n\t// Modify Damping Coefficients\n\tstd::vector damps = {0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4};\n\trobot->set_damping_coeffs(damps, dof_names);\n\t\n\t// Modify Coulomb Frictions\n\tstd::vector cfrictions = {0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001};\n\trobot->set_coulomb_coeffs(cfrictions, dof_names);\n\t\n\t// Modify Spring Stiffness\n\tstd::vector stiffnesses = {0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 
0.001};\n\trobot->set_spring_stiffnesses(stiffnesses, dof_names);\n\t```', 'TALOS': '\t```cpp\n\tauto robot = std::make_shared();\n\t```', 'RECORD_VIDEO_ROBOT_GRAPHICS_PARAMS': '\t```cpp\n\tgraphics->record_video("talos_dancing.mp4");\n\t```', 'A1': '\t```cpp\n\tauto robot = std::make_shared();\n\t```', 'A1_PRINT_IMU': '\t```cpp\n\tstd::cout << "Angular Position: " << robot->imu().angular_position_vec().transpose().format(fmt) << std::endl;\n\tstd::cout << "Angular Velocity: " << robot->imu().angular_velocity().transpose().format(fmt) << std::endl;\n\tstd::cout << "Linear Acceleration: " << robot->imu().linear_acceleration().transpose().format(fmt) << std::endl;\n\tstd::cout << "=================================" << std::endl;\n\t```', 'ROBOT_CONTROL_PYTHON': '\t```python\n\tclass MyController(rd.RobotControl):\n\t def __init__(self, ctrl, full_control):\n\t rd.RobotControl.__init__(self, ctrl, full_control)\n\t\n\t def __init__(self, ctrl, controllable_dofs):\n\t rd.RobotControl.__init__(self, ctrl, controllable_dofs)\n\t\n\t def configure(self):\n\t self._active = True\n\t\n\t def calculate(self, t):\n\t target = np.array([self._ctrl])\n\t cmd = 100*(target-self.robot().positions(self._controllable_dofs))\n\t return cmd[0]\n\t\n\t # TO-DO: This is NOT working at the moment!\n\t def clone(self):\n\t return MyController(self._ctrl, self._controllable_dofs)\n\t```', 'FRANKA_PYTHON': '\t```python\n\trobot = rd.Franka()\n\t```', 'PD_CONTROL_PYTHON': '\t```python\n\t# add a PD-controller to the arm\n\t# set desired positions\n\tctrl = [0., np.pi / 4., 0., -np.pi / 4., 0., np.pi / 2., 0., 0.]\n\t\n\t# add the controller to the robot\n\tcontroller = rd.PDControl(ctrl)\n\trobot.add_controller(controller)\n\tcontroller.set_pd(300., 50.)\n\t```', 'TALOS_FAST_PYTHON': '\t```python\n\trobot = rd.TalosFastCollision()\n\t```', 'RECORD_VIDEO_ROBOT_GRAPHICS_PARAMS_PYTHON': '\t```python\n\tgraphics.record_video("talos_dancing.mp4")\n\t```', 'IIWA_PYTHON': '\t```python\n\trobot = 
rd.Iiwa()\n\t```', 'ROBOT_GHOST_PYTHON': '\t```python\n\t# Add a ghost robot; only visuals, no dynamics, no collision\n\tghost = robot.clone_ghost()\n\tsimu.add_robot(ghost)\n\t```', 'HELLO_WORLD_INCLUDE_PYTHON': '\t```python\n\timport RobotDART as rd\n\t```', 'HELLO_WORLD_ROBOT_CREATION_PYTHON': '\t```python\n\trobot = rd.Robot("pexod.urdf");\n\t```', 'HELLO_WORLD_ROBOT_PLACING_PYTHON': '\t```python\n\trobot.set_base_pose([0., 0., 0., 0., 0., 0.2])\n\t```', 'HELLO_WORLD_ROBOT_SIMU_PYTHON': '\t```python\n\tsimu = rd.RobotDARTSimu(0.001); # dt=0.001, 1KHz simulation\n\tsimu.add_floor();\n\tsimu.add_robot(robot);\n\t```', 'HELLO_WORLD_ROBOT_GRAPHIC_PYTHON': '\t```python\n\tgraphics = rd.gui.Graphics()\n\tsimu.set_graphics(graphics)\n\tgraphics.look_at([0.5, 3., 0.75], [0.5, 0., 0.2])\n\t```', 'HELLO_WORLD_ROBOT_RUN_PYTHON': '\t```python\n\tsimu.run(10.)\n\t```', 'CAMERA_SENSOR_RGBD_RECORD_DEPTH_PYTHON': '\t```python\n\t# cameras are recording color images by default, enable depth images as well for this example\n\tcamera.camera().record(True, True)\n\t```', 'TORQUE_SENSOR_PYTHON': '\t```python\n\t# Add torque sensors to the robot\n\ttq_sensors = np.empty(robot.num_dofs(), dtype=rd.sensor.Torque)\n\tct = 0\n\tfor joint in robot.dof_names():\n\t simu.add_sensor(rd.sensor.Torque(robot, joint, 1000))\n\t tq_sensors[ct] = simu.sensors()[-1]\n\t ct += 1\n\t```', 'FORCE_TORQUE_SENSOR_PYTHON': '\t```python\n\t# Add force-torque sensors to the robot\n\tf_tq_sensors = np.empty(robot.num_dofs(), dtype=rd.sensor.ForceTorque)\n\tct = 0\n\tfor joint in robot.dof_names():\n\t simu.add_sensor(rd.sensor.ForceTorque(\n\t robot, joint, 1000, "parent_to_child"))\n\t f_tq_sensors[ct] = simu.sensors()[-1]\n\t print(f_tq_sensors)\n\t ct += 1\n\t```', 'IMU_SENSOR_PYTHON': '\t```python\n\t# Add IMU sensors to the robot\n\tct = 0\n\timu_sensors = np.empty(robot.num_bodies(), dtype=rd.sensor.IMU)\n\tfor body_node in robot.body_names():\n\t simu.add_sensor(rd.sensor.IMU(\n\t 
rd.sensor.IMUConfig(robot.body_node(body_node), 1000)))\n\t imu_sensors[ct] = simu.sensors()[-1]\n\t ct += 1\n\t```', 'TORQUE_MEASUREMENT_PYTHON': '\t```python\n\t# vector that contains the torque measurement for every joint (scalar)\n\ttorques_measure = np.empty(robot.num_dofs())\n\tct = 0\n\tfor tq_sens in tq_sensors:\n\t torques_measure[ct] = tq_sens.torques()\n\t ct += 1\n\t```', 'FORCE_TORQUE_MEASUREMENT_PYTHON': '\t```python\n\t# matrix that contains the torque measurement for every joint (3d vector)\n\tft_torques_measure = np.empty([robot.num_dofs(), 3])\n\t# matrix that contains the force measurement for every joint (3d vector)\n\tft_forces_measure = np.empty([robot.num_dofs(), 3])\n\t# matrix that contains the wrench measurement for every joint (6d vector)[torque, force]\n\tft_wrench_measure = np.empty([robot.num_dofs(), 6])\n\tct = 0\n\tfor f_tq_sens in f_tq_sensors:\n\t ft_torques_measure[ct, :] = f_tq_sens.torque()\n\t ft_forces_measure[ct, :] = f_tq_sens.force()\n\t ft_wrench_measure[ct, :] = f_tq_sens.wrench()\n\t ct += 1\n\t\n\t```', 'IMU_MEASUREMENT_PYTHON': '\t```python\n\timu_angular_positions_measure = np.empty([robot.num_bodies(), 3])\n\timu_angular_velocities_measure = np.empty([robot.num_bodies(), 3])\n\timu_linear_acceleration_measure = np.empty([robot.num_bodies(), 3])\n\tct = 0\n\tfor imu_sens in imu_sensors:\n\t imu_angular_positions_measure[ct,:] = imu_sens.angular_position_vec()\n\t imu_angular_velocities_measure[ct, :] = imu_sens.angular_velocity()\n\t imu_linear_acceleration_measure[ct,:] = imu_sens.linear_acceleration()\n\t ct += 1\n\t\n\t```', 'RGB_SENSOR_PYTHON': '\t```python\n\t# a nested array (w*h*3) of the last image taken can be retrieved\n\trgb_image = camera.image()\n\t```', 'RGB_SENSOR_MEASURE_PYTHON': '\t```python\n\t# we can also save them to png\n\trd.gui.save_png_image("camera-small.png", rgb_image)\n\t# convert an rgb image to grayscale (useful in some cases)\n\tgray_image = 
rd.gui.convert_rgb_to_grayscale(rgb_image)\n\trd.gui.save_png_image("camera-gray.png", gray_image)\n\t```', 'RGB_D_SENSOR_PYTHON': '\t```python\n\t# get the depth image from a camera\n\t# with a version for visualization or bigger differences in the output\n\trgb_d_image = camera.depth_image()\n\t# and the raw values that can be used along with the camera parameters to transform the image to point-cloud\n\trgb_d_image_raw = camera.raw_depth_image()\n\t```', 'RGB_D_SENSOR_MEASURE_PYTHON': '\t```python\n\trd.gui.save_png_image("camera-depth.png", rgb_d_image)\n\trd.gui.save_png_image("camera-depth-raw.png", rgb_d_image_raw)\n\t```', 'SIMPLE_CONTROL_PYTHON': '\t```python\n\tcontroller1 = rd.SimpleControl(ctrl)\n\trobot.add_controller(controller1)\n\t```', 'A1_PYTHON': '\t```python\n\trobot = rd.A1()\n\t```', 'A1_PRINT_IMU_PYTHON': '\t```python\n\tprint( "Angular Position: ", robot.imu().angular_position_vec().transpose())\n\tprint( "Angular Velocity: ", robot.imu().angular_velocity().transpose())\n\tprint( "Linear Acceleration: ", robot.imu().linear_acceleration().transpose())\n\tprint( "=================================")\n\t```', 'ADD_NEW_CAMERA_PYTHON': '\t```python\n\t# Add camera\n\tcamera = rd.sensor.Camera(graphics.magnum_app(), 32, 32)\n\t```', 'MANIPULATE_CAM_SEP_PYTHON': '\t```python\n\tcamera.camera().set_far_plane(5.)\n\tcamera.camera().set_near_plane(0.01)\n\tcamera.camera().set_fov(60.0)\n\t```', 'MANIPULATE_CAM_PYTHON': '\t```python\n\tcamera.camera().set_camera_params(5., #far plane\n\t 0.01, #near plane\n\t 60.0, # field of view\n\t 600, # width\n\t 400) #height\n\t```', 'RECORD_VIDEO_CAMERA_PYTHON': '\t```python\n\t\n\t# cameras can also record video\n\tcamera.record_video("video-camera.mp4")\n\t```', 'CAM_POSITION_PYTHON': '\t```python\n\t# set the position of the camera, and the position where the main camera is looking at\n\tcam_pos = [-0.5, -3., 0.75]\n\tcam_looks_at = [0.5, 0., 0.2]\n\tcamera.look_at(cam_pos, cam_looks_at)\n\t```', 
'CAM_ATTACH_PYTHON': '\t```python\n\ttf = dartpy.math.Isometry3()\n\trot = dartpy.math.AngleAxis(3.14, [1., 0., 0.])\n\trot = rot.multiply( dartpy.math.AngleAxis(1.57, [0., 0., 1.])).to_rotation_matrix()\n\ttf.set_translation([0., 0., 0.1])\n\tcamera.attach_to_body(robot.body_node("iiwa_link_ee"), tf) # cameras are looking towards -z by default\n\t```', 'SET_COLLISION_DETECTOR_PYTHON': '\t```python\n\tsimu.set_collision_detector("fcl") # collision_detector can be "dart", "fcl", "ode" or "bullet" (case does not matter)\n\t```', 'SELF_COLLISIONS_PYTHON': '\t```python\n\tif(not robot.self_colliding()):\n\t print("Self collision is not enabled")\n\t # set self collisions to true and adjacent collisions to false\n\t robot.set_self_collision(True, False)\n\t```', 'SIMPLE_ARM_PYTHON': '\t```python\n\trobot = rd.Arm()\n\t```', 'SET_ACTUATOR_PYTHON': '\t```python\n\t# Set all DoFs to same actuator\n\t# actuator types can be "servo", "torque", "velocity", "passive", "locked", "mimic"\n\trobot.set_actuator_types("servo")\n\t# You can also set actuator types separately\n\trobot.set_actuator_type("torque", "iiwa_joint_5")\n\t```', 'POSITIONS_ENFORCED_PYTHON': '\t```python\n\t# Εnforce joint position and velocity limits\n\trobot.set_position_enforced(True)\n\t```', 'MODIFY_LIMITS_PYTHON': '\t```python\n\t# Modify Position Limits\n\tpos_upper_lims = np.array([2.096, 2.096, 2.096, 2.096, 2.096, 2.096, 2.096])\n\trobot.set_position_upper_limits(pos_upper_lims, dof_names)\n\trobot.set_position_lower_limits(-1*pos_upper_lims, dof_names)\n\t\n\t# Modify Velocity Limits\n\tvel_upper_lims = np.array([1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5])\n\t\n\trobot.set_velocity_upper_limits(vel_upper_lims, dof_names)\n\trobot.set_velocity_lower_limits(-1*vel_upper_lims, dof_names)\n\t\n\t# Modify Force Limits\n\tforce_upper_lims = np.array([150, 150, 150, 150, 150, 150, 150])\n\trobot.set_force_upper_limits(force_upper_lims, dof_names)\n\trobot.set_force_lower_limits(-1*force_upper_lims, 
dof_names)\n\t\n\t# Modify Acceleration Limits\n\tacc_upper_lims = np.array([1500, 1500, 1500, 1500, 1500, 1500, 1500])\n\trobot.set_acceleration_upper_limits(acc_upper_lims, dof_names)\n\trobot.set_acceleration_lower_limits(-1*acc_upper_lims, dof_names)\n\t```', 'MODIFY_COEFFS_PYTHON': '\t```python\n\t# Modify Damping Coefficients\n\tdamps = [0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4]\n\trobot.set_damping_coeffs(damps, dof_names)\n\t\n\t# Modify Coulomb Frictions\n\tcfrictions = [0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001]\n\trobot.set_coulomb_coeffs(cfrictions, dof_names)\n\t\n\t# Modify Spring Stiffness\n\tstiffnesses = [0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001]\n\trobot.set_spring_stiffnesses(stiffnesses, dof_names)\n\t```', 'KINEMATICS_PYTHON': '\t```python\n\t# Get Joint Positions(Angles)\n\tjoint_positions = robot.positions()\n\t\n\t# Get Joint Velocities\n\tjoint_vels = robot.velocities()\n\t\n\t# Get Joint Accelerations\n\tjoint_accs = robot.accelerations()\n\t\n\t# Get link_name(str) Transformation matrix with respect to the world frame.\n\teef_tf = robot.body_pose(link_name)\n\t\n\t# Get translation vector from transformation matrix\n\teef_pos = eef_tf.translation()\n\t\n\t# Get rotation matrix from tranformation matrix\n\teef_rot = eef_tf.rotation()\n\t\n\t# Get link_name 6d pose vector [logmap(eef_tf.linear()), eef_tf.translation()]\n\teef_pose_vec = robot.body_pose_vec(link_name)\n\t\n\t# Get link_name 6d velocity vector [angular, cartesian]\n\teef_vel = robot.body_velocity(link_name)\n\t\n\t# Get link_name 6d acceleration vector [angular, cartesian]\n\teef_acc = robot.body_acceleration(link_name)\n\t\n\t# Jacobian targeting the origin of link_name(str)\n\tjacobian = robot.jacobian(link_name)\n\t\n\t# Jacobian time derivative\n\tjacobian_deriv = robot.jacobian_deriv(link_name)\n\t\n\t# Center of Mass Jacobian\n\tcom_jacobian = robot.com_jacobian()\n\t\n\t# Center of Mass Jacobian Time Derivative\n\tcom_jacobian_deriv = 
robot.com_jacobian_deriv()\n\t```', 'DYNAMICS_PYTHON': "\t```python\n\t# Get Joint Forces\n\tjoint_forces = robot.forces()\n\t\n\t# Get link's mass\n\teef_mass = robot.body_mass(link_name)\n\t\n\t# Mass Matrix of robot\n\tmass_matrix = robot.mass_matrix()\n\t\n\t# Inverse of Mass Matrix\n\tinv_mass_matrix = robot.inv_mass_matrix()\n\t\n\t# Augmented Mass matrix\n\taug_mass_matrix = robot.aug_mass_matrix()\n\t\n\t# Inverse of Augmented Mass matrix\n\tinv_aug_mass_matrix = robot.inv_aug_mass_matrix()\n\t\n\t# Coriolis Force vector\n\tcoriolis = robot.coriolis_forces()\n\t\n\t# Gravity Force vector\n\tgravity = robot.gravity_forces()\n\t\n\t# Combined vector of Coriolis Force and Gravity Force\n\tcoriolis_gravity = robot.coriolis_gravity_forces()\n\t\n\t# NOT IMPLEMENTED\n\t# # Constraint Force Vector\n\t# constraint_forces = robot.constraint_forces()\n\t\n\t```", 'TALOS_PYTHON': '\t```python\n\trobot = rd.Talos()\n\t```', 'INIT_SIMU_PYTHON': '\t```python\n\t# choose time step of 0.001 seconds\n\tsimu = rd.RobotDARTSimu(0.001)\n\t```', 'MODIFY_SIMU_DT_PYTHON': '\t```python\n\t# set timestep to 0.005 and update control frequency(bool)\n\tsimu.set_timestep(0.005, True)\n\t```', 'SIMU_GRAVITY_PYTHON': '\t```python\n\t# set gravitational force of mars\n\tmars_gravity = [0., 0., -3.721]\n\tsimu.set_gravity(mars_gravity)\n\t```', 'GRAPHICS_PARAMS_PYTHON': '\t```python\n\tconfiguration = rd.gui.GraphicsConfiguration()\n\t# We can change the width/height of the window (or camera, dimensions)\n\tconfiguration.width = 1280\n\tconfiguration.height = 960\n\tconfiguration.title = "Graphics Tutorial" # We can set a title for our window\n\t\n\t# We can change the configuration for shadows\n\tconfiguration.shadowed = True\n\tconfiguration.transparent_shadows = True\n\tconfiguration.shadow_map_size = 1024\n\t\n\t# We can also alter some specifications for the lighting\n\tconfiguration.max_lights = 3 # maximum number of lights for our scene\n\tconfiguration.specular_strength = 0.25 # 
strength og the specular component\n\t\n\t# Some extra configuration for the main camera\n\tconfiguration.draw_main_camera = True\n\tconfiguration.draw_debug = True\n\tconfiguration.draw_text = True\n\t\n\t# We can also change the background color [default = black]\n\tconfiguration.bg_color = [1., 1., 1., 1.]\n\t\n\t# create the graphics object with the configuration as a parameter\n\tgraphics = rd.gui.Graphics(configuration)\n\t```', 'SHADOWS_GRAPHICS_PYTHON': '\t```python\n\t# Disable shadows\n\tgraphics.enable_shadows(False, False)\n\tsimu.run(1.)\n\t# Enable shadows only for non-transparent objects\n\tgraphics.enable_shadows(True, False)\n\tsimu.run(1.)\n\t# Enable shadows for transparent objects as well\n\tgraphics.enable_shadows(True, True)\n\tsimu.run(1.)\n\t```', 'CLR_LIGHT_PYTHON': '\t```python\n\t# Clear Lights\n\tgraphics.clear_lights()\n\t```', 'LIGHT_MATERIAL_PYTHON': '\t```python\n\t# Clear Light material\n\tshininess = 1000.\n\twhite = magnum.Color4(1., 1., 1., 1.)\n\t\n\t# ambient, diffuse, specular\n\tcustom_material = rd.gui.Material(white, white, white, shininess)\n\t```', 'POINT_LIGHT_PYTHON': '\t```python\n\t# Create point light\n\tposition = magnum.Vector3(0., 0., 2.)\n\tintensity = 1.\n\tattenuation_terms = magnum.Vector3(1., 0., 0.)\n\tpoint_light = rd.gui.create_point_light(position, custom_material, intensity, attenuation_terms)\n\tgraphics.add_light(point_light)\n\t```', 'DIRECTIONAL_LIGHT_PYTHON': '\t```python\n\t# Create directional light\n\tdirection = magnum.Vector3(-1, -1, -1)\n\tdirectional_light = rd.gui.create_directional_light(direction, custom_material)\n\tgraphics.add_light(directional_light)\n\t```', 'SPOT_LIGHT_PYTHON': '\t```python\n\t# Create spot light\n\tposition = magnum.Vector3(0., 0., 1.)\n\tdirection = magnum.Vector3(-1, -1, -1)\n\tintensity = 1.\n\tattenuation_terms = magnum.Vector3(1., 0., 0.)\n\tspot_exponent = np.pi\n\tspot_cut_off = np.pi / 8\n\tspot_light = rd.gui.create_spot_light(position, custom_material, 
direction, spot_exponent, spot_cut_off, intensity, attenuation_terms)\n\tgraphics.add_light(spot_light)\n\t```', 'LOAD_IICUB_PYTHON': '\t```python\n\trobot = rd.ICub()\n\t\n\t# Set actuator types to VELOCITY motors so that they stay in position without any controller\n\trobot.set_actuator_types("velocity")\n\tsimu = rd.RobotDARTSimu(0.001)\n\tsimu.set_collision_detector("fcl")\n\t```', 'ICUB_PRINT_IMU_PYTHON': '\t```python\n\tprint("Angular Position: ", robot.imu().angular_position_vec().transpose())\n\tprint("Angular Velocity: ", robot.imu().angular_velocity().transpose())\n\tprint("Linear Acceleration: ", robot.imu().linear_acceleration().transpose())\n\tprint("=================================" )\n\t```', 'ICUB_PRINT_FT_PYTHON': '\t```python\n\tprint("FT ( force): ", robot.ft_foot_left().force().transpose())\n\tprint("FT (torque): ", robot.ft_foot_left().torque().transpose())\n\tprint("=================================")\n\t```', 'HEXAPOD_PYTHON': '\t```python\n\trobot = rd.Hexapod()\n\t```'}
+ variables = {'IIWA': '\t```cpp\n\tauto robot = std::make_shared();\n\t```', 'ROBOT_GHOST': '\t```cpp\n\t// Add a ghost robot; only visuals, no dynamics, no collision\n\tauto ghost = robot->clone_ghost();\n\tsimu.add_robot(ghost);\n\t```', 'SET_COLLISION_DETECTOR': '\t```cpp\n\tsimu.set_collision_detector("fcl"); // collision_detector can be "dart", "fcl", "ode" or "bullet" (case does not matter)\n\t```', 'SELF_COLLISIONS': '\t```cpp\n\tif (!robot->self_colliding()) {\n\t std::cout << "Self collision is not enabled" << std::endl;\n\t // set self collisions to true and adjacent collisions to false\n\t robot->set_self_collision(true, false);\n\t}\n\t```', 'SIMPLE_CONTROL': '\t```cpp\n\tauto controller1 = std::make_shared(ctrl);\n\trobot->add_controller(controller1);\n\t```', 'CAMERA_SENSOR_RGBD_RECORD_DEPTH': '\t```cpp\n\tcamera->camera().record(true, true); // cameras are recording color images by default, enable depth images as well for this example\n\t```', 'TORQUE_SENSOR': '\t```cpp\n\t// Add torque sensors to the robot\n\tint ct = 0;\n\tstd::vector> tq_sensors(robot->num_dofs());\n\tfor (const auto& joint : robot->dof_names())\n\t tq_sensors[ct++] = simu.add_sensor(robot, joint, 1000);\n\t```', 'FORCE_TORQUE_SENSOR': '\t```cpp\n\t// Add force-torque sensors to the robot\n\tct = 0;\n\tstd::vector> f_tq_sensors(robot->num_dofs());\n\tfor (const auto& joint : robot->dof_names())\n\t f_tq_sensors[ct++] = simu.add_sensor(robot, joint, 1000, "parent_to_child");\n\t```', 'IMU_SENSOR': '\t```cpp\n\t// Add IMU sensors to the robot\n\tct = 0;\n\tstd::vector> imu_sensors(robot->num_bodies());\n\tfor (const auto& body_node : robot->body_names()) {\n\t // _imu(std::make_shared(sensor::IMUConfig(body_node("head"), frequency))),\n\t imu_sensors[ct++] = simu.add_sensor(robot_dart::sensor::IMUConfig(robot->body_node(body_node), 1000));\n\t}\n\t```', 'TORQUE_MEASUREMENT': '\t```cpp\n\t// vector that contains the torque measurement for every joint (scalar)\n\tEigen::VectorXd 
torques_measure(robot->num_dofs());\n\tfor (const auto& tq_sens : tq_sensors)\n\t torques_measure.block<1, 1>(ct++, 0) = tq_sens->torques();\n\t```', 'FORCE_TORQUE_MEASUREMENT': '\t```cpp\n\t// matrix that contains the torque measurement for every joint (3d vector)\n\tEigen::MatrixXd ft_torques_measure(robot->num_dofs(), 3);\n\t// matrix that contains the force measurement for every joint (3d vector)\n\tEigen::MatrixXd ft_forces_measure(robot->num_dofs(), 3);\n\t// matrix that contains the wrench measurement for every joint (6d vector)[torque, force]\n\tEigen::MatrixXd ft_wrench_measure(robot->num_dofs(), 6);\n\tct = 0;\n\tfor (const auto& f_tq_sens : f_tq_sensors) {\n\t ft_torques_measure.block<1, 3>(ct, 0) = f_tq_sens->torque();\n\t ft_forces_measure.block<1, 3>(ct, 0) = f_tq_sens->force();\n\t ft_wrench_measure.block<1, 6>(ct, 0) = f_tq_sens->wrench();\n\t ct++;\n\t}\n\t```', 'IMU_MEASUREMENT': '\t```cpp\n\tEigen::MatrixXd imu_angular_positions_measure(robot->num_bodies(), 3);\n\tEigen::MatrixXd imu_angular_velocities_measure(robot->num_bodies(), 3);\n\tEigen::MatrixXd imu_linear_acceleration_measure(robot->num_bodies(), 3);\n\tct = 0;\n\tfor (const auto& imu_sens : imu_sensors) {\n\t imu_angular_positions_measure.block<1, 3>(ct, 0) = imu_sens->angular_position_vec();\n\t imu_angular_velocities_measure.block<1, 3>(ct, 0) = imu_sens->angular_velocity();\n\t imu_linear_acceleration_measure.block<1, 3>(ct, 0) = imu_sens->linear_acceleration();\n\t ct++;\n\t}\n\t```', 'RGB_SENSOR': '\t```cpp\n\t// a nested std::vector (w*h*3) of the last image taken can be retrieved\n\tauto rgb_image = camera->image();\n\t```', 'RGB_SENSOR_MEASURE': '\t```cpp\n\t// we can also save them to png\n\trobot_dart::gui::save_png_image("camera-small.png", rgb_image);\n\t// convert an rgb image to grayscale (useful in some cases)\n\tauto gray_image = robot_dart::gui::convert_rgb_to_grayscale(rgb_image);\n\trobot_dart::gui::save_png_image("camera-gray.png", gray_image);\n\t```', 
'RGB_D_SENSOR': '\t```cpp\n\t// get the depth image from a camera\n\t// with a version for visualization or bigger differences in the output\n\tauto rgb_d_image = camera->depth_image();\n\t// and the raw values that can be used along with the camera parameters to transform the image to point-cloud\n\tauto rgb_d_image_raw = camera->raw_depth_image();\n\t```', 'RGB_D_SENSOR_MEASURE': '\t```cpp\n\trobot_dart::gui::save_png_image("camera-depth.png", rgb_d_image);\n\trobot_dart::gui::save_png_image("camera-depth-raw.png", rgb_d_image_raw);\n\t```', 'ROBOT_CONTROL': '\t```cpp\n\tclass MyController : public robot_dart::control::RobotControl {\n\tpublic:\n\t MyController() : robot_dart::control::RobotControl() {}\n\t\n\t MyController(const Eigen::VectorXd& ctrl, bool full_control) : robot_dart::control::RobotControl(ctrl, full_control) {}\n\t MyController(const Eigen::VectorXd& ctrl, const std::vector& dof_names) : robot_dart::control::RobotControl(ctrl, dof_names) {}\n\t\n\t void configure() override\n\t {\n\t _active = true;\n\t }\n\t\n\t Eigen::VectorXd calculate(double) override\n\t {\n\t auto robot = _robot.lock();\n\t Eigen::VectorXd cmd = 100. 
* (_ctrl - robot->positions(_controllable_dofs));\n\t return cmd;\n\t }\n\t std::shared_ptr clone() const override\n\t {\n\t return std::make_shared(*this);\n\t }\n\t};\n\t```', 'SIMPLE_ARM': '\t```cpp\n\tauto robot = std::make_shared();\n\t```', 'KINEMATICS': '\t```cpp\n\t// Get Joint Positions(Angles)\n\tauto joint_positions = robot->positions();\n\t\n\t// Get Joint Velocities\n\tauto joint_vels = robot->velocities();\n\t\n\t// Get Joint Accelerations\n\tauto joint_accs = robot->accelerations();\n\t\n\t// Get link_name(str) Transformation matrix with respect to the world frame.\n\tauto eef_tf = robot->body_pose(link_name);\n\t\n\t// Get translation vector from transformation matrix\n\tauto eef_pos = eef_tf.translation();\n\t\n\t// Get rotation matrix from tranformation matrix\n\tauto eef_rot = eef_tf.rotation();\n\t\n\t// Get link_name 6d pose vector [logmap(eef_tf.linear()), eef_tf.translation()]\n\tauto eef_pose_vec = robot->body_pose_vec(link_name);\n\t\n\t// Get link_name 6d velocity vector [angular, cartesian]\n\tauto eef_vel = robot->body_velocity(link_name);\n\t\n\t// Get link_name 6d acceleration vector [angular, cartesian]\n\tauto eef_acc = robot->body_acceleration(link_name);\n\t\n\t// Jacobian targeting the origin of link_name(str)\n\tauto jacobian = robot->jacobian(link_name);\n\t\n\t// Jacobian time derivative\n\tauto jacobian_deriv = robot->jacobian_deriv(link_name);\n\t\n\t// Center of Mass Jacobian\n\tauto com_jacobian = robot->com_jacobian();\n\t\n\t// Center of Mass Jacobian Time Derivative\n\tauto com_jacobian_deriv = robot->com_jacobian_deriv();\n\t```', 'DYNAMICS': "\t```cpp\n\t// Get Joint Forces\n\tauto joint_forces = robot->forces();\n\t\n\t// Get link's mass\n\tauto eef_mass = robot->body_mass(link_name);\n\t\n\t// Mass Matrix of robot\n\tauto mass_matrix = robot->mass_matrix();\n\t\n\t// Inverse of Mass Matrix\n\tauto inv_mass_matrix = robot->inv_mass_matrix();\n\t\n\t// Augmented Mass matrix\n\tauto aug_mass_matrix = 
robot->aug_mass_matrix();\n\t\n\t// Inverse of Augmented Mass matrix\n\tauto inv_aug_mass_matrix = robot->inv_aug_mass_matrix();\n\t\n\t// Coriolis Force vector\n\tauto coriolis = robot->coriolis_forces();\n\t\n\t// Gravity Force vector\n\tauto gravity = robot->gravity_forces();\n\t\n\t// Combined vector of Coriolis Force and Gravity Force\n\tauto coriolis_gravity = robot->coriolis_gravity_forces();\n\t\n\t// Constraint Force Vector\n\tauto constraint_forces = robot->constraint_forces();\n\t```", 'FRANKA': '\t```cpp\n\tauto robot = std::make_shared();\n\t```', 'PD_CONTROL': '\t```cpp\n\t// add a PD-controller to the arm\n\t// set desired positions\n\tEigen::VectorXd ctrl = robot_dart::make_vector({0., M_PI / 4., 0., -M_PI / 4., 0., M_PI / 2., 0., 0.});\n\t\n\t// add the controller to the robot\n\tauto controller = std::make_shared(ctrl);\n\trobot->add_controller(controller);\n\tcontroller->set_pd(300., 50.);\n\t```', 'ADD_NEW_CAMERA': '\t```cpp\n\t// Add camera\n\tauto camera = std::make_shared(graphics->magnum_app(), 256, 256);\n\t```', 'MANIPULATE_CAM_SEP': '\t```cpp\n\tcamera->camera().set_far_plane(5.f);\n\tcamera->camera().set_near_plane(0.01f);\n\tcamera->camera().set_fov(60.0f);\n\t```', 'MANIPULATE_CAM': '\t```cpp\n\tcamera->camera().set_camera_params(5., // far plane\n\t 0.01f, // near plane\n\t 60.0f, // field of view\n\t 600, // width\n\t 400 // height\n\t);\n\t```', 'RECORD_VIDEO_CAMERA': '\t```cpp\n\t// cameras can also record video\n\tcamera->record_video("video-camera.mp4");\n\t```', 'CAM_POSITION': '\t```cpp\n\t// set the position of the camera, and the position where the main camera is looking at\n\tEigen::Vector3d cam_pos = {-0.5, -3., 0.75};\n\tEigen::Vector3d cam_looks_at = {0.5, 0., 0.2};\n\tcamera->look_at(cam_pos, cam_looks_at);\n\t```', 'CAM_ATTACH': '\t```cpp\n\tEigen::Isometry3d tf;\n\ttf = Eigen::AngleAxisd(3.14, Eigen::Vector3d{1., 0., 0.});\n\ttf *= Eigen::AngleAxisd(1.57, Eigen::Vector3d{0., 0., 1.});\n\ttf.translation() = 
Eigen::Vector3d(0., 0., 0.1);\n\tcamera->attach_to_body(robot->body_node("iiwa_link_ee"), tf); // cameras are looking towards -z by default\n\t```', 'HEXAPOD': '\t```cpp\n\tauto robot = std::make_shared();\n\t```', 'HELLO_WORLD_INCLUDE': '\t```cpp\n\t#include \n\t\n\t#ifdef GRAPHIC\n\t#include \n\t#endif\n\t```', 'HELLO_WORLD_ROBOT_CREATION': '\t```cpp\n\tauto robot = std::make_shared("pexod.urdf");\n\t```', 'HELLO_WORLD_ROBOT_PLACING': '\t```cpp\n\trobot->set_base_pose(robot_dart::make_tf({0., 0., 0.2}));\n\t```', 'HELLO_WORLD_ROBOT_SIMU': '\t```cpp\n\trobot_dart::RobotDARTSimu simu(0.001); // dt=0.001, 1KHz simulation\n\tsimu.add_floor();\n\tsimu.add_robot(robot);\n\t```', 'HELLO_WORLD_ROBOT_GRAPHIC': '\t```cpp\n\tauto graphics = std::make_shared();\n\tsimu.set_graphics(graphics);\n\tgraphics->look_at({0.5, 3., 0.75}, {0.5, 0., 0.2});\n\t```', 'HELLO_WORLD_ROBOT_RUN': '\t```cpp\n\tsimu.run(10.);\n\t```', 'INIT_SIMU': '\t```cpp\n\t// choose time step of 0.001 seconds\n\trobot_dart::RobotDARTSimu simu(0.001);\n\t```', 'MODIFY_SIMU_DT': '\t```cpp\n\t// set timestep to 0.005 and update control frequency(bool)\n\tsimu.set_timestep(0.005, true);\n\t```', 'SIMU_GRAVITY': '\t```cpp\n\t// Set gravitational force of mars\n\tEigen::Vector3d mars_gravity = {0., 0., -3.721};\n\tsimu.set_gravity(mars_gravity);\n\t```', 'GRAPHICS_PARAMS': '\t```cpp\n\trobot_dart::gui::magnum::GraphicsConfiguration configuration;\n\t// We can change the width/height of the window (or camera image dimensions)\n\tconfiguration.width = 1280;\n\tconfiguration.height = 960;\n\tconfiguration.title = "Graphics Tutorial"; // We can set a title for our window\n\t\n\t// We can change the configuration for shadows\n\tconfiguration.shadowed = true;\n\tconfiguration.transparent_shadows = true;\n\tconfiguration.shadow_map_size = 1024;\n\t\n\t// We can also alter some specifications for the lighting\n\tconfiguration.max_lights = 3; // maximum number of lights for our scene 
[default=3]\n\tconfiguration.specular_strength = 0.25; // strength of the specular component\n\t\n\t// Some extra configuration for the main camera\n\tconfiguration.draw_main_camera = true;\n\tconfiguration.draw_debug = true;\n\tconfiguration.draw_text = true;\n\t\n\t// We can also change the background color [default=black]\n\tconfiguration.bg_color = Eigen::Vector4d{1.0, 1.0, 1.0, 1.0};\n\t\n\t// Create the graphics object with the configuration as parameter\n\tauto graphics = std::make_shared(configuration);\n\t```', 'SHADOWS_GRAPHICS': '\t```cpp\n\t// Disable shadows\n\tgraphics->enable_shadows(false, false);\n\tsimu.run(1.);\n\t// Enable shadows only for non-transparent objects\n\tgraphics->enable_shadows(true, false);\n\tsimu.run(1.);\n\t// Enable shadows for transparent objects as well\n\tgraphics->enable_shadows(true, true);\n\tsimu.run(1.);\n\t```', 'CLR_LIGHT': '\t```cpp\n\t// Clear Lights\n\tgraphics->clear_lights();\n\t```', 'LIGHT_MATERIAL': '\t```cpp\n\t// Create Light material\n\tMagnum::Float shininess = 1000.f;\n\tMagnum::Color4 white = {1.f, 1.f, 1.f, 1.f};\n\t\n\t// ambient, diffuse, specular\n\tauto custom_material = robot_dart::gui::magnum::gs::Material(white, white, white, shininess);\n\t```', 'POINT_LIGHT': '\t```cpp\n\t// Create point light\n\tMagnum::Vector3 position = {0.f, 0.f, 2.f};\n\tMagnum::Float intensity = 1.f;\n\tMagnum::Vector3 attenuation_terms = {1.f, 0.f, 0.f};\n\tauto point_light = robot_dart::gui::magnum::gs::create_point_light(position, custom_material, intensity, attenuation_terms);\n\tgraphics->add_light(point_light);\n\t```', 'DIRECTIONAL_LIGHT': '\t```cpp\n\t// Create directional light\n\tMagnum::Vector3 direction = {-1.f, -1.f, -1.f};\n\tauto directional_light = robot_dart::gui::magnum::gs::create_directional_light(direction, custom_material);\n\tgraphics->add_light(directional_light);\n\t```', 'SPOT_LIGHT': '\t```cpp\n\t// Create spot light\n\tMagnum::Vector3 position = {0.f, 0.f, 1.f};\n\tMagnum::Vector3 direction = 
{-1.f, -1.f, -1.f};\n\tMagnum::Float intensity = 1.f;\n\tMagnum::Vector3 attenuation_terms = {1.f, 0.f, 0.f};\n\tMagnum::Float spot_exponent = M_PI;\n\tMagnum::Float spot_cut_off = M_PI / 8;\n\tauto spot_light = robot_dart::gui::magnum::gs::create_spot_light(position, custom_material, direction, spot_exponent, spot_cut_off, intensity, attenuation_terms);\n\tgraphics->add_light(spot_light);\n\t```', 'ROBOT_POOL_INCLUDE': '\t```cpp\n\t#include \n\t```', 'ROBOT_POOL_GLOBAL_NAMESPACE': '\t```cpp\n\tnamespace pool {\n\t // This function should load one robot: here we load Talos\n\t inline std::shared_ptr robot_creator()\n\t {\n\t return std::make_shared();\n\t }\n\t\n\t // To create the object we need to pass the robot_creator function and the number of maximum parallel threads\n\t robot_dart::RobotPool robot_pool(robot_creator, NUM_THREADS);\n\t} // namespace pool\n\t```', 'ROBOT_POOL_EVAL': '\t```cpp\n\tinline void eval_robot(int i)\n\t{\n\t // We get one available robot\n\t auto robot = pool::robot_pool.get_robot();\n\t std::cout << "Robot " << i << " got [" << robot->skeleton() << "]" << std::endl;\n\t\n\t /// --- some robot_dart code ---\n\t simulate_robot(robot);\n\t // --- do something with the result\n\t\n\t std::cout << "End of simulation " << i << std::endl;\n\t\n\t // CRITICAL : free your robot !\n\t pool::robot_pool.free_robot(robot);\n\t\n\t std::cout << "Robot " << i << " freed!" 
<< std::endl;\n\t}\n\t```', 'ROBOT_POOL_CREATE_THREADS': '\t```cpp\n\t// for the example, we run NUM_THREADS threads of eval_robot()\n\tstd::vector threads(NUM_THREADS * 2); // *2 to see some reuse\n\tfor (size_t i = 0; i < threads.size(); ++i)\n\t threads[i] = std::thread(eval_robot, i); // eval_robot is the function that uses the robot\n\t```', 'TALOS_FAST': '\t```cpp\n\t// load talos fast\n\tauto robot = std::make_shared();\n\t\n\t// Set actuator types to VELOCITY (for speed)\n\trobot->set_actuator_types("velocity");\n\t\n\tdouble dt = 0.001;\n\trobot_dart::RobotDARTSimu simu(dt);\n\t// we can use the DART (fast) collision detector\n\tsimu.set_collision_detector("dart");\n\t```', 'CAMERAS_PARALLEL': '\t```cpp\n\t// Load robot from URDF\n\tauto global_robot = std::make_shared();\n\t\n\tstd::vector workers;\n\t\n\t// Set maximum number of parallel GL contexts (this is GPU-dependent)\n\trobot_dart::gui::magnum::GlobalData::instance()->set_max_contexts(4);\n\t\n\t// We want 15 parallel simulations with different GL context each\n\tsize_t N_workers = 15;\n\tstd::mutex mutex;\n\tsize_t id = 0;\n\t// Launch the workers\n\tfor (size_t i = 0; i < N_workers; i++) {\n\t workers.push_back(std::thread([&] {\n\t mutex.lock();\n\t size_t index = id++;\n\t mutex.unlock();\n\t\n\t // Get the GL context -- this is a blocking call\n\t // will wait until one GL context is available\n\t // get_gl_context(gl_context); // this call will not sleep between failed queries\n\t get_gl_context_with_sleep(gl_context, 20); // this call will sleep 20ms between each failed query\n\t\n\t // Do the simulation\n\t auto robot = global_robot->clone();\n\t\n\t robot_dart::RobotDARTSimu simu(0.001);\n\t\n\t Eigen::VectorXd ctrl = robot_dart::make_vector({0., M_PI / 3., 0., -M_PI / 4., 0., 0., 0.});\n\t\n\t auto controller = std::make_shared(ctrl);\n\t robot->add_controller(controller);\n\t controller->set_pd(300., 50.);\n\t\n\t // Magnum graphics\n\t robot_dart::gui::magnum::GraphicsConfiguration 
configuration = robot_dart::gui::magnum::WindowlessGraphics::default_configuration();\n\t\n\t configuration.width = 1024;\n\t configuration.height = 768;\n\t auto graphics = std::make_shared(configuration);\n\t simu.set_graphics(graphics);\n\t // Position the camera differently for each thread to visualize the difference\n\t graphics->look_at({0.4 * index, 3.5 - index * 0.1, 2.}, {0., 0., 0.25});\n\t // record images from main camera/graphics\n\t // graphics->set_recording(true); // WindowlessGLApplication records images by default\n\t\n\t simu.add_robot(robot);\n\t simu.run(6);\n\t\n\t // Save the image for verification\n\t robot_dart::gui::save_png_image("camera_" + std::to_string(index) + ".png",\n\t graphics->image());\n\t\n\t // Release the GL context for another thread to use\n\t release_gl_context(gl_context);\n\t }));\n\t}\n\t\n\t// Wait for all the workers\n\tfor (size_t i = 0; i < workers.size(); i++) {\n\t workers[i].join();\n\t}\n\t```', 'LOAD_IICUB': '\t```cpp\n\tauto robot = std::make_shared();\n\t// Set actuator types to VELOCITY motors so that they stay in position without any controller\n\trobot->set_actuator_types("velocity");\n\trobot_dart::RobotDARTSimu simu(0.001);\n\tsimu.set_collision_detector("fcl");\n\t```', 'ICUB_PRINT_IMU': '\t```cpp\n\tstd::cout << "Angular Position: " << robot->imu().angular_position_vec().transpose().format(fmt) << std::endl;\n\tstd::cout << "Angular Velocity: " << robot->imu().angular_velocity().transpose().format(fmt) << std::endl;\n\tstd::cout << "Linear Acceleration: " << robot->imu().linear_acceleration().transpose().format(fmt) << std::endl;\n\tstd::cout << "=================================" << std::endl;\n\t```', 'ICUB_PRINT_FT': '\t```cpp\n\tstd::cout << "FT ( force): " << robot->ft_foot_left().force().transpose().format(fmt) << std::endl;\n\tstd::cout << "FT (torque): " << robot->ft_foot_left().torque().transpose().format(fmt) << std::endl;\n\tstd::cout << "=================================" << 
std::endl;\n\t```', 'SET_ACTUATOR': '\t```cpp\n\t// Set all DoFs to same actuator\n\trobot->set_actuator_types("servo"); // actuator types can be "servo", "torque", "velocity", "passive", "locked", "mimic"\n\t// You can also set actuator types separately\n\trobot->set_actuator_type("torque", "iiwa_joint_5");\n\t```', 'POSITIONS_ENFORCED': '\t```cpp\n\t// Εnforce joint position and velocity limits\n\trobot->set_position_enforced(true);\n\t```', 'MODIFY_LIMITS': '\t```cpp\n\t// Modify Position Limits\n\tEigen::VectorXd pos_upper_lims(7);\n\tpos_upper_lims << 2.096, 2.096, 2.096, 2.096, 2.096, 2.096, 2.096;\n\trobot->set_position_upper_limits(pos_upper_lims, dof_names);\n\trobot->set_position_lower_limits(-pos_upper_lims, dof_names);\n\t\n\t// Modify Velocity Limits\n\t\n\tEigen::VectorXd vel_upper_lims(7);\n\tvel_upper_lims << 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5;\n\trobot->set_velocity_upper_limits(vel_upper_lims, dof_names);\n\trobot->set_velocity_lower_limits(-vel_upper_lims, dof_names);\n\t\n\t// Modify Force Limits\n\t\n\tEigen::VectorXd force_upper_lims(7);\n\tforce_upper_lims << 150, 150, 150, 150, 150, 150, 150;\n\trobot->set_force_upper_limits(force_upper_lims, dof_names);\n\trobot->set_force_lower_limits(-force_upper_lims, dof_names);\n\t\n\t// Modify Acceleration Limits\n\tEigen::VectorXd acc_upper_lims(7);\n\tacc_upper_lims << 1500, 1500, 1500, 1500, 1500, 1500, 1500;\n\trobot->set_acceleration_upper_limits(acc_upper_lims, dof_names);\n\trobot->set_acceleration_lower_limits(-acc_upper_lims, dof_names);\n\t```', 'MODIFY_COEFFS': '\t```cpp\n\t// Modify Damping Coefficients\n\tstd::vector damps = {0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4};\n\trobot->set_damping_coeffs(damps, dof_names);\n\t\n\t// Modify Coulomb Frictions\n\tstd::vector cfrictions = {0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001};\n\trobot->set_coulomb_coeffs(cfrictions, dof_names);\n\t\n\t// Modify Spring Stiffness\n\tstd::vector stiffnesses = {0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 
0.001};\n\trobot->set_spring_stiffnesses(stiffnesses, dof_names);\n\t```', 'TALOS': '\t```cpp\n\tauto robot = std::make_shared();\n\t```', 'RECORD_VIDEO_ROBOT_GRAPHICS_PARAMS': '\t```cpp\n\tgraphics->record_video("talos_dancing.mp4");\n\t```', 'A1': '\t```cpp\n\tauto robot = std::make_shared();\n\t```', 'A1_PRINT_IMU': '\t```cpp\n\tstd::cout << "Angular Position: " << robot->imu().angular_position_vec().transpose().format(fmt) << std::endl;\n\tstd::cout << "Angular Velocity: " << robot->imu().angular_velocity().transpose().format(fmt) << std::endl;\n\tstd::cout << "Linear Acceleration: " << robot->imu().linear_acceleration().transpose().format(fmt) << std::endl;\n\tstd::cout << "=================================" << std::endl;\n\t```', 'ROBOT_CONTROL_PYTHON': '\t```python\n\tclass MyController(rd.RobotControl):\n\t def __init__(self, ctrl, full_control):\n\t rd.RobotControl.__init__(self, ctrl, full_control)\n\t\n\t def __init__(self, ctrl, controllable_dofs):\n\t rd.RobotControl.__init__(self, ctrl, controllable_dofs)\n\t\n\t def configure(self):\n\t self._active = True\n\t\n\t def calculate(self, t):\n\t target = np.array([self._ctrl])\n\t cmd = 100*(target-self.robot().positions(self._controllable_dofs))\n\t return cmd[0]\n\t\n\t # TO-DO: This is NOT working at the moment!\n\t def clone(self):\n\t return MyController(self._ctrl, self._controllable_dofs)\n\t```', 'FRANKA_PYTHON': '\t```python\n\trobot = rd.Franka()\n\t```', 'PD_CONTROL_PYTHON': '\t```python\n\t# add a PD-controller to the arm\n\t# set desired positions\n\tctrl = [0., np.pi / 4., 0., -np.pi / 4., 0., np.pi / 2., 0., 0.]\n\t\n\t# add the controller to the robot\n\tcontroller = rd.PDControl(ctrl)\n\trobot.add_controller(controller)\n\tcontroller.set_pd(300., 50.)\n\t```', 'TALOS_FAST_PYTHON': '\t```python\n\trobot = rd.TalosFastCollision()\n\t```', 'RECORD_VIDEO_ROBOT_GRAPHICS_PARAMS_PYTHON': '\t```python\n\tgraphics.record_video("talos_dancing.mp4")\n\t```', 'CAMERAS_PARALLEL_PYTHON': 
'\t```python\n\trobot = rd.Robot("arm.urdf", "arm", False)\n\trobot.fix_to_world()\n\t\n\tdef test():\n\t pid = os.getpid()\n\t ii = pid%15\n\t\n\t # create the controller\n\t pdcontrol = rd.PDControl([0.0, 1.0, -1.5, 1.0], False)\n\t\n\t # clone the robot\n\t grobot = robot.clone()\n\t\n\t # add the controller to the robot\n\t grobot.add_controller(pdcontrol, 1.)\n\t pdcontrol.set_pd(200., 20.)\n\t\n\t # create the simulation object\n\t simu = rd.RobotDARTSimu(0.001)\n\t\n\t # set the graphics\n\t graphics = rd.gui.WindowlessGraphics()\n\t simu.set_graphics(graphics)\n\t\n\t graphics.look_at([0.4 * ii, 3.5 - ii * 0.1, 2.], [0., 0., 0.25], [0., 0., 1.])\n\t\n\t # add the robot and the floor\n\t simu.add_robot(grobot)\n\t simu.add_checkerboard_floor()\n\t\n\t # run\n\t simu.run(20.)\n\t\n\t # save last frame for visualization purposes\n\t img = graphics.image()\n\t rd.gui.save_png_image(\'camera-\' + str(ii) + \'.png\', img)\n\t\n\t# helper function to run in parallel\n\tdef runInParallel(N):\n\t proc = []\n\t for i in range(N):\n\t # rd.gui.run_with_gl_context accepts 2 parameters:\n\t # (func, wait_time_in_ms)\n\t # the func needs to be of the following format: void(), aka no return, no arguments\n\t p = Process(target=rd.gui.run_with_gl_context, args=(test, 20))\n\t p.start()\n\t proc.append(p)\n\t for p in proc:\n\t p.join()\n\t\n\tprint(\'Running parallel evaluations\')\n\tN = 15\n\tstart = timer()\n\trunInParallel(N)\n\tend = timer()\n\tprint(\'Time:\', end-start)\n\t```', 'IIWA_PYTHON': '\t```python\n\trobot = rd.Iiwa()\n\t```', 'ROBOT_GHOST_PYTHON': '\t```python\n\t# Add a ghost robot; only visuals, no dynamics, no collision\n\tghost = robot.clone_ghost()\n\tsimu.add_robot(ghost)\n\t```', 'HELLO_WORLD_INCLUDE_PYTHON': '\t```python\n\timport RobotDART as rd\n\t```', 'HELLO_WORLD_ROBOT_CREATION_PYTHON': '\t```python\n\trobot = rd.Robot("pexod.urdf");\n\t```', 'HELLO_WORLD_ROBOT_PLACING_PYTHON': '\t```python\n\trobot.set_base_pose([0., 0., 0., 0., 0., 
0.2])\n\t```', 'HELLO_WORLD_ROBOT_SIMU_PYTHON': '\t```python\n\tsimu = rd.RobotDARTSimu(0.001); # dt=0.001, 1KHz simulation\n\tsimu.add_floor();\n\tsimu.add_robot(robot);\n\t```', 'HELLO_WORLD_ROBOT_GRAPHIC_PYTHON': '\t```python\n\tgraphics = rd.gui.Graphics()\n\tsimu.set_graphics(graphics)\n\tgraphics.look_at([0.5, 3., 0.75], [0.5, 0., 0.2])\n\t```', 'HELLO_WORLD_ROBOT_RUN_PYTHON': '\t```python\n\tsimu.run(10.)\n\t```', 'CAMERA_SENSOR_RGBD_RECORD_DEPTH_PYTHON': '\t```python\n\t# cameras are recording color images by default, enable depth images as well for this example\n\tcamera.camera().record(True, True)\n\t```', 'TORQUE_SENSOR_PYTHON': '\t```python\n\t# Add torque sensors to the robot\n\ttq_sensors = np.empty(robot.num_dofs(), dtype=rd.sensor.Torque)\n\tct = 0\n\tfor joint in robot.dof_names():\n\t simu.add_sensor(rd.sensor.Torque(robot, joint, 1000))\n\t tq_sensors[ct] = simu.sensors()[-1]\n\t ct += 1\n\t```', 'FORCE_TORQUE_SENSOR_PYTHON': '\t```python\n\t# Add force-torque sensors to the robot\n\tf_tq_sensors = np.empty(robot.num_dofs(), dtype=rd.sensor.ForceTorque)\n\tct = 0\n\tfor joint in robot.dof_names():\n\t simu.add_sensor(rd.sensor.ForceTorque(\n\t robot, joint, 1000, "parent_to_child"))\n\t f_tq_sensors[ct] = simu.sensors()[-1]\n\t print(f_tq_sensors)\n\t ct += 1\n\t```', 'IMU_SENSOR_PYTHON': '\t```python\n\t# Add IMU sensors to the robot\n\tct = 0\n\timu_sensors = np.empty(robot.num_bodies(), dtype=rd.sensor.IMU)\n\tfor body_node in robot.body_names():\n\t simu.add_sensor(rd.sensor.IMU(\n\t rd.sensor.IMUConfig(robot.body_node(body_node), 1000)))\n\t imu_sensors[ct] = simu.sensors()[-1]\n\t ct += 1\n\t```', 'TORQUE_MEASUREMENT_PYTHON': '\t```python\n\t# vector that contains the torque measurement for every joint (scalar)\n\ttorques_measure = np.empty(robot.num_dofs())\n\tct = 0\n\tfor tq_sens in tq_sensors:\n\t torques_measure[ct] = tq_sens.torques()\n\t ct += 1\n\t```', 'FORCE_TORQUE_MEASUREMENT_PYTHON': '\t```python\n\t# matrix that contains the 
torque measurement for every joint (3d vector)\n\tft_torques_measure = np.empty([robot.num_dofs(), 3])\n\t# matrix that contains the force measurement for every joint (3d vector)\n\tft_forces_measure = np.empty([robot.num_dofs(), 3])\n\t# matrix that contains the wrench measurement for every joint (6d vector)[torque, force]\n\tft_wrench_measure = np.empty([robot.num_dofs(), 6])\n\tct = 0\n\tfor f_tq_sens in f_tq_sensors:\n\t ft_torques_measure[ct, :] = f_tq_sens.torque()\n\t ft_forces_measure[ct, :] = f_tq_sens.force()\n\t ft_wrench_measure[ct, :] = f_tq_sens.wrench()\n\t ct += 1\n\t\n\t```', 'IMU_MEASUREMENT_PYTHON': '\t```python\n\timu_angular_positions_measure = np.empty([robot.num_bodies(), 3])\n\timu_angular_velocities_measure = np.empty([robot.num_bodies(), 3])\n\timu_linear_acceleration_measure = np.empty([robot.num_bodies(), 3])\n\tct = 0\n\tfor imu_sens in imu_sensors:\n\t imu_angular_positions_measure[ct,:] = imu_sens.angular_position_vec()\n\t imu_angular_velocities_measure[ct, :] = imu_sens.angular_velocity()\n\t imu_linear_acceleration_measure[ct,:] = imu_sens.linear_acceleration()\n\t ct += 1\n\t\n\t```', 'RGB_SENSOR_PYTHON': '\t```python\n\t# a nested array (w*h*3) of the last image taken can be retrieved\n\trgb_image = camera.image()\n\t```', 'RGB_SENSOR_MEASURE_PYTHON': '\t```python\n\t# we can also save them to png\n\trd.gui.save_png_image("camera-small.png", rgb_image)\n\t# convert an rgb image to grayscale (useful in some cases)\n\tgray_image = rd.gui.convert_rgb_to_grayscale(rgb_image)\n\trd.gui.save_png_image("camera-gray.png", gray_image)\n\t```', 'RGB_D_SENSOR_PYTHON': '\t```python\n\t# get the depth image from a camera\n\t# with a version for visualization or bigger differences in the output\n\trgb_d_image = camera.depth_image()\n\t# and the raw values that can be used along with the camera parameters to transform the image to point-cloud\n\trgb_d_image_raw = camera.raw_depth_image()\n\t```', 'RGB_D_SENSOR_MEASURE_PYTHON': 
'\t```python\n\trd.gui.save_png_image("camera-depth.png", rgb_d_image)\n\trd.gui.save_png_image("camera-depth-raw.png", rgb_d_image_raw)\n\t```', 'SIMPLE_CONTROL_PYTHON': '\t```python\n\tcontroller1 = rd.SimpleControl(ctrl)\n\trobot.add_controller(controller1)\n\t```', 'A1_PYTHON': '\t```python\n\trobot = rd.A1()\n\t```', 'A1_PRINT_IMU_PYTHON': '\t```python\n\tprint( "Angular Position: ", robot.imu().angular_position_vec().transpose())\n\tprint( "Angular Velocity: ", robot.imu().angular_velocity().transpose())\n\tprint( "Linear Acceleration: ", robot.imu().linear_acceleration().transpose())\n\tprint( "=================================")\n\t```', 'ADD_NEW_CAMERA_PYTHON': '\t```python\n\t# Add camera\n\tcamera = rd.sensor.Camera(graphics.magnum_app(), 32, 32)\n\t```', 'MANIPULATE_CAM_SEP_PYTHON': '\t```python\n\tcamera.camera().set_far_plane(5.)\n\tcamera.camera().set_near_plane(0.01)\n\tcamera.camera().set_fov(60.0)\n\t```', 'MANIPULATE_CAM_PYTHON': '\t```python\n\tcamera.camera().set_camera_params(5., #far plane\n\t 0.01, #near plane\n\t 60.0, # field of view\n\t 600, # width\n\t 400) #height\n\t```', 'RECORD_VIDEO_CAMERA_PYTHON': '\t```python\n\t\n\t# cameras can also record video\n\tcamera.record_video("video-camera.mp4")\n\t```', 'CAM_POSITION_PYTHON': '\t```python\n\t# set the position of the camera, and the position where the main camera is looking at\n\tcam_pos = [-0.5, -3., 0.75]\n\tcam_looks_at = [0.5, 0., 0.2]\n\tcamera.look_at(cam_pos, cam_looks_at)\n\t```', 'CAM_ATTACH_PYTHON': '\t```python\n\ttf = dartpy.math.Isometry3()\n\trot = dartpy.math.AngleAxis(3.14, [1., 0., 0.])\n\trot = rot.multiply( dartpy.math.AngleAxis(1.57, [0., 0., 1.])).to_rotation_matrix()\n\ttf.set_translation([0., 0., 0.1])\n\tcamera.attach_to_body(robot.body_node("iiwa_link_ee"), tf) # cameras are looking towards -z by default\n\t```', 'SET_COLLISION_DETECTOR_PYTHON': '\t```python\n\tsimu.set_collision_detector("fcl") # collision_detector can be "dart", "fcl", "ode" or "bullet" (case 
does not matter)\n\t```', 'SELF_COLLISIONS_PYTHON': '\t```python\n\tif(not robot.self_colliding()):\n\t print("Self collision is not enabled")\n\t # set self collisions to true and adjacent collisions to false\n\t robot.set_self_collision(True, False)\n\t```', 'SIMPLE_ARM_PYTHON': '\t```python\n\trobot = rd.Arm()\n\t```', 'SET_ACTUATOR_PYTHON': '\t```python\n\t# Set all DoFs to same actuator\n\t# actuator types can be "servo", "torque", "velocity", "passive", "locked", "mimic"\n\trobot.set_actuator_types("servo")\n\t# You can also set actuator types separately\n\trobot.set_actuator_type("torque", "iiwa_joint_5")\n\t```', 'POSITIONS_ENFORCED_PYTHON': '\t```python\n\t# Εnforce joint position and velocity limits\n\trobot.set_position_enforced(True)\n\t```', 'MODIFY_LIMITS_PYTHON': '\t```python\n\t# Modify Position Limits\n\tpos_upper_lims = np.array([2.096, 2.096, 2.096, 2.096, 2.096, 2.096, 2.096])\n\trobot.set_position_upper_limits(pos_upper_lims, dof_names)\n\trobot.set_position_lower_limits(-1*pos_upper_lims, dof_names)\n\t\n\t# Modify Velocity Limits\n\tvel_upper_lims = np.array([1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5])\n\t\n\trobot.set_velocity_upper_limits(vel_upper_lims, dof_names)\n\trobot.set_velocity_lower_limits(-1*vel_upper_lims, dof_names)\n\t\n\t# Modify Force Limits\n\tforce_upper_lims = np.array([150, 150, 150, 150, 150, 150, 150])\n\trobot.set_force_upper_limits(force_upper_lims, dof_names)\n\trobot.set_force_lower_limits(-1*force_upper_lims, dof_names)\n\t\n\t# Modify Acceleration Limits\n\tacc_upper_lims = np.array([1500, 1500, 1500, 1500, 1500, 1500, 1500])\n\trobot.set_acceleration_upper_limits(acc_upper_lims, dof_names)\n\trobot.set_acceleration_lower_limits(-1*acc_upper_lims, dof_names)\n\t```', 'MODIFY_COEFFS_PYTHON': '\t```python\n\t# Modify Damping Coefficients\n\tdamps = [0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4]\n\trobot.set_damping_coeffs(damps, dof_names)\n\t\n\t# Modify Coulomb Frictions\n\tcfrictions = [0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 
0.001]\n\trobot.set_coulomb_coeffs(cfrictions, dof_names)\n\t\n\t# Modify Spring Stiffness\n\tstiffnesses = [0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001]\n\trobot.set_spring_stiffnesses(stiffnesses, dof_names)\n\t```', 'KINEMATICS_PYTHON': '\t```python\n\t# Get Joint Positions(Angles)\n\tjoint_positions = robot.positions()\n\t\n\t# Get Joint Velocities\n\tjoint_vels = robot.velocities()\n\t\n\t# Get Joint Accelerations\n\tjoint_accs = robot.accelerations()\n\t\n\t# Get link_name(str) Transformation matrix with respect to the world frame.\n\teef_tf = robot.body_pose(link_name)\n\t\n\t# Get translation vector from transformation matrix\n\teef_pos = eef_tf.translation()\n\t\n\t# Get rotation matrix from tranformation matrix\n\teef_rot = eef_tf.rotation()\n\t\n\t# Get link_name 6d pose vector [logmap(eef_tf.linear()), eef_tf.translation()]\n\teef_pose_vec = robot.body_pose_vec(link_name)\n\t\n\t# Get link_name 6d velocity vector [angular, cartesian]\n\teef_vel = robot.body_velocity(link_name)\n\t\n\t# Get link_name 6d acceleration vector [angular, cartesian]\n\teef_acc = robot.body_acceleration(link_name)\n\t\n\t# Jacobian targeting the origin of link_name(str)\n\tjacobian = robot.jacobian(link_name)\n\t\n\t# Jacobian time derivative\n\tjacobian_deriv = robot.jacobian_deriv(link_name)\n\t\n\t# Center of Mass Jacobian\n\tcom_jacobian = robot.com_jacobian()\n\t\n\t# Center of Mass Jacobian Time Derivative\n\tcom_jacobian_deriv = robot.com_jacobian_deriv()\n\t```', 'DYNAMICS_PYTHON': "\t```python\n\t# Get Joint Forces\n\tjoint_forces = robot.forces()\n\t\n\t# Get link's mass\n\teef_mass = robot.body_mass(link_name)\n\t\n\t# Mass Matrix of robot\n\tmass_matrix = robot.mass_matrix()\n\t\n\t# Inverse of Mass Matrix\n\tinv_mass_matrix = robot.inv_mass_matrix()\n\t\n\t# Augmented Mass matrix\n\taug_mass_matrix = robot.aug_mass_matrix()\n\t\n\t# Inverse of Augmented Mass matrix\n\tinv_aug_mass_matrix = robot.inv_aug_mass_matrix()\n\t\n\t# Coriolis Force vector\n\tcoriolis = 
robot.coriolis_forces()\n\t\n\t# Gravity Force vector\n\tgravity = robot.gravity_forces()\n\t\n\t# Combined vector of Coriolis Force and Gravity Force\n\tcoriolis_gravity = robot.coriolis_gravity_forces()\n\t\n\t# NOT IMPLEMENTED\n\t# # Constraint Force Vector\n\t# constraint_forces = robot.constraint_forces()\n\t\n\t```", 'TALOS_PYTHON': '\t```python\n\trobot = rd.Talos()\n\t```', 'INIT_SIMU_PYTHON': '\t```python\n\t# choose time step of 0.001 seconds\n\tsimu = rd.RobotDARTSimu(0.001)\n\t```', 'MODIFY_SIMU_DT_PYTHON': '\t```python\n\t# set timestep to 0.005 and update control frequency(bool)\n\tsimu.set_timestep(0.005, True)\n\t```', 'SIMU_GRAVITY_PYTHON': '\t```python\n\t# set gravitational force of mars\n\tmars_gravity = [0., 0., -3.721]\n\tsimu.set_gravity(mars_gravity)\n\t```', 'GRAPHICS_PARAMS_PYTHON': '\t```python\n\tconfiguration = rd.gui.GraphicsConfiguration()\n\t# We can change the width/height of the window (or camera, dimensions)\n\tconfiguration.width = 1280\n\tconfiguration.height = 960\n\tconfiguration.title = "Graphics Tutorial" # We can set a title for our window\n\t\n\t# We can change the configuration for shadows\n\tconfiguration.shadowed = True\n\tconfiguration.transparent_shadows = True\n\tconfiguration.shadow_map_size = 1024\n\t\n\t# We can also alter some specifications for the lighting\n\tconfiguration.max_lights = 3 # maximum number of lights for our scene\n\tconfiguration.specular_strength = 0.25 # strength og the specular component\n\t\n\t# Some extra configuration for the main camera\n\tconfiguration.draw_main_camera = True\n\tconfiguration.draw_debug = True\n\tconfiguration.draw_text = True\n\t\n\t# We can also change the background color [default = black]\n\tconfiguration.bg_color = [1., 1., 1., 1.]\n\t\n\t# create the graphics object with the configuration as a parameter\n\tgraphics = rd.gui.Graphics(configuration)\n\t```', 'SHADOWS_GRAPHICS_PYTHON': '\t```python\n\t# Disable shadows\n\tgraphics.enable_shadows(False, 
False)\n\tsimu.run(1.)\n\t# Enable shadows only for non-transparent objects\n\tgraphics.enable_shadows(True, False)\n\tsimu.run(1.)\n\t# Enable shadows for transparent objects as well\n\tgraphics.enable_shadows(True, True)\n\tsimu.run(1.)\n\t```', 'CLR_LIGHT_PYTHON': '\t```python\n\t# Clear Lights\n\tgraphics.clear_lights()\n\t```', 'LIGHT_MATERIAL_PYTHON': '\t```python\n\t# Clear Light material\n\tshininess = 1000.\n\twhite = magnum.Color4(1., 1., 1., 1.)\n\t\n\t# ambient, diffuse, specular\n\tcustom_material = rd.gui.Material(white, white, white, shininess)\n\t```', 'POINT_LIGHT_PYTHON': '\t```python\n\t# Create point light\n\tposition = magnum.Vector3(0., 0., 2.)\n\tintensity = 1.\n\tattenuation_terms = magnum.Vector3(1., 0., 0.)\n\tpoint_light = rd.gui.create_point_light(position, custom_material, intensity, attenuation_terms)\n\tgraphics.add_light(point_light)\n\t```', 'DIRECTIONAL_LIGHT_PYTHON': '\t```python\n\t# Create directional light\n\tdirection = magnum.Vector3(-1, -1, -1)\n\tdirectional_light = rd.gui.create_directional_light(direction, custom_material)\n\tgraphics.add_light(directional_light)\n\t```', 'SPOT_LIGHT_PYTHON': '\t```python\n\t# Create spot light\n\tposition = magnum.Vector3(0., 0., 1.)\n\tdirection = magnum.Vector3(-1, -1, -1)\n\tintensity = 1.\n\tattenuation_terms = magnum.Vector3(1., 0., 0.)\n\tspot_exponent = np.pi\n\tspot_cut_off = np.pi / 8\n\tspot_light = rd.gui.create_spot_light(position, custom_material, direction, spot_exponent, spot_cut_off, intensity, attenuation_terms)\n\tgraphics.add_light(spot_light)\n\t```', 'LOAD_IICUB_PYTHON': '\t```python\n\trobot = rd.ICub()\n\t\n\t# Set actuator types to VELOCITY motors so that they stay in position without any controller\n\trobot.set_actuator_types("velocity")\n\tsimu = rd.RobotDARTSimu(0.001)\n\tsimu.set_collision_detector("fcl")\n\t```', 'ICUB_PRINT_IMU_PYTHON': '\t```python\n\tprint("Angular Position: ", robot.imu().angular_position_vec().transpose())\n\tprint("Angular Velocity: ", 
robot.imu().angular_velocity().transpose())\n\tprint("Linear Acceleration: ", robot.imu().linear_acceleration().transpose())\n\tprint("=================================" )\n\t```', 'ICUB_PRINT_FT_PYTHON': '\t```python\n\tprint("FT ( force): ", robot.ft_foot_left().force().transpose())\n\tprint("FT (torque): ", robot.ft_foot_left().torque().transpose())\n\tprint("=================================")\n\t```', 'HEXAPOD_PYTHON': '\t```python\n\trobot = rd.Hexapod()\n\t```'}
# Expose every documentation code-snippet variable to the mkdocs-macros
# environment so that templates can substitute them by name.
# Unpack the (key, value) pairs directly instead of indexing the tuple
# and re-looking the key up in `variables` (the original did a redundant
# second dict lookup per entry).
for key, value in variables.items():
    env.variables[key] = value
diff --git a/src/examples/python/example_parallel.py b/src/examples/python/example_parallel.py
index 5f678896..7a14faf4 100644
--- a/src/examples/python/example_parallel.py
+++ b/src/examples/python/example_parallel.py
@@ -5,6 +5,7 @@
from timeit import default_timer as timer
import os
+# @CAMERAS_PARALLEL_PYTHON@
robot = rd.Robot("arm.urdf", "arm", False)
robot.fix_to_world()
@@ -61,6 +62,7 @@ def runInParallel(N):
runInParallel(N)
end = timer()
print('Time:', end-start)
+# @CAMERAS_PARALLEL_PYTHON_END@
# print('Running sequential evaluations')
# start = timer()