diff --git a/.github/workflows/run-tests.yaml b/.github/workflows/run-tests.yaml
index cac8db4c18..118cc3d6af 100644
--- a/.github/workflows/run-tests.yaml
+++ b/.github/workflows/run-tests.yaml
@@ -23,7 +23,8 @@ jobs:
run: |
sudo apt-cache search mesa
sudo apt update
- sudo apt install -y libgl1-mesa-glx libgl1-mesa-dev libosmesa6-dev python3-opengl mesa-utils
+ sudo apt install -y libglx-mesa0 libgl1-mesa-dev libosmesa6-dev python3-opengl mesa-utils
+
# check if OSMesa is installed
dpkg -L libosmesa6-dev
python -m pip install --upgrade pip
@@ -56,4 +57,5 @@ jobs:
run: |
export PYOPENGL_PLATFORM="osmesa"
export MUJOCO_GL="osmesa"
+ export NUMBA_DISABLE_JIT=1
pytest
diff --git a/.github/workflows/update-docs.yaml b/.github/workflows/update-docs.yaml
new file mode 100644
index 0000000000..50bbbf6a58
--- /dev/null
+++ b/.github/workflows/update-docs.yaml
@@ -0,0 +1,63 @@
+name: Update Docs and Deploy
+
+on:
+ push:
+ branches:
+ - master
+ - main
+ - ci-docs
+
+jobs:
+ update-docs:
+ name: Generate and Deploy Docs
+ runs-on: ubuntu-latest
+
+ steps:
+ # Checkout the repository
+ - name: Checkout Code
+ uses: actions/checkout@v3
+
+ # Set up Python environment
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.9'
+
+ # Install dependencies
+ - name: Install Dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ pip install sphinx sphinx_book_theme sphinx_markdown_tables recommonmark nbsphinx sphinx_togglebutton hidapi
+
+ # Build the documentation
+ - name: Build Docs
+ run: |
+ cd docs
+ make clean html
+ mv _build ..
+
+ # Deploy to GitHub Pages
+ - name: Deploy to GitHub Pages
+ uses: peaceiris/actions-gh-pages@v3
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_dir: _build/html
+ destination_dir: docs
+
+ # Remove .nojekyll from root directory if it exists
+ - name: Remove .nojekyll file
+ run: |
+ git config --global user.name "github-actions[bot]"
+ git config --global user.email "github-actions[bot]@users.noreply.github.com"
+ git fetch origin gh-pages
+ git checkout gh-pages
+ if [ -f .nojekyll ]; then
+ rm .nojekyll
+ git add .nojekyll
+ git commit -m "Remove .nojekyll file"
+ git push origin gh-pages
+ echo ".nojekyll file removed"
+ else
+ echo ".nojekyll file does not exist, skipping."
+ fi
diff --git a/.github/workflows/update-pypi.yaml b/.github/workflows/update-pypi.yaml
new file mode 100644
index 0000000000..dfb263546a
--- /dev/null
+++ b/.github/workflows/update-pypi.yaml
@@ -0,0 +1,63 @@
+name: Publish to PyPI
+
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - 'pypi-action'
+ release:
+ types: [published]
+
+permissions:
+ contents: read
+
+jobs:
+ build:
+ name: Build distribution
+ runs-on: ubuntu-latest
+
+ steps:
+ # Checkout the repository
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ # Set up Python environment
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.x'
+
+ # Install dependencies
+ - name: Build release distribution
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install build
+ python -m build
+
+ - name: Store the distribution packages
+ uses: actions/upload-artifact@v4
+ with:
+ name: release-dists
+ path: dist/
+
+ publish-to-pypi:
+ name: Publish Python distribution to PyPI
+ needs:
+ - build
+ runs-on: ubuntu-latest
+
+ environment:
+ name: pypi
+ url: https://pypi.org/p/robosuite/
+
+ permissions:
+ id-token: write # IMPORTANT: mandatory for trusted publishing
+
+ steps:
+ - name: Download the dists
+ uses: actions/download-artifact@v4
+ with:
+ name: release-dists
+ path: dist/
+ - name: Publish distribution to PyPI
+ uses: pypa/gh-action-pypi-publish@release/v1
diff --git a/docs/acknowledgement.md b/docs/acknowledgement.md
index 4a4b8b8920..9fcddaf736 100644
--- a/docs/acknowledgement.md
+++ b/docs/acknowledgement.md
@@ -9,5 +9,6 @@
- [Andrew Kondrich](http://www.andrewkondrich.com/), [Jonathan Booher](https://web.stanford.edu/~jaustinb/) (domain randomization)
- [Albert Tung](https://www.linkedin.com/in/albert-tung3/) (demonstration collection)
- [Divyansh Jha](https://github.com/divyanshj16), [Fei Xia](http://fxia.me/) (robosuite v1.3 renderers)
+- [Zhenyu Jiang](https://zhenyujiang.me/), [Yuqi Xie](https://xieleo5.github.io/), [You Liang Tan](https://youliangtan.github.io/) (robosuite v1.5)
We wholeheartedly welcome the community to contribute to our project through issues and pull requests. New contributors will be added to the list above.
\ No newline at end of file
diff --git a/docs/algorithms/demonstrations.md b/docs/algorithms/demonstrations.md
index 170f91656d..a066363a54 100644
--- a/docs/algorithms/demonstrations.md
+++ b/docs/algorithms/demonstrations.md
@@ -2,39 +2,15 @@
## Collecting Human Demonstrations
-We provide teleoperation utilities that allow users to control the robots with input devices, such as the keyboard and the [SpaceMouse](https://www.3dconnexion.com/spacemouse_compact/en/). Such functionality allows us to collect a dataset of human demonstrations for learning. We provide an example script to illustrate how to collect demonstrations. Our [collect_human_demonstrations](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/scripts/collect_human_demonstrations.py) script takes the following arguments:
+We provide teleoperation utilities that allow users to control the robots with input devices, such as the keyboard, [SpaceMouse](https://www.3dconnexion.com/spacemouse_compact/en/), [DualSense](https://www.playstation.com/en-us/accessories/dualsense-wireless-controller/) and mujoco-gui. Such functionality allows us to collect a dataset of human demonstrations for learning. We provide an example script to illustrate how to collect demonstrations. Our [collect_human_demonstrations](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/scripts/collect_human_demonstrations.py) script takes the following arguments:
- `directory:` path to a folder for where to store the pickle file of collected demonstrations
- `environment:` name of the environment you would like to collect the demonstrations for
-- `device:` either "keyboard" or "spacemouse"
-
-### Keyboard controls
-
-Note that the rendering window must be active for these commands to work.
-
-| Keys | Command |
-| :------: | :--------------------------------: |
-| q | reset simulation |
-| spacebar | toggle gripper (open/close) |
-| w-a-s-d | move arm horizontally in x-y plane |
-| r-f | move arm vertically |
-| z-x | rotate arm about x-axis |
-| t-g | rotate arm about y-axis |
-| c-v | rotate arm about z-axis |
-| ESC | quit |
-
-### 3Dconnexion SpaceMouse controls
-
-| Control | Command |
-| :-----------------------: | :-----------------------------------: |
-| Right button | reset simulation |
-| Left button (hold) | close gripper |
-| Move mouse laterally | move arm horizontally in x-y plane |
-| Move mouse vertically | move arm vertically |
-| Twist mouse about an axis | rotate arm about a corresponding axis |
-| ESC (keyboard) | quit |
-
+- `device:` either "keyboard" or "spacemouse" or "dualsense" or "mjgui"
+- `renderer:` Mujoco's builtin interactive viewer (mjviewer) or OpenCV viewer (mujoco)
+- `camera:` Pass multiple camera names to enable multiple views. Note that multiple views require the "mujoco" renderer; the "mjviewer" renderer does not support them.
+See the [devices page](https://robosuite.ai/docs/modules/devices.html) for details on how to use the devices.
## Replaying Human Demonstrations
@@ -45,8 +21,6 @@ We have included an example script that illustrates how demonstrations can be lo
We have included some sample demonstrations for each task at `models/assets/demonstrations`.
-Our sister project [RoboTurk](http://roboturk.stanford.edu) has also collected several human demonstration datasets across different tasks and humans, including pilot datasets of more than a thousand demonstrations for two tasks in our suite via crowdsourcing. You can find detailed information about the RoboTurk datasets [here](roboturk).
-
## Structure of collected demonstrations
@@ -81,7 +55,7 @@ The reason for storing mujoco states instead of raw observations is to make it e
## Using Demonstrations for Learning
-We have recently released the [robomimic](https://arise-initiative.github.io/robomimic-web/) framework, which makes it easy to train policies using your own [datasets collected with robosuite](https://arise-initiative.github.io/robomimic-web/docs/introduction/datasets.html#robosuite-hdf5-datasets), and other publically released datasets (such as those collected with RoboTurk). The framework also contains many useful examples for how to integrate hdf5 datasets into your own learning pipeline.
+The [robomimic](https://arise-initiative.github.io/robomimic-web/) framework makes it easy to train policies using your own [datasets collected with robosuite](https://arise-initiative.github.io/robomimic-web/docs/introduction/datasets.html#robosuite-hdf5-datasets). The framework also contains many useful examples for how to integrate hdf5 datasets into your own learning pipeline.
The robosuite repository also has some utilities for using the demonstrations to alter the start state distribution of training episodes for learning RL policies - this have proved effective in [several](https://arxiv.org/abs/1802.09564) [prior](https://arxiv.org/abs/1807.06919) [works](https://arxiv.org/abs/1804.02717). For example, we provide a generic utility for setting various types of learning curriculums which dictate how to sample from demonstration episodes when doing an environment reset. For more information see the `DemoSamplerWrapper` class.
diff --git a/docs/algorithms/roboturk.md b/docs/algorithms/roboturk.md
deleted file mode 100644
index 723e47d8b3..0000000000
--- a/docs/algorithms/roboturk.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# RoboTurk Datasets
-
-[RoboTurk](https://roboturk.stanford.edu/) is a crowdsourcing platform developed in order to enabled collecting large-scale manipulation datasets. Below, we describe RoboTurk datasets that are compatible with robosuite.
-
-## Datasets compatible with v1.2+
-
-We have collected several human demonstration datasets across several tasks implemented in robosuite as part of the [robomimic](https://arise-initiative.github.io/robomimic-web/) framework. For more information on these datasets, including how to download them and start training policies with them, please see [this link](https://arise-initiative.github.io/robomimic-web/docs/introduction/results.html#downloading-released-datasets).
-
-## Datasets compatible with v0.3
-
-We collected a large-scale dataset on the `SawyerPickPlace` and `SawyerNutAssembly` tasks using the [RoboTurk](https://crowdncloud.ai/) platform. Crowdsourced workers collected these task demonstrations remotely. It consists of **1070** successful `SawyerPickPlace` demonstrations and **1147** successful `SawyerNutAssembly` demonstrations.
-
-We are providing the dataset in the hopes that it will be beneficial to researchers working on imitation learning. Large-scale imitation learning has not been explored much in the community; it will be exciting to see how this data is used.
-
-You can download the dataset [here](http://cvgl.stanford.edu/projects/roboturk/RoboTurkPilot.zip).
-
-**Note:** to get started with this data, we highly recommend using the [robomimic](https://arise-initiative.github.io/robomimic-web/) framework - see [this link](https://arise-initiative.github.io/robomimic-web/docs/introduction/datasets.html#roboturk-pilot-datasets) for more information. To use this data, you should be on the [roboturk_v1](https://github.com/ARISE-Initiative/robosuite/tree/roboturk_v1) branch of robosuite, which is `v0.3` with a few minor changes. You can do this by using `git checkout roboturk_v1` after cloning the repository, or just download the source code from [this link](https://github.com/ARISE-Initiative/robosuite/tree/roboturk_v1).
-
-After unzipping the dataset, the following subdirectories can be found within the `RoboTurkPilot` directory.
-
-- **bins-full**
- - The set of complete demonstrations on the full `SawyerPickPlace` task. Every demonstration consists of the Sawyer arm placing one of each object into its corresponding bin.
-- **bins-Milk**
- - A postprocessed, segmented set of demonstrations that corresponds to the `SawyerPickPlaceMilk` task. Every demonstration consists of the Sawyer arm placing a can into its corresponding bin.
-- **bins-Bread**
- - A postprocessed, segmented set of demonstrations that corresponds to the `SawyerPickPlaceBread` task. Every demonstration consists of the Sawyer arm placing a loaf of bread into its corresponding bin.
-- **bins-Cereal**
- - A postprocessed, segmented set of demonstrations that corresponds to the `SawyerPickPlaceCereal` task. Every demonstration consists of the Sawyer arm placing a cereal box into its corresponding bin.
-- **bins-Can**
- - A postprocessed, segmented set of demonstrations that corresponds to the `SawyerPickPlaceCan` task. Every demonstration consists of the Sawyer arm placing a can into its corresponding bin.
-- **pegs-full**
- - The set of complete demonstrations on the full `SawyerNutAssembly` task. Every demonstration consists of the Sawyer arm fitting a square nut and a round nut onto their corresponding pegs.
-- **pegs-SquareNut**
- - A postprocessed, segmented set of demonstrations that corresponds to the `SawyerNutAssemblySquare` task. Every demonstration consists of the Sawyer arm fitting a square nut onto its corresponding peg.
-- **pegs-RoundNut**
- - A postprocessed, segmented set of demonstrations that corresponds to the `SawyerNutAssemblyRound` task. Every demonstration consists of the Sawyer arm fitting a round nut onto its corresponding peg.
diff --git a/docs/algorithms/sim2real.md b/docs/algorithms/sim2real.md
index d98f3388d1..e619a5932a 100644
--- a/docs/algorithms/sim2real.md
+++ b/docs/algorithms/sim2real.md
@@ -143,9 +143,7 @@ env.modify_observable(
)
# Take a single environment step with positive joint velocity actions
-arm_action = np.ones(env.robots[0].robot_model.dof) * 1.0
-gripper_action = [1]
-action = np.concatenate([arm_action, gripper_action])
+action = np.ones(env.robots[0].robot_model.dof) * 1.0
env.step(action)
# Now we can analyze what values were recorded
diff --git a/docs/changelog.md b/docs/changelog.md
index 9a85deff01..ef262c5c46 100644
--- a/docs/changelog.md
+++ b/docs/changelog.md
@@ -1,12 +1,40 @@
# Changelog
+## Version 1.5.1
+- **Bug Fixes**:
+ - Fixed segmentation demo, part controller demo, demo renderer, joint and actuator issues, equality removal, and orientation mismatch in Fourier hands.
+
+- **Documentation Updates**:
+ - Updated `basicusage.md`, overview page, demo docs, teleop usage info, devices, and related docs.
+ - Added a CI doc update workflow and `.nojekyll` fix.
+ - Simplified composite controller keywords and updated robots section and task images.
+
+- **Features/Enhancements**:
+ - Added GymWrapper support for both `gym` and `gymnasium` with `dict_obs`.
+ - Updated DexMimicGen assets and added a default whole-body Mink IK config.
+ - Improved CI to check all tests and print video save paths in demo recordings.
+  - Added GR1 and Spot robots to the `demo_random_actions.py` script.
+
+- **Miscellaneous**:
+ - Added troubleshooting for SpaceMouse failures and terminated `mjviewer` on resets.
+ - Adjusted OSC position fixes and updated part controller JSONs.
## Version 1.5.0
-
-
Breaking API changes
-
-
\ No newline at end of file
+The 1.5 release of **Robosuite** introduces significant advancements to extend flexibility and realism in robotic simulations. Key highlights include support for diverse robot embodiments (e.g., humanoids), custom robot compositions, composite controllers (such as whole-body controllers), expanded teleoperation devices, and photorealistic rendering capabilities.
+
+### New Features
+- **Diverse Robot Embodiments**: Support for complex robots, including humanoids, allowing exploration of advanced manipulation and mobility tasks. Please see [robosuite_models](https://github.com/ARISE-Initiative/robosuite_models) for extra robosuite-compatible robot models.
+- **Custom Robot Composition**: Users can now build custom robots from modular components, offering extensive configuration options.
+- **Composite Controllers**: New controller abstraction includes whole-body controllers, and the ability to control robots with composed body parts, arms, and grippers.
+- **Additional Teleoperation Devices**: Expanded compatibility with teleoperation tools like drag-and-drop in the MuJoCo viewer and Apple Vision Pro.
+- **Photorealistic Rendering**: Integration of NVIDIA Isaac Sim for enhanced, real-time photorealistic visuals, bringing simulations closer to real-world fidelity.
+
+### Improvements
+- **Updated Documentation**: New tutorials and expanded documentation on utilizing advanced controllers, teleoperation, and rendering options.
+- **Simulation speed improvement**: By default we set the `lite_physics` flag to True to skip redundant calls to [`env.sim.step()`](https://github.com/ARISE-Initiative/robosuite/blob/29e73bd41f9bc43ba88bb7d2573b868398905819/robosuite/environments/base.py#L444)
+
+### Migration
+
+- Composite controller refactoring: please see example of [usage](https://github.com/ARISE-Initiative/robosuite/blob/29e73bd41f9bc43ba88bb7d2573b868398905819/robosuite/examples/third_party_controller/mink_controller.py#L421)
diff --git a/docs/conf.py b/docs/conf.py
index 47880736b7..c559fd6c06 100755
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -84,7 +84,7 @@
# General information about the project.
project = "robosuite"
-copyright = "Stanford University and The University of Texas at Austin 2022"
+copyright = "Stanford University and The University of Texas at Austin 2025"
author = "the robosuite core team"
# The version info for the project you're documenting, acts as replacement for
@@ -185,5 +185,21 @@
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- (master_doc, "robosuite", "robosuite Documentation", author, "robosuite", "ARISE", "Miscellaneous"),
+ (
+ master_doc,
+ "robosuite",
+ "robosuite Documentation",
+ author,
+ "robosuite",
+ "ARISE",
+ "Miscellaneous",
+ ),
+]
+
+autodoc_mock_imports = [
+ "robosuite.devices.mjgui",
+ "robosuite.devices.spacemouse",
+ "robosuite.devices.dualsense",
+ "robosuite.devices.keyboard",
+ "robosuite.devices.device",
]
diff --git a/docs/demos.md b/docs/demos.md
index 636cefe938..5d24ddab03 100644
--- a/docs/demos.md
+++ b/docs/demos.md
@@ -121,6 +121,12 @@ The `demo_device_control.py` scripts shows how to teleoperate robot with [contro
This current implementation only supports macOS (Linux support can be added).
Download and install the [driver](https://www.3dconnexion.com/service/drivers.html) before running the script.
+* **DualSense**
+  We use the [DualSense](https://www.playstation.com/en-us/accessories/dualsense-wireless-controller/) wireless controller to control the end-effector of the robot. The controller provides 6-DoF control commands.
+
+ **Note:**
+  Make sure `hidapi` can detect your DualSense on your computer. On Linux, you may add udev rules in `/etc/udev/rules.d` to get access to the device without root privileges. For the rules content, you can refer to [game-device-udev](https://codeberg.org/fabiscafe/game-devices-udev).
+
* **Mujoco GUI**
The Mujoco GUI provides a graphical user interface for viewing and interacting with a mujoco simulation. We use the GUI and a mouse to drag and drop mocap bodies, whose
poses are tracked by a controller. More specifically, once the mujoco GUI is loaded from running `python demo_device_control.py`, you first need to hit the key to reach the interactive mujoco viewer state. Then, you should double click on
@@ -135,11 +141,7 @@ Furthermore, please choose environment specifics with the following arguments:
* `--environment`: Task to perform, e.g., `Lift`, `TwoArmPegInHole`, `NutAssembly`, etc.
-* `--robots`: Robot(s) with which to perform the task. Can be any in
- {`Panda`, `Sawyer`, `IIWA`, `Jaco`, `Kinova3`, `UR5e`, `Baxter`}. Note that the environments include sanity
- checks, such that a `TwoArm...` environment will only accept either a 2-tuple of robot names or a single
- bimanual robot name, according to the specified configuration (see below), and all other environments will
- only accept a single single-armed robot name
+* `--robots`: Robot(s) with which to perform the task, e.g., `Tiago`, `Panda`, `GR1`, `Sawyer`, etc. Note that the environments include sanity checks, such that a `TwoArm...` environment will not accept configurations with a single, one-armed robot.
* `--config`: Exclusively applicable and only should be specified for `TwoArm...` environments. Specifies the robot
configuration desired for the task when two robots are inputted. Options are {`parallel` and `opposed`}
@@ -152,14 +154,6 @@ Furthermore, please choose environment specifics with the following arguments:
each other, facing each other from opposite directions. Expects a 2-tuple of robot names
to be specified in the `--robots` argument.
-* `--arm`: Exclusively applicable and only should be specified for `TwoArm...` environments. Specifies which of the
- multiple arm eef's to control. The other (passive) arm will remain stationary. Options are {`right`, `left`}
- (from the point of view of the robot(s) facing against the viewer direction)
-
-* `--switch-on-click`: Exclusively applicable and only should be specified for `TwoArm...` environments. If enabled,
- will switch the current arm being controlled every time the gripper input is pressed
-
-* `--toggle-camera-on-click`: If enabled, gripper input presses will cycle through the available camera angles
Examples:
* For normal single-arm environment:
@@ -168,7 +162,7 @@ $ python demo_device_control.py --environment PickPlaceCan --robots Sawyer
```
* For two-arm bimanual environment:
```
-$ python demo_device_control.py --environment TwoArmLift --robots Baxter --config bimanual --arm left
+$ python demo_device_control.py --environment TwoArmLift --robots Tiago
```
* For two-arm multi single-arm robot environment:
```
@@ -186,7 +180,7 @@ The `demo_renderers.py` script shows how to use different renderers with the sim
```sh
$ python demo_renderers.py --renderer default
```
-The `--renderer` flag can be set to `mujoco` or `default(default)
+The `--renderer` flag can be set to `mujoco` or `default`
### Exporting to USD
Exporting to USD allows users to render **robosuite** trajectories in external renderers such as NVIDIA Omniverse and Blender. In order to export to USD you must install the required dependencies for the exporter.
diff --git a/docs/images/env_door.png b/docs/images/env_door.png
index 4bf4c791d8..acb5ea232e 100644
Binary files a/docs/images/env_door.png and b/docs/images/env_door.png differ
diff --git a/docs/images/env_lift.png b/docs/images/env_lift.png
index 2d56189718..a39819adfc 100644
Binary files a/docs/images/env_lift.png and b/docs/images/env_lift.png differ
diff --git a/docs/images/env_nut_assembly.png b/docs/images/env_nut_assembly.png
index fb58791fd9..1a80c37e35 100644
Binary files a/docs/images/env_nut_assembly.png and b/docs/images/env_nut_assembly.png differ
diff --git a/docs/images/env_pick_place.png b/docs/images/env_pick_place.png
index 191ddeb297..59d284ce91 100644
Binary files a/docs/images/env_pick_place.png and b/docs/images/env_pick_place.png differ
diff --git a/docs/images/env_stack.png b/docs/images/env_stack.png
index 611e7bb752..14554cf967 100644
Binary files a/docs/images/env_stack.png and b/docs/images/env_stack.png differ
diff --git a/docs/images/env_two_arm_handover.png b/docs/images/env_two_arm_handover.png
index 39fb4c0137..be5f435950 100644
Binary files a/docs/images/env_two_arm_handover.png and b/docs/images/env_two_arm_handover.png differ
diff --git a/docs/images/env_two_arm_lift.png b/docs/images/env_two_arm_lift.png
index 938aefe5eb..2ab8ec01dc 100644
Binary files a/docs/images/env_two_arm_lift.png and b/docs/images/env_two_arm_lift.png differ
diff --git a/docs/images/env_two_arm_peg_in_hole.png b/docs/images/env_two_arm_peg_in_hole.png
index e7207e8d9c..6c204eef87 100644
Binary files a/docs/images/env_two_arm_peg_in_hole.png and b/docs/images/env_two_arm_peg_in_hole.png differ
diff --git a/docs/images/env_wipe.png b/docs/images/env_wipe.png
index fadeefc2e5..3a469a2b77 100644
Binary files a/docs/images/env_wipe.png and b/docs/images/env_wipe.png differ
diff --git a/docs/images/gr1_cereal_mujoco.png b/docs/images/gr1_cereal_mujoco.png
new file mode 100644
index 0000000000..08a58e70e5
Binary files /dev/null and b/docs/images/gr1_cereal_mujoco.png differ
diff --git a/docs/images/gr1_cereal_path_tracing.png b/docs/images/gr1_cereal_path_tracing.png
new file mode 100644
index 0000000000..bf2bea7068
Binary files /dev/null and b/docs/images/gr1_cereal_path_tracing.png differ
diff --git a/docs/images/gr1_cereal_ray_tracing.png b/docs/images/gr1_cereal_ray_tracing.png
new file mode 100644
index 0000000000..eb412f5150
Binary files /dev/null and b/docs/images/gr1_cereal_ray_tracing.png differ
diff --git a/docs/images/models/bd_gripper.png b/docs/images/models/bd_gripper.png
new file mode 100644
index 0000000000..bc0de2268c
Binary files /dev/null and b/docs/images/models/bd_gripper.png differ
diff --git a/docs/images/models/inspire_hands.png b/docs/images/models/inspire_hands.png
new file mode 100644
index 0000000000..5c2e658380
Binary files /dev/null and b/docs/images/models/inspire_hands.png differ
diff --git a/docs/images/models/jaco_gripper.png b/docs/images/models/jaco_gripper.png
new file mode 100644
index 0000000000..7a69d782b3
Binary files /dev/null and b/docs/images/models/jaco_gripper.png differ
diff --git a/docs/images/models/omron_base.png b/docs/images/models/omron_base.png
new file mode 100644
index 0000000000..8d6e6e4d24
Binary files /dev/null and b/docs/images/models/omron_base.png differ
diff --git a/docs/images/models/panda_gripper.png b/docs/images/models/panda_gripper.png
new file mode 100644
index 0000000000..d5a8b12780
Binary files /dev/null and b/docs/images/models/panda_gripper.png differ
diff --git a/docs/images/models/rethink_base.png b/docs/images/models/rethink_base.png
new file mode 100644
index 0000000000..b4bc84dcea
Binary files /dev/null and b/docs/images/models/rethink_base.png differ
diff --git a/docs/images/models/rethink_gripper.png b/docs/images/models/rethink_gripper.png
new file mode 100644
index 0000000000..b0f3b54f79
Binary files /dev/null and b/docs/images/models/rethink_gripper.png differ
diff --git a/docs/images/models/rethink_minimal_base.png b/docs/images/models/rethink_minimal_base.png
new file mode 100644
index 0000000000..2e478a41c2
Binary files /dev/null and b/docs/images/models/rethink_minimal_base.png differ
diff --git a/docs/images/models/robot_model_Baxter_isaac.png b/docs/images/models/robot_model_Baxter_isaac.png
new file mode 100644
index 0000000000..19d0a38e44
Binary files /dev/null and b/docs/images/models/robot_model_Baxter_isaac.png differ
diff --git a/docs/images/models/robot_model_GR1_isaac.png b/docs/images/models/robot_model_GR1_isaac.png
new file mode 100644
index 0000000000..d4e7bc887e
Binary files /dev/null and b/docs/images/models/robot_model_GR1_isaac.png differ
diff --git a/docs/images/models/robot_model_IIWA_isaac.png b/docs/images/models/robot_model_IIWA_isaac.png
new file mode 100644
index 0000000000..4405930fa8
Binary files /dev/null and b/docs/images/models/robot_model_IIWA_isaac.png differ
diff --git a/docs/images/models/robot_model_Jaco_isaac.png b/docs/images/models/robot_model_Jaco_isaac.png
new file mode 100644
index 0000000000..16faef99c5
Binary files /dev/null and b/docs/images/models/robot_model_Jaco_isaac.png differ
diff --git a/docs/images/models/robot_model_Kinova3_isaac.png b/docs/images/models/robot_model_Kinova3_isaac.png
new file mode 100644
index 0000000000..34612c3b12
Binary files /dev/null and b/docs/images/models/robot_model_Kinova3_isaac.png differ
diff --git a/docs/images/models/robot_model_Panda_isaac.png b/docs/images/models/robot_model_Panda_isaac.png
new file mode 100644
index 0000000000..879d2892cc
Binary files /dev/null and b/docs/images/models/robot_model_Panda_isaac.png differ
diff --git a/docs/images/models/robot_model_Sawyer_isaac.png b/docs/images/models/robot_model_Sawyer_isaac.png
new file mode 100644
index 0000000000..4fda630357
Binary files /dev/null and b/docs/images/models/robot_model_Sawyer_isaac.png differ
diff --git a/docs/images/models/robot_model_Spot_isaac.png b/docs/images/models/robot_model_Spot_isaac.png
new file mode 100644
index 0000000000..36931c1789
Binary files /dev/null and b/docs/images/models/robot_model_Spot_isaac.png differ
diff --git a/docs/images/models/robot_model_Tiago_isaac.png b/docs/images/models/robot_model_Tiago_isaac.png
new file mode 100644
index 0000000000..cff29eaf29
Binary files /dev/null and b/docs/images/models/robot_model_Tiago_isaac.png differ
diff --git a/docs/images/models/robot_model_UR5e_isaac.png b/docs/images/models/robot_model_UR5e_isaac.png
new file mode 100644
index 0000000000..b50327b6a6
Binary files /dev/null and b/docs/images/models/robot_model_UR5e_isaac.png differ
diff --git a/docs/images/models/robotiq140_gripper.png b/docs/images/models/robotiq140_gripper.png
new file mode 100644
index 0000000000..4d316fcf4c
Binary files /dev/null and b/docs/images/models/robotiq140_gripper.png differ
diff --git a/docs/images/models/robotiq85_gripper.png b/docs/images/models/robotiq85_gripper.png
new file mode 100644
index 0000000000..c2bd9052d9
Binary files /dev/null and b/docs/images/models/robotiq85_gripper.png differ
diff --git a/docs/images/models/robotiq_three_gripper.png b/docs/images/models/robotiq_three_gripper.png
new file mode 100644
index 0000000000..d02794d88a
Binary files /dev/null and b/docs/images/models/robotiq_three_gripper.png differ
diff --git a/docs/images/models/spot_base.png b/docs/images/models/spot_base.png
new file mode 100644
index 0000000000..21c863eea0
Binary files /dev/null and b/docs/images/models/spot_base.png differ
diff --git a/docs/images/models/wiping_gripper.png b/docs/images/models/wiping_gripper.png
new file mode 100644
index 0000000000..b2c244b379
Binary files /dev/null and b/docs/images/models/wiping_gripper.png differ
diff --git a/docs/images/robots_module_v15.png b/docs/images/robots_module_v15.png
new file mode 100644
index 0000000000..b6644fd9e3
Binary files /dev/null and b/docs/images/robots_module_v15.png differ
diff --git a/docs/index.rst b/docs/index.rst
index a67f3b9b80..6d7c06c607 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -21,12 +21,12 @@ Welcome to robosuite's documentation!
modules/overview
modules/robots
- modules/controllers
modules/objects
modules/environments
modules/sensors
modules/devices
modules/renderers
+ modules/controllers
.. toctree::
:maxdepth: 1
@@ -67,7 +67,6 @@ Welcome to robosuite's documentation!
algorithms/benchmarking
algorithms/demonstrations
algorithms/sim2real
- algorithms/roboturk
.. toctree::
:maxdepth: 1
diff --git a/docs/installation.md b/docs/installation.md
index 388f08a438..a8d3661f03 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -37,7 +37,7 @@ The base installation requires the MuJoCo physics engine (with [mujoco](https://
```
This will also install our library as an editable package, such that local changes will be reflected elsewhere without having to reinstall the package.
-3. (Optional) We also provide add-on functionalities, such as [OpenAI Gym](https://github.com/openai/gym) [interfaces](source/robosuite.wrappers), [inverse kinematics controllers](source/robosuite.controllers) powered by [PyBullet](http://bulletphysics.org), and [teleoperation](source/robosuite.devices) with [SpaceMouse](https://www.3dconnexion.com/products/spacemouse.html) devices. To enable these additional features, please install the extra dependencies by running
+3. (Optional) We also provide add-on functionalities, such as [OpenAI Gym](https://github.com/openai/gym) [interfaces](source/robosuite.wrappers), [inverse kinematics controllers](source/robosuite.controllers) powered by [PyBullet](http://bulletphysics.org), and [teleoperation](source/robosuite.devices) with [SpaceMouse](https://www.3dconnexion.com/products/spacemouse.html) and [DualSense](https://www.playstation.com/en-us/accessories/dualsense-wireless-controller/) devices. To enable these additional features, please install the extra dependencies by running
```sh
$ pip3 install -r requirements-extra.txt
```
diff --git a/docs/modules/controllers.ipynb b/docs/modules/controllers.ipynb
deleted file mode 100644
index 155850ff0d..0000000000
--- a/docs/modules/controllers.ipynb
+++ /dev/null
@@ -1,223 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Controllers \n",
- "\n",
- "Controllers are used to determine the type of high-level control over a given robot arm. While all arms are directly controlled via their joint torques, the inputted action space for a given environment can vary depending on the type of desired control. Our controller options include `OSC_POSE`, `OSC_POSITION`, `JOINT_POSITION`, `JOINT_VELOCITY`, and `JOINT_TORQUE`.\n",
- "\n",
- "\n",
- "\n",
- "For `OSC_POSE`, `OSC_POSITION`, and `JOINT_POSITION`, we include three variants that determine the action space. The most common variant is to use a predefined and constant set of impedance parameters; in that case, the action space only includes the desired pose, position, or joint configuration. We also include the option to specify either the stiffness values (and the damping will be automatically set to values that lead to a critically damped system), or all impedance parameters, both stiffness and damping, as part of the action at each step. These two variants lead to extended action spaces that can control the stiffness and damping behavior of the controller in a variable manner, providing full control to the policy/solution over the contact and dynamic behavior of the robot.\n",
- "\n",
- "When using any position-based control (`OSC`, `IK`, or `JOINT_POSITION` controllers), input actions are, by default,\n",
- "interpreted as delta values from the current state.\n",
- "\n",
- "When using any end-effector pose controller (`IK`, `OSC_POSE`), delta rotations from the current end-effector orientation\n",
- "in the form of axis-angle coordinates `(ax, ay, az)`, where the direction represents the axis and the magnitude\n",
- "represents the angle (in radians). Note that for `OSC`, the rotation axes are taken relative to the global world\n",
- "coordinate frame, whereas for `IK`, the rotation axes are taken relative to the end-effector origin, NOT the global world coordinate frame!\n",
- "\n",
- "## Controllers\n",
- "\n",
- "During runtime, the execution of the controllers is as follows. Controllers receive a desired configuration (reference value) and output joint torques that try to minimize the error between the current configuration and the desired one. Policies and solutions provide these desired configurations, elements of some action space, at what we call simulated policy frequency ($f_{p}$), e.g., 20Hz or 30Hz. **robosuite** will execute several iterations composed of a controller execution and a simulation step at simulation frequency, $f_s$ ($f_s = N\\cdot f_p$), using the same reference signal until a new action is provided by the policy/solution. In these iterations, while the desired configuration is kept constant, the current state of the robot is changing, and thus, the error.\n",
- "\n",
- "In the following we summarize the options, variables, and the control laws (equations) that convert desired values from the policy/solution and current robot states into executable joint torques to minimize the difference.\n",
- "\n",
- "### Joint Space Control - Torque\n",
- "\n",
- "Controller Type: `JOINT_TORQUE`\n",
- "\n",
- "Action Dimensions (not including gripper): `n` (number of joints)\n",
- "\n",
- "Since our controllers transform the desired values from the policies/solutions into joint torques, if these values are already joint torques, there is a one-to-one mapping between the reference value from the policy/solution and the output value from the joint torque controller at each step: $\\tau = \\tau_d$\n",
- "\\begin{equation}\n",
- "\\tau = \\tau_d\n",
- "\\end{equation}\n",
- "\n",
- "### Joint Space Control - Velocity\n",
- " \n",
- "Controller Type: `JOINT_VELOCITY`\n",
- "\n",
- "Action Dimensions (not including gripper): `n` (number of joints)\n",
- "\n",
- "To control joint velocities, we create a proportional (P) control law between the desired value provided by the policy/solution (interpreted as desired velocity of each joint) and the current joint velocity of the robot. This control law, parameterized by a proportional constant, $k_p$, generates joint torques to execute at each simulation step:\n",
- "\n",
- "\\begin{equation}\n",
- "\\tau = k_p (\\dot{q}_d - \\dot{q})\n",
- "\\end{equation}\n",
- "\n",
- "### Joint Space Control - Position with Fixed Impedance\n",
- "\n",
- "Controller Type: `JOINT_POSITION`\n",
- "\n",
- "Impedance: fixed\n",
- "\n",
- "Action Dimensions (not including gripper): `n` (number of joints)\n",
- "\n",
- "In joint position control, we create a proportional-derivative (PD) control law between the desired value provided by the policy/solution (interpreted as desired configuration for each joint) and the current joint positions of the robot. The control law that generates the joint torques to execute is parameterized by proportional and derivative gains, $k_p$ and $k_v$, and defined as\n",
- "\n",
- "\\begin{equation}\n",
- "\\tau = \\Lambda \\left[k_p \\Delta_q - k_d\\dot{q}\\right]\n",
- "\\end{equation} \n",
- "\n",
- "where $\\Delta_q = q_d - q$ is the difference between current and desired joint configurations, and $\\Lambda$ is the inertia matrix, that we use to scale the error to remove the dynamic effects of the mechanism. The stiffness and damping parameters, $k_p$ and $k_d$, are determined in construction and kept fixed.\n",
- "\n",
- "### Joint Space Control - Position with Variable Stiffness\n",
- "\n",
- "Controller Type: `JOINT_POSITION`\n",
- "\n",
- "Impedance: variable_kp\n",
- "\n",
- "Action Dimensions (not including gripper): `2n` (number of joints)\n",
- "\n",
- "The control law is the same as for fixed impedance but, in this controller, $k_p$ can be determined by the policy/solution at each policy step.\n",
- "\n",
- "### Joint Space Control - Position with Variable Impedance\n",
- "\n",
- "Controller Type: `JOINT_POSITION`\n",
- "\n",
- "Impedance: variable\n",
- "\n",
- "Action Dimensions (not including gripper): `3n` (number of joints)\n",
- "\n",
- "Again, the control law is the same in the two previous control types, but now both the stiffness and damping parameters, $k_p$ and $k_d$, are controllable by the policy/solution and can be changed at each step.\n",
- "\n",
- "### Operational Space Control - Pose with Fixed Impedance\n",
- "\n",
- "Controller Type: `OSC_POSE`\n",
- "\n",
- "Impedance: fixed\n",
- "\n",
- "Action Dimensions (not including gripper): `6`\n",
- "\n",
- "In the `OSC_POSE` controller, the desired value is the 6D pose (position and orientation) of a controlled frame. We follow the formalism from [\\[Khatib87\\]](https://ieeexplore.ieee.org/document/1087068). Our control frame is always the `eef_site` defined in the [Gripper Model](../modeling/robot_model.html#gripper-model), placed at the end of the last link for robots without gripper or between the fingers for robots with gripper. The operational space control framework (OSC) computes the necessary joint torques to minimize the error between the desired and the current pose of the `eef_site` with the minimal kinematic energy. \n",
- "\n",
- "Given a desired pose $\\mathbf{x}_{\\mathit{des}}$ and the current end-effector pose, , we first compute the end-effector acceleration that would help minimize the error between both, assumed. PD (proportional-derivative) control schema to improve convergence and stability. For that, we first decompose into a desired position, $p_d \\in \\mathbb{R}^3$, and a desired orientation, $R_d \\in \\mathbb{SO}(3)$. The end-effector acceleration to minimize the error should increase with the difference between desired end-effector pose and current pose, $p$ and $R$ (proportional term), and decrease with the current end-effector velocity, $v$ and $\\omega$ (derivative term).\n",
- "\n",
- "We then compute the robot actuation (joint torques) to achieve the desired end-effector space accelerations leveraging the kinematic and dynamic models of the robot with the dynamically-consistent operational space formulation in [\\[Khatib1995a\\]](https://journals.sagepub.com/doi/10.1177/027836499501400103). First, we compute the wrenches at the end-effector that corresponds to the desired accelerations, ${f}\\in\\mathbb{R}^{6}$.\n",
- "Then, we map the wrenches in end-effector space ${f}$ to joint torque commands with the end-effector Jacobian at the current joint configuration $J=J(q)$: $\\tau = J^T{f}$. \n",
- "\n",
- "Thus, the function that maps end-effector space position and orientation to low-level robot commands is ($\\textrm{ee} = \\textrm{\\it end-effector space}$):\n",
- "\\begin{equation}\n",
- "\\begin{aligned}\n",
- "\\tau &= J_p[\\Lambda_p[k_p^p (p_d - p) - k_v^p v]] + J_R[\\Lambda_R\\left[k_p^R(R_d \\ominus R) - k_d^R \\omega \\right]]\n",
- "\\end{aligned}\n",
- "\\end{equation}\n",
- "where $\\Lambda_p$ and $\\Lambda_R$ are the parts corresponding to position and orientation in $\\Lambda \\in \\mathbb{R}^{6\\times6}$, the inertial matrix in the end-effector frame that decouples the end-effector motions, $J_p$ and $J_R$ are the position and orientation parts of the end-effector Jacobian, and $\\ominus$ corresponds to the subtraction in $\\mathbb{SO}(3)$. The difference between current and desired position ($\\Delta_p= p_d - p$) and between current and desired orientation ($\\Delta_R = R_d \\ominus R$) can be used as alternative policy action space, $\\mathcal{A}$. $k_p^p$, $k_p^d$, $k_p^R$, and $k_d^R$ are vectors of proportional and derivative gains for position and orientation (parameters $\\kappa$), respectively, set once at initialization and kept fixed.\n",
- "\n",
- "### Operational Space Control - Pose with Variable Stiffness\n",
- "\n",
- "Controller Type: `OSC_POSE`\n",
- "\n",
- "Impedance: variable_kp\n",
- "\n",
- "Action Dimensions (not including gripper): `12`\n",
- "\n",
- "The control law is the same as `OSC_POSE` but, in this case, the stiffness of the controller, $k_p$, is part of the action space and can be controlled and changed at each time step by the policy/solution. The damping parameters, $k_d$, are set to maintain the critically damped behavior of the controller.\n",
- "\n",
- "### Operational Space Control - Pose with Variable Impedance\n",
- "\n",
- "Controller Type: `OSC_POSE`\n",
- "\n",
- "Impedance: variable\n",
- "\n",
- "Action Dimensions (not including gripper): `18`\n",
- "\n",
- "The control law is the same as in the to previous controllers, but now both the stiffness and the damping, $k_p$ and $k_d$, are part of the action space and can be controlled and changed at each time step by the policy/solution. \n",
- "\n",
- "\n",
- "## Configurations\n",
- "The config directory (found within the [Controllers](../source/robosuite.controllers.html) module) provides a set of default configuration files that hold default examples of parameters relevant to individual controllers. Note that when creating your controller config templates of a certain type of controller, the listed parameters in the default example are required and should be specified accordingly.\n",
- "\n",
- "Note: Each robot has its own default controller configuration which is called by default unless a different controller config is called.\n",
- "\n",
- "Below, a brief overview and description of each subset of controller parameters are shown:\n",
- "\n",
- "#### Controller Settings \n",
- "* `type`: Type of controller to control. Can be `OSC_POSE`, `OSC_POSITION`, `IK_POSE`, `JOINT_POSITION`, `JOINT_VELOCITY`, or `JOINT_TORQUE`\n",
- "* `interpolation`: If not `null`, specified type of interpolation to use between desired actions. Currently only `linear` is supported.\n",
- "* `ramp_ratio`: If using `linear` interpolation, specifies the proportion of allotted timesteps (value from [0, 1]) over which to execute the interpolated commands.\n",
- "* `{...}_limits`: Limits for that specific controller. E.g.: for a `JOINT_POSITION`, the relevant limits are its joint positions, `qpos_limits` . Can be either a 2-element list (same min/max limits across entire relevant space), or a list of lists (specific limits for each component)\n",
- "* `ik_{pos, ori}_limit`: Only applicable for IK controller. Limits the magnitude of the desired relative change in position / orientation.\n",
- "* `{input,output}_{min,max}`: Scaling ranges for mapping action space inputs into controller inputs. Settings these limits will automatically clip the action space input to be within the `input_{min,max}` before mapping the requested value into the specified `output_{min,max}` range. Can be either a scalar (same limits across entire action space), or a list (specific limits for each action component)\n",
- "* `kp`: Where relevant, specifies the proportional gain for the controller. Can be either be a scalar (same value for all controller dimensions), or a list (specific values for each dimension)\n",
- "* `damping_ratio`: Where relevant, specifies the damping ratio constant for the controller.\n",
- "* `impedance_mode`: For impedance-based controllers (`OSC_*`, `JOINT_POSITION`), determines the impedance mode for the controller, i.e. the nature of the impedance parameters. It can be `fixed`, `variable`, or `variable_kp` (kd is adjusted to provide critically damped behavior).\n",
- "* `kp_limits, damping_ratio_limits`: Only relevant if `impedance_mode` is set to `variable` or `variable_kp`. Sets the limits for the resulting action space for variable impedance gains.\n",
- "* `control_delta`: Only relevant for `OSC_POSE` or `OSC_POSITION` controllers. `true` interprets input actions as delta values from the current robot end-effector position. Otherwise, assumed to be absolute (global) values\n",
- "* `uncouple_pos_ori`: Only relevant for `OSC_POSE`. `true` decouples the desired position and orientation torques when executing the controller\n",
- "\n",
- "## Loading a Controller\n",
- "By default, if no controller configuration is specified during environment creation, then `JOINT_VELOCITY` controllers with robot-specific configurations will be used. \n",
- "\n",
- "#### Using a Default Controller Configuration\n",
- "Any controller can be used with its default configuration, and can be easily loaded into a given environment by calling its name as shown below (where `controller_name` is one of acceptable controller `type` strings):\n",
- "\n",
- "```python\n",
- "import robosuite as suite\n",
- "from robosuite import load_part_controller_config\n",
- "\n",
- "# Load the desired controller's default config as a dict\n",
- "config = load_part_controller_config(default_controller=controller_name)\n",
- "\n",
- "# Create environment\n",
- "env = suite.make(\"Lift\", robots=\"Panda\", controller_configs=config, ... )\n",
- "```\n",
- "\n",
- "#### Using a Custom Controller Configuration\n",
- "A custom controller configuration can also be used by simply creating a new config (`.json`) file with the relevant parameters as specified above. All robosuite environments have an optional `controller_configs` argument that can be used to pass in specific controller settings. Note that this is expected to be a `dict`, so the new configuration must be read in and parsed as a `dict` before passing it during the environment `robosuite.make(...)` call. A brief example script showing how to import a custom controller configuration is shown below.\n",
- "\n",
- "```python\n",
- "import robosuite as suite\n",
- "from robosuite import load_part_controller_config\n",
- "\n",
- "# Path to config file\n",
- "controller_fpath = \"/your/custom/config/filepath/here/filename.json\"\n",
- "\n",
- "# Import the file as a dict\n",
- "config = load_part_controller_config(custom_fpath=controller_fpath)\n",
- "\n",
- "# Create environment\n",
- "env = suite.make(\"Lift\", robots=\"Panda\", controller_configs=config, ... )\n",
- "```"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.5.4"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/docs/modules/controllers.rst b/docs/modules/controllers.rst
index 3217449c8a..47ba76c3be 100644
--- a/docs/modules/controllers.rst
+++ b/docs/modules/controllers.rst
@@ -4,17 +4,37 @@ Controllers
Composite Controllers
---------------------
-Basic
+Robosuite's composite controllers assume that a robot consists of multiple "body parts", such as arms, torso, head, base, and legs, and that each body part has
+a "body part" controller (e.g., ``OSC_POSE``, ``JOINT_POSITION``). The composite controller orchestrates these body part controllers.
+Composite controllers are controllers that are composed of multiple body-part controllers.
+They are used to control the entire robot, including all of its parts.
+
+When an action vector is commanded to the robot, the action will be split into multiple body-part actions, each of which will be sent to the corresponding body-part
+controller. To understand the action split, use the function ``robosuite.robots.robot.print_action_info()``.
+To create the action easily, we also provide a helper function ``robosuite.robots.robot.create_action_vector()`` which takes the action dictionary as
+inputs and returns the action vector with correct dimensions. For controller actions whose input dimensions do not match the robot's degrees of freedom,
+you need to write your own ``create_action_vector()`` function inside the custom composite controller so that the robot's function can retrieve the information properly.
+
+**Basic**
******
+The "Basic" composite controller consists of individual part controllers that operate independently to control various parts of the robot, such as arms, torso, head, base, and legs.
+Each part can be assigned a specific controller type (e.g., ``OSC_POSE``, ``JOINT_POSITION``) depending on the desired control behavior for that part.
+For example, arms may use ``OSC_POSE`` for precise end-effector control, while the base may use ``JOINT_VELOCITY`` for movement across the ground.
-WholeBody IK
+
+**WholeBodyIK**
*************
+The "WholeBodyIK" composite controller takes in end effector targets, and converts them into joint angle targets for the corresponding body parts' joints.
+
-ThirdParty Controllers
+**Third-party Controllers**
***********************
+Third-party controllers integrate custom or external control algorithms into robosuite. Examples include https://github.com/kevinzakka/mink. We provide
+an example of adding a third-party controller in https://robosuite.ai/docs/tutorials/add_controller.html.
+
Workflow of Loading Configs
****************************
diff --git a/docs/modules/devices.md b/docs/modules/devices.md
index a496f8a5f4..76c84edfb5 100644
--- a/docs/modules/devices.md
+++ b/docs/modules/devices.md
@@ -1,6 +1,6 @@
# I/O Devices
-Devices are used to read user input and teleoperate simulated robots in real-time. This is achieved by either using a keyboard or a [SpaceMouse](https://www.3dconnexion.com/spacemouse_compact/en/), and whose teleoperation capabilities can be demonstrated with the [demo_device_control.py](../demos.html#teleoperation) script. More generally, we support any interface that implements the [Device](../simulation/device) abstract base class. In order to support your own custom device, simply subclass this base class and implement the required methods.
+Devices are used to read user input and teleoperate simulated robots in real-time. This is achieved by either using a keyboard, a [SpaceMouse](https://www.3dconnexion.com/spacemouse_compact/en/) or a [DualSense](https://www.playstation.com/en-us/accessories/dualsense-wireless-controller/) joystick, and whose teleoperation capabilities can be demonstrated with the [demo_device_control.py](../demos.html#teleoperation) script. More generally, we support any interface that implements the [Device](../simulation/device) abstract base class. In order to support your own custom device, simply subclass this base class and implement the required methods.
## Keyboard
@@ -10,16 +10,20 @@ We support keyboard input through the OpenCV2 window created by the mujoco rende
Note that the rendering window must be active for these commands to work.
-| Keys | Command |
-| :------- | :--------------------------------- |
-| q | reset simulation |
-| spacebar | toggle gripper (open/close) |
-| w-a-s-d | move arm horizontally in x-y plane |
-| r-f | move arm vertically |
-| z-x | rotate arm about x-axis |
-| t-g | rotate arm about y-axis |
-| c-v | rotate arm about z-axis |
-| ESC | quit |
+| Keys | Command |
+| :------------------ | :----------------------------------------- |
+| Ctrl+q | reset simulation |
+| spacebar | toggle gripper (open/close) |
+| up-right-down-left | move horizontally in x-y plane |
+| .-; | move vertically |
+| o-p | rotate (yaw) |
+| y-h | rotate (pitch) |
+| e-r | rotate (roll) |
+| b | toggle arm/base mode (if applicable) |
+| s | switch active arm (if multi-armed robot) |
+| = | switch active robot (if multi-robot env) |
+| Ctrl+C | quit |
+
## 3Dconnexion SpaceMouse
@@ -34,4 +38,52 @@ We support the use of a [SpaceMouse](https://www.3dconnexion.com/spacemouse_comp
| Move mouse laterally | move arm horizontally in x-y plane |
| Move mouse vertically | move arm vertically |
| Twist mouse about an axis | rotate arm about a corresponding axis |
-| ESC (keyboard) | quit |
+| b | toggle arm/base mode (if applicable) |
+| s | switch active arm (if multi-armed robot) |
+| = | switch active robot (if multi-robot environment) |
+| Ctrl+C (keyboard) | quit |
+
+## Sony DualSense
+
+We support the use of a [Sony DualSense](https://www.playstation.com/en-us/accessories/dualsense-wireless-controller/) as well.
+
+**Sony DualSense controls**
+
+| Control | Command |
+| :--------------------------- | :------------------------------------ |
+| Square button | reset simulation |
+| Circle button (hold) | close gripper |
+| Move LX/LY Stick | move arm horizontally in x-y plane |
+| Press L2 Trigger with or without L1 button | move arm vertically |
+| Move RX/RY Stick | rotate arm about x/y axis (roll/pitch) |
+| Press R2 Trigger with or without R1 button | rotate arm about z axis (yaw) |
+| Triangle button | toggle arm/base mode (if applicable) |
+| Left/Right Direction Pad | switch active arm (if multi-armed robot) |
+| Up/Down Direction Pad | switch active robot (if multi-robot environment) |
+| Ctrl+C (keyboard) | quit |
+
+## Mujoco GUI Device
+
+To use the Mujoco GUI device for teleoperation, follow these steps:
+
+1. Set renderer as `"mjviewer"`. For example:
+
+```python
+env = suite.make(
+ **options,
+ renderer="mjviewer",
+ has_renderer=True,
+ has_offscreen_renderer=False,
+ ignore_done=True,
+ use_camera_obs=False,
+)
+```
+
+Note: if using Mac, please use `mjpython` instead of `python`. For example:
+
+```mjpython robosuite/scripts/collect_human_demonstrations.py --environment Lift --robots Panda --device mjgui --camera frontview --controller WHOLE_BODY_IK```
+
+2. Double click on a mocap body to select a body to drag, then:
+
+On Linux: `Ctrl` + right click to drag the body's position. `Ctrl` + left click to control the body's orientation.
+On Mac: `fn` + `Ctrl` + right click.
diff --git a/docs/modules/environments.md b/docs/modules/environments.md
index 26bbe54179..50583c32e9 100644
--- a/docs/modules/environments.md
+++ b/docs/modules/environments.md
@@ -10,17 +10,17 @@ Environments are created by calling `robosuite.make` with the name of the task a
```python
import robosuite
-from robosuite.controllers import load_part_controller_config
+from robosuite.controllers import load_composite_controller_config
-# load default controller parameters for Operational Space Control (OSC)
-controller_config = load_part_controller_config(default_controller="OSC_POSE")
+# BASIC controller: arms controlled using OSC, mobile base (if present) using JOINT_VELOCITY, other parts controlled using JOINT_POSITION
+controller_config = load_composite_controller_config(controller="BASIC")
# create an environment to visualize on-screen
env = robosuite.make(
"TwoArmLift",
robots=["Sawyer", "Panda"], # load a Sawyer robot and a Panda robot
gripper_types="default", # use default grippers per robot arm
- controller_configs=controller_config, # each arm is controlled using OSC
+ controller_configs=controller_config, # arms controlled via OSC, other parts via JOINT_POSITION/JOINT_VELOCITY
env_configuration="opposed", # (two-arm envs only) arms face each other
has_renderer=True, # on-screen rendering
render_camera="frontview", # visualize the "frontview" camera
@@ -36,7 +36,7 @@ env = robosuite.make(
"TwoArmLift",
robots=["Sawyer", "Panda"], # load a Sawyer robot and a Panda robot
gripper_types="default", # use default grippers per robot arm
- controller_configs=controller_config, # each arm is controlled using OSC
+ controller_configs=controller_config, # arms controlled via OSC, other parts via JOINT_POSITION/JOINT_VELOCITY
env_configuration="opposed", # (two-arm envs only) arms face each other
has_renderer=False, # no on-screen rendering
has_offscreen_renderer=False, # no off-screen rendering
@@ -52,7 +52,7 @@ env = robosuite.make(
"TwoArmLift",
robots=["Sawyer", "Panda"], # load a Sawyer robot and a Panda robot
gripper_types="default", # use default grippers per robot arm
- controller_configs=controller_config, # each arm is controlled using OSC
+ controller_configs=controller_config, # arms controlled via OSC, other parts via JOINT_POSITION/JOINT_VELOCITY
env_configuration="opposed", # (two-arm envs only) arms face each other
has_renderer=False, # no on-screen rendering
has_offscreen_renderer=True, # off-screen rendering needed for image obs
@@ -73,7 +73,7 @@ We provide a few additional details on a few keyword arguments below to highligh
- `robots` : this argument can be used to easily instantiate tasks with different robot arms. For example, we could change the task to use two "Jaco" robots by passing `robots=["Jaco", "Jaco"]`. Once the environment is initialized, these robots (as captured by the [Robot](../simulation/robot.html#robot) class) can be accessed via the `robots` array attribute within the environment, i.e.: `env.robots[i]` for the `ith` robot arm in the environment.
- `gripper_types` : this argument can be used to easily swap out different grippers for each robot arm. For example, suppose we want to swap the default grippers for the arms in the example above. We could just pass `gripper_types=["PandaGripper", "RethinkGripper"]` to achieve this. Note that a single type can also be used to automatically broadcast the same gripper type across all arms.
-- `controller_configs` : this argument can be used to easily replace the action space for each robot arm. For example, if we would like to control the arm using joint velocities instead of OSC, we could use `load_controller_config(default_controller="JOINT_VELOCITY")` in the example above. Similar to `gripper_types` this value can either be per-arm specific or a single configuration to broadcast to all robot arms.
+- `controller_configs` : this argument can be used to easily replace the action space for each robot. For example, if we would like to control the robot using IK instead of OSC, we could use `load_composite_controller_config(controller="WHOLE_BODY_IK")` in the example above.
- `env_configuration` : this argument is mainly used for two-arm tasks to easily configure how the robots are oriented with respect to one another. For example, in the `TwoArmLift` environment, we could pass `env_configuration="parallel"` instead so that the robot arms are located next to each other, instead of opposite each other
- `placement_initializer` : this argument is optional, but can be used to specify a custom `ObjectPositionSampler` to override the default start state distribution for Mujoco objects. Samplers are responsible for sampling a set of valid, non-colliding placements for all of the objects in the scene at the start of each episode (e.g. when `env.reset()` is called).
diff --git a/docs/modules/renderers.md b/docs/modules/renderers.md
index 4fc625d0ba..85aea0eab8 100644
--- a/docs/modules/renderers.md
+++ b/docs/modules/renderers.md
@@ -1,6 +1,6 @@
# Renderers
-[Renderers](../source/robosuite.renderers) are used to visualize the simulation, and can be used either in on-screen mode or headless (off-screen) mode. Renderers are also responsible for generating image-based observations that are returned from a given environment, and compute virtual images of the environment based on the properties defined in the cameras.
+[Renderers](../source/robosuite.renderers) are used to visualize the simulation and can be used either in on-screen mode or headless (off-screen) mode. Renderers are also responsible for generating image-based observations that are returned from a given environment, and compute virtual images of the environment based on the properties defined in the cameras.
Currently, the following ground-truth vision modalities are supported by the MuJoCo renderer:
@@ -9,10 +9,66 @@ Currently, the following ground-truth vision modalities are supported by the MuJ
- **Segmentation**: 1-channel frames with pixel values corresponding to integer IDs for various objects. Segmentation can
occur by class, instance, or geom, and is set during environment construction with the `camera_segmentations` argument.
-Additional modalities are supported by a subset of the renderers. In **robosuite**, the user has the following rendering options:
+**robosuite** presents the following rendering options:
-
+
-## MuJoCo
+## MuJoCo Default Renderer
MuJoCo exposes users to an OpenGL context supported by [mujoco](https://mujoco.readthedocs.io/en/latest/python.html#rendering). Based on [OpenGL](https://www.opengl.org/), our assets and environment definitions have been tuned to look good with this renderer. The rendered frames can be displayed in a window with [OpenCV's imshow](https://pythonexamples.org/python-opencv-imshow/).
+
+
+
+## Isaac Rendering
+
+Users are also able to render using photorealistic methods through Isaac Sim. Specifically, users are able to choose between two rendering modes: ray tracing and path tracing. For more information about Isaac Sim rendering options, please visit [here](https://docs.omniverse.nvidia.com/materials-and-rendering/latest/rtx-renderer.html). Isaac renderers are only available to those who are running on a Linux or Windows machine.
+
+To install Isaac on your local system, please follow the instructions listed [here](https://isaac-sim.github.io/IsaacLab/main/source/setup/installation/pip_installation.html). Make sure to follow instructions to install both Isaac Sim and Isaac Lab.
+
+### Ray tracing
+
+
+Ray tracing can be performed in real time. We are currently working on enhancing the rendering pipeline to support an online viewer with ray tracing capabilities.
+
+### Path tracing
+
+
+Path tracing typically offers higher quality and is ideal for offline learning. If you have the time to collect data and plan to train algorithms using offline data, we recommend using path tracing for its photorealistic results.
+
+### Basic usage
+
+Once all dependencies for Isaac rendering have been installed, users can run the `robosuite/scripts/render_dataset_with_omniverse.py` script to render previously collected demonstrations using either ray tracing or path tracing. Below we highlight the arguments that can be passed into the script.
+
+- **dataset**: Path to hdf5 dataset with the demonstrations to render.
+- **ds_format**: Dataset format (options include `robosuite` and `robomimic` depending on if the dataset was collected using robosuite or robomimic, respectively).
+- **episode**: Episode/demonstration to render. If no episode is provided, all demonstrations will be rendered.
+- **output_directory**: Directory to store outputs from Isaac rendering and USD generation.
+- **cameras**: List of cameras to render images. Cameras must be defined in robosuite.
+- **width**: Width of the rendered output.
+- **height**: Height of the rendered output.
+- **renderer**: Renderer mode to use (options include `RayTracedLighting` or `PathTracing`).
+- **save_video**: Whether to save the output renderings as a video.
+- **online**: Enables online rendering and will not save the USD for future rendering offline.
+- **skip_frames**: Renders every nth frame.
+- **hide_sites**: Hides all sites in the scene.
+- **reload_model**: Reloads the model from the Mujoco XML file.
+- **keep_models**: List of names of models to keep from the original Mujoco XML file.
+- **rgb**: Render with the RGB modality. If no other modality is selected, we default to rendering with RGB.
+- **normals**: Render with normals.
+- **semantic_segmentation**: Render with semantic segmentation.
+
+Here is an example command to render a video of a demonstration using ray tracing with the RGB and normal modalities.
+
+```bash
+$ python robosuite/scripts/render_dataset_with_omniverse.py --dataset /path/to/demo.hdf5 --ds_format robosuite --episode 1 --camera agentview frontview --width 1920 --height 1080 --renderer RayTracedLighting --save_video --hide_sites --rgb --normals
+```
+
+### Rendering Speed
+
+Below, we present a table showing the estimated frames per second when using these renderers. Note that the exact speed of rendering might depend on your machine and scene size. Larger scenes may take longer to render. Additionally, changing renderer inputs such as samples per pixel (spp) or max bounces might affect rendering speeds. The values below are estimates using the `Lift` task with an NVIDIA GeForce RTX 4090. We use an spp of 64 when rendering with path tracing.
+
+| Renderer | Estimated FPS |
+|----------------|---------------|
+| MuJoCo | 3500 |
+| Ray Tracing | 58 |
+| Path Tracing | 2.8 |
diff --git a/docs/modules/robots.md b/docs/modules/robots.md
deleted file mode 100644
index c0e9692828..0000000000
--- a/docs/modules/robots.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Robots
-
-
-
-**Robots** are a key component in **robosuite**, and serve as the embodiment of a given agent as well as the central interaction point within an environment and key interface to MuJoCo for the robot-related state and control. **robosuite** captures this level of abstraction with the [Robot](../simulation/robot)-based classes, with support for both single-armed and bimanual variations. In turn, the Robot class is centrally defined by a [RobotModel](../modeling/robot_model), [RobotBaseModel](../modeling/robot_model.html#base-model), and [Controller(s)](../simulation/controller). Subclasses of the `RobotModel` class may also include additional models as well; for example, the [ManipulatorModel](../modeling/robot_model.html#manipulator-model) class also includes [GripperModel(s)](../modeling/robot_model.html#gripper-model) (with no gripper being represented by a dummy class).
-
-The high-level features of **robosuite**'s robots are described as follows:
-
-* **Diverse and Realistic Models**: **robosuite** provides models for 8 commercially-available manipulator robots (including the bimanual Baxter robot), 7 grippers (including the popular Robotiq 140 / 85 models), and 6 controllers, with model properties either taken directly from the company website or raw spec sheets.
-
-* **Modularized Support**: Robots are designed to be plug-n-play -- any combinations of robots, models, and controllers can be used, assuming the given environment is intended for the desired robot configuration. Because each robot is assigned a unique ID number, multiple instances of identical robots can be instantiated within the simulation without error.
-
-* **Self-Enclosed Abstraction**: For a given task and environment, any information relevant to the specific robot instance can be found within the properties and methods within that instance. This means that each robot is responsible for directly setting its initial state within the simulation at the start of each episode, and also directly controls the robot in simulation via torques outputted by its controller's transformed actions.
-
-
-## Usage
-Below, we discuss the usage and functionality of the robots over the course of its program lifetime.
-
-#### Initialization
-During environment creation (`suite.make(...)`), individual robots are both instantiated and initialized. The desired RobotModel, MountModel, and Controller(s) (where multiple and / or additional models may be specified, e.g. for manipulator bimanual robots) are loaded into each robot, with the models being passed into the environment to compose the final MuJoCo simulation object. Each robot is then set to its initial state.
-
-#### Runtime
-During a given simulation episode (each `env.step(...)` call), the environment will receive a set of actions and distribute them accordingly to each robot, according to their respective action spaces. Each robot then converts these actions into low-level torques via their respective controllers, and directly execute these torques in the simulation. At the conclusion of the environment step, each robot will pass its set of robot-specific observations to the environment, which will then concatenate and append additional task-level observations before passing them as output from the `env.step(...)` call.
-
-#### Callables
-At any given time, each robot has a set of `properties` whose real-time values can be accessed at any time. These include specifications for a given robot, such as its DoF, action dimension, and torque limits, as well as proprioceptive values, such as its joint positions and velocities. Additionally, if the robot is enabled with any sensors, those readings can also be polled as well. A full list of robot properties can be found in the [Robots API](../simulation/robot) section.
-
-
-## Models
-**robosuite** is designed to be generalizable to multiple robotic domains. The current release focuses on manipulator robots. For adding new robots, we provide a [rudimentary guide](https://docs.google.com/document/d/1bSUKkpjmbKqWyV5Oc7_4VL4FGKAQZx8aWm_nvlmTVmE/edit?usp=sharing) on how to import raw Robot and Gripper models (based on a URDF source file) into robosuite.
-
-### Manipulators
-**robosuite** currently supports seven commercially-available manipulator robot models. We briefly describe each individual model along with its features below:
-
-#### Panda
-
-[Panda](https://www.franka.de/technology) is a 7-DoF and relatively new robot model produced by Franka Emika, and boasts high positional accuracy and repeatability. A common choice for both simulated and real-robot research, we provide a substantial set of [benchmarking](../algorithms/benchmarking) experiments using this robot. The default gripper for this robot is the `PandaGripper`, a parallel-jaw gripper equipped with two small finger pads, that comes shipped with the robot arm.
-
-#### Sawyer
-
-[Sawyer](https://www.rethinkrobotics.com/sawyer) is Rethink Robotic's 7-DoF single-arm robot, which also features an additional 8th joint (inactive and disabled by default in **robosuite**) for swiveling its display monitor. Along with Panda, Sawyer serves as the second testing robot for our set of benchmarking experiments. Sawyer's default `RethinkGripper` model is a parallel-jaw gripper with long fingers and useful for grasping a variety of objects.
-
-#### LBR IIWA 7
-
-[IIWA](https://www.kuka.com/en-us/products/robotics-systems/industrial-robots/lbr-iiwa) is one of KUKA's industrial-grade 7-DoF robots, and is equipped with the strongest actuators of the group, with its per-joint torque limits exceeding nearly all the other models in **robosuite** by over twofold! By default, IIWA is equipped with the `Robotiq140Gripper`, [Robotiq's 140mm variation](https://robotiq.com/products/2f85-140-adaptive-robot-gripper) of their multi-purpose two finger gripper models.
-
-#### Jaco
-
-[Jaco](https://www.kinovarobotics.com/en/products/assistive-technologies/kinova-jaco-assistive-robotic-arm) is a popular sleek 7-DoF robot produced by Kinova Robotics and intended for human assistive applications. As such, it is relatively weak in terms of max torque capabilities. Jaco comes equipped with the `JacoThreeFingerGripper` by default, a three-pronged gripper with multi-jointed fingers.
-
-#### Kinova Gen3
-
-[Kinova3](https://www.kinovarobotics.com/en/products/gen3-robot) is Kinova's newest 7-DoF robot, with integrated sensor modules and interfaces designed for research-oriented applications. It is marginally stronger than its Jaco counterpart, and is equipped with the `Robotiq85Gripper`, [Robotiq's 85mm variation](https://robotiq.com/products/2f85-140-adaptive-robot-gripper) of their multi-purpose two finger gripper models.
-
-#### UR5e
-
-[UR5e](https://www.universal-robots.com/products/ur5-robot/) is Universal Robot's newest update to the UR5 line, and is a 6-DoF robot intended for collaborative applications. This newest model boasts an improved footprint and embedded force-torque sensor in its end effector. This arm also uses the `Robotiq85Gripper` by default in **robosuite**.
-
-#### Baxter
-
-[Baxter](http://collabrobots.com/about-baxter-robot/) is an older but classic bimanual robot originally produced by Rethink Robotics but now owned by CoThink Robotics, and is equipped with two 7-DoF arms as well as an addition joint for controlling its swiveling display screen (inactive and disabled by default in **robosuite**). Each arm can be controlled independently in, and is the single multi-armed model currently supported in **robosuite**. Each arm is equipped with a `RethinkGripper` by default.
-
-## Robot Bases
-
-We provide a set of robot bases that can be added to the `RobotModel` via the `add_base(base: RobotBaseModel)` method. These bases can be static (e.g. `MountModel`) or movable (e.g. `MobileBaseModel` or `LegBaseModel`). This enables composability of robots with different bases, such as a mobile manipulator robot or a quadruped robot. Examples are shown in the [compositional.py](robosuite/models/robots/compositional.py).
-
-
-## Robosuite Models
-
-Other than the core robot models, we also provide more models for the `robosuite` ecosystem. Checkout this [robosuite_models](https://github.com/ARISE-Initiative/robosuite_models) repository for more details. This can also be installed via `pip install robosuite-models`.
diff --git a/docs/modules/robots.rst b/docs/modules/robots.rst
index 565a9d633f..70a20f5c66 100644
--- a/docs/modules/robots.rst
+++ b/docs/modules/robots.rst
@@ -1,25 +1,25 @@
Robots
=======
-.. figure:: ../images/robot_module.png
+.. figure:: ../images/robots_module_v15.png
-**Robots** are a key component in **robosuite**, and serve as the embodiment of a given agent as well as the central interaction point within an environment and key interface to MuJoCo for the robot-related state and control. **robosuite** captures this level of abstraction with the `Robot <../simulation/robot.html>`_-based classes, with support for both single-armed and bimanual variations. In turn, the Robot class is centrally defined by a `RobotModel <../modeling/robot_model.html>`_, `MountModel <../modeling/robot_model.html#mount-model>`_, and `Controller(s) <../simulation/controller.html>`_. Subclasses of the :class:`RobotModel` class may also include additional models as well; for example, the `ManipulatorModel <../modeling/robot_model.html#manipulator-model>`_ class also includes `GripperModel(s) <../modeling/robot_model.html#gripper-model>`_ (with no gripper being represented by a dummy class).
+**Robots** are a key component in **robosuite**, and serve as the embodiment of a given agent as well as the central interaction point within an environment and key interface to MuJoCo for the robot-related state and control. **robosuite** captures this level of abstraction with the `Robot <../simulation/robot>`_-based classes, with support for both single-armed and bimanual variations, as well as robots with mobile manipulation capabilities, including both legged and wheeled variants. In turn, the Robot class is centrally defined by a `RobotModel <../modeling/robot_model>`_, `RobotBaseModel <../modeling/robot_model.html#robot-base-model>`_, `GripperModel <../modeling/robot_model.html#gripper-model>`_, and `Controller(s) <../simulation/controller>`_. Subclasses of the ``RobotModel`` class may also include additional models as well; for example, the `ManipulatorModel <../modeling/robot_model.html#manipulator-model>`_ class also includes `GripperModel(s) <../modeling/robot_model.html#gripper-model>`_ (with no gripper being represented by a dummy class).
The high-level features of **robosuite**'s robots are described as follows:
-* **Diverse and Realistic Models**: **robosuite** provides models for 8 commercially-available manipulator robots (including the bimanual Baxter robot), 7 grippers (including the popular Robotiq 140 / 85 models), and 6 controllers, with model properties either taken directly from the company website or raw spec sheets.
+* **Diverse and Realistic Models**: **robosuite** provides models for 10 commercially-available robots (including the humanoid GR1 Robot), 9 grippers (including the inspire dexterous hand model), 4 bases (including the Omron wheeled mobile base), and 6 body-part controllers, with model properties either taken directly from official product documentation or raw spec sheets. An additional 8 robots, 8 grippers, and 3 bases can be installed separately from the `robosuite-models <https://github.com/ARISE-Initiative/robosuite_models>`_ repository.
* **Modularized Support**: Robots are designed to be plug-n-play -- any combinations of robots, models, and controllers can be used, assuming the given environment is intended for the desired robot configuration. Because each robot is assigned a unique ID number, multiple instances of identical robots can be instantiated within the simulation without error.
* **Self-Enclosed Abstraction**: For a given task and environment, any information relevant to the specific robot instance can be found within the properties and methods within that instance. This means that each robot is responsible for directly setting its initial state within the simulation at the start of each episode, and also directly controls the robot in simulation via torques outputted by its controller's transformed actions.
Usage
-======
+=====
Below, we discuss the usage and functionality of the robots over the course of its program lifetime.
Initialization
--------------
-During environment creation (``suite.make(...)``), individual robots are both instantiated and initialized. The desired RobotModel, MountModel, and Controller(s) (where multiple and/or additional models may be specified, e.g. for manipulator bimanual robots) are loaded into each robot, with the models being passed into the environment to compose the final MuJoCo simulation object. Each robot is then set to its initial state.
+During environment creation (``suite.make(...)``), individual robots are both instantiated and initialized. The desired RobotModel, RobotBaseModel, and Controller(s) (where multiple and/or additional models may be specified, e.g. for manipulator bimanual robots) are loaded into each robot, with the models being passed into the environment to compose the final MuJoCo simulation object. Each robot is then set to its initial state.
Runtime
-------
@@ -35,130 +35,176 @@ Models
Manipulators
------------
-**robosuite** currently supports seven commercially-available manipulator robot models. We briefly describe each individual model along with its features below:
-
-
-Panda
-~~~~~
-.. image:: ../images/models/robot_model_Panda.png
- :alt: panda_robot
- :align: left
- :width: 50%
-
-- DoF: 7
-- Parallel jaw gripper
-- Fixed base
-
-Sawyer
-~~~~~~
-.. image:: ../images/models/robot_model_Sawyer.png
- :alt: sawyer_robot
- :align: left
- :width: 50%
-
-- DoF: 7
-- Parallel jaw gripper
-- Fixed base
-
-LBR IIWA 7
-~~~~~~~~~~
-.. image:: ../images/models/robot_model_IIWA.png
- :alt: iiwa_robot
- :align: left
- :width: 50%
-
-- DoF: 7
-- ...
-- ...
-
-Jaco
-~~~~
-.. image:: ../images/models/robot_model_Jaco.png
- :alt: jaco_robot
- :align: left
- :width: 50%
-
-- DoF: 7
-- ...
-- ...
-
-Kinova Gen3
-~~~~~~~~~~~
-.. image:: ../images/models/robot_model_Kinova3.png
- :alt: kinova3_robot
- :align: left
- :width: 50%
-
-
-
-- DoF: 7
-- Parallel jaw gripper
-- Fixed base
-
-UR5e
-~~~~
-.. image:: ../images/models/robot_model_UR5e.png
- :alt: ur5e_robot
- :align: left
- :width: 50%
-
-- DoF: 6
-- Parallel jaw gripper
-- Fixed base
-
-Baxter
-~~~~~~
-.. image:: ../images/models/robot_model_Baxter.png
- :alt: baxter_robot
- :align: left
- :width: 50%
-
-- DoF: 14
-- ...
-- ...
-
-Tiago
-~~~~~~
-.. image:: ...
- :alt: tiago_robot
- :align: left
- :width: 50%
-
-- DoF: ...
-- ...
-- ...
-
-GR1
-~~~~~~
-.. image:: ...
- :alt: gr1_robot
- :align: left
- :width: 50%
-
-- DoF: ...
-- ...
-- ...
+
+.. list-table::
+ :widths: 15 50 35
+ :header-rows: 1
+
+ * - Robot
+ - Image
+ - Description
+ * - **Panda**
+ - .. image:: ../images/models/robot_model_Panda_isaac.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 7
+ - **Default Gripper:** PandaGripper
+ - **Default Base:** RethinkMount
+ * - **Sawyer**
+ - .. image:: ../images/models/robot_model_Sawyer_isaac.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 7
+ - **Default Gripper:** RethinkGripper
+ - **Default Base:** RethinkMount
+ * - **IIWA**
+ - .. image:: ../images/models/robot_model_IIWA_isaac.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 7
+ - **Default Gripper:** Robotiq140Gripper
+ - **Default Base:** RethinkMount
+ * - **Jaco**
+ - .. image:: ../images/models/robot_model_Jaco_isaac.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 7
+ - **Default Gripper:** JacoThreeFingerGripper
+ - **Default Base:** RethinkMount
+ * - **Kinova3**
+ - .. image:: ../images/models/robot_model_Kinova3_isaac.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 7
+ - **Default Gripper:** Robotiq85Gripper
+ - **Default Base:** RethinkMount
+ * - **UR5e**
+ - .. image:: ../images/models/robot_model_UR5e_isaac.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 6
+ - **Default Gripper:** Robotiq85Gripper
+ - **Default Base:** RethinkMount
+ * - **Baxter**
+ - .. image:: ../images/models/robot_model_Baxter_isaac.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 14
+ - **Default Gripper:** RethinkGripper
+ - **Default Base:** RethinkMount
+ * - **GR1**
+ - .. image:: ../images/models/robot_model_GR1_isaac.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 24
+ - **Default Gripper:** InspireHands
+ - **Default Base:** NoActuationBase
+ - **Variants**: GR1FixedLowerBody, GR1FloatingBody, GR1ArmsOnly
+ * - **Spot**
+ - .. image:: ../images/models/robot_model_Spot_isaac.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 19
+ - **Default Gripper:** BDGripper
+ - **Default Base:** Spot
+ - **Variants**: SpotWithArmFloating
+ * - **Tiago**
+ - .. image:: ../images/models/robot_model_Tiago_isaac.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 20
+ - **Default Gripper:** Robotiq85Gripper
+ - **Default Base:** NullMobileBase
+
+Grippers
+--------
+
+.. list-table::
+ :widths: 20 45 35
+ :header-rows: 1
+
+ * - Gripper
+ - Image
+ - Description
+ * - **BD Gripper**
+ - .. image:: ../images/models/bd_gripper.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 1
+ * - **Inspire Hands**
+ - .. image:: ../images/models/inspire_hands.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 6
+ * - **Jaco Three Finger Gripper**
+ - .. image:: ../images/models/jaco_gripper.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 1 (3 for dexterous version)
+ * - **Panda Gripper**
+ - .. image:: ../images/models/panda_gripper.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 1
+ * - **Rethink Gripper**
+ - .. image:: ../images/models/rethink_gripper.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 1
+ * - **Robotiq 85 Gripper**
+ - .. image:: ../images/models/robotiq85_gripper.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 1
+ * - **Robotiq 140 Gripper**
+ - .. image:: ../images/models/robotiq140_gripper.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 1
+ * - **Robotiq Three Finger Gripper**
+ - .. image:: ../images/models/robotiq_three_gripper.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 1
+ * - **Wiping Gripper**
+ - .. image:: ../images/models/wiping_gripper.png
+ :width: 90%
+ :align: center
+ - - **DoF:** 0
+
+Bases
+-----
+
+.. list-table::
+ :widths: 20 45 35
+ :header-rows: 1
+
+ * - Base
+ - Image
+ - Description
+ * - **Rethink Mount**
+ - .. image:: ../images/models/rethink_base.png
+ :width: 90%
+ :align: center
+ - - **Type:** Fixed
+ * - **Rethink Minimal Mount**
+ - .. image:: ../images/models/rethink_minimal_base.png
+ :width: 90%
+ :align: center
+ - - **Type:** Fixed
+ * - **Omron Mobile Base**
+ - .. image:: ../images/models/omron_base.png
+ :width: 90%
+ :align: center
+ - - **Type:** Mobile
+ * - **Spot Base**
+ - .. image:: ../images/models/spot_base.png
+ :width: 90%
+ :align: center
+ - - **Type:** Legged
Create Your Own Robot
----------------------
-
-
-.. code-block:: python
-
- @register_robot_class("LeggedRobot")
- class GR1SchunkSVH(GR1):
- """
- Variant of GR1 robot with SchunkSVH hands.
- """
-
- @property
- def default_gripper(self):
- """
- Since this is bimanual robot, returns dict with `'right'`, `'left'` keywords corresponding to their respective
- values
+As of v1.5, users can create composite robots to match their specification. Specifically, arms, grippers, and bases can be swapped to create new robot configurations. We also provide several other robot models in an external repo. For more information, please refer to `here <https://github.com/ARISE-Initiative/robosuite_models>`_.
- Returns:
- dict: Dictionary containing arm-specific gripper names
- """
- return {"right": "SchunkSvhRightHand", "left": "SchunkSvhLeftHand"}
diff --git a/docs/overview.md b/docs/overview.md
index cb88ec7b16..a16859751d 100644
--- a/docs/overview.md
+++ b/docs/overview.md
@@ -18,7 +18,7 @@ This framework was originally developed in late 2017 by researchers in [Stanford
* **standardized tasks**: a set of standardized manipulation tasks of large diversity and varying complexity and RL benchmarking results for reproducible research;
* **procedural generation**: modular APIs for programmatically creating new environments and new tasks as combinations of robot models, arenas, and parameterized 3D objects. Check out our repo [robosuite_models](https://github.com/ARISE-Initiative/robosuite_models) for extra robot models tailored to robosuite.
* **robot controllers**: a selection of controller types to command the robots, such as joint-space velocity control, inverse kinematics control, operational space control, and whole body control;
-* **teleoperation devices**: a selection of teleoperation devices including keyboard, spacemouse and MuJoCo viewer drag-drop;
+* **teleoperation devices**: a selection of teleoperation devices including keyboard, spacemouse, dualsense and MuJoCo viewer drag-drop;
* **multi-modal sensors**: heterogeneous types of sensory signals, including low-level physical states, RGB cameras, depth maps, and proprioception;
* **human demonstrations**: utilities for collecting human demonstrations, replaying demonstration datasets, and leveraging demonstration data for learning. Check out our sister project [robomimic](https://arise-initiative.github.io/robomimic-web/);
* **photorealistic rendering**: integration with advanced graphics tools that provide real-time photorealistic renderings of simulated scenes, including support for NVIDIA Isaac Sim rendering.
@@ -28,7 +28,7 @@ Please cite [**robosuite**](https://robosuite.ai) if you use this framework in y
```bibtex
@inproceedings{robosuite2020,
title={robosuite: A Modular Simulation Framework and Benchmark for Robot Learning},
- author={Yuke Zhu and Josiah Wong and Ajay Mandlekar and Roberto Mart\'{i}n-Mart\'{i}n and Abhishek Joshi and Kevin Lin and Soroush Nasiriany and Yifeng Zhu},
+ author={Yuke Zhu and Josiah Wong and Ajay Mandlekar and Roberto Mart\'{i}n-Mart\'{i}n and Abhishek Joshi and Kevin Lin and Abhiram Maddukuri and Soroush Nasiriany and Yifeng Zhu},
booktitle={arXiv preprint arXiv:2009.12293},
year={2020}
}
diff --git a/docs/references.md b/docs/references.md
index e028366bc1..ecc31305d8 100644
--- a/docs/references.md
+++ b/docs/references.md
@@ -1,98 +1,4 @@
# Projects using robosuite
-A list of references of projects and papers that use **robosuite**. If you would like to add your work to this list, please send the paper information to Yuke Zhu ([yukez@cs.utexas.edu](mailto:yukez@cs.utexas.edu)).
+A list of references of projects and papers that use **robosuite** can be found [here](https://scholar.google.com/scholar?oi=bibs&hl=en&cites=10588857002863609969&as_sdt=5).
-## 2022
-
-- [Robot Learning on the Job: Human-in-the-Loop Autonomy and Learning During Deployment](https://arxiv.org/abs/2211.08416). Huihan Liu, Soroush Nasiriany, Lance Zhang, Zhiyao Bao, Yuke Zhu
-- [Geometric Impedance Control on SE(3) for Robotic Manipulators](https://arxiv.org/abs/2211.07945). Joohwan Seo, Nikhil Potu, Surya Prakash, Alexander Rose, Roberto Horowitz
-- [Guided Skill Learning and Abstraction for Long-Horizon Manipulation](https://arxiv.org/abs/2210.12631). Shuo Cheng, Danfei Xu
-- [VIOLA: Imitation Learning for Vision-Based Manipulation with Object Proposal Priors](https://arxiv.org/abs/2210.11339). Yifeng Zhu, Abhishek Joshi, Peter Stone, Yuke Zhu
-- [Monte Carlo Augmented Actor-Critic for Sparse Reward Deep Reinforcement Learning from Suboptimal Demonstrations](https://arxiv.org/abs/2210.07432). Albert Wilcox, Ashwin Balakrishna, Jules Dedieu, Wyame Benslimane, Daniel S. Brown, Ken Goldberg
-- [ASPiRe: Adaptive Skill Priors for Reinforcement Learning](https://arxiv.org/abs/2209.15205). Mengda Xu, Manuela Veloso, Shuran Song
-- [Active Predicting Coding: Brain-Inspired Reinforcement Learning for Sparse Reward Robotic Control Problems](https://arxiv.org/abs/2209.09174). Alexander Ororbia, Ankur Mali
-- [Spatial and Temporal Features Unified Self-Supervised Representation Learning Network](https://www.sciencedirect.com/science/article/pii/S0921889022001506). Rahul Choudhary, Rahee Walambe, Ketan Kotecha
-- [HERD: Continuous Human-to-Robot Evolution for Learning from Human Demonstration](https://openreview.net/forum?id=VHia4Cint7). Xingyu Liu, Deepak Pathak, Kris M. Kitani
-- [A Dual Representation Framework for Robot Learning with Human Guidance](https://openreview.net/forum?id=H6rr_CGzV9y). Ruohan Zhang, Dhruva Bansal, Yilun Hao, Ayano Hiranaka, Jialu Gao, Chen Wang, Roberto MartÃn-MartÃn, Li Fei-Fei, Jiajun Wu
-- [CompoSuite: A Compositional Reinforcement Learning Benchmark](https://arxiv.org/abs/2207.04136). Jorge A. Mendez, Marcel Hussing, Meghna Gummadi, Eric Eaton
-- [Causal Dynamics Learning for Task-Independent State Abstraction](https://arxiv.org/abs/2206.13452). Zizhao Wang, Xuesu Xiao, Zifan Xu, Yuke Zhu, Peter Stone
-- [Latent Policies for Adversarial Imitation Learning](https://arxiv.org/abs/2206.11299). Tianyu Wang, Nikhil Karnwal, Nikolay Atanasov
-- [Play it by Ear: Learning Skills amidst Occlusion through Audio-Visual Imitation Learning](https://arxiv.org/abs/2205.14850). Maximilian Du, Olivia Y. Lee, Suraj Nair, Chelsea Finn
-- [Visuotactile-RL: Learning Multimodal Manipulation Policies with Deep Reinforcement Learning](https://ieeexplore.ieee.org/abstract/document/9812019). Johanna Hansen, Francois Hogan, Dmitriy Rivkin, David Meger, Michael Jenkin, Gregory Dudek
-- [DreamingV2: Reinforcement Learning with Discrete World Models without Reconstruction](https://arxiv.org/abs/2203.00494). Masashi Okada, Tadahiro Taniguchi
-- [Ditto: Building Digital Twins of Articulated Objects from Interaction](https://arxiv.org/abs/2202.08227). Zhenyu Jiang, Cheng-Chun Hsu, Yuke Zhu
-- [A Ranking Game for Imitation Learning](https://arxiv.org/abs/2202.03481). Harshit Sikchi, Akanksha Saran, Wonjoon Goo, Scott Niekum
-- [Learning to Grasp the Ungraspable with Emergent Extrinsic Dexterity](https://arxiv.org/abs/2211.01500). Wenxuan Zhou, David Held
-- [Efficiently Learning Recoveries from Failures Under Partial Observability](https://arxiv.org/abs/2209.13605). Shivam Vats, Maxim Likhachev, Oliver Kroemer
-- [Learning Representations via a Robust Behavioral Metric for Deep Reinforcement Learning](https://openreview.net/forum?id=7YXXt9lRls). Jianda Chen, Sinno Pan
-- [Synthesizing Adversarial Visual Scenarios for Model-Based Robotic Control](https://openreview.net/forum?id=WJbw_C-pCox). Shubhankar Agarwal, Sandeep P. Chinchali
-
-## 2021
-
-- [Guided Imitation of Task and Motion Planning](https://arxiv.org/abs/2112.03386). Michael McDonald, Dylan Hadfield-Menell
-- [V-MAO: Generative Modeling for Multi-Arm Manipulation of Articulated Objects](https://arxiv.org/abs/2111.03987). Xingyu Liu, Kris M. Kitani
-- [Koopman Q-learning: Offline Reinforcement Learning via Symmetries of Dynamics](https://arxiv.org/abs/2111.01365). Matthias Weissenbacher, Samarth Sinha, Animesh Garg, Yoshinobu Kawahara
-- [Validate on Sim, Detect on Real -- Model Selection for Domain Randomization](https://arxiv.org/abs/2111.00765). Gal Leibovich, Guy Jacob, Shadi Endrawis, Gal Novik, Aviv Tamar
-- [Accelerating Robotic Reinforcement Learning via Parameterized Action Primitives](https://arxiv.org/abs/2110.15360). Murtaza Dalal, Deepak Pathak, Ruslan Salakhutdinov
-- [Towards More Generalizable One-shot Visual Imitation Learning](https://arxiv.org/abs/2110.13423). Zhao Mandi, Fangchen Liu, Kimin Lee, Pieter Abbeel
-- [Decentralized Multi-Agent Control of a Manipulator in Continuous Task Learning](https://www.mdpi.com/2076-3417/11/21/10227). Asad Ali Shahid, Jorge Said Vidal Sesin, Damjan Pecioski, Francesco Braghin, Dario Piga, Loris Roveda
-- [Augmenting Reinforcement Learning with Behavior Primitives for Diverse Manipulation Tasks](https://arxiv.org/abs/2110.03655). Soroush Nasiriany, Huihan Liu, Yuke Zhu
-- [Bottom-Up Skill Discovery from Unsegmented Demonstrations for Long-Horizon Robot Manipulation](https://arxiv.org/abs/2109.13841). Yifeng Zhu, Peter Stone, Yuke Zhu
-- [Lifelong Robotic Reinforcement Learning by Retaining Experiences](https://arxiv.org/abs/2109.09180). Annie Xie, Chelsea Finn
-- [ThriftyDAgger: Budget-Aware Novelty and Risk Gating for Interactive Imitation Learning](https://arxiv.org/abs/2109.08273). Ryan Hoque, Ashwin Balakrishna, Ellen Novoseller, Albert Wilcox, Daniel S. Brown, Ken Goldberg
-- [What Matters in Learning from Offline Human Demonstrations for Robot Manipulation](https://arxiv.org/abs/2108.03298). Ajay Mandlekar, Danfei Xu, Josiah Wong, Soroush Nasiriany, Chen Wang, Rohun Kulkarni, Li Fei-Fei, Silvio Savarese, Yuke Zhu, Roberto MartÃn-MartÃn
-- [Multi-Modal Mutual Information (MuMMI) Training for Robust Self-Supervised Deep Reinforcement Learning](https://arxiv.org/abs/2107.02339). Kaiqi Chen, Yong Lee, Harold Soh
-- [SECANT: Self-Expert Cloning for Zero-Shot Generalization of Visual Policies](https://arxiv.org/abs/2106.09678). Linxi Fan, Guanzhi Wang, De-An Huang, Zhiding Yu, Li Fei-Fei, Yuke Zhu, Anima Anandkumar
-- [What Can I Do Here? Learning New Skills by Imagining Visual Affordances](https://arxiv.org/abs/2106.00671). Alexander Khazatsky, Ashvin Nair, Daniel Jing, Sergey Levine
-- [Calibration-Free Monocular Vision-Based Robot Manipulations With Occlusion Awareness](https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9439456). Yongle Luo, Kun Dong, Lili Zhao, Zhiyong Sun, Erkang Cheng, Honglin Kan, Chao Zhou, Bo Song
-- [Learning a Skill-sequence-dependent Policy for Long-horizon Manipulation Tasks](https://arxiv.org/abs/2105.05484). Zhihao Li, Zhenglong Sun, Jionglong Su, Jiaming Zhang
-- [Efficient Self-Supervised Data Collection for Offline Robot Learning](https://arxiv.org/abs/2105.04607). Shadi Endrawis, Gal Leibovich, Guy Jacob, Gal Novik, Aviv Tamar
-- [Learning Visually Guided Latent Actions for Assistive Teleoperation](https://arxiv.org/abs/2105.00580). Siddharth Karamcheti, Albert J. Zhai, Dylan P. Losey, Dorsa Sadigh
-- [LASER: Learning a Latent Action Space for Efficient Reinforcement Learning](https://arxiv.org/abs/2103.15793). Arthur Allshire, Roberto MartÃn-MartÃn, Charles Lin, Shawn Manuel, Silvio Savarese, Animesh Garg
-- [S4RL: Surprisingly Simple Self-Supervision for Offline Reinforcement Learning](https://arxiv.org/abs/2103.06326). Samarth Sinha, Ajay Mandlekar, Animesh Garg
-- [Generalization Through Hand-Eye Coordination: An Action Space for Learning Spatially-Invariant Visuomotor Control](https://arxiv.org/abs/2103.00375). Chen Wang, Rui Wang, Ajay Mandlekar, Li Fei-Fei, Silvio Savarese, Danfei Xu
-- [Interpreting Contact Interactions to Overcome Failure in Robot Assembly Tasks](https://arxiv.org/abs/2101.02725). Peter A. Zachares, Michelle A. Lee, Wenzhao Lian, Jeannette Bohg
-- [Learning Contact-Rich Assembly Skills Using Residual Admittance Policy](https://ieeexplore.ieee.org/document/9636547). Oren Spector, Miriam Zacksenhouse
-- [Learning Multi-Arm Manipulation Through Collaborative Teleoperation](https://arxiv.org/abs/2012.06738). Albert Tung, Josiah Wong, Ajay Mandlekar, Roberto MartÃn-MartÃn, Yuke Zhu, Li Fei-Fei, Silvio Savarese
-- [OSCAR: Data-Driven Operational Space Control for Adaptive and Robust Robot Manipulation](https://arxiv.org/abs/2110.00704). Josiah Wong, Viktor Makoviychuk, Anima Anandkumar, Yuke Zhu
-- [OPIRL: Sample Efficient Off-Policy Inverse Reinforcement Learning via Distribution Matching](https://arxiv.org/abs/2109.04307). Hana Hoshino, Kei Ota, Asako Kanezaki, Rio Yokota
-- [RLDS: an Ecosystem to Generate, Share and Use Datasets in Reinforcement Learning](https://arxiv.org/abs/2111.02767). Sabela Ramos, Sertan Girgin, Léonard Hussenot, Damien Vincent, Hanna Yakubovich, Daniel Toyama, Anita Gergely, Piotr Stanczyk, Raphael Marinier, Jeremiah Harmsen, Olivier Pietquin, Nikola Momchev
-- [RMPs for Safe Impedance Control in Contact-Rich Manipulation](https://arxiv.org/abs/2109.12103). Seiji Shaw, Ben Abbatematteo, George Konidaris
-- [Learning Robotic Manipulation Skills Using an Adaptive Force-Impedance Action Space](https://arxiv.org/abs/2110.09904). Maximilian Ulmer, Elie Aljalbout, Sascha Schwarz, Sami Haddadin
-
-## 2020
-
-- [On the Impact of Gravity Compensation on Reinforcement Learning in Goal-Reaching Tasks for Robotic Manipulators](https://www.mdpi.com/2218-6581/10/1/46). Jonathan Fugal, Jihye Bae, Hasan A. Poonawala
-- [Learning Multi-Arm Manipulation Through Collaborative Teleoperation](https://arxiv.org/abs/2012.06738). Albert Tung, Josiah Wong, Ajay Mandlekar, Roberto MartÃn-MartÃn, Yuke Zhu, Li Fei-Fei, Silvio Savarese
-- [Human-in-the-Loop Imitation Learning using Remote Teleoperation](https://arxiv.org/abs/2012.06733). Ajay Mandlekar, Danfei Xu, Roberto MartÃn-MartÃn, Yuke Zhu, Li Fei-Fei, Silvio Savarese
-- [Transformers for One-Shot Visual Imitation](https://arxiv.org/abs/2011.05970). Sudeep Dasari, Abhinav Gupta
-- [Conservative Safety Critics for Exploration](https://arxiv.org/abs/2010.14497). Homanga Bharadhwaj, Aviral Kumar, Nicholas Rhinehart, Sergey Levine, Florian Shkurti, Animesh Garg
-- [Continual Model-Based Reinforcement Learning with Hypernetworks](https://arxiv.org/abs/2009.11997). Yizhou Huang, Kevin Xie, Homanga Bharadhwaj, Florian Shkurti
-- [Hierarchical 6-DoF Grasping with Approaching Direction Selection](http://rllab.snu.ac.kr/publications/papers/2020_icra_gads.pdf). Yunho Choi, Hogun Kee, Kyungjae Lee, JaeGoo Choy, Junhong Min, Sohee Lee, Songhwai Oh
-- [Residual Learning from Demonstration](https://arxiv.org/abs/2008.07682). Todor Davchev, Kevin Sebastian Luck, Michael Burke, Franziska Meier, Stefan Schaal, Subramanian Ramamoorthy
-- [Crossing the Gap: A Deep Dive into Zero-Shot Sim-to-Real Transfer for Dynamics](https://arxiv.org/abs/2008.06686). Eugene Valassakis, Zihan Ding, Edward Johns
-- [Deep Reinforcement Learning for Contact-Rich Skills Using Compliant Movement Primitives](https://arxiv.org/abs/2008.13223). Oren Spector, Miriam Zacksenhouse
-- [Learning Robot Skills with Temporal Variational Inference](https://arxiv.org/abs/2006.16232). Tanmay Shankar, Abhinav Gupta
-- [Long-Horizon Visual Planning with Goal-Conditioned Hierarchical Predictors](https://arxiv.org/abs/2006.13205). Karl Pertsch, Oleh Rybkin, Frederik Ebert, Chelsea Finn, Dinesh Jayaraman, Sergey Levine
-- [Variational Imitation Learning with Diverse-quality Demonstrations](https://proceedings.icml.cc/static/paper_files/icml/2020/577-Paper.pdf). Voot Tangkaratt, Bo Han, Mohammad Emtiyaz Khan, Masashi Sugiyama
-- [Balance Between Efficient and Effective Learning: Dense2Sparse Reward Shaping for Robot Manipulation with Environment Uncertainty](https://arxiv.org/abs/2003.02740). Yongle Luo, Kun Dong, Lili Zhao, Zhiyong Sun, Chao Zhou, Bo Song
-- [Intrinsic Motivation for Encouraging Synergistic Behavior](https://arxiv.org/abs/2002.05189). Rohan Chitnis, Shubham Tulsiani, Saurabh Gupta, Abhinav Gupta
-- [Learning Continuous Control Actions for Robotic Grasping with Reinforcement Learning](https://ieeexplore.ieee.org/document/9282951). Asad Ali Shahid, Loris Roveda, Dario Piga, Francesco Braghin
-- [Combining Reinforcement Learning and Rule-based Method to Manipulate Objects in Clutter](https://ieeexplore.ieee.org/document/9207153). Yiwen Chen, Zhaojie Ju, Chenguang Yang
-- [Research on Complex Robot Manipulation Tasks Based on Hindsight Trust Region Policy Optimization](https://ieeexplore.ieee.org/document/9327251). Deyu Yang, Hanbo Zhang, Xuguang Lan
-
-## 2019
-
-- [To Follow or not to Follow: Selective Imitation Learning from Observations](https://arxiv.org/abs/1912.07670). Youngwoon Lee, Edward S. Hu, Zhengyu Yang, Joseph J. Lim
-- [IKEA Furniture Assembly Environment for Long-Horizon Complex Manipulation Tasks](https://arxiv.org/abs/1911.07246). Youngwoon Lee, Edward S. Hu, Zhengyu Yang, Alex Yin, Joseph J. Lim
-- [IRIS: Implicit Reinforcement without Interaction at Scale for Learning Control from Offline Robot Manipulation Data](https://arxiv.org/abs/1911.05321). Ajay Mandlekar, Fabio Ramos, Byron Boots, Silvio Savarese, Li Fei-Fei, Animesh Garg, Dieter Fox
-- [Network Randomization: A Simple Technique for Generalization in Deep Reinforcement Learning](https://arxiv.org/abs/1910.05396). Kimin Lee, Kibok Lee, Jinwoo Shin, Honglak Lee
-- [Efficient Bimanual Manipulation Using Learned Task Schemas](https://arxiv.org/abs/1909.13874). Rohan Chitnis, Shubham Tulsiani, Saurabh Gupta, Abhinav Gupta
-- [SURREAL-System: Fully-Integrated Stack for Distributed Deep Reinforcement Learning](https://arxiv.org/abs/1909.12989). Linxi Fan\*, Yuke Zhu\*, Jiren Zhu, Zihua Liu, Orien Zeng, Anchit Gupta, Joan Creus-Costa, Silvio Savarese, Li Fei-Fei
-- [Memory Based Trajectory-conditioned Policies for Learning from Sparse Rewards](https://arxiv.org/abs/1907.10247). Yijie Guo, Jongwook Choi, Marcin Moczulski, Shengyu Feng, Samy Bengio, Mohammad Norouzi, Honglak Lee
-- [Variable Impedance Control in End-Effector Space: An Action Space for Reinforcement Learning in Contact-Rich Tasks](https://arxiv.org/abs/1906.08880). Roberto MartÃn-MartÃn, Michelle A. Lee, Rachel Gardner, Silvio Savarese, Jeannette Bohg, Animesh Garg
-
-## 2018
-
-- [RoboTurk: A Crowdsourcing Platform for Robotic Skill Learning through Imitation](https://arxiv.org/abs/1811.02790). Ajay Mandlekar, Yuke Zhu, Animesh Garg, Jonathan Booher, Max Spero, Albert Tung, Julian Gao, John Emmons, Anchit Gupta, Emre Orbay, Silvio Savarese, Li Fei-Fei
-- [SURREAL: Open-Source Reinforcement Learning Framework and Robot Manipulation Benchmark](http://svl.stanford.edu/assets/papers/fan2018corl.pdf). Linxi Fan\*, Yuke Zhu\*, Jiren Zhu, Zihua Liu, Orien Zeng, Anchit Gupta, Joan Creus-Costa, Silvio Savarese, Li Fei-Fei
diff --git a/docs/simulation/device.rst b/docs/simulation/device.rst
index 216b70b160..41c7439695 100644
--- a/docs/simulation/device.rst
+++ b/docs/simulation/device.rst
@@ -1,7 +1,7 @@
Device
======
-Devices allow for direct real-time interfacing with the MuJoCo simulation. The current support devices are ``Keyboard`` and ``SpaceMouse``.
+Devices allow for direct real-time interfacing with the MuJoCo simulation. The currently supported devices are ``Keyboard``, ``SpaceMouse``, ``DualSense`` and ``MjGUI``.
Base Device
-----------
@@ -30,6 +30,26 @@ SpaceMouse Device
.. automethod:: get_controller_state
.. automethod:: run
- .. autoproperty:: control
- .. autoproperty:: control_gripper
+ .. automethod:: control
+ .. automethod:: control_gripper
+ .. automethod:: _display_controls
+
+DualSense Device
+-----------------
+
+.. autoclass:: robosuite.devices.dualsense.DualSense
+
+ .. automethod:: get_controller_state
+ .. automethod:: run
+ .. automethod:: control
+ .. automethod:: control_gripper
+ .. automethod:: _display_controls
+ .. automethod:: _check_connection_type
+
+MjGUI Device
+------------
+.. autoclass:: robosuite.devices.mjgui.MJGUI
+
+ .. automethod:: get_controller_state
+ .. automethod:: input2action
.. automethod:: _display_controls
diff --git a/docs/simulation/robot.rst b/docs/simulation/robot.rst
index 2d7227f7d8..fbe2cb813e 100644
--- a/docs/simulation/robot.rst
+++ b/docs/simulation/robot.rst
@@ -1,7 +1,7 @@
Robot
=====
-The ``Robot`` class defines a simulation object encapsulating a robot model, gripper model, and controller. Robosuite uses class extensions of this base class to model different robotic domains. The current release focuses on manipulation, and includes a ``Manipulator`` class, which itself is extended by ``SingleArm`` and ``Bimanual`` classes representing the two different types of supported robots.
+The ``Robot`` class defines a simulation object encapsulating a robot model, gripper model, base model, and controller. Robosuite uses class extensions of this base class to model different robotic domains. The current release focuses on manipulation and mobility, and includes a ``MobileRobot`` class, which itself is extended by ``WheeledRobot`` and ``LeggedRobot`` classes representing different types of mobile robots.
Base Robot
----------
diff --git a/docs/source/robosuite.controllers.composite.rst b/docs/source/robosuite.controllers.composite.rst
new file mode 100644
index 0000000000..2af588cbc6
--- /dev/null
+++ b/docs/source/robosuite.controllers.composite.rst
@@ -0,0 +1,29 @@
+robosuite.controllers.composite package
+=======================================
+
+Submodules
+----------
+
+robosuite.controllers.composite.composite\_controller module
+------------------------------------------------------------
+
+.. automodule:: robosuite.controllers.composite.composite_controller
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.controllers.composite.composite\_controller\_factory module
+---------------------------------------------------------------------
+
+.. automodule:: robosuite.controllers.composite.composite_controller_factory
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: robosuite.controllers.composite
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/robosuite.controllers.parts.arm.rst b/docs/source/robosuite.controllers.parts.arm.rst
new file mode 100644
index 0000000000..605af3e163
--- /dev/null
+++ b/docs/source/robosuite.controllers.parts.arm.rst
@@ -0,0 +1,29 @@
+robosuite.controllers.parts.arm package
+=======================================
+
+Submodules
+----------
+
+robosuite.controllers.parts.arm.ik module
+-----------------------------------------
+
+.. automodule:: robosuite.controllers.parts.arm.ik
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.controllers.parts.arm.osc module
+------------------------------------------
+
+.. automodule:: robosuite.controllers.parts.arm.osc
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: robosuite.controllers.parts.arm
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/robosuite.controllers.parts.generic.rst b/docs/source/robosuite.controllers.parts.generic.rst
new file mode 100644
index 0000000000..acfe59215f
--- /dev/null
+++ b/docs/source/robosuite.controllers.parts.generic.rst
@@ -0,0 +1,37 @@
+robosuite.controllers.parts.generic package
+===========================================
+
+Submodules
+----------
+
+robosuite.controllers.parts.generic.joint\_pos module
+-----------------------------------------------------
+
+.. automodule:: robosuite.controllers.parts.generic.joint_pos
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.controllers.parts.generic.joint\_tor module
+-----------------------------------------------------
+
+.. automodule:: robosuite.controllers.parts.generic.joint_tor
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.controllers.parts.generic.joint\_vel module
+-----------------------------------------------------
+
+.. automodule:: robosuite.controllers.parts.generic.joint_vel
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: robosuite.controllers.parts.generic
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/robosuite.controllers.parts.gripper.rst b/docs/source/robosuite.controllers.parts.gripper.rst
new file mode 100644
index 0000000000..8652c23a0c
--- /dev/null
+++ b/docs/source/robosuite.controllers.parts.gripper.rst
@@ -0,0 +1,29 @@
+robosuite.controllers.parts.gripper package
+===========================================
+
+Submodules
+----------
+
+robosuite.controllers.parts.gripper.gripper\_controller module
+--------------------------------------------------------------
+
+.. automodule:: robosuite.controllers.parts.gripper.gripper_controller
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.controllers.parts.gripper.simple\_grip module
+-------------------------------------------------------
+
+.. automodule:: robosuite.controllers.parts.gripper.simple_grip
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: robosuite.controllers.parts.gripper
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/robosuite.controllers.parts.mobile_base.rst b/docs/source/robosuite.controllers.parts.mobile_base.rst
new file mode 100644
index 0000000000..cb897b199a
--- /dev/null
+++ b/docs/source/robosuite.controllers.parts.mobile_base.rst
@@ -0,0 +1,29 @@
+robosuite.controllers.parts.mobile\_base package
+================================================
+
+Submodules
+----------
+
+robosuite.controllers.parts.mobile\_base.joint\_vel module
+----------------------------------------------------------
+
+.. automodule:: robosuite.controllers.parts.mobile_base.joint_vel
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.controllers.parts.mobile\_base.mobile\_base\_controller module
+------------------------------------------------------------------------
+
+.. automodule:: robosuite.controllers.parts.mobile_base.mobile_base_controller
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: robosuite.controllers.parts.mobile_base
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/robosuite.controllers.parts.rst b/docs/source/robosuite.controllers.parts.rst
new file mode 100644
index 0000000000..333df06bba
--- /dev/null
+++ b/docs/source/robosuite.controllers.parts.rst
@@ -0,0 +1,40 @@
+robosuite.controllers.parts package
+===================================
+
+Subpackages
+-----------
+
+.. toctree::
+ :maxdepth: 4
+
+ robosuite.controllers.parts.arm
+ robosuite.controllers.parts.generic
+ robosuite.controllers.parts.gripper
+ robosuite.controllers.parts.mobile_base
+
+Submodules
+----------
+
+robosuite.controllers.parts.controller module
+---------------------------------------------
+
+.. automodule:: robosuite.controllers.parts.controller
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.controllers.parts.controller\_factory module
+------------------------------------------------------
+
+.. automodule:: robosuite.controllers.parts.controller_factory
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: robosuite.controllers.parts
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/robosuite.controllers.rst b/docs/source/robosuite.controllers.rst
index 8ef16210c0..8eb30b4c4a 100644
--- a/docs/source/robosuite.controllers.rst
+++ b/docs/source/robosuite.controllers.rst
@@ -7,66 +7,8 @@ Subpackages
.. toctree::
:maxdepth: 4
- robosuite.controllers.interpolators
-
-Submodules
-----------
-
-robosuite.controllers.base\_controller module
----------------------------------------------
-
-.. automodule:: robosuite.controllers.base_controller
- :members:
- :undoc-members:
- :show-inheritance:
-
-robosuite.controllers.controller\_factory module
-------------------------------------------------
-
-.. automodule:: robosuite.controllers.controller_factory
- :members:
- :undoc-members:
- :show-inheritance:
-
-robosuite.controllers.ik module
--------------------------------
-
-.. automodule:: robosuite.controllers.ik
- :members:
- :undoc-members:
- :show-inheritance:
-
-robosuite.controllers.joint\_pos module
----------------------------------------
-
-.. automodule:: robosuite.controllers.joint_pos
- :members:
- :undoc-members:
- :show-inheritance:
-
-robosuite.controllers.joint\_tor module
----------------------------------------
-
-.. automodule:: robosuite.controllers.joint_tor
- :members:
- :undoc-members:
- :show-inheritance:
-
-robosuite.controllers.joint\_vel module
----------------------------------------
-
-.. automodule:: robosuite.controllers.joint_vel
- :members:
- :undoc-members:
- :show-inheritance:
-
-robosuite.controllers.osc module
---------------------------------
-
-.. automodule:: robosuite.controllers.osc
- :members:
- :undoc-members:
- :show-inheritance:
+ robosuite.controllers.composite
+ robosuite.controllers.parts
Module contents
---------------
diff --git a/docs/source/robosuite.devices.rst b/docs/source/robosuite.devices.rst
index 14633059db..0f293fe649 100644
--- a/docs/source/robosuite.devices.rst
+++ b/docs/source/robosuite.devices.rst
@@ -12,6 +12,14 @@ robosuite.devices.device module
:undoc-members:
:show-inheritance:
+robosuite.devices.dualsense module
+----------------------------------
+
+.. automodule:: robosuite.devices.dualsense
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.devices.keyboard module
---------------------------------
@@ -20,6 +28,14 @@ robosuite.devices.keyboard module
:undoc-members:
:show-inheritance:
+robosuite.devices.mjgui module
+------------------------------
+
+.. automodule:: robosuite.devices.mjgui
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.devices.spacemouse module
-----------------------------------
diff --git a/docs/source/robosuite.environments.manipulation.rst b/docs/source/robosuite.environments.manipulation.rst
index 74f8b90fa6..728e45aae9 100644
--- a/docs/source/robosuite.environments.manipulation.rst
+++ b/docs/source/robosuite.environments.manipulation.rst
@@ -44,18 +44,18 @@ robosuite.environments.manipulation.pick\_place module
:undoc-members:
:show-inheritance:
-robosuite.environments.manipulation.single\_arm\_env module
------------------------------------------------------------
+robosuite.environments.manipulation.stack module
+------------------------------------------------
-.. automodule:: robosuite.environments.manipulation.single_arm_env
+.. automodule:: robosuite.environments.manipulation.stack
:members:
:undoc-members:
:show-inheritance:
-robosuite.environments.manipulation.stack module
-------------------------------------------------
+robosuite.environments.manipulation.tool\_hang module
+-----------------------------------------------------
-.. automodule:: robosuite.environments.manipulation.stack
+.. automodule:: robosuite.environments.manipulation.tool_hang
:members:
:undoc-members:
:show-inheritance:
@@ -92,6 +92,14 @@ robosuite.environments.manipulation.two\_arm\_peg\_in\_hole module
:undoc-members:
:show-inheritance:
+robosuite.environments.manipulation.two\_arm\_transport module
+--------------------------------------------------------------
+
+.. automodule:: robosuite.environments.manipulation.two_arm_transport
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.environments.manipulation.wipe module
-----------------------------------------------
diff --git a/docs/source/robosuite.models.arenas.rst b/docs/source/robosuite.models.arenas.rst
index ff42472ecf..54b12506d5 100644
--- a/docs/source/robosuite.models.arenas.rst
+++ b/docs/source/robosuite.models.arenas.rst
@@ -28,6 +28,14 @@ robosuite.models.arenas.empty\_arena module
:undoc-members:
:show-inheritance:
+robosuite.models.arenas.multi\_table\_arena module
+--------------------------------------------------
+
+.. automodule:: robosuite.models.arenas.multi_table_arena
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.models.arenas.pegs\_arena module
------------------------------------------
diff --git a/docs/source/robosuite.models.bases.rst b/docs/source/robosuite.models.bases.rst
new file mode 100644
index 0000000000..aca6fd371f
--- /dev/null
+++ b/docs/source/robosuite.models.bases.rst
@@ -0,0 +1,117 @@
+robosuite.models.bases package
+==============================
+
+Submodules
+----------
+
+robosuite.models.bases.floating\_legged\_base module
+----------------------------------------------------
+
+.. automodule:: robosuite.models.bases.floating_legged_base
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.bases.leg\_base\_model module
+----------------------------------------------
+
+.. automodule:: robosuite.models.bases.leg_base_model
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.bases.mobile\_base\_model module
+-------------------------------------------------
+
+.. automodule:: robosuite.models.bases.mobile_base_model
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.bases.mount\_model module
+------------------------------------------
+
+.. automodule:: robosuite.models.bases.mount_model
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.bases.no\_actuation\_base module
+-------------------------------------------------
+
+.. automodule:: robosuite.models.bases.no_actuation_base
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.bases.null\_mobile\_base module
+------------------------------------------------
+
+.. automodule:: robosuite.models.bases.null_mobile_base
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.bases.null\_mount module
+-----------------------------------------
+
+.. automodule:: robosuite.models.bases.null_mount
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.bases.omron\_mobile\_base module
+-------------------------------------------------
+
+.. automodule:: robosuite.models.bases.omron_mobile_base
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.bases.rethink\_minimal\_mount module
+-----------------------------------------------------
+
+.. automodule:: robosuite.models.bases.rethink_minimal_mount
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.bases.rethink\_mount module
+--------------------------------------------
+
+.. automodule:: robosuite.models.bases.rethink_mount
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.bases.robot\_base\_factory module
+--------------------------------------------------
+
+.. automodule:: robosuite.models.bases.robot_base_factory
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.bases.robot\_base\_model module
+------------------------------------------------
+
+.. automodule:: robosuite.models.bases.robot_base_model
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.bases.spot\_base module
+----------------------------------------
+
+.. automodule:: robosuite.models.bases.spot_base
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: robosuite.models.bases
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/robosuite.models.grippers.rst b/docs/source/robosuite.models.grippers.rst
index b65eac0b17..e0edddcb53 100644
--- a/docs/source/robosuite.models.grippers.rst
+++ b/docs/source/robosuite.models.grippers.rst
@@ -4,6 +4,22 @@ robosuite.models.grippers package
Submodules
----------
+robosuite.models.grippers.bd\_gripper module
+--------------------------------------------
+
+.. automodule:: robosuite.models.grippers.bd_gripper
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.grippers.fourier\_hands module
+-----------------------------------------------
+
+.. automodule:: robosuite.models.grippers.fourier_hands
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.models.grippers.gripper\_factory module
-------------------------------------------------
@@ -28,6 +44,14 @@ robosuite.models.grippers.gripper\_tester module
:undoc-members:
:show-inheritance:
+robosuite.models.grippers.inspire\_hands module
+-----------------------------------------------
+
+.. automodule:: robosuite.models.grippers.inspire_hands
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.models.grippers.jaco\_three\_finger\_gripper module
-------------------------------------------------------------
diff --git a/docs/source/robosuite.models.objects.composite.rst b/docs/source/robosuite.models.objects.composite.rst
index 3bfa093d68..d182bd0909 100644
--- a/docs/source/robosuite.models.objects.composite.rst
+++ b/docs/source/robosuite.models.objects.composite.rst
@@ -4,6 +4,22 @@ robosuite.models.objects.composite package
Submodules
----------
+robosuite.models.objects.composite.bin module
+---------------------------------------------
+
+.. automodule:: robosuite.models.objects.composite.bin
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.objects.composite.cone module
+----------------------------------------------
+
+.. automodule:: robosuite.models.objects.composite.cone
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.models.objects.composite.hammer module
------------------------------------------------
@@ -12,6 +28,30 @@ robosuite.models.objects.composite.hammer module
:undoc-members:
:show-inheritance:
+robosuite.models.objects.composite.hollow\_cylinder module
+----------------------------------------------------------
+
+.. automodule:: robosuite.models.objects.composite.hollow_cylinder
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.objects.composite.hook\_frame module
+-----------------------------------------------------
+
+.. automodule:: robosuite.models.objects.composite.hook_frame
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.objects.composite.lid module
+---------------------------------------------
+
+.. automodule:: robosuite.models.objects.composite.lid
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.models.objects.composite.pot\_with\_handles module
------------------------------------------------------------
@@ -20,6 +60,14 @@ robosuite.models.objects.composite.pot\_with\_handles module
:undoc-members:
:show-inheritance:
+robosuite.models.objects.composite.stand\_with\_mount module
+------------------------------------------------------------
+
+.. automodule:: robosuite.models.objects.composite.stand_with_mount
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
Module contents
---------------
diff --git a/docs/source/robosuite.models.objects.composite_body.rst b/docs/source/robosuite.models.objects.composite_body.rst
index 701da2b47a..1bf48f8a77 100644
--- a/docs/source/robosuite.models.objects.composite_body.rst
+++ b/docs/source/robosuite.models.objects.composite_body.rst
@@ -12,6 +12,14 @@ robosuite.models.objects.composite\_body.hinged\_box module
:undoc-members:
:show-inheritance:
+robosuite.models.objects.composite\_body.ratcheting\_wrench module
+------------------------------------------------------------------
+
+.. automodule:: robosuite.models.objects.composite_body.ratcheting_wrench
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
Module contents
---------------
diff --git a/docs/source/robosuite.models.objects.group.rst b/docs/source/robosuite.models.objects.group.rst
new file mode 100644
index 0000000000..a1caaedbee
--- /dev/null
+++ b/docs/source/robosuite.models.objects.group.rst
@@ -0,0 +1,21 @@
+robosuite.models.objects.group package
+======================================
+
+Submodules
+----------
+
+robosuite.models.objects.group.transport module
+-----------------------------------------------
+
+.. automodule:: robosuite.models.objects.group.transport
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: robosuite.models.objects.group
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/robosuite.models.objects.rst b/docs/source/robosuite.models.objects.rst
index ade511ce5a..c820d2fcd4 100644
--- a/docs/source/robosuite.models.objects.rst
+++ b/docs/source/robosuite.models.objects.rst
@@ -9,6 +9,7 @@ Subpackages
robosuite.models.objects.composite
robosuite.models.objects.composite_body
+ robosuite.models.objects.group
robosuite.models.objects.primitive
Submodules
@@ -22,6 +23,14 @@ robosuite.models.objects.generated\_objects module
:undoc-members:
:show-inheritance:
+robosuite.models.objects.object\_groups module
+----------------------------------------------
+
+.. automodule:: robosuite.models.objects.object_groups
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.models.objects.objects module
---------------------------------------
diff --git a/docs/source/robosuite.models.robots.manipulators.rst b/docs/source/robosuite.models.robots.manipulators.rst
index 9f35a578f2..1286fcb8e5 100644
--- a/docs/source/robosuite.models.robots.manipulators.rst
+++ b/docs/source/robosuite.models.robots.manipulators.rst
@@ -12,6 +12,30 @@ robosuite.models.robots.manipulators.baxter\_robot module
:undoc-members:
:show-inheritance:
+robosuite.models.robots.manipulators.gr1\_robot module
+------------------------------------------------------
+
+.. automodule:: robosuite.models.robots.manipulators.gr1_robot
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.robots.manipulators.humanoid\_model module
+-----------------------------------------------------------
+
+.. automodule:: robosuite.models.robots.manipulators.humanoid_model
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.robots.manipulators.humanoid\_upperbody\_model module
+----------------------------------------------------------------------
+
+.. automodule:: robosuite.models.robots.manipulators.humanoid_upperbody_model
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.models.robots.manipulators.iiwa\_robot module
-------------------------------------------------------
@@ -36,6 +60,14 @@ robosuite.models.robots.manipulators.kinova3\_robot module
:undoc-members:
:show-inheritance:
+robosuite.models.robots.manipulators.legged\_manipulator\_model module
+----------------------------------------------------------------------
+
+.. automodule:: robosuite.models.robots.manipulators.legged_manipulator_model
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.models.robots.manipulators.manipulator\_model module
--------------------------------------------------------------
@@ -60,6 +92,22 @@ robosuite.models.robots.manipulators.sawyer\_robot module
:undoc-members:
:show-inheritance:
+robosuite.models.robots.manipulators.spot\_arm module
+-----------------------------------------------------
+
+.. automodule:: robosuite.models.robots.manipulators.spot_arm
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.models.robots.manipulators.tiago\_robot module
+--------------------------------------------------------
+
+.. automodule:: robosuite.models.robots.manipulators.tiago_robot
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.models.robots.manipulators.ur5e\_robot module
-------------------------------------------------------
diff --git a/docs/source/robosuite.models.robots.rst b/docs/source/robosuite.models.robots.rst
index 36cba107be..186ec1f201 100644
--- a/docs/source/robosuite.models.robots.rst
+++ b/docs/source/robosuite.models.robots.rst
@@ -12,6 +12,14 @@ Subpackages
Submodules
----------
+robosuite.models.robots.compositional module
+--------------------------------------------
+
+.. automodule:: robosuite.models.robots.compositional
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.models.robots.robot\_model module
-------------------------------------------
diff --git a/docs/source/robosuite.models.rst b/docs/source/robosuite.models.rst
index d322daa5f1..be78f5ccb4 100644
--- a/docs/source/robosuite.models.rst
+++ b/docs/source/robosuite.models.rst
@@ -8,8 +8,8 @@ Subpackages
:maxdepth: 4
robosuite.models.arenas
+ robosuite.models.bases
robosuite.models.grippers
- robosuite.models.mounts
robosuite.models.objects
robosuite.models.robots
robosuite.models.tasks
diff --git a/docs/source/robosuite.renderers.context.rst b/docs/source/robosuite.renderers.context.rst
new file mode 100644
index 0000000000..81b92a814a
--- /dev/null
+++ b/docs/source/robosuite.renderers.context.rst
@@ -0,0 +1,37 @@
+robosuite.renderers.context package
+===================================
+
+Submodules
+----------
+
+robosuite.renderers.context.egl\_context module
+-----------------------------------------------
+
+.. automodule:: robosuite.renderers.context.egl_context
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.renderers.context.glfw\_context module
+------------------------------------------------
+
+.. automodule:: robosuite.renderers.context.glfw_context
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.renderers.context.osmesa\_context module
+--------------------------------------------------
+
+.. automodule:: robosuite.renderers.context.osmesa_context
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: robosuite.renderers.context
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/robosuite.renderers.mjviewer.rst b/docs/source/robosuite.renderers.mjviewer.rst
new file mode 100644
index 0000000000..d4abf681c9
--- /dev/null
+++ b/docs/source/robosuite.renderers.mjviewer.rst
@@ -0,0 +1,21 @@
+robosuite.renderers.mjviewer package
+====================================
+
+Submodules
+----------
+
+robosuite.renderers.mjviewer.mjviewer\_renderer module
+------------------------------------------------------
+
+.. automodule:: robosuite.renderers.mjviewer.mjviewer_renderer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: robosuite.renderers.mjviewer
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/robosuite.renderers.rst b/docs/source/robosuite.renderers.rst
index 99f8aec74c..3e4514e899 100644
--- a/docs/source/robosuite.renderers.rst
+++ b/docs/source/robosuite.renderers.rst
@@ -8,6 +8,7 @@ Subpackages
:maxdepth: 4
robosuite.renderers.context
+ robosuite.renderers.mjviewer
Submodules
----------
diff --git a/docs/source/robosuite.robots.rst b/docs/source/robosuite.robots.rst
index 87fb90b4b0..52c8e34a78 100644
--- a/docs/source/robosuite.robots.rst
+++ b/docs/source/robosuite.robots.rst
@@ -4,18 +4,26 @@ robosuite.robots package
Submodules
----------
-robosuite.robots.bimanual module
---------------------------------
+robosuite.robots.fixed\_base\_robot module
+------------------------------------------
-.. automodule:: robosuite.robots.bimanual
+.. automodule:: robosuite.robots.fixed_base_robot
:members:
:undoc-members:
:show-inheritance:
-robosuite.robots.manipulator module
------------------------------------
+robosuite.robots.legged\_robot module
+-------------------------------------
-.. automodule:: robosuite.robots.manipulator
+.. automodule:: robosuite.robots.legged_robot
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.robots.mobile\_robot module
+-------------------------------------
+
+.. automodule:: robosuite.robots.mobile_robot
:members:
:undoc-members:
:show-inheritance:
@@ -28,10 +36,10 @@ robosuite.robots.robot module
:undoc-members:
:show-inheritance:
-robosuite.robots.single\_arm module
------------------------------------
+robosuite.robots.wheeled\_robot module
+--------------------------------------
-.. automodule:: robosuite.robots.single_arm
+.. automodule:: robosuite.robots.wheeled_robot
:members:
:undoc-members:
:show-inheritance:
diff --git a/docs/source/robosuite.rst b/docs/source/robosuite.rst
index 444d18344c..daa6bdf84a 100644
--- a/docs/source/robosuite.rst
+++ b/docs/source/robosuite.rst
@@ -27,6 +27,14 @@ robosuite.macros module
:undoc-members:
:show-inheritance:
+robosuite.macros\_private module
+--------------------------------
+
+.. automodule:: robosuite.macros_private
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
Module contents
---------------
diff --git a/docs/source/robosuite.utils.rst b/docs/source/robosuite.utils.rst
index be45a0de2e..b1295abc7d 100644
--- a/docs/source/robosuite.utils.rst
+++ b/docs/source/robosuite.utils.rst
@@ -44,6 +44,14 @@ robosuite.utils.errors module
:undoc-members:
:show-inheritance:
+robosuite.utils.ik\_utils module
+--------------------------------
+
+.. automodule:: robosuite.utils.ik_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.utils.input\_utils module
-----------------------------------
@@ -108,6 +116,14 @@ robosuite.utils.placement\_samplers module
:undoc-members:
:show-inheritance:
+robosuite.utils.robot\_composition\_utils module
+------------------------------------------------
+
+.. automodule:: robosuite.utils.robot_composition_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.utils.robot\_utils module
-----------------------------------
@@ -116,6 +132,22 @@ robosuite.utils.robot\_utils module
:undoc-members:
:show-inheritance:
+robosuite.utils.sim\_utils module
+---------------------------------
+
+.. automodule:: robosuite.utils.sim_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+robosuite.utils.traj\_utils module
+----------------------------------
+
+.. automodule:: robosuite.utils.traj_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
robosuite.utils.transform\_utils module
---------------------------------------
diff --git a/docs/tutorials/add_environment.md b/docs/tutorials/add_environment.md
index b562b0a705..9865cd69d1 100644
--- a/docs/tutorials/add_environment.md
+++ b/docs/tutorials/add_environment.md
@@ -1,8 +1,8 @@
## Building Your Own Environments
-**robosuite** offers great flexibility in creating your own environments. A [task](modeling/task) typically involves the participation of a [robot](modeling/robot_model) with [grippers](modeling/robot_model.html#gripper-model) as its end-effectors, an [arena](modeling/arena) (workspace), and [objects](modeling/object_model) that the robot interacts with. For a detailed overview of our design architecture, please check out the [Overview](modules/overview) page in Modules. Our Modeling APIs provide methods of composing these modularized elements into a scene, which can be loaded in MuJoCo for simulation. To build your own environments, we recommend you take a look at the [Environment classes](simulation/environment) which have used these APIs to define robotics environments and tasks and the [source code](https://github.com/ARISE-Initiative/robosuite/tree/master/robosuite/environments) of our standardized environments. Below we walk through a step-by-step example of building a new tabletop manipulation environment with our APIs.
+**robosuite** offers great flexibility in creating your own environments. A [task](../modeling/task) typically involves the participation of a [robot](../modeling/robot_model) with [grippers](../modeling/robot_model.html#gripper-model) as its end-effectors, an [arena](../modeling/arena) (workspace), and [objects](../modeling/object_model) that the robot interacts with. For a detailed overview of our design architecture, please check out the [Overview](../modules/overview) page in Modules. Our Modeling APIs provide methods of composing these modularized elements into a scene, which can be loaded in MuJoCo for simulation. To build your own environments, we recommend you take a look at the [Environment classes](../simulation/environment) which have used these APIs to define robotics environments and tasks and the [source code](https://github.com/ARISE-Initiative/robosuite/tree/master/robosuite/environments) of our standardized environments. Below we walk through a step-by-step example of building a new tabletop manipulation environment with our APIs.
-**Step 1: Creating the world.** All mujoco object definitions are housed in an xml. We create a [MujocoWorldBase](source/robosuite.models) class to do it.
+**Step 1: Creating the world.** All mujoco object definitions are housed in an xml. We create a [MujocoWorldBase](../source/robosuite.models) class to do it.
```python
from robosuite.models import MujocoWorldBase
@@ -28,7 +28,7 @@ mujoco_robot.set_base_xpos([0, 0, 0])
world.merge(mujoco_robot)
```
-**Step 3: Creating the table.** We can initialize the [TableArena](source/robosuite.models.arenas) instance that creates a table and the floorplane
+**Step 3: Creating the table.** We can initialize the [TableArena](../source/robosuite.models.arenas) instance that creates a table and the floor plane
```python
from robosuite.models.arenas import TableArena
@@ -37,7 +37,7 @@ mujoco_arena.set_origin([0.8, 0, 0])
world.merge(mujoco_arena)
```
-**Step 4: Adding the object.** For details of `MujocoObject`, refer to the documentation about [MujocoObject](modeling/object_model), we can create a ball and add it to the world.
+**Step 4: Adding the object.** For details of `MujocoObject`, refer to the documentation about [MujocoObject](../modeling/object_model), we can create a ball and add it to the world.
```python
from robosuite.models.objects import BallObject
from robosuite.utils.mjcf_utils import new_joint
@@ -50,7 +50,7 @@ sphere.set('pos', '1.0 0 1.0')
world.worldbody.append(sphere)
```
-**Step 5: Running Simulation.** Once we have created the object, we can obtain a `mujoco_py` model by running
+**Step 5: Running Simulation.** Once we have created the object, we can obtain a `mujoco.MjModel` model by running
```python
model = world.get_model(mode="mujoco")
```
diff --git a/requirements-extra.txt b/requirements-extra.txt
index 26c1b0c998..76fe290233 100644
--- a/requirements-extra.txt
+++ b/requirements-extra.txt
@@ -12,3 +12,6 @@ mink
# for extra robosuite_models
robosuite-models==1.0.0
+
+# for USD export and rendering
+usd-core
diff --git a/robosuite/__init__.py b/robosuite/__init__.py
index 5366c7b79b..9cf1965bb7 100644
--- a/robosuite/__init__.py
+++ b/robosuite/__init__.py
@@ -41,7 +41,7 @@
"Could not load the mink-based whole-body IK. Make sure you install related import properly, otherwise you will not be able to use the default IK controller setting for GR1 robot."
)
-__version__ = "1.5.0"
+__version__ = "1.5.1"
__logo__ = """
; / ,--.
["] ["] ,< |__**|
diff --git a/robosuite/controllers/composite/composite_controller.py b/robosuite/controllers/composite/composite_controller.py
index b832827c38..163977a1cc 100644
--- a/robosuite/controllers/composite/composite_controller.py
+++ b/robosuite/controllers/composite/composite_controller.py
@@ -122,6 +122,23 @@ def get_control_dim(self, part_name):
return self.part_controllers[part_name].control_dim
def get_controller_base_pose(self, controller_name):
+ """
+ Get the base position and orientation of a specified controller's part. Note: this pose may likely differ from
+ the robot base's pose.
+
+ Args:
+ controller_name (str): The name of the controller, used to look up part-specific information.
+
+ Returns:
+ tuple[np.ndarray, np.ndarray]: A tuple containing:
+ - base_pos (np.ndarray): The 3D position of the part's center in world coordinates (shape: (3,)).
+ - base_ori (np.ndarray): The 3x3 rotation matrix representing the part's orientation in world coordinates.
+
+ Details:
+ - Uses the controller's `naming_prefix` and `part_name` to construct the corresponding site name.
+ - Queries the simulation (`self.sim`) for the site's position (`site_xpos`) and orientation (`site_xmat`).
+ - The site orientation matrix is reshaped from a flat array of size 9 to a 3x3 rotation matrix.
+ """
naming_prefix = self.part_controllers[controller_name].naming_prefix
part_name = self.part_controllers[controller_name].part_name
base_pos = np.array(self.sim.data.site_xpos[self.sim.model.site_name2id(f"{naming_prefix}{part_name}_center")])
diff --git a/robosuite/controllers/config/default/parts/osc_pose.json b/robosuite/controllers/config/default/parts/osc_pose.json
index 94098779b8..8cd29dfaec 100644
--- a/robosuite/controllers/config/default/parts/osc_pose.json
+++ b/robosuite/controllers/config/default/parts/osc_pose.json
@@ -12,7 +12,8 @@
"position_limits": null,
"orientation_limits": null,
"uncouple_pos_ori": true,
- "control_delta": true,
+ "input_type": "delta",
+ "input_ref_frame": "base",
"interpolation": null,
"ramp_ratio": 0.2
}
diff --git a/robosuite/controllers/config/default/parts/osc_position.json b/robosuite/controllers/config/default/parts/osc_position.json
index 0eb12b667f..42b314a792 100644
--- a/robosuite/controllers/config/default/parts/osc_position.json
+++ b/robosuite/controllers/config/default/parts/osc_position.json
@@ -10,7 +10,8 @@
"kp_limits": [0, 300],
"damping_ratio_limits": [0, 10],
"position_limits": null,
- "control_delta": true,
+ "input_type": "delta",
+ "input_ref_frame": "base",
"interpolation": null,
"ramp_ratio": 0.2
}
diff --git a/robosuite/controllers/parts/arm/osc.py b/robosuite/controllers/parts/arm/osc.py
index b7ab2e2cb3..81f19097e5 100644
--- a/robosuite/controllers/parts/arm/osc.py
+++ b/robosuite/controllers/parts/arm/osc.py
@@ -242,25 +242,33 @@ def set_goal(self, action):
# Parse action based on the impedance mode, and update kp / kd as necessary
if self.impedance_mode == "variable":
- damping_ratio, kp, delta = action[:6], action[6:12], action[12:]
+ damping_ratio, kp, goal_update = action[:6], action[6:12], action[12:]
self.kp = np.clip(kp, self.kp_min, self.kp_max)
self.kd = 2 * np.sqrt(self.kp) * np.clip(damping_ratio, self.damping_ratio_min, self.damping_ratio_max)
elif self.impedance_mode == "variable_kp":
- kp, delta = action[:6], action[6:]
+ kp, goal_update = action[:6], action[6:]
self.kp = np.clip(kp, self.kp_min, self.kp_max)
self.kd = 2 * np.sqrt(self.kp) # critically damped
else: # This is case "fixed"
- delta = action
+ goal_update = action
# If we're using deltas, interpret actions as such
if self.input_type == "delta":
+ delta = goal_update
scaled_delta = self.scale_action(delta)
self.goal_pos = self.compute_goal_pos(scaled_delta[0:3])
- self.goal_ori = self.compute_goal_ori(scaled_delta[3:6])
+ if self.use_ori is True:
+ self.goal_ori = self.compute_goal_ori(scaled_delta[3:6])
+ else:
+ self.goal_ori = self.compute_goal_ori(np.zeros(3))
# Else, interpret actions as absolute values
elif self.input_type == "absolute":
- self.goal_pos = action[0:3]
- self.goal_ori = Rotation.from_rotvec(action[3:6]).as_matrix()
+ abs_action = goal_update
+ self.goal_pos = abs_action[0:3]
+ if self.use_ori is True:
+ self.goal_ori = Rotation.from_rotvec(abs_action[3:6]).as_matrix()
+ else:
+ self.goal_ori = self.compute_goal_ori(np.zeros(3))
else:
raise ValueError(f"Unsupport input_type {self.input_type}")
diff --git a/robosuite/controllers/parts/generic/joint_pos.py b/robosuite/controllers/parts/generic/joint_pos.py
index 6d55ef0ae5..7665679157 100644
--- a/robosuite/controllers/parts/generic/joint_pos.py
+++ b/robosuite/controllers/parts/generic/joint_pos.py
@@ -175,6 +175,8 @@ def __init__(
# initialize
self.goal_qpos = None
+ self.use_torque_compensation = kwargs.get("use_torque_compensation", True)
+
def set_goal(self, action, set_qpos=None):
"""
Sets goal based on input @action. If self.impedance_mode is not "fixed", then the input will be parsed into the
@@ -260,7 +262,10 @@ def run_controller(self):
desired_torque = np.multiply(np.array(position_error), np.array(self.kp)) + np.multiply(vel_pos_error, self.kd)
# Return desired torques plus gravity compensations
- self.torques = np.dot(self.mass_matrix, desired_torque) + self.torque_compensation
+ if self.use_torque_compensation:
+ self.torques = np.dot(self.mass_matrix, desired_torque) + self.torque_compensation
+ else:
+ self.torques = desired_torque
# Always run superclass call for any cleanups at the end
super().run_controller()
diff --git a/robosuite/controllers/parts/generic/joint_tor.py b/robosuite/controllers/parts/generic/joint_tor.py
index c7bd2bd7f6..f792cdde56 100644
--- a/robosuite/controllers/parts/generic/joint_tor.py
+++ b/robosuite/controllers/parts/generic/joint_tor.py
@@ -106,6 +106,8 @@ def __init__(
self.current_torque = np.zeros(self.control_dim) # Current torques being outputted, pre-compensation
self.torques = None # Torques returned every time run_controller is called
+ self.use_torque_compensation = kwargs.get("use_torque_compensation", True)
+
def set_goal(self, torques):
"""
Sets goal based on input @torques.
@@ -153,7 +155,10 @@ def run_controller(self):
self.current_torque = np.array(self.goal_torque)
# Add gravity compensation
- self.torques = self.current_torque + self.torque_compensation
+ if self.use_torque_compensation:
+ self.torques = self.current_torque + self.torque_compensation
+ else:
+ self.torques = self.current_torque
# Always run superclass call for any cleanups at the end
super().run_controller()
diff --git a/robosuite/controllers/parts/generic/joint_vel.py b/robosuite/controllers/parts/generic/joint_vel.py
index 727d92f21d..31958e6cc5 100644
--- a/robosuite/controllers/parts/generic/joint_vel.py
+++ b/robosuite/controllers/parts/generic/joint_vel.py
@@ -124,6 +124,8 @@ def __init__(
self.current_vel = np.zeros(self.joint_dim) # Current velocity setpoint, pre-compensation
self.torques = None # Torques returned every time run_controller is called
+ self.torque_compensation = kwargs.get("use_torque_compensation", True)
+
def set_goal(self, velocities):
"""
Sets goal based on input @velocities.
@@ -187,7 +189,12 @@ def run_controller(self):
self.summed_err += err
# Compute command torques via PID velocity controller plus gravity compensation torques
- torques = self.kp * err + self.ki * self.summed_err + self.kd * self.derr_buf.average + self.torque_compensation
+ if self.torque_compensation:
+ torques = (
+ self.kp * err + self.ki * self.summed_err + self.kd * self.derr_buf.average + self.torque_compensation
+ )
+ else:
+ torques = self.kp * err + self.ki * self.summed_err + self.kd * self.derr_buf.average
# Clip torques
self.torques = self.clip_torques(torques)
diff --git a/robosuite/demos/demo_device_control.py b/robosuite/demos/demo_device_control.py
index 673b6551bd..5657607bf9 100644
--- a/robosuite/demos/demo_device_control.py
+++ b/robosuite/demos/demo_device_control.py
@@ -99,16 +99,37 @@
from robosuite.wrappers import VisualizationWrapper
if __name__ == "__main__":
-
parser = argparse.ArgumentParser()
parser.add_argument("--environment", type=str, default="Lift")
- parser.add_argument("--robots", nargs="+", type=str, default="Panda", help="Which robot(s) to use in the env")
parser.add_argument(
- "--config", type=str, default="default", help="Specified environment configuration if necessary"
+ "--robots",
+ nargs="+",
+ type=str,
+ default="Panda",
+ help="Which robot(s) to use in the env",
+ )
+ parser.add_argument(
+ "--config",
+ type=str,
+ default="default",
+ help="Specified environment configuration if necessary",
+ )
+ parser.add_argument(
+ "--arm",
+ type=str,
+ default="right",
+ help="Which arm to control (eg bimanual) 'right' or 'left'",
+ )
+ parser.add_argument(
+ "--switch-on-grasp",
+ action="store_true",
+ help="Switch gripper control on gripper action",
+ )
+ parser.add_argument(
+ "--toggle-camera-on-grasp",
+ action="store_true",
+ help="Switch camera angle on gripper action",
)
- parser.add_argument("--arm", type=str, default="right", help="Which arm to control (eg bimanual) 'right' or 'left'")
- parser.add_argument("--switch-on-grasp", action="store_true", help="Switch gripper control on gripper action")
- parser.add_argument("--toggle-camera-on-grasp", action="store_true", help="Switch camera angle on gripper action")
parser.add_argument(
"--controller",
type=str,
@@ -116,14 +137,30 @@
help="Choice of controller. Can be generic (eg. 'BASIC' or 'WHOLE_BODY_MINK_IK') or json file (see robosuite/controllers/config for examples) or None to get the robot's default controller if it exists",
)
parser.add_argument("--device", type=str, default="keyboard")
- parser.add_argument("--pos-sensitivity", type=float, default=1.0, help="How much to scale position user inputs")
- parser.add_argument("--rot-sensitivity", type=float, default=1.0, help="How much to scale rotation user inputs")
+ parser.add_argument(
+ "--pos-sensitivity",
+ type=float,
+ default=1.0,
+ help="How much to scale position user inputs",
+ )
+ parser.add_argument(
+ "--rot-sensitivity",
+ type=float,
+ default=1.0,
+ help="How much to scale rotation user inputs",
+ )
parser.add_argument(
"--max_fr",
default=20,
type=int,
 help="Sleep when simulation runs faster than specified frame rate; 20 fps is real time.",
)
+ parser.add_argument(
+ "--reverse_xy",
+ type=bool,
+ default=False,
+        help="(DualSense only) Reverse the effect of the joystick's x and y axes. Use this when the left/right and front/back directions in the view are opposite to the joystick's LX and LY (e.g. pushing LX up moves the robot left in your view).",
+ )
args = parser.parse_args()
# Get controller config
@@ -168,18 +205,35 @@
if args.device == "keyboard":
from robosuite.devices import Keyboard
- device = Keyboard(env=env, pos_sensitivity=args.pos_sensitivity, rot_sensitivity=args.rot_sensitivity)
+ device = Keyboard(
+ env=env,
+ pos_sensitivity=args.pos_sensitivity,
+ rot_sensitivity=args.rot_sensitivity,
+ )
env.viewer.add_keypress_callback(device.on_press)
elif args.device == "spacemouse":
from robosuite.devices import SpaceMouse
- device = SpaceMouse(env=env, pos_sensitivity=args.pos_sensitivity, rot_sensitivity=args.rot_sensitivity)
+ device = SpaceMouse(
+ env=env,
+ pos_sensitivity=args.pos_sensitivity,
+ rot_sensitivity=args.rot_sensitivity,
+ )
+ elif args.device == "dualsense":
+ from robosuite.devices import DualSense
+
+ device = DualSense(
+ env=env,
+ pos_sensitivity=args.pos_sensitivity,
+ rot_sensitivity=args.rot_sensitivity,
+ reverse_xy=args.reverse_xy,
+ )
elif args.device == "mjgui":
from robosuite.devices.mjgui import MJGUI
device = MJGUI(env=env)
else:
- raise Exception("Invalid device choice: choose either 'keyboard' or 'spacemouse'.")
+        raise Exception("Invalid device choice: choose either 'keyboard', 'dualsense' or 'spacemouse'.")
while True:
# Reset the environment
diff --git a/robosuite/demos/demo_domain_randomization.py b/robosuite/demos/demo_domain_randomization.py
index e1113ec5ad..3f25dfd4a3 100644
--- a/robosuite/demos/demo_domain_randomization.py
+++ b/robosuite/demos/demo_domain_randomization.py
@@ -2,7 +2,7 @@
Script to showcase domain randomization functionality.
"""
-import mujoco
+import time
import robosuite.macros as macros
from robosuite.utils.input_utils import *
@@ -12,7 +12,6 @@
macros.USING_INSTANCE_RANDOMIZATION = True
if __name__ == "__main__":
- assert mujoco.__version__ == "3.1.1", "Script requires mujoco-py version 3.1.1 to run"
# Create dict to hold options that will be passed to env creation call
options = {}
@@ -57,10 +56,17 @@
control_freq=20,
hard_reset=False, # TODO: Not setting this flag to False brings up a segfault on macos or glfw error on linux
)
- env = DomainRandomizationWrapper(env)
+ env = DomainRandomizationWrapper(
+ env,
+ randomize_color=False, # randomize_color currently only works for mujoco==3.1.1
+ randomize_camera=False, # less jarring when visualizing
+ randomize_dynamics=False,
+ )
env.reset()
env.viewer.set_camera(camera_id=0)
+ max_frame_rate = 20 # Set the desired maximum frame rate
+
# Get action limits
low, high = env.action_spec
@@ -69,3 +75,4 @@
action = np.random.uniform(low, high)
obs, reward, done, _ = env.step(action)
env.render()
+ time.sleep(1 / max_frame_rate)
diff --git a/robosuite/demos/demo_gripper_interaction.py b/robosuite/demos/demo_gripper_interaction.py
index a677725f19..ed19e24f50 100644
--- a/robosuite/demos/demo_gripper_interaction.py
+++ b/robosuite/demos/demo_gripper_interaction.py
@@ -14,7 +14,7 @@
from robosuite.models.arenas.table_arena import TableArena
from robosuite.models.grippers import PandaGripper, RethinkGripper
from robosuite.models.objects import BoxObject
-from robosuite.utils import OpenCVRenderer
+from robosuite.renderers.viewer import OpenCVViewer
from robosuite.utils.binding_utils import MjRenderContextOffscreen, MjSim
from robosuite.utils.mjcf_utils import new_actuator, new_joint
@@ -66,7 +66,7 @@
model = world.get_model(mode="mujoco")
sim = MjSim(model)
- viewer = OpenCVRenderer(sim)
+ viewer = OpenCVViewer(sim)
render_context = MjRenderContextOffscreen(sim, device_id=-1)
sim.add_render_context(render_context)
diff --git a/robosuite/demos/demo_multi_camera.py b/robosuite/demos/demo_multi_camera.py
new file mode 100644
index 0000000000..2a7365b6cf
--- /dev/null
+++ b/robosuite/demos/demo_multi_camera.py
@@ -0,0 +1,70 @@
+import time
+
+from robosuite.robots import MobileRobot
+from robosuite.utils.input_utils import *
+
+MAX_FR = 25 # max frame rate for running simulation
+
+if __name__ == "__main__":
+
+ # Create dict to hold options that will be passed to env creation call
+ options = {}
+
+ # print welcome info
+ print("Welcome to robosuite v{}!".format(suite.__version__))
+ print(suite.__logo__)
+
+ # Choose environment and add it to options
+ options["env_name"] = choose_environment()
+
+ # If a multi-arm environment has been chosen, choose configuration and appropriate robot(s)
+ if "TwoArm" in options["env_name"]:
+ # Choose env config and add it to options
+ options["env_configuration"] = choose_multi_arm_config()
+
+ # If chosen configuration was bimanual, the corresponding robot must be Baxter. Else, have user choose robots
+ if options["env_configuration"] == "single-robot":
+ options["robots"] = choose_robots(exclude_bimanual=False, use_humanoids=True, exclude_single_arm=True)
+ else:
+ options["robots"] = []
+
+ # Have user choose two robots
+ for i in range(2):
+ print("Please choose Robot {}...\n".format(i))
+ options["robots"].append(choose_robots(exclude_bimanual=False, use_humanoids=True))
+ # If a humanoid environment has been chosen, choose humanoid robots
+ elif "Humanoid" in options["env_name"]:
+ options["robots"] = choose_robots(use_humanoids=True)
+ else:
+ options["robots"] = choose_robots(exclude_bimanual=False, use_humanoids=True)
+
+ # initialize the task
+ env = suite.make(
+ **options,
+ has_renderer=True,
+ has_offscreen_renderer=False,
+ ignore_done=True,
+ use_camera_obs=False,
+ control_freq=20,
+ renderer="mujoco",
+ )
+ env.reset()
+
+ camera_name = ["agentview", "birdview"]
+ env.viewer.set_camera(camera_name=camera_name)
+ for robot in env.robots:
+ if isinstance(robot, MobileRobot):
+ robot.enable_parts(legs=False, base=False)
+
+ # do visualization
+ for i in range(10000):
+ start = time.time()
+ action = np.random.randn(*env.action_spec[0].shape)
+ obs, reward, done, _ = env.step(action)
+ env.render()
+
+ # limit frame rate if necessary
+ elapsed = time.time() - start
+ diff = 1 / MAX_FR - elapsed
+ if diff > 0:
+ time.sleep(diff)
diff --git a/robosuite/demos/demo_random_action.py b/robosuite/demos/demo_random_action.py
index 6ce751c1c9..b8694fae3a 100644
--- a/robosuite/demos/demo_random_action.py
+++ b/robosuite/demos/demo_random_action.py
@@ -1,5 +1,6 @@
import time
+from robosuite.robots import MobileRobot
from robosuite.utils.input_utils import *
MAX_FR = 25 # max frame rate for running simluation
@@ -22,23 +23,20 @@
options["env_configuration"] = choose_multi_arm_config()
# If chosen configuration was bimanual, the corresponding robot must be Baxter. Else, have user choose robots
- if options["env_configuration"] == "bimanual":
- options["robots"] = "Baxter"
+ if options["env_configuration"] == "single-robot":
+ options["robots"] = choose_robots(exclude_bimanual=False, use_humanoids=True, exclude_single_arm=True)
else:
options["robots"] = []
# Have user choose two robots
- print("A multiple single-arm configuration was chosen.\n")
-
for i in range(2):
print("Please choose Robot {}...\n".format(i))
- options["robots"].append(choose_robots(exclude_bimanual=True))
+ options["robots"].append(choose_robots(exclude_bimanual=False, use_humanoids=True))
# If a humanoid environment has been chosen, choose humanoid robots
elif "Humanoid" in options["env_name"]:
options["robots"] = choose_robots(use_humanoids=True)
- # Else, we simply choose a single (single-armed) robot to instantiate in the environment
else:
- options["robots"] = choose_robots(exclude_bimanual=True)
+ options["robots"] = choose_robots(exclude_bimanual=False, use_humanoids=True)
# initialize the task
env = suite.make(
@@ -51,15 +49,14 @@
)
env.reset()
env.viewer.set_camera(camera_id=0)
-
- # Get action limits
- low, high = env.action_spec
+ for robot in env.robots:
+ if isinstance(robot, MobileRobot):
+ robot.enable_parts(legs=False, base=False)
# do visualization
for i in range(10000):
start = time.time()
-
- action = np.random.uniform(low, high)
+ action = np.random.randn(*env.action_spec[0].shape)
obs, reward, done, _ = env.step(action)
env.render()
diff --git a/robosuite/demos/demo_segmentation.py b/robosuite/demos/demo_segmentation.py
index bb53b72d34..9487ae9d85 100644
--- a/robosuite/demos/demo_segmentation.py
+++ b/robosuite/demos/demo_segmentation.py
@@ -57,18 +57,17 @@ def segmentation_to_rgb(seg_im, random_colors=False):
parser.add_argument("--video-path", type=str, default="/tmp/video.mp4", help="Path to video file")
parser.add_argument("--random-colors", action="store_true", help="Radnomize segmentation colors")
parser.add_argument("--segmentation-level", type=str, default="element", help="instance, class, or element")
+ parser.add_argument("--env-name", type=str, default="TwoArmHandover", help="Environment name")
+ parser.add_argument("--camera", type=str, default="frontview", help="Camera name")
args = parser.parse_args()
# Create dict to hold options that will be passed to env creation call
options = {}
# Choose environment and add it to options
- options["env_name"] = "TwoArmHandover"
+ options["env_name"] = args.env_name
options["robots"] = ["Panda", "Panda"]
- # Choose camera
- camera = "frontview"
-
# Choose segmentation type
segmentation_level = args.segmentation_level # Options are {instance, class, element}
@@ -80,7 +79,7 @@ def segmentation_to_rgb(seg_im, random_colors=False):
ignore_done=True,
use_camera_obs=True,
control_freq=20,
- camera_names=camera,
+ camera_names=args.camera,
camera_segmentations=segmentation_level,
camera_heights=512,
camera_widths=512,
@@ -98,7 +97,7 @@ def segmentation_to_rgb(seg_im, random_colors=False):
action = 0.5 * np.random.uniform(low, high)
obs, reward, done, _ = env.step(action)
- video_img = obs[f"{camera}_segmentation_{segmentation_level}"].squeeze(-1)[::-1]
+ video_img = obs[f"{args.camera}_segmentation_{segmentation_level}"].squeeze(-1)[::-1]
np.savetxt("/tmp/seg_{}.txt".format(i), video_img, fmt="%.2f")
video_img = segmentation_to_rgb(video_img, args.random_colors)
video_writer.append_data(video_img)
diff --git a/robosuite/demos/demo_sensor_corruption.py b/robosuite/demos/demo_sensor_corruption.py
index 5f62d84baf..ce19a49398 100644
--- a/robosuite/demos/demo_sensor_corruption.py
+++ b/robosuite/demos/demo_sensor_corruption.py
@@ -25,23 +25,65 @@
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--environment", type=str, default="Lift")
- parser.add_argument("--robots", nargs="+", type=str, default="Panda", help="Which robot(s) to use in the env")
parser.add_argument(
- "--config", type=str, default="default", help="Specified environment configuration if necessary"
+ "--robots",
+ nargs="+",
+ type=str,
+ default="Panda",
+ help="Which robot(s) to use in the env",
)
- parser.add_argument("--arm", type=str, default="right", help="Which arm to control (eg bimanual) 'right' or 'left'")
- parser.add_argument("--switch-on-grasp", action="store_true", help="Switch gripper control on gripper action")
parser.add_argument(
- "--toggle-corruption-on-grasp", action="store_true", help="Toggle corruption ON / OFF on gripper action"
+ "--config",
+ type=str,
+ default="default",
+ help="Specified environment configuration if necessary",
+ )
+ parser.add_argument(
+ "--arm",
+ type=str,
+ default="right",
+ help="Which arm to control (eg bimanual) 'right' or 'left'",
+ )
+ parser.add_argument(
+ "--switch-on-grasp",
+ action="store_true",
+ help="Switch gripper control on gripper action",
+ )
+ parser.add_argument(
+ "--toggle-corruption-on-grasp",
+ action="store_true",
+ help="Toggle corruption ON / OFF on gripper action",
)
parser.add_argument("--device", type=str, default="keyboard")
- parser.add_argument("--pos-sensitivity", type=float, default=1.0, help="How much to scale position user inputs")
- parser.add_argument("--rot-sensitivity", type=float, default=1.0, help="How much to scale rotation user inputs")
+ parser.add_argument(
+ "--pos-sensitivity",
+ type=float,
+ default=1.0,
+ help="How much to scale position user inputs",
+ )
+ parser.add_argument(
+ "--rot-sensitivity",
+ type=float,
+ default=1.0,
+ help="How much to scale rotation user inputs",
+ )
parser.add_argument("--delay", type=float, default=0.04, help="average delay to use (sec)")
- parser.add_argument("--corruption", type=float, default=20.0, help="Scale of corruption to use (std dev)")
+ parser.add_argument(
+ "--corruption",
+ type=float,
+ default=20.0,
+ help="Scale of corruption to use (std dev)",
+ )
parser.add_argument("--camera", type=str, default="agentview", help="Name of camera to render")
parser.add_argument("--width", type=int, default=512)
parser.add_argument("--height", type=int, default=384)
+ parser.add_argument(
+ "--reverse_xy",
+ type=bool,  # NOTE(review): argparse type=bool treats any non-empty string as True; consider action="store_true"
+ default=False,
+ help="(DualSense Only) Reverse the effect of the x and y axes of the joystick. It is used to handle the case that the left/right and front/back sides of the view are opposite to the LX and LY of the joystick (push LX up but the robot moves left in your view)",
+ )
+
args = parser.parse_args()
# Create argument configuration
@@ -164,13 +206,30 @@ def calculate_proprio_delay():
if args.device == "keyboard":
from robosuite.devices import Keyboard
- device = Keyboard(env=env, pos_sensitivity=args.pos_sensitivity, rot_sensitivity=args.rot_sensitivity)
+ device = Keyboard(
+ env=env,
+ pos_sensitivity=args.pos_sensitivity,
+ rot_sensitivity=args.rot_sensitivity,
+ )
elif args.device == "spacemouse":
from robosuite.devices import SpaceMouse
- device = SpaceMouse(env=env, pos_sensitivity=args.pos_sensitivity, rot_sensitivity=args.rot_sensitivity)
+ device = SpaceMouse(
+ env=env,
+ pos_sensitivity=args.pos_sensitivity,
+ rot_sensitivity=args.rot_sensitivity,
+ )
+ elif args.device == "dualsense":
+ from robosuite.devices import DualSense
+
+ device = DualSense(
+ env=env,
+ pos_sensitivity=args.pos_sensitivity,
+ rot_sensitivity=args.rot_sensitivity,
+ reverse_xy=args.reverse_xy,
+ )
else:
- raise Exception("Invalid device choice: choose either 'keyboard' or 'spacemouse'.")
+ raise Exception("Invalid device choice: choose either 'keyboard' or 'spacemouse' or 'dualsense'.")
while True:
# Reset the environment
diff --git a/robosuite/demos/demo_usd_export.py b/robosuite/demos/demo_usd_export.py
index 0e0cbd756f..7bd1e75af4 100644
--- a/robosuite/demos/demo_usd_export.py
+++ b/robosuite/demos/demo_usd_export.py
@@ -1,4 +1,4 @@
-""" Exports a USD file corresponding to the collected trajectory.
+"""Exports a USD file corresponding to the collected trajectory.
The USD (Universal Scene Description) file format allows users to save
trajectories such that they can be rendered in external renderers such
@@ -8,8 +8,9 @@
***IMPORTANT***: If you are using mujoco version 3.1.1, please make sure
that you also have numpy < 2 installed in your environment. Failure to do
-so may result in incorrect renderings.
+so may result in incorrect renderings.
"""
+
import argparse
import mujoco
@@ -26,21 +27,68 @@
ROBOSUITE_DEFAULT_LOGGER.warning("If using mujoco==3.1.1, please use numpy < 2 for rendering with USD.")
if __name__ == "__main__":
-
parser = argparse.ArgumentParser()
parser.add_argument("--environment", type=str, default="Lift")
- parser.add_argument("--robots", nargs="+", type=str, default="Panda", help="Which robot(s) to use in the env")
parser.add_argument(
- "--config", type=str, default="default", help="Specified environment configuration if necessary"
+ "--robots",
+ nargs="+",
+ type=str,
+ default="Panda",
+ help="Which robot(s) to use in the env",
+ )
+ parser.add_argument(
+ "--config",
+ type=str,
+ default="default",
+ help="Specified environment configuration if necessary",
+ )
+ parser.add_argument(
+ "--arm",
+ type=str,
+ default="right",
+ help="Which arm to control (eg bimanual) 'right' or 'left'",
+ )
+ parser.add_argument(
+ "--camera",
+ type=str,
+ default="agentview",
+ help="Which camera to use for collecting demos",
+ )
+ parser.add_argument(
+ "--switch-on-grasp",
+ action="store_true",
+ help="Switch gripper control on gripper action",
+ )
+ parser.add_argument(
+ "--toggle-camera-on-grasp",
+ action="store_true",
+ help="Switch camera angle on gripper action",
+ )
+ parser.add_argument(
+ "--controller",
+ type=str,
+ default="BASIC",
+ help="Choice of controller. Can be 'ik' or 'osc'",
)
- parser.add_argument("--arm", type=str, default="right", help="Which arm to control (eg bimanual) 'right' or 'left'")
- parser.add_argument("--camera", type=str, default="agentview", help="Which camera to use for collecting demos")
- parser.add_argument("--switch-on-grasp", action="store_true", help="Switch gripper control on gripper action")
- parser.add_argument("--toggle-camera-on-grasp", action="store_true", help="Switch camera angle on gripper action")
- parser.add_argument("--controller", type=str, default="BASIC", help="Choice of controller. Can be 'ik' or 'osc'")
parser.add_argument("--device", type=str, default="keyboard")
- parser.add_argument("--pos-sensitivity", type=float, default=1.0, help="How much to scale position user inputs")
- parser.add_argument("--rot-sensitivity", type=float, default=1.0, help="How much to scale rotation user inputs")
+ parser.add_argument(
+ "--pos-sensitivity",
+ type=float,
+ default=1.0,
+ help="How much to scale position user inputs",
+ )
+ parser.add_argument(
+ "--rot-sensitivity",
+ type=float,
+ default=1.0,
+ help="How much to scale rotation user inputs",
+ )
+ parser.add_argument(
+ "--reverse_xy",
+ type=bool,  # NOTE(review): argparse type=bool treats any non-empty string as True; consider action="store_true"
+ default=False,
+ help="(DualSense Only) Reverse the effect of the x and y axes of the joystick. It is used to handle the case that the left/right and front/back sides of the view are opposite to the LX and LY of the joystick (push LX up but the robot moves left in your view)",
+ )
args = parser.parse_args()
# Get controller config
@@ -87,14 +135,31 @@
if args.device == "keyboard":
from robosuite.devices import Keyboard
- device = Keyboard(env=env, pos_sensitivity=args.pos_sensitivity, rot_sensitivity=args.rot_sensitivity)
+ device = Keyboard(
+ env=env,
+ pos_sensitivity=args.pos_sensitivity,
+ rot_sensitivity=args.rot_sensitivity,
+ )
env.viewer.add_keypress_callback(device.on_press)
elif args.device == "spacemouse":
from robosuite.devices import SpaceMouse
- device = SpaceMouse(env=env, pos_sensitivity=args.pos_sensitivity, rot_sensitivity=args.rot_sensitivity)
+ device = SpaceMouse(
+ env=env,
+ pos_sensitivity=args.pos_sensitivity,
+ rot_sensitivity=args.rot_sensitivity,
+ )
+ elif args.device == "dualsense":
+ from robosuite.devices import DualSense
+
+ device = DualSense(
+ env=env,
+ pos_sensitivity=args.pos_sensitivity,
+ rot_sensitivity=args.rot_sensitivity,
+ reverse_xy=args.reverse_xy,
+ )
else:
- raise Exception("Invalid device choice: choose either 'keyboard' or 'spacemouse'.")
+ raise Exception("Invalid device choice: choose either 'keyboard' or 'spacemouse' or 'dualsense'.")
env.reset()
cam_id = 0
diff --git a/robosuite/devices/__init__.py b/robosuite/devices/__init__.py
index 39822e1465..c380a4b313 100644
--- a/robosuite/devices/__init__.py
+++ b/robosuite/devices/__init__.py
@@ -3,10 +3,11 @@
try:
from .spacemouse import SpaceMouse
+ from .dualsense import DualSense
except ImportError as e:
print("Exception!", e)
print(
- """Unable to load module hid, required to interface with SpaceMouse.\n
+ """Unable to load module hid, required to interface with SpaceMouse or DualSense.\n
Only macOS is officially supported. Install the additional\n
requirements with `pip install -r requirements-extra.txt`"""
)
diff --git a/robosuite/devices/device.py b/robosuite/devices/device.py
index 32da777a0d..32bd1584d1 100644
--- a/robosuite/devices/device.py
+++ b/robosuite/devices/device.py
@@ -112,6 +112,7 @@ def input2action(self, mirror_actions=False) -> Optional[Dict]:
# Get controller reference
controller = robot.part_controllers[active_arm]
+ gripper = robot.gripper[active_arm]
gripper_dof = robot.gripper[active_arm].dof
assert controller.name in ["OSC_POSE", "JOINT_POSITION"], "only supporting OSC_POSE and JOINT_POSITION for now"
@@ -163,11 +164,15 @@ def input2action(self, mirror_actions=False) -> Optional[Dict]:
)
ac_dict[f"{active_arm}_abs"] = arm_action["abs"]
ac_dict[f"{active_arm}_delta"] = arm_action["delta"]
- ac_dict[f"{active_arm}_gripper"] = np.array([grasp] * gripper_dof)
+
+ if hasattr(gripper, "grasp_qpos"):
+ ac_dict[f"{active_arm}_gripper"] = getattr(gripper, "grasp_qpos")[grasp]
+ else:
+ ac_dict[f"{active_arm}_gripper"] = np.array([grasp] * gripper_dof)
# clip actions between -1 and 1
for (k, v) in ac_dict.items():
- if "abs" not in k:
+ if "abs" not in k and "gripper" not in k:
ac_dict[k] = np.clip(v, -1, 1)
return ac_dict
@@ -189,7 +194,7 @@ def get_arm_action(self, robot, arm, norm_delta, goal_update_mode="target"):
"delta": norm_delta,
"abs": abs_action,
}
- elif robot.composite_controller_config["type"] in ["WHOLE_BODY_MINK_IK"]:
+ elif robot.composite_controller_config["type"] in ["WHOLE_BODY_MINK_IK", "HYBRID_WHOLE_BODY_MINK_IK"]:
ref_frame = self.env.robots[0].composite_controller.composite_controller_specific_config.get(
"ik_input_ref_frame", "world"
)
diff --git a/robosuite/devices/dualsense.py b/robosuite/devices/dualsense.py
new file mode 100644
index 0000000000..0436aa9e16
--- /dev/null
+++ b/robosuite/devices/dualsense.py
@@ -0,0 +1,596 @@
+"""Driver class for DualSense controller.
+
+This class provides a driver support to DualSense on macOS.
+In particular, we assume you are using a DualSense Wireless by default.
+
+"""
+
+import threading
+import time
+from enum import IntFlag
+
+import numpy as np
+
+from robosuite import make
+from robosuite.utils.log_utils import ROBOSUITE_DEFAULT_LOGGER
+
+try:
+ import hid
+except ModuleNotFoundError as exc:
+ raise ImportError(
+ "Unable to load module hid, required to interface with DualSense. "
+ "Only macOS is officially supported. Install the additional "
+ "requirements with `pip install -r requirements-extra.txt`"
+ ) from exc
+
+
+import robosuite.macros as macros
+from robosuite.devices import Device
+from robosuite.utils.transform_utils import rotation_matrix
+
+
+class ConnectionType(IntFlag):
+ USB = 0x1
+ BT01 = 0x2
+ BT31 = 0x4
+ UNKNOWN = 0x8
+
+ @classmethod
+ def to_string(cls, value) -> str:
+ if value & cls.UNKNOWN:
+ return "Unknown"
+ if value & cls.BT01:
+ return "Bluetooth 01"
+ if value & cls.BT31:
+ return "Bluetooth 31"
+ if value & cls.USB:
+ return "USB"
+ return "Unknown"
+
+
+USB_REPORT_LENGTH = 64
+BT_REPORT31_LENGTH = 78
+BT_REPORT01_LENGTH = 10
+DUALSENSE_AXIS_LIST = ["LX", "LY", "RX", "RY", "L2_Trigger", "R2_Trigger"]
+DUALSENSE_BTN_LIST = [
+ "Triangle",
+ "Circle",
+ "Cross",
+ "Square",
+ "DpadUp",
+ "DpadDown",
+ "DpadLeft",
+ "DpadRight",
+ "L1",
+ "L2",
+ "L3",
+ "R1",
+ "R2",
+ "R3",
+]
+DUALSENSE_AXIS_MIN = 0
+DUALSENSE_AXIS_MAX = 255
+DUALSENSE_STICK_Neutral = 128
+DUALSENSE_Trigger_Neutral = 0
+
+
+def scale_to_control(x, axis_scale=128, min_v=-1.0, max_v=1.0):
+ """
+ Normalize raw HID readings to target range.
+
+ Args:
+ x (int): Raw reading from HID
+ axis_scale (float): (Inverted) scaling factor for mapping raw input value
+ min_v (float): Minimum limit after scaling
+ max_v (float): Maximum limit after scaling
+
+ Returns:
+ float: Clipped, scaled input from HID
+ """
+ x = x / axis_scale
+ x = min(max(x, min_v), max_v)
+ return x
+
+
+class DSState:
+ def __init__(self) -> None:
+ """
+ All dualsense states (inputs) that can be read. Second method to check if an input is pressed.
+ """
+ self.Square, self.Triangle, self.Circle, self.Cross = False, False, False, False
+ self.DpadUp, self.DpadDown, self.DpadLeft, self.DpadRight = (
+ False,
+ False,
+ False,
+ False,
+ )
+ self.L1, self.L2, self.L3, self.R1, self.R2, self.R3 = (
+ False,
+ False,
+ False,
+ False,
+ False,
+ False,
+ )
+ # neutral: 0x00, pressed: 0xff
+ self.L2_Trigger, self.R2_Trigger = 0, 0
+ self.RX, self.RY, self.LX, self.LY = 128, 128, 128, 128
+
+ def __str__(self):
+ return f"Square: {self.Square}, Triangle: {self.Triangle}, Circle: {self.Circle}, Cross: {self.Cross}, DpadUp: {self.DpadUp}, DpadDown: {self.DpadDown}, DpadLeft: {self.DpadLeft}, DpadRight: {self.DpadRight}, L1: {self.L1}, L2: {self.L2}, L3: {self.L3}, R1: {self.R1}, R2: {self.R2}, R3: {self.R3}, R2_Trigger: {self.R2_Trigger}, L2_Trigger: {self.L2_Trigger}"
+
+ def setDPadState(self, dpad_state: int) -> None:
+ """
+ Sets the dpad state variables according to the integers that was read from the controller
+
+ Args:
+ dpad_state (int): integer number representing the dpad state, actually a 4-bit number,[0,8]
+ """
+ if dpad_state == 0:
+ self.DpadUp = True
+ self.DpadDown = False
+ self.DpadLeft = False
+ self.DpadRight = False
+ elif dpad_state == 1:
+ self.DpadUp = True
+ self.DpadDown = False
+ self.DpadLeft = False
+ self.DpadRight = True
+ elif dpad_state == 2:
+ self.DpadUp = False
+ self.DpadDown = False
+ self.DpadLeft = False
+ self.DpadRight = True
+ elif dpad_state == 3:
+ self.DpadUp = False
+ self.DpadDown = True
+ self.DpadLeft = False
+ self.DpadRight = True
+ elif dpad_state == 4:
+ self.DpadUp = False
+ self.DpadDown = True
+ self.DpadLeft = False
+ self.DpadRight = False
+ elif dpad_state == 5:
+ self.DpadUp = False
+ self.DpadDown = True
+ self.DpadLeft = True
+ self.DpadRight = False
+ elif dpad_state == 6:
+ self.DpadUp = False
+ self.DpadDown = False
+ self.DpadLeft = True
+ self.DpadRight = False
+ elif dpad_state == 7:
+ self.DpadUp = True
+ self.DpadDown = False
+ self.DpadLeft = True
+ self.DpadRight = False
+ else:
+ self.DpadUp = False
+ self.DpadDown = False
+ self.DpadLeft = False
+ self.DpadRight = False
+
+
+class DualSense(Device):
+ """
+ A minimalistic driver class for DualSense with HID library.
+
+ Note: Use hid.enumerate() to view all USB human interface devices (HID).
+ Make sure DualSense is detected before running the script.
+ You can look up its vendor/product id from this method.
+
+ You can test your DualSense in https://hardwaretester.com/gamepad and https://nondebug.github.io/dualsense/dualsense-explorer.html
+ DualSense HID protocol refer to https://github.com/nondebug/dualsense
+
+ Args:
+ env (RobotEnv): The environment which contains the robot(s) to control
+ using this device.
+ pos_sensitivity (float): Magnitude of input position command scaling
+ rot_sensitivity (float): Magnitude of scale input rotation commands scaling
+ reverse_xy (bool): Whether to reverse the effect of the x and y axes of the joystick. It is used to handle the case that the left/right and front/back sides of the view are opposite to the LX and LY of the joystick (push LX up but the robot moves left in your view)
+ """
+
+ def __init__(
+ self,
+ env,
+ vendor_id=macros.DUALSENSE_VENDOR_ID,
+ product_id=macros.DUALSENSE_PRODUCT_ID,
+ pos_sensitivity=1.0,
+ rot_sensitivity=1.0,
+ reverse_xy=False,
+ ):
+ super().__init__(env)
+
+ print("Opening DualSense device")
+ self.vendor_id = vendor_id
+ self.product_id = product_id
+ self.device = hid.device()
+ try:
+ self.device.open(self.vendor_id, self.product_id) # DualSense
+ except Exception as e:
+ ROBOSUITE_DEFAULT_LOGGER.warning(
+ "Failed to open DualSense device. "
+ "Consider killing other processes that may be using the device, "
+ f"or try other product ids for SONY DualSense in {[hex(id) for id in macros.DUALSENSE_PRODUCT_IDs]}"
+ )
+ raise e
+
+ print("Manufacturer: %s" % self.device.get_manufacturer_string())
+ print("Product: %s" % self.device.get_product_string())
+
+ self.input_report_length = -1
+ self.output_report_length = -1
+ self.connection_type = self._check_connection_type()
+ # By default, bluetooth-connected DualSense only sends input report 0x01 which omits motion and touchpad data.
+ # Reading feature report 0x05 causes it to start sending input report 0x31.
+ # Note: The Gamepad API will do this for us if it enumerates the gamepad. Other applications like Steam may have also done this already.
+ if self.connection_type == ConnectionType.BT01:
+ self.device.get_feature_report(0x05, BT_REPORT31_LENGTH)
+ self.connection_type = ConnectionType.BT31
+ self.input_report_length = BT_REPORT31_LENGTH
+ self.output_report_length = BT_REPORT31_LENGTH
+ print("DualSense Connection type: %s" % ConnectionType.to_string(self.connection_type))
+ print("")
+ print(
+ "PS: You can modify `reverse_xy` if the left/right and front/back sides of the view are opposite to the LX and LY of the joystick (push LX up but the robot moves left in your view)."
+ )
+
+ self.reverse_xy = reverse_xy
+ self.report_bytes: bytearray = None
+ self.state: DSState = DSState()
+ self.last_state: DSState = None
+
+ self.pos_sensitivity = pos_sensitivity
+ self.rot_sensitivity = rot_sensitivity
+
+ # 6-DOF variables
+ self.x, self.y, self.z = 0, 0, 0
+ self.roll, self.pitch, self.yaw = 0, 0, 0
+
+ self._display_controls()
+
+ self.single_click_and_hold = False
+
+ self._control = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+ self._reset_state = 0
+ self.rotation = np.array([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]])
+ self._enabled = False
+
+ # launch a new listener thread to listen to DualSense
+ self.thread = threading.Thread(target=self.run)
+ self.thread.daemon = True
+ self.thread.start()
+
+ @staticmethod
+ def _display_controls():
+ """
+ Method to pretty print controls.
+ """
+
+ def print_command(char, info):
+ char += " " * (35 - len(char))
+ print("{}\t{}".format(char, info))
+
+ print("")
+ print_command("Control", "Command")
+ print_command("Move LX/LY left joystick", "move arm horizontally in x-y plane")
+ print_command("Press L2 Trigger with or without L1", "move arm vertically")
+ print_command("Move RX/RY right joystick", "rotate arm about x/y axis, namely roll/pitch")
+ print_command("Press R2 Trigger with or without R1", "rotate arm about z axis, namely yaw")
+ print_command("Square button", "reset simulation")
+ print_command("Circle button (hold)", "close gripper")
+ print_command("Triangle button", "toggle arm/base mode (if applicable)")
+ print_command("Left/Right Direction Pad", "switch active arm (if multi-armed robot)")
+ print_command("Up/Down Direction Pad", "switch active robot (if multi-robot environment)")
+ print_command("Control+C", "quit")
+ print("")
+
+ def _reset_internal_state(self):
+ """
+ Resets internal state of controller, except for the reset signal.
+ """
+ super()._reset_internal_state()
+
+ self.rotation = np.array([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]])
+ # Reset 6-DOF variables
+ self.x, self.y, self.z = 0, 0, 0
+ self.roll, self.pitch, self.yaw = 0, 0, 0
+ # Reset control
+ self._control = np.zeros(6)
+ # Reset grasp
+ self.single_click_and_hold = False
+
+ def start_control(self):
+ """
+ Method that should be called externally before controller can
+ start receiving commands.
+ """
+ self._reset_internal_state()
+ self._reset_state = 0
+ self._enabled = True
+
+ def _check_connection_type(self):
+ """
+ Get the connection type of the DualSense controller.
+ ConnectionType:
+ - USB: DualSense connected via USB
+ - BT01: DualSense connected via Bluetooth, sends input report id 0x01
+ - BT31: DualSense connected via Bluetooth, sends input report id 0x31
+ - UNKNOWN: Unknown connection type
+
+ Returns:
+ ConnectionType: connection type(USB, BT01, BT31, UNKNOWN)
+ """
+ dummy_report = self.device.read(100)
+ dummy_report_length = len(dummy_report)
+ if dummy_report_length == USB_REPORT_LENGTH:
+ self.input_report_length = USB_REPORT_LENGTH
+ self.output_report_length = USB_REPORT_LENGTH
+ return ConnectionType.USB
+ elif dummy_report_length == BT_REPORT01_LENGTH:
+ self.input_report_length = BT_REPORT01_LENGTH
+ self.output_report_length = BT_REPORT01_LENGTH
+ return ConnectionType.BT01
+ elif dummy_report_length == BT_REPORT31_LENGTH:
+ self.input_report_length = BT_REPORT31_LENGTH
+ self.output_report_length = BT_REPORT31_LENGTH
+ return ConnectionType.BT31
+ return ConnectionType.UNKNOWN
+
+ def _check_btn_changed(self, btn_name: str):
+ """
+ Check if a button has been pressed or released.
+
+ Args:
+ btn_name (str): Name of the button to check
+
+ Returns:
+ bool: True if the button has been pressed or released, False otherwise
+ """
+ assert btn_name in DUALSENSE_BTN_LIST
+ return getattr(self.state, btn_name) != getattr(self.last_state, btn_name)
+
+ def run(self):
+ """Listener method that keeps pulling new messages."""
+ t_last_click = -1
+
+ while True:
+ d = self.device.read(self.input_report_length)
+ if d is not None and self._enabled:
+ report_bytes = bytearray(d)
+ self.report_bytes = report_bytes
+ self.last_state = self.state
+
+ if self.connection_type == ConnectionType.USB:
+ self.state = parse_usb_report(report_bytes)
+ elif self.connection_type == ConnectionType.BT01:
+ # report id 0x01
+ assert report_bytes[0] == 0x01
+ self.state = parse_bt01_report(report_bytes)
+ # report id 0x31
+ elif self.connection_type == ConnectionType.BT31:
+ assert report_bytes[0] == 0x31
+ self.state = parse_bt31_report(report_bytes)
+ else:
+ raise NotImplementedError(f"Connection type {self.connection_type} not supported")
+ self.x = scale_to_control(self.state.LX if not self.reverse_xy else self.state.LY)
+ self.y = scale_to_control(self.state.LY if not self.reverse_xy else self.state.LX)
+ self.roll = scale_to_control(self.state.RX if not self.reverse_xy else self.state.RY)
+ self.pitch = scale_to_control(self.state.RY if not self.reverse_xy else self.state.RX)
+ self.z = scale_to_control(self.state.L2_Trigger)
+ if self.state.L1:
+ self.z = -self.z
+ self.yaw = scale_to_control(self.state.R2_Trigger)
+ if self.state.R1:
+ self.yaw = -self.yaw
+ self._control = [
+ self.x,
+ self.y,
+ self.z,
+ self.roll,
+ self.pitch,
+ self.yaw,
+ ]
+
+ # press left button
+ if self._check_btn_changed("Circle") and self.state.Circle:
+ t_click = time.time()
+ elapsed_time = t_click - t_last_click
+ t_last_click = t_click
+ self.single_click_and_hold = True
+
+ # release left button
+ if self._check_btn_changed("Circle") and not self.state.Circle:
+ self.single_click_and_hold = False
+
+ # Reset
+ if self._check_btn_changed("Square") and self.state.Square:
+ self._reset_state = 1
+ self._enabled = False
+ self._reset_internal_state()
+ # controls for mobile base (only applicable if mobile base present)
+ if self._check_btn_changed("Triangle") and self.state.Triangle:
+ self.base_modes[self.active_robot] = not self.base_modes[self.active_robot] # toggle mobile base
+
+ if self._check_btn_changed("DpadRight") and self.state.DpadRight:
+ self.active_arm_index = (self.active_arm_index + 1) % len(self.all_robot_arms[self.active_robot])
+ elif self._check_btn_changed("DpadLeft") and self.state.DpadLeft:
+ self.active_arm_index = (self.active_arm_index - 1) % len(self.all_robot_arms[self.active_robot])
+
+ if self._check_btn_changed("DpadUp") and self.state.DpadUp:
+ self.active_robot = (self.active_robot + 1) % self.num_robots
+ if self._check_btn_changed("DpadDown") and self.state.DpadDown:
+ self.active_robot = (self.active_robot - 1) % self.num_robots
+
+ @property
+ def control(self):
+ """
+ Grabs current pose of DualSense
+
+ Returns:
+ np.array: 6-DoF control value
+ """
+ return np.array(self._control)
+
+ @property
+ def control_gripper(self):
+ """
+ Maps internal states into gripper commands.
+
+ Returns:
+ float: Whether we're using single click and hold or not
+ """
+ if self.single_click_and_hold:
+ return 1.0
+ return 0.0
+
+ def get_controller_state(self):
+ """
+ Grabs the current state of the 3D mouse.
+
+ Returns:
+ dict: A dictionary containing dpos, orn, unmodified orn, grasp, and reset
+ """
+ dpos = self.control[:3] * 0.005 * self.pos_sensitivity
+ roll, pitch, yaw = self.control[3:] * 0.005 * self.rot_sensitivity
+
+ # convert RPY to an absolute orientation
+ drot1 = rotation_matrix(angle=-pitch, direction=[1.0, 0, 0], point=None)[:3, :3]
+ drot2 = rotation_matrix(angle=roll, direction=[0, 1.0, 0], point=None)[:3, :3]
+ drot3 = rotation_matrix(angle=yaw, direction=[0, 0, 1.0], point=None)[:3, :3]
+
+ self.rotation = self.rotation.dot(drot1.dot(drot2.dot(drot3)))
+
+ return dict(
+ dpos=dpos,
+ rotation=self.rotation,
+ raw_drotation=np.array([roll, pitch, yaw]),
+ grasp=self.control_gripper,
+ reset=self._reset_state,
+ base_mode=int(self.base_mode),
+ )
+
+ def _postprocess_device_outputs(self, dpos, drotation):
+ drotation = drotation * 50
+ dpos = dpos * 125
+
+ dpos = np.clip(dpos, -1, 1)
+ drotation = np.clip(drotation, -1, 1)
+
+ return dpos, drotation
+
+
+def parse_usb_report(state_bytes: bytearray) -> DSState:
+ new_state = DSState()
+ # states 0 is always 1
+ new_state.LX = state_bytes[1] - DUALSENSE_STICK_Neutral
+ new_state.LY = state_bytes[2] - DUALSENSE_STICK_Neutral
+ new_state.RX = state_bytes[3] - DUALSENSE_STICK_Neutral
+ new_state.RY = state_bytes[4] - DUALSENSE_STICK_Neutral
+ new_state.L2_Trigger = state_bytes[5]
+ new_state.R2_Trigger = state_bytes[6]
+
+ # state 7 always increments -> not used anywhere
+
+ buttonState = state_bytes[8]
+ new_state.Triangle = (buttonState & (1 << 7)) != 0
+ new_state.Circle = (buttonState & (1 << 6)) != 0
+ new_state.Cross = (buttonState & (1 << 5)) != 0
+ new_state.Square = (buttonState & (1 << 4)) != 0
+
+ # dpad
+ dpad_state = buttonState & 0x0F
+ new_state.setDPadState(dpad_state)
+
+ misc = state_bytes[9]
+ new_state.L1 = (misc & (1 << 0)) != 0
+ new_state.R1 = (misc & (1 << 1)) != 0
+ new_state.L2 = (misc & (1 << 2)) != 0
+ new_state.R2 = (misc & (1 << 3)) != 0
+ # new_state.share = (misc & (1 << 4)) != 0
+ # new_state.options = (misc & (1 << 5)) != 0
+ new_state.L3 = (misc & (1 << 6)) != 0
+ new_state.R3 = (misc & (1 << 7)) != 0
+ return new_state
+
+
+def parse_bt01_report(state_bytes: bytearray) -> DSState:
+ # states 0 is always 0x01
+ assert len(state_bytes) == BT_REPORT01_LENGTH
+ assert state_bytes[0] == 0x01
+ new_state = DSState()
+ new_state.LX = state_bytes[1] - DUALSENSE_STICK_Neutral
+ new_state.LY = state_bytes[2] - DUALSENSE_STICK_Neutral
+ new_state.RX = state_bytes[3] - DUALSENSE_STICK_Neutral
+ new_state.RY = state_bytes[4] - DUALSENSE_STICK_Neutral
+ new_state.L2_Trigger = state_bytes[8]
+ new_state.R2_Trigger = state_bytes[9]
+
+ buttonState = state_bytes[5]
+ new_state.Square = (buttonState & (1 << 4)) != 0
+ new_state.Cross = (buttonState & (1 << 5)) != 0
+ new_state.Circle = (buttonState & (1 << 6)) != 0
+ new_state.Triangle = (buttonState & (1 << 7)) != 0
+
+ # dpad
+ dpad_state = buttonState & 0x0F
+ new_state.setDPadState(dpad_state)
+
+ misc = state_bytes[6]
+ new_state.L1 = (misc & (1 << 0)) != 0
+ new_state.R1 = (misc & (1 << 1)) != 0
+ new_state.L2 = (misc & (1 << 2)) != 0
+ new_state.R2 = (misc & (1 << 3)) != 0
+ # new_state.share = (misc & (1 << 4)) != 0
+ # new_state.options = (misc & (1 << 5)) != 0
+ new_state.L3 = (misc & (1 << 6)) != 0
+ new_state.R3 = (misc & (1 << 7)) != 0
+ return new_state
+
+
+def parse_bt31_report(state_bytes: bytearray) -> DSState:
+ # states 0 is always 0x31
+ assert len(state_bytes) == BT_REPORT31_LENGTH
+ assert state_bytes[0] == 0x31
+ new_state = DSState()
+ new_state.LX = state_bytes[2] - DUALSENSE_STICK_Neutral
+ new_state.LY = state_bytes[3] - DUALSENSE_STICK_Neutral
+ new_state.RX = state_bytes[4] - DUALSENSE_STICK_Neutral
+ new_state.RY = state_bytes[5] - DUALSENSE_STICK_Neutral
+ new_state.L2_Trigger = state_bytes[6]
+ new_state.R2_Trigger = state_bytes[7]
+
+ # state 7 always increments -> not used anywhere
+
+ buttonState = state_bytes[9]
+ new_state.Triangle = (buttonState & (1 << 7)) != 0
+ new_state.Circle = (buttonState & (1 << 6)) != 0
+ new_state.Cross = (buttonState & (1 << 5)) != 0
+ new_state.Square = (buttonState & (1 << 4)) != 0
+
+ # dpad
+ dpad_state = buttonState & 0x0F
+ new_state.setDPadState(dpad_state)
+
+ misc = state_bytes[10]
+ new_state.L1 = (misc & (1 << 0)) != 0
+ new_state.R1 = (misc & (1 << 1)) != 0
+ new_state.L2 = (misc & (1 << 2)) != 0
+ new_state.R2 = (misc & (1 << 3)) != 0
+ # new_state.share = (misc & (1 << 4)) != 0
+ # new_state.options = (misc & (1 << 5)) != 0
+ new_state.L3 = (misc & (1 << 6)) != 0
+ new_state.R3 = (misc & (1 << 7)) != 0
+ return new_state
+
+
+if __name__ == "__main__":
+ env = make("Lift", robots="Panda")
+ dualsense = DualSense(env)
+ dualsense.start_control()
+ for i in range(100):
+ # print(dualsense.control, dualsense.control_gripper)
+ print(dualsense.state)
+ time.sleep(0.5)
diff --git a/robosuite/devices/mjgui.py b/robosuite/devices/mjgui.py
index 61876d0f22..4c5d5f4916 100644
--- a/robosuite/devices/mjgui.py
+++ b/robosuite/devices/mjgui.py
@@ -122,8 +122,8 @@ def input2action(self) -> Dict[str, np.ndarray]:
(i.e. absolute actions or delta actions) and input_ref_frame (i.e. world frame, base frame or eef frame)
from the controller itself.
- TODO: unify this logic to be independent from controller type.
"""
+ # TODO: unify this logic to be independent from controller type.
action: Dict[str, np.ndarray] = {}
gripper_dof = self.env.robots[0].gripper[self.active_end_effector].dof
site_names = self._get_site_names()
diff --git a/robosuite/environments/base.py b/robosuite/environments/base.py
index 168e4906a9..c0e77b2918 100644
--- a/robosuite/environments/base.py
+++ b/robosuite/environments/base.py
@@ -9,7 +9,8 @@
import robosuite.macros as macros
import robosuite.utils.sim_utils as SU
from robosuite.renderers.base import load_renderer_config
-from robosuite.utils import OpenCVRenderer, SimulationError, XMLError
+from robosuite.renderers.viewer import OpenCVViewer
+from robosuite.utils import SimulationError, XMLError
from robosuite.utils.binding_utils import MjRenderContextOffscreen, MjSim
REGISTERED_ENVS = {}
@@ -62,9 +63,9 @@ class MujocoEnv(metaclass=EnvMeta):
has_renderer (bool): If true, render the simulation state in
a viewer instead of headless mode.
has_offscreen_renderer (bool): True if using off-screen rendering.
- render_camera (str): Name of camera to render if `has_renderer` is True. Setting this value to 'None'
+ render_camera (str or list of str): Name of camera to render if `has_renderer` is True. Setting this value to 'None'
will result in the default angle being applied, which is useful as it can be dragged / panned by
- the user using the mouse
+ the user using the mouse. When a list of strings is provided, it will render from multiple camera angles.
render_collision_mesh (bool): True if rendering collision meshes
in camera. False otherwise.
render_visual_mesh (bool): True if rendering visual meshes
@@ -109,6 +110,8 @@ def __init__(
self.has_renderer = has_renderer
# offscreen renderer needed for on-screen rendering
self.has_offscreen_renderer = (has_renderer and renderer != "mjviewer") or has_offscreen_renderer
+ if render_camera is not None and isinstance(render_camera, str):
+ render_camera = [render_camera]
self.render_camera = render_camera
self.render_collision_mesh = render_collision_mesh
self.render_visual_mesh = render_visual_mesh
@@ -175,10 +178,11 @@ def initialize_renderer(self):
if self.renderer == "mujoco":
pass
elif self.renderer == "mjviewer":
- from robosuite.renderers.mjviewer.mjviewer_renderer import MjviewerRenderer
+ from robosuite.renderers.viewer import MjviewerRenderer
if self.render_camera is not None:
- camera_id = self.sim.model.camera_name2id(self.render_camera)
+ assert len(self.render_camera) == 1, "Only one camera can be specified for mjviewer"
+ camera_id = self.sim.model.camera_name2id(self.render_camera[0])
else:
camera_id = None
self.viewer = MjviewerRenderer(env=self, camera_id=camera_id, **self.renderer_config)
@@ -263,6 +267,10 @@ def reset(self):
# TODO(yukez): investigate black screen of death
# Use hard reset if requested
+ # always terminate mjviewer
+ if self.renderer == "mjviewer":
+ self._destroy_viewer()
+
if self.hard_reset and not self.deterministic_reset:
if self.renderer == "mujoco":
self._destroy_viewer()
@@ -314,12 +322,15 @@ def _reset_internal(self):
# create visualization screen or renderer
if self.has_renderer and self.viewer is None:
if self.renderer == "mujoco":
- self.viewer = OpenCVRenderer(self.sim)
+ self.viewer = OpenCVViewer(self.sim)
# Set the camera angle for viewing
if self.render_camera is not None:
- camera_id = self.sim.model.camera_name2id(self.render_camera)
- self.viewer.set_camera(camera_id)
+ camera_ids = []
+ for cam in self.render_camera:
+ camera_id = self.sim.model.camera_name2id(cam)
+ camera_ids.append(camera_id)
+ self.viewer.set_camera(camera_ids)
elif self.renderer == "mjviewer":
self.initialize_renderer()
diff --git a/robosuite/environments/manipulation/door.py b/robosuite/environments/manipulation/door.py
index 6d8dc189e2..4b032fcf09 100644
--- a/robosuite/environments/manipulation/door.py
+++ b/robosuite/environments/manipulation/door.py
@@ -33,6 +33,12 @@ class Door(ManipulationEnv):
overrides the default gripper. Should either be single str if same gripper type is to be used for all
robots or else it should be a list of the same length as "robots" param
+ base_types (None or str or list of str): type of base, used to instantiate base models from base factory.
+ Default is "default", which is the default base associated with the robot(s) the 'robots' specification.
+ None results in no base, and any other (valid) model overrides the default base. Should either be
+ single str if same base type is to be used for all robots or else it should be a list of the same
+ length as "robots" param
+
initialization_noise (dict or list of dict): Dict containing the initialization noise parameters.
The expected keys and corresponding value types are specified below:
@@ -138,6 +144,7 @@ def __init__(
env_configuration="default",
controller_configs=None,
gripper_types="default",
+ base_types="default",
initialization_noise="default",
use_latch=True,
use_camera_obs=True,
@@ -183,7 +190,7 @@ def __init__(
robots=robots,
env_configuration=env_configuration,
controller_configs=controller_configs,
- base_types="default",
+ base_types=base_types,
gripper_types=gripper_types,
initialization_noise=initialization_noise,
use_camera_obs=use_camera_obs,
diff --git a/robosuite/environments/manipulation/lift.py b/robosuite/environments/manipulation/lift.py
index 768161ae60..55f4219ae6 100644
--- a/robosuite/environments/manipulation/lift.py
+++ b/robosuite/environments/manipulation/lift.py
@@ -1,6 +1,7 @@
from collections import OrderedDict
import numpy as np
+import random
from robosuite.environments.manipulation.manipulation_env import ManipulationEnv
from robosuite.models.arenas import TableArena
@@ -35,6 +36,12 @@ class Lift(ManipulationEnv):
overrides the default gripper. Should either be single str if same gripper type is to be used for all
robots or else it should be a list of the same length as "robots" param
+ base_types (None or str or list of str): type of base, used to instantiate base models from base factory.
+ Default is "default", which is the default base associated with the robot(s) the 'robots' specification.
+ None results in no base, and any other (valid) model overrides the default base. Should either be
+ single str if same base type is to be used for all robots or else it should be a list of the same
+ length as "robots" param
+
initialization_noise (dict or list of dict): Dict containing the initialization noise parameters.
The expected keys and corresponding value types are specified below:
@@ -142,6 +149,7 @@ def __init__(
env_configuration="default",
controller_configs=None,
gripper_types="default",
+ base_types="default",
initialization_noise="default",
table_full_size=(0.8, 0.8, 0.05),
table_friction=(1.0, 5e-3, 1e-4),
@@ -431,3 +439,24 @@ def _check_success(self):
# cube is higher than the table top above a margin
return cube_height > table_height + 0.04
+
+
+class LocoLift(Lift):
+
+ def _load_model(self):
+ """
+ Loads an xml model, puts it in self.model
+ """
+ self.table_full_size = random.choice([
+ (2.0, 0.8, 0.05),
+ (0.8, 2.0, 0.05)
+ ])
+ self.table_offset = np.array((
+ random.uniform(0, 1),
+ random.uniform(-0.5, 0.5),
+ 0.8,
+ ))
+ if self.placement_initializer is not None:
+ self.placement_initializer.reference_pos = self.table_offset
+
+ super()._load_model()
diff --git a/robosuite/environments/manipulation/manipulation_env.py b/robosuite/environments/manipulation/manipulation_env.py
index e216dd4e31..4f566f0791 100644
--- a/robosuite/environments/manipulation/manipulation_env.py
+++ b/robosuite/environments/manipulation/manipulation_env.py
@@ -5,6 +5,7 @@
from robosuite.models.base import MujocoModel
from robosuite.models.grippers import GripperModel
from robosuite.robots import ROBOT_CLASS_MAPPING, FixedBaseRobot, MobileRobot
+from robosuite.robots.legged_robot import LeggedRobot
from robosuite.utils.observables import Observable, sensor
@@ -24,9 +25,9 @@ class ManipulationEnv(RobotEnv):
dict if same controller is to be used for all robots or else it should be a list of the same length as
"robots" param
- base_types (None or str or list of str): type of base, used to instantiate base models from base factory.
+ base_types (str or list of str): type of base, used to instantiate base models from base factory.
Default is "default", which is the default base associated with the robot(s) the 'robots' specification.
- None results in no base, and any other (valid) model overrides the default base. Should either be
+ "NullMount" results in no base, and any other (valid) model overrides the default base. Should either be
single str if same base type is to be used for all robots or else it should be a list of the same
length as "robots" param
@@ -58,9 +59,9 @@ class ManipulationEnv(RobotEnv):
has_offscreen_renderer (bool): True if using off-screen rendering
- render_camera (str): Name of camera to render if `has_renderer` is True. Setting this value to 'None'
+ render_camera (str or list of str): Name of camera to render if `has_renderer` is True. Setting this value to 'None'
will result in the default angle being applied, which is useful as it can be dragged / panned by
- the user using the mouse
+ the user using the mouse. When a list of strings is provided, it will render from multiple camera angles.
render_collision_mesh (bool): True if rendering collision meshes in camera. False otherwise.
@@ -486,5 +487,5 @@ def _check_robot_configuration(self, robots):
robots = [robots]
for robot in robots:
assert issubclass(ROBOT_CLASS_MAPPING[robot], FixedBaseRobot) or issubclass(
- ROBOT_CLASS_MAPPING[robot], MobileRobot
+ ROBOT_CLASS_MAPPING[robot], (MobileRobot, LeggedRobot)
), f"Only manipulator robots supported for manipulation environment! Got {ROBOT_CLASS_MAPPING[robot]}"
diff --git a/robosuite/environments/manipulation/nut_assembly.py b/robosuite/environments/manipulation/nut_assembly.py
index b2dc996ff7..abf9094fd8 100644
--- a/robosuite/environments/manipulation/nut_assembly.py
+++ b/robosuite/environments/manipulation/nut_assembly.py
@@ -159,9 +159,11 @@ def __init__(
env_configuration="default",
controller_configs=None,
gripper_types="default",
+ base_types="default",
initialization_noise="default",
table_full_size=(0.8, 0.8, 0.05),
table_friction=(1, 0.005, 0.0001),
+ table_offset=(0, 0, 0.82),
use_camera_obs=True,
use_object_obs=True,
reward_scale=1.0,
@@ -202,7 +204,7 @@ def __init__(
# settings for table top
self.table_full_size = table_full_size
self.table_friction = table_friction
- self.table_offset = np.array((0, 0, 0.82))
+ self.table_offset = table_offset
# reward configuration
self.reward_scale = reward_scale
@@ -218,7 +220,7 @@ def __init__(
robots=robots,
env_configuration=env_configuration,
controller_configs=controller_configs,
- base_types="default",
+ base_types=base_types,
gripper_types=gripper_types,
initialization_noise=initialization_noise,
use_camera_obs=use_camera_obs,
diff --git a/robosuite/environments/manipulation/pick_place.py b/robosuite/environments/manipulation/pick_place.py
index beafad45fa..b122c5a939 100644
--- a/robosuite/environments/manipulation/pick_place.py
+++ b/robosuite/environments/manipulation/pick_place.py
@@ -44,6 +44,12 @@ class PickPlace(ManipulationEnv):
overrides the default gripper. Should either be single str if same gripper type is to be used for all
robots or else it should be a list of the same length as "robots" param
+ base_types (None or str or list of str): type of base, used to instantiate base models from base factory.
+ Default is "default", which is the default base associated with the robot(s) the 'robots' specification.
+ None results in no base, and any other (valid) model overrides the default base. Should either be
+ single str if same base type is to be used for all robots or else it should be a list of the same
+ length as "robots" param
+
initialization_noise (dict or list of dict): Dict containing the initialization noise parameters.
The expected keys and corresponding value types are specified below:
@@ -173,6 +179,7 @@ def __init__(
env_configuration="default",
controller_configs=None,
gripper_types="default",
+ base_types="default",
initialization_noise="default",
table_full_size=(0.39, 0.49, 0.82),
table_friction=(1, 0.005, 0.0001),
@@ -238,7 +245,7 @@ def __init__(
robots=robots,
env_configuration=env_configuration,
controller_configs=controller_configs,
- base_types="default",
+ base_types=base_types,
gripper_types=gripper_types,
initialization_noise=initialization_noise,
use_camera_obs=use_camera_obs,
diff --git a/robosuite/environments/manipulation/stack.py b/robosuite/environments/manipulation/stack.py
index 1fc0f389ac..4c28d3cebe 100644
--- a/robosuite/environments/manipulation/stack.py
+++ b/robosuite/environments/manipulation/stack.py
@@ -35,6 +35,12 @@ class Stack(ManipulationEnv):
overrides the default gripper. Should either be single str if same gripper type is to be used for all
robots or else it should be a list of the same length as "robots" param
+ base_types (None or str or list of str): type of base, used to instantiate base models from base factory.
+ Default is "default", which is the default base associated with the robot(s) the 'robots' specification.
+ None results in no base, and any other (valid) model overrides the default base. Should either be
+ single str if same base type is to be used for all robots or else it should be a list of the same
+ length as "robots" param
+
initialization_noise (dict or list of dict): Dict containing the initialization noise parameters.
The expected keys and corresponding value types are specified below:
@@ -142,6 +148,7 @@ def __init__(
env_configuration="default",
controller_configs=None,
gripper_types="default",
+ base_types="default",
initialization_noise="default",
table_full_size=(0.8, 0.8, 0.05),
table_friction=(1.0, 5e-3, 1e-4),
@@ -188,7 +195,7 @@ def __init__(
robots=robots,
env_configuration=env_configuration,
controller_configs=controller_configs,
- base_types="default",
+ base_types=base_types,
gripper_types=gripper_types,
initialization_noise=initialization_noise,
use_camera_obs=use_camera_obs,
diff --git a/robosuite/environments/manipulation/tool_hang.py b/robosuite/environments/manipulation/tool_hang.py
index 7705450b5b..859fa95a53 100644
--- a/robosuite/environments/manipulation/tool_hang.py
+++ b/robosuite/environments/manipulation/tool_hang.py
@@ -36,6 +36,12 @@ class ToolHang(ManipulationEnv):
overrides the default gripper. Should either be single str if same gripper type is to be used for all
robots or else it should be a list of the same length as "robots" param
+ base_types (None or str or list of str): type of base, used to instantiate base models from base factory.
+ Default is "default", which is the default base associated with the robot(s) the 'robots' specification.
+ None results in no base, and any other (valid) model overrides the default base. Should either be
+ single str if same base type is to be used for all robots or else it should be a list of the same
+ length as "robots" param
+
initialization_noise (dict or list of dict): Dict containing the initialization noise parameters.
The expected keys and corresponding value types are specified below:
@@ -139,6 +145,7 @@ def __init__(
env_configuration="default",
controller_configs=None,
gripper_types="default",
+ base_types="default",
initialization_noise="default",
table_full_size=(0.8, 0.8, 0.05),
table_friction=(1.0, 5e-3, 1e-4),
@@ -181,7 +188,7 @@ def __init__(
robots=robots,
env_configuration=env_configuration,
controller_configs=controller_configs,
- base_types="default",
+ base_types=base_types,
gripper_types=gripper_types,
initialization_noise=initialization_noise,
use_camera_obs=use_camera_obs,
diff --git a/robosuite/environments/manipulation/wipe.py b/robosuite/environments/manipulation/wipe.py
index 07028f20b9..30ec444d0e 100644
--- a/robosuite/environments/manipulation/wipe.py
+++ b/robosuite/environments/manipulation/wipe.py
@@ -65,6 +65,12 @@ class Wipe(ManipulationEnv):
For this environment, setting a value other than the default ("WipingGripper") will raise an
AssertionError, as this environment is not meant to be used with any other alternative gripper.
+ base_types (None or str or list of str): type of base, used to instantiate base models from base factory.
+ Default is "default", which is the default base associated with the robot(s) the 'robots' specification.
+ None results in no base, and any other (valid) model overrides the default base. Should either be
+ single str if same base type is to be used for all robots or else it should be a list of the same
+ length as "robots" param
+
initialization_noise (dict or list of dict): Dict containing the initialization noise parameters.
The expected keys and corresponding value types are specified below:
@@ -169,6 +175,7 @@ def __init__(
env_configuration="default",
controller_configs=None,
gripper_types="WipingGripper",
+ base_types="default",
initialization_noise="default",
use_camera_obs=True,
use_object_obs=True,
diff --git a/robosuite/environments/robot_env.py b/robosuite/environments/robot_env.py
index 9c58f2964a..b39fefa24c 100644
--- a/robosuite/environments/robot_env.py
+++ b/robosuite/environments/robot_env.py
@@ -25,9 +25,9 @@ class RobotEnv(MujocoEnv):
dict if same controller is to be used for all robots or else it should be a list of the same length as
"robots" param
- mount_types (None or str or list of str): type of mount, used to instantiate mount models from mount factory.
+ mount_types (str or list of str): type of mount, used to instantiate mount models from mount factory.
Default is "default", which is the default mount associated with the robot(s) the 'robots' specification.
- None results in no mount, and any other (valid) model overrides the default mount. Should either be
+ "NullMount" results in no mount, and any other (valid) model overrides the default mount. Should either be
single str if same mount type is to be used for all robots or else it should be a list of the same
length as "robots" param
@@ -53,9 +53,9 @@ class RobotEnv(MujocoEnv):
has_offscreen_renderer (bool): True if using off-screen rendering
- render_camera (str): Name of camera to render if `has_renderer` is True. Setting this value to 'None'
+ render_camera (str or list of str): Name of camera to render if `has_renderer` is True. Setting this value to 'None'
will result in the default angle being applied, which is useful as it can be dragged / panned by
- the user using the mouse
+ the user using the mouse. When a list of strings is provided, it will render from multiple camera angles.
render_collision_mesh (bool): True if rendering collision meshes in camera. False otherwise.
diff --git a/robosuite/examples/third_party_controller/mink_controller.py b/robosuite/examples/third_party_controller/mink_controller.py
index 6d796ea6ee..d6ace6bb36 100644
--- a/robosuite/examples/third_party_controller/mink_controller.py
+++ b/robosuite/examples/third_party_controller/mink_controller.py
@@ -141,13 +141,16 @@ def __init__(
robot_model: mujoco.MjModel,
robot_joint_names: Optional[List[str]] = None,
verbose: bool = False,
- input_type: Literal["absolute", "relative", "relative_pose"] = "absolute",
+ input_type: Literal["absolute", "delta", "delta_pose"] = "absolute",
input_ref_frame: Literal["world", "base", "eef"] = "world",
input_rotation_repr: Literal["quat_wxyz", "axis_angle"] = "axis_angle",
posture_weights: Dict[str, float] = None,
solve_freq: float = 20.0,
hand_pos_cost: float = 1,
hand_ori_cost: float = 0.5,
+ com_cost: float = 0.0,
+ use_mink_posture_task: bool = False,
+ initial_qpos_as_posture_target: bool = False,
):
self.full_model: mujoco.MjModel = model
self.full_model_data: mujoco.MjData = data
@@ -157,9 +160,11 @@ def __init__(
self.posture_weights = posture_weights
self.hand_pos_cost = hand_pos_cost
self.hand_ori_cost = hand_ori_cost
-
+ self.com_cost = com_cost
+ self.use_mink_posture_task = use_mink_posture_task
self.hand_tasks: List[mink.FrameTask]
self.posture_task: WeightedPostureTask
+ self.com_task: Optional[mink.ComTask] = None
if robot_joint_names is None:
robot_joint_names: List[str] = [
@@ -168,15 +173,40 @@ def __init__(
if self.robot_model.joint(i).type != 0
] # Exclude fixed joints
- self.full_model_dof_ids: List[int] = np.array([self.full_model.joint(name).id for name in robot_joint_names])
+ # the order of the index is the same of model.qposadr
+ self.all_robot_qpos_indexes_in_full_model: List[int] = []
+ for i in range(self.robot_model.njnt):
+ joint_name = self.robot_model.joint(i).name
+ self.all_robot_qpos_indexes_in_full_model.extend(
+ self.full_model.joint(joint_name).qposadr[0] + np.arange(len(self.full_model.joint(joint_name).qpos0))
+ )
+ assert len(self.all_robot_qpos_indexes_in_full_model) == self.robot_model.nq
+
+ # the order of the index is determined by the order of the actuation_part_names
+ self.controlled_robot_qpos_indexes: List[int] = []
+ self.controlled_robot_qpos_indexes_in_full_model: List[int] = []
+ for name in robot_joint_names:
+ self.controlled_robot_qpos_indexes.extend(
+ self.robot_model.joint(name).qposadr[0] + np.arange(len(self.robot_model.joint(name).qpos0))
+ )
+ self.controlled_robot_qpos_indexes_in_full_model.extend(
+ self.full_model.joint(name).qposadr[0] + np.arange(len(self.full_model.joint(name).qpos0))
+ )
- self.robot_model_dof_ids: List[int] = np.array([self.robot_model.joint(name).id for name in robot_joint_names])
- self.full_model_dof_ids: List[int] = np.array([self.full_model.joint(name).id for name in robot_joint_names])
self.site_ids = [self.robot_model.site(site_name).id for site_name in site_names]
-
self.site_names = site_names
+
+ # update robot states
+ self.update_robot_states()
+
+ # setup tasks
self._setup_tasks()
- self.set_posture_target(np.zeros(self.robot_model.nq))
+ if initial_qpos_as_posture_target:
+ self.set_posture_target()
+ else:
+ self.set_posture_target(np.zeros(self.robot_model.nq))
+ if self.com_cost > 0.0:
+ self.set_com_target()
self.solver = "quadprog"
@@ -203,12 +233,17 @@ def __repr__(self) -> str:
return "IKSolverMink"
def _setup_tasks(self):
- weights = np.ones(self.robot_model.nq)
+ weights = np.ones(self.robot_model.nv)
+
for joint_name, posture_weight in self.posture_weights.items():
- joint_idx = self.robot_model.joint(joint_name).id
- weights[joint_idx] = posture_weight
+ joint = self.robot_model.joint(joint_name)
+ joint_dof_idx = joint.dofadr[0] + np.arange(len(joint.jntid))
+ weights[joint_dof_idx] = posture_weight
- self.posture_task = WeightedPostureTask(self.robot_model, cost=0.01, weights=weights, lm_damping=2)
+ if self.use_mink_posture_task:
+ self.posture_task = mink.PostureTask(self.robot_model, cost=weights * 0.1, lm_damping=1.0)
+ else:
+ self.posture_task = WeightedPostureTask(self.robot_model, cost=0.01, weights=weights, lm_damping=2)
self.tasks = [self.posture_task]
@@ -217,6 +252,10 @@ def _setup_tasks(self):
)
self.tasks.extend(self.hand_tasks)
+ if self.com_cost > 0.0:
+ self.com_task = mink.ComTask(cost=self.com_cost)
+ self.tasks.append(self.com_task)
+
def _create_frame_tasks(self, frame_names: List[str], position_cost: float, orientation_cost: float):
return [
mink.FrameTask(
@@ -229,13 +268,33 @@ def _create_frame_tasks(self, frame_names: List[str], position_cost: float, orie
for frame in frame_names
]
+ def update_robot_states(self):
+ # update the base pose, important for mobile robots such as humanoids
+ self.configuration.model.body("robot0_base").pos = self.full_model.body("robot0_base").pos
+ self.configuration.model.body("robot0_base").quat = self.full_model.body("robot0_base").quat
+
+ # update the qpos for the robot model
+ self.configuration.update(
+ self.full_model_data.qpos[self.controlled_robot_qpos_indexes_in_full_model],
+ self.controlled_robot_qpos_indexes,
+ )
+
def set_target_poses(self, target_poses: List[np.ndarray]):
for task, target in zip(self.hand_tasks, target_poses):
se3_target = mink.SE3.from_matrix(target)
task.set_target(se3_target)
- def set_posture_target(self, posture_target: np.ndarray):
- self.posture_task.set_target(posture_target)
+ def set_posture_target(self, posture_target: Optional[np.ndarray] = None):
+ if posture_target is None:
+ self.posture_task.set_target_from_configuration(self.configuration)
+ else:
+ self.posture_task.set_target(posture_target)
+
+ def set_com_target(self, com_target: Optional[np.ndarray] = None):
+ assert self.com_task is not None, "COM task is not initialized"
+ if com_target is None:
+ com_target = self.configuration.data.subtree_com[1]
+ self.com_task.set_target(com_target)
def action_split_indexes(self) -> Dict[str, Tuple[int, int]]:
action_split_indexes: Dict[str, Tuple[int, int]] = {}
@@ -261,9 +320,8 @@ def transform_pose(
if src_frame == dst_frame:
return src_frame_pose
- self.configuration.model.body("robot0_base").pos = self.full_model.body("robot0_base").pos
- self.configuration.model.body("robot0_base").quat = self.full_model.body("robot0_base").quat
- self.configuration.update()
+ self.robot_model.body("robot0_base").pos = self.full_model.body("robot0_base").pos
+ self.robot_model.body("robot0_base").quat = self.full_model.body("robot0_base").quat
X_src_frame_pose = src_frame_pose
# convert src frame pose to world frame pose
@@ -294,13 +352,8 @@ def solve(self, input_action: np.ndarray) -> np.ndarray:
By updating configuration's bose to match the actual base pose (in 'world' frame),
we're requiring our tasks' targets to be in the 'world' frame for mink.solve_ik().
"""
- # update configuration's base to match actual base
- self.configuration.model.body("robot0_base").pos = self.full_model.body("robot0_base").pos
- self.configuration.model.body("robot0_base").quat = self.full_model.body("robot0_base").quat
- # update configuration's qpos to match actual qpos
- self.configuration.update(
- self.full_model_data.qpos[self.full_model_dof_ids], update_idxs=self.robot_model_dof_ids
- )
+
+ self.update_robot_states()
input_action = input_action.reshape(len(self.site_names), -1)
input_pos = input_action[:, : self.pos_dim]
@@ -315,17 +368,32 @@ def solve(self, input_action: np.ndarray) -> np.ndarray:
input_quat_wxyz = input_ori
if self.input_ref_frame == "base":
- input_poses = np.zeros((len(self.site_ids), 4, 4))
- for i in range(len(self.site_ids)):
- base_pos = self.configuration.data.body("robot0_base").xpos
- base_ori = self.configuration.data.body("robot0_base").xmat.reshape(3, 3)
+ base_pos = self.configuration.data.body("robot0_base").xpos
+ base_ori = self.configuration.data.body("robot0_base").xmat.reshape(3, 3)
+
+ if self.input_type == "absolute":
+ # For absolute poses, transform both position and orientation from base to world frame
base_pose = T.make_pose(base_pos, base_ori)
- input_pose = T.make_pose(input_pos[i], T.quat2mat(np.roll(input_quat_wxyz[i], -1)))
- input_poses[i] = np.dot(base_pose, input_pose)
- input_pos = input_poses[:, :3, 3]
- input_quat_wxyz = np.array(
- [np.roll(T.mat2quat(input_poses[i, :3, :3]), shift=1) for i in range(len(self.site_ids))]
- )
+
+ # Transform each input pose to world frame
+ input_poses = []
+ for pos, quat in zip(input_pos, input_quat_wxyz):
+ pose_in_base = T.make_pose(pos, T.quat2mat(np.roll(quat, -1)))
+ world_pose = base_pose @ pose_in_base
+ input_poses.append(world_pose)
+
+ input_poses = np.array(input_poses)
+ input_pos = input_poses[:, :3, 3]
+ input_quat_wxyz = np.array([np.roll(T.mat2quat(pose[:3, :3]), 1) for pose in input_poses])
+
+ elif self.input_type == "delta":
+ # For deltas, only rotate the vectors from base to world frame
+ input_pos = np.array([base_ori @ pos for pos in input_pos])
+
+ # Transform rotation deltas using rotation matrices
+ input_quat_wxyz = np.array(
+ [np.roll(T.mat2quat(base_ori @ T.quat2mat(np.roll(quat, -1))), 1) for quat in input_quat_wxyz]
+ )
elif self.input_ref_frame == "eef":
raise NotImplementedError("Input reference frame 'eef' not yet implemented")
@@ -337,7 +405,7 @@ def solve(self, input_action: np.ndarray) -> np.ndarray:
target_pos = input_pos + cur_pos
target_quat_xyzw = np.array(
[
- T.quat_multiply(T.mat2quat(cur_ori[i].reshape(3, 3)), np.roll(input_quat_wxyz[i], -1))
+ T.quat_multiply(np.roll(input_quat_wxyz[i], -1), T.mat2quat(cur_ori[i].reshape(3, 3)))
for i in range(len(self.site_ids))
]
)
@@ -394,7 +462,7 @@ def solve(self, input_action: np.ndarray) -> np.ndarray:
if self.i % 50:
print(f"Task errors: {task_translation_errors}")
- return self.configuration.data.qpos[self.robot_model_dof_ids]
+ return self.configuration.data.qpos[self.controlled_robot_qpos_indexes]
def _get_task_translation_errors(self) -> List[float]:
errors = []
@@ -415,6 +483,7 @@ def _get_task_errors(self) -> List[float]:
for task in self.hand_tasks:
error = task.compute_error(self.configuration)
errors.append(np.linalg.norm(error[:3]))
+ errors.append(self.posture_task.compute_error(self.configuration))
return errors
@@ -489,9 +558,11 @@ def _init_joint_action_policy(self):
self.joint_action_policy = IKSolverMink(
model=self.sim.model._model,
data=self.sim.data._data,
- site_names=self.composite_controller_specific_config["ref_name"]
- if "ref_name" in self.composite_controller_specific_config
- else default_site_names,
+ site_names=(
+ self.composite_controller_specific_config["ref_name"]
+ if "ref_name" in self.composite_controller_specific_config
+ else default_site_names
+ ),
robot_model=self.robot_model.mujoco_model,
robot_joint_names=joint_names,
input_type=self.composite_controller_specific_config.get("ik_input_type", "absolute"),
@@ -501,5 +572,9 @@ def _init_joint_action_policy(self):
posture_weights=self.composite_controller_specific_config.get("ik_posture_weights", {}),
hand_pos_cost=self.composite_controller_specific_config.get("ik_hand_pos_cost", 1.0),
hand_ori_cost=self.composite_controller_specific_config.get("ik_hand_ori_cost", 0.5),
- verbose=self.composite_controller_specific_config.get("ik_verbose", False),
+ use_mink_posture_task=self.composite_controller_specific_config.get("use_mink_posture_task", False),
+ initial_qpos_as_posture_target=self.composite_controller_specific_config.get(
+ "initial_qpos_as_posture_target", False
+ ),
+ verbose=self.composite_controller_specific_config.get("verbose", False),
)
diff --git a/robosuite/macros.py b/robosuite/macros.py
index 917bfb4cb9..364f0fa55d 100644
--- a/robosuite/macros.py
+++ b/robosuite/macros.py
@@ -38,6 +38,10 @@
SPACEMOUSE_VENDOR_ID = 9583
SPACEMOUSE_PRODUCT_ID = 50734
+# DualSense settings. Used by DualSense class in robosuite/devices/dualsense.py
+DUALSENSE_VENDOR_ID = 0x054C
+DUALSENSE_PRODUCT_ID = 0x0CE6
+
# If LOGGING LEVEL is set to None, the logger will be turned off
CONSOLE_LOGGING_LEVEL = "INFO"
# File logging is written to /tmp/robosuite_{time}_{pid}.log by default
@@ -50,6 +54,6 @@
import robosuite
from robosuite.utils.log_utils import ROBOSUITE_DEFAULT_LOGGER
- ROBOSUITE_DEFAULT_LOGGER.warn("No private macro file found!")
- ROBOSUITE_DEFAULT_LOGGER.warn("It is recommended to use a private macro file")
- ROBOSUITE_DEFAULT_LOGGER.warn("To setup, run: python {}/scripts/setup_macros.py".format(robosuite.__path__[0]))
+ ROBOSUITE_DEFAULT_LOGGER.warning("No private macro file found!")
+ ROBOSUITE_DEFAULT_LOGGER.warning("It is recommended to use a private macro file")
+ ROBOSUITE_DEFAULT_LOGGER.warning("To setup, run: python {}/scripts/setup_macros.py".format(robosuite.__path__[0]))
diff --git a/robosuite/models/assets/bases/null_base.xml b/robosuite/models/assets/bases/null_base.xml
new file mode 100644
index 0000000000..4ab2db8bd8
--- /dev/null
+++ b/robosuite/models/assets/bases/null_base.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
diff --git a/robosuite/models/assets/grippers/fourier_left_hand.xml b/robosuite/models/assets/grippers/fourier_left_hand.xml
new file mode 100644
index 0000000000..3b2c36041d
--- /dev/null
+++ b/robosuite/models/assets/grippers/fourier_left_hand.xml
@@ -0,0 +1,198 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/robosuite/models/assets/grippers/fourier_right_hand.xml b/robosuite/models/assets/grippers/fourier_right_hand.xml
new file mode 100644
index 0000000000..b1529c4fa1
--- /dev/null
+++ b/robosuite/models/assets/grippers/fourier_right_hand.xml
@@ -0,0 +1,217 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/robosuite/models/assets/grippers/inspire_left_hand.xml b/robosuite/models/assets/grippers/inspire_left_hand.xml
index 97962695ab..e232285fdd 100644
--- a/robosuite/models/assets/grippers/inspire_left_hand.xml
+++ b/robosuite/models/assets/grippers/inspire_left_hand.xml
@@ -236,18 +236,18 @@
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/robosuite/models/assets/grippers/inspire_right_hand.xml b/robosuite/models/assets/grippers/inspire_right_hand.xml
index c9be5a7cfa..a508a3f566 100644
--- a/robosuite/models/assets/grippers/inspire_right_hand.xml
+++ b/robosuite/models/assets/grippers/inspire_right_hand.xml
@@ -233,18 +233,18 @@
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/L_hand_base_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/L_hand_base_link.STL
new file mode 100644
index 0000000000..875506ce79
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/L_hand_base_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/L_index_intermediate_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/L_index_intermediate_link.STL
new file mode 100644
index 0000000000..e7266f6782
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/L_index_intermediate_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/L_index_proximal_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/L_index_proximal_link.STL
new file mode 100644
index 0000000000..7b952e1931
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/L_index_proximal_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/L_middle_intermediate_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/L_middle_intermediate_link.STL
new file mode 100644
index 0000000000..4f6184caca
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/L_middle_intermediate_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/L_middle_proximal_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/L_middle_proximal_link.STL
new file mode 100644
index 0000000000..1ecd5042b1
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/L_middle_proximal_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/L_pinky_intermediate_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/L_pinky_intermediate_link.STL
new file mode 100644
index 0000000000..c3e4127450
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/L_pinky_intermediate_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/L_pinky_proximal_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/L_pinky_proximal_link.STL
new file mode 100644
index 0000000000..dfefe3f173
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/L_pinky_proximal_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/L_ring_intermediate_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/L_ring_intermediate_link.STL
new file mode 100644
index 0000000000..ba7ddf21a1
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/L_ring_intermediate_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/L_ring_proximal_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/L_ring_proximal_link.STL
new file mode 100644
index 0000000000..5c2cfcd616
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/L_ring_proximal_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_distal_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_distal_link.STL
new file mode 100644
index 0000000000..bdf42be8f8
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_distal_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_proximal_base_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_proximal_base_link.STL
new file mode 100644
index 0000000000..0115b791c8
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_proximal_base_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_proximal_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_proximal_link.STL
new file mode 100644
index 0000000000..f89e6f585b
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/L_thumb_proximal_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/R_hand_base_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/R_hand_base_link.STL
new file mode 100644
index 0000000000..3ec2a5fbf2
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/R_hand_base_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/R_index_intermediate_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/R_index_intermediate_link.STL
new file mode 100644
index 0000000000..cebf9d0988
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/R_index_intermediate_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/R_index_proximal_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/R_index_proximal_link.STL
new file mode 100644
index 0000000000..44e3bb20e5
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/R_index_proximal_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/R_middle_intermediate_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/R_middle_intermediate_link.STL
new file mode 100644
index 0000000000..64898855ec
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/R_middle_intermediate_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/R_middle_proximal_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/R_middle_proximal_link.STL
new file mode 100644
index 0000000000..00e05d2853
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/R_middle_proximal_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/R_pinky_intermediate_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/R_pinky_intermediate_link.STL
new file mode 100644
index 0000000000..89f636bc2c
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/R_pinky_intermediate_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/R_pinky_proximal_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/R_pinky_proximal_link.STL
new file mode 100644
index 0000000000..92d93fd35f
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/R_pinky_proximal_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/R_ring_intermediate_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/R_ring_intermediate_link.STL
new file mode 100644
index 0000000000..c6d0862fc7
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/R_ring_intermediate_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/R_ring_proximal_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/R_ring_proximal_link.STL
new file mode 100644
index 0000000000..93a6e15266
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/R_ring_proximal_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_distal_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_distal_link.STL
new file mode 100644
index 0000000000..2c74cf031c
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_distal_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_proximal_base_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_proximal_base_link.STL
new file mode 100644
index 0000000000..e48921672e
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_proximal_base_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_proximal_link.STL b/robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_proximal_link.STL
new file mode 100644
index 0000000000..2ac8ba37b8
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/fourier_hands/R_thumb_proximal_link.STL differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/base_link.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/base_link.stl
new file mode 100644
index 0000000000..6a3263b5b5
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/base_link.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/end_tool.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/end_tool.stl
new file mode 100644
index 0000000000..e2324f41ef
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/end_tool.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/left_finger.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/left_finger.stl
new file mode 100644
index 0000000000..41f2c0d76f
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/left_finger.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/left_inner_knuckle.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/left_inner_knuckle.stl
new file mode 100644
index 0000000000..abeff0cec0
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/left_inner_knuckle.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/left_outer_knuckle.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/left_outer_knuckle.stl
new file mode 100644
index 0000000000..639add9e0e
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/left_outer_knuckle.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/link1.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link1.stl
new file mode 100644
index 0000000000..a5ca309d62
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link1.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/link2.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link2.stl
new file mode 100644
index 0000000000..1539adb628
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link2.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/link3.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link3.stl
new file mode 100644
index 0000000000..23926351ee
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link3.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/link4.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link4.stl
new file mode 100644
index 0000000000..a24cc91df0
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link4.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/link5.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link5.stl
new file mode 100644
index 0000000000..7f91e37cfa
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link5.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/link6.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link6.stl
new file mode 100644
index 0000000000..04fc480566
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link6.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/link7.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link7.stl
new file mode 100644
index 0000000000..0e9963c387
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link7.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/link_base.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link_base.stl
new file mode 100644
index 0000000000..555d6f2ea3
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/link_base.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/right_finger.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/right_finger.stl
new file mode 100644
index 0000000000..53f0eecae8
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/right_finger.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/right_inner_knuckle.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/right_inner_knuckle.stl
new file mode 100644
index 0000000000..f1a486daf3
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/right_inner_knuckle.stl differ
diff --git a/robosuite/models/assets/grippers/meshes/xarm7_gripper/right_outer_knuckle.stl b/robosuite/models/assets/grippers/meshes/xarm7_gripper/right_outer_knuckle.stl
new file mode 100644
index 0000000000..3099dfdeda
Binary files /dev/null and b/robosuite/models/assets/grippers/meshes/xarm7_gripper/right_outer_knuckle.stl differ
diff --git a/robosuite/models/assets/grippers/xarm7_gripper.xml b/robosuite/models/assets/grippers/xarm7_gripper.xml
new file mode 100644
index 0000000000..a6527d1051
--- /dev/null
+++ b/robosuite/models/assets/grippers/xarm7_gripper.xml
@@ -0,0 +1,138 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/robosuite/models/assets/robots/baxter/robot.xml b/robosuite/models/assets/robots/baxter/robot.xml
index 99ed75156a..36c1a84af2 100644
--- a/robosuite/models/assets/robots/baxter/robot.xml
+++ b/robosuite/models/assets/robots/baxter/robot.xml
@@ -9,7 +9,7 @@
-
+
diff --git a/robosuite/models/assets/robots/sawyer/robot.xml b/robosuite/models/assets/robots/sawyer/robot.xml
index a7aa26ccff..d0cef836a6 100644
--- a/robosuite/models/assets/robots/sawyer/robot.xml
+++ b/robosuite/models/assets/robots/sawyer/robot.xml
@@ -28,10 +28,10 @@
-
+
-
+
diff --git a/robosuite/models/assets/robots/xarm7/assets/base_link.stl b/robosuite/models/assets/robots/xarm7/assets/base_link.stl
new file mode 100644
index 0000000000..6a3263b5b5
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/base_link.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/end_tool.stl b/robosuite/models/assets/robots/xarm7/assets/end_tool.stl
new file mode 100644
index 0000000000..e2324f41ef
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/end_tool.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/left_finger.stl b/robosuite/models/assets/robots/xarm7/assets/left_finger.stl
new file mode 100644
index 0000000000..41f2c0d76f
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/left_finger.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/left_inner_knuckle.stl b/robosuite/models/assets/robots/xarm7/assets/left_inner_knuckle.stl
new file mode 100644
index 0000000000..abeff0cec0
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/left_inner_knuckle.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/left_outer_knuckle.stl b/robosuite/models/assets/robots/xarm7/assets/left_outer_knuckle.stl
new file mode 100644
index 0000000000..639add9e0e
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/left_outer_knuckle.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/link1.stl b/robosuite/models/assets/robots/xarm7/assets/link1.stl
new file mode 100644
index 0000000000..a5ca309d62
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/link1.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/link2.stl b/robosuite/models/assets/robots/xarm7/assets/link2.stl
new file mode 100644
index 0000000000..1539adb628
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/link2.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/link3.stl b/robosuite/models/assets/robots/xarm7/assets/link3.stl
new file mode 100644
index 0000000000..23926351ee
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/link3.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/link4.stl b/robosuite/models/assets/robots/xarm7/assets/link4.stl
new file mode 100644
index 0000000000..a24cc91df0
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/link4.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/link5.stl b/robosuite/models/assets/robots/xarm7/assets/link5.stl
new file mode 100644
index 0000000000..7f91e37cfa
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/link5.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/link6.stl b/robosuite/models/assets/robots/xarm7/assets/link6.stl
new file mode 100644
index 0000000000..04fc480566
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/link6.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/link7.stl b/robosuite/models/assets/robots/xarm7/assets/link7.stl
new file mode 100644
index 0000000000..0e9963c387
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/link7.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/link_base.stl b/robosuite/models/assets/robots/xarm7/assets/link_base.stl
new file mode 100644
index 0000000000..555d6f2ea3
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/link_base.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/right_finger.stl b/robosuite/models/assets/robots/xarm7/assets/right_finger.stl
new file mode 100644
index 0000000000..53f0eecae8
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/right_finger.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/right_inner_knuckle.stl b/robosuite/models/assets/robots/xarm7/assets/right_inner_knuckle.stl
new file mode 100644
index 0000000000..f1a486daf3
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/right_inner_knuckle.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/assets/right_outer_knuckle.stl b/robosuite/models/assets/robots/xarm7/assets/right_outer_knuckle.stl
new file mode 100644
index 0000000000..3099dfdeda
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/assets/right_outer_knuckle.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/base_link.stl b/robosuite/models/assets/robots/xarm7/meshes/base_link.stl
new file mode 100644
index 0000000000..6a3263b5b5
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/base_link.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/end_tool.stl b/robosuite/models/assets/robots/xarm7/meshes/end_tool.stl
new file mode 100644
index 0000000000..e2324f41ef
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/end_tool.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/left_finger.stl b/robosuite/models/assets/robots/xarm7/meshes/left_finger.stl
new file mode 100644
index 0000000000..41f2c0d76f
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/left_finger.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/left_inner_knuckle.stl b/robosuite/models/assets/robots/xarm7/meshes/left_inner_knuckle.stl
new file mode 100644
index 0000000000..abeff0cec0
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/left_inner_knuckle.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/left_outer_knuckle.stl b/robosuite/models/assets/robots/xarm7/meshes/left_outer_knuckle.stl
new file mode 100644
index 0000000000..639add9e0e
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/left_outer_knuckle.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/link1.stl b/robosuite/models/assets/robots/xarm7/meshes/link1.stl
new file mode 100644
index 0000000000..a5ca309d62
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/link1.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/link2.stl b/robosuite/models/assets/robots/xarm7/meshes/link2.stl
new file mode 100644
index 0000000000..1539adb628
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/link2.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/link3.stl b/robosuite/models/assets/robots/xarm7/meshes/link3.stl
new file mode 100644
index 0000000000..23926351ee
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/link3.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/link4.stl b/robosuite/models/assets/robots/xarm7/meshes/link4.stl
new file mode 100644
index 0000000000..a24cc91df0
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/link4.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/link5.stl b/robosuite/models/assets/robots/xarm7/meshes/link5.stl
new file mode 100644
index 0000000000..7f91e37cfa
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/link5.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/link6.stl b/robosuite/models/assets/robots/xarm7/meshes/link6.stl
new file mode 100644
index 0000000000..04fc480566
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/link6.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/link7.stl b/robosuite/models/assets/robots/xarm7/meshes/link7.stl
new file mode 100644
index 0000000000..0e9963c387
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/link7.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/link_base.stl b/robosuite/models/assets/robots/xarm7/meshes/link_base.stl
new file mode 100644
index 0000000000..555d6f2ea3
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/link_base.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/right_finger.stl b/robosuite/models/assets/robots/xarm7/meshes/right_finger.stl
new file mode 100644
index 0000000000..53f0eecae8
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/right_finger.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/right_inner_knuckle.stl b/robosuite/models/assets/robots/xarm7/meshes/right_inner_knuckle.stl
new file mode 100644
index 0000000000..f1a486daf3
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/right_inner_knuckle.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/meshes/right_outer_knuckle.stl b/robosuite/models/assets/robots/xarm7/meshes/right_outer_knuckle.stl
new file mode 100644
index 0000000000..3099dfdeda
Binary files /dev/null and b/robosuite/models/assets/robots/xarm7/meshes/right_outer_knuckle.stl differ
diff --git a/robosuite/models/assets/robots/xarm7/robot.xml b/robosuite/models/assets/robots/xarm7/robot.xml
new file mode 100644
index 0000000000..b8602f0878
--- /dev/null
+++ b/robosuite/models/assets/robots/xarm7/robot.xml
@@ -0,0 +1,102 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/robosuite/models/bases/__init__.py b/robosuite/models/bases/__init__.py
index 4fe39ea50f..4b19278a27 100644
--- a/robosuite/models/bases/__init__.py
+++ b/robosuite/models/bases/__init__.py
@@ -3,6 +3,7 @@
from .robot_base_factory import robot_base_factory
from .mobile_base_model import MobileBaseModel
from .leg_base_model import LegBaseModel
+from .null_base_model import NullBaseModel
from .rethink_mount import RethinkMount
from .rethink_minimal_mount import RethinkMinimalMount
@@ -12,7 +13,7 @@
from .null_mobile_base import NullMobileBase
from .no_actuation_base import NoActuationBase
from .floating_legged_base import FloatingLeggedBase
-
+from .null_base import NullBase
from .spot_base import Spot, SpotFloating
BASE_MAPPING = {
@@ -25,6 +26,7 @@
"FloatingLeggedBase": FloatingLeggedBase,
"Spot": Spot,
"SpotFloating": SpotFloating,
+ "NullBase": NullBase,
}
ALL_BASES = BASE_MAPPING.keys()
diff --git a/robosuite/models/bases/leg_base_model.py b/robosuite/models/bases/leg_base_model.py
index d272e4c0ef..3069221c7a 100644
--- a/robosuite/models/bases/leg_base_model.py
+++ b/robosuite/models/bases/leg_base_model.py
@@ -34,6 +34,15 @@ def _remove_joint_actuation(self, part_name):
parent_body = find_parent(self.actuator, motor)
parent_body.remove(motor)
self._actuators.remove(motor.get("name").replace(self.naming_prefix, ""))
+ for sensor in self.root.findall(".//jointpos"):
+ if part_name in sensor.get("joint"):
+ find_parent(self.root, sensor).remove(sensor)
+ for sensor in self.root.findall(".//jointvel"):
+ if part_name in sensor.get("joint"):
+ find_parent(self.root, sensor).remove(sensor)
+ for sensor in self.root.findall(".//jointactuatorfrc"):
+ if part_name in sensor.get("joint"):
+ find_parent(self.root, sensor).remove(sensor)
def _remove_free_joint(self):
"""Remove all freejoints from the model."""
diff --git a/robosuite/models/bases/null_base.py b/robosuite/models/bases/null_base.py
new file mode 100644
index 0000000000..928400639d
--- /dev/null
+++ b/robosuite/models/bases/null_base.py
@@ -0,0 +1,27 @@
+"""
+Null base: a dummy base model signifying that the robot has no mount or mobile base.
+"""
+import numpy as np
+
+from robosuite.models.bases.null_base_model import NullBaseModel
+from robosuite.utils.mjcf_utils import xml_path_completion
+
+
+class NullBase(NullBaseModel):
+ """
+ Dummy mobile base to signify no mount.
+
+ Args:
+ idn (int or str): Number or some other unique identification string for this mount instance
+ """
+
+ def __init__(self, idn=0):
+ super().__init__(xml_path_completion("bases/null_base.xml"), idn=idn)
+
+ @property
+ def top_offset(self):
+ return np.array((0, 0, 0))
+
+ @property
+ def horizontal_radius(self):
+ return 0
diff --git a/robosuite/models/bases/null_base_model.py b/robosuite/models/bases/null_base_model.py
new file mode 100644
index 0000000000..4b1f961502
--- /dev/null
+++ b/robosuite/models/bases/null_base_model.py
@@ -0,0 +1,11 @@
+"""
+Defines the null base model
+"""
+
+from robosuite.models.bases.robot_base_model import RobotBaseModel
+
+
+class NullBaseModel(RobotBaseModel):
+ @property
+ def naming_prefix(self):
+ return "nullbase{}_".format(self.idn)
diff --git a/robosuite/models/grippers/__init__.py b/robosuite/models/grippers/__init__.py
index 24b817a9d0..03d8d9ac99 100644
--- a/robosuite/models/grippers/__init__.py
+++ b/robosuite/models/grippers/__init__.py
@@ -13,6 +13,8 @@
from .bd_gripper import BDGripper
from .null_gripper import NullGripper
from .inspire_hands import InspireLeftHand, InspireRightHand
+from .fourier_hands import FourierLeftHand, FourierRightHand
+from .xarm7_gripper import XArm7Gripper
GRIPPER_MAPPING = {
"RethinkGripper": RethinkGripper,
@@ -27,6 +29,9 @@
"BDGripper": BDGripper,
"InspireLeftHand": InspireLeftHand,
"InspireRightHand": InspireRightHand,
+ "FourierLeftHand": FourierLeftHand,
+ "FourierRightHand": FourierRightHand,
+ "XArm7Gripper": XArm7Gripper,
None: NullGripper,
}
diff --git a/robosuite/models/grippers/fourier_hands.py b/robosuite/models/grippers/fourier_hands.py
new file mode 100644
index 0000000000..28eab30e08
--- /dev/null
+++ b/robosuite/models/grippers/fourier_hands.py
@@ -0,0 +1,157 @@
+"""
+Dexterous hands for GR1 robot.
+"""
+import numpy as np
+
+from robosuite.models.grippers.gripper_model import GripperModel
+from robosuite.utils.mjcf_utils import xml_path_completion
+
+
+class FourierLeftHand(GripperModel):
+ """
+ Dexterous left hand of GR1 robot
+
+ Args:
+ idn (int or str): Number or some other unique identification string for this gripper instance
+ """
+
+ def __init__(self, idn=0):
+ super().__init__(xml_path_completion("grippers/fourier_left_hand.xml"), idn=idn)
+
+ def format_action(self, action):
+ # the more correct way is to add tag in the xml
+ # however the tag makes finger movement laggy, so manually copy the value for finger joints
+ # Each of the first five action entries drives two coupled finger joints (hence each index appears twice below); the final entry drives a single joint.
+ assert len(action) == self.dof
+ action = np.array(action)
+ indices = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
+ return action[indices]
+
+ @property
+ def init_qpos(self):
+ return np.array([0.0] * 11)
+
+ @property
+ def grasp_qpos(self):
+ return {
+ -1: np.array([-1.5, -1.5, -1.5, -1.5, -3, 3]), # open
+ 1: np.array([1.5, 1.5, 1.5, 1.5, 3, 3]), # close
+ }
+
+ @property
+ def speed(self):
+ return 0.15
+
+ @property
+ def dof(self):
+ return 6
+
+ @property
+ def _important_geoms(self):
+ return {
+ "left_finger": [
+ "L_thumb_proximal_base_link_col",
+ "L_thumb_proximal_link_col",
+ "L_thumb_distal_link_col",
+ ],
+ "right_finger": [
+ "L_index_proximal_link_col",
+ "L_index_intermediate_link_col",
+ "L_middle_proximal_link_col",
+ "L_middle_proximal_link_col",
+ "L_ring_proximal_link_col",
+ "L_ring_intermediate_link_col",
+ "L_pinky_proximal_link_col",
+ "L_pinky_intermediate_link_col",
+ ],
+ "left_fingerpad": [
+ "L_thumb_proximal_base_link_col",
+ "L_thumb_proximal_link_col",
+ "L_thumb_distal_link_col",
+ ],
+ "right_fingerpad": [
+ "L_index_proximal_link_col",
+ "L_index_intermediate_link_col",
+ "L_middle_proximal_link_col",
+ "L_middle_proximal_link_col",
+ "L_ring_proximal_link_col",
+ "L_ring_intermediate_link_col",
+ "L_pinky_proximal_link_col",
+ "L_pinky_intermediate_link_col",
+ ],
+ }
+
+
+class FourierRightHand(GripperModel):
+ """
+ Dexterous right hand of GR1 robot
+
+ Args:
+ idn (int or str): Number or some other unique identification string for this gripper instance
+ """
+
+ def __init__(self, idn=0):
+ super().__init__(xml_path_completion("grippers/fourier_right_hand.xml"), idn=idn)
+
+ def format_action(self, action):
+ # the more correct way is to add tag in the xml
+ # however the tag makes finger movement laggy, so manually copy the value for finger joints
+ # Each of the first five action entries drives two coupled finger joints (hence each index appears twice below); the final entry drives a single joint.
+ assert len(action) == self.dof
+ action = np.array(action)
+ indices = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
+ return action[indices]
+
+ @property
+ def init_qpos(self):
+ return np.array([0.0] * 11)
+
+ @property
+ def grasp_qpos(self):
+ return {
+ -1: np.array([-1.5, -1.5, -1.5, -1.5, -3, 3]), # open
+ 1: np.array([1.5, 1.5, 1.5, 1.5, 3, 3]), # close
+ }
+
+ @property
+ def speed(self):
+ return 0.15
+
+ @property
+ def dof(self):
+ return 6
+
+ @property
+ def _important_geoms(self):
+ return {
+ "left_finger": [
+ "R_thumb_proximal_base_link_col",
+ "R_thumb_proximal_link_col",
+ "R_thumb_distal_link_col",
+ ],
+ "right_finger": [
+ "R_index_proximal_link_col",
+ "R_index_intermediate_link_col",
+ "R_middle_proximal_link_col",
+ "R_middle_proximal_link_col",
+ "R_ring_proximal_link_col",
+ "R_ring_intermediate_link_col",
+ "R_pinky_proximal_link_col",
+ "R_pinky_intermediate_link_col",
+ ],
+ "left_fingerpad": [
+ "R_thumb_proximal_base_link_col",
+ "R_thumb_proximal_link_col",
+ "R_thumb_distal_link_col",
+ ],
+ "right_fingerpad": [
+ "R_index_proximal_link_col",
+ "R_index_intermediate_link_col",
+ "R_middle_proximal_link_col",
+ "R_middle_proximal_link_col",
+ "R_ring_proximal_link_col",
+ "R_ring_intermediate_link_col",
+ "R_pinky_proximal_link_col",
+ "R_pinky_intermediate_link_col",
+ ],
+ }
diff --git a/robosuite/models/grippers/gripper_tester.py b/robosuite/models/grippers/gripper_tester.py
index 8221d855fa..a9c043775d 100644
--- a/robosuite/models/grippers/gripper_tester.py
+++ b/robosuite/models/grippers/gripper_tester.py
@@ -9,7 +9,7 @@
from robosuite.models.arenas.table_arena import TableArena
from robosuite.models.objects import BoxObject
from robosuite.models.world import MujocoWorldBase
-from robosuite.utils import OpenCVRenderer
+from robosuite.renderers.viewer import OpenCVViewer
from robosuite.utils.binding_utils import MjRenderContextOffscreen, MjSim
from robosuite.utils.mjcf_utils import array_to_string, new_actuator, new_joint
@@ -116,7 +116,7 @@ def start_simulation(self):
self.sim = MjSim.from_xml_string(model_xml)
if self.render:
- self.viewer = OpenCVRenderer(self.sim)
+ self.viewer = OpenCVViewer(self.sim)
# We also need to add the offscreen context
if self.sim._render_context_offscreen is None:
render_context = MjRenderContextOffscreen(self.sim, device_id=-1)
diff --git a/robosuite/models/grippers/inspire_hands.py b/robosuite/models/grippers/inspire_hands.py
index 26f61887ab..b4241d353f 100644
--- a/robosuite/models/grippers/inspire_hands.py
+++ b/robosuite/models/grippers/inspire_hands.py
@@ -24,13 +24,20 @@ def format_action(self, action):
# 0 is thumb rot, no copying. Thumb bend has 3 joints, so copy 3 times. Other fingers has 2 joints, so copy 2 times.
assert len(action) == self.dof
action = np.array(action)
- indices = np.array([0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5])
+ indices = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5])
return action[indices]
@property
def init_qpos(self):
return np.array([0.0] * 12)
+ @property
+ def grasp_qpos(self):
+ return {
+ -1: np.array([-1.5, -1.5, -1.5, -1.5, -3, 3]), # open
+ 1: np.array([1.5, 1.5, 1.5, 1.5, 3, 3]), # close
+ }
+
@property
def speed(self):
return 0.15
@@ -94,13 +101,20 @@ def format_action(self, action):
# 0 is thumb rot, no copying. Thumb bend has 3 joints, so copy 3 times. Other fingers has 2 joints, so copy 2 times.
assert len(action) == self.dof
action = np.array(action)
- indices = np.array([0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5])
+ indices = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5])
return action[indices]
@property
def init_qpos(self):
return np.array([0.0] * 12)
+ @property
+ def grasp_qpos(self):
+ return {
+ -1: np.array([-1.5, -1.5, -1.5, -1.5, -3, 3]), # open
+ 1: np.array([1.5, 1.5, 1.5, 1.5, 3, 3]), # close
+ }
+
@property
def speed(self):
return 0.15
diff --git a/robosuite/models/grippers/xarm7_gripper.py b/robosuite/models/grippers/xarm7_gripper.py
new file mode 100644
index 0000000000..791886550c
--- /dev/null
+++ b/robosuite/models/grippers/xarm7_gripper.py
@@ -0,0 +1,58 @@
+"""
+Gripper for UFactory's XArm7 (has two fingers).
+"""
+
+import numpy as np
+
+from robosuite.models.grippers.gripper_model import GripperModel
+from robosuite.utils.mjcf_utils import xml_path_completion
+
+
+class XArm7GripperBase(GripperModel):
+ """
+ Gripper for UFactory's XArm7.
+
+ Args:
+ idn (int or str): Number or some other unique identification string for this gripper instance
+ """
+
+ def __init__(self, idn=0):
+ super().__init__(xml_path_completion("grippers/xarm7_gripper.xml"), idn=idn)
+
+ def format_action(self, action):
+ return action
+
+ @property
+ def init_qpos(self):
+ return np.array([0.02, 0.0, 0.0, -0.02, 0.0, 0.0])
+
+ @property
+ def _important_geoms(self):
+ return {
+ "left_fingerpad": ["finger1_pad_collision"],
+ "right_fingerpad": ["finger2_pad_collision"],
+ }
+
+
+class XArm7Gripper(XArm7GripperBase):
+ """
+ Modifies XArm7 Gripper to only take one action.
+ """
+
+ def format_action(self, action):
+ """
+ -1 => fully open, +1 => fully closed
+ """
+ assert len(action) == self.dof # i.e., 1
+ # Suppose self.current_action is also shape (1,). Then:
+ delta = self.speed * np.sign(action)
+ self.current_action = np.clip(self.current_action + delta, -1.0, 1.0)
+ return self.current_action
+
+ @property
+ def speed(self):
+ return 0.2
+
+ @property
+ def dof(self):
+ return 1
diff --git a/robosuite/models/objects/composite/pot_with_handles.py b/robosuite/models/objects/composite/pot_with_handles.py
index 6783e5ebe9..157c162477 100644
--- a/robosuite/models/objects/composite/pot_with_handles.py
+++ b/robosuite/models/objects/composite/pot_with_handles.py
@@ -169,7 +169,7 @@ def _get_geom_attrs(self):
[-(self.body_half_size[1] - self.thickness / 2), 0, self.body_half_size[1] - self.thickness / 2, 0]
)
w_vals = np.array(
- [self.body_half_size[1], self.body_half_size[0], self.body_half_size[1], self.body_half_size[0]]
+ [self.body_half_size[0], self.body_half_size[1], self.body_half_size[0], self.body_half_size[1]]
)
r_vals = np.array([np.pi / 2, 0, -np.pi / 2, np.pi])
for i, (x, y, w, r) in enumerate(zip(x_off, y_off, w_vals, r_vals)):
diff --git a/robosuite/models/robots/compositional.py b/robosuite/models/robots/compositional.py
index de6cf6c90a..e8bc153e69 100644
--- a/robosuite/models/robots/compositional.py
+++ b/robosuite/models/robots/compositional.py
@@ -87,7 +87,7 @@ def gripper_mount_pos_offset(self):
@property
def gripper_mount_quat_offset(self):
- return {"right": [0.5, -0.5, 0.5, 0.5]}
+ return {"right": [-0.5, 0.5, 0.5, -0.5]}
class PandaDexLH(Panda):
@@ -101,4 +101,4 @@ def gripper_mount_pos_offset(self):
@property
def gripper_mount_quat_offset(self):
- return {"right": [0.5, -0.5, 0.5, 0.5]}
+ return {"right": [0.5, -0.5, 0.5, -0.5]}
diff --git a/robosuite/models/robots/manipulators/__init__.py b/robosuite/models/robots/manipulators/__init__.py
index 8536f9cefe..9b56b54d9a 100644
--- a/robosuite/models/robots/manipulators/__init__.py
+++ b/robosuite/models/robots/manipulators/__init__.py
@@ -9,3 +9,4 @@
from .spot_arm import SpotArm
from .tiago_robot import Tiago
from .gr1_robot import GR1, GR1FixedLowerBody, GR1ArmsOnly, GR1FloatingBody
+from .xarm7_robot import XArm7
diff --git a/robosuite/models/robots/manipulators/gr1_robot.py b/robosuite/models/robots/manipulators/gr1_robot.py
index 34035d2f98..ed6119cf90 100644
--- a/robosuite/models/robots/manipulators/gr1_robot.py
+++ b/robosuite/models/robots/manipulators/gr1_robot.py
@@ -30,7 +30,7 @@ def default_gripper(self):
Returns:
dict: Dictionary containing arm-specific gripper names
"""
- return {"right": "InspireRightHand", "left": "InspireLeftHand"}
+ return {"right": "FourierRightHand", "left": "FourierLeftHand"}
@property
def default_controller_config(self):
diff --git a/robosuite/models/robots/manipulators/legged_manipulator_model.py b/robosuite/models/robots/manipulators/legged_manipulator_model.py
index d2da70940b..b05d2c1c51 100644
--- a/robosuite/models/robots/manipulators/legged_manipulator_model.py
+++ b/robosuite/models/robots/manipulators/legged_manipulator_model.py
@@ -35,6 +35,27 @@ def _remove_joint_actuation(self, part_name):
parent_body = find_parent(self.actuator, motor)
parent_body.remove(motor)
self._actuators.remove(motor.get("name").replace(self.naming_prefix, ""))
+ for fixed in self.tendon.findall(".//fixed"):
+ for joint in fixed.findall(".//joint"):
+ if part_name in joint.get("joint"):
+ parent_body = find_parent(self.tendon, fixed)
+ parent_body.remove(fixed)
+ break
+ # remove joint in equality
+ for equality in self.equality.findall(".//joint"):
+ # If either joint1 or joint2 is the joint we want to remove
+ if part_name in equality.get("joint1") or part_name in equality.get("joint2"):
+ parent_body = find_parent(self.equality, equality)
+ parent_body.remove(equality)
+ for sensor in self.root.findall(".//jointpos"):
+ if part_name in sensor.get("joint"):
+ find_parent(self.root, sensor).remove(sensor)
+ for sensor in self.root.findall(".//jointvel"):
+ if part_name in sensor.get("joint"):
+ find_parent(self.root, sensor).remove(sensor)
+ for sensor in self.root.findall(".//jointactuatorfrc"):
+ if part_name in sensor.get("joint"):
+ find_parent(self.root, sensor).remove(sensor)
def _remove_free_joint(self):
# remove freejoint
diff --git a/robosuite/models/robots/manipulators/xarm7_robot.py b/robosuite/models/robots/manipulators/xarm7_robot.py
new file mode 100644
index 0000000000..6e8fdf6b3d
--- /dev/null
+++ b/robosuite/models/robots/manipulators/xarm7_robot.py
@@ -0,0 +1,57 @@
+import numpy as np
+
+from robosuite.models.robots.manipulators.manipulator_model import ManipulatorModel
+from robosuite.utils.mjcf_utils import xml_path_completion
+
+
+class XArm7(ManipulatorModel):
+ """
+ XArm7 is a single-arm 7-DOF robot designed by UFactory.
+
+ Args:
+ idn (int or str): Number or some other unique identification string for this robot instance
+ """
+
+ arms = ["right"]
+
+ def __init__(self, idn=0):
+ super().__init__(xml_path_completion("robots/xarm7/robot.xml"), idn=idn)
+
+ # Set joint damping
+ self.set_joint_attribute(attrib="damping", values=np.array((0.1, 0.1, 0.1, 0.1, 0.1, 0.01, 0.01)))
+
+ @property
+ def default_base(self):
+ return "RethinkMount"
+
+ @property
+ def default_gripper(self):
+ return {"right": "XArm7Gripper"}
+
+ @property
+ def default_controller_config(self):
+ return {"right": "default_xarm7"}
+
+ @property
+ def init_qpos(self):
+ return np.array([0.0, 0.0, 0.0, 0.39 * np.pi, 0.0, 0.39 * np.pi, 0.0])
+
+ @property
+ def base_xpos_offset(self):
+ return {
+ "bins": (-0.5, -0.1, 0),
+ "empty": (-0.6, 0, 0),
+ "table": lambda table_length: (-0.16 - table_length / 2, 0, 0),
+ }
+
+ @property
+ def top_offset(self):
+ return np.array((0, 0, 1.0))
+
+ @property
+ def _horizontal_radius(self):
+ return 0.5
+
+ @property
+ def arm_type(self):
+ return "single"
diff --git a/robosuite/models/robots/robot_model.py b/robosuite/models/robots/robot_model.py
index a988306bfd..410d9ac45c 100644
--- a/robosuite/models/robots/robot_model.py
+++ b/robosuite/models/robots/robot_model.py
@@ -5,7 +5,7 @@
import numpy as np
from robosuite.models.base import MujocoXMLModel
-from robosuite.models.bases import LegBaseModel, MobileBaseModel, MountModel, RobotBaseModel
+from robosuite.models.bases import LegBaseModel, MobileBaseModel, MountModel, NullBaseModel, RobotBaseModel
from robosuite.utils.mjcf_utils import ROBOT_COLLISION_COLOR, array_to_string, find_elements, find_parent
from robosuite.utils.transform_utils import euler2mat, mat2quat
@@ -137,6 +137,8 @@ def add_base(self, base: RobotBaseModel):
self.add_mobile_base(base)
elif isinstance(base, LegBaseModel):
self.add_leg_base(base)
+ elif isinstance(base, NullBaseModel):
+ self.add_null_base(base)
else:
raise ValueError("Invalid base type to add to robot!")
@@ -275,6 +277,21 @@ def add_leg_base(self, leg_base: LegBaseModel):
# Update cameras in this model
self.cameras = self.get_element_names(self.worldbody, "camera")
+ def add_null_base(self, base: NullBaseModel):
+ """
+ Do not add any base to the robot.
+ """
+ if self.base is not None:
+ raise ValueError("Mobile base already added for this robot!")
+
+ self.base = base
+ for body in base.worldbody:
+ ele = body.find("site")
+ if ele is not None:
+ self.worldbody.append(ele)
+
+ self.cameras = self.get_element_names(self.worldbody, "camera")
+
# -------------------------------------------------------------------------------------- #
# Public Properties: In general, these are the name-adjusted versions from the private #
# attributes pulled from their respective raw xml files #
diff --git a/robosuite/models/tasks/task.py b/robosuite/models/tasks/task.py
index 658fc2f3fd..6ccc00a3e6 100644
--- a/robosuite/models/tasks/task.py
+++ b/robosuite/models/tasks/task.py
@@ -1,11 +1,41 @@
+import xml.etree.ElementTree as ET
from copy import deepcopy
+import mujoco
+
from robosuite.models.objects import MujocoObject
from robosuite.models.robots import RobotModel
from robosuite.models.world import MujocoWorldBase
from robosuite.utils.mjcf_utils import get_ids
+def get_subtree_geom_ids_by_group(model: mujoco.MjModel, body_id: int, group: int) -> list[int]:
+ """Get all geoms belonging to a subtree starting at a given body, filtered by group.
+
+ Args:
+ model: MuJoCo model.
+ body_id: ID of body where subtree starts.
+ group: Group ID to filter geoms.
+
+ Returns:
+ A list containing all subtree geom ids in the specified group.
+
+ Adapted from https://github.com/kevinzakka/mink/blob/main/mink/utils.py
+ """
+
+ def gather_geoms(body_id: int) -> list[int]:
+ geoms: list[int] = []
+ geom_start = model.body_geomadr[body_id]
+ geom_end = geom_start + model.body_geomnum[body_id]
+ geoms.extend(geom_id for geom_id in range(geom_start, geom_end) if model.geom_group[geom_id] == group)
+ children = [i for i in range(model.nbody) if model.body_parentid[i] == body_id]
+ for child_id in children:
+ geoms.extend(gather_geoms(child_id))
+ return geoms
+
+ return gather_geoms(body_id)
+
+
class Task(MujocoWorldBase):
"""
Creates MJCF model for a task performed.
@@ -106,15 +136,32 @@ def generate_id_mappings(self, sim):
for robot in self.mujoco_robots:
models += [robot] + robot.models
+ worldbody = self.mujoco_arena.root.find("worldbody")
+ exclude_bodies = ["table", "left_eef_target", "right_eef_target"] # targets used for viz / mjgui
+ top_level_bodies = [
+ body.attrib.get("name")
+ for body in worldbody.findall("body")
+ if body.attrib.get("name") not in exclude_bodies
+ ]
+ models.extend(top_level_bodies)
+
# Parse all mujoco models from robots and objects
for model in models:
- # Grab model class name and visual IDs
- cls = str(type(model)).split("'")[1].split(".")[-1]
- inst = model.name
- id_groups = [
- get_ids(sim=sim, elements=model.visual_geoms + model.contact_geoms, element_type="geom"),
- get_ids(sim=sim, elements=model.sites, element_type="site"),
- ]
+ if isinstance(model, str):
+ body_name = model
+ visual_group_number = 1
+ body_id = sim.model.body_name2id(body_name)
+ inst, cls = body_name, body_name
+ geom_ids = get_subtree_geom_ids_by_group(sim.model, body_id, visual_group_number)
+ id_groups = [geom_ids, []]
+ else:
+ # Grab model class name and visual IDs
+ cls = str(type(model)).split("'")[1].split(".")[-1]
+ inst = model.name
+ id_groups = [
+ get_ids(sim=sim, elements=model.visual_geoms + model.contact_geoms, element_type="geom"),
+ get_ids(sim=sim, elements=model.sites, element_type="site"),
+ ]
group_types = ("geom", "site")
ids_to_instances = (self._geom_ids_to_instances, self._site_ids_to_instances)
ids_to_classes = (self._geom_ids_to_classes, self._site_ids_to_classes)
diff --git a/robosuite/renderers/context/egl_context.py b/robosuite/renderers/context/egl_context.py
index 8e774aafdc..72c187fba8 100644
--- a/robosuite/renderers/context/egl_context.py
+++ b/robosuite/renderers/context/egl_context.py
@@ -1,4 +1,4 @@
-# Modifications Copyright 2022 The robosuite Authors
+# Modifications Copyright 2025 The robosuite Authors
# Original Copyright 2018 The dm_control Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,7 +26,7 @@
raise ImportError(
"Cannot use EGL rendering platform. "
"The PYOPENGL_PLATFORM environment variable is set to {!r} "
- "(should be either unset or 'egl')."
+ "(should be either unset or 'egl').".format(PYOPENGL_PLATFORM)
)
from mujoco.egl import egl_ext as EGL
@@ -113,7 +113,7 @@ def __init__(self, max_width, max_height, device_id=0):
del max_width, max_height # unused
num_configs = ctypes.c_long()
config_size = 1
- config = EGL.EGLConfig()
+ config_ptr = EGL.EGLConfig() # Makes an opaque pointer
EGL.eglReleaseThread()
global EGL_DISPLAY
if EGL_DISPLAY is None:
@@ -126,14 +126,14 @@ def __init__(self, max_width, max_height, device_id=0):
"required for creating a headless rendering context."
)
atexit.register(EGL.eglTerminate, EGL_DISPLAY)
- EGL.eglChooseConfig(EGL_DISPLAY, EGL_ATTRIBUTES, ctypes.byref(config), config_size, num_configs)
+ EGL.eglChooseConfig(EGL_DISPLAY, EGL_ATTRIBUTES, config_ptr, config_size, num_configs)
if num_configs.value < 1:
raise RuntimeError(
"EGL failed to find a framebuffer configuration that matches the "
"desired attributes: {}".format(EGL_ATTRIBUTES)
)
EGL.eglBindAPI(EGL.EGL_OPENGL_API)
- self._context = EGL.eglCreateContext(EGL_DISPLAY, config, EGL.EGL_NO_CONTEXT, None)
+ self._context = EGL.eglCreateContext(EGL_DISPLAY, config_ptr, EGL.EGL_NO_CONTEXT, None)
if not self._context:
raise RuntimeError("Cannot create an EGL context.")
diff --git a/robosuite/renderers/mjviewer/__init__.py b/robosuite/renderers/mjviewer/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/robosuite/renderers/viewer/__init__.py b/robosuite/renderers/viewer/__init__.py
new file mode 100644
index 0000000000..d6868f385a
--- /dev/null
+++ b/robosuite/renderers/viewer/__init__.py
@@ -0,0 +1,2 @@
+from .mjviewer_renderer import MjviewerRenderer
+from .opencv_renderer import OpenCVViewer
diff --git a/robosuite/renderers/mjviewer/mjviewer_renderer.py b/robosuite/renderers/viewer/mjviewer_renderer.py
similarity index 100%
rename from robosuite/renderers/mjviewer/mjviewer_renderer.py
rename to robosuite/renderers/viewer/mjviewer_renderer.py
diff --git a/robosuite/renderers/viewer/opencv_renderer.py b/robosuite/renderers/viewer/opencv_renderer.py
new file mode 100644
index 0000000000..c6e85a7251
--- /dev/null
+++ b/robosuite/renderers/viewer/opencv_renderer.py
@@ -0,0 +1,90 @@
+"""
+opencv renderer class.
+"""
+import platform
+
+import cv2
+import numpy as np
+
+
+class OpenCVViewer:
+ def __init__(self, sim):
+ # TODO: update this appropriately - need to get screen dimensions
+ self.width = 1280
+ self.height = 800
+
+ self.sim = sim
+ self.set_camera(camera_id=0)
+ self._window_name = "offscreen render"
+ self._has_window = False
+ self.keypress_callback = None
+
+ def set_camera(self, camera_id=None, camera_name=None, width=None, height=None):
+ """
+ Set the camera view to the specified camera ID.
+
+ Args:
+ camera_id (int or list): id(s) of the camera to set the current viewer to
+ camera_name (str or list or None): name(s) of the camera to set the current viewer to
+ """
+
+ # enforce exactly one arg
+ assert (camera_id is not None) or (camera_name is not None)
+ assert (camera_id is None) or (camera_name is None)
+
+ # set width and height
+ if width is not None:
+ self.width = width
+ if height is not None:
+ self.height = height
+
+ if camera_id is not None:
+ if isinstance(camera_id, int):
+ camera_id = [camera_id]
+ self.camera_names = [self.sim.model.camera_id2name(cam_id) for cam_id in camera_id]
+ else:
+ if isinstance(camera_name, str):
+ camera_name = [camera_name]
+ self.camera_names = list(camera_name)
+
+ def render(self):
+ # get frame with offscreen renderer (assumes that the renderer already exists)
+ im = [
+ self.sim.render(camera_name=cam_name, height=self.height, width=self.width)[..., ::-1]
+ for cam_name in self.camera_names
+ ]
+ im = np.concatenate(im, axis=1) # concatenate horizontally
+
+ # write frame to window
+ im = np.flip(im, axis=0)
+ cv2.imshow(self._window_name, im)
+ if (platform.system() != "Darwin") and (not self._has_window):
+ # move window to top left of screen, and ensure we only move window on creation
+ cv2.moveWindow(self._window_name, 0, 0)
+ key = cv2.waitKey(1)
+ if self.keypress_callback is not None:
+ self.keypress_callback(key)
+ self._has_window = True
+
+ def add_keypress_callback(self, keypress_callback):
+ self.keypress_callback = keypress_callback
+
+ def close_window(self):
+ """
+ Helper method to close the active window.
+ """
+ if self._has_window:
+ cv2.destroyWindow(self._window_name)
+ cv2.waitKey(1)
+ self._has_window = False
+
+ def close(self):
+ """
+ Any cleanup to close renderer.
+ """
+
+ # NOTE: assume that @sim will get cleaned up outside the renderer - just delete the reference
+ self.sim = None
+
+ # close window
+ self.close_window()
diff --git a/robosuite/robots/__init__.py b/robosuite/robots/__init__.py
index d58a193186..86714f7dbc 100644
--- a/robosuite/robots/__init__.py
+++ b/robosuite/robots/__init__.py
@@ -30,6 +30,7 @@
"GR1FloatingBody": LeggedRobot,
"PandaDexRH": FixedBaseRobot,
"PandaDexLH": FixedBaseRobot,
+ "XArm7": FixedBaseRobot,
}
target_type_mapping = {
diff --git a/robosuite/robots/legged_robot.py b/robosuite/robots/legged_robot.py
index e48e496d46..8d89431b5d 100644
--- a/robosuite/robots/legged_robot.py
+++ b/robosuite/robots/legged_robot.py
@@ -186,9 +186,10 @@ def control(self, action, policy_step=False):
for part_name, applied_action in applied_action_dict.items():
applied_action_low = self.sim.model.actuator_ctrlrange[self._ref_actuators_indexes_dict[part_name], 0]
applied_action_high = self.sim.model.actuator_ctrlrange[self._ref_actuators_indexes_dict[part_name], 1]
- applied_action = np.clip(applied_action, applied_action_low, applied_action_high)
-
- self.sim.data.ctrl[self._ref_actuators_indexes_dict[part_name]] = applied_action
+ actuator_indexes = self._ref_actuators_indexes_dict[part_name]
+ actuator_gears = self.sim.model.actuator_gear[actuator_indexes, 0]
+ applied_action = np.clip(applied_action / actuator_gears, applied_action_low, applied_action_high)
+ self.sim.data.ctrl[actuator_indexes] = applied_action
# If this is a policy step, also update buffers holding recent values of interest
if policy_step:
diff --git a/robosuite/robots/mobile_robot.py b/robosuite/robots/mobile_robot.py
index 60c28bf35d..e5c9e39966 100644
--- a/robosuite/robots/mobile_robot.py
+++ b/robosuite/robots/mobile_robot.py
@@ -322,7 +322,7 @@ def base_quat(obs_cache):
for arm in self.arms:
@sensor(modality=modality)
- def base_to_eef_pos(obs_cache):
+ def base_to_eef_pos(obs_cache, arm=arm):
eef_pos = np.array(self.sim.data.site_xpos[self.eef_site_id[arm]])
base_pos = np.array(
self.sim.data.site_xpos[self.sim.model.site_name2id(self.robot_model.base.correct_naming("center"))]
@@ -339,7 +339,7 @@ def base_to_eef_pos(obs_cache):
return base_to_eef_pos
@sensor(modality=modality)
- def base_to_eef_quat(obs_cache):
+ def base_to_eef_quat(obs_cache, arm=arm):
"""
Args:
obs_cache (dict): A dictionary containing cached observations.
@@ -376,7 +376,7 @@ def base_to_eef_quat(obs_cache):
return T.mat2quat(base_to_eef_mat)
@sensor(modality=modality)
- def base_to_eef_quat_site(obs_cache):
+ def base_to_eef_quat_site(obs_cache, arm=arm):
"""
Args:
obs_cache (dict): A dictionary containing cached observations.
diff --git a/robosuite/robots/robot.py b/robosuite/robots/robot.py
index bc1faca82b..136430b794 100644
--- a/robosuite/robots/robot.py
+++ b/robosuite/robots/robot.py
@@ -148,7 +148,7 @@ def _postprocess_part_controller_config(self):
"""
for part_name, controller_config in self.composite_controller_config.get("body_parts", {}).items():
if not self.has_part(part_name):
- ROBOSUITE_DEFAULT_LOGGER.warn(
+ ROBOSUITE_DEFAULT_LOGGER.warning(
f'The config has defined for the controller "{part_name}", '
"but the robot does not have this component. Skipping, but make sure this is intended."
f"Removing the controller config for {part_name} from self.part_controller_config."
@@ -356,10 +356,14 @@ def joint_pos_sin(obs_cache):
def joint_vel(obs_cache):
return np.array([self.sim.data.qvel[x] for x in self._ref_joint_vel_indexes])
- sensors = [joint_pos, joint_pos_cos, joint_pos_sin, joint_vel]
- names = ["joint_pos", "joint_pos_cos", "joint_pos_sin", "joint_vel"]
+ @sensor(modality=modality)
+ def joint_acc(obs_cache):
+ return np.array([self.sim.data.qacc[x] for x in self._ref_joint_vel_indexes])
+
+ sensors = [joint_pos, joint_pos_cos, joint_pos_sin, joint_vel, joint_acc]
+ names = ["joint_pos", "joint_pos_cos", "joint_pos_sin", "joint_vel", "joint_acc"]
# We don't want to include the direct joint pos sensor outputs
- actives = [False, True, True, True]
+ actives = [True, True, True, True, True]
for arm in self.arms:
arm_sensors, arm_sensor_names = self._create_arm_sensors(arm, modality=modality)
@@ -510,7 +514,7 @@ def check_q_limits(self):
zip(self.sim.data.qpos[self._ref_joint_pos_indexes], self.sim.model.jnt_range[self._ref_joint_indexes])
):
if q_limits[0] != q_limits[1] and not (q_limits[0] + tolerance < q < q_limits[1] - tolerance):
- ROBOSUITE_DEFAULT_LOGGER.warn("Joint limit reached in joint " + str(qidx))
+ ROBOSUITE_DEFAULT_LOGGER.warning("Joint limit reached in joint " + str(qidx))
return True
return False
@@ -908,7 +912,7 @@ def _load_arm_controllers(self):
self.part_controller_config[gripper_name]["ndim"] = self.gripper[arm].dof
self.part_controller_config[gripper_name]["policy_freq"] = self.control_freq
self.part_controller_config[gripper_name]["joint_indexes"] = {
- "joints": self.gripper_joints[arm],
+ "joints": self._ref_joints_indexes_dict[gripper_name],
"actuators": self._ref_joint_gripper_actuator_indexes[arm],
"qpos": self._ref_gripper_joint_pos_indexes[arm],
"qvel": self._ref_gripper_joint_vel_indexes[arm],
diff --git a/robosuite/scripts/collect_human_demonstrations.py b/robosuite/scripts/collect_human_demonstrations.py
index 2cd28732c3..d304c86f4e 100644
--- a/robosuite/scripts/collect_human_demonstrations.py
+++ b/robosuite/scripts/collect_human_demonstrations.py
@@ -155,7 +155,6 @@ def gather_demonstrations_as_hdf5(directory, out_dir, env_info):
env_name = None # will get populated at some point
for ep_directory in os.listdir(directory):
-
state_paths = os.path.join(directory, ep_directory, "state_*.npz")
states = []
actions = []
@@ -217,12 +216,32 @@ def gather_demonstrations_as_hdf5(directory, out_dir, env_info):
default=os.path.join(suite.models.assets_root, "demonstrations_private"),
)
parser.add_argument("--environment", type=str, default="Lift")
- parser.add_argument("--robots", nargs="+", type=str, default="Panda", help="Which robot(s) to use in the env")
parser.add_argument(
- "--config", type=str, default="default", help="Specified environment configuration if necessary"
+ "--robots",
+ nargs="+",
+ type=str,
+ default="Panda",
+ help="Which robot(s) to use in the env",
+ )
+ parser.add_argument(
+ "--config",
+ type=str,
+ default="default",
+ help="Specified environment configuration if necessary",
+ )
+ parser.add_argument(
+ "--arm",
+ type=str,
+ default="right",
+ help="Which arm to control (eg bimanual) 'right' or 'left'",
+ )
+ parser.add_argument(
+ "--camera",
+ nargs="*",
+ type=str,
+ default="agentview",
+ help="List of camera names to use for collecting demos. Pass multiple names to enable multiple views. Note: the `mujoco` renderer must be enabled when using multiple views; `mjviewer` is not supported.",
)
- parser.add_argument("--arm", type=str, default="right", help="Which arm to control (eg bimanual) 'right' or 'left'")
- parser.add_argument("--camera", type=str, default="agentview", help="Which camera to use for collecting demos")
parser.add_argument(
"--controller",
type=str,
@@ -230,8 +249,18 @@ def gather_demonstrations_as_hdf5(directory, out_dir, env_info):
help="Choice of controller. Can be generic (eg. 'BASIC' or 'WHOLE_BODY_MINK_IK') or json file (see robosuite/controllers/config for examples)",
)
parser.add_argument("--device", type=str, default="keyboard")
- parser.add_argument("--pos-sensitivity", type=float, default=1.0, help="How much to scale position user inputs")
- parser.add_argument("--rot-sensitivity", type=float, default=1.0, help="How much to scale rotation user inputs")
+ parser.add_argument(
+ "--pos-sensitivity",
+ type=float,
+ default=1.0,
+ help="How much to scale position user inputs",
+ )
+ parser.add_argument(
+ "--rot-sensitivity",
+ type=float,
+ default=1.0,
+ help="How much to scale rotation user inputs",
+ )
parser.add_argument(
"--renderer",
type=str,
@@ -244,6 +273,12 @@ def gather_demonstrations_as_hdf5(directory, out_dir, env_info):
type=int,
help="Sleep when simluation runs faster than specified frame rate; 20 fps is real time.",
)
+ parser.add_argument(
+ "--reverse_xy",
+ action="store_true",
+ default=False,
+ help="(DualSense only) Reverse the effect of the x and y axes of the joystick. Used when the left/right and front/back directions of the view are opposite to the joystick's LX and LY axes (e.g. pushing LX up makes the robot move left in your view).",
+ )
args = parser.parse_args()
# Get controller config
@@ -294,11 +329,28 @@ def gather_demonstrations_as_hdf5(directory, out_dir, env_info):
if args.device == "keyboard":
from robosuite.devices import Keyboard
- device = Keyboard(env=env, pos_sensitivity=args.pos_sensitivity, rot_sensitivity=args.rot_sensitivity)
+ device = Keyboard(
+ env=env,
+ pos_sensitivity=args.pos_sensitivity,
+ rot_sensitivity=args.rot_sensitivity,
+ )
elif args.device == "spacemouse":
from robosuite.devices import SpaceMouse
- device = SpaceMouse(env=env, pos_sensitivity=args.pos_sensitivity, rot_sensitivity=args.rot_sensitivity)
+ device = SpaceMouse(
+ env=env,
+ pos_sensitivity=args.pos_sensitivity,
+ rot_sensitivity=args.rot_sensitivity,
+ )
+ elif args.device == "dualsense":
+ from robosuite.devices import DualSense
+
+ device = DualSense(
+ env=env,
+ pos_sensitivity=args.pos_sensitivity,
+ rot_sensitivity=args.rot_sensitivity,
+ reverse_xy=args.reverse_xy,
+ )
elif args.device == "mjgui":
assert args.renderer == "mjviewer", "Mocap is only supported with the mjviewer renderer"
from robosuite.devices.mjgui import MJGUI
diff --git a/robosuite/scripts/render_dataset_with_omniverse.py b/robosuite/scripts/render_dataset_with_omniverse.py
index 19739c8c8a..43b7673e51 100644
--- a/robosuite/scripts/render_dataset_with_omniverse.py
+++ b/robosuite/scripts/render_dataset_with_omniverse.py
@@ -74,6 +74,23 @@
"--keep_models", type=str, nargs="+", default=[], help="(optional) keep the model from the Mujoco XML file"
)
+# adding rendering types
+parser.add_argument(
+ "--rgb",
+ action="store_true",
+ default=False,
+)
+parser.add_argument(
+ "--normals",
+ action="store_true",
+ default=False,
+)
+parser.add_argument(
+ "--semantic_segmentation",
+ action="store_true",
+ default=False,
+)
+
# Add arguments for launch
AppLauncher.add_app_launcher_args(parser)
# Parse the arguments
@@ -95,11 +112,13 @@
import cv2
import h5py
import lxml.etree as ET
+import numpy as np
import omni
import omni.isaac.core.utils.stage as stage_utils
import omni.kit.app
import omni.replicator.core as rep
import omni.timeline
+from pxr import Semantics
from termcolor import colored
from tqdm import tqdm
@@ -124,6 +143,8 @@
scene_option = mujoco.MjvOption()
scene_option.geomgroup = [0, 1, 0, 0, 0, 0]
+render_modalities = {"rgb": args.rgb, "normals": args.normals, "semantic_segmentation": args.semantic_segmentation}
+
def make_sites_invisible(mujoco_xml):
"""
@@ -343,6 +364,24 @@ def link_env_with_ov(self):
)
exp.update_scene(data=data, scene_option=scene_option)
exp.add_light(pos=[0, 0, 0], intensity=1500, light_type="dome", light_name="dome_1")
+
+ # adds semantic information to objects in the scene
+ if args.semantic_segmentation:
+ for geom in exp.scene.geoms:
+ geom_id = geom.objid
+ geom_name = exp._get_geom_name(geom)
+ if geom_id in self.env.model._geom_ids_to_classes:
+ semantic_value = self.env.model._geom_ids_to_classes[geom_id]
+ if "site" in geom_name or "None" in geom_name:
+ continue
+ prim = exp.geom_refs[geom_name].usd_prim
+ instance_name = f"class_{semantic_value}"
+ sem = Semantics.SemanticsAPI.Apply(prim, instance_name)
+ sem.CreateSemanticTypeAttr()
+ sem.CreateSemanticDataAttr()
+ sem.GetSemanticTypeAttr().Set("class")
+ sem.GetSemanticDataAttr().Set(semantic_value)
+
return exp
def update_simulation(self, index):
@@ -369,6 +408,8 @@ def __init__(
output_dir: str = None,
image_output_format: str = "png",
rgb: bool = False,
+ normals: bool = False,
+ semantic_segmentation: bool = False,
frame_padding: int = 4,
):
self._output_dir = output_dir
@@ -385,9 +426,16 @@ def __init__(
self.data_structure = "annotator"
self.write_ready = False
- # RGB
+ self.rgb = rgb
+ self.normals = normals
+ self.semantic_segmentation = semantic_segmentation
+
if rgb:
self.annotators.append(rep.AnnotatorRegistry.get_annotator("rgb"))
+ if normals:
+ self.annotators.append(rep.AnnotatorRegistry.get_annotator("normals"))
+ if semantic_segmentation:
+ self.annotators.append(rep.AnnotatorRegistry.get_annotator("semantic_segmentation", {"colorize": True}))
def write(self, data: dict):
"""Write function called from the OgnWriter node on every frame to process annotator output.
@@ -399,7 +447,25 @@ def write(self, data: dict):
for annotator_name, annotator_data in data["annotators"].items():
for idx, (render_product_name, anno_rp_data) in enumerate(annotator_data.items()):
if annotator_name == "rgb":
- filepath = os.path.join(args.cameras[idx], f"rgb_{self._frame_id}.{self._image_output_format}")
+ filepath = os.path.join(
+ args.cameras[idx], "rgb", f"rgb_{self._frame_id}.{self._image_output_format}"
+ )
+ self._backend.write_image(filepath, anno_rp_data["data"])
+ elif annotator_name == "normals":
+ normals = anno_rp_data["data"][..., :3]
+ norm_lengths = np.linalg.norm(normals, axis=-1, keepdims=True)
+ normals_normalized = normals / np.clip(norm_lengths, 1e-8, None)
+ img = ((normals_normalized + 1) / 2 * 255).astype(np.uint8)
+ filepath = os.path.join(
+ args.cameras[idx], "normals", f"normals_{self._frame_id}.{self._image_output_format}"
+ )
+ self._backend.write_image(filepath, img)
+ elif annotator_name == "semantic_segmentation":
+ filepath = os.path.join(
+ args.cameras[idx],
+ "semantic_segmentation",
+ f"semantic_segmentation_{self._frame_id}.{self._image_output_format}",
+ )
self._backend.write_image(filepath, anno_rp_data["data"])
self._frame_id += 1
@@ -481,7 +547,12 @@ def init_recorder(self):
# Create writer for capturing generated data
self.writer = rep.WriterRegistry.get(self.writer_name)
- self.writer.initialize(output_dir=self.output_dir, rgb=True)
+ self.writer.initialize(
+ output_dir=self.output_dir,
+ rgb=args.rgb,
+ normals=args.normals,
+ semantic_segmentation=args.semantic_segmentation,
+ )
print("Writer Initiazed")
@@ -589,22 +660,27 @@ def create_video_from_frames(self, frame_folder, output_path, fps=30):
video.release()
print(f"Video saved: {output_path}")
+ def create_video(self, videos_folder, camera, data_type):
+ camera_folder_path = os.path.join(self.output_dir, camera, data_type)  # per-camera folder for this render modality
+ if not os.path.isdir(camera_folder_path):
+ return
+
+ # Construct output filename and path
+ output_filename = f"{camera}_{data_type}.mp4"
+ output_path = os.path.join(videos_folder, output_filename)
+
+ # Create the video from the frames in the camera folder
+ self.create_video_from_frames(camera_folder_path, output_path)
+
def process_folders(self):
videos_folder = os.path.join(self.output_dir, "videos")
os.makedirs(videos_folder, exist_ok=True)
# Iterate over each camera folder in the output directory
for camera in args.cameras:
- camera_folder_path = os.path.join(self.output_dir, camera)
- if not os.path.isdir(camera_folder_path):
- continue
-
- # Construct output filename and path
- output_filename = f"{camera}_rgb.mp4"
- output_path = os.path.join(videos_folder, output_filename)
-
- # Create the video from the frames in the camera folder
- self.create_video_from_frames(camera_folder_path, output_path)
+ for render_modality, selected in render_modalities.items():
+ if selected:
+ self.create_video(videos_folder=videos_folder, camera=camera, data_type=render_modality)
def main():
diff --git a/robosuite/utils/__init__.py b/robosuite/utils/__init__.py
index 70f80caa07..59e2068822 100644
--- a/robosuite/utils/__init__.py
+++ b/robosuite/utils/__init__.py
@@ -1,3 +1 @@
from .errors import robosuiteError, XMLError, SimulationError, RandomizationError
-
-from .opencv_renderer import OpenCVRenderer
diff --git a/robosuite/utils/input_utils.py b/robosuite/utils/input_utils.py
index a7f6860323..31a73cc0ce 100644
--- a/robosuite/utils/input_utils.py
+++ b/robosuite/utils/input_utils.py
@@ -6,7 +6,8 @@
import robosuite as suite
import robosuite.utils.transform_utils as T
-from robosuite.devices import *
+
+# from robosuite.devices import *
from robosuite.models.robots import *
from robosuite.robots import *
@@ -78,6 +79,7 @@ def choose_multi_arm_config():
env_configs = {
"Opposed": "opposed",
"Parallel": "parallel",
+ "Single-Robot": "single-robot",
}
# Select environment configuration
@@ -101,7 +103,7 @@ def choose_multi_arm_config():
return list(env_configs.values())[k]
-def choose_robots(exclude_bimanual=False, use_humanoids=False):
+def choose_robots(exclude_bimanual=False, use_humanoids=False, exclude_single_arm=False):
"""
Prints out robot options, and returns the requested robot. Restricts options to single-armed robots if
@exclude_bimanual is set to True (False by default). Restrict options to humanoids if @use_humanoids is set to True (Flase by default).
@@ -114,15 +116,18 @@ def choose_robots(exclude_bimanual=False, use_humanoids=False):
str: Requested robot name
"""
# Get the list of robots
- robots = {"Sawyer", "Panda", "Jaco", "Kinova3", "IIWA", "UR5e"}
+ if exclude_single_arm:
+ robots = set()
+ else:
+ robots = {"Sawyer", "Panda", "Jaco", "Kinova3", "IIWA", "UR5e", "SpotWithArmFloating", "XArm7"}
# Add Baxter if bimanual robots are not excluded
if not exclude_bimanual:
robots.add("Baxter")
- robots.add("GR1")
- robots.add("GR1UpperBody")
+ robots.add("GR1ArmsOnly")
+ robots.add("Tiago")
if use_humanoids:
- robots = {"GR1", "GR1UpperBody"}
+ robots.add("GR1ArmsOnly")
# Make sure set is deterministically sorted
robots = sorted(robots)
diff --git a/robosuite/utils/log_utils.py b/robosuite/utils/log_utils.py
index ead0fe9507..e2dc96c983 100644
--- a/robosuite/utils/log_utils.py
+++ b/robosuite/utils/log_utils.py
@@ -2,6 +2,7 @@
This file contains utility classes and functions for logging to stdout and stderr
Adapted from robomimic: https://github.com/ARISE-Initiative/robomimic/blob/master/robomimic/utils/log_utils.py
"""
+import inspect
import logging
import os
import time
@@ -97,6 +98,52 @@ def get_logger(self):
return logger
+def format_message(level: str, message: str) -> str:
+ """
+ Format a message with colors based on the level and include file and line number.
+
+ Args:
+ level (str): The logging level (e.g., "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL").
+ message (str): The message to format.
+
+ Returns:
+ str: The formatted message with file and line number.
+ """
+ # Get the caller's file name and line number
+ frame = inspect.currentframe().f_back
+ filename = frame.f_code.co_filename
+ lineno = frame.f_lineno
+
+ # Level-based coloring
+ level_colors = {
+ "DEBUG": "blue",
+ "INFO": "green",
+ "WARNING": "yellow",
+ "ERROR": "red",
+ "CRITICAL": "red",
+ }
+ attrs = ["bold"]
+ if level == "CRITICAL":
+ attrs.append("reverse")
+
+ color = level_colors.get(level, "white")
+ formatted_message = colored(f"[{level}] {filename}:{lineno} - {message}", color, attrs=attrs)
+ return formatted_message
+
+
+def rs_assert(condition: bool, message: str):
+ """
+ Assert a condition and raise an error with a formatted message if the condition fails.
+
+ Args:
+ condition (bool): The condition to check.
+ message (str): The error message to display if the assertion fails.
+ """
+ if not condition:
+ formatted_message = format_message("ERROR", message)
+ raise AssertionError(formatted_message)
+
+
ROBOSUITE_DEFAULT_LOGGER = DefaultLogger(
console_logging_level=macros.CONSOLE_LOGGING_LEVEL,
file_logging_level=macros.FILE_LOGGING_LEVEL,
diff --git a/robosuite/utils/opencv_renderer.py b/robosuite/utils/opencv_renderer.py
deleted file mode 100644
index c6d6386b75..0000000000
--- a/robosuite/utils/opencv_renderer.py
+++ /dev/null
@@ -1,50 +0,0 @@
-"""
-opencv renderer class.
-"""
-import cv2
-import numpy as np
-
-
-class OpenCVRenderer:
- def __init__(self, sim):
- # TODO: update this appropriately - need to get screen dimensions
- self.width = 1280
- self.height = 800
-
- self.sim = sim
- self.camera_name = self.sim.model.camera_id2name(0)
-
- self.keypress_callback = None
-
- def set_camera(self, camera_id):
- """
- Set the camera view to the specified camera ID.
- Args:
- camera_id (int): id of the camera to set the current viewer to
- """
- self.camera_name = self.sim.model.camera_id2name(camera_id)
-
- def render(self):
- # get frame with offscreen renderer (assumes that the renderer already exists)
- im = self.sim.render(camera_name=self.camera_name, height=self.height, width=self.width)[..., ::-1]
-
- # write frame to window
- im = np.flip(im, axis=0)
- cv2.imshow("offscreen render", im)
- key = cv2.waitKey(1)
- if self.keypress_callback:
- self.keypress_callback(key)
-
- def add_keypress_callback(self, keypress_callback):
- self.keypress_callback = keypress_callback
-
- def close(self):
- """
- Any cleanup to close renderer.
- """
-
- # NOTE: assume that @sim will get cleaned up outside the renderer - just delete the reference
- self.sim = None
-
- # close window
- cv2.destroyAllWindows()
diff --git a/robosuite/utils/usd/exporter.py b/robosuite/utils/usd/exporter.py
index 4de4c303c7..acb1e5c4b9 100644
--- a/robosuite/utils/usd/exporter.py
+++ b/robosuite/utils/usd/exporter.py
@@ -527,6 +527,8 @@ def _get_geom_name(self, geom) -> str:
geom_name = mujoco.mj_id2name(self.model, geom.objtype, geom.objid)
if not geom_name:
geom_name = "None"
+ geom_name = geom_name.replace("-", "m_")
+ geom_name = geom_name.replace("+", "p_")
geom_name += f"_id{geom.objid}"
# adding additional naming information to differentiate
diff --git a/robosuite/utils/usd/objects.py b/robosuite/utils/usd/objects.py
index 3cc2299653..db87e5b627 100644
--- a/robosuite/utils/usd/objects.py
+++ b/robosuite/utils/usd/objects.py
@@ -59,7 +59,10 @@ def __init__(
self.rgba = rgba
self.texture_file = texture_file
- self.xform_path = f"/World/Mesh_Xform_{obj_name}"
+ self.obj_name = self.obj_name.replace("-", "m_")
+ self.obj_name = self.obj_name.replace("+", "p_")
+
+ self.xform_path = f"/World/Mesh_Xform_{self.obj_name}"
self.usd_xform = UsdGeom.Xform.Define(stage, self.xform_path)
# defining ops required by update function
@@ -199,7 +202,7 @@ def __init__(
self.dataid = dataid
- mesh_path = f"{self.xform_path}/Mesh_{obj_name}"
+ mesh_path = f"{self.xform_path}/Mesh_{self.obj_name}"
self.usd_mesh = UsdGeom.Mesh.Define(stage, mesh_path)
self.usd_prim = stage.GetPrimAtPath(mesh_path)
@@ -288,7 +291,7 @@ def __init__(
self.mesh_config = mesh_config
self.prim_mesh = self.generate_primitive_mesh()
- mesh_path = f"{self.xform_path}/Mesh_{obj_name}"
+ mesh_path = f"{self.xform_path}/Mesh_{self.obj_name}"
self.usd_mesh = UsdGeom.Mesh.Define(stage, mesh_path)
self.usd_prim = stage.GetPrimAtPath(mesh_path)
diff --git a/robosuite/wrappers/data_collection_wrapper.py b/robosuite/wrappers/data_collection_wrapper.py
index 7618585e19..70ed8f8969 100644
--- a/robosuite/wrappers/data_collection_wrapper.py
+++ b/robosuite/wrappers/data_collection_wrapper.py
@@ -77,6 +77,7 @@ def _start_new_episode(self):
# trick for ensuring that we can play MuJoCo demonstrations back
# deterministically by using the recorded actions open loop
+ self.env.set_ep_meta(self.env.get_ep_meta())
self.env.reset_from_xml_string(self._current_task_instance_xml)
self.env.sim.reset()
self.env.sim.set_state_from_flattened(self._current_task_instance_state)
@@ -144,6 +145,7 @@ def reset(self):
Returns:
OrderedDict: Environment observation space after reset occurs
"""
+ self.env.unset_ep_meta() # unset any episode meta data that was previously set
ret = super().reset()
self._start_new_episode()
return ret
diff --git a/robosuite/wrappers/domain_randomization_wrapper.py b/robosuite/wrappers/domain_randomization_wrapper.py
index 70dcd7cb9a..ce27582663 100644
--- a/robosuite/wrappers/domain_randomization_wrapper.py
+++ b/robosuite/wrappers/domain_randomization_wrapper.py
@@ -2,8 +2,10 @@
This file implements a wrapper for facilitating domain randomization over
robosuite environments.
"""
+import mujoco
import numpy as np
+from robosuite.utils.log_utils import rs_assert
from robosuite.utils.mjmod import CameraModder, DynamicsModder, LightingModder, TextureModder
from robosuite.wrappers import Wrapper
@@ -154,6 +156,13 @@ def __init__(
self.modders = []
if self.randomize_color:
+ rs_assert(
+ mujoco.__version__ == "3.1.1",
+ (
+ "TextureModder requires mujoco version 3.1.1 to run. "
+ "Pending support for later versions. Alternatively, you can set randomize_color=False."
+ ),
+ )
self.tex_modder = TextureModder(
sim=self.env.sim, random_state=self.random_state, **self.color_randomization_args
)
diff --git a/setup.py b/setup.py
index e036afceaf..2bc7e1da9a 100644
--- a/setup.py
+++ b/setup.py
@@ -18,13 +18,15 @@
"numpy>=1.13.3",
"numba>=0.49.1",
"scipy>=1.2.3",
- "mujoco>=3.2.3",
+ "mujoco>=3.3.0",
"mink>=0.0.5",
+ "qpsolvers[quadprog]>=4.3.1",
"Pillow",
"opencv-python",
"pynput",
"termcolor",
"pytest",
+ "tqdm",
],
eager_resources=["*"],
include_package_data=True,
@@ -33,7 +35,7 @@
author="Yuke Zhu",
url="https://github.com/ARISE-Initiative/robosuite",
author_email="yukez@cs.utexas.edu",
- version="1.5.0",
+ version="1.5.1",
long_description=long_description,
long_description_content_type="text/markdown",
)
diff --git a/tests/test_renderers/test_all_renderers.py b/tests/test_renderers/test_all_renderers.py
index 55bde68575..3b62013989 100644
--- a/tests/test_renderers/test_all_renderers.py
+++ b/tests/test_renderers/test_all_renderers.py
@@ -35,6 +35,31 @@ def test_mujoco_renderer():
env.render()
+@pytest.mark.skipif(not is_display_available(), reason="No display available for on-screen rendering.")
+def test_multiple_mujoco_renderer():
+ env = suite.make(
+ env_name="Lift",
+ robots="Panda",
+ controller_configs=load_composite_controller_config(controller="BASIC"),
+ has_renderer=True,
+ has_offscreen_renderer=False,
+ ignore_done=True,
+ use_camera_obs=False,
+ control_freq=20,
+ renderer="mujoco",
+ )
+
+ env.reset()
+ camera_name = ["agentview", "birdview"]
+ env.viewer.set_camera(camera_name=camera_name, width=1080, height=720)
+ low, high = env.action_spec
+
+ for i in range(10):
+ action = np.random.uniform(low, high)
+ obs, reward, done, _ = env.step(action)
+ env.render()
+
+
@pytest.mark.skipif(not is_display_available(), reason="No display available for on-screen rendering.")
def test_mjviewer_renderer():
env = suite.make(