Skip to content

Commit

Permalink
Merge branch 'robot-leader-control-interface' of https://github.com/perezjln/gym-lowcostrobot into robot-leader-control-interface
Browse files Browse the repository at this point in the history
  • Loading branch information
perezjln committed Jun 20, 2024
2 parents 09f5097 + 5df3902 commit a5e0814
Show file tree
Hide file tree
Showing 10 changed files with 59 additions and 587 deletions.
44 changes: 0 additions & 44 deletions examples/gym_test_ik.py

This file was deleted.

2 changes: 2 additions & 0 deletions examples/lerobotdataset_load.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset



def main():

# You can easily load a dataset from a Hugging Face repository
Expand Down
2 changes: 1 addition & 1 deletion examples/mujoco_simple_invk.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import mujoco.viewer
import numpy as np

from gym_lowcostrobot.interface.simulated_robot import LevenbegMarquardtIK
from gym_lowcostrobot.simulated_robot import LevenbegMarquardtIK


def displace_object(data, m, object_id, viewer, square_size=0.2, invert_y=False, origin_pos=[0, 0.1]):
Expand Down
File renamed without changes.
15 changes: 15 additions & 0 deletions examples/simple.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
import gymnasium as gym

import gym_lowcostrobot  # noqa

# Smoke-test script: drive the LiftCube environment with random actions.
# No rendering ("render_mode=None"); observations are raw state vectors and
# actions are end-effector commands.
env = gym.make("LiftCube-v0", render_mode=None, observation_mode="state", action_mode="ee")

env.reset()

for step_idx in range(1000):
    # Sample a random action and advance the simulation one step.
    random_action = env.action_space.sample()
    observation, reward, terminated, truncated, info = env.step(random_action)
    print(observation, reward, terminated, truncated, info)
    # Restart the episode whenever it ends (success or time limit).
    episode_over = terminated or truncated
    if episode_over:
        env.reset()
env.close()
54 changes: 0 additions & 54 deletions examples/teleoperate_simulated_robot.py

This file was deleted.

41 changes: 41 additions & 0 deletions examples/test_ik.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
import gymnasium as gym
import gym_lowcostrobot # Import the low-cost robot environments
import numpy as np

def displace_object(square_size=0.15, invert_y=False, origin_pos=(0.0, 0.1, 0.0)):
    """Teleport the cube to a random position in a square in front of the robot.

    Samples x/y uniformly within a ``square_size``-wide square centered on
    ``origin_pos`` and writes the result directly into the simulator state.

    NOTE: reads and mutates the module-level global ``env`` (sets
    ``env.data.qpos[:3]``); it must be called after ``env`` is created.

    Args:
        square_size: Side length of the sampling square.
        invert_y: If True, swap the bounds on both axes (numpy's ``uniform``
            accepts reversed low/high, mirroring the sampled interval).
        origin_pos: Center of the square as (x, y, z). Must have at least
            3 elements — z is kept as-is. (The previous default ``[0, 0.1]``
            had only 2 elements and raised IndexError; it was also a mutable
            default argument.)

    Returns:
        The new cube position ``env.data.qpos[:3]`` (a view into sim state).
    """
    # Sample a position in a square in front of the robot.
    if not invert_y:
        x = np.random.uniform(origin_pos[0] - square_size / 2, origin_pos[0] + square_size / 2)
        y = np.random.uniform(origin_pos[1] - square_size / 2, origin_pos[1] + square_size / 2)
    else:
        x = np.random.uniform(origin_pos[0] + square_size / 2, origin_pos[0] - square_size / 2)
        y = np.random.uniform(origin_pos[1] + square_size / 2, origin_pos[1] - square_size / 2)
    # Keep the original z; only x/y are randomized.
    env.data.qpos[:3] = np.array([x, y, origin_pos[2]])
    return env.data.qpos[:3]

# Create the environment: ReachCube with on-screen rendering and
# end-effector action mode (action[:3] is an ee displacement target).
env = gym.make("ReachCube-v0", render_mode="human", action_mode="ee")

# Reset the environment and remember the cube's starting position so the
# random displacement square stays centered on it.
observation, info = env.reset()
cube_origin_pos = env.data.qpos[:3].astype(np.float32)
for i in range(10000):
    # Periodically teleport the cube to a new random spot.
    if i % 500 == 0:
        cube_pos = displace_object(square_size=0.2, invert_y=False, origin_pos=cube_origin_pos)
    # Start from a random action, then overwrite the positional part with a
    # simple proportional move toward a point 10 cm above the cube.
    action = env.action_space.sample()
    ee_id = env.model.body("moving_side").id
    ee_pos = env.data.xpos[ee_id].astype(np.float32)  # default [0.03390873 0.22571199 0.14506643]
    action[:3] = cube_pos + [0, 0, 0.1] - ee_pos
    # action[:3] = [0.03390873, 0.22571199, 0.14506643]
    action[3] = -1.5  # keep the gripper command fixed

    # Step the environment (fixed typo: "terminted" -> "terminated").
    observation, reward, terminated, truncated, info = env.step(action)

    # Reset the environment if the episode is done.
    if terminated or truncated:
        observation, info = env.reset()

# Close the environment
env.close()
Loading

0 comments on commit a5e0814

Please sign in to comment.