From 5508f92edf9bd1338d1e38d9bf377e7e2e5f23eb Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Fri, 10 Nov 2023 19:32:17 -0800 Subject: [PATCH 01/15] [WIP] Mostly implemented, some debugging remaining --- ada_feeding/ada_feeding/behaviors/__init__.py | 8 +- .../behaviors/moveit2/moveit2_constraints.py | 12 +- .../ada_feeding/behaviors/ros_utility.py | 489 ++++++++++++++- .../compute_move_to_mouth_position.py | 2 + ada_feeding/ada_feeding/helpers.py | 6 +- ada_feeding/ada_feeding/trees/__init__.py | 3 + ...configuration_with_wheelchair_wall_tree.py | 196 ++++++ .../ada_feeding/trees/move_to_mouth_tree.py | 587 ++++++++++-------- .../config/ada_feeding_action_servers.yaml | 41 +- ada_feeding/scripts/save_image.py | 2 +- ada_feeding_msgs/CMakeLists.txt | 1 + ada_feeding_msgs/action/AcquireFood.action | 1 + ada_feeding_msgs/action/MoveTo.action | 1 + ada_feeding_msgs/action/Teleoperate.action | 1 + 14 files changed, 1063 insertions(+), 287 deletions(-) create mode 100644 ada_feeding/ada_feeding/trees/move_to_configuration_with_wheelchair_wall_tree.py diff --git a/ada_feeding/ada_feeding/behaviors/__init__.py b/ada_feeding/ada_feeding/behaviors/__init__.py index 5f2aea6c..9a1c4ad7 100644 --- a/ada_feeding/ada_feeding/behaviors/__init__.py +++ b/ada_feeding/ada_feeding/behaviors/__init__.py @@ -2,4 +2,10 @@ This package contains custom py_tree behaviors for the Ada Feeding project. """ from .blackboard_behavior import BlackboardBehavior -from .ros_utility import UpdateTimestamp +from .ros_utility import ( + UpdateTimestamp, + GetTransform, + SetStaticTransform, + ApplyTransform, + CreatePoseStamped, +) diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_constraints.py b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_constraints.py index c9d36dc6..6a717887 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_constraints.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/moveit2_constraints.py @@ -401,9 +401,7 @@ def blackboard_inputs( - `frame_id` defaults to the base link - `target_link` defaults to end effector - Note: if pose is PoseStamped: - - `frame_id` is pose.header.frame_id (if not "") - - `target_link` is pose.child_frame_id (if not "") + Note: if pose is PoseStamped `frame_id` is pose.header.frame_id (if not "") Details on parameterization: https://github.com/ros-planning/moveit_msgs/blob/master/msg/OrientationConstraint.msg @@ -468,9 +466,7 @@ def update(self) -> py_trees.common.Status: "frame_id": pose.header.frame_id if (isinstance(pose, PoseStamped) and len(pose.header.frame_id) > 0) else self.blackboard_get("frame_id"), - "target_link": pose.child_frame_id - if (isinstance(pose, PoseStamped) and len(pose.child_frame_id) > 0) - else self.blackboard_get("target_link"), + "target_link": self.blackboard_get("target_link"), "tolerance": self.blackboard_get("tolerance_orientation"), "weight": self.blackboard_get("weight_orientation"), "parameterization": self.blackboard_get("parameterization"), @@ -486,9 +482,7 @@ def update(self) -> py_trees.common.Status: "frame_id": pose.header.frame_id if (isinstance(pose, PoseStamped) and len(pose.header.frame_id) > 0) else self.blackboard_get("frame_id"), - "target_link": pose.child_frame_id - if (isinstance(pose, PoseStamped) and len(pose.child_frame_id) > 0) - else self.blackboard_get("target_link"), + "target_link": self.blackboard_get("target_link"), "tolerance": self.blackboard_get("tolerance_position"), "weight": self.blackboard_get("weight_position"), }, diff --git a/ada_feeding/ada_feeding/behaviors/ros_utility.py 
b/ada_feeding/ada_feeding/behaviors/ros_utility.py index 948a73ac..19e2b797 100644 --- a/ada_feeding/ada_feeding/behaviors/ros_utility.py +++ b/ada_feeding/ada_feeding/behaviors/ros_utility.py @@ -5,15 +5,36 @@ """ # Standard imports -from typing import Union, Optional, Any +from typing import Any, List, Optional, Tuple, Union # Third-party imports +from geometry_msgs.msg import ( + Point, + Pose, + Quaternion, + QuaternionStamped, + Transform, + TransformStamped, + Vector3, +) +import numpy as np from overrides import override import py_trees import rclpy +from rclpy.duration import Duration +from rclpy.time import Time +import ros2_numpy +from std_msgs.msg import Header +from tf2_geometry_msgs import PointStamped, PoseStamped, Vector3Stamped +import tf2_py as tf2 +from tf2_ros import TypeException # Local imports -from ada_feeding.helpers import BlackboardKey +from ada_feeding.helpers import ( + BlackboardKey, + get_tf_object, + set_static_tf, +) from .blackboard_behavior import BlackboardBehavior @@ -102,3 +123,467 @@ def update(self) -> py_trees.common.Status: self.blackboard_set("stamped_msg", msg) return py_trees.common.Status.SUCCESS + + +class GetTransform(BlackboardBehavior): + """ + Look up a transform between two frames. + """ + + # pylint: disable=arguments-differ + # We *intentionally* violate Liskov Substitution Princple + # in that blackboard config (inputs + outputs) are not + # meant to be called in a generic setting. + + def blackboard_inputs( + self, + target_frame: Union[BlackboardKey, str], + source_frame: Union[BlackboardKey, str], + time: Union[BlackboardKey, Time] = Time(), + timeout: Union[BlackboardKey, Duration] = Duration(seconds=0.0), + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + target_frame: Name of the frame to transform into. + source_frame: Name of the input frame. + time: The time at which to get the transform. (default, 0, gets the latest) + timeout: Time to wait for the target frame to become available. + Note that the tree ticking will block for this duration, so it is + recommended that this is kept at 0.0 (the default value). + """ + # pylint: disable=unused-argument, duplicate-code + # Arguments are handled generically in base class. + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + transform: Optional[BlackboardKey], # TransformStamped + ) -> None: + """ + Blackboard Outputs + By convention (to avoid collisions), avoid non-None default arguments. + + Parameters + ---------- + transform: The transform between the two frames + """ + # pylint: disable=unused-argument, duplicate-code + # Arguments are handled generically in base class. + super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def setup(self, **kwargs): + # Docstring copied from @override + + # pylint: disable=attribute-defined-outside-init + # It is okay for attributes in behaviors to be + # defined in the setup / initialise functions. 
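Both this `setup()` and `ApplyTransform`'s below fetch a TF buffer and lock that are shared across behaviors via the blackboard, so `update()` must yield rather than block when the lock is held elsewhere. A minimal standalone sketch of that non-blocking pattern, with placeholder `buffer`/`lock` arguments and frame names:

```python
import threading

import py_trees
import tf2_ros
from rclpy.duration import Duration
from rclpy.time import Time


def non_blocking_lookup(
    buffer: tf2_ros.Buffer, lock: threading.Lock
) -> py_trees.common.Status:
    # If another behavior holds the shared lock, yield this tick; the
    # behavior is simply re-ticked on the next cycle.
    if lock.locked():
        return py_trees.common.Status.RUNNING
    with lock:
        try:
            # Time() (i.e., time 0) requests the latest available transform.
            buffer.lookup_transform(
                "j2n6s200_link_base", "mouth", Time(), Duration(seconds=0.0)
            )
        except tf2_ros.TransformException:
            return py_trees.common.Status.FAILURE
    return py_trees.common.Status.SUCCESS
```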
+ + # Get Node + self.node = kwargs["node"] + + # Get TF Listener from blackboard + self.tf_buffer, _, self.tf_lock = get_tf_object(self.blackboard, self.node) + + @override + def update(self) -> py_trees.common.Status: + # Docstring copied from @override + + # Input Validation + if not self.blackboard_exists( + ["target_frame", "source_frame", "time", "timeout"] + ): + self.logger.error("Missing input arguments") + return py_trees.common.Status.FAILURE + + target_frame = self.blackboard_get("target_frame") + source_frame = self.blackboard_get("source_frame") + time = self.blackboard_get("time") + timeout = self.blackboard_get("timeout") + + if self.tf_lock.locked(): + return py_trees.common.Status.RUNNING + + with self.tf_lock: + try: + transform = self.tf_buffer.lookup_transform( + target_frame, source_frame, time, timeout + ) + except ( + tf2.ConnectivityException, + tf2.ExtrapolationException, + tf2.InvalidArgumentException, + tf2.LookupException, + tf2.TimeoutException, + tf2.TransformException, + ) as error: + self.logger.error(f"Could not get transform. Error: {error}") + return py_trees.common.Status.FAILURE + + self.blackboard_set("transform", transform) + return py_trees.common.Status.SUCCESS + + +class SetStaticTransform(BlackboardBehavior): + """ + Add a static transform to the TF tree. + """ + + # pylint: disable=arguments-differ + # We *intentionally* violate Liskov Substitution Princple + # in that blackboard config (inputs + outputs) are not + # meant to be called in a generic setting. + + def blackboard_inputs( + self, + transform: Union[BlackboardKey, TransformStamped, PoseStamped], + child_frame_id: Union[BlackboardKey, str], + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + transform: The transform to add to the TF tree. + child_frame_id: The child frame of the transform. Only used if transform + is a PoseStamped. + """ + # pylint: disable=unused-argument, duplicate-code + # Arguments are handled generically in base class. + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def setup(self, **kwargs): + # Docstring copied from @override + + # pylint: disable=attribute-defined-outside-init + # It is okay for attributes in behaviors to be + # defined in the setup / initialise functions. + + # Get Node + self.node = kwargs["node"] + + @override + def update(self) -> py_trees.common.Status: + # Docstring copied from @override + + # Input Validation + if not self.blackboard_exists(["transform", "child_frame_id"]): + self.logger.error("Missing input arguments") + return py_trees.common.Status.FAILURE + + transform = self.blackboard_get("transform") + self.node.get_logger().info(f"Setting static transform: {transform}") + if isinstance(transform, PoseStamped): + # Convert PoseStamped to TransformStamped + transform = TransformStamped( + header=transform.header, + child_frame_id=self.blackboard_get("child_frame_id"), + transform=Transform( + translation=Vector3( + x=transform.pose.position.x, + y=transform.pose.position.y, + z=transform.pose.position.z, + ), + rotation=transform.pose.orientation, + ), + ) + + if set_static_tf( + transform, + self.blackboard, + self.node, + ): + return py_trees.common.Status.SUCCESS + return py_trees.common.Status.FAILURE + + +class ApplyTransform(BlackboardBehavior): + """ + Apply a Transform, either passed as an argument or from the TF tree, + to a PointStamped, PoseStamped, or Vector3Stamped. 
+    """
+
+    # pylint: disable=arguments-differ
+    # We *intentionally* violate Liskov Substitution Principle
+    # in that blackboard config (inputs + outputs) are not
+    # meant to be called in a generic setting.
+
+    def blackboard_inputs(
+        self,
+        stamped_msg: Union[BlackboardKey, PointStamped, PoseStamped, Vector3Stamped],
+        target_frame: Union[BlackboardKey, Optional[str]] = None,
+        transform: Union[BlackboardKey, Optional[TransformStamped]] = None,
+        timeout: Union[BlackboardKey, Duration] = Duration(seconds=0.0),
+    ) -> None:
+        """
+        Blackboard Inputs
+
+        Parameters
+        ----------
+        stamped_msg: The object to transform. Note that if the timestamp is 0,
+            it gets the latest transform.
+        target_frame: The frame to transform into. If set, look up the transform
+            from `stamped_msg` to `target_frame` in the TF tree. Else, apply
+            the fixed transform passed in as `transform`.
+        transform: The transform to apply to `stamped_msg`. Must be set if
+            `target_frame` is None. Ignored if `target_frame` is not None.
+        timeout: Time to wait for the target frame to become available.
+            Note that the tree ticking will block for this duration, so it is
+            recommended that this is kept at 0.0 (the default value).
+        """
+        # pylint: disable=unused-argument, duplicate-code
+        # Arguments are handled generically in base class.
+        super().blackboard_inputs(
+            **{key: value for key, value in locals().items() if key != "self"}
+        )
+
+    def blackboard_outputs(
+        self,
+        transformed_msg: Optional[BlackboardKey],  # same type as stamped_msg
+    ) -> None:
+        """
+        Blackboard Outputs
+        By convention (to avoid collisions), avoid non-None default arguments.
+
+        Parameters
+        ----------
+        transformed_msg: The transformed stamped message.
+        """
+        # pylint: disable=unused-argument, duplicate-code
+        # Arguments are handled generically in base class.
+        super().blackboard_outputs(
+            **{key: value for key, value in locals().items() if key != "self"}
+        )
+
+    @override
+    def setup(self, **kwargs):
+        # Docstring copied from @override
+
+        # pylint: disable=attribute-defined-outside-init
+        # It is okay for attributes in behaviors to be
+        # defined in the setup / initialise functions.
+
+        # Get Node
+        self.node = kwargs["node"]
+
+        # Get TF Listener from blackboard
+        self.tf_buffer, _, self.tf_lock = get_tf_object(self.blackboard, self.node)
+
+    @override
+    def update(self) -> py_trees.common.Status:
+        # Docstring copied from @override
+
+        # Input Validation
+        if not self.blackboard_exists(
+            ["stamped_msg", "target_frame", "transform", "timeout"]
+        ):
+            self.logger.error("Missing input arguments")
+            return py_trees.common.Status.FAILURE
+
+        stamped_msg = self.blackboard_get("stamped_msg")
+        target_frame = self.blackboard_get("target_frame")
+        transform = self.blackboard_get("transform")
+        timeout = self.blackboard_get("timeout")
+        if target_frame is None and transform is None:
+            self.logger.error("Must specify either `target_frame` or `transform`")
+            return py_trees.common.Status.FAILURE
+
+        if self.tf_lock.locked():
+            return py_trees.common.Status.RUNNING
+
+        transformed_msg = None
+        with self.tf_lock:
+            if target_frame is not None:
+                # Compute the transform from the TF tree
+                try:
+                    transformed_msg = self.tf_buffer.transform(
+                        stamped_msg,
+                        target_frame,
+                        timeout,
+                    )
+                except (
+                    tf2.ConnectivityException,
+                    tf2.ExtrapolationException,
+                    tf2.InvalidArgumentException,
+                    tf2.LookupException,
+                    tf2.TimeoutException,
+                    tf2.TransformException,
+                    TypeException,
+                ) as error:
+                    self.logger.error(f"Could not get transform. 
Error: {error}") + return py_trees.common.Status.FAILURE + + if transformed_msg is None: + # Apply the fixed transform + transform_matrix = ros2_numpy.numpify(transform.transform) + self.node.get_logger().info(f"Transform Matrix: {transform_matrix}") + header = Header( + stamp=stamped_msg.header.stamp, + frame_id=transform.child_frame_id, + ) + if isinstance(stamped_msg, PointStamped): + stamped_vec = ros2_numpy.numpify(stamped_msg.point, hom=True).reshape( + (-1, 1) + ) + transformed_msg = PointStamped( + header=header, + point=ros2_numpy.msgify( + Point, np.matmul(transform_matrix, stamped_vec) + ), + ) + elif isinstance(stamped_msg, PoseStamped): + stamped_matrix = ros2_numpy.numpify(stamped_msg.pose) + self.node.get_logger().info(f"Stamped Matrix: {stamped_matrix}") + transformed_msg = PoseStamped( + header=header, + pose=ros2_numpy.msgify( + Pose, + np.matmul(transform_matrix, stamped_matrix), + ), + ) + self.node.get_logger().info(f"Transformed Pose: {transformed_msg}") + elif isinstance(stamped_msg, Vector3Stamped): + stamped_vec = ros2_numpy.numpify(stamped_msg.vector, hom=True).reshape( + (-1, 1) + ) + transformed_msg = Vector3Stamped( + header=header, + vector=ros2_numpy.msgify( + Vector3, + np.matmul(transform_matrix, stamped_vec), + ), + ) + else: + self.logger.error(f"Unsupported message type: {type(stamped_msg)}") + return py_trees.common.Status.FAILURE + + self.blackboard_set("transformed_msg", transformed_msg) + return py_trees.common.Status.SUCCESS + + +class CreatePoseStamped(BlackboardBehavior): + """ + Create a PoseStamped from a position (which can be a PointStamped, Point, + or List[float]) and quaternion (which can be a QuaternionStamped, + Quaternion, or List[float]). + """ + + # pylint: disable=arguments-differ + # We *intentionally* violate Liskov Substitution Princple + # in that blackboard config (inputs + outputs) are not + # meant to be called in a generic setting. + + def blackboard_inputs( + self, + position: Union[ + BlackboardKey, + PointStamped, + Point, + List[float], + Tuple[float], + ], + quaternion: Union[ + BlackboardKey, + QuaternionStamped, + Quaternion, + List[float], + Tuple[float], + ], + frame_id: Union[BlackboardKey, Optional[str]] = None, + ) -> None: + """ + Blackboard Inputs + + Parameters + ---------- + position: The position of the pose. Must be in the same frame as the + quaternion. + quaternion: The orientation of the pose. Must be in the same frame as + the position. + frame_id: The frame of the pose. Only used if neither the position nor + quaternion are stamped. + """ + # pylint: disable=unused-argument, duplicate-code + # Arguments are handled generically in base class. + super().blackboard_inputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + def blackboard_outputs( + self, + pose_stamped: Optional[BlackboardKey], # PoseStamped + ) -> None: + """ + Blackboard Outputs + By convention (to avoid collisions), avoid non-None default arguments. + + Parameters + ---------- + pose_stamped: The pose stamped. + """ + # pylint: disable=unused-argument, duplicate-code + # Arguments are handled generically in base class. 
+ super().blackboard_outputs( + **{key: value for key, value in locals().items() if key != "self"} + ) + + @override + def update(self) -> py_trees.common.Status: + # Docstring copied from @override + + # Input Validation + if not self.blackboard_exists(["position", "quaternion", "frame_id"]): + self.logger.error("Missing input arguments") + return py_trees.common.Status.FAILURE + + position = self.blackboard_get("position") + quaternion = self.blackboard_get("quaternion") + frame_id = self.blackboard_get("frame_id") + + pose_stamped = PoseStamped() + got_frame = False + if isinstance(position, PointStamped): + pose_stamped.header = position.header + got_frame = True + pose_stamped.pose.position = position.point + elif isinstance(position, Point): + pose_stamped.pose.position = position + elif isinstance(position, (list, tuple)): + pose_stamped.pose.position = Point( + x=position[0], + y=position[1], + z=position[2], + ) + if isinstance(quaternion, QuaternionStamped): + if not got_frame: + pose_stamped.header = quaternion.header + got_frame = True + pose_stamped.pose.orientation = quaternion.quaternion + elif isinstance(quaternion, Quaternion): + pose_stamped.pose.orientation = quaternion + elif isinstance(quaternion, (list, tuple)): + pose_stamped.pose.orientation = Quaternion( + x=quaternion[0], + y=quaternion[1], + z=quaternion[2], + w=quaternion[3], + ) + + if not got_frame: + if frame_id is None: + self.logger.error("Must specify `frame_id`") + return py_trees.common.Status.FAILURE + pose_stamped.header.frame_id = frame_id + + self.blackboard_set("pose_stamped", pose_stamped) + return py_trees.common.Status.SUCCESS diff --git a/ada_feeding/ada_feeding/behaviors/transfer/compute_move_to_mouth_position.py b/ada_feeding/ada_feeding/behaviors/transfer/compute_move_to_mouth_position.py index 23bc9393..5508462e 100644 --- a/ada_feeding/ada_feeding/behaviors/transfer/compute_move_to_mouth_position.py +++ b/ada_feeding/ada_feeding/behaviors/transfer/compute_move_to_mouth_position.py @@ -25,6 +25,8 @@ get_tf_object, ) +# TODO: Delete this file! 
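The fixed-transform branch of `ApplyTransform` above is, at its core, a single homogeneous-matrix multiplication via `ros2_numpy`. A condensed sketch of the `PoseStamped` case, mirroring the `numpify`/`msgify` calls the behavior makes (the helper name is ours):

```python
import ros2_numpy
from geometry_msgs.msg import Pose, Transform
from tf2_geometry_msgs import PoseStamped


def apply_fixed_transform(msg: PoseStamped, transform: Transform) -> PoseStamped:
    transform_matrix = ros2_numpy.numpify(transform)  # 4x4 homogeneous matrix
    pose_matrix = ros2_numpy.numpify(msg.pose)  # 4x4 homogeneous matrix
    # Left-multiplying expresses the pose in the transform's target frame.
    return PoseStamped(
        header=msg.header,
        pose=ros2_numpy.msgify(Pose, transform_matrix @ pose_matrix),
    )
```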
+ class ComputeMoveToMouthPosition(BlackboardBehavior): """ diff --git a/ada_feeding/ada_feeding/helpers.py b/ada_feeding/ada_feeding/helpers.py index 5b419384..eb3cf577 100644 --- a/ada_feeding/ada_feeding/helpers.py +++ b/ada_feeding/ada_feeding/helpers.py @@ -163,7 +163,11 @@ def set_static_tf( with lock: transforms = blackboard.get(static_tf_transforms_blackboard_key) - transforms[transform_stamped.child_frame_id] = transform_stamped + transform_key = ( + transform_stamped.child_frame_id, + transform_stamped.header.frame_id, + ) + transforms[transform_key] = transform_stamped blackboard.set(static_tf_transforms_blackboard_key, transforms) stb.sendTransform(list(transforms.values())) diff --git a/ada_feeding/ada_feeding/trees/__init__.py b/ada_feeding/ada_feeding/trees/__init__.py index e2ec3e0c..95f01768 100644 --- a/ada_feeding/ada_feeding/trees/__init__.py +++ b/ada_feeding/ada_feeding/trees/__init__.py @@ -12,6 +12,9 @@ from .move_to_configuration_with_ft_thresholds_tree import ( MoveToConfigurationWithFTThresholdsTree, ) +from .move_to_configuration_with_wheelchair_wall_tree import ( + MoveToConfigurationWithWheelchairWallTree, +) from .move_to_pose_tree import MoveToPoseTree from .move_from_mouth_tree import MoveFromMouthTree from .move_to_mouth_tree import MoveToMouthTree diff --git a/ada_feeding/ada_feeding/trees/move_to_configuration_with_wheelchair_wall_tree.py b/ada_feeding/ada_feeding/trees/move_to_configuration_with_wheelchair_wall_tree.py new file mode 100644 index 00000000..0c96ac07 --- /dev/null +++ b/ada_feeding/ada_feeding/trees/move_to_configuration_with_wheelchair_wall_tree.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +This module defines the MoveToConfigurationWithWheelchairWallTree behaviour tree. +This tree was designed for the MoveToStagingConfiguration action, but can be +reused by other actions that want to move to a configuration within the scope +of adding a wall in front of the wheelchair. +""" +# pylint: disable=duplicate-code +# MoveFromMouth and MoveToMouth are inverses of each other, so it makes sense +# that they have similar code. + +# Standard imports +from typing import List, Optional + +# Third-party imports +from overrides import override +import py_trees +from rclpy.node import Node + +# Local imports +from ada_feeding.behaviors.moveit2 import ( + MoveIt2Plan, + MoveIt2Execute, + MoveIt2JointConstraint, + MoveIt2OrientationConstraint, +) +from ada_feeding.helpers import BlackboardKey +from ada_feeding.idioms import pre_moveto_config, scoped_behavior +from ada_feeding.idioms.bite_transfer import ( + get_add_in_front_of_wheelchair_wall_behavior, + get_remove_in_front_of_wheelchair_wall_behavior, +) +from ada_feeding.trees import ( + MoveToTree, +) + + +class MoveToConfigurationWithWheelchairWallTree(MoveToTree): + """ + A behaviour tree adds a wall in front of the wheelchair to the collision + scene, moves to a specified configuration with optional orientation + constraints, and then removes the wall from the collision scene. This + class was designed for the MoveToStagingConfiguration action, but can be + reused by other actions. + + TODO: Add functionality to not add the collision wall if the robot is in + collision with it! + https://github.com/ros-planning/moveit_msgs/blob/humble/srv/GetStateValidity.srv + """ + + # pylint: disable=too-many-instance-attributes, too-many-arguments + # This is intended to be a flexible tree. 
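The `helpers.py` change above keys cached static transforms by the (child frame, parent frame) pair instead of the child frame alone, so the same child frame published relative to a different parent becomes a distinct entry rather than an overwrite. A sketch of the underlying mechanism, with assumed names:

```python
from geometry_msgs.msg import TransformStamped
from tf2_ros.static_transform_broadcaster import StaticTransformBroadcaster

# Keyed by (child_frame_id, parent_frame_id): re-sending "mouth" relative to
# the same parent overwrites the old entry instead of accumulating duplicates.
_static_transforms = {}


def send_static_tf(stb: StaticTransformBroadcaster, t: TransformStamped) -> None:
    _static_transforms[(t.child_frame_id, t.header.frame_id)] = t
    # A static broadcaster latches only its most recent message, so the full
    # set must be re-sent whenever any one entry changes.
    stb.sendTransform(list(_static_transforms.values()))
```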
+
+    def __init__(
+        self,
+        node: Node,
+        goal_configuration: List[float],
+        goal_configuration_tolerance: float = 0.001,
+        orientation_constraint_quaternion: Optional[List[float]] = None,
+        orientation_constraint_tolerances: Optional[List[float]] = None,
+        planner_id: str = "RRTstarkConfigDefault",
+        allowed_planning_time: float = 0.5,
+        max_velocity_scaling_factor: float = 0.1,
+        force_threshold: float = 4.0,
+        torque_threshold: float = 4.0,
+    ):
+        """
+        Initializes tree-specific parameters.
+
+        Parameters
+        ----------
+        goal_configuration: The joint positions to move the robot arm to.
+        goal_configuration_tolerance: The tolerance for the joint positions.
+        orientation_constraint_quaternion: The quaternion for the orientation
+            constraint. If None, the orientation constraint is not used.
+        orientation_constraint_tolerances: The tolerances for the orientation
+            constraint, as a 3D rotation vector. If None, the orientation
+            constraint is not used.
+        planner_id: The planner ID to use for the MoveIt2 motion planning.
+        allowed_planning_time: The allowed planning time for the MoveIt2
+            motion planner.
+        max_velocity_scaling_factor: The maximum velocity scaling factor for the
+            MoveIt2 motion planner.
+        force_threshold: The force threshold (N) for the ForceGateController.
+        torque_threshold: The torque threshold (N*m) for the ForceGateController.
+        """
+
+        # pylint: disable=too-many-locals
+        # These are all necessary due to all the behaviors this tree contains
+
+        # Initialize MoveToTree
+        super().__init__(node)
+
+        # Store the parameters
+        self.goal_configuration = goal_configuration
+        assert len(self.goal_configuration) == 6, "Must provide 6 joint positions"
+        self.goal_configuration_tolerance = goal_configuration_tolerance
+        self.orientation_constraint_quaternion = orientation_constraint_quaternion
+        self.orientation_constraint_tolerances = orientation_constraint_tolerances
+        self.planner_id = planner_id
+        self.allowed_planning_time = allowed_planning_time
+        self.max_velocity_scaling_factor = max_velocity_scaling_factor
+        self.force_threshold = force_threshold
+        self.torque_threshold = torque_threshold
+
+    @override
+    def create_tree(
+        self,
+        name: str,
+    ) -> py_trees.trees.BehaviourTree:
+        # Docstring copied from @override
+
+        ### Define Tree Logic
+
+        in_front_of_wheelchair_wall_id = "in_front_of_wheelchair_wall"
+
+        # Root Sequence
+        root_seq = py_trees.composites.Sequence(
+            name=name,
+            memory=True,
+            children=[
+                # Retare the F/T sensor and set the F/T Thresholds
+                pre_moveto_config(
+                    name=name + "PreMoveToConfig",
+                    toggle_watchdog_listener=False,
+                    f_mag=self.force_threshold,
+                    t_mag=self.torque_threshold,
+                ),
+                # Add a wall in front of the wheelchair to prevent the robot
+                # from moving unnecessarily close to the user.
+ scoped_behavior( + name=name + " InFrontOfWheelchairWallScope", + pre_behavior=get_add_in_front_of_wheelchair_wall_behavior( + name + "AddWheelchairWall", + in_front_of_wheelchair_wall_id, + ), + # Remove the wall in front of the wheelchair + post_behavior=get_remove_in_front_of_wheelchair_wall_behavior( + name + "RemoveWheelchairWall", + in_front_of_wheelchair_wall_id, + ), + # Move to the staging configuration + workers=[ + # Goal configuration: staging configuration + MoveIt2JointConstraint( + name="StagingConfigurationGoalConstraint", + ns=name, + inputs={ + "joint_positions": self.goal_configuration, + "tolerance": self.goal_configuration_tolerance, + }, + outputs={ + "constraints": BlackboardKey("goal_constraints"), + }, + ), + # Orientation path constraint to keep the fork straight + MoveIt2OrientationConstraint( + name="KeepForkStraightPathConstraint", + ns=name, + inputs={ + "quat_xyzw": self.orientation_constraint_quaternion, + "tolerance": self.orientation_constraint_tolerances, + "parameterization": 1, # Rotation vector + }, + outputs={ + "constraints": BlackboardKey("path_constraints"), + }, + ), + # Plan + MoveIt2Plan( + name="MoveToStagingConfigurationPlan", + ns=name, + inputs={ + "goal_constraints": BlackboardKey("goal_constraints"), + "path_constraints": BlackboardKey("path_constraints"), + "planner_id": self.planner_id, + "allowed_planning_time": self.allowed_planning_time, + "max_velocity_scale": self.max_velocity_scaling_factor, + "ignore_violated_path_constraints": True, + }, + outputs={"trajectory": BlackboardKey("trajectory")}, + ), + # Execute + MoveIt2Execute( + name="MoveToStagingConfigurationExecute", + ns=name, + inputs={"trajectory": BlackboardKey("trajectory")}, + outputs={}, + ), + ], + ), + ], + ) + + ### Return tree + return py_trees.trees.BehaviourTree(root_seq) diff --git a/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py b/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py index c0f1d121..c9a210e4 100644 --- a/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py +++ b/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py @@ -9,33 +9,46 @@ # that they have similar code. 
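The tree above leans on the `scoped_behavior` idiom: the pre-behavior runs on entry, the workers in the middle, and the post-behavior is intended to run on the way out even if a worker fails. A schematic usage with placeholder behaviors:

```python
import py_trees

from ada_feeding.idioms import scoped_behavior

# Success() leaves are placeholders for e.g. collision-scene edits and motion.
scope = scoped_behavior(
    name="ExampleScope",
    pre_behavior=py_trees.behaviours.Success(name="AddWall"),
    post_behavior=py_trees.behaviours.Success(name="RemoveWall"),
    workers=[py_trees.behaviours.Success(name="PlanAndExecute")],
)
```

Because the post-behavior is part of the scope itself, collision-scene edits cannot leak out of it when planning or execution fails partway through.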
# Standard imports -from typing import List, Tuple +import operator +from typing import Tuple # Third-party imports +from geometry_msgs.msg import ( + Quaternion, + Transform, + TransformStamped, + Vector3, +) from overrides import override import py_trees +from py_trees.blackboard import Blackboard import py_trees_ros +from rclpy.duration import Duration from rclpy.node import Node +from rclpy.time import Time +from std_msgs.msg import Header # Local imports +from ada_feeding_msgs.action import MoveToMouth from ada_feeding_msgs.msg import FaceDetection -from ada_feeding.behaviors.transfer import ComputeMoveToMouthPosition from ada_feeding.behaviors.moveit2 import ( MoveIt2Plan, MoveIt2Execute, - MoveIt2JointConstraint, - MoveIt2PositionConstraint, - MoveIt2OrientationConstraint, + MoveIt2PoseConstraint, ModifyCollisionObject, ModifyCollisionObjectOperation, ) +from ada_feeding.behaviors import ( + GetTransform, + SetStaticTransform, + ApplyTransform, + CreatePoseStamped, +) from ada_feeding.helpers import BlackboardKey from ada_feeding.idioms import pre_moveto_config, scoped_behavior from ada_feeding.idioms.bite_transfer import ( - get_add_in_front_of_wheelchair_wall_behavior, get_toggle_collision_object_behavior, get_toggle_face_detection_behavior, - get_remove_in_front_of_wheelchair_wall_behavior, ) from ada_feeding.trees import ( MoveToTree, @@ -44,10 +57,16 @@ class MoveToMouthTree(MoveToTree): """ - A behaviour tree that toggles face detection on, moves the robot to the - staging configuration, detects a face, checks whether it is within a - distance threshold from the camera, and does a cartesian motion to the - face. + A behaviour tree that gets the user's mouth pose and moves the robot to it. + Note that to get the mouth pose, the robot executes these in-order until one + succeeds: + 1. If `use_face_detection_msg` and the face detection message included in the + goal is not stale, use it. + 2. Else, attempt to detect a face from the robot's current pose and use that. + 3. Else, if there is a cached detected mouth pose on the static transform + tree, use it. + 4. Else, fail. The user can go back to the staging configuration and re-detect + the face. """ # pylint: disable=too-many-instance-attributes, too-many-arguments @@ -57,55 +76,37 @@ class MoveToMouthTree(MoveToTree): def __init__( self, node: Node, - staging_configuration: List[float], - staging_configuration_tolerance: float = 0.001, - mouth_pose_tolerance: float = 0.001, - orientation_constraint_quaternion: List[float] = None, - orientation_constraint_tolerances: List[float] = None, + mouth_position_tolerance: float = 0.001, planner_id: str = "RRTstarkConfigDefault", - allowed_planning_time_to_staging_configuration: float = 0.5, - allowed_planning_time_to_mouth: float = 0.5, - max_velocity_scaling_factor_to_staging_configuration: float = 0.1, - max_velocity_scaling_factor_to_mouth: float = 0.1, - cartesian_jump_threshold_to_mouth: float = 0.0, - cartesian_max_step_to_mouth: float = 0.0025, + allowed_planning_time: float = 0.5, head_object_id: str = "head", + max_velocity_scaling_factor: float = 0.1, + cartesian_jump_threshold: float = 0.0, + cartesian_max_step: float = 0.0025, wheelchair_collision_object_id: str = "wheelchair_collision", force_threshold: float = 4.0, torque_threshold: float = 4.0, allowed_face_distance: Tuple[float, float] = (0.4, 1.5), + face_detection_msg_timeout: float = 5.0, + face_detection_timeout: float = 2.5, ): """ Initializes tree-specific parameters. 
Parameters ---------- - staging_configuration: The joint positions to move the robot arm to. - The user's face should be visible in this configuration. - staging_configuration_tolerance: The tolerance for the joint positions. - mouth_pose_tolerance: The tolerance for the movement to the mouth pose. - orientation_constraint_quaternion: The quaternion for the orientation - constraint. If None, the orientation constraint is not used. - orientation_constraint_tolerances: The tolerances for the orientation - constraint, as a 3D rotation vector. If None, the orientation - constraint is not used. + mouth_position_tolerance: The tolerance for the movement to the mouth pose. planner_id: The planner ID to use for the MoveIt2 motion planning. - allowed_planning_time_to_staging_configuration: The allowed planning - time for the MoveIt2 motion planner to move to the staging config. - allowed_planning_time_to_mouth: The allowed planning time for the MoveIt2 - motion planner to move to the user's mouth. - max_velocity_scaling_factor_to_staging_configuration: The maximum - velocity scaling factor for the MoveIt2 motion planner to move to - the staging config. - max_velocity_scaling_factor_to_mouth: The maximum velocity scaling + allowed_planning_time: The allowed planning time. + head_object_id: The ID of the head collision object in the MoveIt2 + planning scene. + max_velocity_scaling_factor: The maximum velocity scaling factor for the MoveIt2 motion planner to move to the user's mouth. - cartesian_jump_threshold_to_mouth: The maximum allowed jump in the + cartesian_jump_threshold: The maximum allowed jump in the cartesian space for the MoveIt2 motion planner to move to the user's mouth. - cartesian_max_step_to_mouth: The maximum allowed step in the cartesian + cartesian_max_step: The maximum allowed step in the cartesian space for the MoveIt2 motion planner to move to the user's mouth. - head_object_id: The ID of the head collision object in the MoveIt2 - planning scene. wheelchair_collision_object_id: The ID of the wheelchair collision object in the MoveIt2 planning scene. force_threshold: The force threshold (N) for the ForceGateController. @@ -116,37 +117,38 @@ def __init__( and to the mouth. allowed_face_distance: The min and max distance (m) between a face and the **camera's optical frame** for the robot to move towards the face. + face_detection_msg_timeout: If the timestamp on the face detection message + is older than these many seconds, don't use it. + face_detection_timeout: If the robot has been trying to detect a face for + more than these many seconds, timeout. """ # pylint: disable=too-many-locals # These are all necessary due to all the behaviors MoveToMouth contains + # TODO: This class should override get_result and get_feedback to return + # perception failures, and tell the user it is attempting perception, + # while it is waiting for the user's face. 
+ # Initialize MoveToTree super().__init__(node) # Store the parameters - self.staging_configuration = staging_configuration - assert len(self.staging_configuration) == 6, "Must provide 6 joint positions" - self.staging_configuration_tolerance = staging_configuration_tolerance - self.mouth_pose_tolerance = mouth_pose_tolerance - self.orientation_constraint_quaternion = orientation_constraint_quaternion - self.orientation_constraint_tolerances = orientation_constraint_tolerances + self.mouth_position_tolerance = mouth_position_tolerance self.planner_id = planner_id - self.allowed_planning_time_to_staging_configuration = ( - allowed_planning_time_to_staging_configuration - ) - self.allowed_planning_time_to_mouth = allowed_planning_time_to_mouth - self.max_velocity_scaling_factor_to_staging_configuration = ( - max_velocity_scaling_factor_to_staging_configuration - ) - self.max_velocity_scaling_factor_to_mouth = max_velocity_scaling_factor_to_mouth - self.cartesian_jump_threshold_to_mouth = cartesian_jump_threshold_to_mouth - self.cartesian_max_step_to_mouth = cartesian_max_step_to_mouth + self.allowed_planning_time = allowed_planning_time self.head_object_id = head_object_id + self.max_velocity_scaling_factor = max_velocity_scaling_factor + self.cartesian_jump_threshold = cartesian_jump_threshold + self.cartesian_max_step = cartesian_max_step self.wheelchair_collision_object_id = wheelchair_collision_object_id self.force_threshold = force_threshold self.torque_threshold = torque_threshold self.allowed_face_distance = allowed_face_distance + self.face_detection_msg_timeout = Duration(seconds=face_detection_msg_timeout) + self.face_detection_timeout = face_detection_timeout + + self.face_detection_relative_blackboard_key = "face_detection" def check_face_msg(self, msg: FaceDetection, _: FaceDetection) -> bool: """ @@ -162,25 +164,30 @@ def check_face_msg(self, msg: FaceDetection, _: FaceDetection) -> bool: ------- True if a face is detected within the required distance, False otherwise. """ - if msg.is_face_detected: - # Check the distance between the face and the camera optical frame - # The face detection message is in the camera optical frame - # The camera optical frame is the parent frame of the face detection - # frame, so we can just use the translation of the face detection - # frame to get the distance between the face and the camera optical - # frame. - distance = ( - msg.detected_mouth_center.point.x**2.0 - + msg.detected_mouth_center.point.y**2.0 - + msg.detected_mouth_center.point.z**2.0 - ) ** 0.5 - if ( - self.allowed_face_distance[0] - <= distance - <= self.allowed_face_distance[1] - ): - return True - return False + # Check if a face is detected + if not msg.is_face_detected: + return False + # Check if the message is stale + timestamp = Time.from_msg(msg.detected_mouth_center.header.stamp) + if self._node.get_clock().now() - timestamp > self.face_detection_msg_timeout: + return False + # Check the distance between the face and the camera optical frame + # The face detection message is in the camera optical frame + # The camera optical frame is the parent frame of the face detection + # frame, so we can just use the translation of the face detection + # frame to get the distance between the face and the camera optical + # frame. 
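Written out linearly, `check_face_msg` gates a detection on three conditions: a face is present, the timestamp is fresh, and the mouth center lies within the allowed depth band. The staleness checks above and the distance computation that follows reduce to this standalone sketch (explicit arguments in place of `self`):

```python
from typing import Tuple

from rclpy.duration import Duration
from rclpy.time import Time

from ada_feeding_msgs.msg import FaceDetection


def face_msg_ok(
    msg: FaceDetection, now: Time, max_age: Duration, bounds: Tuple[float, float]
) -> bool:
    if not msg.is_face_detected:
        return False
    stamp = Time.from_msg(msg.detected_mouth_center.header.stamp)
    if now - stamp > max_age:  # stale detection
        return False
    point = msg.detected_mouth_center.point
    # Euclidean distance (m) from the camera's optical frame.
    distance = (point.x**2 + point.y**2 + point.z**2) ** 0.5
    return bounds[0] <= distance <= bounds[1]
```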
+ distance = ( + msg.detected_mouth_center.point.x**2.0 + + msg.detected_mouth_center.point.y**2.0 + + msg.detected_mouth_center.point.z**2.0 + ) ** 0.5 + if ( + distance < self.allowed_face_distance[0] + or distance > self.allowed_face_distance[1] + ): + return False + return True @override def create_tree( @@ -191,175 +198,242 @@ def create_tree( ### Define Tree Logic - in_front_of_wheelchair_wall_id = "in_front_of_wheelchair_wall" - face_detection_relative_blackboard_key = "face_detection" - face_detection_absolute_blackboard_key = "/".join( - [name, face_detection_relative_blackboard_key] + face_detection_absolute_key = Blackboard.separator.join( + [name, self.face_detection_relative_blackboard_key] + ) + use_face_detection_msg_absolute_key = Blackboard.separator.join( + [name, "use_face_detection_msg"] ) - # The target position is 5cm away from the mouth center, in the direction - # away from the wheelchair backrest. - mouth_offset = (0.0, -0.05, 0.0) # Root Sequence root_seq = py_trees.composites.Sequence( name=name, memory=True, children=[ - # Retare the F/T sensor and set the F/T Thresholds - pre_moveto_config( - name=name + "PreMoveToConfig", - toggle_watchdog_listener=False, - f_mag=self.force_threshold, - t_mag=self.torque_threshold, - ), - # Add a wall in front of the wheelchair to prevent the robot - # from moving unnecessarily close to the user. - scoped_behavior( - name=name + " InFrontOfWheelchairWallScope", - pre_behavior=get_add_in_front_of_wheelchair_wall_behavior( - name + "AddWheelchairWall", - in_front_of_wheelchair_wall_id, - ), - # Move to the staging configuration - workers=[ - # Goal configuration: staging configuration - MoveIt2JointConstraint( - name="StagingConfigurationGoalConstraint", - ns=name, - inputs={ - "joint_positions": self.staging_configuration, - "tolerance": self.staging_configuration_tolerance, - }, - outputs={ - "constraints": BlackboardKey("goal_constraints"), - }, + py_trees.composites.Selector( + name=name + " FaceDetectionSelector", + memory=True, + children=[ + # Try to detect the face and then convert the detected face pose + # to the base frame. + py_trees.composites.Sequence( + name=name + " FaceDetectionSequence", + memory=True, + children=[ + py_trees.composites.Selector( + name=name + " FaceDetectionSelector", + memory=True, + children=[ + # If the client said to use the face detection message and + # it is not stale, use it. 
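One subtlety in the tree below: `py_trees.common.ComparisonExpression` accepts any binary callable as its `operator`, which is how the bound method `self.check_face_msg` can serve as the comparison against a dummy `FaceDetection()` value. A toy example of the same mechanism:

```python
import py_trees

check = py_trees.behaviours.CheckBlackboardVariableValue(
    name="CheckPositive",
    check=py_trees.common.ComparisonExpression(
        variable="some_namespace/value",
        value=0.0,
        # Any callable taking (variable_value, value) and returning a bool
        # works here, not just functions from the operator module.
        operator=lambda observed, expected: observed > expected,
    ),
)
```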
+ py_trees.composites.Sequence( + name=name, + memory=True, + children=[ + # Check if the `use_face_detection_msg` field of the + # goal was set to True + py_trees.behaviours.CheckBlackboardVariableValue( + name=name + + " CheckUseFaceDetectionMsg", + check=py_trees.common.ComparisonExpression( + variable=use_face_detection_msg_absolute_key, + value=True, + operator=operator.eq, + ), + ), + # Check if the face detection message is not stale + # and close enough to the camera + py_trees.behaviours.CheckBlackboardVariableValue( + name=name + " CheckFaceMsg", + check=py_trees.common.ComparisonExpression( + variable=face_detection_absolute_key, + value=FaceDetection(), + operator=self.check_face_msg, + ), + ), + ], + ), + # If the above didn't work, turn face detection on until + # a face is detected, or until timeout + scoped_behavior( + name=name + " FaceDetectionOnScope", + pre_behavior=get_toggle_face_detection_behavior( + name + "TurnFaceDetectionOn", + True, + ), + # Turn off face detection + post_behavior=get_toggle_face_detection_behavior( + name + "TurnFaceDetectionOff", + False, + ), + workers=[ + py_trees.decorators.Timeout( + name=name + " FaceDetectionTimeout", + duration=self.face_detection_timeout, + child=py_trees.composites.Sequence( + name=name, + memory=False, + children=[ + # Get the detected face + py_trees_ros.subscribers.ToBlackboard( + name=name + " GetFace", + topic_name="~/face_detection", + topic_type=FaceDetection, + qos_profile=py_trees_ros.utilities.qos_profile_unlatched(), + blackboard_variables={ + face_detection_absolute_key: None, + }, + initialise_variables={ + face_detection_absolute_key: FaceDetection(), + }, + ), + # Check whether the face is within the required distance + py_trees.decorators.FailureIsRunning( + name=name + + " CheckFaceWrapper", + child=py_trees.behaviours.CheckBlackboardVariableValue( + name=name + + " CheckFaceMsg", + check=py_trees.common.ComparisonExpression( + variable=face_detection_absolute_key, + value=FaceDetection(), + operator=self.check_face_msg, + ), + ), + ), + ], + ), + ), + ], + ), + ], + ), + # Convert `face_detection` to `mouth_position` in the + # base frame. + ApplyTransform( + name=name + " FaceDetectionToMouthPosition", + ns=name, + inputs={ + "stamped_msg": BlackboardKey( + self.face_detection_relative_blackboard_key + ".detected_mouth_center" + ), + "target_frame": "j2n6s200_link_base", + }, + outputs={ + "transformed_msg": BlackboardKey( + "mouth_position" + ), # PointStamped + }, + ), + # Convert `mouth_position` into a mouth pose using + # a fixed quaternion + CreatePoseStamped( + name=name + " MouthPositionToMouthPose", + ns=name, + inputs={ + "position": BlackboardKey("mouth_position"), + "quaternion": [ + -0.0616284, + -0.0616284, + -0.704416, + 0.704416, + ], # Facing away from wheelchair backrest + }, + outputs={ + "pose_stamped": BlackboardKey( + "mouth_pose" + ), # PostStamped + }, + ), + # Cache the mouth pose on the static TF tree + SetStaticTransform( + name=name + " SetMouthPose", + ns=name, + inputs={ + "transform": BlackboardKey("mouth_pose"), + "child_frame_id": "mouth", + }, + ), + ], ), - # Orientation path constraint to keep the fork straight - MoveIt2OrientationConstraint( - name="KeepForkStraightPathConstraint", + # If there is a cached detected mouth pose on the static + # transform tree, use it. 
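The comment above completes a three-level fallback. Stripped of the ROS plumbing, the perception stage of this tree has roughly the following skeleton (the placeholder leaves stand in for the real behaviors):

```python
import py_trees

# A Selector ticks children left to right and succeeds on the first success:
# (1) trust the goal's face-detection message, (2) run live detection under a
# timeout, (3) fall back to the cached static "mouth" frame.
perception = py_trees.composites.Selector(
    name="FaceDetectionSelector",
    memory=True,
    children=[
        py_trees.behaviours.Failure(name="UseGoalMsg"),
        py_trees.behaviours.Failure(name="DetectLive"),
        py_trees.behaviours.Success(name="UseCachedMouthTF"),
    ],
)
```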
+ GetTransform( + name=name + " GetCachedMouthPose", ns=name, inputs={ - "quat_xyzw": self.orientation_constraint_quaternion, - "tolerance": self.orientation_constraint_tolerances, - "parameterization": 1, # Rotation vector + "target_frame": "j2n6s200_link_base", + "source_frame": "mouth", }, outputs={ - "constraints": BlackboardKey("path_constraints"), - }, - ), - # Plan - MoveIt2Plan( - name="MoveToStagingConfigurationPlan", - ns=name, - inputs={ - "goal_constraints": BlackboardKey("goal_constraints"), - "path_constraints": BlackboardKey("path_constraints"), - "planner_id": self.planner_id, - "allowed_planning_time": ( - self.allowed_planning_time_to_staging_configuration - ), - "max_velocity_scale": ( - self.max_velocity_scaling_factor_to_staging_configuration - ), - "ignore_violated_path_constraints": True, + "transform": BlackboardKey("mouth_pose"), }, - outputs={"trajectory": BlackboardKey("trajectory")}, - ), - # Execute - MoveIt2Execute( - name="MoveToStagingConfigurationExecute", - ns=name, - inputs={"trajectory": BlackboardKey("trajectory")}, - outputs={}, ), ], - # Remove the wall in front of the wheelchair - post_behavior=get_remove_in_front_of_wheelchair_wall_behavior( - name + "RemoveWheelchairWall", - in_front_of_wheelchair_wall_id, - ), ), - # Turn face detection on until a face is detected - scoped_behavior( - name=name + " FaceDetectionOnScope", - pre_behavior=get_toggle_face_detection_behavior( - name + "TurnFaceDetectionOn", - True, - ), - workers=[ - py_trees.composites.Sequence( - name=name, - memory=False, - children=[ - # Get the detected face - py_trees_ros.subscribers.ToBlackboard( - name=name + " GetFace", - topic_name="~/face_detection", - topic_type=FaceDetection, - qos_profile=py_trees_ros.utilities.qos_profile_unlatched(), - blackboard_variables={ - face_detection_absolute_blackboard_key: None, - }, - initialise_variables={ - face_detection_absolute_blackboard_key: FaceDetection(), - }, - ), - # Check whether the face is within the required distance - # TODO: This can potentially block the tree forever, - # e.g., if the face is not visible. Change it! - py_trees.decorators.FailureIsRunning( - name=name + " CheckFaceWrapper", - child=py_trees.behaviours.CheckBlackboardVariableValue( - name=name + " CheckFace", - check=py_trees.common.ComparisonExpression( - variable=face_detection_absolute_blackboard_key, - value=FaceDetection(), - operator=self.check_face_msg, - ), - ), - ), - ], + # At this stage of the tree, we are guarenteed to have a + # `mouth_pose` on the blackboard -- else the tree would have failed. + # Move the head to the detected mouth pose. 
+ ModifyCollisionObject( + name=name + " MoveHead", + ns=name, + inputs={ + "operation": ModifyCollisionObjectOperation.MOVE, + "collision_object_id": self.head_object_id, + "collision_object_position": BlackboardKey( + "mouth_pose.pose.position" ), - ], - # Turn off face detection - post_behavior=get_toggle_face_detection_behavior( - name + "TurnFaceDetectionOff", - False, - ), + "collision_object_orientation": BlackboardKey( + "mouth_pose.pose.orientation" + ), + }, ), - # Compute the goal constraints of the target pose - ComputeMoveToMouthPosition( - name=name + " ComputeTargetPosition", + # The goal constraint of the fork is the mouth pose, + # translated 5cm in the +y direction, rotated to face the mouth + ApplyTransform( + name=name + " ComputeMoveToMouthPose", ns=name, inputs={ - "face_detection_msg": BlackboardKey( - face_detection_relative_blackboard_key + "stamped_msg": BlackboardKey("mouth_pose"), + "target_frame": None, + "transform": TransformStamped( + child_frame_id="j2n6s200_link_base", + transform=Transform( + translation=Vector3( + x=0.0, + y=-0.05, + z=0.0, + ), + rotation=Quaternion( + x=0.0, + y=0.0, + z=0.0, + w=1.0, + ), + ), ), - "position_offset": mouth_offset, }, outputs={ - "target_position": BlackboardKey("mouth_position"), + "transformed_msg": BlackboardKey("goal_pose"), # PoseStamped }, ), - # Move the head to the detected face pose. We use the computed - # mouth position since that is in the base frame, not the camera - # frame. - ModifyCollisionObject( - name=name + " MoveHead", + # Cache the mouth pose on the static TF tree + # TODO: remove! + SetStaticTransform( + name=name + " SetMouthPose", ns=name, inputs={ - "operation": ModifyCollisionObjectOperation.MOVE, - "collision_object_id": self.head_object_id, - "collision_object_position": BlackboardKey("mouth_position"), - "collision_object_orientation": ( - -0.0616284, - -0.0616284, - -0.704416, - 0.704416, - ), - "position_offset": tuple(-1.0 * val for val in mouth_offset), + "transform": BlackboardKey("goal_pose"), + "child_frame_id": "goal", }, ), + # Retare the F/T sensor and set the F/T Thresholds + pre_moveto_config( + name=name + "PreMoveToConfig", + toggle_watchdog_listener=False, + f_mag=self.force_threshold, + t_mag=self.torque_threshold, + ), # Allow collisions with the expanded wheelchair collision box scoped_behavior( name=name + " AllowWheelchairCollisionScope", @@ -370,66 +444,35 @@ def create_tree( ), # Move to the target pose workers=[ - # Goal configuration: target position - MoveIt2PositionConstraint( - name="MoveToTargetPosePositionGoalConstraint", - ns=name, - inputs={ - "position": BlackboardKey("mouth_position"), - "tolerance": self.mouth_pose_tolerance, - }, - outputs={ - "constraints": BlackboardKey("goal_constraints"), - }, - ), - # Goal configuration: target orientation - MoveIt2OrientationConstraint( - name="MoveToTargetPoseOrientationGoalConstraint", + # Goal configuration + MoveIt2PoseConstraint( + name="MoveToTargetPosePoseGoalConstraint", ns=name, inputs={ - "quat_xyzw": ( - 0.0, - 0.7071068, - 0.7071068, - 0.0, - ), # Point to wheelchair backrest - "tolerance": (0.6, 0.5, 0.5), + "pose": BlackboardKey("goal_pose"), + "tolerance_position": self.mouth_position_tolerance, + "tolerance_orientation": (0.6, 0.5, 0.5), "parameterization": 1, # Rotation vector - "constraints": BlackboardKey("goal_constraints"), }, outputs={ "constraints": BlackboardKey("goal_constraints"), }, ), - # Orientation path constraint to keep the fork straight - MoveIt2OrientationConstraint( - 
name="KeepForkStraightPathConstraint", - ns=name, - inputs={ - "quat_xyzw": self.orientation_constraint_quaternion, - "tolerance": self.orientation_constraint_tolerances, - "parameterization": 1, # Rotation vector - }, - outputs={ - "constraints": BlackboardKey("path_constraints"), - }, - ), # Plan MoveIt2Plan( name="MoveToTargetPosePlan", ns=name, inputs={ "goal_constraints": BlackboardKey("goal_constraints"), - "path_constraints": BlackboardKey("path_constraints"), "planner_id": self.planner_id, - "allowed_planning_time": self.allowed_planning_time_to_mouth, + "allowed_planning_time": self.allowed_planning_time, "max_velocity_scale": ( - self.max_velocity_scaling_factor_to_mouth + self.max_velocity_scaling_factor ), "cartesian": True, - "cartesian_jump_threshold": self.cartesian_jump_threshold_to_mouth, + "cartesian_jump_threshold": self.cartesian_jump_threshold, "cartesian_fraction_threshold": 0.60, - "cartesian_max_step": self.cartesian_max_step_to_mouth, + "cartesian_max_step": self.cartesian_max_step, }, outputs={"trajectory": BlackboardKey("trajectory")}, ), @@ -454,3 +497,29 @@ def create_tree( ### Return tree return py_trees.trees.BehaviourTree(root_seq) + + # Override goal to read arguments into local blackboard + @override + def send_goal(self, tree: py_trees.trees.BehaviourTree, goal: object) -> bool: + # Docstring copied by @override + # Note: if here, tree is root, not a subtree + + # Check goal type + if not isinstance(goal, MoveToMouth.Goal): + return False + + # Write tree inputs to blackboard + name = tree.root.name + blackboard = py_trees.blackboard.Client(name=name, namespace=name) + blackboard.register_key( + key="use_face_detection_msg", access=py_trees.common.Access.WRITE + ) + blackboard.use_face_detection_msg = goal.use_face_detection_msg + blackboard.register_key( + key=self.face_detection_relative_blackboard_key, + access=py_trees.common.Access.WRITE, + ) + blackboard.face_detection = goal.face_detection + + # Adds MoveToVisitor for Feedback + return super().send_goal(tree, goal) diff --git a/ada_feeding/config/ada_feeding_action_servers.yaml b/ada_feeding/config/ada_feeding_action_servers.yaml index a7b02ba1..a93ea6dd 100644 --- a/ada_feeding/config/ada_feeding_action_servers.yaml +++ b/ada_feeding/config/ada_feeding_action_servers.yaml @@ -9,6 +9,7 @@ ada_feeding_action_servers: server_names: - MoveAbovePlate - AcquireFood + - MoveToStagingConfiguration - MoveToMouth - MoveFromMouthToAbovePlate - MoveFromMouthToRestingPosition @@ -79,20 +80,17 @@ ada_feeding_action_servers: tick_rate: 10 # Hz, optional, default: 30 # Parameters for the MoveToMouth action - MoveToMouth: # required + MoveToStagingConfiguration: # required action_type: ada_feeding_msgs.action.MoveTo # required - tree_class: ada_feeding.trees.MoveToMouthTree # required + tree_class: ada_feeding.trees.MoveToConfigurationWithWheelchairWallTree # required tree_kws: # optional - - staging_configuration + - goal_configuration - orientation_constraint_quaternion - orientation_constraint_tolerances - - allowed_planning_time_to_staging_configuration - - max_velocity_scaling_factor_to_staging_configuration - - max_velocity_scaling_factor_to_mouth - - cartesian_jump_threshold_to_mouth - - cartesian_max_step_to_mouth + - allowed_planning_time + - max_velocity_scaling_factor tree_kwargs: # optional - staging_configuration: + goal_configuration: # # Side-staging configuration # - 2.74709 # j2n6s200_joint_1 # - 2.01400 # j2n6s200_joint_2 @@ -123,11 +121,26 @@ ada_feeding_action_servers: - 0.6 # x, rad - 3.14159 
# y, rad - 0.5 # z, rad - allowed_planning_time_to_staging_configuration: 2.5 # secs - max_velocity_scaling_factor_to_staging_configuration: 0.65 # optional in (0.0, 1.0], default: 0.1 - max_velocity_scaling_factor_to_mouth: 0.4 # optional in (0.0, 1.0], default: 0.1 - cartesian_jump_threshold_to_mouth: 2.0 # If a jump for a joint across consecutive IK solutions is more than this factor greater than the average, it is rejected - cartesian_max_step_to_mouth: 0.01 # m + allowed_planning_time: 2.5 # secs + max_velocity_scaling_factor: 0.65 # optional in (0.0, 1.0], default: 0.1 + tick_rate: 10 # Hz, optional, default: 30 + + # Parameters for the MoveToMouth action + MoveToMouth: # required + action_type: ada_feeding_msgs.action.MoveToMouth # required + tree_class: ada_feeding.trees.MoveToMouthTree # required + tree_kws: # optional + - max_velocity_scaling_factor + - cartesian_jump_threshold + - cartesian_max_step + - face_detection_msg_timeout + - face_detection_timeout + tree_kwargs: # optional + max_velocity_scaling_factor: 0.4 # optional in (0.0, 1.0], default: 0.1 + cartesian_jump_threshold: 2.0 # If a jump for a joint across consecutive IK solutions is more than this factor greater than the average, it is rejected + cartesian_max_step: 0.01 # m + face_detection_msg_timeout: 10.0 # sec + face_detection_timeout: 5.0 # sec tick_rate: 10 # Hz, optional, default: 30 # Parameters for the MoveFromMouthToAbovePlate action. diff --git a/ada_feeding/scripts/save_image.py b/ada_feeding/scripts/save_image.py index dca9ca59..d63632d1 100755 --- a/ada_feeding/scripts/save_image.py +++ b/ada_feeding/scripts/save_image.py @@ -136,7 +136,7 @@ def save_image_callback( cv2.imwrite(depth_image_filepath, depth_image) # Return a success response - response.success = (len(response.message) == 0) + response.success = len(response.message) == 0 response.message = "Successfully saved the latest color image and depth image" return response diff --git a/ada_feeding_msgs/CMakeLists.txt b/ada_feeding_msgs/CMakeLists.txt index 0d3090a3..8f8b6bd4 100644 --- a/ada_feeding_msgs/CMakeLists.txt +++ b/ada_feeding_msgs/CMakeLists.txt @@ -23,6 +23,7 @@ rosidl_generate_interfaces(${PROJECT_NAME} "action/AcquireFood.action" "action/MoveTo.action" + "action/MoveToMouth.action" "action/SegmentAllItems.action" "action/SegmentFromPoint.action" "action/Teleoperate.action" diff --git a/ada_feeding_msgs/action/AcquireFood.action b/ada_feeding_msgs/action/AcquireFood.action index 5528c414..254d9447 100644 --- a/ada_feeding_msgs/action/AcquireFood.action +++ b/ada_feeding_msgs/action/AcquireFood.action @@ -15,6 +15,7 @@ uint8 STATUS_SUCCESS=0 uint8 STATUS_PLANNING_FAILED=1 uint8 STATUS_MOTION_FAILED=2 uint8 STATUS_CANCELED=3 +uint8 STATUS_PERCEPTION_FAILED=4 uint8 STATUS_UNKNOWN=99 # Whether the planning and motion succeeded and if not, why diff --git a/ada_feeding_msgs/action/MoveTo.action b/ada_feeding_msgs/action/MoveTo.action index 1b92e321..ffa87ea0 100644 --- a/ada_feeding_msgs/action/MoveTo.action +++ b/ada_feeding_msgs/action/MoveTo.action @@ -11,6 +11,7 @@ uint8 STATUS_SUCCESS=0 uint8 STATUS_PLANNING_FAILED=1 uint8 STATUS_MOTION_FAILED=2 uint8 STATUS_CANCELED=3 +uint8 STATUS_PERCEPTION_FAILED=4 uint8 STATUS_UNKNOWN=99 # Whether the planning and motion succeeded and if not, why diff --git a/ada_feeding_msgs/action/Teleoperate.action b/ada_feeding_msgs/action/Teleoperate.action index 12bbba1e..6161ea66 100644 --- a/ada_feeding_msgs/action/Teleoperate.action +++ b/ada_feeding_msgs/action/Teleoperate.action @@ -11,6 +11,7 @@ uint8 
STATUS_SUCCESS=0 uint8 STATUS_PLANNING_FAILED=1 uint8 STATUS_MOTION_FAILED=2 uint8 STATUS_CANCELED=3 +uint8 STATUS_PERCEPTION_FAILED=4 uint8 STATUS_UNKNOWN=99 # Whether the planning and motion succeeded and if not, why From c2473ae415436125019034226817c222c0f477cd Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Sun, 12 Nov 2023 13:57:19 -0800 Subject: [PATCH 02/15] Verified MoveToMouth works in sim --- .../ada_feeding/behaviors/ros_utility.py | 30 ++- .../behaviors/transfer/__init__.py | 4 - .../compute_move_to_mouth_position.py | 177 ------------------ .../ada_feeding/trees/move_to_mouth_tree.py | 108 ++++++----- 4 files changed, 90 insertions(+), 229 deletions(-) delete mode 100644 ada_feeding/ada_feeding/behaviors/transfer/__init__.py delete mode 100644 ada_feeding/ada_feeding/behaviors/transfer/compute_move_to_mouth_position.py diff --git a/ada_feeding/ada_feeding/behaviors/ros_utility.py b/ada_feeding/ada_feeding/behaviors/ros_utility.py index 19e2b797..dd6b7d63 100644 --- a/ada_feeding/ada_feeding/behaviors/ros_utility.py +++ b/ada_feeding/ada_feeding/behaviors/ros_utility.py @@ -141,6 +141,7 @@ def blackboard_inputs( source_frame: Union[BlackboardKey, str], time: Union[BlackboardKey, Time] = Time(), timeout: Union[BlackboardKey, Duration] = Duration(seconds=0.0), + new_type: Union[BlackboardKey, type] = TransformStamped, ) -> None: """ Blackboard Inputs @@ -153,8 +154,10 @@ def blackboard_inputs( timeout: Time to wait for the target frame to become available. Note that the tree ticking will block for this duration, so it is recommended that this is kept at 0.0 (the default value). + new_type: The type of the transform to return. Must be either TransformStamped + or PoseStamped. """ - # pylint: disable=unused-argument, duplicate-code + # pylint: disable=unused-argument, duplicate-code, too-many-arguments # Arguments are handled generically in base class. super().blackboard_inputs( **{key: value for key, value in locals().items() if key != "self"} @@ -162,7 +165,7 @@ def blackboard_inputs( def blackboard_outputs( self, - transform: Optional[BlackboardKey], # TransformStamped + transform: Optional[BlackboardKey], # new_type ) -> None: """ Blackboard Outputs @@ -198,7 +201,7 @@ def update(self) -> py_trees.common.Status: # Input Validation if not self.blackboard_exists( - ["target_frame", "source_frame", "time", "timeout"] + ["target_frame", "source_frame", "time", "timeout", "new_type"] ): self.logger.error("Missing input arguments") return py_trees.common.Status.FAILURE @@ -207,6 +210,12 @@ def update(self) -> py_trees.common.Status: source_frame = self.blackboard_get("source_frame") time = self.blackboard_get("time") timeout = self.blackboard_get("timeout") + new_type = self.blackboard_get("new_type") + if new_type not in [TransformStamped, PoseStamped]: + self.logger.error( + f"Invalid type {new_type}. Must be either TransformStamped or PoseStamped" + ) + return py_trees.common.Status.FAILURE if self.tf_lock.locked(): return py_trees.common.Status.RUNNING @@ -227,6 +236,19 @@ def update(self) -> py_trees.common.Status: self.logger.error(f"Could not get transform. 
Error: {error}") return py_trees.common.Status.FAILURE + if new_type == PoseStamped: + transform = PoseStamped( + header=transform.header, + pose=Pose( + position=Point( + x=transform.transform.translation.x, + y=transform.transform.translation.y, + z=transform.transform.translation.z, + ), + orientation=transform.transform.rotation, + ), + ) + self.blackboard_set("transform", transform) return py_trees.common.Status.SUCCESS @@ -244,7 +266,7 @@ class SetStaticTransform(BlackboardBehavior): def blackboard_inputs( self, transform: Union[BlackboardKey, TransformStamped, PoseStamped], - child_frame_id: Union[BlackboardKey, str], + child_frame_id: Union[BlackboardKey, Optional[str]] = None, ) -> None: """ Blackboard Inputs diff --git a/ada_feeding/ada_feeding/behaviors/transfer/__init__.py b/ada_feeding/ada_feeding/behaviors/transfer/__init__.py deleted file mode 100644 index 2d22f165..00000000 --- a/ada_feeding/ada_feeding/behaviors/transfer/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -""" -This package contains custom py_tree behaviors for the bite transfer. -""" -from .compute_move_to_mouth_position import ComputeMoveToMouthPosition diff --git a/ada_feeding/ada_feeding/behaviors/transfer/compute_move_to_mouth_position.py b/ada_feeding/ada_feeding/behaviors/transfer/compute_move_to_mouth_position.py deleted file mode 100644 index 5508462e..00000000 --- a/ada_feeding/ada_feeding/behaviors/transfer/compute_move_to_mouth_position.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -This module defines the ComputeMoveToMouthPosition behavior, which computes the -target position to move the robot's end effector to based on the detected mouth -center. Specifically, it transforms the detected mouth center from the camera -frame to the requested frame and then adds an offset. -""" -# Standard imports -import copy -from typing import Optional, Tuple, Union - -# Third-party imports -from overrides import override -import py_trees -from rclpy.time import Time -from tf2_geometry_msgs import PointStamped # pylint: disable=unused-import -import tf2_py as tf2 - -# Local imports -from ada_feeding_msgs.msg import FaceDetection -from ada_feeding.behaviors import BlackboardBehavior -from ada_feeding.helpers import ( - BlackboardKey, - get_tf_object, -) - -# TODO: Delete this file! - - -class ComputeMoveToMouthPosition(BlackboardBehavior): - """ - A behavior that computes the target position to move the robot's end effector - to based on the detected mouth center. Specifically, it transforms the - detected mouth center from the camera frame to the requested frame and then - adds an offset. - """ - - # pylint: disable=arguments-differ - # We *intentionally* violate Liskov Substitution Princple - # in that blackboard config (inputs + outputs) are not - # meant to be called in a generic setting. - - def blackboard_inputs( - self, - face_detection_msg: Union[BlackboardKey, FaceDetection], - frame_id: Union[BlackboardKey, str] = "j2n6s200_link_base", - position_offset: Union[BlackboardKey, Tuple[float, float, float]] = ( - 0.0, - 0.0, - 0.0, - ), - ) -> None: - """ - Blackboard Inputs - - Parameters - ---------- - face_detection_msg: The face detection message. - frame_id: The frame ID for the target position. - position_offset: The offset to add to the target position, in `frame_id`. - """ - # pylint: disable=unused-argument, duplicate-code - # Arguments are handled generically in base class. 
- super().blackboard_inputs( - **{key: value for key, value in locals().items() if key != "self"} - ) - - def blackboard_outputs( - self, - target_position: Optional[BlackboardKey], # PointStamped - ) -> None: - """ - Blackboard Outputs - By convention (to avoid collisions), avoid non-None default arguments. - - Parameters - ---------- - target_position: The target position to move the robot's end effector to - will be written to this key, as a PointStamped in `frame_id`. - """ - # pylint: disable=unused-argument, duplicate-code - # Arguments are handled generically in base class. - super().blackboard_outputs( - **{key: value for key, value in locals().items() if key != "self"} - ) - - @override - def setup(self, **kwargs): - # Docstring copied from @override - - # pylint: disable=attribute-defined-outside-init - # It is okay for attributes in behaviors to be - # defined in the setup / initialise functions. - - # Get Node from Kwargs - self.node = kwargs["node"] - - # Get TF Listener from blackboard - # For transform approach -> end_effector_frame - self.tf_buffer, _, self.tf_lock = get_tf_object(self.blackboard, self.node) - - def update(self) -> py_trees.common.Status: - """ - Computes the target position to move the robot's end effector to based on - the detected mouth center. Specifically, it transforms the detected mouth - center from the camera frame to the requested frame and then adds an offset. - It immediately returns either SUCCESS or FAILURE, and never returns RUNNING. - """ - self.logger.info(f"{self.name} [ComputeMoveToMouthPosition::update()]") - - # Get the inputs from the blackboard - if not self.blackboard_exists( - [ - "face_detection_msg", - "frame_id", - "position_offset", - ] - ): - self.logger.error( - "Missing input arguments `face_detection_msg`, `frame_id`, or " - "`position_offset`" - ) - return py_trees.common.Status.FAILURE - face_detection_msg = self.blackboard_get("face_detection_msg") - frame_id = self.blackboard_get("frame_id") - position_offset = self.blackboard_get("position_offset") - - # Transform the face detection result to the base frame. - if self.tf_lock.locked(): - return py_trees.common.Status.RUNNING - with self.tf_lock: - try: - target_position = self.tf_buffer.transform( - face_detection_msg.detected_mouth_center, frame_id - ) - self.logger.debug( - f"{self.name} [ComputeMoveToMouthPosition::update()] " - "face_detection.detected_mouth_center " - f"{face_detection_msg.detected_mouth_center} " - f"target_position {target_position}" - ) - except tf2.ExtrapolationException as exc: - # If the transform failed at the timestamp in the message, retry - # with the latest transform - self.logger.warning( - f"{self.name} [ComputeMoveToMouthPosition::update()] " - f"Transform failed at timestamp in message: {type(exc)}: {exc}. " - "Retrying with latest transform." - ) - - # pylint: disable=redefined-outer-name - # Okay to redefine exc since we don't need it in the second try. - - detected_mouth_center = copy.deepcopy( - face_detection_msg.detected_mouth_center - ) - detected_mouth_center.header.stamp = Time().to_msg() - try: - target_position = self.tf_buffer.transform( - detected_mouth_center, - frame_id, - ) - except tf2.ExtrapolationException as exc: - # If the transform doesn't exist, then return failure. 
- self.logger.error( - f"%{self.name} [ComputeMoveToMouthPosition::update()] " - f"Failed to transform face detection result: {type(exc)}: {exc}" - ) - return py_trees.common.Status.FAILURE - - # Write the target position to the blackboard - target_position.point.x += position_offset[0] - target_position.point.y += position_offset[1] - target_position.point.z += position_offset[2] - self.blackboard_set("target_position", target_position) - return py_trees.common.Status.SUCCESS diff --git a/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py b/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py index c9a210e4..a35a484a 100644 --- a/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py +++ b/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py @@ -14,10 +14,10 @@ # Third-party imports from geometry_msgs.msg import ( + Point, + Pose, + PoseStamped, Quaternion, - Transform, - TransformStamped, - Vector3, ) from overrides import override import py_trees @@ -89,6 +89,7 @@ def __init__( allowed_face_distance: Tuple[float, float] = (0.4, 1.5), face_detection_msg_timeout: float = 5.0, face_detection_timeout: float = 2.5, + plan_distance_from_mouth: float = 0.05, ): """ Initializes tree-specific parameters. @@ -121,14 +122,14 @@ def __init__( is older than these many seconds, don't use it. face_detection_timeout: If the robot has been trying to detect a face for more than these many seconds, timeout. + plan_distance_from_mouth: The distance (m) to plan from the mouth center. """ # pylint: disable=too-many-locals # These are all necessary due to all the behaviors MoveToMouth contains - # TODO: This class should override get_result and get_feedback to return - # perception failures, and tell the user it is attempting perception, - # while it is waiting for the user's face. + # TODO: Consider modifying feedback to return whether it is perceiving + # the face right now. Not crucial, but may be nice to have. # Initialize MoveToTree super().__init__(node) @@ -147,6 +148,7 @@ def __init__( self.allowed_face_distance = allowed_face_distance self.face_detection_msg_timeout = Duration(seconds=face_detection_msg_timeout) self.face_detection_timeout = face_detection_timeout + self.plan_distance_from_mouth = plan_distance_from_mouth self.face_detection_relative_blackboard_key = "face_detection" @@ -210,6 +212,8 @@ def create_tree( name=name, memory=True, children=[ + # NOTE: `get_result` relies on "FaceDetection" only being in the + # names of perception behaviors. 
py_trees.composites.Selector( name=name + " FaceDetectionSelector", memory=True, @@ -244,7 +248,8 @@ def create_tree( # Check if the face detection message is not stale # and close enough to the camera py_trees.behaviours.CheckBlackboardVariableValue( - name=name + " CheckFaceMsg", + name=name + + " CheckFaceDetectionMsg", check=py_trees.common.ComparisonExpression( variable=face_detection_absolute_key, value=FaceDetection(), @@ -276,10 +281,13 @@ def create_tree( children=[ # Get the detected face py_trees_ros.subscribers.ToBlackboard( - name=name + " GetFace", + name=name + + " GetFaceDetectionMsg", topic_name="~/face_detection", topic_type=FaceDetection, - qos_profile=py_trees_ros.utilities.qos_profile_unlatched(), + qos_profile=( + py_trees_ros.utilities.qos_profile_unlatched() + ), blackboard_variables={ face_detection_absolute_key: None, }, @@ -290,10 +298,10 @@ def create_tree( # Check whether the face is within the required distance py_trees.decorators.FailureIsRunning( name=name - + " CheckFaceWrapper", + + " CheckFaceDetectionWrapper", child=py_trees.behaviours.CheckBlackboardVariableValue( name=name - + " CheckFaceMsg", + + " CheckFaceDetectionMsg", check=py_trees.common.ComparisonExpression( variable=face_detection_absolute_key, value=FaceDetection(), @@ -311,11 +319,12 @@ def create_tree( # Convert `face_detection` to `mouth_position` in the # base frame. ApplyTransform( - name=name + " FaceDetectionToMouthPosition", + name=name + " ConvertFaceDetectionToBaseFrame", ns=name, inputs={ "stamped_msg": BlackboardKey( - self.face_detection_relative_blackboard_key + ".detected_mouth_center" + self.face_detection_relative_blackboard_key + + ".detected_mouth_center" ), "target_frame": "j2n6s200_link_base", }, @@ -328,15 +337,15 @@ def create_tree( # Convert `mouth_position` into a mouth pose using # a fixed quaternion CreatePoseStamped( - name=name + " MouthPositionToMouthPose", + name=name + " FaceDetectionToPose", ns=name, inputs={ "position": BlackboardKey("mouth_position"), "quaternion": [ - -0.0616284, - -0.0616284, - -0.704416, - 0.704416, + 0.0, + 0.0, + -0.7071068, + 0.7071068, ], # Facing away from wheelchair backrest }, outputs={ @@ -347,7 +356,7 @@ def create_tree( ), # Cache the mouth pose on the static TF tree SetStaticTransform( - name=name + " SetMouthPose", + name=name + " SetFaceDetectionPoseOnTF", ns=name, inputs={ "transform": BlackboardKey("mouth_pose"), @@ -359,11 +368,12 @@ def create_tree( # If there is a cached detected mouth pose on the static # transform tree, use it. GetTransform( - name=name + " GetCachedMouthPose", + name=name + " GetCachedFaceDetection", ns=name, inputs={ "target_frame": "j2n6s200_link_base", "source_frame": "mouth", + "new_type": PoseStamped, }, outputs={ "transform": BlackboardKey("mouth_pose"), @@ -389,44 +399,37 @@ def create_tree( }, ), # The goal constraint of the fork is the mouth pose, - # translated 5cm in the +y direction, rotated to face the mouth + # translated `self.plan_distance_from_mouth` in front of the mouth, + # and rotated to match the forkTip orientation. 
ApplyTransform( name=name + " ComputeMoveToMouthPose", ns=name, inputs={ - "stamped_msg": BlackboardKey("mouth_pose"), - "target_frame": None, - "transform": TransformStamped( - child_frame_id="j2n6s200_link_base", - transform=Transform( - translation=Vector3( - x=0.0, - y=-0.05, - z=0.0, - ), - rotation=Quaternion( - x=0.0, + "stamped_msg": PoseStamped( + header=Header( + stamp=Time().to_msg(), + frame_id="mouth", + ), + pose=Pose( + position=Point( + x=self.plan_distance_from_mouth, y=0.0, z=0.0, - w=1.0, + ), + orientation=Quaternion( + x=0.5, + y=-0.5, + z=-0.5, + w=0.5, ), ), ), + "target_frame": "j2n6s200_link_base", }, outputs={ "transformed_msg": BlackboardKey("goal_pose"), # PoseStamped }, ), - # Cache the mouth pose on the static TF tree - # TODO: remove! - SetStaticTransform( - name=name + " SetMouthPose", - ns=name, - inputs={ - "transform": BlackboardKey("goal_pose"), - "child_frame_id": "goal", - }, - ), # Retare the F/T sensor and set the F/T Thresholds pre_moveto_config( name=name + "PreMoveToConfig", @@ -523,3 +526,20 @@ def send_goal(self, tree: py_trees.trees.BehaviourTree, goal: object) -> bool: # Adds MoveToVisitor for Feedback return super().send_goal(tree, goal) + + @override + def get_result( + self, tree: py_trees.trees.BehaviourTree, action_type: type + ) -> object: + # Docstring copied by @override + result_msg = super().get_result(tree, action_type) + + # If the standard result determines there was a planning failure, + # we check whether that was actually a perception failure. + if result_msg.status == result_msg.STATUS_PLANNING_FAILED: + tip = tree.tip() + self._node.get_logger().info("Tip: %s" % tip) + if tip is not None and "FaceDetection" in tip.name: + result_msg.status = result_msg.STATUS_PERCEPTION_FAILED + + return result_msg From 12c2d2914e29096f0e23105865017721c86c82cd Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Sun, 12 Nov 2023 13:57:41 -0800 Subject: [PATCH 03/15] Linting import order --- .../behaviors/acquisition/compute_action_constraints.py | 4 ++-- .../ada_feeding/behaviors/acquisition/compute_food_frame.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ada_feeding/ada_feeding/behaviors/acquisition/compute_action_constraints.py b/ada_feeding/ada_feeding/behaviors/acquisition/compute_action_constraints.py index de478da1..d05976cc 100644 --- a/ada_feeding/ada_feeding/behaviors/acquisition/compute_action_constraints.py +++ b/ada_feeding/ada_feeding/behaviors/acquisition/compute_action_constraints.py @@ -27,6 +27,8 @@ from scipy.spatial.transform import Rotation as R # Local imports +from ada_feeding_msgs.msg import AcquisitionSchema +from ada_feeding_msgs.srv import AcquisitionSelect from ada_feeding.behaviors import BlackboardBehavior from ada_feeding.helpers import ( BlackboardKey, @@ -35,8 +37,6 @@ set_static_tf, ) from ada_feeding.idioms.pre_moveto_config import create_ft_thresh_request -from ada_feeding_msgs.msg import AcquisitionSchema -from ada_feeding_msgs.srv import AcquisitionSelect class ComputeActionConstraints(BlackboardBehavior): diff --git a/ada_feeding/ada_feeding/behaviors/acquisition/compute_food_frame.py b/ada_feeding/ada_feeding/behaviors/acquisition/compute_food_frame.py index 6d12db1e..450ea224 100644 --- a/ada_feeding/ada_feeding/behaviors/acquisition/compute_food_frame.py +++ b/ada_feeding/ada_feeding/behaviors/acquisition/compute_food_frame.py @@ -21,6 +21,7 @@ # Local imports from ada_feeding_msgs.msg import Mask from ada_feeding_msgs.srv import AcquisitionSelect +from 
ada_feeding_perception.helpers import ros_msg_to_cv2_image from ada_feeding.helpers import ( BlackboardKey, quat_between_vectors, @@ -28,7 +29,6 @@ set_static_tf, ) from ada_feeding.behaviors import BlackboardBehavior -from ada_feeding_perception.helpers import ros_msg_to_cv2_image class ComputeFoodFrame(BlackboardBehavior): From 905469702ac7e041f0f4864660dca9dfb9595069 Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Sun, 12 Nov 2023 13:57:56 -0800 Subject: [PATCH 04/15] Linting allow lines of 120 chars --- .pylintrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pylintrc b/.pylintrc index 5c67333f..d33bd642 100644 --- a/.pylintrc +++ b/.pylintrc @@ -355,7 +355,7 @@ indent-after-paren=4 indent-string=' ' # Maximum number of characters on a single line. -max-line-length=100 +max-line-length=120 # Maximum number of lines in a module. max-module-lines=1000 From ba7b451a407d0e5dabc9e33f07ad67b9042a072b Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Sun, 12 Nov 2023 13:58:20 -0800 Subject: [PATCH 05/15] Added MoveToMouth message --- ada_feeding_msgs/action/MoveToMouth.action | 38 ++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 ada_feeding_msgs/action/MoveToMouth.action diff --git a/ada_feeding_msgs/action/MoveToMouth.action b/ada_feeding_msgs/action/MoveToMouth.action new file mode 100644 index 00000000..e6e2e531 --- /dev/null +++ b/ada_feeding_msgs/action/MoveToMouth.action @@ -0,0 +1,38 @@ +# The interface for a bite transfer action that takes in a detected mouth +# center to move to, returns whether or not it succeeded, and gives progress +# in terms of how long it has been planning for and/or how much farther it has +# to move. + +# A flag that the client can use to specify whether the included face_detection +# msg is accurate or not. For example, if the robot moved since collecting that +# face_detection msg, then this should be set to false, indicating that the action +# should re-get the results of face detection. Note that it is possible that this +# is true despite the face_detection_msg being stale (e.g., if the RobotMotion page +# on the app gets refreshed). Hence, the action server should also implement a check +# for whether the below message is stale. 
+bool use_face_detection_msg + +# The output of face detection +ada_feeding_msgs/FaceDetection face_detection +--- +# Possible return statuses +uint8 STATUS_SUCCESS=0 +uint8 STATUS_PLANNING_FAILED=1 +uint8 STATUS_MOTION_FAILED=2 +uint8 STATUS_CANCELED=3 +uint8 STATUS_PERCEPTION_FAILED=4 +uint8 STATUS_UNKNOWN=99 + +# Whether the planning and motion succeeded and if not, why +uint8 status +--- +# Whether the robot is currently planning or moving (required) +bool is_planning +# The amount of time the robot has spent in planning (required) +builtin_interfaces/Duration planning_time +# The amount of time the robot has spent in motion (required if `is_planning == false`) +builtin_interfaces/Duration motion_time +# How far the robot initially was from the goal (required if `is_planning == false`) +float64 motion_initial_distance +# How far the robot currently is from the goal (required if `is_planning == false`) +float64 motion_curr_distance From 53e4d71e4c8d92405592e54944eca5096c76fb6d Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Mon, 13 Nov 2023 09:39:23 -0800 Subject: [PATCH 06/15] Removed use_face_detection_msg field from MoveToMouth --- .../ada_feeding/trees/move_to_mouth_tree.py | 48 ++++--------------- ada_feeding_msgs/action/MoveToMouth.action | 9 ---- 2 files changed, 10 insertions(+), 47 deletions(-) diff --git a/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py b/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py index a35a484a..b9c39cee 100644 --- a/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py +++ b/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py @@ -60,8 +60,7 @@ class MoveToMouthTree(MoveToTree): A behaviour tree that gets the user's mouth pose and moves the robot to it. Note that to get the mouth pose, the robot executes these in-order until one succeeds: - 1. If `use_face_detection_msg` and the face detection message included in the - goal is not stale, use it. + 1. If the face detection message included in the goal is not stale, use it. 2. Else, attempt to detect a face from the robot's current pose and use that. 3. Else, if there is a cached detected mouth pose on the static transform tree, use it. @@ -203,9 +202,6 @@ def create_tree( face_detection_absolute_key = Blackboard.separator.join( [name, self.face_detection_relative_blackboard_key] ) - use_face_detection_msg_absolute_key = Blackboard.separator.join( - [name, "use_face_detection_msg"] - ) # Root Sequence root_seq = py_trees.composites.Sequence( @@ -228,35 +224,15 @@ def create_tree( name=name + " FaceDetectionSelector", memory=True, children=[ - # If the client said to use the face detection message and - # it is not stale, use it. 
- py_trees.composites.Sequence( - name=name, - memory=True, - children=[ - # Check if the `use_face_detection_msg` field of the - # goal was set to True - py_trees.behaviours.CheckBlackboardVariableValue( - name=name - + " CheckUseFaceDetectionMsg", - check=py_trees.common.ComparisonExpression( - variable=use_face_detection_msg_absolute_key, - value=True, - operator=operator.eq, - ), - ), - # Check if the face detection message is not stale - # and close enough to the camera - py_trees.behaviours.CheckBlackboardVariableValue( - name=name - + " CheckFaceDetectionMsg", - check=py_trees.common.ComparisonExpression( - variable=face_detection_absolute_key, - value=FaceDetection(), - operator=self.check_face_msg, - ), - ), - ], + # Check if the face detection message is not stale + # and close enough to the camera + py_trees.behaviours.CheckBlackboardVariableValue( + name=name + " CheckFaceDetectionMsg", + check=py_trees.common.ComparisonExpression( + variable=face_detection_absolute_key, + value=FaceDetection(), + operator=self.check_face_msg, + ), ), # If the above didn't work, turn face detection on until # a face is detected, or until timeout @@ -514,10 +490,6 @@ def send_goal(self, tree: py_trees.trees.BehaviourTree, goal: object) -> bool: # Write tree inputs to blackboard name = tree.root.name blackboard = py_trees.blackboard.Client(name=name, namespace=name) - blackboard.register_key( - key="use_face_detection_msg", access=py_trees.common.Access.WRITE - ) - blackboard.use_face_detection_msg = goal.use_face_detection_msg blackboard.register_key( key=self.face_detection_relative_blackboard_key, access=py_trees.common.Access.WRITE, diff --git a/ada_feeding_msgs/action/MoveToMouth.action b/ada_feeding_msgs/action/MoveToMouth.action index e6e2e531..d36fe87f 100644 --- a/ada_feeding_msgs/action/MoveToMouth.action +++ b/ada_feeding_msgs/action/MoveToMouth.action @@ -3,15 +3,6 @@ # in terms of how long it has been planning for and/or how much farther it has # to move. -# A flag that the client can use to specify whether the included face_detection -# msg is accurate or not. For example, if the robot moved since collecting that -# face_detection msg, then this should be set to false, indicating that the action -# should re-get the results of face detection. Note that it is possible that this -# is true despite the face_detection_msg being stale (e.g., if the RobotMotion page -# on the app gets refreshed). Hence, the action server should also implement a check -# for whether the below message is stale. -bool use_face_detection_msg - # The output of face detection ada_feeding_msgs/FaceDetection face_detection --- From 7b9d9bfc01495825f7561676f300589ab95faea3 Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Mon, 13 Nov 2023 09:39:41 -0800 Subject: [PATCH 07/15] Fixed face detection encoding --- .../ada_feeding_perception/face_detection.py | 14 ++++++++------ .../ada_feeding_perception/helpers.py | 9 +++++++-- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/ada_feeding_perception/ada_feeding_perception/face_detection.py b/ada_feeding_perception/ada_feeding_perception/face_detection.py index edb0af38..26366b7d 100755 --- a/ada_feeding_perception/ada_feeding_perception/face_detection.py +++ b/ada_feeding_perception/ada_feeding_perception/face_detection.py @@ -345,10 +345,10 @@ def detect_largest_face( # This function is not too complex, but it does have a lot of local variables. 
# Decode the image - image_rgb = cv2.imdecode( + image_bgr = cv2.imdecode( np.frombuffer(img_msg.data, np.uint8), cv2.IMREAD_COLOR ) - image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY) + image_gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY) # Detect faces using the haarcascade classifier on the grayscale image # NOTE: This detector will launch multiple threads to detect faces in @@ -375,11 +375,11 @@ def detect_largest_face( # Annotate the image with the face. See below for the landmark indices: # https://pyimagesearch.com/2017/04/03/facial-landmarks-dlib-opencv-python/ if publish_annotated_img: - cv2.rectangle(image_rgb, (x, y), (x + w, y + h), (255, 255, 255), 2) + cv2.rectangle(image_bgr, (x, y), (x + w, y + h), (255, 255, 255), 2) for j in range(48, 68): landmark_x, landmark_y = landmarks[i][0][j] cv2.circle( - image_rgb, + image_bgr, (int(landmark_x), int(landmark_y)), 1, (0, 255, 0), @@ -388,7 +388,7 @@ def detect_largest_face( # Annotate the image with a red rectangle around largest face (x, y, w, h) = faces[largest_face[1]] - cv2.rectangle(image_rgb, (x, y), (x + w, y + h), (0, 0, 255), 2) + cv2.rectangle(image_bgr, (x, y), (x + w, y + h), (0, 0, 255), 2) # Find stomion (mouth center) in image img_mouth_center = landmarks[largest_face[1]][0][66] @@ -396,7 +396,9 @@ def detect_largest_face( # Publish annotated image if publish_annotated_img: - annotated_msg = cv2_image_to_ros_msg(image_rgb, compress=False) + annotated_msg = cv2_image_to_ros_msg( + image_bgr, compress=False, encoding="bgr8" + ) self.publisher_image.publish(annotated_msg) return is_face_detected, img_mouth_center, img_mouth_points diff --git a/ada_feeding_perception/ada_feeding_perception/helpers.py b/ada_feeding_perception/ada_feeding_perception/helpers.py index 1b40547b..c1a89226 100644 --- a/ada_feeding_perception/ada_feeding_perception/helpers.py +++ b/ada_feeding_perception/ada_feeding_perception/helpers.py @@ -49,7 +49,10 @@ def ros_msg_to_cv2_image( def cv2_image_to_ros_msg( - image: npt.NDArray, compress: bool, bridge: Optional[CvBridge] = None + image: npt.NDArray, + compress: bool, + bridge: Optional[CvBridge] = None, + encoding: str = "passthrough", ) -> Union[Image, CompressedImage]: """ Convert a cv2 image to a ROS Image or CompressedImage message. Note that this @@ -67,6 +70,8 @@ def cv2_image_to_ros_msg( bridge: the CvBridge to use for the conversion. This is only used if `msg` is a ROS Image message. If `bridge` is None, a new CvBridge will be created. + encoding: the encoding to use for the ROS Image message. This is only used + if `compress` is False. 
""" if bridge is None: bridge = CvBridge() @@ -79,7 +84,7 @@ def cv2_image_to_ros_msg( ) raise RuntimeError("Failed to compress image") # If we get here, we're not compressing the image - return bridge.cv2_to_imgmsg(image, encoding="passthrough") + return bridge.cv2_to_imgmsg(image, encoding=encoding) def get_img_msg_type( From a45836099b7447673b7a00cb21ef9a1b126acc33 Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Mon, 13 Nov 2023 11:35:31 -0800 Subject: [PATCH 08/15] Added wait to MoveToMouth for mouth transform to register --- ada_feeding/ada_feeding/trees/move_to_mouth_tree.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py b/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py index b9c39cee..ee31ea4c 100644 --- a/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py +++ b/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py @@ -45,7 +45,7 @@ CreatePoseStamped, ) from ada_feeding.helpers import BlackboardKey -from ada_feeding.idioms import pre_moveto_config, scoped_behavior +from ada_feeding.idioms import pre_moveto_config, scoped_behavior, wait_for_secs from ada_feeding.idioms.bite_transfer import ( get_toggle_collision_object_behavior, get_toggle_face_detection_behavior, @@ -339,6 +339,12 @@ def create_tree( "child_frame_id": "mouth", }, ), + # Add a slight delay to allow the static transform + # to be registered + wait_for_secs( + name=name + " FaceDetectionWaitForStaticTransform", + secs=0.25, + ), ], ), # If there is a cached detected mouth pose on the static From 514d5ec055a58dced5989389eb7093254a550fd9 Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Mon, 13 Nov 2023 11:35:51 -0800 Subject: [PATCH 09/15] Added MoveFromMouthToStagingConfiguration --- .../ada_feeding/trees/move_from_mouth_tree.py | 14 ++++++--- .../config/ada_feeding_action_servers.yaml | 30 +++++++++++++++++++ 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/ada_feeding/ada_feeding/trees/move_from_mouth_tree.py b/ada_feeding/ada_feeding/trees/move_from_mouth_tree.py index 9263cdad..c79d2a2c 100644 --- a/ada_feeding/ada_feeding/trees/move_from_mouth_tree.py +++ b/ada_feeding/ada_feeding/trees/move_from_mouth_tree.py @@ -49,7 +49,7 @@ def __init__( node: Node, staging_configuration_position: Tuple[float, float, float], staging_configuration_quat_xyzw: Tuple[float, float, float, float], - end_configuration: List[float], + end_configuration: Optional[List[float]] = None, staging_configuration_tolerance: float = 0.001, end_configuration_tolerance: float = 0.001, orientation_constraint_to_staging_configuration_quaternion: Optional[ @@ -131,7 +131,8 @@ def __init__( self.staging_configuration_position = staging_configuration_position self.staging_configuration_quat_xyzw = staging_configuration_quat_xyzw self.end_configuration = end_configuration - assert len(self.end_configuration) == 6, "Must provide 6 joint positions" + if self.end_configuration is not None: + assert len(self.end_configuration) == 6, "Must provide 6 joint positions" self.staging_configuration_tolerance = staging_configuration_tolerance self.end_configuration_tolerance = end_configuration_tolerance self.orientation_constraint_to_staging_configuration_quaternion = ( @@ -314,6 +315,12 @@ def create_tree( False, ), ), + ], + ) + + # Move to the end configuration if it is provided + if self.end_configuration is not None: + root_seq.children.append( # Add the wall in front of the wheelchair to prevent the arm from # Moving closer to the user than it currently is. 
scoped_behavior( @@ -366,8 +373,7 @@ def create_tree( in_front_of_wheelchair_wall_id, ), ), - ], - ) + ) ### Return tree return py_trees.trees.BehaviourTree(root_seq) diff --git a/ada_feeding/config/ada_feeding_action_servers.yaml b/ada_feeding/config/ada_feeding_action_servers.yaml index a93ea6dd..d790969b 100644 --- a/ada_feeding/config/ada_feeding_action_servers.yaml +++ b/ada_feeding/config/ada_feeding_action_servers.yaml @@ -11,6 +11,7 @@ ada_feeding_action_servers: - AcquireFood - MoveToStagingConfiguration - MoveToMouth + - MoveFromMouthToStagingConfiguration - MoveFromMouthToAbovePlate - MoveFromMouthToRestingPosition - MoveToRestingPosition @@ -143,6 +144,35 @@ ada_feeding_action_servers: face_detection_timeout: 5.0 # sec tick_rate: 10 # Hz, optional, default: 30 + # Parameters for the MoveFromMouthToStagingConfiguration action. + # No need for orientation path constraints when moving back to the staging configuration + # because presumably there is no food on the fork. + MoveFromMouthToStagingConfiguration: # required + action_type: ada_feeding_msgs.action.MoveTo # required + tree_class: ada_feeding.trees.MoveFromMouthTree # required + tree_kws: # optional + - staging_configuration_position + - staging_configuration_quat_xyzw + - orientation_constraint_to_staging_configuration_quaternion + - orientation_constraint_to_staging_configuration_tolerances + - max_velocity_scaling_factor_to_staging_configuration + - cartesian_jump_threshold_to_staging_configuration + - cartesian_max_step_to_staging_configuration + tree_kwargs: # optional + staging_configuration_position: + - 0.28323 # x + - 0.07289 # y + - 0.69509 # z + staging_configuration_quat_xyzw: + - 0.00472 # x + - 0.69757 # y + - 0.71645 # z + - -0.00811 # w + max_velocity_scaling_factor_to_staging_configuration: 0.4 # optional in (0.0, 1.0], default: 0.1 + cartesian_jump_threshold_to_staging_configuration: 2.0 # If a jump for a joint across consecutive IK solutions is more than this factor greater than the average, it is rejected + cartesian_max_step_to_staging_configuration: 0.01 # m + tick_rate: 10 # Hz, optional, default: 30 + # Parameters for the MoveFromMouthToAbovePlate action. # No need for orientation path constraints when moving above the plate # because presumably there is no food on the fork. 
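For reference, the YAML above maps onto the MoveFromMouthTree constructor roughly as follows. This is a minimal sketch assuming the action server's generic factory simply instantiates `tree_class` with `tree_kwargs`; the direct call below (and the `node` handle it uses) is illustrative, not the actual factory code:

from ada_feeding.trees import MoveFromMouthTree

# Illustrative construction mirroring the YAML config; in practice the action
# server resolves tree_class and tree_kwargs from the parameter file.
tree_wrapper = MoveFromMouthTree(
    node,  # the rclpy Node backing the action server
    staging_configuration_position=(0.28323, 0.07289, 0.69509),
    staging_configuration_quat_xyzw=(0.00472, 0.69757, 0.71645, -0.00811),
    # end_configuration is omitted: with this patch it defaults to None, so the
    # tree stops at the staging configuration instead of continuing to a joint goal.
    max_velocity_scaling_factor_to_staging_configuration=0.4,
    cartesian_jump_threshold_to_staging_configuration=2.0,
    cartesian_max_step_to_staging_configuration=0.01,
)

Once the servers are launched, the new action can be exercised the same way as the others, e.g.: ros2 action send_goal /MoveFromMouthToStagingConfiguration ada_feeding_msgs/action/MoveTo "{}" --feedback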
From 643027684dc701544e27f1db9ff7d034fc9fb6c0 Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Mon, 13 Nov 2023 11:36:02 -0800 Subject: [PATCH 10/15] Fixed log levels --- ada_feeding/ada_feeding/behaviors/ros_utility.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/ada_feeding/ada_feeding/behaviors/ros_utility.py b/ada_feeding/ada_feeding/behaviors/ros_utility.py index dd6b7d63..1b0b96c8 100644 --- a/ada_feeding/ada_feeding/behaviors/ros_utility.py +++ b/ada_feeding/ada_feeding/behaviors/ros_utility.py @@ -304,7 +304,7 @@ def update(self) -> py_trees.common.Status: return py_trees.common.Status.FAILURE transform = self.blackboard_get("transform") - self.node.get_logger().info(f"Setting static transform: {transform}") + self.node.get_logger().debug(f"Setting static transform: {transform}") if isinstance(transform, PoseStamped): # Convert PoseStamped to TransformStamped transform = TransformStamped( @@ -448,7 +448,6 @@ def update(self) -> py_trees.common.Status: if transformed_msg is None: # Apply the fixed transform transform_matrix = ros2_numpy.numpify(transform.transform) - self.node.get_logger().info(f"Transform Matrix: {transform_matrix}") header = Header( stamp=stamped_msg.header.stamp, frame_id=transform.child_frame_id, @@ -465,7 +464,6 @@ def update(self) -> py_trees.common.Status: ) elif isinstance(stamped_msg, PoseStamped): stamped_matrix = ros2_numpy.numpify(stamped_msg.pose) - self.node.get_logger().info(f"Stamped Matrix: {stamped_matrix}") transformed_msg = PoseStamped( header=header, pose=ros2_numpy.msgify( @@ -473,7 +471,6 @@ def update(self) -> py_trees.common.Status: np.matmul(transform_matrix, stamped_matrix), ), ) - self.node.get_logger().info(f"Transformed Pose: {transformed_msg}") elif isinstance(stamped_msg, Vector3Stamped): stamped_vec = ros2_numpy.numpify(stamped_msg.vector, hom=True).reshape( (-1, 1) From 5fae9978a29d0d66b28299d293dfc76f5067a1b7 Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Mon, 13 Nov 2023 11:36:12 -0800 Subject: [PATCH 11/15] Updated README --- ada_feeding/README.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ada_feeding/README.md b/ada_feeding/README.md index 50a8765b..5f1b5a94 100644 --- a/ada_feeding/README.md +++ b/ada_feeding/README.md @@ -33,10 +33,12 @@ This code has been developed and tested with the Kinova JACO Gen2 Arm, on comput 1. `ros2 action send_goal /MoveAbovePlate ada_feeding_msgs/action/MoveTo "{}" --feedback` 2. `ros2 action send_goal /AcquireFood ada_feeding_msgs/action/AcquireFood "{header: {stamp: {sec: 0, nanosec: 0}, frame_id: ''}, detected_food: {roi: {x_offset: 0, y_offset: 0, height: 0, width: 0, do_rectify: false}, mask: {header: {stamp: {sec: 0, nanosec: 0}, frame_id: ''}, format: '', data: []}, item_id: '', confidence: 0.0}}" --feedback` 3. `ros2 action send_goal /MoveToRestingPosition ada_feeding_msgs/action/MoveTo "{}" --feedback` - 4. `ros2 action send_goal /MoveToMouth ada_feeding_msgs/action/MoveTo "{}" --feedback` - 5. `ros2 action send_goal /MoveFromMouthToRestingPosition ada_feeding_msgs/action/MoveTo "{}" --feedback` - 6. `ros2 action send_goal /MoveFromMouthToAbovePlate ada_feeding_msgs/action/MoveTo "{}" --feedback` - 7. `ros2 action send_goal /MoveToStowLocation ada_feeding_msgs/action/MoveTo "{}" --feedback` + 4. `ros2 action send_goal /MoveToStagingConfiguration ada_feeding_msgs/action/MoveTo "{}" --feedback`` + 5. `ros2 action send_goal /MoveToMouth ada_feeding_msgs/action/MoveToMouth "{}" --feedback` + 6. 
`ros2 action send_goal /MoveFromMouthToRestingPosition ada_feeding_msgs/action/MoveTo "{}" --feedback` + 7. `ros2 action send_goal /MoveFromMouthToAbovePlate ada_feeding_msgs/action/MoveTo "{}" --feedback` + 8. `ros2 action send_goal /MoveToStowLocation ada_feeding_msgs/action/MoveTo "{}" --feedback` + 9. `ros2 action send_goal /MoveFromMouthToStagingConfiguration ada_feeding_msgs/action/MoveTo "{}" --feedback` 2. Test the individual actions with the web app: 1. Launch the web app ([instructions here](https://github.com/personalrobotics/feeding_web_interface/tree/main/feedingwebapp)) 2. Go through the web app, ensure the expected actions happen on the robot. From 22bb0356a336dcdbdd66a0793e50582be9082c2c Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Mon, 13 Nov 2023 17:57:13 -0800 Subject: [PATCH 12/15] Updated README --- ada_feeding/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ada_feeding/README.md b/ada_feeding/README.md index 5f1b5a94..73c2e508 100644 --- a/ada_feeding/README.md +++ b/ada_feeding/README.md @@ -33,7 +33,7 @@ This code has been developed and tested with the Kinova JACO Gen2 Arm, on comput 1. `ros2 action send_goal /MoveAbovePlate ada_feeding_msgs/action/MoveTo "{}" --feedback` 2. `ros2 action send_goal /AcquireFood ada_feeding_msgs/action/AcquireFood "{header: {stamp: {sec: 0, nanosec: 0}, frame_id: ''}, detected_food: {roi: {x_offset: 0, y_offset: 0, height: 0, width: 0, do_rectify: false}, mask: {header: {stamp: {sec: 0, nanosec: 0}, frame_id: ''}, format: '', data: []}, item_id: '', confidence: 0.0}}" --feedback` 3. `ros2 action send_goal /MoveToRestingPosition ada_feeding_msgs/action/MoveTo "{}" --feedback` - 4. `ros2 action send_goal /MoveToStagingConfiguration ada_feeding_msgs/action/MoveTo "{}" --feedback`` + 4. `ros2 action send_goal /MoveToStagingConfiguration ada_feeding_msgs/action/MoveTo "{}" --feedback` 5. `ros2 action send_goal /MoveToMouth ada_feeding_msgs/action/MoveToMouth "{}" --feedback` 6. `ros2 action send_goal /MoveFromMouthToRestingPosition ada_feeding_msgs/action/MoveTo "{}" --feedback` 7. `ros2 action send_goal /MoveFromMouthToAbovePlate ada_feeding_msgs/action/MoveTo "{}" --feedback` From 223d354075f774caf92527773bec55881ffb1c2d Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Mon, 13 Nov 2023 17:57:35 -0800 Subject: [PATCH 13/15] Change info logs to debug --- .../behaviors/moveit2/modify_collision_object.py | 6 +++--- .../behaviors/moveit2/toggle_collision_object.py | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/modify_collision_object.py b/ada_feeding/ada_feeding/behaviors/moveit2/modify_collision_object.py index ca774b55..dac5cc4e 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/modify_collision_object.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/modify_collision_object.py @@ -105,7 +105,7 @@ def setup(self, **kwargs): # It is okay for attributes in behaviors to be # defined in the setup / initialise functions. 
- self.logger.info(f"{self.name} [ModifyCollisionObject::setup()]") + self.logger.debug(f"{self.name} [ModifyCollisionObject::setup()]") # Get Node from Kwargs self.node = kwargs["node"] @@ -120,7 +120,7 @@ def setup(self, **kwargs): def initialise(self): # Docstring copied from @override - self.logger.info(f"{self.name} [ModifyCollisionObject::initialise()]") + self.logger.debug(f"{self.name} [ModifyCollisionObject::initialise()]") # Check that the right parameters have been passed operation = self.blackboard_get("operation") @@ -155,7 +155,7 @@ def initialise(self): def update(self) -> py_trees.common.Status: # Docstring copied from @override - self.logger.info(f"{self.name} [ModifyCollisionObject::update()]") + self.logger.debug(f"{self.name} [ModifyCollisionObject::update()]") # Get the blackboard inputs for all operations if not self.blackboard_exists(["operation", "collision_object_id"]): diff --git a/ada_feeding/ada_feeding/behaviors/moveit2/toggle_collision_object.py b/ada_feeding/ada_feeding/behaviors/moveit2/toggle_collision_object.py index c60f24a8..e453371b 100644 --- a/ada_feeding/ada_feeding/behaviors/moveit2/toggle_collision_object.py +++ b/ada_feeding/ada_feeding/behaviors/moveit2/toggle_collision_object.py @@ -63,7 +63,7 @@ def setup(self, **kwargs): # It is okay for attributes in behaviors to be # defined in the setup / initialise functions. - self.logger.info(f"{self.name} [ToggleCollisionObject::setup()]") + self.logger.debug(f"{self.name} [ToggleCollisionObject::setup()]") # Get Node from Kwargs self.node = kwargs["node"] @@ -111,7 +111,7 @@ def update(self) -> py_trees.common.Status: return py_trees.common.Status.FAILURE collision_object_ids = self.blackboard_get("collision_object_ids") - self.logger.info(f"{self.name} [ToggleCollisionObject::update()]") + self.logger.debug(f"{self.name} [ToggleCollisionObject::update()]") # (Dis)allow collisions between the robot and the collision object if self.service_future is None: # Check if we have processed all collision objects @@ -129,7 +129,7 @@ def update(self) -> py_trees.common.Status: collision_object_id = collision_object_ids[ self.curr_collision_object_id_i ] - self.logger.info( + self.logger.debug( f"{self.name} [ToggleCollisionObject::update()] " f"collision_object_id: {collision_object_id}" ) @@ -150,7 +150,7 @@ def update(self) -> py_trees.common.Status: with self.moveit2_lock: succ = self.moveit2.process_allow_collision_future(self.service_future) # Process success/failure - self.logger.info( + self.logger.debug( f"{self.name} [ToggleCollisionObject::update()] " f"service_future: {succ}" ) From ca519b30371de4c94258152fbdb13c49881e5e96 Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Mon, 13 Nov 2023 17:58:08 -0800 Subject: [PATCH 14/15] Reduced max distance for face to 1.25m --- ada_feeding/ada_feeding/trees/move_to_mouth_tree.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py b/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py index ee31ea4c..e3ed9011 100644 --- a/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py +++ b/ada_feeding/ada_feeding/trees/move_to_mouth_tree.py @@ -9,7 +9,6 @@ # that they have similar code. 
# Standard imports -import operator from typing import Tuple # Third-party imports @@ -85,7 +84,7 @@ def __init__( wheelchair_collision_object_id: str = "wheelchair_collision", force_threshold: float = 4.0, torque_threshold: float = 4.0, - allowed_face_distance: Tuple[float, float] = (0.4, 1.5), + allowed_face_distance: Tuple[float, float] = (0.4, 1.25), face_detection_msg_timeout: float = 5.0, face_detection_timeout: float = 2.5, plan_distance_from_mouth: float = 0.05, @@ -516,7 +515,6 @@ def get_result( # we check whether that was actually a perception failure. if result_msg.status == result_msg.STATUS_PLANNING_FAILED: tip = tree.tip() - self._node.get_logger().info("Tip: %s" % tip) if tip is not None and "FaceDetection" in tip.name: result_msg.status = result_msg.STATUS_PERCEPTION_FAILED From 10d62011b9170d4fd7802236352ceddbb1221034 Mon Sep 17 00:00:00 2001 From: Amal Nanavati Date: Mon, 13 Nov 2023 17:59:12 -0800 Subject: [PATCH 15/15] Updated food detection - Remove points with depth 0 - Use already-filtered octomap depth image - Only annotate images with detected face in RGB *and* depth --- .../ada_feeding_perception/face_detection.py | 102 +++++++++++------- .../launch/ada_feeding_perception.launch.py | 17 ++- 2 files changed, 79 insertions(+), 40 deletions(-) diff --git a/ada_feeding_perception/ada_feeding_perception/face_detection.py b/ada_feeding_perception/ada_feeding_perception/face_detection.py index 26366b7d..11ec0b20 100755 --- a/ada_feeding_perception/ada_feeding_perception/face_detection.py +++ b/ada_feeding_perception/ada_feeding_perception/face_detection.py @@ -16,6 +16,7 @@ from cv_bridge import CvBridge from geometry_msgs.msg import Point import numpy as np +import numpy.typing as npt import rclpy from rclpy.callback_groups import MutuallyExclusiveCallbackGroup from rclpy.node import Node @@ -322,16 +323,14 @@ def camera_callback(self, msg: Union[CompressedImage, Image]): self.latest_img_msg = msg def detect_largest_face( - self, img_msg: Union[CompressedImage, Image], publish_annotated_img: bool = True - ) -> Tuple[bool, Tuple[int, int], np.ndarray]: + self, image_bgr: npt.NDArray + ) -> Tuple[bool, Tuple[int, int], npt.NDArray, Tuple[float, float, float, float]]: """ Detect the largest face in an RGB image. Parameters ---------- - img_msg: The RGB image to detect faces in. - publish_annotated_img: Whether to publish an annotated image with the - detected faces and mouth points. + image_bgr: The OpenCV image to detect faces in. Returns ------- is_face_detected: whether a face was detected in the image. mouth_center: The (u,v) coordinates of the stomion of the largest face detected in the image. mouth_points: A list of (u,v) coordinates of the facial landmarks of the mouth. + face_bbox: The bounding box of the largest face detected in the image, + in the form (x, y, w, h). """ # pylint: disable=too-many-locals # This function is not too complex, but it does have a lot of local variables. 
# Decode the image - image_bgr = cv2.imdecode( - np.frombuffer(img_msg.data, np.uint8), cv2.IMREAD_COLOR - ) image_gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY) # Detect faces using the haarcascade classifier on the grayscale image @@ -357,6 +355,7 @@ def detect_largest_face( is_face_detected = len(faces) > 0 img_mouth_center = (0, 0) img_mouth_points = [] + face_bbox = (0, 0, 0, 0) if is_face_detected: # Detect landmarks (a 3d list) on image @@ -366,42 +365,21 @@ def detect_largest_face( # Add face markers to the image and find largest face largest_face = [0, 0] # [area (px^2), index] for i, face in enumerate(faces): - (x, y, w, h) = face + (_, _, w, h) = face # Update the largest face if w * h > largest_face[0]: largest_face = [w * h, i] - # Annotate the image with the face. See below for the landmark indices: - # https://pyimagesearch.com/2017/04/03/facial-landmarks-dlib-opencv-python/ - if publish_annotated_img: - cv2.rectangle(image_bgr, (x, y), (x + w, y + h), (255, 255, 255), 2) - for j in range(48, 68): - landmark_x, landmark_y = landmarks[i][0][j] - cv2.circle( - image_bgr, - (int(landmark_x), int(landmark_y)), - 1, - (0, 255, 0), - 5, - ) - - # Annotate the image with a red rectangle around largest face - (x, y, w, h) = faces[largest_face[1]] - cv2.rectangle(image_bgr, (x, y), (x + w, y + h), (0, 0, 255), 2) + # Get the largest face + face_bbox = faces[largest_face[1]] - # Find stomion (mouth center) in image + # Find stomion (mouth center) in image. See below for the landmark indices: + # https://pyimagesearch.com/2017/04/03/facial-landmarks-dlib-opencv-python/ img_mouth_center = landmarks[largest_face[1]][0][66] img_mouth_points = landmarks[largest_face[1]][0][48:68] - # Publish annotated image - if publish_annotated_img: - annotated_msg = cv2_image_to_ros_msg( - image_bgr, compress=False, encoding="bgr8" - ) - self.publisher_image.publish(annotated_msg) - - return is_face_detected, img_mouth_center, img_mouth_points + return is_face_detected, img_mouth_center, img_mouth_points, face_bbox def get_mouth_depth( self, rgb_msg: Union[CompressedImage, Image], face_points: np.ndarray @@ -460,12 +438,14 @@ def get_mouth_depth( for point in face_points: x, y = int(point[0]), int(point[1]) if 0 <= x < image_depth.shape[1] and 0 <= y < image_depth.shape[0]: - num_points_in_frame += 1 - depth_sum += image_depth[y][x] + # Points with depth 0mm are invalid (typically out-of-range) + if image_depth[y][x] != 0: + num_points_in_frame += 1 + depth_sum += image_depth[y][x] if num_points_in_frame < 0.5 * len(face_points): self.get_logger().warn( "Detected face in the RGB image, but majority of mouth points " - "were outside the frame of the depth image. Ignoring this face." + "were invalid. Ignoring this face." 
) return False, 0 @@ -536,11 +516,15 @@ def run(self) -> None: continue # Detect the largest face in the RGB image + image_bgr = cv2.imdecode( + np.frombuffer(rgb_msg.data, np.uint8), cv2.IMREAD_COLOR + ) ( is_face_detected, img_mouth_center, img_mouth_points, - ) = self.detect_largest_face(rgb_msg) + face_bbox, + ) = self.detect_largest_face(image_bgr) if is_face_detected: # Get the depth of the mouth @@ -558,10 +542,50 @@ def run(self) -> None: else: is_face_detected = False + # Annotate the image with the face + if is_face_detected: + self.annotate_image(image_bgr, img_mouth_points, face_bbox) + + # Publish the face detection image + self.publisher_image.publish( + cv2_image_to_ros_msg(image_bgr, compress=False, encoding="bgr8") + ) + # Publish 3d point face_detection_msg.is_face_detected = is_face_detected self.publisher_results.publish(face_detection_msg) + def annotate_image( + self, + image_bgr: npt.NDArray, + img_mouth_points: npt.NDArray, + face_bbox: Tuple[int, int, int, int], + ) -> None: + """ + Annotate the image with the face and mouth center. + + Parameters + ---------- + image_bgr: The OpenCV image to annotate. + img_mouth_points: A list of (u,v) coordinates of the facial landmarks of the mouth. + face_bbox: The bounding box of the largest face detected in the image, + in the form (x, y, w, h). + """ + # Annotate the image with a red rectangle around largest face + x, y, w, h = face_bbox + cv2.rectangle(image_bgr, (x, y), (x + w, y + h), (0, 0, 255), 2) + + # Annotate the image with the face. + cv2.rectangle(image_bgr, (x, y), (x + w, y + h), (255, 255, 255), 2) + for landmark_x, landmark_y in img_mouth_points: + cv2.circle( + image_bgr, + (int(landmark_x), int(landmark_y)), + 1, + (0, 255, 0), + 5, + ) + def main(args=None): """ diff --git a/ada_feeding_perception/launch/ada_feeding_perception.launch.py b/ada_feeding_perception/launch/ada_feeding_perception.launch.py index ee67ef4e..a1a3e67c 100755 --- a/ada_feeding_perception/launch/ada_feeding_perception.launch.py +++ b/ada_feeding_perception/launch/ada_feeding_perception.launch.py @@ -79,6 +79,8 @@ def generate_launch_description(): "~/camera_info", PythonExpression(expression=["'", prefix, "/camera/color/camera_info'"]), ), + ] + food_detection_remappings = [ ( "~/aligned_depth", PythonExpression( @@ -100,7 +102,7 @@ def generate_launch_description(): name="segment_from_point", executable="segment_from_point", parameters=[segment_from_point_config, segment_from_point_params], - remappings=realsense_remappings, + remappings=realsense_remappings + food_detection_remappings, ) launch_description.add_action(segment_from_point) @@ -112,10 +114,23 @@ def generate_launch_description(): face_detection_params["model_dir"] = ParameterValue( os.path.join(ada_feeding_perception_share_dir, "model"), value_type=str ) + # To avoid incorrect depth estimates from the food on the fork, face detection + # uses the depth image that has been filtered for the octomap, where those + # points have been set to 0. face_detection_remappings = [ ("~/face_detection", "/face_detection"), ("~/face_detection_img", "/face_detection_img"), ("~/toggle_face_detection", "/toggle_face_detection"), + ( + "~/aligned_depth", + PythonExpression( + expression=[ + "'", + prefix, + "/camera/aligned_depth_to_color/depth_octomap'", + ] + ), + ), ] face_detection = Node( package="ada_feeding_perception",