Lesson 26: Diego 1# - Object detection & location

Google recently released a TensorFlow-based object detection API. In this lesson we use Diego 1#'s depth camera together with that API to recognize objects in the video stream and, at the same time, compute the distance from each recognized object to the robot's camera. The principle is as follows:

  • Object Detection subscribes to the Image messages published by OpenNI and recognizes the objects in each video frame
  • Object Depth subscribes to the Depth Image messages published by OpenNI, maps each object in the detection list to the corresponding region of the depth image, and computes the object's depth
  • Publish Image takes the processed video frame, adds the recognition labels, and republishes it as a Compressed Image message so that other applications can easily subscribe to it

1. Create the diego_tensorflow package

Since we use TensorFlow, we first need to install it; see the official installation guide at https://www.tensorflow.org/install/install_linux

The dependencies of object_detection are installed as described in the official guide at https://github.com/tensorflow/models/blob/master/object_detection/g3doc/installation.md

Run the following command to create the diego_tensorflow package:

catkin_create_pkg diego_tensorflow std_msgs rospy roscpp cv_bridge

Create two subdirectories under the diego_tensorflow directory (example commands follow the list):

  • scripts: holds the source code
  • launch: holds the launch files
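For example (assuming the catkin workspace is ~/catkin_ws and the package was created under ~/catkin_ws/src; adjust the path to your own setup):

cd ~/catkin_ws/src/diego_tensorflow
mkdir scripts launch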

After creation, the diego_tensorflow directory looks like the figure below:

Download the object_detection package from https://github.com/tensorflow/models

After downloading, copy the object_detection package into the diego_tensorflow/scripts directory. If you plan to train your own model, also copy the slim package into diego_tensorflow/scripts.

All of the object recognition code lives in ObjectDetectionDemo.py. The detection code largely follows the official TensorFlow example; here it is wrapped into a ROS node, with the computation of object depth added.

2. ROS node source code

#!/usr/bin/env python

import rospy
from sensor_msgs.msg import Image as ROSImage
from sensor_msgs.msg import CompressedImage as ROSImage_C
from cv_bridge import CvBridge
import cv2
import matplotlib
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import uuid
from collections import defaultdict
from io import StringIO
from PIL import Image
from math import isnan

# This is needed since the notebook is stored in the object_detection folder.
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

class ObjectDetectionDemo():
    def __init__(self):
	rospy.init_node('object_detection_demo')
	
	# Set the shutdown function (stop the robot)
        rospy.on_shutdown(self.shutdown)
        
        self.depth_image =None
        
        self.depth_array = None
        
        model_path = rospy.get_param("~model_path", "")
        image_topic = rospy.get_param("~image_topic", "")
        depth_image_topic = rospy.get_param("~depth_image_topic", "")
        if_down=False
        self.vfc=0
        
        self._cv_bridge = CvBridge()
        
        # What model to download.
	#MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
	#MODEL_NAME='faster_rcnn_resnet101_coco_11_06_2017'
	MODEL_NAME ='ssd_inception_v2_coco_11_06_2017'
	#MODEL_NAME ='diego_object_detection_v1_07_2017'
	#MODEL_NAME ='faster_rcnn_inception_resnet_v2_atrous_coco_11_06_2017'
	MODEL_FILE = MODEL_NAME + '.tar.gz'
	DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'

	# Path to frozen detection graph. This is the actual model that is used for the object detection.
	PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'

	# List of the strings that is used to add correct label for each box.
	PATH_TO_LABELS = os.path.join(model_path+'/data', 'mscoco_label_map.pbtxt')
	#PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')

	NUM_CLASSES = 90
	
	if if_down:
		opener = urllib.request.URLopener()
		opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
		tar_file = tarfile.open(MODEL_FILE)
		for file in tar_file.getmembers():
			file_name = os.path.basename(file.name)
			if 'frozen_inference_graph.pb' in file_name:
        			tar_file.extract(file, os.getcwd())


	rospy.loginfo("begin initilize the tf...")
	self.detection_graph = tf.Graph()
	with self.detection_graph.as_default():
		od_graph_def = tf.GraphDef()
		with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
			serialized_graph = fid.read()
			od_graph_def.ParseFromString(serialized_graph)
			tf.import_graph_def(od_graph_def, name='')

	label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
	categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
	self.category_index = label_map_util.create_category_index(categories)
	
	# Subscribe to the registered depth image
        rospy.Subscriber(depth_image_topic, ROSImage, self.convert_depth_image)
        
        # Wait for the depth image to become available
        #rospy.wait_for_message('depth_image', ROSImage)
	
	self._sub = rospy.Subscriber(image_topic, ROSImage, self.callback, queue_size=1)	
	self._pub = rospy.Publisher('object_detection', ROSImage_C, queue_size=1)
	
	rospy.loginfo("initialization has finished...")
	
	
    def convert_depth_image(self, ros_image):
        # Use cv_bridge() to convert the ROS image to OpenCV format
        # The depth image is a single-channel float32 image
        self.depth_image = self._cv_bridge.imgmsg_to_cv2(ros_image, "32FC1")

        # Convert the depth image to a Numpy array
        self.depth_array = np.array(self.depth_image, dtype=np.float32)
        #print(self.depth_array)
        
    def callback(self,image_msg):
	if self.vfc<12:
		self.vfc=self.vfc+1
	else:
		self.callbackfun(image_msg)
		self.vfc=0	
		    	
    def box_depth(self,boxes,im_width, im_height):
	# Now compute the depth component
        depth=[]
	for row in boxes[0]:
		n_z = sum_z = mean_z = 0
		# Get the min/max x and y values from the ROI
		if row[0]<row[1]:
			min_x = row[0]*im_width
			max_x = row[1]*im_width
		else:
			min_x = row[1]*im_width
			max_x = row[0]*im_width
			
		if row[2]<row[3]:
			min_y = row[2]*im_height
			max_y = row[3]*im_height
		else:
			min_y = row[3]*im_height
			max_y = row[2]*im_height
		# Get the average depth value over the ROI
		for x in range(int(min_x), int(max_x)):
            		for y in range(int(min_y), int(max_y)):
                		try:
					z = self.depth_array[y, x]
				except:
					continue
                
				# Depth values can be NaN which should be ignored
				if isnan(z):
					z=6
					continue
				else:
					sum_z = sum_z + z
					n_z += 1 
			mean_z = sum_z / (n_z+0.01)
		depth.append(mean_z)
	return depth
    def callbackfun(self, image_msg):
	with self.detection_graph.as_default():
		with tf.Session(graph=self.detection_graph) as sess:
			 cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
			 #cv_image = (self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8"))[300:450, 150:380]
			 pil_img = Image.fromarray(cv_image)			 
			 (im_width, im_height) = pil_img.size			 
			 # the array based representation of the image will be used later in order to prepare the
			 # result image with boxes and labels on it.
			 image_np =np.array(pil_img.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
			 # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
			 image_np_expanded = np.expand_dims(image_np, axis=0)
			 image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
			 # Each box represents a part of the image where a particular object was detected.
			 boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
			 # Each score represent how level of confidence for each of the objects.
			 # Score is shown on the result image, together with the class label.
			 scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
			 classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
			 num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
			
			 # Actual detection.
			 (boxes, scores, classes, num_detections) = sess.run(
			 	[boxes, scores, classes, num_detections],
			 	feed_dict={image_tensor: image_np_expanded})
			 box_depths=self.box_depth(boxes,im_width,im_height)
			 print(box_depths)
			 # Visualization of the results of a detection.
			 vis_util.visualize_boxes_and_labels_on_image_array(
			 	image_np,
			 	np.squeeze(boxes),
			 	np.squeeze(classes).astype(np.int32),
			 	np.squeeze(scores),
			 	self.category_index,
                                box_depths,
			 	use_normalized_coordinates=True,
			 	line_thickness=8)
			 
			 ros_compressed_image=self._cv_bridge.cv2_to_compressed_imgmsg(image_np)
			 self._pub.publish(ros_compressed_image)
			
    
    def shutdown(self):
        rospy.loginfo("Stopping the tensorflow object detection...")
        rospy.sleep(1) 
        
if __name__ == '__main__':
    try:
        ObjectDetectionDemo()
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("RosTensorFlow_ObjectDetectionDemo has started.")

Let's now walk through the main parts of the code.

    def __init__(self):
	rospy.init_node('object_detection_demo')
	
	# Set the shutdown function (stop the robot)
        rospy.on_shutdown(self.shutdown)
        
        self.depth_image =None
        
        self.depth_array = None
        
        model_path = rospy.get_param("~model_path", "")
        image_topic = rospy.get_param("~image_topic", "")
        depth_image_topic = rospy.get_param("~depth_image_topic", "")
        if_down=False
        self.vfc=0
        
        self._cv_bridge = CvBridge()

The code above is standard ROS initialization: it initializes the node and the member variables and reads the parameters set in the launch file:

  • model_path: the path of the model used by object_detection
  • image_topic: the image topic to subscribe to
  • depth_image_topic: the depth image topic to subscribe to
        # What model to download.
	#MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
	#MODEL_NAME='faster_rcnn_resnet101_coco_11_06_2017'
	MODEL_NAME ='ssd_inception_v2_coco_11_06_2017'
	#MODEL_NAME ='diego_object_detection_v1_07_2017'
	#MODEL_NAME ='faster_rcnn_inception_resnet_v2_atrous_coco_11_06_2017'
	MODEL_FILE = MODEL_NAME + '.tar.gz'
	DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'

	# Path to frozen detection graph. This is the actual model that is used for the object detection.
	PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'

	# List of the strings that is used to add correct label for each box.
	PATH_TO_LABELS = os.path.join(model_path+'/data', 'mscoco_label_map.pbtxt')
	#PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')

	NUM_CLASSES = 90
	
	if if_down:
		opener = urllib.request.URLopener()
		opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
		tar_file = tarfile.open(MODEL_FILE)
		for file in tar_file.getmembers():
			file_name = os.path.basename(file.name)
			if 'frozen_inference_graph.pb' in file_name:
        			tar_file.extract(file, os.getcwd())

The code above selects the model used by object_detection and downloads and extracts the corresponding file. An if_down switch is used here: turn it on for the first run to download the model, then turn it off afterwards, since the download takes quite a while and only needs to be done once.

self.detection_graph = tf.Graph()
	with self.detection_graph.as_default():
		od_graph_def = tf.GraphDef()
		with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
			serialized_graph = fid.read()
			od_graph_def.ParseFromString(serialized_graph)
			tf.import_graph_def(od_graph_def, name='')

	label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
	categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
	self.category_index = label_map_util.create_category_index(categories)

The code above is the TensorFlow initialization: it loads the frozen detection graph and builds the category index from the label map.

        # Subscribe to the registered depth image
        rospy.Subscriber(depth_image_topic, ROSImage, self.convert_depth_image)
        
        # Wait for the depth image to become available
        #rospy.wait_for_message('depth_image', ROSImage)
	
	self._sub = rospy.Subscriber(image_topic, ROSImage, self.callback, queue_size=1)	
	self._pub = rospy.Publisher('object_detection', ROSImage_C, queue_size=1)

In the code above we subscribe to the depth_image and image topics and advertise a Compressed Image topic named object_detection.
The callback for depth_image is convert_depth_image.
The callback for image is callback.

    def convert_depth_image(self, ros_image):
        # Use cv_bridge() to convert the ROS image to OpenCV format
        # The depth image is a single-channel float32 image
        self.depth_image = self._cv_bridge.imgmsg_to_cv2(ros_image, "32FC1")

        # Convert the depth image to a Numpy array
        self.depth_array = np.array(self.depth_image, dtype=np.float32)
        #print(self.depth_array)

The above is the callback for the depth image: it first converts the depth_image message to an OpenCV image, then converts that image to a NumPy array and stores it in the depth_array member variable.

    def callback(self,image_msg):
	if self.vfc<12:
		self.vfc=self.vfc+1
	else:
		self.callbackfun(image_msg)
		self.vfc=0

The above is the callback for the image topic. It throttles the frame processing rate, mainly to reduce the computational load; the threshold can be tuned freely. The actual frame processing happens in callbackfun.

    def box_depth(self,boxes,im_width, im_height):
	# Now compute the depth component
        depth=[]
	for row in boxes[0]:
		n_z = sum_z = mean_z = 0
		# Get the min/max x and y values from the ROI
		if row[0]<row[1]:
			min_x = row[0]*im_width
			max_x = row[1]*im_width
		else:
			min_x = row[1]*im_width
			max_x = row[0]*im_width
			
		if row[2]<row[3]:
			min_y = row[2]*im_height
			max_y = row[3]*im_height
		else:
			min_y = row[3]*im_height
			max_y = row[2]*im_height
		# Get the average depth value over the ROI
		for x in range(int(min_x), int(max_x)):
            		for y in range(int(min_y), int(max_y)):
                		try:
					z = self.depth_array[y, x]
				except:
					continue
                
				# Depth values can be NaN which should be ignored
				if isnan(z):
					z=6
					continue
				else:
					sum_z = sum_z + z
					n_z += 1 
			mean_z = sum_z / (n_z+0.01)
		depth.append(mean_z)
	return depth

The code above computes the depth data. The input parameter boxes is the list of bounding rectangles recognized by object_detection; for each rectangle we map its extent onto the corresponding region of the depth image and take the average depth over that region as the object's depth. The function returns a one-dimensional array that corresponds to boxes one-to-one.
Because the depth image and the RGB image are processed asynchronously, the two frames may not correspond exactly. The handling here is deliberately simple and ignores that problem; it could be solved by buffering depth images and matching the closest one by timestamp, as sketched below.
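A minimal sketch of that idea, assuming we keep the last few depth frames in a deque together with their header stamps and pick the one closest to the RGB frame's stamp (the class and method names here are illustrative, not part of the node above):

from collections import deque

class DepthBuffer:
    # Keep the most recent depth frames and match them to RGB frames by timestamp
    def __init__(self, maxlen=10):
        self.frames = deque(maxlen=maxlen)  # entries: (rospy.Time stamp, numpy depth array)

    def add(self, stamp, depth_array):
        self.frames.append((stamp, depth_array))

    def closest_to(self, stamp):
        # Return the buffered depth array whose stamp is nearest to the given stamp
        if not self.frames:
            return None
        return min(self.frames, key=lambda f: abs((f[0] - stamp).to_sec()))[1]

convert_depth_image would then call self.depth_buffer.add(ros_image.header.stamp, self.depth_array), and callbackfun would look up self.depth_buffer.closest_to(image_msg.header.stamp) before computing the box depths.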

def callbackfun(self, image_msg):
	with self.detection_graph.as_default():
		with tf.Session(graph=self.detection_graph) as sess:
			 cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
			 #cv_image = (self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8"))[300:450, 150:380]
			 pil_img = Image.fromarray(cv_image)			 
			 (im_width, im_height) = pil_img.size			 
			 # the array based representation of the image will be used later in order to prepare the
			 # result image with boxes and labels on it.
			 image_np =np.array(pil_img.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
			 # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
			 image_np_expanded = np.expand_dims(image_np, axis=0)
			 image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
			 # Each box represents a part of the image where a particular object was detected.
			 boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
			 # Each score represent how level of confidence for each of the objects.
			 # Score is shown on the result image, together with the class label.
			 scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
			 classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
			 num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
			
			 # Actual detection.
			 (boxes, scores, classes, num_detections) = sess.run(
			 	[boxes, scores, classes, num_detections],
			 	feed_dict={image_tensor: image_np_expanded})
			 box_depths=self.box_depth(boxes,im_width,im_height)
			 # Visualization of the results of a detection.
			 vis_util.visualize_boxes_and_labels_on_image_array(
			 	image_np,
			 	np.squeeze(boxes),
			 	np.squeeze(classes).astype(np.int32),
			 	np.squeeze(scores),
			 	self.category_index,
                                box_depths,
			 	use_normalized_coordinates=True,
			 	line_thickness=8)
			 
			 ros_compressed_image=self._cv_bridge.cv2_to_compressed_imgmsg(image_np)
			 self._pub.publish(ros_compressed_image)

The above is the image callback. It converts the image message to OpenCV format and then to a NumPy array, runs object_detection to recognize the objects in the frame, calls vis_util.visualize_boxes_and_labels_on_image_array to draw the recognized objects on the image, and finally publishes the processed frame as a compressed_image message.

Now we only need a small modification to visualization_utils.py in the object_detection/utils directory to display the depth information.

def visualize_boxes_and_labels_on_image_array(image,
                                              boxes,
                                              classes,
                                              scores,
                                              category_index,
	                                      box_depths=None,
                                              instance_masks=None,
                                              keypoints=None,
                                              use_normalized_coordinates=False,
                                              max_boxes_to_draw=20,
                                              min_score_thresh=.5,
                                              agnostic_mode=False,
                                              line_thickness=4):

Add box_depths=None to the signature of visualize_boxes_and_labels_on_image_array, with None as the default value.

  for i in range(min(max_boxes_to_draw, boxes.shape[0])):
    if scores is None or scores[i] > min_score_thresh:
      box = tuple(boxes[i].tolist())
      if instance_masks is not None:
        box_to_instance_masks_map[box] = instance_masks[i]
      if keypoints is not None:
        box_to_keypoints_map[box].extend(keypoints[i])
      if scores is None:
        box_to_color_map[box] = 'black'
      else:
        if not agnostic_mode:
          if classes[i] in category_index.keys():
            class_name = category_index[classes[i]]['name']
          else:
            class_name = 'N/A'
          display_str = '{}: {}%'.format(
              class_name,
              int(100*scores[i]))
        else:
          display_str = 'score: {}%'.format(int(100 * scores[i]))
          
        #modify by diego robot
        if box_depths!=None:
        	display_str=display_str+"\ndepth: "+str(box_depths[i])
        	global depth_info
        	depth_info=True
        else:
        	global depth_info
        	depth_info=False
        #######################
        box_to_display_str_map[box].append(display_str)
        if agnostic_mode:
          box_to_color_map[box] = 'DarkOrange'
        else:
          box_to_color_map[box] = STANDARD_COLORS[
              classes[i] % len(STANDARD_COLORS)]

Inside the first for loop, add the "modify by diego robot" block shown above just before the line box_to_display_str_map[box].append(display_str).

depth_info=False

At the top of the file, define this global variable; it indicates whether depth information has been passed in.

 # Reverse list and print from bottom to top.
  for display_str in display_str_list[::-1]: 
    text_width, text_height = font.getsize(display_str)    
    #modify by william
    global depth_info
    if depth_info:
  	text_height=text_height*2
    ###################
    	
    margin = np.ceil(0.05 * text_height)
    draw.rectangle(
        [(left, text_bottom - text_height - 2 * margin), (left + text_width,
                                                          text_bottom)],
        fill=color)
    draw.text(
        (left + margin, text_bottom - text_height - margin),
        display_str,
        fill='black',
        font=font)
    text_bottom -= text_height - 2 * margin

In the draw_bounding_box_on_image function, add the "modify by william" block shown above just before the line margin = np.ceil(0.05 * text_height).

3. Launch file

<launch>
   <node pkg="diego_tensorflow" name="ObjectDetectionDemo" type="ObjectDetectionDemo.py" output="screen">

       <param name="image_topic" value="/camera/rgb/image_raw" />

       <param name="depth_image_topic" value="/camera/depth_registered/image" />

       <param name="model_path" value="$(find diego_tensorflow)/scripts/object_detection" />

   </node>

</launch>

The launch file defines the parameters image_topic, depth_image_topic and model_path; adjust them to match your own setup.

4. Start the nodes

Start OpenNI:

roslaunch diego_vision openni_node.launch 

Start object_detection:

roslaunch diego_tensorflow object_detection_demo.launch

5. Subscribe to object_detection from a phone app

We only need to point the app's image topic at object_detection to see the detection results on the phone.
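If you do not have the app handy, any ROS node can subscribe to the same topic. Below is a minimal sketch that decodes the compressed frames with cv_bridge and shows them in an OpenCV window (the node name object_detection_viewer is just an example):

#!/usr/bin/env python
import rospy
import cv2
from cv_bridge import CvBridge
from sensor_msgs.msg import CompressedImage

class ObjectDetectionViewer:
    def __init__(self):
        rospy.init_node('object_detection_viewer')
        self._bridge = CvBridge()
        # Subscribe to the compressed frames published by ObjectDetectionDemo
        rospy.Subscriber('object_detection', CompressedImage, self.on_image, queue_size=1)

    def on_image(self, msg):
        # Decode back into an OpenCV BGR image and display it
        frame = self._bridge.compressed_imgmsg_to_cv2(msg, desired_encoding='bgr8')
        cv2.imshow('object_detection', frame)
        cv2.waitKey(1)

if __name__ == '__main__':
    ObjectDetectionViewer()
    rospy.spin()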

Object detection is only as good as the model it was trained on. This lesson uses the officially provided pre-trained model, whose recognition rate is quite high; however, if the robot's working environment differs a lot from the training images, the recognition rate drops sharply. In practice we can therefore train our own object model and combine it with the robot's other sensors and devices to meet the requirements of a real application, for example monitoring people or vehicle traffic, or tracking a specific object; combined with the robot arm and the depth camera we can also determine an object's position and grasp it.

Lesson 21: MoveIt - the driver for Diego 1# plus

In the previous lesson we finished the MoveIt configuration for Diego 1# plus with the MoveIt Setup Assistant, but to actually make the arm move we still need to write the driver code. The principle is as follows:

First, MoveIt sends its planning results to the driver via a ROS action; the driver calls the servo_write service of ros_arduino_bridge to send the control commands for each joint servo to the Arduino UNO board.
Second, the driver should also call the servo_read service of ros_arduino_bridge to read the state of each joint servo and publish it to MoveIt as joint_state messages, which MoveIt uses for its path planning.
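That second half (publishing joint states from servo feedback) is not implemented in the follow controller below; the test launch file in section 3.2 simply uses fake joint states instead. A rough sketch of what a real publisher could look like, assuming ros_arduino_bridge exposes a /arduino/servo_read service whose request is a servo id and whose response carries the current value (the service type, its field names and the missing radian conversion are assumptions; check your ros_arduino_msgs definitions):

#!/usr/bin/env python
# Sketch only: publish sensor_msgs/JointState built from servo feedback.
import rospy
from sensor_msgs.msg import JointState
from ros_arduino_msgs.srv import ServoRead   # assumed service type

# Joint names as configured in diego1_follow_controller.py below
JOINT_NAMES = ['arm_base_to_arm_round_joint_stevo0',
               'shoulder_2_to_arm_joint_stevo1',
               'big_arm_round_to_joint_stevo2',
               'arm_joint_stevo2_to_arm_joint_stevo3',
               'wrist_to_arm_joint_stevo4',
               'arm_joint_stevo4_to_arm_joint_stevo5']

def publish_joint_states():
    rospy.init_node('arm_joint_state_publisher')
    pub = rospy.Publisher('joint_states', JointState, queue_size=5)
    rospy.wait_for_service('/arduino/servo_read')
    servo_read = rospy.ServiceProxy('/arduino/servo_read', ServoRead)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        msg = JointState()
        msg.header.stamp = rospy.Time.now()
        msg.name = JOINT_NAMES
        # One servo_read call per joint; 'value' as the response field is an assumption,
        # and a real driver would also map raw servo positions back to radians
        # (the inverse of Joint.mapToServoPosition in joints.py).
        msg.position = [servo_read(i).value for i in range(len(JOINT_NAMES))]
        pub.publish(msg)
        rate.sleep()

if __name__ == '__main__':
    publish_joint_states()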

Next, let's build the driver step by step.

1. Create the controller configuration file diego1_controllers.yaml

In the config subdirectory of the configuration package generated by the MoveIt Setup Assistant, create the Diego 1# plus controller file diego1_controllers.yaml. This file tells MoveIt which actions it should communicate with.

diego1_controllers.yaml looks like this:

controller_list:
  - name: arm_controller
    action_ns: follow_joint_trajectory
    type: FollowJointTrajectory
    default: true
    joints:
      - arm_base_to_arm_round_joint_stevo0
      - shoulder_2_to_arm_joint_stevo1
      - big_arm_round_to_joint_stevo2
      - arm_joint_stevo2_to_arm_joint_stevo3
      - wrist_to_arm_joint_stevo4
      - arm_joint_stevo4_to_arm_joint_stevo5
  - name: gripper_controller
    action_ns: gripper_action
    type: GripperCommand
    default: true
    joints:
      - hand_to_grip_joint_stevo6
      - hand_to_grip_joint_stevo7

Two controllers are defined here, arm_controller for the arm and gripper_controller for the gripper. They tell MoveIt that the arm communicates on the topic arm_controller/follow_joint_trajectory and the gripper on gripper_controller/gripper_action.

2. The FollowController for the arm

We extend ros_arduino_bridge to implement the FollowController: create joints.py and diego1_follow_controller.py in the ros_arduino_python directory.

2.1 joints.py

This file wraps the servo control for a single joint. Its main functions are:

  • __init__: class initialization
  • setCurrentPosition(self): calls the /arduino/servo_write service to send a servo command that moves the servo to self.position
  • setCurrentPosition(self, position): calls the /arduino/servo_write service to send a servo command that moves the servo to the position given by MoveIt
  • mapToServoPosition(self, position): maps the position given by MoveIt to the actual servo position of the arm; this mapping must be calibrated before the arm position can be controlled accurately.
#!/usr/bin/env python

# Copyright (c) 2017-2018 diego. 
# All right reserved.
#

## @file joints.py the jonit clase support functions for joints.

## @brief Joints hold current values.
import roslib
import rospy
from ros_arduino_msgs.srv import *
from math import pi as PI, degrees, radians
class Joint:

    ## @brief Constructs a Joint instance.
    ##
    ## @param name The joint name.
    ##
    ## @param servoNum The servo id for this joint.
    ##
    ## @param max/min The joint limits in radians; servo_max/servo_min the servo control range; inverse reverses the mapping direction.
    def __init__(self, name, servoNum, max, min, servo_max, servo_min, inverse):
        self.name = name
        self.servoNum=servoNum
        self.max=max
        self.min=min
        self.servo_max=servo_max
        self.servo_min=servo_min
        self.inverse=inverse

        self.position = 0.0
        self.velocity = 0.0

    ## @brief Set the current position.
    def setCurrentPosition(self):
        rospy.wait_for_service('/arduino/servo_write')
	try:
	        servo_write=rospy.ServiceProxy('/arduino/servo_write',ServoWrite)
	        servo_write(self.servoNum,self.position)
        except rospy.ServiceException, e:
            print "Service call failed: %s"%e   

    ## @brief Set the current position.
    ##
    ## @param position The current position. 
    def setCurrentPosition(self, position):
        rospy.wait_for_service('/arduino/servo_write')
	try:
		#if self.servoNum==2:
        	servo_write=rospy.ServiceProxy('/arduino/servo_write',ServoWrite)
        	self.mapToServoPosition(position)	        	
       		servo_write(self.servoNum,radians(self.position))

        except rospy.ServiceException, e:
            print "Service call failed: %s"%e
    
    ## @brief map to  Servo Position.
    ##
    ## @param position The current position.        
    def mapToServoPosition(self,position):
	    per=(position-self.min)/(self.max-self.min)
	    if not self.inverse:	    	
	    	self.position=self.servo_min+per*(self.servo_max-self.servo_min)
	    else:
	    	self.position=self.servo_max-per*(self.servo_max-self.servo_min)
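
As a quick sanity check of mapToServoPosition, take arm_base_to_arm_round_joint_stevo0 as it is configured in diego1_follow_controller.py below (max=1.5797, min=-1.5707, servo_max=130, servo_min=0, inverse=False); a MoveIt position of 0 rad maps to roughly the middle of the servo range:

# Worked example of the linear mapping done by Joint.mapToServoPosition()
position, mx, mn = 0.0, 1.5797, -1.5707            # MoveIt joint angle and its limits (rad)
servo_max, servo_min = 130, 0                      # calibrated servo range for this joint
per = (position - mn) / (mx - mn)                  # = 1.5707 / 3.1504, about 0.4986
servo = servo_min + per * (servo_max - servo_min)  # about 64.8, i.e. near mid-travel
print(per, servo)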


2.2 diego1_follow_controller.py

This class is the controller for the Diego arm: it receives control commands from MoveIt, converts them into actual commands for the joint servos, and drives the arm.

#!/usr/bin/env python

# Copyright (c) 2017-2018 williamar. 
# All right reserved.

import rospy, actionlib

from control_msgs.msg import FollowJointTrajectoryAction
from trajectory_msgs.msg import JointTrajectory
from diagnostic_msgs.msg import *
from math import pi as PI, degrees, radians
from joints import Joint

class FollowController:

    def __init__(self, name):
        self.name = name
        
        rospy.init_node(self.name)
        

        # rates
        self.rate = 10.0
        
        # Arm joints
        self.arm_base_to_arm_round_joint_stevo0=Joint('arm_base_to_arm_round_joint_stevo0',0,1.5797,-1.5707,130,0,False)
        self.shoulder_2_to_arm_joint_stevo1=Joint('shoulder_2_to_arm_joint_stevo1',1,1.5707,-0.1899,115,45,False)
        self.big_arm_round_to_joint_stevo2=Joint('big_arm_round_to_joint_stevo2',2,2.5891,1,100,20,False)
        self.arm_joint_stevo2_to_arm_joint_stevo3=Joint('arm_joint_stevo2_to_arm_joint_stevo3',3,1.5707,-1.5707,130,0,False)
        self.wrist_to_arm_joint_stevo4=Joint('wrist_to_arm_joint_stevo4',4,1.5707,-1.5707,130,0,False)
        self.arm_joint_stevo4_to_arm_joint_stevo5=Joint('arm_joint_stevo4_to_arm_joint_stevo5',5,1.5707,-1.5707,130,0,True)
        
        
        
        self.joints=[self.arm_base_to_arm_round_joint_stevo0,
        self.shoulder_2_to_arm_joint_stevo1,
        self.big_arm_round_to_joint_stevo2,
        self.arm_joint_stevo2_to_arm_joint_stevo3,
        self.wrist_to_arm_joint_stevo4,
        self.arm_joint_stevo4_to_arm_joint_stevo5]
        
        # action server
        self.server = actionlib.SimpleActionServer('arm_controller/follow_joint_trajectory', FollowJointTrajectoryAction, execute_cb=self.actionCb, auto_start=False)
        self.server.start()
        rospy.spin()
        rospy.loginfo("Started FollowController")


    def actionCb(self, goal):
	print("****************actionCb*********************")    
        rospy.loginfo(self.name + ": Action goal recieved.")
        traj = goal.trajectory

        if set(self.joints) != set(traj.joint_names):
            for j in self.joints:
                if j.name not in traj.joint_names:
                    msg = "Trajectory joint names does not match action controlled joints." + str(traj.joint_names)
                    rospy.logerr(msg)
                    self.server.set_aborted(text=msg)
                    return
            rospy.logwarn("Extra joints in trajectory")

        if not traj.points:
            msg = "Trajectory empy."
            rospy.logerr(msg)
            self.server.set_aborted(text=msg)
            return

        try:
            indexes = [traj.joint_names.index(joint.name) for joint in self.joints]
        except ValueError as val:
            msg = "Trajectory invalid."
            rospy.logerr(msg)
            self.server.set_aborted(text=msg)
            return

        if self.executeTrajectory(traj):   
            self.server.set_succeeded()
        else:
            self.server.set_aborted(text="Execution failed.")

        rospy.loginfo(self.name + ": Done.")     

    def executeTrajectory(self, traj):
        rospy.loginfo("Executing trajectory")
        rospy.logdebug(traj)
        # carry out trajectory
        try:
            indexes = [traj.joint_names.index(joint.name) for joint in self.joints]
        except ValueError as val:
            rospy.logerr("Invalid joint in trajectory.")
            return False

        # get starting timestamp, MoveIt uses 0, need to fill in
        start = traj.header.stamp
        if start.secs == 0 and start.nsecs == 0:
            start = rospy.Time.now()

        r = rospy.Rate(self.rate)
	for point in traj.points:            
            desired = [ point.positions[k] for k in indexes ]
            for i in indexes:
                #self.joints[i].position=desired[i]
                self.joints[i].setCurrentPosition(desired[i])

            while rospy.Time.now() + rospy.Duration(0.01) < start:
                rospy.sleep(0.01)
        return True
    

if __name__=='__main__':
    try:
        rospy.loginfo("start followController...")
        FollowController('follow_Controller')
    except rospy.ROSInterruptException:
        rospy.loginfo('Failed to start followController...')
        


This class is the core of the driver; let's go through its logic in detail.

    def __init__(self, name):
        self.name = name
        
        rospy.init_node(self.name)
        

        # rates
        self.rate = 10.0

This initializes the ROS node, sets the node name, and sets the update rate to 10 Hz.

        # Arm joints
        self.arm_base_to_arm_round_joint_stevo0=Joint('arm_base_to_arm_round_joint_stevo0',0,1.5797,-1.5707,130,0,False)
        self.shoulder_2_to_arm_joint_stevo1=Joint('shoulder_2_to_arm_joint_stevo1',1,1.5707,-0.1899,115,45,False)
        self.big_arm_round_to_joint_stevo2=Joint('big_arm_round_to_joint_stevo2',2,2.5891,1,100,20,False)
        self.arm_joint_stevo2_to_arm_joint_stevo3=Joint('arm_joint_stevo2_to_arm_joint_stevo3',3,1.5707,-1.5707,130,0,False)
        self.wrist_to_arm_joint_stevo4=Joint('wrist_to_arm_joint_stevo4',4,1.5707,-1.5707,130,0,False)
        self.arm_joint_stevo4_to_arm_joint_stevo5=Joint('arm_joint_stevo4_to_arm_joint_stevo5',5,1.5707,-1.5707,130,0,True)
        
        
        
        self.joints=[self.arm_base_to_arm_round_joint_stevo0,
        self.shoulder_2_to_arm_joint_stevo1,
        self.big_arm_round_to_joint_stevo2,
        self.arm_joint_stevo2_to_arm_joint_stevo3,
        self.wrist_to_arm_joint_stevo4,
        self.arm_joint_stevo4_to_arm_joint_stevo5]

This initializes the arm joints and puts them into the joints list for convenient access later.

        # action server
        self.server = actionlib.SimpleActionServer('arm_controller/follow_joint_trajectory', FollowJointTrajectoryAction, execute_cb=self.actionCb, auto_start=False)
        self.server.start()
        rospy.spin()
        rospy.loginfo("Started FollowController")

This defines the action server and starts it.
The communication topic is arm_controller/follow_joint_trajectory, the action type is FollowJointTrajectoryAction, and self.actionCb is the callback invoked when a goal is received.
self.actionCb is essentially the follow controller's handling of the JointTrajectory msg, so let's first introduce JointTrajectory msg; understanding it helps with the rest of the code. Run the following command to display the structure of JointTrajectory msg:

$ rosmsg show JointTrajectory
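
For reference, on a typical ROS 1 installation the output looks roughly like this (trajectory_msgs/JointTrajectory; the exact formatting may differ between releases):

std_msgs/Header header
  uint32 seq
  time stamp
  string frame_id
string[] joint_names
trajectory_msgs/JointTrajectoryPoint[] points
  float64[] positions
  float64[] velocities
  float64[] accelerations
  float64[] effort
  duration time_from_start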

As you can see, the message consists of three parts:

  • header: the standard ROS message header, which needs no further introduction here
  • joint_names: an array with the names of all the joints
  • points (JointTrajectoryPoint[]): this part is the key for the driver. The array records the waypoints the arm passes through when moving from one pose to another; the pose path produced by MoveIt is described by these points, and our driver has to move every joint servo through them. Each point is itself a structure:
  • positions: a float64 array with the angle each servo should reach at this point, in radians. For a 6-DOF arm, each point's positions field contains six values, e.g. [1.57, 0, 2, 0.2, 3, 0.12]; since our servos have a 180-degree range, the values lie between 0 and π
  • velocities: an array with the velocity of each joint
  • accelerations: an array with the acceleration of each joint
  • effort: this field is not used here
  • time_from_start: the time, counted from the header timestamp, at which this point should be reached

With the structure of JointTrajectory msg understood, it is easy to see that self.actionCb only performs sanity checks on the message: it verifies that the joints in the msg are in the joints list and that the message is not empty. The actual processing of the message body happens in executeTrajectory(self, traj); let's look at that function's logic.

    def executeTrajectory(self, traj):
        rospy.loginfo("Executing trajectory")
        rospy.logdebug(traj)
        # carry out trajectory
        try:
            indexes = [traj.joint_names.index(joint.name) for joint in self.joints]
        except ValueError as val:
            rospy.logerr("Invalid joint in trajectory.")
            return False

This indexes the joints in the JointTrajectory msg in the order of self.joints and stores the indices in the indexes array.

    # get starting timestamp, MoveIt uses 0, need to fill in
        start = traj.header.stamp
        if start.secs == 0 and start.nsecs == 0:
            start = rospy.Time.now()

This gets the timestamp of the current msg (MoveIt sets it to 0, so we fill in the current time).

r = rospy.Rate(self.rate)

This sets the update rate.

	for point in traj.points:            
            desired = [ point.positions[k] for k in indexes ]
            for i in indexes:
                #self.joints[i].position=desired[i]
                self.joints[i].setCurrentPosition(desired[i])

            while rospy.Time.now() + rospy.Duration(0.01) < start:
                rospy.sleep(0.01)

This loops over the waypoints in the msg, reads the servo position values from each waypoint and calls Joint.setCurrentPosition to send the servo control commands.

3. Launch files

The MoveIt Setup Assistant generates a launch folder containing the main launch files; we only need a few simple modifications before we can test.

3.1 Modify diego1_moveit_controller_manager.launch.xml

This file is generated automatically by the MoveIt Setup Assistant but is empty. Add the following content to tell MoveIt where to find the controller configuration file:

<launch>
     <!-- Set the param that trajectory_execution_manager needs to find the controller plugin -->
     <arg name="moveit_controller_manager" default="moveit_simple_controller_manager/MoveItSimpleControllerManager" />
     <param name="moveit_controller_manager" value="$(arg moveit_controller_manager)"/>
     <!-- load controller_list -->
     <rosparam file="$(find diego_moveit_config)/config/diego1_controllers.yaml"/>
</launch>

3.2 Create the diego_demo.launch file

Create diego_demo.launch in MoveIt's launch folder and add the following content:

<launch>

     <master auto="start"/>

     <node name="arduino" pkg="ros_arduino_python" type="arduino_node.py" output="screen">
         <rosparam file="$(find ros_arduino_python)/config/my_arduino_params.yaml" command="load" />
     </node>
     <!-- By default, we are not in debug mode -->
     <arg name="debug" default="false" />

     <!-- Load the URDF, SRDF and other .yaml configuration files on the param server -->
     <include file="$(find diego_moveit_config)/launch/planning_context.launch">
         <arg name="load_robot_description" value="true"/>
     </include>

     <node name="arduino_follow_controller" pkg="ros_arduino_python" type="diego1_follow_controller.py" output="screen">
     </node>

     <!-- If needed, broadcast static tf for robot root -->
     <node pkg="tf" type="static_transform_publisher" name="virtual_joint_broadcaster_0" args="0 0 0 0 0 0 world_frame base_link 100" />

     <!-- We do not have a robot connected, so publish fake joint states -->
     <node name="joint_state_publisher" pkg="joint_state_publisher" type="joint_state_publisher">
        <param name="/use_gui" value="false"/>
        <rosparam param="/source_list">[/move_group/fake_controller_joint_states]</rosparam>
     </node>

     <!-- Run the main MoveIt executable without trajectory execution (we do not have controllers configured by default) -->
     <include file="$(find diego_moveit_config)/launch/move_group.launch">
     </include>
</launch>

This launch file is mainly for testing, so joint_states uses fake data: when starting joint_state_publisher we set the source list to /move_group/fake_controller_joint_states.

Now we can start MoveIt with the following command:

roslaunch diego_moveit_config diego_demo.launch

At this point we can control the arm by typing MoveIt commands in the console, or we can call the MoveIt interface from code to control the arm. Let's write a short test program.

4. MoveIt control test code

Create diego_moveit_test.py in the ros_arduino_python directory with the following code:

#!/usr/bin/env python

import rospy, sys
import moveit_commander
from control_msgs.msg import GripperCommand

class MoveItDemo:
    def __init__(self):
        # Initialize the move_group API
        moveit_commander.roscpp_initialize(sys.argv)

        # Initialize the ROS node
        rospy.init_node('moveit_demo', anonymous=True)
        
 
        # Connect to the arm move group
        arm = moveit_commander.MoveGroupCommander('arm')
                       
        # Get the name of the end-effector link
        end_effector_link = arm.get_end_effector_link()
        
        # Display the name of the end_effector link
        rospy.loginfo("The end effector link is: " + str(end_effector_link))
        
        # Set a small tolerance on joint angles
        arm.set_goal_joint_tolerance(0.001)
        
        # Start the arm target in "resting" pose stored in the SRDF file
        arm.set_named_target('arm_default_pose')
        
        # Plan a trajectory to the goal configuration
        traj = arm.plan()
         
        # Execute the planned trajectory
        arm.execute(traj)
        
        # Pause for a moment
        rospy.sleep(1)         
        
        # Cleanly shut down MoveIt
        moveit_commander.roscpp_shutdown()
        
        # Exit the script
        moveit_commander.os._exit(0)

if __name__ == "__main__":
    try:
        MoveItDemo()
    except rospy.ROSInterruptException:
        pass

In the test code we connect to the arm move group and command the arm to move to the pose arm_default_pose. Run the test with the following command:

    rosrun ros_arduino_python diego_moveit_test.py

After it starts, you can see the arm immediately move to the arm_default_pose position.
 

Lesson 19: MoveIt - URDF description file

Starting with this lesson we describe how to use MoveIt to control the robot arm. MoveIt is the ROS interface to the open-source motion planning library OMPL; it wraps OMPL's complexity so that, with relatively simple configuration, users can apply OMPL's advanced planning algorithms inside ROS. Let's begin with the first MoveIt topic: creating the URDF description file for Diego 1# plus. For the URDF specification, please refer to the official ROS wiki.

The finished URDF source for Diego 1# plus can be found on GitHub; displayed in RViz it looks as follows:

You can open diego1.urdf with the following command; note that you must first cd into the directory containing diego1.urdf, or give the full path in the command:

roslaunch urdf_tutorial display.launch model:=diego1.urdf

The Diego 1# plus model consists of three main parts:

  • the chassis
  • the control unit and the camera: the camera is mounted rather high because the usable range of the Xtion is 0.8 m to 3 m; if it were mounted lower, the arm's workspace would fall outside its field of view
  • the robot arm

1. Basic URDF syntax

1.1 Structure of a URDF

The figure below is a screenshot from the official tutorial; an entire URDF file is made up of links and joints.

  • A link is one part of the robot, for example the chassis, the camera or the arm.
  • A joint is the connection between two parts of the robot.

1.2 The robot element

A URDF is an XML file that follows the XML 1.0 specification. The top-level element of a URDF is <robot>, where we can define the robot's name:

<?xml version="1.0"?>
<robot name="diego1">

...

</robot>

1.3 The link element

Let's take part of diego1.urdf as an example. The snippet below describes the controller, which is really the mini PC plus the Arduino, the power module and the I2C servo controller board; in diego1.urdf we describe this whole assembly as one box-shaped part.

<link name="control_box">
   <visual>
   <geometry>
      <box size="0.16 .128 .14"/>
   </geometry>
   <material name="black">
      <color rgba="255 255 255 1"/>
   </material>
   </visual>
</link>

Let's explain this snippet:

<link name="control_box"><!--link节点的标识,这里需要给每个节点命名一个唯一的名称-->
   <visual><!--节点视觉描述部分-->
   <geometry><!--描述节点外观尺寸部分-->
      <box size="0.16 .128 .14"/><!--box说明此节点的形状为矩形,长宽高分别为0.16m,0.128m,1.14m-->
   </geometry>
   <material name="black"><!--节点外观颜色,文字描述部分,这里命名为black-->
      <color rgba="255 255 255 1"/><!--这里描述了节点的颜色,采用的rgba色彩系-->
   </material>
   </visual>
</link>

A link can also be a cylinder or a sphere. The following code from diego1.urdf describes one of the wheels:

<link name="tyer_master_right_motor">
  <visual>
  <geometry>
     <cylinder length=".068" radius="0.0075"></cylinder>
  </geometry>
  <material name="black">
     <color rgba="0 0 0 1"/>
  </material> 
  </visual>
</link>

In this snippet, the line

<cylinder length=".068" radius="0.0075"></cylinder>

describes this link as a cylinder with a radius of 0.0075 m and a height of 0.068 m.

The syntax for a sphere-shaped link is essentially the same as for boxes and cylinders, using the <sphere> keyword; it is not used in diego1.urdf, but a minimal example is shown below.
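
For completeness, a sphere-shaped link would look like the snippet below; this is only an illustration and does not appear in diego1.urdf (the link name and radius are made up):

<link name="ball_example">
<visual>
<geometry>
<sphere radius="0.05"/>
</geometry>
<material name="green">
<color rgba="0 1 0 1"/>
</material>
</visual>
</link>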

1.4 The joint element

A joint element describes the connection between two links; essentially it is the coordinate transform between them. Below is the joint between control_box and base_link:

<joint name="base_to_control_box" type="fixed">
   <parent link="base_link"/>
   <child link="control_box"/>
   <origin xyz="-0.07 0.0 0.0715"/>
</joint>

Explanation:

<joint name="base_to_control_box" type="fixed"><!--joint节点名称,type="fixed"表明是固定的连接关系-->
   <parent link="base_link"/><!--父节点为base_link-->
   <child link="control_box"/><!--子节点为control_link-->
   <origin xyz="-0.07 0.0 0.0715"/><!--在父节点坐标系内x轴移动-0.07m,y轴移动0.0m,z轴移动0.0715m-->
</joint>

And here is the joint between tyer_master_right_motor and the right_leg link:

<joint name="right_leg_to_master_right_motor" type="fixed">
   <axis xyz="0 0 1"/> 
   <parent link="right_leg"/>
   <child link="tyer_master_right_motor"/>
   <origin rpy="1.57075 0 0" xyz="0.03 0.0315 0.0175"/>
   <limit effort="100" velocity="100"/> 
   <joint_properties damping="0.0" friction="0.0"/>
</joint>

Explanation:

<joint name="right_leg_to_master_right_motor" type="fixed"><!--joint节点名称,type="continuous"表明是可以持续旋转的-->
   <axis xyz="0 0 1"/><!--旋转轴为z轴-->
   <parent link="right_leg"/><!--父节点为right_leg-->
   <child link="tyer_master_right_motor"/><!--子节点为tyer_master_right_motor-->
   <origin rpy="1.57075 0 0" xyz="0.03 0.0315 0.0175"/><!--子节点在父节点坐标系中在x轴旋转90度,x轴移动0.03m,y轴移动0.0315m,z轴移动0.0175m-->
   <limit effort="100" velocity="100"/><!--最大力度限制为100N-m,最大速度限制为100m/s-->
   <joint_properties damping="0.0" friction="0.0"/><!--指定damping为0,friction为0-->
</joint>

2. The Diego URDF

A URDF can also be produced with a professional 3D modeling tool and then exported, but for Diego 1# plus we describe the robot with the basic URDF primitives. This is not a perfectly accurate model, but it is simple and easier for beginners to understand. Below is the complete description file for Diego 1# plus:

<?xml version="1.0"?>
<robot name="diego1">
<link name="base_link">
<visual>
<geometry>
<box size="0.20 .15 .003"/>
</geometry>
</visual>
</link>

<link name="left_leg">
<visual>
<geometry>
<box size="0.14 .003 .095"/>
</geometry>
</visual>
</link>

<joint name="base_to_left_leg" type="fixed">
<parent link="base_link"/>
<child link="left_leg"/>
<origin xyz="0.0 0.075 -0.046"/>
</joint>

<link name="right_leg">
<visual>
<geometry>
<box size="0.14 .003 .095"/>
</geometry>
</visual>
</link>

<joint name="base_to_right_leg" type="fixed">
<parent link="base_link"/>
<child link="right_leg"/>
<origin xyz="0.0 -0.075 -0.046"/>
</joint>

<link name="left_leg_front">
<visual>
<geometry>
<box size="0.05 .003 .03"/>
</geometry>
</visual>
</link>

<joint name="base_to_left_leg_front" type="fixed">
<parent link="left_leg"/>
<child link="left_leg_front"/>
<origin xyz="0.095 0.0 -0.0025"/>
</joint>

<link name="left_leg_back">
<visual>
<geometry>
<box size="0.05 .003 .03"/>
</geometry>
</visual>
</link>

<joint name="base_to_left_leg_back" type="fixed">
<parent link="left_leg"/>
<child link="left_leg_back"/>
<origin xyz="-0.095 0.0 -0.0325"/>
</joint>

<link name="right_leg_front">
<visual>
<geometry>
<box size="0.05 .003 .03"/>
</geometry>
</visual>
</link>

<joint name="base_to_right_leg_front" type="fixed">
<parent link="right_leg"/>
<child link="right_leg_front"/>
<origin xyz="0.095 0.0 -0.0025"/>
</joint>

<link name="right_leg_back">
<visual>
<geometry>
<box size="0.05 .003 .03"/>
</geometry>
</visual>
</link>

<joint name="base_to_right_leg_back" type="fixed">
<parent link="right_leg"/>
<child link="right_leg_back"/>
<origin xyz="-0.095 0.0 -0.0325"/>
</joint>

<link name="tyer_master_right_axis">
<visual>
<geometry>
<cylinder length=".06" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_master_right_axix" type="continuous">
<axis xyz="0 0 1"/>
<parent link="right_leg"/>
<child link="tyer_master_right_axis"/>
<origin rpy="1.57075 0 0" xyz="0.03 -0.0315 0.0175"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_master_left_axis">
<visual>
<geometry>
<cylinder length=".06" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="left_leg_to_master_left_axis" type="continuous">
<axis xyz="0 0 1"/>
<parent link="left_leg"/>
<child link="tyer_master_left_axis"/>
<origin rpy="1.57075 0 0" xyz="0.03 0.0315 0.0175"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_master_right_motor">
<visual>
<geometry>
<cylinder length=".068" radius="0.0075"></cylinder>
</geometry>
<material name="black">
<color rgba="0 0 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_master_right_motor" type="fixed">
<axis xyz="0 0 1"/>
<parent link="right_leg"/>
<child link="tyer_master_right_motor"/>
<origin rpy="1.57075 0 0" xyz="0.03 0.0315 0.0175"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_master_left_motor">
<visual>
<geometry>
<cylinder length=".06" radius="0.0075"></cylinder>
</geometry>
<material name="black">
<color rgba="0 0 0 1"/>
</material>
</visual>
</link>

<joint name="left_leg_to_master_left_motor" type="fixed">
<axis xyz="0 0 1"/>
<parent link="left_leg"/>
<child link="tyer_master_left_motor"/>
<origin rpy="1.57075 0 0" xyz="0.03 -0.0315 0.0175"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave_right_axis1">
<visual>
<geometry>
<cylinder length=".06" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave_right_axix1" type="continuous">
<axis xyz="0 0 1"/>
<parent link="right_leg_front"/>
<child link="tyer_slave_right_axis1"/>
<origin rpy="1.57075 0 0" xyz="0.02 -0.0315 0.0"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave_right_axis2">
<visual>
<geometry>
<cylinder length=".06" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave_right_axix2" type="continuous">
<axis xyz="0 0 1"/>
<parent link="right_leg"/>
<child link="tyer_slave_right_axis2"/>
<origin rpy="1.57075 0 0" xyz="0.063 -0.0315 -0.0405"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave_right_axis3">
<visual>
<geometry>
<cylinder length=".06" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave_right_axix3" type="continuous">
<axis xyz="0 0 1"/>
<parent link="right_leg"/>
<child link="tyer_slave_right_axis3"/>
<origin rpy="1.57075 0 0" xyz="0.008 -0.0315 -0.0405"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave_right_axis4">
<visual>
<geometry>
<cylinder length=".06" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave_right_axix4" type="continuous">
<axis xyz="0 0 1"/>
<parent link="right_leg"/>
<child link="tyer_slave_right_axis4"/>
<origin rpy="1.57075 0 0" xyz="-0.047 -0.0315 -0.0405"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave_right_axis5">
<visual>
<geometry>
<cylinder length=".06" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave_right_axix5" type="continuous">
<axis xyz="0 0 1"/>
<parent link="right_leg_back"/>
<child link="tyer_slave_right_axis5"/>
<origin rpy="1.57075 0 0" xyz="-0.018 -0.0315 -0.008"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave_left_axis1">
<visual>
<geometry>
<cylinder length=".06" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave_left_axix1" type="continuous">
<axis xyz="0 0 1"/>
<parent link="left_leg_front"/>
<child link="tyer_slave_left_axis1"/>
<origin rpy="1.57075 0 0" xyz="0.02 0.0315 0.0"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave_left_axis2">
<visual>
<geometry>
<cylinder length=".06" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave_left_axix2" type="continuous">
<axis xyz="0 0 1"/>
<parent link="left_leg"/>
<child link="tyer_slave_left_axis2"/>
<origin rpy="1.57075 0 0" xyz="0.063 0.0315 -0.0405"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave_left_axis3">
<visual>
<geometry>
<cylinder length=".06" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave_left_axix3" type="continuous">
<axis xyz="0 0 1"/>
<parent link="left_leg"/>
<child link="tyer_slave_left_axis3"/>
<origin rpy="1.57075 0 0" xyz="0.008 0.0315 -0.0405"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave_left_axis4">
<visual>
<geometry>
<cylinder length=".06" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave_left_axix4" type="continuous">
<axis xyz="0 0 1"/>
<parent link="left_leg"/>
<child link="tyer_slave_left_axis4"/>
<origin rpy="1.57075 0 0" xyz="-0.047 0.0315 -0.0405"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave_left_axis5">
<visual>
<geometry>
<cylinder length=".06" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave_left_axix5" type="continuous">
<axis xyz="0 0 1"/>
<parent link="left_leg_back"/>
<child link="tyer_slave_left_axis5"/>
<origin rpy="1.57075 0 0" xyz="-0.018 0.0315 -0.008"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_master_left">
<visual>
<geometry>
<cylinder length=".03" radius="0.0275"></cylinder>
</geometry>
<material name="green">
<color rgba="0 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_master_left" type="fixed">
<axis xyz="0 0 1"/>
<parent link="tyer_master_left_axis"/>
<child link="tyer_master_left"/>
<origin rpy="0 0 0" xyz="0.0 0.0025 0.00"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_master_right">
<visual>
<geometry>
<cylinder length=".03" radius="0.0275"></cylinder>
</geometry>
<material name="green">
<color rgba="0 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_master_right" type="fixed">
<axis xyz="0 0 1"/>
<parent link="tyer_master_right_axis"/>
<child link="tyer_master_right"/>
<origin rpy="0 0 0" xyz="0.0 -0.0025 0.00"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave1_left">
<visual>
<geometry>
<cylinder length=".024" radius="0.0275"></cylinder>
</geometry>
<material name="green">
<color rgba="0 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave1_left" type="fixed">
<axis xyz="0 0 1"/>
<parent link="tyer_slave_left_axis1"/>
<child link="tyer_slave1_left"/>
<origin rpy="0 0 0" xyz="0.0 0.0025 0.00"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave2_left">
<visual>
<geometry>
<cylinder length=".024" radius="0.0275"></cylinder>
</geometry>
<material name="green">
<color rgba="0 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave2_left" type="fixed">
<axis xyz="0 0 1"/>
<parent link="tyer_slave_left_axis2"/>
<child link="tyer_slave2_left"/>
<origin rpy="0 0 0" xyz="0.0 0.0025 0.00"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave3_left">
<visual>
<geometry>
<cylinder length=".024" radius="0.0275"></cylinder>
</geometry>
<material name="green">
<color rgba="0 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave3_left" type="fixed">
<axis xyz="0 0 1"/>
<parent link="tyer_slave_left_axis3"/>
<child link="tyer_slave3_left"/>
<origin rpy="0 0 0" xyz="0.0 0.0025 0.00"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave4_left">
<visual>
<geometry>
<cylinder length=".024" radius="0.0275"></cylinder>
</geometry>
<material name="green">
<color rgba="0 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave4_left" type="fixed">
<axis xyz="0 0 1"/>
<parent link="tyer_slave_left_axis4"/>
<child link="tyer_slave4_left"/>
<origin rpy="0 0 0" xyz="0.0 0.0025 0.00"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave5_left">
<visual>
<geometry>
<cylinder length=".024" radius="0.0275"></cylinder>
</geometry>
<material name="green">
<color rgba="0 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave5_left" type="fixed">
<axis xyz="0 0 1"/>
<parent link="tyer_slave_left_axis5"/>
<child link="tyer_slave5_left"/>
<origin rpy="0 0 0" xyz="0.0 0.0025 0.00"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave1_right">
<visual>
<geometry>
<cylinder length=".024" radius="0.0275"></cylinder>
</geometry>
<material name="green">
<color rgba="0 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave1_right" type="fixed">
<axis xyz="0 0 1"/>
<parent link="tyer_slave_right_axis1"/>
<child link="tyer_slave1_right"/>
<origin rpy="0 0 0" xyz="0.0 -0.0025 0.00"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave2_right">
<visual>
<geometry>
<cylinder length=".024" radius="0.0275"></cylinder>
</geometry>
<material name="green">
<color rgba="0 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave2_right" type="fixed">
<axis xyz="0 0 1"/>
<parent link="tyer_slave_right_axis2"/>
<child link="tyer_slave2_right"/>
<origin rpy="0 0 0" xyz="0.0 -0.0025 0.00"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>


<link name="tyer_slave3_right">
<visual>
<geometry>
<cylinder length=".024" radius="0.0275"></cylinder>
</geometry>
<material name="green">
<color rgba="0 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave3_right" type="fixed">
<axis xyz="0 0 1"/>
<parent link="tyer_slave_right_axis3"/>
<child link="tyer_slave3_right"/>
<origin rpy="0 0 0" xyz="0.0 -0.0025 0.00"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave4_right">
<visual>
<geometry>
<cylinder length=".024" radius="0.0275"></cylinder>
</geometry>
<material name="green">
<color rgba="0 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave4_right" type="fixed">
<axis xyz="0 0 1"/>
<parent link="tyer_slave_right_axis4"/>
<child link="tyer_slave4_right"/>
<origin rpy="0 0 0" xyz="0.0 -0.0025 0.00"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="tyer_slave5_right">
<visual>
<geometry>
<cylinder length=".024" radius="0.0275"></cylinder>
</geometry>
<material name="green">
<color rgba="0 1 0 1"/>
</material>
</visual>
</link>

<joint name="right_leg_to_slave5_right" type="fixed">
<axis xyz="0 0 1"/>
<parent link="tyer_slave_right_axis5"/>
<child link="tyer_slave5_right"/>
<origin rpy="0 0 0" xyz="0.0 -0.0025 0.00"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="caterpilar_left1">
<visual>
<geometry>
<box size="0.10 .045 .0015"/>
</geometry>
<material name="black">
<color rgba="0 0 0 1"/>
</material>
</visual>
</link>

<joint name="tyer_master_left_motor_to_caterpilar_left1" type="fixed">
<parent link="tyer_master_left_motor"/>
<child link="caterpilar_left1"/>
<origin rpy="1.57075 0 -0.24178" xyz="0.05 0.02 -0.068"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="caterpilar_left2">
<visual>
<geometry>
<box size="0.16 .045 .0015"/>
</geometry>
<material name="black">
<color rgba="0 0 0 1"/>
</material>
</visual>
</link>

<joint name="tyer_master_left_motor_to_caterpilar_left2" type="fixed">
<parent link="tyer_master_left_motor"/>
<child link="caterpilar_left2"/>
<origin rpy="1.57075 0 0.38178" xyz="-0.080 -0.00 -0.068"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="caterpilar_left3">
<visual>
<geometry>
<box size="0.07 .045 .0015"/>
</geometry>
<material name="black">
<color rgba="0 0 0 1"/>
</material>
</visual>
</link>

<joint name="tyer_master_left_motor_to_caterpilar_left3" type="fixed">
<parent link="tyer_slave2_left"/>
<child link="caterpilar_left3"/>
<origin rpy="1.57075 0 0.64178" xyz="0.0405 -0.0065 -0.002"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="caterpilar_left4">
<visual>
<geometry>
<box size="0.18 .045 .0015"/>
</geometry>
<material name="black">
<color rgba="0 0 0 1"/>
</material>
</visual>
</link>

<joint name="tyer_master_left_motor_to_caterpilar_left4" type="fixed">
<parent link="tyer_slave2_left"/>
<child link="caterpilar_left4"/>
<origin rpy="1.57075 0 0" xyz="-0.09 -0.028 -0.002"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="caterpilar_right1">
<visual>
<geometry>
<box size="0.10 .045 .0015"/>
</geometry>
<material name="black">
<color rgba="0 0 0 1"/>
</material>
</visual>
</link>

<joint name="tyer_master_left_motor_to_caterpilar_right1" type="fixed">
<parent link="tyer_master_right_motor"/>
<child link="caterpilar_right1"/>
<origin rpy="1.57075 0 -0.24178" xyz="0.05 0.02 0.068"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="caterpilar_right2">
<visual>
<geometry>
<box size="0.16 .045 .0015"/>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="tyer_master_left_motor_to_caterpilar_right2" type="fixed">
<parent link="tyer_master_right_motor"/>
<child link="caterpilar_right2"/>
<origin rpy="1.57075 0 0.38178" xyz="-0.080 -0.00 0.068"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="caterpilar_right3">
<visual>
<geometry>
<box size="0.07 .045 .0015"/>
</geometry>
<material name="black">
<color rgba="0 0 0 1"/>
</material>
</visual>
</link>

<joint name="tyer_master_left_motor_to_caterpilar_right3" type="fixed">
<parent link="tyer_slave2_right"/>
<child link="caterpilar_right3"/>
<origin rpy="1.57075 0 0.64178" xyz="0.0405 -0.0065 0.002"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="caterpilar_right4">
<visual>
<geometry>
<box size="0.18 .045 .0015"/>
</geometry>
<material name="black">
<color rgba="0 0 0 1"/>
</material>
</visual>
</link>

<joint name="tyer_master_left_motor_to_caterpilar_right4" type="fixed">
<parent link="tyer_slave2_right"/>
<child link="caterpilar_right4"/>
<origin rpy="1.57075 0 0" xyz="-0.09 -0.028 0.002"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="arm_base">
<visual>
<geometry>
<box size="0.11 .11 .013"/>
</geometry>
<material name="black">
<color rgba="255 255 255 1"/>
</material>
</visual>
</link>

<joint name="base_to_arm_base" type="fixed">
<parent link="base_link"/>
<child link="arm_base"/>
<origin xyz="0.085 0.0 0.008"/>
</joint>

<link name="control_box">
<visual>
<geometry>
<box size="0.16 .128 .14"/>
</geometry>
<material name="black">
<color rgba="255 255 255 1"/>
</material>
</visual>
</link>

<joint name="base_to_control_box" type="fixed">
<parent link="base_link"/>
<child link="control_box"/>
<origin xyz="-0.07 0.0 0.0715"/>
</joint>
<!-- camera holder-->
<link name="camera_holder">
<visual>
<geometry>
<box size="0.043 .03 .59"/>
</geometry>
<material name="silver">
<color rgba="161 161 161 1"/>
</material>
</visual>
</link>

<joint name="control_box_to_camera_holder" type="fixed">
<parent link="control_box"/>
<child link="camera_holder"/>
<origin xyz="-0.0215 0.0 0.365"/>
</joint>

<!-- xtion-->
<link name="xtion_camera_base">
<visual>
<geometry>
<box size="0.038 .10 .025"/>
</geometry>
<material name="black">
<color rgba="0 0 1 1"/>
</material>
</visual>
</link>

<joint name="camera_holder_to_xtion_camera_base" type="fixed">
<parent link="camera_holder"/>
<child link="xtion_camera_base"/>
<origin xyz="0.0 0.0 0.3075"/>
</joint>

<link name="xtion_camera_base_neck">
<visual>
<geometry>
<box size="0.020 .020 .020"/>
</geometry>
<material name="black">
<color rgba="0 0 50 1"/>
</material>
</visual>
</link>

<joint name="xtion_camera_base_to_neck" type="fixed">
<parent link="xtion_camera_base"/>
<child link="xtion_camera_base_neck"/>
<origin xyz="0.009 0.0 0.0125"/>
</joint>

<link name="xtion_camera">
<visual>
<geometry>
<box size="0.038 .18 .027"/>
</geometry>
<material name="black">
<color rgba="0 0 1 1"/>
</material>
</visual>
</link>

<joint name="xtion_camera_base_neck_to_camera" type="fixed">
<parent link="xtion_camera_base_neck"/>
<child link="xtion_camera"/>
<origin xyz="0.0 0.0 0.0235"/>
</joint>

<link name="xtion_camera_eye_left">
<visual>
<geometry>
<cylinder length=".002" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="xtion_camera_to_eye_left" type="fixed">
<axis xyz="0 0 1"/>
<parent link="xtion_camera"/>
<child link="xtion_camera_eye_left"/>
<origin rpy="0 1.57075 0" xyz="0.0191 0.045 0.0"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="xtion_camera_eye_right">
<visual>
<geometry>
<cylinder length=".002" radius="0.005"></cylinder>
</geometry>
<material name="yellow">
<color rgba="1 1 0 1"/>
</material>
</visual>
</link>

<joint name="xtion_camera_to_eye_right" type="fixed">
<axis xyz="0 0 1"/>
<parent link="xtion_camera"/>
<child link="xtion_camera_eye_right"/>
<origin rpy="0 1.57075 0" xyz="0.0191 -0.045 0.0"/>
<limit effort="100" velocity="100"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>
<!--arm-->
<link name="arm_round_joint_stevo0">
<visual>
<geometry>
<cylinder length=".013" radius="0.05"></cylinder>
</geometry>
<material name="blue">
<color rgba="0 0 255 1"/>
</material>
</visual>
</link>

<joint name="arm_base_to_arm_round_joint_stevo0" type="revolute">
<axis xyz="0 0 1"/>
<parent link="arm_base"/>
<child link="arm_round_joint_stevo0"/>
<origin rpy="0 0 0" xyz="0.00 0.00 0.013"/>
<limit effort="100" velocity="100" lower="-1.5707963" upper="1.5707963"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="shoulder_1">
<visual>
<geometry>
<box size="0.065 .065 .037"/>
</geometry>
<material name="silver1">
<color rgba="232 232 232 1"/>
</material>
</visual>
</link>

<joint name="arm_round_joint_stevo0_to_shoulder_1" type="fixed">
<parent link="arm_round_joint_stevo0"/>
<child link="shoulder_1"/>
<origin xyz="0.0 0.0 0.025"/>
</joint>

<link name="shoulder_2">
<visual>
<geometry>
<box size="0.05 .065 .105"/>
</geometry>
<material name="silver">
<color rgba="232 232 232 1"/>
</material>
</visual>
</link>

<joint name="shoulder_1_to_shoulder_1" type="fixed">
<parent link="shoulder_1"/>
<child link="shoulder_2"/>
<origin rpy="0 0.785398 0" xyz="0.022 0.0 0.03915"/>
</joint>

<link name="arm_joint_stevo1">
<visual>
<geometry>
<cylinder length=".09" radius="0.025"></cylinder>
</geometry>
<material name="black">
<color rgba="255 255 255 1"/>
</material>
</visual>
</link>

<joint name="shoulder_2_to_arm_joint_stevo1" type="revolute">
<axis xyz="0 0 1"/>
<parent link="shoulder_2"/>
<child link="arm_joint_stevo1"/>
<origin rpy="1.5707963 0 0" xyz="0.00 0.00 0.025"/>
<limit effort="100" velocity="100" lower="-0.1899" upper="1.5707963"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="big_arm">
<visual>
<geometry>
<box size="0.05 .05 .13"/>
</geometry>
<material name="blue">
<color rgba="0 0 255 1"/>
</material>
</visual>
</link>

<joint name="arm_joint_stevo1_to_big_arm" type="fixed">
<parent link="arm_joint_stevo1"/>
<child link="big_arm"/>
<origin rpy="1.5707963 0 0" xyz="0.0 0.065 0.0"/>
</joint>

<link name="arm_joint_stevo2">
<visual>
<geometry>
<cylinder length=".06" radius="0.025"></cylinder>
</geometry>
<material name="black">
<color rgba="255 255 255 1"/>
</material>
</visual>
</link>

<joint name="big_arm_round_to_joint_stevo2" type="revolute">
<axis xyz="0 0 1"/>
<parent link="big_arm"/>
<child link="arm_joint_stevo2"/>
<origin rpy="1.5707963 0 0" xyz="0.00 0.00 -0.065"/>
<limit effort="100" velocity="100" lower="1.0" upper="2.5891"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="arm_joint_stevo3">
<visual>
<geometry>
<cylinder length=".12" radius="0.012"></cylinder>
</geometry>
<material name="blue">
<color rgba="0 0 255 1"/>
</material>
</visual>
</link>

<joint name="arm_joint_stevo2_to_arm_joint_stevo3" type="revolute">
<axis xyz="0 0 1"/>
<parent link="arm_joint_stevo2"/>
<child link="arm_joint_stevo3"/>
<origin rpy="1.5707963 -0.52359877 0" xyz="0.00 -0.06 0.00"/>
<limit effort="100" velocity="100" lower="-1.5707963" upper="1.5707963"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="wrist">
<visual>
<geometry>
<box size="0.045 .04 .06"/>
</geometry>
<material name="blue">
<color rgba="0 0 255 1"/>
</material>
</visual>
</link>

<joint name="arm_joint_stevo3_to_wrist" type="fixed">
<parent link="arm_joint_stevo3"/>
<child link="wrist"/>
<origin rpy="0 0 0" xyz="0.0 0.0 0.09"/>
</joint>

<link name="arm_joint_stevo4">
<visual>
<geometry>
<cylinder length=".05" radius="0.02"></cylinder>
</geometry>
<material name="black">
<color rgba="255 255 255 1"/>
</material>
</visual>
</link>

<joint name="wrist_to_arm_joint_stevo4" type="revolute">
<axis xyz="0 0 1"/>
<parent link="wrist"/>
<child link="arm_joint_stevo4"/>
<origin rpy="0 1.5707963 0" xyz="0.00 0.00 0.03"/>
<limit effort="100" velocity="100" lower="-1.5707963" upper="1.5707963"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="arm_joint_stevo5">
<visual>
<geometry>
<cylinder length=".04" radius="0.02"></cylinder>
</geometry>
<material name="black">
<color rgba="255 255 255 1"/>
</material>
</visual>
</link>
<joint name="arm_joint_stevo4_to_arm_joint_stevo5" type="revolute">
<axis xyz="0 0 1"/>
<parent link="arm_joint_stevo4"/>
<child link="arm_joint_stevo5"/>
<origin rpy="0 1.5707963 0" xyz="-0.02 0.00 0.00"/>
<limit effort="100" velocity="100" lower="-1.5707963" upper="1.5707963"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="hand">
<visual>
<geometry>
<box size="0.04 .06 .02"/>
</geometry>
<material name="blue">
<color rgba="0 0 255 1"/>
</material>
</visual>
</link>

<joint name="arm_joint_stevo5_to_hand" type="fixed">
<parent link="arm_joint_stevo5"/>
<child link="hand"/>
<origin rpy="0 0 0" xyz="0.0 0.0 -0.03"/>
</joint>

<link name="grip_joint_stevo6">
<visual>
<geometry>
<cylinder length=".05" radius="0.005"></cylinder>
</geometry>
<material name="black">
<color rgba="255 255 255 1"/>
</material>
</visual>
</link>
<joint name="hand_to_grip_joint_stevo6" type="revolute">
<axis xyz="0 0 1"/>
<parent link="hand"/>
<child link="grip_joint_stevo6"/>
<origin rpy="0 1.5707963 0" xyz="0.00 0.015 0.00"/>
<limit effort="100" velocity="100" lower="-0.1762" upper="0.8285"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="grip_joint_stevo7">
<visual>
<geometry>
<cylinder length=".05" radius="0.005"></cylinder>
</geometry>
<material name="black">
<color rgba="255 255 255 1"/>
</material>
</visual>
</link>
<joint name="hand_to_grip_joint_stevo7" type="revolute">
<axis xyz="0 0 1"/>
<parent link="hand"/>
<child link="grip_joint_stevo7"/>
<origin rpy="0 1.5707963 0" xyz="0.00 -0.015 0.00"/>
<limit effort="100" velocity="100" lower="-0.8285" upper="0.1584"/>
<joint_properties damping="0.0" friction="0.0"/>
</joint>

<link name="finger_left_big">
<visual>
<geometry>
<box size="0.04 .009 .045"/>
</geometry>
<material name="red">
<color rgba="194 194 194 1"/>
</material>
</visual>
</link>

<joint name="grip_joint_stevo6_to_finger_left_big" type="fixed">
<parent link="grip_joint_stevo6"/>
<child link="finger_left_big"/>
<origin rpy="0 0 0.34906585" xyz="0.03 0.0 0.0"/>
</joint>

<link name="finger_left_small">
<visual>
<geometry>
<box size="0.04 .009 .045"/>
</geometry>
<material name="red">
<color rgba="194 194 194 1"/>
</material>
</visual>
</link>

<joint name="finger_left_big_to_finger_left_small" type="fixed">
<parent link="finger_left_big"/>
<child link="finger_left_small"/>
<origin rpy="0 0 -0.34906585" xyz="0.04 -0.008 0.0"/>
</joint>

<link name="finger_right_big">
<visual>
<geometry>
<box size="0.04 .009 .045"/>
</geometry>
<material name="red">
<color rgba="194 194 194 1"/>
</material>
</visual>
</link>

<joint name="grip_joint_stevo7_to_finger_right_big" type="fixed">
<parent link="grip_joint_stevo7"/>
<child link="finger_right_big"/>
<origin rpy="0 0 -0.34906585" xyz="0.03 0.0 0.0"/>
</joint>
<link name="finger_right_small">
<visual>
<geometry>
<box size="0.04 .009 .045"/>
</geometry>
<material name="red">
<color rgba="194 194 194 1"/>
</material>
</visual>
</link>

<joint name="finger_right_big_to_finger_right_small" type="fixed">
<parent link="finger_right_big"/>
<child link="finger_right_small"/>
<origin rpy="0 0 0.34906585" xyz="0.04 0.008 0.0"/>
</joint>
</robot>

Lesson 18:vision-face Tracking

Based on the face detection and keypoint tracking of the previous two lessons, we can now go a step further and track the face: the robot follows the face and moves with it. The basic process is as follows:

  • Face detection: already covered in lesson 16 and reused here directly.
  • Feature acquisition: implemented in lesson 17, but improved a little here to get better results.
  • Face tracking: based on the position of the ROI, decide how to move the robot.

1. Face detection and feature acquisition

Please check face_tracker2.py on GitHub; it supports OpenCV 3.

FaceTracker inherits from the FaceDetector and LKTracker classes and makes the following major changes and extensions (a simplified sketch of the keypoint-dropping idea follows this list):

  • Rewrites the process_image function
  • The add_keypoints function looks for new feature points by calling goodFeaturesToTrack and checks their distance to the current feature-point cluster, so that only valid new points are added
  • The drop_keypoints function removes feature points that stray too far from the cluster's centre of gravity
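
The core of drop_keypoints is a statistical outlier test: compute the cluster centroid and mean squared error, then drop any point whose normalised squared error exceeds a threshold. Below is a minimal standalone sketch of that idea only; the helper name and the sample points are hypothetical, and the real method below also handles depth values and bookkeeping.

import numpy as np

def drop_outlier_keypoints(keypoints, std_err_threshold=2.5):
    # Drop keypoints whose squared distance from the cluster centroid,
    # normalised by the cluster's mean squared error, exceeds the threshold
    pts = np.float32(keypoints)
    if len(pts) == 0:
        return []
    centroid = pts.mean(axis=0)
    sq_err = ((pts - centroid) ** 2).sum(axis=1)
    mse = sq_err.mean()
    if mse == 0:
        return keypoints
    keep = sq_err / mse <= std_err_threshold
    return [tuple(p) for p in pts[keep]]

# Three tight points and one far outlier: only the outlier is dropped
print drop_outlier_keypoints([(10, 10), (11, 9), (9, 11), (200, 200)])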

1.1 code

#!/usr/bin/env python

import roslib
import rospy
import cv2
import numpy as np
from face_detector import FaceDetector
from lk_tracker import LKTracker

class FaceTracker(FaceDetector, LKTracker):
    def __init__(self, node_name):
        super(FaceTracker, self).__init__(node_name)
        
        self.n_faces = rospy.get_param("~n_faces", 1)
        self.show_text = rospy.get_param("~show_text", True)
        self.show_add_drop = rospy.get_param("~show_add_drop", False)
        self.feature_size = rospy.get_param("~feature_size", 1)
        self.use_depth_for_tracking = rospy.get_param("~use_depth_for_tracking", False)
        self.min_keypoints = rospy.get_param("~min_keypoints", 20)
        self.abs_min_keypoints = rospy.get_param("~abs_min_keypoints", 6)
        self.std_err_xy = rospy.get_param("~std_err_xy", 2.5) 
        self.pct_err_z = rospy.get_param("~pct_err_z", 0.42) 
        self.max_mse = rospy.get_param("~max_mse", 10000)
        self.add_keypoint_distance = rospy.get_param("~add_keypoint_distance", 10)
        self.add_keypoints_interval = rospy.get_param("~add_keypoints_interval", 1)
        self.drop_keypoints_interval = rospy.get_param("~drop_keypoints_interval", 1)
        self.expand_roi_init = rospy.get_param("~expand_roi", 1.02)
        self.expand_roi = self.expand_roi_init
        self.face_tracking = True

        self.frame_index = 0
        self.add_index = 0
        self.drop_index = 0
        self.keypoints = list()

        self.detect_box = None
        self.track_box = None
        
        self.grey = None
        self.prev_grey = None
        
    def process_image(self, cv_image):
        try:
            # Create a greyscale version of the image
            self.grey = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
            
            # Equalize the grey histogram to minimize lighting effects
            self.grey = cv2.equalizeHist(self.grey)
            
            # Step 1: Detect the face if we haven't already
            if self.detect_box is None:
                self.keypoints = list()
                self.track_box = None
                self.detect_box = self.detect_face(self.grey)
            else:
                # Step 2: If we aren't yet tracking keypoints, get them now
                if not self.track_box or not self.is_rect_nonzero(self.track_box):
                    self.track_box = self.detect_box
                    self.keypoints = self.get_keypoints(self.grey, self.track_box)
    
                # Store a copy of the current grey image used for LK tracking                   
                if self.prev_grey is None:
                    self.prev_grey = self.grey           
                  
                # Step 3: If we have keypoints, track them using optical flow
                self.track_box = self.track_keypoints(self.grey, self.prev_grey)
              
                # Step 4: Drop keypoints that are too far from the main cluster
                if self.frame_index % self.drop_keypoints_interval == 0 and len(self.keypoints) > 0:
                    ((cog_x, cog_y, cog_z), mse_xy, mse_z, score) = self.drop_keypoints(self.abs_min_keypoints, self.std_err_xy, self.max_mse)
                    
                    if score == -1:
                        self.detect_box = None
                        self.track_box = None
                        return cv_image
                  
                # Step 5: Add keypoints if the number is getting too low 
                if self.frame_index % self.add_keypoints_interval == 0 and len(self.keypoints) < self.min_keypoints:
                    self.expand_roi = self.expand_roi_init * self.expand_roi
                    self.add_keypoints(self.track_box)
                else:
                    self.frame_index += 1
                    self.expand_roi = self.expand_roi_init
            
            # Store a copy of the current grey image used for LK tracking            
            self.prev_grey = self.grey
              
            # Process any special keyboard commands for this module
            if 32 <= self.keystroke and self.keystroke < 128:
                cc = chr(self.keystroke).lower()
                if cc == 'c':
                    self.keypoints = []
                    self.track_box = None
                    self.detect_box = None
                elif cc == 'd':
                    self.show_add_drop = not self.show_add_drop
                    
        except AttributeError:
            pass
                    
        return cv_image

    def add_keypoints(self, track_box):
        # Look for any new keypoints around the current keypoints
        
        # Begin with a mask of all black pixels
        mask = np.zeros_like(self.grey)
        
        # Get the coordinates and dimensions of the current track box
        try:
            ((x,y), (w,h), a) = track_box
        except:
            try:
                x,y,w,h = track_box
            except:
                rospy.loginfo("Track box has shrunk to zero...")
                return
        
        x = int(x)
        y = int(y)
        
        # Expand the track box to look for new keypoints
        w_new = int(self.expand_roi * w)
        h_new = int(self.expand_roi * h)
        
        pt1 = (x - int(w_new / 2), y - int(h_new / 2))
        pt2 = (x + int(w_new / 2), y + int(h_new / 2))
        
        mask_box = ((x, y), (w_new, h_new), a)

        # Display the expanded ROI with a yellow rectangle
        if self.show_add_drop:
            cv2.rectangle(self.marker_image, pt1, pt2, (255, 255, 0))
                        
        # Create a filled white ellipse within the track_box to define the ROI
        cv2.ellipse(mask, mask_box, (255,255, 255), cv2.FILLED)
        
        if self.keypoints is not None:
            # Mask the current keypoints
            for x, y in [np.int32(p) for p in self.keypoints]:
                cv2.circle(mask, (x, y), 5, 0, -1)
         
        new_keypoints = cv2.goodFeaturesToTrack(self.grey, mask = mask, **self.gf_params)

        # Append new keypoints to the current list if they are not
        # too far from the current cluster      
        if new_keypoints is not None:
            for x, y in np.float32(new_keypoints).reshape(-1, 2):
                distance = self.distance_to_cluster((x,y), self.keypoints)
                if distance > self.add_keypoint_distance:
                    self.keypoints.append((x,y))
                    # Briefly display a blue disc where the new point is added
                    if self.show_add_drop:
                        cv2.circle(self.marker_image, (x, y), 3, (255, 255, 0, 0), cv2.FILLED, 2, 0)
                                    
            # Remove duplicate keypoints
            self.keypoints = list(set(self.keypoints))
        
    def distance_to_cluster(self, test_point, cluster):
        min_distance = 10000
        for point in cluster:
            if point == test_point:
                continue
            # Use L1 distance since it is faster than L2
            distance = abs(test_point[0] - point[0])  + abs(test_point[1] - point[1])
            if distance < min_distance:
                min_distance = distance
        return min_distance

    def drop_keypoints(self, min_keypoints, outlier_threshold, mse_threshold):
        sum_x = 0
        sum_y = 0
        sum_z = 0
        sse = 0
        keypoints_xy = self.keypoints
        keypoints_z = self.keypoints
        n_xy = len(self.keypoints)
        n_z = n_xy
        
        if self.use_depth_for_tracking:
            if self.depth_image is None:
                return ((0, 0, 0), 0, 0, -1)
        
        # If there are no keypoints left to track, start over
        if n_xy == 0:
            return ((0, 0, 0), 0, 0, -1)
        
        # Compute the COG (center of gravity) of the cluster
        for point in self.keypoints:
            sum_x = sum_x + point[0]
            sum_y = sum_y + point[1]
        
        mean_x = sum_x / n_xy
        mean_y = sum_y / n_xy
        
        if self.use_depth_for_tracking:
            for point in self.keypoints:
                try:
                    # cv2 has no get2D; index the depth image (a NumPy array) directly
                    z = self.depth_image[min(self.frame_height - 1, int(point[1])), min(self.frame_width - 1, int(point[0]))]
                except:
                    continue
                # Depth values can be NaN which should be ignored
                if isnan(z):
                    continue
                else:
                    sum_z = sum_z + z
                    
            mean_z = sum_z / n_z
            
        else:
            mean_z = -1
        
        # Compute the x-y MSE (mean squared error) of the cluster in the camera plane
        for point in self.keypoints:
            sse = sse + (point[0] - mean_x) * (point[0] - mean_x) + (point[1] - mean_y) * (point[1] - mean_y)
            #sse = sse + abs((point[0] - mean_x)) + abs((point[1] - mean_y))
        
        # Get the average over the number of feature points
        mse_xy = sse / n_xy

        # The MSE must be > 0 for any sensible feature cluster
        if mse_xy == 0 or mse_xy > mse_threshold:
            return ((0, 0, 0), 0, 0, -1)
        
        # Throw away the outliers based on the x-y variance
        max_err = 0
        for point in self.keypoints:
            std_err = ((point[0] - mean_x) * (point[0] - mean_x) + (point[1] - mean_y) * (point[1] - mean_y)) / mse_xy
            if std_err > max_err:
                max_err = std_err
            if std_err > outlier_threshold:
                keypoints_xy.remove(point)
                if self.show_add_drop:
                    # Briefly mark the removed points in red
                    cv2.circle(self.marker_image, (point[0], point[1]), 3, (0, 0, 255), cv2.FILLED, 2, 0)   
                try:
                    keypoints_z.remove(point)
                    n_z = n_z - 1
                except:
                    pass
                
                n_xy = n_xy - 1
                                
        # Now do the same for depth
        if self.use_depth_for_tracking:
            sse = 0
            for point in keypoints_z:
                try:
                    # cv2 has no get2D; index the depth image (a NumPy array) directly
                    z = self.depth_image[min(self.frame_height - 1, int(point[1])), min(self.frame_width - 1, int(point[0]))]
                    sse = sse + (z - mean_z) * (z - mean_z)
                except:
                    n_z = n_z - 1
            
            if n_z != 0:
                mse_z = sse / n_z
            else:
                mse_z = 0
            
            # Throw away the outliers based on depth using percent error 
            # rather than standard error since depth values can jump
            # dramatically at object boundaries
            for point in keypoints_z:
                try:
                    z = self.depth_image[min(self.frame_height - 1, int(point[1])), min(self.frame_width - 1, int(point[0]))]
                except:
                    continue
                try:
                    pct_err = abs(z - mean_z) / mean_z
                    if pct_err > self.pct_err_z:
                        keypoints_xy.remove(point)
                        if self.show_add_drop:
                            # Briefly mark the removed points in red
                            cv2.circle(self.marker_image, (point[0], point[1]), 2, (0, 0, 255), cv2.FILLED)  
                except:
                    pass
        else:
            mse_z = -1
        
        self.keypoints = keypoints_xy
               
        # Consider a cluster bad if we have fewer than min_keypoints left
        if len(self.keypoints) < min_keypoints:
            score = -1
        else:
            score = 1

        return ((mean_x, mean_y, mean_z), mse_xy, mse_z, score)
    
if __name__ == '__main__':
    try:
        node_name = "face_tracker"
        FaceTracker(node_name)
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down face tracker node."
        cv2.destroyAllWindows()

1.2 launch file

<launch>
   <node pkg="diego_vision" name="face_tracker2" type="face_tracker2.py" output="screen">

   <remap from="input_rgb_image" to="/camera/rgb/image_color" />
   <remap from="input_depth_image" to="/camera/depth/image" />
 
   <rosparam>
       use_depth_for_tracking: True
       min_keypoints: 20
       abs_min_keypoints: 6
       add_keypoint_distance: 10
       std_err_xy: 2.5
       pct_err_z: 0.42
       max_mse: 10000
       add_keypoints_interval: 1
       drop_keypoints_interval: 1
       show_text: True
       show_features: True
       show_add_drop: False
       feature_size: 1
       expand_roi: 1.02
       gf_maxCorners: 200
       gf_qualityLevel: 0.02
       gf_minDistance: 7
       gf_blockSize: 10
       gf_useHarrisDetector: False
       gf_k: 0.04
       haar_scaleFactor: 1.3
       haar_minNeighbors: 3
       haar_minSize: 30
       haar_maxSize: 150
   </rosparam>

   <param name="cascade_1" value="$(find diego_vision)/data/haar_detectors/haarcascade_frontalface_alt2.xml" />
   <param name="cascade_2" value="$(find diego_vision)/data/haar_detectors/haarcascade_frontalface_alt.xml" />
   <param name="cascade_3" value="$(find diego_vision)/data/haar_detectors/haarcascade_profileface.xml" />

</node>
</launch>

Diego1 plus uses a depth camera, so the use_depth_for_tracking parameter can be set to True; in practice the depth camera gives noticeably better results. Then, in two terminals, start the openni node and the face_tracker2.py node. A video window will appear, and if someone steps in front of the camera their face will be captured.

roslaunch diego_vision openni_node.launch

roslaunch diego_vision face_tracker2.launch

2.Face tracking

Please check object_tracker2.py on GitHub; it supports OpenCV 3.

The main functions of the ObjectTracker class are as follows (a simplified sketch of the control law follows this list):

  • Subscribes to the ROI message published by the FaceTracker class, which gives the position of the captured face on the screen
  • Subscribes to the /camera/depth/image message to determine the distance from the face to the camera, so the robot can follow it
  • Publishes a Twist message to control the movement of the robot
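
Stripped of the ROS plumbing, the control logic is two proportional terms: rotate in proportion to the horizontal offset of the ROI from the image centre, and drive forward or backward in proportion to the difference between the measured depth and goal_z. The following is a minimal standalone sketch only; the helper function and the sample numbers are illustrative, and the defaults mirror the node's internal parameter defaults rather than the launch file.

from math import copysign

def compute_cmd(roi_center_x, image_width, mean_z,
                goal_z=0.6, x_threshold=0.1, z_threshold=0.05,
                x_scale=2.0, z_scale=1.0,
                max_rot=2.0, min_rot=0.5, max_lin=0.3, min_lin=0.1):
    # Proportional control: rotate toward the ROI centre, drive toward goal_z
    linear = angular = 0.0
    percent_offset_x = (roi_center_x - image_width / 2.0) / (image_width / 2.0)
    if abs(percent_offset_x) > x_threshold:
        speed = percent_offset_x * x_scale
        angular = -copysign(max(min_rot, min(max_rot, abs(speed))), speed)
    if abs(mean_z - goal_z) > z_threshold:
        speed = (mean_z - goal_z) * z_scale
        linear = copysign(min(max_lin, max(min_lin, abs(speed))), speed)
    return linear, angular

# Face centred 80 px right of centre in a 640 px wide image, 1.2 m away
print compute_cmd(roi_center_x=400, image_width=640, mean_z=1.2)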

2.1 code

#!/usr/bin/env python

import roslib
import rospy
from sensor_msgs.msg import Image, RegionOfInterest, CameraInfo
from geometry_msgs.msg import Twist
from math import copysign, isnan
from cv_bridge import CvBridge, CvBridgeError
import numpy as np

class ObjectTracker():
    def __init__(self):
        rospy.init_node("object_tracker")
                        
        # Set the shutdown function (stop the robot)
        rospy.on_shutdown(self.shutdown)
        
        # How often should we update the robot's motion?
        self.rate = rospy.get_param("~rate", 10)
        r = rospy.Rate(self.rate) 
        
        # The maximum rotation speed in radians per second
        self.max_rotation_speed = rospy.get_param("~max_rotation_speed", 2.0)
        
        # The minimum rotation speed in radians per second
        self.min_rotation_speed = rospy.get_param("~min_rotation_speed", 0.5)
        
        # The x threshold (% of image width) indicates how far off-center
        # the ROI needs to be in the x-direction before we react
        self.x_threshold = rospy.get_param("~x_threshold", 0.1)
        
        # The maximum distance a target can be from the robot for us to track
        self.max_z = rospy.get_param("~max_z", 2.0)
        
        # Initialize the global ROI
        self.roi = RegionOfInterest()
        
        # The goal distance (in meters) to keep between the robot and the person
        self.goal_z = rospy.get_param("~goal_z", 0.6)
        
        # How far away from the goal distance (in meters) before the robot reacts
        self.z_threshold = rospy.get_param("~z_threshold", 0.05)
        
        # How far away from being centered (x displacement) on the person
        # before the robot reacts
        self.x_threshold = rospy.get_param("~x_threshold", 0.05)
        
        # How much do we weight the goal distance (z) when making a movement
        self.z_scale = rospy.get_param("~z_scale", 1.0)

        # How much do we weight x-displacement of the person when making a movement        
        self.x_scale = rospy.get_param("~x_scale", 2.0)
        
        # The max linear speed in meters per second
        self.max_linear_speed = rospy.get_param("~max_linear_speed", 0.3)
        
        # The minimum linear speed in meters per second
        self.min_linear_speed = rospy.get_param("~min_linear_speed", 0.1)

        # Publisher to control the robot's movement
        self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist)
        
        # Intialize the movement command
        self.move_cmd = Twist()
        
        # We will get the image width and height from the camera_info topic
        self.image_width = 0
        self.image_height = 0
        
        # We need cv_bridge to convert the ROS depth image to an OpenCV array
        self.cv_bridge = CvBridge()
        self.depth_array = None
        
        # Set flag to indicate when the ROI stops updating
        self.target_visible = False
        
        # Wait for the camera_info topic to become available
        rospy.loginfo("Waiting for camera_info topic...")
        rospy.wait_for_message('camera_info', CameraInfo)
        
        # Subscribe to the camera_info topic to get the image width and height
        rospy.Subscriber('camera_info', CameraInfo, self.get_camera_info)

        # Wait until we actually have the camera data
        while self.image_width == 0 or self.image_height == 0:
            rospy.sleep(1)
                    
        # Subscribe to the registered depth image
        rospy.Subscriber("depth_image", Image, self.convert_depth_image)
        
        # Wait for the depth image to become available
        rospy.wait_for_message('depth_image', Image)
        
        # Subscribe to the ROI topic and set the callback to update the robot's motion
        rospy.Subscriber('roi', RegionOfInterest, self.set_cmd_vel)
        
        # Wait until we have an ROI to follow
        rospy.loginfo("Waiting for an ROI to track...")
        rospy.wait_for_message('roi', RegionOfInterest)
        
        rospy.loginfo("ROI messages detected. Starting tracker...")
        
        # Begin the tracking loop
        while not rospy.is_shutdown():
            # If the target is not visible, stop the robot
            if not self.target_visible:
                self.move_cmd = Twist()
            else:
                # Reset the flag to False by default
                self.target_visible = False
                
            # Send the Twist command to the robot
            self.cmd_vel_pub.publish(self.move_cmd)
            
            # Sleep for 1/self.rate seconds
            r.sleep()

    def set_cmd_vel(self, msg):
        # If the ROI has a width or height of 0, we have lost the target
        if msg.width == 0 or msg.height == 0:
            return
        
        # If the ROI stops updating this next statement will not happen
        self.target_visible = True
        
        self.roi = msg
        
        # Compute the displacement of the ROI from the center of the image
        target_offset_x = msg.x_offset + msg.width / 2 - self.image_width / 2

        try:
            percent_offset_x = float(target_offset_x) / (float(self.image_width) / 2.0)
        except:
            percent_offset_x = 0
            
        # Intialize the movement command
        self.move_cmd = Twist()
        
        # Rotate the robot only if the displacement of the target exceeds the threshold
        if abs(percent_offset_x) > self.x_threshold:
            # Set the rotation speed proportional to the displacement of the target
            try:
                speed = percent_offset_x * self.x_scale
                self.move_cmd.angular.z = -copysign(max(self.min_rotation_speed,
                                            min(self.max_rotation_speed, abs(speed))), speed)
            except:
                pass
            
        # Now compute the depth component
        n_z = sum_z = mean_z = 0
         
        # Get the min/max x and y values from the ROI
        min_x = self.roi.x_offset
        max_x = min_x + self.roi.width
        min_y = self.roi.y_offset
        max_y = min_y + self.roi.height
        
        # Get the average depth value over the ROI
        for x in range(min_x, max_x):
            for y in range(min_y, max_y):
                try:
                    z = self.depth_array[y, x]
                except:
                    continue
                
                # Depth values can be NaN which should be ignored
                if isnan(z) or z > self.max_z:
                    continue
                else:
                    sum_z = sum_z + z
                    n_z += 1   
        try:
            mean_z = sum_z / n_z
            
            if mean_z < self.max_z and (abs(mean_z - self.goal_z) > self.z_threshold):
                speed = (mean_z - self.goal_z) * self.z_scale
                self.move_cmd.linear.x = copysign(min(self.max_linear_speed, max(self.min_linear_speed, abs(speed))), speed)
        except:
            pass
                    
    def convert_depth_image(self, ros_image):
        # Use cv_bridge() to convert the ROS image to OpenCV format
        try:
            # The depth image is a single-channel float32 image
            depth_image = self.cv_bridge.imgmsg_to_cv2(ros_image, "32FC1")
        except CvBridgeError, e:
            print e

        # Convert the depth image to a Numpy array
        self.depth_array = np.array(depth_image, dtype=np.float32)

    def get_camera_info(self, msg):
        self.image_width = msg.width
        self.image_height = msg.height

    def shutdown(self):
        rospy.loginfo("Stopping the robot...")
        self.cmd_vel_pub.publish(Twist())
        rospy.sleep(1)     

if __name__ == '__main__':
    try:
        ObjectTracker()
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("Object tracking node terminated.")

2.2 launch file

<launch>
<param name="/camera/driver/depth_registration" value="True" />

<node pkg="diego_vision" name="object_tracker" type="object_tracker2.py" output="screen">

<remap from="camera_info" to="/camera/depth/camera_info" />
<remap from="depth_image" to="/camera/depth/image" />

<rosparam>
rate: 10
max_z: 2.0
goal_z: 0.6
z_threshold: 0.05
x_threshold: 0.1
z_scale: 1.0
x_scale: 0.1
max_rotation_speed: 0.2
min_rotation_speed: 0.02
max_linear_speed: 0.2
min_linear_speed: 0.05
</rosparam>

</node>
</launch>

With the face_tracker2.py node already running, execute the following command to start the object_tracker2.py node; the robot will then follow the detected face as it moves.

roslaunch diego_vision object_tracker2.launch

The speed parameters in the launch file should not be set too high. Image processing lags behind the camera, and during that lag the robot keeps moving, so the overshoot is roughly the speed multiplied by the lag time; an over-large speed makes the robot turn past the face and then correct itself endlessly. For example, at a rotation speed of 2.0 rad/s and roughly 0.3 s of processing lag, the robot would turn about 0.6 rad past the target before the next correction arrives.

Lesson 17:vision-Keypoint Detection & Tracking

In the previous lesson we implemented face detection: when a face appears in front of the camera, the image window marks its position with a rectangle. The next step is to extract feature points from the ROI and track those keypoints. The code in this article was ported from the ROS By Example Volume 1 sample code, and all of it runs in the OpenCV 3 environment.

1.Feature acquisition

1.1 code

Please check good_features.py on GitHub.

OpenCV provides the goodFeaturesToTrack function to obtain feature points; we only need to set the ROI in the program and then call goodFeaturesToTrack to get the feature points inside that ROI.

The GoodFeatures class inherits from ROS2OpenCV3 and overrides the process_image function to process the image; it calls goodFeaturesToTrack in the get_keypoints function to get the feature points within detect_box.
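
Outside ROS, the call itself is straightforward. Here is a minimal sketch of goodFeaturesToTrack with an ROI mask; the image path and ROI values are placeholders, and the parameters match the launch file further below.

import cv2
import numpy as np

frame = cv2.imread("test.jpg")                     # placeholder image path
grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
grey = cv2.equalizeHist(grey)

# Restrict the search to an ROI by masking everything else out, as get_keypoints does
mask = np.zeros_like(grey)
x, y, w, h = 100, 100, 200, 200                    # placeholder ROI
mask[y:y+h, x:x+w] = 255

corners = cv2.goodFeaturesToTrack(grey, maxCorners=200, qualityLevel=0.02,
                                  minDistance=7, mask=mask, blockSize=10)
if corners is not None:
    for cx, cy in np.float32(corners).reshape(-1, 2):
        cv2.circle(frame, (int(cx), int(cy)), 2, (0, 255, 0), -1)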

#!/usr/bin/env python

import roslib
import rospy
import cv2
from ros2opencv3 import ROS2OpenCV3
import numpy as np

class GoodFeatures(ROS2OpenCV3):
    def __init__(self, node_name): 
        super(GoodFeatures, self).__init__(node_name)
          
        # Do we show text on the display?
        self.show_text = rospy.get_param("~show_text", True)
        
        # How big should the feature points be (in pixels)?
        self.feature_size = rospy.get_param("~feature_size", 1)
        
        # Good features parameters
        self.gf_maxCorners = rospy.get_param("~gf_maxCorners", 200)
        self.gf_qualityLevel = rospy.get_param("~gf_qualityLevel", 0.02)
        self.gf_minDistance = rospy.get_param("~gf_minDistance", 7)
        self.gf_blockSize = rospy.get_param("~gf_blockSize", 10)
        self.gf_useHarrisDetector = rospy.get_param("~gf_useHarrisDetector", True)
        self.gf_k = rospy.get_param("~gf_k", 0.04)
        
        # Store all parameters together for passing to the detector
        self.gf_params = dict(maxCorners = self.gf_maxCorners, 
                       qualityLevel = self.gf_qualityLevel,
                       minDistance = self.gf_minDistance,
                       blockSize = self.gf_blockSize,
                       useHarrisDetector = self.gf_useHarrisDetector,
                       k = self.gf_k)

        # Initialize key variables
        self.keypoints = list()
        self.detect_box = None
        self.mask = None
        
    def process_image(self, cv_image):
        # If the user has not selected a region, just return the image
        if not self.detect_box:
            return cv_image

        # Create a greyscale version of the image
        grey = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        
        # Equalize the histogram to reduce lighting effects
        grey = cv2.equalizeHist(grey)

        # Get the good feature keypoints in the selected region
        keypoints = self.get_keypoints(grey, self.detect_box)
        
        # If we have points, display them
        if keypoints is not None and len(keypoints) > 0:
            for x, y in keypoints:
                cv2.circle(self.marker_image, (x, y), self.feature_size, (0, 255, 0, 0), cv2.FILLED, 8, 0)
        # Process any special keyboard commands
        if 32 <= self.keystroke and self.keystroke < 128:
            cc = chr(self.keystroke).lower()
            if cc == 'c':
                # Clear the current keypoints
                keypoints = list()
                self.detect_box = None
                                
        return cv_image        

    def get_keypoints(self, input_image, detect_box):
        # Initialize the mask with all black pixels
        self.mask = np.zeros_like(input_image)
 
        # Get the coordinates and dimensions of the detect_box
        try:
            x, y, w, h = detect_box
        except: 
            return None
        
        # Set the selected rectangle within the mask to white
        self.mask[y:y+h, x:x+w] = 255

        # Compute the good feature keypoints within the selected region
        keypoints = list()
        kp = cv2.goodFeaturesToTrack(input_image, mask = self.mask, **self.gf_params)
        if kp is not None and len(kp) > 0:
            for x, y in np.float32(kp).reshape(-1, 2):
                keypoints.append((x, y))
                
        return keypoints

if __name__ == '__main__':
    try:
        node_name = "good_features"
        GoodFeatures(node_name)
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down the Good Features node."
        cv2.destroyAllWindows()

1.2 launch

<launch>
 <node pkg="diego_vision" name="good_features" type="good_features.py" output="screen">

 <remap from="input_rgb_image" to="/camera/rgb/image_color" />
 
 <rosparam>
 gf_maxCorners: 200
 gf_qualityLevel: 0.02
 gf_minDistance: 7
 gf_blockSize: 10
 gf_useHarrisDetector: False
 gf_k: 0.04
 feature_size: 1
 show_text: True
 </rosparam>
 
 </node>
</launch>

2.Tracking Keypoints

2.1 code

Please check lk_tracker.py on GitHub.

The calcOpticalFlowPyrLK function in OpenCV matches feature points across consecutive video frames; the algorithm it uses is the Lucas-Kanade method.

The LKTracker class inherits from GoodFeatures, rewrites the process_image function, and calls the calcOpticalFlowPyrLK method in the track_keypoints function to track the keypoints.
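
Here is a minimal, non-ROS sketch of calcOpticalFlowPyrLK with the same forward-backward check used in track_keypoints; it assumes a camera at index 0, and the parameters mirror the lk_params used below.

import cv2

lk_params = dict(winSize=(10, 10), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.01))

cap = cv2.VideoCapture(0)                          # assumes a camera at index 0
ret, frame = cap.read()
prev_grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(prev_grey, maxCorners=200, qualityLevel=0.02, minDistance=7)

ret, frame = cap.read()
grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

if p0 is not None:
    # Forward flow, then backward flow; keep points whose round-trip error is under 1 pixel
    p1, st, err = cv2.calcOpticalFlowPyrLK(prev_grey, grey, p0, None, **lk_params)
    p0r, st, err = cv2.calcOpticalFlowPyrLK(grey, prev_grey, p1, None, **lk_params)
    good = abs(p0 - p0r).reshape(-1, 2).max(-1) < 1
    tracked = p1.reshape(-1, 2)[good]
    print "tracked %d of %d keypoints" % (len(tracked), len(p0))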

#!/usr/bin/env python

import roslib
import rospy
import cv2
import numpy as np
from good_features import GoodFeatures

class LKTracker(GoodFeatures):
    def __init__(self, node_name):
        super(LKTracker, self).__init__(node_name)
        
        self.show_text = rospy.get_param("~show_text", True)
        self.feature_size = rospy.get_param("~feature_size", 1)
                
        # LK parameters
        self.lk_winSize = rospy.get_param("~lk_winSize", (10, 10))
        self.lk_maxLevel = rospy.get_param("~lk_maxLevel", 2)
        self.lk_criteria = rospy.get_param("~lk_criteria", (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.01))
        
        self.lk_params = dict( winSize  = self.lk_winSize, 
                  maxLevel = self.lk_maxLevel, 
                  criteria = self.lk_criteria)    
        
        self.detect_interval = 1
        self.keypoints = None

        self.detect_box = None
        self.track_box = None
        self.mask = None
        self.grey = None
        self.prev_grey = None
            
    def process_image(self, cv_image):
        # If we don't yet have a detection box (drawn by the user
        # with the mouse), keep waiting
        if self.detect_box is None:
            return cv_image

        # Create a greyscale version of the image
        self.grey = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        
        # Equalize the grey histogram to minimize lighting effects
        self.grey = cv2.equalizeHist(self.grey)
        
        # If we haven't yet started tracking, set the track box to the
        # detect box and extract the keypoints within it
        if self.track_box is None or not self.is_rect_nonzero(self.track_box):
            self.track_box = self.detect_box
            self.keypoints = self.get_keypoints(self.grey, self.track_box)
        
        else:
            if self.prev_grey is None:
                self.prev_grey = self.grey
    
            # Now that have keypoints, track them to the next frame
            # using optical flow
            self.track_box = self.track_keypoints(self.grey, self.prev_grey)

        # Process any special keyboard commands for this module
        if 32 <= self.keystroke and self.keystroke < 128:
            cc = chr(self.keystroke).lower()
            if cc == 'c':
                # Clear the current keypoints
                self.keypoints = None
                self.track_box = None
                self.detect_box = None
                
        self.prev_grey = self.grey
                
        return cv_image               
                    
    def track_keypoints(self, grey, prev_grey):
        # We are tracking points between the previous frame and the
        # current frame
        img0, img1 = prev_grey, grey
        
        # Reshape the current keypoints into a numpy array required
        # by calcOpticalFlowPyrLK()
        p0 = np.float32([p for p in self.keypoints]).reshape(-1, 1, 2)
        
        # Calculate the optical flow from the previous frame to the current frame
        p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **self.lk_params)
        
        # Do the reverse calculation: from the current frame to the previous frame
        try:
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **self.lk_params)
            
            # Compute the distance between corresponding points in the two flows
            d = abs(p0-p0r).reshape(-1, 2).max(-1)
            
            # If the distance between pairs of points is < 1 pixel, set
            # a value in the "good" array to True, otherwise False
            good = d < 1
        
            # Initialize a list to hold new keypoints
            new_keypoints = list()
            
            # Cycle through all current and new keypoints and only keep
            # those that satisfy the "good" condition above
            for (x, y), good_flag in zip(p1.reshape(-1, 2), good):
                if not good_flag:
                    continue
                new_keypoints.append((x, y))
                
                # Draw the keypoint on the image
                cv2.circle(self.marker_image, (x, y), self.feature_size, (0, 255, 0, 0), cv2.FILLED, 8, 0)
            # Set the global keypoint list to the new list    
            self.keypoints = new_keypoints
            
            # Convert the remaining keypoints to the array format OpenCV expects
            keypoints_array = np.float32([p for p in self.keypoints]).reshape(-1, 1, 2)

            # If we have enough points, find the best fit ellipse around them
            if len(self.keypoints) > 6:
                track_box = cv2.fitEllipse(keypoints_array)
            else:
                # Otherwise, find the best fitting rectangle
                track_box = cv2.boundingRect(keypoints_array)
        except:
            track_box = None
                        
        return track_box
    
if __name__ == '__main__':
    try:
        node_name = "lk_tracker"
        LKTracker(node_name)
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down LK Tracking node."
        cv2.destroyAllWindows()
    

2.2 launch

<launch>
   <node pkg="diego_vision" name="lk_tracker" type="lk_tracker.py" output="screen">
   <remap from="input_rgb_image" to="/camera/rgb/image_color" />

   <rosparam>
       show_text: True
       gf_maxCorners: 200
       gf_qualityLevel: 0.02
       gf_minDistance: 7
       gf_blockSize: 10
       gf_useHarrisDetector: False
       gf_k: 0.04
       feature_size: 1
   </rosparam>

   </node>
</launch>

3.test

open the Xtion depth camera

roslaunch diego_vision openni_node.launch

start lk_tracker

roslaunch diego_vision lk_tracker.launch

A video window will appear after the node starts. We can draw an ROI with the mouse, and keypoints will then be found inside the ROI; when we move the object, the rectangular box moves along with it.

Lesson 16:vision-face detection

Machine vision is a very complex subject that requires fairly specialised knowledge of computer graphics. ROS By Example Volume 1 provides a good introductory example, so we will begin Diego's machine vision tour there and then gradually add more complex content.

We first implement face detection, feature point acquisition, face tracking, and robot following. The code used in the book depends on OpenCV 2.4, but Diego runs ROS Kinetic, which installs OpenCV 3 by default, so I have ported the source code to compile in the OpenCV 3 environment. For details, please see the diego_vision directory of Diego1 plus on GitHub.

1.About image format

ROS has its own image format, but it is more common to use OpenCV for image and video processing, because it encapsulates a large number of advanced image algorithms and is open source. So the first step is to convert the ROS image format to the OpenCV image format. ROS provides the cv_bridge package for exactly this conversion, and it supports OpenCV 3. If you installed the ros-kinetic-desktop-full version, the package is already installed; otherwise it needs to be installed separately. In that case it is recommended to install it from source with git clone, because installing it with apt-get will also pull in OpenCV 2.4, which causes conflicts. For how to use cv_bridge, we can refer to the official example code.
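
A minimal sketch of the round trip through cv_bridge is shown below; the topic names are assumptions and would normally be remapped in a launch file, as the nodes in this series do.

#!/usr/bin/env python

import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError

bridge = CvBridge()

def image_callback(msg):
    try:
        frame = bridge.imgmsg_to_cv2(msg, "bgr8")      # ROS Image -> OpenCV BGR array
    except CvBridgeError, e:
        rospy.logerr(e)
        return
    grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)     # any OpenCV processing
    pub.publish(bridge.cv2_to_imgmsg(grey, "mono8"))   # OpenCV array -> ROS Image

rospy.init_node("cv_bridge_example")
pub = rospy.Publisher("output_image", Image, queue_size=1)
rospy.Subscriber("/camera/rgb/image_color", Image, image_callback)
rospy.spin()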

2.ros2opencv3.py

The original ros2opencv2.py does not support OpenCV 3.0, so I ported it to OpenCV 3; the file is renamed ros2opencv3.py and the class name is changed to ROS2OpenCV3. The source code is on GitHub. The main functions are described below, and a simplified skeleton follows the list:

  • __init__: ROS2OpenCV3 initialization function; sets the relevant parameters, initializes the ROS node, and subscribes to the RGB and depth image topics
  • on_mouse_click: mouse event handler; the user can draw an ROI with the mouse in the video window
  • image_callback: the callback function for RGB images
  • depth_callback: the callback function for depth images
  • convert_image: converts RGB images from ROS format to OpenCV format
  • convert_depth_image: converts depth images from ROS format to OpenCV format
  • publish_roi: publishes the ROI (Region of Interest) message, which describes the area we are interested in
  • process_image: RGB image processing function; no processing is done here
  • process_depth_image: depth image processing function; no processing is done here
  • display_selection: draws a rectangle on the video around the area selected by the user, and a small dot where the user clicks on the image
  • is_rect_nonzero: determines whether a rectangular area is valid
  • cvBox2D_to_cvRect: converts the cvBox2D data structure to cvRect
  • cvRect_to_cvBox2D: converts the cvRect data structure to cvBox2D
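
The full ros2opencv3.py is on GitHub; as an orientation aid, a heavily simplified skeleton of how these pieces fit together is sketched below. It is illustrative only, not the actual class, which also handles mouse ROI selection, overlays, and parameterisation.

#!/usr/bin/env python

import rospy
import cv2
from sensor_msgs.msg import Image, RegionOfInterest
from cv_bridge import CvBridge

class ROS2OpenCV3Skeleton(object):
    def __init__(self, node_name):
        rospy.init_node(node_name)
        self.bridge = CvBridge()
        self.depth_image = None
        self.roi_pub = rospy.Publisher("roi", RegionOfInterest, queue_size=1)
        rospy.Subscriber("input_rgb_image", Image, self.image_callback)
        rospy.Subscriber("input_depth_image", Image, self.depth_callback)

    def image_callback(self, msg):
        frame = self.convert_image(msg)          # ROS -> OpenCV
        processed = self.process_image(frame)    # overridden by subclasses
        cv2.imshow("output", processed)
        cv2.waitKey(3)

    def depth_callback(self, msg):
        self.depth_image = self.convert_depth_image(msg)

    def convert_image(self, msg):
        return self.bridge.imgmsg_to_cv2(msg, "bgr8")

    def convert_depth_image(self, msg):
        return self.bridge.imgmsg_to_cv2(msg, "32FC1")

    def process_image(self, frame):
        # Subclasses such as FaceDetector and GoodFeatures override this
        return frame

    def publish_roi(self, x, y, w, h):
        self.roi_pub.publish(RegionOfInterest(x_offset=x, y_offset=y, width=w, height=h))

if __name__ == '__main__':
    ROS2OpenCV3Skeleton("ros2opencv3_skeleton")
    rospy.spin()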

3.Face detection

3.1 The approach to face detection

The general steps in face detection are: load the Haar feature classifiers, convert the camera frame to greyscale, and run the classifiers over it.

The classifier here is a Haar feature classifier. A Haar face classifier is an XML file describing the Haar feature values of a face; the OpenCV algorithm matches the classifier against the data in the image and thereby detects the face. We use three classifiers here:

  • haarcascade_frontalface_alt2.xml
  • haarcascade_frontalface_alt.xml
  • haarcascade_profileface.xml

After reading a frame from the camera video, we first convert the image to greyscale and equalize its histogram, and then call OpenCV's detectMultiScale function to detect the face.
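
Stripped of ROS, the detection step on a single image looks like the sketch below; the image path is a placeholder, and the cascade file is one of the three listed above (found under diego_vision/data/haar_detectors in the launch file).

import cv2

cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
frame = cv2.imread("test.jpg")                     # placeholder image path

grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
grey = cv2.equalizeHist(grey)

faces = cascade.detectMultiScale(grey, scaleFactor=1.3, minNeighbors=3,
                                 minSize=(30, 30), maxSize=(150, 150))
for (x, y, w, h) in faces:
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)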

3.2 code

The code has been ported to OpenCV 3; please see GitHub.

3.2.1 face_detector.py

#!/usr/bin/env python

import rospy
import cv2
from ros2opencv3 import ROS2OpenCV3

class FaceDetector(ROS2OpenCV3):
    def __init__(self, node_name):
        super(FaceDetector, self).__init__(node_name)
                  
        # Get the paths to the cascade XML files for the Haar detectors.
        # These are set in the launch file.
        cascade_1 = rospy.get_param("~cascade_1", "")
        cascade_2 = rospy.get_param("~cascade_2", "")
        cascade_3 = rospy.get_param("~cascade_3", "")
        
        # Initialize the Haar detectors using the cascade files
        self.cascade_1 = cv2.CascadeClassifier(cascade_1)
        self.cascade_2 = cv2.CascadeClassifier(cascade_2)
        self.cascade_3 = cv2.CascadeClassifier(cascade_3)
        
        # Set cascade parameters that tend to work well for faces.
        # Can be overridden in launch file
        self.haar_scaleFactor = rospy.get_param("~haar_scaleFactor", 1.3)
        self.haar_minNeighbors = rospy.get_param("~haar_minNeighbors", 3)
        self.haar_minSize = rospy.get_param("~haar_minSize", 30)
        self.haar_maxSize = rospy.get_param("~haar_maxSize", 150)
        
        # Store all parameters together for passing to the detector
        self.haar_params = dict(scaleFactor = self.haar_scaleFactor,
                                minNeighbors = self.haar_minNeighbors,
                                flags = cv2.CASCADE_DO_CANNY_PRUNING,
                                minSize = (self.haar_minSize, self.haar_minSize),
                                maxSize = (self.haar_maxSize, self.haar_maxSize)
                                )
                        
        # Do we show text on the display?
        self.show_text = rospy.get_param("~show_text", True)
        
        # Intialize the detection box
        self.detect_box = None
        
        # Track the number of hits and misses
        self.hits = 0
        self.misses = 0
        self.hit_rate = 0

    def process_image(self, cv_image):
        # Create a greyscale version of the image
        grey = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        
        # Equalize the histogram to reduce lighting effects
        grey = cv2.equalizeHist(grey)
            
        # Attempt to detect a face
        self.detect_box = self.detect_face(grey)
        
        # Did we find one?
        if self.detect_box is not None:
            self.hits += 1
        else:
            self.misses += 1
        
        # Keep tabs on the hit rate so far
        self.hit_rate = float(self.hits) / (self.hits + self.misses)
                    
        return cv_image


    def detect_face(self, input_image):
        # First check one of the frontal templates
        if self.cascade_1:
            faces = self.cascade_1.detectMultiScale(input_image, **self.haar_params)
                                         
        # If that fails, check the profile template
        if len(faces) == 0 and self.cascade_3:
            faces = self.cascade_3.detectMultiScale(input_image, **self.haar_params)

        # If that also fails, check the other frontal template
        if len(faces) == 0 and self.cascade_2:
            faces = self.cascade_2.detectMultiScale(input_image, **self.haar_params)

        # The faces variable holds a list of face boxes.
        # If one or more faces are detected, return the first one.  
        if len(faces) > 0:
            face_box = faces[0]
        else:
            # If no faces were detected, print the "LOST FACE" message on the screen
            if self.show_text:
                font_face = cv2.FONT_HERSHEY_SIMPLEX
                font_scale = 0.5
                cv2.putText(self.marker_image, "LOST FACE!", 
                            (int(self.frame_size[0] * 0.65), int(self.frame_size[1] * 0.9)), 
                            font_face, font_scale, (255, 50, 50))
            face_box = None

        # Display the hit rate so far
        if self.show_text:
            font_face = cv2.FONT_HERSHEY_SIMPLEX
            font_scale = 0.5
            cv2.putText(self.marker_image, "Hit Rate: " + 
                        str(trunc(self.hit_rate, 2)), 
                        (20, int(self.frame_size[1] * 0.9)), 
                        font_face, font_scale, (255, 255, 0))
        
        return face_box
        
def trunc(f, n):
    '''Truncates/pads a float f to n decimal places without rounding'''
    slen = len('%.*f' % (n, f))
    return float(str(f)[:slen])
    
if __name__ == '__main__':
    try:
        node_name = "face_detector"
        FaceDetector(node_name)
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down face detector node."
        cv2.destroyAllWindows()

3.2.2 Code explanation

The FaceDetector class inherits from ROS2OpenCV3 and rewrites process_image to convert the image to greyscale; the face detection itself is done by the detect_face function.

Loading the classifiers: cascade_1, cascade_2 and cascade_3 define the locations of the three classifier files.

        
        # Get the paths to the cascade XML files for the Haar detectors.
        # These are set in the launch file.
        cascade_1 = rospy.get_param("~cascade_1", "")
        cascade_2 = rospy.get_param("~cascade_2", "")
        cascade_3 = rospy.get_param("~cascade_3", "")
        
        # Initialize the Haar detectors using the cascade files
        self.cascade_1 = cv2.CascadeClassifier(cascade_1)
        self.cascade_2 = cv2.CascadeClassifier(cascade_2)
        self.cascade_3 = cv2.CascadeClassifier(cascade_3)

Greyscale conversion in the process_image function

        # Create a greyscale version of the image
        grey = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        
        # Equalize the histogram to reduce lighting effects
        grey = cv2.equalizeHist(grey)

Face detection in the detect_face function

        if self.cascade_1:
            faces = self.cascade_1.detectMultiScale(input_image, **self.haar_params)
                                         
        # If that fails, check the profile template
        if len(faces) == 0 and self.cascade_3:
            faces = self.cascade_3.detectMultiScale(input_image, **self.haar_params)

        # If that also fails, check the other frontal template
        if len(faces) == 0 and self.cascade_2:
            faces = self.cascade_2.detectMultiScale(input_image, **self.haar_params)

Display information in the detect_face function

        # The faces variable holds a list of face boxes.
        # If one or more faces are detected, return the first one.  
        if len(faces) > 0:
            face_box = faces[0]
        else:
            # If no faces were detected, print the "LOST FACE" message on the screen
            if self.show_text:
                font_face = cv2.FONT_HERSHEY_SIMPLEX
                font_scale = 0.5
                cv2.putText(self.marker_image, "LOST FACE!", 
                            (int(self.frame_size[0] * 0.65), int(self.frame_size[1] * 0.9)), 
                            font_face, font_scale, (255, 50, 50))
            face_box = None

        # Display the hit rate so far
        if self.show_text:
            font_face = cv2.FONT_HERSHEY_SIMPLEX
            font_scale = 0.5
            cv2.putText(self.marker_image, "Hit Rate: " + 
                        str(trunc(self.hit_rate, 2)), 
                        (20, int(self.frame_size[1] * 0.9)), 
                        font_face, font_scale, (255, 255, 0))
        
        return face_box

The image_callback function of the parent class ROS2OpenCV3 draws the detected face position with a rectangular wireframe.

4. start the face detection node

First make sure the Xtion camera has been started; you can start it with the following command:

roslaunch diego_vision openni_node.launch

Please check face_detector.launch on GitHub.

<launch>
   <node pkg="diego_vision" name="face_detector" type="face_detector.py" output="screen">

   <remap from="input_rgb_image" to="/camera/rgb/image_color" />

   <rosparam>
       haar_scaleFactor: 1.3
       haar_minNeighbors: 3
       haar_minSize: 30
       haar_maxSize: 150
   </rosparam>

   <param name="cascade_1" value="$(find diego_vision)/data/haar_detectors/haarcascade_frontalface_alt2.xml" />
   <param name="cascade_2" value="$(find diego_vision)/data/haar_detectors/haarcascade_frontalface_alt.xml" />
   <param name="cascade_3" value="$(find diego_vision)/data/haar_detectors/haarcascade_profileface.xml" />

</node>
</launch>

start the face detector node

roslaunch diego_vision face_detector.launch

A video window will appear after the node starts; when the camera captures a face, it is detected and marked with a rectangle.
