Lesson 24: Diego 1# 4WD - No.2: Motor control

For motor control we need to modify two main parts:

  • The driver, so that it can control four motors simultaneously
  • The PID speed control. Experiments show that even motors of the same model run at different speeds for the same PWM value, so each of the four motors needs its own PID speed loop.

1. Motor driver

1.1 Changes to DualL298PMotorShield4WD.h
Add PWM and direction control pin definitions for the four motors, plus a speed-setting function for each motor.

#ifndef DualL298PMotorShield4WD_h
#define DualL298PMotorShield4WD_h

#include <Arduino.h>

class DualL298PMotorShield4WD
{
  public:  
    // CONSTRUCTORS
    DualL298PMotorShield4WD(); // Default pin selection.
    
    // PUBLIC METHODS
    void init(); // Initialize TIMER 1, set the PWM to 20kHz. 
    void setM1Speed(int speed); // Set speed for M1.
    void setM2Speed(int speed); // Set speed for M2.
    void setM3Speed(int speed); // Set speed for M3.
    void setM4Speed(int speed); // Set speed for M4.
    void setSpeeds(int m1Speed, int m2Speed, int m3Speed, int m4Speed); // Set speeds for all four motors M1-M4.
    
  private:
  
    // left motors
    static const unsigned char _M1DIR = 12;
    static const unsigned char _M2DIR = 7;
    static const unsigned char _M1PWM = 10;
    static const unsigned char _M2PWM = 6;
    
    // right motors
    static const unsigned char _M4DIR = 8;
    static const unsigned char _M3DIR = 13;
    static const unsigned char _M4PWM = 9;
    static const unsigned char _M3PWM = 11;
    
};

#endif

1.2 Changes to DualL298PMotorShield4WD.cpp

The main change is implementing the speed-setting functions for the four motors. The logic is identical for each, so only one motor's code is excerpted here; see the inline comments for the details.

// Set speed for motor 4; speed is a number between -255 and 255
void DualL298PMotorShield4WD::setM4Speed(int speed)
{
  unsigned char reverse = 0;
  
  if (speed < 0)    // is the speed negative?
  {
    speed = -speed; // if so, run the motor in reverse
    reverse = 1;    // set the reverse flag
  }
  if (speed > 255)  // limit the maximum speed to 255
    speed = 255;
  if (reverse)  // reverse rotation
  {
    digitalWrite(_M4DIR, LOW);  // drive the direction pin low
    analogWrite(_M4PWM, speed);
  }
  else  // forward rotation
  {
    digitalWrite(_M4DIR, HIGH);  // drive the direction pin high
    analogWrite(_M4PWM, speed);
  }
}

The setSpeeds function simply calls the speed-setting function of each of the four motors.

// Set speed for motor 1, 2, 3, 4
void DualL298PMotorShield4WD::setSpeeds(int m1Speed, int m2Speed, int m3Speed, int m4Speed)
{
  setM1Speed(m1Speed);
  setM2Speed(m2Speed);
  setM3Speed(m3Speed);
  setM4Speed(m4Speed);  
}

Once DualL298PMotorShield4WD.h and DualL298PMotorShield4WD.cpp are finished, create a directory named dual-L298P-motor-shield-master-4wd under the Arduino libraries directory and put the two files into it.

Now open the Arduino IDE's library list and the newly added library appears there.
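
A quick way to verify the installation is a minimal test sketch along these lines (this example is mine, not part of the Diego source; it only uses the constructor and methods declared in the header above):

#include <DualL298PMotorShield4WD.h>

DualL298PMotorShield4WD drive;  // default pin selection from the header

void setup() {
  drive.init();  // configure the direction/PWM pins
}

void loop() {
  drive.setSpeeds(100, 100, 100, 100);  // all four motors forward at PWM 100
  delay(2000);
  drive.setSpeeds(0, 0, 0, 0);          // stop
  delay(2000);
}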

1.3 Changes to motor_driver.h

Add a 4WD version of the motor control function setMotorSpeeds, whose parameters are the speeds of the four motors.

void initMotorController();
void setMotorSpeed(int i, int spd);
#ifdef L298P
void setMotorSpeeds(int leftSpeed, int rightSpeed);
#endif
#ifdef L298P_4WD
void setMotorSpeeds(int leftSpeed_1, int leftSpeed_2, int rightSpeed_1, int rightSpeed_2);
#endif

1.4 Changes to motor_driver.ino

#ifdef L298P_4WD
// A convenience function for setting all four motor speeds
void setMotorSpeeds(int leftSpeed_1, int leftSpeed_2, int rightSpeed_1, int rightSpeed_2){
  setMotorSpeed(1, leftSpeed_1);
  setMotorSpeed(3, rightSpeed_1);
  setMotorSpeed(2, leftSpeed_2);
  setMotorSpeed(4, rightSpeed_2);
}
#else
#error A motor driver must be selected!
#endif
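
The setMotorSpeeds function above relies on the per-motor setMotorSpeed(i, spd) declared in motor_driver.h, which is not shown in this excerpt. A plausible sketch of it, assuming the shield instance is named drive (the actual Diego source may differ):

#ifdef L298P_4WD
// Hypothetical dispatch from motor index to the corresponding library call
void setMotorSpeed(int i, int spd) {
  switch (i) {
    case 1: drive.setM1Speed(spd); break;
    case 2: drive.setM2Speed(spd); break;
    case 3: drive.setM3Speed(spd); break;
    case 4: drive.setM4Speed(spd); break;
  }
}
#endif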

2. PID control

All of the PID control changes are made in the diff_controller.h file.

Add two PID state variables for 4WD mode:

#ifdef L298P_4WD
SetPointInfo leftPID_h, rightPID_h;
#endif

Add PID parameters for the two additional motors in 4WD mode:

#ifdef L298P_4WD

int left_h_Kp=Kp;
int left_h_Kd=Kd;
int left_h_Ki=Ki;
int left_h_Ko=Ko;

int right_h_Kp=Kp;
int right_h_Kd=Kd;
int right_h_Ki=Ki;
int right_h_Ko=Ko;

#endif

Add two PID functions for 4WD mode:

#ifdef L298P_4WD
/* PID routine to compute the next motor commands */
void dorightPID_h(SetPointInfo * p) {
  long Perror;
  long output;
  int input;

  //Perror = p->TargetTicksPerFrame - (p->Encoder - p->PrevEnc);
  input =  p->Encoder-p->PrevEnc ;
  Perror = p->TargetTicksPerFrame - input;

  /*
  * Avoid derivative kick and allow tuning changes,
  * see http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-derivative-kick/
  * see http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-tuning-changes/
  */
  //output = (Kp * Perror + Kd * (Perror - p->PrevErr) + Ki * p->Ierror) / Ko;
  // p->PrevErr = Perror;
  output = (right_h_Kp * Perror - right_h_Kd * (input - p->PrevInput) + p->ITerm) / right_h_Ko;
  p->PrevEnc = p->Encoder;

  output += p->output;
  // Accumulate Integral error *or* Limit output.
  // Stop accumulating when output saturates
  if (output >= MAX_PWM)
    output = MAX_PWM;
  else if (output <= -MAX_PWM)
    output = -MAX_PWM;
  else
    /*
    * allow tuning changes, see
    * http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-tuning-changes/
    */
    p->ITerm += right_h_Ki * Perror;  // accumulate the integral term (right-side gain)

  p->output = output;
  p->PrevInput = input;
//  Serial.println("right output:");
//  Serial.println(p->output);
}
#endif
...
#ifdef L298P_4WD

/* PID routine to compute the next motor commands */
void doleftPID_h(SetPointInfo * p) {
  long Perror;
  long output;
  int input;

  //Perror = p->TargetTicksPerFrame - (p->Encoder - p->PrevEnc);
  input = p->Encoder-p->PrevEnc ;
  Perror = p->TargetTicksPerFrame - input;

  /*
  * Avoid derivative kick and allow tuning changes,
  * see http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-derivative-kick/
  * see http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-tuning-changes/
  */
  //output = (Kp * Perror + Kd * (Perror - p->PrevErr) + Ki * p->Ierror) / Ko;
  // p->PrevErr = Perror;
  output = (left_h_Kp * Perror - left_h_Kd * (input - p->PrevInput) + p->ITerm) / left_h_Ko;
  p->PrevEnc = p->Encoder;

  output += p->output;
  // Accumulate Integral error *or* Limit output.
  // Stop accumulating when output saturates
  if (output >= MAX_PWM)
    output = MAX_PWM;
  else if (output <= -MAX_PWM)
    output = -MAX_PWM;
  else
    /*
    * allow tuning changes, see
    * http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-tuning-changes/
    */
    p->ITerm += left_h_Ki * Perror;  // accumulate the integral term

  p->output = output;
  p->PrevInput = input;
//  Serial.println("left output:");
//  Serial.println(p->output);
}
#endif

Modify the readPidIn function to add reading of pidin for the rear motors in 4WD mode:

long readPidIn(int i) {
  long pidin=0;
  if (i == LEFT){
    pidin = leftPID.PrevInput;
  }else if (i == RIGHT){
    pidin = rightPID.PrevInput;
  }
#ifdef L298P_4WD
  else if (i== RIGHT_H){
    pidin = rightPID_h.PrevInput;
  }else{
    pidin = leftPID_h.PrevInput;
  }
#endif  
  return pidin;
}

Modify the readPidOut function to add reading of pidout for the rear motors in 4WD mode:

long readPidOut(int i) {
  long pidout=0;
  if (i == LEFT){
    pidout = leftPID.output;
  }else if (i == RIGHT){
    pidout = rightPID.output;
  }
#ifdef L298P_4WD
  else if (i == RIGHT_H){
    pidout = rightPID_h.output;
  }else{
    pidout = leftPID_h.output;
  }
#endif   
  return pidout;
}

Modify the updatePID function to add the PID calls for all four motors in 4WD mode:

void updatePID() {
  /* Read the encoders */
  leftPID.Encoder =readEncoder(LEFT);
  rightPID.Encoder =readEncoder(RIGHT);
#ifdef L298P_4WD 
  leftPID_h.Encoder =readEncoder(LEFT_H);
  rightPID_h.Encoder =readEncoder(RIGHT_H);
#endif
  
  /* If we're not moving there is nothing more to do */
  if (!moving){
    /*
    * Reset PIDs once, to prevent startup spikes,
    * see http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-initialization/
    * PrevInput is considered a good proxy to detect
    * whether reset has already happened
    */
#ifdef L298P    
    if (leftPID.PrevInput != 0 || rightPID.PrevInput != 0) resetPID();
#endif
#ifdef L298P_4WD
    if (leftPID.PrevInput != 0 || rightPID.PrevInput != 0 || leftPID_h.PrevInput != 0 || rightPID_h.PrevInput != 0) resetPID();
#endif    
    return;
  }

  /* Compute PID update for each motor */
  dorightPID(&rightPID);
  doleftPID(&leftPID);
#ifdef L298P_4WD
  dorightPID_h(&rightPID_h);
  doleftPID_h(&leftPID_h);
#endif  

  /* Set the motor speeds accordingly */
#ifdef L298P   
  setMotorSpeeds(leftPID.output, rightPID.output);
#endif

#ifdef L298P_4WD
  setMotorSpeeds(leftPID.output,leftPID_h.output, rightPID.output,rightPID_h.output);
#endif

}
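
Note that updatePID calls resetPID(), which is not reproduced here. In the stock ROSArduinoBridge it zeroes each SetPointInfo struct; the 4WD build would presumably reset all four. A sketch of what that looks like (my reconstruction, with a hypothetical resetOnePID helper; the actual Diego source may differ):

/* Reset one PID struct, mirroring the stock ROSArduinoBridge resetPID() */
void resetOnePID(SetPointInfo * p, int i) {
  p->TargetTicksPerFrame = 0.0;
  p->Encoder = readEncoder(i);  // align PrevEnc with the current count
  p->PrevEnc = p->Encoder;
  p->output = 0;
  p->PrevInput = 0;             // PrevInput == 0 marks "already reset"
  p->ITerm = 0;
}

void resetPID() {
  resetOnePID(&leftPID, LEFT);
  resetOnePID(&rightPID, RIGHT);
#ifdef L298P_4WD
  resetOnePID(&leftPID_h, LEFT_H);
  resetOnePID(&rightPID_h, RIGHT_H);
#endif
}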

At this point the four motors can be driven and speed-controlled by PID independently. The next tutorial covers how to hook the chassis up to the host computer and get the 4WD base moving.

Lesson 23: Diego 1# 4WD - No.1: Encoder

Because the four wheels of a 4WD chassis can be controlled independently, it has excellent terrain capability. This article introduces the Diego 1# 4WD chassis; all of the source code has been uploaded to github.

The development of the 4WD Diego 1# will be covered in several articles. This one explains how to read the encoder data of the four chassis motors.

1.1 Hardware

  • Chassis material: aluminum alloy
  • Tires: 12 cm rubber tires
  • Motors: 370 DC motors with Hall encoders for speed measurement, outputting A/B quadrature signals

1.2 Controllers

  • Main controller: an Arduino UNO, which handles direction and PWM control of the four motors and reads the four motors' encoder outputs via interrupts
  • Motor controllers: two L298P boards, purchased online, with different control pins
  • Host computer: a Raspberry Pi or a mini PC

2. Reading the encoder data

All of the code is based on the Diego 1# code on github; only the 4WD-specific parts are described here.

First, define a preprocessor symbol in the ROSArduinoBridge_diego.ino file. This symbol turns the 4WD code on or off, which makes it easy to enable or disable the 4WD feature.

#define L298P_4WD

Diego 1# 4WD reads the A/B quadrature output of all four motors with a single Arduino UNO. Online writeups generally point out that the UNO has only two external interrupts, which would seem to limit it to reading one motor's A/B output. In fact, every IO pin on the Arduino can be used as an interrupt by manipulating the pin-change interrupt registers.
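
For reference, enabling pin-change interrupts on the ATmega328-based UNO comes down to two registers: PCICR enables the interrupt for a whole port group, and PCMSKn selects the individual pins within it. A minimal sketch matching the pin assignment defined below (the helper name is mine, not from the Diego source):

// Enable pin-change interrupts for the encoder pins (ATmega328P registers)
void initEncoders() {
  // D2-D5 are PCINT18-PCINT21 on PORTD, served by ISR(PCINT2_vect)
  PCICR  |= (1 << PCIE2);
  PCMSK2 |= (1 << PCINT18) | (1 << PCINT19)   // D2, D3: left front encoder
          | (1 << PCINT20) | (1 << PCINT21);  // D4, D5: left rear encoder

  // A0-A3 are PCINT8-PCINT11 on PORTC, served by ISR(PCINT1_vect)
  PCICR  |= (1 << PCIE1);
  PCMSK1 |= (1 << PCINT8)  | (1 << PCINT9)    // A0, A1: right rear encoder
          | (1 << PCINT10) | (1 << PCINT11);  // A2, A3: right front encoder
}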

First, define the Arduino UNO pins the encoders connect to in encoder_driver.h:

#ifdef ARDUINO_ENC_COUNTER
  //below can be changed, but should be PORTD pins; 
  //otherwise additional changes in the code are required
  #define LEFT_ENC_PIN_A PD2  //pin 2
  #define LEFT_ENC_PIN_B PD3  //pin 3
  
  //below can be changed, but should be PORTC pins
  #define RIGHT_ENC_PIN_A PC2  //pin A2
  #define RIGHT_ENC_PIN_B PC3   //pin A3

#ifdef L298P_4WD
  #define LEFT_H_ENC_PIN_A PD4  //pin 4
  #define LEFT_H_ENC_PIN_B PD5  //pin 5
  
  //below can be changed, but should be PORTC pins
  #define RIGHT_H_ENC_PIN_A PC0  //pin A0
  #define RIGHT_H_ENC_PIN_B PC1   //pin A1
#endif  
#endif

As this file shows, encoder pin definitions for 4WD mode are added on top of the original 2WD ones: the left-rear motor's A/B channels connect to digital pins D4 and D5, and the right-rear motor's A/B channels connect to analog pins A0 and A1. Reading the four encoders therefore uses 8 IO pins; adding PWM and direction control, chassis control in the Diego 1# 4WD version takes 16 IO pins in total. That leaves only D0/D1 for serial communication with the host computer and A4/A5 as the I2C interface for I2C modules, so the Arduino UNO is used to full capacity.

Add interrupt handlers for the new 4WD pins in the encoder_driver.ino file.

The Arduino UNO has three pin-change interrupt vectors:

  • ISR (PCINT0_vect) covers D8 to D13
  • ISR (PCINT1_vect) covers A0 to A5
  • ISR (PCINT2_vect) covers D0 to D7

Diego 1# 4WD only needs two of these handlers, ISR(PCINT2_vect) and ISR(PCINT1_vect). The code is as follows:

  ISR (PCINT2_vect){
     static uint8_t enc_last=0;
#ifdef L298P_4WD        
     static uint8_t enc_last_h=0;
#endif          
     enc_last <<=2; //shift previous state two places
     enc_last |= (PIND & (3 << 2)) >> 2; //read the current state into lowest 2 bits

#ifdef L298P_4WD
     enc_last_h<<=2;
     enc_last_h |=(PIND & (3 << 4))>>4;
#endif 
  
     left_enc_pos += ENC_STATES[(enc_last & 0x0f)];
#ifdef L298P_4WD   
     left_h_enc_pos +=ENC_STATES[(enc_last_h & 0x0f)];
#endif    
  }
  
  /* Interrupt routine for RIGHT encoder, taking care of actual counting */
  ISR (PCINT1_vect){
     static uint8_t enc_last=0;
#ifdef L298P_4WD        
     //uint8_t pinct=PINC;
     static uint8_t enc_last_h=0;
#endif   	
     enc_last <<=2; //shift previous state two places
     enc_last |= (PINC & (3 << 2)) >> 2; //read the current state into lowest 2 bits

#ifdef L298P_4WD
     enc_last_h<<=2;
     enc_last_h |=(PINC & 3);
#endif  
     right_enc_pos += ENC_STATES[(enc_last & 0x0f)];
#ifdef L298P_4WD   
     right_h_enc_pos +=ENC_STATES[(enc_last_h & 0x0f)];
#endif
  }

This code works on the port input registers PINC and PIND. Each IO pin corresponds to one bit of PINC or PIND, and reading the register returns the current level of the pins, so we only need to mask out the relevant bits. For example, (PIND & (3 << 4)) >> 4 reads bits 4 and 5 of PIND, i.e. the state of D4 and D5.
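
The ENC_STATES table used by both ISRs is the standard quadrature-decode lookup table from the stock ROSArduinoBridge encoder driver: the index is the previous A/B state shifted left two bits OR-ed with the current A/B state, and the value is the resulting tick increment. The 4WD counters for the rear wheels are inferred from the ISR code above:

/* Quadrature decode lookup table (stock ROSArduinoBridge) */
static const int8_t ENC_STATES [] = {0,1,-1,0,-1,0,0,1,1,0,0,-1,0,-1,1,0};

volatile long left_enc_pos = 0L;
volatile long right_enc_pos = 0L;
#ifdef L298P_4WD
volatile long left_h_enc_pos = 0L;   // rear-wheel counters added for 4WD
volatile long right_h_enc_pos = 0L;
#endif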

Lesson 18: Vision - Face Tracking

Building on the previous two sections on face detection and keypoint tracking, we now implement tracking of the face itself: the robot can follow a face and move with it. The basic process is as follows:

  • Face detection: introduced in lesson 16 and reused directly here.
  • Feature acquisition: implemented in lesson 17, though some improvements are needed to achieve better results.
  • Face tracking: based on the location of the ROI, decide how to control the robot's movement.

1. Face detection and feature acquisition

Please check face_tracker2.py on github; it supports OpenCV 3.

FaceTracker inherits from the FaceDetector and LKTracker classes and makes the following major changes and extensions:

  • The process_image function is rewritten.
  • An add_keypoints function is added to discover new feature points: it calls the goodFeaturesToTrack method and checks the distance to the current feature point cluster to make sure each new feature point is valid.
  • A drop_keypoints function is added that uses a clustering test to remove invalid feature points.

1.1 code

#!/usr/bin/env python

import roslib
import rospy
import cv2
import numpy as np
from math import isnan
from face_detector import FaceDetector
from lk_tracker import LKTracker

class FaceTracker(FaceDetector, LKTracker):
    def __init__(self, node_name):
        super(FaceTracker, self).__init__(node_name)
        
        self.n_faces = rospy.get_param("~n_faces", 1)
        self.show_text = rospy.get_param("~show_text", True)
        self.show_add_drop = rospy.get_param("~show_add_drop", False)
        self.feature_size = rospy.get_param("~feature_size", 1)
        self.use_depth_for_tracking = rospy.get_param("~use_depth_for_tracking", False)
        self.min_keypoints = rospy.get_param("~min_keypoints", 20)
        self.abs_min_keypoints = rospy.get_param("~abs_min_keypoints", 6)
        self.std_err_xy = rospy.get_param("~std_err_xy", 2.5) 
        self.pct_err_z = rospy.get_param("~pct_err_z", 0.42) 
        self.max_mse = rospy.get_param("~max_mse", 10000)
        self.add_keypoint_distance = rospy.get_param("~add_keypoint_distance", 10)
        self.add_keypoints_interval = rospy.get_param("~add_keypoints_interval", 1)
        self.drop_keypoints_interval = rospy.get_param("~drop_keypoints_interval", 1)
        self.expand_roi_init = rospy.get_param("~expand_roi", 1.02)
        self.expand_roi = self.expand_roi_init
        self.face_tracking = True

        self.frame_index = 0
        self.add_index = 0
        self.drop_index = 0
        self.keypoints = list()

        self.detect_box = None
        self.track_box = None
        
        self.grey = None
        self.prev_grey = None
        
    def process_image(self, cv_image):
        try:
            # Create a greyscale version of the image
            self.grey = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
            
            # Equalize the grey histogram to minimize lighting effects
            self.grey = cv2.equalizeHist(self.grey)
            
            # Step 1: Detect the face if we haven't already
            if self.detect_box is None:
                self.keypoints = list()
                self.track_box = None
                self.detect_box = self.detect_face(self.grey)
            else:
                # Step 2: If we aren't yet tracking keypoints, get them now
                if not self.track_box or not self.is_rect_nonzero(self.track_box):
                    self.track_box = self.detect_box
                    self.keypoints = self.get_keypoints(self.grey, self.track_box)
    
                # Store a copy of the current grey image used for LK tracking                   
                if self.prev_grey is None:
                    self.prev_grey = self.grey           
                  
                # Step 3: If we have keypoints, track them using optical flow
                self.track_box = self.track_keypoints(self.grey, self.prev_grey)
              
                # Step 4: Drop keypoints that are too far from the main cluster
                if self.frame_index % self.drop_keypoints_interval == 0 and len(self.keypoints) > 0:
                    ((cog_x, cog_y, cog_z), mse_xy, mse_z, score) = self.drop_keypoints(self.abs_min_keypoints, self.std_err_xy, self.max_mse)
                    
                    if score == -1:
                        self.detect_box = None
                        self.track_box = None
                        return cv_image
                  
                # Step 5: Add keypoints if the number is getting too low 
                if self.frame_index % self.add_keypoints_interval == 0 and len(self.keypoints) < self.min_keypoints:
                    self.expand_roi = self.expand_roi_init * self.expand_roi
                    self.add_keypoints(self.track_box)
                else:
                    self.frame_index += 1
                    self.expand_roi = self.expand_roi_init
            
            # Store a copy of the current grey image used for LK tracking            
            self.prev_grey = self.grey
              
            # Process any special keyboard commands for this module
            if 32 <= self.keystroke and self.keystroke < 128:
                cc = chr(self.keystroke).lower()
                if cc == 'c':
                    self.keypoints = []
                    self.track_box = None
                    self.detect_box = None
                elif cc == 'd':
                    self.show_add_drop = not self.show_add_drop
                    
        except AttributeError:
            pass
                    
        return cv_image

    def add_keypoints(self, track_box):
        # Look for any new keypoints around the current keypoints
        
        # Begin with a mask of all black pixels
        mask = np.zeros_like(self.grey)
        
        # Get the coordinates and dimensions of the current track box
        try:
            ((x,y), (w,h), a) = track_box
        except:
            try:
                x,y,w,h = track_box
            except:
                rospy.loginfo("Track box has shrunk to zero...")
                return
        
        x = int(x)
        y = int(y)
        
        # Expand the track box to look for new keypoints
        w_new = int(self.expand_roi * w)
        h_new = int(self.expand_roi * h)
        
        pt1 = (x - int(w_new / 2), y - int(h_new / 2))
        pt2 = (x + int(w_new / 2), y + int(h_new / 2))
        
        mask_box = ((x, y), (w_new, h_new), a)

        # Display the expanded ROI with a yellow rectangle
        if self.show_add_drop:
            cv2.rectangle(self.marker_image, pt1, pt2, (255, 255, 0))
                        
        # Create a filled white ellipse within the track_box to define the ROI
        cv2.ellipse(mask, mask_box, (255,255, 255), cv2.FILLED)
        
        if self.keypoints is not None:
            # Mask the current keypoints
            for x, y in [np.int32(p) for p in self.keypoints]:
                cv2.circle(mask, (x, y), 5, 0, -1)
         
        new_keypoints = cv2.goodFeaturesToTrack(self.grey, mask = mask, **self.gf_params)

        # Append new keypoints to the current list if they are not
        # too far from the current cluster      
        if new_keypoints is not None:
            for x, y in np.float32(new_keypoints).reshape(-1, 2):
                distance = self.distance_to_cluster((x,y), self.keypoints)
                if distance > self.add_keypoint_distance:
                    self.keypoints.append((x,y))
                    # Briefly display a blue disc where the new point is added
                    if self.show_add_drop:
                        cv2.circle(self.marker_image, (int(x), int(y)), 3, (255, 255, 0, 0), cv2.FILLED, 2, 0)
                                    
            # Remove duplicate keypoints
            self.keypoints = list(set(self.keypoints))
        
    def distance_to_cluster(self, test_point, cluster):
        min_distance = 10000
        for point in cluster:
            if point == test_point:
                continue
            # Use L1 distance since it is faster than L2
            distance = abs(test_point[0] - point[0])  + abs(test_point[1] - point[1])
            if distance < min_distance:
                min_distance = distance
        return min_distance

    def drop_keypoints(self, min_keypoints, outlier_threshold, mse_threshold):
        sum_x = 0
        sum_y = 0
        sum_z = 0
        sse = 0
        keypoints_xy = self.keypoints
        keypoints_z = self.keypoints
        n_xy = len(self.keypoints)
        n_z = n_xy
        
        if self.use_depth_for_tracking:
            if self.depth_image is None:
                return ((0, 0, 0), 0, 0, -1)
        
        # If there are no keypoints left to track, start over
        if n_xy == 0:
            return ((0, 0, 0), 0, 0, -1)
        
        # Compute the COG (center of gravity) of the cluster
        for point in self.keypoints:
            sum_x = sum_x + point[0]
            sum_y = sum_y + point[1]
        
        mean_x = sum_x / n_xy
        mean_y = sum_y / n_xy
        
        if self.use_depth_for_tracking:
            for point in self.keypoints:
                try:
                    # Index the depth array directly (cv.Get2D does not exist in OpenCV 3)
                    z = self.depth_image[min(self.frame_height - 1, int(point[1])), min(self.frame_width - 1, int(point[0]))]
                except:
                    continue
                # Depth values can be NaN which should be ignored
                if isnan(z):
                    continue
                else:
                    sum_z = sum_z + z
                    
            mean_z = sum_z / n_z
            
        else:
            mean_z = -1
        
        # Compute the x-y MSE (mean squared error) of the cluster in the camera plane
        for point in self.keypoints:
            sse = sse + (point[0] - mean_x) * (point[0] - mean_x) + (point[1] - mean_y) * (point[1] - mean_y)
            #sse = sse + abs((point[0] - mean_x)) + abs((point[1] - mean_y))
        
        # Get the average over the number of feature points
        mse_xy = sse / n_xy

        # The MSE must be > 0 for any sensible feature cluster
        if mse_xy == 0 or mse_xy > mse_threshold:
            return ((0, 0, 0), 0, 0, -1)
        
        # Throw away the outliers based on the x-y variance
        max_err = 0
        for point in self.keypoints:
            std_err = ((point[0] - mean_x) * (point[0] - mean_x) + (point[1] - mean_y) * (point[1] - mean_y)) / mse_xy
            if std_err > max_err:
                max_err = std_err
            if std_err > outlier_threshold:
                keypoints_xy.remove(point)
                if self.show_add_drop:
                    # Briefly mark the removed points in red
                    cv2.circle(self.marker_image, (point[0], point[1]), 3, (0, 0, 255), cv2.FILLED, 2, 0)   
                try:
                    keypoints_z.remove(point)
                    n_z = n_z - 1
                except:
                    pass
                
                n_xy = n_xy - 1
                                
        # Now do the same for depth
        if self.use_depth_for_tracking:
            sse = 0
            for point in keypoints_z:
                try:
                    z = self.depth_image[min(self.frame_height - 1, int(point[1])), min(self.frame_width - 1, int(point[0]))]
                    sse = sse + (z - mean_z) * (z - mean_z)
                except:
                    n_z = n_z - 1
            
            if n_z != 0:
                mse_z = sse / n_z
            else:
                mse_z = 0
            
            # Throw away the outliers based on depth using percent error 
            # rather than standard error since depth values can jump
            # dramatically at object boundaries
            for point in keypoints_z:
                try:
                    z = self.depth_image[min(self.frame_height - 1, int(point[1])), min(self.frame_width - 1, int(point[0]))]
                except:
                    continue
                try:
                    pct_err = abs(z - mean_z) / mean_z
                    if pct_err > self.pct_err_z:
                        keypoints_xy.remove(point)
                        if self.show_add_drop:
                            # Briefly mark the removed points in red
                            cv2.circle(self.marker_image, (point[0], point[1]), 2, (0, 0, 255), cv2.FILLED)  
                except:
                    pass
        else:
            mse_z = -1
        
        self.keypoints = keypoints_xy
               
        # Consider a cluster bad if we have fewer than min_keypoints left
        if len(self.keypoints) < min_keypoints:
            score = -1
        else:
            score = 1

        return ((mean_x, mean_y, mean_z), mse_xy, mse_z, score)
    
if __name__ == '__main__':
    try:
        node_name = "face_tracker"
        FaceTracker(node_name)
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down face tracker node."
        cv2.destroyAllWindows()

1.2 launch file

<launch>
   <node pkg="diego_vision" name="face_tracker2" type="face_tracker2.py" output="screen">

   <remap from="input_rgb_image" to="/camera/rgb/image_color" />
   <remap from="input_depth_image" to="/camera/depth/image" />
 
   <rosparam>
       use_depth_for_tracking: True
       min_keypoints: 20
       abs_min_keypoints: 6
       add_keypoint_distance: 10
       std_err_xy: 2.5
       pct_err_z: 0.42
       max_mse: 10000
       add_keypoints_interval: 1
       drop_keypoints_interval: 1
       show_text: True
       show_features: True
       show_add_drop: False
       feature_size: 1
       expand_roi: 1.02
       gf_maxCorners: 200
       gf_qualityLevel: 0.02
       gf_minDistance: 7
       gf_blockSize: 10
       gf_useHarrisDetector: False
       gf_k: 0.04
       haar_scaleFactor: 1.3
       haar_minNeighbors: 3
       haar_minSize: 30
       haar_maxSize: 150
   </rosparam>

   <param name="cascade_1" value="$(find diego_vision)/data/haar_detectors/haarcascade_frontalface_alt2.xml" />
   <param name="cascade_2" value="$(find diego_vision)/data/haar_detectors/haarcascade_frontalface_alt.xml" />
   <param name="cascade_3" value="$(find diego_vision)/data/haar_detectors/haarcascade_profileface.xml" />

</node>
</launch>

Diego 1 plus uses a depth camera, so the use_depth_for_tracking parameter can be set to True; in practice the depth camera gives noticeably better results. Now start the openni node and the face_tracker2.py node in two terminals. A video window appears, and when someone is in front of the camera their face is captured.

roslaunch diego_vision openni_node.launch

roslaunch diego_vision face_tracker2.launch

2. Face tracking

Please check object_tracker2.py on github; it supports OpenCV 3.

The main functions of the ObjectTracker class are as follows:

  • Subscribes to the ROI message published by the FaceTracker class, which gives the location of the captured face; ObjectTracker uses it to determine the face's position in the frame
  • Subscribes to the /camera/depth/image message to determine the distance from the face to the camera, enabling following
  • Publishes a Twist message to control the robot's movement

2.1 code

#!/usr/bin/env python

import roslib
import rospy
from sensor_msgs.msg import Image, RegionOfInterest, CameraInfo
from geometry_msgs.msg import Twist
from math import copysign, isnan
from cv_bridge import CvBridge, CvBridgeError
import numpy as np

class ObjectTracker():
    def __init__(self):
        rospy.init_node("object_tracker")
                        
        # Set the shutdown function (stop the robot)
        rospy.on_shutdown(self.shutdown)
        
        # How often should we update the robot's motion?
        self.rate = rospy.get_param("~rate", 10)
        r = rospy.Rate(self.rate) 
        
        # The maximum rotation speed in radians per second
        self.max_rotation_speed = rospy.get_param("~max_rotation_speed", 2.0)
        
        # The minimum rotation speed in radians per second
        self.min_rotation_speed = rospy.get_param("~min_rotation_speed", 0.5)
        
        # The x threshold (% of image width) indicates how far off-center
        # the ROI needs to be in the x-direction before we react
        self.x_threshold = rospy.get_param("~x_threshold", 0.1)
        
        # The maximum distance a target can be from the robot for us to track
        self.max_z = rospy.get_param("~max_z", 2.0)
        
        # Initialize the global ROI
        self.roi = RegionOfInterest()
        
        # The goal distance (in meters) to keep between the robot and the person
        self.goal_z = rospy.get_param("~goal_z", 0.6)
        
        # How far away from the goal distance (in meters) before the robot reacts
        self.z_threshold = rospy.get_param("~z_threshold", 0.05)
        
        # How much do we weight the goal distance (z) when making a movement
        self.z_scale = rospy.get_param("~z_scale", 1.0)

        # How much do we weight x-displacement of the person when making a movement        
        self.x_scale = rospy.get_param("~x_scale", 2.0)
        
        # The max linear speed in meters per second
        self.max_linear_speed = rospy.get_param("~max_linear_speed", 0.3)
        
        # The minimum linear speed in meters per second
        self.min_linear_speed = rospy.get_param("~min_linear_speed", 0.1)

        # Publisher to control the robot's movement
        self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist)
        
        # Intialize the movement command
        self.move_cmd = Twist()
        
        # We will get the image width and height from the camera_info topic
        self.image_width = 0
        self.image_height = 0
        
        # We need cv_bridge to convert the ROS depth image to an OpenCV array
        self.cv_bridge = CvBridge()
        self.depth_array = None
        
        # Set flag to indicate when the ROI stops updating
        self.target_visible = False
        
        # Wait for the camera_info topic to become available
        rospy.loginfo("Waiting for camera_info topic...")
        rospy.wait_for_message('camera_info', CameraInfo)
        
        # Subscribe to the camera_info topic to get the image width and height
        rospy.Subscriber('camera_info', CameraInfo, self.get_camera_info)

        # Wait until we actually have the camera data
        while self.image_width == 0 or self.image_height == 0:
            rospy.sleep(1)
                    
        # Subscribe to the registered depth image
        rospy.Subscriber("depth_image", Image, self.convert_depth_image)
        
        # Wait for the depth image to become available
        rospy.wait_for_message('depth_image', Image)
        
        # Subscribe to the ROI topic and set the callback to update the robot's motion
        rospy.Subscriber('roi', RegionOfInterest, self.set_cmd_vel)
        
        # Wait until we have an ROI to follow
        rospy.loginfo("Waiting for an ROI to track...")
        rospy.wait_for_message('roi', RegionOfInterest)
        
        rospy.loginfo("ROI messages detected. Starting tracker...")
        
        # Begin the tracking loop
        while not rospy.is_shutdown():
            # If the target is not visible, stop the robot
            if not self.target_visible:
                self.move_cmd = Twist()
            else:
                # Reset the flag to False by default
                self.target_visible = False
                
            # Send the Twist command to the robot
            self.cmd_vel_pub.publish(self.move_cmd)
            
            # Sleep for 1/self.rate seconds
            r.sleep()

    def set_cmd_vel(self, msg):
        # If the ROI has a width or height of 0, we have lost the target
        if msg.width == 0 or msg.height == 0:
            return
        
        # If the ROI stops updating this next statement will not happen
        self.target_visible = True
        
        self.roi = msg
        
        # Compute the displacement of the ROI from the center of the image
        target_offset_x = msg.x_offset + msg.width / 2 - self.image_width / 2

        try:
            percent_offset_x = float(target_offset_x) / (float(self.image_width) / 2.0)
        except:
            percent_offset_x = 0
            
        # Intialize the movement command
        self.move_cmd = Twist()
        
        # Rotate the robot only if the displacement of the target exceeds the threshold
        if abs(percent_offset_x) > self.x_threshold:
            # Set the rotation speed proportional to the displacement of the target
            try:
                speed = percent_offset_x * self.x_scale
                self.move_cmd.angular.z = -copysign(max(self.min_rotation_speed,
                                            min(self.max_rotation_speed, abs(speed))), speed)
            except:
                pass
            
        # Now compute the depth component
        n_z = sum_z = mean_z = 0
         
        # Get the min/max x and y values from the ROI
        min_x = self.roi.x_offset
        max_x = min_x + self.roi.width
        min_y = self.roi.y_offset
        max_y = min_y + self.roi.height
        
        # Get the average depth value over the ROI
        for x in range(min_x, max_x):
            for y in range(min_y, max_y):
                try:
                    z = self.depth_array[y, x]
                except:
                    continue
                
                # Depth values can be NaN which should be ignored
                if isnan(z) or z > self.max_z:
                    continue
                else:
                    sum_z = sum_z + z
                    n_z += 1   
        try:
            mean_z = sum_z / n_z
            
            if mean_z < self.max_z and (abs(mean_z - self.goal_z) > self.z_threshold):
                speed = (mean_z - self.goal_z) * self.z_scale
                self.move_cmd.linear.x = copysign(min(self.max_linear_speed, max(self.min_linear_speed, abs(speed))), speed)
        except:
            pass
                    
    def convert_depth_image(self, ros_image):
        # Use cv_bridge() to convert the ROS image to OpenCV format
        try:
            # The depth image is a single-channel float32 image
            depth_image = self.cv_bridge.imgmsg_to_cv2(ros_image, "32FC1")
        except CvBridgeError, e:
            print e

        # Convert the depth image to a Numpy array
        self.depth_array = np.array(depth_image, dtype=np.float32)

    def get_camera_info(self, msg):
        self.image_width = msg.width
        self.image_height = msg.height

    def shutdown(self):
        rospy.loginfo("Stopping the robot...")
        self.cmd_vel_pub.publish(Twist())
        rospy.sleep(1)     

if __name__ == '__main__':
    try:
        ObjectTracker()
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("Object tracking node terminated.")

2.2 launch file

<launch>
<param name="/camera/driver/depth_registration" value="True" />

<node pkg="diego_vision" name="object_tracker" type="object_tracker2.py" output="screen">

<remap from="camera_info" to="/camera/depth/camera_info" />
<remap from="depth_image" to="/camera/depth/image" />

<rosparam>
rate: 10
max_z: 2.0
goal_z: 0.6
z_threshold: 0.05
x_threshold: 0.1
z_scale: 1.0
x_scale: 0.1
max_rotation_speed: 0.2
min_rotation_speed: 0.02
max_linear_speed: 0.2
min_linear_speed: 0.05
</rosparam>

</node>
</launch>

With the face_tracker2.py node running, execute the following command to start the object_tracker2.py node; the robot will then follow the detected face as it moves.

roslaunch diego_vision object_tracker2.launch

Note that the speed parameters in the launch file must not be set too high. Image processing lags behind the camera, so at high speed the robot moves beyond the expected position during the lag (speed multiplied by lag time) and then corrects itself endlessly, turning back and forth while tracking the face.

Lesson 11: Navigation - Localization & Planner

With an existing map, the robot needs to localize itself on that map, which is done with the ROS amcl package; a target location is then published and move_base does the path planning, steering around obstacles to reach the destination.

1. Localization

1.1 Write the amcl launch script

<launch>
<master auto="start"/>

<include file="$(find flashgo)/launch/lidar.launch" />

<node name="arduino" pkg="ros_arduino_python" type="arduino_node.py" output="screen">
<rosparam file="$(find ros_arduino_python)/config/my_arduino_params.yaml" command="load" />
</node>

<node pkg="tf" type="static_transform_publisher" name="base_frame_2_laser_link" args="0.0 0.0 0.2 3.14 3.14 0 /base_link /laser 40"/>

<!-- Map server -->
<node name="map_server" pkg="map_server" type="map_server" args="$(find diego_nav)/maps/f4_gmapping.yaml" />

<!-- amcl node -->
<node pkg="amcl" type="amcl" name="amcl" output="screen">
<remap from="scan" to="scan"/>
<!-- Publish scans from best pose at a max of 10 Hz -->
<param name="initial_pose_x" value="0.0"/>
<param name="initial_pose_y" value="0.0"/>
<param name="initial_pose_a" value="0.0"/>
<param name="use_map_topic" value="true"/>
<param name="odom_model_type" value="diff"/>
<param name="odom_alpha5" value="0.1"/>
<param name="transform_tolerance" value="0.5" />
<param name="gui_publish_rate" value="10.0"/>
<param name="laser_max_beams" value="300"/>
<param name="min_particles" value="500"/>
<param name="max_particles" value="5000"/>
<param name="kld_err" value="0.1"/>
<param name="kld_z" value="0.99"/>
<param name="odom_alpha1" value="0.1"/>
<param name="odom_alpha2" value="0.1"/>
<!-- translation std dev, m -->
<param name="odom_alpha3" value="0.1"/>
<param name="odom_alpha4" value="0.1"/>
<param name="laser_z_hit" value="0.9"/>
<param name="laser_z_short" value="0.05"/>
<param name="laser_z_max" value="0.05"/>
<param name="laser_z_rand" value="0.5"/>
<param name="laser_sigma_hit" value="0.2"/>
<param name="laser_lambda_short" value="0.1"/>
<param name="laser_lambda_short" value="0.1"/>
<param name="laser_model_type" value="likelihood_field"/>
<!-- <param name="laser_model_type" value="beam"/> -->
<param name="laser_min_range" value="1"/>
<param name="laser_max_range" value="8"/>
<param name="laser_likelihood_max_dist" value="2.0"/>
<param name="update_min_d" value="0.2"/>
<param name="update_min_a" value="0.5"/>
<param name="resample_interval" value="1"/>
<param name="transform_tolerance" value="0.1"/>
<param name="recovery_alpha_slow" value="0.0"/>
<param name="recovery_alpha_fast" value="0.0"/>
</node>
</launch>

One point to emphasize: the robot must start from its actual location on the map. If it is put down at an arbitrary position, amcl cannot localize it. The initial pose must be specified, and the map origin is the starting point used when the map was built with gmapping/hector. The launch file therefore sets the initial pose; since we place the robot at the starting position here, the values are set to 0.0.

<param name="initial_pose_x" value="0.0"/>
<param name="initial_pose_y" value="0.0"/>

1.2 Start the amcl node

roslaunch diego_nav diego_run_gmapping_amcl_flashgo.launch

Open a new terminal and start the keyboard control node

rosrun teleop_twist_keyboard teleop_twist_keyboard.py

Now we can drive the robot around with the keyboard, and open rviz to watch the localization:

rosrun rviz rviz

In rviz you can see a large number of red arrows, the particle cloud generated by the Monte Carlo localization algorithm; the arrows point in the robot's direction of motion.

As the robot moves, the particles become more aggregated than before; in practice, the tighter the cluster, the better the localization.

2. Path planning

Path planning is about moving the robot from one location to another while avoiding obstacles along the way; it is navigation in the most intuitive sense. In ROS, path planning is implemented by the move_base package.

2.1 Write the move_base launch script

<launch>

<!-- move_base node -->
<node pkg="move_base" type="move_base" respawn="false" name="move_base" output="screen">
<rosparam file="$(find diego_nav)/config/costmap_common_params.yaml" command="load" ns="global_costmap" />
<rosparam file="$(find diego_nav)/config/costmap_common_params.yaml" command="load" ns="local_costmap" />
<rosparam file="$(find diego_nav)/config/local_costmap_params.yaml" command="load" />
<rosparam file="$(find diego_nav)/config/global_costmap_params.yaml" command="load" />
<rosparam file="$(find diego_nav)/config/base_local_planner_params.yaml" command="load" />
</node>

</launch>

The contents of this launch file can also be combined with the amcl startup script into a single file.

The configuration parameters of the move_base node are distributed across four configuration files:
  • costmap_common_params.yaml
  • global_costmap_params.yaml
  • local_costmap_params.yaml
  • base_local_planner_params.yaml

2.2 costmap_common_params.yaml

General costmap configuration file:

obstacle_range: 2.5  #Maximum obstacle detection range
raytrace_range: 3.0  #Maximum range for clearing free space
footprint: [[0.14, 0.14], [0.14, -0.14], [-0.14, 0.14], [-0.14, -0.14]] #The robot is rectangular; this sets the footprint it occupies in the coordinate frame
inflation_radius: 0.55 #Safety margin to keep around obstacles

observation_sources: laser_scan_sensor #Only focus on the data of the lidar

laser_scan_sensor: {sensor_frame: laser, data_type: LaserScan, topic: scan, marking: true, clearing: true} #Set the relevant parameters of the lidar

 

2.3 global_costmap_params.yaml

Global cost map configuration file

global_costmap: 
 global_frame: /map #the global cost map reference is /map
 robot_base_frame: base_link #base_frame is base_link
 update_frequency: 5.0 #Map update frequency
 static_map: true #Use a static map and initialize from it
 transform_tolerance: 0.8 #tf tolerance of 0.8s; tune to your hardware. On the Raspberry Pi, values below 0.8 produce constant tf timeout warnings

 

2.4 local_costmap_params.yaml

Local cost map configuration file

local_costmap:
 global_frame: odom #the local cost map reference is odom
 robot_base_frame: base_link #base_frame is base_link
 update_frequency: 5.0 #the map update frequency
 publish_frequency: 2.0 #Cost Map The frequency at which visual information is published
 static_map: false #The local cost map will continue to update the map, so here is set to false
 rolling_window: true #Use a rolling window so the robot always stays at the center of the map
 width: 4.0 #the width of cost map
 height: 6.0 #the height of cost map
 resolution: 0.05 #Cost map resolution

 

2.5 base_local_planner_params.yaml

Local Planner configuration file

TrajectoryPlannerROS:
 max_vel_x: 0.45 #Maximum speed in the x-axis direction
 min_vel_x: 0.1 #Minimum speed in the x-axis direction
 max_vel_theta: 1.0 #Maximum angular velocity
 min_in_place_vel_theta: 0.4 #Minimum in-place angular velocity

 acc_lim_theta: 3.2 #Maximum angular acceleration
 acc_lim_x: 2.5 #Maximum acceleration in the x-axis direction
 acc_lim_y: 2.5 #Maximum acceleration in the y-axis direction

2.6 Start the move_base node

roslaunch diego_nav diego_run_gmapping_amcl_flashgo.launch

Diego 1# now has localization and navigation capability, so next we run a navigation test.

3. Navigation test

For the navigation test, place obstacles at fixed indoor positions, draw the obstacle map with gmapping or hector, and then start amcl and move_base as above for localization and navigation.

3.1 Navigation test code

We can modify the navigation test code, changing the locations to points on the actual map.

#!/usr/bin/env python
import roslib
import rospy
import actionlib
from actionlib_msgs.msg import *
from geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, Twist
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from random import sample
from math import pow, sqrt

class NavTest():
    def __init__(self):
        rospy.init_node('nav_test', anonymous=True)

        rospy.on_shutdown(self.shutdown)

        # How long in seconds should the robot pause at each location?
        self.rest_time = rospy.get_param("~rest_time", 10)

        # Are we running in the fake simulator?
        self.fake_test = rospy.get_param("~fake_test", False)

        # Goal state return values
        goal_states = ['PENDING', 'ACTIVE', 'PREEMPTED',
                       'SUCCEEDED', 'ABORTED', 'REJECTED',
                       'PREEMPTING', 'RECALLING', 'RECALLED',
                       'LOST']

        # Set up the goal locations. Poses are defined in the map frame.
        # An easy way to find the pose coordinates is to point-and-click
        # Nav Goals in RViz when running in the simulator.
        # Pose coordinates are then displayed in the terminal
        # that was used to launch RViz.
        locations = dict()

        locations['point_0'] = Pose(Point(0.0, 2.0, 0.000), Quaternion(0.000, 0.000, 0.223, 0.975))
        locations['point_1'] = Pose(Point(0.5, 2.0, 0.000), Quaternion(0.000, 0.000, -0.670, 0.743))

        # Publisher to manually control the robot (e.g. to stop it)
        self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist)

        # Subscribe to the move_base action server
        self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)

        rospy.loginfo("Waiting for move_base action server...")

        # Wait 60 seconds for the action server to become available
        self.move_base.wait_for_server(rospy.Duration(60))

        rospy.loginfo("Connected to move base server")

        # A variable to hold the initial pose of the robot to be set by
        # the user in RViz
        initial_pose = PoseWithCovarianceStamped()

        # Variables to keep track of success rate, running time,
        # and distance traveled
        n_locations = len(locations)
        n_goals = 0
        n_successes = 0
        i = n_locations
        distance_traveled = 0
        start_time = rospy.Time.now()
        running_time = 0
        location = ""
        last_location = ""

        # Get the initial pose from the user
        #rospy.loginfo("*** Click the 2D Pose Estimate button in RViz to set the robot's initial pose...")
        #rospy.wait_for_message('initialpose', PoseWithCovarianceStamped)
        #self.last_location = Pose()
        #rospy.Subscriber('initialpose', PoseWithCovarianceStamped, self.update_initial_pose)

        # Make sure we have the initial pose
        #while initial_pose.header.stamp == "":
        #    rospy.sleep(1)

        rospy.loginfo("Starting navigation test")

        # Begin the main loop and run through a sequence of locations
        while not rospy.is_shutdown():
            # If we've gone through the current sequence,
            # start with a new random sequence
            if i == n_locations:
                i = 0
                sequence = sample(locations, n_locations)
                # Skip over first location if it is the same as
                # the last location
                if sequence[0] == last_location:
                    i = 1

            # Get the next location in the current sequence
            location = sequence[i]

            # Keep track of the distance traveled.
            # Use updated initial pose if available.
            if initial_pose.header.stamp == "":
                distance = sqrt(pow(locations[location].position.x -
                                    locations[last_location].position.x, 2) +
                                pow(locations[location].position.y -
                                    locations[last_location].position.y, 2))
            else:
                rospy.loginfo("Updating current pose.")
                distance = sqrt(pow(locations[location].position.x -
                                    initial_pose.pose.pose.position.x, 2) +
                                pow(locations[location].position.y -
                                    initial_pose.pose.pose.position.y, 2))
                initial_pose.header.stamp = ""

            # Store the last location for distance calculations
            last_location = location

            # Increment the counters
            i += 1
            n_goals += 1

            # Set up the next goal location
            self.goal = MoveBaseGoal()
            self.goal.target_pose.pose = locations[location]
            self.goal.target_pose.header.frame_id = 'map'
            self.goal.target_pose.header.stamp = rospy.Time.now()

            # Let the user know where the robot is going next
            rospy.loginfo("Going to: " + str(location))

            # Start the robot toward the next location
            self.move_base.send_goal(self.goal)

            # Allow 5 minutes to get there
            finished_within_time = self.move_base.wait_for_result(rospy.Duration(300))

            # Check for success or failure
            if not finished_within_time:
                self.move_base.cancel_goal()
                rospy.loginfo("Timed out achieving goal")
            else:
                state = self.move_base.get_state()
                if state == GoalStatus.SUCCEEDED:
                    rospy.loginfo("Goal succeeded!")
                    n_successes += 1
                    distance_traveled += distance
                    rospy.loginfo("State:" + str(state))
                else:
                    rospy.loginfo("Goal failed with error code: " + str(goal_states[state]))

            # How long have we been running?
            running_time = rospy.Time.now() - start_time
            running_time = running_time.secs / 60.0

            # Print a summary success/failure, distance traveled and time elapsed
            rospy.loginfo("Success so far: " + str(n_successes) + "/" +
                          str(n_goals) + " = " +
                          str(100 * n_successes/n_goals) + "%")
            rospy.loginfo("Running time: " + str(trunc(running_time, 1)) +
                          " min Distance: " + str(trunc(distance_traveled, 1)) + " m")
            rospy.sleep(self.rest_time)

    def update_initial_pose(self, initial_pose):
        self.initial_pose = initial_pose

    def shutdown(self):
        rospy.loginfo("Stopping the robot...")
        self.move_base.cancel_goal()
        rospy.sleep(2)
        self.cmd_vel_pub.publish(Twist())
        rospy.sleep(1)

def trunc(f, n):
    # Truncates/pads a float f to n decimal places without rounding
    slen = len('%.*f' % (n, f))
    return float(str(f)[:slen])

if __name__ == '__main__':
    try:
        NavTest()
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("AMCL navigation test finished.")

3.2 Start navigation

rosrun diego_nav nav_test.py

After starting, the robot moves to the locations set in the navigation test file one by one and avoids the obstacles on the map. In a small space, however, the robot may keep spinning without finding a way out; this is related to the parameter settings, and getting good results requires continual parameter tuning.

Lesson 10: Navigation - Create a map based on the lidar

Building a map is the basis of SLAM: localization and path planning both work against a map. In this section we build the map from the lidar, using the gmapping and hector_slam packages.

1. Lidar

Lidar has very high measurement accuracy, which makes it a very good choice for distance measurement, but it also has the drawbacks of high power consumption and high cost. 2D lidar, which can only measure distances within a single plane, is in widespread use today, while 3D lidar is still too expensive to be widely adopted. Measuring only one plane also limits the usable scenarios: for navigation, 2D lidar is only suitable for relatively simple, structured environments.

In the Diego robot we use the Flash Go F4 lidar from EAI, which has the following features:

• 360 degree omni-directional scanning, 10 Hz adaptive scanning frequency, 4500 laser samples per second
• Ranging distance of at least 8 m, with a resolution of 1% of the range
• Low noise and long life
• Class 1 laser safety standard

The F4 connects to the host over a USB interface that carries both power and data, so installation is very convenient: just plug it into the host's USB port.

1.1 Install the lidar ROS driver package

Execute the following commands:

cd ~/catkin_ws/src
git clone https://github.com/EAIBOT/flashgo.git
cd ..
catkin_make

After the build completes, a flashgo directory is added under src; this is the F4 ROS package.

1.2 Modify the serial ports in the configuration files

When the F4's USB is connected to the Raspberry Pi, a serial port appears in the system. Since the Arduino UNO also communicates over a USB serial port, there are two serial devices in the system, and the configuration must clearly distinguish their device names. ttyACM0 and ttyACM1 are the corresponding serial ports; on Diego 1#, ttyACM1 corresponds to the Arduino and ttyACM0 to the F4 lidar. We need to change the corresponding parameters. First modify the Arduino configuration file by opening my_arduino_params.yaml in the config directory.

Set the ros_arduino_bridge serial port to ttyACM1.


Then modify the F4 configuration file by opening the lidar.launch file in the flashgo/launch directory.


Set the F4 serial port to ttyACM0.


2. Use hector_slam to build the map

2.1 Install hector_slam

sudo apt-get install ros-kinetic-hector-slam

2.2 Write the hector_slam launch file

The following launch file starts hector. Note that the EAI Flash lidar uses a left-handed coordinate system while ROS uses a right-handed one, so the file needs a static tf node (base_frame_2_laser) to handle the coordinate conversion.

<?xml version="1.0"?>

<launch>
  <arg name="tf_map_scanmatch_transform_frame_name" default="/scanmatcher_frame"/>
  <arg name="pub_map_odom_transform" value="true"/> 
  <arg name="map_frame" value="map"/> 
  <arg name="base_frame" value="base_link"/> 
  <arg name="odom_frame" value="base_link"/>
  <arg name="scan_subscriber_queue_size" default="5"/>
  <arg name="scan_topic" default="scan"/>
  <arg name="map_size" default="2048"/>

  <master auto="start"/>

  <include file="$(find flashgo)/launch/lidar.launch" />

  <node name="arduino" pkg="ros_arduino_python" type="arduino_node.py" output="screen">
      <rosparam file="$(find ros_arduino_python)/config/my_arduino_params.yaml" command="load" />
  </node> 

  <node pkg="tf" type="static_transform_publisher" name="base_frame_2_laser" args="0 0 0 0 0 0 /$(arg base_frame) /laser 100"/>  
  <node pkg="tf" type="static_transform_publisher" name="map_2_odom" args="0.0 0.0 0.0 0 0 0.0 /odom /$(arg base_frame) 10"/>


  <node pkg="hector_mapping" type="hector_mapping" name="hector_mapping" output="screen">   
  <!-- Frame names -->
  <param name="map_frame" value="$(arg map_frame)" />
  <param name="base_frame" value="$(arg base_frame)" />
  <param name="odom_frame" value="$(arg base_frame)" />   
   <!-- Tf use -->
  <param name="use_tf_scan_transformation" value="true"/>
  <param name="use_tf_pose_start_estimate" value="false"/>
  <param name="pub_map_odom_transform" value="$(arg pub_map_odom_transform)"/> 
  <!-- Map size / start point -->
  <param name="map_resolution" value="0.050"/>
  <param name="map_size" value="$(arg map_size)"/>
  <param name="map_start_x" value="0.5"/>
  <param name="map_start_y" value="0.5" />
  <param name="map_multi_res_levels" value="2" />
  <!-- Map update parameters -->
  <param name="update_factor_free" value="0.4"/>
  <param name="update_factor_occupied" value="0.7" />   
  <param name="map_update_distance_thresh" value="0.2"/>
  <param name="map_update_angle_thresh" value="0.9" />
  <param name="laser_z_min_value" value = "-1.0" />
  <param name="laser_z_max_value" value = "1.0" />  
  <!-- Advertising config -->
  <param name="advertise_map_service" value="true"/>
  <param name="scan_subscriber_queue_size" value="$(arg scan_subscriber_queue_size)"/>
  <param name="scan_topic" value="$(arg scan_topic)"/>
  <param name="tf_map_scanmatch_transform_frame_name" value="$(arg tf_map_scanmatch_transform_frame_name)" />
  </node>

  <include file="$(find hector_geotiff)/launch/geotiff_mapper.launch"/> 

</launch>

2.3 Start hector_slam

Now execute the following command to start the lidar and hector:

roslaunch diego_nav diego_run_hector_flashgo.launch

We can then drive Diego around the room with the keyboard and watch the corresponding map being drawn in rviz. Run the following commands in separate terminals:

rosrun teleop_twist_keyboard teleop_twist_keyboard.py

rosrun rviz rviz

3. Use gmapping to build the map

3.1 Install the ROS navigation package

sudo apt-get install ros-kinetic-navigation

gmapping, amcl, and move_base are installed by this command.

3.2 Write the gmapping launch file

<launch>
<master auto="start"/>

<include file="$(find flashgo)/launch/lidar.launch" />

<node name="arduino" pkg="ros_arduino_python" type="arduino_node.py" output="screen">
<rosparam file="$(find ros_arduino_python)/config/my_arduino_params.yaml" command="load" />
</node>

<node pkg="tf" type="static_transform_publisher" name="base_frame_2_laser_link" args="0.0 0.0 0.2 3.14 3.14 0 /base_link /laser 40"/>

<!-- gmapping node -->
<node pkg="gmapping" type="slam_gmapping" name="slam_gmapping">
<param name="base_frame" value="base_link"/>
<param name="odom_frame" value="odom"/>
<param name="maxUrange" value="4.0"/>
<param name="maxRange" value="5.0"/>
<param name="sigma" value="0.05"/>
<param name="kernelSize" value="3"/>
<param name="lstep" value="0.05"/>
<param name="astep" value="0.05"/>
<param name="iterations" value="5"/>
<param name="lsigma" value="0.075"/>
<param name="ogain" value="3.0"/>
<param name="lskip" value="0"/>
<param name="minimumScore" value="30"/>
<param name="srr" value="0.01"/>
<param name="srt" value="0.02"/>
<param name="str" value="0.01"/>
<param name="stt" value="0.02"/>
<param name="linearUpdate" value="0.05"/>
<param name="angularUpdate" value="0.0436"/>
<param name="temporalUpdate" value="-1.0"/>
<param name="resampleThreshold" value="0.5"/>
<param name="particles" value="8"/>
<!--
<param name="xmin" value="-50.0"/>
<param name="ymin" value="-50.0"/>
<param name="xmax" value="50.0"/>
<param name="ymax" value="50.0"/>
make the starting size small for the benefit of the Android client's memory...
-->
<param name="xmin" value="-1.0"/>
<param name="ymin" value="-1.0"/>
<param name="xmax" value="1.0"/>
<param name="ymax" value="1.0"/>

<param name="delta" value="0.05"/>
<param name="llsamplerange" value="0.01"/>
<param name="llsamplestep" value="0.01"/>
<param name="lasamplerange" value="0.005"/>
<param name="lasamplestep" value="0.005"/>
</node>
</launch>

3.3 Start gmapping

Now you can execute the following command to start the lidar and gmapping:

roslaunch diego_nav diego_run_gmapping_flashgo.launch

Now we can drive Diego 1# around the room with the keyboard, and rviz will draw the corresponding map as the robot moves. Execute the following commands in separate terminals:
rosrun teleop_twist_keyboard teleop_twist_keyboard.py
rosrun rviz rviz

4.Generate map files

Whether you build the map dynamically with hector or with gmapping, you can save it to a map file with a map_server command.

4.1.Create the maps folder

Create a maps folder and give it permission 777 so that map_server is allowed to write map files into it, as shown below.
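For example, assuming the maps directory lives inside the diego_nav package (the same path used in section 4.2 below):

mkdir -p ~/catkin_ws/src/diego_nav/maps
chmod 777 ~/catkin_ws/src/diego_nav/maps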

4.2.Generate map files

Open a terminal, cd into the maps directory, and execute the map generation command:

cd ~/catkin_ws/src/diego_nav/maps/

rosrun map_server map_saver -f f4_gmapping
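map_saver writes f4_gmapping.pgm and f4_gmapping.yaml into the current directory. The saved map can later be served to the navigation stack with map_server, for example:

rosrun map_server map_server f4_gmapping.yaml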

 

Lesson 9: Navigation - Diego 1# navigation framework

SLAM navigation is a complicated topic involving many mathematical models and algorithms. On the ROS platform, however, these models and algorithms have already been implemented and encapsulated as function packages within the ROS architecture, so Diego 1# uses the ROS navigation framework.

1. ROS navigation framework

The following figure shows the navigation framework as illustrated by ROS; you can find more detail at http://wiki.ros.org/navigation/Tutorials/RobotSetup

 

In the navigation stack, the function packages in white and grey are already provided by ROS, while the function packages in blue require customized development for the specific hardware platform.

Though both gmapping and hector can be used to construct the map, they use different algorithms: gmapping relies on odom data while hector does not. You can choose either one for your application.

2. Diego 1# related function packages

The following table lists the function packages of Diego 1# and the corresponding hardware.

| ROS navigation function | Software package | Hardware | Notes |
| move_base | move_base | Raspberry Pi | |
| map_server | gmapping, hector | Raspberry Pi, lidar, xtion | Use either the lidar or the xtion depth camera |
| amcl | amcl | Raspberry Pi, lidar, xtion | Use either the lidar or the xtion depth camera |
| base_controller | ros_arduino_bridge | Arduino UNO | The ros_arduino_bridge package contains the base_controller |
| odometry source | ros_arduino_bridge | Arduino UNO, Hall encoders | ros_arduino_firmware reads the Hall encoder data; ros_arduino_bridge computes and publishes the odom message |
| sensor transforms | | Raspberry Pi | The tf transforms are basically static and can be published in the launch file |
| sensor source | rplidar A2 driver, OpenNI | Raspberry Pi, lidar, xtion | The rplidar lidar ships with its own driver package that publishes laser data; with a depth camera, the OpenNI package can publish the laser data |

Lesson 7: Move control - calibrate linear velocity

After the above steps the robot can move, but without precise control. To reach the desired control precision, calibration is required: linear velocity calibration for the moving distance, and angular velocity calibration for the rotation angle.

1. Linear velocity calibration principle and code

Linear velocity calibration is done by checking the difference between the distance the robot actually moves and the distance the control command requested. Considering hardware precision, an error within the allowable range is acceptable.

The following code controls the robot to move forward a distance of 1 m.

#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist, Point
from math import copysign, sqrt, pow
import tf
class CalibrateLinear():
    def __init__(self):
        # Give the node a name
        rospy.init_node('calibrate_linear', anonymous=False)

        # Set rospy to execute a shutdown function when terminating the script
        rospy.on_shutdown(self.shutdown)

        # How fast will we check the odometry values?
        self.rate = 10
        r = rospy.Rate(self.rate)

        # Set the distance to travel
        self.test_distance = 1.0 # meters
        self.speed = 1.0 # meters per second
        self.tolerance = 0.01 # meters
        self.odom_linear_scale_correction = 1.0
        self.start_test = True

        # Publisher to control the robot's speed
        self.cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=5)

        # The base frame is base_footprint for the TurtleBot but base_link for Pi Robot
        self.base_frame = rospy.get_param('~base_frame', '/base_link')

        # The odom frame is usually just /odom
        self.odom_frame = rospy.get_param('~odom_frame', '/odom')

        # Initialize the tf listener
        self.tf_listener = tf.TransformListener()

        # Give tf some time to fill its buffer
        rospy.sleep(2)

        # Make sure we see the odom and base frames
        self.tf_listener.waitForTransform(self.odom_frame, self.base_frame, rospy.Time(), rospy.Duration(60.0))

        rospy.loginfo("Bring up rqt_reconfigure to control the test.")

        self.position = Point()

        # Get the starting position from the tf transform between the odom and base frames
        self.position = self.get_position()

        x_start = self.position.x
        y_start = self.position.y

        move_cmd = Twist()

        while not rospy.is_shutdown():
            # Stop the robot by default
            move_cmd = Twist()

            if self.start_test:
                # Get the current position from the tf transform between the odom and base frames
                self.position = self.get_position()

                # Compute the Euclidean distance from the target point
                distance = sqrt(pow((self.position.x - x_start), 2) +
                                pow((self.position.y - y_start), 2))

                # Correct the estimated distance by the correction factor
                distance *= self.odom_linear_scale_correction
                # How close are we?
                error =  distance - self.test_distance

                # Are we close enough?
                if not self.start_test or abs(error) <  self.tolerance:
                    self.start_test = False
                    params = False
                    rospy.loginfo(params)
                else:
                    # If not, move in the appropriate direction
                    move_cmd.linear.x = copysign(self.speed, -1 * error)
            else:
                self.position = self.get_position()
                x_start = self.position.x
                y_start = self.position.y

            self.cmd_vel.publish(move_cmd)
            r.sleep()

        # Stop the robot
        self.cmd_vel.publish(Twist())

    def get_position(self):
        # Get the current transform between the odom and base frames
        try:
            (trans, rot)  = self.tf_listener.lookupTransform(self.odom_frame, self.base_frame, rospy.Time(0))
        except (tf.Exception, tf.ConnectivityException, tf.LookupException):
            rospy.loginfo("TF Exception")
            return

        return Point(*trans)

    def shutdown(self):
        # Always stop the robot when shutting down the node
        rospy.loginfo("Stopping the robot...")
        self.cmd_vel.publish(Twist())
        rospy.sleep(1)

if __name__ == '__main__':
    try:
        CalibrateLinear()
        rospy.spin()
    except:
        rospy.loginfo("Calibration terminated.")

2. Code description

First, import the necessary packages:

import rospy
from geometry_msgs.msg import Twist, Point
from math import copysign, sqrt, pow
import tf

Initialize the node and set the command publishing frequency, following the standard ROS initialization pattern:


class CalibrateLinear():
    def __init__(self):
        # Give the node a name
        rospy.init_node('calibrate_linear', anonymous=False)

        # Set rospy to execute a shutdown function when terminating the script
        rospy.on_shutdown(self.shutdown)

        # How fast will we check the odometry values?
        self.rate = 10
        r = rospy.Rate(self.rate)

Set the calibration parameters:

test_distance: the distance to move forward, in meters

speed: the forward velocity, in m/s

tolerance: the acceptable distance error, in meters

odom_linear_scale_correction: a scale factor applied to the odom-reported distance; for example, if the robot actually travels 1.05 m when odom reports 1.0 m, set this to 1.05

start_test: the test ON/OFF switch

       # Set the distance to travel
        self.test_distance = 1.0 # meters
        self.speed = 1.0 # meters per second
        self.tolerance = 0.01 # meters
        self.odom_linear_scale_correction = 1.0
        self.start_test = True

Create the publisher for the Twist messages that drive the robot:

        # Publisher to control the robot's speed
        self.cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=5)

Initialize base_frame, odom_frame, and the tf listener:

        # The base frame is base_footprint for the TurtleBot but base_link for Pi Robot
        self.base_frame = rospy.get_param('~base_frame', '/base_link')

        # The odom frame is usually just /odom
        self.odom_frame = rospy.get_param('~odom_frame', '/odom')

        # Initialize the tf listener
        self.tf_listener = tf.TransformListener()

        # Give tf some time to fill its buffer
        rospy.sleep(2)

        # Make sure we see the odom and base frames
        self.tf_listener.waitForTransform(self.odom_frame, self.base_frame, rospy.Time(), rospy.Duration(60.0))

        rospy.loginfo("Bring up rqt_reconfigure to control the test.")

Initialize the starting position:

        self.position = Point()

        # Get the starting position from the tf transform between the odom and base frames
        self.position = self.get_position()

        x_start = self.position.x
        y_start = self.position.y

        move_cmd = Twist()

A while loop drives the robot forward 1 m, as shown in the following code:


        while not rospy.is_shutdown():
            # Stop the robot by default
            move_cmd = Twist()

            if self.start_test:
                # Get the current position from the tf transform between the odom and base frames
                self.position = self.get_position() #Getting current location information

                #  Calculating the distance between current location and initial location
                distance = sqrt(pow((self.position.x - x_start), 2) +
                                pow((self.position.y - y_start), 2))

                # Correct the estimated distance by the correction factor
                distance *= self.odom_linear_scale_correction

                # Calculating the distance to target location
                error =  distance - self.test_distance

                # Are we close enough?
                if not self.start_test or abs(error) <  self.tolerance: #When arriving target location, stop robot moving
                    self.start_test = False
                    params = False
                    rospy.loginfo(params)
                else:
                    # If still not arriving target location, keep moving forward; if exceeding target location, control motor reversion
                    move_cmd.linear.x = copysign(self.speed, -1 * error)
            else:
                self.position = self.get_position()
                x_start = self.position.x
                y_start = self.position.y

            self.cmd_vel.publish(move_cmd)#Publishing Twist message
            r.sleep()

Get the current position from the tf data:

    def get_position(self):
        # Get the current transform between the odom and base frames
        try:
            (trans, rot)  = self.tf_listener.lookupTransform(self.odom_frame, self.base_frame, rospy.Time(0))
        except (tf.Exception, tf.ConnectivityException, tf.LookupException):
            rospy.loginfo("TF Exception")
            return

        return Point(*trans)

The shutdown handler stops the robot when the node terminates:

    def shutdown(self):
        # Always stop the robot when shutting down the node
        rospy.loginfo("Stopping the robot...")
        self.cmd_vel.publish(Twist())
        rospy.sleep(1)

3.Calibration

With the calibration node above we can calibrate the robot's linear velocity. First find a flat space with room to move forward 1 m and place the robot at the starting point, then execute the following command and check whether the robot moves forward exactly 1 m:

rosrun diego_nav calibrate_linear.py 

Ideally the robot moves forward exactly 1 m. If not, double-check the robot-related parameters in the my_arduino_params.yaml file, keeping in mind that lengths in ROS are in meters.

# === Robot drivetrain parameters
wheel_diameter: 0.02900
wheel_track: 0.18
encoder_resolution: 2 # from Pololu for 131:1 motors
gear_reduction: 75.0
motors_reversed: True

If the parameters in the configuration file match the real hardware but the robot still cannot move forward with the required precision, the cause may be motor performance; the my_arduino_params.yaml parameters can then be tweaked to compensate.
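As a quick sanity check of these values, the short script below reproduces the ticks-per-meter formula that base_controller.py uses (see Lesson 6 below); the variable names mirror the yaml keys:

from math import pi

# Values from my_arduino_params.yaml above
wheel_diameter = 0.029       # meters
encoder_resolution = 2       # encoder ticks per motor revolution
gear_reduction = 75.0        # gearbox ratio

# The same formula base_controller.py uses to convert ticks to distance
ticks_per_meter = encoder_resolution * gear_reduction / (wheel_diameter * pi)
print(ticks_per_meter)       # about 1646 ticks per meter with these values

If the robot consistently overshoots the 1 m target, the effective wheel diameter is larger than the configured one (and vice versa), so wheel_diameter is usually the first parameter to adjust.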

For high precision control, high precision hardware is required.

Lesson 6: Move control - PID control for 2 motors

In ros_arduino_bridge, the base controller uses a single set of PID parameters for both motors. In actual use, however, motor characteristics, terrain, and robot load balance can all keep the robot from following the planned trajectory; the simplest test is whether the robot can go straight. This requires tuning the two motors' speeds separately, so this section modifies ros_arduino_bridge to support different PID parameters for the two motors.

1.First modify the Arduino code

1.1.diff_controller.h
Add PID control variables for the left and right motors:

/* PID Parameters */
int Kp = 20;
int Kd = 12;
int Ki = 0;
int Ko = 50;

int left_Kp=Kp;
int left_Kd=Kd;
int left_Ki=Ki;
int left_Ko=Ko;

int right_Kp=Kp;
int right_Kd=Kd;
int right_Ki=Ki;
int right_Ko=Ko;

Modify the resetPID function:

void resetPID(){
 leftPID.TargetTicksPerFrame = 0.0;
 leftPID.Encoder = readEncoder(LEFT);
 leftPID.PrevEnc = leftPID.Encoder;
 leftPID.output = 0;
 leftPID.PrevInput = 0;
 leftPID.ITerm = 0;

 rightPID.TargetTicksPerFrame = 0.0;
 rightPID.Encoder = readEncoder(RIGHT);
 rightPID.PrevEnc = rightPID.Encoder;
 rightPID.output = 0;
 rightPID.PrevInput = 0;
 rightPID.ITerm = 0;
}

Define the dorightPID() and doleftPID() functions for the right and left motors respectively:

/* PID routine to compute the next motor commands */
void dorightPID(SetPointInfo * p) {
  long Perror;
  long output;
  int input;

  //Perror = p->TargetTicksPerFrame - (p->Encoder - p->PrevEnc);
  input = p->Encoder - p->PrevEnc;
  Perror = p->TargetTicksPerFrame - input;

  /*
  * Avoid derivative kick and allow tuning changes,
  * see http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-derivative-kick/
  * see http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-tuning-changes/
  */
  //output = (Kp * Perror + Kd * (Perror - p->PrevErr) + Ki * p->Ierror) / Ko;
  // p->PrevErr = Perror;
  output = (right_Kp * Perror - right_Kd * (input - p->PrevInput) + p->ITerm) / right_Ko;
  p->PrevEnc = p->Encoder;

  output += p->output;
  // Accumulate Integral error *or* Limit output.
  // Stop accumulating when output saturates
  if (output >= MAX_PWM)
    output = MAX_PWM;
  else if (output <= -MAX_PWM)
    output = -MAX_PWM;
  else
  /*
  * allow turning changes, see http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-tuning-changes/
  */
    p->ITerm += Ki * Perror;

  p->output = output;
  p->PrevInput = input;
}

/* PID routine to compute the next motor commands */
void doleftPID(SetPointInfo * p) {
  long Perror;
  long output;
  int input;

  //Perror = p->TargetTicksPerFrame - (p->Encoder - p->PrevEnc);
  input = p->Encoder - p->PrevEnc;
  Perror = p->TargetTicksPerFrame + input;

  /*
  * Avoid derivative kick and allow tuning changes,
  * see http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-derivative-kick/
  * see http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-tuning-changes/
  */
  //output = (Kp * Perror + Kd * (Perror - p->PrevErr) + Ki * p->Ierror) / Ko;
  // p->PrevErr = Perror;
  output = (left_Kp * Perror - left_Kd * (input - p->PrevInput) + p->ITerm) / left_Ko;
  p->PrevEnc = p->Encoder;

  output += p->output;
  // Accumulate Integral error *or* Limit output.
  // Stop accumulating when output saturates
  if (output >= MAX_PWM)
    output = MAX_PWM;
  else if (output <= -MAX_PWM)
    output = -MAX_PWM;
  else
  /*
  * allow turning changes, see http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-tuning-changes/
  */
    p->ITerm += Ki * Perror;

  p->output = output;
  p->PrevInput = input;
}
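Both routines implement the same update. Written out, with y_k the measured encoder ticks in frame k and e_k the tick error, the command sent to the motor each frame is

$$u_k = u_{k-1} + \frac{K_p\,e_k - K_d\,(y_k - y_{k-1}) + I_{k-1}}{K_o}, \qquad I_k = I_{k-1} + K_i\,e_k$$

where K_o is a common output divisor. The integral term stops accumulating while the output is clamped at ±MAX_PWM (a simple anti-windup), and the derivative acts on the measurement rather than the error to avoid derivative kick, as the linked articles explain. In dorightPID the error is e_k = TargetTicksPerFrame − y_k; doleftPID flips the sign of the measured ticks to match the left motor's mounting direction.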

Modify the updatePID() function:

void updatePID() {
  /* Read the encoders */
  leftPID.Encoder = readEncoder(LEFT);
  rightPID.Encoder = readEncoder(RIGHT);

  /* If we're not moving there is nothing more to do */
  if (!moving){
    /*
    * Reset PIDs once, to prevent startup spikes,
    * see http://brettbeauregard.com/blog/2011/04/improving-the-beginner%E2%80%99s-pid-initialization/
    * PrevInput is considered a good proxy to detect
    * whether reset has already happened
    */
    if (leftPID.PrevInput != 0 || rightPID.PrevInput != 0) resetPID();
    return;
  }

  /* Compute PID update for each motor */
  dorightPID(&rightPID);
  doleftPID(&leftPID);

  /* Set the motor speeds accordingly */
  setMotorSpeeds(leftPID.output, rightPID.output);

}

1.2. encoder_driver.ino

Since the left and right motors are mounted in opposite directions, driving forward makes one encoder count up while the other counts down, even though both wheels travel the same way; this throws off the PID calculation. We therefore flip the sign of the left encoder reading by modifying the readEncoder function.

/* Wrap the encoder reading function */
long readEncoder(int i) {
  if (i == LEFT) return 0 - left_enc_pos;
  else return right_enc_pos;
}

1.3.ROSArduinoBridge.ino

Modify the declaration of pid_args so it can hold eight values:

int pid_args[8];

Modify the runCommand() function: in the UPDATE_PID case, comment out the original Kp, Kd, Ki, Ko assignments and replace them with the following code:

    case UPDATE_PID:
      while ((str = strtok_r(p, ":", &p)) != NULL) {
        pid_args[i] = atoi(str);
        i++;
      }
//      Kp = pid_args[0];
//      Kd = pid_args[1];
//      Ki = pid_args[2];
//      Ko = pid_args[3];

      left_Kp = pid_args[0];
      left_Kd = pid_args[1];
      left_Ki = pid_args[2];
      left_Ko = pid_args[3];

      right_Kp = pid_args[4];
      right_Kd = pid_args[5];
      right_Ki = pid_args[6];
      right_Ko = pid_args[7];
      Serial.println("OK");
      break;

The Arduino side now supports speed control with different PID parameters for the two motors.

1.4. ROSArduinoBridge.ino

Modify the definitions of the argv1 and argv2 arrays, expanding their lengths. These arrays hold the arguments of commands received from the host computer, and the added PID parameters make the argument string (for example 10:12:0:50:8:12:0:50) longer than the original arrays could hold.

// Character arrays to hold the first and second arguments
char argv1[32];
char argv2[32];

2.Modify ROS package code

2.1.my_arduino_params.yaml
Add the PID parameters for the left and right motors in the PID parameter section:

# === PID parameters
Kp: 10
Kd: 12
Ki: 0
Ko: 50
accel_limit: 1.0

left_Kp: 10
left_Kd: 12
left_Ki: 0
left_Ko: 50

right_Kp: 8
right_Kd: 12
right_Ki: 0
right_Ko: 50

2.2.arduino_driver.py

Modify the update_pid function so that it sends all eight parameters in a single command:

def update_pid(self, left_Kp, left_Kd, left_Ki, left_Ko, right_Kp, right_Kd, right_Ki, right_Ko):
        ''' Set the PID parameters on the Arduino
        '''
        print "Updating PID parameters"
        cmd = 'u ' + str(left_Kp) + ':' + str(left_Kd) + ':' + str(left_Ki) + ':' + str(left_Ko) + ':' + str(right_Kp) + ':' + str(right_Kd) + ':' + str(right_Ki) + ':' + str(right_Ko)
        self.execute_ack(cmd)  
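With the yaml values above, for example, this builds and sends the serial command u 10:12:0:50:8:12:0:50, which the UPDATE_PID case on the Arduino splits at the colons into the eight pid_args slots.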

2.3.base_controller.py

Modify def __init__(self, arduino, base_frame), adding initialization code for the left and right motor PID parameters:

def __init__(self, arduino, base_frame):
        self.arduino = arduino
        self.base_frame = base_frame
        self.rate = float(rospy.get_param("~base_controller_rate", 10))
        self.timeout = rospy.get_param("~base_controller_timeout", 1.0)
        self.stopped = False

        pid_params = dict()
        pid_params['wheel_diameter'] = rospy.get_param("~wheel_diameter", "") 
        pid_params['wheel_track'] = rospy.get_param("~wheel_track", "")
        pid_params['encoder_resolution'] = rospy.get_param("~encoder_resolution", "") 
        pid_params['gear_reduction'] = rospy.get_param("~gear_reduction", 1.0)

        #modify by william 
        pid_params['left_Kp'] = rospy.get_param("~left_Kp", 20)
        pid_params['left_Kd'] = rospy.get_param("~left_Kd", 12)
        pid_params['left_Ki'] = rospy.get_param("~left_Ki", 0)
        pid_params['left_Ko'] = rospy.get_param("~left_Ko", 50)
        pid_params['right_Kp'] = rospy.get_param("~right_Kp", 20)
        pid_params['right_Kd'] = rospy.get_param("~right_Kd", 12)
        pid_params['right_Ki'] = rospy.get_param("~right_Ki", 0)
        pid_params['right_Ko'] = rospy.get_param("~right_Ko", 50)

        self.accel_limit = rospy.get_param('~accel_limit', 0.1)
        self.motors_reversed = rospy.get_param("~motors_reversed", False)

        # Set up PID parameters and check for missing values
        self.setup_pid(pid_params)

        # How many encoder ticks are there per meter?
        self.ticks_per_meter = self.encoder_resolution * self.gear_reduction  / (self.wheel_diameter * pi)

        # What is the maximum acceleration we will tolerate when changing wheel speeds?
        self.max_accel = self.accel_limit * self.ticks_per_meter / self.rate

        # Track how often we get a bad encoder count (if any)
        self.bad_encoder_count = 0

        now = rospy.Time.now()    
        self.then = now # time for determining dx/dy
        self.t_delta = rospy.Duration(1.0 / self.rate)
        self.t_next = now + self.t_delta

        # internal data        
        self.enc_left = None            # encoder readings
        self.enc_right = None
        self.x = 0                      # position in xy plane
        self.y = 0
        self.th = 0                     # rotation in radians
        self.v_left = 0
        self.v_right = 0
        self.v_des_left = 0             # cmd_vel setpoint
        self.v_des_right = 0
        self.last_cmd_vel = now

        # subscriptions
        rospy.Subscriber("cmd_vel", Twist, self.cmdVelCallback)

        # Clear any old odometry info
        self.arduino.reset_encoders()

        # Set up the odometry broadcaster
        self.odomPub = rospy.Publisher('odom', Odometry)
        self.odomBroadcaster = TransformBroadcaster()

        rospy.loginfo("Started base controller for a base of " + str(self.wheel_track) + "m wide with " + str(self.encoder_resolution) + " ticks per rev")
        rospy.loginfo("Publishing odometry data at: " + str(self.rate) + " Hz using " + str(self.base_frame) + " as base frame")

Modify def setup_pid(self, pid_params):

def setup_pid(self, pid_params):
        # Check to see if any PID parameters are missing
        missing_params = False
        for param in pid_params:
            if pid_params[param] == "":
                print("*** PID Parameter " + param + " is missing. ***")
                missing_params = True

        if missing_params:
            os._exit(1)

        self.wheel_diameter = pid_params['wheel_diameter']
        self.wheel_track = pid_params['wheel_track']
        self.encoder_resolution = pid_params['encoder_resolution']
        self.gear_reduction = pid_params['gear_reduction']

        #self.Kp = pid_params['Kp']
        #self.Kd = pid_params['Kd']
        #self.Ki = pid_params['Ki']
        #self.Ko = pid_params['Ko']

        #self.arduino.update_pid(self.Kp, self.Kd, self.Ki, self.Ko)

        #modify by william
        self.left_Kp = pid_params['left_Kp']
        self.left_Kd = pid_params['left_Kd']
        self.left_Ki = pid_params['left_Ki']
        self.left_Ko = pid_params['left_Ko']

        self.right_Kp = pid_params['right_Kp']
        self.right_Kd = pid_params['right_Kd']
        self.right_Ki = pid_params['right_Ki']
        self.right_Ko = pid_params['right_Ko']

        #Pass the 8 parameters to the update_pid function
        self.arduino.update_pid(self.left_Kp, self.left_Kd, self.left_Ki, self.left_Ko, self.right_Kp, self.right_Kd, self.right_Ki, self.right_Ko)

After the above modifications, ros_arduino_bridge can drive the two motors with different PID parameters.

3.PID Speed control

PID speed tuning for two motors is fairly troublesome: the parameters must be adjusted repeatedly until the speeds of the two motors agree. The author's experience is to tune one motor well first and then use it as the reference while adjusting the other motor's PID parameters. The achievable control accuracy is of course also limited by motor performance and encoder resolution. To make the tuning easier, we use rqt_plot to display the speed, PID input, and PID output of the two motors as curves.

3.1. base_controller.py

Modify def __init__(self, arduino, base_frame), adding a self.debugPID flag at the beginning: debugPID = True turns debugging on, debugPID = False turns it off.

class BaseController:
    def __init__(self, arduino, base_frame):
        self.arduino = arduino
        self.base_frame = base_frame
        self.rate = float(rospy.get_param("~base_controller_rate", 10))
        self.timeout = rospy.get_param("~base_controller_timeout", 1.0)
        self.stopped = False
        self.debugPID = False

Modify def __init__(self, arduino, base_frame), adding the following code at the end of the function (Int32 must be imported from std_msgs.msg):

if self.debugPID:
     self.lEncoderPub = rospy.Publisher('Lencoder', Int32)
     self.rEncoderPub = rospy.Publisher('Rencoder', Int32)
     self.lPidoutPub = rospy.Publisher('Lpidout', Int32)
     self.rPidoutPub = rospy.Publisher('Rpidout', Int32)
     self.lVelPub = rospy.Publisher('Lvel', Int32)
     self.rVelPub = rospy.Publisher('Rvel', Int32)

Modify def poll(self), adding the following code at the beginning of the function:

if self.debugPID:
     rospy.logdebug("poll start-------------------------------: ")
     try:
         left_pidin, right_pidin = self.arduino.get_pidin()
         self.lEncoderPub.publish(left_pidin)
         self.rEncoderPub.publish(right_pidin)
         rospy.logdebug("left_pidin: "+str(left_pidin))
         rospy.logdebug("right_pidin: "+str(right_pidin))
     except:
         rospy.logerr("getpidin exception count: ")
         return

     try:
         left_pidout, right_pidout = self.arduino.get_pidout()
         self.lPidoutPub.publish(left_pidout)
         self.rPidoutPub.publish(right_pidout)
         rospy.logdebug("left_pidout: "+str(left_pidout))
         rospy.logdebug("right_pidout: "+str(right_pidout))
     except:
         rospy.logerr("getpidout exception count: ")
         return

Modify def poll(self), adding the following code after the if not self.stopped: check:

if self.debugPID:
     self.lVelPub.publish(self.v_left)
     self.rVelPub.publish(self.v_right)

3.2. arduino_driver.py

Add the get_pidin and get_pidout functions:

def get_pidin(self):
    values = self.execute_array('i')
    if len(values) != 2:
        print "pidin was not 2"
        raise SerialException
    return values

def get_pidout(self):
    values = self.execute_array('f')
    if len(values) != 2:
        print "pidout was not 2"
        raise SerialException
    return values

3.3. Arduino commands.h

Add the pidin and pidout read commands:

#define READ_PIDOUT    'f'
#define READ_PIDIN     'i'

 

3.4. Arduino diff_controller.h

Add the readPidIn and readPidOut functions at the end:

long readPidIn(int i) {
  long pidin = 0;
  if (i == LEFT) {
    pidin = leftPID.PrevInput;
  } else {
    pidin = rightPID.PrevInput;
  }
  return pidin;
}

long readPidOut(int i) {
  long pidout = 0;
  if (i == LEFT) {
    pidout = leftPID.output;
  } else {
    pidout = rightPID.output;
  }
  return pidout;
}

 

3.5. Arduino ROSArduinoBridge.ino

Modify runCommand(), adding case READ_PIDIN: and case READ_PIDOUT: in the switch section:

  switch (cmd) {
    case GET_BAUDRATE:
      Serial.println(BAUDRATE);
      break;
    case READ_PIDIN:
      Serial.print(readPidIn(LEFT));
      Serial.print(" ");
      Serial.println(readPidIn(RIGHT));
      break;
    case READ_PIDOUT:
      Serial.print(readPidOut(LEFT));
      Serial.print(" ");
      Serial.println(readPidOut(RIGHT));
      break;
     ...
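As a quick check you can also type the new commands directly in the Arduino serial monitor; a hypothetical exchange (the actual numbers depend on the robot's current motion) might look like:

i
12 13
f
120 115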

3.6. Execute PID speed control

Execute the following commands in different terminals.

Start roscore:

roscore

Source the catkin workspace setup script:

. ~/catkin_ws/devel/setup.bash

Start the node:

roslaunch ros_arduino_python arduino.launch

Now we can publish Twist messages to control the robot; in the example below the first triple is the linear velocity in m/s and the second the angular velocity in rad/s:

rostopic pub /cmd_vel geometry_msgs/Twist -r 1 -- '[0.5, 0.0, 0.0]' '[0.0, 0.0, 0.0]'

Start rqt_plot:

rqt_plot
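For example, to plot the encoder speeds and PID outputs published by the debug code above on a single graph, the topics can be passed directly on the command line (Int32 messages expose their value in the data field):

rqt_plot /Lencoder/data /Rencoder/data /Lpidout/data /Rpidout/data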

For the use of rqt_plot, see the official documentation: http://wiki.ros.org/rqt_plot
With the curves displayed in rqt_plot's graphical interface, you can tune the PID parameters following these classic rules of thumb:

Tune from small values to large, checking in order: proportional first, then integral, and add the derivative last.
If the curve oscillates frequently, reduce the proportional gain.
If the curve drifts in large, slow swings, increase the proportional gain.
If the curve returns to the setpoint slowly, strengthen the integral action.
If the curve fluctuates with a long period, weaken the integral action.
If the curve oscillates rapidly, reduce the derivative term first.
If deviations are large and changes are slow, increase the derivative action.
The ideal response shows two waves, the first peak about four times the height of the second.
Watch, adjust, and analyze repeatedly, and the tuning quality will not be poor.
