ROS + iFlytek (科大讯飞) Speech Recognition to Control a Robot

Controlling a real robot's motion with iFlytek speech recognition.

This article combines ROS with iFlytek speech recognition. The steps are as follows:
1. Create a ROS (catkin) workspace, for example with the commands below.
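
A typical way to create the workspace (the name catkin_ws is an assumption; any name works):

mkdir -p ~/catkin_ws/src
cd ~/catkin_ws/src
catkin_init_workspace
cd ~/catkin_ws
catkin_make
source devel/setup.bash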
2. Install the mplayer media player:

sudo apt-get install mplayer

3. Copy the iFlytek SDK's shared library (.so) files to /usr/lib/, for example as shown below.
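
For example, assuming the SDK was unpacked to ~/xf_sdk on a 64-bit machine (adjust the path and architecture folder to your own SDK layout):

sudo cp ~/xf_sdk/libs/x64/libmsc.so /usr/lib/
sudo ldconfig
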
4. Create a new file xf_asr.cpp under the ROS package's src directory and copy in the code below; change the appid to your own (applied for on the iFlytek open platform).

/*
* Speech dictation (iFly Auto Transform): converts speech into the corresponding text in real time.
*/
 
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "qisr.h"
#include "msp_cmn.h"
#include "msp_errors.h"
#include "speech_recognizer.h"
 
#include "ros/ros.h"
#include "std_msgs/String.h"
 
#define FRAME_LEN   640 
#define BUFFER_SIZE 4096
 
int wakeupFlag   = 0 ;
int resultFlag   = 0 ;
 
static void show_result(char *string, char is_over)
{
    resultFlag=1;   
    printf("\rResult: [ %s ]", string);
    if(is_over)
        putchar('\n');
}
 
static char *g_result = NULL;

static unsigned int g_buffersize = BUFFER_SIZE;
 
void on_result(const char *result, char is_last)
{
    if (result) {
        size_t left = g_buffersize - 1 - strlen(g_result);
        size_t size = strlen(result);
        if (left < size) {
            g_result = (char*)realloc(g_result, g_buffersize + BUFFER_SIZE);
            if (g_result)
                g_buffersize += BUFFER_SIZE;
            else {
                printf("mem alloc failed\n");
                return;
            }
        }
        strncat(g_result, result, size);
        show_result(g_result, is_last);
    }
}
 

void on_speech_begin()
{
    if (g_result)
    {
        free(g_result);
    }
    g_result = (char*)malloc(BUFFER_SIZE);
    g_buffersize = BUFFER_SIZE;
    memset(g_result, 0, g_buffersize);
 
    printf("Start Listening...\n");
}
void on_speech_end(int reason)
{
    if (reason == END_REASON_VAD_DETECT)
        printf("\nSpeaking done \n");
    else
        printf("\nRecognizer error %d\n", reason);
}
 


/* demo: recognize audio from the microphone */
static void demo_mic(const char* session_begin_params)
{
    int errcode;
    int i = 0;
 
    struct speech_rec iat;
 
    struct speech_rec_notifier recnotifier = {
        on_result,
        on_speech_begin,
        on_speech_end
    };
 
    errcode = sr_init(&iat, session_begin_params, SR_MIC, &recnotifier);
    if (errcode) {
        printf("speech recognizer init failed\n");
        return;
    }
    errcode = sr_start_listening(&iat);
    if (errcode) {
        printf("start listen failed %d\n", errcode);
    }
    /* record for about 2 seconds (the original SDK demo recorded 10 seconds) */
    while(i++ < 2)
        sleep(1);
    errcode = sr_stop_listening(&iat);
    if (errcode) {
        printf("stop listening failed %d\n", errcode);
    }
 
    sr_uninit(&iat);
}
 
 
/* main thread: start/stop record ; query the result of recognition.
 * record thread: record callback(data write)
 * helper thread: ui(keystroke detection)
 */
 

void WakeUp(const std_msgs::String::ConstPtr& msg)
{
    printf("waking up\r\n");
    usleep(700*1000);
    wakeupFlag=1;
}


int main(int argc, char* argv[])
{
    // Initialize ROS
    ros::init(argc, argv, "voiceRecognition");
    ros::NodeHandle n;
    ros::Rate loop_rate(10);
 
    // Declare the publisher and subscriber
    // Subscribe to the signal that wakes up speech recognition
    ros::Subscriber wakeUpSub = n.subscribe("voiceWakeup", 1000, WakeUp);
    // Publish the recognized text
    ros::Publisher voiceWordsPub = n.advertise<std_msgs::String>("voiceWords", 1000);
 
    ROS_INFO("Sleeping...");
    int count=0;
    while(ros::ok())
    {
        // Woken up: run one round of speech recognition
        if (wakeupFlag){
            ROS_INFO("Wakeup...");
            int ret = MSP_SUCCESS;
            const char* login_params = "appid = , work_dir = .";
 
            const char* session_begin_params =
                "sub = iat, domain = iat, language = zh_cn, "
                "accent = mandarin, sample_rate = 16000, "
                "result_type = plain, result_encoding = utf8";
 
            ret = MSPLogin(NULL, NULL, login_params);
            if(MSP_SUCCESS != ret){
                printf("MSPLogin failed, error code: %d (check your appid)\n", ret);
                goto exit;   // recognition cannot continue without a successful login
            }
 
            printf("Demo recognizing the speech from microphone\n");
	
	    printf("Speak in 3 seconds\n");
 	
            demo_mic(session_begin_params);
 
            printf("3 sec passed\n");
        
            wakeupFlag=1;   // keep listening every loop; set to 0 to require a new wake-up message for each round
            MSPLogout();
        }
 
        // Recognition finished: publish the result text
        if(resultFlag)
        {
            resultFlag=0;
            std_msgs::String msg;
            msg.data = g_result;
            voiceWordsPub.publish(msg);
        }
 
        ros::spinOnce();
        loop_rate.sleep();
        count++;
    }
 
exit:
    MSPLogout(); // Logout...
 
    return 0;
}

5. Add the following to the package's CMakeLists.txt:

add_executable(xf_asr src/xf_asr.cpp)
target_link_libraries(xf_asr ${catkin_LIBRARIES} -lmsc  -ldl -lpthread -lm -lrt)
add_dependencies(xf_asr xf_voice_generate_messages_cpp)   # optional; the build works with or without this line
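
If the compiler cannot find the SDK headers (qisr.h, msp_cmn.h, msp_errors.h, speech_recognizer.h) or the linker cannot resolve the recording helpers, you may need to wire them in yourself. The following is only a sketch under assumptions: the headers are copied into this package's include/ directory, and the SDK sample helpers speech_recognizer.c and linuxrec.c (which implement sr_init()/sr_start_listening() on top of ALSA) are copied into src/. In that case the two lines above would be replaced by something like:

# assumption: SDK headers copied into this package's include/ directory
include_directories(include ${catkin_INCLUDE_DIRS})
# assumption: SDK sample helpers copied into src/; they require ALSA, hence -lasound
add_executable(xf_asr src/xf_asr.cpp src/speech_recognizer.c src/linuxrec.c)
target_link_libraries(xf_asr ${catkin_LIBRARIES} -lmsc -ldl -lpthread -lm -lrt -lasound)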

6. Go back to the workspace and build, then source the setup file. Speech recognition can now run: start roscore in a new terminal and run the asr node in another terminal, for example as shown below.
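
Sample commands (the package name xf_voice is an assumption taken from the add_dependencies line above; adjust to your package name):

cd ~/catkin_ws
catkin_make
source devel/setup.bash

# terminal 1
roscore

# terminal 2
rosrun xf_voice xf_asr

# terminal 3: the node waits for a wake-up message before it starts listening
rostopic pub -1 /voiceWakeup std_msgs/String "data: 'start'"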
7. Add a package for voice control: create a new package (for example with the command below), create voice_cmd.cpp in its src directory, and copy in the following code.
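
A possible package-creation command (the package name voice_cmd and its dependency list are assumptions):

cd ~/catkin_ws/src
catkin_create_pkg voice_cmd roscpp std_msgs geometry_msgs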

#include <ros/ros.h>
#include <std_msgs/String.h>
#include <geometry_msgs/Twist.h>
#include <iostream>
#include <string>
#include <pthread.h>
#include <unistd.h>

using namespace std;

ros::Publisher pub;
geometry_msgs::Twist vel_cmd;
pthread_t pth_[5];

// Publisher thread: re-sends the current velocity command once per second,
// so the robot keeps executing the last voice command until vel_cmd changes.
void* vel_ctr(void* arg)
{
    while(true)
    {
        pub.publish(vel_cmd);
        ros::spinOnce();
        sleep(1);
    }
    return 0;
}

// Callback for the recognized text published by the xf_asr node on /voiceWords.
// find() != npos is used because the recognized sentence may contain extra words
// around the keyword. The velocity values are example values; tune them for your robot.
void callback(const std_msgs::String::ConstPtr& msg)
{
    cout << "Received: " << msg->data << endl;
    string str1 = msg->data;
    if (str1.find("前进") != string::npos)          // "move forward"
    {
        vel_cmd.linear.x = 0.2;
        vel_cmd.angular.z = 0.0;
        pthread_create(&pth_[0], NULL, vel_ctr, NULL);
    }
    else if (str1.find("后退") != string::npos)     // "move backward"
    {
        vel_cmd.linear.x = -0.2;
        vel_cmd.angular.z = 0.0;
        pthread_create(&pth_[1], NULL, vel_ctr, NULL);
    }
    else if (str1.find("左转") != string::npos)     // "turn left"
    {
        vel_cmd.linear.x = 0.0;
        vel_cmd.angular.z = 0.4;
        pthread_create(&pth_[2], NULL, vel_ctr, NULL);
    }
    else if (str1.find("右转") != string::npos)     // "turn right"
    {
        vel_cmd.linear.x = 0.0;
        vel_cmd.angular.z = -0.4;
        pthread_create(&pth_[3], NULL, vel_ctr, NULL);
    }
    else if (str1.find("停止") != string::npos)     // "stop"
    {
        vel_cmd.linear.x = 0.0;
        vel_cmd.angular.z = 0.0;
        pthread_create(&pth_[4], NULL, vel_ctr, NULL);
    }
}

int main(int argc, char** argv)
{
    ros::init(argc, argv, "voice_cmd");
    ros::NodeHandle n;

    // Publish velocity commands; use /cmd_vel_mux/input/teleop instead if your robot expects it.
    //pub = n.advertise<geometry_msgs::Twist>("/cmd_vel_mux/input/teleop", 10);
    pub = n.advertise<geometry_msgs::Twist>("/mobile_base/mobile_base_controller/cmd_vel", 1000);
    // Subscribe to the recognition results from the xf_asr node.
    ros::Subscriber sub = n.subscribe("/voiceWords", 10, callback);

    cout << "Hello! Voice control is ready." << endl
         << "前进 -- move forward"  << endl
         << "后退 -- move backward" << endl
         << "左转 -- turn left"     << endl
         << "右转 -- turn right"    << endl
         << "停止 -- stop"          << endl;

    ros::spin();
    return 0;
}

8. Similarly, add the following to this package's CMakeLists.txt:

add_executable(voice_cmd src/voice_cmd.cpp)
target_link_libraries(voice_cmd ${catkin_LIBRARIES} )
add_dependencies(voice_cmd voice_cmd_generate_messages_cpp)

9. Go back to the workspace, build, and run, for example as shown below.
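
Sample commands (the package and executable names are assumptions based on the targets above):

cd ~/catkin_ws
catkin_make
source devel/setup.bash
rosrun voice_cmd voice_cmd

# without a microphone you can fake a recognition result to test the node:
rostopic pub -1 /voiceWords std_msgs/String "data: '前进'"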
10. With both nodes running, open another terminal and run rqt_graph to check that the node graph is connected (see the commands below). (Done)
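
To inspect the graph and topics:

rosrun rqt_graph rqt_graph

# or check the topics directly
rostopic echo /voiceWords
rostopic echo /mobile_base/mobile_base_controller/cmd_vel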

Alternatively, you can control the robot's motion from Python by subscribing to the asr output.
The Python code is below:

#!/usr/bin/env python
# This Python file uses the following encoding: utf-8
import os, sys

# import roslib; roslib.load_manifest('pocketsphinx')   # leftover from the pocketsphinx demo; pocketsphinx is not used here
import rospy
import math


from geometry_msgs.msg import Twist
from std_msgs.msg import String

class voice_cmd_vel:

    def __init__(self):
        rospy.on_shutdown(self.cleanup)
        self.speed = 0.2
        self.msg = Twist()

        # publish to cmd_vel, subscribe to speech output
        #self.pub_ = rospy.Publisher('/mobile_base/commands/velocity', Twist)
        self.pub_ = rospy.Publisher('/mobile_base/mobile_base_controller/cmd_vel', Twist)
        rospy.Subscriber('/voiceWords', String, self.speechCb)

        r = rospy.Rate(10.0)
        while not rospy.is_shutdown():
            self.pub_.publish(self.msg)
            r.sleep()
        
    def speechCb(self, msg):
        rospy.loginfo(msg.data)

        #if msg.data.find("full speed") > -1:
        if msg.data.find("加速") > -1:
            if self.speed == 0.2:
                self.msg.linear.x = self.msg.linear.x*2
                self.msg.angular.z = self.msg.angular.z*2
                self.speed = 0.4
        if msg.data.find("减速") > -1:
            if self.speed == 0.4:
                self.msg.linear.x = self.msg.linear.x/2
                self.msg.angular.z = self.msg.angular.z/2
                self.speed = 0.2
        if msg.data.find("前进") > -1:
        #if msg.data.find("forward") > -1:    
            self.msg.linear.x = self.speed
            self.msg.angular.z = 0
        elif msg.data.find("左转") > -1:
            if self.msg.linear.x != 0:
                if self.msg.angular.z < self.speed:
                    self.msg.angular.z += 0.05
            else:        
                self.msg.angular.z = self.speed*2
        elif msg.data.find("右转") > -1:    
            if self.msg.linear.x != 0:
                if self.msg.angular.z > -self.speed:
                    self.msg.angular.z -= 0.05
            else:        
                self.msg.angular.z = -self.speed*2
        elif msg.data.find("后退") > -1:
            self.msg.linear.x = -self.speed
            self.msg.angular.z = 0
        elif msg.data.find("停止") > -1 or msg.data.find("立定") > -1:          
            self.msg = Twist()
        
        self.pub_.publish(self.msg)

    def cleanup(self):
        # stop the robot!
        twist = Twist()
        self.pub_.publish(twist)

if __name__=="__main__":
    rospy.init_node('voice_cmd_vel')
    try:
        voice_cmd_vel()
    except:
        pass
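
To run the Python version, save it into a package's scripts directory, make it executable, and start it while the xf_asr node is running (the file name voice_cmd_vel.py and package name voice_cmd are assumptions):

chmod +x voice_cmd_vel.py
rosrun voice_cmd voice_cmd_vel.py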
