I've recently been using the UniSound (云知声) SDK in a project and ran into quite a few problems, so here is a summary, both as a record for myself and hopefully as a help to anyone else working with UniSound. Without further ado, on to the code!
1. Semantic understanding and speech recognition (online ASR and NLU)
For local (offline) recognition only the result type differs, as noted in the code. In UniSound, speech recognition (ASR) and semantic understanding (NLU) come back through the same interface, so keep that in mind when you use it. Here I simply turn the recognition result into a String; the semantic result you will likely have to parse according to your own needs. Both come back as JSON-format strings.
First, set up the appKey and secret, which you need to apply for on the UniSound open platform. For convenience, the platform is at http://dev.hivoice.cn/.
public class Config {
public static final String appKey = "";
public static final String secret ="";
}
public class ASROnline {
private final Context mContext;
private SpeechUnderstander mUnderstander;
private StringBuffer mAsrResultBuffer;
public ASROnline(Context context) {
this.mContext = context;
}
/**
* Current recognition state
*/
enum AsrStatus {
idle, recording, recognizing
}
private static int currentDomain = 0;
private static String[] arrayDomain = new String[]{"general", "poi", "song", "movietv", "medical"};
private static String[] arrayLanguageStr = new String[]{SpeechConstants.LANGUAGE_MANDARIN,
SpeechConstants.LANGUAGE_ENGLISH, SpeechConstants.LANGUAGE_CANTONESE};
private static int[] arraySample = new int[]{SpeechConstants.ASR_SAMPLING_RATE_BANDWIDTH_AUTO,
SpeechConstants.ASR_SAMPLING_RATE_16K, SpeechConstants.ASR_SAMPLING_RATE_8K};
private static int currentSample = 0;
private static int currentLanguage = 0;
private AsrStatus status = AsrStatus.idle;
public void startUnderstand() {
mAsrResultBuffer = new StringBuffer();
// Create the speech understanding object; apply for the appKey and secret at http://dev.hivoice.cn/
mUnderstander = new SpeechUnderstander(mContext, Config.appKey, Config.secret);
// Enable partial (intermediate) results
mUnderstander.setOption(SpeechConstants.ASR_OPT_TEMP_RESULT_ENABLE, true);
mUnderstander.setOption(SpeechConstants.ASR_EVENT_MODEL_LOAD_SUCCESS, true);
// mUnderstander.setOption(SpeechConstants.VPR_FARFILED_ENABLED, true); // enable far-field pickup
// mUnderstander.setOption(SpeechConstants.VPR_BLUETOOTH_ENABLED, true); // enable Bluetooth recording
// mUnderstander.setOption(SpeechConstants.ASR_SERVER_ADDR, true); // set the recognition server address, format server=ip:port
// mUnderstander.setOption(SpeechConstants.NLU_SERVER_ADDR, value); // set the semantic (NLU) server address, key/value
mUnderstander.init("");
// Save the recorded audio data
// recognizer.setRecordingDataEnable(true);
if (status == AsrStatus.idle) {
// Set the recording sample rate
mUnderstander.setOption(SpeechConstants.ASR_SAMPLING_RATE, arraySample[currentSample]);
// Set the recognition domain
mUnderstander.setOption(SpeechConstants.ASR_DOMAIN, arrayDomain[currentDomain]);
// Set the recognition language
mUnderstander.setOption(SpeechConstants.ASR_LANGUAGE, arrayLanguageStr[currentLanguage]);
mUnderstander.start();
} else if (status == AsrStatus.recording) {
stopRecord();
} else if (status == AsrStatus.recognizing) {
mUnderstander.cancel();
status = AsrStatus.idle;
}
//net_asr: online recognition result
//net_nlu: online semantic (NLU) result
//local_asr: offline recognition result
mUnderstander.setListener(new SpeechUnderstanderListener() {
@Override
public void onResult(int type, String jsonResult) {
switch (type) {
case SpeechConstants.ASR_RESULT_NET: // online result
if (jsonResult.contains("net_asr")
&& jsonResult.contains("net_nlu")){
// the payload also carries a semantic (NLU) result
asrResultOperate(jsonResult);
} else {
// only the plain recognition text is present
parseJsonData(jsonResult);
}
break;
case SpeechConstants.ASR_RESULT_LOCAL: // local (offline) recognition result
break;
}
}
@Override
public void onEvent(int type, int timeMs) {
switch (type) {
case SpeechConstants.ASR_EVENT_NET_END: // online recognition finished
status = AsrStatus.idle;
break;
case SpeechConstants.ASR_EVENT_VOLUMECHANGE: // speaking volume changed
// The volume is reported in real time and can be shown with a progress bar
int volume = (Integer) mUnderstander.getOption(SpeechConstants.GENERAL_UPDATE_VOLUME);
break;
case SpeechConstants.ASR_EVENT_VAD_TIMEOUT: // silence timeout, recording stops automatically
// The user has stopped speaking, so stop recording
stopRecord();
break;
case SpeechConstants.ASR_EVENT_RECORDING_STOP: // recording stopped
status = AsrStatus.recognizing;
break;
case SpeechConstants.ASR_EVENT_SPEECH_DETECTED: // speech detected
break;
case SpeechConstants.ASR_EVENT_RECORDING_START: // recorder started
status = AsrStatus.recording;
break;
default:
break;
}
}
@Override
public void onError(int type, String errorMSG) {
if (errorMSG != null) {
// Show the error message
hitErrorMsg(errorMSG);
} else {
YunSpeechSynthesisUtil.startYunSpeechSYnthesis(mContext, "我是小助手有什么可以帮您,请将");
}
}
});
}
// Parse the semantic understanding (NLU) JSON; WeatherJavaBean is the app's own bean (a sketch is given after InitJava below)
private void asrResultOperate(String jsonResult) {
WeatherJavaBean weatherJavaBean = new Gson().fromJson(jsonResult, WeatherJavaBean.class);
WeatherJavaBean.NetNluBean netNluBean = weatherJavaBean.net_nlu.get(0);
String code = netNluBean.code;
if (code.equals("FORECAST")){
String header = netNluBean.data.header;
String carWashIndexDesc = netNluBean.data.result.weatherDays.get(0).carWashIndexDesc;
YunSpeechSynthesisUtil.startYunSpeechSYnthesis(mContext,header+carWashIndexDesc);
}
}
private void stopRecord() {
mUnderstander.stop();
}
// Parse the speech recognition (ASR) JSON result
private void parseJsonData(String jsonResult) {
InitJava initJava = new Gson().fromJson(jsonResult, InitJava.class);
List<InitJava.NetAsrBean> net_asr = initJava.net_asr;
InitJava.NetAsrBean netAsrBean = net_asr.get(0);
String resultType = netAsrBean.result_type;
// "full" marks the final (complete) recognition result
if (resultType.equals("full")){
String result = netAsrBean.recognition_result;
if (result!=null){
YunSpeechSynthesisUtil.startYunSpeechSYnthesis(mContext,result.trim());
}else {
YunSpeechSynthesisUtil.startYunSpeechSYnthesis(mContext, "抱歉,未听清");
}
}
}
private void hitErrorMsg(String msg) {
Toast.makeText(mContext, msg, Toast.LENGTH_LONG).show();
}
}
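To actually use the class above, the app also needs the RECORD_AUDIO and INTERNET permissions. Below is a minimal usage sketch; the Activity name and request code are hypothetical, and the runtime-permission handling is stripped down to the essentials.

import android.Manifest;
import android.app.Activity;
import android.content.pm.PackageManager;
import android.os.Build;
import android.os.Bundle;

// Hypothetical host Activity: requests RECORD_AUDIO at runtime (API 23+) and
// then starts online recognition with the ASROnline class above.
public class VoiceDemoActivity extends Activity {
    private static final int REQ_RECORD_AUDIO = 100; // arbitrary request code
    private ASROnline mAsrOnline;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        mAsrOnline = new ASROnline(this);
        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.M
                || checkSelfPermission(Manifest.permission.RECORD_AUDIO) == PackageManager.PERMISSION_GRANTED) {
            mAsrOnline.startUnderstand();
        } else {
            requestPermissions(new String[]{Manifest.permission.RECORD_AUDIO}, REQ_RECORD_AUDIO);
        }
    }

    @Override
    public void onRequestPermissionsResult(int requestCode, String[] permissions, int[] grantResults) {
        super.onRequestPermissionsResult(requestCode, permissions, grantResults);
        if (requestCode == REQ_RECORD_AUDIO && grantResults.length > 0
                && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
            mAsrOnline.startUnderstand();
        }
    }
}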
public class InitJava {
public List<NetAsrBean> net_asr;
public List<NetNluBean> net_nlu;
public static class NetAsrBean {
public String engine_mode;
public boolean last_result;
public String recognition_result;
public String result_type;
public String sessionID;
}
public static class NetNluBean {
public String code;
public GeneralBean general;
public String history;
public String nluProcessTime;
public int rc;
public String responseId;
public String service;
public String text;
public static class GeneralBean {
public String text;
public String type;
}
}
}
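asrResultOperate() above deserializes the NLU payload into a WeatherJavaBean that is not shown here. A minimal sketch, reconstructed only from the fields the method actually reads (the real UniSound weather payload carries more fields than this), could look like:

import java.util.List;

// Hypothetical bean containing only the fields asrResultOperate() accesses;
// field names follow the JSON keys so Gson can map them by name.
public class WeatherJavaBean {
    public List<NetNluBean> net_nlu;

    public static class NetNluBean {
        public String code;   // service code, e.g. "FORECAST" for a weather query
        public DataBean data;
    }

    public static class DataBean {
        public String header; // spoken summary text
        public ResultBean result;
    }

    public static class ResultBean {
        public List<WeatherDayBean> weatherDays;
    }

    public static class WeatherDayBean {
        public String carWashIndexDesc; // car-wash index description
    }
}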
2. Speech synthesis (TTS)
public class YunSpeechSynthesisUtil {
private static SpeechSynthesizer mTTSPlayer;
public static void startYunSpeechSYnthesis(Context context, String speechContent){
// Create the speech synthesis object
mTTSPlayer = new SpeechSynthesizer(context, Config.appKey, Config.secret);
mTTSPlayer.setOption(SpeechConstants.TTS_SERVICE_MODE, SpeechConstants.TTS_SERVICE_MODE_NET);
mTTSPlayer.init("");
mTTSPlayer.setTTSListener(new SpeechSynthesizerListener() {
@Override
public void onEvent(int type) {
switch (type) {
case SpeechConstants.TTS_EVENT_INIT:
// Initialization finished
break;
case SpeechConstants.TTS_EVENT_SYNTHESIZER_START:
// Synthesis started
break;
case SpeechConstants.TTS_EVENT_SYNTHESIZER_END:
// Synthesis finished
break;
case SpeechConstants.TTS_EVENT_BUFFER_BEGIN:
// Buffering started
break;
case SpeechConstants.TTS_EVENT_BUFFER_READY:
// Buffer ready
break;
case SpeechConstants.TTS_EVENT_PLAYING_START:
// Playback started
break;
case SpeechConstants.TTS_EVENT_PLAYING_END:
// Playback finished
break;
case SpeechConstants.TTS_EVENT_PAUSE:
// Paused
break;
case SpeechConstants.TTS_EVENT_RESUME:
// Resumed
break;
case SpeechConstants.TTS_EVENT_STOP:
// Stopped
break;
case SpeechConstants.TTS_EVENT_RELEASE:
// Resources released
break;
default:
break;
}
}
// TTS error callback
@Override
public void onError(int type, String errorMSG) {
if (errorMSG != null) {
System.out.println(errorMSG+"错误信息");
} else {
System.out.println("对不起,说的没听清");
}
}
});
mTTSPlayer.playText(speechContent);
}
}
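One thing to watch: the helper above creates and initializes a new SpeechSynthesizer on every call. A small variation, using only the calls already shown above, is to create the player once and reuse it; the listener from the original would then also be attached once inside the same if block. This is just a sketch of the idea, not SDK-mandated usage.

// Sketch of a reusable TTS helper: create the synthesizer lazily, once,
// and reuse it for subsequent playText() calls.
public class ReusableTtsHelper {
    private static SpeechSynthesizer sPlayer;

    public static void speak(Context context, String speechContent) {
        if (sPlayer == null) {
            sPlayer = new SpeechSynthesizer(context, Config.appKey, Config.secret);
            sPlayer.setOption(SpeechConstants.TTS_SERVICE_MODE, SpeechConstants.TTS_SERVICE_MODE_NET);
            sPlayer.init("");
            // attach the SpeechSynthesizerListener from the original here if you need the callbacks
        }
        sPlayer.playText(speechContent);
    }
}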
3. Voice wake-up
public class YunWakeUpOffline {
private Context mWakeContext;
private SpeechUnderstander mWakeUpRecognizer;
private static final String WAKEUP_TAG = "wakeup";
private Vibrator mVibrator;
public YunWakeUpOffline(Context context){
this.mWakeContext = context;
// Obtain the system vibrator so we can vibrate on a successful wake-up
this.mVibrator = (Vibrator) context.getSystemService(Context.VIBRATOR_SERVICE);
}
/**
* Initialize local offline wake-up
*/
public void startWakeUp() {
mWakeUpRecognizer = new SpeechUnderstander(mWakeContext, Config.appKey, null);
mWakeUpRecognizer.setOption(SpeechConstants.ASR_SERVICE_MODE, SpeechConstants.ASR_SERVICE_MODE_LOCAL);
mWakeUpRecognizer.setOption(SpeechConstants.WAKEUP_EVENT_SET_WAKEUPWORD_DONE,"你好,魔方"); // set the wake-up word
mWakeUpRecognizer.init("");
mWakeUpRecognizer.setListener(new SpeechUnderstanderListener() {
// successful result callback
@Override
public void onResult(int type, String jsonResult) {
System.out.println(jsonResult+"语音唤醒jsonResult");
YunSpeechSynthesisUtil.startYunSpeechSYnthesis(mWakeContext,"你好");
}
@Override
public void onEvent(int type, int timeMs) {
switch (type) {
case SpeechConstants.WAKEUP_EVENT_RECOGNITION_SUCCESS: // wake-up word recognized
Log.d("TEMPLOG", "WAKEUP_EVENT_RECOGNITION_SUCCESS");
mVibrator.vibrate(300);
YunSpeechSynthesisUtil.startYunSpeechSYnthesis(mWakeContext,"语音已唤醒");
break;
case SpeechConstants.ASR_EVENT_RECORDING_START:
Log.d("TEMPLOG", "ASR_EVENT_RECORDING_START");
//TODO set up a prompt here
break;
case SpeechConstants.ASR_EVENT_RECORDING_STOP:
Log.d("TEMPLOG", "ASR_EVENT_RECORDING_STOP");
break;
case SpeechConstants.ASR_EVENT_ENGINE_INIT_DONE:
Log.d("TEMPLOG", "ASR_EVENT_ENGINE_INIT_DONE");
wakeUpStart();
break;
default:
break;
}
}
@Override
public void onError(int type, String errorMSG) {
}
});
}
/**
* Start voice wake-up
*/
protected void wakeUpStart() {
mWakeUpRecognizer.start(WAKEUP_TAG);
}
}
In real-world development these pieces are almost always used together, so combine them according to your own needs. A rough sketch of how they can be wired up follows below.
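For example, the glue code could look like the sketch below. The VoiceAssistant class and the idea of calling onWokenUp() from the wake-up listener are hypothetical wiring, not part of the SDK.

import android.content.Context;

// Hypothetical glue class: start offline wake-up first, then hand off to
// online recognition; ASROnline already speaks its results via the TTS helper.
public class VoiceAssistant {
    private final ASROnline mAsrOnline;
    private final YunWakeUpOffline mWakeUp;

    public VoiceAssistant(Context context) {
        mAsrOnline = new ASROnline(context);
        mWakeUp = new YunWakeUpOffline(context);
    }

    // Begin listening for the wake-up word ("你好,魔方" in the example above).
    public void start() {
        mWakeUp.startWakeUp();
    }

    // Call this from the WAKEUP_EVENT_RECOGNITION_SUCCESS branch of the wake-up
    // listener (for example through a small callback you add to YunWakeUpOffline).
    public void onWokenUp() {
        mAsrOnline.startUnderstand();
    }
}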