项目地址:https://github.com/zhanlv/VtDemo
讯飞AndroidSDK文档:https://doc.xfyun.cn/msc_android/%E9%A2%84%E5%A4%87%E5%B7%A5%E4%BD%9C.html
讯飞语音SDK不提供Android平台的语音转写功能(转写只支持Java平台;Android应用可以通过请求自己的后台服务来间接使用)。这里改用语音听写来识别音频文件内容,官方支持的音频长度≤60s(更长的音频文件未经测试)。
创建应用-添加新服务-【语音听写】
集成讯飞语音SDK到项目后,可以创建一个识别类,将下面代码复制到项目即可
创建SpeechRecognizer实例,并完成初始化操作(上传热词):
/**
 * Creates the SpeechRecognizer and uploads a hot-word lexicon built from the
 * current user list (user names) to improve recognition accuracy.
 *
 * @param context Android context used to create the recognizer
 */
@Override public void start(Context context) {
    // Fix: use the caller-supplied context; the parameter was previously
    // ignored in favor of the mContext field.
    mRecognizer = SpeechRecognizer.createRecognizer(context, mInitListener);
    mSpeechResult = new StringBuilder();
    if (mRecognizer == null) {
        // Recognizer creation failed (e.g. SDK not initialized) — nothing to configure.
        return;
    }
    mUserList = UserManager.getUserList();
    UserWords userWords = new UserWords();
    // Upload hot words (user names, keywords) to improve recognition accuracy.
    if (mUserList != null) { // guard: getUserList() may return null — TODO confirm
        for (User user : mUserList) {
            userWords.putWord(user.getName());
        }
    }
    mRecognizer.setParameter(SpeechConstant.ENGINE_TYPE, SpeechConstant.TYPE_CLOUD);
    mRecognizer.setParameter(SpeechConstant.TEXT_ENCODING, "utf-8");
    ret = mRecognizer.updateLexicon("userword", userWords.toString(), new LexiconListener() {
        @Override public void onLexiconUpdated(String s, SpeechError error) {
            // Asynchronous result of the lexicon upload; log either way.
            if (error != null) {
                LogUtil.e(error.toString());
            } else {
                LogUtil.e("success");
            }
        }
    });
    if (ret != ErrorCode.SUCCESS) {
        mView.showTip("上传热词失败,错误码:" + ret);
    }
}
//设置参数
/**
 * Configures recognizer parameters before a recognition session.
 * NOTE: the order matters — setParameter(PARAMS, null) clears all previously
 * set parameters and must run before the others.
 *
 * @param vadEos  trailing-silence timeout in ms (stop listening after this much silence)
 * @param asrPtt  "1" to include punctuation in results, "0" to omit it
 * @param asrPath path where the recognized audio is saved
 */
public void setParam(String vadEos, String asrPtt, String asrPath) {
if (mRecognizer == null) {
return;
}
mView.stopPlayer();
// Clear any parameters left over from a previous session.
mRecognizer.setParameter(SpeechConstant.PARAMS, null);
// Use the cloud recognition engine.
mRecognizer.setParameter(SpeechConstant.ENGINE_TYPE, SpeechConstant.TYPE_CLOUD);
// Return results as JSON.
mRecognizer.setParameter(SpeechConstant.RESULT_TYPE, "json");
mRecognizer.setParameter(SpeechConstant.LANGUAGE, "zh_cn");
// Language accent/region.
mRecognizer.setParameter(SpeechConstant.ACCENT, "zh_cn");
// Leading voice activity detection: how long the user may stay silent
// before speaking until the session times out (ms).
mRecognizer.setParameter(SpeechConstant.VAD_BOS, "4000");
// Trailing voice activity detection: how long the user must stay silent
// after speaking before recording stops automatically (ms).
mRecognizer.setParameter(SpeechConstant.VAD_EOS, vadEos);
// Punctuation: "0" = results without punctuation, "1" = with punctuation.
mRecognizer.setParameter(SpeechConstant.ASR_PTT, asrPtt);
mRecognizer.setParameter(SpeechConstant.AUDIO_FORMAT, "wav");
// Where to save the audio of this recognition session.
mRecognizer.setParameter(SpeechConstant.ASR_AUDIO_PATH, asrPath);
// Audio source "-1" = caller writes the audio stream (writeAudio).
// Comment this line out to record from the device microphone instead.
mRecognizer.setParameter(SpeechConstant.AUDIO_SOURCE, "-1");
}
//设置参数完成后,开始识别
/**
 * Starts recognition after setParam() has configured the session.
 *
 * @param asrFile WAV/PCM audio file to recognize, or null to capture from the
 *                microphone (in that case the AUDIO_SOURCE "-1" parameter must
 *                not be set)
 */
public void startRecognizer(File asrFile) {
    // Fix: guard against a failed recognizer creation, consistent with the
    // null checks in the other methods of this class.
    if (mRecognizer == null) {
        return;
    }
    ret = mRecognizer.startListening(mRecognizerListener);
    if (ret != ErrorCode.SUCCESS) {
        mView.showTip("听写失败,错误码:" + ret);
        return;
    }
    if (asrFile == null) {
        // Microphone mode: the SDK captures the audio itself.
        return;
    }
    final byte[] audioData = FileUtils.readFile(asrFile);
    if (audioData != null) {
        // Feed the whole file as one audio write, then end the session so the
        // SDK starts recognizing.
        mRecognizer.writeAudio(audioData, 0, audioData.length);
        mRecognizer.stopListening();
    } else {
        mRecognizer.cancel();
        LogUtil.e("read audiorecord file failed!");
    }
}
/** Callbacks for the recognition session; accumulates partial results into mSpeechResult. */
private RecognizerListener mRecognizerListener = new RecognizerListener() {
    @Override public void onBeginOfSpeech() {
        mView.showTip("开始说话");
        // Reset the accumulated text at the start of each session.
        mSpeechResult.setLength(0);
    }

    @Override public void onEndOfSpeech() {
        mView.showTip("结束说话");
    }

    @Override public void onResult(RecognizerResult results, boolean isLast) {
        // Results arrive in pieces; parse each JSON chunk and append it.
        mSpeechResult.append(JsonParser.parseIatResult(results.getResultString()));
        // The full text so far is mSpeechResult.toString().
    }

    @Override public void onError(SpeechError error) {
        mView.showTip(error.getPlainDescription(true));
    }

    @Override public void onVolumeChanged(int volume, byte[] data) {
        mView.showTip("当前正在说话,音量大小:" + volume);
    }

    @Override public void onEvent(int eventType, int arg1, int arg2, Bundle obj) {
        // No-op: session events are not used here.
    }
};
/**
 * Reads the entire content of a file into a byte array.
 *
 * @param file the file to read
 * @return the file's bytes, or null if the file could not be read completely
 */
public static byte[] readFile(File file) {
    FileInputStream fis = null;
    byte[] bytes = null;
    try {
        fis = new FileInputStream(file);
        // Fix: available() only estimates the bytes readable without blocking,
        // and a single read() may return fewer bytes than requested. Size the
        // buffer from the file length and loop until it is completely filled.
        int length = (int) file.length();
        byte[] buffer = new byte[length];
        int offset = 0;
        while (offset < length) {
            int count = fis.read(buffer, offset, length - offset);
            if (count < 0) {
                break; // unexpected EOF: file shrank while reading
            }
            offset += count;
        }
        if (offset == length) {
            bytes = buffer;
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (fis != null) {
            try {
                fis.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
    return bytes;
}
注意事项:默认识别语音文件,且格式是WAV或PCM。如果想通过麦克风获取语音,注释 mRecognizer.setParameter(SpeechConstant.AUDIO_SOURCE, "-1")这一行,startRecognizer(null)传入参数为null即可