Speech Synthesis (iOS Built-in and iFlytek TTS)

1. A simple wrapper around the speech synthesizer built into iOS (an API available since iOS 7).

////------------------------------------------------
//// A thin wrapper around the iOS built-in speech synthesizer
#import <AVFoundation/AVFoundation.h>

@interface ZRRSpeechSynthesizer : NSObject

// Singleton accessor
+ (ZRRSpeechSynthesizer *)sharedSpeechInstance;

// Set the language used by the shared instance
- (BOOL)setSpeechLanguage:(NSString *)language;

// Start speaking the given text
- (void)startSpeechText:(NSString *)text;

// Pause speech playback
- (void)pauseSpeechText;

// Stop speech playback
- (void)stopSpeechText;

// The underlying speech synthesizer
@property (nonatomic, strong) AVSpeechSynthesizer *speechSynthesizer;

// Whether speech is enabled; defaults to YES
@property (nonatomic, assign) BOOL enabled;

// Voice used for speech; defaults to Chinese, zh-CN
@property (nonatomic, strong) AVSpeechSynthesisVoice *voice;

@end


@implementation ZRRSpeechSynthesizer

- (instancetype)init
{
    if (self = [super init]) {
        _enabled = YES;
        _voice = [AVSpeechSynthesisVoice voiceWithLanguage:@"zh-CN"];
        _speechSynthesizer = [[AVSpeechSynthesizer alloc] init];
    }
    return self;
}

+ (ZRRSpeechSynthesizer *)sharedSpeechInstance
{
    __strong static ZRRSpeechSynthesizer *instance = nil;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        instance = [[self alloc] init];
    });
    return instance;
}

- (BOOL)setSpeechLanguage:(NSString *)language
{
    AVSpeechSynthesisVoice *voice = [AVSpeechSynthesisVoice voiceWithLanguage:language];
    if (voice) {
        self.voice = voice;
        return YES;
    }
    return NO;
}

- (void)startSpeechText:(NSString *)text
{
    if (_enabled == NO)
        return;

    AVSpeechUtterance *utterance = [AVSpeechUtterance speechUtteranceWithString:text];
    utterance.voice = _voice;
    utterance.volume = 1.0;          // Volume, range [0, 1]; default 1
    utterance.rate = 0.5;            // Speech rate
    utterance.pitchMultiplier = 1;   // Pitch, range [0.5, 2]; default 1

    // Cut off any utterance that is still playing, then speak the new one
    [_speechSynthesizer stopSpeakingAtBoundary:AVSpeechBoundaryImmediate];
    [_speechSynthesizer speakUtterance:utterance];
}

- (void)pauseSpeechText
{
    if ([self.speechSynthesizer isSpeaking]) {
        [self.speechSynthesizer pauseSpeakingAtBoundary:AVSpeechBoundaryImmediate];
    }
}

- (void)stopSpeechText
{
    if ([self.speechSynthesizer isSpeaking]) {
        [self.speechSynthesizer stopSpeakingAtBoundary:AVSpeechBoundaryImmediate];
    }
}

@end
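
A minimal usage sketch, calling only the methods declared above (the text string here is just an example):

#import "ZRRSpeechSynthesizer.h"

// e.g. inside a button handler
ZRRSpeechSynthesizer *synth = [ZRRSpeechSynthesizer sharedSpeechInstance];
[synth setSpeechLanguage:@"zh-CN"];      // returns NO if the language is unavailable
[synth startSpeechText:@"你好,世界"];     // start speaking
// ... later ...
[synth pauseSpeechText];                 // pause immediately
[synth stopSpeechText];                  // stop playback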


2. Calling the iFlytek (科大讯飞) SDK

// Connect to the iFlytek speech server with your appid; replace @"53b5560a" with the appid you applied for
NSString *initString = [NSString stringWithFormat:@"%@=%@", [IFlySpeechConstant APPID], @"53b5560a"];

// createUtility must be executed before any service is started
[IFlySpeechUtility createUtility:initString];

// Create the synthesizer object (a singleton)
_iFlySpeechSynthesizer = [IFlySpeechSynthesizer sharedInstance];
_iFlySpeechSynthesizer.delegate = self;

// Configure the synthesis parameters

// Speech rate, range 0-100
[_iFlySpeechSynthesizer setParameter:@"50" forKey:[IFlySpeechConstant SPEED]];

// Volume, range 0-100
[_iFlySpeechSynthesizer setParameter:@"50" forKey:[IFlySpeechConstant VOLUME]];

// Voice name, defaults to "xiaoyan"; see the voice list for the available values
[_iFlySpeechSynthesizer setParameter:@"vixr" forKey:[IFlySpeechConstant VOICE_NAME]];

// Audio sample rate; 16000 and 8000 are currently supported
[_iFlySpeechSynthesizer setParameter:@"8000" forKey:[IFlySpeechConstant SAMPLE_RATE]];

// Path for saving the synthesized audio; set the value to nil if it is not needed. The default directory is Documents
[_iFlySpeechSynthesizer setParameter:@"tts.pcm" forKey:[IFlySpeechConstant TTS_AUDIO_PATH]];
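
With the parameters in place, synthesis is started by handing text to the synthesizer. The call below assumes the SDK exposes a startSpeaking: method; verify the exact name against IFlySpeechSynthesizer.h in your SDK version.

// Sketch only: startSpeaking: is assumed to be the SDK entry point for TTS playback
[_iFlySpeechSynthesizer startSpeaking:@"欢迎使用科大讯飞语音合成"];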


/**
 * Speech synthesis callbacks
 */
@protocol IFlySpeechSynthesizerDelegate

@required

/**
 * Completion callback
 * Called when the whole synthesis has finished
 *
 * @param error error code
 */
- (void)onCompleted:(IFlySpeechError *)error;

@optional

/**
 * Called when speaking begins
 */
- (void)onSpeakBegin;

/**
 * Buffering progress callback
 *
 * @param progress buffering progress, 0-100
 * @param msg additional information; nil in this version
 */
- (void)onBufferProgress:(int)progress message:(NSString *)msg;

/**
 * Playback progress callback
 *
 * @param progress playback progress, 0-100
 */
- (void)onSpeakProgress:(int)progress;

/**
 * Called when playback is paused
 */
- (void)onSpeakPaused;

/**
 * Called when playback resumes
 */
- (void)onSpeakResumed;

/**
 * Cancel callback
 * Called after `cancel` is invoked
 */
- (void)onSpeakCancel;

/**
 * Extended event callback
 * Returns extra data depending on the event type
 *
 * @param eventType event type; see the IFlySpeechEventType enum. Currently only EVENT_TTS_BUFFER is supported, i.e. the synthesized audio is returned in real time.
 * @param arg0 arg0
 * @param arg1 arg1
 * @param eventData event data
 */
- (void)onEvent:(int)eventType arg0:(int)arg0 arg1:(int)arg1 data:(NSData *)eventData;

@end
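
A minimal delegate sketch, implementing only callbacks declared in the protocol above (the logging is illustrative; the adopting class is assumed to declare <IFlySpeechSynthesizerDelegate>):

- (void)onSpeakBegin
{
    NSLog(@"TTS playback started");
}

- (void)onBufferProgress:(int)progress message:(NSString *)msg
{
    NSLog(@"Buffering: %d%%", progress);
}

- (void)onCompleted:(IFlySpeechError *)error
{
    // error carries the error code once the whole synthesis has finished
    NSLog(@"TTS finished, error: %@", error);
}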
