在iOS中音频按照播放形式可以分为音效播放和音乐播放。音效主要指的是一些短音频,通常作为点缀音频,如提示音,对于这类音频不需要进行进度、循环等控制。音乐主要指的是一些较长的音频,通常是主音频,对于这类音频播放通常需要精确的控制。在iOS中播放音效一般使用AudioToolbox.framework这个框架,播放音乐一般使用AVFoundation.framework。
语音合成
苹果的语音合成比较简单,直接上代码
1 2 3 4 5 6 7 8 9 10 11 12 13 14
NS_ASSUME_NONNULL_BEGIN

/// Delegate for receiving speech-synthesis status callbacks.
@protocol RBAppleSpeechSynthesizeDelegate <NSObject>
@required
/// Reports a synthesis status change.
/// @param status The status string.
/// @param info   Additional information accompanying the status.
- (void)appleSynthesizerWithResult:(NSString *)status info:(NSString *)info;
@end

/// Singleton wrapper around AVSpeechSynthesizer for Mandarin (zh-CN)
/// text-to-speech.
@interface RBAppleSpeechSynthesize : NSObject

/// Receiver of synthesis status callbacks; held weakly to avoid a retain cycle.
@property (nonatomic, weak, nullable) id<RBAppleSpeechSynthesizeDelegate> delegate;

/// Shared singleton instance.
+ (RBAppleSpeechSynthesize *)sharedInstance;

/// Speaks the given string (spaces stripped), interrupting any utterance
/// already in progress. A nil or all-space string is ignored.
- (void)appleStartSpeak:(nullable NSString *)string;

/// Stops speaking immediately.
- (void)appleStopSpeak;

/// Stops speaking and tears down the synthesizer state.
- (void)appleCancel;

@end

NS_ASSUME_NONNULL_END
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96
/// Private state: the synthesizer, the cached zh-CN voice, and the utterance
/// currently being spoken.
@interface RBAppleSpeechSynthesize () <AVSpeechSynthesizerDelegate>

@property (nonatomic, strong) AVSpeechSynthesizer *synthesizer;
@property (nonatomic, strong) AVSpeechSynthesisVoice *synthesizeVoice;
@property (nonatomic, strong) AVSpeechUtterance *speechUtterance;

@end

@implementation RBAppleSpeechSynthesize

#pragma mark - Lifecycle

+ (RBAppleSpeechSynthesize *)sharedInstance {
    static RBAppleSpeechSynthesize *singleSynthesize = nil;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        singleSynthesize = [[RBAppleSpeechSynthesize alloc] init];
    });
    return singleSynthesize;
}

- (instancetype)init {
    self = [super init];
    if (self) {
        [self setUpSynthesizer];
    }
    return self;
}

/// Creates the AVSpeechSynthesizer and caches the Mandarin (zh-CN) voice.
/// Renamed from the misspelled `-initAppleSysthesize`: a selector beginning
/// with `init` falls in ARC's init method family and must not return void,
/// so the old name was a latent compiler error. The method is private, so no
/// external callers are affected.
- (void)setUpSynthesizer {
    self.synthesizer = [[AVSpeechSynthesizer alloc] init];
    self.synthesizer.delegate = self;
    self.synthesizeVoice = [AVSpeechSynthesisVoice voiceWithLanguage:@"zh-CN"];
}

#pragma mark - Public

- (void)appleStartSpeak:(NSString *)string {
    if (string == nil) {
        return;
    }
    NSString *trimmed = [string stringByReplacingOccurrencesOfString:@" " withString:@""];
    if (trimmed.length == 0) {
        return;
    }
    // -appleCancel tears the synthesizer down; lazily rebuild it so the
    // singleton keeps working afterwards instead of silently messaging nil.
    if (self.synthesizer == nil) {
        [self setUpSynthesizer];
    }
    if (self.synthesizer.isSpeaking) {
        // Interrupt the current utterance; only speak the new text if the
        // stop actually succeeded (mirrors the original control flow).
        BOOL stopped = [self.synthesizer stopSpeakingAtBoundary:AVSpeechBoundaryImmediate];
        if (stopped) {
            [self speechWithString:trimmed];
        }
    } else {
        [self speechWithString:trimmed];
    }
}

- (void)appleStopSpeak {
    [self.synthesizer stopSpeakingAtBoundary:AVSpeechBoundaryImmediate];
}

- (void)appleCancel {
    [self.synthesizer stopSpeakingAtBoundary:AVSpeechBoundaryImmediate];
    self.synthesizer.delegate = nil;
    self.synthesizer = nil;
    self.synthesizeVoice = nil;
    self.speechUtterance = nil;
}

#pragma mark - Private

/// Configures the shared audio session for playback, then speaks `string`
/// with the cached zh-CN voice.
- (void)speechWithString:(NSString *)string {
    AVAudioSession *avSession = [AVAudioSession sharedInstance];
    NSError *avError = nil;
    // Judge success by the method's BOOL return, not the error pointer:
    // the NSError out-parameter may be non-nil even when the call succeeds.
    if (![avSession setCategory:AVAudioSessionCategoryPlayback error:&avError]) {
        NSLog(@"AVAudioSession category error:[%@]", avError.localizedDescription);
    }
    [avSession setActive:YES
             withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation
                   error:nil];

    self.speechUtterance = [AVSpeechUtterance speechUtteranceWithString:string];
    self.speechUtterance.voice = self.synthesizeVoice;
    self.speechUtterance.rate = 0.52;          // slightly above the default rate
    self.speechUtterance.volume = 1.0;
    self.speechUtterance.pitchMultiplier = 1;  // natural pitch
    [self.synthesizer speakUtterance:self.speechUtterance];
}

#pragma mark - AVSpeechSynthesizerDelegate

// NOTE(review): RBAppleSpeechSynthesizeDelegate is declared but never invoked
// from these callbacks — presumably callers are meant to be notified here;
// confirm and forward status/info to self.delegate if so.

- (void)speechSynthesizer:(AVSpeechSynthesizer *)synthesizer didStartSpeechUtterance:(AVSpeechUtterance *)utterance {
}

- (void)speechSynthesizer:(AVSpeechSynthesizer *)synthesizer didFinishSpeechUtterance:(AVSpeechUtterance *)utterance {
}

- (void)speechSynthesizer:(AVSpeechSynthesizer *)synthesizer didCancelSpeechUtterance:(AVSpeechUtterance *)utterance {
}

@end
|
使用语音合成可以避免导入音频资源,从而减小安装包的大小。
参考
- iOS语音识别和语音合成