1. Import the dependency frameworks
SystemConfiguration.framework
AudioToolbox.framework
UIKit.framework
AVFoundation.framework
Foundation.framework
libz.tbd
Security.framework
QuartzCore.framework
CoreText.framework
GLKit.framework
OpenGLES.framework
CoreLocation.framework
CFNetwork.framework
CoreGraphics.framework
Note: you also need to link CoreTelephony.framework.
2. SDK files that need to be included
From the package downloaded from the open platform: the Headers folder, the Third Part folder, the ..resources folder (tone and scheme), and the .a static library file.
3. Set the project's Enable Bitcode build setting to NO.
4. In Build Settings, set Other Linker Flags to -ObjC; both of these settings are sketched below.
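If you prefer to keep steps 3 and 4 in a configuration file, they correspond to the standard Xcode build-setting keys ENABLE_BITCODE and OTHER_LDFLAGS. A minimal sketch (the file name is only an example; attach the xcconfig to the app target):

// BaiduVoiceSDK.xcconfig (example file name)
ENABLE_BITCODE = NO
// -ObjC makes the linker load all Objective-C classes and categories from the static library
OTHER_LDFLAGS = $(inherited) -ObjC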
Sample demo:
#import "ViewController.h"
#import "BDVoiceRecognitionClient.h"
@interface ViewController () <MVoiceRecognitionClientDelegate>
// Label used to show status and recognition results (assumed to be wired up as an IBOutlet in the storyboard)
@property (weak, nonatomic) IBOutlet UILabel *label;
@end
@implementation ViewController
- (void)viewDidLoad {
    [super viewDidLoad];
}

- (void)viewWillDisappear:(BOOL)animated
{
    [super viewWillDisappear:animated];
    // Stop listening to the voice volume (DB level) when the view disappears
    [[BDVoiceRecognitionClient sharedInstance] cancelListenCurrentDBLevelMeter];
}

- (void)didReceiveMemoryWarning {
    [super didReceiveMemoryWarning];
    // Dispose of any resources that can be recreated.
}

- (IBAction)inputBtnClick:(id)sender {
    BDVoiceRecognitionClient *client = [BDVoiceRecognitionClient sharedInstance];
    [client setApiKey:@"Au2wN2SaDOpYZHgGqrIymMkU" withSecretKey:@"a0212d1fa0f28699aa5d1162a1bcbf1c"];
    // Set the recognition vertical (domain)
    //[client setPropertyList:@[[NSNumber numberWithInt:EVoiceRecognitionPropertyVideo]]];
    // Set the recognition language to Mandarin (note: EVoiceRecognitionPropertyMusic is a
    // property/domain constant, so one of the SDK's language constants is likely intended here)
    [client setLanguage:EVoiceRecognitionPropertyMusic];
    // Disable punctuation in the result (not disabled by default)
    [client disablePuncs:YES];
    // Enable voice activity detection (VAD), so the SDK decides automatically when speech has ended (on by default)
    [client setNeedVadFlag:YES];
    // Compress the audio before uploading it (compression is on by default)
    [client setNeedCompressFlag:YES];
    // Set how long to wait for the online recognizer; on timeout, synchronous offline recognition is triggered
    [client setOnlineWaitTime:5];
    // Enable natural language understanding (NLU) results
    [client setConfig:@"nlu" withFlag:YES];
    // Play a prompt tone when recording starts
    [client setPlayTone:EVoiceRecognitionPlayTonesRecStart isPlay:YES];
    // Play a prompt tone when recording ends
    [client setPlayTone:EVoiceRecognitionPlayTonesRecEnd isPlay:YES];
    // Enable volume (DB level) metering
    [client listenCurrentDBLevelMeter];
    // Read the current volume level (the return value is ignored in this demo)
    [client getCurrentDBLevelMeter];
    int startStatus = [client startVoiceRecognition:self];
    switch (startStatus) {
        case EVoiceRecognitionStartWorking:
            self.label.text = @"Started successfully!";
            break;
        default:
            self.label.text = [NSString stringWithFormat:@"Failed to start - error code: %d", startStatus];
            break;
    }
}

- (void)VoiceRecognitionClientWorkStatus:(int)aStatus obj:(id)aObj {
    switch (aStatus) {
        case EVoiceRecognitionClientWorkStatusFlushData: {
            // The server returned a partial (intermediate) result. To show it to the user
            // (for a continuous on-screen effect), use the data delivered with this status.
            // Clear the previously displayed text each time a new message of this kind
            // arrives, to avoid duplication.
            NSMutableString *tmpString = [[NSMutableString alloc] initWithString:@""];
            [tmpString appendFormat:@"%@", [aObj objectAtIndex:0]];
            NSLog(@"result: %@", tmpString);
            break;
        }
        case EVoiceRecognitionClientWorkStatusFinish: {
            // The recognition server returned the final result, delivered as an array in aObj.
            // Clear the previously displayed text when this message arrives, to avoid duplication.
            if ([[BDVoiceRecognitionClient sharedInstance] getRecognitionProperty] != EVoiceRecognitionPropertyInput) {
                // Non-input mode: parse and display the list of candidate results
                NSMutableArray *resultData = (NSMutableArray *)aObj;
                NSMutableString *tmpString = [[NSMutableString alloc] initWithString:@""];
                for (int i = 0; i < [resultData count]; i++) {
                    [tmpString appendFormat:@"%@\r\n", [resultData objectAtIndex:i]];
                }
                self.label.text = tmpString;
            } else {
                // Input mode: aObj is an array and each result it contains is also an array
                NSMutableString *sentenceString = [[NSMutableString alloc] initWithString:@""];
                for (NSArray *result in aObj) {
                    // Take the first candidate of each result and concatenate them.
                    // Each element of result is a dictionary mapping a candidate word to its confidence.
                    NSDictionary *dic = [result objectAtIndex:0];
                    NSString *candidateWord = [[dic allKeys] objectAtIndex:0];
                    [sentenceString appendString:candidateWord];
                }
                NSLog(@"result: %@", sentenceString);
            }
            break;
        }
        case EVoiceRecognitionClientWorkStatusReceiveData: {
            // This status only occurs in input mode. Recognition returned a result and a
            // notification is delivered once per sub-sentence, cumulatively (i.e. the result
            // carried by the second message also contains the first sentence's result), so the
            // app can display the text sentence by sentence. Combined with the continuously
            // updated partial results, this further improves the voice-input experience.
            NSMutableString *sentenceString = [[NSMutableString alloc] initWithString:@""];
            for (NSArray *result in aObj) {
                // aObj is an array and each result is also an array; take the first candidate
                // of each result and concatenate them. Each element of result is a dictionary
                // mapping a candidate word to its confidence.
                NSDictionary *dic = [result objectAtIndex:0];
                NSString *candidateWord = [[dic allKeys] objectAtIndex:0];
                [sentenceString appendString:candidateWord];
            }
            NSLog(@"result: %@", sentenceString);
            break;
        }
        case EVoiceRecognitionClientWorkStatusNewRecordData: {
            // Recorded audio data is available. The format is PCM: 16 kHz/16-bit when on Wi-Fi,
            // 8 kHz/16-bit otherwise.
            break;
        }
        case EVoiceRecognitionClientWorkStatusEnd: {
            // The user has finished speaking, but the server has not returned a result yet
            break;
        }
        case EVoiceRecognitionClientWorkStatusCancel: {
            // The user actively cancelled recognition
            break;
        }
        case EVoiceRecognitionClientWorkStatusError: {
            // Error state (this demo simply reports it as missing speech input)
            self.label.text = @"No speech input";
            break;
        }
        case EVoiceRecognitionClientWorkPlayStartTone:
        case EVoiceRecognitionClientWorkPlayStartToneFinish:
        case EVoiceRecognitionClientWorkStatusStartWorkIng:
        case EVoiceRecognitionClientWorkStatusStart:
        case EVoiceRecognitionClientWorkPlayEndToneFinish:
        case EVoiceRecognitionClientWorkPlayEndTone:
            break;
    }
}
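One point the demo does not cover: recording needs microphone access. Below is a minimal sketch of asking for it with the standard AVAudioSession API from AVFoundation (already in the dependency list) before starting recognition; the helper name requestMicThenStart is hypothetical, and on iOS 10 and later an NSMicrophoneUsageDescription entry in Info.plist is also required.

#import <AVFoundation/AVFoundation.h>

// Sketch: ask for microphone permission, then start recognition (error handling omitted)
- (void)requestMicThenStart {
    [[AVAudioSession sharedInstance] requestRecordPermission:^(BOOL granted) {
        dispatch_async(dispatch_get_main_queue(), ^{
            if (granted) {
                // Safe to start the recognizer now, e.g. reuse the button action
                [self inputBtnClick:nil];
            } else {
                self.label.text = @"Microphone access denied";
            }
        });
    }];
}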