Application scenarios:
1. When an alarm is raised, the Hikvision camera automatically plays a voice announcement (e.g. "Swimming prohibited, please leave immediately");
2. Clicking "Start Talk" on the web page streams the PC microphone audio to the Hikvision camera for two-way talk; clicking "Stop Talk" ends the talk session;
Technical implementation:
Built on the Hikvision SDK and the Netty server framework, exposing a WebAPI interface and a WebSocket interface; runs on both Windows and Linux.
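A minimal sketch of such a Netty bootstrap, with one pipeline serving both interfaces: plain HTTP requests (the WebAPI) fall through to WebApiHandler, while connections to the /talk path are upgraded to WebSocket and handled by TalkWebSocketHandler. The listen port, the /talk path, and both handler class names are placeholders for illustration, not the project's actual names.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpServerCodec;
import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler;

public class TalkServer {
    public static void main(String[] args) throws InterruptedException {
        NioEventLoopGroup boss = new NioEventLoopGroup(1);
        NioEventLoopGroup worker = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(boss, worker)
             .channel(NioServerSocketChannel.class)
             .childHandler(new ChannelInitializer<SocketChannel>() {
                 @Override
                 protected void initChannel(SocketChannel ch) {
                     ch.pipeline()
                       .addLast(new HttpServerCodec())
                       .addLast(new HttpObjectAggregator(65536))
                       // Handshake for ws://host:9000/talk; other HTTP requests pass through.
                       .addLast(new WebSocketServerProtocolHandler("/talk"))
                       // WebSocket command/audio frames (hypothetical handler, sketched further below).
                       .addLast(new TalkWebSocketHandler())
                       // Remaining HTTP requests: PlayMedia / StartTalk / StopTalk (hypothetical handler).
                       .addLast(new WebApiHandler());
                 }
             });
            b.bind(9000).sync().channel().closeFuture().sync();
        } finally {
            boss.shutdownGracefully();
            worker.shutdownGracefully();
        }
    }
}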
Software features:
1. Integrate the Hikvision SDK
2. WebAPI interface (PlayMedia, StartTalk, StopTalk); the audio here is locally captured audio data
3. WebSocket interface that forwards the web user's audio data to the Server program, which sends it to the Hikvision camera through the SDK (protocol below; a handler sketch follows this list)
Talk initialization
H5->Server sends: StartTalk::{ "ip": "192.168.3.2", "port": 8000, "name": "admin", "password": "yswy123456" }
Server->H5 returns: StartTalk::ACK_OK
Server->H5 returns: StartTalk::ACK_ERROR
Start talk
H5->Server sends: the audio data as a Base64 string
Stop talk
H5->Server sends: StopTalk::
Server->H5 returns: StopTalk::ACK_OK
Server->H5 returns: StopTalk::ACK_ERROR
4. Link check: the SDK session is released (exit) when the link times out
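A sketch of how the Server side can dispatch these WebSocket messages in a Netty handler is shown below. The StartTalk::/StopTalk:: prefixes and ACK replies follow the protocol above; Talk.start, Talk.stop, and Talk.send are hypothetical helpers standing in for the SDK login and voice-channel logic (see the core code further down), not the actual implementation.

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.websocketx.TextWebSocketFrame;
import java.util.Base64;

public class TalkWebSocketHandler extends SimpleChannelInboundHandler<TextWebSocketFrame> {
    @Override
    protected void channelRead0(ChannelHandlerContext ctx, TextWebSocketFrame frame) {
        String msg = frame.text();
        if (msg.startsWith("StartTalk::")) {
            // The JSON after the prefix carries ip/port/name/password for the SDK login.
            boolean ok = Talk.start(ctx.channel(), msg.substring("StartTalk::".length()));
            ctx.writeAndFlush(new TextWebSocketFrame(ok ? "StartTalk::ACK_OK" : "StartTalk::ACK_ERROR"));
        } else if (msg.startsWith("StopTalk::")) {
            boolean ok = Talk.stop(ctx.channel());
            ctx.writeAndFlush(new TextWebSocketFrame(ok ? "StopTalk::ACK_OK" : "StopTalk::ACK_ERROR"));
        } else {
            // Any other text frame is treated as a Base64-encoded audio chunk; forwarding it
            // also refreshes the timestamp used by the link check in item 4.
            Talk.send(ctx.channel(), Base64.getDecoder().decode(msg));
        }
    }
}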
Test report
Audio playback
Start talk
Stop talk
Solution details
Related configuration
1. Hikvision camera audio configuration
2. Copy the dll/so files into the system directory
Windows: C:/Windows/System32
Linux: /usr/lib
3. PCM file creation
1. Install ekho-5.8.exe
2. Install girl_xiaokun.exe
3. Run ttsapp.exe
4. Open the generated file in UltraEdit, select the 44 bytes of the file header and cut them (the Backspace key does not work here), so that those 44 bytes are removed, then save the result as a .pcm file (a sketch after this list performs the same header removal in code)
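For reference, a hypothetical helper that does the same thing as step 3.4 in code: copy the WAV output to a raw .pcm file by skipping the canonical 44-byte WAV header. The class and file names are placeholders.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;

public class WavToPcm {
    // Strips the 44-byte canonical WAV header and writes the remaining raw PCM data.
    public static void strip(String wavPath, String pcmPath) throws IOException {
        byte[] wav = Files.readAllBytes(Paths.get(wavPath));
        Files.write(Paths.get(pcmPath), Arrays.copyOfRange(wav, 44, wav.length));
    }

    public static void main(String[] args) throws IOException {
        strip("alarm.wav", "alarm.pcm");   // placeholder file names
    }
}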
Core code
// Play a local PCM audio file to the camera.
static void StartMedia(Camera entity, String sfilePath) {
    lockAudio.lock();

    // Query the camera's current audio compression parameters.
    HCNetSDK.NET_DVR_COMPRESSION_AUDIO lpCompressAudio = new HCNetSDK.NET_DVR_COMPRESSION_AUDIO();
    if (!hCNetSDK.NET_DVR_GetCurrentAudioCompress(entity.UserID, lpCompressAudio)) {
        lockAudio.unlock();
        return;
    }
    System.out.println("Audio encoding type=" + lpCompressAudio.byAudioEncType
            + " sampling rate=" + lpCompressAudio.byAudioSamplingRate
            + " bit rate=" + lpCompressAudio.byAudioBitRate
            + " bySupport=" + lpCompressAudio.bySupport);

    // Open a voice channel in transparent transmission mode.
    NativeLong mr = hCNetSDK.NET_DVR_StartVoiceCom_MR_V30(entity.UserID, 1, null, null);

    File file = new File(sfilePath);
    FileInputStream inputStream = null;
    try {
        inputStream = new FileInputStream(file);
        int buffLen = 320;                          // one PCM frame per send
        long currFileLen = 0;
        int readLen;
        byte[] buffer = new byte[buffLen];
        Memory pIB = new Memory(buffLen);
        Memory pOutBuffer = new Memory(buffLen);

        while (currFileLen < file.length()) {
            entity.LastTime = System.currentTimeMillis();
            readLen = inputStream.read(buffer);
            if (readLen <= 0) break;
            pIB.write(0, buffer, 0, readLen);
            currFileLen += readLen;

            // Encode the PCM frame to G.711 (320 bytes in, 160 bytes out).
            HCNetSDK.NET_DVR_AUDIOENC_INFO enc_info = new HCNetSDK.NET_DVR_AUDIOENC_INFO();
            enc_info.in_frame_size = buffLen;
            Pointer encoder = hCNetSDK.NET_DVR_InitG711Encoder(enc_info);

            HCNetSDK.NET_DVR_AUDIOENC_PROCESS_PARAM param = new HCNetSDK.NET_DVR_AUDIOENC_PROCESS_PARAM();
            param.in_buf = pIB;
            param.out_buf = pOutBuffer;
            param.out_frame_size = 160;
            param.g711_type = 0;
            if (!hCNetSDK.NET_DVR_EncodeG711Frame(encoder, param)) {
                System.out.println("G711 audio encoding failed! iErr = " + hCNetSDK.NET_DVR_GetLastError());
                break;
            }
            if (!hCNetSDK.NET_DVR_ReleaseG711Encoder(encoder)) {
                System.out.println("Releasing the G711 encoder failed! iErr = " + hCNetSDK.NET_DVR_GetLastError());
                break;
            }

            // Send the encoded frame over the voice channel.
            if (!hCNetSDK.NET_DVR_VoiceComSendData(mr, pOutBuffer, 160)) {
                System.out.println("Sending voice data failed! iErr = " + hCNetSDK.NET_DVR_GetLastError());
                break;
            }
            Thread.sleep(20);                       // pace the frames at roughly real time
        }
    } catch (Exception ex) {
        DataHelper.AddExceptionStackTrace("[Error] StartMedia", ex);
    } finally {
        if (null != inputStream) {
            try { inputStream.close(); } catch (Exception e) { }
        }
        lockAudio.unlock();
    }

    hCNetSDK.NET_DVR_StopVoiceCom(mr);
    System.out.println("File read complete");
}
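StartMedia above covers the PlayMedia path (audio read from a local file). For the two-way talk path, each Base64 chunk received over the WebSocket has to go through the same encode-and-send steps. The sketch below is a companion under assumptions, not code from the project: it reuses only the SDK calls already shown in StartMedia, mirrors the same frame sizes and g711_type value, and expects the same class context (the hCNetSDK field and the JNA Memory/Pointer/NativeLong types); voiceHandle is the value returned by NET_DVR_StartVoiceCom_MR_V30 when StartTalk is processed. A helper like Talk.send in the earlier WebSocket sketch could delegate to it.

// Hypothetical companion to StartMedia: push one 320-byte PCM frame,
// decoded from the WebSocket Base64 payload, to the camera.
static boolean SendTalkFrame(NativeLong voiceHandle, byte[] pcmFrame) {
    Memory pIn = new Memory(pcmFrame.length);
    pIn.write(0, pcmFrame, 0, pcmFrame.length);
    Memory pOut = new Memory(pcmFrame.length);      // sized like StartMedia's output buffer; only 160 bytes are used

    HCNetSDK.NET_DVR_AUDIOENC_INFO encInfo = new HCNetSDK.NET_DVR_AUDIOENC_INFO();
    encInfo.in_frame_size = pcmFrame.length;        // 320 bytes of PCM, as in StartMedia
    Pointer encoder = hCNetSDK.NET_DVR_InitG711Encoder(encInfo);

    HCNetSDK.NET_DVR_AUDIOENC_PROCESS_PARAM param = new HCNetSDK.NET_DVR_AUDIOENC_PROCESS_PARAM();
    param.in_buf = pIn;
    param.out_buf = pOut;
    param.out_frame_size = 160;                     // G.711 output size, same as StartMedia
    param.g711_type = 0;                            // same type value used in StartMedia

    boolean ok = hCNetSDK.NET_DVR_EncodeG711Frame(encoder, param)
              && hCNetSDK.NET_DVR_VoiceComSendData(voiceHandle, pOut, 160);
    hCNetSDK.NET_DVR_ReleaseG711Encoder(encoder);
    return ok;
}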