Using iFlytek's (科大訊飛) speech dictation service with Python to implement speech recognition and convert real-time speech into text.
The recording part follows another blog post. I first downloaded the speech dictation SDK from the iFlytek website, then created two .py files inside the SDK folder, get_audio.py and iat_demo.py, and added a folder named audios that holds the recording file input.wav. My full directory layout is as follows:
asr_SDK (the SDK folder)
├─ Readme.html
├─ audios
│ └─ input.wav (the recorded audio)
├─ bin
│ ├─ gm_continuous_digit.abnf
│ ├─ ise_cn
│ ├─ ise_en
│ ├─ msc
│ ├─ msc.dll (I use 32-bit Python, so this is the DLL that gets loaded)
│ ├─ msc_x64.dll
│ ├─ source.txt
│ ├─ userwords.txt
│ └─ wav
├─ doc
├─ get_audio.py
├─ iat_demo.py
├─ include
├─ libs
├─ release.txt
└─ samples
Recording is done with pyaudio, which has to be installed first (on Windows this usually means downloading a prebuilt wheel; see my other blog post for details). I then adapted the recording code to my needs. The full code of get_audio.py is as follows:
import pyaudio  # needs to be installed separately (see the note above about the wheel)
import wave

in_path = "./audios/input.wav"  # path where the recording is saved


def get_audio(filepath):
    aa = str(input("Start recording? (y/n) "))
    if aa == str("y"):
        CHUNK = 1024
        FORMAT = pyaudio.paInt16
        CHANNELS = 1        # number of channels
        RATE = 16000        # sample rate; iat_demo.py declares audio/L16;rate=16000, so record at 16 kHz
        RECORD_SECONDS = 5  # recording length in seconds
        WAVE_OUTPUT_FILENAME = filepath
        p = pyaudio.PyAudio()

        stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        frames_per_buffer=CHUNK)
        print("*" * 5, "Recording started: please speak within 5 seconds", "*" * 5)
        frames = []
        for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
            data = stream.read(CHUNK)
            frames.append(data)
        print("*" * 5, "Recording finished\n")

        stream.stop_stream()
        stream.close()
        p.terminate()

        wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(p.get_sample_size(FORMAT))
        wf.setframerate(RATE)
        wf.writeframes(b''.join(frames))
        wf.close()
    elif aa == str("n"):
        exit()
    else:
        print("Recording failed, please start again")
        get_audio(in_path)
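As a quick sanity check (this snippet is my own addition, not part of the original demo), the saved file can be inspected with the standard-library wave module to confirm that the channel count, sample width, sample rate, and duration match the parameters above:

import wave

# Inspect the recorded file; the path matches in_path from get_audio.py.
with wave.open("./audios/input.wav", "rb") as wf:
    print("channels:", wf.getnchannels())       # expected: 1
    print("sample width:", wf.getsampwidth())   # expected: 2 bytes (16-bit PCM)
    print("sample rate:", wf.getframerate())    # expected: 16000 Hz
    print("duration:", wf.getnframes() / wf.getframerate(), "s")  # roughly RECORD_SECONDS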
Recording can be repeated in a loop; every new recording overwrites the previous audio file.
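If you would rather keep every take instead of overwriting input.wav, one possible variation (my own sketch, not part of the demo; iat_demo.py would then also need to point at the returned path) is to record into a timestamped file:

import time
from get_audio import get_audio

# Hypothetical helper: save each take under a unique, timestamped name
# instead of always overwriting ./audios/input.wav.
def record_new_take(folder="./audios/"):
    filename = folder + "input_%s.wav" % time.strftime("%Y%m%d_%H%M%S")
    get_audio(filename)
    return filename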
The dictation part is based directly on the Python example for the speech dictation web API from the iFlytek website, with some adjustments on top so that the recorded audio is automatically recognized and converted to text. The full code of iat_demo.py is as follows:
import websocket
import requests
import datetime
import hashlib
import base64
import hmac
import json
import os, sys
import re
from urllib.parse import urlencode
import logging
import time
import ssl
import wave
from wsgiref.handlers import format_date_time
from datetime import datetime
from time import mktime
from pyaudio import PyAudio, paInt16
from get_audio import get_audio  # import the recording function from get_audio.py

input_filename = "input.wav"     # speech captured from the microphone
input_filepath = "./audios/"     # directory of the input file
in_path = input_filepath + input_filename

type = sys.getfilesystemencoding()
path_pwd = os.path.split(os.path.realpath(__file__))[0]
os.chdir(path_pwd)

try:
    import thread
except ImportError:
    import _thread as thread

logging.basicConfig()

STATUS_FIRST_FRAME = 0     # marker for the first frame
STATUS_CONTINUE_FRAME = 1  # marker for intermediate frames
STATUS_LAST_FRAME = 2      # marker for the last frame

framerate = 8000
NUM_SAMPLES = 2000
channels = 1
sampwidth = 2
TIME = 2

global wsParam


class Ws_Param(object):
    # initialization
    def __init__(self, host):
        self.Host = host
        self.HttpProto = "HTTP/1.1"
        self.HttpMethod = "GET"
        self.RequestUri = "/v2/iat"
        self.APPID = "5d312675"  # APPID from Console -> My Apps -> Speech Dictation (streaming)
        self.Algorithm = "hmac-sha256"
        self.url = "wss://" + self.Host + self.RequestUri
        # capture audio: record from the microphone
        get_audio("./audios/input.wav")
        # test audio file; streaming dictation supports at most 60 s per request,
        # longer audio causes timeouts and other errors
        self.AudioFile = r"./audios/input.wav"
        self.CommonArgs = {"app_id": self.APPID}
        self.BusinessArgs = {"domain": "iat", "language": "zh_cn", "accent": "mandarin"}

    def create_url(self):
        url = 'wss://ws-api.xfyun.cn/v2/iat'
        now = datetime.now()
        date = format_date_time(mktime(now.timetuple()))
        APIKey = 'a6aabfcca4ae28f9b6a448f705b7e432'     # APIKey from Console -> My Apps -> Speech Dictation (streaming)
        APISecret = 'e649956e14eeb085d1b0dce77a671131'  # APISecret from Console -> My Apps -> Speech Dictation (streaming)
        signature_origin = "host: " + "ws-api.xfyun.cn" + "\n"
        signature_origin += "date: " + date + "\n"
        signature_origin += "GET " + "/v2/iat " + "HTTP/1.1"
        signature_sha = hmac.new(APISecret.encode('utf-8'), signature_origin.encode('utf-8'),
                                 digestmod=hashlib.sha256).digest()
        signature_sha = base64.b64encode(signature_sha).decode(encoding='utf-8')
        authorization_origin = "api_key=\"%s\", algorithm=\"%s\", headers=\"%s\", signature=\"%s\"" % (
            APIKey, "hmac-sha256", "host date request-line", signature_sha)
        authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')
        v = {
            "authorization": authorization,
            "date": date,
            "host": "ws-api.xfyun.cn"
        }
        url = url + '?' + urlencode(v)
        return url


# handle incoming websocket messages; I changed the JSON parsing here to print shorter messages
def on_message(ws, message):
    msg = json.loads(message)  # parse the JSON message into a Python dict
    try:
        code = msg["code"]
        sid = msg["sid"]
        if code != 0:
            errMsg = msg["message"]
            print("sid:%s call error:%s code is:%s\n" % (sid, errMsg, code))
        else:
            result = msg["data"]["result"]["ws"]
            # pretty-print the result as JSON
            data_result = json.dumps(result, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ': '))
            print("sid:%s call success!" % (sid))
            print("result is:%s\n" % (data_result))
    except Exception as e:
        print("receive msg,but parse exception:", e)


# handle websocket errors
def on_error(ws, error):
    print("### error:", error)


# handle websocket close
# (newer websocket-client versions pass close status code and message as extra arguments)
def on_close(ws, *args):
    print("### closed ###")


# handle websocket connection established
def on_open(ws):
    def run(*args):
        frameSize = 1280             # audio size of each frame
        intervel = 0.04              # interval between audio frames (seconds)
        status = STATUS_FIRST_FRAME  # frame status: first, intermediate, or last frame
        with open(wsParam.AudioFile, "rb") as fp:
            while True:
                buf = fp.read(frameSize)
                # end of file
                if not buf:
                    status = STATUS_LAST_FRAME
                # first frame: send it together with the business parameters;
                # app_id must be included, but only in this first frame
                if status == STATUS_FIRST_FRAME:
                    d = {"common": wsParam.CommonArgs,
                         "business": wsParam.BusinessArgs,
                         "data": {"status": 0, "format": "audio/L16;rate=16000",
                                  "audio": str(base64.b64encode(buf), 'utf-8'),
                                  "encoding": "raw"}}
                    d = json.dumps(d)
                    ws.send(d)
                    status = STATUS_CONTINUE_FRAME
                # intermediate frames
                elif status == STATUS_CONTINUE_FRAME:
                    d = {"data": {"status": 1, "format": "audio/L16;rate=16000",
                                  "audio": str(base64.b64encode(buf), 'utf-8'),
                                  "encoding": "raw"}}
                    ws.send(json.dumps(d))
                # last frame
                elif status == STATUS_LAST_FRAME:
                    d = {"data": {"status": 2, "format": "audio/L16;rate=16000",
                                  "audio": str(base64.b64encode(buf), 'utf-8'),
                                  "encoding": "raw"}}
                    ws.send(json.dumps(d))
                    time.sleep(1)
                    break
                # simulate the audio sampling interval
                time.sleep(intervel)
        ws.close()

    thread.start_new_thread(run, ())


if __name__ == "__main__":
    wsParam = Ws_Param("ws-api.xfyun.cn")  # streaming dictation host
    websocket.enableTrace(False)
    wsUrl = wsParam.create_url()
    ws = websocket.WebSocketApp(wsUrl, on_message=on_message, on_error=on_error, on_close=on_close)
    ws.on_open = on_open
    ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
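on_message above prints the full "ws" result structure. If you only want the recognized sentence, a small helper like the sketch below (my own addition; it assumes the usual iFlytek IAT result layout in which data.result.ws[].cw[].w holds the candidate words) can join the words into plain text:

import json

def extract_text(message):
    # message is the raw JSON string received in on_message
    msg = json.loads(message)
    if msg.get("code") != 0:
        return ""
    segments = msg["data"]["result"]["ws"]                 # list of word segments
    return "".join(seg["cw"][0]["w"] for seg in segments)  # first candidate of each segment

# Example: inside on_message, call print(extract_text(message)) instead of dumping the full JSON.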
In the project folder, right-click iat_demo.py, choose Edit with IDLE -> Edit with IDLE 3.7 (32 bit) to open it, and press F5 to run it. If a third-party package is reported as missing at startup, install it yourself (the websocket module, for instance, comes from the websocket-client package). Once it starts, follow the prompts. This is the printed result from before I changed the JSON parsing:
Related code download