Mockingbird_對生成的語音進行降噪


一.處理源語音
1.定位:

2.仿造load_preprocess_wav()函數讀入音頻

3.新建enhance.py文件,主要使用減譜法和自適應濾波器法,代碼如下:

#!/usr/bin/env python
import librosa
import numpy as np
import wave
import math
from synthesizer.hparams import hparams
import os
import ctypes as ct
from encoder import inference as encoder
from utils import logmmse

#減譜法:通過發生前所謂的“寂靜段”(認為在這一段里沒有語音只有噪聲)來估計噪聲的功率譜。從帶噪語音的功率譜中減去噪聲功率譜
#自適應濾波器法:實現帶噪信號中的噪聲估計,並用原始信號減去估計值

def enhance(fpath):
    """Denoise the wav file at *fpath* and return the cleaned waveform.

    Two stages:
    1. Spectral subtraction — the noise power spectrum is estimated from the
       first few "silent" frames (assumed to contain noise only) and
       subtracted from the noisy speech power spectrum.
    2. Adaptive filtering — logmmse estimates the noise in the signal and
       subtracts that estimate from the original.

    Args:
        fpath: path to a wav file (anything `str()`-convertible).

    Returns:
        The denoised, rescaled, silence-trimmed waveform as produced by
        `encoder.preprocess_wav`.
    """
    class FloatBits(ct.Structure):
        # Bit layout of an IEEE-754 single-precision float:
        # 23-bit mantissa, 8-bit biased exponent, 1 sign bit.
        _fields_ = [
            ('M', ct.c_uint, 23),
            ('E', ct.c_uint, 8),
            ('S', ct.c_uint, 1)
        ]

    class Float(ct.Union):
        _anonymous_ = ('bits',)
        _fields_ = [
            ('value', ct.c_float),
            ('bits', FloatBits)
        ]

    def nextpow2(x):
        """Smallest integer p such that 2**p >= |x| (returns 0 for x == 0)."""
        if x < 0:
            x = -x
        if x == 0:
            return 0
        d = Float()
        d.value = x
        if d.M == 0:  # mantissa zero -> x is an exact power of two
            return d.E - 127
        return d.E - 127 + 1

    # Read the WAV file: format parameters, then the raw frames.
    # (params = nchannels, sampwidth, framerate, nframes, comptype, compname)
    with wave.open(str(fpath)) as f:
        params = f.getparams()
        nchannels, sampwidth, framerate, nframes = params[:4]
        fs = framerate
        str_data = f.readframes(nframes)
    # Convert the raw bytes to 16-bit samples.
    # np.fromstring was deprecated and later removed; frombuffer is the
    # drop-in replacement for binary data.
    x = np.frombuffer(str_data, dtype=np.short)

    # Analysis parameters.
    len_ = 20 * fs // 1000     # frame size in samples (20 ms)
    PERC = 50                  # window overlap as a percentage of the frame
    len1 = len_ * PERC // 100  # overlapping samples per frame
    len2 = len_ - len1         # non-overlapping samples per frame
    # Default tuning constants.
    Thres = 3     # VAD threshold (dB): below it the noise estimate is updated
    Expnt = 2.0   # 1.0 -> magnitude-spectrum subtraction, 2.0 -> power spectrum
    beta = 0.002  # spectral floor factor
    G = 0.9       # noise-spectrum smoothing factor
    # Hamming analysis window.
    win = np.hamming(len_)
    # Normalization gain for overlap-add with 50% overlap.
    winGain = len2 / sum(win)

    # Noise magnitude estimate — assume the first 5 frames are noise/silence.
    nFFT = 2 * 2 ** (nextpow2(len_))
    noise_mean = np.zeros(nFFT)
    j = 0
    for k in range(1, 6):
        noise_mean = noise_mean + abs(np.fft.fft(win * x[j:j + len_], nFFT))
        j = j + len_
    noise_mu = noise_mean / 5

    # Allocate buffers and initialize loop state.
    k = 1
    img = 1j
    x_old = np.zeros(len1)
    Nframes = len(x) // len2 - 1
    xfinal = np.zeros(Nframes * len2)

    def berouti(SNR):
        """Over-subtraction factor for power-spectrum subtraction (Expnt == 2).

        Restructured as if/elif/else so every input (including NaN) yields a
        value — the original left `a` unbound when no branch matched.
        """
        if -5.0 <= SNR <= 20.0:
            return 4 - SNR * 3 / 20
        elif SNR < -5.0:
            return 5
        else:
            return 1

    def berouti1(SNR):
        """Over-subtraction factor for magnitude-spectrum subtraction (Expnt == 1)."""
        if -5.0 <= SNR <= 20.0:
            return 3 - SNR * 2 / 20
        elif SNR < -5.0:
            return 4
        else:
            return 1

    # =========================  Start Processing  =========================
    for n in range(0, Nframes):
        # Window the current frame and compute its spectrum.
        insign = win * x[k-1:k + len_ - 1]
        spec = np.fft.fft(insign, nFFT)
        sig = abs(spec)
        # Save the noisy phase — it is reused for reconstruction.
        theta = np.angle(spec)
        SNRseg = 10 * np.log10(np.linalg.norm(sig, 2) ** 2 / np.linalg.norm(noise_mu, 2) ** 2)

        if Expnt == 1.0:  # magnitude spectrum
            alpha = berouti1(SNRseg)
        else:             # power spectrum
            alpha = berouti(SNRseg)

        # Spectral subtraction with over-subtraction factor alpha.
        sub_speech = sig ** Expnt - alpha * noise_mu ** Expnt
        # Where the clean estimate falls below the spectral floor
        # (i.e. noise power exceeded the clean-signal power)...
        diffw = sub_speech - beta * noise_mu ** Expnt
        # ...clamp those negative components to the estimated noise floor.
        z = np.where(diffw < 0)[0]
        if len(z) > 0:
            sub_speech[z] = beta * noise_mu[z] ** Expnt

        # Simple VAD: refresh the noise spectrum during low-SNR frames.
        if SNRseg < Thres:
            noise_temp = G * noise_mu ** Expnt + (1 - G) * sig ** Expnt  # smoothed noise power spectrum
            noise_mu = noise_temp ** (1 / Expnt)  # back to a noise magnitude spectrum

        # Enforce conjugate symmetry: mirror the lower half of the spectrum
        # onto the upper half (np.flipud reverses along the first axis).
        sub_speech[nFFT // 2 + 1:nFFT] = np.flipud(sub_speech[1:nFFT // 2])
        # Recombine the subtracted magnitude with the saved noisy phase
        # (vectorized np.cos/np.sin replace the per-sample math.cos loops).
        x_phase = (sub_speech ** (1 / Expnt)) * (np.cos(theta) + img * np.sin(theta))

        # Inverse FFT, then overlap-add into the output buffer.
        xi = np.fft.ifft(x_phase).real
        xfinal[k-1:k + len2 - 1] = x_old + xi[0:len1]
        x_old = xi[0 + len1:len_]
        k = k + len2

    # Write the enhanced signal to a temporary wav file.
    with wave.open('out.wav', 'wb') as wf:
        wf.setparams(params)
        wave_data = (winGain * xfinal).astype(np.short)
        # ndarray.tostring() was removed from NumPy; tobytes() is the
        # identical replacement.
        wf.writeframes(wave_data.tobytes())
    # sr must be passed by keyword in recent librosa versions.
    wav = librosa.load("./out.wav", sr=hparams.sample_rate)[0]

    # Second pass, mirroring load_preprocess_wav(): clean the waveform given
    # a reference noise profile. The waveform must have the same sample rate
    # that was used to build the noise profile.
    if hparams.rescale:
        wav = wav / np.abs(wav).max() * hparams.rescaling_max
    # Denoise (adaptive filtering via logmmse): profile the noise from the
    # first and last 0.15 s, then subtract that estimate.
    if len(wav) > hparams.sample_rate * (0.3 + 0.1):
        noise_wav = np.concatenate([wav[:int(hparams.sample_rate * 0.15)],
                                    wav[-int(hparams.sample_rate * 0.15):]])
        profile = logmmse.profile_noise(noise_wav, hparams.sample_rate)
        wav = logmmse.denoise(wav, profile)

    # Trim excessive silences.
    wav = encoder.preprocess_wav(wav)

    # Remove the temporary output file.
    os.remove("./out.wav")
    return wav

4.在__init__.py文件中調用enhance()

二.處理生成語音
1.定位:我選擇在UI界面中的“Enhance vocoder output”中的槽函數中進行降噪處理
2.代碼如下:

        # Trim excessive silences
        if self.ui.trim_silences_checkbox.isChecked():
            #wav = encoder.preprocess_wav(wav)
            sf.write('output.wav', wav, hparams.sample_rate)      #先將變量wav寫為文件的形式
            wav = enhance('output.wav')
            os.remove("./output.wav")

三.處理錄音音頻
1.代碼如下:

    def record(self):
        """Record ~5 s from the UI's microphone, denoise it, and play it back."""
        wav = self.ui.record_one(encoder.sampling_rate, 5)
        # Bail out BEFORE touching the filesystem: the original checked for
        # None only after sf.write/enhance had already consumed `wav`, so the
        # no-recording case would crash instead of returning.
        if wav is None:
            return
        # enhance() operates on files, so write the array to a temporary wav.
        sf.write('output1.wav', wav, hparams.sample_rate)
        wav = enhance('output1.wav')
        os.remove("./output1.wav")
        self.ui.play(wav, encoder.sampling_rate)

ps: 若運行時出現報錯 AttributeError: module 'librosa' has no attribute 'output',這是因為新版 librosa 已移除 librosa.output 模塊;可改用 soundfile.write() 保存音頻,或安裝舊版 librosa(<0.8.0)解決。


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM