1 深度學習解調思路
-
對於 \(M\)-QAM 解調而言,可以將其看成一個多分類問題,比如 16QAM 的解調就可以看成一個 16 個類別的分類問題。每次將接收到的信號作為神經網絡的輸入,神經網絡的輸出將其分類為\(M\)個調制符號中的一個,從而實現 \(M\)-QAM 的解調。
-
由於這個 16QAM 的解調問題並不複雜,將採用四層的全連接神經網絡加上
softmax 激活函數來實現 AWGN 信道下 QAM 的解調。其中輸入層、兩層隱藏層的激活函數都為 ReLU 函數,網絡節點數分別是 2、40、80、16,輸出層的激活函數是 softmax 函數。
在 QAM 解調這個多分類問題中,損失函數采用分類問題中常用的交叉熵(cross entropy)損失函數。
由於神經網絡的輸入只能是實數,所以要把接收到的復信號拆成實部和虛部送進神經網絡,所以輸入層維度是2。
2 仿真結果
首先隨機生成 10000 組符號,經過AWGN信道后通過傳統解調方法進行解調,對仿真的結果進行取平均,並且與 16QAM 的理論誤碼率進行對比,可以發現與理論誤碼率基本重合,結果如下圖所示:
對於深度學習方法,首先隨機生成 5000 組符號數據,經過高斯白噪聲信道后,得到在不同信噪比下的接收信號,以此作為神經網絡的訓練集。並且將傳統解調方法中所使用的10000組符號作為測試集,來驗證訓練好的神經網絡性能。兩種方法的性能對比如下圖所示:
3 仿真代碼
本實驗程序建立在 Python 3.7.9、TensorFlow 2.0.0 的環境基礎之上,源碼詳見 GitHub。
3.1 Traditional Method
#%%
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.ticker as ticker
import scipy.io as sio
#%%
# Simulation parameters for the traditional (minimum-distance) receiver.
nSymbol = 10000                  # number of 16QAM symbols to transmit
cpNum = nSymbol // 4             # cyclic-prefix length (1/4 of the symbols)
TotalSymbol = nSymbol + cpNum    # time-domain length including the prefix
SNR = [*range(20)]               # Eb/N0 points in dB
M = 16                           # modulation order
# Random source bits: one row of 4 bits per 16QAM symbol.
data_scource = np.random.randint(0, 2, size=(nSymbol, 4))
# Map each 4-bit tuple to [16QAM constellation point, class index].
# The first two bits select the real part, the last two the imaginary part,
# with amplitude levels (00, 01, 10, 11) -> (-3, -1, 3, 1); the imaginary
# axis uses the mirrored sign. The class index is the bits read as binary,
# so insertion order runs from (0,0,0,0) -> 0 up to (1,1,1,1) -> 15.
_levels = (-3, -1, 3, 1)
mapBitToSymbol = {}
for _idx in range(16):
    _bits = ((_idx >> 3) & 1, (_idx >> 2) & 1, (_idx >> 1) & 1, _idx & 1)
    _re = _levels[2 * _bits[0] + _bits[1]]
    _im = -_levels[2 * _bits[2] + _bits[3]]
    mapBitToSymbol[_bits] = [_re + 1j * _im, _idx]
# Look up each 4-bit row once; keep both the modulated symbol and its index.
_mapped = [mapBitToSymbol[tuple(bits)] for bits in data_scource]
data_send = [pair[0] for pair in _mapped]        # modulated 16QAM symbols
data_send_index = [pair[1] for pair in _mapped]  # symbol class indices
data_ifft = np.fft.ifft(data_send)               # to the time domain (OFDM)
# Prepend the cyclic prefix: the last cpNum time-domain samples.
data_ofdm_send = np.hstack([data_ifft[-cpNum:], data_ifft])
#%%
# Transmit over AWGN at each SNR and demodulate with minimum-distance (ML)
# detection; record the per-SNR symbol error rate and the received samples
# (split into real/imag) for later use as the deep-learning test set.
Es = np.linalg.norm(data_ofdm_send) ** 2 / TotalSymbol  # average symbol energy
Eb = Es / np.log2(M)                                    # energy per bit
Pe_simu = []
test_data = []
test_label = data_send_index
# Constellation ordered by class index, so argmin positions ARE class indices.
constellation = np.array(
    [sym for sym, _ in sorted(mapBitToSymbol.values(), key=lambda v: v[1])])
send_index = np.array(data_send_index)
for snrdB in SNR:
    snr = 10 ** (snrdB / 10.0)
    sigma = Eb / snr  # noise power N0 (SNR here is Eb/N0)
    # Complex Gaussian noise with variance sigma (sigma/2 per dimension).
    noise = np.sqrt(sigma/2) * np.random.randn(1, TotalSymbol) + \
            np.sqrt(sigma/2) * np.random.randn(1, TotalSymbol)*1j
    data_ofdm_receive = data_ofdm_send + noise
    data_fft = data_ofdm_receive[0, cpNum : cpNum+nSymbol]  # drop cyclic prefix
    data_receive = np.fft.fft(data_fft)                     # back to frequency domain
    # Store (real, imag) pairs as an (nSymbol, 2) matrix for the NN test set.
    test_data.append(np.concatenate((np.real(data_receive).reshape(-1, 1),
                                     np.imag(data_receive).reshape(-1, 1)), axis=-1))
    # Vectorized nearest-neighbour search replaces the original per-symbol
    # Python loop (which also iterated .items() while ignoring the key and
    # used np.linalg.norm on a scalar); argmin keeps the first minimum, the
    # same tie-break as the original strict-less-than scan.
    decided = np.argmin(np.abs(data_receive[:, None] - constellation[None, :]), axis=1)
    Pe_simu.append(np.count_nonzero(decided != send_index) / nSymbol)
#%%
import math
def Q(x):
    """Gaussian tail probability Q(x) = P(N(0,1) > x) = erfc(x/sqrt(2))/2."""
    return 0.5 * math.erfc(x / math.sqrt(2))
# Theoretical 16QAM symbol error rate:
#   Pe ≈ 4 (1 - 1/sqrt(M)) Q( sqrt( 3k/(M-1) * Eb/N0 ) )
# `a` carries a 1/log2(M) factor (the BER form); it is multiplied back out
# by log2(M) in the expression below, reproducing the SER.
a = 4 * (1 - 1/math.sqrt(M)) / math.log2(M)
k = math.log2(M)       # bits per symbol
b = 3 * k / (M - 1)    # argument scaling inside Q(.)
Pe_theory = [a * Q(math.sqrt(b * 10 ** (snrdB / 10))) * math.log2(M)
             for snrdB in range(20)]
# Plot theoretical vs simulated SER on a logarithmic y-axis.
snrdB = list(range(0, 20))
plt.semilogy(snrdB, Pe_theory, 'r-.*')
plt.semilogy(snrdB, Pe_simu, 'k-^')
plt.grid(True, which='major')
plt.grid(True, which='minor', linestyle='--')
plt.xlabel('SNR(dB)')
plt.ylabel('Symbol Error Rate')
plt.legend(['Theory', 'Simulation'])
plt.axis([0, 18, 1e-3, 1e0])
for ext in ('svg', 'pdf'):
    plt.savefig(f'Theory_Tra_Compare.{ext}')
#%%
# Persist the test set, its labels and the simulated SER curve so the
# deep-learning script can evaluate on exactly the same data.
sio.savemat('./Traditional_data.mat', {
    'test_data': test_data,
    'test_label': test_label,
    'Pe_simu': Pe_simu,
})
3.2 Deep Learning Method
#%%
import numpy as np
import tensorflow as tf
from tensorflow.keras import models, layers, Input
from tensorflow import keras
import os
# NOTE(review): presumably works around the "duplicate OpenMP runtime"
# abort that can occur when TensorFlow/MKL load libiomp twice — confirm.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import matplotlib.pyplot as plt
import scipy.io as sio
#%%
# Hyperparameters and simulation settings for the deep-learning receiver.
nSymbol = 5000                   # symbols used for training
cpNum = nSymbol // 4             # cyclic-prefix length
TotalSymbol = nSymbol + cpNum    # time-domain length including the prefix
M = 16                           # modulation order
SNR = [*range(20)]               # SNR points in dB
batch_size = 256
epochs = 20
#%%
# Random source bits: one row of 4 bits per 16QAM symbol.
data_scource = np.random.randint(0, 2, size=(nSymbol, 4))
# Map each 4-bit tuple to [16QAM constellation point, class index].
# The first two bits select the real part, the last two the imaginary part,
# with amplitude levels (00, 01, 10, 11) -> (-3, -1, 3, 1); the imaginary
# axis uses the mirrored sign. The class index is the bits read as binary,
# so insertion order runs from (0,0,0,0) -> 0 up to (1,1,1,1) -> 15.
_levels = (-3, -1, 3, 1)
mapBitToSymbol = {}
for _idx in range(16):
    _bits = ((_idx >> 3) & 1, (_idx >> 2) & 1, (_idx >> 1) & 1, _idx & 1)
    _re = _levels[2 * _bits[0] + _bits[1]]
    _im = -_levels[2 * _bits[2] + _bits[3]]
    mapBitToSymbol[_bits] = [_re + 1j * _im, _idx]
# Look up each 4-bit row once; keep both the modulated symbol and its index.
_mapped = [mapBitToSymbol[tuple(bits)] for bits in data_scource]
data_send = [pair[0] for pair in _mapped]        # modulated 16QAM symbols
data_send_index = [pair[1] for pair in _mapped]  # symbol class indices
data_ifft = np.fft.ifft(data_send)               # to the time domain (OFDM)
# Prepend the cyclic prefix: the last cpNum time-domain samples.
data_ofdm_send = np.hstack([data_ifft[-cpNum:], data_ifft])
#%%
# Build the training set from the received signal at every SNR point.
# The label sequence is the transmitted symbol indices repeated once per SNR.
train_label_index = data_send_index * len(SNR)
train_data_list = []
#%%
Es = np.linalg.norm(data_ofdm_send) ** 2 / TotalSymbol  # average symbol energy
Eb = Es / np.log2(M)                                    # energy per bit
# (Removed dead `Pe_simu = []` — it was never appended to or read in this script.)
for snrdB in SNR:
    snr = 10 ** (snrdB / 10.0)
    sigma = Eb / snr  # noise power N0 (SNR here is Eb/N0)
    # Complex Gaussian noise with variance sigma (sigma/2 per dimension).
    noise = np.sqrt(sigma/2) * np.random.randn(1, TotalSymbol) + \
            np.sqrt(sigma/2) * np.random.randn(1, TotalSymbol)*1j
    data_ofdm_receive = data_ofdm_send + noise
    data_fft = data_ofdm_receive[0, cpNum : cpNum+nSymbol]  # drop cyclic prefix
    data_receive = np.fft.fft(data_fft)                     # back to frequency domain
    train_data_list += list(data_receive)
#%%
# ================
# Network: a 2 -> 40 -> 80 -> 16 fully-connected classifier. The 2-d input
# is (real, imag) of one received symbol; the softmax output is a
# distribution over the M constellation classes.
# ================
merged_inputs = Input(shape=(2,))
temp = layers.Dense(40, activation='relu')(merged_inputs)
temp = layers.BatchNormalization()(temp)
temp = layers.Dense(80, activation='relu')(temp)
temp = layers.BatchNormalization()(temp)
out = layers.Dense(M, activation='softmax')(temp)
model = models.Model(inputs=merged_inputs, outputs=out)
# Fix: `lr` is a deprecated alias in tf.keras; the documented argument name
# is `learning_rate`.
model.compile(loss='categorical_crossentropy',
              optimizer=keras.optimizers.Adam(learning_rate=0.001),
              metrics=['accuracy'])
model.summary()
#%%
# One-hot encode the class labels for the categorical cross-entropy loss.
train_label_tf = tf.one_hot(train_label_index, depth=M)
train_data_tmp = np.array(train_data_list)
# The network only accepts real inputs, so each complex sample is split into
# its real and imaginary parts, giving an (N, 2) feature matrix.
data_real = np.real(train_data_tmp).reshape(-1, 1)
data_imag = np.imag(train_data_tmp).reshape(-1, 1)
train_data = np.concatenate((data_real, data_imag), axis=-1)
train_label = np.array(train_label_tf)
# Shuffle data and labels with the SAME permutation: save the RNG state,
# shuffle the data, restore the state, then shuffle the labels — both
# shuffles then draw identical random sequences, keeping pairs aligned.
state = np.random.get_state()
np.random.shuffle(train_data)
np.random.set_state(state)
np.random.shuffle(train_label)
history = model.fit(train_data,
                    train_label,
                    epochs=epochs,
                    batch_size=batch_size,
                    verbose=1,
                    shuffle=True,
                    # hold out 20% of the (already shuffled) data for validation
                    validation_split=0.2,
                    #callbacks=[checkpointer,early_stopping,reduce_lr]
                    )
#%%
# Plot the training/validation accuracy curves recorded by model.fit.
loss = history.history['loss']
val_loss = history.history['val_loss']
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
# Use a distinct name for the x-axis values so the `epochs` hyperparameter
# defined earlier is not shadowed.
epoch_axis = range(1, len(loss) + 1)
plt.plot(epoch_axis, acc, 'bo', label='Training accuracy')
plt.plot(epoch_axis, val_acc, 'b', label='Validation accuracy')
# Fix: the figure shows accuracy, not loss — title corrected accordingly.
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
# plt.show()
plt.savefig('Accuracy.pdf')
plt.savefig('Accuracy.svg')
#%%
# Load the test set saved by the traditional-method script so both methods
# are evaluated on exactly the same received symbols.
test_origin_data = sio.loadmat('./Traditional_data.mat')
test_origin_data.keys()  # notebook-style inspection of the stored variables
#%%
test_data = test_origin_data['test_data']    # per-SNR (nSymbol, 2) real/imag pairs
test_label = test_origin_data['test_label']  # savemat wraps the list -> shape (1, nSymbol)
Total = len(test_label[0])  # total number of test symbols
Pe_Tra_simu = test_origin_data['Pe_simu']    # traditional-method SER curve
#%%
# Run the trained classifier at each SNR point and convert the softmax
# outputs into hard symbol decisions; mismatches against the true labels
# give the deep-learning symbol error rate.
Pe_Deep_simu = []
for snr_idx in range(len(test_data)):
    soft_outputs = model.predict(test_data[snr_idx])
    hard_decisions = np.argmax(soft_outputs, axis=-1)
    Pe_Deep_simu.append((hard_decisions != test_label).sum() / Total)
#%%
# Compare the traditional and deep-learning SER curves on a log y-axis.
snrdB = list(range(0, 20))
plt.semilogy(snrdB, Pe_Tra_simu[0], 'k-^')
plt.semilogy(snrdB, Pe_Deep_simu, 'r-.*')
plt.grid(True, which='major')
plt.grid(True, which='minor', linestyle='--')
plt.xlabel('SNR(dB)')
plt.ylabel('Symbol Error Rate')
plt.legend(['Traditional Method', 'Deep Learning'])
plt.axis([0, 18, 1e-3, 1e0])
for ext in ('pdf', 'svg'):
    plt.savefig(f'Deep_Tra_Compare.{ext}')
#%%
