import torch
import random
import matplotlib.pyplot as plt

def rbf_kernel(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    """
    Turn the source-domain and target-domain data into a kernel matrix (the K defined above).
    Params:
        source: source-domain data (n * len(x))
        target: target-domain data (m * len(y))
        kernel_mul: multiplicative step between the bandwidths of adjacent kernels
        kernel_num: number of Gaussian kernels to combine
        fix_sigma: base sigma (bandwidth) of the Gaussian kernels; estimated from the data when None
    Return:
        sum(kernel_val): the sum of the individual kernel matrices
    """
    n_samples = int(source.size()[0]) + int(target.size()[0])  # total row count; source and target usually have the same size, which simplifies the computation
    total = torch.cat([source, target], dim=0)  # stack source and target along the row (sample) dimension
    # Broadcast total against itself so that every pair of rows can be compared
    total0 = total.unsqueeze(0).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))
    total1 = total.unsqueeze(1).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))
    # Pairwise squared L2 distances: entry (i, j) is the squared distance between rows i and j of total (0 when i == j)
    L2_distance = ((total0 - total1) ** 2).sum(2)
    # Choose the bandwidth (sigma) of the Gaussian kernels
    if fix_sigma:
        bandwidth = fix_sigma
    else:
        # Heuristic: mean of the off-diagonal squared distances
        bandwidth = torch.sum(L2_distance.detach()) / (n_samples ** 2 - n_samples)
    # Take kernel_num bandwidths centred on the base value, spaced by factors of kernel_mul
    # (e.g. a base bandwidth of 1 gives [0.25, 0.5, 1, 2, 4] with the defaults)
    bandwidth /= kernel_mul ** (kernel_num // 2)
    bandwidth_list = [bandwidth * (kernel_mul ** i) for i in range(kernel_num)]
    # Gaussian kernel: exp(-||x - y||^2 / bandwidth)
    kernel_val = [torch.exp(-L2_distance / bandwidth_temp) for bandwidth_temp in bandwidth_list]
    # The final kernel matrix is the sum of the individual kernels
    return sum(kernel_val)  # optionally divide by len(kernel_val) to average instead
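
# A quick shape check (a sketch, not part of the original walkthrough; the toy
# tensors below are hypothetical): with an n-row source and an m-row target,
# rbf_kernel returns a symmetric (n + m) x (n + m) matrix.
_src = torch.randn(3, 2)
_tgt = torch.randn(5, 2)
_K = rbf_kernel(_src, _tgt)
print(_K.shape)                    # torch.Size([8, 8])
print(torch.allclose(_K, _K.t()))  # True: kernel matrices are symmetric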

def mmd_rbf(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    """
    Compute the MMD distance between source-domain and target-domain data.
    Params:
        source: source-domain data (n * len(x))
        target: target-domain data (m * len(y))
        kernel_mul: multiplicative step between the bandwidths of adjacent kernels
        kernel_num: number of Gaussian kernels to combine
        fix_sigma: base sigma (bandwidth) of the Gaussian kernels; estimated from the data when None
    Return:
        loss: the MMD loss
    """
    batch_size = int(source.size()[0])  # the source and target batch sizes are usually assumed to be equal
    kernels = rbf_kernel(source, target,
                         kernel_mul=kernel_mul, kernel_num=kernel_num, fix_sigma=fix_sigma)
    # Split the kernel matrix into four blocks, following Eq. (3):
    # MMD^2 ~ mean(k(x, x')) + mean(k(y, y')) - mean(k(x, y)) - mean(k(y, x))
    XX = kernels[:batch_size, :batch_size]
    YY = kernels[batch_size:, batch_size:]
    XY = kernels[:batch_size, batch_size:]
    YX = kernels[batch_size:, :batch_size]
    loss = torch.mean(XX + YY - XY - YX)
    return loss  # since usually n == m, the weighting matrix L can be left out of the computation
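
# Sanity check (a sketch; _x is a hypothetical toy batch): the MMD of a batch
# with itself is exactly zero, because the four kernel blocks coincide.
_x = torch.randn(4, 3)
print(mmd_rbf(_x, _x))  # tensor(0.)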
sample_size = 500
buckets = 50
# First distribution: log-normal, i.e. the exponential of a normal variable with
# mean mu and standard deviation sigma. mu can be any value; sigma must be positive.
plt.subplot(1, 2, 1)
plt.xlabel("random.lognormvariate")
mu = -0.6
sigma = 0.15  # keeps most of the samples between 0 and 1
res1 = [random.lognormvariate(mu, sigma) for _ in range(sample_size)]
plt.hist(res1, buckets)
# Second distribution: Beta. Both alpha and beta must be greater than 0;
# samples fall between 0 and 1.
plt.subplot(1, 2, 2)
plt.xlabel("random.betavariate")
alpha = 1
beta = 10
res2 = [random.betavariate(alpha, beta) for _ in range(sample_size)]
plt.hist(res2, buckets)
plt.show()
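
# To back the visual comparison with numbers (an optional check, not part of the
# original recipe), print the mean and standard deviation of each sample:
import statistics
print(statistics.mean(res1), statistics.stdev(res1))  # log-normal mean ~ exp(mu + sigma^2 / 2) ~ 0.56
print(statistics.mean(res2), statistics.stdev(res2))  # Beta(1, 10) mean = alpha / (alpha + beta) ~ 0.09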
# The two distributions differ visibly; below, MMD quantifies that difference in two settings:
# 1. Draw the two sample sets from different distributions (each set is 10 * 500)
# Parameter values as in the code above
# One set from the log-normal distribution, the other from the Beta distribution
diff_1 = []
for i in range(10):
    diff_1.append([random.lognormvariate(mu, sigma) for _ in range(sample_size)])
diff_2 = []
for i in range(10):
    diff_2.append([random.betavariate(alpha, beta) for _ in range(sample_size)])
X = torch.Tensor(diff_1)
Y = torch.Tensor(diff_2)
print(mmd_rbf(X, Y))  # clearly positive: the samples come from different distributions
# 2. Draw both sample sets from the same distribution (each set is 10 * 500)
# Parameter values as in the code above
# Both sets drawn from the log-normal distribution
same_1 = []
for i in range(10):
    same_1.append([random.lognormvariate(mu, sigma) for _ in range(sample_size)])
same_2 = []
for i in range(10):
    same_2.append([random.lognormvariate(mu, sigma) for _ in range(sample_size)])
X = torch.Tensor(same_1)
Y = torch.Tensor(same_2)
print(mmd_rbf(X, Y))  # close to zero: both samples come from the same distribution
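
# Putting the two experiments side by side (a sketch): recompute both losses and
# check that the cross-distribution MMD dominates the same-distribution one.
diff_loss = mmd_rbf(torch.Tensor(diff_1), torch.Tensor(diff_2))
same_loss = mmd_rbf(torch.Tensor(same_1), torch.Tensor(same_2))
print(diff_loss > same_loss)  # expected: tensor(True)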