Reference: https://zhuanlan.zhihu.com/p/40236865
faiss is Facebook's open-source library for fast similarity search over massive collections of vectors, but it does not ship a cosine-distance metric, even though cosine distance is very widely used. So how do we work around that?
The answer, up front:
import faiss
from faiss import normalize_L2
import numpy as np

d = 300
knowledge_embedding = np.random.random((1000, d)).astype('float32')  # 1000 knowledge entries to search over
query_embedding = np.random.random((100, d)).astype('float32')       # 100 query sentences
# Cosine similarity is the dot product divided by the vector lengths, so after
# L2-normalizing every vector to unit length, a plain inner product directly
# yields the cosine similarity.
normalize_L2(knowledge_embedding)
normalize_L2(query_embedding)
index = faiss.IndexFlat(d, faiss.METRIC_INNER_PRODUCT)  # equivalent to index = faiss.IndexFlatIP(d)
index.add(knowledge_embedding)           # add the knowledge entries to the index
D, I = index.search(query_embedding, 5)  # recall the top 5 for each query
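As a quick sanity check (a minimal sketch of my own, not from the original post), we can confirm that the inner product of L2-normalized vectors matches sklearn's cosine_similarity, since cos(u, v) = u·v / (‖u‖‖v‖) and the norms become 1 after normalization:

import numpy as np
from faiss import normalize_L2
from sklearn.metrics.pairwise import cosine_similarity

a = np.random.random((5, 300)).astype('float32')
b = np.random.random((5, 300)).astype('float32')
cos = cosine_similarity(a, b)            # reference cosine similarity, shape (5, 5)
normalize_L2(a); normalize_L2(b)         # faiss normalizes in place, to unit length
ip = a @ b.T                             # plain inner product of unit vectors
print(np.allclose(cos, ip, atol=1e-5))   # True (up to float32 rounding)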
Further experiments
import faiss
from faiss import normalize_L2
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import copy
def faiss_cos_similar_search(x, k=None):
    assert len(x.shape) == 2, "only 2-D arrays are supported"
    x = copy.deepcopy(x)
    nb, d = x.shape
    x = x.astype('float32')
    k_search = k if k else nb
    normalize_L2(x)  # unit-normalize in place so the inner product equals cosine similarity
    index = faiss.IndexFlat(d, faiss.METRIC_INNER_PRODUCT)
    # index = faiss.IndexFlatIP(d)  # equivalent
    # index = faiss.IndexFlatL2(d)  # alternative: L2 on unit vectors preserves the ranking
    index.add(x)
    D, I = index.search(x, k_search)
    return I
def sklearn_cos_search(x, k=None):
    assert len(x.shape) == 2, "only 2-D arrays are supported"
    x = copy.deepcopy(x)
    nb, d = x.shape
    ag = cosine_similarity(x)  # full pairwise cosine-similarity matrix
    k_search = k if k else nb
    return np.argsort(-ag, axis=1)[:, :k_search]  # ids sorted by descending similarity
def test_IndexFlatIP_only(nb=1000, d=100, kr=0.005, n_times=10):
    k = int(nb * kr)
    print("recall count is %d" % k)
    for i in range(n_times):
        x = np.random.random((nb, d)).astype('float32')
        # raw inner product on UN-normalized vectors
        index = faiss.IndexFlatIP(d)
        index.add(x)
        D, faiss_I = index.search(x, k)
        sklearn_I = sklearn_cos_search(x, k)
        cmp_result = faiss_I == sklearn_I
        print("is all correct: %s, correct batch rate: %d/%d, correct sample rate: %d/%d" %
              (np.all(cmp_result),
               np.all(cmp_result, axis=1).sum(), cmp_result.shape[0],
               cmp_result.sum(), cmp_result.shape[0] * cmp_result.shape[1]))
def test_embedding(nb=1000, d=100, kr=0.005, n_times=10):
    k = int(nb * kr)
    print("recall count is %d" % k)
    for i in range(n_times):
        x = np.random.random((nb, d)).astype('float32')  # dense embedding-style vectors
        faiss_I = faiss_cos_similar_search(x, k)  # normalize_L2 + inner product
        sklearn_I = sklearn_cos_search(x, k)
        cmp_result = faiss_I == sklearn_I
        print("is all correct: %s, correct batch rate: %d/%d, correct sample rate: %d/%d" %
              (np.all(cmp_result),
               np.all(cmp_result, axis=1).sum(), cmp_result.shape[0],
               cmp_result.sum(), cmp_result.shape[0] * cmp_result.shape[1]))
def test_one_hot(nb=1000, d=100, kr=0.005, n_times=10):
    k = int(nb * kr)
    print("recall count is %d" % k)
    for i in range(n_times):
        x = np.random.randint(0, 2, (nb, d))  # binary / one-hot style vectors
        faiss_I = faiss_cos_similar_search(x, k)  # normalize_L2 + inner product
        sklearn_I = sklearn_cos_search(x, k)
        cmp_result = faiss_I == sklearn_I
        print("is all correct: %s, correct batch rate: %d/%d, correct sample rate: %d/%d" %
              (np.all(cmp_result),
               np.all(cmp_result, axis=1).sum(), cmp_result.shape[0],
               cmp_result.sum(), cmp_result.shape[0] * cmp_result.shape[1]))
if __name__ == "__main__":
    print("test use IndexFlatIP only")
    test_IndexFlatIP_only()
    print("-" * 100 + "\n\n")
    print("test when one hot")
    test_one_hot()
    print("-" * 100 + "\n\n")
    print("test use normalize_L2 + IndexFlatIP")
    test_embedding()
    print("-" * 100 + "\n\n")
Below are the experimental results, which compare whether the recall orderings from the faiss and sklearn implementations of cosine similarity are exactly the same.
Analysis: the first block of results (separated by the dashed lines) uses IndexFlatIP alone, and its results differ drastically from true cosine distance.
The second block is for one-hot data with normalize_L2 + IndexFlatIP; faiss and sklearn do not match exactly, but only because items with identical cosine similarity get recalled in a different id order.
The third block is for embedding-style vectors with normalize_L2 + IndexFlatIP; here faiss and sklearn usually match completely, because ties in distance are very rare.
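To confirm that the one-hot mismatches really are just tie-breaking, one can compare the similarity scores at the recalled positions rather than the raw ids. A hedged sketch (the helper name compare_by_similarity is my own, not from the original post):

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

def compare_by_similarity(x, faiss_I, sklearn_I):
    """True where the two recalls point at items with identical similarity
    scores, even if the ids differ because ties were broken differently."""
    sims = cosine_similarity(x)             # (nb, nb) pairwise similarities
    rows = np.arange(x.shape[0])[:, None]   # row indices for fancy indexing
    return np.isclose(sims[rows, faiss_I],      # scores of faiss's recalled ids
                      sims[rows, sklearn_I],    # scores of sklearn's recalled ids
                      atol=1e-5)

Under this score-level comparison, the one-hot runs should come out fully matched, which supports the tie-breaking explanation.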