# Stage 1: compute text similarity with the Jaccard coefficient
# (第1关:用Jaccard系数计算文本之间的相似度)
#import numpy as np
#from scipy.spatial.distance import pdist#直接调包可以计算JC值 ,需要两个句子长度一样;
import jieba
jieba.setLogLevel(jieba.logging.INFO)
def Jaccrad(a, b):
    """Return the Jaccard similarity coefficient between two sentences.

    Both sentences are segmented into words with jieba (precise mode), and
    the coefficient is |A ∩ B| / |A ∪ B| over the resulting word sets.

    Args:
        a: candidate sentence (str).
        b: reference/source sentence (str).

    Returns:
        float in [0.0, 1.0]. Returns 0.0 when both token sets are empty
        (guards against ZeroDivisionError on empty inputs).
    """
    # 1. Tokenize; sets deduplicate tokens, matching the original
    #    list(set(...)) behavior.
    grams_model = set(jieba.cut(a, cut_all=False))
    grams_reference = set(jieba.cut(b, cut_all=False))
    # 2. Intersection via set operation — O(n) instead of the original
    #    nested list-membership loop, which was O(n*m).
    intersection = len(grams_model & grams_reference)
    # Union size by inclusion-exclusion.
    union = len(grams_model) + len(grams_reference) - intersection
    # 3. Jaccard coefficient; empty union means both inputs had no tokens.
    return intersection / union if union else 0.0
# Demo: run only when executed as a script, so importing this module
# for its Jaccrad() function causes no side effects.
if __name__ == "__main__":
    str1 = "我爱北京天安门"
    str2 = "天安门雄伟壮阔让人不得不爱"
    jaccard_coefficient = Jaccrad(str1, str2)
    print(jaccard_coefficient)