# coding=utf-8
import re
import html

import jieba
import jieba.analyse
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Load the stop word list once at import time.
with open('stop_words.utf8', encoding='utf8') as stopwords:
    stopword_list = [k.strip() for k in stopwords if k.strip() != '']


def replace_tongyici(keywords):
    # tongyici_tihuan.txt is the synonym table: one group of synonyms per line,
    # whitespace/tab separated, with the first word as the canonical form.
    # 1. Read the synonym table and build a replacement dict.
    combine_dict = {}
    for line in open("tongyici_tihuan.txt", "r", encoding='utf8'):
        seperate_word = line.strip().split()
        for i in range(1, len(seperate_word)):
            combine_dict[seperate_word[i]] = seperate_word[0]
    # 2. Map every keyword to its canonical synonym where one exists.
    kws = []
    for word in keywords:
        kws.append(combine_dict.get(word, word))
    return kws


class CosineSimilarity(object):
    """
    Cosine similarity between two texts.
    """
    def __init__(self, content_x1, content_y2):
        self.s1 = content_x1
        self.s2 = content_y2

    @staticmethod
    def extract_keyword(seq_str):
        """Extract keywords from a raw (possibly HTML) string."""
        # Strip HTML tags with a regex.
        re_exp = re.compile(r'<[^>]+>', re.S)
        content = re_exp.sub(' ', seq_str)
        # Decode HTML character entities.
        content = html.unescape(content)
        # Segment the text with the user dictionary loaded.
        jieba.load_userdict("user_dict.txt")
        seg = [i for i in jieba.cut(content, cut_all=True) if i != '']
        # keywords = [k for k in jieba.cut(content, cut_all=True) if k != ' ' and k != '' and k not in stopword_list]
        keywords = [k for k in jieba.analyse.extract_tags("|".join(seg), topK=500, withWeight=False)
                    if k != ' ' and k != '' and k not in stopword_list]
        # keywords = replace_tongyici(keywords)
        # Alternative keyword extraction strategies:
        # keywords = jieba.analyse.extract_tags("|".join(seg), topK=500, withWeight=False, allowPOS=('n', 'nr', 'ns'))
        # keywords = jieba.analyse.extract_tags(content, topK=2000, withWeight=False)
        # Return the raw segmentation too; main() currently only uses the keywords.
        return seg, keywords

    @staticmethod
    def one_hot(word_dict, keywords):
        """Encode keywords as a bag-of-words count vector over the shared vocabulary."""
        cut_code = [0] * len(word_dict)
        for word in keywords:
            cut_code[word_dict[word]] += 1
        return cut_code

    def main(self):
        # Stop words could also be handled by jieba itself:
        # jieba.analyse.set_stop_words('stop_words.utf8')

        # Extract keywords from both texts.
        seg1, keywords1 = self.extract_keyword(self.s1)
        seg2, keywords2 = self.extract_keyword(self.s2)
        # The union of the two keyword sets forms the shared vocabulary.
        union = set(keywords1).union(set(keywords2))
        # union = set(seg1).union(set(seg2))
        # Assign each word an index.
        word_dict = {word: index for index, word in enumerate(union)}
        # Encode both keyword lists as count vectors.
        s1_cut_code = self.one_hot(word_dict, keywords1)
        s2_cut_code = self.one_hot(word_dict, keywords2)
        # TF-IDF weighted alternative:
        # vector = TfidfVectorizer(max_df=10, min_df=1)
        # tfidf = vector.fit_transform([" ".join(keywords1), " ".join(keywords2)])
        # Cosine similarity of the two count vectors.
        sample = [s1_cut_code, s2_cut_code]
        try:
            sim = cosine_similarity(sample)
            # sim = cosine_similarity(tfidf).tolist()
            return sim[1][0], keywords1, keywords2
        except Exception as e:
            # Guard against degenerate input (e.g. empty vectors / division by zero).
            print(e)
            return 0.0, keywords1, keywords2
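
# The commented-out TfidfVectorizer lines in main() hint at a TF-IDF weighted variant of the
# same comparison. The helper below is an illustrative sketch of that idea, not part of the
# original pipeline: the name tfidf_cosine and the reuse of extract_keyword here are
# assumptions made for demonstration only.
def tfidf_cosine(text_a, text_b):
    """Sketch: cosine similarity over TF-IDF weighted keywords instead of raw counts."""
    _, kw_a = CosineSimilarity.extract_keyword(text_a)
    _, kw_b = CosineSimilarity.extract_keyword(text_b)
    # Join the keyword lists into space-separated "documents", mirroring the commented-out
    # fit_transform call in main(). token_pattern keeps single-character Chinese tokens,
    # which sklearn's default pattern would drop.
    vector = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b")
    tfidf = vector.fit_transform([" ".join(kw_a), " ".join(kw_b)])
    return cosine_similarity(tfidf)[1][0]
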
# Test
if __name__ == '__main__':
    # with open(r'D:\pythonDM\Ndkj\live111\result\1.txt', encoding='UTF-8') as x, \
    #         open(r'D:\pythonDM\Ndkj\live111\result\2.txt', encoding='UTF-8') as y:
    #     content_x = x.read()
    #     content_y = y.read()
    content_x = """中英文双语版本开发建设,为平台提供国际化能力,对平台APP端所有功能菜单以及所有官方维护内容进行中英翻译,实现中英双语的APP版本,同时提供版本一键切换功能,提升一机游丽水平台服务的全面性,将一机游丽水打造成全国智慧文旅平台领域专业、专注、领先的范本。"""
    content_y = """(1)诉求受理、分流功能: 用户可以对进入统一受理中心的诉求信息进行识别,对有效且需要分流的诉求进行受理、分派操作。操作后,诉求自动进入下一个流程环节,操作后信息状态变为无效信息。对应的诉求状态变化会同步通知诉求来源系统。 (2)诉求结案回复、设为无效功能
用户对进入统一受理中心的诉求信息进行识别,对可以直接答复的信息进行回复并结案的操作,操作后诉求会自动结案。如诉求信息无效,则可以对其信息不受理操作,操作后信息状态变为无效信息。对应的诉求状态变化会同步通知诉求来源系统。 诉求流转跟踪视图用户可在统一受理中心的工作台上看到已分派的系统列表,信息详情中会展示该诉求的处理流程,内部和外部系统的处理过程都可以看到,方便用户掌握诉求的进展以便对诉求流转进行跟踪。 (3)自动分类、分流: 统一受理中心通过大数据分析,对诉求内容的语义解析算法,提取出该诉求的事件分类自动填充到分流信息中,再通过事项清单配置,将负责该类型事件的处理对象系统自动填充到分流信息中。用户只需核对系统填充信息即可实现一键分派。 (4)自动区分无效信息: 统一受理中心通过大数据分析,对诉求内容的语义解析算法,将疑似无效内容的诉求信息标记出来,提供用户判断的依据,提高用户处理业务的效率。"""
    similarity = CosineSimilarity(content_x, content_y)
    similarity = similarity.main()
    print(similarity)
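
# For reference, the score returned above is the standard cosine of the angle between the two
# count vectors: dot(a, b) / (||a|| * ||b||). The helper below is an illustrative sketch of that
# formula (the name cosine_by_hand is not part of the original code); it should agree with
# sklearn's cosine_similarity on the same bag-of-words vectors.
def cosine_by_hand(vec_a, vec_b):
    """Sketch: cosine similarity of two equal-length count vectors in pure Python."""
    import math
    dot = sum(a * b for a, b in zip(vec_a, vec_b))
    norm_a = math.sqrt(sum(a * a for a in vec_a))
    norm_b = math.sqrt(sum(b * b for b in vec_b))
    if norm_a == 0 or norm_b == 0:
        # Mirrors the intent of the division-by-zero guard in CosineSimilarity.main().
        return 0.0
    return dot / (norm_a * norm_b)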