Machine Learning Algorithms in Python: TF-IDF Feature Word Extraction and Text Similarity Classification
# coding: utf-8
# This script extracts feature words from product names with TF-IDF, then, using a
# pre-trained word2vec model, matches each product's feature words whose TF-IDF value
# exceeds a given threshold against a predefined list of product categories by similarity.
import jieba
import jieba.posseg as pseg
import jieba.analyse
import pymssql
import xlwt
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
import pandas as pd
jieba.load_userdict('C:\\Users\\Desktop\\s_proj\\dict.txt')  # load the custom segmentation dictionary

# connect to the database
conn = pymssql.connect(host='1.1.1.1',user='username',password='password',database='database',charset='utf8')
cur = conn.cursor()
sql='select distinct(column) from table'
cur.execute(sql)
listl = cur.fetchall()

# word segmentation
words=[]
for word in listl:
    for i in word:
        seg_list = jieba.cut(i, cut_all=False)
        words.append(" ".join(seg_list))
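After this loop every product name is a space-joined token string, which is the plain-text input the scikit-learn vectorizer below expects. A purely hypothetical illustration (not actual data from the database):

# Hypothetical example of the segmentation output:
#   '进口脱脂牛奶'  ->  '进口 脱脂 牛奶'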
# compute TF-IDF
tfidf_vectorizer = CountVectorizer(min_df=2, max_df=0.8, token_pattern=u'(?u)\\b[^\\d\\W]\\w+\\b')
transformer=TfidfTransformer()
word_tfidf = tfidf_vectorizer.fit_transform(words)
tfidf=transformer.fit_transform(word_tfidf)
print(tfidf.shape)

# get the feature words
features = tfidf_vectorizer.get_feature_names()
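As a side note, TfidfVectorizer is already imported and combines the counting and weighting steps; a minimal equivalent sketch, assuming the same min_df / max_df / token_pattern settings as above:

# One-step alternative (sketch only): TfidfVectorizer = CountVectorizer + TfidfTransformer,
# so it yields the same matrix and the same feature-word list as the two-step pipeline above.
vectorizer = TfidfVectorizer(min_df=2, max_df=0.8, token_pattern=u'(?u)\\b[^\\d\\W]\\w+\\b')
tfidf_alt = vectorizer.fit_transform(words)
features_alt = vectorizer.get_feature_names()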
# load the pre-trained word2vec model
from gensim.models import KeyedVectors
import logging
logging.basicConfig(format='%(asctime)s:%(levelname)s: %(message)s', level=logging.INFO)
model = KeyedVectors.load_word2vec_format('model_word.bin', binary=True)
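Before the batch matching below, it can help to confirm the loaded vectors behave as expected. A minimal sketch, where the probe word is a hypothetical placeholder rather than a term taken from the real vocabulary:

probe = '手机'  # hypothetical probe word; substitute any term known to be in the model
if probe in model:
    print(model.most_similar(probe, topn=5))  # nearest neighbours by cosine similarity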
# open the text file with the product categories to match against
f4 = open('C:\\Users\\Desktop\\s_proj\\c.txt', 'r', encoding='utf-8', errors='ignore')

# clean up the category text
ff=[]
for j in f4.readlines():
    j = j.replace('\n', '')
    ff.append(j)
print(len(ff))

# TF-IDF sparse matrix
tfidf_tuple = tfidf.nonzero()
tfidf_rows = tfidf_tuple[0]
tfidf_columns = tfidf_tuple[1]
size = len(tfidf_columns)
print('nonzero.size=%s' % size)

# store entries whose TF-IDF value is at or above a threshold in a dict, keyed by product index
product_dict = {}
for i in range(size):
    row = tfidf_rows[i]
    column = tfidf_columns[i]
    tfidf_value = tfidf[row, column]
    if tfidf_value <= 0.4:
        continue
    key_words = product_dict.setdefault(row, [])
    key_word = {}
    key_word["key_word"] = features[column]
    key_word["tfidf_value"] = tfidf_value
    key_words.append(key_word)
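The resulting product_dict maps a row index to the feature words that survived the threshold; a purely illustrative entry (the index and values are made up, not real data):

# product_dict[3] == [
#     {"key_word": "牛奶", "tfidf_value": 0.62},
#     {"key_word": "进口", "tfidf_value": 0.45},
# ]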
print('product_dict.len=%s' % len(product_dict))

# compute the word2vec similarity between every extracted feature word and each category
f=open('gabbage.txt','w',encoding='utf-8',errors='ignore')
word2vec={}
for i in range(size):
    column = tfidf_columns[i]
    cate = word2vec.setdefault(features[column], [])
    for jj in ff:
        try:
            y1 = model.similarity(features[column], jj)
            # cast to a plain float so the value stays JSON-serializable later on
            insert = {"category_name": jj, "similarity": float(y1)}
            cate.append(insert)
        except KeyError:
            # skip words missing from the word2vec vocabulary
            f.write('')
print('word2vec.len=%s' % len(word2vec))
f.close()

# sort by similarity value
import operator
for k, v in word2vec.items():
    new_dict_list = []
    s = sorted(v, key=operator.itemgetter("similarity"), reverse=True)
    word2vec[k] = s
    for w in word2vec[k]:
        if w not in new_dict_list:
            new_dict_list.append(w)
    word2vec[k] = new_dict_list
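The same sort-and-deduplicate step can also be written more compactly; a behaviour-preserving sketch:

# Compact equivalent (sketch): sort descending by similarity, then keep only the
# first occurrence of each entry while preserving order.
for k, v in word2vec.items():
    ranked = sorted(v, key=operator.itemgetter("similarity"), reverse=True)
    word2vec[k] = [d for n, d in enumerate(ranked) if d not in ranked[:n]]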
# build a similarity dict for the feature words whose TF-IDF value passes the threshold
l = [ll for key in product_dict.keys() for ll in product_dict[key]]
for k in word2vec.keys():
    for m in l:
        if m["key_word"] == k:
            m["category_names"] = word2vec[k]
print(len(product_dict))

# convert the dict to a list, mapping each row index back to its product name
result=[r for r in listl]
product_list = []
for i in product_dict:
    product = {}
    product['product'] = result[i][0]
    product['key_words'] = product_dict[i]
    product_list.append(product)
print('product_list.len=%s' % len(product_list))
print(len(product_list))

# fill a similarity dict for the entries whose TF-IDF value passes the threshold
f=open('gabbage.txt','w',encoding='utf-8',errors='ignore')
# store entries with TF-IDF >= 0.4 in a dict, keyed by product index
product_dict = {}
for i in range(size):
    row = tfidf_rows[i]
    column = tfidf_columns[i]
    tfidf_value = tfidf[row, column]
    if tfidf_value <= 0.4:
        continue
    key_words = product_dict.setdefault(row, [])
    for jj in ff:
        try:
            y1 = model.similarity(features[column], jj)
            insert = {'key_word': features[column],
                      'tfidf_value': tfidf_value,
                      'category_names': {'goods_name': jj, 'word2vec': float(y1)}}
            key_words.append(insert)
        except KeyError:
            # skip words missing from the word2vec vocabulary
            f.write('')
f.close()

# write the results to JSON
import json
t=open('word2vec_result1.txt','w',encoding='utf-8',errors='ignore')
json_string = json.dumps(product_list[0:11]).encode('utf-8').decode('unicode_escape')
print(json_string)
t.write(json_string)
t.close()
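xlwt is imported at the top but never used; if an Excel export of the same results is also wanted, a sketch along these lines would do it (the sheet name and column layout are assumptions, not part of the original script):

# Optional Excel export (sketch): one row per product, keywords joined into one cell.
wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet('result')
for r, p in enumerate(product_list):
    ws.write(r, 0, p['product'])
    ws.write(r, 1, ','.join(kw['key_word'] for kw in p['key_words']))
wb.save('word2vec_result1.xls')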