Machine Learning Algorithms in Python: k-means Text Clustering
# -*- coding: utf-8 -*-
# Written in a Jupyter notebook. Author: huzhifei, created 2018/8/14.
# This script implements text clustering in Python using k-means.

# Import the required packages
import numpy as np
import pandas as pd
import re
import os
import codecs
from sklearn import feature_extraction
import jieba

# Tokenize the title text with jieba
f1 =open("title.txt","r",encoding='utf-8',errors='ignore')
f2 =open("title_fenci", 'w',encoding='utf-8',errors='ignore')
for line in f1:seg_list = jieba.cut(line, cut_all=False)f2.write((" ".join(seg_list)).replace("\t\t\t","\t"))
#print(w)
f1.close()
f2.close()#對summary(在這里用content表示summary)文本做分詞
f1 =open("content.txt","r",encoding='utf-8',errors='ignore')
f2 =open("content_fenci.txt", 'w',encoding='utf-8',errors='ignore')
for line in f1:seg_list = jieba.cut(line, cut_all=False)f2.write((" ".join(seg_list)).replace("\t\t\t","\t"))
#print(w)
f1.close()
f2.close()#打開已經分好詞的title與content文本內容
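If you have not used jieba before, a one-line check shows what the space-joined output above looks like. The sentence here is jieba's standard demo input, not data from this project:

print(" / ".join(jieba.cut("小明硕士毕业于中国科学院计算所", cut_all=False)))
# typical output: 小明 / 硕士 / 毕业 / 于 / 中国科学院 / 计算所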
# Read back the tokenized title and content files
titles = open('title_fenci.txt', encoding='utf-8', errors='ignore').read().split('\n')
#print(titles)
print(str(len(titles)) + ' titles')
contents = open('content_fenci.txt',encoding='utf-8',errors='ignore').read().split('\n')
contents = contents[:len(titles)]
#print(contents)
print(str(len(contents)) + ' contents')

# Chinese stopwords
def get_custom_stopwords(stop_words_file):
    with open(stop_words_file, encoding='utf-8') as f:
        stopwords = f.read()
    stopwords_list = stopwords.split('\n')
    return stopwords_list

# Load the stopword list
stop_words_file = "stopwordsHIT.txt"
stopwords = get_custom_stopwords(stop_words_file)

# Build the TF-IDF matrix
from sklearn.feature_extraction.text import TfidfVectorizer

max_df = 0.8
min_df = 2
# stop_words takes the Chinese stopword list loaded above (the built-in
# 'english' list would never match Chinese tokens). The documents are already
# space-separated by jieba, so the default token_pattern-based tokenization
# works and no custom tokenizer is needed.
tfidf_vectorizer = TfidfVectorizer(max_df=max_df, min_df=min_df,
                                   max_features=200000,
                                   stop_words=stopwords,
                                   use_idf=True,
                                   token_pattern=u'(?u)\\b[^\\d\\W]\\w+\\b',
                                   ngram_range=(1, 2))
%time tfidf_matrix = tfidf_vectorizer.fit_transform(contents)
print(tfidf_matrix.shape)

# Get the feature terms
terms = tfidf_vectorizer.get_feature_names()  # on scikit-learn >= 1.0, use get_feature_names_out()
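As a quick sanity check of the vectorizer, a minimal sketch (using only the tfidf_matrix and terms defined above) that prints the ten highest-weighted terms of the first document:

row = tfidf_matrix[0].toarray().ravel()  # dense TF-IDF weights of document 0
for ind in np.argsort(row)[::-1][:10]:   # indices of the 10 largest weights
    if row[ind] > 0:
        print(terms[ind], round(row[ind], 3))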
# k-means clustering
from sklearn.cluster import KMeans

num_clusters = 6
km = KMeans(n_clusters=num_clusters)
%time km.fit(tfidf_matrix)
clusters = km.labels_.tolist()
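num_clusters = 6 is a manual choice. If you want a data-driven check, one common approach is to compare silhouette scores across a few values of k. The sketch below is an optional addition, not part of the original script; sample_size keeps the score computation tractable on tens of thousands of documents:

from sklearn.metrics import silhouette_score

for k in range(2, 11):
    labels = KMeans(n_clusters=k).fit_predict(tfidf_matrix)
    score = silhouette_score(tfidf_matrix, labels, sample_size=2000, random_state=1)
    print(k, round(score, 4))  # higher silhouette is better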
# Save the k-means model as a .pkl file and load it back
import joblib  # "from sklearn.externals import joblib" on old scikit-learn; removed in 0.23
joblib.dump(km, 'y_cluster.pkl')
km = joblib.load('y_cluster.pkl')
#print(km)
clusters = km.labels_.tolist()
print(len(clusters))
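Worth noting: to cluster new, unseen documents with the reloaded model, the fitted vectorizer has to be persisted as well, because km.predict expects vectors from the same TF-IDF space. A minimal sketch, with an illustrative file name and sample sentence:

joblib.dump(tfidf_vectorizer, 'tfidf_vectorizer.pkl')  # illustrative file name

new_doc = " ".join(jieba.cut("这是一条新的待聚类文本", cut_all=False))
vec = joblib.load('tfidf_vectorizer.pkl').transform([new_doc])
print(km.predict(vec))  # index of the nearest cluster centre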
# Store the results in a pandas DataFrame (pandas is already imported above;
# all three lists are sliced to the same length so the constructor accepts them)
films = {'title': titles[0:53612], 'synopsis': contents[0:53612], 'cluster': clusters[0:53612]}
frame = pd.DataFrame(films, index=films['cluster'], columns=['cluster', 'title', 'synopsis'])

# Cluster size counts
frame['cluster'].value_counts()
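Beyond raw counts, it helps to eyeball a few member titles per cluster. A small optional sketch over the frame built above:

for c in range(num_clusters):
    print("Cluster %d sample titles:" % c)
    for t in frame[frame['cluster'] == c]['title'].head(3):
        print('  ' + t)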
# Print the top terms of each cluster
print("Top terms per cluster:")
print()
# Sort each cluster centre's coordinates in descending order of weight
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
#print(order_centroids)
for i in range(num_clusters):
    print("Cluster %d words:" % i, end='')
    for ind in order_centroids[i, :50]:
        # terms[ind] is the term itself; the jieba-tokenized Chinese text
        # needs no stem-to-word mapping
        print(' %s' % terms[ind], end=',')
    print()