Natural Language Processing on the Novel 全职高手 (The King's Avatar)
一、Background
This semester's Python course focuses on data analysis. Over the past month we covered natural language processing, including word segmentation, POS tagging and word-frequency statistics with jieba, word clouds with wordcloud, and statistical charts with matplotlib. This project was written for the teacher's assignment: based on Part 1 of the NLP course material, choose a novel you like and analyse it using (but not limited to) what was taught in class, for example its segmentation, word frequencies, POS tags, a ranking of character appearances, a ranking of the foods in the novel (this one is mandatory, the teacher likes to eat), character relationships, and so on. Requirements: 1. upload the code as a .py attachment, with functional and ordinary comments; 2. the feature description and screenshots of the results can be written in the assignment itself; 3. store the novel as a txt file; 4. grading depends on the completeness of the features.
二、Main features
1. Statistics and ranking of character appearances
2. Statistics and ranking of food mentions
3. A visualization of the character relationships
4. Drawing a word cloud
三、Feature details
1. Chinese word segmentation with word-frequency and POS statistics, removing stop words and punctuation
def seg_depart(path, total):
    # cleaned text without punctuation and stop words
    outstr = ''
    # build the stop-word list (stop words are stored in stopword.txt)
    stopwords = open_text('./stopword.txt')
    # segment every line of the document
    with open(path, 'r', encoding='utf-8') as text:
        for line in text:
            sentence_depart = pseg.cut(line.strip())
            for word, flag in sentence_depart:
                # drop stop words, tabs, digits and single characters
                if word not in stopwords and word != '\t' and word != '' and len(word) >= 2 and not word.isdigit():
                    total[(word, flag)] = total.get((word, flag), 0) + 1
                    outstr += word
    # save the (word, POS, count) records to a txt file, sorted by count
    with open('./全職高手分詞詞頻詞性.txt', 'w', encoding='utf-8') as text1:
        items1 = list(total.items())
        items1.sort(key=lambda x: x[1], reverse=True)
        for key, value in items1:
            text1.write('%s,%s,%d\n' % (key[0], key[1], value))
    # save the cleaned novel text
    with open('./純凈版全職高手.txt', 'w', encoding='utf-8') as text2:
        text2.write(outstr)
    return total
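For reference, jieba.posseg.cut yields (word, POS-flag) pairs, which is why the frequency dictionary above is keyed by (word, flag) tuples. A minimal sketch of what that looks like (the sample sentence is only illustrative; the actual tags depend on jieba's dictionary and on the loaded user dictionary):

import jieba.posseg as pseg

# print the (word, flag) pairs jieba produces for one sample sentence
for pair in pseg.cut("葉修在網(wǎng)吧吃了一碗泡面"):
    # each pair exposes .word and .flag, and also unpacks as (word, flag)
    print(pair.word, pair.flag)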
2. Ranking characters by number of appearances

def character_sequence(total):
    sequence = {}
    # merge words that refer to the same character (the novel has too many characters; only the main ones are handled)
    for key, value in total.items():
        if key[1] == 'nr':
            if key[0] == '葉修' or key[0] == '君莫笑' or key[0] == '老葉' or key[0] == '無敵最俊朗':
                word = '葉修'
            elif key[0] == '蘇沐橙' or key[0] == '沐雨橙風(fēng)' or key[0] == '沐沐' or key[0] == '沐橙':
                word = '蘇沐橙'
            elif key[0] == '方銳' or key[0] == '海無量':
                word = '方銳'
            elif key[0] == '唐柔' or key[0] == '寒煙柔' or key[0] == '小唐':
                word = '唐柔'
            elif key[0] == '喬一帆' or key[0] == '一寸灰' or key[0] == '一帆':
                word = '喬一帆'
            elif key[0] == '包榮興' or key[0] == '包子入侵' or key[0] == '包子':
                word = '包榮興'
            elif key[0] == '羅輯' or key[0] == '昧光':
                word = '羅輯'
            elif key[0] == '莫凡' or key[0] == '毀人不倦':
                word = '莫凡'
            elif key[0] == '安文逸' or key[0] == '小手冰涼':
                word = '安文逸'
            elif key[0] == '陳果' or key[0] == '逐煙霞' or key[0] == '果果':
                word = '陳果'
            elif key[0] == '魏琛' or key[0] == '迎風(fēng)布陣' or key[0] == '老魏':
                word = '魏琛'
            elif key[0] == '孫翔' or key[0] == '一葉知秋':
                word = '孫翔'
            elif key[0] == '韓文清' or key[0] == '大漠孤煙':
                word = '韓文清'
            elif key[0] == '喻文州' or key[0] == '索克薩爾':
                word = '喻文州'
            elif key[0] == '黃少天' or key[0] == '夜雨聲煩' or key[0] == '少天' or key[0] == '黃少':
                word = '黃少天'
            elif key[0] == '王杰希' or key[0] == '王不留行' or key[0] == '王大眼':
                word = '王杰希'
            else:
                word = key[0]
            # dict.get returns the current count for word, or 0 if it is not in the dict yet
            sequence[word] = sequence.get(word, 0) + value
    # remove high-frequency words that were tagged as names but are not characters
    for word in excludes:
        if sequence.get(word, 0) > 0:
            del sequence[word]
    # sort by count, largest first
    sequence_new = sorted(sequence.items(), key=lambda x: x[1], reverse=True)
    with open('./全職高手人物出場次數(shù)排序.txt', 'w', encoding='utf-8') as f:
        for name, num in sequence_new:
            f.write('%s,%d\n' % (name, num))
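The if/elif chain above works, but as a design note the same alias merging can be written as a lookup table, which makes it easier to add or adjust characters later. A possible sketch (the table only repeats a few of the mappings already used above, and the helper name is my own):

# map each alias (nickname or in-game ID) to the character's canonical name
ALIASES = {
    '君莫笑': '葉修', '老葉': '葉修', '無敵最俊朗': '葉修',
    '沐雨橙風(fēng)': '蘇沐橙', '沐沐': '蘇沐橙', '沐橙': '蘇沐橙',
    '寒煙柔': '唐柔', '小唐': '唐柔',
    # the remaining aliases follow the same pattern
}

def canonical_name(word):
    # fall back to the word itself when it has no registered alias
    return ALIASES.get(word, word)

With such a table, the body of the loop in character_sequence reduces to word = canonical_name(key[0]).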
3. Ranking the foods mentioned in the novel

def food_sequence(total):
    sequence = {}
    # most foods appearing in the novel were collected into a txt file in advance;
    # count how often each of them occurs (summing over all POS tags of the same word)
    food = open_text('./全職高手食物.txt')
    for key, value in total.items():
        if key[0] in food:
            sequence[key[0]] = sequence.get(key[0], 0) + value
    # sort by count before writing, so the output file really is a ranking
    sequence_new = sorted(sequence.items(), key=lambda x: x[1], reverse=True)
    with open('./全職高手食物排序.txt', 'w', encoding='utf-8') as f:
        for word, value in sequence_new:
            f.write('%s,%d\n' % (word, value))
4. Randomly generating colours and saving them in a list, used when drawing the character relationship graph

# number of colours needed: one per character in the character list
colorNum = len(open_text('./全職高手人物.txt'))

# generate a random colour in hex form, e.g. #FFA25C
def randomcolor():
    colorArr = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
    color = ""
    for i in range(6):
        color += colorArr[random.randint(0, 14)]
    return "#" + color

# build a list with one random colour per character node
def color_list():
    colorList = []
    for i in range(colorNum):
        colorList.append(randomcolor())
    return colorList
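Purely random colours occasionally come out very pale or hard to tell apart. If that becomes an issue, the colours can instead be taken from one of matplotlib's qualitative colormaps; a small sketch, assuming matplotlib.pyplot is imported as plt like in the rest of the script (the function name is my own):

def color_list_from_cmap(n):
    # take n colours from the qualitative 'tab20' colormap (colours repeat after 20 nodes)
    return [plt.cm.tab20(i % 20) for i in range(n)]

The returned RGBA tuples can be passed to draw_networkx_nodes via node_color just like the hex strings.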
5. Generating the character relationship graph

def creat_relationship(path):
    # node colours for the character graph
    colors = color_list()
    Names = open_text('./全職高手人物.txt')
    relations = {}
    # split by paragraph and assume that characters appearing in the same paragraph co-occur
    lst_para = open_text(path)  # one paragraph per element
    for text in lst_para:
        for name_0 in Names:
            if name_0 in text:
                for name_1 in Names:
                    if name_1 in text and name_0 != name_1 and (name_1, name_0) not in relations:
                        relations[(name_0, name_1)] = relations.get((name_0, name_1), 0) + 1
    # normalise the co-occurrence counts to [0, 1]
    maxRela = max([v for k, v in relations.items()])
    relations = {k: v / maxRela for k, v in relations.items()}
    # return relations
    plt.figure(figsize=(15, 15))
    # undirected graph without parallel edges
    G = nx.Graph()
    for k, v in relations.items():
        G.add_edge(k[0], k[1], weight=v)
    # edges with weight > 0.6
    elarge = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] > 0.6]
    # edges with 0.3 < weight <= 0.6
    emidle = [(u, v) for (u, v, d) in G.edges(data=True) if 0.3 < d['weight'] <= 0.6]
    # edges with weight <= 0.3
    esmall = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] <= 0.3]
    # layout: Fruchterman-Reingold force-directed placement (roughly radial around several centres)
    pos = nx.spring_layout(G)
    # node style
    nx.draw_networkx_nodes(G, pos, alpha=0.8, node_size=1300, node_color=colors)
    # strong edges (> 0.6)
    nx.draw_networkx_edges(G, pos, edgelist=elarge, width=2.5, alpha=0.9, edge_color='g')
    # medium edges (0.3 ~ 0.6)
    nx.draw_networkx_edges(G, pos, edgelist=emidle, width=1.5, alpha=0.6, edge_color='y')
    # weak edges (<= 0.3)
    nx.draw_networkx_edges(G, pos, edgelist=esmall, width=1, alpha=0.4, edge_color='b', style='dashed')
    nx.draw_networkx_labels(G, pos, font_size=14)
    plt.title("《全職高手》主要人物社交關(guān)系網(wǎng)絡(luò)圖")
    # hide the axes
    plt.axis('off')
    # save the figure
    plt.savefig('./全職高手人物關(guān)系圖', bbox_inches='tight')
    plt.show()
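The nested loops over Names do the job, but the co-occurrence counting can also be expressed more compactly with itertools.combinations. A rough sketch under the same assumptions (one paragraph per list element, names read with open_text; the function name is my own):

from itertools import combinations

def count_cooccurrence(paragraphs, names):
    # for every unordered pair of characters, count how many paragraphs mention both
    relations = {}
    for para in paragraphs:
        present = [name for name in names if name in para]
        for a, b in combinations(sorted(present), 2):
            relations[(a, b)] = relations.get((a, b), 0) + 1
    return relations

sorted(present) keeps every pair in a fixed order, so (A, B) and (B, A) are never counted separately; that is what the (name_1, name_0) not in relations check accomplishes in creat_relationship above.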
6. Drawing the word cloud

def GetWordCloud():
    path_txt = './純凈版全職高手.txt'
    path_img = './全職高手.png'
    f = open(path_txt, 'r', encoding='utf-8').read()
    background_image = imread(path_img)
    # background_image = np.array(Image.open(path_img))
    cut_text = " ".join(jieba.cut(f))
    wordcloud = WordCloud(
        background_color="white",    # background colour
        mask=background_image,       # background image used as the mask
        max_words=400,
        # width=600, height=800,
        # stopwords="",              # stop words
        font_path="./simfang.ttf",   # Chinese font; the default DroidSansMono.ttf has no Chinese glyphs, so without this the cloud shows boxes
        max_font_size=50,            # maximum font size
        min_font_size=10,
        random_state=30,             # random seed for reproducible colour assignment
        margin=2,
    )
    # generate the word cloud
    wc = wordcloud.generate(cut_text)
    # display the word cloud, recoloured from the background image
    image_colors = ImageColorGenerator(background_image)
    plt.imshow(wordcloud.recolor(color_func=image_colors), interpolation="bilinear")
    plt.axis("off")
    plt.show()
    wc.to_file('./wordcloud.jpg')  # save the image
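One small compatibility note, based on my understanding of imageio's versioning rather than on the original script: newer imageio releases warn that the top-level imread will change behaviour in v3. If that warning appears, the old behaviour can be kept by importing from the v2 namespace:

# keeps the pre-v3 imread behaviour on newer imageio releases
from imageio.v2 import imread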
7. Visualizing the segmentation, word-frequency and POS statistics

def create_wordPhotograph(total):
    # create the figure; figsize sets width and height in inches
    plt.figure(figsize=(9, 6))
    Y = []
    sign = []
    # the ten most frequent (word, POS) pairs
    c = Counter(total).most_common(10)
    for word, num in c:
        Y.append(num)
        sign.append(word[0] + "_" + word[1])
    plt.bar(np.arange(10), Y, width=0.3, facecolor='yellow', edgecolor='white')
    plt.xticks(np.arange(10), sign)
    i = 0
    X = np.arange(10)
    # label each bar with its count
    for x, y in zip(X, Y):
        plt.text(x + 0.15, y + 0.1, '%d' % (Y[i]), ha='center', va='bottom')
        i = i + 1
    plt.xlabel(u"分詞詞性Top10")
    plt.ylabel(u"詞頻數(shù)")
    plt.title(u"分詞詞頻詞性可視化圖")
    plt.savefig('./分詞詞頻詞性可視化圖.jpg', bbox_inches='tight')
    plt.show()

The visualizations of the character-appearance ranking and the food ranking follow the same pattern as this word-frequency chart; both are included in the complete code in Section 四, and a shared helper that factors out the common plotting code is sketched below.
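Since the three bar charts differ only in their data, colour and labels, they could also share a single helper; a sketch (the function name and parameters are my own, not part of the original script):

def draw_ranking_bar(labels, counts, color, xlabel, ylabel, title, out_path):
    # generic labelled bar chart used by the word/character/food visualizations
    plt.figure(figsize=(9, 6))
    x = np.arange(len(labels))
    plt.bar(x, counts, width=0.3, facecolor=color, edgecolor='white')
    plt.xticks(x, labels)
    # print the count above each bar
    for xi, yi in zip(x, counts):
        plt.text(xi + 0.15, yi + 0.1, '%d' % yi, ha='center', va='bottom')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.savefig(out_path, bbox_inches='tight')
    plt.show()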
四、Complete code
# Created: 2022/3/29 15:10
# Author: 張鵬翔
# Task: based on Part 1 of the NLP course material, choose a novel you like and analyse it
#       using (but not limited to) what was covered in class, e.g. word segmentation, word
#       frequency, POS tags, ranking of character appearances, ranking of the foods in the
#       novel (mandatory, the teacher likes to eat), character relationships, and so on.
# Requirements: 1. upload the code as a .py attachment, with functional and ordinary comments;
#               2. the feature description and result screenshots can be written in the assignment;
#               3. store the novel as a txt file;
#               4. grading depends on the completeness of the features.
import random
import networkx as nx
from imageio import imread
from wordcloud import WordCloud, ImageColorGenerator
import jieba
import jieba.posseg as pseg  # segmentation with POS tags
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.font_manager import FontProperties

# make matplotlib display Chinese instead of boxes
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
font = FontProperties(fname=r"C:\Python\src\python與數(shù)據(jù)分析\simfang.ttf", size=14)  # machine-specific font path

# high-frequency words tagged as person names that are not actually characters
excludes = ['樂章', '小姑娘', '榮耀', '易拉灌', '易容術(shù)', '明白', '全明星', '藍溪閣', '季后賽', '本賽季',
            '砰砰', '和興欣', '上賽季', '華麗', '司儀', '西風(fēng)', '連勝', '銀武', '周旋', '馬踏',
            '安靜', '大屏幕', '和嘉世', '修正', '了興欣', '衛(wèi)星', '謝謝', '呼嘯山莊', '馬甲', '明星',
            '英勇', '真是太', '冷不丁', '小精靈', '高潮', '太久', '布陣', '祝福', '段時間', '格斗',
            '高水平', '言語', '別提', '冷笑', '曉槍', '白癡', '賽中', '顧忌', '越來越近', '封鎖',
            '小鎮(zhèn)', '貢獻度', '高階', '嘉世']

# read a text file and return its lines with surrounding whitespace stripped
def open_text(path):
    with open(path, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f.readlines()]

# segment each line, count word frequency and POS, and write a cleaned novel text
# with stop words and punctuation removed
def seg_depart(path, total):
    # cleaned text without punctuation and stop words
    outstr = ''
    # build the stop-word list
    stopwords = open_text('./stopword.txt')
    # segment every line of the document
    with open(path, 'r', encoding='utf-8') as text:
        for line in text:
            sentence_depart = pseg.cut(line.strip())
            for word, flag in sentence_depart:
                # drop stop words, tabs, digits and single characters
                if word not in stopwords and word != '\t' and word != '' and len(word) >= 2 and not word.isdigit():
                    total[(word, flag)] = total.get((word, flag), 0) + 1
                    outstr += word
    # save the (word, POS, count) records to a txt file, sorted by count
    with open('./全職高手分詞詞頻詞性.txt', 'w', encoding='utf-8') as text1:
        items1 = list(total.items())
        items1.sort(key=lambda x: x[1], reverse=True)
        for key, value in items1:
            text1.write('%s,%s,%d\n' % (key[0], key[1], value))
    # save the cleaned novel text
    with open('./純凈版全職高手.txt', 'w', encoding='utf-8') as text2:
        text2.write(outstr)
    return total

# rank characters by number of appearances
def character_sequence(total):
    sequence = {}
    # merge words that refer to the same character (only the main characters are handled)
    for key, value in total.items():
        if key[1] == 'nr':
            if key[0] == '葉修' or key[0] == '君莫笑' or key[0] == '老葉' or key[0] == '無敵最俊朗':
                word = '葉修'
            elif key[0] == '蘇沐橙' or key[0] == '沐雨橙風(fēng)' or key[0] == '沐沐' or key[0] == '沐橙':
                word = '蘇沐橙'
            elif key[0] == '方銳' or key[0] == '海無量':
                word = '方銳'
            elif key[0] == '唐柔' or key[0] == '寒煙柔' or key[0] == '小唐':
                word = '唐柔'
            elif key[0] == '喬一帆' or key[0] == '一寸灰' or key[0] == '一帆':
                word = '喬一帆'
            elif key[0] == '包榮興' or key[0] == '包子入侵' or key[0] == '包子':
                word = '包榮興'
            elif key[0] == '羅輯' or key[0] == '昧光':
                word = '羅輯'
            elif key[0] == '莫凡' or key[0] == '毀人不倦':
                word = '莫凡'
            elif key[0] == '安文逸' or key[0] == '小手冰涼':
                word = '安文逸'
            elif key[0] == '陳果' or key[0] == '逐煙霞' or key[0] == '果果':
                word = '陳果'
            elif key[0] == '魏琛' or key[0] == '迎風(fēng)布陣' or key[0] == '老魏':
                word = '魏琛'
            elif key[0] == '孫翔' or key[0] == '一葉知秋':
                word = '孫翔'
            elif key[0] == '韓文清' or key[0] == '大漠孤煙':
                word = '韓文清'
            elif key[0] == '喻文州' or key[0] == '索克薩爾':
                word = '喻文州'
            elif key[0] == '黃少天' or key[0] == '夜雨聲煩' or key[0] == '少天' or key[0] == '黃少':
                word = '黃少天'
            elif key[0] == '王杰希' or key[0] == '王不留行' or key[0] == '王大眼':
                word = '王杰希'
            else:
                word = key[0]
            # dict.get returns the current count for word, or 0 if it is not in the dict yet
            sequence[word] = sequence.get(word, 0) + value
    # remove high-frequency words that were tagged as names but are not characters
    for word in excludes:
        if sequence.get(word, 0) > 0:
            del sequence[word]
    # sort by count, largest first
    sequence_new = sorted(sequence.items(), key=lambda x: x[1], reverse=True)
    with open('./全職高手人物出場次數(shù)排序.txt', 'w', encoding='utf-8') as f:
        for name, num in sequence_new:
            f.write('%s,%d\n' % (name, num))

# rank the foods mentioned in the novel
def food_sequence(total):
    sequence = {}
    # most foods appearing in the novel were collected into a txt file in advance;
    # count how often each of them occurs (summing over all POS tags of the same word)
    food = open_text('./全職高手食物.txt')
    for key, value in total.items():
        if key[0] in food:
            sequence[key[0]] = sequence.get(key[0], 0) + value
    # sort by count before writing, so the output file really is a ranking
    sequence_new = sorted(sequence.items(), key=lambda x: x[1], reverse=True)
    with open('./全職高手食物排序.txt', 'w', encoding='utf-8') as f:
        for word, value in sequence_new:
            f.write('%s,%d\n' % (word, value))

# number of colours needed: one per character in the character list
colorNum = len(open_text('./全職高手人物.txt'))

# generate a random colour in hex form, e.g. #FFA25C
def randomcolor():
    colorArr = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
    color = ""
    for i in range(6):
        color += colorArr[random.randint(0, 14)]
    return "#" + color

# build a list with one random colour per character node
def color_list():
    colorList = []
    for i in range(colorNum):
        colorList.append(randomcolor())
    return colorList

# generate the character relationship graph
def creat_relationship(path):
    # node colours for the character graph
    colors = color_list()
    Names = open_text('./全職高手人物.txt')
    relations = {}
    # split by paragraph and assume that characters appearing in the same paragraph co-occur
    lst_para = open_text(path)  # one paragraph per element
    for text in lst_para:
        for name_0 in Names:
            if name_0 in text:
                for name_1 in Names:
                    if name_1 in text and name_0 != name_1 and (name_1, name_0) not in relations:
                        relations[(name_0, name_1)] = relations.get((name_0, name_1), 0) + 1
    # normalise the co-occurrence counts to [0, 1]
    maxRela = max([v for k, v in relations.items()])
    relations = {k: v / maxRela for k, v in relations.items()}
    # return relations
    plt.figure(figsize=(15, 15))
    # undirected graph without parallel edges
    G = nx.Graph()
    for k, v in relations.items():
        G.add_edge(k[0], k[1], weight=v)
    # edges with weight > 0.6
    elarge = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] > 0.6]
    # edges with 0.3 < weight <= 0.6
    emidle = [(u, v) for (u, v, d) in G.edges(data=True) if 0.3 < d['weight'] <= 0.6]
    # edges with weight <= 0.3
    esmall = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] <= 0.3]
    # layout: Fruchterman-Reingold force-directed placement (roughly radial around several centres)
    pos = nx.spring_layout(G)
    # node style
    nx.draw_networkx_nodes(G, pos, alpha=0.8, node_size=1300, node_color=colors)
    # strong edges (> 0.6)
    nx.draw_networkx_edges(G, pos, edgelist=elarge, width=2.5, alpha=0.9, edge_color='g')
    # medium edges (0.3 ~ 0.6)
    nx.draw_networkx_edges(G, pos, edgelist=emidle, width=1.5, alpha=0.6, edge_color='y')
    # weak edges (<= 0.3)
    nx.draw_networkx_edges(G, pos, edgelist=esmall, width=1, alpha=0.4, edge_color='b', style='dashed')
    nx.draw_networkx_labels(G, pos, font_size=14)
    plt.title("《全職高手》主要人物社交關(guān)系網(wǎng)絡(luò)圖")
    # hide the axes
    plt.axis('off')
    # save the figure
    plt.savefig('./全職高手人物關(guān)系圖', bbox_inches='tight')
    plt.show()

# draw the word cloud
def GetWordCloud():
    path_txt = './純凈版全職高手.txt'
    path_img = './全職高手.png'
    f = open(path_txt, 'r', encoding='utf-8').read()
    background_image = imread(path_img)
    # background_image = np.array(Image.open(path_img))
    cut_text = " ".join(jieba.cut(f))
    wordcloud = WordCloud(
        background_color="white",    # background colour
        mask=background_image,       # background image used as the mask
        max_words=400,
        # width=600, height=800,
        # stopwords="",              # stop words
        font_path="./simfang.ttf",   # Chinese font; the default DroidSansMono.ttf has no Chinese glyphs, so without this the cloud shows boxes
        max_font_size=50,            # maximum font size
        min_font_size=10,
        random_state=30,             # random seed for reproducible colour assignment
        margin=2,
    )
    # generate the word cloud
    wc = wordcloud.generate(cut_text)
    # display the word cloud, recoloured from the background image
    image_colors = ImageColorGenerator(background_image)
    plt.imshow(wordcloud.recolor(color_func=image_colors), interpolation="bilinear")
    plt.axis("off")
    plt.show()
    wc.to_file('./wordcloud.jpg')  # save the image

# visualization of the segmentation / word-frequency / POS statistics
def create_wordPhotograph(total):
    # create the figure; figsize sets width and height in inches
    plt.figure(figsize=(9, 6))
    Y = []
    sign = []
    # the ten most frequent (word, POS) pairs
    c = Counter(total).most_common(10)
    for word, num in c:
        Y.append(num)
        sign.append(word[0] + "_" + word[1])
    plt.bar(np.arange(10), Y, width=0.3, facecolor='yellow', edgecolor='white')
    plt.xticks(np.arange(10), sign)
    i = 0
    X = np.arange(10)
    # label each bar with its count
    for x, y in zip(X, Y):
        plt.text(x + 0.15, y + 0.1, '%d' % (Y[i]), ha='center', va='bottom')
        i = i + 1
    plt.xlabel(u"分詞詞性Top10")
    plt.ylabel(u"詞頻數(shù)")
    plt.title(u"分詞詞頻詞性可視化圖")
    plt.savefig('./分詞詞頻詞性可視化圖.jpg', bbox_inches='tight')
    plt.show()

# visualization of the character appearance ranking
def create_CharacterPhotograph():
    plt.figure(figsize=(9, 6))
    Y = []
    sign = []
    i = 0
    # take the top ten lines of the previously written ranking file
    text = open_text('./全職高手人物出場次數(shù)排序.txt')
    for t in text:
        if i < 10:
            tt = t.split(',')
            Y.append(int(tt[1]))
            sign.append(tt[0])
        i += 1
    plt.bar(np.arange(10), Y, width=0.3, facecolor='green', edgecolor='white')
    plt.xticks(np.arange(10), sign)
    i = 0
    X = np.arange(10)
    for x, y in zip(X, Y):
        plt.text(x + 0.15, y + 0.1, '%d' % (Y[i]), ha='center', va='bottom')
        i = i + 1
    plt.xlabel(u"出場人物Top10")
    plt.ylabel(u"出場次數(shù)")
    plt.title(u"人物出場次序排序可視化圖")
    plt.savefig('./人物出場次序排序可視化圖.jpg', bbox_inches='tight')
    plt.show()

# visualization of the food ranking
def create_foodPhotograph():
    plt.figure(figsize=(9, 6))
    Y = []
    sign = []
    i = 0
    # plot every line of the food ranking file
    text = open_text('./全職高手食物排序.txt')
    for t in text:
        if i < len(text):
            tt = t.split(',')
            Y.append(int(tt[1]))
            sign.append(tt[0])
        i += 1
    plt.bar(np.arange(len(text)), Y, width=0.3, facecolor='blue', edgecolor='white')
    plt.xticks(np.arange(len(text)), sign)
    i = 0
    X = np.arange(len(text))
    for x, y in zip(X, Y):
        plt.text(x + 0.15, y + 0.1, '%d' % (Y[i]), ha='center', va='bottom')
        i = i + 1
    plt.xlabel(u"出場食物名")
    plt.ylabel(u"出場次數(shù)")
    plt.title(u"食物出場排序可視化圖")
    plt.savefig('./食物出場排序可視化圖.jpg', bbox_inches='tight')
    plt.show()

def main():
    total = {}
    # load the user dictionary so jieba recognises character names and in-game IDs
    jieba.load_userdict("./全職高手用戶字典.txt")
    total = seg_depart("./全職高手.txt", total)
    # the following steps only need to run once to produce the intermediate files;
    # re-enable them on the first run
    # character_sequence(total)
    # food_sequence(total)
    # creat_relationship("./全職高手.txt")
    # GetWordCloud()
    create_wordPhotograph(total)
    create_CharacterPhotograph()
    create_foodPhotograph()

main()

五、Results
[Screenshots of the program output were attached here as images; they are not reproduced in this text version.]

六、Summary
This assignment served two purposes: it consolidated what I learned over the past four weeks, and it exposed gaps in my own understanding. Because the pandemic moved the school to online teaching, my efficiency in class dropped and I could not immediately practise what the teacher covered. Learning to program is above all about hands-on work: new knowledge has to be paired with active practice, and staying at the level of theory does not lead to good results. I hope to gain even more from the coming weeks of the course.