import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

if __name__ == '__main__':
    embedding_size = 2        # dimensionality of the word embeddings
    sequence_length = 3       # length of each sentence
    num_classes = 2           # number of output classes
    filter_sizes = [2, 2, 2]  # convolution kernel sizes
    num_filters = 3           # number of filters (output channels)

    # 3-word sentences (sequence_length = 3)
    sentences = ["i love you", "he loves me", "she likes baseball",
                 "i hate you", "sorry for that", "this is awful"]
    labels = [1, 1, 1, 0, 0, 0]  # 1 is good, 0 is not good.

    # 1. Build the vocabulary
    word_list = " ".join(sentences).split()
    word_list = list(set(word_list))
    word_dict = {w: i for i, w in enumerate(word_list)}
    vocab_size = len(word_dict)

    # 2. Build the model
    model = TextCNN()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    inputs = torch.LongTensor([np.asarray([word_dict[n] for n in sen.split()]) for sen in sentences])
    targets = torch.LongTensor([out for out in labels])  # class indices, as expected by CrossEntropyLoss

    # 3. Train
    for epoch in range(5000):
        optimizer.zero_grad()
        output = model(inputs)
        # output : [batch_size, num_classes], targets : [batch_size] (LongTensor, not one-hot)
        loss = criterion(output, targets)
        if (epoch + 1) % 1000 == 0:
            print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss.item()))
        loss.backward()
        optimizer.step()

    # Test
    test_text = 'sorry hate you'
    tests = [np.asarray([word_dict[n] for n in test_text.split()])]
    test_batch = torch.LongTensor(tests)

    # Predict
    predict = model(test_batch).data.max(1, keepdim=True)[1]
    if predict[0][0] == 0:
        print(test_text, "is Bad Mean...")
    else:
        print(test_text, "is Good Mean!!")
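The script above instantiates a TextCNN class that is not defined in this section. For reference, here is a minimal sketch of what such a model could look like, assuming (as the script suggests) that the hyperparameters vocab_size, embedding_size, num_classes, filter_sizes, and num_filters are visible as module-level globals; the exact layer names and structure are illustrative, not the article's definitive implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

class TextCNN(nn.Module):
    def __init__(self):
        super(TextCNN, self).__init__()
        # Look up an embedding vector for every token id
        self.embedding = nn.Embedding(vocab_size, embedding_size)
        # One 2-D convolution per filter size; each kernel spans the full
        # embedding dimension and `size` consecutive words
        self.convs = nn.ModuleList([
            nn.Conv2d(1, num_filters, (size, embedding_size))
            for size in filter_sizes
        ])
        # Concatenated max-pooled features -> class scores
        self.fc = nn.Linear(num_filters * len(filter_sizes), num_classes)

    def forward(self, x):
        # x : [batch_size, sequence_length]
        embedded = self.embedding(x)      # [batch, seq_len, embedding_size]
        embedded = embedded.unsqueeze(1)  # add a channel dim: [batch, 1, seq_len, embedding_size]
        pooled = []
        for conv in self.convs:
            h = F.relu(conv(embedded))              # [batch, num_filters, seq_len - size + 1, 1]
            h = F.max_pool2d(h, (h.size(2), 1))     # max over time: [batch, num_filters, 1, 1]
            pooled.append(h.squeeze(3).squeeze(2))  # [batch, num_filters]
        out = torch.cat(pooled, dim=1)    # [batch, num_filters * len(filter_sizes)]
        return self.fc(out)               # [batch, num_classes]

Each branch convolves over a window of words, max-pools over time to a fixed-size feature, and the concatenated features feed a single linear classifier, which matches the output shape [batch_size, num_classes] that the training loop expects from model(inputs).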