import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
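# One-time setup on a fresh machine: the tokenizer, stop-word list, and WordNet
# data used below ship separately from NLTK itself (uncomment on first run)
# nltk.download('punkt')
# nltk.download('stopwords')
# nltk.download('wordnet')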
# Read the dataset
import csv
file_path = r'F:\SMSSpamCollectionjsn.txt'
sms = open(file_path, 'r', encoding='utf-8')
sms_data = []
sms_label = []
csv_reader = csv.reader(sms, delimiter='\t')
for line in csv_reader:
    sms_label.append(line[0])  # first column: the label
    sms_data.append(line[1])   # second column: the message text
sms.close()
print(sms_label)
print(sms_data)
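# Optional check on the label distribution; in the SMS Spam Collection the
# labels are 'ham' and 'spam' (Counter is an illustrative addition)
from collections import Counter
print(Counter(sms_label))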
# Preprocessing
def preprocessing(text):
    # Split into sentences, then into word tokens
    tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
    # Lowercase and drop very short tokens first, so capitalized stop words are caught too
    tokens = [token.lower() for token in tokens if len(token) >= 3]
    # Remove English stop words
    stops = stopwords.words('english')
    tokens = [token for token in tokens if token not in stops]
    # Lemmatize each remaining token
    lmtzr = WordNetLemmatizer()
    tokens = [lmtzr.lemmatize(token) for token in tokens]
    preprocessed_text = ' '.join(tokens)
    return preprocessed_text

# Apply preprocessing to every message (sent_tokenize expects a single string, not a list)
sms_data = [preprocessing(text) for text in sms_data]
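# Quick sanity check on a single made-up message (sample text is illustrative only)
print(preprocessing("Congratulations! You have won a free prize, call now."))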
# Split into training and test sets at a 0.7:0.3 ratio
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(sms_data, sms_label, test_size=0.3, random_state=0, stratify=sms_label)
# Vectorize the text with TF-IDF
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(min_df=2, ngram_range=(1, 2), stop_words='english', strip_accents='unicode', norm='l2')
X_train=vectorizer.fit_transform(x_train)
X_test=vectorizer.transform(x_test)
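# Optional peek at the learned vocabulary; get_feature_names_out() is the
# scikit-learn >= 1.0 name (older releases used get_feature_names())
print(len(vectorizer.get_feature_names_out()))
print(vectorizer.get_feature_names_out()[:20])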
print(repr(X_train))  # sparse-matrix summary (a bare X_train only displays in a notebook)
a = X_train.toarray()
print(a)
# Print every nonzero TF-IDF weight, using the matrix's actual shape
# instead of hard-coded dimensions
rows, cols = a.shape
for i in range(rows):
    for j in range(cols):
        if a[i, j] != 0:
            print(i, j, a[i, j])
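# Alternative sketch: iterate the sparse matrix directly and skip the dense copy;
# .tocoo() is standard scipy.sparse, which scikit-learn already uses under the hood
coo = X_train.tocoo()
for i, j, v in zip(coo.row, coo.col, coo.data):
    print(i, j, v)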
# Multinomial Naive Bayes
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB().fit(X_train, y_train)
y_nb_pred = clf.predict(X_test)
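# Optional sketch: 10-fold cross-validation on the training split as a rough
# stability check (strictly, the vectorizer should be re-fit per fold, e.g. in a Pipeline)
from sklearn.model_selection import cross_val_score
scores = cross_val_score(MultinomialNB(), X_train, y_train, cv=10)
print(scores.mean(), scores.std())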
# Display the classification results
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Predictions on x_test
print(y_nb_pred.shape, y_nb_pred)
print('nb_confusion_matrix:')
# Confusion matrix: rows are true labels, columns are predicted labels
cm = confusion_matrix(y_test, y_nb_pred)
print(cm)
print('nb_classification_report:')
cr = classification_report(y_test, y_nb_pred)  # text report of the main classification metrics
print(cr)
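# A single headline number to go with the report
from sklearn.metrics import accuracy_score
print('accuracy:', accuracy_score(y_test, y_nb_pred))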