I. Boston Housing Price Prediction
1. Load the dataset
from sklearn.datasets import load_boston
import pandas as pd

boston = load_boston()          # load the Boston housing dataset
boston.keys()                   # inspect the available keys
print(boston.DESCR)             # print the dataset description
boston.data.shape               # (506, 13): 506 samples, 13 features
pd.DataFrame(boston.data)       # view the feature matrix as a DataFrame
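Note that load_boston was deprecated in scikit-learn 1.0 and removed in 1.2. If your scikit-learn no longer ships it, a minimal sketch of loading the same data directly from the original CMU StatLib source (assuming network access; this mirrors the workaround suggested in the scikit-learn deprecation notice):
import numpy as np
import pandas as pd

data_url = "http://lib.stat.cmu.edu/datasets/boston"
raw_df = pd.read_csv(data_url, sep=r"\s+", skiprows=22, header=None)
data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])   # the 13 feature columns
target = raw_df.values[1::2, 2]                                      # MEDV, the house-price target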

2. Split into training and test sets
# sklearn.cross_validation was removed; train_test_split now lives in sklearn.model_selection
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(boston.data, boston.target, test_size=0.3)
x_train.shape   # 70% of the samples go to training
y_train.shape
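Because train_test_split shuffles at random, the error numbers below will vary between runs. A small sketch (random_state=42 is an arbitrary choice) that fixes the split so the models in steps 3 and 4 are compared on identical data:
x_train, x_test, y_train, y_test = train_test_split(
    boston.data, boston.target, test_size=0.3, random_state=42)   # fixed seed: reproducible split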

3. Linear regression model: build a prediction model from the 13 features to the house price, and evaluate how well it performs.
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error   # sklearn.metrics.regression was removed; import the metrics directly

LineR = LinearRegression()               # linear regression
LineR.fit(x_train, y_train)              # fit the model on the training data
print(LineR.coef_, LineR.intercept_)     # coefficients (slopes) and intercept of the fitted equation

# evaluate the model
y_pred = LineR.predict(x_test)
print("Mean squared error:", mean_squared_error(y_test, y_pred))
print("Mean absolute error:", mean_absolute_error(y_test, y_pred))
print("Model score (R^2):", LineR.score(x_test, y_test))
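To make the printed coefficients easier to interpret, a small sketch (assuming the boston object from step 1, which carries feature_names) pairs each slope with its feature name:
import pandas as pd

coef_table = pd.Series(LineR.coef_, index=boston.feature_names)   # one coefficient per feature
print(coef_table.sort_values())                                   # from most negative to most positive influence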

4. Polynomial regression model: build a prediction model from the 13 features to the house price, and evaluate how well it performs.
from sklearn.preprocessing import PolynomialFeatures
poly2 = PolynomialFeatures(degree=2)     # generate degree-2 polynomial features
x_poly_train = poly2.fit_transform(x_train)
x_poly_test = poly2.transform(x_test)
mlrp = LinearRegression()                # build the model on the polynomial features
mlrp.fit(x_poly_train, y_train)
y_predict2 = mlrp.predict(x_poly_test)
# evaluate the model
print("Polynomial regression model:")
print("Mean squared error:", mean_squared_error(y_test, y_predict2))
print("Mean absolute error:", mean_absolute_error(y_test, y_predict2))
print("Model score (R^2):", mlrp.score(x_poly_test, y_test))
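For intuition about why the degree-2 model is more expressive: PolynomialFeatures expands the 13 original columns into 105 columns (the bias term, the 13 linear terms, the 13 squares, and the 78 pairwise products). A quick check:
print(x_train.shape, "->", x_poly_train.shape)   # (n, 13) -> (n, 105)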

5. Compare the performance of the linear and the non-linear model, and explain why:
The polynomial (non-linear) model performs better. The relationship between the 13 features and the house price is not strictly linear, and the degree-2 polynomial features let the model fit a smooth curve rather than a straight hyperplane, so it follows the distribution of the sample points more closely: its prediction errors (MSE and MAE) are smaller and its score is higher.
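A minimal visual comparison (a sketch assuming matplotlib is installed): plot predicted against actual prices for both models; the closer the points lie to the red diagonal, the better the fit.
import matplotlib.pyplot as plt

fig, axes = plt.subplots(1, 2, figsize=(10, 4))
for ax, pred, title in [(axes[0], y_pred, "Linear"), (axes[1], y_predict2, "Polynomial, degree 2")]:
    ax.scatter(y_test, pred, s=10)                                              # predicted vs. actual
    ax.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'r--')  # perfect-prediction line
    ax.set_xlabel("actual price")
    ax.set_ylabel("predicted price")
    ax.set_title(title)
plt.tight_layout()
plt.show()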
II. Chinese Text Classification
import os
import numpy as np
import sys
from datetime import datetime
import gc
path = 'F:\\jj147'
# import the jieba library (any extra word lists we need can be added to its dictionary)
import jieba
# load the stop-word list:
with open(r'F:\stopsCN.txt', encoding='utf-8') as f:
    stopwords = f.read().split('\n')
def processing(tokens):
    # keep only alphabetic / Chinese characters
    tokens = "".join([char for char in tokens if char.isalpha()])
    # segment with jieba
    tokens = [token for token in jieba.cut(tokens, cut_all=True) if len(token) >= 2]
    # drop stop words
    tokens = " ".join([token for token in tokens if token not in stopwords])
    return tokens
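# Quick sanity check of processing() (a sketch; the sentence below is a made-up example,
# not from the corpus): punctuation and digits are dropped, jieba segments the text,
# stop words are removed, and the remaining tokens are joined with spaces.
print(processing("今天上午,北京举行了一场科技发布会!"))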
tokenList = []
targetList = []
# walk the corpus directory with os.walk, join the path pieces, and open each file
for root, dirs, files in os.walk(path):
    for f in files:
        filePath = os.path.join(root, f)
        with open(filePath, encoding='utf-8') as f:
            content = f.read()
        # the folder name is the news category label; preprocess the article body
        target = filePath.split('\\')[-2]
        targetList.append(target)
        tokenList.append(processing(content))
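# Sanity check (a small sketch, not part of the original script): how many documents
# were read and how many fall into each category folder.
from collections import Counter
print(len(tokenList), "documents loaded")
print(Counter(targetList))   # number of documents per category label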
# split into training and test sets, and weight the words with TF-IDF
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
vectorizer= TfidfVectorizer()
x_train,x_test,y_train,y_test=train_test_split(tokenList,targetList,test_size=0.2)
X_train=vectorizer.fit_transform(x_train)
X_test=vectorizer.transform(x_test)
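# A quick look at the TF-IDF matrices (sketch): rows are documents, columns are the vocabulary
# learned from the training texts; the test texts are mapped onto that same vocabulary.
print(X_train.shape, X_test.shape)               # (n_train, vocab_size), (n_test, vocab_size)
print(len(vectorizer.vocabulary_), "terms in the vocabulary")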
# build the naive Bayes model
from sklearn.naive_bayes import MultinomialNB  # multinomial NB works with discrete features, e.g. word counts / TF-IDF weights in text classification
mulp = MultinomialNB()
mulp_NB = mulp.fit(X_train, y_train)
# predict with the trained model
y_predict = mulp.predict(X_test)
# import classification_report from sklearn.metrics for a per-class performance report
from sklearn.metrics import classification_report
print('Model accuracy:', mulp.score(X_test, y_test))
print('classification_report:\n',classification_report(y_test, y_predict))
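# Optional follow-up (sketch): a confusion matrix shows which categories the classifier
# mixes up; rows are true labels, columns are predicted labels, in the order given by `labels`.
from sklearn.metrics import confusion_matrix
labels = sorted(set(y_test))
print(labels)
print(confusion_matrix(y_test, y_predict, labels=labels))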
