Blending集成学习算法及决策边界画法
算法原理
blending集成学习算法即简化的stacking算法,其具体操作流程为:
- 将数据划分为训练集和测试集(test_set),其中训练集需要再次划分为训练集(train_set)和验证集(val_set);
- 创建第一层的多个模型,这些模型可以是同质的也可以是异质的;
- 使用train_set训练步骤2中的多个模型,然后用训练好的模型预测val_set和test_set,得到val_predict、test_predict1;
- 创建第二层的模型,使用val_predict作为训练集训练第二层的模型;
- 使用第二层训练好的模型对第二层测试集test_predict1进行预测,该结果为整个测试集的结果。
优点:实现简单粗暴,没有太多的理论分析。
缺点:blending只使用了一部分数据集作为留出集进行验证,也就是只能用上数据中的一部分,实际上这对数据来说是很奢侈浪费的。
代码实现
# -*- coding: utf-8 -*-
"""
Blending ensemble demo (blobs dataset): imports and plotting setup.

Created on Tue May 11 15:47:59 2021
@author: 510009
"""
# Load the required packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use("ggplot")
# BUG FIX: "%matplotlib inline" is IPython/Jupyter magic and raises a
# SyntaxError in a plain .py script.  Keep it as a comment so the file
# runs both inside and outside a notebook.
# %matplotlib inline
import seaborn as sns
# Generate a two-cluster toy dataset for a binary classification task.
from sklearn import datasets
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split

# 10000 points around 2 centers; fixed seed keeps runs repeatable.
data, target = make_blobs(n_samples=10000, centers=2,
                          random_state=1, cluster_std=1.0)
## Hold out 20% of the data as the final test set ...
X_train1, X_test, y_train1, y_test = train_test_split(
    data, target, test_size=0.2, random_state=1)
## ... then split the remainder into an inner training set (70%)
## and a validation set (30%) for the blending meta-model.
X_train, X_val, y_train, y_val = train_test_split(
    X_train1, y_train1, test_size=0.3, random_state=1)

# Report the resulting array shapes (same order/text as before).
for label, arr in [("training X", X_train), ("training y", y_train),
                   ("test X", X_test), ("test y", y_test),
                   ("validation X", X_val), ("validation y", y_val)]:
    print(f"The shape of {label}:", arr.shape)
# First-layer (base) classifiers: a heterogeneous mix of models.
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
clfs = [
    SVC(probability=True),  # probability=True enables predict_proba below
    RandomForestClassifier(n_estimators=5, n_jobs=-1, criterion='gini'),
    KNeighborsClassifier(),
]
# Second-layer (meta) classifier.
# BUG FIX: the original used LinearRegression, which is a *regressor* —
# the later cross_val_score would then report R^2 against the 0/1 labels
# instead of classification accuracy.  LogisticRegression is the standard
# meta-learner for a blending classifier and keeps the `lr` interface
# (fit / score / cross_val_score) unchanged.
from sklearn.linear_model import LinearRegression, LogisticRegression
lr = LogisticRegression()
# Build the first-layer meta-features for the validation and test sets:
# one column per base model, holding its predicted P(class 1).
val_cols, test_cols = [], []
for clf in clfs:
    clf.fit(X_train, y_train)                        # train on the inner split
    val_cols.append(clf.predict_proba(X_val)[:, 1])  # validation meta-feature
    test_cols.append(clf.predict_proba(X_test)[:, 1])  # test meta-feature
val_features = np.column_stack(val_cols)    # (n_val, n_base_models)
test_features = np.column_stack(test_cols)  # (n_test, n_base_models)
# Train the second-layer model on the validation meta-features.
lr.fit(val_features, y_val)
# Score the second-layer model on the test meta-features.
from sklearn.model_selection import cross_val_score
cross_val_score(lr, test_features, y_test, cv=5)
iris实例
# -*- coding: utf-8 -*-
"""
Created on Tue May 11 17:39:37 2021
@author: 510009
"""
# 导入工具包
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
# Load the iris dataset.
iris = datasets.load_iris()
X = iris.data
y = iris.target
# BUG FIX: the rest of this script is strictly binary — it feeds only
# predict_proba(...)[:, 1] (the probability of class 1) into the
# second-layer model.  With all three iris classes kept, that single
# column cannot represent the 3-class target.  Keep only classes 0 and 1
# so the blending pipeline is coherent (the commented-out scatter for
# class 2 below suggests binary was the intent).
X = X[y < 2]
y = y[y < 2]
# Visualize the (now binary) dataset.
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red')
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue')
# plt.scatter(X[y == 2, 0], X[y == 2, 1], color='green')
plt.show()
# Hold out 20% as the test set, then carve 30% of the remainder
# off as the validation set for the blending meta-model.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=1)
X_train, X_val, y_train, y_val = train_test_split(
    X_train, y_train, test_size=0.3, random_state=1)
# First-layer (base) classifiers.
clfs = [
    SVC(probability=True),  # probability=True enables predict_proba below
    RandomForestClassifier(n_estimators=5, n_jobs=-1, criterion='gini'),
    KNeighborsClassifier(),
]
# Second-layer (meta) classifier.
# BUG FIX: the original used LinearRegression, a *regressor* — the later
# cross_val_score / lr.score calls would then report R^2 against 0/1
# labels instead of classification accuracy.  LogisticRegression is the
# appropriate meta-learner and keeps the `lr` interface unchanged.
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
# Build the first-layer meta-features: one P(class 1) column per base model.
val_cols, test_cols = [], []
for clf in clfs:
    clf.fit(X_train, y_train)                         # train on the inner split
    val_cols.append(clf.predict_proba(X_val)[:, 1])   # validation meta-feature
    test_cols.append(clf.predict_proba(X_test)[:, 1])  # test meta-feature
val_features = np.column_stack(val_cols)    # (n_val, n_base_models)
test_features = np.column_stack(test_cols)  # (n_test, n_base_models)
# Train the second-layer model on the validation meta-features.
lr.fit(val_features, y_val)
# Report the cross-validated score on the test meta-features, and the
# second layer's score on its own training (validation) meta-features.
scores = cross_val_score(lr, test_features, y_test, cv=5)
print(scores)
print(lr.score(val_features, y_val))

浙公网安备 33010602011771号