# Import the required libraries
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
import seaborn as sns
# (1) Load the iris dataset and hold out 1/3 of the samples as a test set
iris = load_iris()
X = iris.data
y = iris.target
# Hold-out method: reserve 1/3 of the samples as the test set (stratified by class)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/3, random_state=42, stratify=y)
# (2) Train a decision tree classifier with pruning on the training set
# Pre-pruning constrains tree growth while the tree is built (e.g. max_depth, min_samples_split, min_samples_leaf)
# Post-pruning removes branches after the full tree is grown; scikit-learn implements it as
# cost-complexity pruning (ccp_alpha), sketched below the pre-pruned model
clf_pre_pruning = DecisionTreeClassifier(max_depth=3, random_state=42)
clf_pre_pruning.fit(X_train, y_train)
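# --- Post-pruning sketch (addition, not part of the original code) ---
# scikit-learn performs post-pruning via minimal cost-complexity pruning
# (the ccp_alpha parameter). The alpha below is an illustrative value taken
# from the middle of the pruning path; in practice it should be selected
# with cross-validation.
path = DecisionTreeClassifier(random_state=42).cost_complexity_pruning_path(X_train, y_train)
ccp_alpha_mid = path.ccp_alphas[len(path.ccp_alphas) // 2]  # illustrative choice, hypothetical name
clf_post_pruning = DecisionTreeClassifier(ccp_alpha=ccp_alpha_mid, random_state=42)
clf_post_pruning.fit(X_train, y_train)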
# (3) Evaluate and select the model with 5-fold cross-validation
scores = cross_validate(clf_pre_pruning, X_train, y_train, cv=5,
scoring=['accuracy', 'precision_macro', 'recall_macro', 'f1_macro'])
# Print the cross-validation results
print("5-fold cross-validation results:")
print(f"Accuracy: {scores['test_accuracy'].mean():.4f} ± {scores['test_accuracy'].std():.4f}")
print(f"Precision (macro): {scores['test_precision_macro'].mean():.4f} ± {scores['test_precision_macro'].std():.4f}")
print(f"Recall (macro): {scores['test_recall_macro'].mean():.4f} ± {scores['test_recall_macro'].std():.4f}")
print(f"F1 (macro): {scores['test_f1_macro'].mean():.4f} ± {scores['test_f1_macro'].std():.4f}")
# (4) Evaluate the model on the test set
y_pred = clf_pre_pruning.predict(X_test)
# Compute performance metrics on the test set
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred, average='macro')
recall = recall_score(y_test, y_pred, average='macro')
f1 = f1_score(y_test, y_pred, average='macro')
# Print test-set performance metrics
print("Test set performance metrics:")
print(f"Accuracy: {accuracy:.4f}")
print(f"Precision (macro): {precision:.4f}")
print(f"Recall (macro): {recall:.4f}")
print(f"F1 (macro): {f1:.4f}")
# Plot the confusion matrix for the test-set predictions
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=iris.target_names, yticklabels=iris.target_names)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()
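# --- Tree visualization sketch (optional addition, not in the original code) ---
# plot_tree draws the structure of the pre-pruned tree (depth <= 3), which
# makes the effect of pre-pruning easy to inspect.
from sklearn.tree import plot_tree
plt.figure(figsize=(10, 6))
plot_tree(clf_pre_pruning, feature_names=iris.feature_names,
          class_names=list(iris.target_names), filled=True)
plt.show()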