sklearn linear_model, svm, tree, naive_bayes, ensemble on the iris dataset

A quick comparison of sklearn estimators from linear_model, svm, tree, naive_bayes, and ensemble on the iris dataset, using only the petal length and petal width features (columns 2 and 3 of iris.data).


In [15]:
from sklearn import datasets
import numpy as np
from sklearn.model_selection import train_test_split

# Load the iris dataset and keep only petal length and petal width (columns 2 and 3)
iris = datasets.load_iris()
# print(iris.data)
X = iris.data[:, [2, 3]]
y = iris.target

# Hold out 30% of the samples as a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
print(X_train, y_train, X_test, y_test)
 
(105, 2) (105,) (45, 2) (45,)
[[ 3.5  1. ]
 [ 5.5  1.8]
 [ 5.7  2.5]
 [ 5.   1.5]
 [ 5.8  1.8]
 [ 3.9  1.1]
 [ 6.1  2.3]
 [ 4.7  1.6]
 [ 3.8  1.1]
 [ 4.9  1.8]
 [ 5.1  1.5]
 [ 4.5  1.7]
 [ 5.   1.9]
 [ 4.7  1.4]
 [ 5.2  2. ]
 [ 4.5  1.6]
 [ 1.6  0.2]
 [ 5.1  1.9]
 [ 4.2  1.3]
 [ 3.6  1.3]
 [ 4.   1.3]
 [ 4.6  1.4]
 [ 6.   1.8]
 [ 1.5  0.2]
 [ 1.1  0.1]
 [ 5.3  1.9]
 [ 4.2  1.2]
 [ 1.7  0.2]
 [ 1.5  0.4]
 [ 4.9  1.5]
 [ 1.5  0.2]
 [ 5.1  1.8]
 [ 3.   1.1]
 [ 1.4  0.3]
 [ 4.5  1.5]
 [ 6.1  2.5]
 [ 4.2  1.3]
 [ 1.4  0.1]
 [ 5.9  2.1]
 [ 5.7  2.3]
 [ 5.8  2.2]
 [ 5.6  2.1]
 [ 1.6  0.2]
 [ 1.6  0.2]
 [ 5.1  2. ]
 [ 5.7  2.1]
 [ 1.3  0.3]
 [ 5.4  2.3]
 [ 1.4  0.2]
 [ 5.   2. ]
 [ 5.4  2.1]
 [ 1.3  0.2]
 [ 1.4  0.2]
 [ 5.8  1.6]
 [ 1.4  0.3]
 [ 1.3  0.2]
 [ 1.7  0.4]
 [ 4.   1.3]
 [ 5.9  2.3]
 [ 6.6  2.1]
 [ 1.4  0.2]
 [ 1.5  0.1]
 [ 1.4  0.2]
 [ 4.5  1.3]
 [ 4.4  1.4]
 [ 1.2  0.2]
 [ 1.7  0.5]
 [ 4.3  1.3]
 [ 1.5  0.4]
 [ 6.9  2.3]
 [ 3.3  1. ]
 [ 6.4  2. ]
 [ 4.4  1.4]
 [ 1.5  0.1]
 [ 4.8  1.8]
 [ 1.2  0.2]
 [ 6.7  2. ]
 [ 1.5  0.3]
 [ 1.6  0.2]
 [ 6.1  1.9]
 [ 1.4  0.2]
 [ 5.6  2.4]
 [ 4.1  1.3]
 [ 3.9  1.2]
 [ 3.5  1. ]
 [ 5.3  2.3]
 [ 5.2  2.3]
 [ 4.9  1.5]
 [ 5.   1.7]
 [ 1.6  0.2]
 [ 3.7  1. ]
 [ 5.6  2.4]
 [ 5.1  1.9]
 [ 1.5  0.2]
 [ 4.6  1.3]
 [ 4.1  1.3]
 [ 4.8  1.8]
 [ 4.4  1.3]
 [ 1.3  0.2]
 [ 1.5  0.4]
 [ 1.5  0.1]
 [ 5.6  1.8]
 [ 4.1  1. ]
 [ 6.7  2.2]
 [ 1.4  0.2]] [1 2 2 2 2 1 2 1 1 2 2 2 2 1 2 1 0 2 1 1 1 1 2 0 0 2 1 0 0 1 0 2 1 0 1 2 1
 0 2 2 2 2 0 0 2 2 0 2 0 2 2 0 0 2 0 0 0 1 2 2 0 0 0 1 1 0 0 1 0 2 1 2 1 0
 2 0 2 0 0 2 0 2 1 1 1 2 2 1 1 0 1 2 2 0 1 1 1 1 0 0 0 2 1 2 0] [[ 5.1  2.4]
 [ 4.   1. ]
 [ 1.4  0.2]
 [ 6.3  1.8]
 [ 1.5  0.2]
 [ 6.   2.5]
 [ 1.3  0.3]
 [ 4.7  1.5]
 [ 4.8  1.4]
 [ 4.   1.3]
 [ 5.6  1.4]
 [ 4.5  1.5]
 [ 4.7  1.2]
 [ 4.6  1.5]
 [ 4.7  1.4]
 [ 1.5  0.1]
 [ 4.5  1.5]
 [ 4.4  1.2]
 [ 1.4  0.3]
 [ 1.3  0.4]
 [ 4.9  2. ]
 [ 4.5  1.5]
 [ 1.9  0.2]
 [ 1.4  0.2]
 [ 4.8  1.8]
 [ 1.   0.2]
 [ 1.9  0.4]
 [ 4.3  1.3]
 [ 3.3  1. ]
 [ 1.6  0.4]
 [ 5.5  1.8]
 [ 4.5  1.5]
 [ 1.5  0.2]
 [ 4.9  1.8]
 [ 5.6  2.2]
 [ 3.9  1.4]
 [ 1.7  0.3]
 [ 5.1  1.6]
 [ 4.2  1.5]
 [ 4.   1.2]
 [ 5.5  2.1]
 [ 1.3  0.2]
 [ 5.1  2.3]
 [ 1.6  0.6]
 [ 1.5  0.2]] [2 1 0 2 0 2 0 1 1 1 2 1 1 1 1 0 1 1 0 0 2 1 0 0 2 0 0 1 1 0 2 1 0 2 2 1 0
 1 1 1 2 0 2 0 0]
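
The split above is purely random. Since iris is a balanced three-class problem, it can help to preserve the class proportions in both splits; a minimal sketch, where the stratify argument is the only addition to the code above:

from sklearn import datasets
from sklearn.model_selection import train_test_split

iris = datasets.load_iris()
X = iris.data[:, [2, 3]]   # petal length, petal width
y = iris.target

# stratify=y keeps the three classes in the same proportions in train and test
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0, stratify=y)
print(X_train.shape, X_test.shape)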
In [61]:
from sklearn.model_selection import train_test_split
from sklearn import datasets
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
import matplotlib.pyplot as plt

# Same data preparation as above: petal length/width only, 70/30 split
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)

# Classifiers: score() reports accuracy on the test set
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print(logreg.score(X_test, y_test))

# Regressors: score() reports R^2, so these numbers are not directly
# comparable with the classifier accuracies
linear = LinearRegression()
linear.fit(X_train, y_train)
print(linear.score(X_test, y_test))

decisiont = DecisionTreeRegressor()
decisiont.fit(X_train, y_train)
print(decisiont.score(X_test, y_test))
res = decisiont.predict([[3.2, 1]])   # prediction for a single new sample
print(res)

nb = GaussianNB()
nb.fit(X_train, y_train)
print(nb.score(X_test, y_test))
print(nb.predict([[3.2, 1]]))

rd = RandomForestClassifier()
rd.fit(X_train, y_train)
print(rd.score(X_test, y_test))

rr = RandomForestRegressor()
rr.fit(X_train, y_train)
print(rr.score(X_test, y_test))

svm = SVC()
svm.fit(X_train, y_train)
print(svm.score(X_test, y_test))

svr = SVR()
svr.fit(X_train, y_train)
print(svr.score(X_test, y_test))

# Plot the two test-set feature columns against sample index
plt.plot(X_test)
plt.show()
 
(105, 2) (105,) (45, 2) (45,)
0.688888888889
0.906552111693
0.952731092437
[ 1.]
0.977777777778
[1]
0.955555555556
0.956386554622
0.977777777778
0.948460175898
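
The regressor scores above are R² values while the classifier scores are accuracies, so the numbers are not directly comparable. For a like-for-like comparison of just the classifiers, a minimal sketch using cross_val_score (the cv=5 choice is an assumption, not part of the original run):

from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier

iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target

classifiers = {
    'LogisticRegression': LogisticRegression(),
    'SVC': SVC(),
    'GaussianNB': GaussianNB(),
    'RandomForestClassifier': RandomForestClassifier(),
}

# cross_val_score returns one accuracy per fold; report the mean
for name, clf in classifiers.items():
    scores = cross_val_score(clf, X, y, cv=5)
    print(name, scores.mean())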
 
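The plt.plot(X_test) call above draws each feature column as a line over sample index, which is hard to interpret. A minimal alternative sketch, reusing X_test and y_test from the cell above and scattering the test points coloured by their true labels (the axis labels are assumptions based on the chosen columns):

import matplotlib.pyplot as plt

plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test)
plt.xlabel('petal length (cm)')
plt.ylabel('petal width (cm)')
plt.show()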