Complete Machine Learning Project Code
Packages and Functions
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
import re, pickle, random, jieba, gensim
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras import Input, Model, Sequential
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.utils import to_categorical, plot_model, Sequence
from tensorflow.keras.layers import Embedding, Dense, Conv1D, GlobalMaxPooling1D, Concatenate, Dropout
from tensorflow.keras.losses import SparseCategoricalCrossentropy, CategoricalCrossentropy
def dump(obj, path):
    with open(path, "wb") as f:
        pickle.dump(obj, f)

def load(path):
    with open(path, "rb") as f:
        return pickle.load(f)
def vec(X, Y, bs):
    # Yield successive batches of size bs (note the i*bs offset; the original slice reused i as the start index)
    for i in range(len(X) // bs):
        yield np.asarray(X[i*bs:(i+1)*bs]), np.asarray(Y[i*bs:(i+1)*bs])
import math

class Generator(Sequence):
    def __init__(self, x, y, b_size):
        self.x, self.y = x, y
        self.batch_size = b_size

    def __len__(self):
        # Number of batches per epoch (ceil so a partial last batch is included,
        # without yielding an empty batch when the length divides evenly)
        return math.ceil(len(self.y) / self.batch_size)

    def __getitem__(self, idx):
        b_x = self.x[idx*self.batch_size:(idx+1)*self.batch_size]
        b_y = self.y[idx*self.batch_size:(idx+1)*self.batch_size]
        return np.array(b_x), np.array(b_y)

    def on_epoch_end(self):
        pass
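A minimal usage sketch, assuming hypothetical x_train/y_train arrays of equal length and an already-compiled Keras model: passing the Sequence to fit lets Keras pull batches on demand.
# Hypothetical arrays; any indexable pair of equal length works
train_gen = Generator(x_train, y_train, b_size=64)
model.fit(train_gen, epochs=5)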
def plot_loss_acc(history, model_name, accuracy):
    # 'agg' is a non-interactive backend; uncomment the savefig lines to persist the figures
    plt.switch_backend('agg')
    fig1 = plt.figure()
    # plt.subplot(1,2,1)
    plt.plot(history.history['loss'], 'r', linewidth=3.0)
    plt.plot(history.history['val_loss'], 'b', linewidth=3.0)
    plt.legend(['Training Loss', 'Validation Loss'], fontsize=18)
    plt.xlabel('Epochs', fontsize=16)
    plt.ylabel('Loss', fontsize=16)
    plt.title('Loss Curves: ' + model_name, fontsize=16)
    # fig1.savefig(prepath + model_name + '_loss.png')
    fig2 = plt.figure()
    # plt.subplot(1,2,2)
    plt.plot(history.history[accuracy], 'r', linewidth=3.0)
    plt.plot(history.history['val_' + accuracy], 'b', linewidth=3.0)
    plt.legend(['Training Accuracy', 'Validation Accuracy'], fontsize=18)
    plt.xlabel('Epochs', fontsize=16)
    plt.ylabel('Accuracy', fontsize=16)
    plt.title('Accuracy Curves: ' + model_name, fontsize=16)
    # fig2.savefig(prepath + model_name + '_accuracy.png')
    plt.show()
# GPU support
import tensorflow as tf
print(tf.config.list_physical_devices('GPU'))
print(tf.test.is_gpu_available())  # deprecated in newer TF; prefer list_physical_devices('GPU')
import torch
print(torch.cuda.is_available())
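On a GPU machine it is often useful to enable memory growth so TensorFlow allocates GPU memory on demand instead of grabbing it all at startup. A sketch using the standard tf.config API:
# Optional: allocate GPU memory incrementally (must run before the GPU is initialized)
for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)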
Using TfidfVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
s = ['\t', 'x', '小', 'cat', '精神 小', ' ', '空军 一号']
tv = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b",
                     max_features=10,
                     ngram_range=(1, 2),
                     stop_words=["是", "的"])
tv.fit(s)
x = tv.transform(s)
# Dense view of the sparse matrix
print(x.todense())
# Word-to-index mapping
print(tv.vocabulary_)
print(tv.get_feature_names())  # get_feature_names_out() in newer scikit-learn
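The TF-IDF matrix can be fed straight into any scikit-learn estimator. A minimal sketch with LogisticRegression and hypothetical labels y for the toy corpus above:
from sklearn.linear_model import LogisticRegression
# Hypothetical labels, purely for illustration
y = [0, 0, 0, 1, 1, 0, 1]
clf = LogisticRegression().fit(x, y)
print(clf.predict(tv.transform(["空军 一号"])))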
A dictionary mapping words to integer indices
from tensorflow.keras.datasets import imdb
word_index = imdb.get_word_index()  # indices start at 1
# Shift all indices by 3 to make room for the special tokens
word_index = {k: (v + 3) for k, v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2  # unknown
word_index["<UNUSED>"] = 3
Building the Model
The Sequential API
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, 16),     # [batch_size, seq_len, 16]
    tf.keras.layers.GlobalAveragePooling1D(),      # [batch_size, 16]
    tf.keras.layers.Dense(16, activation='relu'),  # [batch_size, 16]
    tf.keras.layers.Dense(1, activation='sigmoid') # [batch_size, 1]
])
model.summary()  # print an overview of the network architecture
The subclassing API
class MyModel(tf.keras.models.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.embedding = tf.keras.layers.Embedding(vocab_size, 16)
        self.g_avg_pool = tf.keras.layers.GlobalAveragePooling1D()
        self.d1 = tf.keras.layers.Dense(16, activation="relu")
        self.d2 = tf.keras.layers.Dense(1, activation="sigmoid")

    def call(self, inputs, training=None, mask=None):
        # inputs: [batch_size, seq_len]
        x = self.embedding(inputs)  # [batch_size, seq_len, 16]
        x = self.g_avg_pool(x)      # [batch_size, 16]
        x = self.d1(x)              # [batch_size, 16]
        x = self.d2(x)              # [batch_size, 1]
        return x

model = MyModel()
Model Training and Evaluation
# Configure the training parameters
# model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=[tf.keras.metrics.BinaryAccuracy()])
# Train the model
history = model.fit(train_data, train_labels, epochs=40, batch_size=512)
# Evaluate on the test set
model.evaluate(test_data, test_labels, verbose=2)
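If fit is given validation data, the plot_loss_acc helper defined earlier can plot the curves. A sketch; the metric key binary_accuracy matches the BinaryAccuracy metric compiled above, and the model name is arbitrary:
# history = model.fit(train_data, train_labels, epochs=40, batch_size=512, validation_split=0.2)
plot_loss_acc(history, "imdb_baseline", "binary_accuracy")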
Saving and Loading Models
The checkpoint format
The checkpoint format saves only the model's weights, not its architecture. The advantage is that the saved files are small; the drawback is that the checkpoint is hard to use if you don't know the architecture. Because only weights are stored, you must first rebuild (and compile) the model before loading them.
# Save the weights
model.save_weights("checkpoint/my_checkpoint")
# Load the weights: rebuild the model first
new_model = create_model_by_subclass()
# The model must be compiled before prediction/evaluation
new_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
new_model.load_weights("checkpoint/my_checkpoint")
# Evaluate on the test set
new_model.evaluate(test_data, test_labels, verbose=2)
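Weights can also be checkpointed automatically during training via the ModelCheckpoint callback imported at the top. A sketch, assuming validation data is available to monitor:
# Keep only the best weights seen so far, judged by validation loss
ckpt_cb = ModelCheckpoint("checkpoint/my_checkpoint", save_weights_only=True,
                          save_best_only=True, monitor="val_loss")
model.fit(train_data, train_labels, epochs=40, batch_size=512,
          validation_split=0.2, callbacks=[ckpt_cb])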
The HDF5 format
The HDF5 format saves both the weights and the architecture, but it currently only supports models built with the Sequential (or functional) API; subclassed models cannot be saved as HDF5. Because the architecture is preserved, a loaded model can be used immediately, which also makes it easy to port to other environments.
"""只能用于Functional model or a Sequential model,目前不能用于subclassed model,2020-06"""
# 保存模型
model.save("h5/my_model.h5")
# 加载模型
# 重新创建完全相同的模型,包括其权重和优化程序
new_model = tf.keras.models.load_model('h5/my_model.h5')
# 显示网络结构
new_model.summary()
# 评估测试集
new_model.evaluate(test_data, test_labels, verbose=2)
The SavedModel format
Like HDF5, the SavedModel format preserves the entire model, and it supports models built with any of the construction APIs. It is commonly used for inference and deployment. Unlike the previous two formats, a model loaded via tf.saved_model.load no longer behaves like a Keras Model (whether built with the Sequential or subclassing API): it has no fit or evaluate methods, but it can be called directly for prediction. This is the format typically used with TensorFlow Serving.
# Save the model
tf.saved_model.save(model, "saved_model/1")
# Load the model
new_model = tf.saved_model.load("saved_model/1")
# Predict
result = new_model(test_data)
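If calling the loaded object directly does not match your input spec, the serving signature can be invoked explicitly. A sketch; the exact input/output key names and dtype depend on the exported model and can be checked with saved_model_cli show:
# Call the default serving signature; the input tensor dtype must match the signature
infer = new_model.signatures["serving_default"]
result = infer(tf.constant(test_data))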
Model Deployment with TF Serving
Workflow
The main steps are:
- The Source creates a Loader for each model that needs to be loaded; the Loader contains all the information required to load that model.
- The Source notifies the Manager that a new model needs to be loaded.
- The Manager applies the version policy to decide which models to unload and which to load.
- Once the Manager confirms that the model to be loaded satisfies the loading policy, it tells the Loader to load the latest model.
- When a client requests results from the server, it can either specify a model version or simply use the latest one.
Installation
Official TF Serving documentation: https://www.tensorflow.org/tfx/guide/serving
Docker
docker pull tensorflow/serving
Download
git clone https://github.com/tensorflow/serving
Run
docker run -p 8501:8501 \
--mount type=bind,\
source=/tmp/tfserving/serving/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu,\
target=/models/half_plus_two \
-e MODEL_NAME=half_plus_two -t tensorflow/serving &
Verify
curl -d '{"instances": [1.0, 2.0, 5.0]}' \
-X POST http://localhost:8501/v1/models/half_plus_two:predict
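The half_plus_two model computes x/2 + 2 for each input, so the expected response is {"predictions": [2.5, 3.0, 4.5]}.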
Before Deployment
Exporting the model
import os
import shutil
import tensorflow as tf
model = tf.keras.models.load_model('./cnn_model.h5')
# Target path; remove any stale export first
export_path = './Models/CNN/1'
if os.path.exists(export_path):
    shutil.rmtree(export_path)
# Export as a SavedModel for deployment
tf.saved_model.save(model, export_path)
Inspect and test
saved_model_cli show --dir ./Models/CNN/1 --all
Feed it some sample input
saved_model_cli run --dir ./Models/CNN/1 --tag_set serve --signature_def serving_default --input_exprs 'input_1=np.random.rand(1,100)'
Deployment
from tensorflow.keras.preprocessing import sequence
import random
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.utils import to_categorical
from utils import *
import os
import json
import numpy
import requests
import jieba
# Paths and configuration
data_dir = "./processed_data"
vocab_file = "./vocab/vocab.txt"
vocab_size = 40000
# Network configuration
max_features = 40001
maxlen = 100
batch_size = 256
embedding_dims = 50
epochs = 8
print('Preprocessing and loading data...')
# Rebuild the vocabulary if it does not exist yet
if not os.path.exists(vocab_file):
    build_vocab(data_dir, vocab_file, vocab_size)
# Get the category and vocabulary id-mapping dictionaries
categories, cat_to_id = read_category()
words, word_to_id = read_vocab(vocab_file)
text = "这是该国史上最大的一次军事演习"  # sample input: "This is the country's largest military exercise in history"
text_seg = encode_sentences([jieba.lcut(text)], word_to_id)
text_input = sequence.pad_sequences(text_seg, maxlen=maxlen)
data = json.dumps({"signature_name": "serving_default",
                   "instances": text_input.reshape(1, 100).tolist()})
headers = {"content-type": "application/json"}
json_response = requests.post('http://localhost:8505/v1/models/default:predict',
                              data=data, headers=headers)
# print(json.loads(json_response.text))
print(json_response.text)
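The returned JSON can be mapped back to a category name using the categories list loaded above. A minimal sketch, assuming the model returns one probability vector per instance:
# Pick the highest-scoring class and look up its name
predictions = json.loads(json_response.text)["predictions"]
print(categories[int(numpy.argmax(predictions[0]))])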
