# Neural-network model for estimating battery aging rate (SOH)
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

path = 'SOH_Data.xlsx'
# Read the training set and min-max normalize each feature column
xTrainData = pd.read_excel(path, sheet_name = 0)
yTrainData = pd.read_excel(path, sheet_name = 1)
n1 = np.shape(xTrainData)[1]
x_data = np.array(xTrainData).astype('float32')
for i in range(n1):
    x_data[:, i] = (x_data[:, i] - np.amin(x_data[:, i]))/(np.amax(x_data[:, i]) - np.amin(x_data[:, i]))
y_data = np.array(yTrainData).astype('float32')
y_data[:] = (y_data[:] - np.amin(y_data[:]))/(np.amax(y_data[:]) - np.amin(y_data[:]))

# Read the test set and apply the same normalization
xTestData = pd.read_excel(path, sheet_name = 2)
yTestData = pd.read_excel(path, sheet_name = 3)
xTest = np.array(xTestData).astype('float32')
n2 = np.shape(xTestData)[1]
for i in range(n2):
    xTest[:, i] = (xTest[:, i] - np.amin(xTest[:, i]))/(np.amax(xTest[:, i]) - np.amin(xTest[:, i]))
yTest = np.array(yTestData).astype('float32')
yTest[:] = (yTest[:] - np.amin(yTest[:]))/(np.amax(yTest[:]) - np.amin(yTest[:]))

# Summary statistics for TensorBoard
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)                # mean
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)            # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))   # maximum
        tf.summary.scalar('min', tf.reduce_min(var))   # minimum
        tf.summary.histogram('histogram', var)         # histogram

# 5-layer network: neurons per layer (input, three hidden, output)
IHO = [12, 8, 5, 4, 1]

# Input name scope
with tf.name_scope('input'):
    # Two placeholders: 12 input features, one target value
    x = tf.placeholder(tf.float32, [None, 12], name = 'xInput')
    y = tf.placeholder(tf.float32, [None, 1], name = 'y')

# Hidden layers
with tf.name_scope('layer'):
    with tf.name_scope('weights_L1'):
        Weight_L1 = tf.Variable(tf.random_normal([12, 8]), name = 'W1')
        variable_summaries(Weight_L1)
    with tf.name_scope('bias_L1'):
        biases_L1 = tf.Variable(tf.zeros([8]), name = 'b1')
        variable_summaries(biases_L1)
    with tf.name_scope('L_1'):
        Wx_plus_b_L1 = tf.matmul(x, Weight_L1) + biases_L1
        L1 = tf.nn.tanh(Wx_plus_b_L1)

    with tf.name_scope('weights_L2'):
        Weight_L2 = tf.Variable(tf.random_normal([8, 5]), name = 'W2')
        variable_summaries(Weight_L2)
    with tf.name_scope('bias_L2'):
        biases_L2 = tf.Variable(tf.zeros([5]), name = 'b2')
        variable_summaries(biases_L2)
    with tf.name_scope('L_2'):
        Wx_plus_b_L2 = tf.matmul(L1, Weight_L2) + biases_L2
        L2 = tf.nn.tanh(Wx_plus_b_L2)

    with tf.name_scope('weights_L3'):
        Weight_L3 = tf.Variable(tf.random_normal([5, 4]), name = 'W3')
        variable_summaries(Weight_L3)
    with tf.name_scope('bias_L3'):  
        biases_L3 = tf.Variable(tf.zeros([4]), name = 'b3')
        variable_summaries(biases_L3)
    with tf.name_scope('L_3'):
        Wx_plus_b_L3 = tf.matmul(L2, Weight_L3) + biases_L3
        L3 = tf.nn.tanh(Wx_plus_b_L3)
# Output layer
    with tf.name_scope('weights_L4'):
        Weight_L4 = tf.Variable(tf.random_normal([4, 1]), name = 'W4')
        variable_summaries(Weight_L4)
    with tf.name_scope('bias_L4'):
        biases_L4 = tf.Variable(tf.zeros([1]), name = 'b4')
        variable_summaries(biases_L4)
    with tf.name_scope('prediction'):
        Wx_plus_b_L4 = tf.matmul(L3, Weight_L4) + biases_L4
        prediction = tf.nn.tanh(Wx_plus_b_L4)

# Quadratic (MSE) cost
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.square(y - prediction), name = 'loss')
    tf.summary.scalar('loss', loss)
# Train with plain gradient descent
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# Merge all summaries
merged = tf.summary.merge_all()
with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('logs/', sess.graph)
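    # View the logs with: tensorboard --logdir logs/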
    for i in range(10000):
        summary, _ = sess.run([merged, train_step], feed_dict = {x: x_data, y: y_data})
        writer.add_summary(summary, i)
        if (i + 1) % 100 == 0:
            curr_loss = sess.run(loss, feed_dict = {x: x_data, y: y_data})
            print('Iteration %d, training loss:' % (i + 1), curr_loss)
    # Predictions on the training set
    prediction_value = sess.run(prediction, feed_dict = {x: x_data})
    # Predictions on the test set
    prediction_value_test = sess.run(prediction, feed_dict = {x: xTest})
    test_loss = sess.run(loss, feed_dict = {x: xTest, y: yTest})
    print('Test loss:', test_loss)
    print(prediction_value_test)
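    # Both targets and predictions are min-max normalized; below is a minimal
    # sketch of mapping the test predictions back to the original SOH scale
    # (the names y_raw/y_min/y_max/soh_pred are illustrative, not from the
    # original script; it inverts the same transform applied to yTest above):
    y_raw = np.array(yTestData).astype('float32')
    y_min, y_max = np.amin(y_raw), np.amax(y_raw)
    soh_pred = prediction_value_test * (y_max - y_min) + y_min
    print(soh_pred[:5])    # first few de-normalized predictions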


# Variant of the model above that reads whitespace-delimited text files instead of Excel
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
np.set_printoptions(suppress=True)
np.set_printoptions(threshold=np.inf)
BATCH_SIZE = 256
with open("D:\\bs\\finall_data\\marry2.11\\train\\nc_all.txt", "r") as fa, \
     open("D:\\bs\\finall_data\\marry2.11\\train\\calipso_all.txt", "r") as fb, \
     open("D:\\bs\\finall_data\\marry2.11\\test\\nc_text.txt", "r") as fc, \
     open("D:\\bs\\finall_data\\marry2.11\\test\\calipso_text.txt", "r") as fd:

    # Read the training set: fields 2..17 of each line are the 16 input features
    list_x = []
    for line in fa.readlines():
        for field in line.split(" ")[2:18]:
            list_x.append(float(field))

    x_data = np.mat(list_x).reshape(-1, 16)
    # Min-max normalize each of the 16 feature columns
    for i in range(16):
        x_data[:, i] = (x_data[:, i] - np.amin(x_data[:, i])) / (np.amax(x_data[:, i]) - np.amin(x_data[:, i]))


    list_y = []
    for line in fb.readlines():
        # The third field is the raw target; squash it into (0, 1] via 1/(1 + t)
        raw = line.split("   ")[2].split(" ")[0]
        list_y.append(1 / (1 + float(raw)))
    y_data = np.mat(list_y).reshape(-1, 1)

    # Read the test set and normalize
    list_t_x = []
    for line in fc.readlines():
        for field in line.split(" ")[2:18]:
            list_t_x.append(float(field))
    xTest = np.mat(list_t_x).reshape(-1, 16)    # shape: (1598, 16)
    # Min-max normalize each feature column of the test set
    for i in range(16):
        xTest[:, i] = (xTest[:, i] - np.amin(xTest[:, i]))/(np.amax(xTest[:, i]) - np.amin(xTest[:, i]))

    list_t_y = []
    for line in fd.readlines():
        raw = line.split("   ")[2].split(" ")[0]
        list_t_y.append(1 / (1 + float(raw)))
    yTest = np.mat(list_t_y).reshape(-1, 1)


    # Summary statistics for TensorBoard
    def variable_summaries(var):
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean)                # mean
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev', stddev)            # standard deviation
            tf.summary.scalar('max', tf.reduce_max(var))   # maximum
            tf.summary.scalar('min', tf.reduce_min(var))   # minimum
            tf.summary.histogram('histogram', var)         # histogram

    # 5-layer network: neurons per layer (input, three hidden, output)
    IHO = [16, 8, 5, 4, 1]

    # Input name scope
    with tf.name_scope('input'):
        # Two placeholders: 16 input features, one target value
        x = tf.placeholder(tf.float32, [None, 16], name = 'xInput')
        y = tf.placeholder(tf.float32, [None, 1], name = 'y')

    # Hidden layers
    with tf.name_scope('layer'):
        with tf.name_scope('weights_L1'):
            Weight_L1 = tf.Variable(tf.random_normal([16, 8]), name = 'W1')
            variable_summaries(Weight_L1)
        with tf.name_scope('bias_L1'):
            biases_L1 = tf.Variable(tf.zeros([8]), name = 'b1')
            variable_summaries(biases_L1)
        with tf.name_scope('L_1'):
            Wx_plus_b_L1 = tf.matmul(x, Weight_L1) + biases_L1
            L1 = tf.nn.sigmoid(Wx_plus_b_L1)

        with tf.name_scope('weights_L2'):
            Weight_L2 = tf.Variable(tf.random_normal([8, 5]), name = 'W2')
            variable_summaries(Weight_L2)
        with tf.name_scope('bias_L2'):
            biases_L2 = tf.Variable(tf.zeros([5]), name = 'b2')
            variable_summaries(biases_L2)
        with tf.name_scope('L_2'):
            Wx_plus_b_L2 = tf.matmul(L1, Weight_L2) + biases_L2
            L2 = tf.nn.sigmoid(Wx_plus_b_L2)

        with tf.name_scope('weights_L3'):
            Weight_L3 = tf.Variable(tf.random_normal([5, 4]), name = 'W3')
            variable_summaries(Weight_L3)
        with tf.name_scope('bias_L3'):
            biases_L3 = tf.Variable(tf.zeros([4]), name = 'b3')
            variable_summaries(biases_L3)
        with tf.name_scope('L_3'):
            Wx_plus_b_L3 = tf.matmul(L2, Weight_L3) + biases_L3
            L3 = tf.nn.sigmoid(Wx_plus_b_L3)
    # Output layer
        with tf.name_scope('weights_L4'):
            Weight_L4 = tf.Variable(tf.random_normal([4, 1]), name = 'W4')
            variable_summaries(Weight_L4)
        with tf.name_scope('bias_L4'):
            biases_L4 = tf.Variable(tf.zeros([1]), name = 'b4')
            variable_summaries(biases_L4)
        with tf.name_scope('prediction'):
            Wx_plus_b_L4 = tf.matmul(L3, Weight_L4) + biases_L4
            prediction = tf.nn.sigmoid(Wx_plus_b_L4)
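            # sigmoid keeps predictions in (0, 1), matching the 1/(1 + t) target transform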

    # Quadratic (MSE) cost
    with tf.name_scope('loss'):
        loss = tf.reduce_mean(tf.square(y - prediction), name = 'loss')
        tf.summary.scalar('loss', loss)
    # Train with plain gradient descent
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

    # Merge all summaries
    merged = tf.summary.merge_all()
    with tf.Session() as sess:
        # Initialize variables
        sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter('logs/', sess.graph)
        for i in range(1000):
            summary, _ = sess.run([merged, train_step], feed_dict = {x: x_data, y: y_data})
            writer.add_summary(summary, i)
            if (i + 1) % 100 == 0:
                curr_loss = sess.run(loss, feed_dict = {x: x_data, y: y_data})
                print('Iteration %d, training loss:' % (i + 1), curr_loss)
        # Predictions on the training set
        prediction_value = sess.run(prediction, feed_dict = {x: x_data})
        # Predictions on the test set
        prediction_value_test = sess.run(prediction, feed_dict = {x: xTest})
        test_loss = sess.run(loss, feed_dict = {x: xTest, y: yTest})
        print('Test loss:', test_loss)
        print(prediction_value_test)
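        # matplotlib is imported above but never used; below is a minimal sketch
        # of a predicted-vs-actual plot for the test set (an illustrative
        # addition, not part of the original script):
        plt.plot(yTest, label = 'actual')
        plt.plot(prediction_value_test, label = 'predicted')
        plt.xlabel('sample')
        plt.ylabel('normalized target')
        plt.legend()
        plt.show()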


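# Keras MNIST benchmark: three multilayer perceptrons compared (dropout vs. no dropout vs. a single hidden layer)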
from __future__ import print_function

import numpy as np
np.random.seed(1337)


from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.utils import np_utils
from keras.optimizers import SGD


batch_size = 128    # samples per gradient update
nb_classes = 10     # ten digit classes
nb_epoch = 20       # training epochs

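# Load MNIST: 60,000 training and 10,000 test images of 28x28 grayscale digits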
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Flatten the 28x28 images to 784-dim vectors and scale pixels to [0, 1]
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')



# One-hot encode the labels
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

# Model 1: two 256-unit hidden layers with 20% dropout
model1 = Sequential()

model1.add(Dense(256, activation='relu', input_dim=784))
model1.add(Dropout(0.2))
model1.add(Dense(256, activation='relu'))
model1.add(Dropout(0.2))
model1.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model1.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

history1 = model1.fit(X_train, Y_train,
                    batch_size = batch_size,
                    epochs = nb_epoch,
                    verbose = 2,
                    validation_data = (X_test, Y_test))

# Model 2: the same two hidden layers, but without dropout
model2 = Sequential()
model2.add(Dense(256, activation='relu', input_dim=784))
model2.add(Dense(256, activation='relu'))
model2.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model2.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

history2 = model2.fit(X_train, Y_train,
                    batch_size = batch_size,
                    epochs = nb_epoch,
                    verbose = 2,
                    validation_data = (X_test, Y_test))

# Model 3: a single hidden layer, no dropout
model3 = Sequential()
model3.add(Dense(256, activation='relu', input_dim=784))
model3.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model3.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

history3 = model3.fit(X_train, Y_train,
                    batch_size = batch_size,
                    epochs = nb_epoch,
                    verbose = 2,
                    validation_data = (X_test, Y_test))


import matplotlib.pyplot as plt
# list all metrics recorded during training
print(history1.history.keys())
# summarize history for accuracy
plt.plot(history1.history['acc'])
plt.plot(history1.history['val_acc'])
plt.title('model1 accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history2.history['acc'])
plt.plot(history2.history['val_acc'])
plt.title('model2 accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history3.history['acc'])
plt.plot(history3.history['val_acc'])
plt.title('model3 accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history1.history['val_acc'])
plt.plot(history2.history['val_acc'])
plt.plot(history3.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['model1', 'model2', 'model3'], loc='upper left')
plt.show()



# summarize history for loss
plt.plot(history1.history['loss'])
plt.plot(history1.history['val_loss'])
plt.title('model1 loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history2.history['loss'])
plt.plot(history2.history['val_loss'])
plt.title('model2 loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history3.history['loss'])
plt.plot(history3.history['val_loss'])
plt.title('model3 loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
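
# As a numeric companion to the plots, a minimal sketch that prints each
# model's best validation accuracy (assumes the history1/2/3 objects above;
# this version of Keras records validation accuracy under the 'val_acc' key):
for name, h in [('model1', history1), ('model2', history2), ('model3', history3)]:
    print(name, 'best val_acc:', max(h.history['val_acc']))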

