import tensorflow as tf


class MyRnn(tf.keras.Model):
    def __init__(self, units):
        super(MyRnn, self).__init__()
        # Per-frame CNN feature extractor: three Conv/BN/pool stages, then Flatten.
        self.mycnn = tf.keras.Sequential([
            tf.keras.layers.Conv2D(12, kernel_size=[3, 3], activation=tf.nn.relu),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.MaxPool2D(pool_size=[3, 3], strides=3, padding='same'),
            tf.keras.layers.Conv2D(24, kernel_size=[4, 4], activation=tf.nn.relu),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.MaxPool2D(pool_size=[3, 3], strides=3, padding='same'),
            tf.keras.layers.Conv2D(48, kernel_size=[3, 3], activation=tf.nn.relu),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.AveragePooling2D(pool_size=[8, 8], strides=8, padding='same'),
            tf.keras.layers.Flatten()
        ])
        # Build the TimeDistributed wrapper once here rather than on every
        # forward pass (recreating it in call() works but is wasteful).
        self.timedist = tf.keras.layers.TimeDistributed(self.mycnn)
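        # Shape trace through self.mycnn (a sketch assuming 100x100
        # single-channel frames; the original does not state the frame size):
        #   conv 3x3 'valid':   100 -> 98
        #   maxpool 3/3 'same':  98 -> 33
        #   conv 4x4 'valid':    33 -> 30
        #   maxpool 3/3 'same':  30 -> 10
        #   conv 3x3 'valid':    10 -> 8
        #   avgpool 8/8 'same':   8 -> 1
        # Flatten then yields 1 * 1 * 48 = 48 features per frame.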
        # [b, 80, 100] -> h_dim: units (e.g. 64)
        self.units = units
        # [b, 80, 100] => [b, 64]
        # Alternative kept from the original: manually wired LSTM cells.
        # self.rnn_cell0 = tf.keras.layers.LSTMCell(units, dropout=0.5)
        # self.rnn_cell1 = tf.keras.layers.LSTMCell(units, dropout=0.5)
        self.myrnn = tf.keras.Sequential([
            tf.keras.layers.LSTM(units, return_sequences=True, unroll=True),
            tf.keras.layers.LSTM(units, return_sequences=True, unroll=True),
            tf.keras.layers.LSTM(units, unroll=True),
        ])
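        # A sketch (not from the original) of the commented-out LSTMCell idea
        # above: stack the two cells and wrap them in a generic RNN layer.
        # self.myrnn = tf.keras.layers.RNN(
        #     tf.keras.layers.StackedRNNCells([
        #         tf.keras.layers.LSTMCell(units, dropout=0.5),
        #         tf.keras.layers.LSTMCell(units, dropout=0.5),
        #     ]))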
        # FC head: [b, units] => [b, 128] => [b, 128] => [b, 1]
        self.myDense = tf.keras.Sequential([
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(1, bias_initializer=tf.keras.initializers.constant(0.))
        ])
    # Override call() (not __call__) so Keras keeps its build/training plumbing.
    def call(self, inputs, training=None):
        """Forward pass.

        :param inputs: [b, T, H, W, C] sequences of frames
                       (original note: [b, max sentence length (80)])
        :param training: forwarded so BatchNorm and dropout behave correctly
        """
        x = tf.cast(inputs, dtype=tf.float32)
        # CNN on every time step: [b, T, H, W, C] -> [b, T, feat]
        out = self.timedist(x, training=training)
        # Stacked LSTMs collapse the sequence: [b, T, feat] -> [b, units]
        out = self.myrnn(out, training=training)
        # FC head: [b, units] -> [b, 1]
        x = self.myDense(out, training=training)
        return x
# Define the optimizer; `learning_rate` replaces the deprecated `lr` argument.
# (`learn_rate` is assumed to be defined earlier in the full script.)
opt = tf.keras.optimizers.Adam(learning_rate=learn_rate)
sum_model = MyRnn(128)
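# --- Usage sketch (assumptions, not from the original): a smoke test with
# 2 sequences of 80 grayscale 100x100 frames, random regression targets,
# and MSE loss. ---
x = tf.random.normal([2, 80, 100, 100, 1])
y = tf.random.normal([2, 1])
with tf.GradientTape() as tape:
    pred = sum_model(x, training=True)  # [2, 1]
    loss = tf.reduce_mean(tf.keras.losses.MSE(y, pred))
grads = tape.gradient(loss, sum_model.trainable_variables)
opt.apply_gradients(zip(grads, sum_model.trainable_variables))
print('pred shape:', pred.shape, 'loss:', float(loss))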