# TensorFlow 2.0 — a network that applies convolution first, then LSTM

class MyRnn(tf.keras.Model):
    """CNN feature extractor applied to each time step, followed by stacked
    LSTMs and a dense head that emits one scalar per example.

    NOTE(review): the CNN is wrapped in TimeDistributed, so inputs are
    presumably sequences of 2-D frames, i.e. shape
    [batch, time, height, width, channels] — confirm against the caller
    (the docstring in the original said [b, 80], which does not match).
    """

    def __init__(self, units):
        """
        :param units: hidden size of each LSTM layer.
        """
        super().__init__()

        # Per-frame CNN: three Conv/BN/Pool stages, flattened to a feature vector.
        self.mycnn = tf.keras.Sequential([
            tf.keras.layers.Conv2D(12, kernel_size=[3, 3], activation=tf.nn.relu),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.MaxPool2D(pool_size=[3, 3], strides=3, padding='same'),
            tf.keras.layers.Conv2D(24, kernel_size=[4, 4], activation=tf.nn.relu),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.MaxPool2D(pool_size=[3, 3], strides=3, padding='same'),
            tf.keras.layers.Conv2D(48, kernel_size=[3, 3], activation=tf.nn.relu),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.AveragePooling2D(pool_size=[8, 8], strides=8, padding='same'),
            tf.keras.layers.Flatten()
        ])

        # FIX: build the TimeDistributed wrapper ONCE here.  The original
        # constructed it inside the forward pass, which creates a brand-new
        # (untracked) layer on every call.
        self.time_distributed_cnn = tf.keras.layers.TimeDistributed(self.mycnn)

        self.units = units

        # Stacked LSTMs: the first two keep the time dimension
        # (return_sequences=True); the last collapses it to [b, units].
        self.myrnn = tf.keras.Sequential([
            tf.keras.layers.LSTM(units, return_sequences=True, unroll=True),
            tf.keras.layers.LSTM(units, return_sequences=True, unroll=True),
            tf.keras.layers.LSTM(units, unroll=True),
        ])

        # Dense head: [b, units] => [b, 1]
        self.myDense = tf.keras.Sequential([
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(1, bias_initializer=tf.keras.initializers.constant(0.))
        ])

    def call(self, inputs, training=None):
        """Forward pass.

        FIX: renamed from __call__ to call.  Subclasses of tf.keras.Model
        must override call(); overriding __call__ bypasses Keras' build,
        weight-tracking and training-flag machinery.  Invoking the model as
        ``model(x)`` still works — tf.keras.Model.__call__ dispatches here.

        :param inputs: batched frame sequences (see class docstring for the
            assumed shape).
        :param training: train/inference flag, now forwarded to the sublayers
            so BatchNormalization behaves correctly in each mode.
        :return: tensor of shape [b, 1].
        """
        x = tf.cast(inputs, dtype=tf.float32)
        # Apply the CNN independently to every time step.
        out = self.time_distributed_cnn(x, training=training)
        out = self.myrnn(out, training=training)
        return self.myDense(out, training=training)


# Build the optimizer and the model.
# FIX: TF2's Adam takes `learning_rate=`; `lr=` is the deprecated TF1-era
# alias and is rejected by recent Keras releases.
# NOTE(review): `learn_rate` is defined elsewhere in the file — confirm.
opt = tf.keras.optimizers.Adam(learning_rate=learn_rate)
sum_model = MyRnn(128)

 

# Source attribution: blog post published 2020-11-13 11:35 by 山…隹 (748 reads, 0 comments).