TF中的自定义正则项

# Compute a custom L2 regularization term on an embedding tensor.
# Unlike tf.nn.l2_loss (sum of squares / 2, commented alternative below),
# this averages the per-row squared norms over the batch, so the penalty
# does not grow with batch size.
# t_l2_reg = tf.nn.l2_loss(emb, name=reg_name) * l2_reg
t_l2_reg = tf.reduce_mean(tf.reduce_sum(emb**2, axis=-1), name=reg_name) * l2_reg
# Register the term so tf.losses.get_regularization_loss() picks it up.
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, t_l2_reg)

# Usage in the loss: add all collected regularization terms to the base loss.
# NOTE(review): assumes emb is [batch, dim] so axis=-1 sums over the
# embedding dimension — confirm against the caller.
loss = loss0 + tf.losses.get_regularization_loss()
  • 通过自定义函数实现
def sparse_embed(name, shape, sp_x, regularizer=None, l2=0, l1=0, bias=False):
    '''
    Sparse embedding layer implemented as a sparse matmul: sp_x * W (+ b).

    When l1/l2 are positive, a "mini-batch aware" regularizer is attached
    to W instead of the plain `regularizer`: only the rows of W actually
    referenced by the current mini-batch are penalized, and each row's
    penalty is scaled down by sqrt(#occurrences of that id in the batch).

    Args:
        name: variable-name prefix for the weight/bias variables.
        shape: [input_dim, emb_dim] shape of the weight matrix W.
        sp_x: tf.SparseTensor input with dense shape [batch, input_dim].
        regularizer: fallback regularizer used when l1 + l2 <= 0.
        l2: coefficient of the mini-batch-aware L2 penalty.
        l1: coefficient of the mini-batch-aware L1 penalty.
        bias: if truthy, add a learned bias of shape [shape[1]].

    Returns:
        Dense tensor of shape [batch, shape[1]].
    '''

    def minibatch_reg(weight_matrix):
        # Unique feature (column) ids touched by this batch, with counts.
        ids, _, counts = tf.unique_with_counts(sp_x.indices[:, 1])
        rows = tf.gather(weight_matrix, ids)           # [n_unique, emb_dim]
        # Hoisted out of _penalty: identical for both L1 and L2 terms.
        scale = tf.sqrt(tf.cast(counts, tf.float32))

        def _penalty(fn, coef):
            # Build the elementwise op only when its coefficient is active.
            # The original constructed both tf.square and tf.abs tensors in a
            # dict regardless of which one was selected, adding dead graph ops.
            if coef <= 0:
                return 0
            return coef * tf.reduce_sum(tf.reduce_sum(fn(rows), axis=1) / scale)

        return _penalty(tf.square, l2) + _penalty(tf.abs, l1)

    W = tf.get_variable(
            name=name + '_weights',
            initializer=tf.contrib.layers.variance_scaling_initializer(),
            regularizer=minibatch_reg if l2 + l1 > 0 else regularizer,
            shape=shape)
    out = tf.sparse_tensor_dense_matmul(sp_x, W)
    # Truthiness instead of the original `bias is True`: backward-compatible
    # for bias=True/False callers, and also honors e.g. bias=1.
    if bias:
        b = tf.get_variable(name=name + '_biases', initializer=tf.constant_initializer(), shape=[shape[1]])
        out = out + b
    return out
posted @ 2020-11-05 15:52  bregman  阅读(157)  评论(0编辑  收藏  举报