Implementing the improved triplet loss (batch hard Triplet Loss) with Baidu's PaddlePaddle framework

import paddle.fluid as fluid

def batch_hard_triplet_loss(input, y_true, margin, batch_size):
    """
    :param input: shape [batch, feature], embeddings from the network
    :param y_true: shape [batch, 1], the labels
    :param margin: float
    :param batch_size: size of the batch
    :return: triplet loss, a scalar
    """
    # y_true shape: [64, 1]
    # y_true transposed shape: [1, 64]
    # pos_mask shape: [64, 64], with ones on the diagonal
    # pos_mask[i, j] == 1 means the i-th and j-th labels are equal
    y_true_trans = fluid.layers.transpose(y_true, [1, 0])
    y_true_trans_matrix = fluid.layers.concat([y_true_trans] * batch_size, axis=0)
    pos_mask = fluid.layers.cast(
        fluid.layers.equal(y_true_trans_matrix, y_true), "float32")
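    # Illustration (assumed toy values, not from the original post):
    # with y_true = [[0], [0], [1]], pos_mask is
    # [[1, 1, 0],
    #  [1, 1, 0],
    #  [0, 0, 1]]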

    def _mask_max(input_tensor, mask, axis=None, keepdims=False):
        # mask == 1 (same label):      value = input_tensor - 0
        # mask == 0 (different label): value = input_tensor - 1e6
        # so the max is taken over positive pairs only -> the hardest
        # (farthest) positive pair
        input_tensor = input_tensor - (1 - mask) * 1e6
        _max = fluid.layers.reduce_max(input_tensor, dim=axis, keep_dim=keepdims)
        return _max

    def _mask_min(input_tensor, mask, axis=None, keepdims=False):
        # mask == 1 (same label): push the entry up by 1e6, so the min is
        # taken over negative pairs only -> the hardest (closest) negative pair
        input_tensor = input_tensor + mask * 1e6
        _min = fluid.layers.reduce_min(input_tensor, dim=axis, keep_dim=keepdims)
        return _min


    # Pairwise squared Euclidean distances via the identity
    # ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 * (x_i . x_j)
    a = fluid.layers.reduce_sum(fluid.layers.pow(input, 2.0), dim=1, keep_dim=True)  # [batch, 1]
    b = fluid.layers.reduce_sum(
        fluid.layers.pow(fluid.layers.transpose(input, [1, 0]), 2.0), dim=0, keep_dim=True)  # [1, batch]
    b_matrix = fluid.layers.concat([b] * batch_size, axis=0)  # [batch, batch]

    dist_squared = (b_matrix + a) - 2 * fluid.layers.matmul(input, input, transpose_y=True)
    # Clamp to a small positive value so sqrt(0) does not produce an undefined gradient
    dist = fluid.layers.elementwise_max(
        dist_squared, fluid.layers.fill_constant([1], dtype='float32', value=1e-16))
    dist = fluid.layers.sqrt(dist)  # Euclidean distance matrix, [batch, batch]

    pos_max = _mask_max(dist, pos_mask, axis=-1, keepdims=True)  # hardest positive, shape (batch_size, 1)
    neg_min = _mask_min(dist, pos_mask, axis=-1, keepdims=True)  # hardest negative, shape (batch_size, 1)

    basic_loss = (pos_max - neg_min) + margin  # hardest-positive distance minus hardest-negative distance, plus margin
    # Variant 1: hinge (clipped) triplet loss ***********************************************
    # clip_loss = fluid.layers.relu(basic_loss)
    # loss = fluid.layers.reduce_mean(clip_loss, dim=0)

    # Variant 2: soft-margin triplet loss, log(1 + exp(x)) **************************************
    loss = fluid.layers.reduce_mean(
        fluid.layers.log(1 + fluid.layers.exp(basic_loss)), dim=0)
    # loss = fluid.layers.reduce_mean(
    #     fluid.layers.log(fluid.layers.elementwise_add(
    #         fluid.layers.exp(basic_loss),
    #         fluid.layers.fill_constant([1], dtype='float32', value=1))), dim=0)

    return loss

The argument input is the output of the network's final layer, with shape [batch_size, feature]; y_true holds the labels, i.e. the class of each of the batch_size outputs, with shape [batch_size, 1].
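For reference, here is a minimal sketch of how the loss might be wired into a fluid program. The single fully-connected embedding layer, the layer sizes, and the variable names are assumptions for illustration, not from the original post:

import paddle.fluid as fluid

BATCH_SIZE = 64

images = fluid.layers.data(name='images', shape=[784], dtype='float32')
labels = fluid.layers.data(name='labels', shape=[1], dtype='int64')

# Hypothetical embedding network: a single fully-connected layer.
embeddings = fluid.layers.fc(input=images, size=128, act=None)

loss = batch_hard_triplet_loss(embeddings, labels, margin=0.5,
                               batch_size=BATCH_SIZE)

optimizer = fluid.optimizer.Adam(learning_rate=1e-3)
optimizer.minimize(loss)

Note that every batch fed to such a program should contain at least two samples per class and at least two classes; otherwise an anchor has no real positive pair (only its zero self-distance) or no negative pair, and the masked max/min degenerates.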

The mathematical formulation of batch hard Triplet Loss (the soft-margin variant returned by the code above) is:

$$\mathcal{L} = \frac{1}{B}\sum_{a=1}^{B}\log\Bigl(1+\exp\Bigl(\max_{p:\,y_p=y_a} d(x_a,x_p)\;-\;\min_{n:\,y_n\neq y_a} d(x_a,x_n)+m\Bigr)\Bigr)$$

where B is the batch size, d is the Euclidean distance between embeddings, and m is the margin; the commented-out hinge variant replaces log(1 + exp(x)) with max(x, 0).
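As a sanity check, the same batch-hard computation can be written as a plain NumPy loop directly from this formula. This is a sketch for verification, not part of the original post:

import numpy as np

def batch_hard_triplet_loss_np(emb, labels, margin):
    """Reference implementation of the soft-margin batch-hard loss."""
    B = emb.shape[0]
    # Pairwise Euclidean distances, clamped like the fluid version.
    diff = emb[:, None, :] - emb[None, :, :]
    dist = np.sqrt(np.maximum((diff ** 2).sum(-1), 1e-16))
    same = labels.reshape(-1, 1) == labels.reshape(1, -1)  # positive mask
    losses = []
    for a in range(B):
        pos_max = dist[a][same[a]].max()    # hardest positive (includes d(a, a) = 0)
        neg_min = dist[a][~same[a]].min()   # hardest negative
        losses.append(np.log1p(np.exp(pos_max - neg_min + margin)))
    return np.mean(losses)

emb = np.random.rand(8, 4).astype('float32')
labels = np.array([0, 0, 1, 1, 2, 2, 3, 3])
print(batch_hard_triplet_loss_np(emb, labels, margin=0.5))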


Reposted from blog.csdn.net/weixin_37864449/article/details/95454772