# Calcifications loss — custom t-test-style loss for calcification classification.

import keras
import tensorflow as tf
from keras.models import Model
from keras import backend as K
# import tensorflow.contrib.eager as tfe
# tfe.enable_eager_execution()

# Loss hyper-parameters, built once as float32 backend constants:
#   beta   — margin the positive-class mean is pushed above
#   lamdap — weight on the positive-sample term
#   lamdan — weight on the negative-sample term
beta, lamdap, lamdan = (
    K.constant(value, dtype='float32') for value in (0.5, 1.5, 1.8)
)

# Custom loss function
# Custom loss function
def t_test_loss(y_true, y_pred):
    """t-test-style contrastive loss separating positive and negative samples.

    Pushes the mean prediction of positive samples above the margin ``beta``
    while pulling the mean (and variance) of negative samples toward zero.

    Parameters
    ----------
    y_true : tensor, shape (batch, 2)
        One-hot labels; column 0 marks positive samples, column 1 negative.
    y_pred : tensor, shape (batch, ...)
        Per-sample predictions; any trailing shape is reduced by mean/var.

    Returns
    -------
    Scalar loss tensor:
        lamdap * (max(beta - mean(pos), 0) + var(pos)) + lamdan * (mean(neg) + var(neg))

    Notes
    -----
    Fixed vs. the original: no ``tf.Session`` is created inside the loss
    (a session-per-call breaks use as a compiled Keras loss and leaked on
    exceptions); dead pre-allocations and debug prints removed; ``tf.squeeze``
    now squeezes only axis 1, so a batch with exactly one positive (or
    negative) sample no longer collapses to a scalar index and drop the
    batch dimension from ``tf.gather``.
    """
    # Row indices of positive / negative samples, shape (k, 1) from tf.where.
    ind_pos = tf.where(tf.equal(y_true[:, 0], 1))
    ind_neg = tf.where(tf.equal(y_true[:, 1], 1))

    # Squeeze only the inner axis so a single match keeps a 1-D index vector.
    pos = tf.gather(y_pred, tf.squeeze(ind_pos, axis=1), axis=0)
    neg = tf.gather(y_pred, tf.squeeze(ind_neg, axis=1), axis=0)

    meanp = K.mean(pos)
    meann = K.mean(neg)
    varp = K.var(pos)
    varn = K.var(neg)

    # Hinge on the positive mean: only penalize when it falls below beta.
    lossp = lamdap * (K.maximum(beta - meanp, K.zeros_like(meanp)) + varp)
    # Negatives are driven toward zero mean and low variance.
    lossn = lamdan * (meann + varn)
    return lossp + lossn



# Smoke-test: one positive sample and two negative samples, each with a
# 2x2 prediction map. The result is discarded; this only exercises the graph.
_smoke_labels = K.constant([[1, 0], [0, 1], [0, 1]], dtype='int32')
_smoke_preds = K.constant(
    [[[0.1, 0.6], [0.2, 0.7]],
     [[0.2, 0.7], [0.3, 0.8]],
     [[0.3, 0.8], [0.4, 0.9]]],
    dtype='float32',
)
t_test_loss(_smoke_labels, _smoke_preds)

# Reprinted from: www.cnblogs.com/ziytong/p/11273672.html