"""Segmentation loss functions and metrics (Dice, focal, Tversky families).

All functions follow the Keras loss/metric convention ``fn(y_true, y_pred)``
and operate on tensors of matching shape with values in ``[0, 1]``.
NOTE(review): imports mix ``tensorflow.keras`` and standalone ``keras``
(including a wildcard ``keras.layers`` import that nothing here appears to
use) — worth consolidating, but left untouched to avoid breaking callers.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from keras.layers import *
from keras.losses import binary_crossentropy
import keras

from aix import logger
import aix.constants as C

# NOTE(review): `epsilon` is never referenced below; kept for backward
# compatibility in case other modules import it from here.
epsilon = 1e-5
# Additive smoothing term shared by `tversky` (the other metrics define
# their own local `smooth`).
smooth = 1

# Module-level defaults consumed by `focal_loss`.
alpha = 0.25
gamma = 2


def dice_coef(y_true, y_pred):
    """Soft Dice coefficient, smoothed with ``K.epsilon()``.

    NOTE(review): near-duplicate of `dsc` below, which smooths with 1
    instead of ``K.epsilon()`` — consider unifying.

    Args:
        y_true: ground-truth tensor.
        y_pred: predicted tensor, same shape as ``y_true``.

    Returns:
        Scalar tensor in ``[0, 1]``; 1 means perfect overlap.
    """
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + K.epsilon()) / (K.sum(y_true_f) + K.sum(y_pred_f) + K.epsilon())


def focal_loss_with_logits(logits, targets, alpha, gamma, y_pred):
    """Numerically stable per-element focal loss computed from logits.

    Uses the ``log1p(exp(-|x|)) + relu(-x)`` expansion of the sigmoid
    cross-entropy to avoid overflow for large-magnitude logits.

    Args:
        logits: logit tensor (``log(p / (1 - p))``).
        targets: binary ground-truth tensor.
        alpha: class-balancing weight for the positive class.
        gamma: focusing exponent; larger values down-weight easy examples.
        y_pred: probabilities corresponding to ``logits`` (used for the
            modulating weights).

    Returns:
        Per-element loss tensor (not reduced).
    """
    weight_a = alpha * (1 - y_pred) ** gamma * targets
    weight_b = (1 - alpha) * y_pred ** gamma * (1 - targets)
    return (tf.math.log1p(tf.exp(-tf.abs(logits))) + tf.nn.relu(-logits)) * (weight_a + weight_b) + logits * weight_b


def focal_loss(y_true, y_pred):
    """Mean binary focal loss using the module-level ``alpha``/``gamma``.

    Clips probabilities away from {0, 1} before converting to logits so
    the log/division are finite.
    """
    y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), 1 - tf.keras.backend.epsilon())
    logits = tf.math.log(y_pred / (1 - y_pred))
    loss = focal_loss_with_logits(logits=logits, targets=y_true, alpha=alpha, gamma=gamma, y_pred=y_pred)
    return tf.reduce_mean(loss)


@keras.saving.register_keras_serializable(package="aix.losses")
def dsc(y_true, y_pred):
    """Soft Dice coefficient with additive smoothing of 1.

    Registered as Keras-serializable so models saved with this metric
    can be reloaded.
    """
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return score


@keras.saving.register_keras_serializable(package="aix.losses")
def dice_loss(y_true, y_pred):
    """Dice loss: ``1 - dsc``. Registered as Keras-serializable."""
    loss = 1 - dsc(y_true, y_pred)
    return loss


def bce_dice_loss(y_true, y_pred):
    """Sum of binary cross-entropy and Dice loss."""
    loss = binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
    return loss


def confusion(y_true, y_pred):
    """Smoothed precision and recall from soft predictions.

    Predictions are clipped to ``[0, 1]`` but NOT rounded, so this is a
    soft (differentiable) confusion summary.

    Returns:
        Tuple ``(precision, recall)`` of scalar tensors.
    """
    smooth = 1
    y_pred_pos = K.clip(y_pred, 0, 1)
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.clip(y_true, 0, 1)
    y_neg = 1 - y_pos
    tp = K.sum(y_pos * y_pred_pos)
    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)
    prec = (tp + smooth) / (tp + fp + smooth)
    recall = (tp + smooth) / (tp + fn + smooth)
    return prec, recall


def tp(y_true, y_pred):
    """Smoothed true-positive rate on rounded (hard) predictions."""
    smooth = 1
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pos = K.round(K.clip(y_true, 0, 1))
    tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth)
    return tp


def tn(y_true, y_pred):
    """Smoothed true-negative rate on rounded (hard) predictions."""
    smooth = 1
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos
    tn = (K.sum(y_neg * y_pred_neg) + smooth) / (K.sum(y_neg) + smooth)
    return tn


def tversky(y_true, y_pred, alpha=0.7):
    """Tversky index — an asymmetric generalization of the Dice score.

    Args:
        y_true: ground-truth tensor.
        y_pred: predicted tensor.
        alpha: weight on false negatives; ``1 - alpha`` weights false
            positives. Default 0.7 (previously hard-coded) penalizes
            false negatives more heavily.

    Returns:
        Scalar tensor in ``[0, 1]``.
    """
    y_true_pos = K.flatten(y_true)
    y_pred_pos = K.flatten(y_pred)
    true_pos = K.sum(y_true_pos * y_pred_pos)
    false_neg = K.sum(y_true_pos * (1 - y_pred_pos))
    false_pos = K.sum((1 - y_true_pos) * y_pred_pos)
    # Uses the module-level `smooth` constant, unlike the Dice helpers above.
    return (true_pos + smooth) / (true_pos + alpha * false_neg + (1 - alpha) * false_pos + smooth)


def tversky_loss(y_true, y_pred):
    """Tversky loss: ``1 - tversky``."""
    return 1 - tversky(y_true, y_pred)


def focal_tversky(y_true, y_pred, gamma=0.75):
    """Focal Tversky loss: ``(1 - tversky) ** gamma``.

    Args:
        gamma: focusing exponent. Default 0.75 (previously hard-coded);
            values < 1 emphasize harder examples.
    """
    pt_1 = tversky(y_true, y_pred)
    return K.pow((1 - pt_1), gamma)


def sensitivity(y_true, y_pred):
    """Sensitivity (recall) on rounded predictions: TP / (TP + FN)."""
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())


def specificity(y_true, y_pred):
    """Specificity on rounded predictions: TN / (TN + FP)."""
    true_negatives = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    possible_negatives = K.sum(K.round(K.clip(1 - y_true, 0, 1)))
    return true_negatives / (possible_negatives + K.epsilon())