如何计算Keras中的F1宏?
在这些指标被Keras移除之前,我尝试过使用Keras提供的这些代码。代码如下:
def precision(y_true, y_pred):
    """Batch-wise precision: true positives / predicted positives.

    The added epsilon keeps the division defined when nothing is predicted
    positive.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return tp / (predicted_pos + K.epsilon())
def recall(y_true, y_pred):
    """Batch-wise recall: true positives / actual positives.

    The added epsilon keeps the division defined when there are no actual
    positives in the batch.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (actual_pos + K.epsilon())
def fbeta_score(y_true, y_pred, beta=1):
    """F-beta score computed from batch-wise precision and recall.

    beta < 1 favors precision, beta > 1 favors recall; beta == 1 is F1.
    """
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')
    # If there are no true positives, fix the F score at 0 like sklearn.
    # NOTE(review): this compares a backend tensor to a Python int; the
    # short-circuit only behaves as intended in eager execution — confirm.
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    beta_sq = beta ** 2
    return (1 + beta_sq) * (p * r) / (beta_sq * p + r + K.epsilon())
def fmeasure(y_true, y_pred):
    """F1 score, i.e. the F-beta score with beta fixed at 1."""
    return fbeta_score(y_true, y_pred, beta=1)
def precision(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision

def recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall

def fbeta_score(y_true, y_pred, beta=1):
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')
    # 如果没有真正例,像sklearn一样将F分数固定为0。
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta ** 2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score

def fmeasure(y_true, y_pred):
    return fbeta_score(y_true, y_pred, beta=1)
从我所看到的(我是这方面的业余爱好者),他们似乎使用了正确的公式。但是,当我尝试在训练过程中将其用作衡量标准时,我得到了完全相同的val_accuracy、val_precision、val_recall和val_fmeasure输出。我确实相信,即使公式正确,这种情况也可能发生,但我认为不太可能。对此问题有何解释?感谢您。正如@Pedia在其上述评论中所说的那样,
关于_epoch_end
,如中所述是最好的方法。自Keras 2.0度量f1以来,精度和召回率已被删除。解决方案是使用自定义度量函数:
from keras import backend as K
def f1(y_true, y_pred):
    """Batch-wise F1 metric for Keras: harmonic mean of precision and recall."""

    def recall(y_true, y_pred):
        """Recall metric.
        Only computes a batch-wise average of recall.
        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
        return tp / (actual_pos + K.epsilon())

    def precision(y_true, y_pred):
        """Precision metric.
        Only computes a batch-wise average of precision.
        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
        return tp / (predicted_pos + K.epsilon())

    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    # Epsilon keeps the final division defined when p + r == 0 (no NaN).
    return 2 * ((p * r) / (p + r + K.epsilon()))
# Register the custom f1 metric on the model; `model` is defined elsewhere
# in the poster's script.
model.compile(loss='binary_crossentropy',
optimizer= "adam",
metrics=[f1])
此函数的返回行
return 2*((precision*recall)/(precision+recall+K.epsilon()))
通过添加常量ε进行修改,以避免除以0。因此,不会计算NaN 我还建议您解决这一问题
- 由ybubnov安装软件包
- 在for循环内调用
,利用每个epoch后输出的精度/召回度量model.fit(nb_epoch=1,…)
# Train one epoch at a time so the epoch-level validation precision/recall
# can be combined into a proper F1 score (Keras' batch-averaged F1 is wrong).
for mini_batch in range(epochs):
    model_hist = model.fit(X_train, Y_train, batch_size=batch_size, epochs=1,
                           verbose=2, validation_data=(X_val, Y_val))
    precision = model_hist.history['val_precision'][0]
    recall = model_hist.history['val_recall'][0]
    # Tiny epsilon avoids ZeroDivisionError when precision + recall == 0.
    f_score = (2.0 * precision * recall) / (precision + recall + 1e-7)
    # print() call form works on both Python 2 and 3; the original bare
    # `print '...'` statement is a SyntaxError on Python 3.
    print('F1-SCORE {}'.format(f_score))
使用Keras度量函数不是计算F1或AUC等的正确方法 原因是在验证的每个批处理步骤中都会调用metric函数。这样,Keras系统计算批次结果的平均值。这不是正确的F1分数 这就是为什么F1分数被从keras的度量函数中删除的原因。请看这里:
# Train one epoch at a time so the epoch-level validation precision/recall
# can be combined into a proper F1 score (Keras' batch-averaged F1 is wrong).
for mini_batch in range(epochs):
    model_hist = model.fit(X_train, Y_train, batch_size=batch_size, epochs=1,
                           verbose=2, validation_data=(X_val, Y_val))
    precision = model_hist.history['val_precision'][0]
    recall = model_hist.history['val_recall'][0]
    # Tiny epsilon avoids ZeroDivisionError when precision + recall == 0.
    f_score = (2.0 * precision * recall) / (precision + recall + 1e-7)
    # print() call form works on both Python 2 and 3; the original bare
    # `print '...'` statement is a SyntaxError on Python 3.
    print('F1-SCORE {}'.format(f_score))
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
import numpy as np
def create_f1():
    """Return a helper computing (TP, predicted positives, actual positives).

    The returned closure thresholds sigmoid outputs at 0.5 and reduces each
    count to a scalar tensor.
    """
    def f1_function(y_true, y_pred):
        # Hard 0/1 predictions from probabilities.
        hard_preds = tf.where(y_pred >= 0.5, 1., 0.)
        true_pos = tf.reduce_sum(y_true * hard_preds)
        pred_pos = tf.reduce_sum(hard_preds)
        actual_pos = tf.reduce_sum(y_true)
        return true_pos, pred_pos, actual_pos
    return f1_function
class F1_score(keras.metrics.Metric):
    """Stateful F1 metric: accumulates TP / predicted-positive / possible-
    positive counts across batches so `result()` is an epoch-level F1,
    not a batch average."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)  # handles base args (e.g., dtype)
        self.f1_function = create_f1()
        self.tp_count = self.add_weight("tp_count", initializer="zeros")
        self.all_predicted_positives = self.add_weight('all_predicted_positives', initializer='zeros')
        self.all_possible_positives = self.add_weight('all_possible_positives', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulate this batch's counts into the metric state."""
        tp, predicted_positives, possible_positives = self.f1_function(y_true, y_pred)
        self.tp_count.assign_add(tp)
        self.all_predicted_positives.assign_add(predicted_positives)
        self.all_possible_positives.assign_add(possible_positives)

    def result(self):
        """Epoch-level F1. Epsilon in every denominator prevents 0/0 -> NaN
        when no positives have been predicted or seen (the original code
        produced NaN in that case)."""
        eps = keras.backend.epsilon()
        precision = self.tp_count / (self.all_predicted_positives + eps)
        recall = self.tp_count / (self.all_possible_positives + eps)
        f1 = 2 * (precision * recall) / (precision + recall + eps)
        return f1
# Synthetic binary-classification data: 1000 samples, 10 random features.
X = np.random.random(size=(1000, 10))
Y = np.random.randint(0, 2, size=(1000,))
# 80/20 train/test split (train_test_split imported from sklearn above).
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
# Minimal dense model with a sigmoid output for binary classification.
model = keras.models.Sequential([
keras.layers.Dense(5, input_shape=[X.shape[1], ]),
keras.layers.Dense(1, activation='sigmoid')
])
# Attach the stateful F1_score metric defined above.
model.compile(loss='binary_crossentropy', optimizer='SGD', metrics=[F1_score()])
history = model.fit(X_train, y_train, epochs=5, validation_data=(X_test, y_test))
正如@Diesche所提到的,以这种方式实现f1_分数的主要问题是,它在每个批处理步骤中都被调用,并且比其他任何操作都更容易导致结果混乱 我一直在努力解决这个问题,但最终通过使用回调解决了这个问题:在一个纪元结束时,回调使用新的模型参数预测数据(在本例中,我选择仅将其应用于验证数据),并提供在整个纪元上评估的一致性度量 我在python3上使用tensorflow gpu(1.14.0)
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score, recall_score
from tensorflow.keras.callbacks import Callback
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.models import Sequential, Model
# SGD with Nesterov momentum; note `lr` is the legacy argument name
# (newer Keras versions use `learning_rate`).
optimizer = optimizers.SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
# `model` is assumed to be defined earlier in the poster's script.
model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=['accuracy'])
model.summary()
class Metrics(Callback):
    """Keras callback computing epoch-level F1/precision/recall on the
    validation data with sklearn, instead of Keras' batch-averaged metrics."""

    def __init__(self, model, valid_data, true_outputs):
        # Fix: the original called super(Callback, self).__init__(), which
        # skips Callback's own initializer.
        super(Metrics, self).__init__()
        self.model = model
        self.valid_data = valid_data      # the validation data I'm getting metrics on
        self.true_outputs = true_outputs  # the ground truth of my validation data
        self.steps = len(self.valid_data)

    def on_epoch_end(self, epoch, logs=None):
        # Fix: correct Callback hook signature is (epoch, logs); the original
        # `(self, args, *kwargs)` mis-bound the arguments.
        gen = generator(self.valid_data)  # generator yielding the validation data
        val_predict = np.asarray(self.model.predict(gen, batch_size=1, verbose=0, steps=self.steps))
        # from_proba_to_output turns probabilities into the hard 0/1 labels
        # that sklearn's metric functions expect.
        val_predict = from_proba_to_output(val_predict, 0.5)
        _val_f1 = f1_score(self.true_outputs, val_predict)
        # Fix: the original printed _val_precision/_val_recall without ever
        # defining them (NameError); compute them with sklearn here.
        _val_precision = precision_score(self.true_outputs, val_predict)
        _val_recall = recall_score(self.true_outputs, val_predict)
        print("val_f1: ", _val_f1, " val_precision: ", _val_precision, " _val_recall: ", _val_recall)
从概率到输出的函数如下所示:
def from_proba_to_output(probabilities, threshold):
    """Binarize probabilities: entries strictly greater than `threshold`
    become 1, the rest 0.

    Vectorized replacement for the original per-element Python loop; it also
    generalizes to arrays of any shape (the loop only handled 1-D input).
    The output keeps the input array's dtype, matching the original's
    in-place assignment into a copy.
    """
    probs = np.asarray(probabilities)
    return (probs.astype(float) > threshold).astype(probs.dtype)
然后,我通过引用fit_generator回调部分中的这个度量类来训练我的模型。我没有详细说明我的train_generator和valid_generator的实现,因为这些数据生成器特定于手头的分类问题,发布它们只会带来混乱
# NOTE(review): Metrics.__init__ as posted takes (model, valid_data,
# true_outputs), but only two arguments are passed here — this call would
# raise a TypeError as written; the ground-truth outputs must be supplied too.
model.fit_generator(
train_generator, epochs=nbr_epochs, verbose=1, validation_data=valid_generator, callbacks=[Metrics(model, valid_data)])
输出值是否相同为零?能否提供完整的代码-使用fit
和compile
调用?您还可以提供有关您的数据的更多详细信息吗?这是Keras中的一个已知问题(请参阅:)。精度、Rcall和F1分数正在以批处理方式进行估计。我认为最好的方法是在中采用的方法:有两类标签,0和1。我使用分类交叉熵,最后一个密集层使用softmax激活函数。我尝试将代码改为使用二进制交叉熵,最后一个密集层使用relu,精度等都很好。我想这是因为这个函数不能应用于张量数据。有什么建议吗?我试过了,但它只返回NaN(准确率为99%,但我得到了准确率和召回率,使用此代码,0%)就可以解决问题。我的模型不是预测真实值。谢谢,@Paddy.wird,在评估整个测试集时,我也得到了一个NaN值。但是,在获取前5个样本时,它会正确返回f1分数。将return 2*((精度*召回)/(精度+召回))
更改为 return 2*((precision*recall)/(precision+recall+K.epsilon()))
以修复NaN的错误。@RonakAgrawal,即使如此,我还是遇到了一个bug