Optimizing the Macro F1 Score


1. F1 Score Optimization

https://www.jianshu.com/p/51debab91824

from functools import partial
import numpy as np
import scipy as sp
import scipy.optimize  # ensure sp.optimize is available
from sklearn.metrics import f1_score

class OptimizedF1(object):
    def __init__(self):
        self.coef_ = []

    def _kappa_loss(self, coef, X, y):
        """
        y_hat = argmax(coef * X, axis=-1)
        :param coef: (1D array) per-class weights
        :param X: (2D array) logits
        :param y: (1D array) true labels
        :return: negative macro F1
        """
        X_p = np.copy(X)
        X_p = coef * X_p
        ll = f1_score(y, np.argmax(X_p, axis=-1), average='macro')
        return -ll

    def fit(self, X, y):
        loss_partial = partial(self._kappa_loss, X=X, y=y)
        initial_coef = [1. for _ in range(len(set(y)))]  # initialize every class weight to 1
        self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')

    def predict(self, X, y):
        # Note: returns the macro F1 of the rescaled logits rather than the labels.
        X_p = np.copy(X)
        X_p = self.coef_['x'] * X_p
        return f1_score(y, np.argmax(X_p, axis=-1), average='macro')

    def coefficients(self):
        return self.coef_['x']

Comparing this with the code for optimizing ordinal discrete labels provided at https://mp.weixin.qq.com/s/jH9grYg-xiuQxMTDq99olg, the main difference lies in the implementation of _kappa_loss and the fit/predict functions: this version learns a per-class scaling of multi-class logits, while the threshold version below learns cut points that bin a single continuous output into ordinal labels.

Usage:

op = OptimizedF1()
op.fit(logits,labels)
logits = op.coefficients()*logits 

 

At prediction time, multiply the logits by the learned coefficients (via coefficients()), then take the argmax (optionally after a softmax, which does not change the result). This way, each class's logit receives its own scaling weight.
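For instance, a minimal end-to-end sketch (the random logits and labels below are stand-ins for illustration, not data from the original post):

import numpy as np

# Hypothetical data: 100 validation samples over 3 classes.
np.random.seed(0)
val_logits = np.random.randn(100, 3)            # (n_samples, n_classes) raw scores
val_labels = np.random.randint(0, 3, size=100)  # true class labels

op = OptimizedF1()
op.fit(val_logits, val_labels)         # Nelder-Mead search for per-class weights

# At inference, rescale new logits with the learned weights before the argmax.
test_logits = np.random.randn(20, 3)
preds = np.argmax(op.coefficients() * test_logits, axis=-1)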

2. Implementing Classification Threshold Optimization

https://mp.weixin.qq.com/s/jH9grYg-xiuQxMTDq99olg

from functools import partial
import numpy as np
import scipy as sp
import scipy.optimize  # ensure sp.optimize is available
from sklearn.metrics import cohen_kappa_score  # used below for quadratic weighted kappa

class OptimizedRounder(object):
    def __init__(self):
        self.coef_ = 0

    def _kappa_loss(self, coef, X, y):
        # Bin the continuous predictions into ordinal labels {0, ..., 4}
        # using the cut points in coef, then score with negative QWK.
        X_p = np.copy(X)
        for i, pred in enumerate(X_p):
            if pred < coef[0]:
                X_p[i] = 0
            elif pred >= coef[0] and pred < coef[1]:
                X_p[i] = 1
            elif pred >= coef[1] and pred < coef[2]:
                X_p[i] = 2
            elif pred >= coef[2] and pred < coef[3]:
                X_p[i] = 3
            else:
                X_p[i] = 4

        # The original snippet called an undefined quadratic_weighted_kappa
        # helper; sklearn's cohen_kappa_score with weights='quadratic'
        # computes the same metric.
        ll = cohen_kappa_score(y, X_p, weights='quadratic')
        return -ll

    def fit(self, X, y):
        loss_partial = partial(self._kappa_loss, X=X, y=y)
        initial_coef = [0.5, 1.5, 2.5, 3.5]  # start midway between adjacent labels
        self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')

    def predict(self, X, coef):
        # Apply the given thresholds to turn continuous predictions into hard labels.
        X_p = np.copy(X)
        for i, pred in enumerate(X_p):
            if pred < coef[0]:
                X_p[i] = 0
            elif pred >= coef[0] and pred < coef[1]:
                X_p[i] = 1
            elif pred >= coef[1] and pred < coef[2]:
                X_p[i] = 2
            elif pred >= coef[2] and pred < coef[3]:
                X_p[i] = 3
            else:
                X_p[i] = 4
        return X_p

    def coefficients(self):
        return self.coef_['x']
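As an aside, the per-element elif chain above can be written as one vectorized call; a minimal sketch, assuming the thresholds are sorted in ascending order:

import numpy as np

# np.digitize returns 0 for values below coef[0], 1 for [coef[0], coef[1]),
# ..., and 4 for values >= coef[3], matching the chain above.
coef = [0.5, 1.5, 2.5, 3.5]
preds = np.array([0.2, 1.7, 3.9])
labels = np.digitize(preds, coef)  # -> array([0, 2, 4])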

 

Likewise, when using this class, call fit first and then coefficients() to obtain the learned threshold parameters; alternatively, call predict with those thresholds and evaluate the rounded outputs against the real labels (e.g., with quadratic weighted kappa).
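A minimal usage sketch along those lines (the random predictions and labels are assumptions for illustration):

import numpy as np

# Hypothetical data: continuous model outputs for a 5-class ordinal problem.
np.random.seed(0)
val_preds = np.random.uniform(0, 4, size=200)   # raw regression-style outputs
val_labels = np.random.randint(0, 5, size=200)  # true ordinal labels in {0, ..., 4}

opt = OptimizedRounder()
opt.fit(val_preds, val_labels)          # search thresholds that maximize QWK
thresholds = opt.coefficients()         # optimized cut points

test_preds = np.random.uniform(0, 4, size=50)
rounded = opt.predict(test_preds, thresholds)   # hard ordinal labels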

