Skip to content

bokbokbok.loss_functions.regression

LogCoshLoss()

Log Cosh Loss is an alternative to Mean Absolute Error.

Source code in bokbokbok/loss_functions/regression/regression_loss_functions.py
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
def LogCoshLoss() -> Callable:
    """
    [Log Cosh Loss](https://openreview.net/pdf?id=rkglvsC9Ym) is an alternative to Mean Absolute Error.
    """

    def log_cosh_loss(
            yhat: np.ndarray,
            dtrain: "xgb.DMatrix"
    ) -> tuple[np.ndarray, np.ndarray]:
        """
        Calculate gradient and hessian for log cosh loss.

        Args:
            yhat (np.array): Predictions
            dtrain: The XGBoost / LightGBM dataset

        Returns:
            grad: log cosh loss gradient
            hess: log cosh loss Hessian
        """
        # Residual drives both derivatives: d/dyhat log(cosh(y - yhat))
        # gives -tanh(y - yhat); second derivative is sech^2(y - yhat).
        residual = dtrain.get_label() - yhat
        grad = -np.tanh(residual)
        hess = 1. / np.power(np.cosh(residual), 2)
        return grad, hess

    return log_cosh_loss

SPELoss()

Squared Percentage Error loss

Source code in bokbokbok/loss_functions/regression/regression_loss_functions.py
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
def SPELoss() -> Callable:
    """
    Squared Percentage Error loss.

    NOTE(review): labels of 0 cause a division by zero in both the
    gradient and the hessian — this loss assumes strictly nonzero labels.
    """

    def _gradient(yhat: np.ndarray, dtrain: "xgb.DMatrix") -> np.ndarray:
        """
        Compute the gradient of squared percentage error.

        Args:
            yhat (np.array): Predictions
            dtrain: The XGBoost / LightGBM dataset

        Returns:
            SPE Gradient
        """
        y = dtrain.get_label()
        return -2*(y-yhat)/(y**2)

    def _hessian(yhat: np.ndarray, dtrain: "xgb.DMatrix") -> np.ndarray:
        """
        Compute the hessian for squared percentage error.

        Args:
            yhat (np.array): Predictions (unused; kept so the helper
                signature matches the LogCoshLoss pattern and its docstring)
            dtrain: The XGBoost / LightGBM dataset

        Returns:
            SPE Hessian
        """
        y = dtrain.get_label()
        # Hessian is constant in yhat: d^2/dyhat^2 of ((y-yhat)/y)^2 = 2/y^2
        return 2/(y**2)

    def squared_percentage(
        yhat: np.ndarray,
        dtrain: "xgb.DMatrix"
        ) -> tuple[np.ndarray, np.ndarray]:
        """
        Calculate gradient and hessian for squared percentage error.

        Args:
            yhat (np.array): Predictions
            dtrain: The XGBoost / LightGBM dataset

        Returns:
            grad: SPE loss gradient
            hess: SPE loss Hessian
        """
        grad = _gradient(yhat, dtrain)

        hess = _hessian(yhat, dtrain)

        return grad, hess

    return squared_percentage