Use Root Mean Squared Percentage Error
When to use (Root Mean) Squared Percentage Error?
This metric is defined according to this Kaggle competition, where it is used to score volatility forecasts.
RMSPE cannot be used as a loss function: its gradient is constant, and hence its Hessian is zero. It can nevertheless still be used as an evaluation metric while the model trains. To obtain a usable loss function, we simply remove the square root, yielding Squared Percentage Error (SPE), which has a non-zero Hessian.
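For reference, the standard definitions make the distinction concrete (a sketch using the usual formulas; bokbokbok's implementation may differ in constant factors):

$$
\mathrm{RMSPE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}\left(\frac{y_i - \hat{y}_i}{y_i}\right)^2},
\qquad
\mathrm{SPE}_i = \left(\frac{y_i - \hat{y}_i}{y_i}\right)^2 .
$$

Dropping the root, each sample's SPE has gradient and Hessian

$$
\frac{\partial\,\mathrm{SPE}_i}{\partial \hat{y}_i} = \frac{2\,(\hat{y}_i - y_i)}{y_i^2},
\qquad
\frac{\partial^2\,\mathrm{SPE}_i}{\partial \hat{y}_i^2} = \frac{2}{y_i^2} > 0,
$$

so the booster gets usable second-order information from SPE, while RMSPE's per-sample term has a piecewise-constant gradient.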
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from bokbokbok.eval_metrics.regression import RMSPEMetric
from bokbokbok.loss_functions.regression import SPELoss

# Generate a synthetic regression problem and hold out 25% for validation
X, y = make_regression(n_samples=10000,
                       n_features=10,
                       random_state=41114)

X_train, X_valid, y_train, y_valid = train_test_split(X,
                                                      y,
                                                      test_size=0.25,
                                                      random_state=41114)
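As a sanity check, the metric itself is easy to compute directly. Below is a minimal NumPy sketch following the definition above (the `rmspe` helper is illustrative, not part of bokbokbok); note that percentage errors are undefined wherever `y_true` is zero:

```python
import numpy as np

def rmspe(y_true, y_pred):
    """Root Mean Squared Percentage Error (undefined for zero targets)."""
    pct_error = (y_true - y_pred) / y_true
    return np.sqrt(np.mean(pct_error ** 2))
```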
Usage in LightGBM
import lightgbm as lgb

train = lgb.Dataset(X_train, y_train)
valid = lgb.Dataset(X_valid, y_valid, reference=train)

params = {
    'n_estimators': 3000,
    'seed': 41114,
    'n_jobs': 8,
    'max_leaves': 10,
}

# Train with SPE as the custom objective and RMSPE as the evaluation metric.
# Note: fobj/early_stopping_rounds is the LightGBM 3.x API; in LightGBM >= 4.0,
# pass the objective via params['objective'] and use
# callbacks=[lgb.early_stopping(100)] instead.
clf = lgb.train(params=params,
                train_set=train,
                valid_sets=[train, valid],
                valid_names=['train', 'valid'],
                fobj=SPELoss(),
                feval=RMSPEMetric(),
                early_stopping_rounds=100)
mean_absolute_error(y_valid, clf.predict(X_valid))
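Under the hood, LightGBM's `fobj` is just a callable `(preds, train_data) -> (grad, hess)` and `feval` a callable `(preds, train_data) -> (name, value, is_higher_better)`. A minimal sketch of what an SPE objective and RMSPE metric can look like (illustrative only, not bokbokbok's exact implementation):

```python
import numpy as np

def spe_objective(preds, train_data):
    # Per-sample gradient and Hessian of squared percentage error
    y = train_data.get_label()
    grad = 2 * (preds - y) / y ** 2
    hess = 2 / y ** 2
    return grad, hess

def rmspe_metric(preds, train_data):
    # Returns (metric name, value, is_higher_better); lower RMSPE is better
    y = train_data.get_label()
    return 'RMSPE', float(np.sqrt(np.mean(((y - preds) / y) ** 2))), False
```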
Usage in XGBoost
import xgboost as xgb

dtrain = xgb.DMatrix(X_train, y_train)
dvalid = xgb.DMatrix(X_valid, y_valid)

params = {
    'seed': 41114,
    'learning_rate': 0.1,
    # Silence XGBoost's built-in metric so only RMSPE is reported
    'disable_default_eval_metric': 1,
}

# Train with SPE as the custom objective and RMSPE as the evaluation metric
bst = xgb.train(params,
                dtrain=dtrain,
                num_boost_round=3000,
                early_stopping_rounds=100,
                verbose_eval=100,
                obj=SPELoss(),
                maximize=False,
                feval=RMSPEMetric(XGBoost=True),
                evals=[(dtrain, 'dtrain'), (dvalid, 'dvalid')])
mean_absolute_error(y_valid, bst.predict(dvalid))
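Because training stops early, you may want predictions from the best iteration rather than from all boosted rounds. With XGBoost >= 1.4 (an assumption about the installed version) this can be done via `iteration_range`:

```python
# Restrict prediction to the trees kept by early stopping
preds = bst.predict(dvalid, iteration_range=(0, bst.best_iteration + 1))
mean_absolute_error(y_valid, preds)
```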