Given a lgb.Booster, return evaluation results for a particular metric on a particular dataset.
lgb.get.eval.result(
  booster,
  data_name,
  eval_name,
  iters = NULL,
  is_err = FALSE
)
booster: Object of class lgb.Booster.

data_name: Name of the dataset to return evaluation results for.

eval_name: Name of the evaluation metric to return results for.

iters: An integer vector of the iterations you want evaluation results for. If NULL (the default), evaluation results for all iterations are returned.

is_err: If TRUE, return the evaluation error instead of the metric value.

Value: a numeric vector of evaluation results.
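The returned vector exposes the per-iteration values stored in the booster's record_evals field. As a minimal sketch (assuming a model trained with valids = list(test = dtest), as in the example below), the call is roughly equivalent to this manual lookup:

# Rough manual equivalent (sketch only; lgb.get.eval.result additionally
# validates data_name / eval_name and handles the `iters` subset)
unlist(model$record_evals[["test"]][["l2"]][["eval"]])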
# \donttest{
setLGBMthreads(2L)
data.table::setDTthreads(1L)
# train a regression model
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
dtrain <- lgb.Dataset(train$data, label = train$label)
data(agaricus.test, package = "lightgbm")
test <- agaricus.test
dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label)
params <- list(
objective = "regression"
, metric = "l2"
, min_data = 1L
, learning_rate = 1.0
, num_threads = 2L
)
valids <- list(test = dtest)
model <- lgb.train(
params = params
, data = dtrain
, nrounds = 5L
, valids = valids
)
#> [LightGBM] [Info] Auto-choosing row-wise multi-threading, the overhead of testing was 0.000893 seconds.
#> You can set `force_row_wise=true` to remove the overhead.
#> And if memory is not enough, you can set `force_col_wise=true`.
#> [LightGBM] [Info] Total Bins 232
#> [LightGBM] [Info] Number of data points in the train set: 6513, number of used features: 116
#> [LightGBM] [Info] Start training from score 0.482113
#> [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
#> [1]: test's l2:6.44165e-17
#> [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
#> [2]: test's l2:1.97215e-31
#> [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
#> [3]: test's l2:0
#> [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
#> [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
#> [4]: test's l2:0
#> [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
#> [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
#> [5]: test's l2:0
# Examine valid data_name values
print(setdiff(names(model$record_evals), "start_iter"))
#> [1] "test"
# Examine valid eval_name values for dataset "test"
print(names(model$record_evals[["test"]]))
#> [1] "l2"
# Get L2 values for "test" dataset
lgb.get.eval.result(model, "test", "l2")
#> [1] 6.441652e-17 1.972152e-31 0.000000e+00 0.000000e+00 0.000000e+00
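# The `iters` argument subsets by iteration number; for example, results
# for iterations 1 and 3 only (values follow from the full vector above)
lgb.get.eval.result(model, "test", "l2", iters = c(1L, 3L))
#> [1] 6.441652e-17 0.000000e+00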
# }