Summarizes a fitted neuralGAM object: family, formula, sample size,
intercept, deviance explained, training MSE, the per-term neural network
settings parsed from each s(...) term, the layer configuration of each
smooth term's network, and the training history. If a linear component is
present, its coefficients are also reported.
# S3 method for class 'neuralGAM'
summary(object, ...)

Invisibly returns object; a human-readable summary is printed as a side effect.
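Because the summary is printed as a side effect and the object is returned
invisibly, the result can be assigned without printing twice; a minimal
sketch, assuming a fitted model ngam as in the example below:

out <- summary(ngam)  # prints the summary once
identical(out, ngam)  # TRUE: the fitted object is returned unchanged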
# \dontrun{
library(neuralGAM)
dat <- sim_neuralGAM_data()
train <- dat$train
test <- dat$test
ngam <- neuralGAM(
  y ~ s(x1) + x2 + s(x3),
  data = train,
  num_units = 128,
  family = "gaussian",
  activation = "relu",
  learning_rate = 0.001,
  bf_threshold = 0.001,
  max_iter_backfitting = 10,
  max_iter_ls = 10,
  seed = 1234
)
#> [1] "Initializing neuralGAM..."
#> [1] "BACKFITTING Iteration 1 - Current Err = 0.00314307849443241 BF Threshold = 0.001 Converged = FALSE"
#> [1] "BACKFITTING Iteration 2 - Current Err = 0.0035904802501205 BF Threshold = 0.001 Converged = FALSE"
#> [1] "BACKFITTING Iteration 3 - Current Err = 0.0022177874489898 BF Threshold = 0.001 Converged = FALSE"
#> [1] "BACKFITTING Iteration 4 - Current Err = 0.00198120946126081 BF Threshold = 0.001 Converged = FALSE"
#> [1] "BACKFITTING Iteration 5 - Current Err = 0.00268658957361894 BF Threshold = 0.001 Converged = FALSE"
#> [1] "BACKFITTING Iteration 6 - Current Err = 0.00280545783365124 BF Threshold = 0.001 Converged = FALSE"
#> [1] "BACKFITTING Iteration 7 - Current Err = 0.0019751075608256 BF Threshold = 0.001 Converged = FALSE"
#> [1] "BACKFITTING Iteration 8 - Current Err = 0.000915071832718818 BF Threshold = 0.001 Converged = TRUE"
summary(ngam)
#> neuralGAM summary
#> Family : gaussian
#> Formula : y ~ s(x1) + x2 + s(x3)
#> Observations : 1400
#> Intercept (eta0) : 4.47201
#> Deviance explained : 90.78%
#> Train MSE : 1.22241
#> Pred. / Conf. Int. : disabled
#> ------------------------------------------------------------------------
#> Per-term configuration (parsed from s(...))
#> -- x1 - units: 128 | activation: relu | loss: mse | learning rate: 0.001 | k_init: glorot_normal | b_init: zeros | k_reg: NA | b_reg: NA | a_reg: NA
#> -- x3 - units: 128 | activation: relu | loss: mse | learning rate: 0.001 | k_init: glorot_normal | b_init: zeros | k_reg: NA | b_reg: NA | a_reg: NA
#> ------------------------------------------------------------------------
#> Neural network layer configuration per smooth term
#> -- x1
#>   layer_index    class units activation   kernel_init bias_init kernel_reg bias_reg
#> 1           1 dense_31     1     linear GlorotUniform     Zeros       <NA>     <NA>
#> 2           2 dense_32   128       relu  GlorotNormal     Zeros       <NA>     <NA>
#> 3           3 dense_33     1     linear GlorotUniform     Zeros       <NA>     <NA>
#> -- x3
#>   layer_index    class units activation   kernel_init bias_init kernel_reg bias_reg
#> 1           1 dense_34     1     linear GlorotUniform     Zeros       <NA>     <NA>
#> 2           2 dense_35   128       relu  GlorotNormal     Zeros       <NA>     <NA>
#> 3           3 dense_36     1     linear GlorotUniform     Zeros       <NA>     <NA>
#> ------------------------------------------------------------------------
#> Linear component coefficients
#> (Intercept) x2
#> 4.472011 1.956564
#> ------------------------------------------------------------------------
#> Training history
#>              Timestamp Model BF.It. Train.Loss
#> 1  2025-10-20 09:26:34    x1      1     5.0296
#> 3  2025-10-20 09:26:36    x1      2     4.2344
#> 5  2025-10-20 09:26:36    x1      3     3.5472
#> 7  2025-10-20 09:26:36    x1      4     2.9337
#> 9  2025-10-20 09:26:37    x1      5     2.3201
#> 11 2025-10-20 09:26:37    x1      6     1.7902
#> 13 2025-10-20 09:26:38    x1      7     1.4436
#> 15 2025-10-20 09:26:38    x1      8     1.2758
#> 2  2025-10-20 09:26:35    x3      1     4.6098
#> 4  2025-10-20 09:26:36    x3      2     3.4681
#> 6  2025-10-20 09:26:36    x3      3     2.4593
#> 8  2025-10-20 09:26:37    x3      4     1.6760
#> 10 2025-10-20 09:26:37    x3      5     1.2357
#> 12 2025-10-20 09:26:37    x3      6     1.3635
#> 14 2025-10-20 09:26:38    x3      7     2.0646
#> 16 2025-10-20 09:26:38    x3      8     2.9907
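# The per-term fits summarised above can also be inspected graphically;
# a sketch, assuming the package provides a plot() method for
# neuralGAM objects:
# plot(ngam)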
# }