# load the FCNN4R package, which provides the mlp_* functions used below
library(FCNN4R)

# set up the XOR problem inputs and outputs (one training pattern per row)
inp <- matrix(c(0, 0, 1, 1,
                0, 1, 0, 1), nrow = 4, ncol = 2)
outp <- matrix(c(0, 1, 1, 0), nrow = 4, ncol = 1)
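# together, inp and outp encode the XOR truth table:
#   0 0 -> 0
#   0 1 -> 1
#   1 0 -> 1
#   1 1 -> 0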
# create a 2-6-1 network (2 inputs, one hidden layer of 6 neurons, 1 output)
net <- mlp_net(c(2, 6, 1))
# set the sigmoid activation function in all layers ("a" selects every layer)
net <- mlp_set_activation(net, layer = "a", "sigmoid")
# randomise weights
net <- mlp_rnd_weights(net)
# MSE tolerance level used as the stopping criterion for training and pruning
tol <- 0.5e-4
# teach using Rprop, assign trained network and plot learning history
netmse <- mlp_teach_rprop(net, inp, outp, tol_level = tol,
                          max_epochs = 500, report_freq = 10)
net <- netmse$net
plot(netmse$mse, type = 'l')
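# optional sanity check (mlp_mse is also used before pruning below):
# the final MSE should be at or below tol if Rprop converged within max_epochs
mlp_mse(net, inp, outp)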
# plot network with weights
mlp_plot(net, TRUE)
# if the algorithm has converged, prune using Optimal Brain Surgeon and plot again
if (mlp_mse(net, inp, outp) <= tol) {
    net <- mlp_prune_obs(net, inp, outp, tol_level = tol,
                         max_reteach_epochs = 500, report = TRUE)[[1]]
    mlp_plot(net, TRUE)
}
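# verify that the (possibly pruned) network still meets the tolerance
mlp_mse(net, inp, outp)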
# check network output
round(mlp_eval(net, inp), digits = 3)
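# side-by-side view of targets and rounded outputs (base R only);
# with a converged network the outputs should be close to the 0/1 targets
data.frame(target = as.vector(outp),
           prediction = as.vector(round(mlp_eval(net, inp), digits = 3)))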