## Not run:
# # set up XOR problem
# inp <- c(0, 0, 1, 1, 0, 1, 0, 1)
# dim(inp) <- c(4, 2)
# outp <- c(0, 1, 1, 0)
# dim(outp) <- c(4, 1)
# # objective
# obj <- function(net)
# {
# return(mlp_mse(net, inp, outp))
# }
# # gradient
# grad <- function(net)
# {
# return(mlp_grad(net, inp, outp)$grad)
# }
# # stopping criterion
# tol <- function(oh) {
# if (oh[length(oh)] <= 5e-5) { return(TRUE); }
# return(FALSE)
# }
# # create a 2-6-1 network
# net <- mlp_net(c(2, 6, 1))
# # set activation function in all layers
# net <- mlp_set_activation(net, layer = "a", "sigmoid")
# # randomise weights
# net <- mlp_rnd_weights(net)
# # teach
# netobj <- mlp_teach_grprop(net, obj, grad, epochs = 500,
# stop = tol,
# report_freq = 1)
# # plot learning history
# plot(netobj$obj, type = 'l')
# ## End(Not run)
Run the code above in your browser using DataLab