# Example: train a stacked sparse autoencoder on the iris data and extract
# compressed (layer-2) representations for held-out rows.
#
# NOTE: SAENET.train() and SAENET.predict() are exported by the SAENET
# package (which builds on the autoencoder package); without library(SAENET)
# this script fails with "could not find function SAENET.train".
library(autoencoder)
library(SAENET)  # provides SAENET.train() / SAENET.predict()

data(iris)

#### Train a stacked sparse autoencoder with a (5,3) architecture and
#### a relatively minor sparsity penalty. Try experimenting with the
#### lambda and beta parameters if you haven't worked with sparse
#### autoencoders before - it's worth inspecting the final layer
#### to ensure that output activations haven't simply converged to the value of
#### rho that you gave (which is the desired activation level on average).
#### If the lambda/beta parameters are set high, this is likely to happen.
output <- SAENET.train(
  as.matrix(iris[1:100, 1:4]),  # first 100 rows, four numeric features
  n.nodes = c(5, 3),            # two hidden layers: 5 units, then 3
  lambda  = 1e-5,               # weight-decay penalty
  beta    = 1e-5,               # weight of the sparsity penalty term
  rho     = 0.01,               # desired average activation level
  epsilon = 0.01                # scale for random weight initialisation
)

# Feed the remaining 50 rows through the trained stack and keep the
# activations of layer 2 (the 3-unit bottleneck).
predict.out <- SAENET.predict(output, as.matrix(iris[101:150, 1:4]), layers = c(2))
# Run the code above interactively (e.g. in your browser using DataLab)
# to inspect the trained network and the layer-2 encodings.