# Example 1: Build a simple NN step-by-step and use it:
# (1.A) create a new 'NN' object:
n <- new("NN")
# (1.B) Add topology components:
# 1. add a layer of 4 generic nodes:
n$add_layer("generic",4)
# 2. add a set for connections that pass data unmodified:
n$add_connection_set("pass-through")
# 3. add another layer of 2 generic nodes:
n$add_layer("generic",2)
# 4. add a set for connections that pass data multiplied by a weight:
n$add_connection_set("wpass-through")
# 5. add a layer of 1 generic node:
n$add_layer("generic",1)
# Create actual full connections in sets, random initial weights in [0,1]:
n$create_connections_in_sets(0,1)
# Optionally, show an outline of the topology:
n$outline()
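# (components occupy consecutive positions in the order they were added,
# so here: layer@1, connection set@2, layer@3, connection set@4, layer@5)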
# (1.C) use the network.
# input some data, and create output for it:
n$input_at(1,c(10,20,30,40))
n$recall_all(TRUE)
# the final output:
n$get_output_from(5)
# (1.D) optionally, examine the network:
# the input arriving at the connection set at position 2:
n$get_input_at(2)
# Data is passed unmodified through connections at position 2,
# and (by default) summed together at each node of layer at position 3.
# Final output from the layer at position 3:
n$get_output_from(3)
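# (sanity check: the connections at position 2 are full and pass data
# unmodified, so each of the two nodes at position 3 receives all four
# values and should output their sum:)
sum(c(10, 20, 30, 40))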
# Data is then passed multiplied by the random weights through
# connections at position 4. The weights of these connections:
n$get_weights_at(4)
# Data is finally summed together at the node of layer at position 5,
# producing the final output, which (again) is:
n$get_output_from(5)
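# (equivalently, the final output can be computed manually from layer 3's
# output and the weights at position 4, assuming get_weights_at returns
# them in an order matching layer 3's output:)
sum(n$get_output_from(3) * n$get_weights_at(4))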
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Example 2: A simple MAM NN
# (2.A) Preparation:
# Create data pairs
iris_data <- as.matrix( scale( iris[1:4] ) )
iris_species <- matrix(data=-1, nrow=nrow(iris_data), ncol=3)
for(r in 1:nrow(iris_data))
iris_species[r, as.integer(iris$Species)[r]] <- 1
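# (an equivalent vectorized alternative to the loop above, shown for
# illustration; it recreates iris_species identically via matrix indexing:)
iris_species <- matrix(data = -1, nrow = nrow(iris_data), ncol = 3)
iris_species[cbind(seq_len(nrow(iris_data)), as.integer(iris$Species))] <- 1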
# Create the NN and its components:
m <- new( "NN" )
m$add_layer( "generic" , 4 )
m$add_layer( "generic" , 3 )
m$fully_connect_layers_at(1, 2, "MAM", 0, 0)
# (2.B) Use the NN to store iris (data, species) pairs:
# encode pairs in NN:
m$encode_datasets_supervised(iris_data, 1, iris_species, 3, 0, 1, TRUE)
# (2.C) Recall iris species from NN:
recalled_data <- m$recall_dataset(iris_data,1,3,TRUE)
# (2.D) Convert recalled data to ids and plot results:
recalled_ids <- apply(recalled_data, 1, which.max)
plot(iris_data, pch=recalled_ids)
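# (optionally, check how many species ids were recalled correctly:)
cat("Correctly recalled:", sum(recalled_ids == as.integer(iris$Species)),
    "of", nrow(iris_data), "\n")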
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Example 3: Using add_R_... methods in a NN:
# (3.A) add_R_ignoring, for functions whose result will be ignored by the NN:
a<-new("NN")
a$add_layer("pass-through",4)
a$add_R_ignoring("on recall","print","output of",1)
a$add_connection_set("pass-through")
a$add_R_ignoring("on recall","print","input of",3)
a$add_layer("pass-through",2)
a$add_R_ignoring("on recall","print","output of",5)
a$create_connections_in_sets(0,0)
# Below is a forward recall. During it, the NN will print the output
# of layer @1, then print the input of connections @3, and
# finally print the output of layer @5:
a$set_input_at(1,1:4)
a$recall_all(TRUE)
# (3.B) add_R_forwarding is used to read the output of the component above,
# apply an R function, and send the result as input to the component below.
# (Due to current limitations of various component types, place the
# add_R_forwarding between two layers and connect other components
# to those layers.)
a<-new("NN")
a$add_layer("pass-through",4)
a$add_R_forwarding("on recall","sin")
a$add_layer("pass-through",4)
# During a forward recall, the R component @2 will get the output
# of layer @1, apply an R function (here sin) and send
# the result as input to layer @3.
a$set_input_at(1,1:4)
a$recall_all(TRUE)
a$get_output_from(3)
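# (sanity check: the output above should match applying sin directly:)
sin(1:4)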
# (3.C) add_R_pipelining is similar to add_R_forwarding but reads the
# output of the component below and feeds the result to the component above
# (for encoding/recalling in the backwards direction):
a<-new("NN")
a$add_layer("pass-through",4)
a$add_R_pipelining("on recall","sin",FALSE)
a$add_layer("pass-through",4)
# Below is a backwards recall; during it, the R component @2 will get the
# output of layer @3, apply the R function and send the result as input to
# layer @1.
a$set_input_at(3,1:4)
a$recall_all(FALSE)
a$get_output_from(1)
# (3.D) add_R_function allows us to define the destination for the function's
# results. This may include destinations such as PE biases, connection
# weights etc.
a<-new("NN")
a$add_layer("pass-through",4)
a$add_R_function("on recall","sum","output of",1,"to input",3, FALSE)
a$add_layer("pass-through",1)
# Below, in a typical forward recall, the R component @2 will get the output
# of layer @1, apply an R function (here function sum) and send the result
# as input to layer @3.
a$set_input_at(1,1:4)
a$recall_all(TRUE)
a$get_output_from(3)
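# (sanity check: the single node at layer @3 should hold sum(1:4), i.e.:)
sum(1:4)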
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Example 4: A more complete example where a NN similar to that of help(LVQs)
# is implemented via 'NN'. It is a (supervised) LVQ; this version
# also allows using multiple output nodes per class.
# Note: while this is similar to LVQs, the learning rate is NOT affected by epoch.
# Obviously (as is true for most NNs, especially simple ones like this), one could
# easily create this model using just a matrix and some R code processing it;
# more elaborately, it could be implemented via R components (see help(NN_R_components));
# but how could one then be able to use all that fancy NN terminology? :)
# some options:
# define how many output nodes will be implicitly assigned to each class,
# i.e. groups of connections / prototype vectors / codebook vectors per class:
number_of_output_pes_per_class <- 3
# plot results?
plot_result <- FALSE
# also use a mechanism to store weights (so we can plot them later)?
record_weights_at_each_iteration <- FALSE
# Next, prepare some data (based on iris).
# LVQ expects data in 0 to 1 range, so scale some numeric data...
DATA <- as.matrix(iris[1:4])
c_min <- apply(DATA, 2, FUN = "min")
c_max <- apply(DATA, 2, FUN = "max")
c_rng <- c_max - c_min
DATA <- sweep(DATA, 2, FUN = "-", c_min)
DATA <- sweep(DATA, 2, FUN = "/", c_rng)
# create a vector of desired class ids:
desired_class_ids <- as.integer(iris$Species)
# defined just to make names more general (independent from iris):
input_length <- ncol(DATA)
number_of_classes <- length(unique(desired_class_ids))
# Next, setup the LVQ NN.
# output layer may be expanded to accommodate multiple PEs per class:
output_layer_size <-
number_of_classes * number_of_output_pes_per_class
# next, implement a supervised LVQ using NN module:
LVQ_PUNISH_PE <- 10 # as defined in the C++ LVQ code.
LVQ_DEACTI_PE <- 20 # as defined in the C++ LVQ code.
LVQ_REWARD_PE <- 30 # as defined in the C++ LVQ code.
LVQ_RND_MIN <- 0 # as defined in the C++ LVQ code.
LVQ_RND_MAX <- +1 # as defined in the C++ LVQ code.
# create a typical LVQ topology for this problem:
n <- new('NN')
n$add_layer('pass-through', input_length)
n$add_connection_set('LVQ', 0)
n$add_layer('LVQ-output', output_layer_size)
n$create_connections_in_sets(LVQ_RND_MIN, LVQ_RND_MAX)
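# optionally, as in Example 1, show an outline of this topology:
n$outline()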
# optional, store current weights (so we can plot them later):
if (record_weights_at_each_iteration)
cvs <- n$get_weights_at(2)
# encode the data (an admittedly ugly nested loop):
for (epoch in 1:5)
for (i in 1:nrow(DATA))
{
# recall a data vector:
n$input_at(1, DATA[i, ])
n$recall_all_fwd()
# find which output node best matches the input vector (smallest distance):
current_winner_pe <- which.min(n$get_output_at(3))
# translate winning node to class id:
returned_class <-
ceiling(current_winner_pe / number_of_output_pes_per_class)
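# (e.g. with 3 output PEs per class, winning PEs 1..3 map to class 1,
# PEs 4..6 to class 2, and PEs 7..9 to class 3)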
# now check if the correct class was recalled (and reward)
# or an incorrect one (and punish):
# in LVQ layers, the 'bias' node (PE) register is used to indicate if
# positive (reward) or negative (punishment) should be applied.
new_output_flags <- rep(LVQ_DEACTI_PE, output_layer_size)
new_output_flags[current_winner_pe] <- LVQ_PUNISH_PE
if (returned_class == desired_class_ids[i])
new_output_flags[current_winner_pe] <- LVQ_REWARD_PE
n$set_biases_at(3, new_output_flags)
# note: for this example (and unlike LVQs) the learning rate is constant,
# NOT decreasing as epochs increase.
n$encode_at(2)
# optional, store current weights (so we can plot them later):
if (record_weights_at_each_iteration)
cvs <- rbind(cvs, n$get_weights_at(2))
}
# done encoding.
# recall all data:
lvq_recalled_winning_nodes <-
apply(n$recall_dataset(DATA, 1, 3, TRUE), 1, which.min)
# translate winning node to class id:
lvq_recalled_class_ids <-
ceiling(lvq_recalled_winning_nodes / number_of_output_pes_per_class)
correct <- lvq_recalled_class_ids == desired_class_ids
cat("Correct:", sum(correct), "\n")
cat("Number of produced classes:", length(unique(lvq_recalled_class_ids)), "\n")
# plot results if requested (here only columns 1 and 2 are displayed):
if (plot_result)
{
plot(DATA, pch = lvq_recalled_class_ids,
main = "LVQ recalled clusters (module)")
# optionally, if weights were stored, plot their trajectories:
if (record_weights_at_each_iteration)
{
for (cv in 0:(output_layer_size - 1))
lines(cvs[, (cv * input_length + 1):(cv * input_length + 2)],
lwd = 2, col = cv + 1)
}
}