diff --git a/spider_control/control.py b/spider_control/control.py
index f1b46da0cc8546aaf3099452efacbd5ee3d63fb7..e7ca5e0d48987bfa75acec329ab4ae6b2bd250cc 100644
--- a/spider_control/control.py
+++ b/spider_control/control.py
@@ -264,9 +264,11 @@ class MultiClassLogistic:
         global weight_hid
         global weight_out
         self.neuron_input = np.array(neuron_values)#assigning input values to neurons
+        self.neuron_hidden = np.zeros((4, 1))  # array to hold the neuron values of the hidden layer
+        self.neuron_out = np.zeros((3, 1))  # array to hold the neuron values of the output layer
         weights1 = tf.random_normal((neurons, 1), dtype=tf.float32, seed=1606)#weights of input layer
-        weights2 = tf.random_normal((neurons/4, 1), dtype=tf.float32, seed=1003)#weights for hidden layer, number of neurons in hidden layer is identified by the quadrant concept
-        weights3 = tf.random_normal((neurons, 1), dtype=tf.float32, seed=0207)#weights for output layer
+        weights2 = tf.random_normal((neurons // 2, 1), dtype=tf.float32, seed=1003)  # weights for hidden layer; the number of hidden neurons follows the quadrant concept
+        weights3 = tf.random_normal((3, 1), dtype=tf.float32, seed=207)  # weights for output layer
         sess = tf.Session()
         weight_in, weight_hid, weight_out = sess.run(weights1, weights2, weights3)
@@ -283,18 +285,40 @@
         n_input = sess.run(multiplication)
         return n_input
 #function that returns the softmax of the output layer or any given array of neurons
-    def out_softmax(self, neuron_out, neurons):
-        output = np.array(neurons)
+    def out_softmax(self, neuron_out):
         output = tf.nn.softmax(neuron_out)
         return output
-    def activation(self, a, b):
-        'where a '
+    def output_func(self):
+        pass  # placeholder: output computation not yet implemented
+    def activation(self, a):
+        'where a is the input of the neuron and the activation function is modelled as a rectified linear unit'
+        if a >= 0:
+            b = a
+        else:
+            b = 0
+        return b
+    def kt_learning(self, n_out, n_preferred, learning_rate):
+        'where n_out is the actual output and n_preferred is the preferred output of the network'
+        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=n_preferred, logits=n_out))
+        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
+        init = tf.global_variables_initializer()  # initialize after the optimizer creates its variables
+        with tf.Session() as sess:
+            sess.run(init)
+            sess.run(optimizer)
+
-#function to normalize the input data
+    #function to normalize the input data
     def normalization(a):
         a_min = min(a)
         a_max = max(a)
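
Reviewer note: the kt_learning change above follows the usual TF 1.x pattern of building the loss and optimizer ops first and initializing variables only afterwards, so that the optimizer's slot variables are covered by the initializer. Below is a minimal, self-contained sketch of that training step, assuming TF 1.x graph mode; the tensor names (x, target, w), shapes, and the toy feed values are illustrative only and are not part of the patch.

    import numpy as np
    import tensorflow as tf

    x = tf.placeholder(tf.float32, shape=(1, 4))        # input activations
    target = tf.placeholder(tf.float32, shape=(1, 3))   # preferred (one-hot) output
    w = tf.Variable(tf.random_normal((4, 3), seed=1))   # trainable weights
    logits = tf.matmul(x, w)                            # actual network output (pre-softmax)

    # labels = preferred output, logits = actual output, as in the fixed kt_learning
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=target, logits=logits))
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    init = tf.global_variables_initializer()           # built after the whole graph
    with tf.Session() as sess:
        sess.run(init)
        for _ in range(10):
            _, current_loss = sess.run(
                [train_step, loss],
                feed_dict={x: np.ones((1, 4), np.float32),
                           target: np.array([[0.0, 1.0, 0.0]], np.float32)})
        print(current_loss)  # loss decreases over the ten steps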