diff --git a/spider_control/__pycache__/knowledge_transfer.cpython-36.pyc b/spider_control/__pycache__/knowledge_transfer.cpython-36.pyc
index 4a1553cc388fa8b34a2971295172a0a9547783df..e768297ea7f4e599855ac1349064733ffde88fc4 100644
Binary files a/spider_control/__pycache__/knowledge_transfer.cpython-36.pyc and b/spider_control/__pycache__/knowledge_transfer.cpython-36.pyc differ
diff --git a/spider_control/__pycache__/leg_dynamics.cpython-36.pyc b/spider_control/__pycache__/leg_dynamics.cpython-36.pyc
index e3acb690c50ce73879f6e6c7fcd3c35ad26aca41..ad33e58cdb0f4b55e3658926f86a7f47fa4f48de 100644
Binary files a/spider_control/__pycache__/leg_dynamics.cpython-36.pyc and b/spider_control/__pycache__/leg_dynamics.cpython-36.pyc differ
diff --git a/spider_control/__pycache__/my_network.cpython-36.pyc b/spider_control/__pycache__/my_network.cpython-36.pyc
index 1da9b53fd8d9cb90cccee872aab7df580ff333ac..eed87b38b4403fe9c137f7359e9ed47455bf8153 100644
Binary files a/spider_control/__pycache__/my_network.cpython-36.pyc and b/spider_control/__pycache__/my_network.cpython-36.pyc differ
diff --git a/spider_control/control1.py b/spider_control/control1.py
index fda6c348d12f4bcab9e94aed3d12a27755f2710f..b2d6575eff2263ba27bf40c6684355f401f2072d 100755
--- a/spider_control/control1.py
+++ b/spider_control/control1.py
@@ -45,8 +45,8 @@ def joint_callback(data, args):
     simulation_mode = args[0]  # to distinguish between training and testing
     model_number = args[1]  # to distinguish the type of model to train in case of training
     ou = tf.one_hot([model_number-1], 4)
-    with tf.Session() as session:
-        out = session.run(ou)
+    #with tf.Session() as session:
+    #    out = session.run(ou)
     #out = kt.one_hot_encoding(model_number)
     rospy.loginfo(data.position)  # testing
     pub_msg = JointState()  # Make a new msg to publish results
@@ -60,7 +60,7 @@ def joint_callback(data, args):
     carollis_inp = leg.carollis_input()
     print("carollis input is ")  # testing
     print(carollis_inp)
-    knowledge_out = knowledge.run(carollis_inp)
+    knowledge_out, out_tensor = knowledge.run(carollis_inp)
     model_num = np.where(knowledge_out == np.amax(knowledge_out))
     reward = leg.leg_run()
     if(model_num == 0):
@@ -80,7 +80,7 @@ def joint_callback(data, args):
         pub_msg.position = new_position
         model4.nn_learn(reward)
     if(simulation_mode == 'train'):
-        knowledge.learn(out, knowledge_out)
+        knowledge.learn(out_tensor, ou)
     leg.update_angles(new_position)
     leg.update_effort(effort)
     leg.update_velocity(velocity)
@@ -98,7 +98,7 @@ if __name__=='__main__':
     eff = 100
     tact = np.zeros(8)
     leg = ld.Leg_attribute(pos, vel, eff, tact)
-    mode=input('please enter whether you want to test or train ?')
+    mode = input('please enter whether you want to test or train ?')
     if(mode == 'train'):
         model_number = int(input('please enter the model you wish to train i.e. 1, 2, 3, 4 \n '))
     rospy.init_node('joint_logger_node', anonymous = True)
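Review note: with the session-based evaluation of `ou` commented out in `joint_callback`, `ou` stays a graph tensor and is only consumed by `knowledge.learn` in training mode. If a plain array target is ever needed again, the one-hot can be built without opening a session at all. A minimal sketch (the helper name `one_hot_target` is hypothetical, not part of this repo):

```python
import numpy as np

def one_hot_target(model_number, num_models=4):
    """One-hot target as a plain NumPy array, no tf.Session required.

    Sketch only: assumes the consumer accepts a NumPy array (or wraps it
    with tf.compat.v1.convert_to_tensor) instead of the evaluated `out`.
    """
    target = np.zeros(num_models, dtype=np.float64)
    target[model_number - 1] = 1.0
    return target

print(one_hot_target(2))  # -> [0. 1. 0. 0.]
```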
diff --git a/spider_control/knowledge_transfer.py b/spider_control/knowledge_transfer.py
index 0f1b10c3c4e10a594dbff0c9bfb43613d833cfe7..8d4271ed458cb25f6cbc262a51fbf483f61ee044 100755
--- a/spider_control/knowledge_transfer.py
+++ b/spider_control/knowledge_transfer.py
@@ -62,35 +62,35 @@ class MultiClassLogistic:
         for i in range(len(inputs)):
             inputs[i] = (inputs[i]-a_min)/(a_max-a_min)
 
-    def input_weight(self):
-        with tf.compat.v1.variable_scope("weight_in", reuse=tf.compat.v1.AUTO_REUSE):
-            v = tf.compat.v1.get_variable("weight_input", dtype=tf.float64, shape=[self.neurons, 1], initializer=self.weight_initer)
-        return v
+    #def input_weight(self):
+    #    with tf.compat.v1.variable_scope("weight_in", reuse=tf.compat.v1.AUTO_REUSE):
+    #        v = tf.compat.v1.get_variable("weight_input", dtype=tf.float64, shape=[self.neurons, 1], initializer=self.weight_initer)
+    #    return v
 
-    def hid_weight(self):
-        with tf.compat.v1.variable_scope("weight_hid", reuse=tf.compat.v1.AUTO_REUSE):
-            v = tf.compat.v1.get_variable("weight_hidden", dtype=tf.float64, shape=[self.neurons, 1], initializer=self.weight_initer1)
-        return v
+    #def hid_weight(self):
+    #    with tf.compat.v1.variable_scope("weight_hid", reuse=tf.compat.v1.AUTO_REUSE):
+    #        v = tf.compat.v1.get_variable("weight_hidden", dtype=tf.float64, shape=[self.neurons, 1], initializer=self.weight_initer1)
+    #    return v
 
-    def out_weight(self):
-        with tf.compat.v1.variable_scope("weight_out", reuse=tf.compat.v1.AUTO_REUSE):
-            v = tf.compat.v1.get_variable("weight_input", dtype=tf.float64, shape=[4, 2], initializer=self.weight_initer2)
-        return v
+    #def out_weight(self):
+    #    with tf.compat.v1.variable_scope("weight_out", reuse=tf.compat.v1.AUTO_REUSE):
+    #        v = tf.compat.v1.get_variable("weight_input", dtype=tf.float64, shape=[4, 2], initializer=self.weight_initer2)
+    #    return v
 
-    def bias_in(self):
-        with tf.compat.v1.variable_scope("bias_in", reuse=tf.compat.v1.AUTO_REUSE):
-            v = tf.compat.v1.get_variable("bias_input", dtype=tf.float64, shape=[self.neurons, 1], initializer=self.bias_initer)
-        return v
+    #def bias_in(self):
+    #    with tf.compat.v1.variable_scope("bias_in", reuse=tf.compat.v1.AUTO_REUSE):
+    #        v = tf.compat.v1.get_variable("bias_input", dtype=tf.float64, shape=[self.neurons, 1], initializer=self.bias_initer)
+    #    return v
 
-    def bias_hid(self):
-        with tf.compat.v1.variable_scope("bias_hidd", reuse=tf.compat.v1.AUTO_REUSE):
-            v = tf.compat.v1.get_variable("bias_hidden", dtype=tf.float64, shape=[self.neurons, 1], initializer=self.bias_initer1)
-        return v
+    #def bias_hid(self):
+    #    with tf.compat.v1.variable_scope("bias_hidd", reuse=tf.compat.v1.AUTO_REUSE):
+    #        v = tf.compat.v1.get_variable("bias_hidden", dtype=tf.float64, shape=[self.neurons, 1], initializer=self.bias_initer1)
+    #    return v
 
-    def bias_out(self):
-        with tf.compat.v1.variable_scope("bias_outt", reuse=tf.compat.v1.AUTO_REUSE):
-            v = tf.compat.v1.get_variable("bias_out", dtype=tf.float64, shape=[4, 1], initializer=self.bias_initer2)
-        return v
+    #def bias_out(self):
+    #    with tf.compat.v1.variable_scope("bias_outt", reuse=tf.compat.v1.AUTO_REUSE):
+    #        v = tf.compat.v1.get_variable("bias_out", dtype=tf.float64, shape=[1, 4], initializer=self.bias_initer2)
+    #    return v
 
     def run(self, carollis_input):
         c_in = normalize(carollis_input, axis=0)
         print('normalized carollis input is \n')
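Review note: the inlining in the next hunk relies on the get-or-create semantics of `tf.compat.v1.AUTO_REUSE`: the first call in a scope creates the variable, and every later call returns that same graph variable instead of raising a duplicate-variable error, so `run()` can be invoked repeatedly. A minimal standalone sketch of that behavior (scope and variable names here are illustrative only):

```python
import tensorflow as tf
tf.compat.v1.disable_eager_execution()  # assumes TF1-style graph mode, as in this repo

def get_or_create(scope, name, shape):
    # AUTO_REUSE: create on the first call, reuse the same variable afterwards
    with tf.compat.v1.variable_scope(scope, reuse=tf.compat.v1.AUTO_REUSE):
        return tf.compat.v1.get_variable(name, dtype=tf.float64, shape=shape,
                                         initializer=tf.compat.v1.zeros_initializer())

a = get_or_create("weight_in", "weight_input", [3, 1])
b = get_or_create("weight_in", "weight_input", [3, 1])
assert a is b  # one variable in the graph, no "already exists" error
```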
@@ -98,28 +98,53 @@ class MultiClassLogistic:
         c_in = np.array(c_in)
         c_input = tf.compat.v1.convert_to_tensor(c_in, tf.float64)
         #'finding the output of the input layer'
-        weight_i = self.input_weight()
-        weight_h = self.hid_weight()
-        weight_o = self.out_weight()
-        bias_i = self.bias_in()
-        bias_h = self.bias_hid()
-        bias_o = self.bias_out()
-        init = tf.global_variables_initializer()
+        with tf.compat.v1.variable_scope("weight_in", reuse=tf.compat.v1.AUTO_REUSE):
+            weight_i = tf.compat.v1.get_variable("weight_input", dtype=tf.float64, shape=[self.neurons, 1], initializer=self.weight_initer)
+        with tf.compat.v1.variable_scope("weight_hid", reuse=tf.compat.v1.AUTO_REUSE):
+            weight_h = tf.compat.v1.get_variable("weight_hidden", dtype=tf.float64, shape=[self.neurons, 1], initializer=self.weight_initer1)
+        with tf.compat.v1.variable_scope("weight_out", reuse=tf.compat.v1.AUTO_REUSE):
+            weight_o = tf.compat.v1.get_variable("weight_input", dtype=tf.float64, shape=[4, 2], initializer=self.weight_initer2)
+        with tf.compat.v1.variable_scope("bias_in", reuse=tf.compat.v1.AUTO_REUSE):
+            bias_i = tf.compat.v1.get_variable("bias_input", dtype=tf.float64, shape=[self.neurons, 1], initializer=self.bias_initer)
+        with tf.compat.v1.variable_scope("bias_hid", reuse=tf.compat.v1.AUTO_REUSE):
+            bias_h = tf.compat.v1.get_variable("bias_hidden", dtype=tf.float64, shape=[self.neurons, 1], initializer=self.bias_initer1)
+        with tf.compat.v1.variable_scope("bias_out", reuse=tf.compat.v1.AUTO_REUSE):
+            bias_o = tf.compat.v1.get_variable("bias_out", dtype=tf.float64, shape=[1, 4], initializer=self.bias_initer2)
+        #weight_i = self.input_weight()
+        #weight_h = self.hid_weight()
+        #weight_o = self.out_weight()
+        #bias_i = self.bias_in()
+        #bias_h = self.bias_hid()
+        #bias_o = self.bias_out()
+        uninitialized_variables = []
         sess = tf.Session()
+        print("printing the names of all variables")
+        print(tf.all_variables())
+        for var in tf.all_variables():
+            try:
+                sess.run(var)
+            except tf.errors.FailedPreconditionError:
+                uninitialized_variables.append(var)
+        init = tf.variables_initializer(uninitialized_variables)
         sess.run(init)
         knowledge_input = tf.add(tf.multiply(c_input, weight_i), bias_i)
-        sess.run(knowledge_input)
+        #sess.run(knowledge_input)
         knowledge_hidden = tf.nn.leaky_relu(knowledge_input, alpha=0.01)
         #'calculating the output of hidden layer'
         knowledge_hidden_output = 3.14*(tf.add(tf.multiply(knowledge_hidden, weight_h), bias_h))  # input function of hidden layer
         knowledge_hidden_out = tf.nn.leaky_relu(knowledge_hidden_output, alpha=0.01, name='leaky_relu')
-        sess.run(knowledge_hidden_out)
+        #sess.run(knowledge_hidden_out)
         #'calculating the input of output layer'
         knowledge_hidden_out = tf.reshape(knowledge_hidden_out, [4, 2])  # for quadrant method
         out_mult = tf.multiply(knowledge_hidden_out, weight_o)
         out_add = tf.add(out_mult[:, 0], out_mult[:, 1])
         in_out = tf.add(out_add, bias_o)
+        #deb = sess.run(in_out)
+        #print("printing the shape of tensor in last layer")
+        #print(tf.shape(in_out))
+        #print(deb)
+        in_out = tf.reshape(in_out, [1, 4])
         #i_o = sess.run(in_out)
         #r_i_o = np.reshape(i_o, (8, 1))
         #outt = np.add(r_i_o[0:4, 0], r_i_o[4:8, 0])
@@ -129,9 +154,10 @@ class MultiClassLogistic:
         output = sess.run(in_out)
         #output = sess.run(in_out)
         #'finding the softmax output of the neurons'
-        softmax_output = tf.nn.softmax(in_out)
-        output = sess.run(softmax_output)
-        return output
+        #just testing
+        #softmax_output = tf.nn.softmax(in_out)
+        #output = sess.run(softmax_output)
+        return output, in_out
 
     def learn(self, preferred_out, soft_out):
         self.kt_learning(0.1, soft_out, preferred_out)
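Review note: the new probe loop runs every variable through `sess.run` and catches `FailedPreconditionError` one variable at a time. TF1's graph API ships a built-in report for exactly this, so the "initialize only what is missing" step can be a single fetch plus one initializer. A sketch of that alternative (the function name `initialize_uninitialized` is hypothetical):

```python
import tensorflow as tf

def initialize_uninitialized(sess):
    """Initialize only the variables `sess` has not initialized yet.

    Same intent as the try/except probe in run(), but one graph call:
    report_uninitialized_variables() yields the byte-string names of all
    uninitialized global variables.
    """
    uninit_names = set(sess.run(tf.compat.v1.report_uninitialized_variables()))
    uninit_vars = [v for v in tf.compat.v1.global_variables()
                   if v.name.split(':')[0].encode() in uninit_names]
    if uninit_vars:
        sess.run(tf.compat.v1.variables_initializer(uninit_vars))

# usage (graph mode): sess = tf.compat.v1.Session(); initialize_uninitialized(sess)
```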