import numpy as np
from K_Means import kMeans
from DBN import *
import csv
import tensorflow as tf
from switch_case import switch
# Load the training features from pemstrain.csv.
# The CSV is feature-major (one feature per row); we take the first 10
# columns of the first 4 rows and transpose them into a (10, 4)
# sample-by-feature float array. The `with` block replaces a leaked
# open() handle, and the comprehension replaces a hand-written 10x4
# transposed literal.
with open('pemstrain.csv', 'r') as f:
    training_set_k = [row for row in csv.reader(f)]
training_set_1 = [[training_set_k[i][j] for i in range(4)] for j in range(10)]
training_set_1n = np.array(training_set_1, dtype="float64")
# Load the training labels from pemstrainlabel.csv, mirroring the feature
# load above: first 10 columns of the first 4 rows, transposed to a
# (10, 4) float array. `with` closes the file (the original leaked the
# handle) and the comprehension replaces the duplicated 10x4 literal.
with open('pemstrainlabel.csv', 'r') as f:
    training_label_k = [row for row in csv.reader(f)]
training_label_1 = [[training_label_k[i][j] for i in range(4)] for j in range(10)]
training_label_1n = np.array(training_label_1, dtype="float64")
# Load the test features from pemstest.csv: first 2 columns of the first
# 4 rows, transposed into a (2, 4) float array (2 test samples, same 4
# features as the training set). `with` fixes the leaked file handle.
with open('pemstest.csv', 'r') as f:
    testing_set_k = [row for row in csv.reader(f)]
testing_set_1 = [[testing_set_k[i][j] for i in range(4)] for j in range(2)]
testing_set_1n = np.array(testing_set_1, dtype="float64")
# Load the test labels: first 2 columns of the first 4 rows, transposed
# to a (2, 4) float array.
# NOTE(review): this re-reads 'pemstrainlabel.csv' — the TRAINING label
# file — for the TEST labels. 'pemstestlabel.csv' was probably intended;
# kept as-is pending confirmation against the data files.
with open('pemstrainlabel.csv', 'r') as f:
    testing_label_k = [row for row in csv.reader(f)]
testing_label_1 = [[testing_label_k[i][j] for i in range(4)] for j in range(2)]
testing_label_1n = np.array(testing_label_1, dtype="float64")
# print(training_set_1)
# Train a deep belief net on the full (10-sample, 4-feature) training set:
# layer-wise RBM pre-training, then supervised fine-tuning (see DBN module).
dbn = DBN(input=training_set_1n,label=training_label_1n,n_ins=4 , hidden_layer_sizes=[128,128,128], n_outs= 4)
dbn.pretrain(lr=0.3,k=1,epochs=40)
dbn.finetune(0.1,40)
# print(dbn.get_last_layer().W)
# k-means (k=2) over the top RBM layer's weight matrix. ClusterAssiment[i,0]
# holds row i's cluster id; the loop below uses it to split columns into
# two groups (0.0 vs anything else).
center,ClusterAssiment = kMeans(dbn.rbm_layers[-1].W,k=2)
# print(ClusterAssiment)
# print(counter)
# print(dbn.log_layer.W.T)
# Partition the data columns into two groups according to the k-means
# cluster assigned to each output unit: cluster 0.0 goes to the *_1t1
# lists, everything else to *_1t2. counter1/counter2 remember the
# original column indices of each group.
training_set_1t1, training_set_1t2 = [], []
training_label_1t1, training_label_1t2 = [], []
testing_set_1t1, testing_set_1t2 = [], []
counter1, counter2 = [], []
for col in range(len(dbn.log_layer.W.T)):
    # Select the destination group once, then append each slice to it.
    if ClusterAssiment[col, 0] == 0.0:
        feats, labels, tests, ids = (
            training_set_1t1, training_label_1t1, testing_set_1t1, counter1)
    else:
        feats, labels, tests, ids = (
            training_set_1t2, training_label_1t2, testing_set_1t2, counter2)
    feats.append(training_set_1n[:, col])
    labels.append(training_label_1n[:, col])
    tests.append(testing_set_1n[:, col])
    ids.append(col)
# Materialise the training-side groups as arrays (one row per column taken).
training_set_1t1_n = np.array(training_set_1t1)
training_set_1t2_n = np.array(training_set_1t2)
training_label_1t1_n = np.array(training_label_1t1)
training_label_1t2_n = np.array(training_label_1t2)
# Train one smaller DBN per non-empty cluster, on that cluster's columns
# (inputs transposed so samples run along rows). If one cluster received
# every column, train a single DBN on it instead.
if len(training_set_1t1_n)!=0 and len(training_set_1t2_n)!=0:
    # NOTE(review): these branches use pretrain(0.3,1,2) — (lr, k, epochs)
    # per the keyword call at the top of the file — and a [16]-unit hidden
    # layer, while the single-cluster branches below call pretrain(0.3, 2)
    # (which would bind k=2, not epochs=2) and bare finetune() with
    # [128,128,128]. Presumably unintended inconsistency; confirm against
    # DBN.pretrain/finetune defaults.
    dbn1 = DBN(input=training_set_1t1_n.T,label=training_label_1t1_n.T,n_ins=len(training_set_1t1_n),hidden_layer_sizes=[16],n_outs=len(training_set_1t1_n))
    dbn1.pretrain(0.3,1,2)
    dbn1.finetune(0.1,2)
    dbn2 = DBN(input=training_set_1t2_n.T,label=training_label_1t2_n.T,n_ins=len(training_set_1t2_n),hidden_layer_sizes=[16],n_outs=len(training_set_1t2_n))
    dbn2.pretrain(0.3,1,2)
    dbn2.finetune(0.1,2)
elif len(training_set_1t1_n)==0:
    # Cluster 1 is empty: train only on cluster 2's columns.
    dbn2 = DBN(input=training_set_1t2_n.T, label=training_label_1t2_n.T, n_ins=len(training_set_1t2_n),
               hidden_layer_sizes=[128,128,128], n_outs=len(training_set_1t2_n))
    dbn2.pretrain(0.3, 2)
    dbn2.finetune()
else:
    # Cluster 2 is empty: train only on cluster 1's columns.
    dbn1 = DBN(input=training_set_1t1_n.T, label=training_label_1t1_n.T, n_ins=len(training_set_1t1_n),
               hidden_layer_sizes=[128,128,128], n_outs=len(training_set_1t1_n))
    dbn1.pretrain(0.3, 2)
    dbn1.finetune()
# Dump the full DBN's logistic output-layer weights.
print(dbn.log_layer.W)
def share_train(epchos,training_x1,training_x2,initial_shared_weights_1,initial_shared_weights_2,training_label_1,training_label_2,Y1_shared_weights,Y2_shared_weights,Y3_shared_weights,Y4_shared_weights):
d_y_1 = []
d_y_2 = []
y1_given_x1 = softmax(np.dot(training_x1, initial_shared_weights_1))
y2_given_x1 = softmax(np.dot(training_x2, initial_shared_weights_2))
# y_given_x1 = softmax(np.dot(training_x,initial_shared_weights))
# print(initial_shared_weights_1.shape)
# print(Y1_shared_weights.shape)
# print(training_x1.shape)
# print(training_x2.shape)
# print(y1_given_x1.shape)
# print(training_label_1t1_n.T.shape)
# print(y1_given_x1.shape)
for epcho in range(epchos):
# p_y_given_x1 = softmax(np.dot(y_given_x1,Y1_shared_weights))
for i in range(len(counter1)):
if counter1[i] == 0:
p_y1_given_x_1 = softmax(np.dot(y1_given_x1, Y1_shared_weights))
d_y1 = training_label_1[i,:].T.reshape(10,1)-p_y1_given_x_1
# d_y_1.append(d_y1)
# print(p_y1_given_x_1.shape)
Y1_shared_weights += 0.1 * np.dot(y1_given_x1.T,d_y1)
elif counter1[i] == 1:
p_y2_given_x_1 = softmax(np.dot(y1_given_x1, Y2_shared_weights))
d_y2 = training_label_1[i,:].T.reshape(10,1)-p_y2_given_x_1
# d_y_1.append(d_y2)
# print(p_y2_given_x_1.shape)
Y2_shared_weights += 0.1 * np.dot(y1_given_x1.T,d_y2)
elif counter1[i] == 2:
p_y3_given_x_1 = softmax(np.dot(y1_given_x1, Y3_shared_weights))
d_y3 = training_label_1[i,:].T.reshape(10,1)-p_y3_given_x_1
# d_y_1.append(d_y3)
# print(p_y3_given_x_1.shape)
Y3_shared_weights += 0.1 * np.dot(y1_given_x1.T,d_y3)
elif counter1[i] == 3:
p_y4_given_x_1 = softmax(np
# NOTE(review): source file is truncated here, mid-expression inside
# share_train (pagination artifacts from the original scrape removed).