Commit 340ab605 authored by Sensing

Delete ResNet_v2.py

parent 15e3339e
#coding=utf-8
'''
Strided ("scaling") convolutions; 1 receiving device; 2 channel layers; window length 20; indices aligned; 523;
modified the convolution kernels and strides; added one more convolution layer and one strided convolution;
1 batch; no filtering.
'''
import tensorflow as tf
import numpy as np
#import matplotlib.pyplot as plt
import pickle
#from DWTfliter import dwtfilter
import normalization
import cv2
import math
import os
import random
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# from stn import spatial_transformer_network as transformer
#import pylab
os.environ['CUDA_VISIBLE_DEVICES']='0'
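# ResNet-style regressor (presumably for WiFi-CSI based pose estimation): it maps a
# 30 x time_scale x 4 CSI window to 51 outputs, i.e. 17 body keypoints x 3 coordinates,
# and is trained with a Huber loss plus an L2 joint-distance term.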
class network():
def __init__(
self,
train_data = None,
batch_size = 16,
learning_rate = 0.0001, #0.0001
training_epochs = 4, # ######
time_scale = 20,
param_file = True,
is_train = False
):
self.train = train_data
self.batch_size = batch_size
self.lr_init = learning_rate
self.is_train = is_train
self.training_epochs = training_epochs
self.time_scale = time_scale
self.build()
print("Neural networks build!")
self.saver = tf.train.Saver()
# print("Will load the graph!")
# self.saver = tf.train.import_meta_graph('./checkpoint5/train.ckpt.meta')
# self.saver = tf.train.Saver(var_list=tf.global_variables())
self.sess = tf.Session()
# sess = tf.Session(config=config)
init = tf.global_variables_initializer()
self.sess.run(init)
if is_train:
if param_file:
self.saver.restore(self.sess, "./checkpoint5/train.ckpt")
print("loading neural-network params...")
self.learn()
else:
print("learning initialization!")
self.learn()
else:
self.saver.restore(self.sess, "./checkpoint5/train.ckpt")
print("loading neural-network params_final...")
self.show()
def build(self):
# input is N*30*time_scale*4: 4 channels, each of size 30 x time_scale
self.input = tf.placeholder(tf.float32, shape = [None, 30, self.time_scale, 4], name='csi_input')
self.tag = tf.placeholder(tf.float32, shape = [None, 1, 51, 1], name ='kp_origin')
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.lr = tf.train.exponential_decay(learning_rate=self.lr_init, global_step=self.global_step,
decay_steps=2050000, decay_rate=0.90, staircase=True)
self.pic_num = tf.placeholder(tf.float32, name ='pic_num')
with tf.variable_scope('CNN'):
w_init = tf.random_normal_initializer(0., 0.1)
b_init = tf.constant_initializer(0.1)
self.block_1 = self.resnet_layer(self.input, 4, 4, w_init, b_init, 1)
self.block_2 = self.resnet_layer(self.block_1, 4, 8, w_init, b_init, 2)
self.block_3 = self.resnet_layer(self.block_2, 8, 8, w_init, b_init, 3)
self.block_4 = self.resnet_layer(self.block_3, 8, 16, w_init, b_init, 4)
self.block_5 = self.resnet_layer(self.block_4, 16, 16, w_init, b_init, 5)
self.block_6 = self.resnet_layer(self.block_5, 16, 64, w_init, b_init, 6)
self.block_7 = self.resnet_layer(self.block_6, 64, 64, w_init, b_init, 7)
self.block_8 = self.resnet_layer(self.block_7, 64, 256, w_init, b_init, 8)
self.block_9 = self.resnet_layer(self.block_8, 256, 256, w_init, b_init, 9)
self.block_10 = self.resnet_layer(self.block_9, 256, 1024, w_init, b_init, 10)
self.block_11 = self.resnet_layer(self.block_10, 1024, 1024, w_init, b_init, 11)
self.block_12 = self.resnet_layer(self.block_11, 1024, 2048, w_init, b_init, 12)
self.block_13 = self.resnet_layer(self.block_12, 2048, 2048, w_init, b_init, 13)  # blocks 12-13 are built but not used by the dense head below
self.fc1 = tf.layers.dense(self.block_11, 512) # TODO: does this need an activation function?
self.fc2 = tf.layers.dense(self.fc1, 51)
self.output = tf.reshape(self.fc2, [-1, 1, 51, 1])
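# 51 = 17 keypoints x 3 coordinates; the same layout is used by L2_loss and draw_save.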
with tf.variable_scope('loss'):
"""
HUBER LOSS + L2
"""
hubers = tf.losses.huber_loss(self.tag, self.output, delta=0.75) # note: the default delta is 1; whether it needs adjusting depends on later results
hubers_loss = tf.reduce_sum(hubers)
L2 = self.L2_loss(self.tag,self.output,17.0,self.pic_num)
self.loss = tf.add(L2, hubers_loss) # watch the tensor dimensions and adjust promptly if they change
with tf.variable_scope('train'):
self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.loss,global_step=self.global_step)
def SENET(self, feature_map, ratio):
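# Squeeze-and-Excitation block: global average pooling followed by two fully connected
# layers and a sigmoid produces per-channel weights that rescale the feature map.
# Note: this helper is not called from build(); calling it more than once would also
# require handling variable-scope reuse, since the scope name is fixed.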
with tf.variable_scope('squeeze_and_excitation'):
w_initializer = tf.random_normal_initializer(0., 0.1)
b_initializer = tf.constant_initializer(0.1)
shape = feature_map.get_shape().as_list()
channel_out = shape[3]
print(shape[0], shape[1], shape[2], shape[3])
squeeze = tf.nn.avg_pool(feature_map, [1, shape[1], shape[2], 1], [1, shape[1], shape[2], 1],
padding="SAME")
squeeze = tf.reshape(squeeze, [-1, channel_out])
w_excitation1 = tf.get_variable('FC_w_1', [channel_out, channel_out // ratio], initializer=w_initializer)
b_excitation1 = tf.get_variable('FC_b_1', [channel_out // ratio, ], initializer=b_initializer)
excitation1_output = tf.nn.relu(tf.matmul(squeeze, w_excitation1) + b_excitation1)
w_excitation2 = tf.get_variable('FC_w_2', [channel_out // ratio, channel_out], initializer=w_initializer)
b_excitation2 = tf.get_variable('FC_b_2', [channel_out, ], initializer=b_initializer)
excitation2_output = tf.nn.sigmoid(tf.matmul(excitation1_output, w_excitation2) + b_excitation2)
excitation_output = tf.reshape(excitation2_output, [-1, 1, 1, channel_out])
h_output = excitation_output * feature_map
return h_output
def resnet_layer(self,input, in_channel,out_channel,init_w,init_b,layer_num):
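# Bottleneck residual block: 1x1 conv -> 3x3 conv -> 1x1 conv, each followed by batch
# normalization, with a shortcut connection added before the final ReLU.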
W_1 = tf.get_variable('w'+str(layer_num)+'_1', [1, 1, in_channel, in_channel], initializer=init_w)
b_1 = tf.get_variable('b'+str(layer_num)+'_1', [in_channel, ], initializer=init_b)
conv_1 = tf.add(tf.nn.conv2d(input, W_1, strides=[1, 1, 1, 1], padding='SAME'), b_1)
bn_1 = tf.layers.batch_normalization(conv_1, training=True)
input_1 = tf.nn.relu(bn_1)
W_2 = tf.get_variable('w' + str(layer_num)+'_2', [3, 3, in_channel, in_channel], initializer=init_w)
b_2 = tf.get_variable('b' + str(layer_num)+'_2', [in_channel, ], initializer=init_b)
# Layers 3, 5, 7, 9 and 11 downsample the feature map by a factor of 2 in the 3x3 convolution.
downsample = layer_num in (3, 5, 7, 9, 11)
stride = 2 if downsample else 1
conv_2 = tf.add(tf.nn.conv2d(input_1, W_2, strides=[1, stride, stride, 1], padding='SAME'), b_2)
bn_2 = tf.layers.batch_normalization(conv_2, training=True)
input_2 = tf.nn.relu(bn_2)
W_3 = tf.get_variable('w' + str(layer_num)+'_3', [1, 1, in_channel, out_channel], initializer=init_w)
b_3 = tf.get_variable('b' + str(layer_num)+'_3', [out_channel, ], initializer=init_b)
conv_3 = tf.add(tf.nn.conv2d(input_2, W_3, strides=[1, 1, 1, 1], padding='SAME'), b_3)
bn_3 = tf.layers.batch_normalization(conv_3, training=True)
if in_channel == out_channel and not downsample:
# Identity shortcut: the input already matches the main path in channels and spatial size.
output = tf.nn.relu(tf.add(bn_3, input))
else:
# Projection shortcut: a 1x1 convolution matches the shortcut to the main path
# in both channel count and spatial resolution.
W_4 = tf.get_variable('w' + str(layer_num) + '_4', [1, 1, in_channel, out_channel], initializer=init_w)
b_4 = tf.get_variable('b' + str(layer_num) + '_4', [out_channel, ], initializer=init_b)
conv_4 = tf.add(tf.nn.conv2d(input, W_4, strides=[1, stride, stride, 1], padding='SAME'), b_4)
bn_4 = tf.layers.batch_normalization(conv_4, training=True)
output = tf.nn.relu(tf.add(bn_3, bn_4))
return output
def L2_loss(self,tag,output,point_num,pic_num):
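# Mean per-joint position error: the (tag - output) difference is reshaped to 17 joints x 3
# coordinates, the Euclidean distance is taken per joint, summed, then averaged over the
# joints (point_num) and over the images in the batch (pic_num).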
x1 = tf.reshape(tf.subtract(tag, output), (-1, 17, 3))
loss = tf.divide(tf.divide(tf.reduce_sum(tf.squeeze(tf.reduce_sum(tf.squeeze(tf.sqrt(
tf.reduce_sum(tf.square(x1),axis=-1))),axis=-1))),point_num),pic_num) # this may still need tuning
return loss
############# need to be modified ##################
def batch_Convert(self, csidata, key_point, csi_index_list):
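# Build one training batch: for each keypoint frame, take the CSI window of length
# time_scale that ends at the matching entry of csi_index_list, keeping only every
# 4th frame to thin out the sequence.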
csidata_batch, kp_batch = None, None
for index in range(len(key_point)):
xs = csidata[:,csi_index_list[index]-self.time_scale+1:csi_index_list[index]+1 ,:]
ys = key_point[index]
# drop frames: keep only every 4th sample
if (index)%4==0:
csidata_batch = np.array([xs]) if csidata_batch is None else np.append(csidata_batch, [xs], axis=0)
kp_batch = np.array([ys]) if kp_batch is None else np.append(kp_batch, [ys], axis= 0)
print(csidata_batch.shape)
return csidata_batch, kp_batch
############# need to be modified ##################
def learn(self):
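# Training loop: accumulate batch_size (4) pickle files into one large batch, run 500
# optimizer steps on it (saving predicted and ground-truth skeleton images each step),
# and checkpoint the parameters every 2 epochs.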
stop_flag=0
for j in range(self.training_epochs):
train_lineary_process = float(j) / self.training_epochs
print('train_lineary_process is ', train_lineary_process)
flag=0
batch_size=4
batch_count=0
batch_xs_size, batch_ys_size = None, None
for train_data in self.train:
with open('loss.txt', 'a') as f:
f.write('this is '+str(j)+' '+str(flag)+ ' \n')
flag+=1
# print train_data[2].shape
xs = train_data[0].astype(np.float32)
xs = np.nan_to_num(xs)
batch_xs, batch_ys = self.batch_Convert(xs, train_data[1], train_data[2])
batch_xs = np.reshape(batch_xs, [-1, 30, self.time_scale, 4])
batch_ys = batch_ys.astype(np.float32)
batch_ys = np.nan_to_num(batch_ys)
batch_ys = np.reshape(batch_ys, [-1, 1, 51, 1])
batch_xs_size = np.array(batch_xs) if batch_xs_size is None else np.append(batch_xs_size, batch_xs, axis=0)
batch_ys_size = np.array(batch_ys) if batch_ys_size is None else np.append(batch_ys_size, batch_ys, axis=0)
batch_count+=1
if batch_count%batch_size==0:
batch_count=batch_count-batch_size
print('batch_xs_size.shape is '+ str(batch_xs_size.shape))
print('batch_ys_size.shape is '+ str(batch_ys_size.shape))
batch_pic_num = np.array(batch_ys_size.shape[0])
batch_pic_num = batch_pic_num.astype('float32')
print('this batch contains ' + str(batch_pic_num) + ' pictures')
for i in range(500):
loss = 0
output, _, c ,lr,global_step= self.sess.run([self.output,self.optimizer, self.loss,self.lr,self.global_step],
feed_dict={self.input: batch_xs_size, self.tag: batch_ys_size,
self.pic_num:batch_pic_num})
for ii in range(len(output)):
out = np.reshape(output[ii],[17,3])
draw_save(out,'E:/test/picture/generate/2'+'_'+str(ii)+'.jpg')
real = np.reshape(batch_ys_size[ii],[17,3])
draw_save(real, 'E:/test/picture/real/2' + '_' + str(ii) + '.jpg')
loss += c
# if (j)%5==0 and i==5:
if i == 5:
print('global_step is',global_step)
print('learning rate is ',lr)
if math.isnan(loss):
stop_flag = 1
print('loss is nan')
break
if np.any(np.isnan(batch_xs)):
print("Input Nan Type Error!! ")
if np.any(np.isnan(batch_ys)):
print("Tag Nan Type Error!! ")
if i % 5 == 0:
print(
"Total Epoch:", '%d' % (j), "Pic Rpoch:", '%d' % (i), "total cost=", "{:.9f}".format(loss))
batch_xs_size, batch_ys_size = None, None
if stop_flag==1:
break
if stop_flag==1:
break
if (j+1)%2==0:
self.saver.save(self.sess, 'E:/process/train/checkpoint'+str(j+1)+'/train.ckpt')
print('stored ' + str(j + 1) + ' epochs of params')
print("Optimization Finished!")
self.saver.save(self.sess, "E:/process/train/check904/train.ckpt")
def show(self):
"""
display the performance of autoencoder
:return: a autoencoder model using unsupervised learning
"""
mkdir('generator_5')
mkdir('target_5')
count_all = 1
count=1
for train_data in self.train:
# print len(train_data[0])
# print len(train_data[1])
xs = train_data[0].astype(np.float32)
xs = np.nan_to_num(xs)
batch_xs, batch_ys = self.batch_Convert(xs, train_data[1], train_data[2])
# print batch_xs.shape
batch_xs = np.reshape(batch_xs, [-1, 30, self.time_scale, 4])
print(batch_xs.shape)
batch_ys = batch_ys.astype(np.float32)
batch_ys = np.nan_to_num(batch_ys)
batch_ys = np.reshape(batch_ys, [-1, 1, 51])
batch_pic_num = np.array(batch_ys.shape[0])
batch_pic_num = batch_pic_num.astype('float32')
print(batch_ys.shape)
output = self.sess.run([self.output],feed_dict={self.input: batch_xs})
output = np.reshape(output, (-1,1,51))
for i in range(len(output)):
out_kp = np.reshape(output[i],(17,3))
print(out_kp)
draw_save(out_kp/5.0e+18,'generator_5/'+ str(count)+'_'+str(count_all)+'.jpg')
out_kp_real = np.reshape(batch_ys[i],(17,3))
# print(out_kp_real)
draw_save(out_kp_real, 'target_5/' + str(count)+'_'+str(count_all) + '.jpg')
count_all += 1
count +=1
count_all = 1
def conv2d(self, x, W):
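# Stride-2 convolution helper; it appears unused elsewhere in this file.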
return tf.nn.conv2d(x, W, strides=[1,2,2,1], padding='SAME')
def batchNormalization(data):
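# Min-max normalize each CSI stream in place using the external normalization module.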
for each_item in range(len(data)):
data[each_item] = normalization.MINMAXNormalization(data[each_item])
def package(train_data):
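# Repackage one sample: move the first CSI axis (presumably the antenna/channel axis)
# to the end, and pass the keypoints and frame indices through unchanged.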
csi_rx, image ,index= train_data[0], train_data[1],train_data[2]
tn_data = np.array(csi_rx)
# print tn_data.shape
tn_data = np.transpose(tn_data, [1,2,0])
# print tn_data.shape
return [tn_data, image,index]
def draw_save(input,savpath):
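# Plot a 17-joint 3-D skeleton: `input` is a (17, 3) array of keypoints, the X3D/Y3D/Z3D
# lists below define the limb connections between joints, and the figure is saved to savpath.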
X0 = input[:, 0]
Y0 = input[:, 2]
Z0 = input[:, 1]
X3D = [[X0[10], X0[9]], [X0[9], X0[8]], [X0[8], X0[11]], [X0[11], X0[12]], [X0[12], X0[13]],
[X0[8], X0[14]],
[X0[14], X0[15]], [X0[15], X0[16]], [X0[8], X0[7]], [X0[7], X0[0]], [X0[0], X0[1]],
[X0[1], X0[2]], [X0[2], X0[3]], [X0[0], X0[4]], [X0[4], X0[5]], [X0[5], X0[6]]]
Y3D = [[Y0[10], Y0[9]], [Y0[9], Y0[8]], [Y0[8], Y0[11]], [Y0[11], Y0[12]], [Y0[12], Y0[13]],
[Y0[8], Y0[14]],
[Y0[14], Y0[15]], [Y0[15], Y0[16]], [Y0[8], Y0[7]], [Y0[7], Y0[0]], [Y0[0], Y0[1]],
[Y0[1], Y0[2]], [Y0[2], Y0[3]], [Y0[0], Y0[4]], [Y0[4], Y0[5]], [Y0[5], Y0[6]]]
Z3D = [[Z0[10], Z0[9]], [Z0[9], Z0[8]], [Z0[8], Z0[11]], [Z0[11], Z0[12]], [Z0[12], Z0[13]],
[Z0[8], Z0[14]],
[Z0[14], Z0[15]], [Z0[15], Z0[16]], [Z0[8], Z0[7]], [Z0[7], Z0[0]], [Z0[0], Z0[1]],
[Z0[1], Z0[2]], [Z0[2], Z0[3]], [Z0[0], Z0[4]], [Z0[4], Z0[5]], [Z0[5], Z0[6]]]
ax = plt.figure().add_subplot(111, projection='3d')
ax.view_init(elev=15., azim=70)
for tt in range(len(X3D)):
ax.plot(X3D[tt], Y3D[tt], Z3D[tt], c='b')
ax.scatter(X0, Y0, Z0, color='r', marker='.')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.savefig(savpath)
plt.close()
def mkdir(path):
# Strip leading/trailing whitespace and any trailing backslash from the path.
path = path.strip()
path = path.rstrip("\\")
# Create the directory only if it does not exist yet.
isExists = os.path.exists(path)
if not isExists:
os.makedirs(path)
return True
else:
# The directory already exists, so nothing is created.
return False
if __name__ =="__main__":
np.set_printoptions(threshold=np.inf)
train_data = []
index_m = 1
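# Load pickled CSI/keypoint training samples from datasets 1-4 (the E:/process/... paths
# reflect the author's local directory layout), normalize them, and shuffle before training.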
for m in range(20):
if 0 <= m <= 3:
# pass
index_m = m + 1
path = 'E:/process/' + str(index_m) + '_tf_indorTrack_phase_test/'
temp_paths = os.listdir(path, )
print(len(temp_paths))
#for i in range(len(temp_paths)):
for i in range(len(temp_paths)):
index = i
if index in (4, 8, 16, 24, 32, 40, 48):
# pass
# else:
with open('E:/process/' + str(index_m) + '_tf_indorTrack_phase_test/training_data_' + str(index) + '.pkl',
'rb') as handle:
data_temp = pickle.load(handle)
batchNormalization(data_temp[0])
data_nor = package(data_temp)
train_data.append(data_nor)
random.shuffle(train_data)
print('data_len is:', len(train_data))
network(train_data=train_data)