Commit fd2e3e16 authored by Sensing

The first version of this project

parent 62e3b701
#coding=UTF-8
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from sklearn.utils.extmath import svd_flip  # svd_flip lives in sklearn.utils.extmath; the old sklearn.decomposition.pca path is a removed private module
def mean_data(data):
return np.mean(data,axis=0)
"""
參数:
- XMat:传入的是一个numpy的矩阵格式,行表示样本数,列表示特征
- k:表示取前k个特征值相应的特征向量
返回值:
- finalData:參数一指的是返回的低维矩阵,相应于输入參数二
- reconData:參数二相应的是移动坐标轴后的矩阵
"""
def pca(XMat, k):
average = mean_data(XMat)
m, n = np.shape(XMat)
avgs = np.tile(average, (m, 1))
data_adjust = XMat - avgs
    covX = np.cov(data_adjust.T)  # compute the covariance matrix
    featValue, featVec = np.linalg.eigh(covX)  # eigenvalues and eigenvectors of the covariance matrix
    index = np.argsort(-featValue)  # sort the eigenvalues in descending order
if k > n:
print ("k must lower than feature number")
return
else:
        # note: eigenvectors are the columns of featVec, while indexing a numpy
        # 2-D array a[i] selects the i-th row,
        selectVec = np.array(featVec.T[index[:k]])  # so a transpose is needed here
finalData = np.dot(XMat, selectVec.T)
# reconData = (finalData * selectVec) + average
return finalData
def PCAtest(XMat, k):
average = mean_data(XMat)
m, n = np.shape(XMat)
avgs = np.tile(average, (m, 1))
data_adjust = XMat - avgs
cov = np.dot(data_adjust.T,data_adjust)/(m-1)
u, d, v = np.linalg.svd(cov, full_matrices=False)
u, v = svd_flip(u, v)
index = np.argsort(-d)
final = np.dot(XMat, u[:, index[:k]])
return final
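
# A minimal self-check sketch (an addition, not in the original file): pca()
# and PCAtest() compute the same principal axes, so their projections should
# agree up to a per-column sign flip.
if __name__ == '__main__':
    np.random.seed(0)
    X = np.random.randn(100, 5)  # toy data: 100 samples, 5 features
    print(np.allclose(np.abs(pca(X, 2)), np.abs(PCAtest(X, 2))))  # expect True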
# -*- coding: UTF-8 -*-
import numpy as np
def phaseCalibration(csi, subCarrierIndex, rxNum, subCarrierNum):
phaseRaw = np.angle(csi)
phaseUnwrapped = np.unwrap(phaseRaw)
for antIndexForPhase in range(1, rxNum):
if phaseUnwrapped[antIndexForPhase, 0] - phaseUnwrapped[0, 0] > np.pi:
phaseUnwrapped[antIndexForPhase, :] -= 2 * np.pi
elif phaseUnwrapped[antIndexForPhase, 0] - phaseUnwrapped[0, 0] < -np.pi:
phaseUnwrapped[antIndexForPhase, :] += 2 * np.pi
phase = phaseUnwrapped.reshape(-1)
a_mat = np.tile(subCarrierIndex, (1, rxNum))
a_mat = np.append(a_mat, np.ones((1, subCarrierNum * rxNum)), axis=0)
a_mat = a_mat.transpose((1, 0))
a_mat_inv = np.linalg.pinv(a_mat)
x = np.dot(a_mat_inv, phase)
phaseSlope = x[0]
phaseCons = x[1]
    calibration = np.exp(1j * (-phaseSlope * np.tile(subCarrierIndex, rxNum).reshape(rxNum, -1) - phaseCons * np.ones((rxNum, subCarrierNum))))  # remove the fitted linear phase across subcarriers
csi = csi*calibration
return csi
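
# A minimal self-check sketch (an addition): feed a synthetic CSI matrix whose
# phase is a pure linear function of the subcarrier index; after calibration
# the residual phase should be near zero.
if __name__ == '__main__':
    idx = np.arange(-28, 2)  # 30 hypothetical subcarrier indices
    synthetic = np.exp(1j * (0.05 * np.tile(idx, (3, 1)) + 0.3))
    out = phaseCalibration(synthetic, idx, rxNum=3, subCarrierNum=30)
    print(np.max(np.abs(np.angle(out))))  # expect a value close to 0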
# CentiTrack2.0
CentiTrack2.0, also named CentiTrack-3D, contains three key modules, all fully implemented: the super-resolution AoA-ToF module, the relative motion trace module, and the 3D tracking module.
The experiment data is also uploaded, covering "line", "curve" and "letter" gestures. In each folder, "log.dat" and "logg.dat" are the CSI data collected on Rx1 and Rx2 respectively, "csi_ts.txt" holds the CSI timestamps used to synchronize with the Leap Motion, and "data.json" is the ground truth collected by the Leap Motion.
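A minimal loading sketch (an addition; the folder name "line" follows the dataset description above and the parser module `read_bf_file` is the one included in this repo, so the exact layout may differ):

```python
import json
import numpy as np
import read_bf_file

csi_rx1 = read_bf_file.read_file("line/log.dat")   # CSI packets from Rx1
csi_rx2 = read_bf_file.read_file("line/logg.dat")  # CSI packets from Rx2
csi_ts = np.loadtxt("line/csi_ts.txt")             # timestamps for Leap Motion sync
with open("line/data.json") as f:
    ground_truth = json.load(f)                    # Leap Motion ground truth
```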
# -*- coding: UTF-8 -*-
import numpy as np
import math
from scipy.optimize import root, fsolve
THRESHOLD = 5.5
def get_Real(subcarrier):
return map(lambda x: float("%.4f" % x.real), subcarrier)
def get_Imag(subcarrier):
return map(lambda x: float("%.4f" % x.imag), subcarrier)
def rotate(x, y, theta):
x_r = x * np.cos(theta) + y * np.sin(theta)
y_r = y * np.cos(theta) - x * np.sin(theta)
return x_r, y_r
def mn_to_xy_shitty(m, n): # TODO: should be revised for 3D tracking
sqrt = 2*m**2-3*m**2*n**2+m**2*n**4+2*m**3*n-2*m**3*n**3-3*m**4+4*m**4*n**2-m**4*n**4-2*m**5*n-m**6*n**2+2*m**5*n**3+m**6
if sqrt >= 0:
x = (-1+m**2-m*n+n**2-(m*n)**2+m*n**3+math.sqrt(sqrt))/(2*(-1+m**2+n**2))
y = (m-n+m**2*n-m*n**2+(-n+m**2*n-m*n**2+n**3-m**2*n**3+m*n**4+n*math.sqrt(sqrt))/(-1+m**2+n**2))/(2*m)
else:
x = (-1 + m ** 2 - m * n + n ** 2 - (m * n) ** 2 + m * n ** 3) / (2 * (-1 + m ** 2 + n ** 2))
y = (m - n + m ** 2 * n - m * n ** 2 + (-n + m ** 2 * n - m * n ** 2 + n ** 3 - m ** 2 * n ** 3 + m * n ** 4) / (-1 + m ** 2 + n ** 2)) / (2 * m)
return x, y
def xyz_3d_coordinates2(p, q, aoa1, aoa2):
a, b = aoa1, aoa2
x = (p**2*q - p*q**2 - 2*np.sqrt(2)*a*p*q + p + np.sqrt(2)*a*q**2 + q - np.sqrt(2)*a)/(2*(p + q - np.sqrt(2)*a))
y = (- p**2*q - np.sqrt(2)*a*p**2 + p*q**2 + p + q - np.sqrt(2)*a)/(2*(p + q - np.sqrt(2)*a))
z = (-(2*a**2*p**4 + 8*a**2*p**2*q**2 - 4*a**2*p**2 - 8*a**2*p*q**3 + 8*a**2*p*q + 2*a**2*q**4 - 4*a**2*q**2
+ 4*a**2 + 2*np.sqrt(2)*a*p**4*q - 6*np.sqrt(2)*a*p**3*q**2 + 2*np.sqrt(2)*a*p**3 + 6*np.sqrt(2)*a*p**2*q**3
- 6*np.sqrt(2)*a*p**2*q - 2*np.sqrt(2)*a*p*q**4 + 2*np.sqrt(2)*a*p*q**2 - 4*np.sqrt(2)*a*p + 2*np.sqrt(2)*a*q**3
- 4*np.sqrt(2)*a*q + 2*p**4*q**2 - p**4 - 4*p**3*q**3 + 2*p**2*q**4 - 2*p**2*q**2 + 2*p**2 + 4*p*q - q**4
+ 2*q**2)/(2*a**2 - 2*np.sqrt(2)*a*p - 2*np.sqrt(2)*a*q + p**2 + 2*p*q + q**2))**0.5/2
return x, y, z
def xyz_3d_coordinates3(p, q, aoa1, aoa2):
a, b = aoa1, aoa2
x = (p**2*q - p*q**2 + p - np.sqrt(2)*b*q**2 + q - np.sqrt(2)*b)/(2*(p + q - np.sqrt(2)*b))
y = (- p**2*q + np.sqrt(2)*b*p**2 + p*q**2 - 2*np.sqrt(2)*b*p*q + p + q - np.sqrt(2)*b)/(2*(p + q - np.sqrt(2)*b))
z = (-(2*b**2*p**4 - 8*b**2*p**3*q + 8*b**2*p**2*q**2 - 4*b**2*p**2 + 8*b**2*p*q + 2*b**2*q**4 - 4*b**2*q**2
+ 4*b**2 - 2*np.sqrt(2)*b*p**4*q + 6*np.sqrt(2)*b*p**3*q**2 + 2*np.sqrt(2)*b*p**3 - 6*np.sqrt(2)*b*p**2*q**3
+ 2*np.sqrt(2)*b*p**2*q + 2*np.sqrt(2)*b*p*q**4 - 6*np.sqrt(2)*b*p*q**2 - 4*np.sqrt(2)*b*p + 2*np.sqrt(2)*b*q**3
- 4*np.sqrt(2)*b*q + 2*p**4*q**2 - p**4 - 4*p**3*q**3 + 2*p**2*q**4 - 2*p**2*q**2 + 2*p**2 + 4*p*q - q**4
+ 2*q**2)/(2*b**2 - 2*np.sqrt(2)*b*p - 2*np.sqrt(2)*b*q + p**2 + 2*p*q + q**2))**0.5/2
return x, y, z
def xy2z(x, y, p, q):
z = (q**4 - 4*q**2*x**2 + 4*q**2*x - 4*q**2*y**2 - 2*q**2 + 4*x**2 - 4*x + 1)**0.5 / (2*q)
# z2 = (p**4 - 4*p**2*x**2 + 4*p**2*y - 4*p**2*y**2 - 2*p**2 + 4*y**2 - 4*y + 1)**0.5 / (2*p)
return z
def mn_to_xy_series(ns, ms, aoa1, aoa2):
msc, nsc = np.array(ms), np.array(ns)
msc, nsc = 1.6-(msc - msc[0])/100, 1.6-(nsc - nsc[0])/100
trace_x1, trace_y1 = np.zeros(msc.shape), np.zeros(msc.shape)
trace_x2, trace_y2 = np.zeros(msc.shape), np.zeros(msc.shape)
trace_z1, trace_z2, trace_z3 = np.zeros(msc.shape), np.zeros(msc.shape), np.zeros(msc.shape)
for i in range(0, len(ns)):
m_in, n_in = max(1, msc[i]), max(1, nsc[i])
aoa1[i], aoa2[i] = np.cos(aoa1[i] / 180 * np.pi), np.cos(aoa2[i] / 180 * np.pi)
x1, y1, z1 = xyz_3d_coordinates2(m_in, n_in, aoa1[i], aoa2[i])
x2, y2, z2 = xyz_3d_coordinates3(m_in, n_in, aoa1[i], aoa2[i])
z = xy2z((x1 + x2) / 2, (y1 + y2) / 2, m_in, n_in)
# print "p: ", m_in, " q: ", n_in, " a: ", aoa1[i], " b: ", aoa2[i]
# print "x1: ", x1, " y1: ", y1, " z1: ", z1
# print "x2: ", x2, " y2: ", y2, " z2: ", z2
x1, y1 = 100*x1, 100*y1
x2, y2 = 100*x2, 100*y2
trace_z1[i] = 100 * z1
trace_z2[i] = 100 * z2
trace_z3[i] = 100 * z
trace_x1[i], trace_y1[i] = rotate(x1, y1, -0.25 * np.pi)
trace_x2[i], trace_y2[i] = rotate(x2, y2, -0.25 * np.pi)
trace_x1, trace_y1 = trace_x1 - trace_x1[0], trace_y1 - trace_y1[0]
trace_x2, trace_y2 = trace_x2 - trace_x2[0], trace_y2 - trace_y2[0]
return [trace_x1, trace_y1, trace_z1], [trace_x2, trace_y2, trace_z2], trace_z3
def find_peaks_(phase_list):
peak_segments = []
peaks = []
p = []
flag = 0
for index in range(1, len(phase_list)-2):
if abs(phase_list[index] - phase_list[index+1]) > THRESHOLD / 2.0 > abs(phase_list[index] - phase_list[index - 1]) and abs(phase_list[index + 1] - phase_list[index + 2]) < THRESHOLD / 2.0:
p.append(index)
if phase_list[index] > phase_list[index+1]:
if flag == 0:
peaks.append(index)
else:
peak_segments.append(peaks)
peaks = [index]
flag = 0
else:
if flag == 1:
peaks.append(index)
flag = 1
else:
if peaks:
peak_segments.append(peaks)
peaks = [index]
else:
peaks.append(index)
flag = 1
peak_segments.append(peaks)
return p, peak_segments
def calibration(a):
phase = np.angle(a)
gaps, ps = find_peaks_(phase)
ps = filter(lambda x: len(x) > 0, ps)
angle_calibrated = np.zeros(np.shape(phase), dtype=float)
parts = np.zeros(np.shape(a), dtype=complex)
if len(ps) == 0:
angle_calibrated = phase
parts = a
elif len(ps) == 1:
if len(ps[0]) == 1:
angle_calibrated = phase
parts = a
else:
            for i in range(0, len(gaps) - 1):
angle_calibrated[gaps[i]:gaps[i + 1]] = np.angle(a[gaps[i]:gaps[i + 1]] - np.mean(a[gaps[i]:gaps[i + 1]]))
parts[gaps[i]:gaps[i + 1]] = a[gaps[i]:gaps[i + 1]] - np.mean(a[gaps[i]:gaps[i + 1]])
angle_calibrated[0:gaps[0]] = np.angle(a[0:gaps[0]] - np.mean(a[gaps[0]:gaps[1]]))
angle_calibrated[gaps[-1]:] = np.angle(a[gaps[-1]:] - np.mean(a[gaps[-2]:gaps[-1]]))
parts[0:gaps[0]] = a[0:gaps[0]] - np.mean(a[gaps[0]:gaps[1]])
parts[gaps[-1]:] = a[gaps[-1]:] - np.mean(a[gaps[-2]:gaps[-1]])
else:
if len(ps[0]) == 1:
angle_calibrated[:(ps[0][0]+ps[1][0])/2] = phase[:(ps[0][0]+ps[1][0])/2]
parts[:(ps[0][0]+ps[1][0])/2] = a[:(ps[0][0]+ps[1][0])/2]
else:
            ps_l = (ps[0][-1] - ps[0][0]) / (len(ps[0]) - 1)
for i in range(len(ps[0]) - 1):
angle_calibrated[ps[0][i]: ps[0][i + 1]] = np.angle(a[ps[0][i]:ps[0][i + 1]] - np.mean(a[ps[0][i]:ps[0][i + 1]]))
parts[ps[0][i]: ps[0][i + 1]] = a[ps[0][i]:ps[0][i + 1]] - np.mean(a[ps[0][i]:ps[0][i + 1]])
if ps[0][0] - ps_l <= 0:
angle_calibrated[0:ps[0][0]] = np.angle(a[0:ps[0][0]] - np.mean(a[ps[0][0]:ps[0][1]]))
parts[0:ps[0][0]] = a[0:ps[0][0]] - np.mean(a[ps[0][0]:ps[0][1]])
else:
angle_calibrated[ps[0][0] - ps_l: ps[0][0]] = np.angle(a[ps[0][0] - ps_l: ps[0][0]] - np.mean(a[ps[0][0]:ps[0][1]]))
angle_calibrated[0:ps[0][0] - ps_l] = [i+angle_calibrated[ps[0][0]-ps_l]-phase[ps[0][0]-ps_l-1] for i in phase[0: ps[0][0] - ps_l]]
parts[ps[0][0] - ps_l: ps[0][0]] = a[ps[0][0] - ps_l: ps[0][0]] - np.mean(a[ps[0][0]:ps[0][1]])
parts[0:ps[0][0] - ps_l] = [i + parts[ps[0][0] - ps_l] - a[ps[0][0] - ps_l - 1] for i in a[0: ps[0][0] - ps_l]]
angle_calibrated[ps[0][-1]: (ps[0][-1] + ps[1][0])/2] = np.angle(a[ps[0][-1]: (ps[0][-1] + ps[1][0]) / 2] - np.mean(a[ps[0][-2]:ps[0][-1]]))
parts[ps[0][-1]: (ps[0][-1] + ps[1][0])/2] = a[ps[0][-1]: (ps[0][-1] + ps[1][0]) / 2] - np.mean(a[ps[0][-2]:ps[0][-1]])
for peaks in range(1, len(ps) - 1):
if len(ps[peaks]) == 1:
angle_calibrated[(ps[peaks-1][-1]+ps[peaks][0])/2: (ps[peaks][0]+ps[peaks+1][0])/2] = phase[(ps[peaks-1][-1]+ps[peaks][0])/2: (ps[peaks][0]+ps[peaks+1][0])/2]
parts[(ps[peaks-1][-1]+ps[peaks][0])/2: (ps[peaks][0]+ps[peaks+1][0])/2] = a[(ps[peaks-1][-1]+ps[peaks][0])/2: (ps[peaks][0]+ps[peaks+1][0])/2]
else:
for i in range(len(ps[peaks]) - 1):
angle_calibrated[ps[peaks][i]: ps[peaks][i+1]] = np.angle(a[ps[peaks][i]:ps[peaks][i+1]]-np.mean(a[ps[peaks][i]:ps[peaks][i + 1]]))
parts[ps[peaks][i]: ps[peaks][i+1]] = a[ps[peaks][i]:ps[peaks][i+1]]-np.mean(a[ps[peaks][i]:ps[peaks][i + 1]])
angle_calibrated[(ps[peaks-1][-1] + ps[peaks][0])/2: ps[peaks][0]] = np.angle(a[(ps[peaks-1][-1] + ps[peaks][0]) / 2: ps[peaks][0]] - np.mean(a[ps[peaks][0]:ps[peaks][1]]))
angle_calibrated[ps[peaks][-1]: (ps[peaks][-1]+ps[peaks+1][0])/2] = np.angle(a[ps[peaks][-1]: (ps[peaks][-1]+ps[peaks + 1][0]) / 2] - np.mean(a[ps[peaks][-2]:ps[peaks][-1]]))
parts[(ps[peaks-1][-1] + ps[peaks][0])/2: ps[peaks][0]] = a[(ps[peaks-1][-1] + ps[peaks][0]) / 2: ps[peaks][0]] - np.mean(a[ps[peaks][0]:ps[peaks][1]])
parts[ps[peaks][-1]: (ps[peaks][-1]+ps[peaks+1][0])/2] = a[ps[peaks][-1]: (ps[peaks][-1]+ps[peaks + 1][0]) / 2] - np.mean(a[ps[peaks][-2]:ps[peaks][-1]])
if len(ps[-1]) == 1:
angle_calibrated[(ps[-2][-1]+ps[-1][0])/2:] = [i+angle_calibrated[(ps[-2][-1]+ps[-1][0])/2-1]-phase[(ps[-2][-1]+ps[-1][0])/2] for i in phase[(ps[-2][-1]+ps[-1][0])/2:]]
parts[(ps[-2][-1]+ps[-1][0])/2:] = [i+parts[(ps[-2][-1]+ps[-1][0])/2-1]-a[(ps[-2][-1]+ps[-1][0])/2] for i in a[(ps[-2][-1]+ps[-1][0])/2:]]
else:
            ps_r = (ps[-1][-1] - ps[-1][0]) / (len(ps[-1]) - 1)
for i in range(len(ps[-1]) - 1):
angle_calibrated[ps[-1][i]: ps[-1][i + 1]] = np.angle(a[ps[-1][i]:ps[-1][i + 1]] - np.mean(a[ps[-1][i]:ps[-1][i + 1]]))
parts[ps[-1][i]: ps[-1][i + 1]] = a[ps[-1][i]:ps[-1][i + 1]] - np.mean(a[ps[-1][i]:ps[-1][i + 1]])
if ps[-1][-1] + ps_r >= 500:
angle_calibrated[ps[-1][-1]:] = np.angle(a[ps[-1][-1]:] - np.mean(a[ps[-1][-2]:ps[-1][-1]]))
parts[ps[-1][-1]:] = a[ps[-1][-1]:] - np.mean(a[ps[-1][-2]:ps[-1][-1]])
else:
angle_calibrated[ps[-1][-1]:ps[-1][-1] + ps_r] = np.angle(a[ps[-1][-1]:ps[-1][-1] + ps_r] - np.mean(a[ps[-1][-2]:ps[-1][-1]]))
angle_calibrated[ps[-1][-1] + ps_r:] = [i+angle_calibrated[ps[-1][-1] + ps_r-1]-phase[ps[-1][-1]+ps_r] for i in phase[ps[-1][-1] + ps_r:]]
parts[ps[-1][-1]:ps[-1][-1] + ps_r] = a[ps[-1][-1]:ps[-1][-1] + ps_r] - np.mean(a[ps[-1][-2]:ps[-1][-1]])
parts[ps[-1][-1] + ps_r:] = [i+parts[ps[-1][-1] + ps_r-1]-a[ps[-1][-1]+ps_r] for i in a[ps[-1][-1] + ps_r:]]
angle_calibrated[(ps[-2][-1] + ps[-1][0])/2:ps[-1][0]] = np.angle(a[(ps[-2][-1] + ps[-1][0]) / 2:ps[-1][0]] - np.mean(a[ps[-1][-2]:ps[-1][-1]]))
parts[(ps[-2][-1] + ps[-1][0])/2:ps[-1][0]] = a[(ps[-2][-1] + ps[-1][0]) / 2:ps[-1][0]] - np.mean(a[ps[-1][-2]:ps[-1][-1]])
return angle_calibrated, parts
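
# A minimal usage sketch (an addition): on a smooth synthetic signal with no
# 2*pi phase jumps, find_peaks_ detects nothing and calibration() falls through
# to its identity branch; real CSI ratios with wrap-around jumps trigger the
# segment-wise static-component removal above.
if __name__ == '__main__':
    t = np.linspace(0, np.pi, 500)
    a = (1.5 + 0.5j) + 0.3 * np.exp(1j * t)  # static offset + slow dynamic rotation
    angle_calibrated, parts = calibration(a)
    print np.allclose(angle_calibrated, np.angle(a))  # True: no jumps detected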
# -*- coding: UTF-8 -*-
import numpy as np
import os
import sys
import struct
import pylab
import cmath
class WifiCsi:
def __init__(self, args, csi):
self.timestamp_low = args[0]
self.bfee_count = args[1]
self.Nrx = args[2]
self.Ntx = args[3]
self.rssi_a = args[4]
self.rssi_b = args[5]
self.rssi_c = args[6]
self.noise = args[7]
self.agc = args[8]
self.perm = args[9]
self.rate = args[10]
self.csi = csi
pass
def get_bit_num(in_num, data_length):
max_value = (1 << data_length - 1) - 1
if not -max_value-1 <= in_num <= max_value:
out_num = (in_num + (max_value + 1)) % (2 * (max_value + 1)) - max_value - 1
else:
out_num = in_num
return out_num
def read_bfee(in_bytes):
timestamp_low = in_bytes[0] + (in_bytes[1] << 8) + \
(in_bytes[2] << 16) + (in_bytes[3] << 24)
    bfee_count = in_bytes[4] + (in_bytes[5] << 8)  # 16-bit counter, low byte first
Nrx = in_bytes[8]
Ntx = in_bytes[9]
rssi_a = in_bytes[10]
rssi_b = in_bytes[11]
rssi_c = in_bytes[12]
noise = get_bit_num(in_bytes[13],8)
agc = in_bytes[14]
antenna_sel = in_bytes[15]
length = in_bytes[16] + (in_bytes[17] << 8)
fake_rate_n_flags = in_bytes[18] + (in_bytes[19] << 8)
calc_len = (30 * (Nrx * Ntx * 8 * 2 + 3) + 7) / 8
payload = in_bytes[20:]
# if(length != calc_len)
perm_size = 3
perm = np.ndarray(perm_size, dtype=int)
perm[0] = (antenna_sel & 0x3) + 1
perm[1] = ((antenna_sel >> 2) & 0x3) + 1
perm[2] = ((antenna_sel >> 4) & 0x3) + 1
index = 0
csi_size = (30, Ntx, Nrx)
row_csi = np.ndarray(csi_size, dtype=complex)
perm_csi = np.ndarray(csi_size, dtype=complex)
for i in range(30):
index += 3
remainder = index % 8
for j in range(Nrx):
for k in range(Ntx):
                tmp_r = (payload[index // 8] >> remainder) | (payload[index // 8 + 1] << (8 - remainder))
                pr = get_bit_num(tmp_r & 0xFF, 8)  # signed 8-bit real part
                tmp_i = (payload[index // 8 + 1] >> remainder) | (payload[index // 8 + 2] << (8 - remainder))
                pi = get_bit_num(tmp_i & 0xFF, 8)  # signed 8-bit imaginary part
perm_csi[i][k][perm[j] - 1] = complex(pr, pi)
index += 16
args = [timestamp_low, bfee_count, Nrx, Ntx, rssi_a,
rssi_b, rssi_c, noise, agc, perm, fake_rate_n_flags]
temp_wifi_csi = WifiCsi(args, perm_csi)
return temp_wifi_csi
def read_file(file_path):
length = os.path.getsize(file_path)
cur = 0
count = 0
broken_perm = 0
triangle = [1, 3, 6]
csi_data = []
with open(file_path, 'rb') as f:
while cur < (length - 3):
filed_length = struct.unpack("!H", f.read(2))[0]
code = struct.unpack("!B", f.read(1))[0]
cur += 3
            if code == 187:
                data = []
                for _ in range(filed_length - 1):
                    data.append(struct.unpack("!B", f.read(1))[0])
                cur = cur + filed_length - 1
                if len(data) != filed_length - 1:
                    break
                # only beamforming (code 187) blocks carry CSI; parse them here
                csi_data.append(read_bfee(data))
                count += 1
            else:
                f.seek(filed_length - 1, 1)
                cur = cur + filed_length - 1
return csi_data
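
# Minimal usage sketch (an addition; the path below is a placeholder):
if __name__ == '__main__':
    packets = read_file('log.dat')
    print len(packets), packets[0].Nrx, packets[0].Ntx, packets[0].csi.shape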
import read_bf_file
from scipy.signal import savgol_filter
import pylab
from backup import *
TIMEINTERVAL = 0.01
TIMEBIASE = 0.008
TIMELEN = 500
IMAGETOCSIRATIO = 2
THRESHOLD = 5.5
def csi_ratio(an1, an2, an3):
ret1, ret2 = None, None
for sub_index in range(len(an1)):
ret1 = np.array([np.divide(an1[sub_index], an2[sub_index])]) if ret1 is None else np.append(ret1, [np.divide(an1[sub_index], an2[sub_index])], axis=0)
ret2 = np.array([np.divide(an2[sub_index], an3[sub_index])]) if ret2 is None else np.append(ret2, [np.divide(an2[sub_index], an3[sub_index])], axis=0)
return ret1, ret2
def trace(filepath):
file = read_bf_file.read_file(filepath)
file_len = len(file)
timestamp = np.array([])
startTime = file[0].timestamp_low
print "Length of packets: ", file_len, " Start timestamp:" + str(startTime)
ap1_tx1, ap2_tx1, ap3_tx1 = [], [], []
for item in file :
timestamp = np.append(timestamp, (item.timestamp_low - startTime) / 1000000.0)
for eachcsi in range(0, 30):
ap1_tx1.append(item.csi[eachcsi][0][0])
ap2_tx1.append(item.csi[eachcsi][0][1])
ap3_tx1.append(item.csi[eachcsi][0][2])
ap1_tx1 = np.reshape(ap1_tx1, (file_len, 30)).transpose()
ap2_tx1 = np.reshape(ap2_tx1, (file_len, 30)).transpose()
ap3_tx1 = np.reshape(ap3_tx1, (file_len, 30)).transpose()
ret1, ret2 = csi_ratio(ap1_tx1, ap2_tx1, ap3_tx1)
aa = np.mean(ret1, axis=0)
for i in range(len(aa)):
if np.isnan(aa[i]):
aa[i] = aa[i-1]
a = aa - np.mean(aa)
phase = np.angle(a)
phase_wrap = np.unwrap(phase)
angle_calibrated, dynamic_vectors = calibration(a)
pylab.figure()
pylab.subplot(3, 3, 1)
pylab.ylabel('Imag')
pylab.xlabel('Real')
pylab.plot(get_Real(a), get_Imag(a), 'b')
pylab.xlim(-2, 2)
pylab.ylim(-2, 2)
pylab.subplot(3, 3, 2)
pylab.title("angle")
pylab.ylabel('angle/rad')
pylab.xlabel('time')
pylab.plot(phase, 'b')
pylab.subplot(3, 3, 3)
pylab.title("angle_wrap")
pylab.ylabel('angle/rad')
pylab.xlabel('time')
pylab.plot(phase_wrap, 'b')
pylab.subplot(3, 3, 4)
pylab.title("distance")
pylab.ylabel('dis/cm')
pylab.xlabel('time')
pylab.plot(phase_wrap * 5.64 / (4 * np.pi), 'b')
pylab.subplot(3, 3, 5)
pylab.ylabel('angle/rad')
pylab.xlabel('calibration')
pylab.plot(angle_calibrated, 'r')
pylab.subplot(3, 3, 6)
pylab.ylabel('dis/cm')
pylab.xlabel('calibration')
pylab.plot(np.unwrap(angle_calibrated), 'r')
return np.unwrap(angle_calibrated) * 5.64 / (2 * np.pi)
if __name__ == '__main__':
x = trace("../0928/rx1_3.dat")
y = trace("../0928/rx2_3.dat")
# -*- coding=utf-8 -*-
import read_bf_file
from PCA import PCAtest
from PhaseCalibrate import phaseCalibration
import scipy.stats as sc
from skimage import feature
from scipy.signal import savgol_filter
from backup import *
from relative_motion_trace import trace
sampleFrequency = 100 # Hertz
centerFrequency = 5.32e9 # Hertz, 64 channel
speedOfLight = 299792458 # speed of electromagnetic wave
antDistance = 2.7e-2
rxAntennaNum = 3
subCarrierNum = 30
f_gap = 312.5e3
subCarrierIndex40 = np.array([-58, -54, -50, -46, -42, -38, -34, -30, -26, -22, -18, -14, -10, -6, -2,
2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58])
subCarrierIndex20 = np.array([-28, -26, -24, -22, -20, -18, -16, -14, -12, -10, -8, -6, -4, -2, -1,
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 28])
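# Added note: the subcarriers are spaced f_gap = 312.5 kHz apart, so the
# absolute frequency of subcarrier k is centerFrequency + k * f_gap, e.g.:
subCarrierFreq20 = centerFrequency + subCarrierIndex20 * f_gap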
class Track(object):
def __init__(self,
search_interval=(-np.pi / 2, np.pi/2),
toa_interval=(-2.5, 2.5),
slide_window=0.4,
filename=None,
use_mdl=1,
use_pca=1,
use40mhz=0,
use_trans=[5, 6],
step=20
):
self.search_interval = search_interval
self.toa_interval = toa_interval
self.slide_window = slide_window
self.filename = filename
self.useMDL = use_mdl
self.usePCA = use_pca
self.use40MHz = use40mhz
self.useTrans = use_trans
self.angleStepsNum = 400
self.angleStepLen = (self.search_interval[1] - self.search_interval[0]) / self.angleStepsNum
self.angleSteps = np.arange(self.search_interval[0], self.search_interval[1] + self.angleStepLen,
self.angleStepLen, dtype=float)
self.toaStepsNum = 400
self.toaStepLen = (self.toa_interval[1] - self.toa_interval[0]) / self.toaStepsNum
self.toaSteps = np.arange(self.toa_interval[0], self.toa_interval[1] + self.toaStepLen, self.toaStepLen,
dtype=float)
self.slideWindowLen = int(self.slide_window * sampleFrequency)
self.stepLength = step
self.overlapLength = self.slideWindowLen - self.stepLength
file_r = read_bf_file.read_file(self.filename)
self.aoa = self.readfile(file_r)
def get_AoA(self):
return self.aoa
def topk(self, music_spectrum, max_p, k):
max_v = [music_spectrum[p[0]][p[1]] for p in max_p]
descend_index = np.argsort(max_v)
return [max_p[i] for i in descend_index[-k:]]
def getMUSIC(self, noiseMultiply, fc):
angle_consider = self.angleSteps
us_consider = (antDistance * fc / speedOfLight) * np.sin(angle_consider)
delay_consider = 1e-7 * self.toaSteps
subCarrierIndex = subCarrierIndex40 if self.use40MHz == 1 else subCarrierIndex20
if self.usePCA:
delay_steering_mat = np.exp(-1j * 2 * np.pi * subCarrierIndex[:self.useTrans[0]].reshape(self.useTrans[0], -1)
* f_gap * delay_consider)
else:
delay_steering_mat = np.exp(-1j * 2 * np.pi * subCarrierIndex.reshape(30, -1) * f_gap * delay_consider)
aoa_steering_mat = np.exp(-2j * np.pi * np.array([0, 1, 2]).reshape(3, 1) * us_consider)
aoa_steering_inv_mat = 1
theta_tau_mat = np.kron(aoa_steering_mat, delay_steering_mat)
theta_tau_delta_mat = np.kron(aoa_steering_inv_mat, theta_tau_mat) # =theta_tau_mat
pna = np.dot(noiseMultiply, theta_tau_delta_mat)
music_spectrum = np.sum(theta_tau_delta_mat.conjugate().transpose() * (pna.transpose()), axis=1) # Ah*q*qh@At
music_spectrum = 1 / np.abs(music_spectrum)
music_spectrum = music_spectrum.reshape(401, -1)
return music_spectrum
def mdl_algorithm(self, eigenvalues):
mdl = np.zeros(len(eigenvalues))
lambda_tot = eigenvalues
sub_arr_size = len(eigenvalues)
n_segments = self.slideWindowLen
max_multipath = len(lambda_tot)
for k in range(0, max_multipath):
mdl[k] = -n_segments * (sub_arr_size - k) * np.log10(sc.gmean(lambda_tot[k :]) / np.mean(lambda_tot[k:])) \
+ 0.5 * k * (2 * sub_arr_size - k) * np.log10(n_segments)
index = max(np.argmin(mdl), 1)
print "source signals: ", index
return index
def getNoiseMat1(self, matrix):
matr = np.zeros([90, 90], dtype=complex)
for i in matrix:
mat = np.asarray(i)[:, None]
cor = np.dot(mat, mat.conjugate().transpose())
matr += cor
matr = matr / (len(matrix))
eig, u_tmp = np.linalg.eig(matr)
eig = np.abs(eig)
un = np.argsort(-eig)
eig = -np.sort(-eig)
u = u_tmp[:, un[:]]
if self.useMDL:
index = self.mdl_algorithm(eig)
else:
index = 6
qn = u[:, index:]
noiseMultiply = np.dot(qn, qn.conjugate().transpose())
return noiseMultiply, index
def getNoiseMat(self, matrix) :
mat = np.asarray(matrix).transpose() # timestamp * Nrx
cor = np.dot(mat, mat.conjugate().transpose())
eig, u_tmp = np.linalg.eig(cor)
eig = np.abs(eig)
un = np.argsort(-eig)
eig = -np.sort(-eig)
u = u_tmp[:, un[:]]
if self.useMDL:
index = self.mdl_algorithm(eig)
else :
index = 6
qn = u[:, index:]
noiseMultiply = np.dot(qn, qn.conjugate().transpose())
return noiseMultiply, index
def format_matrix(self, csi):
M, N = 3, 30
if self.usePCA:
ant1 = np.reshape(csi[0], (-1, self.useTrans[0])).transpose()
ant2 = np.reshape(csi[1], (-1, self.useTrans[0])).transpose()
ant3 = np.reshape(csi[2], (-1, self.useTrans[0])).transpose()
csi_formed = np.concatenate((np.concatenate((ant1, ant2), axis=0), ant3), axis=0)
        else:
            csi_formed = np.concatenate((csi[0], csi[1], csi[2]), axis=0)
return csi_formed
def fillCSIMatrix(self, fileToRead):
subCarrierIndex = subCarrierIndex40 if self.use40MHz == 1 else subCarrierIndex20
CSIMatrix = np.zeros([len(fileToRead), rxAntennaNum, subCarrierNum], dtype=complex)
if self.usePCA:
CSIMatrixx = np.zeros([len(fileToRead), 3 * self.useTrans[0], self.useTrans[1]], dtype=complex)
else:
CSIMatrixx = np.zeros([len(fileToRead), subCarrierNum * 3], dtype=complex)
timestampCount = 0
for item in fileToRead:
for EachCSI in range(0, 30):
CSIMatrix[timestampCount, :, EachCSI] = \
np.array([item.csi[EachCSI, 0, 0], item.csi[EachCSI, 0, 1],
item.csi[EachCSI, 0, 2]])
tmp = phaseCalibration(CSIMatrix[timestampCount], subCarrierIndex, rxNum=rxAntennaNum,
subCarrierNum=subCarrierNum)
CSIMatrixx[timestampCount] = self.format_matrix(tmp)
timestampCount += 1
return CSIMatrixx
def getAoASpectrum(self, CSIMatrix):
if self.usePCA == 1:
MUSICSignalNum = []
Qn, sourceNum = self.getNoiseMat(CSIMatrix)
eachSpectrum = self.getMUSIC(Qn, centerFrequency) # timestamp * Nrx
MUSICSignalNum.append(sourceNum)
AoASpectrum = np.array(eachSpectrum)
else:
MUSICSignalNum = []
Qn, sourceNum = self.getNoiseMat1(CSIMatrix)
eachSpectrum = self.getMUSIC(Qn, centerFrequency) # timestamp * Nrx
MUSICSignalNum.append(sourceNum)
AoASpectrum = np.array(eachSpectrum)
return AoASpectrum, sourceNum
def readfile(self, *args):
file1 = args[0]
window_now = 0
file_len = len(file1)
c_matrix1 = np.zeros([self.slideWindowLen, subCarrierNum * 3], dtype=complex)
AoAEstRx1, timeRx1 = [], None
print("start timeStamp: ", str(file1[0].timestamp_low))
while window_now + self.slideWindowLen < file_len:
if window_now == 0:
c_matrix1 = self.fillCSIMatrix(file1[0: self.slideWindowLen])
else:
c_matrix1[:self.overlapLength, :, :] = c_matrix1[-self.overlapLength:, :, :]
c_matrix1[-self.stepLength:, :, :] = \
self.fillCSIMatrix(file1[window_now + self.overlapLength: window_now + self.slideWindowLen])
aoa_tof = None
if self.usePCA == 1:
ret = np.zeros([c_matrix1.shape[0], c_matrix1.shape[1]], dtype=complex)
for csi in range(len(c_matrix1)):
temp = PCAtest(c_matrix1[csi, :, :], 1)
ret[csi, :] = temp.reshape(-1)
aoa_spectrum, peak_num = self.getAoASpectrum(ret)
index_tem = imregionalmax(aoa_spectrum)
topk_index = self.topk(aoa_spectrum, index_tem, peak_num)
else:
aoa_spectrum, peak_num = self.getAoASpectrum(c_matrix1)
index_tem = imregionalmax(aoa_spectrum)
topk_index = self.topk(aoa_spectrum, index_tem, peak_num)
for ma in topk_index:
aoa = (ma[0] * self.angleStepLen + self.search_interval[0]) * 180 / np.pi
tof = (ma[1] * self.toaStepLen + self.toa_interval[0])
new_aoa_tof = np.array([[aoa, tof]])
aoa_tof = new_aoa_tof if aoa_tof is None else np.append(aoa_tof, new_aoa_tof, axis=0)
AoAEstRx1.append(aoa_tof)
window_now += self.stepLength
return AoAEstRx1
def imregionalmax(spectrum):
aa = feature.peak_local_max(spectrum, min_distance=15) # 2D
return aa
if __name__ == '__main__':
slide_window = 0.4
# The relative motion trace module
tra1 = trace("./rx1.dat")
tra2 = trace("./rx2.dat")
share_len_trace = min(len(tra1), len(tra2))
tra1 = tra1[:share_len_trace]
tra2 = tra2[:share_len_trace]
intercept_s = int(slide_window * sampleFrequency / 20)
tra1 = tra1[::10][intercept_s:]
tra2 = tra2[::10][intercept_s:]
print "CSI trace len: %d" % share_len_trace
print "-------------------------------------------------------------------------------"
rx1 = Track(search_interval=(-np.pi/2, np.pi/2), slide_window=slide_window, filename="./rx1.dat",
use_pca=1, use_mdl=1, use40mhz=0, step=10)
aoa1 = rx1.get_AoA()
rx2 = Track(search_interval=(-np.pi/2, np.pi/2), slide_window=slide_window, filename="./rx2.dat",
use_pca=1, use_mdl=1, use40mhz=0, step=10)
aoa2 = rx2.get_AoA()
print "-------------------------------------------------------------------------------"
# The direct hand-reflected path can be extracted in aoa1/aoa2 with minimum ToF
share_len_aoa = min(len(aoa1), len(aoa2))
aoa1 = aoa1[:share_len_aoa]
aoa2 = aoa2[:share_len_aoa]
print "AoA len: %d" % len(aoa1)
total_len = min(share_len_trace, share_len_aoa)
tra1 = tra1[:total_len]
tra2 = tra2[:total_len]
aoa1 = aoa1[:total_len]
aoa2 = aoa2[:total_len]
print "total len: %d" % total_len
    # The 3D tracking module; its outputs are smoothed with a Savitzky-Golay filter below.
    # Here the initial path lengths p and q (i.e., the initial position) are both assumed to be 1.6 m.
[x1, y1, z1], [x2, y2, z2], z3 = mn_to_xy_series(tra2, tra1, aoa2, aoa1)
x1, y1 = savgol_filter(x1, 31, 3), savgol_filter(y1, 31, 3)
x2, y2 = savgol_filter(x2, 31, 3), savgol_filter(y2, 31, 3)
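    # A possible visualization sketch (an addition, not in the original script):
    # overlay the two per-receiver trace solutions after smoothing.
    import pylab
    pylab.figure()
    pylab.plot(x1, y1, 'b', label='solution from Rx1')
    pylab.plot(x2, y2, 'r', label='solution from Rx2')
    pylab.legend(loc='best')
    pylab.show()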
#coding:utf-8
from tensorflow.examples.tutorials.mnist import input_data
import pylab
from ops_modify import *
import numpy as np
import pickle
import random
import os
with open('../conditional-GAN/data_extract/source/s_th.pkl', 'rb') as f:
sh = pickle.load(f)
with open('../conditional-GAN/data_extract/source/s_tq.pkl', 'rb') as f:
sq = pickle.load(f)
with open('../conditional-GAN/data_extract/source/s_tqie.pkl', 'rb') as f:
sqie = pickle.load(f)
with open('../conditional-GAN/data_extract/source/s_tz.pkl', 'rb') as f:
sz = pickle.load(f)
train_data = sh[0:200] + sq[0:200] + sqie[0:200] + sz[0:200]
global_step = tf.Variable(0, name='global_step', trainable=False)
label = tf.placeholder(tf.float32, [BATCH_SIZE, 4], name='label')
images = tf.placeholder(tf.float32, [BATCH_SIZE, 28, 100, 1], name='images')
niose = tf.placeholder(tf.float32, [BATCH_SIZE, 100], name='noise')
with tf.variable_scope(tf.get_variable_scope()) as scope:
G_out = generator(niose, label)
D_logits_real = discriminator(images, label)
D_logits_fake = discriminator(G_out, label, reuse=True)
samples = sampler(niose, label)
def transition(batch, is_train=True):
data = None
for i in batch[:, 0]:
i = i[:, 0:100]
data = np.array([i]) if data is None else np.append(data, [i], axis=0)
if is_train is True:
label = None
for j in batch[:, 1]:
label = np.array([convert(j[0])]) if label is None else np.append(label, [convert(j[0])], axis=0)
label = np.reshape(label, (-1, 4))
else:
label = []
for j in batch[:, 1]:
label.append(j[0])
return data, label
def convert(number):
e = np.zeros((4, 1))
e[number] = 1
return e
# label for generating dataset
sample_labels = np.tile(np.eye(4), (10, 1))  # the four one-hot classes cycled ten times (40 rows = BATCH_SIZE)
d_loss_real = -tf.reduce_mean(D_logits_real)
d_loss_fake = tf.reduce_mean(D_logits_fake)
d_loss = d_loss_real + d_loss_fake
g_loss = -d_loss_fake
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'd_' in var.name]
g_vars = [var for var in t_vars if 'g_' in var.name]
# for tf.layers.batch_normalization
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
d_optim = tf.train.AdamOptimizer(0.001, beta1=0.5).minimize(d_loss, var_list=d_vars, global_step=global_step)
g_optim = tf.train.AdamOptimizer(0.001, beta1=0.5).minimize(g_loss, var_list=g_vars, global_step=global_step)
is_train = False
is_param = True
if is_train:
with tf.Session() as sess:
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
if is_param:
saver.restore(sess,'./check_point/CGAN_model_52.ckpt')
# saver.restore(sess, tf.train.latest_checkpoint('./check_point'))
print "loading params"
for i in range(1001):
random.shuffle(train_data)
batchs = [
train_data[k: k + BATCH_SIZE]
for k in xrange(0, 800, BATCH_SIZE)]
for batch in batchs:
data, tag = transition(np.array(batch))
batch_xs = np.reshape(data, (BATCH_SIZE, 28, 100, 1))
batch_xs = batch_xs / 1.5
batch_ys = tag
batch_z = np.random.uniform(-1, 1, size=(BATCH_SIZE, 100))
sess.run([d_optim], feed_dict={images: batch_xs, niose: batch_z, label: batch_ys})
sess.run([g_optim], feed_dict={images: batch_xs, niose: batch_z, label: batch_ys})
# sess.run([g_optim], feed_dict={images: batch_xs, niose: batch_z, label: batch_ys})
if i % 10 == 0:
errD = d_loss.eval(feed_dict={images: batch_xs, label: batch_ys, niose: batch_z})
errG = g_loss.eval({niose: batch_z, label: batch_ys})
print("epoch:[%d], d_loss: %.8f, g_loss: %.8f" % (i, errD, errG))
if i % 50 == 1:
sample = sess.run(samples, feed_dict={niose: batch_z, label: sample_labels})
sample = sample * 1.5
samples_path = './pics/'
save_images(sample, [10,4], samples_path + '%d.png' % (i))
print('save image')
if i % 50 == 2:
checkpoint_path = os.path.join('./check_point/CGAN_model_%d.ckpt' % (i))
saver.save(sess, checkpoint_path)
print('save check_point')
else:
with tf.Session() as sess:
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
saver.restore(sess, './check_point/CGAN_model_52.ckpt')
gan_data = []
for i in range(200):
sample_noise = np.random.uniform(-1, 1, size=(BATCH_SIZE, 100))#n-sample=1
gen_samples = sess.run(generator(niose, label, training =False), feed_dict={niose: sample_noise, label : sample_labels})
gen_samples = gen_samples.reshape(-1, 28, 100)
gen_samples = gen_samples * 1.5
            tags = [[k % 4] for k in range(40)]  # class tags matching sample_labels
            for idx, sample_img in enumerate(gen_samples):
                gan_data.append([sample_img, tags[idx]])
with open('./GAN_data/gan_t2.pkl', 'wb') as f:
pickle.dump(gan_data, f)
# pylab.figure()
# pylab.subplot(4,1,1)
# pylab.imshow(csi_generator[0])
# pylab.legend(loc="best")
#
# pylab.subplot(4,1,2)
# pylab.imshow(csi_generator[1])
# pylab.legend(loc="best")
#
# pylab.subplot(4,1,3)
# pylab.imshow(csi_generator[2])
# pylab.legend(loc="best")
#
# pylab.subplot(4,1,4)
# pylab.imshow(csi_generator[3])
# pylab.legend(loc="best")
# pylab.show()
# Deep-Adaptation-Networks-based-Gesture-Recognition
The project can be divided into two parts:
1) Data augmentation based on a conditional GAN; related modules:
Earth_move_GAN.py and ops_modify.py
2) Domain adaptation based on multi-kernel Maximum Mean Discrepancy (MMD); related modules:
s_model.py for training the deep neural network in the source domain;
t_model.py for transferring the source model to target domains;
mmd.py for computing the domain discrepancy used for adaptation.
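A condensed sketch (an addition, simplified) of how the adaptation loss is wired: features from the frozen shared CNN layers of both domains are compared via the multi-kernel MMD in mmd.py. The 4\*13\*128 feature size matches the shared layers in t_model.py; the placeholder names are illustrative.

```python
import tensorflow as tf
import mmd

source_feat = tf.placeholder(tf.float32, [100, 4 * 13 * 128])  # shared-layer features, source batch
target_feat = tf.placeholder(tf.float32, [100, 4 * 13 * 128])  # shared-layer features, target batch
mmd_loss = mmd.mmd_rbf_noaccelerate(source_feat, target_feat, batch_size=100)
# total objective = source classification loss + lambda * mmd_loss
```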
import tensorflow as tf
def guassian_kernel(source, target, batch_size = 128, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
n_samples = batch_size * 2
total = tf.concat([source, target], axis=0)
total0 = tf.expand_dims(total, 0)
total1 = tf.expand_dims(total, 1)
L2_distance = tf.reduce_sum((total0-total1)**2, axis=2)
if fix_sigma:
bandwidth = fix_sigma
else:
bandwidth = tf.reduce_sum(L2_distance) / (n_samples**2-n_samples)
bandwidth /= kernel_mul ** (kernel_num // 2)
bandwidth_list = [bandwidth * (kernel_mul**i) for i in range(kernel_num)]
kernel_val = [tf.exp(-L2_distance / bandwidth_temp) for bandwidth_temp in bandwidth_list]
return sum(kernel_val)#/len(kernel_val)
def mmd_rbf_accelerate(source, target, batch_size = 100, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
kernels = guassian_kernel(source, target, batch_size,
kernel_mul=kernel_mul, kernel_num=kernel_num, fix_sigma=fix_sigma)
loss = 0
for i in range(batch_size):
s1, s2 = i, (i+1)%batch_size
t1, t2 = s1+batch_size, s2+batch_size
loss += kernels[s1, s2] + kernels[t1, t2]
loss -= kernels[s1, t2] + kernels[s2, t1]
return loss / float(batch_size)
def mmd_rbf_noaccelerate(source, target, batch_size=128, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    kernels = guassian_kernel(source, target, batch_size,
                              kernel_mul=kernel_mul, kernel_num=kernel_num, fix_sigma=fix_sigma)
XX = kernels[:batch_size, :batch_size]
YY = kernels[batch_size:, batch_size:]
XY = kernels[:batch_size, batch_size:]
YX = kernels[batch_size:, :batch_size]
loss = tf.reduce_mean(XX + YY - XY -YX)
return loss
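
# Minimal usage sketch (an addition): MMD between two random batches drawn from
# shifted distributions; the value shrinks as the two distributions align.
if __name__ == '__main__':
    import numpy as np
    s = tf.constant(np.random.randn(100, 64), tf.float32)
    t = tf.constant(np.random.randn(100, 64) + 1.0, tf.float32)
    loss = mmd_rbf_noaccelerate(s, t, batch_size=100)
    with tf.Session() as sess:
        print sess.run(loss)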
import tensorflow as tf
import scipy.misc
import numpy as np
BATCH_SIZE = 40
def weight_variable(shape, name, stddev=0.02, trainable=True):
dtype = tf.float32
var = tf.get_variable(name, shape, tf.float32, trainable=trainable,initializer=tf.random_normal_initializer(stddev=stddev, dtype=dtype))
return var
def bias_variable(shape, name, bias_start=0.0, trainable = True):
dtype = tf.float32
var = tf.get_variable(name, shape, tf.float32, trainable=trainable,initializer=tf.constant_initializer(bias_start, dtype=dtype))
return var
def conv2d(x, output_channels, name, k_h=5, k_w=5, s=2):
x_shape = x.get_shape().as_list()
with tf.variable_scope(name):
w = weight_variable(shape=[k_h, k_w, x_shape[-1], output_channels], name='weights')
b = bias_variable([output_channels], name='biases')
conv = tf.nn.conv2d(x, w, strides=[1, s, s, 1], padding='SAME') + b
return conv
def deconv2d(x, output_shape, name, k_h=5, k_w=5, s=2):
x_shape = x.get_shape().as_list()
with tf.variable_scope(name):
w = weight_variable([k_h, k_w, output_shape[-1], x_shape[-1]], name='weights')
bias = bias_variable([output_shape[-1]], name='biases')
deconv = tf.nn.conv2d_transpose(x, w, output_shape, strides=[1, s, s, 1], padding='SAME') + bias
return deconv
def fully_connect(x, channels_out, name):
shape = x.get_shape().as_list()
channels_in = shape[1]
with tf.variable_scope(name):
weights = weight_variable([channels_in, channels_out], name='weights')
biases = bias_variable([channels_out], name='biases')
return tf.matmul(x, weights) + biases
def lrelu(x, leak=0.02):
return tf.maximum(x, leak * x)
def conv_cond_concat(value, cond):
value_shapes = value.get_shape().as_list()
cond_shapes = cond.get_shape().as_list()
return tf.concat([value, cond * tf.ones(value_shapes[0:3] + cond_shapes[3:])], 3)
# z: ?*100, y: ?*4 (one-hot over the 4 gesture classes)
def generator(z, y, training=True):
with tf.variable_scope("generator", reuse=not training):
yb = tf.reshape(y, [BATCH_SIZE, 1, 1, 4], name="yb") # y:?*1*1*4
z = tf.concat([z, y], 1) # z:?*104
h1 = fully_connect(z, 1024, name='g_h1_fully_connect')
h1 = lrelu(tf.layers.batch_normalization(h1, training=training, name='g_h1_batch_norm'))
h1 = tf.concat([h1,y],1) # 1028
# h2 = fully_connect(h1, 7 * 25 * 128, name='g_h2_fully_connect')
# h2 = lrelu(tf.layers.batch_normalization(h2, training=training, name='g_h2_batch_norm'))
# h2 = tf.reshape(h2,(BATCH_SIZE,7, 25, 128))
# h2 = conv_cond_concat(h2, yb) # h1: 1 * 4 * 260
# h2 = tf.image.resize_images(h1, size=(4,7), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# h2 = conv2d(h2, 256, name='g_h2_deconv2d',s=1)
# h2 = lrelu(tf.layers.batch_normalization(h2, training=training, name='g_h2_batch_norm',)) # BATCH_SIZE*2*7*256
# h2 = conv_cond_concat(h2, yb) # h1: BATCH_SIZE*2*7*260
# h3 = tf.image.resize_images(h2, size=(7,25), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# h3 = conv2d(h3, 128, name='g_h3_deconv2d',s=1)
# h3 = lrelu(tf.layers.batch_normalization(h3, training=training, name='g_h3_batch_norm',)) # BATCH_SIZE*4*13*128
# h3 = conv_cond_concat(h3, yb) # h1: BATCH_SIZE*4*13*132
# h4 = tf.image.resize_images(h3, size=(7,25), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# h4 = conv2d(h4, 64, name='g_h4_deconv2d',s=1)
# h4 = lrelu(tf.layers.batch_normalization(h4, training=training, name='g_h4_batch_norm',)) # BATCH_SIZE*7*25*64
# h4 = conv_cond_concat(h4, yb) # h1: BATCH_SIZE*7*25*68
# h5 = tf.image.resize_images(h2, size=(14,50), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# h5 = conv2d(h5, 64, name='g_h5_deconv2d',s=1)
# h5 = lrelu(tf.layers.batch_normalization(h5, training=training, name='g_h5_batch_norm',)) # BATCH_SIZE*14*50*32
# h5 = conv_cond_concat(h5, yb) # h1: BATCH_SIZE*14*50*32
#
# h6 = tf.image.resize_images(h5, size=(28,100), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# h6 = conv2d(h6, 1, name='g_h6_deconv2d',s=1)
# h6 = lrelu(tf.layers.batch_normalization(h6, training=training, name='g_h6_batch_norm',)) # BATCH_SIZE*28*100*1
# h6 = tf.nn.tanh(h6)
h2 = fully_connect(h1, 128*7*25, name='g_h2_fully_connect')
h2 = lrelu(tf.layers.batch_normalization(h2, training=training, name='g_h2_batch_norm',))
        h2 = tf.reshape(h2, [BATCH_SIZE, 7, 25, 128])  # h2: ?*7*25*128
        h2 = conv_cond_concat(h2, yb)  # h2: ?*7*25*132
        h3 = deconv2d(h2, output_shape=[BATCH_SIZE, 14, 50, 128], name='g_h3_deconv2d')
        h3 = lrelu(tf.layers.batch_normalization(h3, training=training, name='g_h3_batch_norm'))  # h3: ?*14*50*128
        h3 = conv_cond_concat(h3, yb)  # h3: ?*14*50*132
h4 = deconv2d(h3, output_shape=[BATCH_SIZE, 28, 100, 1], name='g_h4_deconv2d')
h4 = tf.nn.tanh(h4) # h4: ?*28*100*1
return h4
def discriminator(image, y, reuse=False, training=True):
# with tf.variable_scope(tf.get_variable_scope(),reuse=reuse):
if reuse:
tf.get_variable_scope().reuse_variables()
yb = tf.reshape(y, [BATCH_SIZE, 1, 1, 4], name='yb') # BATCH_SIZE*1*1*4
x = conv_cond_concat(image, yb) # image: BATCH_SIZE*28*100*1 ,x: BATCH_SIZE*28*100*5
h1 = conv2d(x, 32, name='d_h1_conv2d')
h1 = lrelu(tf.layers.batch_normalization(h1, name='d_h1_batch_norm', training=training, reuse=reuse)) # h1: BATCH_SIZE*14*50*32
    h1 = conv_cond_concat(h1, yb)  # h1: BATCH_SIZE*14*50*36
h2 = conv2d(h1, 64, name='d_h2_conv2d')
h2 = lrelu(tf.layers.batch_normalization(h2, name='d_h2_batch_norm', training=training, reuse=reuse)) # BATCH_SIZE*7*25*64
    h2 = conv_cond_concat(h2, yb)  # h2: BATCH_SIZE*7*25*68
h3 = conv2d(h2, 128, name='d_h3_conv2d')
h3 = lrelu(tf.layers.batch_normalization(h3, name='d_h3_batch_norm', training=training, reuse=reuse)) # BATCH_SIZE*4*13*128
h3 = tf.reshape(h3, [BATCH_SIZE, -1])
    h3 = tf.concat([h3, y], 1)  # h3: BATCH_SIZE*(4*13*128 + 4)
h4 = fully_connect(h3, 1024, name='d_h4_fully_connect')
    h4 = lrelu(tf.layers.batch_normalization(h4, training=training, name='d_h4_batch_norm', reuse=reuse))  # 'd_' prefix keeps these params in d_vars
h4 = tf.concat([h4, y], 1)
# h4 = conv2d(h3, 256, name='d_h4_conv2d')
# h4 = lrelu(tf.layers.batch_normalization(h4, name='d_h4_batch_norm', training=training, reuse=reuse)) # BATCH_SIZE*2*7*256
# h4 = conv_cond_concat(h4, yb) # h1: BATCH_SIZE*2*7*256
# h5 = conv2d(h4, 256, name='d_h5_conv2d')
# h5 = lrelu(tf.layers.batch_normalization(h5, name='d_h5_batch_norm', training=training, reuse=reuse)) # BATCH_SIZE*1*4*256
# h5 = tf.reshape(h5, [BATCH_SIZE, -1]) # BATCH_SIZE*1024
# h5 = tf.concat([h5, y], 1) # BATCH_SIZE*1028
h6 = fully_connect(h4, 1, name='d_h6_fully_connect')
# h3 = lrelu(tf.layers.batch_normalization(h3, name='d_h3_batch_norm', training=training, reuse=reuse)) # BATCH_SIZE*1024
# h3 = tf.concat([h3, y], 1) # BATCH_SIZE*1034
# h4 = fully_connect(h3, 1, name='d_h4_fully_connect') # BATCH_SIZE*1
return tf.nn.sigmoid(h6)
# return h4
def sampler(z, y, training=False):
tf.get_variable_scope().reuse_variables()
return generator(z, y, training=training)
def save_images(images, size, path):
    # map images from [-1, 1] back to [0, 1]
    img = (images + 1.0) / 2.0
    h, w = img.shape[1], img.shape[2]
    merge_img = np.zeros((h * size[0], w * size[1], 3))
    for idx, image in enumerate(img):  # iterate over the normalized images
i = idx % size[1]
j = idx // size[1]
if j >= size[0]:
break
merge_img[j * h:j * h + h, i * w:i * w + w, :] = image
return scipy.misc.imsave(path, merge_img)
"""
The model trained on source dataset which we refer to source_model.
Created on March 6, 2019
Author: zijun han
"""
import tensorflow as tf
import numpy as np
import random
import pickle
from sklearn.manifold import TSNE
# import normalization
import matplotlib.pyplot as plt
# import pylab
np.set_printoptions(threshold=np.inf)
with open('/home/han/PycharmProjects/conditional-GAN/data_extract/source/s_th.pkl', 'rb') as f:
sh = pickle.load(f)
with open('/home/han/PycharmProjects/conditional-GAN/data_extract/source/s_tq.pkl', 'rb') as f:
sq = pickle.load(f)
with open('/home/han/PycharmProjects/conditional-GAN/data_extract/source/s_tqie2.pkl', 'rb') as f:
sqie = pickle.load(f)
with open('/home/han/PycharmProjects/conditional-GAN/data_extract/source/s_tz.pkl', 'rb') as f:
sz = pickle.load(f)
with open('/home/han/PycharmProjects/c-GAN-earth_move/GAN_data/gan_th.pkl', 'rb') as f:
gh = pickle.load(f)
with open('/home/han/PycharmProjects/c-GAN-earth_move/GAN_data/gan_tq.pkl', 'rb') as f:
gq = pickle.load(f)
with open('/home/han/PycharmProjects/c-GAN-earth_move/GAN_data/gan_tqie.pkl', 'rb') as f:
gqie = pickle.load(f)
with open('/home/han/PycharmProjects/c-GAN-earth_move/GAN_data/gan_tz.pkl', 'rb') as f:
gz = pickle.load(f)
with open('/home/han/PycharmProjects/c-GAN-earth_move/GAN_data/gan_t2.pkl', 'rb') as f:
g = pickle.load(f)
train_data_real = g
# train_data_real = gh + gq + gqie + gz
# train_data_real = sh[0:200] + sq[0:200] + sqie[0:200] + sz[0:200]
test_ground_truth = sh[200:300] + sq[200:300] + sqie[200:300] + sz[200:300]
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target1/t_th.pkl', 'rb') as f:
# th1 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target1/t_tq.pkl', 'rb') as f:
# tq1 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target1/t_tqie.pkl', 'rb') as f:
# tqie1 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target1/t_tz.pkl', 'rb') as f:
# tz1 = pickle.load(f)
# test_ground_truth = th1+tq1+tqie1+tz1
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target2/t_th.pkl', 'rb') as f:
# th2 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target2/t_tq.pkl', 'rb') as f:
# tq2 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target2/t_tqie.pkl', 'rb') as f:
# tqie2 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target2/t_tz.pkl', 'rb') as f:
# tz2 = pickle.load(f)
# test_ground_truth = th2+tq2+tqie2+tz2
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target3/t_th.pkl', 'rb') as f:
# th3 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target3/t_tq.pkl', 'rb') as f:
# tq3 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target3/t_tqie.pkl', 'rb') as f:
# tqie3 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target3/t_tz.pkl', 'rb') as f:
# tz3 = pickle.load(f)
# test_ground_truth = th3+tq3+tqie3+tz3
class SourceModel():
def __init__(
self,
m=28,
n=100,
k=4,
batch_size=256,
learning_rate=0.0005,
training_epochs=1,
param_file=False,
is_train=False
):
self.m, self.n, self.k = m, n, k
self.batch_size = batch_size
self.lr = learning_rate
self.is_train = is_train
self.training_epochs = training_epochs
self.buildNetwork()
print "Neural networks build!"
self.saver = tf.train.Saver()
self.sess = tf.Session()
init = tf.global_variables_initializer()
self.sess.run(init)
if is_train is True:
if param_file is True:
self.saver.restore(self.sess, "./GAN_train/GAN-train-fake3.ckpt")
print("loading neural-network params...")
self.learn()
self.show()
else:
print "learning with initialization!"
self.learn()
self.show()
else:
self.saver.restore(self.sess, "./GAN_train/GAN-train-fake3.ckpt")
print "loading neural-network params..."
self.show()
def buildNetwork(self):
self.x = tf.placeholder(tf.float32, shape = [None, self.m, self.n, 1], name='image_origin')
self.y = tf.placeholder(tf.float32, shape = [None, self.k], name='true_label')
self.y_ = tf.placeholder(tf.float32, shape = [None, self.k], name='predict')
with tf.variable_scope('sharedModel'):
w_initializer = tf.random_normal_initializer(stddev=0.02)
b_initializer = tf.constant_initializer(0.01)
w_e_conv1 = tf.get_variable('w1', [5, 5, 1, 32], initializer=w_initializer)
b_e_conv1 = tf.get_variable('b1', [32, ], initializer=b_initializer)
con1_s = lrelu(tf.add(self.conv2d(self.x, w_e_conv1), b_e_conv1))
self.fly = tf.reshape(con1_s, (-1, 14 * 50 * 32))
w_e_conv2 = tf.get_variable('w2', [5, 5, 32, 64], initializer=w_initializer)
b_e_conv2= tf.get_variable('b2', [64, ], initializer=b_initializer)
con2_s = lrelu(tf.add(self.conv2d(con1_s, w_e_conv2), b_e_conv2))
w_e_conv3 = tf.get_variable('w3', [5, 5, 64, 128], initializer=w_initializer)
b_e_conv3= tf.get_variable('b3', [128, ], initializer=b_initializer)
con3_s = lrelu(tf.add(self.conv2d(con2_s, w_e_conv3), b_e_conv3))
# layer1 = conv2d(self.x, 32, name='d_h1_conv2d')
# layer1 = lrelu(layer1)
# layer2 = conv2d(layer1, 64, name='d_h2_conv2d')
# layer2 = lrelu(layer2)
# layer3 = conv2d(layer2, 128, name='d_h3_conv2d')
# layer3 = lrelu(layer3)
# layer1 = lrelu(tf.layers.batch_normalization(layer1, training=self.is_train, name='g_h1_batch_norm'))
# layer2 = lrelu(tf.layers.batch_normalization(layer2, training=self.is_train, name='g_h2_batch_norm'))
# layer3 = lrelu(tf.layers.batch_normalization(layer3, training=self.is_train, name='g_h3_batch_norm'))
with tf.variable_scope('fineTuning'):
w_e_conv4 = tf.get_variable('w4', [5, 5, 128, 256], initializer=w_initializer)
b_e_conv4= tf.get_variable('b4', [256, ], initializer=b_initializer)
con4_s = lrelu(tf.add(self.conv2d(con3_s, w_e_conv4), b_e_conv4))
# con4_s = tf.add(self.conv2d(con3_s, w_e_conv4), b_e_conv4)
# layer4 = conv2d(layer3, 256, name='d_h4_conv2d')
# layer4 = lrelu(tf.layers.batch_normalization(layer4, training=self.is_train, name='g_h4_batch_norm'))
# layer4 = lrelu(layer4)
# layer5 = conv2d(layer4, 256, name='d_h5_conv2d')
# layer5 = lrelu(tf.layers.batch_normalization(layer5, training=self.is_train, name='g_h5_batch_norm'))
# layer5 = lrelu(layer5)
# layer2 = tf.layers.conv2d(layer1, 64, 3, strides=1, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(seed=2))
# layer2 = tf.nn.relu(layer2)
# layer3 = tf.layers.conv2d(layer2, 128, 3, strides=2, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(seed=2))
# layer3 = tf.nn.relu(layer3)
# # layer3 = tf.nn.max_pool(layer3,[1,2,2,1],[1,2,2,1],padding='VALID')
# layer4 = tf.layers.conv2d(layer3, 256, 3, strides=2, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(seed=2))
# layer4 = tf.nn.relu(layer4)
# layer5 = tf.layers.conv2d(layer4, 256, 3, strides=2, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(seed=2))
# layer5 = tf.nn.relu(layer5)
with tf.variable_scope('tailed'):
self.flatten = tf.reshape(con4_s, (-1, 2*7*256))
w_fc1 = tf.get_variable('w5', [2*7*256, 500], initializer=w_initializer)
b_fc1 = tf.get_variable('b5', [500], initializer=b_initializer)
self.logits = tf.nn.sigmoid(tf.matmul(self.flatten, w_fc1) + b_fc1)
w_fc2 = tf.get_variable('w6', [500, self.k], initializer=w_initializer)
b_fc2 = tf.get_variable('b6', [self.k], initializer=b_initializer)
res = tf.nn.sigmoid(tf.matmul(self.logits, w_fc2) + b_fc2)
self.y_ = res
with tf.variable_scope('loss'):
self.loss = tf.reduce_mean(tf.pow(self.y - self.y_, 2))
with tf.variable_scope('train'):
self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
"""
This is the origin model setup for mnist and M-mnist
"""
# with tf.variable_scope('SharedModel'):
# w_initializer = tf.random_normal_initializer(stddev=0.01)
# b_initializer = tf.constant_initializer(0.01)
# self.w_e_conv1 = tf.get_variable('w1', [3, 3, 1, 16], initializer=w_initializer)
# b_e_conv1 = tf.get_variable('b1', [16, ], initializer=b_initializer)
# self.conv1 = tf.nn.relu(tf.add(self.conv2d(self.input, self.w_e_conv1), b_e_conv1))
# print self.conv1.shape
# self.w_e_conv2 = tf.get_variable('w2', [3, 3, 16, 64], initializer=w_initializer)
# b_e_conv2 = tf.get_variable('b2', [64, ], initializer=b_initializer)
# self.conv2 = tf.nn.relu(tf.add(self.conv2d(self.conv1, self.w_e_conv2), b_e_conv2))
# print self.conv2.shape
# self.w_e_conv3 = tf.get_variable('w3', [3, 3, 64, 128], initializer=w_initializer)
# b_e_conv3 = tf.get_variable('b3', [128, ], initializer=b_initializer)
# conv3 = tf.nn.relu(tf.add(self.conv2d(self.conv2, self.w_e_conv3), b_e_conv3))
# print conv3.shape
# self.conv3 = tf.reshape(conv3, [-1, 4 * 4 * 128])
# with tf.variable_scope('tailed'):
# self.w2_fc = tf.get_variable('tailed_w4', [4 * 4 * 128, GESTURE], initializer=w_initializer, )
# self.b2_fc = tf.get_variable('tailed_b4', [GESTURE, ], initializer=b_initializer,)
# result = tf.nn.relu(tf.matmul(self.conv3, self.w2_fc) + self.b2_fc)
# self.predict = result
def conv2d(self, x, W):
return tf.nn.conv2d(x, W, strides=[1,2,2,1], padding='SAME')
def learn(self):
total_batch = 300
for epoch in range(self.training_epochs):
# Loop over all batches
for i in range(total_batch):
# batch = np.array(random.sample(csi_train, self.batch_size))
random.shuffle(train_data_real)
batchs = [
train_data_real[k: k + self.batch_size]
for k in xrange(0, 8000, self.batch_size)]
for batch in batchs:
data, label = self.transition(np.array(batch))
batch_xs = np.reshape(data, (-1, self.m, self.n, 1))
batch_ys = label
_, c = self.sess.run([self.optimizer, self.loss], feed_dict={self.x: batch_xs, self.y: batch_ys})
if i % 10 == 0:
print("Epoch:", '%04d' % (epoch + 1),"iteration: %d"%(i), "cost=", "{:.8f}".format(c))
if i % 10 == 0:
self.show()
print("Optimization Finished!")
# self.saver.save(self.sess, "./GAN_train/GAN-train-fake3.ckpt")
def show(self):
data, lab = self.transition(np.array(test_ground_truth), is_train=False)
y_pre = self.sess.run(
self.y_, feed_dict={self.x: np.reshape(data, (-1, self.m, self.n, 1))})
y_lab = lab
y_pre = [np.argmax(i) for i in y_pre]
# print y_pre
# print y_lab
        accuracy = sum(int(y == y_) for (y, y_) in zip(y_pre, y_lab))
        print "accuracy: {0} / {1} ".format(accuracy, 400)
batch_xs = data.reshape((-1, 28,100, 1))
batch_ys = lab
features = self.sess.run(self.fly, feed_dict={self.x: batch_xs})
src_features = TSNE(n_components=2).fit_transform(features)
slide,riot,down,push=[],[],[],[]
for i in range(len(batch_ys)):
if batch_ys[i]==0:
slide.append(np.array(src_features[i]))
elif batch_ys[i] == 1:
riot.append(np.array(src_features[i]))
elif batch_ys[i] == 2:
down.append(np.array(src_features[i]))
else:
push.append(np.array(src_features[i]))
slide = np.array(slide)
riot = np.array(riot)
down = np.array(down)
push = np.array(push)
o1 = plt.scatter(slide[:, 0], slide[:, 1], c='', edgecolors='#FF6347', alpha=0.8,label = 'slide')
o2 = plt.scatter(riot[:, 0], riot[:, 1], c='', edgecolors='y', alpha=0.8,label = 'riot')
o3 = plt.scatter(down[:, 0], down[:, 1], c='', edgecolors='g', alpha=0.8,label='down')
o4 = plt.scatter(push[:, 0], push[:, 1], c='', edgecolors='b', alpha=0.8,label= 'push')
plt.legend(loc='best')
plt.title('Source domain')
# plt.xlim((-30, 40))
# plt.ylim((-40, 30))
plt.savefig('/home/han/plot123_2.png', dpi=900)
plt.show()
# x= []
# for i in batch_ys:
# label = i
# if label == 0:
# x.append('#FF6347')
# elif label == 1:
# x.append('y')
# elif label == 2:
# x.append('g')
# elif label == 3:
# x.append('b')
# elif label == 4:
# x.append('#DC143C')
# elif label == 5:
# x.append('k')
# elif label == 6:
# x.append('m')
# elif label == 7:
# x.append('#00FFFF')
# elif label == 8:
# x.append('#20B2AA')
# elif label == 9:
# x.append('#708090')
#
# plt.scatter(src_features[:, 0], src_features[:, 1], c='', edgecolors=x, alpha=0.8)
# plt.legend(loc='upper left')
# plt.title('target2')
# plt.show()
def transition(self, batch, is_train = True):
data = None
for i in batch[:,0]:
i = i[:, 0:100]
data = np.array([i]) if data is None else np.append(data, [i], axis=0)
if is_train is True:
label = None
for j in batch[:,1]:
label = np.array([self.convert(j[0])]) if label is None else np.append(label, [self.convert(j[0])], axis=0)
label = np.reshape(label, (-1, 4))
else:
label = []
for j in batch[:,1]:
label.append(j[0])
return data, label
def convert(self, number):
e = np.zeros((4,1))
e[number] = 1
return e
def test(self):
"""
This is the function for performance view.
"""
correct_prediction = tf.equal(tf.argmax(self.label, 1), tf.argmax(self.predict, 1))
batch_xs = np.reshape(mnist.test.images, [-1, HEIGHT, WEIGHT, DEPTH])
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
print(self.sess.run(accuracy, feed_dict={self.input: batch_xs, self.label: mnist.test.labels}))
batch_xs = batch_xs[1000: 3000]
batch_ys = mnist.test.labels[1000: 3000]
features = self.sess.run(self.conv3, feed_dict={self.input: batch_xs})
src_features = TSNE(n_components=2).fit_transform(features)
x= []
for i in batch_ys:
label = self.changeLabel(i)
if label == 0:
x.append('#FF6347')
elif label == 1:
x.append('y')
elif label == 2:
x.append('g')
elif label == 3:
x.append('b')
elif label == 4:
x.append('#DC143C')
elif label == 5:
x.append('k')
elif label == 6:
x.append('m')
elif label == 7:
x.append('#00FFFF')
elif label == 8:
x.append('#20B2AA')
elif label == 9:
x.append('#708090')
plt.scatter(src_features[:, 0], src_features[:, 1], c='', edgecolors=x, alpha=0.8)
plt.title('Non-adapted')
        plt.show()  # pylab is not imported in this file; use the matplotlib.pyplot alias
def changeLabel(self, one_hot):
return np.argmax(one_hot)
def weight_variable(shape, name, stddev=0.02, trainable=True):
dtype = tf.float32
var = tf.get_variable(name, shape, tf.float32, trainable=trainable,initializer=tf.random_normal_initializer(stddev=stddev, dtype=dtype))
return var
def bias_variable(shape, name, bias_start=0.01, trainable = True):
dtype = tf.float32
var = tf.get_variable(name, shape, tf.float32, trainable=trainable,initializer=tf.constant_initializer(bias_start, dtype=dtype))
return var
def conv2d(x, output_channels, name, k_h=5, k_w=5,reuse =False):
x_shape = x.get_shape().as_list()
with tf.variable_scope(name, reuse=reuse):
w = weight_variable(shape=[k_h, k_w, x_shape[-1], output_channels], name='weights')
b = bias_variable([output_channels], name='biases')
conv = tf.nn.conv2d(x, w, strides=[1, 2, 2, 1], padding='SAME') + b
return conv
def lrelu(x, leak=0.02):
return tf.maximum(x, leak * x)
if __name__ =="__main__":
SourceModel()
"""
The model re-trained on target dataset which we refer to target_model.
Created on March 6, 2019
Author: zijun han
"""
import tensorflow as tf
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import pickle
import mmd
import torch.utils.data as Data
class TargetModel():
def __init__(
self,
m=28,
n=100,
k=4,
batch_size=100,
learning_rate=0.001,
training_epochs=50 ,
is_train=False,
):
self.m, self.n, self.k = m, n, k
self.batch_size = batch_size
self.lr = learning_rate
self.is_train = is_train
self.training_epochs = training_epochs
self.buildNetwork()
print "Neural networks built!"
self.saver = tf.train.Saver()
self.sess = tf.Session()
init = tf.global_variables_initializer()
self.sess.run(init)
if is_train is True:
self.saver.restore(self.sess, './GAN_train/GAN-train-fake3.ckpt') # load source model
print("loading source model params...")
var_list = [self.w_fc1, self.b_fc1,self.w_fc2,self.b_fc2]
initfc = tf.variables_initializer(var_list, name='init') # FIXME: Any better initialization?
self.sess.run(initfc) # initialize the tailed layers
self.learn() # time to train
self.show()
else:
self.saver.restore(self.sess, "./params/target3.ckpt") # load trained model
self.show() # performance view
def buildNetwork(self):
"""
        Here, 'sharedModel' holds the transferred convolutional layers, and 'tailed' is the randomly initialized fully connected head.
        Labeled source data is used to compute the classification error and, together with the target data, the MMD loss.
:return: initialized model.
"""
self.source_input = tf.placeholder(tf.float32, shape = [None, self.m, self.n, 1], name='source_input')
self.target_input= tf.placeholder(tf.float32, shape = [None, self.m, self.n, 1], name='target_input')
self.predict = tf.placeholder(tf.float32, shape = [None, self.k], name ='y_predict')
self.source_label= tf.placeholder(tf.float32, shape = [None, self.k], name='y_label')
with tf.variable_scope('sharedModel'):
w_initializer = tf.random_normal_initializer(stddev=0.02)
b_initializer = tf.constant_initializer(0.01)
# self.w_e_conv1 = tf.get_variable('w1', [3, 3, 1, 16], initializer=w_initializer, trainable=False)
# b_e_conv1 = tf.get_variable('b1', [16, ], initializer=b_initializer, trainable=False)
# self.conv1_source = tf.nn.relu(tf.add(self.conv2d(self.source_input, self.w_e_conv1), b_e_conv1))
w_e_conv1 = tf.get_variable('w1', [5, 5, 1, 32], initializer=w_initializer, trainable=False)
b_e_conv1 = tf.get_variable('b1', [32, ], initializer=b_initializer, trainable=False)
con1_s = lrelu(tf.add(self.conv2d(self.source_input, w_e_conv1), b_e_conv1))
con1_t = lrelu(tf.add(self.conv2d(self.target_input, w_e_conv1), b_e_conv1))
# self.fly = tf.reshape(con1_t, (-1, 14 * 50 * 32))
w_e_conv2 = tf.get_variable('w2', [5, 5, 32, 64], initializer=w_initializer, trainable=False)
b_e_conv2= tf.get_variable('b2', [64, ], initializer=b_initializer, trainable=False)
con2_s = lrelu(tf.add(self.conv2d(con1_s, w_e_conv2), b_e_conv2))
con2_t = lrelu(tf.add(self.conv2d(con1_t, w_e_conv2), b_e_conv2))
self.fly = tf.reshape(con2_t, (-1, 7 * 25 * 64))
w_e_conv3 = tf.get_variable('w3', [5, 5, 64, 128], initializer=w_initializer, trainable=False)
b_e_conv3= tf.get_variable('b3', [128, ], initializer=b_initializer, trainable=False)
con3_s = lrelu(tf.add(self.conv2d(con2_s, w_e_conv3), b_e_conv3))
con3_t = lrelu(tf.add(self.conv2d(con2_t, w_e_conv3), b_e_conv3))
self.con3_s = tf.reshape(con3_s, [-1, 4*13*128])
self.con3_t = tf.reshape(con3_t, [-1, 4*13*128])
# con1_s = conv2d(self.source_input, 32, name='d_h1_conv2d')
# con1_s = lrelu(con1_s)
# con1_t = conv2d(self.target_input, 32, name='d_h1_conv2d', reuse=True)
# con1_t = lrelu(con1_t)
# con2_s = conv2d(con1_s, 64, name='d_h2_conv2d')
# con2_s = lrelu(con2_s)
# con2_t = conv2d(con1_t, 64, name='d_h2_conv2d', reuse=True)
# con2_t = lrelu(con2_t)
# con3_s = conv2d(con2_s, 128, name='d_h3_conv2d')
# con3_s = lrelu(con3_s)
# con3_t = conv2d(con2_t, 128, name='d_h3_conv2d', reuse=True)
# con3_t = lrelu(con3_t)
with tf.variable_scope('fineTuning'):
w_e_conv4 = tf.get_variable('w4', [5, 5, 128, 256], initializer=w_initializer)
b_e_conv4= tf.get_variable('b4', [256, ], initializer=b_initializer)
con4_s = lrelu(tf.add(self.conv2d(con3_s, w_e_conv4), b_e_conv4))
con4_t = lrelu(tf.add(self.conv2d(con3_t, w_e_conv4), b_e_conv4))
with tf.variable_scope('tailed'):
self.con4_s= tf.reshape(con4_s, [-1, 2*7*256])
self.con4_t = tf.reshape(con4_t, [-1, 2*7*256])
self.w_fc1 = tf.get_variable('w5', [2*7*256, 500], initializer=w_initializer)
self.b_fc1 = tf.get_variable('b5', [500,], initializer=b_initializer)
self.res = tf.nn.sigmoid(tf.matmul(self.con4_s, self.w_fc1) + self.b_fc1)
self.ret = tf.nn.sigmoid(tf.matmul(self.con4_t, self.w_fc1) + self.b_fc1)
self.w_fc2 = tf.get_variable('w6', [500, self.k], initializer=w_initializer)
self.b_fc2 = tf.get_variable('b6', [self.k,], initializer=b_initializer)
pre_s = tf.nn.sigmoid(tf.matmul(self.res, self.w_fc2) + self.b_fc2)
pre_t = tf.nn.sigmoid(tf.matmul(self.ret, self.w_fc2) + self.b_fc2)
self.p_s = pre_s
self.p_t = pre_t
with tf.variable_scope('loss'):
            alpha, theta, gamma1, gamma2 = 0.3, 0.3, 0.3, 0.3
            self.class_loss = tf.reduce_mean(tf.pow(self.source_label - self.p_s, 2))
            self.mmd_loss = mmd.mmd_rbf_accelerate(source=self.con3_s, target=self.con3_t) * alpha + mmd.mmd_rbf_accelerate(source=self.con4_s, target=self.con4_t) * theta + \
                mmd.mmd_rbf_accelerate(source=self.res, target=self.ret) * gamma1 + mmd.mmd_rbf_accelerate(source=self.p_s, target=self.p_t) * gamma2
self.loss = self.class_loss + self.mmd_loss
with tf.variable_scope('train'):
train_vars = tf.trainable_variables()
fine_vars = [var for var in train_vars if var.name.startswith("fineTuning")]
tail_vars = [var for var in train_vars if var.name.startswith("tailed")]
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
# train_op1 = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(self.loss, var_list=fine_vars)
# train_op2 = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(self.loss, var_list=tail_vars)
opt1 = tf.train.GradientDescentOptimizer(0.0001)
opt2 = tf.train.GradientDescentOptimizer(0.005)
grads = tf.gradients(self.loss, fine_vars + tail_vars)
grads1 = grads[:len(fine_vars)]
grads2 = grads[len(fine_vars):]
train_op1 = opt1.apply_gradients(zip(grads1,fine_vars))
train_op2 = opt2.apply_gradients(zip(grads2, tail_vars))
self.train_op = tf.group(train_op1, train_op2)
# self.train_op = tf.group(fine_opt, tail_opt)
# fine_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='fineTuning')
# fine_opt = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(self.loss, var_list=fine_vars)
#
# tail_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='tailed')
# tail_opt = tf.train.AdamOptimizer(learning_rate=0.005).minimize(self.loss,var_list=tail_vars)
# with tf.variable_scope('SharedModel'):
# w_initializer = tf.random_normal_initializer(stddev=0.01)
# b_initializer = tf.constant_initializer(0.01)
# self.w_e_conv1 = tf.get_variable('w1', [3, 3, 1, 16], initializer=w_initializer, trainable=False)
# b_e_conv1 = tf.get_variable('b1', [16, ], initializer=b_initializer, trainable=False)
# self.conv1_source = tf.nn.relu(tf.add(self.conv2d(self.source_input, self.w_e_conv1), b_e_conv1))
# self.conv1_target = tf.nn.relu(tf.add(self.conv2d(self.target_input, self.w_e_conv1), b_e_conv1))
# self.w_e_conv2 = tf.get_variable('w2', [3, 3, 16, 64], initializer=w_initializer, trainable=False)
# b_e_conv2 = tf.get_variable('b2', [64, ], initializer=b_initializer, trainable=False)
# self.conv2_source = tf.nn.relu(tf.add(self.conv2d(self.conv1_source, self.w_e_conv2), b_e_conv2))
# self.conv2_target = tf.nn.relu(tf.add(self.conv2d(self.conv1_target, self.w_e_conv2), b_e_conv2))
# self.w_e_conv3 = tf.get_variable('w3', [3, 3, 64, 128], initializer=w_initializer, trainable=False)
# self.b_e_conv3 = tf.get_variable('b3', [128, ], initializer=b_initializer, trainable=False)
# conv3_source = tf.nn.relu(tf.add(self.conv2d(self.conv2_source, self.w_e_conv3), self.b_e_conv3))
# conv3_target = tf.nn.relu(tf.add(self.conv2d(self.conv2_target, self.w_e_conv3), self.b_e_conv3))
# self.conv3_source = tf.reshape(conv3_source, [-1, 4 * 4 * 128])
# self.conv3_target = tf.reshape(conv3_target, [-1, 4 * 4 * 128])
# with tf.variable_scope('tailed'):
# self.w2_fc = tf.get_variable('tailed_w4', [4 * 4 * 128, GESTURE], initializer=w_initializer,)
# self.b2_fc = tf.get_variable('tailed_b4', [GESTURE, ], initializer=b_initializer,)
# self.result_s = tf.nn.relu(tf.matmul(self.conv3_source, self.w2_fc) + self.b2_fc)
# self.result_t = tf.nn.relu(tf.matmul(self.conv3_target, self.w2_fc) + self.b2_fc)
# self.p_s = self.result_s
# self.p_t = self.result_t
def conv2d(self, x, W):
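        # Stride-2 convolution with SAME padding halves both spatial dimensions.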
return tf.nn.conv2d(x, W, strides=[1,2,2,1], padding='SAME')
def learn(self):
"""
The transferring and re-training process
:return: target model
"""
for j in range(self.training_epochs):
source_iter = iter(source_data)
target_iter = iter(target_data)
for i in range(1, len_source+1):
                batch_s = next(source_iter)
xs, ys = self.transition(batch_s)
xs = np.reshape(xs, [-1, self.m, self.n, 1])
ys = np.reshape(ys, [-1, self.k])
                batch_t = next(target_iter)
xt, _ = self.transition(batch_t)
xt = np.reshape(xt, [-1, self.m, self.n, 1])
if i % len_target == 0:
target_iter = iter(target_data)
_, c1, c2 = self.sess.run([self.train_op, self.class_loss, self.mmd_loss], feed_dict={self.source_input: xs, self.source_label: ys, self.target_input: xt})
                if np.any(np.isnan(xs)) or np.any(np.isnan(ys)):
                    print("NaN detected in the input batch!")
                if i % 20 == 0:
                    print("Total Epoch: {:d}, Int Epoch: {:d}, class loss = {:.9f}, mmd loss = {:.9f}".format(j, i, c1, c2))
if i % 20 == 1:
self.show()
print("Optimization Finished!")
self.saver.save(self.sess, "./params/target3.ckpt")
def show(self):
data, label = self.trans(np.array(target), is_train=False)
t_pre = self.sess.run(
self.p_t, feed_dict={self.target_input: np.reshape(data, (-1, self.m, self.n, 1))})
t_lab = label
t_pre = [np.argmax(i) for i in t_pre]
        print(t_lab)
        print(t_pre)
        acc_target = sum(int(y == y_) for (y, y_) in zip(t_pre, t_lab))
        print("acc_target: {0} / {1}".format(acc_target, 400))
# print "acc_source: {0} / {1} ".format(acc_source, 400)
batch_xs = data.reshape((-1, 28, 100, 1))
batch_ys = label
features = self.sess.run(self.fly, feed_dict={self.target_input: batch_xs})
src_features = TSNE(n_components=2).fit_transform(features)
slide, riot, down, push = [], [], [], []
for i in range(len(batch_ys)):
if batch_ys[i] == 0:
slide.append(np.array(src_features[i]))
elif batch_ys[i] == 1:
riot.append(np.array(src_features[i]))
elif batch_ys[i] == 2:
down.append(np.array(src_features[i]))
else:
push.append(np.array(src_features[i]))
slide = np.array(slide)
riot = np.array(riot)
down = np.array(down)
push = np.array(push)
o1 = plt.scatter(slide[:, 0], slide[:, 1], c='', edgecolors='#FF6347', alpha=0.8, label='slide')
o2 = plt.scatter(riot[:, 0], riot[:, 1], c='', edgecolors='y', alpha=0.8, label='riot')
o3 = plt.scatter(down[:, 0], down[:, 1], c='', edgecolors='g', alpha=0.8, label='down')
o4 = plt.scatter(push[:, 0], push[:, 1], c='', edgecolors='b', alpha=0.8, label='push')
plt.legend(loc='best')
plt.title('Target domain (C)')
# plt.xlim((-30, 40))
# plt.ylim((-40, 30))
plt.savefig('/home/han/plot123_2.png', dpi=900)
plt.show()
# x = []
# for i in batch_ys:
# label = i
# if label == 0:
# x.append('#FF6347')
# elif label == 1:
# x.append('y')
# elif label == 2:
# x.append('g')
# elif label == 3:
# x.append('b')
# elif label == 4:
# x.append('#DC143C')
# elif label == 5:
# x.append('k')
# elif label == 6:
# x.append('m')
# elif label == 7:
# x.append('#00FFFF')
# elif label == 8:
# x.append('#20B2AA')
# elif label == 9:
# x.append('#708090')
#
# plt.scatter(src_features[:, 0], src_features[:, 1], c='', edgecolors=x, alpha=0.8)
# plt.title('target3')
# plt.show()
def convert(self, number):
e = np.zeros((4, 1))
e[number] = 1
return e
def trans(self, batch, is_train = True):
data = None
for i in batch[:,0]:
i = i[:, 0:100]
data = np.array([i]) if data is None else np.append(data, [i], axis=0)
if is_train is True:
label = None
for j in batch[:,1]:
label = np.array([self.convert(j[0])]) if label is None else np.append(label, [self.convert(j[0])], axis=0)
label = np.reshape(label, (-1, 4))
else:
label = []
for j in batch[:,1]:
label.append(j[0])
return data, label
def transition(self, batch, is_train=True):
data = None
for i in batch[0]:
i = i.numpy()
i = i[:, 0:100]
data = np.array([i]) if data is None else np.append(data, [i], axis=0)
if is_train is True:
label = None
be = [i.numpy() for i in batch[1]][0]
for j in be:
label = np.array([self.convert(j)]) if label is None else np.append(label, [self.convert(j)], axis=0)
label = np.reshape(label, (-1, 4))
else:
label = [i.numpy() for i in batch[1]][0]
return data, label
def lrelu(x, leak=0.02):
return tf.maximum(x, leak * x)
if __name__ =="__main__":
batch_size = 100
with open('/home/han/PycharmProjects/conditional-GAN/data_extract/source/s_th.pkl', 'rb') as f:
sh = pickle.load(f)
with open('/home/han/PycharmProjects/conditional-GAN/data_extract/source/s_tq.pkl', 'rb') as f:
sq = pickle.load(f)
with open('/home/han/PycharmProjects/conditional-GAN/data_extract/source/s_tqie.pkl', 'rb') as f:
sqie = pickle.load(f)
with open('/home/han/PycharmProjects/conditional-GAN/data_extract/source/s_tz.pkl', 'rb') as f:
sz = pickle.load(f)
test_ground_truth = sh[200:300] + sq[200:300] + sqie[200:300] + sz[200:300]
# with open('/home/han/PycharmProjects/c-GAN-earth_move/GAN_data/gan_th.pkl', 'rb') as f:
# gh = pickle.load(f)
# with open('/home/han/PycharmProjects/c-GAN-earth_move/GAN_data/gan_tq.pkl', 'rb') as f:
# gq = pickle.load(f)
# with open('/home/han/PycharmProjects/c-GAN-earth_move/GAN_data/gan_tqie.pkl', 'rb') as f:
# gqie = pickle.load(f)
# with open('/home/han/PycharmProjects/c-GAN-earth_move/GAN_data/gan_tz.pkl', 'rb') as f:
# gz = pickle.load(f)
with open('/home/han/PycharmProjects/c-GAN-earth_move/GAN_data/gan_t2.pkl', 'rb') as f:
g = pickle.load(f)
source = g
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target1/t_th.pkl', 'rb') as f:
# th1 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target1/t_tq.pkl', 'rb') as f:
# tq1 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target1/t_tqie.pkl', 'rb') as f:
# tqie1 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target1/t_tz2.pkl', 'rb') as f:
# tz1 = pickle.load(f)
# target = th1 + tq1 + tqie1 + tz1
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target2/t_th.pkl', 'rb') as f:
# th2 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target2/t_tq.pkl', 'rb') as f:
# tq2 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target2/t_tqie.pkl', 'rb') as f:
# tqie2 = pickle.load(f)
# with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target2/t_tz2.pkl', 'rb') as f:
# tz2 = pickle.load(f)
# target = th2+tq2+tqie2+tz2
with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target3/t_th.pkl', 'rb') as f:
th3 = pickle.load(f)
with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target3/t_tq2.pkl', 'rb') as f:
tq3 = pickle.load(f)
with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target3/t_tqie.pkl', 'rb') as f:
tqie3 = pickle.load(f)
with open('/home/han/PycharmProjects/conditional-GAN/data_extract/target3/t_tz2.pkl', 'rb') as f:
tz3 = pickle.load(f)
target = th3+tq3+tqie3+tz3
source_data = Data.DataLoader(dataset=source, batch_size=batch_size, shuffle=True)
target_data = Data.DataLoader(dataset=target, batch_size=batch_size, shuffle=True)
source_iter = iter(source_data)
target_iter = iter(target_data)
len_source = len(source_data)
len_target = len(target_data)
TargetModel()
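The mmd module imported above is not included in this dump. For intuition only, here is a minimal NumPy sketch of the plain RBF-kernel MMD^2 estimate; mmd.mmd_rbf_accelerate presumably follows the same idea with a faster estimator (an assumption, not the project's actual implementation):
# Illustrative biased RBF-MMD^2 estimate; NOT the project's mmd module.
import numpy as np
def rbf_mmd2(X, Y, sigma=1.0):
    # pairwise squared distances via ||x||^2 + ||y||^2 - 2<x, y>
    def sqdist(A, B):
        return np.sum(A ** 2, 1)[:, None] + np.sum(B ** 2, 1)[None, :] - 2 * A.dot(B.T)
    k = lambda D: np.exp(-D / (2 * sigma ** 2))
    m, n = float(len(X)), float(len(Y))
    return k(sqdist(X, X)).sum() / m ** 2 + k(sqdist(Y, Y)).sum() / n ** 2 \
        - 2 * k(sqdist(X, Y)).sum() / (m * n)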
# csitool-for-realview
This is the tool that runs on the server for real-time viewing ("realview") of CSI from the 802.11n CSI Tool.
In the test scenario there are three PCs: one for Tx, one for Rx, and the third for the realview.
Every packet the server receives is forwarded by the Rx at the moment the Rx captures a packet.
Note that the transmission uses the UDP protocol.
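The Rx-side forwarder itself is not part of this repository. A rough sketch of what it might look like (port 5563 matches the receiver below; the server address and function name are placeholders):
# Hypothetical Rx-side forwarder: relays each raw bfee payload to the realview server.
import socket
def forward(raw_bytes, server_ip='192.168.1.100', port=5563):  # server_ip is a placeholder
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.sendto(raw_bytes, (server_ip, port))
    s.close()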
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 26 2018
@author: han
"""
import modify_extract
#import extract
import udp
from plot import Display
import struct
ret = [] # to store csi
s = udp.udp_init(5563) # create a udp handle
f = Display() #initialize the realview procedure
f.display()
try:
while True: # a loop to receive the data
csiInfo = []
data, addr = udp.recv(s) # receive a udp socket
for i in range(1,len(data)):
csiInfo.append(struct.unpack("!B", data[i])[0]) # decode csi from udp
CSI_matrix = modify_extract.readFile(csiInfo)
f.push(CSI_matrix)
# print CSI_matrix
except KeyboardInterrupt:
udp.close(s) # close udp
f.stop() # close view
import read_bf_file
import numpy as np
import pylab  # used by the plotting code under __main__
def radReverse(subcarrier):
    return list(map(lambda x: float("%.2f" % np.arctan(x.imag / x.real)), subcarrier))
def complexToLatitude(subcarrier):
    return list(map(lambda x: float("%.2f" % abs(x)), subcarrier))
def relativePhaseOperation(antenna_one, antenna_two, antenna_three):
# amplitude, relativePhase_one, relativePhase_two = None, None,None
# antenna_two= antenna_one * (antenna_two.conjugate())
# antenna_three = antenna_one * (antenna_three.conjugate())
# for subcarrier in antenna_one:
# raw = np.array([complexToLatitude(subcarrier)]) if raw is None else np.append(raw, [complexToLatitude(subcarrier)], axis=0)
# for subcarrier in antenna_two:
# relativePhase_one = np.array([radReverse(subcarrier)]) if relativePhase_one is None else np.append(relativePhase_one, [radReverse(subcarrier)], axis=0)
# for subcarrier in antenna_three:
# relativePhase_two = np.array([radReverse(subcarrier)]) if relativePhase_two is None else np.append(relativePhase_two, [radReverse(subcarrier)], axis=0)
amplitude, phase = None, None
for subcarrier in antenna_one:
amplitude = np.array([complexToLatitude(subcarrier)]) \
if amplitude is None else np.append(amplitude, [complexToLatitude(subcarrier)], axis=0)
for subcarrier in antenna_two:
phase = np.array([radReverse(subcarrier)]) \
if phase is None else np.append(phase, [radReverse(subcarrier)], axis=0)
return amplitude, phase
def readFile(filepath):
file=read_bf_file.read_file(filepath)
    print(len(file))
# pair_one_real =pair_one_imag=pair_Two_real=pair_Two_imag=pair_Three_real=pair_Three_imag =np.zeros((30,len(file)))
antennaPair_raw, antennaPair_One, antennaPair_Two, antennaPair_Three= [], [], [], []
for item in file:
for eachcsi in range(0, 30):
antennaPair_One.append(item.csi[eachcsi][0][0])
antennaPair_Two.append(item.csi[eachcsi][0][1])
antennaPair_Three.append(item.csi[eachcsi][0][2])
antennaPair_One = np.reshape(antennaPair_One,(1, 30)).transpose()
antennaPair_Two = np.reshape(antennaPair_Two, (1, 30)).transpose()
antennaPair_Three = np.reshape(antennaPair_Three, (1, 30)).transpose()
amplitude, phase= relativePhaseOperation(antennaPair_One, antennaPair_Two, antennaPair_Three)
csi_matrix = np.array([amplitude])
csi_matrix = np.append(csi_matrix, [phase], axis=0)
return csi_matrix
if __name__ == '__main__':
    csi = readFile("/home/han/data/1/csi1.dat")  # readFile returns the stacked [amplitude, phase] matrix
# with open('../data/1/static_csi.pkl', 'wb') as handle:
# pickle.dump(csi, handle, -1)
phase1, value1 = None, None
phase2, value2 = None, None
pylab.figure()
pylab.plot(csi[0][0], 'g-', label='butterworth')
pylab.legend(loc='best')
pylab.ylim(0, 50)
pylab.show()
# --coding: utf-8 --
import pywt
from scipy import signal
class Filter():
def __init__(
self,
sequence):
self.sequence = sequence
def feedback(self):
w = pywt.Wavelet('coif7')
a = self.sequence
ca, cd = [], []
for i in range(3):
(a, d) = pywt.dwt(a, w)
ca.append(a)
cd.append(d)
rec_a, rec_d = [], []
for i, coeff in enumerate(ca):
coeff_list = [coeff, None] + [None] * i
rec_a.append(pywt.waverec(coeff_list, w))
for i, coeff in enumerate(cd):
coeff_list = [None, coeff] + [None] * i
rec_d.append(pywt.waverec(coeff_list, w))
return rec_a[-1]
def butterWorth(self):
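        # 3rd-order low-pass Butterworth; 0.3 is the cutoff as a fraction of the Nyquist rate.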
b, a = signal.butter(3, 0.3, 'low')
sf = signal.filtfilt(b, a, self.sequence)
return sf
if __name__ =="__main__":
import display
raw, dwt,_,_ = display.date_wrapper()
    print(raw[0])
    A = Filter(raw[0])
    print(len(A.feedback()))
# fig = plt.figure()
# ax_main = fig.add_subplot(len(rec_a) + 1, 1, 1)
# ax_main.set_title(title)
# ax_main.plot(data)
# ax_main.set_xlim(0, len(data) - 1)
#
# for i, y in enumerate(rec_a):
# ax = fig.add_subplot(len(rec_a) + 1, 2, 3 + i * 2)
# ax.plot(y, 'r')
# ax.set_xlim(0, len(y) - 1)
# ax.set_ylabel("A%d" % (i + 1))
#
# for i, y in enumerate(rec_d):
# ax = fig.add_subplot(len(rec_d) + 1, 2, 4 + i * 2)
# ax.plot(y, 'g')
# ax.set_xlim(0, len(y) - 1)
# ax.set_ylabel("D%d" % (i + 1))
# coding=utf-8
import pywt
import math
import numpy as np
import matplotlib.pyplot as plt
fs = 1000          # sampling frequency
f1 = 50            # signal frequency 1
f2 = 100           # signal frequency 2
totalscale = 256
t = np.arange(0, 1, 1.0 / fs)
sig = np.sin(2 * math.pi * f1 * t) + np.sin(2 * math.pi * f2 * t)
wcf = pywt.central_frequency('morl')               # central frequency of the mother wavelet
scale = np.arange(1, totalscale + 1, 1)            # arithmetic sequence 1..256
cparam = 2 * wcf * totalscale
scale = cparam / scale                             # convert to CWT scales
frequencies = pywt.scale2frequency('morl', scale)  # convert scales to normalized frequencies
frequencies = frequencies * fs                     # map to physical signal frequencies
cwtmatr, freqs = pywt.cwt(sig, scale, 'morl')      # continuous wavelet coefficients
plt.ylabel('y')
plt.xlabel('x')
plt.pcolormesh(t,frequencies,abs(cwtmatr),vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
plt.colorbar()
plt.show()
import threading
import read_bf_file
# import read_bf_file
import numpy as np
import pylab
import pickle
from scipy.fftpack import fft, ifft
#from dwtfilter import dwtfilter
TIMEINTERVAL = 0.05
TIMEBIASE = 0.45
TIMELEN = 54000
TIMEWINDOW = 900
IMAGETOCSIRATIO = 30
def radReverse(subcarrier):
    return list(map(lambda x: float("%.2f" % np.arctan(x.imag / x.real)), subcarrier))
def complexToLatitude(subcarrier):
    return list(map(lambda x: float("%.2f" % abs(x)), subcarrier))
def reviseInterp(timestamp, eachsubcarrier):
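    # Detect gaps where consecutive CSI timestamps are more than TIMEINTERVAL apart,
    # insert zero placeholders for the missing packets, fill them by linear
    # interpolation between neighbouring samples, then pad or trim to TIMELEN.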
blockedTime = []
flag, count = 0, 0
for tIndex in range(1, len(timestamp)):
        if timestamp[tIndex] - timestamp[tIndex - 1] > TIMEINTERVAL:
            numOfInterp = int((timestamp[tIndex] - timestamp[tIndex - 1]) / TIMEBIASE)  # TODO: TIMEBIASE needs to be tuned
for num in range(0, numOfInterp - 1):
blockedTime.append(tIndex)
ca = eachsubcarrier.tolist()
for csiIndex in blockedTime:
ca.insert(csiIndex + count, 0)
count+=1
for csiApt in range(0, len(ca) - 1):
if ca[csiApt] == 0 and ca[csiApt + 1] != 0:
ca[csiApt] = "%.2f" % ((ca[csiApt - 1] + ca[csiApt + 1]) / 2.0)
elif ca[csiApt] == 0 and ca[csiApt + 1] == 0:
for zeros in range(csiApt, len(ca)):
if ca[zeros] != 0:
flag = zeros
break
numOfZeros = flag - csiApt
for num in range(0, numOfZeros):
ca[csiApt + num] = ca[csiApt + num - 1] + float('%.2f' % ((ca[flag] - ca[csiApt - 1]) / numOfZeros))
caNew = [float(x) for x in ca]
blockedTime.extend([x for x in range(len(caNew), TIMELEN)])
if len(caNew) < TIMELEN:
caNew.extend([caNew[-1] for _ in range(0, TIMELEN - len(caNew))])
else:
caNew = caNew[:TIMELEN]
return caNew, blockedTime
def my_static(temp, lenfile):
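    # Baseline removal: track alternating local maxima/minima against a 4-sample
    # moving average (with threshold T) and subtract the midpoint of each pair of
    # neighbouring extrema from the samples between them.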
s_t = []
s_t_list = []
s_t_index = []
Max = -100
Min = 100
T = 5
flag = 2
for i in range(lenfile):
if i < lenfile - 4 :
s_t.append(np.mean(temp[i : i + 4]))
else:
s_t.append(s_t[lenfile - 5])
for i in range(lenfile):
if temp[i] > s_t[i] + T and temp[i] >= Max and temp[i] > 0:
Max = temp[i]
Max_index = i
if flag == 0:
s_t_list.append(Min)
s_t_index.append(Min_index)
Min = 100
flag = 1
elif temp[i] < s_t[i] - T and temp[i] <= Min and temp[i] > 0:
Min = temp[i]
Min_index = i
if flag == 1:
s_t_list.append(Max)
s_t_index.append(Max_index)
Max = -100
flag = 0
for i in range (len(s_t_list) - 1):
for j in range(s_t_index[i], s_t_index[i + 1]):
temp[j] = temp[j] - (s_t_list[i] + s_t_list[i + 1]) / 2
return temp
def linearInterpolation(matrix, timestamp):
raw, blockedTime = None, None
for eachsubcarrier in matrix:
eachsubcarrier, blockedTime = reviseInterp(timestamp, eachsubcarrier)
raw = np.array([eachsubcarrier]) if raw is None else np.append(raw, [eachsubcarrier], axis=0)
return raw, blockedTime
def varianceOperation(*args):
var_list = [np.var(args[0]), np.var(args[1]), np.var(args[2])]
mini, maxi = var_list.index(min(var_list)), var_list.index(max(var_list))
return args[mini], args[maxi]
def relativePhaseOperation(antenna_one, antenna_two, antenna_three):
# amplitude, relativePhase_one, relativePhase_two = None, None,None
# antenna_two= antenna_one * (antenna_two.conjugate())
# antenna_three = antenna_one * (antenna_three.conjugate())
# for subcarrier in antenna_one:
# raw = np.array([complexToLatitude(subcarrier)]) if raw is None else np.append(raw, [complexToLatitude(subcarrier)], axis=0)
# for subcarrier in antenna_two:
# relativePhase_one = np.array([radReverse(subcarrier)]) if relativePhase_one is None else np.append(relativePhase_one, [radReverse(subcarrier)], axis=0)
# for subcarrier in antenna_three:
# relativePhase_two = np.array([radReverse(subcarrier)]) if relativePhase_two is None else np.append(relativePhase_two, [radReverse(subcarrier)], axis=0)
antenna_one_amp,conjugate_amp, conjugate, relativePhase = None, None, None,None
tryy, tryyy = None, None
for wins in range(0, len(antenna_one[0]), TIMEWINDOW):
part_min, part_max = varianceOperation(antenna_one[:, wins: wins+TIMEWINDOW],
antenna_two[:, wins: wins+TIMEWINDOW], antenna_three[:, wins: wins+TIMEWINDOW])
        alpha = np.mean(part_max)
        beta = alpha * 1000
        tryy = part_max * (part_min.conjugate())
        tryyy = np.array(tryy) if tryyy is None else np.hstack((tryyy, tryy))
        con_mul = (part_max - alpha) * ((part_min + beta).conjugate())
con_mul = con_mul - np.mean(con_mul)
conjugate = np.array(con_mul) if conjugate is None else np.hstack((conjugate,con_mul))
for subcarrier in antenna_one:
antenna_one_amp = np.array([complexToLatitude(subcarrier)]) \
if antenna_one_amp is None else np.append(antenna_one_amp, [complexToLatitude(subcarrier)], axis=0)
for subcarrier in conjugate:
conjugate_amp = np.array([complexToLatitude(subcarrier)]) \
if conjugate_amp is None else np.append(conjugate_amp, [complexToLatitude(subcarrier)], axis=0)
for subcarrier in conjugate:
relativePhase = np.array([radReverse(subcarrier)]) \
if relativePhase is None else np.append(relativePhase, [radReverse(subcarrier)], axis=0)
# relativePhase = np.array([complexToLatitude(subcarrier)]) \
# if relativePhase is None else np.append(relativePhase, [complexToLatitude(subcarrier)], axis=0)
return antenna_one_amp,conjugate_amp, relativePhase, conjugate, tryyy
def readFile(filepath):
file=read_bf_file.read_file(filepath)
#print "Length of packets: ", len(file)
# pair_one_real =pair_one_imag=pair_Two_real=pair_Two_imag=pair_Three_real=pair_Three_imag =np.zeros((30,len(file)))
timestamp = np.array([])
startTime = file[0].timestamp_low
#print "Start timestamp:" + str(startTime)
antennaPair_raw, antennaPair_One, antennaPair_Two, antennaPair_Three= [], [], [], []
for item in file:
timestamp = np.append(timestamp, (item.timestamp_low - startTime) / 1000000.0)
for eachcsi in range(0, 30):
            '''
            acquire the CSI complex value for each antenna pair with shape (len(file) x 30), i.e., packet number x subcarrier number
            '''
antennaPair_One.append(item.csi[eachcsi][0][0])
antennaPair_Two.append(item.csi[eachcsi][0][1])
antennaPair_Three.append(item.csi[eachcsi][0][2])
antennaPair_One = np.reshape(antennaPair_One,(len(file), 30)).transpose()
antennaPair_Two = np.reshape(antennaPair_Two, (len(file), 30)).transpose()
antennaPair_Three = np.reshape(antennaPair_Three, (len(file), 30)).transpose()
"""
To get the relative phase between each antenna pair.
Linear inteplotation operation.
"""
antenna_one_amp, conjugate_amp,relativePhase,conjugate, tryyy = relativePhaseOperation(antennaPair_One, antennaPair_Two, antennaPair_Three)
#antenna_one_amp, blocked = linearInterpolation(antenna_one_amp, timestamp)
#conjugate_amp, blocked = linearInterpolation(conjugate_amp, timestamp)
#relativePhase, blocked = linearInterpolation(relativePhase, timestamp)
    # TODO: MORE SIGNAL OPERATIONS NEED TO BE ADDED!
'''for subcarrier in range(len(amplitude)):
amplitude[subcarrier] = dwtfilter(amplitude[subcarrier]).butterWorth()
amplitude[subcarrier] = dwtfilter(amplitude[subcarrier]).filterOperation()'''
csi_matrix = np.array([antenna_one_amp])
csi_matrix = np.append(csi_matrix, [conjugate_amp], axis=0)
csi_matrix = np.append(csi_matrix, [relativePhase], axis=0)
return csi_matrix
#return csi_matrix, conjugate, tryyy
if __name__ == '__main__':
csi= readFile("test.dat")
#csi, conjugate, tryyy= readFile("test.dat")
# with open('../data/1/static_csi.pkl', 'wb') as handle:
# pickle.dump(csi, handle, -1)
phase1, value1 = None, None
phase2, value2 = None, None
pylab.figure()
pylab.plot(csi[0][0], 'g-', label='butterworth')
    # pylab.pcolormesh(csi[0][0])  # disabled: the original call referenced an undefined colormap cm_
# pylab.plot(csi[1][0], 'g-', label='butterworth')
# ff = fft(conjugate)
# for subcarrier in ff:
# phase1 = np.array([radReverse(subcarrier)]) \
# if phase1 is None else np.append(phase1, [radReverse(subcarrier)], axis=0)
# value1 = np.array([complexToLatitude(subcarrier)]) \
# if value1 is None else np.append(value1, [complexToLatitude(subcarrier)], axis=0)
# pylab.plot(phase1[0], 'r--', label='fft_alpha')
#
# yy = fft(tryyy)
# for subcarrier in yy:
# phase2 = np.array([radReverse(subcarrier)]) \
# if phase2 is None else np.append(phase2, [radReverse(subcarrier)], axis=0)
# value2 = np.array([complexToLatitude(subcarrier)]) \
# if value2 is None else np.append(value2, [complexToLatitude(subcarrier)], axis=0)
# pylab.plot(phase2[0], 'b', label='fft_raw')
# pylab.plot(value[0], 'b--',label='fft_value')
pylab.legend(loc='best')
pylab.ylim(0, 50)
pylab.show()
import matplotlib.pylab as plt
from collections import deque
from filter import Filter
import numpy as np
import copy
import time
import threading
import pywt
TIMEWINDOW = 1800
SLIDEWINDOW = TIMEWINDOW // 2  # integer division so the result can be used for slicing and modulo
class Display:
def __init__(self, REFRESH_INTERVAL=0.001):
self.count = 0
self.t = deque()
self.amp = deque()
self.conj_amp=deque()
self.pha = deque()
self.amp_filter = None
self.pha_conj = deque()
self.end = False
self.threads = []
self.interval = REFRESH_INTERVAL
pass
def push(self, data):
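        # Maintain a TIMEWINDOW-long sliding buffer of samples; every SLIDEWINDOW
        # new samples, refresh the snapshot consumed by the filter/CWT panel.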
if self.count > TIMEWINDOW -1:
self.t.popleft()
self.amp.popleft()
self.conj_amp.popleft()
self.pha.popleft()
self.t.append(self.count)
self.amp.append(data[0][0])
self.conj_amp.append(data[1][0])
self.pha.append(data[2][0])
self.count += 1
if self.count % SLIDEWINDOW == 0:
if self.amp_filter is None:
self.amp_filter = list(copy.deepcopy(self.amp))
elif len(self.amp_filter) == SLIDEWINDOW:
self.amp_filter.extend(list(self.amp)[-SLIDEWINDOW:])
else:
self.amp_filter = self.amp_filter[-SLIDEWINDOW:] + list(copy.deepcopy(self.amp))[-SLIDEWINDOW:]
def _plot(self):
fig, ax = plt.subplots(nrows=2, ncols=2, sharex=True)
amplitude = ax[0][0]
# phase = ax[0][1]
slide = ax[0][1]
conj_amplitude=ax[1][0]
time_fre = ax[1][1]
while not self.end:
t = copy.deepcopy(self.t)
amp = copy.deepcopy(self.amp)
conj_amp=copy.deepcopy(self.conj_amp)
pha = copy.deepcopy(self.pha)
# if self.count <= TIMEWINDOW + SLIDEWINDOW:
flt = copy.deepcopy(self.amp_filter)
# if flt and len(flt) == TIMEWINDOW:
# flt = Filter(flt).butterWorth()
# else:
if len(t) == 0:
time.sleep(self.interval)
continue
if len(t) != len(amp) or len(t) != len(pha):
continue
max_t = t[-1] + 100
min_t = max_t - TIMEWINDOW if max_t - TIMEWINDOW > 0 else 0
amplitude.cla()
amplitude.set_title("amplitude")
amplitude.set_xlabel("packet / per")
amplitude.set_ylim(0, 50)
amplitude.set_xlim(min_t, max_t)
amplitude.grid()
amplitude.plot(t,np.array(amp))
# amplitude.legend(loc='best')
# phase.cla()
# phase.set_title("phase")
# phase.set_xlabel("packet / per")
# phase.set_ylim(-2, 2)
# phase.set_xlim(min_t, max_t)
# phase.grid()
# phase.plot(t, np.array(pha))
slide.cla()
slide.set_title("slide")
slide.set_xlabel("packet / per")
slide.set_ylim(0, 50)
slide.set_xlim(min_t, max_t)
slide.grid()
time_fre.set_title("time_fre")
time_fre.set_xlabel("time(s)")
time_fre.set_ylim(0, 25)
if flt and len(flt) == TIMEWINDOW:
slide.plot(t,np.array(flt))
                sig = []
                for i in range(len(flt)):  # flatten to 1-D: first subcarrier of each sample
                    sig.append(flt[i][0])
#print sig
fs = 50
totalscale = 256
#t = np.arange(0, 1, 1.0 / fs)
#sig = np.sin(2 * math.pi * f1 * t) + np.sin(2 * math.pi * f2 * t)
wcf = pywt.central_frequency('morl')
scale = np.arange(1, totalscale + 1, 1)
cparam = 2 * wcf * totalscale
scale = cparam / scale
frequencies = pywt.scale2frequency('morl', scale)
frequencies = frequencies * fs
cwtmatr, freqs = pywt.cwt(sig, scale, 'morl')
time_fre.pcolormesh(t, frequencies, abs(cwtmatr), vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
#time_fre.colorbar()
#time_fre.show()
conj_amplitude.cla()
conj_amplitude.set_title("conj_amplitude")
conj_amplitude.set_xlabel("packet / per")
#conj_amplitude.set_ylim(0, 100)
conj_amplitude.set_xlim(min_t, max_t)
conj_amplitude.grid()
conj_amplitude.plot(t, np.array(conj_amp))
plt.pause(self.interval)
def stop(self):
for t in self.threads:
t.join()
print('stop realview****')
def display(self):
t1 = threading.Thread(target=self._plot)
self.threads.append(t1)
for t in self.threads:
# t.setDaemon(True)
t.start()
print('display starting...')
if __name__ == '__main__':
f = Display()
f.display()
while True:
data = [[[10]],[[10]],[[0.2]]]
f.push(data)
time.sleep(0.1)
import numpy as np
import os
import sys
import struct
# from .csi import WifiCsi
class WifiCsi:
def __init__(self, args, csi):
self.timestamp_low = args[0]
self.bfee_count = args[1]
self.Nrx = args[2]
self.Ntx = args[3]
self.rssi_a = args[4]
self.rssi_b = args[5]
self.rssi_c = args[6]
self.noise = args[7]
self.agc = args[8]
self.perm = args[9]
self.rate = args[10]
self.csi = csi
def get_bit_num(in_num, data_length):
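    # Reinterpret in_num as a signed two's-complement integer of data_length bits,
    # e.g. get_bit_num(0xFF, 8) == -1.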
max_value = (1 << data_length - 1) - 1
if not -max_value-1 <= in_num <= max_value:
out_num = (in_num + (max_value + 1)) % (2 * (max_value + 1)) - max_value - 1
else:
out_num = in_num
return out_num
def read_bfee(in_bytes):
timestamp_low = in_bytes[0] + (in_bytes[1] << 8) + \
(in_bytes[2] << 16) + (in_bytes[3] << 24)
bfee_count = in_bytes[4] + (in_bytes[5] << 8)
Nrx = in_bytes[8]
Ntx = in_bytes[9]
rssi_a = in_bytes[10]
rssi_b = in_bytes[11]
rssi_c = in_bytes[12]
noise = get_bit_num(in_bytes[13],8)
agc = in_bytes[14]
antenna_sel = in_bytes[15]
length = in_bytes[16] + (in_bytes[17] << 8)
fake_rate_n_flags = in_bytes[18] + (in_bytes[19] << 8)
    calc_len = (30 * (Nrx * Ntx * 8 * 2 + 3) + 7) // 8  # payload length in whole bytes
payload = in_bytes[20:]
# if(length != calc_len)
    perm_size = 3
    perm = np.ndarray(perm_size, dtype=int)
if Nrx == 3 :
perm[0] = ((antenna_sel) & 0x3) + 1
perm[1] = ((antenna_sel >> 2) & 0x3) + 1
perm[2] = ((antenna_sel >> 4) & 0x3) + 1
elif Nrx ==2 :
perm[0] = 2
perm[1] = 1
perm[2] = 3
index = 0
Nrx_mat = 3
csi_size = (30, Ntx, Nrx_mat)
row_csi = np.ndarray(csi_size, dtype=complex)
perm_csi = np.ndarray(csi_size, dtype=complex)
try:
for i in range(30):
index += 3
remainder = index % 8
for j in range(Nrx):
for k in range(Ntx):
pr = get_bit_num((payload[index // 8] >> remainder),8) | get_bit_num((payload[index // 8+1] << (8-remainder)),8)
pi = get_bit_num((payload[(index // 8)+1] >> remainder),8) | get_bit_num((payload[(index // 8)+2] << (8-remainder)),8)
if Nrx == 3:
perm_csi[i][k][perm[j] - 1] = complex(pr, pi)
elif Nrx == 2:
perm_csi[i][k][perm[j]] = complex(pr, pi)
index += 16
    except Exception:
        pass  # payload shorter than expected; keep whatever was parsed so far
args = [timestamp_low, bfee_count, Nrx, Ntx, rssi_a,
rssi_b, rssi_c, noise, agc, perm, fake_rate_n_flags]
temp_wifi_csi = WifiCsi(args, perm_csi)
return temp_wifi_csi
def read_file(file_path):
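    # Note: despite the parameter name, this realview variant parses a single
    # in-memory bfee payload (the byte list received over UDP), not a .dat file.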
csi_data = []
csi_data.append(read_bfee(file_path))
return csi_data
from collections import deque
import numpy as np
import copy
l = None
t = deque()
t.append(1)
t.append(2)
t.append(3)
t.append(4)
for i in range(2):
if l is None:
l = copy.deepcopy(t)
else:
l.extend(list(t)[-2:])
    print(l)
    l = list(l)[:2] + list(t)
    print(l)
print(t)
# -*- coding: utf-8 -*-
"""@package udp
Created on Fri Jun 16 17:13:04 2017
@author: ren
"""
import socket
def udp_init(port):
    ## initialize the socket with the UDP protocol
    #
    # @param port the port number used for UDP on the server
#
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # SOCKET_DGRAM:UDP
#s.bind(("", 0x1031))
s.bind(("",port))
return s
def recv(s):
data, addr = s.recvfrom(4096) # BUFSIZE
return data,addr
def close(s):
s.close()