Commit fd2e3e16 authored by Sensing

the first version of this project

parent 62e3b701
#coding=UTF-8
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from sklearn.utils.extmath import svd_flip
def mean_data(data):
return np.mean(data,axis=0)
"""
Parameters:
- XMat: a numpy matrix; rows are samples, columns are features
- k: keep the eigenvectors of the k largest eigenvalues
Returns:
- finalData: the low-dimensional matrix corresponding to the inputs above
- reconData: the matrix mapped back after shifting the coordinate axes
"""
def pca(XMat, k):
average = mean_data(XMat)
m, n = np.shape(XMat)
avgs = np.tile(average, (m, 1))
data_adjust = XMat - avgs
    covX = np.cov(data_adjust.T) # covariance matrix of the features
    featValue, featVec = np.linalg.eigh(covX) # eigenvalues and eigenvectors of the covariance matrix
    index = np.argsort(-featValue) # indices sorted by descending eigenvalue
if k > n:
print ("k must lower than feature number")
return
else:
        # Note: eigenvectors are the columns of featVec, while indexing a numpy
        # 2-D array as a[i] selects a row, hence the transpose here.
        selectVec = np.array(featVec.T[index[:k]])
finalData = np.dot(XMat, selectVec.T)
# reconData = (finalData * selectVec) + average
return finalData
def PCAtest(XMat, k):
average = mean_data(XMat)
m, n = np.shape(XMat)
avgs = np.tile(average, (m, 1))
data_adjust = XMat - avgs
cov = np.dot(data_adjust.T,data_adjust)/(m-1)
u, d, v = np.linalg.svd(cov, full_matrices=False)
u, v = svd_flip(u, v)
index = np.argsort(-d)
final = np.dot(XMat, u[:, index[:k]])
return final
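# Minimal usage sketch (not part of the original file): run both variants on the
# same random data; the shapes below are assumptions for illustration only.
if __name__ == '__main__':
    np.random.seed(0)
    X = np.random.randn(100, 30)         # 100 samples, 30 features
    low_eig = pca(X, 2)                  # eigendecomposition-based projection
    low_svd = PCAtest(X, 2)              # SVD-based projection
    print(low_eig.shape, low_svd.shape)  # both (100, 2); columns may differ in sign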
# -*- coding: UTF-8 -*-
import numpy as np
def phaseCalibration(csi, subCarrierIndex, rxNum, subCarrierNum):
phaseRaw = np.angle(csi)
phaseUnwrapped = np.unwrap(phaseRaw)
for antIndexForPhase in range(1, rxNum):
if phaseUnwrapped[antIndexForPhase, 0] - phaseUnwrapped[0, 0] > np.pi:
phaseUnwrapped[antIndexForPhase, :] -= 2 * np.pi
elif phaseUnwrapped[antIndexForPhase, 0] - phaseUnwrapped[0, 0] < -np.pi:
phaseUnwrapped[antIndexForPhase, :] += 2 * np.pi
phase = phaseUnwrapped.reshape(-1)
a_mat = np.tile(subCarrierIndex, (1, rxNum))
a_mat = np.append(a_mat, np.ones((1, subCarrierNum * rxNum)), axis=0)
a_mat = a_mat.transpose((1, 0))
a_mat_inv = np.linalg.pinv(a_mat)
x = np.dot(a_mat_inv, phase)
phaseSlope = x[0]
phaseCons = x[1]
    calibration = np.exp(1j * (-phaseSlope * np.tile(subCarrierIndex, rxNum).reshape(rxNum, -1) - phaseCons * np.ones((rxNum, subCarrierNum))))
csi = csi*calibration
return csi
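# Minimal usage sketch (not part of the original file), assuming 3 Rx antennas
# and 30 subcarriers with indices spanning -28..28 (the exact 802.11n grouped
# subcarrier indices are an assumption here). A synthetic linear phase ramp
# should come back largely flattened.
if __name__ == '__main__':
    rxNum, subCarrierNum = 3, 30
    subCarrierIndex = np.linspace(-28, 28, subCarrierNum).reshape(1, -1)
    csi = np.ones((rxNum, subCarrierNum), dtype=complex) * np.exp(1j * 0.05 * subCarrierIndex)
    calibrated = phaseCalibration(csi, subCarrierIndex, rxNum, subCarrierNum)
    print(np.round(np.angle(calibrated), 3))  # near-constant phase across subcarriers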
# CentiTrack2.0
CentiTrack2.0, also named CentiTrack-3D, contains three key modules, all fully implemented: the super-resolution AoA-ToF module, the relative motion trace module, and the 3D tracking module.
The experimental data are also uploaded, covering the "line", "curve" and "letter" traces. In each trial, "log.dat" and "logg.dat" are the CSI data collected on Rx1 and Rx2 respectively, "csi_ts.txt" holds the CSI timestamps used to synchronize with Leap Motion, and "data.json" is the ground truth collected by Leap Motion.
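For example, one trial can be loaded roughly as follows (a sketch: the internal layout of "data.json" is an assumption, and the binary CSI logs are parsed with the read_bf_file module included in this commit):

```python
import json
import numpy as np

csi_ts = np.loadtxt("line/csi_ts.txt")   # CSI timestamps for Leap Motion sync

with open("line/data.json") as f:        # Leap Motion ground truth
    ground_truth = json.load(f)

# "log.dat" (Rx1) and "logg.dat" (Rx2) are binary CSI logs; see read_bf_file
```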
# -*- coding: UTF-8 -*-
import numpy as np
import os
import sys
import struct
import pylab
import cmath
class WifiCsi:
def __init__(self, args, csi):
self.timestamp_low = args[0]
self.bfee_count = args[1]
self.Nrx = args[2]
self.Ntx = args[3]
self.rssi_a = args[4]
self.rssi_b = args[5]
self.rssi_c = args[6]
self.noise = args[7]
self.agc = args[8]
self.perm = args[9]
self.rate = args[10]
self.csi = csi
pass
def get_bit_num(in_num, data_length):
max_value = (1 << data_length - 1) - 1
if not -max_value-1 <= in_num <= max_value:
out_num = (in_num + (max_value + 1)) % (2 * (max_value + 1)) - max_value - 1
else:
out_num = in_num
return out_num
pass
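# Worked example: get_bit_num(200, 8) == -56; the low 8 bits of 200 are
# reinterpreted as a signed (two's-complement) byte.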
def read_bfee(in_bytes):
timestamp_low = in_bytes[0] + (in_bytes[1] << 8) + \
(in_bytes[2] << 16) + (in_bytes[3] << 24)
    bfee_count = in_bytes[4] + (in_bytes[5] << 8)  # 16-bit little-endian count
Nrx = in_bytes[8]
Ntx = in_bytes[9]
rssi_a = in_bytes[10]
rssi_b = in_bytes[11]
rssi_c = in_bytes[12]
noise = get_bit_num(in_bytes[13],8)
agc = in_bytes[14]
antenna_sel = in_bytes[15]
length = in_bytes[16] + (in_bytes[17] << 8)
fake_rate_n_flags = in_bytes[18] + (in_bytes[19] << 8)
calc_len = (30 * (Nrx * Ntx * 8 * 2 + 3) + 7) / 8
payload = in_bytes[20:]
# if(length != calc_len)
perm_size = 3
perm = np.ndarray(perm_size, dtype=int)
perm[0] = (antenna_sel & 0x3) + 1
perm[1] = ((antenna_sel >> 2) & 0x3) + 1
perm[2] = ((antenna_sel >> 4) & 0x3) + 1
index = 0
csi_size = (30, Ntx, Nrx)
row_csi = np.ndarray(csi_size, dtype=complex)
perm_csi = np.ndarray(csi_size, dtype=complex)
for i in range(30):
index += 3
remainder = index % 8
for j in range(Nrx):
for k in range(Ntx):
pr = get_bit_num((payload[index // 8] >> remainder),8) | get_bit_num((payload[index // 8+1] << (8-remainder)),8)
pi = get_bit_num((payload[(index // 8)+1] >> remainder),8) | get_bit_num((payload[(index // 8)+2] << (8-remainder)),8)
perm_csi[i][k][perm[j] - 1] = complex(pr, pi)
index += 16
pass
pass
pass
pass
args = [timestamp_low, bfee_count, Nrx, Ntx, rssi_a,
rssi_b, rssi_c, noise, agc, perm, fake_rate_n_flags]
temp_wifi_csi = WifiCsi(args, perm_csi)
return temp_wifi_csi
def read_file(file_path):
length = os.path.getsize(file_path)
cur = 0
count = 0
broken_perm = 0
triangle = [1, 3, 6]
csi_data = []
with open(file_path, 'rb') as f:
while cur < (length - 3):
            field_length = struct.unpack("!H", f.read(2))[0]
            code = struct.unpack("!B", f.read(1))[0]
            cur += 3
            if code == 187:
                data = []
                for _ in range(field_length - 1):
                    data.append(struct.unpack("!B", f.read(1))[0])
                cur = cur + field_length - 1
                if len(data) != field_length - 1:
                    break
            else:
                f.seek(field_length - 1, 1)
                cur = cur + field_length - 1
csi_data.append(read_bfee(data))
count += 1
return csi_data
import read_bf_file
from scipy.signal import savgol_filter
import pylab
from backup import *
TIMEINTERVAL = 0.01
TIMEBIASE = 0.008
TIMELEN = 500
IMAGETOCSIRATIO = 2
THRESHOLD = 5.5
def csi_ratio(an1, an2, an3):
ret1, ret2 = None, None
for sub_index in range(len(an1)):
ret1 = np.array([np.divide(an1[sub_index], an2[sub_index])]) if ret1 is None else np.append(ret1, [np.divide(an1[sub_index], an2[sub_index])], axis=0)
ret2 = np.array([np.divide(an2[sub_index], an3[sub_index])]) if ret2 is None else np.append(ret2, [np.divide(an2[sub_index], an3[sub_index])], axis=0)
return ret1, ret2
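# Note: the receive antennas share the NIC's RF oscillator, so per-packet random
# phase offsets (CFO, SFO, packet detection delay) are common factors that cancel
# in the per-subcarrier ratios above, leaving phases that track the moving target.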
def trace(filepath):
file = read_bf_file.read_file(filepath)
file_len = len(file)
timestamp = np.array([])
startTime = file[0].timestamp_low
print "Length of packets: ", file_len, " Start timestamp:" + str(startTime)
ap1_tx1, ap2_tx1, ap3_tx1 = [], [], []
for item in file :
timestamp = np.append(timestamp, (item.timestamp_low - startTime) / 1000000.0)
for eachcsi in range(0, 30):
ap1_tx1.append(item.csi[eachcsi][0][0])
ap2_tx1.append(item.csi[eachcsi][0][1])
ap3_tx1.append(item.csi[eachcsi][0][2])
ap1_tx1 = np.reshape(ap1_tx1, (file_len, 30)).transpose()
ap2_tx1 = np.reshape(ap2_tx1, (file_len, 30)).transpose()
ap3_tx1 = np.reshape(ap3_tx1, (file_len, 30)).transpose()
ret1, ret2 = csi_ratio(ap1_tx1, ap2_tx1, ap3_tx1)
aa = np.mean(ret1, axis=0)
for i in range(len(aa)):
if np.isnan(aa[i]):
aa[i] = aa[i-1]
a = aa - np.mean(aa)
phase = np.angle(a)
phase_wrap = np.unwrap(phase)
angle_calibrated, dynamic_vectors = calibration(a)
pylab.figure()
pylab.subplot(3, 3, 1)
pylab.ylabel('Imag')
pylab.xlabel('Real')
pylab.plot(get_Real(a), get_Imag(a), 'b')
pylab.xlim(-2, 2)
pylab.ylim(-2, 2)
pylab.subplot(3, 3, 2)
pylab.title("angle")
pylab.ylabel('angle/rad')
pylab.xlabel('time')
pylab.plot(phase, 'b')
pylab.subplot(3, 3, 3)
pylab.title("angle_wrap")
pylab.ylabel('angle/rad')
pylab.xlabel('time')
pylab.plot(phase_wrap, 'b')
pylab.subplot(3, 3, 4)
pylab.title("distance")
pylab.ylabel('dis/cm')
pylab.xlabel('time')
pylab.plot(phase_wrap * 5.64 / (4 * np.pi), 'b')
pylab.subplot(3, 3, 5)
pylab.ylabel('angle/rad')
pylab.xlabel('calibration')
pylab.plot(angle_calibrated, 'r')
pylab.subplot(3, 3, 6)
pylab.ylabel('dis/cm')
pylab.xlabel('calibration')
pylab.plot(np.unwrap(angle_calibrated), 'r')
return np.unwrap(angle_calibrated) * 5.64 / (2 * np.pi)
if __name__ == '__main__':
x = trace("../0928/rx1_3.dat")
y = trace("../0928/rx2_3.dat")
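# Note on the constants used in trace(): 5.64 cm is the carrier wavelength,
# presumably at 5.32 GHz (c / 5.32e9 is roughly 5.64 cm). One wavelength of
# reflected-path change shifts the CSI-ratio phase by 2*pi; the /(4*pi) variant
# additionally halves the round-trip path change to report target displacement.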
#coding:utf-8
from tensorflow.examples.tutorials.mnist import input_data
import pylab
from ops_modify import *
import numpy as np
import pickle
import random
import os
with open('../conditional-GAN/data_extract/source/s_th.pkl', 'rb') as f:
sh = pickle.load(f)
with open('../conditional-GAN/data_extract/source/s_tq.pkl', 'rb') as f:
sq = pickle.load(f)
with open('../conditional-GAN/data_extract/source/s_tqie.pkl', 'rb') as f:
sqie = pickle.load(f)
with open('../conditional-GAN/data_extract/source/s_tz.pkl', 'rb') as f:
sz = pickle.load(f)
train_data = sh[0:200] + sq[0:200] + sqie[0:200] + sz[0:200]
global_step = tf.Variable(0, name='global_step', trainable=False)
label = tf.placeholder(tf.float32, [BATCH_SIZE, 4], name='label')
images = tf.placeholder(tf.float32, [BATCH_SIZE, 28, 100, 1], name='images')
niose = tf.placeholder(tf.float32, [BATCH_SIZE, 100], name='noise')
with tf.variable_scope(tf.get_variable_scope()) as scope:
G_out = generator(niose, label)
D_logits_real = discriminator(images, label)
D_logits_fake = discriminator(G_out, label, reuse=True)
samples = sampler(niose, label)
def transition(batch, is_train=True):
data = None
for i in batch[:, 0]:
i = i[:, 0:100]
data = np.array([i]) if data is None else np.append(data, [i], axis=0)
if is_train is True:
label = None
for j in batch[:, 1]:
label = np.array([convert(j[0])]) if label is None else np.append(label, [convert(j[0])], axis=0)
label = np.reshape(label, (-1, 4))
else:
label = []
for j in batch[:, 1]:
label.append(j[0])
return data, label
def convert(number):
e = np.zeros((4, 1))
e[number] = 1
return e
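# Example: convert(2) returns the one-hot column [[0], [0], [1], [0]], which
# transition() reshapes into a length-4 label row.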
# label for generating dataset
sample_labels = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],
[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],
[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],
[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
d_loss_real = -tf.reduce_mean(D_logits_real)
d_loss_fake = tf.reduce_mean(D_logits_fake)
d_loss = d_loss_real + d_loss_fake
g_loss = -d_loss_fake
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'd_' in var.name]
g_vars = [var for var in t_vars if 'g_' in var.name]
# for tf.layers.batch_normalization
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
d_optim = tf.train.AdamOptimizer(0.001, beta1=0.5).minimize(d_loss, var_list=d_vars, global_step=global_step)
g_optim = tf.train.AdamOptimizer(0.001, beta1=0.5).minimize(g_loss, var_list=g_vars, global_step=global_step)
is_train = False
is_param = True
if is_train:
with tf.Session() as sess:
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
if is_param:
saver.restore(sess,'./check_point/CGAN_model_52.ckpt')
# saver.restore(sess, tf.train.latest_checkpoint('./check_point'))
print "loading params"
for i in range(1001):
random.shuffle(train_data)
batchs = [
train_data[k: k + BATCH_SIZE]
for k in xrange(0, 800, BATCH_SIZE)]
for batch in batchs:
data, tag = transition(np.array(batch))
batch_xs = np.reshape(data, (BATCH_SIZE, 28, 100, 1))
batch_xs = batch_xs / 1.5
batch_ys = tag
batch_z = np.random.uniform(-1, 1, size=(BATCH_SIZE, 100))
sess.run([d_optim], feed_dict={images: batch_xs, niose: batch_z, label: batch_ys})
sess.run([g_optim], feed_dict={images: batch_xs, niose: batch_z, label: batch_ys})
# sess.run([g_optim], feed_dict={images: batch_xs, niose: batch_z, label: batch_ys})
if i % 10 == 0:
errD = d_loss.eval(feed_dict={images: batch_xs, label: batch_ys, niose: batch_z})
errG = g_loss.eval({niose: batch_z, label: batch_ys})
print("epoch:[%d], d_loss: %.8f, g_loss: %.8f" % (i, errD, errG))
if i % 50 == 1:
sample = sess.run(samples, feed_dict={niose: batch_z, label: sample_labels})
sample = sample * 1.5
samples_path = './pics/'
save_images(sample, [10,4], samples_path + '%d.png' % (i))
print('save image')
if i % 50 == 2:
checkpoint_path = os.path.join('./check_point/CGAN_model_%d.ckpt' % (i))
saver.save(sess, checkpoint_path)
print('save check_point')
else:
with tf.Session() as sess:
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
saver.restore(sess, './check_point/CGAN_model_52.ckpt')
gan_data = []
for i in range(200):
sample_noise = np.random.uniform(-1, 1, size=(BATCH_SIZE, 100))#n-sample=1
gen_samples = sess.run(generator(niose, label, training =False), feed_dict={niose: sample_noise, label : sample_labels})
gen_samples = gen_samples.reshape(-1, 28, 100)
gen_samples = gen_samples * 1.5
tags = [[0],[1],[2],[3],[0],[1],[2],[3],[0],[1],[2],[3],[0],[1],[2],[3],[0],[1],[2],[3],[0],[1],[2],[3],[0],[1],[2],[3],[0],[1],[2],[3],[0],[1],[2],[3],[0],[1],[2],[3]]
for i,j in enumerate(gen_samples):
gan_data.append([j,tags[i]])
with open('./GAN_data/gan_t2.pkl', 'wb') as f:
pickle.dump(gan_data, f)
# pylab.figure()
# pylab.subplot(4,1,1)
# pylab.imshow(csi_generator[0])
# pylab.legend(loc="best")
#
# pylab.subplot(4,1,2)
# pylab.imshow(csi_generator[1])
# pylab.legend(loc="best")
#
# pylab.subplot(4,1,3)
# pylab.imshow(csi_generator[2])
# pylab.legend(loc="best")
#
# pylab.subplot(4,1,4)
# pylab.imshow(csi_generator[3])
# pylab.legend(loc="best")
# pylab.show()
# Deep-Adaptation-Networks-based-Gesture-Recognition
The project can be divided into two parts:
1) Data augmentation based on a conditional GAN; related modules:
Earth_move_GAN.py and ops_modify.py
2) Domain adaptation based on multi-kernel Maximum Mean Discrepancy; related modules:
s_model.py for training deep neural networks in the source domain;
t_model.py for transferring the source model to the targets;
mmd.py for computing the domain discrepancy used for adaptation.
import tensorflow as tf
def guassian_kernel(source, target, batch_size = 128, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
n_samples = batch_size * 2
total = tf.concat([source, target], axis=0)
total0 = tf.expand_dims(total, 0)
total1 = tf.expand_dims(total, 1)
L2_distance = tf.reduce_sum((total0-total1)**2, axis=2)
if fix_sigma:
bandwidth = fix_sigma
else:
bandwidth = tf.reduce_sum(L2_distance) / (n_samples**2-n_samples)
bandwidth /= kernel_mul ** (kernel_num // 2)
bandwidth_list = [bandwidth * (kernel_mul**i) for i in range(kernel_num)]
kernel_val = [tf.exp(-L2_distance / bandwidth_temp) for bandwidth_temp in bandwidth_list]
return sum(kernel_val)#/len(kernel_val)
def mmd_rbf_accelerate(source, target, batch_size = 100, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
kernels = guassian_kernel(source, target, batch_size,
kernel_mul=kernel_mul, kernel_num=kernel_num, fix_sigma=fix_sigma)
loss = 0
for i in range(batch_size):
s1, s2 = i, (i+1)%batch_size
t1, t2 = s1+batch_size, s2+batch_size
loss += kernels[s1, s2] + kernels[t1, t2]
loss -= kernels[s1, t2] + kernels[s2, t1]
return loss / float(batch_size)
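# Note: the loop in mmd_rbf_accelerate is the linear-time MMD estimator of
# Gretton et al., "A Kernel Two-Sample Test": it sums kernel terms over
# consecutive pairs (i, i+1) within and across the two batches instead of all
# O(n^2) pairs, trading estimator variance for O(n) cost.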
def mmd_rbf_noaccelerate(source, target, batch_size = 128, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
kernels = guassian_kernel(source, target,
kernel_mul=kernel_mul, kernel_num=kernel_num, fix_sigma=fix_sigma)
XX = kernels[:batch_size, :batch_size]
YY = kernels[batch_size:, batch_size:]
XY = kernels[:batch_size, batch_size:]
YX = kernels[batch_size:, :batch_size]
loss = tf.reduce_mean(XX + YY - XY -YX)
return loss
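# Minimal usage sketch (not part of the original module): MMD between random
# 64-d feature batches; the farther-apart pair should typically score higher.
if __name__ == '__main__':
    import numpy as np
    src = tf.placeholder(tf.float32, [100, 64])
    tgt = tf.placeholder(tf.float32, [100, 64])
    loss = mmd_rbf_accelerate(src, tgt, batch_size=100)
    rng = np.random.RandomState(0)
    a = rng.randn(100, 64).astype('float32')
    with tf.Session() as sess:
        near = sess.run(loss, {src: a, tgt: (a + 0.1 * rng.randn(100, 64)).astype('float32')})
        far = sess.run(loss, {src: a, tgt: (a + 3.0 * rng.randn(100, 64)).astype('float32')})
        print(near, far)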
import tensorflow as tf
import scipy.misc
import numpy as np
BATCH_SIZE = 40
def weight_variable(shape, name, stddev=0.02, trainable=True):
dtype = tf.float32
var = tf.get_variable(name, shape, tf.float32, trainable=trainable,initializer=tf.random_normal_initializer(stddev=stddev, dtype=dtype))
return var
def bias_variable(shape, name, bias_start=0.0, trainable = True):
dtype = tf.float32
var = tf.get_variable(name, shape, tf.float32, trainable=trainable,initializer=tf.constant_initializer(bias_start, dtype=dtype))
return var
def conv2d(x, output_channels, name, k_h=5, k_w=5, s=2):
x_shape = x.get_shape().as_list()
with tf.variable_scope(name):
w = weight_variable(shape=[k_h, k_w, x_shape[-1], output_channels], name='weights')
b = bias_variable([output_channels], name='biases')
conv = tf.nn.conv2d(x, w, strides=[1, s, s, 1], padding='SAME') + b
return conv
def deconv2d(x, output_shape, name, k_h=5, k_w=5, s=2):
x_shape = x.get_shape().as_list()
with tf.variable_scope(name):
w = weight_variable([k_h, k_w, output_shape[-1], x_shape[-1]], name='weights')
bias = bias_variable([output_shape[-1]], name='biases')
deconv = tf.nn.conv2d_transpose(x, w, output_shape, strides=[1, s, s, 1], padding='SAME') + bias
return deconv
def fully_connect(x, channels_out, name):
shape = x.get_shape().as_list()
channels_in = shape[1]
with tf.variable_scope(name):
weights = weight_variable([channels_in, channels_out], name='weights')
biases = bias_variable([channels_out], name='biases')
return tf.matmul(x, weights) + biases
def lrelu(x, leak=0.02):
return tf.maximum(x, leak * x)
def conv_cond_concat(value, cond):
value_shapes = value.get_shape().as_list()
cond_shapes = cond.get_shape().as_list()
return tf.concat([value, cond * tf.ones(value_shapes[0:3] + cond_shapes[3:])], 3)
# z: ?*100, y: ?*4
def generator(z, y, training=True):
with tf.variable_scope("generator", reuse=not training):
yb = tf.reshape(y, [BATCH_SIZE, 1, 1, 4], name="yb") # y:?*1*1*4
z = tf.concat([z, y], 1) # z:?*104
h1 = fully_connect(z, 1024, name='g_h1_fully_connect')
h1 = lrelu(tf.layers.batch_normalization(h1, training=training, name='g_h1_batch_norm'))
h1 = tf.concat([h1,y],1) # 1028
# h2 = fully_connect(h1, 7 * 25 * 128, name='g_h2_fully_connect')
# h2 = lrelu(tf.layers.batch_normalization(h2, training=training, name='g_h2_batch_norm'))
# h2 = tf.reshape(h2,(BATCH_SIZE,7, 25, 128))
# h2 = conv_cond_concat(h2, yb) # h1: 1 * 4 * 260
# h2 = tf.image.resize_images(h1, size=(4,7), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# h2 = conv2d(h2, 256, name='g_h2_deconv2d',s=1)
# h2 = lrelu(tf.layers.batch_normalization(h2, training=training, name='g_h2_batch_norm',)) # BATCH_SIZE*2*7*256
# h2 = conv_cond_concat(h2, yb) # h1: BATCH_SIZE*2*7*260
# h3 = tf.image.resize_images(h2, size=(7,25), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# h3 = conv2d(h3, 128, name='g_h3_deconv2d',s=1)
# h3 = lrelu(tf.layers.batch_normalization(h3, training=training, name='g_h3_batch_norm',)) # BATCH_SIZE*4*13*128
# h3 = conv_cond_concat(h3, yb) # h1: BATCH_SIZE*4*13*132
# h4 = tf.image.resize_images(h3, size=(7,25), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# h4 = conv2d(h4, 64, name='g_h4_deconv2d',s=1)
# h4 = lrelu(tf.layers.batch_normalization(h4, training=training, name='g_h4_batch_norm',)) # BATCH_SIZE*7*25*64
# h4 = conv_cond_concat(h4, yb) # h1: BATCH_SIZE*7*25*68
# h5 = tf.image.resize_images(h2, size=(14,50), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# h5 = conv2d(h5, 64, name='g_h5_deconv2d',s=1)
# h5 = lrelu(tf.layers.batch_normalization(h5, training=training, name='g_h5_batch_norm',)) # BATCH_SIZE*14*50*32
# h5 = conv_cond_concat(h5, yb) # h1: BATCH_SIZE*14*50*32
#
# h6 = tf.image.resize_images(h5, size=(28,100), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# h6 = conv2d(h6, 1, name='g_h6_deconv2d',s=1)
# h6 = lrelu(tf.layers.batch_normalization(h6, training=training, name='g_h6_batch_norm',)) # BATCH_SIZE*28*100*1
# h6 = tf.nn.tanh(h6)
h2 = fully_connect(h1, 128*7*25, name='g_h2_fully_connect')
h2 = lrelu(tf.layers.batch_normalization(h2, training=training, name='g_h2_batch_norm',))
        h2 = tf.reshape(h2, [BATCH_SIZE, 7, 25, 128]) # h2: ?*7*25*128
        h2 = conv_cond_concat(h2, yb) # h2: ?*7*25*132
        h3 = deconv2d(h2, output_shape=[BATCH_SIZE, 14, 50, 128], name='g_h3_deconv2d')
        h3 = lrelu(tf.layers.batch_normalization(h3, training=training, name='g_h3_batch_norm')) # h3: ?*14*50*128
        h3 = conv_cond_concat(h3, yb) # h3: ?*14*50*132
h4 = deconv2d(h3, output_shape=[BATCH_SIZE, 28, 100, 1], name='g_h4_deconv2d')
h4 = tf.nn.tanh(h4) # h4: ?*28*100*1
return h4
def discriminator(image, y, reuse=False, training=True):
# with tf.variable_scope(tf.get_variable_scope(),reuse=reuse):
if reuse:
tf.get_variable_scope().reuse_variables()
yb = tf.reshape(y, [BATCH_SIZE, 1, 1, 4], name='yb') # BATCH_SIZE*1*1*4
x = conv_cond_concat(image, yb) # image: BATCH_SIZE*28*100*1 ,x: BATCH_SIZE*28*100*5
h1 = conv2d(x, 32, name='d_h1_conv2d')
h1 = lrelu(tf.layers.batch_normalization(h1, name='d_h1_batch_norm', training=training, reuse=reuse)) # h1: BATCH_SIZE*14*50*32
    h1 = conv_cond_concat(h1, yb) # h1: BATCH_SIZE*14*50*36
h2 = conv2d(h1, 64, name='d_h2_conv2d')
h2 = lrelu(tf.layers.batch_normalization(h2, name='d_h2_batch_norm', training=training, reuse=reuse)) # BATCH_SIZE*7*25*64
    h2 = conv_cond_concat(h2, yb) # h2: BATCH_SIZE*7*25*68
h3 = conv2d(h2, 128, name='d_h3_conv2d')
h3 = lrelu(tf.layers.batch_normalization(h3, name='d_h3_batch_norm', training=training, reuse=reuse)) # BATCH_SIZE*4*13*128
h3 = tf.reshape(h3, [BATCH_SIZE, -1])
    h3 = tf.concat([h3, y], 1) # h3: BATCH_SIZE*(4*13*128 + 4)
h4 = fully_connect(h3, 1024, name='d_h4_fully_connect')
    h4 = lrelu(tf.layers.batch_normalization(h4, training=training, name='d_h4_batch_norm', reuse=reuse))
h4 = tf.concat([h4, y], 1)
# h4 = conv2d(h3, 256, name='d_h4_conv2d')
# h4 = lrelu(tf.layers.batch_normalization(h4, name='d_h4_batch_norm', training=training, reuse=reuse)) # BATCH_SIZE*2*7*256
# h4 = conv_cond_concat(h4, yb) # h1: BATCH_SIZE*2*7*256
# h5 = conv2d(h4, 256, name='d_h5_conv2d')
# h5 = lrelu(tf.layers.batch_normalization(h5, name='d_h5_batch_norm', training=training, reuse=reuse)) # BATCH_SIZE*1*4*256
# h5 = tf.reshape(h5, [BATCH_SIZE, -1]) # BATCH_SIZE*1024
# h5 = tf.concat([h5, y], 1) # BATCH_SIZE*1028
h6 = fully_connect(h4, 1, name='d_h6_fully_connect')
# h3 = lrelu(tf.layers.batch_normalization(h3, name='d_h3_batch_norm', training=training, reuse=reuse)) # BATCH_SIZE*1024
# h3 = tf.concat([h3, y], 1) # BATCH_SIZE*1034
# h4 = fully_connect(h3, 1, name='d_h4_fully_connect') # BATCH_SIZE*1
return tf.nn.sigmoid(h6)
# return h4
def sampler(z, y, training=False):
tf.get_variable_scope().reuse_variables()
return generator(z, y, training=training)
def save_images(images, size, path):
# normalization
img = (images + 1.0) / 2.0
h, w = img.shape[1], img.shape[2]
merge_img = np.zeros((h * size[0], w * size[1], 3))
    for idx, image in enumerate(img):
i = idx % size[1]
j = idx // size[1]
if j >= size[0]:
break
merge_img[j * h:j * h + h, i * w:i * w + w, :] = image
return scipy.misc.imsave(path, merge_img)
# csitool-for-realview
This is the tool running on the server for real-time viewing (realview) of CSI from the 802.11n CSI Tool.
In the test scenario there are three PCs: one for Tx, one for Rx, and a third for the realview display.
Whenever the Rx receives a packet, it forwards that packet's CSI to the realview server.
Note that the transmission is done over the UDP protocol.
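The Rx-side forwarder is not part of this repository; the following is only a sketch of what the server's receive loop expects (the server IP is an assumption, the port matches the code below, and the first byte of each datagram is skipped by the decoder):

```python
import socket

SERVER = ("192.168.1.100", 5563)  # realview server address (IP is an assumption)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

def forward(csi_payload):
    # one leading byte (skipped by the server), then the raw CSI bytes
    s.sendto(b'\x00' + csi_payload, SERVER)
```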
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 26 2018
@author: han
"""
# coding: utf-8
import modify_extract
#import extract
import udp
from plot import Display
import struct
ret = [] # to store csi
s = udp.udp_init(5563) # create a udp handle
f = Display() #initialize the realview procedure
f.display()
try:
while True: # a loop to receive the data
csiInfo = []
data, addr = udp.recv(s) # receive a udp socket
for i in range(1,len(data)):
csiInfo.append(struct.unpack("!B", data[i])[0]) # decode csi from udp
CSI_matrix = modify_extract.readFile(csiInfo)
f.push(CSI_matrix)
# print CSI_matrix
except KeyboardInterrupt:
udp.close(s) # close udp
f.stop() # close view
import read_bf_file
import numpy as np
import pylab
def radReverse(subcarrier):
return map(lambda x: float("%.2f" % np.arctan(x.imag / x.real)), subcarrier)
def complexToLatitude(subcarrier):
return map(lambda x: float("%.2f" % abs(x)), subcarrier)
def relativePhaseOperation(antenna_one, antenna_two, antenna_three):
# amplitude, relativePhase_one, relativePhase_two = None, None,None
# antenna_two= antenna_one * (antenna_two.conjugate())
# antenna_three = antenna_one * (antenna_three.conjugate())
# for subcarrier in antenna_one:
# raw = np.array([complexToLatitude(subcarrier)]) if raw is None else np.append(raw, [complexToLatitude(subcarrier)], axis=0)
# for subcarrier in antenna_two:
# relativePhase_one = np.array([radReverse(subcarrier)]) if relativePhase_one is None else np.append(relativePhase_one, [radReverse(subcarrier)], axis=0)
# for subcarrier in antenna_three:
# relativePhase_two = np.array([radReverse(subcarrier)]) if relativePhase_two is None else np.append(relativePhase_two, [radReverse(subcarrier)], axis=0)
amplitude, phase = None, None
for subcarrier in antenna_one:
amplitude = np.array([complexToLatitude(subcarrier)]) \
if amplitude is None else np.append(amplitude, [complexToLatitude(subcarrier)], axis=0)
for subcarrier in antenna_two:
phase = np.array([radReverse(subcarrier)]) \
if phase is None else np.append(phase, [radReverse(subcarrier)], axis=0)
return amplitude, phase
def readFile(filepath):
file=read_bf_file.read_file(filepath)
print len(file)
# pair_one_real =pair_one_imag=pair_Two_real=pair_Two_imag=pair_Three_real=pair_Three_imag =np.zeros((30,len(file)))
antennaPair_raw, antennaPair_One, antennaPair_Two, antennaPair_Three= [], [], [], []
for item in file:
for eachcsi in range(0, 30):
antennaPair_One.append(item.csi[eachcsi][0][0])
antennaPair_Two.append(item.csi[eachcsi][0][1])
antennaPair_Three.append(item.csi[eachcsi][0][2])
antennaPair_One = np.reshape(antennaPair_One,(1, 30)).transpose()
antennaPair_Two = np.reshape(antennaPair_Two, (1, 30)).transpose()
antennaPair_Three = np.reshape(antennaPair_Three, (1, 30)).transpose()
amplitude, phase= relativePhaseOperation(antennaPair_One, antennaPair_Two, antennaPair_Three)
csi_matrix = np.array([amplitude])
csi_matrix = np.append(csi_matrix, [phase], axis=0)
return csi_matrix
if __name__ == '__main__':
    csi = readFile("/home/han/data/1/csi1.dat")
# with open('../data/1/static_csi.pkl', 'wb') as handle:
# pickle.dump(csi, handle, -1)
phase1, value1 = None, None
phase2, value2 = None, None
pylab.figure()
pylab.plot(csi[0][0], 'g-', label='butterworth')
pylab.legend(loc='best')
pylab.ylim(0, 50)
pylab.show()
# -*- coding: utf-8 -*-
import pywt
from scipy import signal
class Filter():
def __init__(
self,
sequence):
self.sequence = sequence
def feedback(self):
w = pywt.Wavelet('coif7')
a = self.sequence
ca, cd = [], []
for i in range(3):
(a, d) = pywt.dwt(a, w)
ca.append(a)
cd.append(d)
rec_a, rec_d = [], []
for i, coeff in enumerate(ca):
coeff_list = [coeff, None] + [None] * i
rec_a.append(pywt.waverec(coeff_list, w))
for i, coeff in enumerate(cd):
coeff_list = [None, coeff] + [None] * i
rec_d.append(pywt.waverec(coeff_list, w))
return rec_a[-1]
def butterWorth(self):
b, a = signal.butter(3, 0.3, 'low')
sf = signal.filtfilt(b, a, self.sequence)
return sf
if __name__ =="__main__":
import display
raw, dwt,_,_ = display.date_wrapper()
print raw[0]
A = Filter(raw[0])
print len(A.feedback())
# fig = plt.figure()
# ax_main = fig.add_subplot(len(rec_a) + 1, 1, 1)
# ax_main.set_title(title)
# ax_main.plot(data)
# ax_main.set_xlim(0, len(data) - 1)
#
# for i, y in enumerate(rec_a):
# ax = fig.add_subplot(len(rec_a) + 1, 2, 3 + i * 2)
# ax.plot(y, 'r')
# ax.set_xlim(0, len(y) - 1)
# ax.set_ylabel("A%d" % (i + 1))
#
# for i, y in enumerate(rec_d):
# ax = fig.add_subplot(len(rec_d) + 1, 2, 4 + i * 2)
# ax.plot(y, 'g')
# ax.set_xlim(0, len(y) - 1)
# ax.set_ylabel("D%d" % (i + 1))
# coding=utf-8
import pywt
import math
import numpy as np
import matplotlib.pyplot as plt
fs = 1000           # sampling frequency
f1 = 50             # signal frequency 1
f2 = 100            # signal frequency 2
totalscale = 256
t = np.arange(0, 1, 1.0 / fs)
sig = np.sin(2 * math.pi * f1 * t) + np.sin(2 * math.pi * f2 * t)
wcf = pywt.central_frequency('morl')           # center frequency of the wavelet
scale = np.arange(1, totalscale + 1, 1)        # arithmetic sequence 1..256
cparam = 2 * wcf * totalscale
scale = cparam / scale                         # convert to wavelet scales
frequencies = pywt.scale2frequency('morl', scale)  # scales to normalized frequency
frequencies = frequencies * fs                 # normalized frequency to real signal frequency
cwtmatr, freqs = pywt.cwt(sig, scale, 'morl')  # continuous wavelet coefficients
plt.ylabel('y')
plt.xlabel('x')
plt.pcolormesh(t,frequencies,abs(cwtmatr),vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
plt.colorbar()
plt.show()
import threading
import read_bf_file
# import read_bf_file
import numpy as np
import pylab
import pickle
from scipy.fftpack import fft, ifft
#from dwtfilter import dwtfilter
TIMEINTERVAL = 0.05
TIMEBIASE = 0.45
TIMELEN = 54000
TIMEWINDOW = 900
IMAGETOCSIRATIO = 30
def radReverse(subcarrier):
return map(lambda x: float("%.2f" % np.arctan(x.imag / x.real)), subcarrier)
def complexToLatitude(subcarrier):
return map(lambda x: float("%.2f" % abs(x)), subcarrier)
def reviseInterp(timestamp, eachsubcarrier):
blockedTime = []
flag, count = 0, 0
for tIndex in range(1, len(timestamp)):
        if timestamp[tIndex] - timestamp[tIndex - 1] > TIMEINTERVAL:
            numOfInterp = int((timestamp[tIndex] - timestamp[tIndex - 1]) / TIMEBIASE)  # TODO: TIMEBIASE needs tuning
for num in range(0, numOfInterp - 1):
blockedTime.append(tIndex)
ca = eachsubcarrier.tolist()
for csiIndex in blockedTime:
ca.insert(csiIndex + count, 0)
count+=1
for csiApt in range(0, len(ca) - 1):
if ca[csiApt] == 0 and ca[csiApt + 1] != 0:
ca[csiApt] = "%.2f" % ((ca[csiApt - 1] + ca[csiApt + 1]) / 2.0)
elif ca[csiApt] == 0 and ca[csiApt + 1] == 0:
for zeros in range(csiApt, len(ca)):
if ca[zeros] != 0:
flag = zeros
break
numOfZeros = flag - csiApt
for num in range(0, numOfZeros):
ca[csiApt + num] = ca[csiApt + num - 1] + float('%.2f' % ((ca[flag] - ca[csiApt - 1]) / numOfZeros))
caNew = [float(x) for x in ca]
blockedTime.extend([x for x in range(len(caNew), TIMELEN)])
if len(caNew) < TIMELEN:
caNew.extend([caNew[-1] for _ in range(0, TIMELEN - len(caNew))])
else:
caNew = caNew[:TIMELEN]
return caNew, blockedTime
def my_static(temp, lenfile):
s_t = []
s_t_list = []
s_t_index = []
Max = -100
Min = 100
T = 5
flag = 2
for i in range(lenfile):
if i < lenfile - 4 :
s_t.append(np.mean(temp[i : i + 4]))
else:
s_t.append(s_t[lenfile - 5])
for i in range(lenfile):
if temp[i] > s_t[i] + T and temp[i] >= Max and temp[i] > 0:
Max = temp[i]
Max_index = i
if flag == 0:
s_t_list.append(Min)
s_t_index.append(Min_index)
Min = 100
flag = 1
elif temp[i] < s_t[i] - T and temp[i] <= Min and temp[i] > 0:
Min = temp[i]
Min_index = i
if flag == 1:
s_t_list.append(Max)
s_t_index.append(Max_index)
Max = -100
flag = 0
for i in range (len(s_t_list) - 1):
for j in range(s_t_index[i], s_t_index[i + 1]):
temp[j] = temp[j] - (s_t_list[i] + s_t_list[i + 1]) / 2
return temp
def linearInterpolation(matrix, timestamp):
raw, blockedTime = None, None
for eachsubcarrier in matrix:
eachsubcarrier, blockedTime = reviseInterp(timestamp, eachsubcarrier)
raw = np.array([eachsubcarrier]) if raw is None else np.append(raw, [eachsubcarrier], axis=0)
return raw, blockedTime
def varianceOperation(*args):
var_list = [np.var(args[0]), np.var(args[1]), np.var(args[2])]
mini, maxi = var_list.index(min(var_list)), var_list.index(max(var_list))
return args[mini], args[maxi]
def relativePhaseOperation(antenna_one, antenna_two, antenna_three):
# amplitude, relativePhase_one, relativePhase_two = None, None,None
# antenna_two= antenna_one * (antenna_two.conjugate())
# antenna_three = antenna_one * (antenna_three.conjugate())
# for subcarrier in antenna_one:
# raw = np.array([complexToLatitude(subcarrier)]) if raw is None else np.append(raw, [complexToLatitude(subcarrier)], axis=0)
# for subcarrier in antenna_two:
# relativePhase_one = np.array([radReverse(subcarrier)]) if relativePhase_one is None else np.append(relativePhase_one, [radReverse(subcarrier)], axis=0)
# for subcarrier in antenna_three:
# relativePhase_two = np.array([radReverse(subcarrier)]) if relativePhase_two is None else np.append(relativePhase_two, [radReverse(subcarrier)], axis=0)
antenna_one_amp,conjugate_amp, conjugate, relativePhase = None, None, None,None
tryy, tryyy = None, None
for wins in range(0, len(antenna_one[0]), TIMEWINDOW):
part_min, part_max = varianceOperation(antenna_one[:, wins: wins+TIMEWINDOW],
antenna_two[:, wins: wins+TIMEWINDOW], antenna_three[:, wins: wins+TIMEWINDOW])
alpha = np.mean(part_max)
        beta = alpha * 1000
        tryy = part_max * (part_min.conjugate())
        tryyy = np.array(tryy) if tryyy is None else np.hstack((tryyy, tryy))
        con_mul = (part_max - alpha) * ((part_min + beta).conjugate())
con_mul = con_mul - np.mean(con_mul)
conjugate = np.array(con_mul) if conjugate is None else np.hstack((conjugate,con_mul))
for subcarrier in antenna_one:
antenna_one_amp = np.array([complexToLatitude(subcarrier)]) \
if antenna_one_amp is None else np.append(antenna_one_amp, [complexToLatitude(subcarrier)], axis=0)
for subcarrier in conjugate:
conjugate_amp = np.array([complexToLatitude(subcarrier)]) \
if conjugate_amp is None else np.append(conjugate_amp, [complexToLatitude(subcarrier)], axis=0)
for subcarrier in conjugate:
relativePhase = np.array([radReverse(subcarrier)]) \
if relativePhase is None else np.append(relativePhase, [radReverse(subcarrier)], axis=0)
# relativePhase = np.array([complexToLatitude(subcarrier)]) \
# if relativePhase is None else np.append(relativePhase, [complexToLatitude(subcarrier)], axis=0)
return antenna_one_amp,conjugate_amp, relativePhase, conjugate, tryyy
def readFile(filepath):
file=read_bf_file.read_file(filepath)
#print "Length of packets: ", len(file)
# pair_one_real =pair_one_imag=pair_Two_real=pair_Two_imag=pair_Three_real=pair_Three_imag =np.zeros((30,len(file)))
timestamp = np.array([])
startTime = file[0].timestamp_low
#print "Start timestamp:" + str(startTime)
antennaPair_raw, antennaPair_One, antennaPair_Two, antennaPair_Three= [], [], [], []
for item in file:
timestamp = np.append(timestamp, (item.timestamp_low - startTime) / 1000000.0)
for eachcsi in range(0, 30):
            '''
            acquire the CSI complex value for each antenna pair, with shape (len(file) * 30), i.e., packet number * subcarrier number
            '''
antennaPair_One.append(item.csi[eachcsi][0][0])
antennaPair_Two.append(item.csi[eachcsi][0][1])
antennaPair_Three.append(item.csi[eachcsi][0][2])
antennaPair_One = np.reshape(antennaPair_One,(len(file), 30)).transpose()
antennaPair_Two = np.reshape(antennaPair_Two, (len(file), 30)).transpose()
antennaPair_Three = np.reshape(antennaPair_Three, (len(file), 30)).transpose()
"""
To get the relative phase between each antenna pair.
    Linear interpolation operation.
"""
antenna_one_amp, conjugate_amp,relativePhase,conjugate, tryyy = relativePhaseOperation(antennaPair_One, antennaPair_Two, antennaPair_Three)
#antenna_one_amp, blocked = linearInterpolation(antenna_one_amp, timestamp)
#conjugate_amp, blocked = linearInterpolation(conjugate_amp, timestamp)
#relativePhase, blocked = linearInterpolation(relativePhase, timestamp)
    # TODO: MORE SIGNAL OPERATIONS NEED TO BE ADDED!
'''for subcarrier in range(len(amplitude)):
amplitude[subcarrier] = dwtfilter(amplitude[subcarrier]).butterWorth()
amplitude[subcarrier] = dwtfilter(amplitude[subcarrier]).filterOperation()'''
csi_matrix = np.array([antenna_one_amp])
csi_matrix = np.append(csi_matrix, [conjugate_amp], axis=0)
csi_matrix = np.append(csi_matrix, [relativePhase], axis=0)
return csi_matrix
#return csi_matrix, conjugate, tryyy
if __name__ == '__main__':
csi= readFile("test.dat")
#csi, conjugate, tryyy= readFile("test.dat")
# with open('../data/1/static_csi.pkl', 'wb') as handle:
# pickle.dump(csi, handle, -1)
phase1, value1 = None, None
phase2, value2 = None, None
pylab.figure()
pylab.plot(csi[0][0], 'g-', label='butterworth')
    # pylab.pcolormesh(csi[0][0], cmap=cm_)  # disabled: cm_ is undefined here
# pylab.plot(csi[1][0], 'g-', label='butterworth')
# ff = fft(conjugate)
# for subcarrier in ff:
# phase1 = np.array([radReverse(subcarrier)]) \
# if phase1 is None else np.append(phase1, [radReverse(subcarrier)], axis=0)
# value1 = np.array([complexToLatitude(subcarrier)]) \
# if value1 is None else np.append(value1, [complexToLatitude(subcarrier)], axis=0)
# pylab.plot(phase1[0], 'r--', label='fft_alpha')
#
# yy = fft(tryyy)
# for subcarrier in yy:
# phase2 = np.array([radReverse(subcarrier)]) \
# if phase2 is None else np.append(phase2, [radReverse(subcarrier)], axis=0)
# value2 = np.array([complexToLatitude(subcarrier)]) \
# if value2 is None else np.append(value2, [complexToLatitude(subcarrier)], axis=0)
# pylab.plot(phase2[0], 'b', label='fft_raw')
# pylab.plot(value[0], 'b--',label='fft_value')
pylab.legend(loc='best')
pylab.ylim(0, 50)
pylab.show()
import matplotlib.pylab as plt
from collections import deque
from filter import Filter
import numpy as np
import copy
import time
import threading
import pywt
TIMEWINDOW = 1800
SLIDEWINDOW = TIMEWINDOW / 2
class Display:
def __init__(self, REFRESH_INTERVAL=0.001):
self.count = 0
self.t = deque()
self.amp = deque()
self.conj_amp=deque()
self.pha = deque()
self.amp_filter = None
self.pha_conj = deque()
self.end = False
self.threads = []
self.interval = REFRESH_INTERVAL
pass
def push(self, data):
if self.count > TIMEWINDOW -1:
self.t.popleft()
self.amp.popleft()
self.conj_amp.popleft()
self.pha.popleft()
self.t.append(self.count)
self.amp.append(data[0][0])
self.conj_amp.append(data[1][0])
self.pha.append(data[2][0])
self.count += 1
if self.count % SLIDEWINDOW == 0:
if self.amp_filter is None:
self.amp_filter = list(copy.deepcopy(self.amp))
elif len(self.amp_filter) == SLIDEWINDOW:
self.amp_filter.extend(list(self.amp)[-SLIDEWINDOW:])
else:
self.amp_filter = self.amp_filter[-SLIDEWINDOW:] + list(copy.deepcopy(self.amp))[-SLIDEWINDOW:]
def _plot(self):
fig, ax = plt.subplots(nrows=2, ncols=2, sharex=True)
amplitude = ax[0][0]
# phase = ax[0][1]
slide = ax[0][1]
conj_amplitude=ax[1][0]
time_fre = ax[1][1]
while not self.end:
t = copy.deepcopy(self.t)
amp = copy.deepcopy(self.amp)
conj_amp=copy.deepcopy(self.conj_amp)
pha = copy.deepcopy(self.pha)
# if self.count <= TIMEWINDOW + SLIDEWINDOW:
flt = copy.deepcopy(self.amp_filter)
# if flt and len(flt) == TIMEWINDOW:
# flt = Filter(flt).butterWorth()
# else:
if len(t) == 0:
time.sleep(self.interval)
continue
if len(t) != len(amp) or len(t) != len(pha):
continue
max_t = t[-1] + 100
min_t = max_t - TIMEWINDOW if max_t - TIMEWINDOW > 0 else 0
amplitude.cla()
amplitude.set_title("amplitude")
amplitude.set_xlabel("packet / per")
amplitude.set_ylim(0, 50)
amplitude.set_xlim(min_t, max_t)
amplitude.grid()
amplitude.plot(t,np.array(amp))
# amplitude.legend(loc='best')
# phase.cla()
# phase.set_title("phase")
# phase.set_xlabel("packet / per")
# phase.set_ylim(-2, 2)
# phase.set_xlim(min_t, max_t)
# phase.grid()
# phase.plot(t, np.array(pha))
slide.cla()
slide.set_title("slide")
slide.set_xlabel("packet / per")
slide.set_ylim(0, 50)
slide.set_xlim(min_t, max_t)
slide.grid()
time_fre.set_title("time_fre")
time_fre.set_xlabel("time(s)")
time_fre.set_ylim(0, 25)
if flt and len(flt) == TIMEWINDOW:
slide.plot(t,np.array(flt))
sig=[]
for i in range(len(flt)): #to 1 dimension
sig.append(flt[i][0])
#print sig
fs = 50
totalscale = 256
#t = np.arange(0, 1, 1.0 / fs)
#sig = np.sin(2 * math.pi * f1 * t) + np.sin(2 * math.pi * f2 * t)
wcf = pywt.central_frequency('morl')
scale = np.arange(1, totalscale + 1, 1)
cparam = 2 * wcf * totalscale
scale = cparam / scale
frequencies = pywt.scale2frequency('morl', scale)
frequencies = frequencies * fs
cwtmatr, freqs = pywt.cwt(sig, scale, 'morl')
time_fre.pcolormesh(t, frequencies, abs(cwtmatr), vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
#time_fre.colorbar()
#time_fre.show()
conj_amplitude.cla()
conj_amplitude.set_title("conj_amplitude")
conj_amplitude.set_xlabel("packet / per")
#conj_amplitude.set_ylim(0, 100)
conj_amplitude.set_xlim(min_t, max_t)
conj_amplitude.grid()
conj_amplitude.plot(t, np.array(conj_amp))
plt.pause(self.interval)
def stop(self):
for t in self.threads:
t.join()
print('stop realview****')
def display(self):
t1 = threading.Thread(target=self._plot)
self.threads.append(t1)
for t in self.threads:
# t.setDaemon(True)
t.start()
print('display starting...')
if __name__ == '__main__':
f = Display()
f.display()
while True:
data = [[[10]],[[10]],[[0.2]]]
f.push(data)
time.sleep(0.1)
import numpy as np
import os
import sys
import struct
# from .csi import WifiCsi
class WifiCsi:
def __init__(self, args, csi):
self.timestamp_low = args[0]
self.bfee_count = args[1]
self.Nrx = args[2]
self.Ntx = args[3]
self.rssi_a = args[4]
self.rssi_b = args[5]
self.rssi_c = args[6]
self.noise = args[7]
self.agc = args[8]
self.perm = args[9]
self.rate = args[10]
self.csi = csi
pass
def get_bit_num(in_num, data_length):
max_value = (1 << data_length - 1) - 1
if not -max_value-1 <= in_num <= max_value:
out_num = (in_num + (max_value + 1)) % (2 * (max_value + 1)) - max_value - 1
else:
out_num = in_num
return out_num
pass
def read_bfee(in_bytes):
timestamp_low = in_bytes[0] + (in_bytes[1] << 8) + \
(in_bytes[2] << 16) + (in_bytes[3] << 24)
bfee_count = in_bytes[4] + (in_bytes[5] << 8)
Nrx = in_bytes[8]
Ntx = in_bytes[9]
rssi_a = in_bytes[10]
rssi_b = in_bytes[11]
rssi_c = in_bytes[12]
noise = get_bit_num(in_bytes[13],8)
agc = in_bytes[14]
antenna_sel = in_bytes[15]
length = in_bytes[16] + (in_bytes[17] << 8)
fake_rate_n_flags = in_bytes[18] + (in_bytes[19] << 8)
calc_len = (30 * (Nrx * Ntx * 8 * 2 + 3) + 7) / 8
payload = in_bytes[20:]
# if(length != calc_len)
    perm_size = 3
perm = np.ndarray(perm_size, dtype=int)
if Nrx == 3 :
perm[0] = ((antenna_sel) & 0x3) + 1
perm[1] = ((antenna_sel >> 2) & 0x3) + 1
perm[2] = ((antenna_sel >> 4) & 0x3) + 1
elif Nrx ==2 :
perm[0] = 2
perm[1] = 1
perm[2] = 3
index = 0
Nrx_mat = 3
csi_size = (30, Ntx, Nrx_mat)
row_csi = np.ndarray(csi_size, dtype=complex)
perm_csi = np.ndarray(csi_size, dtype=complex)
try:
for i in range(30):
index += 3
remainder = index % 8
for j in range(Nrx):
for k in range(Ntx):
pr = get_bit_num((payload[index // 8] >> remainder),8) | get_bit_num((payload[index // 8+1] << (8-remainder)),8)
pi = get_bit_num((payload[(index // 8)+1] >> remainder),8) | get_bit_num((payload[(index // 8)+2] << (8-remainder)),8)
if Nrx == 3:
perm_csi[i][k][perm[j] - 1] = complex(pr, pi)
elif Nrx == 2:
perm_csi[i][k][perm[j]] = complex(pr, pi)
index += 16
pass
pass
pass
pass
    except:
        pass  # tolerate truncated payloads and keep whatever was parsed
args = [timestamp_low, bfee_count, Nrx, Ntx, rssi_a,
rssi_b, rssi_c, noise, agc, perm, fake_rate_n_flags]
temp_wifi_csi = WifiCsi(args, perm_csi)
return temp_wifi_csi
def read_file(in_bytes):
    # this realview variant parses an already-decoded byte list (from UDP) rather than a file on disk
    csi_data = []
    csi_data.append(read_bfee(in_bytes))
    return csi_data
from collections import deque
import numpy as np
import copy
l = None
t = deque()
t.append(1)
t.append(2)
t.append(3)
t.append(4)
for i in range(2):
if l is None:
l = copy.deepcopy(t)
else:
l.extend(list(t)[-2:])
print l
l= list(l)[:2] + list(t)
print l
print t
# -*- coding: utf-8 -*-
"""@package udp
Created on Fri Jun 16 17:13:04 2017
@author: ren
"""
import socket
def udp_init(port):
    ## initialize the socket with the UDP protocol
    #
    # @param port the port number used for UDP at the server
#
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # SOCKET_DGRAM:UDP
#s.bind(("", 0x1031))
s.bind(("",port))
return s
def recv(s):
data, addr = s.recvfrom(4096) # BUFSIZE
return data,addr
def close(s):
s.close()