Commit 4879ea30 authored by GuJingLing's avatar GuJingLing

Measurement initial commit

parents
Pipeline #57 canceled with stages

Too many changes to show.

To preserve performance only 1000 of 1000+ files are displayed.

<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7" project-jdk-type="Python SDK" />
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/oaiflowclassification.iml" filepath="$PROJECT_DIR$/.idea/oaiflowclassification.iml" />
</modules>
</component>
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="AutoImportSettings">
<option name="autoReloadType" value="SELECTIVE" />
</component>
<component name="ChangeListManager">
<list default="true" id="99f76696-bbaa-47e7-a60f-368e65454df1" name="Default Changelist" comment="" />
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="FileTemplateManagerImpl">
<option name="RECENT_TEMPLATES">
<list>
<option value="Python Script" />
</list>
</option>
</component>
<component name="ProjectId" id="1ouh0dEMuQDqaYZ33dVplZEjCVB" />
<component name="ProjectViewState">
<option name="hideEmptyMiddlePackages" value="true" />
<option name="showLibraryContents" value="true" />
</component>
<component name="PropertiesComponent">
<property name="RunOnceActivity.OpenProjectViewOnStart" value="true" />
<property name="RunOnceActivity.ShowReadmeOnStart" value="true" />
<property name="last_opened_file_path" value="$PROJECT_DIR$" />
<property name="settings.editor.selected.configurable" value="com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable" />
</component>
<component name="RunManager" selected="Python.main">
<configuration name="main" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true">
<module name="oaiflowclassification" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/main.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="predict" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="oaiflowclassification" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/predict.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="train" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="oaiflowclassification" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/train.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<recent_temporary>
<list>
<item itemvalue="Python.predict" />
<item itemvalue="Python.train" />
</list>
</recent_temporary>
</component>
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="99f76696-bbaa-47e7-a60f-368e65454df1" name="Default Changelist" comment="" />
<created>1614142160428</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1614142160428</updated>
</task>
<servers />
</component>
<component name="WindowStateProjectService">
<state width="2027" height="234" key="GridCell.Tab.0.bottom" timestamp="1614178930540">
<screen x="0" y="0" width="2048" height="1112" />
</state>
<state width="2027" height="234" key="GridCell.Tab.0.bottom/0.0.2048.1112@0.0.2048.1112" timestamp="1614178930540" />
<state width="2027" height="234" key="GridCell.Tab.0.center" timestamp="1614178930540">
<screen x="0" y="0" width="2048" height="1112" />
</state>
<state width="2027" height="234" key="GridCell.Tab.0.center/0.0.2048.1112@0.0.2048.1112" timestamp="1614178930540" />
<state width="2027" height="234" key="GridCell.Tab.0.left" timestamp="1614178930540">
<screen x="0" y="0" width="2048" height="1112" />
</state>
<state width="2027" height="234" key="GridCell.Tab.0.left/0.0.2048.1112@0.0.2048.1112" timestamp="1614178930540" />
<state width="2027" height="234" key="GridCell.Tab.0.right" timestamp="1614178930540">
<screen x="0" y="0" width="2048" height="1112" />
</state>
<state width="2027" height="234" key="GridCell.Tab.0.right/0.0.2048.1112@0.0.2048.1112" timestamp="1614178930540" />
<state x="526" y="200" key="SettingsEditor" timestamp="1614177585864">
<screen x="0" y="0" width="2048" height="1112" />
</state>
<state x="526" y="200" key="SettingsEditor/0.0.2048.1112@0.0.2048.1112" timestamp="1614177585864" />
</component>
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ChangeListManager">
<list default="true" id="167f7a9d-7314-4128-86b1-58c3c7cff552" name="Default Changelist" comment="" />
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="ProjectId" id="1p0OKv6n0YSCemq7ljYu4f5bJXI" />
<component name="ProjectViewState">
<option name="hideEmptyMiddlePackages" value="true" />
<option name="showLibraryContents" value="true" />
</component>
<component name="PropertiesComponent">
<property name="RunOnceActivity.OpenProjectViewOnStart" value="true" />
<property name="RunOnceActivity.ShowReadmeOnStart" value="true" />
<property name="last_opened_file_path" value="$PROJECT_DIR$" />
<property name="settings.editor.selected.configurable" value="com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable" />
</component>
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="167f7a9d-7314-4128-86b1-58c3c7cff552" name="Default Changelist" comment="" />
<created>1614316481376</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1614316481376</updated>
</task>
<servers />
</component>
</project>
\ No newline at end of file
"""
预测函数:随机森林
每次运行前,检查:
四个需要修改的地方,命名是否正确
最后的运行模式是否正确
做预测
1. 直接使用之前的模型做预测 分类结果和五元组封装起来
2. 做一个线程池 多线程接收发送来的特征信息
"""
from common_utils import * # 修改了
from argparse import ArgumentParser
from collections import namedtuple
from typing import List, Dict
from datetime import datetime
from path_utils import get_prj_root
import numpy as np
# from sklearn.externals import joblib
import joblib
import pprint
from sklearn.ensemble import RandomForestClassifier # 训练模型
random.seed(datetime.now())  # nondeterministic seeding; use a fixed int to reproduce runs
model_dir = os.path.join(get_prj_root(), "classify/models")  # folder holding trained models
predict_model_pkl = os.path.join(model_dir, "dt2.pkl")  # model version: edit only here
Instance = namedtuple("Instance", ["features", "label"])  # one labeled feature vector
# Per-class raw data directories produced by the pcap parser.
dirs = {
    "video": "./tmp/dt/video",
    "iot": "./tmp/dt/iot",
    "voip": "./tmp/dt/voip",
    "AR": "./tmp/dt/AR",
}
instances_dir = os.path.join(get_prj_root(), "classify/instances")  # instance pickles
# Hand-made feature vectors for quick tuple-based testing.
test_flow = (1, 2, 3, 4, 5, 6, 7, 8, 6, 10)
test_flow2 = [221.0, 1350.0, 640.0, 376.26798960315506, 543.0,
              21257877.349853516, 4793407917.022705, 1263437211.5135193,
              2039103758.0566826, 119541525.84075928]
def train_and_predict():
    """Train a RandomForest on the per-class instance pickles, evaluate it on
    a held-out split, persist the fitted model, and return test accuracy.

    Returns:
        float: fraction of correctly classified held-out instances.
    """
    # Each pickle holds Instance namedtuples for one traffic class.
    iot = load_pkl(os.path.join(instances_dir, "iot.pkl"))
    videos = load_pkl(os.path.join(instances_dir, "video.pkl"))
    voip = load_pkl(os.path.join(instances_dir, "voip.pkl"))
    AR = load_pkl(os.path.join(instances_dir, "AR.pkl"))
    # Sanity-check the label encoding: 0=video, 1=iot, 2=voip, 3=AR.
    for inst in videos:
        assert inst.label == 0
    for inst in iot:
        assert inst.label == 1
    for inst in voip:
        assert inst.label == 2
    for inst in AR:
        assert inst.label == 3
    debug("# iot instances {}".format(len(iot)))
    debug("# video instances {}".format(len(videos)))
    debug("# VOIP instances {}".format(len(voip)))
    debug("# AR instances {}".format(len(AR)))
    # Shuffle each class so the 70/30 split below is random.
    random.shuffle(voip)
    random.shuffle(iot)
    random.shuffle(videos)
    random.shuffle(AR)
    # NOTE(review): every class reuses the video-derived split sizes, so the
    # other classes are sliced to video's train/test counts -- confirm intended.
    n_video_train = int(len(videos) * 0.7)
    video_train = videos[:n_video_train]
    video_test = videos[n_video_train:]
    iot_train = iot[:n_video_train]
    iot_test = iot[len(iot) - len(video_test):]
    voip_train = voip[:n_video_train]
    voip_test = voip[len(voip) - len(video_test):]
    AR_train = AR[:n_video_train]
    AR_test = AR[len(AR) - len(video_test):]
    info("#video train {}".format(len(video_train)))
    info("#iot train {}".format(len(iot_train)))
    info("#voip train {}".format(len(voip_train)))
    info("#AR train {}".format(len(AR_train)))
    train = iot_train + video_train + voip_train + AR_train
    random.shuffle(train)
    train_x = [x.features for x in train]
    train_y = [x.label for x in train]
    # Test split is roughly balanced 1:1:1:1 across classes.
    info("#video test {}".format(len(video_test)))
    info("#iot test {}".format(len(iot_test)))
    info("#voip test {}".format(len(voip_test)))
    info("#AR test {}".format(len(AR_test)))
    test = video_test + iot_test + voip_test + AR_test
    random.shuffle(test)
    test_x = [t.features for t in test]
    test_y = [t.label for t in test]
    # Fit and evaluate; oob_score adds an out-of-bag accuracy estimate.
    predict_model = RandomForestClassifier(oob_score=True)
    predict_model.fit(train_x, train_y)
    predicts = predict_model.predict(test_x)
    # Persist the fitted model. Bug fix: the original also computed
    # fn_name = os.path.join(model_dir, predict_model_pkl) -- a no-op join of
    # an already-absolute path that was never used.
    joblib.dump(predict_model, predict_model_pkl)
    # Accuracy on the held-out split.
    correct = sum(1 for pred, truth in zip(predicts, test_y)
                  if int(pred) == int(truth))
    return correct / len(test_x)
def classify_flows(mode: int, predict_dir):
    """Train/evaluate the model, or classify flows from a saved pickle.

    :param mode: 0 -- train: run 10 train/evaluate rounds, print and return
                 the mean accuracy (the original returned None here);
                 anything else -- load the saved model and classify flows.
    :param predict_dir: directory containing the "predict2.pkl" flows file.
    :return: list of class-name strings in predict mode; mean accuracy (float)
             in train mode.
    """
    if mode == 0:
        # Train the model on the instance pickles; average accuracy over
        # several rounds to smooth out the random splits.
        rounds = 10
        total = 0.0
        for _ in range(rounds):
            total += train_and_predict()
        mean_acc = total / rounds
        print("模型准确率为:", mean_acc)
        return mean_acc
    # Predict mode: classify the flows stored in predict_dir/predict2.pkl.
    flows = load_pkl(os.path.join(predict_dir, "predict2.pkl"))
    # Bug fix: the original logged these as "#video test", which is wrong --
    # the pickle may contain flows of any class.
    info("#flows to classify {}".format(len(flows)))
    test_x = [t.features for t in flows]
    predict_model = joblib.load(predict_model_pkl)
    predict_result = predict_model.predict(test_x)
    return identify_classification(predict_result)
def classify_flow_list(flow_list):
    """Classify a single flow given as [features, [five_tuple]].

    :param flow_list: [[f1..f10], ["sip sport dip dport proto"]]
    :return: [five_tuple, class_name]
    """
    # Label encoding shared with identify_classification(); unknown labels
    # fall back to "AR" to preserve the original if/elif/else behavior.
    label_names = {0: "videos", 1: "iot", 2: "voip"}
    predict_model = joblib.load(predict_model_pkl)
    predicted = predict_model.predict([flow_list[0]])
    # Bug fix: the original compared the returned numpy array to a scalar with
    # `==`, relying on single-element truthiness; index the element instead.
    label = int(predicted[0])
    return [flow_list[1][0], label_names.get(label, "AR")]
def identify_classification(predict_result):
    """Map numeric class labels to class-name strings.

    Bug fix: the original silently dropped any label outside 0-3, so the
    output list could be shorter than the input and lose alignment with the
    flows being classified; unknown labels now map to "unknown".

    :param predict_result: iterable of integer labels (0=videos, 1=iot,
        2=voip, 3=AR)
    :return: list of class-name strings, same length as the input
    """
    label_names = {0: "videos", 1: "iot", 2: "voip", 3: "AR"}
    return [label_names.get(int(label), "unknown") for label in predict_result]
if __name__ == '__main__':
    # Train the model (mode 0) -- disabled; uncomment to retrain:
    # classify_flows(mode=0, path=instances_dir)
    # Predict: classify the flows under classify/predict and pretty-print them.
    predict_dir = os.path.join(get_prj_root(), "classify/predict")  # must contain predict2.pkl
    predict_result_list = classify_flows(mode=1, predict_dir=predict_dir)
    pprint.pprint(predict_result_list)
import json
import os
import pickle
import random
from pathlib import Path
import loguru
import numpy as np
logger = loguru.logger
info = logger.info
debug = logger.debug
err = logger.error
def check_dir(dir_name):
    """Raise FileNotFoundError unless *dir_name* is an existing directory."""
    if Path(dir_name).is_dir():
        return
    raise FileNotFoundError
def check_file(fn):
    """Raise FileNotFoundError unless *fn* is an existing regular file."""
    if Path(fn).is_file():
        return
    raise FileNotFoundError
def file_exsit(fn):
    """Return True when *fn* names an existing regular file (name sic)."""
    return os.path.isfile(fn)
def dir_exsit(fn):
    """Return True when *fn* names an existing directory (name sic)."""
    return os.path.isdir(fn)
def gaussion(mean: float, std_var: float, size=1):
    """Draw from N(mean, std_var): a scalar when size == 1, else an array."""
    if size != 1:
        return np.random.normal(mean, std_var, size)
    return np.random.normal(mean, std_var)
def exp(mean: float, size=1):
    """Draw from an exponential distribution with the given mean.

    Bug fix: the original sampled np.random.normal (and in the array branch
    passed *size* as the scale argument); an exponential sampler was clearly
    intended given the function name and the single *mean* parameter.

    :param mean: mean (= scale) of the exponential distribution
    :param size: number of samples; 1 returns a scalar, otherwise an array
    """
    if 1 == size:
        return np.random.exponential(mean)
    return np.random.exponential(mean, size)
def uniform(low, up, size=1):
    """Draw from U[low, up): a scalar when size == 1, else an array."""
    if size != 1:
        return np.random.uniform(low, up, size)
    return np.random.uniform(low, up)
def load_pkl(filename):
    """Unpickle and return the object stored in *filename*.

    Cleanup: the original pre-initialized data=None and called file.close()
    inside the with-block -- both redundant.

    :raises FileNotFoundError: when *filename* is not an existing file.
    """
    if not Path(filename).is_file():
        raise FileNotFoundError(filename)
    with open(filename, 'rb') as file:
        return pickle.load(file)
def save_pkl(filename, obj, overwrite=True):
    """Pickle *obj* to *filename*.

    Bug fix: in the original, the overwrite=False case fell through to a
    second write() call, so an existing file was always clobbered. Now an
    existing file is left untouched unless overwrite=True.

    :param filename: destination path
    :param obj: any picklable object
    :param overwrite: when False, keep an already-existing file as-is
    """
    if Path(filename).is_file() and not overwrite:
        return
    with open(filename, 'wb') as file:
        pickle.dump(obj, file)
        file.flush()
def load_json(filename):
    """Parse *filename* as JSON; raise FileNotFoundError when it is missing."""
    if not Path(filename).is_file():
        raise FileNotFoundError
    with open(filename) as f:
        return json.load(f)
def save_json(filename, obj, overwrite=True):
    """Serialize *obj* to *filename* as indented JSON.

    Bug fixes relative to the original:
    * ``Path(filename).is_file`` was missing the call parentheses, so the
      condition tested a bound method (always truthy);
    * with overwrite=False and an existing file, control fell through to a
      second write() call and clobbered the file anyway.

    :param overwrite: when False, keep an already-existing file as-is
    """
    if Path(filename).is_file() and not overwrite:
        return
    with open(filename, 'w', encoding="utf8") as file:
        json.dump(obj, file, indent=4)
def is_digit(x: str) -> bool:
    """Return True when *x* parses as a float (e.g. "3", "-2.5", "1e3").

    Bug fix: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit; only conversion errors are caught now.
    """
    try:
        float(x)
        return True
    except (TypeError, ValueError):
        return False
def normalize(x):
    """Min-max scale *x* into [0, 1].

    Edge cases the original crashed on:
    * empty input raised ValueError from min() -- now returns [];
    * all-equal values made diff == 0 and raised ZeroDivisionError -- now
      returns all zeros.
    """
    if not x:
        return []
    mi = min(x)
    ma = max(x)
    diff = ma - mi
    if diff == 0:
        return [0.0 for _ in x]
    return [(xx - mi) / diff for xx in x]
if __name__ == "__main__":
pass
由于路径的问题,在import上作了一些删除
在classify文件夹下是运行的数据
先运行parser产生解析PCAP之后的文件,放在./tmp/dt的目录下,statistics.json为各个类型流的数据
再运行train,先把最下面的模式设置为1,产生对statistics.json中包大小,包间隔进行处理产生特征,保存在instances文件夹的pkl中
最后是运行predict,预测
train_dt_1
设置的是8个特征的(前四个包大小,后四个包间隔)+决策树 。。。
特征与标签都保存到instances文件夹,
训练模型保存到models文件夹中
正确率可以达到90%
加了PCA降维之后为88.6%
train_dt_2
设置的是10个特征的(前五个包大小,后五个包间隔):最小值,最大值,平均值,方差,中位数 +决策树
特征与标签都保存到instances2文件夹,
训练模型保存到models2文件夹中
正确率可以达到91.73%
加了PCA降维之后为88.7%
train_dt_3
11个特征(前五个包大小,后五个包间隔):最小值,最大值,平均值,方差,中位数 ;每秒包的数量
预测函数:决策树
特征与标签都保存到instances3文件夹
训练模型保存到models3文件夹中
正确率可以达到91.7%
加了PCA降维之后为88.7%
instances装的是 8个特征的(前四个包大小,后四个包间隔)
pkl文件其实可以处理成Excel表格的形式,然后用MATLAB来跑
predit_1_1 K邻近+8特征 85%
predit_1_2 K邻近+10特征 86.59%
predit_2 支持向量机 + 8特征 25.9%
predict_3 随机森林 +8特征 93.5%
predict_3 随机森林 +10特征 94%
predict_3 随机森林 +11特征 93.5%
models文件下
model_predict文件夹装的是各种预测函数
dt1为 K邻近+8特征 85%
dt1_2 为 K邻近+10特征 86.59%
dt3_1 为 随机森林 +8特征 93.5%
dt3_2 为 随机森林 +10特征 94%
有后缀_9 说明是用9个窗口
from classify import *
from multithread_server import TcpServer, boot_server
from pool import *
import json
def load_flows():
    """Classifier worker loop: drain PREDECT_FLOWS, classify each buffered
    feature list, and publish results into CLASSIFY_RESULT (all shared state
    is guarded by the global ``mutex``)."""
    # Runs forever; intended to be started as a daemon thread.
    while True:
        # Disabled shutdown check (bare string kept verbatim from the original):
        """
        mutex.acquire()
        if len(STOP_CLASSIFY) == 1:
            print("程序结束")
            mutex.release()
            break
        mutex.release()
        """
        mutex.acquire()
        if PREDECT_FLOWS:
            # Classify every buffered entry; non-list entries are skipped.
            for feature in PREDECT_FLOWS:
                if type(feature) == list:
                    # Returns [five_tuple, class_name] for this flow.
                    predict_result = classify_flow_list(feature)
                    # De-duplicate before publishing to the shared result pool.
                    if predict_result not in CLASSIFY_RESULT:
                        CLASSIFY_RESULT.append(predict_result)
                        print("缓冲池中有: ", len(CLASSIFY_RESULT), "个已经分类好的结果,请取走")
                        print("分类结果为:", predict_result)
            # The whole buffer has been consumed; empty it.
            PREDECT_FLOWS.clear()
        mutex.release()
# Main thread: the other threads are daemons of it; it exits when the shared
# "close" flag is raised.
def main_threading():
    """Boot the TCP server and the classifier worker as daemon threads, then
    poll STOP_CLASSIFY until close_program() is called elsewhere."""
    import time  # local import: keeps this fix self-contained
    global STOP_CLASSIFY
    t_server = threading.Thread(target=boot_server)
    t_classify = threading.Thread(target=load_flows)
    # Daemon threads die with the main thread. Fix: assign the `daemon`
    # attribute instead of the long-deprecated setDaemon() method.
    t_server.daemon = True
    t_classify.daemon = True
    t_server.start()
    t_classify.start()
    while True:
        mutex.acquire()
        should_stop = len(STOP_CLASSIFY) == 1
        mutex.release()
        if should_stop:
            print("程序结束")
            break
        # Fix: the original busy-waited on the lock at 100% CPU; back off a bit.
        time.sleep(0.1)


if __name__ == "__main__":
    main_threading()
from sklearn.tree import DecisionTreeClassifier
from path_utils import get_prj_root
from pathlib import Path
import os
from common_utils import save_pkl, load_pkl, info #修改了
import numpy as np
import random
root_dir = get_prj_root()
model_dir = os.path.join(root_dir, "models")
class Classifier:
    """Abstract interface for flow classifiers; subclasses implement the four
    methods below (see DT and Dumb)."""
    # Commented-out constructor kept from the original:
    # def __init__(self,fn_name=None):
    # 	self.model=None
    # 	if fn_name is not None:
    # 		self.load_model(fn_name)
    def fit(self, data):
        """Train the model on `data` (structure defined by the subclass)."""
        raise NotImplementedError
    def predict(self, features):
        """Return predicted label(s) for `features`."""
        raise NotImplementedError
    def save_model(self, fn_name):
        """Persist the trained model under `fn_name`."""
        raise NotImplementedError
    def load_model(self, fn_name):
        """Restore a previously saved model from `fn_name`."""
        raise NotImplementedError
'''
min_pkt|max_pkt|mean_pkt|var_pkt
min_idt|max_idt|mean_idt|var_idt
'''
class DT(Classifier):
    """Decision-tree classifier wrapping sklearn's DecisionTreeClassifier,
    persisted via save_pkl/load_pkl under model_dir."""
    def __init__(self):
        super(DT, self).__init__()
        # Fresh, unfitted tree; replaced wholesale by load_model().
        self.model: DecisionTreeClassifier = DecisionTreeClassifier()
    def fit(self, data):
        """Train on data = (features, labels); both must have equal length."""
        assert len(data) == 2
        features = data[0]
        y = data[1]
        assert len(features) == len(y)
        info("# instances {}".format(len(features)))
        self.model.fit(features, y)
    def predict(self, features):
        """Return predicted label(s) for a batch of feature vectors."""
        # info("# instances {}".format(len(features)))
        return self.model.predict(features)
    def save_model(self, fn_name):
        """Pickle the model to model_dir/fn_name; no-op when model is None."""
        if self.model is None: return
        fn_name = os.path.join(model_dir, fn_name)
        save_pkl(fn_name, self.model)
    def load_model(self, fn_name):
        """Replace self.model with the pickle stored at model_dir/fn_name."""
        fn_name = os.path.join(model_dir, fn_name)
        self.model: DecisionTreeClassifier = load_pkl(fn_name)
class Dumb(Classifier):
    """Baseline classifier: ignores training and predicts 0 or 1 by coin flip."""

    def fit(self, data):
        """Training is a no-op for the dumb baseline."""
        pass

    def predict(self, features):
        """Return 1 or 0 with equal probability; *features* are ignored."""
        return 1 if random.random() >= 0.5 else 0

    def save_model(self, fn_name):
        """Nothing to persist."""
        pass

    def load_model(self, fn_name):
        """Nothing to load."""
        pass
if __name__ == '__main__':
    # Smoke test: fit a decision tree on the iris dataset and round-trip the
    # model through save_model/load_model.
    from sklearn.datasets import load_iris
    x, y = load_iris(return_X_y=True)
    model = DT()
    model.fit((x, y))
    model.save_model("test.pkl")
    model.load_model("test.pkl")
from socket import *
from threading import Thread
from random import choice
import json
import time
BUFFER_SIZE = 8192  # socket receive buffer size

# Test corpus for random sends. Each list entry is [ten_features, [flow_id]];
# assumed to be five packet-size stats followed by five inter-arrival stats
# (per the project notes) -- TODO confirm. The trailing strings exercise the
# server's command handling: junk ("hhh"), disconnect ("exit"), and result
# retrieval ("send").
data_list = [
    [[221.0, 1350.0, 640.0, 376.26798960315506, 543.0, 21257877.349853516,
      4793407917.022705, 1263437211.5135193, 2039103758.0566826, 119541525.84075928], ["1"]],
    [[171.0, 1460.0, 888.4, 514.1558518581695, 853.0, 11920.928955078125,
      17442424058.914185, 4385495722.293854, 7538530505.708111, 49773454.666137695], ["2"]],
    [[498.0, 1460.0, 1088.8, 455.13356281425786, 1460.0, 36954.87976074219,
      43512821.197509766, 11164724.826812744, 18679961.749486398, 554561.6149902344], ["3"]],
    [[498.0, 1460.0, 1088.8, 455.13356281425786, 1460.0, 30994.415283203125,
      44764995.57495117, 16246497.631072998, 18385686.144529935, 10095000.267028809], ["4"]],
    [[566.0, 1460.0, 1281.2, 357.59999999999997, 1460.0, 22172.927856445312,
      21413803.100585938, 5526959.8960876465, 9175244.715715846, 335931.77795410156], ["5"]],
    [[69.0, 629.0, 246.6, 205.1522361564699, 172.0, 190973.28186035156,
      457492113.1134033, 219977498.0545044, 220039359.48775682, 211113452.91137695], ["6"]],
    [[69.0, 278.0, 137.8, 78.91108920804477, 85.0, 144958.49609375,
      462785959.2437744, 210317969.3222046, 211827960.3342278, 189170479.7744751], ["7"]],
    [[302.0, 1460.0, 807.6, 537.1113850962387, 498.0, 12874.603271484375,
      205285072.32666016, 56649982.92922974, 86251862.02980827, 10650992.393493652], ["8"]],
    [[266.0, 1460.0, 892.8, 490.89730086852177, 780.0, 24080.276489257812,
      301223039.6270752, 76725006.10351562, 129634012.70485088, 2826452.2552490234], ["9"]],
    [[514.0, 1460.0, 1191.6, 371.89762032043177, 1460.0, 10013.580322265625,
      890016.5557861328, 236511.23046875, 377434.79666208074, 23007.39288330078], ["10"]],
    "hhh", "exit", "send", "send"]
class TcpClient(object):
    """TCP test client: one thread prints whatever the server sends, another
    sends a random data_list entry every two seconds."""
    def __init__(self, IP="127.0.0.1", Port=5002):
        """Remember the target address and create the TCP socket."""
        self.code_mode = "utf-8"  # encoding for all sends/receives
        self.IP = IP
        self.Port = Port
        self.my_socket = socket(AF_INET, SOCK_STREAM)
    def run(self):
        """Connect, then start the receiver and sender threads."""
        self.my_socket.connect((self.IP, self.Port))
        tr = Thread(target=self.recv_data)  # receiver thread
        ts = Thread(target=self.send_data)  # sender thread
        tr.start()
        ts.start()
    def recv_data(self):
        """Receive loop: print server messages until the server sends
        "close connecting" or the connection drops, then close the socket."""
        while True:
            data = self.my_socket.recv(BUFFER_SIZE).decode(self.code_mode)
            if data:
                if data == "close connecting":
                    print("已下线")
                    break
                else:
                    print("{}".format(data))
            else:
                # Empty read -> peer closed the connection.
                break
        self.my_socket.close()
    def send_data(self):
        """Send loop: every two seconds pick a random data_list entry; lists
        travel as JSON, command strings go through raw, "exit" ends the loop."""
        while True:
            data = choice(data_list)
            if type(data) == list:
                print("已发送五元组{}的特征".format(data[1][0]))
            else:
                print("发送命令: ", data)
            # Throttle: one message every two seconds, for testing.
            time.sleep(2)
            # Lists are JSON-encoded; plain strings are sent as-is.
            if type(data) == list:
                send_dat = json.dumps(data)
                self.my_socket.sendall(send_dat.encode(self.code_mode))
            elif type(data) == str:
                self.my_socket.sendall(data.encode(self.code_mode))
            else:
                continue
            # "exit" also terminates this sender loop.
            if data == "exit":
                break
def main():
    """Spin up a test client against the local server."""
    server_ip = "127.0.0.1"
    server_port = 12345
    client = TcpClient(server_ip, server_port)
    client.run()


if __name__ == "__main__":
    main()
\ No newline at end of file
from socket import *
from threading import Thread
from pool import *
import json
import struct
class TcpServer(object):
    """TCP server: accepts clients and spawns one thread per connection that
    parses fixed-size binary feature records into PREDECT_FLOWS."""
    def __init__(self, Port):
        """Bind to (SEVER_HOST, Port) and start listening."""
        self.code_mode = "utf-8"  # encoding for all text sends/receives
        self.server_socket = socket(AF_INET, SOCK_STREAM)
        # Allow quick restarts on the same port.
        self.server_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, True)
        self.server_socket.bind((SEVER_HOST, Port))
        self.server_socket.listen(100)
        print("服务器正在监听...")
    def run(self):
        """Accept loop: one recv_data thread per client; never returns."""
        while True:
            # Disabled shutdown check (bare string kept verbatim from the original):
            """
            mutex.acquire()
            if len(STOP_CLASSIFY) == 1:
                print("程序结束")
                mutex.release()
                break
            mutex.release()
            """
            client_socket, client_addr = self.server_socket.accept()
            print("{} 已上线".format(client_addr))
            # One handler thread per client connection.
            tr = Thread(target=self.recv_data, args=(client_socket, client_addr))
            # if !tr.is_alive():
            tr.start()
        self.server_socket.close()  # unreachable: the loop above never breaks
    def recv_data(self, client_socket, client_addr):
        """Per-client loop: read 77-byte binary records, unpack each into
        [features, [five_tuple_string]] and queue it for classification.

        Record layout per struct format '3i1f1i3l1d1l13B': ten numeric flow
        features, then 13 bytes holding two IPv4 addresses, two big-endian
        16-bit ports and the IP protocol number.
        """
        while True:
            # Disabled shutdown check (bare string kept verbatim from the original):
            """
            mutex.acquire()
            if len(STOP_CLASSIFY) == 1:
                print("程序结束")
                mutex.release()
                break
            mutex.release()
            """
            recv = client_socket.recv(77)
            # a = []
            data = []
            if recv:
                # print('服务端收到客户端发来的消息:%s' % (recv))
                a = struct.unpack('3i1f1i3l1d1l13B', recv)
                #print(a)
                data = [[], []]
                idStr = ""
                mystr = a[10:]  # the 13 trailing bytes: addresses/ports/protocol
                # First ten unpacked fields are the feature vector (as floats).
                for i in range(10):
                    data[0].append(a[i]+0.0)
                # Bytes 0-7: two dotted-quad IPv4 addresses, space-separated.
                for i in range(8):
                    if i % 4 == 3:
                        idStr += str(mystr[i])+" "
                    else:
                        idStr += str(mystr[i]) + "."
                # Bytes 8-11: big-endian source and destination ports.
                idStr += str(int(mystr[8])*256+int(mystr[9])) + " "
                idStr += str(int(mystr[10])*256+int(mystr[11])) + " "
                # Byte 12: IP protocol number (6 = TCP, everything else UDP).
                if int(mystr[12]) == 6:
                    idStr += "TCP"
                else:
                    idStr += "UDP"
                data[1].append(idStr)
            # Legacy JSON receive path, kept from the original:
            # data = client_socket.recv(BUFFER_SIZE).decode(self.code_mode)
            # if len(data) > 8:
            # data = json.loads(data)
            if data:
                # NOTE(review): in the binary path `data` is always a list, so
                # the string comparisons below can never match -- they date
                # from the legacy JSON/string protocol above. Confirm before
                # removing.
                if data == "exit":
                    # Tell the client to shut down, then end this handler.
                    client_socket.send("close connecting".encode(self.code_mode))
                    print("{} 已下线".format(client_addr))
                    break
                # "close": flip the global stop flag and end this handler.
                elif data == "close":
                    mutex.acquire()
                    close_program()
                    mutex.release()
                    break
                # "send": flush all buffered classification results to the client.
                elif data == "send":
                    mutex.acquire()
                    if CLASSIFY_RESULT:
                        for res in CLASSIFY_RESULT:
                            client_socket.send(str(res).encode(self.code_mode))
                        CLASSIFY_RESULT.clear()
                    else:
                        client_socket.send("暂无分类结果".encode(self.code_mode))
                    mutex.release()
                else:
                    # Queue the parsed record for the classifier thread.
                    mutex.acquire()
                    if data not in PREDECT_FLOWS:
                        PREDECT_FLOWS.append(data)
                    mutex.release()
                    print("{}:发送特征的五元组为{}".format(client_addr, data[1]))
                    print("缓冲池中有: ", len(PREDECT_FLOWS), " 个待分类的特征,请分类")
                    # client_socket.send(data.encode(self.code_mode))
            else:
                # Empty read -> client closed the connection.
                print("{} 已下线".format(client_addr))
                break
        client_socket.close()
def boot_server():
    """Create the TCP server on the configured SEVER_PORT and serve forever."""
    server = TcpServer(SEVER_PORT)
    server.run()


if __name__ == "__main__":
    boot_server()
from socket import *
from threading import Thread
from random import choice
import json
import time
BUFFER_SIZE = 8192  # socket receive buffer size

# Test corpus for random sends (duplicate of the other client module). Each
# list entry is [ten_features, [flow_id]]; the trailing strings exercise the
# server's command handling ("hhh" junk, "exit", "send").
data_list = [
    [[221.0, 1350.0, 640.0, 376.26798960315506, 543.0, 21257877.349853516,
      4793407917.022705, 1263437211.5135193, 2039103758.0566826, 119541525.84075928], ["1"]],
    [[171.0, 1460.0, 888.4, 514.1558518581695, 853.0, 11920.928955078125,
      17442424058.914185, 4385495722.293854, 7538530505.708111, 49773454.666137695], ["2"]],
    [[498.0, 1460.0, 1088.8, 455.13356281425786, 1460.0, 36954.87976074219,
      43512821.197509766, 11164724.826812744, 18679961.749486398, 554561.6149902344], ["3"]],
    [[498.0, 1460.0, 1088.8, 455.13356281425786, 1460.0, 30994.415283203125,
      44764995.57495117, 16246497.631072998, 18385686.144529935, 10095000.267028809], ["4"]],
    [[566.0, 1460.0, 1281.2, 357.59999999999997, 1460.0, 22172.927856445312,
      21413803.100585938, 5526959.8960876465, 9175244.715715846, 335931.77795410156], ["5"]],
    [[69.0, 629.0, 246.6, 205.1522361564699, 172.0, 190973.28186035156,
      457492113.1134033, 219977498.0545044, 220039359.48775682, 211113452.91137695], ["6"]],
    [[69.0, 278.0, 137.8, 78.91108920804477, 85.0, 144958.49609375,
      462785959.2437744, 210317969.3222046, 211827960.3342278, 189170479.7744751], ["7"]],
    [[302.0, 1460.0, 807.6, 537.1113850962387, 498.0, 12874.603271484375,
      205285072.32666016, 56649982.92922974, 86251862.02980827, 10650992.393493652], ["8"]],
    [[266.0, 1460.0, 892.8, 490.89730086852177, 780.0, 24080.276489257812,
      301223039.6270752, 76725006.10351562, 129634012.70485088, 2826452.2552490234], ["9"]],
    [[514.0, 1460.0, 1191.6, 371.89762032043177, 1460.0, 10013.580322265625,
      890016.5557861328, 236511.23046875, 377434.79666208074, 23007.39288330078], ["10"]],
    "hhh", "exit", "send", "send"]
class TcpClient(object):
    """TCP test client (duplicate module): one thread prints whatever the
    server sends, another sends a random data_list entry every two seconds."""
    def __init__(self, IP="127.0.0.1", Port=5002):
        """Remember the target address and create the TCP socket."""
        self.code_mode = "utf-8"  # encoding for all sends/receives
        self.IP = IP
        self.Port = Port
        self.my_socket = socket(AF_INET, SOCK_STREAM)
    def run(self):
        """Connect, then start the receiver and sender threads."""
        self.my_socket.connect((self.IP, self.Port))
        tr = Thread(target=self.recv_data)  # receiver thread
        ts = Thread(target=self.send_data)  # sender thread
        tr.start()
        ts.start()
    def recv_data(self):
        """Receive loop: print server messages until the server sends
        "close connecting" or the connection drops, then close the socket."""
        while True:
            data = self.my_socket.recv(BUFFER_SIZE).decode(self.code_mode)
            if data:
                if data == "close connecting":
                    print("已下线")
                    break
                else:
                    print("{}".format(data))
            else:
                # Empty read -> peer closed the connection.
                break
        self.my_socket.close()
    def send_data(self):
        """Send loop: every two seconds pick a random data_list entry; lists
        travel as JSON, command strings go through raw, "exit" ends the loop."""
        while True:
            data = choice(data_list)
            if type(data) == list:
                print("已发送五元组{}的特征".format(data[1][0]))
            else:
                print("发送命令: ", data)
            # Throttle: one message every two seconds, for testing.
            time.sleep(2)
            # Lists are JSON-encoded; plain strings are sent as-is.
            if type(data) == list:
                send_dat = json.dumps(data)
                self.my_socket.sendall(send_dat.encode(self.code_mode))
            elif type(data) == str:
                self.my_socket.sendall(data.encode(self.code_mode))
            else:
                continue
            # "exit" also terminates this sender loop.
            if data == "exit":
                break
def main():
    """Spin up a test client against the local server."""
    server_ip = "127.0.0.1"
    server_port = 12345
    client = TcpClient(server_ip, server_port)
    client.run()


if __name__ == "__main__":
    main()
\ No newline at end of file
from pathlib import Path
import os.path
def get_prj_root():
    """Return the absolute path of the current working directory, which this
    project treats as its root (scripts are expected to run from the top)."""
    return os.getcwd()


if __name__ == '__main__':
    print(get_prj_root())
import threading
"""
该文件定义使用的常量 和 进程锁
"""
# Server bind address and port ("SEVER" sic -- renaming would break importers).
SEVER_HOST = "127.0.0.1"  # loopback; originally used to test against Windows
SEVER_PORT = 12345
# Socket receive buffer size.
BUFFER_SIZE = 8192
# Shared buffer: feature lists received from clients, awaiting classification.
PREDECT_FLOWS = []
# Shared buffer: finished classification results awaiting pickup.
CLASSIFY_RESULT = []
# Shutdown flag: a length of 1 tells every polling loop to exit.
STOP_CLASSIFY = []
def close_program():
    """Signal shutdown by growing STOP_CLASSIFY to length 1."""
    global STOP_CLASSIFY
    STOP_CLASSIFY.append(1)
# Global lock guarding the shared lists above (a thread lock, despite the
# original comment calling it a process lock).
mutex = threading.Lock()
# Sample payload for manual testing: [features, [five_tuple_string]].
test = [[221.0, 1350.0, 640.0, 376.26798960315506, 543.0,
         21257877.349853516, 4793407917.022705, 1263437211.5135193,
         2039103758.0566826, 119541525.84075928], ["sip sport dip dport protocol"]]
"""
预测函数:随机森林
每次运行前,检查:
四个需要修改的地方,命名是否正确
最后的运行模式是否正确
"""
from common_utils import * # 修改了
from argparse import ArgumentParser
from collections import namedtuple
from typing import List, Dict
from datetime import datetime
from path_utils import get_prj_root
import numpy as np
# from sklearn.externals import joblib
import joblib
from sklearn.ensemble import RandomForestClassifier # 训练模型
random.seed(datetime.now())  # nondeterministic seeding; use a fixed int to reproduce runs
model_dir = os.path.join(get_prj_root(), "classify/models")  # folder holding trained models
predict_model_pkl = os.path.join(model_dir, "dt2.pkl")  # model version: edit only here
Instance = namedtuple("Instance", ["features", "label"])  # one labeled feature vector
# Per-class raw data directories produced by the pcap parser.
dirs = {
    "video": "./tmp/dt/video",
    "iot": "./tmp/dt/iot",
    "voip": "./tmp/dt/voip",
    "AR": "./tmp/dt/AR",
}
instances_dir = os.path.join(get_prj_root(), "classify/instances")  # instance pickles
def train_and_predict():
    """Train a RandomForest on the per-class instance pickles, evaluate it on
    a held-out split, persist the fitted model, and return test accuracy.

    Returns:
        float: fraction of correctly classified held-out instances.
    """
    # Each pickle holds Instance namedtuples for one traffic class.
    iot = load_pkl(os.path.join(instances_dir, "iot.pkl"))
    videos = load_pkl(os.path.join(instances_dir, "video.pkl"))
    voip = load_pkl(os.path.join(instances_dir, "voip.pkl"))
    AR = load_pkl(os.path.join(instances_dir, "AR.pkl"))
    # Sanity-check the label encoding: 0=video, 1=iot, 2=voip, 3=AR.
    for inst in videos:
        assert inst.label == 0
    for inst in iot:
        assert inst.label == 1
    for inst in voip:
        assert inst.label == 2
    for inst in AR:
        assert inst.label == 3
    debug("# iot instances {}".format(len(iot)))
    debug("# video instances {}".format(len(videos)))
    debug("# VOIP instances {}".format(len(voip)))
    debug("# AR instances {}".format(len(AR)))
    # Shuffle each class so the 70/30 split below is random.
    random.shuffle(voip)
    random.shuffle(iot)
    random.shuffle(videos)
    random.shuffle(AR)
    # NOTE(review): every class reuses the video-derived split sizes, so the
    # other classes are sliced to video's train/test counts -- confirm intended.
    n_video_train = int(len(videos) * 0.7)
    video_train = videos[:n_video_train]
    video_test = videos[n_video_train:]
    iot_train = iot[:n_video_train]
    iot_test = iot[len(iot) - len(video_test):]
    voip_train = voip[:n_video_train]
    voip_test = voip[len(voip) - len(video_test):]
    AR_train = AR[:n_video_train]
    AR_test = AR[len(AR) - len(video_test):]
    info("#video train {}".format(len(video_train)))
    info("#iot train {}".format(len(iot_train)))
    info("#voip train {}".format(len(voip_train)))
    info("#AR train {}".format(len(AR_train)))
    train = iot_train + video_train + voip_train + AR_train
    random.shuffle(train)
    train_x = [x.features for x in train]
    train_y = [x.label for x in train]
    # Test split is roughly balanced 1:1:1:1 across classes.
    info("#video test {}".format(len(video_test)))
    info("#iot test {}".format(len(iot_test)))
    info("#voip test {}".format(len(voip_test)))
    info("#AR test {}".format(len(AR_test)))
    test = video_test + iot_test + voip_test + AR_test
    random.shuffle(test)
    test_x = [t.features for t in test]
    test_y = [t.label for t in test]
    # Fit and evaluate; oob_score adds an out-of-bag accuracy estimate.
    predict_model = RandomForestClassifier(oob_score=True)
    predict_model.fit(train_x, train_y)
    predicts = predict_model.predict(test_x)
    # Persist the fitted model. Bug fix: the original also computed
    # fn_name = os.path.join(model_dir, predict_model_pkl) -- a no-op join of
    # an already-absolute path that was never used.
    joblib.dump(predict_model, predict_model_pkl)
    # Accuracy on the held-out split.
    correct = sum(1 for pred, truth in zip(predicts, test_y)
                  if int(pred) == int(truth))
    return correct / len(test_x)


# Legacy alternative persistence calls kept for reference:
# save_pkl(predict_model_pkl, knn.model)
# knn.save_model(dt_model_pkl)  # persist model
if __name__ == '__main__':
    # Average test accuracy over ten independent train/evaluate rounds.
    rounds = 10
    total = 0
    for _ in range(rounds):
        total = total + train_and_predict()
    print(total / rounds)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18-5-21 下午3:44
# @Author : LK
# @File : 进度条.py
# @Software: PyCharm
import sys
import time
def process_bar(precent, width=50):
    """Render an in-place textual progress bar on stdout.

    precent: fraction complete, expected in [0, 1].
    width:   total bar width in characters (default 50).

    The line ends with '\r' (no newline) so successive calls overwrite
    the same terminal line; output is flushed immediately.
    """
    filled = int(precent * width)
    blanks = int(width - filled)
    bar = '#' * filled + ' ' * blanks
    # %d truncates the percentage to an integer, matching the bar granularity.
    print('[%s]%d%%' % (bar, precent * 100), file=sys.stdout, flush=True, end='\r')
# for i in range(21):
# precent = i/20
# process_bar(precent)
# time.sleep(0.2)
"""
10个特征的(前五个包大小,后五个包间隔):最小值,最大值,平均值,方差,中位数
每次运行前,检查:
四个需要修改的地方,命名是否正确
最后的运行模式是否正确
这个文件用于把pcap解析的文件生成特征和标签的形式 并且5个包一组
"""
from common_utils import * # 修改了
from argparse import ArgumentParser
from collections import namedtuple
from typing import List, Dict
from path_utils import get_prj_root
from model import DT # 修改了
from datetime import datetime
from path_utils import get_prj_root
import numpy as np
from sklearn.decomposition import PCA
import time
# start counting time
start = time.time()
random.seed(datetime.now())
# get the path of the models
model_dir = os.path.join(get_prj_root(), "classify/models")  # directory holding trained model pickles
dt_model_pkl = os.path.join(model_dir, "dt2_9.pkl")  # model version: only this filename needs editing per run
Instance = namedtuple("Instance", ["features", "label"])  # one sample: feature vector + integer class label
win_size = 5  # window size: number of packets taken from each flow trace
limit = 100000  # maximum number of flows processed per flow type
# the path
# iot: IoT flows, video: video flows, voip: audio flows, AR: AR flows (HD video used as a stand-in)
dirs = {
    "video": "./tmp/dt/video",
    "iot": "./tmp/dt/iot",
    "voip": "./tmp/dt/voip",
    "AR": "./tmp/dt/AR",
}
instances_dir = os.path.join(get_prj_root(), "classify/instances")  # output directory for generated instances
# 获取特征
def get_median(data):
    """Return the median of a list of numbers.

    For an odd-length list this returns the middle element (as a float,
    since it averages the element with itself); for an even-length list,
    the mean of the two middle elements.

    Unlike the previous version, the input list is NOT sorted in place,
    so the caller's ordering is preserved.

    Raises:
        ValueError: if `data` is empty.
    """
    if not data:
        raise ValueError("get_median() requires a non-empty list")
    ordered = sorted(data)
    half = len(ordered) // 2
    # ~half == -(half + 1): for odd lengths both indices hit the middle
    # element; for even lengths they hit the two middle elements.
    return (ordered[half] + ordered[~half]) / 2
def gen_single_instance(dir_name, flow, flow_type):
    """Build one labelled Instance from a single flow's trace files.

    Reads up to `win_size` values from the flow's "idt" and "ps" files
    (one float per line), computes five summary statistics for each
    series, and concatenates them (ps statistics first, then idt) into
    a 10-element feature vector.

    NOTE(review): the original comments labelled the "idt" file as packet
    sizes and the "ps" file as packet intervals, which looks swapped
    relative to the field names — confirm against the trace generator.

    Args:
        dir_name:  directory containing the flow's trace files.
        flow:      dict with at least "idt" and "ps" filename entries.
        flow_type: one of "video", "iot", "voip", "AR".

    Returns:
        Instance(features, label), or None when a series is empty after
        filtering out non-positive / negative values.

    Raises:
        Exception: if `flow_type` is not one of the supported types.
    """
    def extract_features(raw_features: List[float]):
        # Five summary statistics over the window:
        # min, max, mean, standard deviation, median.
        raw_features = [r for r in raw_features if int(r) >= 0]
        return [
            min(raw_features),
            max(raw_features),
            sum(raw_features) / len(raw_features),
            np.std(raw_features),
            get_median(raw_features),
        ]

    def read_window(file_name):
        # Read up to win_size non-empty lines as floats.
        # BUG FIX: the original kept empty lines for the idt file
        # (`len(l) > -1`), which made float('') raise ValueError.
        with open(file_name, 'r') as fp:
            lines = [l.strip() for l in fp.readlines()]
        lines = [l for l in lines if len(l) > 0]
        return [float(l) for l in lines[:win_size]]

    idt_file = os.path.join(dir_name, flow["idt"])
    ps_file = os.path.join(dir_name, flow["ps"])
    idts = read_window(idt_file)
    ps = read_window(ps_file)

    # Drop non-positive ps values and negative idt values; a flow whose
    # window is empty after filtering cannot yield a usable instance.
    ps = [p for p in ps if p > 0]
    if len(ps) == 0:
        print(flow["ps"])
        return None
    idts = [i for i in idts if i >= 0]
    if len(idts) == 0:
        return None

    features = []
    features.extend(extract_features(ps))
    features.extend(extract_features(idts))

    labels = {"video": 0, "iot": 1, "voip": 2, "AR": 3}
    if flow_type not in labels:
        err("Unsupported flow type")
        raise Exception("Unsupported flow type")
    return Instance(features=features, label=labels[flow_type])
def generate():
    """Generate labelled instances for every flow type and pickle them.

    For each configured flow directory, loads statistics.json, keeps the
    `limit` flows with the most packets, converts each flow into an
    Instance via gen_single_instance(), drops unusable flows (None), and
    saves the result to "<instances_dir>/<flow_type>.pkl".
    """
    instances_dir = os.path.join(get_prj_root(), "classify/instances")
    for flow_type, dirname in dirs.items():
        stats_fn = os.path.join(dirname, "statistics.json")  # per-directory flow statistics
        debug(stats_fn)
        statistics = load_json(stats_fn)
        debug("#flows {}".format(statistics["count"]))
        flows: List = statistics["flows"]
        # BUG FIX: the original called sorted(flows, ...) and discarded the
        # result, so flows were truncated in arbitrary order. Sort in place
        # by descending packet count before applying the limit.
        flows.sort(key=lambda f: -f["num_pkt"])
        flows = flows[:limit]
        instances = [gen_single_instance(dirname, f, flow_type) for f in flows]
        instances = [i for i in instances if i is not None]
        debug("#{} instances {}".format(flow_type, len(instances)))
        save_pkl(os.path.join(instances_dir, "{}.pkl".format(flow_type)), instances)
if __name__ == '__main__':
    parser = ArgumentParser()
    print("running mode\n"
          "1. generate instances\n"
          "2. train dt\n")
    parser.add_argument("--mode", type=int, default=1)  # default selects instance generation
    args = parser.parse_args()
    mode = int(args.mode)
    if mode == 1:
        generate()
    else:
        # The banner advertises mode 2 (train dt) but this script does not
        # implement it; say so instead of silently doing nothing.
        print("mode {} is not implemented in this script".format(mode))
    end = time.time()
    print("程序运行时间:%.2f秒" % (end - start))
# vim swp
*.swp
# log and exec file
cmake_targets/log/
cmake_targets/*/build/
cmake_targets/ran_build/
cmake_targets/nas_sim_tools/build/
log/
lte_build_oai/
targets/bin/
# vscode
.vscode
# Tags for vim/global
GPATH
GRTAGS
GTAGS
tags
# Default ignored files
/shelf/
/workspace.xml
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
# Editor-based HTTP Client requests
/httpRequests/
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="PyPep8Inspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<option name="ignoredErrors">
<list>
<option value="E722" />
<option value="W191" />
</list>
</option>
</inspection_tool>
<inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<option name="ignoredErrors">
<list>
<option value="N802" />
<option value="N803" />
<option value="N806" />
</list>
</option>
</inspection_tool>
</profile>
</component>
\ No newline at end of file
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="JavaScriptSettings">
<option name="languageLevel" value="ES6" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8" project-jdk-type="Python SDK" />
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/openairinterface5g.iml" filepath="$PROJECT_DIR$/.idea/openairinterface5g.iml" />
</modules>
</component>
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="PyDocumentationSettings">
<option name="format" value="PLAIN" />
<option name="myDocStringFormat" value="Plain" />
</component>
<component name="TemplatesService">
<option name="TEMPLATE_CONFIGURATION" value="Jinja2" />
<option name="TEMPLATE_FOLDERS">
<list>
<option value="$MODULE_DIR$/cmake_targets/autotests/templates" />
</list>
</option>
</component>
</module>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>
\ No newline at end of file
# RELEASE NOTES: #
## [v1.2.1](https://gitlab.eurecom.fr/oai/openairinterface5g/-/tags/v1.2.1) -> February 2020. ##
* Bug fix for mutex lock for wake-up signal
## [v1.2.0](https://gitlab.eurecom.fr/oai/openairinterface5g/-/tags/v1.2.0) -> January 2020. ##
This version adds the following implemented features:
* LTE-M : eNB support for Mode A repetitions
- PUSCH CE - 8 Repetitions
* Improved CDRX implementation for monolithic eNB
* Experimental eMBMS support (now also on eNB side)
* Experimental MCE - Multicast Coordination Entity
* Bug fixes
This version also has an improved code quality:
* Better Test Coverage in Continuous Integration:
- Initial framework to do long-run testing at R2LAB
## [v1.1.1](https://gitlab.eurecom.fr/oai/openairinterface5g/-/tags/v1.1.1) -> November 2019. ##
- Bug fix in the TDD Fair Round-Robin scheduler
## [v1.1.0](https://gitlab.eurecom.fr/oai/openairinterface5g/-/tags/v1.1.0) -> July 2019. ##
This version adds the following implemented features:
* Experimental support of LTE-M
- Single LTE-M UE attachment, legacy-LTE UE attachment is disabled
* X2 interface and handover (also X2-U interface)
- In FDD and TDD
* CU/DU split (F1 interface)
- Tested only in FDD
* CDRX
- Tested only in FDD
* Experimental eMBMS support (only on UE side)
* Experimental multi-RRU support
- Tested only in TDD
This version has an improved code quality:
* Simplification of the Build System
- A single build includes all full-stack simulators, S1/noS1 modes and one HW platform (such as USRP, BladeRF, ...)
* TUN interface is now used as default for the data plane
- for UE, eNB-noS1 and UE-noS1
* Code Cleanup
* Better Static Code Analysis:
- Limited number of errors in cppcheck
- Important Decrease on high Impact errors in CoverityScan
* Better Test Coverage in Continuous Integration:
- TM2, CDRX, IF4.5, F1
- OAI UE is tested in S1 and noS1 modes with USRP board
- Multi-RRU TDD mode
- X2 Handover in FDD mode
## [v1.0.3](https://gitlab.eurecom.fr/oai/openairinterface5g/-/tags/v1.0.3) -> June 2019. ##
- Bug fix for LimeSuite v19.04.0 API
## [v1.0.2](https://gitlab.eurecom.fr/oai/openairinterface5g/-/tags/v1.0.2) -> February 2019. ##
- Full OAI support for 3.13.1 UHD
## [v1.0.1](https://gitlab.eurecom.fr/oai/openairinterface5g/-/tags/v1.0.1) -> February 2019. ##
- Bug fix for the UE L1 simulator.
## [v1.0.0](https://gitlab.eurecom.fr/oai/openairinterface5g/-/tags/v1.0.0) -> January 2019. ##
This version first implements the architectural split described in the following picture.
![Block Diagram](./doc/images/oai_lte_enb_func_split_arch.png)
* Only FAPI, nFAPI and IF4.5 interfaces are implemented.
* Repository tree structure prepares future integrations of features such as LTE-M, nbIOT or 5G-NR.
* Preliminary X2 support has been implemented.
* S1-flex has been introduced.
* New tools: config library, telnet server, ...
* A lot of bugfixes and a proper automated Continuous Integration process validates contributions.
Old Releases:
* v0.6.1 -> Mostly bugfixes. This is the last version without NFAPI.
* v0.6 -> RRH functionality, UE greatly improved, better TDD support, a lot of bugs fixed.
- WARNING: oaisim in PHY abstraction mode does not work, you need to use v0.5.2 for that.
* v0.5.2 -> Last version with old code for oaisim (abstraction mode works)
* v0.5.1 -> Merge of bugfix-137-uplink-fixes. It includes stability fixes for eNB
* v0.5 -> Merge of enhancement-10-harmony-lts. It includes fixes for Ubuntu 16.04 support
* v0.4 -> Merge of feature-131-new-license. It closes issue#131 and changes the license to OAI Public License V1.0
* v0.3 -> Last stable commit on develop branch before the merge of feature-131-new-license. This is the last commit with GPL License
* v0.2 -> Merge of enhancement-10-harmony to include NGFI RRH + New Interface for RF/BBU
* v0.1 -> Last stable commit on develop branch before enhancement-10-harmony
# Contributing to OpenAirInterface #
We want to make contributing to this project as easy and transparent as possible.
1. Sign and return a Contributor License Agreement to OAI team.
2. We recommend that you provide us with a professional or student email address.
3. Register on [Eurecom GitLab Server](https://gitlab.eurecom.fr/users/sign_in)
4. Provide the OAI team with the **username** of this account by email (mailto:contact@openairinterface.org); we will give you the developer rights on this repository.
5. The policies are described in these wiki pages: [OAI Policies](https://gitlab.eurecom.fr/oai/openairinterface5g/wikis/oai-policies-home)
- PLEASE DO NOT FORK the OAI repository on your own Eurecom GitLab account. It just eats up space on our servers.
- You can fork onto another hosting system. But we will NOT accept a merge request from a forked repository.
* This decision was made for the license reasons.
* The Continuous Integration will reject your merge request.
- All merge requests SHALL have `develop` branch as target branch.
## Coding Styles ##
There are described [here](https://gitlab.eurecom.fr/oai/openairinterface5g/wikis/guidelines/guidelines-home)
## License ##
By contributing to OpenAirInterface, you agree that your contributions will be licensed under the LICENSE file in the root directory of this source tree.
This diff is collapsed.
The source code of openairinterface5g is distributed under **OAI Public License V1.1**.
For more details of the license, refer to [LICENSE](LICENSE) file in the same directory.
However, the source code also contains third party software that is acknowledged here for reference.
## Credits for LFDS user space source code located in folder openair2/UTILS/LFDS/ ##
See on [liblfds website](https://liblfds.org/) the license section.
<pre>
"There is no license. You are free to use this software in any way, for any purpose. Go forth and create wealth!
If however for legal reasons a licence is required, the license of your choice will be granted."
</pre>
## Credits for source code common/utils/collection/queue.h: ##
The Regents of the University of California: BSD 3-Clause Licence.
## Credits for source code common/utils/collection/tree.h: ##
Niels Provos <provos@citi.umich.edu>: BSD 2-Clause Licence.
## Credits for source code openair3/GTPV1-U/nw-gtpv1u: ##
Amit Chawre <http://www.amitchawre.net/contact.html>: BSD 2-Clause Licence.
# OpenAirInterface License #
OpenAirInterface is under OpenAirInterface Software Alliance license.
* [OAI License Model](http://www.openairinterface.org/?page_id=101)
* [OAI License v1.1 on our website](http://www.openairinterface.org/?page_id=698)
It is distributed under **OAI Public License V1.1**.
The license information is distributed under [LICENSE](LICENSE) file in the same directory.
Please see [NOTICE](NOTICE.md) file for third party software that is included in the sources.
# Where to Start #
* [The implemented features](./doc/FEATURE_SET.md)
* [How to build](./doc/BUILD.md)
* [How to run the modems](./doc/RUNMODEM.md)
# RAN repository structure #
The OpenAirInterface (OAI) software is composed of the following parts:
<pre>
openairinterface5g
├── ci-scripts : Meta-scripts used by the OSA CI process. Contains also configuration files used day-to-day by CI.
├── cmake_targets : Build utilities to compile (simulation, emulation and real-time platforms), and generated build files.
├── common : Some common OAI utilities, other tools can be found at openair2/UTILS.
├── doc : Contains an up-to-date feature set list and starting tutorials.
├── executables : Top-level executable source files.
├── LICENSE : License file.
├── maketags : Script to generate emacs tags.
├── nfapi : Contains the NFAPI code. A local Readme file provides more details.
├── openair1 : 3GPP LTE Rel-10/12 PHY layer / 3GPP NR Rel-15 layer. A local Readme file provides more details.
│   ├── PHY
│   ├── SCHED
│   ├── SCHED_NBIOT
│   ├── SCHED_NR
│   ├── SCHED_NR_UE
│   ├── SCHED_UE
│   └── SIMULATION : PHY RF simulation.
├── openair2 : 3GPP LTE Rel-10 RLC/MAC/PDCP/RRC/X2AP + LTE Rel-14 M2AP implementation. Also 3GPP NR Rel-15 RLC/MAC/PDCP/RRC/X2AP.
│   ├── COMMON
│   ├── DOCS
│   ├── ENB_APP
│   ├── F1AP
│   ├── GNB_APP
│   ├── LAYER2/RLC/ : with the following subdirectories: UM_v9.3.0, TM_v9.3.0, and AM_v9.3.0.
│   ├── LAYER2/PDCP/PDCP_v10.1.0
│   ├── M2AP
│   ├── MCE_APP
│   ├── NETWORK_DRIVER
│   ├── NR_PHY_INTERFACE
│   ├── NR_UE_PHY_INTERFACE
│   ├── PHY_INTERFACE
│   ├── RRC
│   ├── UTIL
│   └── X2AP
├── openair3 : 3GPP LTE Rel10 for S1AP, NAS GTPV1-U for both ENB and UE.
│   ├── COMMON
│   ├── DOCS
│   ├── GTPV1-U
│   ├── M3AP
│   ├── MME_APP
│   ├── NAS
│   ├── S1AP
│   ├── SCTP
│   ├── SECU
│   ├── TEST
│   ├── UDP
│   └── UTILS
└── targets : Top-level wrappers for unitary simulation for PHY channels, system-level emulation (eNB-UE with and without S1), and realtime eNB and UE and RRH GW.
</pre>
r*.raw
enb_*.log
ue_*.log
ping_*.*
iperf_*.*
phones_list.txt
modules_list.txt
test_results*.html
pMain*
__pycache__
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
# For lines like "inet 192.168.1.10/24 brd ..." (from `ip addr` output),
# print "interfaceToUse=<last field>done" — $NF is the final field of the
# matching line.
/inet.*brd/{print "interfaceToUse="$NF"done"}
# Duplicate the UE0 configuration stanza for additional UEs.
# Expects `num_ues` to be supplied on the awk command line (-v num_ues=N).
# The whole input is echoed unchanged; everything from the first line
# containing "UE0" onward is also captured, then replayed once per extra
# UE with "UE0" rewritten to "UE<idx>" and the IMSI MSIN adjusted.
BEGIN{lineIdx=0;captureUEDesc=0}
{
# Start capturing once the UE0 stanza begins; capture runs to end of input.
if ($0 ~/UE0/) {
captureUEDesc = 1
}
if (captureUEDesc == 1) {
captureLine[lineIdx] = $0
lineIdx = lineIdx + 1
}
# Pass every input line through unchanged.
print $0
}
END {
# Replay the captured stanza for UE1 .. num_ues-1.
for (ueIdx = 1; ueIdx < num_ues; ueIdx++) {
print ""
for (k = 0; k < lineIdx; k++) {
if (captureLine[k] ~/UE0/) {
mLine = captureLine[k]
gsub("UE0", "UE"ueIdx, mLine)
print mLine
} else {
if (captureLine[k] ~/MSIN=/) {
# Give each UE a unique MSIN: 00001111 becomes 00001112, 00001113, ...
mLine = captureLine[k]
MSIN=sprintf("%08d", 1111+int(ueIdx))
gsub("00001111", MSIN, mLine)
print mLine
} else {
print captureLine[k]
}
}
}
}
}
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment