Commit 9fa556ff authored by gabrielC

Merge branch 'Enhancement-142-OAI_UE_autotest_framework' into develop_integration_w50

parents ca078ff5 af68b397
......@@ -75,7 +75,7 @@ class openair(core):
return (stdout, stderr)
def connect(self, username, password, prompt='PEXPECT_OAI'):
max_retries=100
max_retries=10
i=0
while i <= max_retries:
self.prompt1 = prompt
......@@ -97,9 +97,9 @@ class openair(core):
# need to look for twice the string of the prompt
self.oai.prompt()
self.oai.prompt()
self.oai.sendline('uptime')
self.oai.prompt()
print self.oai.before
# self.oai.sendline('uptime')
# self.oai.prompt()
# print self.oai.before
break
except Exception, e:
error=''
......@@ -171,7 +171,7 @@ class openair(core):
sys.exit(1)
def disconnect(self):
print 'disconnecting the ssh connection to ' + self.address + '\n'
# print 'disconnecting the ssh connection to ' + self.address + '\n'
self.oai.send('exit')
# self.cancel()
......
diff --git a/cmake_targets/CMakeLists.txt b/cmake_targets/CMakeLists.txt
index 07d92a1..b6a02d1 100644
--- a/cmake_targets/CMakeLists.txt
+++ b/cmake_targets/CMakeLists.txt
@@ -235,6 +235,7 @@ add_boolean_option(DEBUG_OMG False "???")
add_boolean_option(XFORMS False "This adds the possibility to see the signal oscilloscope")
add_boolean_option(PRINT_STATS False "This adds the possibility to see the status")
add_boolean_option(T_TRACER False "Activate the T tracer, a debugging/monitoring framework" )
+add_boolean_option(UE_AUTOTEST_TRACE False "Activate UE autotest specific logs")
add_boolean_option(DEBUG_CONSOLE False "makes debugging easier, disables stdout/stderr buffering")
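Note on the mechanics: add_boolean_option registers UE_AUTOTEST_TRACE as a build switch that defaults to False, so ordinary builds are unaffected. Like the other boolean options in this file, it presumably surfaces as a C preprocessor definition, which is what the #if UE_AUTOTEST_TRACE guards in the C hunks below test (an assumption about add_boolean_option's mechanics; the macro body is not shown in this diff).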
diff --git a/openair1/PHY/LTE_TRANSPORT/initial_sync.c b/openair1/PHY/LTE_TRANSPORT/initial_sync.c
index 663978e..b77cb1b 100644
--- a/openair1/PHY/LTE_TRANSPORT/initial_sync.c
+++ b/openair1/PHY/LTE_TRANSPORT/initial_sync.c
@@ -476,6 +476,15 @@ int initial_sync(PHY_VARS_UE *ue, runmode_t mode)
//#endif
if (ue->UE_scan_carrier == 0) {
+
+ #if UE_AUTOTEST_TRACE
+ LOG_I(PHY,"[UE %d] AUTOTEST Cell Sync : frame = %d, rx_offset %d, freq_offset %d \n",
+ ue->Mod_id,
+ ue->proc.proc_rxtx[0].frame_rx,
+ ue->rx_offset,
+ ue->common_vars.freq_offset );
+ #endif
+
if (ue->mac_enabled==1) {
LOG_I(PHY,"[UE%d] Sending synch status to higher layers\n",ue->Mod_id);
//mac_resynch();
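This trace is what the autotest framework later greps to declare cell synchronization (see analyser.check_cell_synchro further down in this diff). A minimal sketch of such a check, assuming only the LOG_I format above; the function name and regex are illustrative, not the actual lib_autotest_analyser code:

import re

def check_cell_synchro_sketch(fname):
    # matches the trace printed by initial_sync() above, e.g.
    # "[UE 0] AUTOTEST Cell Sync : frame = 302, rx_offset 11520, freq_offset -120"
    pattern = re.compile(r'AUTOTEST Cell Sync\s*:\s*frame\s*=\s*(-?\d+),\s*rx_offset\s*(-?\d+),\s*freq_offset\s*(-?\d+)')
    with open(fname) as f:
        for line in f:
            if pattern.search(line):
                return 'CELL_SYNCH'
    return 'CELL_NOT_SYNCH'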
diff --git a/openair1/SCHED/phy_procedures_lte_ue.c b/openair1/SCHED/phy_procedures_lte_ue.c
index 5dc629b..353a049 100644
--- a/openair1/SCHED/phy_procedures_lte_ue.c
+++ b/openair1/SCHED/phy_procedures_lte_ue.c
@@ -3551,6 +3551,13 @@ int phy_procedures_UE_RX(PHY_VARS_UE *ue,UE_rxtx_proc_t *proc,uint8_t eNB_id,uin
LOG_D(PHY,"[UE %d] Calculating bitrate Frame %d: total_TBS = %d, total_TBS_last = %d, bitrate %f kbits\n",
ue->Mod_id,frame_rx,ue->total_TBS[eNB_id],
ue->total_TBS_last[eNB_id],(float) ue->bitrate[eNB_id]/1000.0);
+
+ #if UE_AUTOTEST_TRACE
+ if ((frame_rx % 100 == 0)) {
+ LOG_I(PHY,"[UE %d] AUTOTEST Metric : UE_DLSCH_BITRATE = %5.2f kbps (frame = %d) \n", ue->Mod_id, (float) ue->bitrate[eNB_id]/1000.0, frame_rx);
+ }
+ #endif
+
}
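The UE_DLSCH_BITRATE line above (one sample every 100 frames) is the raw material for the min/max/mean/median metrics that analyser.do_extract_metrics computes later in this diff. A hedged sketch of pulling that series out of a UE log, assuming only the format string above (names illustrative):

import re

def extract_dlsch_bitrate_sketch(fname):
    # matches "AUTOTEST Metric : UE_DLSCH_BITRATE = 1234.56 kbps (frame = 300)"
    pattern = re.compile(r'UE_DLSCH_BITRATE\s*=\s*([0-9.]+)\s*kbps\s*\(frame\s*=\s*(\d+)\)')
    samples = []
    with open(fname) as f:
        for line in f:
            m = pattern.search(line)
            if m:
                samples.append((int(m.group(2)), float(m.group(1))))
    return samples  # list of (frame, kbps) pairs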
diff --git a/openair1/SCHED/phy_procedures_lte_ue.c b/openair1/SCHED/phy_procedures_lte_ue.c
index 5dc629b..4d31ad3 100644
--- a/openair1/SCHED/phy_procedures_lte_ue.c
+++ b/openair1/SCHED/phy_procedures_lte_ue.c
@@ -3347,6 +3347,22 @@ int phy_procedures_UE_RX(PHY_VARS_UE *ue,UE_rxtx_proc_t *proc,uint8_t eNB_id,uin
// first slot has been processed (FFTs + Channel Estimation, PCFICH/PHICH/PDCCH)
+ #if UE_AUTOTEST_TRACE
+ if ( (frame_rx % 10 == 0) && (subframe_rx == 0)) {
+ printf("AUTOTEST Metric : UE_FREQ_OFFSET = %d Hz (frame = %d) \n", ue->common_vars.freq_offset, frame_rx);
+ printf("AUTOTEST Metric : UE_RX_OFFSET = %d (frame = %d) \n", ue->rx_offset, frame_rx);
+
+ printf("AUTOTEST Metric : RRC Measurments RSRP[0] %.2f dBm/RE, RSSI %.2f dBm, RSRQ[0] %.2f dB, N0 %d dBm/RE, NF %.1f dB (frame = %d)\n",
+ 10*log10(ue->measurements.rsrp[0])-ue->rx_total_gain_dB,
+ 10*log10(ue->measurements.rssi)-ue->rx_total_gain_dB,
+ 10*log10(ue->measurements.rsrq[0]),
+ ue->measurements.n0_power_tot_dBm,
+ (double)ue->measurements.n0_power_tot_dBm+132.24,
+ frame_rx);
+ }
+ #endif
+
+
// do procedures for C-RNTI
if (ue->dlsch[eNB_id][0]->active == 1) {
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PDSCH_PROC, VCD_FUNCTION_IN);
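A note on the NF term above: thermal noise density is -174 dBm/Hz, so one 15 kHz resource element carries -174 + 10*log10(15000) = -132.24 dBm of thermal noise, and adding 132.24 to the measured N0 (in dBm/RE) yields the receiver noise figure in dB. The same conversions in a small Python sketch (inputs assumed linear, like the ue->measurements fields):

import math

def rrc_measurements_sketch(rsrp_lin, rssi_lin, rsrq_lin, n0_dBm, rx_total_gain_dB):
    rsrp_dBm = 10 * math.log10(rsrp_lin) - rx_total_gain_dB  # dBm/RE at the antenna
    rssi_dBm = 10 * math.log10(rssi_lin) - rx_total_gain_dB
    rsrq_dB = 10 * math.log10(rsrq_lin)
    nf_dB = n0_dBm + 132.24  # NF = N0(dBm/RE) - (-174 dBm/Hz + 10*log10(15000 Hz))
    return rsrp_dBm, rssi_dBm, rsrq_dB, nf_dB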
diff --git a/cmake_targets/build_oai b/cmake_targets/build_oai
index 25e9a1c..0b04bb7 100755
--- a/cmake_targets/build_oai
+++ b/cmake_targets/build_oai
@@ -54,6 +54,7 @@ BUILD_DOXYGEN=0
T_TRACER="False"
DISABLE_HARDWARE_DEPENDENCY="False"
CMAKE_BUILD_TYPE=""
+UE_AUTOTEST_TRACE="False"
trap handle_ctrl_c INT
function print_help() {
@@ -130,6 +131,8 @@ Options
Enables the T tracer.
--disable-hardware-dependency
Disable HW dependency during installation
+--ue-autotest-trace
+ Enable specific traces for UE autotest framework
Usage (first build):
oaisim (eNB + UE): ./build_oai -I --oaisim -x --install-system-files
Eurecom EXMIMO + COTS UE : ./build_oai -I --eNB -x --install-system-files
@@ -285,6 +288,10 @@ function main() {
echo_info "Disabling hardware dependency for compiling software"
DISABLE_HARDWARE_DEPENDENCY="True"
shift 1;;
+ --ue-autotest-trace)
+ UE_AUTOTEST_TRACE="True"
+ echo_info "Enabling autotest specific trace for UE"
+ shift 1;;
-h | --help)
print_help
exit 1;;
@@ -457,6 +464,7 @@ function main() {
echo "set (DEADLINE_SCHEDULER \"${DEADLINE_SCHEDULER_FLAG_USER}\" )" >>$cmake_file
echo "set (CPU_AFFINITY \"${CPU_AFFINITY_FLAG_USER}\" )" >>$cmake_file
echo "set ( T_TRACER $T_TRACER )" >> $cmake_file
+ echo "set (UE_AUTOTEST_TRACE $UE_AUTOTEST_TRACE)" >> $cmake_file
echo 'include(${CMAKE_CURRENT_SOURCE_DIR}/../CMakeLists.txt)' >> $cmake_file
cd $DIR/$lte_build_dir/build
cmake ..
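With this plumbing in place, the flag travels build_oai -> generated CMake file -> compiler: passing --ue-autotest-trace sets UE_AUTOTEST_TRACE="True", which is written as set (UE_AUTOTEST_TRACE True) into $cmake_file and picked up by the add_boolean_option declaration added above. A first UE autotest build would therefore look something like ./build_oai -I --oaisim --ue-autotest-trace (illustrative invocation, combining the new flag with the usual flags from the usage examples above).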
......@@ -20,10 +20,14 @@
# * contact@openairinterface.org
# */
# \author Rohit Gupta
# \author Rohit Gupta - Benoit ROBERT (benoit.robert@syrtem.com)
# \version 0.1
# @ingroup _test
# \Changelog
# 2016-11-18 :
# - Add progress bar during test execution (update_progress())
import tempfile
import threading
import sys
......@@ -37,6 +41,8 @@ import math #from time import clock
import xml.etree.ElementTree as ET
import re
#from dict2xml import dict2xml as xmlify
from colorama import Fore, Back, Style
import numpy as np
......@@ -57,6 +63,36 @@ import ssh
from ssh import SSHSession
import argparse
# update_progress() : Displays or updates a console progress bar
## Accepts a float between 0 and 1. Any int will be converted to a float.
## A value under 0 represents a 'halt'.
## A value at 1 or bigger represents 100%
def update_progress(progress, prefix_string):
barLength = 20 # Modify this to change the length of the progress bar
status = ""
#print "progress = "+ str(progress)
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\r"+prefix_string+" [{0}] {1}% {2}".format( "="*block + " "*(barLength-block), progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
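A quick usage sketch for update_progress (hypothetical values; the real caller later in this diff drives it from the run timeout):

import time
for step in range(11):
    update_progress(step / 10.0, "> RUN_00 :")  # redraws the 20-char bar in place
    time.sleep(0.2)                             # prints "Done...\r\n" once progress reaches 1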
def exit_prog(exit_val):
print(Style.RESET_ALL),
sys.exit(exit_val)
# \brief write a string to a file
# \param filename name of file
# \param string string to write
......@@ -271,7 +307,7 @@ def update_config_file(oai, config_string, logdirRepo, python_script):
# \param logdir_local_base local directory
# \param operation operation to perform (get_all, put_all) transfers recursively for directories
def SSHSessionWrapper(machine, username, key_file, password, logdir_remote, logdir_local_base, operation):
max_tries = 100
max_tries = 10
i=0
while i <= max_tries:
i = i +1
......@@ -306,30 +342,50 @@ def SSHSessionWrapper(machine, username, key_file, password, logdir_remote, logd
# \param CleanUpAluLteBox program to terminate AlU Bell Labs LTE Box
# \param ExmimoRfStop String to stop EXMIMO card (specified in test_case_list.xml)
def cleanOldPrograms(oai, programList, CleanUpAluLteBox, ExmimoRfStop, logdir, logdirOAI5GRepo):
cmd = 'sudo -E killall -s INT -q -r ' + programList + ' ; sleep 5 ; sudo -E killall -9 -q -r ' + programList
cmd = 'killall -9 ' + programList
result = oai.send(cmd, True)
print "Killing old programs..." + result
# print "\t\t > "+cmd
# print "\t\t < "+result
# print "Killing old programs on ..." + result
programArray = programList.split()
programListJoin = '|'.join(programArray)
cmd = " ( date ;echo \"Starting cleaning old programs.. \" ; dmesg|tail ; echo \"Current disk space.. \" ; df -h )>& " + logdir + "/oai_test_setup_cleanup.log.`hostname` 2>&1 ; sync"
result=oai.send_recv(cmd)
# print "\t\t > "+cmd
# print "\t\t < "+result
cmd = cleanupOldProgramsScript + ' ' + '\''+programListJoin+'\''
#result = oai.send_recv(cmd)
#print result
result = oai.send_expect_false(cmd, 'Match found', False)
print "Looking for old programs..." + result
# print "\t\t > "+cmd
# print "\t\t < "+result
# print "Looking for old programs..." + result
res=oai.send_recv(CleanUpAluLteBox, True)
# print "\t\t > "+CleanUpAluLteBox
# print "\t\t < "+res
cmd= " echo \"Starting EXmimoRF Stop... \" >> " + logdir + "/oai_test_setup_cleanup.log.`hostname` 2>&1 ; sync ";
oai.send_recv(cmd)
result = oai.send_recv(cmd)
# print "\t\t > "+cmd
# print "\t\t < "+result
cmd = "( " + "cd " + logdirOAI5GRepo + " ; source oaienv ; " + ExmimoRfStop + " ) >> " + logdir + "/oai_test_setup_cleanup.log.`hostname` 2>&1 ; sync "
print "cleanoldprograms cmd = " + cmd
# print "cleanoldprograms cmd = " + cmd
res=oai.send_recv(cmd, False, timeout=600)
# print "\t\t > "+cmd
# print "\t\t < "+res
cmd= " echo \"Stopping EXmimoRF Stop... \" >> " + logdir + "/oai_test_setup_cleanup.log.`hostname` 2>&1 ; sync ";
oai.send_recv(cmd)
result = oai.send_recv(cmd)
# print "\t\t > "+cmd
# print "\t\t < "+result
#res = oai.send_recv(ExmimoRfStop, False)
cmd = " ( date ;echo \"Finished cleaning old programs.. \" ; dmesg | tail)>> $HOME/.oai_test_setup_cleanup.log.`hostname` 2>&1 ; sync"
res=oai.send_recv(cmd)
# print "\t\t > "+cmd
# print "\t\t < "+res
# \brief Class thread to launch a generic command on remote machine
# \param threadID number of thread (for book keeping)
......@@ -355,10 +411,10 @@ class oaiThread (threading.Thread):
try:
oai = openair('localdomain',self.machine)
oai.connect(self.username, self.password)
print "Starting " + self.threadname + " on machine " + self.machine
# print "Starting " + self.threadname + " on machine " + self.machine
result = oai.send_recv(self.cmd, self.sudo, self.timeout)
print "result = " + result
print "Exiting " + self.threadname
#print "result = " + result
#print "Exiting " + self.threadname
oai.disconnect()
except Exception, e:
error=''
......@@ -1269,6 +1325,8 @@ def handle_testcaseclass_softmodem_noS1 (testcase, oldprogramList, logdirOAI5GRe
if testcase_verdict != 'PASS': # if something went wrong to not run test cases
max_ntries=0
indent="\t\t"
runs_results = []
nb_runs = 0
nb_run_pass = 0
......@@ -1288,9 +1346,11 @@ def handle_testcaseclass_softmodem_noS1 (testcase, oldprogramList, logdirOAI5GRe
#
# RUN initialization
# ----------------------------------------------------
print (Fore.WHITE + indent + "> RUN_"+str(run).zfill(2)+" : " ),
sys.stdout.flush()
prefix_string = Fore.WHITE + indent + "> RUN_"+str(run).zfill(2)+" :"
# print (Fore.WHITE + indent + "> RUN_"+str(run).zfill(2)+" : " ),
# sys.stdout.flush()
run_start_time=datetime.datetime.now()
......@@ -1406,8 +1466,34 @@ def handle_testcaseclass_softmodem_noS1 (testcase, oldprogramList, logdirOAI5GRe
threads.append(thread_UE)
thread_eNB.start()
thread_UE.start()
run_work_inprogress_flag = True
progress_step = timeout_thread/20
progress_count = 0
while (run_work_inprogress_flag):
update_progress(float(progress_count)/float(timeout_thread),prefix_string)
time.sleep(progress_step)
progress_count += progress_step
# sys.stdout.write ('#')
# sys.stdout.flush()
run_work_inprogress_flag = False
for t in threads:
if t.isAlive():
run_work_inprogress_flag = True
update_progress(1,prefix_string)
for t in threads:
t.join()
t.join()
#
#
......@@ -1422,10 +1508,12 @@ def handle_testcaseclass_softmodem_noS1 (testcase, oldprogramList, logdirOAI5GRe
fname = logdir_local_run+ '/UE_exec' + '_' + str(run) + '_.log'
cell_synch_status = analyser.check_cell_synchro(fname)
print (Fore.WHITE + indent + "> Check Cell synchro :"),
if cell_synch_status == 'CELL_SYNCH':
print '!!!!!!!!!!!!!! Cell synchronized !!!!!!!!!!!'
print ( Fore.GREEN + cell_synch_status)
else :
print '!!!!!!!!!!!!!! Cell NOT synchronized !!!!!!!!!!!'
print ( Fore.RED + cell_synch_status)
metric_checks_flag = 0
ue_seg_fault_status = analyser.check_exec_seg_fault(fname)
......@@ -1444,6 +1532,9 @@ def handle_testcaseclass_softmodem_noS1 (testcase, oldprogramList, logdirOAI5GRe
# UE side metrics
metricList=testcase.findall('UE_metric')
for metric in metricList:
metric_verdict = 'PASS'
metric_def = {}
metric_def['id'] = metric.get('id')
metric_def['description'] = metric.get('description')
......@@ -1464,15 +1555,19 @@ def handle_testcaseclass_softmodem_noS1 (testcase, oldprogramList, logdirOAI5GRe
metric_extracted = analyser.do_extract_metrics(args)
print "\t > Metric "+metric_def['id']+" :"
print "\t\t> min = "+ str( metric_extracted['metric_min'] )
print "\t\t> min_index = "+ str( metric_extracted['metric_min_index'] )
print "\t\t> max = "+ str( metric_extracted['metric_max'] )
print "\t\t> max_index = "+ str( metric_extracted['metric_max_index'] )
print "\t\t> mean = "+ str( metric_extracted['metric_mean'] )
print "\t\t> median = "+ str( metric_extracted['metric_median'] )
print (Fore.WHITE + indent + "> Metric : "+metric_def['id']),
print ("(min="+str( metric_extracted['metric_min'])+", max="+str( metric_extracted['metric_max'])+", mean="+str( metric_extracted['metric_mean'])+", median="+str( metric_extracted['metric_median'])+")")
# print "\t > Metric "+metric_def['id']+" :"
# print "\t\t> min = "+ str( metric_extracted['metric_min'] )
# print "\t\t> min_index = "+ str( metric_extracted['metric_min_index'] )
# print "\t\t> max = "+ str( metric_extracted['metric_max'] )
# print "\t\t> max_index = "+ str( metric_extracted['metric_max_index'] )
# print "\t\t> mean = "+ str( metric_extracted['metric_mean'] )
# print "\t\t> median = "+ str( metric_extracted['metric_median'] )
verdict = analyser.do_check_verdict(metric_def, metric_extracted)
if metric_def['pass_fail_stat'] :
metric_verdict = analyser.do_check_verdict(metric_def, metric_extracted)
metric_fig = logdir_local_run+ '/UE_metric_'+ metric_def['id']+'_' + str(run) + '_.png'
analyser.do_img_metrics(metric_def, metric_extracted, metric_fig)
......@@ -1500,6 +1595,13 @@ def handle_testcaseclass_softmodem_noS1 (testcase, oldprogramList, logdirOAI5GRe
runs_metrics.append(run_metrics)
if metric_verdict != 'PASS':
verdict = metric_verdict
# End Metrics LOOP
# ---------------------
# Traffic analysis
if UE_traffic_exec != "":
......@@ -1553,9 +1655,9 @@ def handle_testcaseclass_softmodem_noS1 (testcase, oldprogramList, logdirOAI5GRe
run_stop_time=datetime.datetime.now()
run_duration = run_stop_time-run_start_time
# print (Fore.WHITE + ("duration=" :"),
print (Fore.WHITE + indent + "> RUN duration : "+ str(run_duration) +"s" )
print (Fore.WHITE + indent + "> RUN duration : "+ str(run_duration) +"s" )
print (Fore.WHITE + indent + "> RUN verdict :"),
print (Fore.WHITE + indent + "> RUN verdict :"),
if verdict == 'PASS':
nb_run_pass += 1
......@@ -1585,6 +1687,8 @@ def handle_testcaseclass_softmodem_noS1 (testcase, oldprogramList, logdirOAI5GRe
# END RUN LOOP
#----------------------------------------------------
indent="\t"
# Test case duration
# ----------------------------------
testcase_time_stop = datetime.datetime.now()
......@@ -1624,11 +1728,9 @@ def handle_testcaseclass_softmodem_noS1 (testcase, oldprogramList, logdirOAI5GRe
else:
print (Fore.YELLOW+'INCONCLUSIVE')
duration= testcase_time_stop - testcase_time_start
xmlFile = logdir_local_testcase + '/test.' + testcasename + '.xml'
xml="\n<testcase classname=\'"+ testcaseclass + "\' name=\'" + testcasename + "."+tags + "\' Run_result=\'" + test_result_string + "\' time=\'" + str(duration) + " s \' RESULT=\'" + testcase_verdict + "\'></testcase> \n"
write_file(xmlFile, xml, mode="w")
print(Style.RESET_ALL)
duration= testcase_time_stop - testcase_time_start
test_result = dict(testcase_name=testcasename,
testcaseclass=testcaseclass,
......@@ -1647,7 +1749,17 @@ def handle_testcaseclass_softmodem_noS1 (testcase, oldprogramList, logdirOAI5GRe
testcase_eNBMachine =eNBMachine,
testcase_UEMachine =UEMachine,
runs_results = runs_results)
#test_results.append(test_result)
test_results.append(test_result)
xmlFile = logdir_local_testcase + '/test.' + testcasename + '.xml'
xml="\n<testcase classname=\'"+ testcaseclass + "\' name=\'" + testcasename + "."+tags + "\' Run_result=\'" + test_result_string + "\' time=\'" + str(duration) + " s \' RESULT=\'" + testcase_verdict + "\'></testcase> \n"
write_file(xmlFile, xml, mode="w")
# xmlFile_ng = logdir_local_testcase + '/test.' + testcasename + '_ng.xml'
# xml_ng = xmlify(test_result, wrap=testcasename, indent=" ")
# write_file(xmlFile_ng, xml_ng, mode="w")
return testcase_verdict
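For reference, each per-testcase XML fragment written above comes out roughly like this (values illustrative); Step 4 later concatenates these files into results_autotests.xml:

<testcase classname='lte-softmodem-noS1' name='my_testcase.my_tag' Run_result='...' time='0:05:32.417000 s ' RESULT='PASS'></testcase>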
......@@ -1666,12 +1778,12 @@ def search_test_case_group(testcasename, testcasegroup, test_case_exclude):
if entry.find('+') >=0:
match = re.search(entry, testcasename)
if match:
print "\nSkipping test case as it is found in black list: " + testcasename
#print "\nSkipping test case as it is found in black list: " + testcasename
return False
else:
match = entry.find(testcasename)
if match >=0:
print "\nSkipping test case as it is found in black list: " + testcasename
#print "\nSkipping test case as it is found in black list: " + testcasename
return False
if testcasegroup == '':
return True
......@@ -1759,6 +1871,7 @@ if openairdir_local is None:
print "Environment variable OPENAIR_DIR not set correctly"
sys.exit()
locallogdir = openairdir_local + '/cmake_targets/autotests/log'
reportdir = openairdir_local + '/cmake_targets/autotests/report'
MachineList = ''
MachineListGeneric=''
MachineDescDic={}
......@@ -1767,12 +1880,19 @@ flag_start_testcase=False
nruns_lte_softmodem=''
flag_skip_git_head_check=False
flag_skip_oai_install=False
flag_skip_machine_preparation=False
flag_skip_sanity_check=False
flag_generate_html_report = False
Timeout_cmd=''
xmlInputFile=''
print "Number of arguments argc = " + str(len(sys.argv))
#print "Number of arguments argc = " + str(len(sys.argv))
#for index in range(1,len(sys.argv) ):
# print "argv_" + str(index) + " : " + sys.argv[index]
oai_list = []
i=1
while i < len (sys.argv):
arg=sys.argv[i]
......@@ -1832,9 +1952,15 @@ while i < len (sys.argv):
i = i +1
elif arg == '--skip-oai-install':
flag_skip_oai_install=True
elif arg == '--skip-machine-preparation':
flag_skip_machine_preparation=True
elif arg == '--skip-sanity-check':
flag_skip_sanity_check=True
elif arg == '--test-suite' :
xmlInputFile = sys.argv[i+1]
i = i +1
elif arg == '--generate-html-report':
flag_generate_html_report=True
elif arg == '-h' :
print "-s: This flag *MUST* be set to start the test cases"
print "-r: Remove the log directory in autotests"
......@@ -1852,90 +1978,65 @@ while i < len (sys.argv):
print "--skip-git-head-check: skip checking of GitHead remote/local branch (only for debugging)"
print "--timeout_cmd: Override the default parameter (timeout_cmd) in test_case_list.xml. This parameter is in seconds and should be > 120"
print "--skip-oai-install: Skips the openairinterface5g installer"
print "--skip-machine-preparation: skipp the whole system preparation -> direct to test cases"
print "--skip-sanity-check: skipp the machine sanity checks"
print "--test-suite: Select a XML test-suite file"
print "--generate-html-report: Generate an HTML report of the test campaign (supported only for noS1 testsuite !!)"
sys.exit()
else :
print "Unrecongnized Option: <" + arg + ">. Use -h to see valid options"
sys.exit()
i= i + 1
try:
os.environ["OPENAIR1_DIR"]
except KeyError:
print "Please set the environment variable OPENAIR1_DIR in the .bashrc"
sys.exit(1)
try:
os.environ["OPENAIR2_DIR"]
except KeyError:
print "Please set the environment variable OPENAIR2_DIR in the .bashrc"
sys.exit(1)
try:
os.environ["OPENAIR_TARGETS"]
except KeyError:
print "Please set the environment variable OPENAIR_TARGETS in the .bashrc"
sys.exit(1)
print "Killing zombie ssh sessions from earlier sessions..."
cmd='ps aux |grep \"/usr/bin/ssh -q -l guptar\"| awk \'{print $2}\' | sudo xargs kill -9 '
os.system(cmd)
logdir = '/tmp/' + 'OAITestFrameWork-' + user + '/'
logdirOAI5GRepo = logdir + 'openairinterface5g/'
logdirOpenaircnRepo = logdir + 'openair-cn/'
patchdir = logdirOAI5GRepo + 'cmake_targets/autotests/patches/'
NFSResultsDir = '/mnt/sradio'
cleanupOldProgramsScript = '$OPENAIR_DIR/cmake_targets/autotests/tools/remove_old_programs.bash'
try:
analyser = __import__("lib_autotest_analyser")
except ImportError as err:
print('Import error: ' + str(err))
exit(0)
#Now we parse the xml file for basic configuration
if xmlInputFile == '':
xmlInputFile=os.environ.get('OPENAIR_DIR')+"/cmake_targets/autotests/test_case_list.xml"
xmlTree = ET.parse(xmlInputFile)
xmlRoot = xmlTree.getroot()
#
# Read test session configuration
# ---------------------------------------------------------------------------------
print (Fore.YELLOW + '\nStep 0.1 - Read test session configuration')
print (Fore.YELLOW + '-------------------------------------------------')
if flag_start_testcase == False:
print "You need to start the testcase by passing option -s. Use -h to see all options. Aborting now..."
sys.exit(1)
test_session_start_time = datetime.datetime.now()
print (Fore.WHITE + ' - start time : '+ str(test_session_start_time))
# get the oai object
host = os.uname()[1]
#oai = openair('localdomain','calisson')
oai_list = []
print (Fore.WHITE + ' - MTC host : '+host)
#start_time = time.time() # datetime.datetime.now()
if user=='':
user = getpass.getuser()
print (Fore.WHITE + ' - user : '+user)
if pw=='':
pw = getpass.getpass()
print (Fore.WHITE + ' - password : '+pw)
print "Killing zombie ssh sessions from earlier sessions..."
cmd='ps aux |grep \"/usr/bin/ssh -q -l guptar\"|tr -s \" \" :|cut -f 2 -d :|xargs kill -9 '
cmd = cmd + '; ps aux |grep \"/usr/bin/ssh -q -l ' + user + '\"|tr -s \" \" :|cut -f 2 -d :|xargs kill -9 '
os.system(cmd)
print "host = " + host
print "user = " + user
if xmlInputFile == '':
xmlInputFile=os.environ.get('OPENAIR_DIR')+"/cmake_targets/autotests/test_case_list.xml"
NFSResultsDir = '/mnt/sradio'
cleanupOldProgramsScript = '$OPENAIR_DIR/cmake_targets/autotests/tools/remove_old_programs.bash'
logdir = '/tmp/' + 'OAITestFrameWork-' + user + '/'
logdirOAI5GRepo = logdir + 'openairinterface5g/'
logdirOpenaircnRepo = logdir + 'openair-cn/'
patchdir = logdirOAI5GRepo + 'cmake_targets/autotests/patches/'
if flag_remove_logdir == True:
print "Removing directory: " + locallogdir
os.system(' rm -fr ' + locallogdir + '; mkdir -p ' + locallogdir )
else:
os.system('mkdir -p ' + locallogdir)
paramiko_logfile = os.path.expandvars('$OPENAIR_DIR/cmake_targets/autotests/log/paramiko.log')
res=os.system(' echo > ' + paramiko_logfile)
paramiko.util.log_to_file(paramiko_logfile)
#pw=getpass.getpass()
print (Fore.WHITE + ' - cmd line flags : ')
print (Fore.WHITE + ' + -s : '+str(flag_start_testcase))
print (Fore.WHITE + ' + -r : '+str(flag_remove_logdir))
print (Fore.WHITE + ' + -c : '+str(cleanUpRemoteMachines))
print (Fore.WHITE + ' + --skip-git-head-check : '+str(flag_skip_git_head_check))
print (Fore.WHITE + ' + --skip-oai-install : '+str(flag_skip_oai_install))
print (Fore.WHITE + ' + --skip-machine-preparation : '+str(flag_skip_machine_preparation))
print (Fore.WHITE + ' + --skip-sanity-check : '+str(flag_skip_sanity_check))
#Now we parse the xml file for basic configuration
xmlTree = ET.parse(xmlInputFile)
xmlRoot = xmlTree.getroot()
print (Fore.WHITE + ' - Test setup basic config : (* -> overwrite by cmd line parameter)')
print (Fore.WHITE + ' + test setup config file : '+str(xmlInputFile))
print (Fore.WHITE + ' + remote setup dir : '+str(logdir))
print (Fore.WHITE + ' + remote OAI5G repo : '+str(logdirOAI5GRepo))
print (Fore.WHITE + ' + remote OpenAirCN repo : '+str(logdirOpenaircnRepo))
if MachineList =='':
......@@ -1948,6 +2049,8 @@ if GitOAI5GRepo == '':
if GitOAI5GRepoBranch == '':
GitOAI5GRepoBranch = xmlRoot.findtext('GitOAI5GRepoBranch',default='')
if GitOAI5GHeadVersion == '':
GitOAI5GHeadVersion = xmlRoot.findtext('GitOAI5GHeadVersion',default='')
GitOpenaircnRepoBranch = xmlRoot.findtext('GitOpenair-cnRepoBranch',default='')
CleanUpOldProgs = xmlRoot.findtext('CleanUpOldProgs',default='')
......@@ -1960,25 +2063,27 @@ ExmimoRfStop = xmlRoot.findtext('ExmimoRfStop',default='')
if nruns_lte_softmodem == '':
nruns_lte_softmodem = xmlRoot.findtext('nruns_lte-softmodem',default='')
OAI5GpatchFileList=xmlRoot.findall('OAI5GPatchFile')
print (Fore.WHITE + ' + MachineList : '+ MachineList)
print (Fore.WHITE + ' + GitOpenair-cnRepo : '+ GitOpenaircnRepo)
print (Fore.WHITE + ' + GitOAI5GRepo : '+ GitOAI5GRepo)
print (Fore.WHITE + ' + GitOAI5GBranch : '+ GitOAI5GRepoBranch)
print (Fore.WHITE + ' + GitOpenaircnRepoBranch : '+ GitOpenaircnRepoBranch)
print (Fore.WHITE + ' + NFSResultsShare : '+ NFSResultsShare)
print (Fore.WHITE + ' + nruns_lte_softmodem : '+ nruns_lte_softmodem)
print (Fore.WHITE + ' + Timeout_cmd : '+ Timeout_cmd)
print "MachineList = " + MachineList
print "GitOpenair-cnRepo = " + GitOpenaircnRepo
print "GitOAI5GRepo = " + GitOAI5GRepo
print "GitOAI5GBranch = " + GitOAI5GRepoBranch
print "GitOpenaircnRepoBranch = " + GitOpenaircnRepoBranch
print "NFSResultsShare = " + NFSResultsShare
print "nruns_lte_softmodem = " + nruns_lte_softmodem
print "Timeout_cmd = " + Timeout_cmd
if GitOAI5GHeadVersion == '':
cmd = "git show-ref --heads -s "+ GitOAI5GRepoBranch
cmd = "git log --pretty=format:\'%H\' -n 1" # origin/"+ GitOAI5GRepoBranch
# cmd = "git show-ref --heads -s "+ GitOAI5GRepoBranch
GitOAI5GHeadVersion = subprocess.check_output ([cmd], shell=True)
GitOAI5GHeadVersion=GitOAI5GHeadVersion.replace("\n","")
print "GitOAI5GHeadVersion = " + GitOAI5GHeadVersion
print "CleanUpOldProgs = " + CleanUpOldProgs
print "Timeout_execution = " + str(Timeout_execution)
# Print committer and date : git log -1 --pretty=format:"%an %ad"
print (Fore.WHITE + ' + GitOAI5GHeadVersion : '+ GitOAI5GHeadVersion)
#print "CleanUpOldProgs = " + CleanUpOldProgs
print (Fore.WHITE + ' + Timeout_execution (s) : '+ str(Timeout_execution))
if GitOAI5GHeadVersion == '':
print "Error getting the OAI5GBranch Head version...Exiting"
......@@ -1986,214 +2091,401 @@ if GitOAI5GHeadVersion == '':
NFSTestsResultsDir = NFSResultsShare + '/'+ GitOAI5GRepoBranch + '/' + GitOAI5GHeadVersion
print "NFSTestsResultsDir = " + NFSTestsResultsDir
#print "NFSTestsResultsDir = " + NFSTestsResultsDir
OAI5GpatchFileList=xmlRoot.findall('OAI5GPatchFile')
MachineList = MachineList.split()
MachineListGeneric = MachineListGeneric.split()
#index=0
MachineDescDic
#
# 'Check Master Test Controller sanity (MTC)'
# ---------------------------------------------------------------------------------
print (Fore.YELLOW + '\nStep 0.2 - Check Master Test Controller sanity (MTC)')
print (Fore.YELLOW + '-------------------------------------------------')
try:
print (Fore.WHITE + ' - check OpenAirInterface environment variable OPENAIR1_DIR... '),
os.environ["OPENAIR1_DIR"]
except KeyError:
print (Fore.RED + 'FAILED')
print "Please set the environment variable OPENAIR1_DIR in the .bashrc or run 'source oaienv' "
exit_prog(1)
print (Fore.GREEN + 'OK')
try:
print (Fore.WHITE + ' - check OpenAirInterface environment variable OPENAIR2_DIR... '),
os.environ["OPENAIR2_DIR"]
except KeyError:
print (Fore.RED + 'FAILED')
print "Please set the environment variable OPENAIR2_DIR in the .bashrc or run 'source oaienv' "
exit_prog(1)
print (Fore.GREEN + 'OK')
try:
os.environ["OPENAIR_TARGETS"]
except KeyError:
print "Please set the environment variable OPENAIR_TARGETS in the .bashrc or run 'source oaienv' "
exit_prog(1)
print (Fore.WHITE + ' - kill zombie ssh sessions from earlier sessions... '),
cmd='ps aux |grep \"/usr/bin/ssh -q -l ' + user + '\"| tr -s \" \" :| cut -f 2 -d :| xargs kill -9 '
os.system(cmd)
print (Fore.GREEN + 'DONE')
if flag_start_testcase == False:
print "You need to start the testcase by passing option -s. Use -h to see all options. Aborting now..."
exit_prog(1)
#print "Killing zombie ssh sessions from earlier sessions..."
#cmd='ps aux |grep \"/usr/bin/ssh -q -l guptar\"|tr -s \" \" :|cut -f 2 -d :|xargs kill -9 '
#cmd = cmd + '; ps aux |grep \"/usr/bin/ssh -q -l ' + user + '\"| tr -s \" \" :| cut -f 2 -d :| xargs kill -9 '
#os.system(cmd)
#print cmd
try:
analyser = __import__("lib_autotest_analyser")
except ImportError as err:
print('Import error: ' + str(err))
exit(0)
if flag_remove_logdir == True:
print "Removing directory: " + locallogdir
os.system(' rm -fr ' + locallogdir + '; mkdir -p ' + locallogdir )
if not os.path.exists(locallogdir):
os.system('mkdir -p ' + locallogdir )
paramiko_logfile = os.path.expandvars('$OPENAIR_DIR/cmake_targets/autotests/log/paramiko.log')
res=os.system(' echo > ' + paramiko_logfile)
paramiko.util.log_to_file(paramiko_logfile)
#
# 'REMOTE MACHINE SANITY CHECKS'
# ---------------------------------------------------------------------------------
print (Fore.YELLOW + '\nStep 0.3 - REMOTE MACHINE SANITY CHECKS')
print (Fore.YELLOW + '-------------------------------------------------')
for machine in MachineList:
oai_list.append( openair('localdomain',machine))
MachineDescDic[machine]={}
#index = index + 1
print "\nTesting the sanity of machines used for testing..."
if localshell == 0:
if not flag_skip_sanity_check:
if localshell == 0:
try:
index=0
for machine in MachineList:
print '\n******* Note that the user <'+user+'> should be a sudoer *******\n'
print '******* Connecting to the machine <'+machine+'> to perform the test *******\n'
if not pw :
print "username: " + user
#pw = getpass.getpass()
#print "password: " + pw
else :
print "username: " + user
#print "password: " + pw
# issues in ubuntu 12.04
oai_list[index].connect(user,pw)
print '\nChecking for sudo permissions on machine <'+machine+'>...'
result = oai_list[index].send_expect_false('sudo -S -v','may not run sudo',True)
print "Sudo permissions..." + result
print '\nCleaning Older running programs : ' + CleanUpOldProgs
cleanOldPrograms(oai_list[index], CleanUpOldProgs, CleanUpAluLteBox, ExmimoRfStop, '$HOME', '/tmp')
#result = oai_list[index].send('mount ' + NFSResultsDir, True)
#print "Mounting NFS Share " + NFSResultsDir + "..." + result
# Check if NFS share is mounted correctly.
#print 'Checking if NFS Share<' + NFSResultsDir + '> is mounted correctly...'
#cmd = 'if grep -qs '+NFSResultsDir+ ' /proc/mounts; then echo \'' + NFSResultsDir + ' is mounted\' ; fi'
#search_expr = NFSResultsDir + ' is mounted'
#print "cmd = " + cmd
#print "search_expr = " + search_expr
#result = oai_list[index].send_expect(cmd, search_expr)
#print "Mount NFS_Results_Dir..." + result
index = index + 1
#oai.connect2(user,pw)
#oai.get_shell()
index=0
for machine in MachineList:
print (Fore.WHITE + " - Check sanity for remote machine "+machine)
oai_list[index].connect(user,pw)
print (Fore.WHITE + " > Checking for sudo permissions for user "+user+ " : "),
result = oai_list[index].send_expect_false('sudo -S -v','may not run sudo',True)
if (result == 'OK'):
print (Fore.GREEN + result )
else:
print (Fore.RED + result )
print (Fore.WHITE + " > Cleaning Older running programs : "),
cleanOldPrograms(oai_list[index], CleanUpOldProgs, CleanUpAluLteBox, ExmimoRfStop, '$HOME', '/tmp')
print (Fore.WHITE + " Done")
#result = oai_list[index].send('mount ' + NFSResultsDir, True)
#print "Mounting NFS Share " + NFSResultsDir + "..." + result
# Check if NFS share is mounted correctly.
#print 'Checking if NFS Share<' + NFSResultsDir + '> is mounted correctly...'
#cmd = 'if grep -qs '+NFSResultsDir+ ' /proc/mounts; then echo \'' + NFSResultsDir + ' is mounted\' ; fi'
#search_expr = NFSResultsDir + ' is mounted'
#print "cmd = " + cmd
#print "search_expr = " + search_expr
#result = oai_list[index].send_expect(cmd, search_expr)
#print "Mount NFS_Results_Dir..." + result
index = index + 1
#oai.connect2(user,pw)
#oai.get_shell()
except :
print 'Failed to connect to the machine: '+ machine
sys.exit(1)
else:
pw = ''
oai_list[0].connect_localshell()
else:
pw = ''
oai_list[0].connect_localshell()
print (Fore.RED + " Skipping Machine sanity check...")
#
# 'REMOTE MACHINE TEST SETUP CONFIGURATION'
# ---------------------------------------------------------------------------------
print (Fore.YELLOW + '\nStep 0.4 - REMOTE MACHINE PREPARATION')
print (Fore.YELLOW + '-------------------------------------------------')
if not flag_skip_machine_preparation:
index=0
threads_init_setup=[]
for oai in oai_list:
try:
print "setting up machine: " + MachineList[index]
#print oai_list[oai].send_recv('echo \''+pw+'\' |sudo -S -v')
#print oai_list[oai].send_recv('sudo su')
#print oai_list[oai].send_recv('who am i')
#cleanUpPrograms(oai_list[oai]
cmd = 'sudo -S -E rm -fr ' + logdir + ' ; mkdir -p ' + logdir
result = oai.send_recv(cmd)
print cmd
setuplogfile = logdir + '/setup_log_' + MachineList[index] + '_.txt'
setup_script = locallogdir + '/setup_script_' + MachineList[index] + '_.txt'
#Sometimes git fails so the script below retries in that case
localfile = os.path.expandvars('$OPENAIR_DIR/cmake_targets/autotests/tools/git-retry.sh')
remotefile = logdir + '/git-retry.sh'
paramList=[]
port=22
paramList.append ( {"operation":'put', "localfile":localfile, "remotefile":remotefile} )
sftp_log = os.path.expandvars(locallogdir + '/sftp_module.log')
sftp_module (user, pw, MachineList[index], port, paramList, sftp_log)
cmd = ' ( \n'
#cmd = cmd + 'rm -fR ' + logdir + '\n'
#cmd = cmd + 'mkdir -p ' + logdir + '\n'
cmd = cmd + 'cd '+ logdir + '\n'
cmd = cmd + 'sudo apt-get install -y git \n'
cmd = cmd + 'chmod 700 ' + logdir + '/git-retry.sh \n'
cmd = cmd + logdir + '/git-retry.sh clone '+ GitOAI5GRepo +' \n'
cmd = cmd + logdir + '/git-retry.sh clone '+ GitOpenaircnRepo + ' \n'
cmd = cmd + 'cd ' + logdirOAI5GRepo + '\n'
cmd = cmd + 'git checkout ' + GitOAI5GRepoBranch + '\n'
if GitOAI5GHeadVersion :
cmd = cmd + 'git checkout ' + GitOAI5GHeadVersion + '\n'
cmd = cmd + 'git_head=`git ls-remote |grep \'' + GitOAI5GRepoBranch + '\'` \n'
cmd = cmd + 'git_head=($git_head) \n'
cmd = cmd + 'git_head=${git_head[0]} \n'
cmd = cmd + 'echo \"GitOAI5GHeadVersion_remote = $git_head\" \n'
cmd = cmd + 'echo \"GitOAI5GHeadVersion_local = ' + GitOAI5GHeadVersion + '\" \n'
if flag_skip_git_head_check==True:
cmd = cmd + 'echo \"skipping GitHead check...\" \n '
else:
cmd = cmd + 'if [ \"$git_head\" != \"'+ GitOAI5GHeadVersion + '\" ]; then echo \"error: Git openairinterface5g head version does not match\" ; fi \n'
cmd = cmd + 'source oaienv' + '\n'
if flag_skip_oai_install == False:
cmd = cmd + 'source $OPENAIR_DIR/cmake_targets/tools/build_helper \n'
cmd = cmd + 'echo \"Installing core OAI dependencies...Start\" \n'
cmd = cmd + '$OPENAIR_DIR/cmake_targets/build_oai -I --install-optional-packages \n'
cmd = cmd + 'echo \"Installing core OAI dependencies...Finished\" \n'
#cmd = cmd + 'echo \"Installing BLADERF OAI dependencies...Start\" \n'
#cmd = cmd + 'check_install_bladerf_driver \n'
#cmd = cmd + 'echo \"Installing BLADERF OAI dependencies...Finished\" \n'
#cmd = cmd + 'echo \"Installing USRP OAI dependencies...Start\" \n'
#cmd = cmd + 'check_install_usrp_uhd_driver \n'
#cmd = cmd + 'echo \"Installing USRP OAI dependencies...Finished\" \n'
cmd = cmd + 'cd ' + logdirOpenaircnRepo + '\n'
cmd = cmd + 'git checkout ' + GitOpenaircnRepoBranch + '\n'
cmd = cmd + 'env |grep OPENAIR' + '\n'
cmd = cmd + ' cd ' + logdir + '\n'
cmd = cmd + 'mkdir -p ' + patchdir + '\n'
cmd = cmd + ' ) > ' + setuplogfile + ' 2>&1 \n'
#cmd = cmd + 'echo \' ' + cmd + '\' > ' + setup_script + ' 2>&1 \n '
#result = oai_list[index].send_recv(cmd, False, 300 )
write_file(setup_script, cmd, mode="w")
tempThread = oaiThread(index, 'thread_setup_'+str(index)+'_' + MachineList[index] , MachineList[index] , user, pw, cmd, False, 3000)
threads_init_setup.append(tempThread )
tempThread.start()
index = index + 1
except Exception, e:
print 'There is an error in one of the commands to set up the machine '+ MachineList[index]
error=''
error = error + ' In function: ' + sys._getframe().f_code.co_name + ': *** Caught exception: ' + str(e.__class__) + " : " + str( e)
error = error + traceback.format_exc()
print error
sys.exit(1)
#We now prepare the machines for testing
index=0
threads_init_setup=[]
for oai in oai_list:
try:
print "setting up machine: " + MachineList[index]
#print oai_list[oai].send_recv('echo \''+pw+'\' |sudo -S -v')
#print oai_list[oai].send_recv('sudo su')
#print oai_list[oai].send_recv('who am i')
#cleanUpPrograms(oai_list[oai]
cmd = 'sudo -S -E rm -fr ' + logdir + ' ; mkdir -p ' + logdir
result = oai.send_recv(cmd)
#Now we wait for all the threads to complete
index = 0
for t in threads_init_setup:
t.join()
port = 22
paramList=[]
sftp_log = os.path.expandvars(locallogdir + '/sftp_module.log')
#Now we copy patch files and apply them
print( Fore.WHITE + " - Installating patch files on machine " + MachineList[index])
for patchFile in OAI5GpatchFileList:
localfile = os.path.expandvars('$OPENAIR_DIR/cmake_targets/autotests/patches/')+patchFile.get('name')
remotefile = logdirOAI5GRepo + '/cmake_targets/autotests/patches/'+patchFile.get('name')
if patchFile.get('machine') == MachineList[index] or patchFile.get('machine') == None:
if os.path.isfile(localfile):
print( Fore.WHITE + "\t> PATCH FILE :"+localfile)
paramList=[]
paramList.append ( {"operation":'put', "localfile":localfile, "remotefile":remotefile} )
sftp_module (user, pw, MachineList[index], port, paramList, sftp_log)
cmd = ' cd ' + logdirOAI5GRepo + ' ;git apply cmake_targets/autotests/patches/'+patchFile.get('name')
res = oai_list[index].send_recv(cmd)
#print res
setuplogfile = logdir + '/setup_log_' + MachineList[index] + '_.txt'
setup_script = locallogdir + '/setup_script_' + MachineList[index] + '_.txt'
#Sometimes git fails so the script below retries in that case
localfile = os.path.expandvars('$OPENAIR_DIR/cmake_targets/autotests/tools/git-retry.sh')
remotefile = logdir + '/git-retry.sh'
paramList=[]
port=22
paramList.append ( {"operation":'put', "localfile":localfile, "remotefile":remotefile} )
sftp_log = os.path.expandvars(locallogdir + '/sftp_module.log')
sftp_module (user, pw, MachineList[index], port, paramList, sftp_log)
cmd = ' ( \n'
#cmd = cmd + 'rm -fR ' + logdir + '\n'
#cmd = cmd + 'mkdir -p ' + logdir + '\n'
cmd = cmd + 'cd '+ logdir + '\n'
cmd = cmd + 'sudo apt-get install -y git \n'
cmd = cmd + 'chmod 700 ' + logdir + '/git-retry.sh \n'
cmd = cmd + logdir + '/git-retry.sh clone '+ GitOAI5GRepo +' \n'
cmd = cmd + logdir + '/git-retry.sh clone '+ GitOpenaircnRepo + ' \n'
cmd = cmd + 'cd ' + logdirOAI5GRepo + '\n'
cmd = cmd + 'git checkout ' + GitOAI5GRepoBranch + '\n'
#cmd = cmd + 'git checkout ' + GitOAI5GHeadVersion + '\n'
cmd = cmd + 'git_head=`git ls-remote |grep \'' + GitOAI5GRepoBranch + '\'` \n'
cmd = cmd + 'git_head=($git_head) \n'
cmd = cmd + 'git_head=${git_head[0]} \n'
cmd = cmd + 'echo \"GitOAI5GHeadVersion_remote = $git_head\" \n'
cmd = cmd + 'echo \"GitOAI5GHeadVersion_local = ' + GitOAI5GHeadVersion + '\" \n'
if flag_skip_git_head_check==True:
cmd = cmd + 'echo \"skipping GitHead check...\" \n '
else:
cmd = cmd + 'if [ \"$git_head\" != \"'+ GitOAI5GHeadVersion + '\" ]; then echo \"error: Git openairinterface5g head version does not match\" ; fi \n'
cmd = cmd + 'source oaienv' + '\n'
if flag_skip_oai_install == False:
cmd = cmd + 'source $OPENAIR_DIR/cmake_targets/tools/build_helper \n'
cmd = cmd + 'echo \"Installing core OAI dependencies...Start\" \n'
cmd = cmd + '$OPENAIR_DIR/cmake_targets/build_oai -I --install-optional-packages \n'
cmd = cmd + 'echo \"Installing core OAI dependencies...Finished\" \n'
#cmd = cmd + 'echo \"Installing BLADERF OAI dependencies...Start\" \n'
#cmd = cmd + 'check_install_bladerf_driver \n'
#cmd = cmd + 'echo \"Installing BLADERF OAI dependencies...Finished\" \n'
#cmd = cmd + 'echo \"Installing USRP OAI dependencies...Start\" \n'
#cmd = cmd + 'check_install_usrp_uhd_driver \n'
#cmd = cmd + 'echo \"Installing USRP OAI dependencies...Finished\" \n'
cmd = cmd + 'cd ' + logdirOpenaircnRepo + '\n'
cmd = cmd + 'git checkout ' + GitOpenaircnRepoBranch + '\n'
cmd = cmd + 'env |grep OPENAIR' + '\n'
cmd = cmd + ' cd ' + logdir + '\n'
cmd = cmd + 'mkdir -p ' + patchdir + '\n'
cmd = cmd + ' ) > ' + setuplogfile + ' 2>&1 \n'
#cmd = cmd + 'echo \' ' + cmd + '\' > ' + setup_script + ' 2>&1 \n '
#result = oai_list[index].send_recv(cmd, False, 300 )
write_file(setup_script, cmd, mode="w")
tempThread = oaiThread(index, 'thread_setup_'+str(index)+'_' + MachineList[index] , MachineList[index] , user, pw, cmd, False, 3000)
threads_init_setup.append(tempThread )
tempThread.start()
index = index + 1
except Exception, e:
print 'There is an error in one of the commands to set up the machine '+ MachineList[index]
error=''
error = error + ' In function: ' + sys._getframe().f_code.co_name + ': *** Caught exception: ' + str(e.__class__) + " : " + str( e)
error = error + traceback.format_exc()
print error
sys.exit(1)
paramList=[]
#Now we wait for all the threads to complete
index = 0
for t in threads_init_setup:
t.join()
setuplogfile = logdir + '/setup_log_' + MachineList[index] + '_.txt'
setup_script = locallogdir + '/setup_script_' + MachineList[index] + '_.txt'
localfile = locallogdir + '/setup_log_' + MachineList[index] + '_.txt'
remotefile = logdir + '/setup_log_' + MachineList[index] + '_.txt'
port = 22
#Now we copy patch files and apply them
print "Installating patch files on machine " + MachineList[index]
for patchFile in OAI5GpatchFileList:
localfile = os.path.expandvars('$OPENAIR_DIR/cmake_targets/autotests/patches/')+patchFile.get('name')
remotefile = logdirOAI5GRepo + '/cmake_targets/autotests/patches/'+patchFile.get('name')
if patchFile.get('machine') == MachineList[index] or patchFile.get('machine') == None:
if os.path.isfile(localfile):
print "\t> PATCH FILE :"+localfile
paramList=[]
paramList.append ( {"operation":'put', "localfile":localfile, "remotefile":remotefile} )
sftp_module (user, pw, MachineList[index], port, paramList, sftp_log)
cmd = ' cd ' + logdirOAI5GRepo + ' ;git apply cmake_targets/autotests/patches/'+patchFile.get('name')
res = oai_list[index].send_recv(cmd)
paramList=[]
setuplogfile = logdir + '/setup_log_' + MachineList[index] + '_.txt'
setup_script = locallogdir + '/setup_script_' + MachineList[index] + '_.txt'
localfile = locallogdir + '/setup_log_' + MachineList[index] + '_.txt'
remotefile = logdir + '/setup_log_' + MachineList[index] + '_.txt'
sftp_log = os.path.expandvars(locallogdir + '/sftp_module.log')
paramList.append ( {"operation":'get', "localfile":localfile, "remotefile":remotefile} )
#sftp_module (user, pw, MachineList[index], port, localfile, remotefile, sftp_log, "get")
#Now we copy test_case_list.xml on the remote machines
localfile = os.path.expandvars('$OPENAIR_DIR/cmake_targets/autotests/test_case_list.xml')
remotefile = logdirOAI5GRepo + '/cmake_targets/autotests/test_case_list.xml'
paramList.append ( {"operation":'put', "localfile":localfile, "remotefile":remotefile} )
sftp_log = os.path.expandvars(locallogdir + '/sftp_module.log')
sftp_module (user, pw, MachineList[index], port, paramList, sftp_log)
cmd = ' cd ' + logdirOAI5GRepo + ' ; source oaienv ; env|grep OPENAIR \n'
res = oai_list[index].send_recv(cmd)
index = index +1
if os.path.exists(localfile) == 0:
setuplogfile = logdir + '/setup_log_' + MachineList[index] + '_.txt'
setup_script = locallogdir + '/setup_script_' + MachineList[index] + '_.txt'
localfile = locallogdir + '/setup_log_' + MachineList[index] + '_.txt'
remotefile = logdir + '/setup_log_' + MachineList[index] + '_.txt'
sftp_log = os.path.expandvars(locallogdir + '/sftp_module.log')
paramList.append ( {"operation":'get', "localfile":localfile, "remotefile":remotefile} )
#sftp_module (user, pw, MachineList[index], port, localfile, remotefile, sftp_log, "get")
#Now we copy test_case_list.xml on the remote machines
localfile = os.path.expandvars('$OPENAIR_DIR/cmake_targets/autotests/test_case_list.xml')
remotefile = logdirOAI5GRepo + '/cmake_targets/autotests/test_case_list.xml'
# paramList.append ( {"operation":'put', "localfile":localfile, "remotefile":remotefile} )
# sftp_module (user, pw, MachineList[index], port, paramList, sftp_log)
sftp_module (user, pw, MachineList[index], port, paramList, sftp_log)
cmd = ' cd ' + logdirOAI5GRepo + ' ; source oaienv ; env|grep OPENAIR \n'
res = oai_list[index].send_recv(cmd)
index = index +1
if os.path.exists(localfile) == 0:
print "Setup log file <" + localfile + "> missing for machine <" + MachineList[index] + ">. Please check the setup log files. Exiting now"
sys.exit(1)
#Now we process all the test cases
#Now we check if there was error in setup files
#Now we process all the test cases
#Now we check if there was error in setup files
status, out = commands.getstatusoutput('grep ' + ' -il \'error\' ' + locallogdir + '/setup_log*')
if (out != '') :
print "There is error in setup of machines"
print "status = " + str(status) + "\n Check files for error = " + out
print "Exiting now..."
sys.exit(1)
status, out = commands.getstatusoutput('grep ' + ' -il \'error\' ' + locallogdir + '/setup_log*')
if (out != '') :
print "There is error in setup of machines"
print "status = " + str(status) + "\n Check files for error = " + out
print "Exiting now..."
sys.exit(1)
cleanOldProgramsAllMachines(oai_list, CleanUpOldProgs, CleanUpAluLteBox, ExmimoRfStop, '$HOME' , logdirOAI5GRepo)
if cleanUpRemoteMachines == True:
print "Error while cleaning Remote machines"
print "Exiting now..."
sys.exit(0)
else:
print (Fore.RED + " Skipping Machine preparation...")
# **************************************************************************
# **************************************************************************
# STEP 2 - TEST SELECTION AND PARAMETRIZATION
# **************************************************************************
# **************************************************************************
print (Fore.YELLOW + '\nStep 2 - TEST SELECTION')
print (Fore.YELLOW + '-------------------------------------------------')
print (Fore.WHITE + " Parsing test case list for test selection...")
print (Fore.WHITE + " - TestCaseExclusionList : "+TestCaseExclusionList)
print (Fore.WHITE + " - testcasegroup : "+testcasegroup)
cleanOldProgramsAllMachines(oai_list, CleanUpOldProgs, CleanUpAluLteBox, ExmimoRfStop, '$HOME' , logdirOAI5GRepo)
if cleanUpRemoteMachines == True:
sys.exit(0)
threadListGlobal=[]
testcaseList=xmlRoot.findall('testCase')
#print testcaseList
nb_total_testcases = 0
nb_total_testcases_softmodem = 0
nb_total_testcases_softmodem_noS1 = 0
nb_total_testcases_compilation = 0
nb_total_testcases_execution = 0
nb_run_testcases = 0
nb_run_testcases_softmodem = 0
nb_run_testcases_softmodem_noS1 = 0
nb_run_testcases_compilation = 0
nb_run_testcases_execution = 0
for testcase in testcaseList:
try:
testcasename = testcase.get('id')
testcaseclass = testcase.findtext('class',default='')
desc = testcase.findtext('desc',default='')
nb_total_testcases+=1
run_flag = search_test_case_group(testcasename, testcasegroup, TestCaseExclusionList)
if (run_flag):
nb_run_testcases+=1
if testcaseclass == 'lte-softmodem' :
nb_total_testcases_softmodem+=1
if (run_flag): nb_run_testcases_softmodem+=1
elif testcaseclass == 'lte-softmodem-noS1':
nb_total_testcases_softmodem_noS1+=1
if (run_flag): nb_run_testcases_softmodem_noS1+=1
elif testcaseclass == 'compilation':
nb_total_testcases_compilation+=1
if (run_flag): nb_run_testcases_compilation+=1
elif testcaseclass == 'execution':
nb_total_testcases_execution+=1
if (run_flag): nb_run_testcases_execution+=1
else :
print "Unknown test case class: " + testcaseclass
sys.exit()
except Exception, e:
error=''
error = error + ' In function: ' + sys._getframe().f_code.co_name + ': *** Caught exception: ' + str(e.__class__) + " : " + str( e)
error = error + '\n testcasename = ' + testcasename + '\n testcaseclass = ' + testcaseclass + '\n desc = ' + desc + '\n'
error = error + traceback.format_exc()
print error
print "Continuing to next test case..."
print (Fore.WHITE + " Parsing done...")
print (Fore.WHITE + " - Total number of test cases : "+str(nb_total_testcases))
print (Fore.WHITE + " - Total number class softmodem : "+str(nb_total_testcases_softmodem))
print (Fore.WHITE + " - Total number class softmodem-noS1 : "+str(nb_total_testcases_softmodem_noS1))
print (Fore.WHITE + " - Total number class compilation : "+str(nb_total_testcases_compilation))
print (Fore.WHITE + " - Total number class execution : "+str(nb_total_testcases_execution))
print
print (Fore.WHITE + " - Selected number of test cases : "+str(nb_run_testcases))
print (Fore.WHITE + " - Selected class softmodem : "+str(nb_run_testcases_softmodem))
print (Fore.WHITE + " - Selected class softmodem_noS1 : "+str(nb_run_testcases_softmodem_noS1))
print (Fore.WHITE + " - Selected class compilation : "+str(nb_run_testcases_compilation))
print (Fore.WHITE + " - Selected class execution : "+str(nb_run_testcases_execution))
# **************************************************************************
# **************************************************************************
# STEP 3 - TEST CAMPAIGNS
# **************************************************************************
# **************************************************************************
test_results = []
print (Fore.YELLOW + '\nStep 3 - TEST CAMPAIGNS')
print (Fore.YELLOW + '-------------------------------------------------')
print (Fore.WHITE + " Start test campaign...")
threadListGlobal=[]
run_count = 0
for testcase in testcaseList:
try:
testcasename = testcase.get('id')
testcaseclass = testcase.findtext('class',default='')
desc = testcase.findtext('desc',default='')
#print "Machine list top level = " + ','.join(MachineList)
if search_test_case_group(testcasename, testcasegroup, TestCaseExclusionList) == True:
run_count+=1
print (Fore.WHITE + " ("+str(run_count).zfill(3)+"/"+str(nb_run_testcases).zfill(3)+") - test case "+testcasename+" : "),
if testcaseclass == 'lte-softmodem' :
#First we wait for all the test cases in generic test case class to finish as they are running in parallel
threadListGlobal = wait_testcaseclass_generic_threads(threadListGlobal, Timeout_execution)
......@@ -2266,6 +2558,22 @@ for param in threadListGlobal:
thread_id = param["thread_id"]
thread_id.join()
# **************************************************************************
# **************************************************************************
# STEP 4 - TEST REPORT PRODUCTION
# **************************************************************************
# **************************************************************************
test_session_stop_time = datetime.datetime.now()
print (Fore.YELLOW + '\nStep 4 - TEST REPORT PRODUCTION')
print (Fore.YELLOW + '-------------------------------------------------')
# XML test report
# --------------------------------------------------------------------
print "Creating xml file for overall results..."
cmd = "cat $OPENAIR_DIR/cmake_targets/autotests/log/*/*.xml > $OPENAIR_DIR/cmake_targets/autotests/log/results_autotests.xml "
res=os.system(cmd)
......@@ -2284,4 +2592,43 @@ res = oai_localhost.send_recv(cmd)
oai_localhost.disconnect()
# HTML test report
# --------------------------------------------------------------------
if flag_generate_html_report:
print "Creating html test report..."
report_dir = reportdir + '/'+ test_session_start_time.strftime("%Y-%m-%d_%H-%M")+"_"+host
cmd = 'mkdir -p ' + report_dir
result = os.system(cmd)
cmd = 'cp -r '+locallogdir+' '+report_dir
result = os.system(cmd)
context = {
'report_path' : report_dir,
'test_session_start_time' : test_session_start_time,
'test_session_stop_time' : test_session_stop_time,
'test_session_duration' : test_session_stop_time-test_session_start_time,
'mtc_host' : host,
'user' : user,
'password' : pw,
'test_results' : test_results,
}
for test_result in test_results:
cmd = 'mkdir -p ' + report_dir + '/'+ test_result['testcase_name']
result = os.system(cmd)
report_file = report_dir + '/'+ test_result['testcase_name'] + '/'+ test_result['testcase_name']+ '_report.html'
analyser.create_test_report_detailed_html(test_result, report_file )
# print test_result
analyser.create_report_html(context)
sys.exit()
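The two HTML files below are Jinja2 templates ({{ ... }} placeholders, {% ... %} loops). A minimal sketch of how analyser.create_report_html could render one of them with the context dict built above, assuming the template location and name (this is not the actual lib_autotest_analyser implementation):

from jinja2 import Environment, FileSystemLoader

def create_report_html_sketch(context, template_dir, template_name):
    env = Environment(loader=FileSystemLoader(template_dir))
    template = env.get_template(template_name)  # e.g. the campaign report template below
    html = template.render(**context)           # context carries test_results, timings, ...
    with open(context['report_path'] + '/report.html', 'w') as f:
        f.write(html)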
[One file's diff is omitted here: it was too large for the diff view.]
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8"/>
<title>OAI5G UE Autotest Report</title>
<script type="text/javascript">
function showhide(id) {
var e = document.getElementById(id);
e.style.display = (e.style.display == 'block') ? 'none' : 'block';
}
</script>
</head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
padding: 5px;
}
</style>
<body>
<center>
<h2>OAI5G UE Autotest Report</h2>
</center>
<p>
<table border>
<caption>Test session configuration</caption>
<tr><td>Start time</td><td>{{test_session_start_time}}</td></tr>
<tr><td>Stop time</td><td>{{test_session_stop_time}}</td></tr>
<tr><td>Duration</td><td>{{test_session_duration}}</td></tr>
<tr><td>MTC host</td><td>{{mtc_host}}</td></tr>
<tr><td>User</td><td>{{user}}</td></tr>
<tr><td>Password</td><td>{{password}}</td></tr>
</table>
</p>
<h3>Test Setup</h3>
To be completed
<br></br>
<h3>UE phy-test performances tests results</h3>
<h4>Objectives</h4>
<p>Checks that OAI UE can achieve at least 75 percent of the theoretical throughput.</p>
<p>Tests are done for all MCS (0 to 28) for 5MHz and 10MHz bandwidth.</p>
<h4>Results</h4>
<table>
<TR><TH>ID</TH><TH>TAG</TH><TH>VERDICT</TH><TH>NB RUNS</TH><TH>PASS</TH><TH>FAILED</TH><TH>INCON</TH><TH>SKIPPED</TH><TH>SEG FAULT</TH><TH>TC Timeout</TH><TH>Start</TH><TH>Stop</TH><TH>Duration</TH><TH>Details</TH></TR>
{% for result in test_results|sort(attribute='testcase_name') %}
<TR>
<TD >{{result.testcase_name}}</TD>
<TD align="right">{{result.tags}}</TD>
{% if result.testcase_verdict == "PASS" %}
<TD align="center" style="background-color:green">{{result.testcase_verdict}}</TD>
{% elif result.testcase_verdict == "FAIL" %}
<TD align="center" style="background-color:red">{{result.testcase_verdict}}</TD>
{% else %}
<TD align="center" style="background-color:orange">{{result.testcase_verdict}}</TD>
{% endif %}
<TD align='center'>{{result.nruns}}</TD>
<TD align='center'>{{result.nb_run_pass}}</TD>
<TD align='center'>{{result.nb_run_failed}}</TD>
<TD align='center'>{{result.nb_run_inc}}</TD>
<TD align='center'>{{result.nb_run_skip}}</TD>
<TD align='center'>{{result.nb_seg_fault}}</TD>
<TD >{{result.testcase_timeout}}</TD>
<TD >{{result.testcase_time_start.strftime('%Y-%m-%d %H:%M:%S')}}</TD>
<TD >{{result.testcase_time_stop.strftime('%Y-%m-%d %H:%M:%S')}}</TD>
<TD >{{result.testcase_duration}}</TD>
<TD ><a href="{{ result.testcase_name }}/{{ result.testcase_name }}_report.html">{{ result.testcase_name }}_report.html</a></TD>
</TR>
{% endfor %}
</table>
<br></br>
<h3>UE phy-test stability tests results</h3>
<h4>Objectives</h4>
<p>To be completed</p>
<h4>Results</h4>
To be completed
</body>
</html>
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8"/>
<title>OAI5G UE test case report</title>
<script type="text/javascript">
function showhide(id) {
var e = document.getElementById(id);
e.style.display = (e.style.display == 'block') ? 'none' : 'block';
}
</script>
</head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
padding: 5px;
}
</style>
<body>
<center>
<h2>OAI5G UE test case report details</h2>
</center>
<h3>Test Case description</h3>
<p>
<table border>
<tr><td>ID</td><td>{{testcase_name}}</td></tr>
<tr><td>TAG</td><td>{{tags}}</td></tr>
<tr><td>class</td><td>{{testcaseclass}}</td></tr>
<tr><td>description</td><td></td></tr>
<tr><td>timeout</td><td>{{testcase_timeout}}</td></tr>
<tr><td>number of runs</td><td>{{nruns}}</td></tr>
<tr><td>eNB machine</td><td>{{testcase_eNBMachine}}</td></tr>
<tr><td>UE machine</td><td>{{testcase_UEMachine}}</td></tr>
</table>
</p>
<h3>Test Case execution</h3>
<p>
<table border>
<tr><td>testcase_time_start</td><td>{{testcase_time_start}}</td></tr>
<tr><td>testcase_time_stop</td><td>{{testcase_time_stop}}</td></tr>
<tr><td>testcase_duration</td><td>{{testcase_duration}}</td></tr>
<tr><td>Nb runs</td><td>{{nruns}}</td></tr>
<tr><td>Nb PASS</td><td>{{nb_run_pass}}</td></tr>
<tr><td>Nb FAILED</td><td>{{nb_run_failed}}</td></tr>
<tr><td>Nb INCONCLUSIVE</td><td>{{nb_run_inc}}</td></tr>
<tr>
<td>testcase_verdict</td>
{% if testcase_verdict == "PASS" %}
<TD align="center" style="background-color:green">{{testcase_verdict}}</TD>
{% elif testcase_verdict == "FAIL" %}
<TD align="center" style="background-color:red">{{testcase_verdict}}</TD>
{% else %}
<TD align="center" style="background-color:orange">{{testcase_verdict}}</TD>
{% endif %}
</tr>
<tr><td>Nb Seg Fault</td><td>{{nb_seg_fault}}</td></tr>
</table>
</p>
<h3>Test Case runs results</h3>
{% for run_results in runs_results|sort(attribute='run_id') %}
<h4>RUN {{run_results.run_id}} </h4>
<table border>
<tr><td>run_start_time </td><td>{{run_results.run_start_time}}</td></tr>
<tr><td>run_stop_time</td><td>{{run_results.run_stop_time}}</td></tr>
<tr><td>run_duration</td><td>{{run_results.run_duration}}</td></tr>
<tr>
<td>run_verdict</td>
{% if run_results.run_verdict == "PASS" %}
<TD align="center" style="background-color:green">{{run_results.run_verdict}}</TD>
{% elif run_results.run_verdict == "FAIL" %}
<TD align="center" style="background-color:red">{{run_results.run_verdict}}</TD>
{% else %}
<TD align="center" style="background-color:orange">{{run_results.run_verdict}}</TD>
{% endif %}
</tr>
<tr><td>Seg Fault Status</td>
{% if run_results.ue_seg_fault_status == "NO_SEG_FAULT" %}
<TD align="center" style="background-color:green">{{run_results.ue_seg_fault_status}}</TD>
{% elif run_results.ue_seg_fault_status == "SEG_FAULT" %}
<TD align="center" style="background-color:red">{{run_results.ue_seg_fault_status}}</TD>
{% else %}
<TD align="center" style="background-color:orange">unknown</TD>
{% endif %}
</tr>
</table>
{% for run_metrics in run_results.runs_metrics %}
<br/>
<table border>
<tr><td>metric_id</td><td>{{run_metrics.metric_id}}</td></tr>
<tr><td>Description</td><td>{{run_metrics.metric_desc}}</td></tr>
<tr><td>Unit of measure</td><td>{{run_metrics.metric_uom}}</td></tr>
<tr><td>metric_min</td><td>{{run_metrics.metric_min}}</td></tr>
<tr><td>metric_max</td><td>{{run_metrics.metric_max}}</td></tr>
<tr><td>metric_mean</td><td>{{run_metrics.metric_mean}}</td></tr>
<tr><td>metric_median</td><td>{{run_metrics.metric_median}}</td></tr>
<tr><td colspan="2"></td></tr>
{% if run_metrics.pass_fail_stat is defined %}
<tr><td>Pass/fail stat</td><td>{{run_metrics.pass_fail_stat}}</td></tr>
{% endif %}
{% if run_metrics.pass_fail_min_limit is defined %}
<tr><td>Pass/fail min limit</td><td>{{run_metrics.pass_fail_min_limit}}</td></tr>
{% endif %}
{% if run_metrics.pass_fail_max_limit is defined %}
<tr><td>Pass/fail max limit</td><td>{{run_metrics.pass_fail_max_limit}}</td></tr>
{% endif %}
<tr><td colspan="2"></td></tr>
<tr><td>metric_fig</td><td><IMG src="{{run_metrics.metric_fig}}"></td></tr>
</table>
{% endfor %}
{% if run_results.run_traffic.traffic_count != 0 %}
<br/>
<table border>
<TR><TH>Iperf metric</TH><TH>min</TH><TH>max</TH><TH>mean</TH><TH>median</TH></TR>
<TR><td>Bandwidth</td><td>{{run_results.run_traffic.bw_min}}</td><td>{{run_results.run_traffic.bw_max}}</td><td>{{run_results.run_traffic.bw_mean}}</td><td>{{run_results.run_traffic.bw_median}}</td></TR>
<TR><td>Jitter</td><td>{{run_results.run_traffic.jitter_min}}</td><td>{{run_results.run_traffic.jitter_max}}</td><td>{{run_results.run_traffic.jitter_mean}}</td><td>{{run_results.run_traffic.jitter_median}}</td></TR>
<TR><td>Loss rate</td><td>{{run_results.run_traffic.rl_min}}</td><td>{{run_results.run_traffic.rl_max}}</td><td>{{run_results.run_traffic.rl_mean}}</td><td>{{run_results.run_traffic.rl_median}}</td></TR>
<TR><td colspan="5"></td></TR>
<TR><td>Iperf duration</td><td>{{run_results.run_traffic.iperf_duration}}</td><td></td><td>Pass/Fail criteria (min duration)</td><td>{{run_results.run_traffic.dur_pass_fail_crit}}</td></TR>
<TR><td colspan="5"></td></TR>
<tr><td>traffic_fig</td><td colspan="4"><IMG src="{{run_results.run_traffic.traffic_fig}}"></td></tr>
</table>
{% endif %}
{% endfor %}
</body>
</html>
......@@ -35,12 +35,15 @@ import getopt
import sys
from subprocess import call
import encoder
sys.path.append(os.path.expandvars('$OPENAIR_DIR/cmake_targets/autotests/tools/'))
#test_cases = ('030001', '030901', '031001', '031601', '031701', '031801', '031901', '032001', '032101', '032201', '032301', '032501', '032601', '032801')
test_cases = ('030030' , '030030' )
test_cases = ('032800' , '032730' )
nb_run = 3
nb_run = 2
def error_opt(msg):
    print("Option error: " + msg)
......@@ -58,14 +61,27 @@ def main(args):
#    metric = {}
#    metric['id'] = 'UE_DLSCH_BITRATE'
#    metric['description'] = 'UE downlink physical throughput'
#    metric['regex'] = '(UE_DLSCH_BITRATE) =\s+(\d+\.\d+) kbps.+frame = (\d+)\)'
#    metric['unit_of_meas'] = 'kbps'
#    metric['min_limit'] = 14668.8

    # AUTOTEST Metric : RRC Measurments RSRP[0]=-97.60 dBm/RE, RSSI=-72.83 dBm, RSRQ[0] 9.03 dB, N0 -125 dBm/RE, NF 7.2 dB (frame = 4490)
    metric = {}
    metric['id'] = 'UE_DLSCH_BITRATE'
    metric['description'] = 'UE downlink physical throughput'
    metric['regex'] = '(UE_DLSCH_BITRATE) =\s+(\d+\.\d+) kbps.+frame = (\d+)\)'
    metric['id'] = 'UE_DL_RRC_MEAS'
    metric['description'] = 'UE downlink RRC Measurements'
    metric['nb_metric'] = 5
#    metric['regex'] = 'AUTOTEST Metric : RRC Measurments (RSRP\[0\])=(-?\d+\.?\d*)\s+(.+),\s+(RSRQ\[0\])=(-?\d+\.?\d*)\s+(.+),,\s+(N0)=(-?\d+\.?\d*)\s+(.+),,\s+(NF)=(-?\d+\.?\d*)\s+(.+)\s+\(frame = (\d+)\) '
    metric['regex'] = 'AUTOTEST Metric : RRC Measurments (RSRP\[0\])=(-?\d+\.?\d*)\s+(.+)\,\s+(RSSI)=(-?\d+\.?\d*)\s+(.+)\,\s+(RSRQ\[0\])=(-?\d+\.?\d*)\s+(.+)\,\s+(N0)=(-?\d+\.?\d*)\s+(.+)\,\s+(NF)=(-?\d+\.?\d*)\s+(.+)\s+\(frame = (\d+)\)'
    metric['unit_of_meas'] = 'kbps'
    metric['min_limit'] = 14668.8
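As a standalone sanity check of the new RRC-measurement regex, a hedged snippet follows. The log line is synthetic, shaped so that every field carries an '=' between name and value, which the pattern requires; note that the sample line quoted in the comment above uses a bare space for RSRQ, N0 and NF, and would not match the pattern as written.

```python
import re

# Synthetic line (illustrative); the pattern is the one assigned above, as a raw string.
line = 'AUTOTEST Metric : RRC Measurments RSRP[0]=-97.60 dBm/RE, RSSI=-72.83 dBm, RSRQ[0]=9.03 dB, N0=-125 dBm/RE, NF=7.2 dB (frame = 4490)'
rex = r'AUTOTEST Metric : RRC Measurments (RSRP\[0\])=(-?\d+\.?\d*)\s+(.+)\,\s+(RSSI)=(-?\d+\.?\d*)\s+(.+)\,\s+(RSRQ\[0\])=(-?\d+\.?\d*)\s+(.+)\,\s+(N0)=(-?\d+\.?\d*)\s+(.+)\,\s+(NF)=(-?\d+\.?\d*)\s+(.+)\s+\(frame = (\d+)\)'
m = re.search(rex, line)
print(m.groups())  # 16 groups: 5 x (name, value, unit) plus the frame number
```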
#report_path = log_path+'/report/'
#os.system(' mkdir -p ' + report_path)
......@@ -74,58 +90,44 @@ def main(args):
#return(0)
    for test_case in test_cases:
        # print test_case
        if test_case == '030001':
            metric['min_limit'] = 500.0
        if test_case == '030901':
            metric['min_limit'] = 640.0
        if test_case == '031001':
            metric['min_limit'] = 3200.0
        if test_case == '031601':
            metric['min_limit'] = 5920.0
        if test_case == '031701':
            metric['min_limit'] = 6000.0
        if test_case == '031801':
            metric['min_limit'] = 6200.0
        if test_case == '031901':
            metric['min_limit'] = 7000.0
        if test_case == '032001':
            metric['min_limit'] = 7800.0
        if test_case == '032101':
            metric['min_limit'] = 8000.0
        if test_case == '032201':
            metric['min_limit'] = 9000.0
        if test_case == '032301':
            metric['min_limit'] = 10000.0
        if test_case == '032501':
            metric['min_limit'] = 11000.0
        if test_case == '032601':
            metric['min_limit'] = 12000.0
        if test_case == '032801':
            metric['min_limit'] = 12500.0
        if test_case == '035201':
            metric['min_limit'] = 14668.8
        if test_case == '036001':
            metric['min_limit'] = 25363.2
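The removed if-chain amounts to a per-test-case lookup table. An equivalent sketch, using the same thresholds, is shown below; this refactor is hypothetical and not part of the commit:

```python
# Same thresholds as the chain above, keyed by test case id (sketch only).
MIN_LIMITS_KBPS = {
    '030001': 500.0,   '030901': 640.0,   '031001': 3200.0,
    '031601': 5920.0,  '031701': 6000.0,  '031801': 6200.0,
    '031901': 7000.0,  '032001': 7800.0,  '032101': 8000.0,
    '032201': 9000.0,  '032301': 10000.0, '032501': 11000.0,
    '032601': 12000.0, '032801': 12500.0, '035201': 14668.8,
    '036001': 25363.2,
}
metric['min_limit'] = MIN_LIMITS_KBPS.get(test_case, metric['min_limit'])
```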
    test_results = []
    for test_case in test_cases:
        for i in range(0, nb_run):
            fname = 'log//'+test_case+'/run_'+str(i)+'/UE_exec_'+str(i)+'_.log'
            fname = '..//log//'+test_case+'/run_'+str(i)+'/UE_exec_'+str(i)+'_.log'
            args = {'metric' : metric,
                    'file'   : fname }

            cell_synch_status = analyser.check_cell_synchro(fname)
            if cell_synch_status == 'CELL_SYNCH':
                print '!!!!!!!!!!!!!! Cell synchronized !!!!!!!!!!!'
                metric_checks_flag = 0
            else :
                print '!!!!!!!!!!!!!! Cell NOT synchronized !!!!!!!!!!!'
#            cell_synch_status = analyser.check_cell_synchro(fname)
#            if cell_synch_status == 'CELL_SYNCH':
#                print '!!!!!!!!!!!!!! Cell synchronized !!!!!!!!!!!'
#                metric_checks_flag = 0
#            else :
#                print '!!!!!!!!!!!!!! Cell NOT synchronized !!!!!!!!!!!'

#            metric_extracted = analyser.do_extract_metrics(args)
#            metrics_extracted = analyser.do_extract_metrics_new(args)

            # de-xmlfy test report
            xml_file = '..//log//'+test_case+'/test.'+test_case+'_ng.xml'
            print xml_file

#            test_result =
#            test_results.append(test_result)

#            xmlFile = logdir_local_testcase + '/test.' + testcasename + '.xml'
#            xml = "\n<testcase classname=\'"+ testcaseclass + "\' name=\'" + testcasename + "."+tags + "\' Run_result=\'" + test_result_string + "\' time=\'" + str(duration) + " s \' RESULT=\'" + testcase_verdict + "\'></testcase> \n"
#            write_file(xmlFile, xml, mode="w")

#            xmlFile_ng = logdir_local_testcase + '/test.' + testcasename + '_ng.xml'
#            xml_ng = xmlify(test_result, wrap=testcasename, indent=" ")
#            write_file(xmlFile_ng, xml_ng, mode="w")
# print "min = "+ str( metric_extracted['metric_min'] )
# print "min_index = "+ str( metric_extracted['metric_min_index'] )
......@@ -143,16 +145,27 @@ def main(args):
# print fname
# analyser.do_img_metrics(metric, metric_extracted, fname)
# fname = 'log//'+test_case+'/run_'+str(i)+'/UE_traffic_'+str(i)+'_.log'
# fname = 'log//'+test_case+'/run_'+str(i)+'/UE_traffic_'+str(i)+'_.log'
# args = {'file' : fname }
# args = {'file' : fname }
# traffic_metrics = analyser.do_extract_traffic_metrics(args)
# traffic_metrics = analyser.do_extract_traffic_metrics(args)
# fname= 'report/iperf_'+test_case+'_'+str(i)+'.png'
# fname= 'report/iperf_'+test_case+'_'+str(i)+'.png'
# print fname
# analyser.do_img_traffic(traffic_metrics, fname)
# print fname
# analyser.do_img_traffic(traffic_metrics, fname)
    for test_result in test_results:
        cmd = 'mkdir -p ' + report_dir + '/' + test_result['testcase_name']
        result = os.system(cmd)

        report_file = report_dir + '/' + test_result['testcase_name'] + '/' + test_result['testcase_name'] + '_report.html'
        analyser.create_test_report_detailed_html(test_result, report_file)

        print test_result
......
......@@ -41,7 +41,7 @@ from jinja2 import Environment, FileSystemLoader
PATH = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_ENVIRONMENT = Environment(
autoescape=False,
    loader=FileSystemLoader(os.path.join(PATH, 'templates')),
    loader=FileSystemLoader(os.path.join(PATH, '../templates')),
trim_blocks=False)
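For context, a hedged example of how such an Environment is typically used to render the report templates shown earlier in this diff. The template file name and context keys below are illustrative, not taken from the repository:

```python
def render_template(template_filename, context):
    # Resolves the file through the FileSystemLoader configured above.
    return TEMPLATE_ENVIRONMENT.get_template(template_filename).render(context)

# Hypothetical call:
html = render_template('testcase_report.html',
                       {'testcase_name': '030001', 'nruns': 2})
```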
......@@ -103,6 +103,129 @@ def do_extract_metrics(args):
          }
    return(ret)
def do_extract_metrics_new(args):
#    print ""
#    print "do_extract_metrics ... "

    fname = args['file']
    metric = args['metric']

    print(fname)
    print 'metric id = ' + metric['id']
    print 'metric regex = ' + metric['regex']

    count = 0
    mmin = 0
    mmin_index = 0
    mmax = 0
    mmax_index = 0
    mean = 0
    median = 0

    toto = [('id', 'S20'), ('metric', np.float), ('frame', np.int)]
    print toto

    np_format = []
    for x in range(0, metric['nb_metric']):
        np_format.append( ('id'+str(x), 'S20') )
        np_format.append( ('metric'+str(x), np.float) )
        np_format.append( ('uom'+str(x), 'S20') )
    np_format.append( ('frame', np.int) )
    print np_format

    output = np.fromregex(fname, metric['regex'], np_format)
    print output

    count = output['frame'].size
    print count

    if count > 0:
        fontP = FontProperties()
        fontP.set_size('small')

        fig = plt.figure(1)
        plt.figure(figsize=(10,10))
        plot_xmax = np.amax(output['frame']) + np.amin(output['frame'])

        for x in range(0, metric['nb_metric']):
            metric_name = output['id'+str(x)][0]
            metric_uom = output['uom'+str(x)][0]

            mmin = np.amin(output['metric'+str(x)])
            mmax = np.amax(output['metric'+str(x)])
            mmean = np.mean(output['metric'+str(x)])
            mmedian = np.median(output['metric'+str(x)])

            plot_loc = 100*metric['nb_metric'] + 10 + x + 1
            sbplt = plt.subplot(plot_loc)
            sbplt.plot(output['frame'], output['metric'+str(x)], color='b')
            sbplt.set_title(metric_name + ' (' + metric_uom + ')')
            if mmin < 0:
                sbplot_ymin = mmin + mmin/10
            else:
                sbplot_ymin = 0
            sbplt.set_ylim(ymin=sbplot_ymin)
            if mmax > 0:
                sbplot_ymax = mmax + mmax/10
            else:
                sbplot_ymax = 0
            sbplt.set_ylim(ymax=sbplot_ymax)
            sbplt.set_xlim(xmax=plot_xmax)
            sbplt.set_xlim(xmin=0)
            text = 'min: '+str(mmin)+'\nmax: '+str(mmax)+'\nmean: '+str(mmean)+'\nmedian: '+str(mmedian)
            sbplt.text(plot_xmax+10, sbplot_ymin, text)
            sbplt.set_xlabel('frame')
            sbplt.set_ylabel(metric_name)

        plt.tight_layout()
        fname = "toto.png"
#        lgd = plt.legend(prop=fontP, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
        mng = plt.get_current_fig_manager()
        plt.savefig(fname, bbox_inches='tight')
        plt.close()

        mmin = np.amin(output['metric'])
        mmin_index = np.argmin(output['metric'])
        mmax = np.amax(output['metric'])
        mmax_index = np.argmax(output['metric'])
        mean = np.mean(output['metric'])
        median = np.median(output['metric'])
#        print ( ( (metric['min_limit'] > output['metric']).sum() / float(output['metric'].size) ) * 100 )

    ret = { 'metric_count'     : count,
            'metric_buf'       : output,
            'metric_min'       : mmin,
            'metric_min_index' : mmin_index,
            'metric_max'       : mmax,
            'metric_max_index' : mmax_index,
            'metric_mean'      : mean,
            'metric_median'    : median,
          }
    return(ret)
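The np.fromregex parsing path used above can be exercised standalone. A minimal sketch follows, using the UE_DLSCH_BITRATE regex from the driver script; the file name and sample values are invented, and np.float/np.int are spelled np.float64/np.int64 here since the bare aliases are removed in current NumPy:

```python
import numpy as np

# Two fake log lines in the shape emitted by the UE_AUTOTEST_TRACE LOG_I.
with open('demo_ue.log', 'w') as f:
    f.write('AUTOTEST Metric : UE_DLSCH_BITRATE = 6123.40 kbps (frame = 100)\n')
    f.write('AUTOTEST Metric : UE_DLSCH_BITRATE = 6250.00 kbps (frame = 200)\n')

np_format = [('id', 'S20'), ('metric', np.float64), ('frame', np.int64)]
rex = r'(UE_DLSCH_BITRATE) =\s+(\d+\.\d+) kbps.+frame = (\d+)\)'
output = np.fromregex('demo_ue.log', rex, np_format)

# One record per matched line; fields are addressed by dtype name.
print(output['metric'].size, np.amin(output['metric']), np.amax(output['metric']))
```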
#
#
#
......@@ -182,24 +305,30 @@ def do_img_metrics(metric_def, metric_data, fname):
# print output['metric'].size
    plt.scatter(output['frame'], output['metric'], color='b', alpha=0.33, s = 1 , label=metric_def['id'])

    plt.plot([0, output['frame'][metric_data['metric_count']-1]],[ metric_def['min_limit'],metric_def['min_limit']], 'r-', lw=2, label='min limit') # Red straight line
    if 'min_limit' in metric_def:
        plt.plot([0, output['frame'][metric_data['metric_count']-1]],[ metric_def['min_limit'],metric_def['min_limit']], 'r-', lw=2, label='min limit') # Red straight line

    plt.title('Physical throughput ('+metric_def['unit_of_meas']+')')
    plt.title(metric_def['id'] +' ('+metric_def['unit_of_meas']+')')
    plt.xlabel('frame')
    plt.ylabel(metric_def['id'])

    # Set graphic minimum Y axis
    # -------------------------
    if metric_data['metric_min'] == 0 :
        plt.ylim(ymin=-metric_def['min_limit']/10)
    if metric_data['metric_min'] < 0:
        plt.ylim(ymin=metric_data['metric_min']+metric_data['metric_min']/10)
    else :
        plt.ylim(ymin=0)

    y_axis_max = 0
    if metric_data['metric_max'] > metric_def['min_limit']:
        y_axis_max = metric_data['metric_max']+metric_data['metric_max']/10
    if 'min_limit' in metric_def:
        if metric_data['metric_max'] > metric_def['min_limit']:
            y_axis_max = metric_data['metric_max']+metric_data['metric_max']/10
        else:
            y_axis_max = metric_def['min_limit']+metric_def['min_limit']/10
    else:
        y_axis_max = metric_def['min_limit']+metric_def['min_limit']/10
        y_axis_max = metric_data['metric_max']+metric_data['metric_max']/10

    plt.ylim(ymax=y_axis_max)
......@@ -219,9 +348,6 @@ def do_img_metrics(metric_def, metric_data, fname):
def do_extract_traffic_metrics(args):
print ""
print "do_extract_traffic_metrics ... "
fname = args['file']
# print(fname)
......@@ -325,7 +451,8 @@ def do_img_traffic(traffic_data, fname):
ax1.set_xlim(xmax=np.amax(output['interval_stop']))
text='min: '+str(traffic_data['bw_min'])+'\nmax: '+str(traffic_data['bw_max'])+'\nmean: '+str(traffic_data['bw_mean'])+'\nmedian: '+str(traffic_data['bw_median'])
ax1.text( np.amax(output['interval_stop'])+10,0,text)
ax1.set_xlabel('time (s)')
ax1.set_ylabel(' ')
ax2=plt.subplot(312)
plt.plot(output['interval_stop'], output['jitter'], color='b' )
......@@ -334,6 +461,8 @@ def do_img_traffic(traffic_data, fname):
ax2.set_ylim(ymin=-1)
text='min: '+str(traffic_data['jitter_min'])+'\nmax: '+str(traffic_data['jitter_max'])+'\nmean: '+str(traffic_data['jitter_mean'])+'\nmedian: '+str(traffic_data['jitter_median'])
ax2.text( np.amax(output['interval_stop'])+10,0,text)
ax2.set_xlabel('time (s)')
ax2.set_ylabel(' ')
ax3=plt.subplot(313)
plt.plot(output['interval_stop'], output['rate_lost'], color='b')
......@@ -342,10 +471,11 @@ def do_img_traffic(traffic_data, fname):
ax3.set_ylim(ymin=-1)
text='min: '+str(traffic_data['rl_min'])+'\nmax: '+str(traffic_data['rl_max'])+'\nmean: '+str(traffic_data['rl_mean'])+'\nmedian: '+str(traffic_data['rl_median'])
ax3.text( np.amax(output['interval_stop'])+10,0,text)
ax3.set_xlabel('time (s)')
ax3.set_ylabel(' ')
# plt.title('Physical throughput ('+metric_def['unit_of_meas']+')')
plt.xlabel('time (s)')
# plt.xlabel('time (s)')
# plt.ylabel(metric_def['id'])
# Set graphic minimum Y axis
......@@ -365,7 +495,7 @@ def do_img_traffic(traffic_data, fname):
plt.tight_layout()
lgd = plt.legend(prop=fontP, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# lgd = plt.legend(prop=fontP, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
mng = plt.get_current_fig_manager()
plt.savefig(fname, bbox_inches='tight')
plt.close()
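The axis-label additions above give each of the three iperf panels its own 'time (s)' label instead of a single figure-level one. A self-contained sketch of that layout with fake data follows; all names and values here are illustrative, not from the OAI tree:

```python
import matplotlib
matplotlib.use('Agg')  # headless backend, as assumed on the autotest host
import matplotlib.pyplot as plt

t = range(0, 60, 10)  # fake 10 s iperf reporting intervals
panels = [('bandwidth', [5.1, 5.0, 5.2, 5.1, 5.0, 5.1]),
          ('jitter',    [0.3, 0.2, 0.4, 0.3, 0.3, 0.2]),
          ('rate_lost', [0.0, 0.0, 0.1, 0.0, 0.0, 0.0])]

for idx, (name, series) in enumerate(panels, start=1):
    ax = plt.subplot(310 + idx)      # 311, 312, 313: one row per metric
    ax.plot(t, series, color='b')
    ax.set_xlabel('time (s)')        # per-panel label, as in the diff above
    ax.set_ylabel(name)
plt.tight_layout()
plt.savefig('iperf_demo.png', bbox_inches='tight')
plt.close()
```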
......@@ -395,7 +525,7 @@ def check_cell_synchro(fname):
        m = re.search('AUTOTEST Cell Sync \:', line)
        if m :
            print line
#            print line
            return 'CELL_SYNCH'
    return 'CELL_NOT_SYNCH'
......@@ -407,7 +537,7 @@ def check_exec_seg_fault(fname):
    for line in f:
        m = re.search('Segmentation fault', line)
        if m :
            print line
#            print line
            return 'SEG_FAULT'
    return 'NO_SEG_FAULT'
......
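Both checks return string sentinels ('CELL_SYNCH'/'CELL_NOT_SYNCH', 'SEG_FAULT'/'NO_SEG_FAULT') rather than booleans. A hedged usage sketch, mirroring how the driver script above consumes them; the log path is invented:

```python
fname = '..//log//030001/run_0/UE_exec_0_.log'  # illustrative path
if analyser.check_cell_synchro(fname) == 'CELL_SYNCH':
    print('cell synchronized')
if analyser.check_exec_seg_fault(fname) == 'SEG_FAULT':
    print('UE softmodem crashed during this run')
```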
......@@ -54,6 +54,7 @@ BUILD_DOXYGEN=0
T_TRACER="False"
DISABLE_HARDWARE_DEPENDENCY="False"
CMAKE_BUILD_TYPE=""
UE_AUTOTEST_TRACE="False"
trap handle_ctrl_c INT
function print_help() {
......@@ -130,6 +131,8 @@ Options
      Enables the T tracer.
   --disable-hardware-dependency
      Disable HW dependency during installation
   --ue-autotest-trace
      Enable specific traces for UE autotest framework
Usage (first build):
oaisim (eNB + UE): ./build_oai -I --oaisim -x --install-system-files
Eurecom EXMIMO + COTS UE : ./build_oai -I --eNB -x --install-system-files
......@@ -285,6 +288,10 @@ function main() {
echo_info "Disabling hardware dependency for compiling software"
DISABLE_HARDWARE_DEPENDENCY="True"
shift 1;;
--ue-autotest-trace)
UE_AUTOTEST_TRACE="True"
echo_info "Enabling autotest specific trace for UE"
shift 1;;
-h | --help)
print_help
exit 1;;
......@@ -457,6 +464,7 @@ function main() {
echo "set (DEADLINE_SCHEDULER \"${DEADLINE_SCHEDULER_FLAG_USER}\" )" >>$cmake_file
echo "set (CPU_AFFINITY \"${CPU_AFFINITY_FLAG_USER}\" )" >>$cmake_file
echo "set ( T_TRACER $T_TRACER )" >> $cmake_file
echo "set (UE_AUTOTEST_TRACE $UE_AUTOTEST_TRACE)" >> $cmake_file
echo 'include(${CMAKE_CURRENT_SOURCE_DIR}/../CMakeLists.txt)' >> $cmake_file
cd $DIR/$lte_build_dir/build
cmake ..
......
......@@ -972,6 +972,11 @@ unsigned char phy_threegpplte_turbo_decoder8(short *y,
  } else
    n2 = n;

  if (n2<256)
  {
    printf("phy_threegpplte_turbo_decoder8 : frame length < 256\n");
    return 255;
  }

  for (iind=0; iind < 188 && f1f2mat[iind].nb_bits != n; iind++);
......
......@@ -476,6 +476,15 @@ int initial_sync(PHY_VARS_UE *ue, runmode_t mode)
  //#endif

  if (ue->UE_scan_carrier == 0) {

    #if UE_AUTOTEST_TRACE
    LOG_I(PHY,"[UE %d] AUTOTEST Cell Sync : frame = %d, rx_offset %d, freq_offset %d \n",
          ue->Mod_id,
          ue->proc.proc_rxtx[0].frame_rx,
          ue->rx_offset,
          ue->common_vars.freq_offset );
    #endif

    if (ue->mac_enabled==1) {
      LOG_I(PHY,"[UE%d] Sending synch status to higher layers\n",ue->Mod_id);
      //mac_resynch();
......
......@@ -3593,6 +3593,13 @@ int phy_procedures_UE_RX(PHY_VARS_UE *ue,UE_rxtx_proc_t *proc,uint8_t eNB_id,uin
    LOG_D(PHY,"[UE %d] Calculating bitrate Frame %d: total_TBS = %d, total_TBS_last = %d, bitrate %f kbits\n",
          ue->Mod_id,frame_rx,ue->total_TBS[eNB_id],
          ue->total_TBS_last[eNB_id],(float) ue->bitrate[eNB_id]/1000.0);

    #if UE_AUTOTEST_TRACE
    if ((frame_rx % 100 == 0)) {
      LOG_I(PHY,"[UE %d] AUTOTEST Metric : UE_DLSCH_BITRATE = %5.2f kbps (frame = %d) \n", ue->Mod_id, (float) ue->bitrate[eNB_id]/1000.0, frame_rx);
    }
    #endif
  }
......