Commit 2b36b1f3 authored by hardy

redefine monitor conf file + paged graphs

parent 90e84c01
@@ -734,8 +734,8 @@ class RANManagement():
 			mySSH.command('echo ' + self.eNBPassword + ' | sudo -S mv /tmp/enb_*.pcap .','\$',20)
 			mySSH.command('echo ' + self.eNBPassword + ' | sudo -S mv /tmp/gnb_*.pcap .','\$',20)
 			mySSH.command('echo ' + self.eNBPassword + ' | sudo -S rm -f enb.log.zip', '\$', 5)
-			mySSH.command('echo ' + self.eNBPassword + ' | sudo -S zip enb.log.zip enb*.log core* enb_*record.raw enb_*.pcap gnb_*.pcap enb_*txt physim_*.log *stats.log *monitor.pickle *monitor.png', '\$', 60)
-			mySSH.command('echo ' + self.eNBPassword + ' | sudo -S rm enb*.log core* enb_*record.raw enb_*.pcap gnb_*.pcap enb_*txt physim_*.log *stats.log *.pickle *.png', '\$', 5)
+			mySSH.command('echo ' + self.eNBPassword + ' | sudo -S zip enb.log.zip enb*.log core* enb_*record.raw enb_*.pcap gnb_*.pcap enb_*txt physim_*.log *stats.log *monitor.pickle *monitor*.png', '\$', 60)
+			mySSH.command('echo ' + self.eNBPassword + ' | sudo -S rm enb*.log core* enb_*record.raw enb_*.pcap gnb_*.pcap enb_*txt physim_*.log *stats.log *monitor.pickle *monitor*.png', '\$', 5)
 		mySSH.close()
 	def AnalyzeLogFile_eNB(self, eNBlogFile, HTML):
...
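The widened globs above exist because graph() (changed later in this commit) now writes one PNG per configured page instead of a single image. A minimal sketch of the difference, using Python's fnmatch as a stand-in for the shell glob and made-up file names:

from fnmatch import fnmatch

files = ['gnb_stats_monitor.png', 'gnb_stats_monitor_page2.png']
print([f for f in files if fnmatch(f, '*monitor.png')])   # only the un-paged file
print([f for f in files if fnmatch(f, '*monitor*.png')])  # both files, so paged graphs get zipped and cleaned up too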
@@ -17,9 +17,15 @@ class StatMonitor():
     def __init__(self,cfg_file):
         with open(cfg_file,'r') as file:
             self.d = yaml.load(file)
-        for node in self.d:
-            for metric in self.d[node]:
-                self.d[node][metric]=[]
+        for node in self.d: #so far we have enb or gnb as nodes
+            for metric_l1 in self.d[node]: #first level of metric keys
+                if metric_l1!="graph": #graph is a reserved word to configure graph paging, so it is disregarded
+                    if self.d[node][metric_l1] is None: #first level is None -> create array
+                        self.d[node][metric_l1]=[]
+                    else: #first level is not None -> there is a second level -> create array
+                        for metric_l2 in self.d[node][metric_l1]:
+                            self.d[node][metric_l1][metric_l2]=[]

     def process_gnb (self,node_type,output):
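A sketch (not part of the commit) of what the reworked __init__ builds from the two-level config: every leaf metric gets an empty list, while the reserved 'graph' section is left untouched. The inline YAML is a shortened, hypothetical excerpt of the real conf file:

import yaml

cfg = yaml.safe_load("""
gnb :
  dlsch_err:
  rt :
    feprx:
    DLSCH encoding:
  graph :
    page1:
      dlsch_err:
      rt.feprx:
""")
for node in cfg:                      # enb / gnb
    for metric_l1 in cfg[node]:
        if metric_l1 != "graph":      # 'graph' only drives paging, it holds no samples
            if cfg[node][metric_l1] is None:
                cfg[node][metric_l1] = []                      # 1-level metric
            else:
                for metric_l2 in cfg[node][metric_l1]:
                    cfg[node][metric_l1][metric_l2] = []       # 2-level metric, e.g. rt.feprx
# cfg is now {'gnb': {'dlsch_err': [], 'rt': {'feprx': [], 'DLSCH encoding': []}, 'graph': {'page1': {...}}}}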
@@ -35,15 +41,12 @@ class StatMonitor():
                 self.d[node_type]['ulsch_err'].append(int(result.group(3)))
                 percentage=float(result.group(2))/float(result.group(1))
                 self.d[node_type]['ulsch_err_perc_round_1'].append(percentage)
-            result=re.match(r'^.*\bL1 Tx processing thread 0\b:\s+([0-9\.]+) us;\s+([0-9]+);\s+([0-9\.]+) us;',tmp)
-            if result is not None:
-                self.d[node_type]['L1 Tx processing thread 0'].append(float(result.group(3)))
-            result=re.match(r'^.*\bL1 Tx processing thread 1\b:\s+([0-9\.]+) us;\s+([0-9]+);\s+([0-9\.]+) us;',tmp)
-            if result is not None:
-                self.d[node_type]['L1 Tx processing thread 1'].append(float(result.group(3)))
-            result=re.match(r'^.*\bDLSCH encoding\b:\s+([0-9\.]+) us;\s+([0-9]+);\s+([0-9\.]+) us;',tmp)
-            if result is not None:
-                self.d[node_type]['DLSCH encoding'].append(float(result.group(3)))
+            for k in self.d[node_type]['rt']:
+                result=re.match(rf'^.*\b{k}\b:\s+([0-9\.]+) us;\s+([0-9]+);\s+([0-9\.]+) us;',tmp)
+                if result is not None:
+                    self.d[node_type]['rt'][k].append(float(result.group(3)))

     def process_enb (self,node_type,output):
         for line in output:
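Sketch of how the new generic loop consumes one softmodem stats line; the sample line below mirrors the '<name>: <x> us; <count>; <y> us;' pattern the regex expects, but the exact wording of real logs is an assumption here:

import re

d = {'gnb': {'rt': {'feprx': [], 'DLSCH encoding': []}}}
tmp = '[PHY] feprx: 110.2 us; 3000; 154.7 us;'
for k in d['gnb']['rt']:
    result = re.match(rf'^.*\b{k}\b:\s+([0-9\.]+) us;\s+([0-9]+);\s+([0-9\.]+) us;', tmp)
    if result is not None:
        d['gnb']['rt'][k].append(float(result.group(3)))   # third captured figure, as in process_gnb
print(d['gnb']['rt'])   # {'feprx': [154.7], 'DLSCH encoding': []}

Since each rt key from the conf file is interpolated directly into the pattern, keys should stay free of regex metacharacters; the names configured in this commit are all safe.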
@@ -70,23 +73,37 @@ class StatMonitor():
     def graph(self,node_type):
-        col = 1
-        figure, axis = plt.subplots(len(self.d[node_type]), col ,figsize=(10, 10))
-        i=0
-        for metric in self.d[node_type]:
-            major_ticks = np.arange(0, len(self.d[node_type][metric])+1, 1)
-            axis[i].set_xticks(major_ticks)
-            axis[i].set_xticklabels([])
-            axis[i].plot(self.d[node_type][metric],marker='o')
-            axis[i].set_xlabel('time')
-            axis[i].set_ylabel(metric)
-            axis[i].set_title(metric)
-            i+=1
-        plt.tight_layout()
-        # Combine all the operations and display
-        plt.savefig(node_type+'_stats_monitor.png')
-        plt.show()
+        for page in self.d[node_type]['graph']: #work out a set of graphs per page
+            col = 1
+            figure, axis = plt.subplots(len(self.d[node_type]['graph'][page]), col ,figsize=(10, 10))
+            i=0
+            for m in self.d[node_type]['graph'][page]: #metric may refer to 1 level or 2 levels
+                metric_path=m.split('.')
+                if len(metric_path)==1: #1 level
+                    metric_l1=metric_path[0]
+                    major_ticks = np.arange(0, len(self.d[node_type][metric_l1])+1, 1)
+                    axis[i].set_xticks(major_ticks)
+                    axis[i].set_xticklabels([])
+                    axis[i].plot(self.d[node_type][metric_l1],marker='o')
+                    axis[i].set_xlabel('time')
+                    axis[i].set_ylabel(metric_l1)
+                    axis[i].set_title(metric_l1)
+                else: #2 levels
+                    metric_l1=metric_path[0]
+                    metric_l2=metric_path[1]
+                    major_ticks = np.arange(0, len(self.d[node_type][metric_l1][metric_l2])+1, 1)
+                    axis[i].set_xticks(major_ticks)
+                    axis[i].set_xticklabels([])
+                    axis[i].plot(self.d[node_type][metric_l1][metric_l2],marker='o')
+                    axis[i].set_xlabel('time')
+                    axis[i].set_ylabel(metric_l2)
+                    axis[i].set_title(metric_l2)
+                i+=1
+            plt.tight_layout()
+            #save as png
+            plt.savefig(node_type+'_stats_monitor_'+page+'.png')

 if __name__ == "__main__":
...
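Minimal sketch (not from the commit) of how graph() resolves a dotted metric name from a 'graph' page and names the per-page output file; the data values are invented:

node_type = 'gnb'
d = {'gnb': {'dlsch_err': [0, 2],
             'rt': {'feprx': [150.1, 160.3]},
             'graph': {'page2': {'rt.feprx': None}}}}
for page in d[node_type]['graph']:
    for m in d[node_type]['graph'][page]:
        metric_path = m.split('.')
        if len(metric_path) == 1:
            series = d[node_type][metric_path[0]]
        else:
            series = d[node_type][metric_path[0]][metric_path[1]]
        print(page, m, series)                              # page2 rt.feprx [150.1, 160.3]
    print(node_type + '_stats_monitor_' + page + '.png')    # one figure file per page

One caveat on the design: plt.subplots(n, 1) returns an indexable axes array only when n > 1, so a page configured with a single metric would need the axes wrapped (e.g. squeeze=False) before axis[i] works.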
@@ -3,12 +3,49 @@ enb :
   bler:
   mcsoff:
   mcs:
+  graph:
+    page1:
+      PHR:
+      bler:
+      mcsoff:
+      mcs:

 gnb :
   dlsch_err:
   dlsch_err_perc_round_1:
   ulsch_err:
   ulsch_err_perc_round_1:
-  L1 Tx processing thread 0:
-  L1 Tx processing thread 1:
-  DLSCH encoding:
+  rt :
+    feprx:
+    feptx_prec:
+    feptx_ofdm:
+    feptx_total:
+    L1 Tx processing thread 0:
+    L1 Tx processing thread 1:
+    DLSCH encoding:
+    L1 Rx processing:
+    PUSCH inner-receiver:
+    PUSCH decoding:
+    DL & UL scheduling timing stats:
+    UL Indication:
+  graph :
+    page1:
+      dlsch_err:
+      dlsch_err_perc_round_1:
+      ulsch_err:
+      ulsch_err_perc_round_1:
+    page2:
+      rt.feprx:
+      rt.feptx_prec:
+      rt.feptx_ofdm:
+      rt.feptx_total:
+    page3:
+      rt.L1 Tx processing thread 0:
+      rt.L1 Tx processing thread 1:
+      rt.DLSCH encoding:
+      rt.L1 Rx processing:
+    page4:
+      rt.PUSCH inner-receiver:
+      rt.PUSCH decoding:
+      rt.DL & UL scheduling timing stats:
+      rt.UL Indication:
\ No newline at end of file
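Assuming the file above is the conf file handed to StatMonitor (the file name used below is illustrative), the paging section fixes the set of figure files a run produces, which is exactly what the broadened '*monitor*.png' glob in RANManagement collects:

import yaml

with open('stats_monitor_conf.yaml') as f:     # illustrative file name
    cfg = yaml.safe_load(f)
for node in cfg:
    for page in cfg[node].get('graph', {}):
        print(node + '_stats_monitor_' + page + '.png')
# expected: enb_stats_monitor_page1.png, then gnb_stats_monitor_page1.png ... gnb_stats_monitor_page4.png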