Commit 37d1d680 authored by Remi Hardy

integration_2022_wk04_a

MR !1353: L1 Tx threading update 
Removed the active waiting loop, which could block threads. A separate thread now reorders the finished L1 Tx jobs and hands them to the RU in slot order.
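The pattern is roughly the following minimal pthread sketch. This is an illustration only, not the OAI code (which uses its notifiedFIFO/thread-pool API; see tx_reorder_thread in the diff below): two workers finish slots in arbitrary order, and a dedicated reorder thread blocks on a condition variable and releases slots strictly in order instead of spinning.

```c
/* Minimal sketch, NOT the OAI implementation: two L1 TX workers finish slots
 * in arbitrary order; one reorder thread releases them to the RU in slot
 * order, blocking on a condition variable instead of busy-waiting. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NUM_SLOTS 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool done[NUM_SLOTS];                 /* done[i]: slot i has been processed */

static void *l1_tx_worker(void *arg) {       /* stands in for tx_func() */
  int id = *(int *)arg;
  for (int slot = id; slot < NUM_SLOTS; slot += 2) {
    usleep((id + 1) * 1000);                 /* pretend processing, uneven durations */
    pthread_mutex_lock(&lock);
    done[slot] = true;                       /* publish the finished job */
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
  }
  return NULL;
}

static void *tx_reorder(void *arg) {         /* stands in for tx_reorder_thread() */
  (void)arg;
  for (int next = 0; next < NUM_SLOTS; next++) {
    pthread_mutex_lock(&lock);
    while (!done[next])                      /* sleep until the right slot is ready */
      pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
    printf("RU TX for slot %d\n", next);     /* stands in for ru_tx_func() */
  }
  return NULL;
}

int main(void) {
  pthread_t w[2], r;
  int id[2] = {0, 1};
  pthread_create(&r, NULL, tx_reorder, NULL);
  for (int i = 0; i < 2; i++) pthread_create(&w[i], NULL, l1_tx_worker, &id[i]);
  for (int i = 0; i < 2; i++) pthread_join(w[i], NULL);
  pthread_join(r, NULL);
  return 0;
}
```

In the actual code the handoff goes through the L1_tx_out FIFO, and the reorder thread keeps at most one out-of-order message aside (resL1Reserve), since only two parallel L1 Tx threads exist.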
 
MR !1404: [CI] rtmon per test
Until now there was a single default configuration file for the real-time processing check (HTML table).
A test-specific file can now be used: the YAML configuration file is specified in the test's XML definition.
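For illustration, a test case selects its own statistics configuration through the new rt_stats_cfg tag (the id and surrounding tags below are placeholders; the 2x2 file name matches the CI configs further down). When the tag is absent, datalog_rt_stats.default.yaml is used.

```xml
<!-- illustrative test case entry; id and neighbouring tags depend on the test file -->
<testCase id="000000">
  <class>Initialize_eNB</class>
  <eNB_Stats>yes</eNB_Stats>
  <rt_stats_cfg>datalog_rt_stats.2x2.yaml</rt_stats_cfg>
</testCase>
```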
parents b22db549 12bcebfb
......@@ -36,7 +36,7 @@ eNBs =
Nid_cell = 0;
N_RB_DL = 100;
Nid_cell_mbsfn = 0;
nb_antenna_ports = 1;
nb_antenna_ports = 2;
nb_antennas_tx = 2;
nb_antennas_rx = 2;
tx_gain = 90;
......
......@@ -232,7 +232,7 @@ L1s = (
{
num_cc = 1;
tr_n_preference = "local_mac";
pusch_proc_threads = 2;
pusch_proc_threads = 8;
prach_dtx_threshold = 120;
pucch0_dtx_threshold = 150;
}
......
......@@ -229,7 +229,7 @@ L1s = (
{
num_cc = 1;
tr_n_preference = "local_mac";
pusch_proc_threads = 2;
pusch_proc_threads = 8;
prach_dtx_threshold = 120;
}
);
......
......@@ -44,6 +44,7 @@ ENB_PROCESS_ASSERTION = -12
ENB_PROCESS_REALTIME_ISSUE = -13
ENB_PROCESS_NOLOGFILE_TO_ANALYZE = -14
ENB_PROCESS_SLAVE_RRU_NOT_SYNCED = -15
ENB_REAL_TIME_PROCESSING_ISSUE = -16
HSS_PROCESS_FAILED = -2
HSS_PROCESS_OK = +2
MME_PROCESS_FAILED = -3
......
# This configuration file is used to build real-time processing statistics
# for the 5G NR PHY test (gNB termination)
Title : Processing Time (us)
ColNames :
- Metric
- Average
- Max
- Average vs Reference Deviation (Reference Value ; Acceptability Threshold)
Ref :
feprx : 120.0
feptx_prec : 8.0
feptx_ofdm : 50.0
feptx_total : 75.0
L1 Tx processing thread 0 : 300.0
L1 Tx processing thread 1 : 300.0
DLSCH encoding : 230.0
L1 Rx processing : 175.0
PUSCH inner-receiver : 100.0
PUSCH decoding : 180.0
DL & UL scheduling timing stats : 37.0
UL Indication : 38.0
Threshold :
feprx : 1.25
feptx_prec : 1.25
feptx_ofdm : 1.25
feptx_total : 1.25
L1 Tx processing thread 0 : 1.25
L1 Tx processing thread 1 : 1.25
DLSCH encoding : 1.25
L1 Rx processing : 1.25
PUSCH inner-receiver : 1.25
PUSCH decoding : 1.25
DL & UL scheduling timing stats : 1.25
UL Indication : 1.25
......@@ -155,6 +155,11 @@ def GetParametersFromXML(action):
elif action == 'Initialize_eNB':
RAN.eNB_Trace=test.findtext('eNB_Trace')
RAN.eNB_Stats=test.findtext('eNB_Stats')
datalog_rt_stats_file=test.findtext('rt_stats_cfg')
if datalog_rt_stats_file is None:
RAN.datalog_rt_stats_file='datalog_rt_stats.default.yaml'
else:
RAN.datalog_rt_stats_file=datalog_rt_stats_file
RAN.Initialize_eNB_args=test.findtext('Initialize_eNB_args')
eNB_instance=test.findtext('eNB_instance')
USRPIPAddress=test.findtext('USRP_IPAddress')
......
......@@ -92,6 +92,7 @@ class RANManagement():
self.epcPcapFile = ''
self.runtime_stats= ''
self.datalog_rt_stats={}
self.datalog_rt_stats_file='datalog_rt_stats.default.yaml'
self.eNB_Trace = '' #if 'yes', Tshark will be launched at initialization
self.eNB_Stats = '' #if 'yes', Statistics Monitor will be launched at initialization
self.USRPIPAddress = ''
......@@ -722,6 +723,9 @@ class RANManagement():
logStatus = self.AnalyzeLogFile_eNB(fileToAnalyze, HTML)
if (logStatus < 0):
HTML.CreateHtmlTestRow('N/A', 'KO', logStatus)
#display rt stats for gNB only
if len(self.datalog_rt_stats)!=0 and nodeB_prefix == 'g':
HTML.CreateHtmlDataLogTable(self.datalog_rt_stats)
self.prematureExit = True
self.eNBmbmsEnables[int(self.eNB_instance)] = False
return
......@@ -810,6 +814,7 @@ class RANManagement():
#NSA specific log markers
nsa_markers ={'SgNBReleaseRequestAcknowledge': [],'FAILURE': [], 'scgFailureInformationNR-r15': [], 'SgNBReleaseRequest': []}
nodeB_prefix_found = False
RealTimeProcessingIssue = False
line_cnt=0 #log file line counter
for line in enb_log_file.readlines():
......@@ -1011,7 +1016,7 @@ class RANManagement():
#the following part takes the *_stats.log files as source (not the stdout log file)
#the datalog config file has to be loaded
datalog_rt_stats_file='datalog_rt_stats.yaml'
datalog_rt_stats_file=self.datalog_rt_stats_file
if (os.path.isfile(datalog_rt_stats_file)):
yaml_file=datalog_rt_stats_file
elif (os.path.isfile('ci-scripts/'+datalog_rt_stats_file)):
......@@ -1127,8 +1132,7 @@ class RANManagement():
#check if there is a fail => will render the test as failed
for k in datalog_rt_stats['Data']:
if float(datalog_rt_stats['Data'][k][2])> datalog_rt_stats['Threshold'][k]: #condition for fail : avg/ref is greater than the fixed threshold
#setting prematureExit is ok although not the best option
self.prematureExit=False #temp for debug : do not stop the test if RT stats are excedeed
RealTimeProcessingIssue = True
else:
statMsg = 'No real time stats found in the log file\n'
logging.debug('No real time stats found in the log file')
......@@ -1162,7 +1166,10 @@ class RANManagement():
logging.debug(statMsg)
htmleNBFailureMsg += htmlMsg
if RealTimeProcessingIssue:
logging.debug('\u001B[1;37;41m ' + nodeB_prefix + 'NB ended with real time processing issue! \u001B[0m')
htmleNBFailureMsg += 'Fail due to real time processing issue\n'
global_status = CONST.ENB_REAL_TIME_PROCESSING_ISSUE
if uciStatMsgCount > 0:
statMsg = nodeB_prefix + 'NB showed ' + str(uciStatMsgCount) + ' "uci->stat" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
......
......@@ -79,6 +79,7 @@
<eNB_serverId>1</eNB_serverId>
<air_interface>nr</air_interface>
<eNB_Stats>yes</eNB_Stats>
<rt_stats_cfg>datalog_rt_stats.2x2.yaml</rt_stats_cfg>
<USRP_IPAddress>192.168.18.240</USRP_IPAddress>
</testCase>
......
......@@ -99,6 +99,7 @@
<eNB_serverId>1</eNB_serverId>
<air_interface>nr</air_interface>
<eNB_Stats>yes</eNB_Stats>
<rt_stats_cfg>datalog_rt_stats.2x2.yaml</rt_stats_cfg>
<USRP_IPAddress>192.168.18.240</USRP_IPAddress>
</testCase>
......
......@@ -64,6 +64,7 @@
<air_interface>nr</air_interface>
<eNB_Trace>yes</eNB_Trace>
<eNB_Stats>yes</eNB_Stats>
<rt_stats_cfg>datalog_rt_stats.2x2.yaml</rt_stats_cfg>
<USRP_IPAddress>192.168.18.240</USRP_IPAddress>
</testCase>
......
......@@ -64,6 +64,7 @@
<air_interface>nr</air_interface>
<eNB_Trace>yes</eNB_Trace>
<eNB_Stats>yes</eNB_Stats>
<rt_stats_cfg>datalog_rt_stats.2x2.yaml</rt_stats_cfg>
<USRP_IPAddress>192.168.18.240</USRP_IPAddress>
</testCase>
......
......@@ -71,6 +71,7 @@
<air_interface>nr</air_interface>
<eNB_Trace>yes</eNB_Trace>
<eNB_Stats>yes</eNB_Stats>
<rt_stats_cfg>datalog_rt_stats.2x2.yaml</rt_stats_cfg>
<USRP_IPAddress>192.168.18.240</USRP_IPAddress>
</testCase>
......
......@@ -73,6 +73,7 @@
<air_interface>nr</air_interface>
<eNB_Trace>yes</eNB_Trace>
<eNB_Stats>yes</eNB_Stats>
<rt_stats_cfg>datalog_rt_stats.2x2.yaml</rt_stats_cfg>
<USRP_IPAddress>192.168.18.240</USRP_IPAddress>
</testCase>
......
......@@ -24,6 +24,7 @@
# authors Laurent Thomas
#
#######################################
if [ ! -f /etc/os-release ]; then
echo "No /etc/os-release file found. You're likely on an unsupported distro."
exit 1
......@@ -472,7 +473,7 @@ if ! check_supported_distribution; then
echo_error "Your distribution $(get_distribution_release) is not supported by oai !"
exit 1
fi
set_openair_env
echo_info "Installing packages"
check_install_ubuntu_packages
......
......@@ -254,7 +254,7 @@ configmodule_interface_t *load_configmodule(int argc,
cfgptr = calloc(sizeof(configmodule_interface_t),1);
/* argv_info is used to memorize command line options which have been recognized */
/* and to detect unrecognized command line options which might have been specified */
cfgptr->argv_info = calloc(sizeof(int32_t), argc);
cfgptr->argv_info = calloc(sizeof(int32_t), argc+10);
/* argv[0] is the exec name, always Ok */
cfgptr->argv_info[0] |= CONFIG_CMDLINEOPT_PROCESSED;
......
```mermaid
flowchart TB
A[ru_thread] --> RFin>block rx_rf] --> feprx
feprx --> half-slot --> end_feprx
feprx --> second-thread -- block_end_feprx --> end_feprx>feprx]
end_feprx --> rx_nr_prach_ru
rx_nr_prach_ru -- block_queue_singleton --> resp_L1>resp L1]
resp_L1 -- async launch --> rx_func
resp_L1 -- immediate return --> RFin
subgraph rxfunc
rx_func_implem[rx_func]
subgraph rxfuncbeg
handle_nr_slot_ind
--> rnti_to_remove-mgmt
--> L1_nr_prach_procedures
--> apply_nr_rotation_ul
end
subgraph phy_procedures_gNB_uespec_RX
fill_ul_rb_mask
--> pucch(decode each gNB->pucch)
-->nr_fill_ul_indication
--> nr_ulsch_procedures
--> nr_ulsch_decoding
--> segInParallel[[all segments decode in parallel]]
--> barrier_end_of_ulsch_decoding
end
subgraph NR_UL_indication
handle_nr_rach
--> handle_nr_uci
--> handle_nr_ulsch
subgraph gNB_dlsch_ulsch_scheduler
run_pdcp
--> nr_rrc_trigger
--> schedule_xxxx
end
handle_nr_ulsch --> gNB_dlsch_ulsch_scheduler
subgraph NR_Schedule_response
L1_tx_free3>L1_tx_free]
--> handle_nr_nfapi_xxx_pdu
--> sendTxFilled((L1_tx_filled))
--> nr_fill_ul_xxx
--> nr_fill_prach
end
gNB_dlsch_ulsch_scheduler --> NR_Schedule_response
end
rx_func_implem --> rxfuncbeg
rxfuncbeg --> phy_procedures_gNB_uespec_RX
phy_procedures_gNB_uespec_RX --> NR_UL_indication
-- block_queue_block_PNF_monolithic --> L1_tx_free2>L1 tx filled]
-- async launch --> tx_func
L1_tx_free2 -- send_msg --> rsp((resp_L1))
end
rx_func --> rxfunc
subgraph tx_func
direction LR
subgraph phy_procedures_gNB_TX
dcitop[nr_generate dci top]
--> nr_generate_csi_rs
--> apply_nr_rotation
-- send_msg --> end_tx_func((L1_tx_out))
end
subgraph tx_reorder_thread
L1_tx_out>L1_tx_out]
--> reorder{re order} --> reorder
reorder --> ru_tx_func
reorder --> L1_tx_free((L1_tx_free))
ru_tx_func --> feptx_prec
--> feptx_ofdm
end
end
```
......@@ -17,6 +17,70 @@ body {
</style>
```mermaid
flowchart TB
A[ru_thread] --> RFin>block rx_rf] --> feprx
feprx --> half-slot --> end_feprx
feprx --> second-thread -- block_end_feprx --> end_feprx>feprx]
end_feprx --> rx_nr_prach_ru
rx_nr_prach_ru -- block_queue --> resp_L1>resp L1]
resp_L1 -- async launch --> rx_func
resp_L1 -- immediate return --> RFin
subgraph rxfunc
rx_func_implem[rx_func]
subgraph rxfuncbeg
handle_nr_slot_ind
--> rnti_to_remove-mgmt
--> L1_nr_prach_procedures
--> apply_nr_rotation_ul
end
subgraph phy_procedures_gNB_uespec_RX
fill_ul_rb_mask
--> pucch(decode each gNB->pucch)
-->nr_fill_ul_indication
--> nr_ulsch_procedures
--> nr_ulsch_decoding
--> segInParallel[[all segments decode in parallel]]
--> barrier_end_of_ulsch_decoding
end
subgraph NR_UL_indication
handle_nr_rach
--> handle_nr_uci
--> handle_nr_ulsch
--> gNB_dlsch_ulsch_scheduler
--> NR_Schedule_response
end
rx_func_implem --> rxfuncbeg
rxfuncbeg --> phy_procedures_gNB_uespec_RX
phy_procedures_gNB_uespec_RX --> NR_UL_indication
-- block_queue --> L1_tx_free2>L1 tx free]
-- async launch --> tx_func
L1_tx_free2 -- send_msg --> rsp((resp_L1))
end
rx_func --> rxfunc
subgraph tx
direction LR
subgraph tx_func2
phy_procedures_gNB_TX
--> dcitop[nr_generate dci top]
--> nr_generate_csi_rs
--> apply_nr_rotation
-- send_msg --> end_tx_func((L1_tx_out))
end
subgraph tx_reorder_thread
L1_tx_out>L1_tx_out]
--> reorder{re order} --> reorder
reorder --> ru_tx_func
reorder --> L1_tx_free((L1_tx_free))
ru_tx_func --> feptx_prec
--> feptx_ofdm
end
tx_func2 --> tx_reorder_thread
end
```
This tutorial is for the 5G gNB design, with Open Cells main
{: .text-center}
......
......@@ -87,7 +87,6 @@
#include <openair1/PHY/NR_TRANSPORT/nr_ulsch.h>
#include <openair1/PHY/NR_TRANSPORT/nr_dlsch.h>
#include <PHY/NR_ESTIMATION/nr_ul_estimation.h>
//#define DEBUG_THREADS 1
//#define USRP_DEBUG 1
// Fix per CC openair rf/if device update
......@@ -114,7 +113,6 @@ time_stats_t softmodem_stats_rx_sf; // total rx time
void tx_func(void *param) {
processingData_L1tx_t *info = (processingData_L1tx_t *) param;
PHY_VARS_gNB *gNB = info->gNB;
int frame_tx = info->frame;
int slot_tx = info->slot;
......@@ -122,35 +120,9 @@ void tx_func(void *param) {
frame_tx,
slot_tx,
1);
info->slot = -1;
//if ((frame_tx&127) == 0) dump_pdsch_stats(fd,gNB);
// If the later of the 2 L1 tx thread finishes first,
// we wait for the earlier one to finish and start the RU thread
// to avoid realtime issues with USRP
// Start RU TX processing.
notifiedFIFO_elt_t *res;
res = pullTpool(gNB->resp_RU_tx, gNB->threadPool);
processingData_RU_t *syncMsg = (processingData_RU_t *)NotifiedFifoData(res);
LOG_D(PHY,"waiting for previous tx to finish, next slot %d,%d\n",syncMsg->next_slot,slot_tx);
while (syncMsg->next_slot != slot_tx) {
pushNotifiedFIFO(gNB->resp_RU_tx, res);
res = pullTpool(gNB->resp_RU_tx, gNB->threadPool);
syncMsg = (processingData_RU_t *)NotifiedFifoData(res);
}
LOG_D(PHY,"previous tx finished, next slot %d,%d\n",syncMsg->next_slot,slot_tx);
syncMsg->frame_tx = frame_tx;
syncMsg->slot_tx = slot_tx;
syncMsg->next_slot = get_next_downlink_slot(gNB, &gNB->gNB_config, frame_tx, slot_tx);
syncMsg->timestamp_tx = info->timestamp_tx;
syncMsg->ru = gNB->RU_list[0];
res->key = slot_tx;
pushTpool(gNB->threadPool, res);
}
void rx_func(void *param) {
processingData_L1_t *info = (processingData_L1_t *) param;
PHY_VARS_gNB *gNB = info->gNB;
int frame_rx = info->frame_rx;
......@@ -275,15 +247,12 @@ void rx_func(void *param) {
if (tx_slot_type == NR_DOWNLINK_SLOT || tx_slot_type == NR_MIXED_SLOT) {
notifiedFIFO_elt_t *res;
res = pullTpool(gNB->resp_L1_tx, gNB->threadPool);
processingData_L1tx_t *syncMsg = (processingData_L1tx_t *)NotifiedFifoData(res);
while (syncMsg->slot != slot_tx) {
pushNotifiedFIFO(gNB->resp_L1_tx, res);
res = pullTpool(gNB->resp_L1_tx, gNB->threadPool);
syncMsg = (processingData_L1tx_t *)NotifiedFifoData(res);
}
processingData_L1tx_t *syncMsg;
// It's a FIFO, so it maintains the order in which the MAC fills the messages;
// there is no need to check for the right slot
res = pullTpool(gNB->L1_tx_filled, gNB->threadPool);
syncMsg = (processingData_L1tx_t *)NotifiedFifoData(res);
syncMsg->gNB = gNB;
AssertFatal(syncMsg->slot == slot_tx, "Thread message slot and logical slot number do not match\n");
syncMsg->timestamp_tx = info->timestamp_tx;
res->key = slot_tx;
pushTpool(gNB->threadPool, res);
......@@ -331,8 +300,8 @@ void rx_func(void *param) {
}
static void dump_L1_meas_stats(PHY_VARS_gNB *gNB, RU_t *ru, char *output) {
int stroff = 0;
stroff += print_meas_log(gNB->phy_proc_tx_0, "L1 Tx processing thread 0", NULL, NULL, output);
stroff += print_meas_log(gNB->phy_proc_tx_1, "L1 Tx processing thread 1", NULL, NULL, output+stroff);
stroff += print_meas_log(gNB->phy_proc_tx[0], "L1 Tx processing thread 0", NULL, NULL, output);
//stroff += print_meas_log(gNB->phy_proc_tx[1], "L1 Tx processing thread 1", NULL, NULL, output+stroff);
stroff += print_meas_log(&gNB->dlsch_encoding_stats, "DLSCH encoding", NULL, NULL, output+stroff);
stroff += print_meas_log(&gNB->phy_proc_rx, "L1 Rx processing", NULL, NULL, output+stroff);
stroff += print_meas_log(&gNB->ul_indication_stats, "UL Indication", NULL, NULL, output+stroff);
......@@ -367,8 +336,8 @@ void *nrL1_stats_thread(void *param) {
fd=fopen("nrL1_stats.log","w");
AssertFatal(fd!=NULL,"Cannot open nrL1_stats.log\n");
reset_meas(gNB->phy_proc_tx_0);
reset_meas(gNB->phy_proc_tx_1);
reset_meas(gNB->phy_proc_tx[0]);
//reset_meas(gNB->phy_proc_tx[1]);
reset_meas(&gNB->dlsch_encoding_stats);
reset_meas(&gNB->phy_proc_rx);
reset_meas(&gNB->ul_indication_stats);
......@@ -389,6 +358,52 @@ void *nrL1_stats_thread(void *param) {
return(NULL);
}
// This thread reads the finished L1 tx jobs from the thread pool
// and pushes them to the RU tx processing in the right order. It only works
// with two parallel L1 tx threads.
void *tx_reorder_thread(void* param) {
PHY_VARS_gNB *gNB = (PHY_VARS_gNB *)param;
notifiedFIFO_elt_t *resL1Reserve = NULL;
resL1Reserve=pullTpool(gNB->L1_tx_out, gNB->threadPool);
int next_tx_slot=((processingData_L1tx_t *)NotifiedFifoData(resL1Reserve))->slot;
while (!oai_exit) {
notifiedFIFO_elt_t *resL1;
if (resL1Reserve) {
resL1=resL1Reserve;
if (((processingData_L1tx_t *)NotifiedFifoData(resL1))->slot != next_tx_slot) {
LOG_E(PHY,"order mistake");
resL1Reserve=NULL;
resL1 = pullTpool(gNB->L1_tx_out, gNB->threadPool);
}
} else {
resL1 = pullTpool(gNB->L1_tx_out, gNB->threadPool);
if (((processingData_L1tx_t *)NotifiedFifoData(resL1))->slot != next_tx_slot) {
if (resL1Reserve)
LOG_E(PHY,"error, have a stored packet, then a second one\n");
resL1Reserve=resL1;
resL1 = pullTpool(gNB->L1_tx_out, gNB->threadPool);
if (((processingData_L1tx_t *)NotifiedFifoData(resL1))->slot != next_tx_slot)
LOG_E(PHY,"error, pull two msg, none is good\n");
}
}
processingData_L1tx_t *syncMsgL1= (processingData_L1tx_t *)NotifiedFifoData(resL1);
processingData_RU_t syncMsgRU;
syncMsgRU.frame_tx = syncMsgL1->frame;
syncMsgRU.slot_tx = syncMsgL1->slot;
syncMsgRU.timestamp_tx = syncMsgL1->timestamp_tx;
syncMsgRU.ru = gNB->RU_list[0];
next_tx_slot = get_next_downlink_slot(gNB, &gNB->gNB_config, syncMsgRU.frame_tx, syncMsgRU.slot_tx);
pushNotifiedFIFO(gNB->L1_tx_free, resL1);
if (resL1==resL1Reserve)
resL1Reserve=NULL;
ru_tx_func((void*)&syncMsgRU);
}
return(NULL);
}
void init_gNB_Tpool(int inst) {
PHY_VARS_gNB *gNB;
gNB = RC.gNB[inst];
......@@ -420,40 +435,29 @@ void init_gNB_Tpool(int inst) {
pushNotifiedFIFO(gNB->resp_L1,msg); // to unblock the process in the beginning
// L1 TX result FIFO
gNB->resp_L1_tx = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
initNotifiedFIFO(gNB->resp_L1_tx);
gNB->L1_tx_free = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
gNB->L1_tx_filled = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
gNB->L1_tx_out = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
initNotifiedFIFO(gNB->L1_tx_free);
initNotifiedFIFO(gNB->L1_tx_filled);
initNotifiedFIFO(gNB->L1_tx_out);
// we create 2 threads for L1 tx processing
notifiedFIFO_elt_t *msgL1Tx = newNotifiedFIFO_elt(sizeof(processingData_L1tx_t),0,gNB->resp_L1_tx,tx_func);
processingData_L1tx_t *msgDataTx = (processingData_L1tx_t *)NotifiedFifoData(msgL1Tx);
init_DLSCH_struct(gNB, msgDataTx);
msgDataTx->slot = -1;
memset(msgDataTx->ssb, 0, 64*sizeof(NR_gNB_SSB_t));
reset_meas(&msgDataTx->phy_proc_tx);
gNB->phy_proc_tx_0 = &msgDataTx->phy_proc_tx;
pushNotifiedFIFO(gNB->resp_L1_tx,msgL1Tx); // to unblock the process in the beginning
msgL1Tx = newNotifiedFIFO_elt(sizeof(processingData_L1tx_t),0,gNB->resp_L1_tx,tx_func);
msgDataTx = (processingData_L1tx_t *)NotifiedFifoData(msgL1Tx);
init_DLSCH_struct(gNB, msgDataTx);
msgDataTx->slot = -1;
memset(msgDataTx->ssb, 0, 64*sizeof(NR_gNB_SSB_t));
reset_meas(&msgDataTx->phy_proc_tx);
gNB->phy_proc_tx_1 = &msgDataTx->phy_proc_tx;
pushNotifiedFIFO(gNB->resp_L1_tx,msgL1Tx); // to unblock the process in the beginning
// RU TX result FIFO
gNB->resp_RU_tx = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
initNotifiedFIFO(gNB->resp_RU_tx);
notifiedFIFO_elt_t *msgRUTx = newNotifiedFIFO_elt(sizeof(processingData_RU_t),0,gNB->resp_RU_tx,ru_tx_func);
processingData_RU_t *msgData = (processingData_RU_t*)msgRUTx->msgData;
int first_tx_slot = sf_ahead*gNB->frame_parms.slots_per_subframe;
msgData->next_slot = get_next_downlink_slot(gNB, &gNB->gNB_config, 0, first_tx_slot-1);
pushNotifiedFIFO(gNB->resp_RU_tx,msgRUTx); // to unblock the process in the beginning
if (!get_softmodem_params()->emulate_l1) {
threadCreate(&proc->L1_stats_thread,nrL1_stats_thread,(void*)gNB,"L1_stats",-1,OAI_PRIORITY_RT_LOW);
for (int i=0; i < 1; i++) {
notifiedFIFO_elt_t *msgL1Tx = newNotifiedFIFO_elt(sizeof(processingData_L1tx_t),0,gNB->L1_tx_out,tx_func);
processingData_L1tx_t *msgDataTx = (processingData_L1tx_t *)NotifiedFifoData(msgL1Tx);
init_DLSCH_struct(gNB, msgDataTx);
memset(msgDataTx->ssb, 0, 64*sizeof(NR_gNB_SSB_t));
reset_meas(&msgDataTx->phy_proc_tx);
gNB->phy_proc_tx[i] = &msgDataTx->phy_proc_tx;
pushNotifiedFIFO(gNB->L1_tx_free,msgL1Tx); // to unblock the process in the beginning
}
if (!get_softmodem_params()->emulate_l1)
threadCreate(&proc->L1_stats_thread,nrL1_stats_thread,(void*)gNB,"L1_stats",-1,OAI_PRIORITY_RT_LOW);
threadCreate(&proc->pthread_tx_reorder, tx_reorder_thread, (void *)gNB, "thread_tx_reorder", -1, OAI_PRIORITY_RT_MAX);
}
......
......@@ -1471,11 +1471,9 @@ void *ru_thread( void *param ) {
res = pullNotifiedFIFO(gNB->resp_L1);
delNotifiedFIFO_elt(res);
res = pullNotifiedFIFO(gNB->resp_L1_tx);
res = pullNotifiedFIFO(gNB->L1_tx_free);
delNotifiedFIFO_elt(res);
res = pullNotifiedFIFO(gNB->resp_L1_tx);
delNotifiedFIFO_elt(res);
res = pullNotifiedFIFO(gNB->resp_RU_tx);
res = pullNotifiedFIFO(gNB->L1_tx_free);
delNotifiedFIFO_elt(res);
ru_thread_status = 0;
......
......@@ -1101,41 +1101,10 @@ void pnf_phy_deallocate_p7_vendor_ext(nfapi_p7_message_header_t *header) {
notifiedFIFO_elt_t *l1tx_message_extract(PHY_VARS_gNB *gNB, int frame, int slot) {
notifiedFIFO_elt_t *res;
notifiedFIFO_elt_t *freeRes = NULL;
// check first message
res = pullTpool(gNB->resp_L1_tx, gNB->threadPool);
processingData_L1tx_t *msgTx = (processingData_L1tx_t *)NotifiedFifoData(res);
if (msgTx->slot == slot) {
return res;
}
if (msgTx->slot == -1) {
freeRes = res;
}
// check second message
pushNotifiedFIFO(gNB->resp_L1_tx,res);
res = pullTpool(gNB->resp_L1_tx, gNB->threadPool);
msgTx = (processingData_L1tx_t *)NotifiedFifoData(res);
if (msgTx->slot == slot) {
return res;
}
if (msgTx->slot == -1) {
freeRes = res;
}
if (freeRes) {
msgTx = (processingData_L1tx_t *)NotifiedFifoData(res);
msgTx->num_pdsch_slot=0;
msgTx->pdcch_pdu.pdcch_pdu_rel15.numDlDci = 0;
msgTx->ul_pdcch_pdu.pdcch_pdu.pdcch_pdu_rel15.numDlDci = 0;
msgTx->slot = slot;
msgTx->frame = frame;
return freeRes;
}
pushNotifiedFIFO(gNB->resp_L1_tx,res);
AssertFatal(1==0, "It means both L1 Tx messages are still waiting to be processed. This happens when L1 Tx processing is too slow. Message slot %d, scheduled slot %d\n",
msgTx->slot, slot);
//TODO: This needs to be reworked for nfapi to work
res = pullTpool(gNB->L1_tx_free, gNB->threadPool);
return res;
}
int pnf_phy_ul_dci_req(gNB_L1_rxtx_proc_t *proc, nfapi_pnf_p7_config_t *pnf_p7, nfapi_nr_ul_dci_request_t *req) {
......@@ -1161,7 +1130,7 @@ int pnf_phy_ul_dci_req(gNB_L1_rxtx_proc_t *proc, nfapi_pnf_p7_config_t *pnf_p7,
}
}
pushNotifiedFIFO(gNB->resp_L1_tx,res);
pushNotifiedFIFO(gNB->L1_tx_filled,res);
return 0;
}
......@@ -1269,7 +1238,7 @@ int pnf_phy_dl_tti_req(gNB_L1_rxtx_proc_t *proc, nfapi_pnf_p7_config_t *pnf_p7,
else {
NFAPI_TRACE(NFAPI_TRACE_ERROR, "%s() UNKNOWN:%d\n", __FUNCTION__, dl_tti_pdu_list[i].PDUType);
}
pushNotifiedFIFO(gNB->resp_L1_tx,res);
pushNotifiedFIFO(gNB->L1_tx_filled,res);
}
if(req->vendor_extension)
......
......@@ -353,6 +353,8 @@ typedef struct {
} fapi_nr_ul_config_request_pdu_t;
typedef struct {
//uint16_t sfn;
//uint16_t slot;
uint16_t sfn;
uint16_t slot;
uint8_t number_pdus;
......
......@@ -294,8 +294,8 @@ int nr_dlsch_encoding(PHY_VARS_gNB *gNB,
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_DLSCH_ENCODING, VCD_FUNCTION_IN);
uint32_t A = rel15->TBSize[0]<<3;
if ( dlsch->rnti != SI_RNTI )
trace_NRpdu(DIRECTION_DOWNLINK, a, rel15->TBSize[0], 0, WS_C_RNTI, dlsch->rnti, frame, slot,0, 0);
if ( rel15->rnti != SI_RNTI)
trace_NRpdu(DIRECTION_DOWNLINK, a, rel15->TBSize[0], 0, WS_C_RNTI, rel15->rnti, frame, slot,0, 0);
NR_gNB_SCH_STATS_t *stats=NULL;
int first_free=-1;
......
......@@ -33,7 +33,6 @@
#define ScaleZone 4
#define localBuff(NaMe,SiZe) float NaMe[SiZe]; memset(NaMe,0,sizeof(NaMe));
int otg_enabled;
const FL_COLOR rx_antenna_colors[4] = {FL_RED,FL_BLUE,FL_GREEN,FL_YELLOW};
const FL_COLOR water_colors[4] = {FL_BLUE,FL_GREEN,FL_YELLOW,FL_RED};
......
......@@ -594,6 +594,8 @@ typedef struct gNB_L1_proc_t_s {
pthread_t L1_stats_thread;
/// pthread structure for printing time meas
pthread_t process_stats_thread;
/// pthread structure for reordering L1 tx thread messages
pthread_t pthread_tx_reorder;
/// flag to indicate first RX acquisition
int first_rx;
/// flag to indicate first TX transmission
......@@ -844,8 +846,7 @@ typedef struct PHY_VARS_gNB_s {
/*
time_stats_t phy_proc;
*/
time_stats_t *phy_proc_tx_0;
time_stats_t *phy_proc_tx_1;
time_stats_t *phy_proc_tx[2];
time_stats_t phy_proc_rx;
time_stats_t rx_prach;
/*
......@@ -886,7 +887,9 @@ typedef struct PHY_VARS_gNB_s {
*/
notifiedFIFO_t *respDecode;
notifiedFIFO_t *resp_L1;
notifiedFIFO_t *resp_L1_tx;
notifiedFIFO_t *L1_tx_free;
notifiedFIFO_t *L1_tx_filled;
notifiedFIFO_t *L1_tx_out;
notifiedFIFO_t *resp_RU_tx;
tpool_t *threadPool;
int nbDecode;
......
......@@ -148,6 +148,9 @@ void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO){
AssertFatal(RC.gNB[Mod_id]!=NULL,"RC.gNB[%d] is null\n",Mod_id);
gNB = RC.gNB[Mod_id];
nfapi_nr_config_request_scf_t *cfg = &gNB->gNB_config;
int slot_type = nr_slot_select(cfg,frame,slot);
uint8_t number_dl_pdu = (DL_req==NULL) ? 0 : DL_req->dl_tti_request_body.nPDUs;
uint8_t number_ul_dci_pdu = (UL_dci_req==NULL) ? 0 : UL_dci_req->numPdus;
......@@ -155,62 +158,59 @@ void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO){
uint8_t number_tx_data_pdu = (TX_req == NULL) ? 0 : TX_req->Number_of_PDUs;
if (NFAPI_MODE == NFAPI_MONOLITHIC){
notifiedFIFO_elt_t *res;
res = pullTpool(gNB->resp_L1_tx, gNB->threadPool);
processingData_L1tx_t *msgTx = (processingData_L1tx_t *)NotifiedFifoData(res);
if (DL_req != NULL && TX_req!=NULL && (number_dl_pdu > 0 || number_ul_dci_pdu > 0 || number_ul_tti_pdu > 0))
LOG_D(PHY,"NFAPI: Sched_INFO:SFN/SLOT:%04d/%d DL_req:SFN/SLO:%04d/%d:dl_pdu:%d tx_req:SFN/SLOT:%04d/%d:pdus:%d;ul_dci %d ul_tti %d\n",
frame,slot,
DL_req->SFN,DL_req->Slot,number_dl_pdu,
TX_req->SFN,TX_req->Slot,TX_req->Number_of_PDUs,
number_ul_dci_pdu,number_ul_tti_pdu);
int pdcch_received=0;
msgTx->num_pdsch_slot=0;
msgTx->pdcch_pdu.pdcch_pdu_rel15.numDlDci = 0;
msgTx->ul_pdcch_pdu.pdcch_pdu.pdcch_pdu_rel15.numDlDci = 0;
msgTx->slot = slot;
msgTx->frame = frame;
for (int i=0;i<number_dl_pdu;i++) {
nfapi_nr_dl_tti_request_pdu_t *dl_tti_pdu = &DL_req->dl_tti_request_body.dl_tti_pdu_list[i];
LOG_D(PHY,"NFAPI: dl_pdu %d : type %d\n",i,dl_tti_pdu->PDUType);
switch (dl_tti_pdu->PDUType) {
case NFAPI_NR_DL_TTI_SSB_PDU_TYPE:
handle_nr_nfapi_ssb_pdu(msgTx,frame,slot,
dl_tti_pdu);
break;
case NFAPI_NR_DL_TTI_PDCCH_PDU_TYPE:
AssertFatal(pdcch_received == 0, "pdcch_received is not 0, we can only handle one PDCCH PDU per slot\n");
msgTx->pdcch_pdu = dl_tti_pdu->pdcch_pdu;
pdcch_received = 1;
break;
case NFAPI_NR_DL_TTI_CSI_RS_PDU_TYPE:
LOG_D(PHY,"frame %d, slot %d, Got NFAPI_NR_DL_TTI_CSI_RS_PDU_TYPE for %d.%d\n",frame,slot,DL_req->SFN,DL_req->Slot);
handle_nfapi_nr_csirs_pdu(msgTx,frame,slot,
&dl_tti_pdu->csi_rs_pdu);
break;
case NFAPI_NR_DL_TTI_PDSCH_PDU_TYPE:
LOG_D(PHY,"frame %d, slot %d, Got NFAPI_NR_DL_TTI_PDSCH_PDU_TYPE for %d.%d\n",frame,slot,DL_req->SFN,DL_req->Slot);
nfapi_nr_dl_tti_pdsch_pdu_rel15_t *pdsch_pdu_rel15 = &dl_tti_pdu->pdsch_pdu.pdsch_pdu_rel15;
uint16_t pduIndex = pdsch_pdu_rel15->pduIndex;
AssertFatal(TX_req->pdu_list[pduIndex].num_TLV == 1, "TX_req->pdu_list[%d].num_TLV %d != 1\n",
pduIndex,TX_req->pdu_list[pduIndex].num_TLV);
uint8_t *sdu = (uint8_t *)TX_req->pdu_list[pduIndex].TLVs[0].value.direct;
AssertFatal(msgTx->num_pdsch_slot < gNB->number_of_nr_dlsch_max,"Number of PDSCH PDUs %d exceeded the limit %d\n",
msgTx->num_pdsch_slot,gNB->number_of_nr_dlsch_max);
handle_nr_nfapi_pdsch_pdu(msgTx,&dl_tti_pdu->pdsch_pdu, sdu);
if (slot_type == NR_DOWNLINK_SLOT || slot_type == NR_MIXED_SLOT) {
notifiedFIFO_elt_t *res;
res = pullTpool(gNB->L1_tx_free, gNB->threadPool);
processingData_L1tx_t *msgTx = (processingData_L1tx_t *)NotifiedFifoData(res);
int pdcch_received=0;
msgTx->num_pdsch_slot=0;
msgTx->pdcch_pdu.pdcch_pdu_rel15.numDlDci = 0;
msgTx->ul_pdcch_pdu.pdcch_pdu.pdcch_pdu_rel15.numDlDci = 0;
msgTx->slot = slot;
msgTx->frame = frame;
for (int i=0;i<number_dl_pdu;i++) {
nfapi_nr_dl_tti_request_pdu_t *dl_tti_pdu = &DL_req->dl_tti_request_body.dl_tti_pdu_list[i];
LOG_D(PHY,"NFAPI: dl_pdu %d : type %d\n",i,dl_tti_pdu->PDUType);
switch (dl_tti_pdu->PDUType) {
case NFAPI_NR_DL_TTI_SSB_PDU_TYPE:
handle_nr_nfapi_ssb_pdu(msgTx,frame,slot,
dl_tti_pdu);
break;
case NFAPI_NR_DL_TTI_PDCCH_PDU_TYPE:
AssertFatal(pdcch_received == 0, "pdcch_received is not 0, we can only handle one PDCCH PDU per slot\n");
msgTx->pdcch_pdu = dl_tti_pdu->pdcch_pdu;
pdcch_received = 1;
break;
case NFAPI_NR_DL_TTI_CSI_RS_PDU_TYPE:
LOG_D(PHY,"frame %d, slot %d, Got NFAPI_NR_DL_TTI_CSI_RS_PDU_TYPE for %d.%d\n",frame,slot,DL_req->SFN,DL_req->Slot);
handle_nfapi_nr_csirs_pdu(msgTx,frame,slot,
&dl_tti_pdu->csi_rs_pdu);
break;
case NFAPI_NR_DL_TTI_PDSCH_PDU_TYPE:
LOG_D(PHY,"frame %d, slot %d, Got NFAPI_NR_DL_TTI_PDSCH_PDU_TYPE for %d.%d\n",frame,slot,DL_req->SFN,DL_req->Slot);
nfapi_nr_dl_tti_pdsch_pdu_rel15_t *pdsch_pdu_rel15 = &dl_tti_pdu->pdsch_pdu.pdsch_pdu_rel15;
uint16_t pduIndex = pdsch_pdu_rel15->pduIndex;
AssertFatal(TX_req->pdu_list[pduIndex].num_TLV == 1, "TX_req->pdu_list[%d].num_TLV %d != 1\n",
pduIndex,TX_req->pdu_list[pduIndex].num_TLV);
uint8_t *sdu = (uint8_t *)TX_req->pdu_list[pduIndex].TLVs[0].value.direct;
AssertFatal(msgTx->num_pdsch_slot < gNB->number_of_nr_dlsch_max,"Number of PDSCH PDUs %d exceeded the limit %d\n",
msgTx->num_pdsch_slot,gNB->number_of_nr_dlsch_max);
handle_nr_nfapi_pdsch_pdu(msgTx,&dl_tti_pdu->pdsch_pdu, sdu);
}
}
}
if (number_ul_dci_pdu > 0)
msgTx->ul_pdcch_pdu = UL_dci_req->ul_dci_pdu_list[number_ul_dci_pdu-1]; // copy the last pdu
if (number_ul_dci_pdu > 0)
msgTx->ul_pdcch_pdu = UL_dci_req->ul_dci_pdu_list[number_ul_dci_pdu-1]; // copy the last pdu
pushNotifiedFIFO(gNB->resp_L1_tx,res);
pushNotifiedFIFO(gNB->L1_tx_filled,res);
}
for (int i = 0; i < number_ul_tti_pdu; i++) {
switch (UL_tti_req->pdus_list[i].pdu_type) {
......
......@@ -128,6 +128,7 @@ void phy_procedures_gNB_TX(processingData_L1tx_t *msgTx,
int frame,
int slot,
int do_meas) {
int aa;
PHY_VARS_gNB *gNB = msgTx->gNB;
NR_DL_FRAME_PARMS *fp=&gNB->frame_parms;
......@@ -204,6 +205,8 @@ void phy_procedures_gNB_TX(processingData_L1tx_t *msgTx,
}
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_gNB_TX+offset,0);
//pthread_mutex_unlock(&mutextest);
}
......
......@@ -419,6 +419,7 @@ int8_t nr_ue_scheduled_response(nr_scheduled_response_t *scheduled_response){
}
}
//Clear the fields when all the config pdu are done
if (pdu_done == ul_config->number_pdus) {
if (scheduled_response->tx_request)
......@@ -429,6 +430,7 @@ int8_t nr_ue_scheduled_response(nr_scheduled_response_t *scheduled_response){
LOG_D(PHY, "%d.%d clear ul_config %p\n", scheduled_response->frame, slot, ul_config);
memset(ul_config->ul_config_list, 0, sizeof(ul_config->ul_config_list));
}
pthread_mutex_unlock(&ul_config->mutex_ul_config);
}
}
......
......@@ -984,18 +984,21 @@ int main(int argc, char **argv)
gNB->threadPool = (tpool_t*)malloc(sizeof(tpool_t));
initTpool(gNBthreads, gNB->threadPool, true);
gNB->resp_L1_tx = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
initNotifiedFIFO(gNB->resp_L1_tx);
gNB->L1_tx_free = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
gNB->L1_tx_filled = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
gNB->L1_tx_out = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
initNotifiedFIFO(gNB->L1_tx_free);
initNotifiedFIFO(gNB->L1_tx_filled);
initNotifiedFIFO(gNB->L1_tx_out);
// we create 2 threads for L1 tx processing
notifiedFIFO_elt_t *msgL1Tx = newNotifiedFIFO_elt(sizeof(processingData_L1tx_t),0,gNB->resp_L1_tx,processSlotTX);
notifiedFIFO_elt_t *msgL1Tx = newNotifiedFIFO_elt(sizeof(processingData_L1tx_t),0,gNB->L1_tx_free,processSlotTX);
processingData_L1tx_t *msgDataTx = (processingData_L1tx_t *)NotifiedFifoData(msgL1Tx);
init_DLSCH_struct(gNB, msgDataTx);
msgDataTx->slot = slot;
msgDataTx->frame = frame;
memset(msgDataTx->ssb, 0, 64*sizeof(NR_gNB_SSB_t));
reset_meas(&msgDataTx->phy_proc_tx);
gNB->phy_proc_tx_0 = &msgDataTx->phy_proc_tx;
pushTpool(gNB->threadPool,msgL1Tx);
gNB->phy_proc_tx[0] = &msgDataTx->phy_proc_tx;
for (SNR = snr0; SNR < snr1; SNR += .2) {
......@@ -1074,6 +1077,7 @@ int main(int argc, char **argv)
Sched_INFO.UL_tti_req = gNB_mac->UL_tti_req_ahead[slot];
Sched_INFO.UL_dci_req = NULL;
Sched_INFO.TX_req = &gNB_mac->TX_req[0];
pushNotifiedFIFO(gNB->L1_tx_free,msgL1Tx);
nr_schedule_response(&Sched_INFO);
/* PTRS values for DLSIM calculations */
......@@ -1307,10 +1311,10 @@ int main(int argc, char **argv)
printf("\n");
if (print_perf==1) {
printf("\ngNB TX function statistics (per %d us slot, NPRB %d, mcs %d, TBS %d)\n",
printf("\ngNB TX function statistics (per %d us slot, NPRB %d, mcs %d, block %d)\n",
1000>>*scc->ssbSubcarrierSpacing, g_rbSize, g_mcsIndex,
msgDataTx->dlsch[0][0]->harq_process.pdsch_pdu.pdsch_pdu_rel15.TBSize[0]<<3);
printDistribution(gNB->phy_proc_tx_0,table_tx,"PHY proc tx");
printDistribution(gNB->phy_proc_tx[0],table_tx,"PHY proc tx");
printStatIndent2(&gNB->dlsch_encoding_stats,"DLSCH encoding time");
printStatIndent3(&gNB->dlsch_segmentation_stats,"DLSCH segmentation time");
printStatIndent3(&gNB->tinput,"DLSCH LDPC input processing time");
......
......@@ -670,13 +670,16 @@ int main(int argc, char **argv)
char tp_param[] = "n";
initTpool(tp_param, gNB->threadPool, false);
initNotifiedFIFO(gNB->respDecode);
gNB->resp_L1_tx = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
initNotifiedFIFO(gNB->resp_L1_tx);
notifiedFIFO_elt_t *msgL1Tx = newNotifiedFIFO_elt(sizeof(processingData_L1tx_t),0,gNB->resp_L1_tx,NULL);
gNB->L1_tx_free = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
gNB->L1_tx_filled = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
gNB->L1_tx_out = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
initNotifiedFIFO(gNB->L1_tx_free);
initNotifiedFIFO(gNB->L1_tx_filled);
initNotifiedFIFO(gNB->L1_tx_out);
notifiedFIFO_elt_t *msgL1Tx = newNotifiedFIFO_elt(sizeof(processingData_L1tx_t),0,gNB->L1_tx_free,NULL);
processingData_L1tx_t *msgDataTx = (processingData_L1tx_t *)NotifiedFifoData(msgL1Tx);
msgDataTx->slot = -1;
gNB->phy_proc_tx_0 = &msgDataTx->phy_proc_tx;
pushNotifiedFIFO(gNB->resp_L1_tx,msgL1Tx); // to unblock the process in the beginning
gNB->phy_proc_tx[0] = &msgDataTx->phy_proc_tx;
//gNB_config = &gNB->gNB_config;
//memset((void *)&gNB->UL_INFO,0,sizeof(gNB->UL_INFO));
......@@ -1118,6 +1121,7 @@ int main(int argc, char **argv)
}
// prepare ULSCH/PUSCH reception
pushNotifiedFIFO(gNB->L1_tx_free,msgL1Tx); // to unblock the process in the beginning
nr_schedule_response(Sched_INFO);
// --------- setting parameters for UE --------
......
......@@ -65,7 +65,7 @@
{CONFIG_STRING_L1_REMOTE_N_PORTC, NULL, 0, uptr:NULL, defintval:50030, TYPE_UINT, 0}, \
{CONFIG_STRING_L1_LOCAL_N_PORTD, NULL, 0, uptr:NULL, defintval:50031, TYPE_UINT, 0}, \
{CONFIG_STRING_L1_REMOTE_N_PORTD, NULL, 0, uptr:NULL, defintval:50031, TYPE_UINT, 0}, \
{CONFIG_STRING_L1_PUSCH_PROC_THREADS, NULL, 0, uptr:NULL, defintval:3, TYPE_UINT, 0}, \
{CONFIG_STRING_L1_PUSCH_PROC_THREADS, NULL, 0, uptr:NULL, defintval:4, TYPE_UINT, 0}, \
{CONFIG_STRING_L1_OFDM_OFFSET_DIVISOR, NULL, 0, uptr:NULL, defuintval:8, TYPE_UINT, 0}, \
{CONFIG_STRING_L1_PUCCH0_DTX_THRESHOLD, NULL, 0, uptr:NULL, defintval:100, TYPE_UINT, 0}, \
{CONFIG_STRING_L1_PRACH_DTX_THRESHOLD, NULL, 0, uptr:NULL, defintval:150, TYPE_UINT, 0}, \
......
......@@ -73,6 +73,8 @@ void fill_ul_config(fapi_nr_ul_config_request_t *ul_config, frame_t frame_tx, in
memset(ul_config->ul_config_list, 0, sizeof(ul_config->ul_config_list));
}
ul_config->ul_config_list[ul_config->number_pdus].pdu_type = pdu_type;
//ul_config->slot = slot_tx;
//ul_config->sfn = frame_tx;
ul_config->slot = slot_tx;
ul_config->sfn = frame_tx;
ul_config->number_pdus++;
......@@ -971,6 +973,8 @@ NR_UE_L2_STATE_t nr_ue_scheduler(nr_downlink_indication_t *dl_info, nr_uplink_in
uint8_t ulsch_input_buffer_array[NFAPI_MAX_NUM_UL_PDU][MAX_ULSCH_PAYLOAD_BYTES];
nr_scheduled_response_t scheduled_response;
fapi_nr_tx_request_t tx_req;
//tx_req.slot = slot_tx;
//tx_req.sfn = frame_tx;
tx_req.slot = slot_tx;
tx_req.sfn = frame_tx;
tx_req.number_of_pdus = 0;
......
......@@ -272,6 +272,7 @@ bool is_xlsch_in_slot(uint64_t bitmap, sub_frame_t slot) {
void gNB_dlsch_ulsch_scheduler(module_id_t module_idP,
frame_t frame,
sub_frame_t slot){
//pthread_mutex_lock(&mutextest);
protocol_ctxt_t ctxt={0};
PROTOCOL_CTXT_SET_BY_MODULE_ID(&ctxt, module_idP, ENB_FLAG_YES, NOT_A_RNTI, frame, slot,module_idP);
......
......@@ -4293,7 +4293,7 @@ ssize_t do_nrMeasurementReport(uint8_t *buffer,
LTE_MeasResultListEUTRA_t *measResultListEUTRA2=&measResultNeighCells->choice.measResultListEUTRA;
asn1cSequenceAdd(measResultListEUTRA2->list, struct LTE_MeasResultEUTRA, measresulteutra_list);
measresulteutra_list->physCellId = phy_id;
asn1cCalloc(measresulteutra_list->cgi_Info, measresult_cgi2);
//asn1cCalloc(measresulteutra_list->cgi_Info, measresult_cgi2);
//measresult_cgi2->cellGlobalId= {0};
//measresult_cgi2->trackingAreaCode= {0};
struct LTE_MeasResultEUTRA__measResult* measResult= &measresulteutra_list->measResult;
......
......@@ -213,7 +213,7 @@ L1s = (
{
num_cc = 1;
tr_n_preference = "local_mac";
pusch_proc_threads = 2;
pusch_proc_threads = 8;
}
);
......