Commit 85e1c34c authored by Sakthivel Velumani

Multiple RXTX sequential threads

Created a separate job for PDSCH processing so that it is executed in the right order in rfsimulator mode.
parent cef0567a
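The ordering constraint behind this commit (a PDSCH job and a TX job are spawned per slot, and the slot handler must not move on until the results tagged with its own slot have come back) can be illustrated without the OAI thread-pool API. Below is a minimal, self-contained pthread sketch; every name in it (keyed_fifo_t, fifo_wait_for_key, pdsch_job) is invented for the example and is not an OAI symbol. Workers finish in arbitrary order but tag their completion with a slot key, and the consumer blocks until the notification for its own slot arrives, which is the same idea as the new wait loop in processSlotRX that polls UE->respPdsch and UE->txFifo until res->key matches proc->nr_slot_rx and proc->nr_slot_tx.

/* Minimal, self-contained sketch (plain pthreads, not the OAI notifiedFIFO/Tpool API). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

typedef struct notif_s {
  int key;                 /* slot index the finished job belongs to */
  struct notif_s *next;
} notif_t;

typedef struct {
  notif_t *head;
  pthread_mutex_t lock;
  pthread_cond_t cond;
} keyed_fifo_t;

static void fifo_push(keyed_fifo_t *f, int key) {
  notif_t *n = malloc(sizeof(*n));
  n->key = key;
  pthread_mutex_lock(&f->lock);
  n->next = f->head;
  f->head = n;
  pthread_cond_broadcast(&f->cond);
  pthread_mutex_unlock(&f->lock);
}

/* Block until a notification carrying the requested key is present,
 * leaving notifications that belong to other slots in the FIFO. */
static void fifo_wait_for_key(keyed_fifo_t *f, int key) {
  pthread_mutex_lock(&f->lock);
  for (;;) {
    for (notif_t **p = &f->head; *p; p = &(*p)->next) {
      if ((*p)->key == key) {
        notif_t *hit = *p;
        *p = hit->next;
        free(hit);
        pthread_mutex_unlock(&f->lock);
        return;
      }
    }
    pthread_cond_wait(&f->cond, &f->lock);
  }
}

typedef struct { keyed_fifo_t *fifo; int slot; } job_t;

/* Stands in for the separate PDSCH job: it may finish out of order,
 * but it always reports the slot it was launched for. */
static void *pdsch_job(void *arg) {
  job_t *j = arg;
  usleep((rand() % 5) * 1000);
  fifo_push(j->fifo, j->slot);
  return NULL;
}

int main(void) {
  keyed_fifo_t fifo = { .head = NULL };
  pthread_mutex_init(&fifo.lock, NULL);
  pthread_cond_init(&fifo.cond, NULL);

  pthread_t th[4];
  job_t jobs[4];
  for (int slot = 0; slot < 4; slot++) {
    jobs[slot] = (job_t){ &fifo, slot };
    pthread_create(&th[slot], NULL, pdsch_job, &jobs[slot]);
  }

  /* The slot handler consumes results strictly in slot order,
   * regardless of the order in which the jobs completed. */
  for (int slot = 0; slot < 4; slot++) {
    fifo_wait_for_key(&fifo, slot);
    printf("slot %d: PDSCH result consumed in order\n", slot);
  }
  for (int slot = 0; slot < 4; slot++) pthread_join(th[slot], NULL);
  return 0;
}

Unlike this sketch, the actual code recycles the FIFO elements as job containers, so even a matching notification is pushed back onto the FIFO (pushNotifiedFIFO_nothreadSafe) after the check flag is cleared.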
@@ -26,6 +26,7 @@
 #include "RRC/NR_UE/rrc_proto.h"
 #include "SCHED_NR_UE/phy_frame_config_nr.h"
 #include "SCHED_NR_UE/defs.h"
+#include "SCHED_NR_UE/pucch_uci_ue_nr.h"
 #include "PHY/NR_UE_TRANSPORT/nr_transport_proto_ue.h"
 #include "executables/softmodem-common.h"
@@ -92,6 +93,8 @@
 #define DURATION_RX_TO_TX (6) /* For LTE, this duration is fixed to 4 and it is linked to LTE standard for both modes FDD/TDD */
 #endif
+#define RX_JOB_ID 0x1010
 typedef enum {
   pss = 0,
   pbch = 1,
@@ -130,10 +133,7 @@ void init_nr_ue_vars(PHY_VARS_NR_UE *ue,
  * \param arg is a pointer to a \ref PHY_VARS_NR_UE structure.
  */
-typedef struct syncData_s {
-  UE_nr_rxtx_proc_t proc;
-  PHY_VARS_NR_UE *UE;
-} syncData_t;
+typedef nr_rxtx_thread_data_t syncData_t;
 static void UE_synch(void *arg) {
   syncData_t *syncD=(syncData_t *) arg;
@@ -359,7 +359,10 @@ void processSlotRX(void *arg) {
   PHY_VARS_NR_UE *UE = rxtxD->UE;
   fapi_nr_config_request_t *cfg = &UE->nrUE_config;
   int rx_slot_type = nr_ue_slot_select(cfg, proc->frame_rx, proc->nr_slot_rx);
+  int tx_slot_type = nr_ue_slot_select(cfg, proc->frame_tx, proc->nr_slot_tx);
   uint8_t gNB_id = 0;
+  bool check_tx_finish = true;
+  bool check_rx_finish = true;
   if (rx_slot_type == NR_DOWNLINK_SLOT || rx_slot_type == NR_MIXED_SLOT){
@@ -387,26 +390,21 @@ void processSlotRX(void *arg) {
     // Wait for PUSCH processing to finish
     notifiedFIFO_elt_t *res;
+    while (check_tx_finish || check_rx_finish) { // Loop through the FIFO for the current slot msg
+      // under the assumption that no two slots with same slot index of different frames are processed at the same time
+      res = pullTpool(UE->respPdsch,&(get_nrUE_params()->Tpool));
+      if (res->key == proc->nr_slot_rx) check_rx_finish = false;
+      pushNotifiedFIFO_nothreadSafe(UE->respPdsch, res);
       res = pullTpool(UE->txFifo,&(get_nrUE_params()->Tpool));
+      if (res->key == proc->nr_slot_tx) check_tx_finish = false;
       pushNotifiedFIFO_nothreadSafe(UE->txFifo, res);
-    if (get_softmodem_params()->usim_test==0) {
-      pucch_procedures_ue_nr(UE,
-                             gNB_id,
-                             proc,
-                             FALSE);
     }
-    LOG_D(PHY, "Sending Uplink data \n");
-    nr_ue_pusch_common_procedures(UE,
-                                  proc->nr_slot_tx,
-                                  &UE->frame_parms,1);
-    ue_ta_procedures(UE, proc->nr_slot_tx, proc->frame_tx);
   } else {
     processSlotTX(rxtxD);
+  }
+  if (tx_slot_type == NR_UPLINK_SLOT || tx_slot_type == NR_MIXED_SLOT){
     if (get_softmodem_params()->usim_test==0) {
       pucch_procedures_ue_nr(UE,
                              gNB_id,
@@ -549,25 +547,32 @@ void *UE_thread(void *arg) {
   notifiedFIFO_t rxFifo;
   UE->rxFifo = &rxFifo;
   initNotifiedFIFO(&rxFifo);
-  pushNotifiedFIFO_nothreadSafe(&rxFifo, newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), 1,&rxFifo,processSlotRX));
   notifiedFIFO_t txFifo;
   UE->txFifo = &txFifo;
   initNotifiedFIFO(&txFifo);
-  pushNotifiedFIFO_nothreadSafe(&txFifo, newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), 1,&txFifo,processSlotTX));
-  int nbSlotProcessing=0;
+  notifiedFIFO_t respPdsch;
+  UE->respPdsch = &respPdsch;
+  initNotifiedFIFO(&respPdsch);
   int thread_idx=0;
   NR_UE_MAC_INST_t *mac = get_mac_inst(0);
   int timing_advance = UE->timing_advance;
   bool syncRunning=false;
   const int nb_slot_frame = UE->frame_parms.slots_per_frame;
-  int absolute_slot=0, decoded_frame_rx=INT_MAX, trashed_frames=0;
+  int absolute_slot=0, decoded_frame_rx=-1, trashed_frames=0;
+  for (int i=0; i<RX_NB_TH; i++) {
+    pushNotifiedFIFO_nothreadSafe(&rxFifo, newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), RX_JOB_ID,&rxFifo,processSlotRX));
+    pushNotifiedFIFO_nothreadSafe(&txFifo, newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), 1,&txFifo,processSlotTX));
+    pushNotifiedFIFO_nothreadSafe(&respPdsch, newNotifiedFIFO_elt(sizeof(pdsch_rx_thread_data_t), 1,&respPdsch,processPdsch));
+  }
   while (!oai_exit) {
     if (UE->lost_sync) {
-      abortTpool(&(get_nrUE_params()->Tpool),0);
+      abortTpool(&(get_nrUE_params()->Tpool),RX_JOB_ID);
       UE->is_synchronized = 0;
       UE->lost_sync = 0;
     }
@@ -635,18 +640,6 @@ void *UE_thread(void *arg) {
     // this is general failure in UE !!!
     thread_idx = absolute_slot % RX_NB_TH;
     int slot_nr = absolute_slot % nb_slot_frame;
-    notifiedFIFO_elt_t *msgToPush;
-    AssertFatal((msgToPush=pullTpool(&rxFifo,&(get_nrUE_params()->Tpool))) != NULL,"chained list failure");
-    nr_rxtx_thread_data_t *curMsg=(nr_rxtx_thread_data_t *)NotifiedFifoData(msgToPush);
-    curMsg->UE=UE;
-    // update thread index for received subframe
-    curMsg->proc.thread_id = thread_idx;
-    curMsg->proc.CC_id = UE->CC_id;
-    curMsg->proc.nr_slot_rx = slot_nr;
-    curMsg->proc.nr_slot_tx = (absolute_slot + DURATION_RX_TO_TX) % nb_slot_frame;
-    curMsg->proc.frame_rx = (absolute_slot/nb_slot_frame) % MAX_FRAME_NUMBER;
-    curMsg->proc.frame_tx = ((absolute_slot+DURATION_RX_TO_TX)/nb_slot_frame) % MAX_FRAME_NUMBER;
-    curMsg->proc.decoded_frame_rx=-1;
     //LOG_I(PHY,"Process slot %d thread Idx %d total gain %d\n", slot_nr, thread_idx, UE->rx_total_gain_dB);
 #ifdef OAI_ADRV9371_ZC706
@@ -702,13 +695,30 @@ void *UE_thread(void *arg) {
       LOG_E(PHY,"can't compensate: diff =%d\n", first_symbols);
     }
+    notifiedFIFO_elt_t *msgToPush;
+    AssertFatal((msgToPush=pullTpool(&rxFifo,&(get_nrUE_params()->Tpool))) != NULL,"chained list failure");
+    nr_rxtx_thread_data_t *curMsg=(nr_rxtx_thread_data_t *)NotifiedFifoData(msgToPush);
+    // Extract the received frame number
+    if (curMsg->proc.decoded_frame_rx != -1)
+      decoded_frame_rx=(((mac->mib->systemFrameNumber.buf[0] >> mac->mib->systemFrameNumber.bits_unused)<<4) | curMsg->proc.decoded_frame_rx);
+    else
+      decoded_frame_rx=-1;
+    curMsg->UE=UE;
+    // update thread index for received subframe
+    curMsg->proc.thread_id = thread_idx;
+    curMsg->proc.CC_id = UE->CC_id;
+    curMsg->proc.nr_slot_rx = slot_nr;
+    curMsg->proc.nr_slot_tx = (absolute_slot + DURATION_RX_TO_TX) % nb_slot_frame;
+    curMsg->proc.frame_rx = (absolute_slot/nb_slot_frame) % MAX_FRAME_NUMBER;
+    curMsg->proc.frame_tx = ((absolute_slot+DURATION_RX_TO_TX)/nb_slot_frame) % MAX_FRAME_NUMBER;
+    curMsg->proc.decoded_frame_rx=-1;
     curMsg->proc.timestamp_tx = timestamp+
       UE->frame_parms.get_samples_slot_timestamp(slot_nr,
         &UE->frame_parms,DURATION_RX_TO_TX) - firstSymSamp;
-    decoded_frame_rx=-1;
-    if (decoded_frame_rx>0 && decoded_frame_rx != curMsg->proc.frame_rx)
+    // Check if the processed frame number is same as the received frame number;
+    if ( decoded_frame_rx>0 && decoded_frame_rx != curMsg->proc.frame_rx)
       LOG_E(PHY,"Decoded frame index (%d) is not compatible with current context (%d), UE should go back to synch mode\n",
             decoded_frame_rx, curMsg->proc.frame_rx );
@@ -747,8 +757,6 @@ void *UE_thread(void *arg) {
       for (int i=0; i<UE->frame_parms.nb_antennas_tx; i++)
         memset(txp[i], 0, writeBlockSize);
-    nbSlotProcessing++;
-    msgToPush->key=1;
     pushTpool(&(get_nrUE_params()->Tpool), msgToPush);
   } // while !oai_exit
...
@@ -214,10 +214,17 @@ nrUE_params_t *get_nrUE_params(void) {
 }
 /* initialie thread pools used for NRUE processing paralleliation */
 void init_tpools(uint8_t nun_dlsch_threads) {
-  char *params=calloc(1,(RX_NB_TH*3)+1);
-  for (int i=0; i<RX_NB_TH; i++) {
+  char *params = NULL;
+  if (IS_SOFTMODEM_RFSIM) {
+    params = calloc(1,2);
+    memcpy(params,"N",1);
+  }
+  else {
+    params = calloc(1,(RX_NB_TH*RX_NB_TH_PL*3)+1);
+    for (int i=0; i<RX_NB_TH*RX_NB_TH_PL; i++) {
       memcpy(params+(i*3),"-1,",3);
     }
+  }
   initTpool(params, &(nrUE_params.Tpool), false);
   free(params);
   init_dlsch_tpool( nun_dlsch_threads);
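A side note on the init_tpools change above: the parameter string handed to initTpool now differs between the RF simulator and a real radio, a single "N" entry under IS_SOFTMODEM_RFSIM (which, given the commit's goal, presumably keeps slot processing sequential) versus one "-1," entry per worker for RX_NB_TH * RX_NB_TH_PL workers. The stand-alone snippet below only rebuilds those strings so the two configurations can be inspected; build_tpool_params is a hypothetical helper written for this example, not an OAI function.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RX_NB_TH    2
#define RX_NB_TH_PL 2   /* number of parallel jobs per slot, as added by this commit */

/* Rebuild the thread-pool parameter string exactly as the rewritten
 * init_tpools does (hypothetical helper, for inspection only). */
static char *build_tpool_params(int rfsim) {
  char *params;
  if (rfsim) {
    params = calloc(1, 2);
    memcpy(params, "N", 1);
  } else {
    params = calloc(1, (RX_NB_TH * RX_NB_TH_PL * 3) + 1);
    for (int i = 0; i < RX_NB_TH * RX_NB_TH_PL; i++)
      memcpy(params + (i * 3), "-1,", 3);
  }
  return params;
}

int main(void) {
  char *rfsim = build_tpool_params(1);
  char *radio = build_tpool_params(0);
  printf("rfsim : \"%s\"\n", rfsim);   /* prints "N" */
  printf("radio : \"%s\"\n", radio);   /* prints "-1,-1,-1,-1," with the defines above */
  free(rfsim);
  free(radio);
  return 0;
}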
@@ -240,7 +247,6 @@ static void get_options(void) {
       printf("%s\n",uecap_xer);
       uecap_xer_in=1;
     } /* UE with config file */
-  init_tpools(nrUE_params.nr_dlsch_parallel);
 }
 // set PHY vars from command line
@@ -478,6 +484,7 @@ int main( int argc, char **argv ) {
   get_options (); //Command-line options specific for NRUE
   get_common_options(SOFTMODEM_5GUE_BIT );
+  init_tpools(nrUE_params.nr_dlsch_parallel);
   CONFIG_CLEARRTFLAG(CONFIG_NOEXITONHELP);
 #if T_TRACER
   T_Config_Init();
...
@@ -1741,6 +1741,8 @@ int nr_rx_pdsch(PHY_VARS_NR_UE *ue,
 int32_t generate_nr_prach(PHY_VARS_NR_UE *ue, uint8_t gNB_id, uint8_t subframe);
 void *dlsch_thread(void *arg);
+void processPdsch(void *arg);
 /**@}*/
 #endif
@@ -75,6 +75,7 @@
 #define RX_NB_TH_MAX 2
 #define RX_NB_TH 2
+#define RX_NB_TH_PL 2 //number of parallel jobs per slot
 #define RX_NB_TH_DL 14
 #define LTE_SLOTS_PER_SUBFRAME 2
...
@@ -1068,6 +1068,7 @@ typedef struct {
 #endif
   notifiedFIFO_t *rxFifo;
   notifiedFIFO_t *txFifo;
+  notifiedFIFO_t *respPdsch;
   int dl_stats[5];
@@ -1081,5 +1082,10 @@ typedef struct nr_rxtx_thread_data_s {
   PHY_VARS_NR_UE *UE;
 } nr_rxtx_thread_data_t;
+typedef struct pdsch_rx_thread_data_s {
+  nr_rxtx_thread_data_t rxtxD;
+  int gNB_id;
+  uint8_t dlsch_parallel;
+} pdsch_rx_thread_data_t;
 #include "SIMULATION/ETH_TRANSPORT/defs.h"
 #endif