Commit bd60e402 authored by hardy

Merge remote-tracking branch 'origin/ue-pdsch-pusch-parallel' into integration_2021_wk17_b

parents e22389dc 5566d65d
......@@ -231,7 +231,7 @@ RUs = (
att_rx = 0;
bands = [7];
max_pdschReferenceSignalPower = -27;
max_rxgain = 75;
max_rxgain = 50;
eNB_instances = [0];
## beamforming 1x2 matrix: 1 layer x 2 antennas
bf_weights = [0x00007fff, 0x0000];
......
......@@ -355,7 +355,9 @@ void init_gNB_Tpool(int inst) {
// ULSCH decoding threadpool
gNB->threadPool = (tpool_t*)malloc(sizeof(tpool_t));
int numCPU = sysconf(_SC_NPROCESSORS_ONLN);
LOG_I(PHY,"Number of threads requested in config file: %d, Number of threads available on this machine: %d\n",gNB->pusch_proc_threads,numCPU);
int threadCnt = min(numCPU, gNB->pusch_proc_threads);
if (threadCnt < 2) LOG_E(PHY,"Number of threads for gNB should be more than 1. Allocated only %d\n",threadCnt);
char ul_pool[80];
sprintf(ul_pool,"-1");
int s_offset = 0;
......
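Note on the hunk above: the ULSCH decoding pool size is now clamped to the smaller of the configured pusch_proc_threads and the CPU count the OS reports. A minimal standalone sketch of that clamp (clamp_pusch_threads and the plain fprintf logging are illustrative only, not the OAI API):

#include <stdio.h>
#include <unistd.h>

/* Cap the PUSCH worker count requested in the config file at the number of
 * CPUs currently online; fewer than two workers is reported as a problem. */
static int clamp_pusch_threads(int requested)
{
  int num_cpu = (int)sysconf(_SC_NPROCESSORS_ONLN);
  int thread_cnt = (requested < num_cpu) ? requested : num_cpu;
  if (thread_cnt < 2)
    fprintf(stderr, "gNB thread pool needs more than 1 thread, allocated only %d\n", thread_cnt);
  return thread_cnt;
}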
......@@ -27,6 +27,8 @@
#include "SCHED_NR_UE/phy_frame_config_nr.h"
#include "SCHED_NR_UE/defs.h"
#include "PHY/NR_UE_TRANSPORT/nr_transport_proto_ue.h"
#include "executables/softmodem-common.h"
#include "SCHED_NR_UE/pucch_uci_ue_nr.h"
/*
* NR SLOT PROCESSING SEQUENCE
......@@ -85,6 +87,14 @@
*
*/
#ifndef NO_RAT_NR
#define DURATION_RX_TO_TX (NR_UE_CAPABILITY_SLOT_RX_TO_TX) /* for NR this will depend on a UE capability that is not yet defined */
#else
#define DURATION_RX_TO_TX (6) /* For LTE, this duration is fixed to 4 and is set by the LTE standard for both FDD and TDD modes */
#endif
#define RX_JOB_ID 0x1010
#define TX_JOB_ID 100
typedef enum {
pss = 0,
pbch = 1,
......@@ -101,6 +111,7 @@ void init_nr_ue_vars(PHY_VARS_NR_UE *ue,
ue->Mod_id = UE_id;
ue->mac_enabled = 1;
ue->if_inst = nr_ue_if_module_init(0);
ue->dci_thres = 0;
// Setting UE mode to NOT_SYNCHED by default
for (gNB_id = 0; gNB_id < nb_connected_gNB; gNB_id++){
......@@ -123,10 +134,7 @@ void init_nr_ue_vars(PHY_VARS_NR_UE *ue,
* \param arg is a pointer to a \ref PHY_VARS_NR_UE structure.
*/
typedef struct syncData_s {
UE_nr_rxtx_proc_t proc;
PHY_VARS_NR_UE *UE;
} syncData_t;
typedef nr_rxtx_thread_data_t syncData_t;
static void UE_synch(void *arg) {
syncData_t *syncD=(syncData_t *) arg;
......@@ -273,7 +281,11 @@ static void UE_synch(void *arg) {
}
}
void processSlotTX( PHY_VARS_NR_UE *UE, UE_nr_rxtx_proc_t *proc) {
void processSlotTX(void *arg) {
nr_rxtx_thread_data_t *rxtxD = (nr_rxtx_thread_data_t *) arg;
UE_nr_rxtx_proc_t *proc = &rxtxD->proc;
PHY_VARS_NR_UE *UE = rxtxD->UE;
fapi_nr_config_request_t *cfg = &UE->nrUE_config;
int tx_slot_type = nr_ue_slot_select(cfg, proc->frame_tx, proc->nr_slot_tx);
uint8_t gNB_id = 0;
......@@ -304,10 +316,14 @@ void processSlotTX( PHY_VARS_NR_UE *UE, UE_nr_rxtx_proc_t *proc) {
}
}
void processSlotRX( PHY_VARS_NR_UE *UE, UE_nr_rxtx_proc_t *proc) {
void processSlotRX(void *arg) {
nr_rxtx_thread_data_t *rxtxD = (nr_rxtx_thread_data_t *) arg;
UE_nr_rxtx_proc_t *proc = &rxtxD->proc;
PHY_VARS_NR_UE *UE = rxtxD->UE;
fapi_nr_config_request_t *cfg = &UE->nrUE_config;
int rx_slot_type = nr_ue_slot_select(cfg, proc->frame_rx, proc->nr_slot_rx);
int tx_slot_type = nr_ue_slot_select(cfg, proc->frame_tx, proc->nr_slot_tx);
uint8_t gNB_id = 0;
if (rx_slot_type == NR_DOWNLINK_SLOT || rx_slot_type == NR_MIXED_SLOT){
......@@ -323,7 +339,7 @@ void processSlotRX( PHY_VARS_NR_UE *UE, UE_nr_rxtx_proc_t *proc) {
phy_procedures_slot_parallelization_nrUE_RX( UE, proc, 0, 0, 1, no_relay, NULL );
#else
uint64_t a=rdtsc();
phy_procedures_nrUE_RX(UE, proc, gNB_id, get_nrUE_params()->nr_dlsch_parallel);
phy_procedures_nrUE_RX(UE, proc, gNB_id, get_nrUE_params()->nr_dlsch_parallel, &rxtxD->txFifo);
LOG_D(PHY, "In %s: slot %d, time %lu\n", __FUNCTION__, proc->nr_slot_rx, (rdtsc()-a)/3500);
#endif
......@@ -341,34 +357,38 @@ void processSlotRX( PHY_VARS_NR_UE *UE, UE_nr_rxtx_proc_t *proc) {
nr_pdcp_tick(proc->frame_rx, proc->nr_slot_rx / UE->frame_parms.slots_per_subframe);
}
}
}
}
// Wait for PUSCH processing to finish
notifiedFIFO_elt_t *res;
res = pullTpool(&rxtxD->txFifo,&(get_nrUE_params()->Tpool));
delNotifiedFIFO_elt(res);
/*!
* \brief This is the UE thread for RX subframe n and TX subframe n+4.
* This thread performs the phy_procedures_UE_RX() on every received slot.
* then, if TX is enabled it performs TX for n+4.
* \param arg is a pointer to a \ref PHY_VARS_NR_UE structure.
* \returns a pointer to an int. The storage is not on the heap and must not be freed.
*/
} else {
processSlotTX(rxtxD);
}
typedef struct processingData_s {
UE_nr_rxtx_proc_t proc;
PHY_VARS_NR_UE *UE;
} processingData_t;
if (tx_slot_type == NR_UPLINK_SLOT || tx_slot_type == NR_MIXED_SLOT){
if (UE->UE_mode[gNB_id] <= PUSCH) {
if (get_softmodem_params()->usim_test==0) {
pucch_procedures_ue_nr(UE,
gNB_id,
proc,
FALSE);
}
void UE_processing(void *arg) {
processingData_t *rxtxD = (processingData_t *) arg;
UE_nr_rxtx_proc_t *proc = &rxtxD->proc;
PHY_VARS_NR_UE *UE = rxtxD->UE;
int slot_tx = proc->nr_slot_tx;
int frame_tx = proc->frame_tx;
LOG_D(PHY, "Sending Uplink data \n");
nr_ue_pusch_common_procedures(UE,
proc->nr_slot_tx,
&UE->frame_parms,1);
}
processSlotRX(UE, proc);
processSlotTX(UE, proc);
ue_ta_procedures(UE, slot_tx, frame_tx);
if (UE->UE_mode[gNB_id] > NOT_SYNCHED && UE->UE_mode[gNB_id] < PUSCH) {
nr_ue_prach_procedures(UE, proc, gNB_id);
}
LOG_D(PHY,"****** end TX-Chain for AbsSubframe %d.%d ******\n", proc->frame_tx, proc->nr_slot_tx);
}
ue_ta_procedures(UE, proc->nr_slot_tx, proc->frame_tx);
}
void dummyWrite(PHY_VARS_NR_UE *UE,openair0_timestamp timestamp, int writeBlockSize) {
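Editorial note on the processSlotRX/processSlotTX split above: the downlink path now pushes the matching TX slot to the thread pool (see the phy_procedures_nrUE_RX hunk further down) and blocks on the per-message txFifo until that job is done, while non-downlink slots simply call processSlotTX inline. The following self-contained pthread sketch illustrates only the wait-for-parallel-TX pattern; slot_fifo_t and tx_job are invented names, and the real code uses OAI's notifiedFIFO plus pushTpool/pullTpool.

#include <pthread.h>
#include <stdio.h>

/* Toy stand-in for the per-slot txFifo: one result slot guarded by a condvar. */
typedef struct {
  pthread_mutex_t lock;
  pthread_cond_t cond;
  int done;
} slot_fifo_t;

/* TX side: do the uplink work for the slot, then notify the RX thread. */
static void *tx_job(void *arg)
{
  slot_fifo_t *f = (slot_fifo_t *)arg;
  /* ... PUCCH/PUSCH processing for nr_slot_tx would run here ... */
  pthread_mutex_lock(&f->lock);
  f->done = 1;
  pthread_cond_signal(&f->cond);
  pthread_mutex_unlock(&f->lock);
  return NULL;
}

int main(void)
{
  slot_fifo_t fifo = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };
  pthread_t tx;
  pthread_create(&tx, NULL, tx_job, &fifo);      /* analogue of pushTpool(...) */

  /* ... PDSCH decoding runs here, concurrently with the TX job ... */

  pthread_mutex_lock(&fifo.lock);                /* analogue of pullTpool(&txFifo, ...) */
  while (!fifo.done)
    pthread_cond_wait(&fifo.cond, &fifo.lock);
  pthread_mutex_unlock(&fifo.lock);

  pthread_join(tx, NULL);
  printf("TX slot finished before recycling the message\n");
  return 0;
}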
......@@ -485,25 +505,44 @@ void *UE_thread(void *arg) {
int start_rx_stream = 0;
AssertFatal(0== openair0_device_load(&(UE->rfdevice), &openair0_cfg[0]), "");
UE->rfdevice.host_type = RAU_HOST;
UE->lost_sync = 0;
UE->is_synchronized = 0;
AssertFatal(UE->rfdevice.trx_start_func(&UE->rfdevice) == 0, "Could not start the device\n");
notifiedFIFO_t nf;
initNotifiedFIFO(&nf);
int nbSlotProcessing=0;
int thread_idx=0;
notifiedFIFO_t freeBlocks;
initNotifiedFIFO_nothreadSafe(&freeBlocks);
int nbSlotProcessing=0;
int thread_idx=0;
NR_UE_MAC_INST_t *mac = get_mac_inst(0);
int timing_advance = UE->timing_advance;
for (int i=0; i<RX_NB_TH+1; i++) // RX_NB_TH working + 1 we are making to be pushed
pushNotifiedFIFO_nothreadSafe(&freeBlocks,
newNotifiedFIFO_elt(sizeof(processingData_t), 0,&nf,UE_processing));
bool syncRunning=false;
const int nb_slot_frame = UE->frame_parms.slots_per_frame;
int absolute_slot=0, decoded_frame_rx=INT_MAX, trashed_frames=0;
for (int i=0; i<NR_RX_NB_TH+1; i++) {// NR_RX_NB_TH messages in flight + 1 spare being prepared for the next push
notifiedFIFO_elt_t *newElt = newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), RX_JOB_ID,&nf,processSlotRX);
nr_rxtx_thread_data_t *curMsg=(nr_rxtx_thread_data_t *)NotifiedFifoData(newElt);
initNotifiedFIFO(&curMsg->txFifo);
pushNotifiedFIFO_nothreadSafe(&freeBlocks, newElt);
}
while (!oai_exit) {
if (UE->lost_sync) {
int nb = abortTpool(&(get_nrUE_params()->Tpool),RX_JOB_ID);
nb += abortNotifiedFIFO(&nf, RX_JOB_ID);
LOG_I(PHY,"Number of aborted slots %d\n",nb);
for (int i=0; i<nb; i++)
pushNotifiedFIFO_nothreadSafe(&freeBlocks, newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), RX_JOB_ID,&nf,processSlotRX));
nbSlotProcessing = 0;
UE->is_synchronized = 0;
UE->lost_sync = 0;
}
if (syncRunning) {
notifiedFIFO_elt_t *res=tryPullTpool(&nf,&(get_nrUE_params()->Tpool));
......@@ -516,6 +555,7 @@ void *UE_thread(void *arg) {
decoded_frame_rx=(decoded_frame_rx + (!UE->init_sync_frame) + trashed_frames) % MAX_FRAME_NUMBER;
}
delNotifiedFIFO_elt(res);
start_rx_stream=0;
} else {
readFrame(UE, &timestamp, true);
trashed_frames+=2;
......@@ -562,13 +602,13 @@ void *UE_thread(void *arg) {
// whatever means thread_idx
// Fix me: will be wrong when slot 1 is slow, as slot 2 finishes
// Slot 3 will overlap if RX_NB_TH is 2
// Slot 3 will overlap if NR_RX_NB_TH is 2
// this is general failure in UE !!!
thread_idx = absolute_slot % RX_NB_TH;
thread_idx = absolute_slot % NR_RX_NB_TH;
int slot_nr = absolute_slot % nb_slot_frame;
notifiedFIFO_elt_t *msgToPush;
AssertFatal((msgToPush=pullNotifiedFIFO_nothreadSafe(&freeBlocks)) != NULL,"chained list failure");
processingData_t *curMsg=(processingData_t *)NotifiedFifoData(msgToPush);
AssertFatal((msgToPush=pullTpool(&freeBlocks,&(get_nrUE_params()->Tpool))) != NULL,"chained list failure");
nr_rxtx_thread_data_t *curMsg=(nr_rxtx_thread_data_t *)NotifiedFifoData(msgToPush);
curMsg->UE=UE;
// update thread index for received subframe
curMsg->proc.thread_id = thread_idx;
......@@ -596,18 +636,18 @@ void *UE_thread(void *arg) {
for (int i=0; i<UE->frame_parms.nb_antennas_tx; i++)
txp[i] = (void *)&UE->common_vars.txdata[i][UE->frame_parms.get_samples_slot_timestamp(
((slot_nr + DURATION_RX_TO_TX - RX_NB_TH)%nb_slot_frame),&UE->frame_parms,0)];
((slot_nr + DURATION_RX_TO_TX - NR_RX_NB_TH)%nb_slot_frame),&UE->frame_parms,0)];
int readBlockSize, writeBlockSize;
if (slot_nr<(nb_slot_frame - 1)) {
readBlockSize=get_readBlockSize(slot_nr, &UE->frame_parms);
writeBlockSize=UE->frame_parms.get_samples_per_slot((slot_nr + DURATION_RX_TO_TX - RX_NB_TH) % nb_slot_frame, &UE->frame_parms);
writeBlockSize=UE->frame_parms.get_samples_per_slot((slot_nr + DURATION_RX_TO_TX - NR_RX_NB_TH) % nb_slot_frame, &UE->frame_parms);
} else {
UE->rx_offset_diff = computeSamplesShift(UE);
readBlockSize=get_readBlockSize(slot_nr, &UE->frame_parms) -
UE->rx_offset_diff;
writeBlockSize=UE->frame_parms.get_samples_per_slot((slot_nr + DURATION_RX_TO_TX - RX_NB_TH) % nb_slot_frame, &UE->frame_parms)- UE->rx_offset_diff;
writeBlockSize=UE->frame_parms.get_samples_per_slot((slot_nr + DURATION_RX_TO_TX - NR_RX_NB_TH) % nb_slot_frame, &UE->frame_parms)- UE->rx_offset_diff;
}
AssertFatal(readBlockSize ==
......@@ -639,10 +679,10 @@ void *UE_thread(void *arg) {
notifiedFIFO_elt_t *res;
while (nbSlotProcessing >= RX_NB_TH) {
while (nbSlotProcessing >= NR_RX_NB_TH) {
res=pullTpool(&nf, &(get_nrUE_params()->Tpool));
nbSlotProcessing--;
processingData_t *tmp=(processingData_t *)res->msgData;
nr_rxtx_thread_data_t *tmp=(nr_rxtx_thread_data_t *)res->msgData;
if (tmp->proc.decoded_frame_rx != -1)
decoded_frame_rx=(((mac->mib->systemFrameNumber.buf[0] >> mac->mib->systemFrameNumber.bits_unused)<<4) | tmp->proc.decoded_frame_rx);
......@@ -659,7 +699,7 @@ void *UE_thread(void *arg) {
// use previous timing_advance value to compute writeTimestamp
writeTimestamp = timestamp+
UE->frame_parms.get_samples_slot_timestamp(slot_nr,&UE->frame_parms,DURATION_RX_TO_TX
- RX_NB_TH) - firstSymSamp - openair0_cfg[0].tx_sample_advance -
- NR_RX_NB_TH) - firstSymSamp - openair0_cfg[0].tx_sample_advance -
UE->N_TA_offset - timing_advance;
// but use current UE->timing_advance value to compute writeBlockSize
......@@ -669,7 +709,7 @@ void *UE_thread(void *arg) {
}
int flags = 0;
int slot_tx_usrp = slot_nr + DURATION_RX_TO_TX - RX_NB_TH;
int slot_tx_usrp = slot_nr + DURATION_RX_TO_TX - NR_RX_NB_TH;
if (openair0_cfg[0].duplex_mode == duplex_mode_TDD) {
......@@ -701,26 +741,9 @@ void *UE_thread(void *arg) {
memset(txp[i], 0, writeBlockSize);
nbSlotProcessing++;
msgToPush->key=slot_nr;
LOG_D(PHY,"Number of slots being processed at the moment: %d\n",nbSlotProcessing);
pushTpool(&(get_nrUE_params()->Tpool), msgToPush);
if (IS_SOFTMODEM_RFSIM) { //getenv("RFSIMULATOR")
// FixMe: Wait previous thread is done, because race conditions seems too bad
// in case of actual RF board, the overlap between threads mitigate the issue
// We must receive one message, that proves the slot processing is done
res=pullTpool(&nf, &(get_nrUE_params()->Tpool));
nbSlotProcessing--;
processingData_t *tmp=(processingData_t *)res->msgData;
if (tmp->proc.decoded_frame_rx != -1)
decoded_frame_rx=(((mac->mib->systemFrameNumber.buf[0] >> mac->mib->systemFrameNumber.bits_unused)<<4) | tmp->proc.decoded_frame_rx);
else
decoded_frame_rx=-1;
//decoded_frame_rx=tmp->proc.decoded_frame_rx;
pushNotifiedFIFO_nothreadSafe(&freeBlocks,res);
}
} // while !oai_exit
return NULL;
......
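One arithmetic note on the writeTimestamp change above (a sketch under simplifying assumptions: a fixed sample count per slot, whereas the real code calls frame_parms.get_samples_slot_timestamp() because slot lengths vary; all names below are placeholders): the TX timestamp is the RX timestamp advanced by DURATION_RX_TO_TX - NR_RX_NB_TH slots and then pulled back by the first-symbol offset, the RF tx_sample_advance, N_TA_offset and the current timing advance.

#include <stdint.h>
#include <stdio.h>

static int64_t tx_write_timestamp(int64_t rx_timestamp,
                                  int samples_per_slot,
                                  int slots_ahead,       /* DURATION_RX_TO_TX - NR_RX_NB_TH */
                                  int first_sym_samp,
                                  int tx_sample_advance,
                                  int n_ta_offset,
                                  int timing_advance)
{
  return rx_timestamp
         + (int64_t)slots_ahead * samples_per_slot
         - first_sym_samp
         - tx_sample_advance
         - n_ta_offset
         - timing_advance;
}

int main(void)
{
  /* Example figures only: 30 kHz SCS at 61.44 Msps gives 30720 samples per slot. */
  printf("%lld\n", (long long)tx_write_timestamp(1000000, 30720, 5, 0, 0, 0, 16));
  return 0;
}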
......@@ -232,10 +232,17 @@ nrUE_params_t *get_nrUE_params(void) {
}
/* initialize thread pools used for NR UE processing parallelization */
void init_tpools(uint8_t nun_dlsch_threads) {
char *params=calloc(1,(RX_NB_TH*3)+1);
for (int i=0; i<RX_NB_TH; i++) {
char *params = NULL;
if (IS_SOFTMODEM_RFSIM) {
params = calloc(1,2);
memcpy(params,"N",1);
}
else {
params = calloc(1,(NR_RX_NB_TH*NR_NB_TH_SLOT*3)+1);
for (int i=0; i<NR_RX_NB_TH*NR_NB_TH_SLOT; i++) {
memcpy(params+(i*3),"-1,",3);
}
}
initTpool(params, &(nrUE_params.Tpool), false);
free(params);
init_dlsch_tpool( nun_dlsch_threads);
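A condensed standalone sketch of the parameter string assembled above (build_tpool_params is an invented helper; the real code fills a local buffer and hands it to initTpool): the RF-simulator path requests "N", a pool with no detached workers so jobs run in the caller's context, while the hardware path requests NR_RX_NB_TH * NR_NB_TH_SLOT unpinned workers as a "-1,-1,..." list.

#include <stdlib.h>
#include <string.h>

static char *build_tpool_params(int use_rfsim, int num_workers)
{
  if (use_rfsim)
    return strdup("N");                       /* no worker threads */

  /* one "-1," triple per worker, meaning "no core pinning" */
  char *params = calloc(1, (size_t)num_workers * 3 + 1);
  for (int i = 0; i < num_workers; i++)
    memcpy(params + i * 3, "-1,", 3);
  return params;
}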
......@@ -258,7 +265,6 @@ static void get_options(void) {
printf("%s\n",uecap_xer);
uecap_xer_in=1;
} /* UE with config file */
init_tpools(nrUE_params.nr_dlsch_parallel);
}
// set PHY vars from command line
......@@ -415,6 +421,7 @@ int main( int argc, char **argv ) {
get_options (); //Command-line options specific for NRUE
get_common_options(SOFTMODEM_5GUE_BIT );
init_tpools(nrUE_params.nr_dlsch_parallel);
CONFIG_CLEARRTFLAG(CONFIG_NOEXITONHELP);
#if T_TRACER
T_Config_Init();
......
......@@ -73,6 +73,7 @@ extern "C"
#define CONFIG_HLP_STMON "Enable processing timing measurement of lte softmodem on per subframe basis \n"
#define CONFIG_HLP_256QAM "Use the 256 QAM mcs table for PDSCH\n"
#define CONFIG_HLP_NONSTOP "Go back to frame sync mode after 100 consecutive PBCH failures\n"
//#define CONFIG_HLP_NUMUES "Set the number of UEs for the emulation"
#define CONFIG_HLP_MSLOTS "Skip the missed slots/subframes \n"
#define CONFIG_HLP_ULMCS "Set the maximum uplink MCS\n"
......@@ -115,6 +116,7 @@ extern "C"
#define USIM_TEST softmodem_params.usim_test
#define USE_256QAM_TABLE softmodem_params.use_256qam_table
#define NFAPI softmodem_params.nfapi
#define NON_STOP softmodem_params.non_stop
#define DEFAULT_RFCONFIG_FILE "/usr/local/etc/syriq/ue.band7.tm1.PRB100.NR40.dat";
......@@ -148,6 +150,7 @@ extern int usrp_tx_thread;
{"use-256qam-table", CONFIG_HLP_256QAM, PARAMFLAG_BOOL, iptr:&USE_256QAM_TABLE, defintval:0, TYPE_INT, 0}, \
{"usrp-tx-thread-config", CONFIG_HLP_USRP_THREAD, 0, iptr:&usrp_tx_thread, defstrval:0, TYPE_INT, 0}, \
{"nfapi", CONFIG_HLP_NFAPI, 0, u8ptr:&nfapi_mode, defintval:0, TYPE_UINT8, 0}, \
{"non-stop", CONFIG_HLP_NONSTOP, PARAMFLAG_BOOL, iptr:&NON_STOP, defintval:0, TYPE_INT, 0}, \
}
......@@ -238,6 +241,7 @@ typedef struct {
uint32_t send_dmrs_sync;
int use_256qam_table;
uint8_t nfapi;
int non_stop;
} softmodem_params_t;
extern uint64_t get_softmodem_optmask(void);
......
......@@ -874,6 +874,33 @@ void nr_pdcch_unscrambling(int16_t *z,
#ifdef NR_PDCCH_DCI_RUN
/* This function compares the received DCI bits with
* re-encoded DCI bits and returns the number of mismatched bits
*/
uint16_t nr_dci_false_detection(uint64_t *dci,
int16_t *soft_in,
const t_nrPolar_params *polar_param,
int encoded_length,
int rnti) {
uint32_t encoder_output[NR_MAX_DCI_SIZE_DWORD];
polar_encoder_fast(dci, (void*)encoder_output, rnti, 1, polar_param);
uint8_t *enout_p = (uint8_t*)encoder_output;
uint16_t x = 0;
for (int i=0; i<encoded_length/8; i++) {
x += ( enout_p[i] & 1 ) ^ ( ( soft_in[i*8] >> 15 ) & 1);
x += ( ( enout_p[i] >> 1 ) & 1 ) ^ ( ( soft_in[i*8+1] >> 15 ) & 1 );
x += ( ( enout_p[i] >> 2 ) & 1 ) ^ ( ( soft_in[i*8+2] >> 15 ) & 1 );
x += ( ( enout_p[i] >> 3 ) & 1 ) ^ ( ( soft_in[i*8+3] >> 15 ) & 1 );
x += ( ( enout_p[i] >> 4 ) & 1 ) ^ ( ( soft_in[i*8+4] >> 15 ) & 1 );
x += ( ( enout_p[i] >> 5 ) & 1 ) ^ ( ( soft_in[i*8+5] >> 15 ) & 1 );
x += ( ( enout_p[i] >> 6 ) & 1 ) ^ ( ( soft_in[i*8+6] >> 15 ) & 1 );
x += ( ( enout_p[i] >> 7 ) & 1 ) ^ ( ( soft_in[i*8+7] >> 15 ) & 1 );
}
return x;
}
uint8_t nr_dci_decoding_procedure(PHY_VARS_NR_UE *ue,
UE_nr_rxtx_proc_t *proc,
fapi_nr_dci_indication_t *dci_ind) {
......@@ -920,6 +947,13 @@ uint8_t nr_dci_decoding_procedure(PHY_VARS_NR_UE *ue,
if (crc == n_rnti) {
LOG_D(PHY, "(%i.%i) Received dci indication (rnti %x,dci format %d,n_CCE %d,payloadSize %d,payload %llx)\n",
proc->frame_rx, proc->nr_slot_rx,n_rnti,rel15->dci_format_options[k],CCEind,dci_length,*(unsigned long long*)dci_estimation);
uint16_t mb = nr_dci_false_detection(dci_estimation,tmp_e,currentPtrDCI,L*108,n_rnti);
ue->dci_thres = (ue->dci_thres + mb) / 2;
if (mb > (ue->dci_thres+20)) {
LOG_W(PHY,"DCI false positive. Dropping DCI index %d. Mismatched bits: %d/%d. Current DCI threshold: %d\n",j,mb,L*108,ue->dci_thres);
continue;
}
else {
dci_ind->SFN = proc->frame_rx;
dci_ind->slot = proc->nr_slot_rx;
dci_ind->dci_list[dci_ind->number_of_dcis].rnti = n_rnti;
......@@ -929,6 +963,7 @@ uint8_t nr_dci_decoding_procedure(PHY_VARS_NR_UE *ue,
memcpy((void*)dci_ind->dci_list[dci_ind->number_of_dcis].payloadBits,(void*)dci_estimation,8);
dci_ind->number_of_dcis++;
break; // If DCI is found, no need to check for remaining DCI lengths
}
} else {
LOG_D(PHY,"(%i.%i) Decoded crc %x does not match rnti %x for DCI format %d\n", proc->frame_rx, proc->nr_slot_rx, crc, n_rnti, rel15->dci_format_options[k]);
}
......
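To make the new acceptance rule concrete, here is a compacted standalone sketch (count_mismatches and accept_dci are illustrative names; the actual code unrolls the comparison byte-wise and keeps the threshold in ue->dci_thres): after a CRC match, the DCI payload is re-encoded, mismatches against the sign bits of the received soft values are counted, the threshold is updated as a running average, and the candidate is dropped if it exceeds that threshold by more than 20 bits.

#include <stdint.h>

/* Count re-encoded bits that disagree with the sign of the matching soft value
 * (the ">> 15" convention from the hunk above: a negative LLR means bit 1). */
static unsigned count_mismatches(const uint8_t *enc_bits, const int16_t *llr, int len)
{
  unsigned x = 0;
  for (int i = 0; i < len; i++)
    x += (unsigned)((enc_bits[i] & 1) ^ ((llr[i] >> 15) & 1));
  return x;
}

/* Update the running-average threshold, then keep the DCI only if the mismatch
 * count does not exceed the threshold by the fixed margin of 20. */
static int accept_dci(int *dci_thres, unsigned mismatched_bits)
{
  *dci_thres = (*dci_thres + (int)mismatched_bits) / 2;
  return mismatched_bits <= (unsigned)(*dci_thres + 20);
}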
......@@ -763,6 +763,8 @@ typedef struct {
int is_synchronized;
/// \brief Indicates on which frame is synchronized in a two frame synchronization
int is_synchronized_on_frame;
/// \brief Indicator that UE lost frame synchronization
int lost_sync;
/// Data structure for UE process scheduling
UE_nr_proc_t proc;
/// Flag to indicate the UE shouldn't do timing correction at all
......@@ -789,6 +791,8 @@ typedef struct {
uint8_t ho_initiated;
/// \brief indicator that Handover procedure has been triggered
uint8_t ho_triggered;
/// threshold for false dci detection
int dci_thres;
/// \brief Measurement variables.
PHY_NR_MEASUREMENTS measurements;
NR_DL_FRAME_PARMS frame_parms;
......@@ -1080,6 +1084,7 @@ typedef struct {
typedef struct nr_rxtx_thread_data_s {
UE_nr_rxtx_proc_t proc;
PHY_VARS_NR_UE *UE;
notifiedFIFO_t txFifo;
} nr_rxtx_thread_data_t;
#include "SIMULATION/ETH_TRANSPORT/defs.h"
......
......@@ -109,6 +109,8 @@
#define MAX_NUM_NR_CHANNEL_BITS (14*273*12*8) // 14 symbols, 273 RB
#define MAX_NUM_NR_RE (14*273*12)
#define NR_RX_NB_TH 1
#define NR_NB_TH_SLOT 2
extern const uint8_t nr_rv_round_map[4];
extern const uint8_t nr_rv_round_map_ue[4];
......
......@@ -564,6 +564,10 @@ void phy_procedures_gNB_uespec_RX(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx)
gNB->uci_pdu_list[num_ucis].pdu_size = sizeof(nfapi_nr_uci_pucch_pdu_format_0_1_t);
nfapi_nr_uci_pucch_pdu_format_0_1_t *uci_pdu_format0 = &gNB->uci_pdu_list[num_ucis].pucch_pdu_format_0_1;
offset = pucch_pdu->start_symbol_index*gNB->frame_parms.ofdm_symbol_size + (gNB->frame_parms.first_carrier_offset+pucch_pdu->prb_start*12);
power_rxF = signal_energy_nodc(&gNB->common_vars.rxdataF[0][offset],12);
LOG_D(PHY,"frame %d, slot %d: PUCCH signal energy %d\n",frame_rx,slot_rx,power_rxF);
nr_decode_pucch0(gNB,
slot_rx,
uci_pdu_format0,
......
......@@ -115,14 +115,17 @@ void phy_procedures_nrUE_TX(PHY_VARS_NR_UE *ue, UE_nr_rxtx_proc_t *proc, uint8_t
@param proc Pointer to proc information
@param gNB_id Local id of gNB on which to act
@param dlsch_parallel use multithreaded dlsch processing
@param txFifo Result fifo if PDSCH is run in parallel
*/
int phy_procedures_nrUE_RX(PHY_VARS_NR_UE *ue,
UE_nr_rxtx_proc_t *proc,
uint8_t gNB_id,
uint8_t dlsch_parallel);
uint8_t dlsch_parallel,
notifiedFIFO_t *txFifo);
int phy_procedures_slot_parallelization_nrUE_RX(PHY_VARS_NR_UE *ue, UE_nr_rxtx_proc_t *proc, uint8_t eNB_id, uint8_t abstraction_flag, uint8_t do_pdcch_flag, relaying_type_t r_type);
void processSlotTX(void *arg);
#ifdef UE_SLOT_PARALLELISATION
void *UE_thread_slot1_dl_processing(void *arg);
......
......@@ -127,6 +127,8 @@ int8_t nr_ue_scheduled_response(nr_scheduled_response_t *scheduled_response){
// dlsch0_harq->status not ACTIVE may be due to false retransmission. Reset the
// following flag to skip PDSCH procedures in that case.
dlsch0->active = 0;
dlsch0_harq->harq_ack.ack = 1;
dlsch0_harq->harq_ack.send_harq_status = 1;
}
dlsch0_harq->harq_ack.vDAI_DL = dlsch_config_pdu->dai;
/* PTRS */
......
......@@ -275,26 +275,8 @@ void phy_procedures_nrUE_TX(PHY_VARS_NR_UE *ue,
nr_ue_ulsch_procedures(ue, harq_pid, frame_tx, slot_tx, proc->thread_id, gNB_id);
}
if (get_softmodem_params()->usim_test==0) {
LOG_D(PHY, "Generating PUCCH\n");
pucch_procedures_ue_nr(ue,
gNB_id,
proc,
FALSE);
}
LOG_D(PHY, "Sending Uplink data \n");
nr_ue_pusch_common_procedures(ue,
slot_tx,
&ue->frame_parms,1);
}
if (ue->UE_mode[gNB_id] > NOT_SYNCHED && ue->UE_mode[gNB_id] < PUSCH) {
nr_ue_prach_procedures(ue, proc, gNB_id);
}
LOG_D(PHY,"****** end TX-Chain for AbsSubframe %d.%d ******\n", frame_tx, slot_tx);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_UE_TX, VCD_FUNCTION_OUT);
#if UE_TIMING_TRACE
stop_meas(&ue->phy_proc_tx);
......@@ -435,10 +417,15 @@ void nr_ue_pbch_procedures(uint8_t gNB_id,
ue->pbch_vars[gNB_id]->pdu_errors++;
if (ue->pbch_vars[gNB_id]->pdu_errors_conseq>=100) {
if (get_softmodem_params()->non_stop) {
LOG_E(PHY,"More that 100 consecutive PBCH errors! Going back to Sync mode!\n");
ue->lost_sync = 1;
} else {
LOG_E(PHY,"More that 100 consecutive PBCH errors! Exiting!\n");
exit_fun("More that 100 consecutive PBCH errors! Exiting!\n");
}
}
}
if (frame_rx % 100 == 0) {
ue->pbch_vars[gNB_id]->pdu_fer = ue->pbch_vars[gNB_id]->pdu_errors - ue->pbch_vars[gNB_id]->pdu_errors_last;
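A minimal sketch of the behaviour now gated by the --non-stop option (placeholder names; the real counter is ue->pbch_vars[gNB_id]->pdu_errors_conseq and the flag is ue->lost_sync, later picked up by UE_thread to abort pending RX jobs and re-enter initial sync):

#include <stdio.h>
#include <stdlib.h>

static void on_pbch_decode_error(int *consecutive_errors, int non_stop, int *lost_sync)
{
  if (++(*consecutive_errors) >= 100) {
    if (non_stop) {
      fprintf(stderr, "100 consecutive PBCH errors, going back to sync mode\n");
      *lost_sync = 1;                 /* UE_thread will reset is_synchronized */
    } else {
      fprintf(stderr, "100 consecutive PBCH errors, exiting\n");
      exit(EXIT_FAILURE);
    }
  }
}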
......@@ -1615,7 +1602,8 @@ int is_pbch_in_slot(fapi_nr_config_request_t *config, int frame, int slot, NR_DL
int phy_procedures_nrUE_RX(PHY_VARS_NR_UE *ue,
UE_nr_rxtx_proc_t *proc,
uint8_t gNB_id,
uint8_t dlsch_parallel
uint8_t dlsch_parallel,
notifiedFIFO_t *txFifo
)
{
int frame_rx = proc->frame_rx;
......@@ -1773,6 +1761,13 @@ int phy_procedures_nrUE_RX(PHY_VARS_NR_UE *ue,
#endif //NR_PDCCH_SCHED
// Start PUSCH processing here. It runs in parallel with PDSCH processing
notifiedFIFO_elt_t *newElt = newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), proc->nr_slot_tx,txFifo,processSlotTX);
nr_rxtx_thread_data_t *curMsg=(nr_rxtx_thread_data_t *)NotifiedFifoData(newElt);
curMsg->proc = *proc;
curMsg->UE = ue;
pushTpool(&(get_nrUE_params()->Tpool), newElt);
#if UE_TIMING_TRACE
start_meas(&ue->generic_stat);
#endif
......@@ -1904,6 +1899,7 @@ int phy_procedures_nrUE_RX(PHY_VARS_NR_UE *ue,
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PDSCH_PROC, VCD_FUNCTION_OUT);
}
#if UE_TIMING_TRACE
start_meas(&ue->generic_stat);
#endif
......
......@@ -202,6 +202,8 @@ int is_x2ap_enabled(void)
return 0;
}
void processSlotTX(void *arg) {}
//nFAPI P7 dummy functions
int oai_nfapi_dl_tti_req(nfapi_nr_dl_tti_request_t *dl_config_req) { return(0); }
......@@ -286,6 +288,20 @@ void nr_dlsim_preprocessor(module_id_t module_id,
AssertFatal(ps->mcsTableIdx >= 0 && ps->mcsTableIdx <= 2, "invalid mcsTableIdx %d\n", ps->mcsTableIdx);
}
typedef struct {
uint64_t optmask; //mask to store boolean config options
uint8_t nr_dlsch_parallel; // number of threads for dlsch decoding, 0 means no parallelization
tpool_t Tpool; // thread pool
} nrUE_params_t;
nrUE_params_t nrUE_params;
nrUE_params_t *get_nrUE_params(void) {
return &nrUE_params;
}
void do_nothing(void *args) {
}
int main(int argc, char **argv)
{
......@@ -885,6 +901,7 @@ int main(int argc, char **argv)
unsigned int errors_bit = 0;
uint32_t errors_scrambling = 0;
initTpool("N", &(nrUE_params.Tpool), false);
test_input_bit = (unsigned char *) malloc16(sizeof(unsigned char) * 16 * 68 * 384);
estimated_output_bit = (unsigned char *) malloc16(sizeof(unsigned char) * 16 * 68 * 384);
......@@ -1135,7 +1152,8 @@ int main(int argc, char **argv)
phy_procedures_nrUE_RX(UE,
&UE_proc,
0,
dlsch_threads);
dlsch_threads,
NULL);
//printf("dlsim round %d ends\n",round);
round++;
......
......@@ -174,6 +174,20 @@ int nr_derive_key(int alg_type, uint8_t alg_id,
return 0;
}
typedef struct {
uint64_t optmask; //mask to store boolean config options
uint8_t nr_dlsch_parallel; // number of threads for dlsch decoding, 0 means no parallelization
tpool_t Tpool; // thread pool
} nrUE_params_t;
nrUE_params_t nrUE_params;
nrUE_params_t *get_nrUE_params(void) {
return &nrUE_params;
}
void processSlotTX(void *arg) {}
int main(int argc, char **argv){
char c;
......
......@@ -198,6 +198,19 @@ int nr_derive_key(int alg_type, uint8_t alg_id,
return 0;
}
typedef struct {
uint64_t optmask; //mask to store boolean config options
uint8_t nr_dlsch_parallel; // number of threads for dlsch decoding, 0 means no parallelization
tpool_t Tpool; // thread pool
} nrUE_params_t;
void processSlotTX(void *arg) {}
nrUE_params_t nrUE_params;
nrUE_params_t *get_nrUE_params(void) {
return &nrUE_params;
}
// needed for some functions
uint16_t n_rnti = 0x1234;
openair0_config_t openair0_cfg[MAX_CARDS];
......