Commit 66b0ab10 authored by Robert Schmidt, committed by Laurent THOMAS

fix tx_resume_ind_fifo message-sending bugs, rename associated function send_slot_ind() to what it actually does: send_dl_done_to_tx_thread()
parent 17abb90c
......@@ -489,7 +489,7 @@ static void UE_synch(void *arg) {
static void RU_write(nr_rxtx_thread_data_t *rxtxD) {
PHY_VARS_NR_UE *UE = rxtxD->UE;
UE_nr_rxtx_proc_t *proc = &rxtxD->proc;
const UE_nr_rxtx_proc_t *proc = &rxtxD->proc;
void *txp[NB_ANTENNAS_TX];
int slot = proc->nr_slot_tx;
......@@ -536,7 +536,7 @@ static void RU_write(nr_rxtx_thread_data_t *rxtxD) {
void processSlotTX(void *arg) {
nr_rxtx_thread_data_t *rxtxD = (nr_rxtx_thread_data_t *) arg;
UE_nr_rxtx_proc_t *proc = &rxtxD->proc;
const UE_nr_rxtx_proc_t *proc = &rxtxD->proc;
PHY_VARS_NR_UE *UE = rxtxD->UE;
nr_phy_data_tx_t phy_data = {0};
......@@ -547,13 +547,42 @@ void processSlotTX(void *arg) {
proc->tx_slot_type,
rxtxD->tx_wait_for_dlsch);
if (proc->tx_slot_type == NR_UPLINK_SLOT || proc->tx_slot_type == NR_MIXED_SLOT){
if (rxtxD->tx_wait_for_dlsch)
LOG_D(PHY, "enter wait for tx, slot %d, nb events to wait %d; ", proc->nr_slot_tx, rxtxD->tx_wait_for_dlsch);
// wait for rx slots to send indication (if any) that DLSCH decoding is finished
for(int i=0; i < rxtxD->tx_wait_for_dlsch; i++) {
notifiedFIFO_elt_t *res = pullNotifiedFIFO(UE->tx_resume_ind_fifo[proc->nr_slot_tx]);
notifiedFIFO_elt_t *res = pullNotifiedFIFO(UE->tx_resume_ind_fifo + proc->nr_slot_tx);
delNotifiedFIFO_elt(res);
}
LOG_D(PHY, "completed wait for tx, slot %d\n", proc->nr_slot_tx);
/*
The code hereafter costs some performance for a check that should be useless.
But today we face several bugs around the matching between the events in UE->tx_resume_ind_fifo[slot]
and the corresponding tx_wait_for_dlsch[slot].
The algorithm is: we accumulate in tx_wait_for_dlsch[slot] the actions that should end before processing a tx slot;
later, other threads push events into UE->tx_resume_ind_fifo[slot],
so the tx encoding starts only when the related actions are done (mainly DLSCH ACK/NACK to encode on PUCCH).
If a bug fails to send an event into UE->tx_resume_ind_fifo[slot], the process hangs and we detect the issue.
If a bug pushes an extra event into UE->tx_resume_ind_fifo[slot], and we drop the check below,
the system runs with random race conditions that are very hard to debug.
Likely we should later remove UE->tx_resume_ind_fifo and its notifications completely;
instead,
we may run processSlotTX() in place when the conditions are met (when a decreasing tx_wait_for_dlsch[slot] reaches 0).
This would replace the condition signals with a thread-safe semaphore or counter and make the system simpler.
It also requires other modifications:
remove txFifo, which is also a big issue;
add out-of-order sending to the RF board, because,
if we encode and send a tx slot as soon as we can,
it may come out of order, especially in TDD mode.
*/
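// Illustrative sketch only (not part of this change), with hypothetical names
// tx_pending[] / tx_ctx[] / resume_tx_if_ready(): the simplification described
// above could pre-load a per-slot atomic counter with 1 + the number of expected
// DLSCH results, and let the last contributor run the TX processing in place:
//
//   static _Atomic int tx_pending[NR_MAX_SLOTS_PER_FRAME];
//   static nr_rxtx_thread_data_t *tx_ctx[NR_MAX_SLOTS_PER_FRAME];
//
//   // called by each finished DLSCH decode, and once by the scheduler when it
//   // hands the slot over, instead of pushing into tx_resume_ind_fifo[]
//   static void resume_tx_if_ready(int tx_slot)
//   {
//     if (atomic_fetch_sub(&tx_pending[tx_slot], 1) == 1)
//       processSlotTX(tx_ctx[tx_slot]); // last condition met: encode the slot in place
//   }
//
// As noted above, dropping the notification FIFO and txFifo this way also requires
// out-of-order sending towards the RF board, especially in TDD mode.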
notifiedFIFO_elt_t *res = pollNotifiedFIFO(UE->tx_resume_ind_fifo + proc->nr_slot_tx);
if (res)
LOG_E(NR_PHY,
"Internal error: extra event on Tx waiting queue for slot %d, event comes from rx slot %d\n",
proc->nr_slot_tx,
*(int *)NotifiedFifoData(res));
// trigger L2 to run ue_scheduler thru IF module
// [TODO] mapping right after NR initial sync
if(UE->if_inst != NULL && UE->if_inst->ul_indication != NULL) {
......@@ -575,11 +604,8 @@ void processSlotTX(void *arg) {
RU_write(rxtxD);
}
static nr_phy_data_t UE_dl_preprocessing(PHY_VARS_NR_UE *UE, UE_nr_rxtx_proc_t *proc, int *tx_wait_for_dlsch)
static void UE_dl_preprocessing(PHY_VARS_NR_UE *UE, const UE_nr_rxtx_proc_t *proc, int *tx_wait_for_dlsch, nr_phy_data_t *phy_data)
{
nr_phy_data_t phy_data = {0};
LOG_D(PHY, "preprocessing %d.%d => slot type %d \n", proc->frame_rx, proc->nr_slot_rx, proc->rx_slot_type);
if (IS_SOFTMODEM_NOS1 || get_softmodem_params()->sa) {
// Start synchronization with a target gNB
......@@ -603,15 +629,15 @@ static nr_phy_data_t UE_dl_preprocessing(PHY_VARS_NR_UE *UE, UE_nr_rxtx_proc_t *
if(UE->if_inst != NULL && UE->if_inst->dl_indication != NULL) {
nr_downlink_indication_t dl_indication;
nr_fill_dl_indication(&dl_indication, NULL, NULL, proc, UE, &phy_data);
nr_fill_dl_indication(&dl_indication, NULL, NULL, proc, UE, phy_data);
UE->if_inst->dl_indication(&dl_indication);
}
uint64_t a=rdtsc_oai();
pbch_pdcch_processing(UE, proc, &phy_data);
if (phy_data.dlsch[0].active && phy_data.dlsch[0].rnti_type == TYPE_C_RNTI_) {
pbch_pdcch_processing(UE, proc, phy_data);
if (phy_data->dlsch[0].active && phy_data->dlsch[0].rnti_type == TYPE_C_RNTI_) {
// indicate to tx thread to wait for DLSCH decoding
const int ack_nack_slot = (proc->nr_slot_rx + phy_data.dlsch[0].dlsch_config.k1_feedback) % UE->frame_parms.slots_per_frame;
const int ack_nack_slot = (proc->nr_slot_rx + phy_data->dlsch[0].dlsch_config.k1_feedback) % UE->frame_parms.slots_per_frame;
tx_wait_for_dlsch[ack_nack_slot]++;
LOG_D(NR_PHY, "Adding wait even for slot %d, total %d\n", ack_nack_slot, tx_wait_for_dlsch[ack_nack_slot]);
}
......@@ -620,7 +646,7 @@ static nr_phy_data_t UE_dl_preprocessing(PHY_VARS_NR_UE *UE, UE_nr_rxtx_proc_t *
}
ue_ta_procedures(UE, proc->nr_slot_tx, proc->frame_tx);
return phy_data;
return;
}
void UE_dl_processing(void *arg) {
......@@ -765,8 +791,7 @@ void *UE_thread(void *arg)
int num_ind_fifo = nb_slot_frame;
for(int i=0; i < num_ind_fifo; i++) {
UE->tx_resume_ind_fifo[i] = malloc(sizeof(*UE->tx_resume_ind_fifo[i]));
initNotifiedFIFO(UE->tx_resume_ind_fifo[i]);
initNotifiedFIFO(UE->tx_resume_ind_fifo + i);
}
while (!oai_exit) {
......@@ -834,6 +859,13 @@ void *UE_thread(void *arg)
decoded_frame_rx++;
// we do ++ first in the regular processing, so it will be the beginning of the frame;
absolute_slot = decoded_frame_rx * nb_slot_frame - 1;
// We have resynchronized, maybe after RF loss, so we need to purge any existing context
memset(tx_wait_for_dlsch, 0, sizeof(tx_wait_for_dlsch));
for (int i = 0; i < num_ind_fifo; i++) {
notifiedFIFO_elt_t *res;
while ((res = pollNotifiedFIFO(UE->tx_resume_ind_fifo + i)))
delNotifiedFIFO_elt(res);
}
continue;
}
......@@ -912,27 +944,26 @@ void *UE_thread(void *arg)
nr_ue_rrc_timer_trigger(UE->Mod_id, curMsg.proc.frame_tx, curMsg.proc.gNB_id);
// RX slot processing. We launch and forget.
notifiedFIFO_elt_t *newEltRx =
newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), curMsg.proc.nr_slot_rx, NULL, UE_dl_processing);
nr_rxtx_thread_data_t *curMsgRx = (nr_rxtx_thread_data_t *)NotifiedFifoData(newEltRx);
curMsgRx->proc = curMsg.proc;
curMsgRx->UE = UE;
curMsgRx->phy_data = UE_dl_preprocessing(UE, &curMsg.proc, UE->tx_wait_for_dlsch);
pushTpool(&(get_nrUE_params()->Tpool), newEltRx);
notifiedFIFO_elt_t *newRx = newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), curMsg.proc.nr_slot_rx, NULL, UE_dl_processing);
nr_rxtx_thread_data_t *curMsgRx = (nr_rxtx_thread_data_t *)NotifiedFifoData(newRx);
*curMsgRx = (nr_rxtx_thread_data_t){.proc = curMsg.proc, .UE = UE};
UE_dl_preprocessing(UE, &curMsgRx->proc, tx_wait_for_dlsch, &curMsgRx->phy_data);
pushTpool(&(get_nrUE_params()->Tpool), newRx);
// Start TX slot processing here. It runs in parallel with RX slot processing
notifiedFIFO_elt_t *newEltTx =
newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), curMsg.proc.nr_slot_tx, &txFifo, processSlotTX);
nr_rxtx_thread_data_t *curMsgTx = (nr_rxtx_thread_data_t *)NotifiedFifoData(newEltTx);
// in the current code, the DURATION_RX_TO_TX constant is the limit to get UL data to encode from an RX slot
notifiedFIFO_elt_t *newTx = newNotifiedFIFO_elt(sizeof(nr_rxtx_thread_data_t), curMsg.proc.nr_slot_tx, &txFifo, processSlotTX);
nr_rxtx_thread_data_t *curMsgTx = (nr_rxtx_thread_data_t *)NotifiedFifoData(newTx);
curMsgTx->proc = curMsg.proc;
curMsgTx->writeBlockSize = writeBlockSize;
curMsgTx->proc.timestamp_tx = writeTimestamp;
curMsgTx->UE = UE;
curMsgTx->tx_wait_for_dlsch = UE->tx_wait_for_dlsch[curMsgTx->proc.nr_slot_tx];
UE->tx_wait_for_dlsch[curMsgTx->proc.nr_slot_tx] = 0;
pushTpool(&(get_nrUE_params()->Tpool), newEltTx);
curMsgTx->tx_wait_for_dlsch = tx_wait_for_dlsch[curMsgTx->proc.nr_slot_tx];
tx_wait_for_dlsch[curMsgTx->proc.nr_slot_tx] = 0;
pushTpool(&(get_nrUE_params()->Tpool), newTx);
// Wait for TX slot processing to finish
// Should be removed when the bugs and race conditions are fixed
notifiedFIFO_elt_t *res;
res = pullTpool(&txFifo, &(get_nrUE_params()->Tpool));
if (res == NULL)
......
......@@ -426,7 +426,7 @@ uint32_t nr_dlsch_decoding(PHY_VARS_NR_UE *phy_vars_ue,
rdata->offset = offset;
rdata->dlsch = dlsch;
rdata->dlsch_id = 0;
rdata->proc = proc;
rdata->proc = *proc;
reset_meas(&rdata->ts_deinterleave);
reset_meas(&rdata->ts_rate_unmatch);
reset_meas(&rdata->ts_ldpc_decode);
......
......@@ -611,8 +611,7 @@ typedef struct PHY_VARS_NR_UE_s {
void *phy_sim_pdsch_dl_ch_estimates_ext;
uint8_t *phy_sim_dlsch_b;
notifiedFIFO_t phy_config_ind;
notifiedFIFO_t *tx_resume_ind_fifo[NR_MAX_SLOTS_PER_FRAME];
int tx_wait_for_dlsch[NR_MAX_SLOTS_PER_FRAME];
notifiedFIFO_t tx_resume_ind_fifo[NR_MAX_SLOTS_PER_FRAME];
} PHY_VARS_NR_UE;
typedef struct {
......@@ -647,7 +646,6 @@ typedef struct nr_rxtx_thread_data_s {
UE_nr_rxtx_proc_t proc;
PHY_VARS_NR_UE *UE;
int writeBlockSize;
notifiedFIFO_t txFifo;
nr_phy_data_t phy_data;
int tx_wait_for_dlsch;
} nr_rxtx_thread_data_t;
......@@ -675,7 +673,7 @@ typedef struct LDPCDecode_ue_s {
time_stats_t ts_deinterleave;
time_stats_t ts_rate_unmatch;
time_stats_t ts_ldpc_decode;
UE_nr_rxtx_proc_t *proc;
UE_nr_rxtx_proc_t proc;
} ldpcDecode_ue_t;
#include "SIMULATION/ETH_TRANSPORT/defs.h"
......
......@@ -489,7 +489,7 @@ int nr_ue_pdcch_procedures(PHY_VARS_NR_UE *ue,
}
static int nr_ue_pdsch_procedures(PHY_VARS_NR_UE *ue,
UE_nr_rxtx_proc_t *proc,
const UE_nr_rxtx_proc_t *proc,
NR_UE_DLSCH_t dlsch[2],
int16_t *llr[2],
c16_t rxdataF[][ue->frame_parms.samples_per_slot_wCP])
......@@ -499,9 +499,6 @@ static int nr_ue_pdsch_procedures(PHY_VARS_NR_UE *ue,
int m;
int first_symbol_flag=0;
if (!dlsch[0].active)
return 0;
// We handle only one CW now
if (!(NR_MAX_NB_LAYERS>4)) {
NR_UE_DLSCH_t *dlsch0 = &dlsch[0];
......@@ -637,20 +634,23 @@ static int nr_ue_pdsch_procedures(PHY_VARS_NR_UE *ue,
return 0;
}
void send_slot_ind(notifiedFIFO_t *nf, int slot) {
// This function releases the Tx working thread for one pending piece of information, like a DLSCH ACK/NACK
static void send_dl_done_to_tx_thread(notifiedFIFO_t *nf, int rx_slot)
{
if (nf) {
notifiedFIFO_elt_t *newElt = newNotifiedFIFO_elt(sizeof(int), 0, NULL, NULL);
// We put the rx slot only for tracing purposes
int *msgData = (int *) NotifiedFifoData(newElt);
*msgData = slot;
*msgData = rx_slot;
pushNotifiedFIFO(nf, newElt);
}
}
static bool nr_ue_dlsch_procedures(PHY_VARS_NR_UE *ue, UE_nr_rxtx_proc_t *proc, NR_UE_DLSCH_t dlsch[2], int16_t *llr[2])
static bool nr_ue_dlsch_procedures(PHY_VARS_NR_UE *ue, const UE_nr_rxtx_proc_t *proc, NR_UE_DLSCH_t dlsch[2], int16_t *llr[2])
{
if (dlsch[0].active == false) {
LOG_E(PHY, "DLSCH should be active when calling this function\n");
return 1;
return true;
}
int gNB_id = proc->gNB_id;
......@@ -685,8 +685,9 @@ static bool nr_ue_dlsch_procedures(PHY_VARS_NR_UE *ue, UE_nr_rxtx_proc_t *proc,
// exit dlsch procedures as there are no active dlsch
if (is_cw0_active != ACTIVE && is_cw1_active != ACTIVE) {
// don't wait anymore
LOG_E(NR_PHY, "Internal error nr_ue_dlsch_procedure() called but no active cw on slot %d, harq %d\n", nr_slot_rx, harq_pid);
const int ack_nack_slot = (proc->nr_slot_rx + dlsch[0].dlsch_config.k1_feedback) % ue->frame_parms.slots_per_frame;
send_slot_ind(ue->tx_resume_ind_fifo[ack_nack_slot], proc->nr_slot_rx);
send_dl_done_to_tx_thread(ue->tx_resume_ind_fifo + ack_nack_slot, proc->nr_slot_rx);
return false;
}
......@@ -818,7 +819,7 @@ static bool nr_ue_dlsch_procedures(PHY_VARS_NR_UE *ue, UE_nr_rxtx_proc_t *proc,
LOG_D(PHY, "AbsSubframe %d.%d --> ldpc Decoding for CW1 %5.3f\n",
frame_rx%1024, nr_slot_rx,(ue->dlsch_decoding_stats.p_time)/(cpuf*1000.0));
}
LOG_D(PHY, "harq_pid: %d, TBS expected dlsch1: %d \n", harq_pid, dlsch[1].dlsch_config.TBS);
LOG_D(PHY, "harq_pid: %d, TBS expected dlsch1: %d \n", harq_pid, dlsch[1].dlsch_config.TBS);
}
// send to mac
......@@ -826,8 +827,11 @@ static bool nr_ue_dlsch_procedures(PHY_VARS_NR_UE *ue, UE_nr_rxtx_proc_t *proc,
ue->if_inst->dl_indication(&dl_indication);
}
const int ack_nack_slot = (proc->nr_slot_rx + dlsch[0].dlsch_config.k1_feedback) % ue->frame_parms.slots_per_frame;
send_slot_ind(ue->tx_resume_ind_fifo[ack_nack_slot], proc->nr_slot_rx);
// DLSCH decoding finished! Don't wait anymore in the Tx process; we know whether we should answer ACK/NACK on PUCCH
if (dlsch[0].rnti_type == TYPE_C_RNTI_) {
const int ack_nack_slot = (proc->nr_slot_rx + dlsch[0].dlsch_config.k1_feedback) % ue->frame_parms.slots_per_frame;
send_dl_done_to_tx_thread(ue->tx_resume_ind_fifo + ack_nack_slot, proc->nr_slot_rx);
}
if (ue->phy_sim_dlsch_b)
memcpy(ue->phy_sim_dlsch_b, p_b, dlsch_bytes);
......@@ -1091,10 +1095,10 @@ void pdsch_processing(PHY_VARS_NR_UE *ue, const UE_nr_rxtx_proc_t *proc, nr_phy_
if (ret_pdsch >= 0)
nr_ue_dlsch_procedures(ue, proc, dlsch, llr);
else {
// don't wait anymore
send_slot_ind(ue->tx_resume_ind_fifo[(proc->nr_slot_rx + dlsch_config->k1_feedback) % ue->frame_parms.slots_per_frame],
proc->nr_slot_rx);
LOG_W(NR_PHY, "nr_ue_pdsch_procedures failed in slot %d\n", proc->nr_slot_rx);
LOG_E(NR_PHY, "Demodulation impossible, internal error\n");
send_dl_done_to_tx_thread(
ue->tx_resume_ind_fifo + (proc->nr_slot_rx + dlsch_config->k1_feedback) % ue->frame_parms.slots_per_frame,
proc->nr_slot_rx);
}
stop_meas(&ue->dlsch_procedures_stat);
......