Commit 31a10033 authored by Robert Schmidt's avatar Robert Schmidt

Introduce mutex for MAC scheduler

Different threads (e.g., RRC, DU tasks) might access the scheduler at
the same time as the thread that drives the scheduler itself
(ru_thread). To avoid data races, introduce this mutex.

Most functions of the scheduler are only used internally and have been
marked static in the previous commit. The remaining ones, in this
commit, either lock the scheduler mutex sched_lock, or a comment has
been added of the assumption of how such function is to be locked (e.g.,
if it might be called from different places, or if it is called by a
function that already locks the scheduler).
parent 44bf6275
......@@ -1010,7 +1010,10 @@ int main(int argc, char **argv)
UE_info->UE_sched_ctrl.harq_processes[harq_pid].ndi = !(trial&1);
UE_info->UE_sched_ctrl.harq_processes[harq_pid].round = round;
// nr_schedule_ue_spec() requires the mutex to be locked
NR_SCHED_LOCK(&gNB_mac->sched_lock);
nr_schedule_ue_spec(0, frame, slot, &Sched_INFO->DL_req, &Sched_INFO->TX_req);
NR_SCHED_UNLOCK(&gNB_mac->sched_lock);
Sched_INFO->module_id = 0;
Sched_INFO->CC_id = 0;
Sched_INFO->frame = frame;
......
......@@ -125,6 +125,9 @@ static void process_phr_Config(NR_UE_sched_ctrl_t *sched_ctrl, NR_SetupRelease_P
void process_CellGroup(NR_CellGroupConfig_t *CellGroup, NR_UE_sched_ctrl_t *sched_ctrl)
{
/* we assume that this function is mutex-protected from outside */
NR_SCHED_ENSURE_LOCKED(&RC.nrmac[0]->sched_lock);
AssertFatal(CellGroup, "CellGroup is null\n");
NR_MAC_CellGroupConfig_t *mac_CellGroupConfig = CellGroup->mac_CellGroupConfig;
......@@ -436,19 +439,23 @@ static void config_common(gNB_MAC_INST *nrmac, int pdsch_AntennaPorts, int pusch
}
}
int nr_mac_enable_ue_rrc_processing_timer(module_id_t Mod_idP, rnti_t rnti, NR_SubcarrierSpacing_t subcarrierSpacing, uint32_t rrc_reconfiguration_delay) {
int nr_mac_enable_ue_rrc_processing_timer(module_id_t Mod_idP, rnti_t rnti, NR_SubcarrierSpacing_t subcarrierSpacing, uint32_t rrc_reconfiguration_delay)
{
if (rrc_reconfiguration_delay == 0) {
return -1;
}
NR_UE_info_t *UE_info = find_nr_UE(&RC.nrmac[Mod_idP]->UE_info,rnti);
gNB_MAC_INST *nrmac = RC.nrmac[Mod_idP];
NR_SCHED_LOCK(&nrmac->sched_lock);
NR_UE_info_t *UE_info = find_nr_UE(&nrmac->UE_info,rnti);
if (!UE_info) {
LOG_W(NR_MAC, "Could not find UE for RNTI 0x%04x\n", rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return -1;
}
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl;
const uint16_t sl_ahead = RC.nrmac[Mod_idP]->if_inst->sl_ahead;
const uint16_t sl_ahead = nrmac->if_inst->sl_ahead;
sched_ctrl->rrc_processing_timer = (rrc_reconfiguration_delay<<subcarrierSpacing) + sl_ahead;
LOG_I(NR_MAC, "Activating RRC processing timer for UE %04x with %d ms\n", UE_info->rnti, rrc_reconfiguration_delay);
......@@ -456,8 +463,9 @@ int nr_mac_enable_ue_rrc_processing_timer(module_id_t Mod_idP, rnti_t rnti, NR_S
// processing timer. To prevent this, set a variable as if we would have just
// sent it. This way, another TA command will for sure be sent in some
// frames, after RRC processing timer.
sched_ctrl->ta_frame = (RC.nrmac[Mod_idP]->frame - 1 + 1024) % 1024;
sched_ctrl->ta_frame = (nrmac->frame - 1 + 1024) % 1024;
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return 0;
}
......@@ -470,6 +478,7 @@ void nr_mac_config_scc(gNB_MAC_INST *nrmac,
{
DevAssert(nrmac != NULL);
AssertFatal(nrmac->common_channels[0].ServingCellConfigCommon == NULL, "logic error: multiple configurations of SCC\n");
NR_SCHED_LOCK(&nrmac->sched_lock);
DevAssert(scc != NULL);
AssertFatal(scc->ssb_PositionsInBurst->present > 0 && scc->ssb_PositionsInBurst->present < 4,
......@@ -549,26 +558,31 @@ void nr_mac_config_scc(gNB_MAC_INST *nrmac,
ra->preambles.preamble_list[i] = i;
}
}
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
void nr_mac_config_mib(gNB_MAC_INST *nrmac, NR_BCCH_BCH_Message_t *mib)
{
  /* Install the MIB for the (single) serving cell. The scheduler lock is
   * taken because this may be called concurrently with the thread driving
   * the scheduler. Installing a MIB twice is a logic error and aborts. */
  DevAssert(nrmac != NULL);
  DevAssert(mib != NULL);
  NR_SCHED_LOCK(&nrmac->sched_lock);
  NR_COMMON_channels_t *common = &nrmac->common_channels[0];
  AssertFatal(common->mib == NULL, "logic bug: updated MIB multiple times\n");
  common->mib = mib;
  NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
void nr_mac_config_sib1(gNB_MAC_INST *nrmac, NR_BCCH_DL_SCH_Message_t *sib1)
{
  /* Install SIB1 for the (single) serving cell under the scheduler lock,
   * mirroring nr_mac_config_mib(). Installing SIB1 twice aborts. */
  DevAssert(nrmac != NULL);
  DevAssert(sib1 != NULL);
  NR_SCHED_LOCK(&nrmac->sched_lock);
  NR_COMMON_channels_t *common = &nrmac->common_channels[0];
  AssertFatal(common->sib1 == NULL, "logic bug: updated SIB1 multiple times\n");
  common->sib1 = sib1;
  NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
bool nr_mac_add_test_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupConfig_t *CellGroup)
......@@ -576,16 +590,17 @@ bool nr_mac_add_test_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupConfig_t
DevAssert(nrmac != NULL);
DevAssert(CellGroup != NULL);
DevAssert(get_softmodem_params()->phy_test);
NR_SCHED_LOCK(&nrmac->sched_lock);
NR_UE_info_t* UE = add_new_nr_ue(nrmac, rnti, CellGroup);
if (UE) {
LOG_I(NR_MAC,"Force-added new UE %x with initial CellGroup\n", rnti);
process_CellGroup(CellGroup,&UE->UE_sched_ctrl);
} else {
LOG_E(NR_MAC,"Error adding UE %04x\n", rnti);
return false;
}
process_CellGroup(CellGroup,&UE->UE_sched_ctrl);
return true;
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return UE != NULL;
}
bool nr_mac_prepare_ra_nsa_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupConfig_t *CellGroup)
......@@ -593,6 +608,7 @@ bool nr_mac_prepare_ra_nsa_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupCo
DevAssert(nrmac != NULL);
DevAssert(CellGroup != NULL);
DevAssert(!get_softmodem_params()->phy_test);
NR_SCHED_LOCK(&nrmac->sched_lock);
// NSA case: need to pre-configure CFRA
const int CC_id = 0;
......@@ -604,6 +620,7 @@ bool nr_mac_prepare_ra_nsa_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupCo
}
if (ra_index == NR_NB_RA_PROC_MAX) {
LOG_E(NR_MAC, "RA processes are not available for CFRA RNTI %04x\n", rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return false;
}
NR_RA_t *ra = &cc->ra[ra_index];
......@@ -632,12 +649,16 @@ bool nr_mac_prepare_ra_nsa_ue(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupCo
}
}
LOG_I(NR_MAC,"Added new RA process for UE RNTI %04x with initial CellGroup\n", rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return true;
}
bool nr_mac_update_cellgroup(gNB_MAC_INST *nrmac, uint32_t rnti, NR_CellGroupConfig_t *CellGroup)
{
DevAssert(nrmac != NULL);
/* we assume that this function is mutex-protected from outside */
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
DevAssert(CellGroup != NULL);
NR_UE_info_t *UE = find_nr_UE(&nrmac->UE_info, rnti);
......
......@@ -63,6 +63,7 @@ void clear_nr_nfapi_information(gNB_MAC_INST *gNB,
nfapi_nr_tx_data_request_t *TX_req,
nfapi_nr_ul_dci_request_t *UL_dci_req)
{
/* called below and in simulators, so we assume a lock but don't require it */
NR_ServingCellConfigCommon_t *scc = gNB->common_channels->ServingCellConfigCommon;
const int num_slots = nr_slots_per_frame[*scc->ssbSubcarrierSpacing];
......@@ -149,6 +150,8 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP, frame_t frame, sub_frame_
NR_COMMON_channels_t *cc = gNB->common_channels;
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
NR_SCHED_LOCK(&gNB->sched_lock);
if (slot==0 && (*scc->downlinkConfigCommon->frequencyInfoDL->frequencyBandList.list.array[0]>=257)) {
//FR2
const NR_TDD_UL_DL_Pattern_t *tdd = &scc->tdd_UL_DL_ConfigurationCommon->pattern1;
......@@ -254,6 +257,6 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP, frame_t frame, sub_frame_
copy_ul_tti_req(&sched_info->UL_tti_req, &gNB->UL_tti_req_ahead[0][current_index]);
stop_meas(&gNB->eNB_scheduler);
NR_SCHED_UNLOCK(&gNB->sched_lock);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_DLSCH_ULSCH_SCHEDULER,VCD_FUNCTION_OUT);
}
......@@ -154,10 +154,12 @@ static int16_t ssb_index_from_prach(module_id_t module_idP,
return index;
}
//Compute Total active SSBs and RO available
void find_SSB_and_RO_available(gNB_MAC_INST *nrmac)
{
/* already mutex protected through nr_mac_config_scc() */
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
NR_COMMON_channels_t *cc = &nrmac->common_channels[0];
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
nfapi_nr_config_request_scf_t *cfg = &nrmac->config[0];
......@@ -257,6 +259,9 @@ void find_SSB_and_RO_available(gNB_MAC_INST *nrmac)
void schedule_nr_prach(module_id_t module_idP, frame_t frameP, sub_frame_t slotP)
{
gNB_MAC_INST *gNB = RC.nrmac[module_idP];
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&gNB->sched_lock);
NR_COMMON_channels_t *cc = gNB->common_channels;
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
int mu;
......@@ -536,12 +541,13 @@ void nr_initiate_ra_proc(module_id_t module_idP,
uint16_t preamble_index,
uint8_t freq_index,
uint8_t symbol,
int16_t timing_offset){
int16_t timing_offset)
{
gNB_MAC_INST *nr_mac = RC.nrmac[module_idP];
NR_SCHED_LOCK(&nr_mac->sched_lock);
uint8_t ul_carrier_id = 0; // 0 for NUL 1 for SUL
uint16_t msg2_frame, msg2_slot,monitoring_slot_period,monitoring_offset;
gNB_MAC_INST *nr_mac = RC.nrmac[module_idP];
NR_COMMON_channels_t *cc = &nr_mac->common_channels[CC_id];
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
frame_type_t frame_type = cc->frame_type;
......@@ -675,8 +681,11 @@ void nr_initiate_ra_proc(module_id_t module_idP,
cc->ssb_index[beam_index],
i);
NR_SCHED_UNLOCK(&nr_mac->sched_lock);
return;
}
NR_SCHED_UNLOCK(&nr_mac->sched_lock);
LOG_E(NR_MAC, "[gNB %d][RAPROC] FAILURE: CC_id %d Frame %d initiating RA procedure for preamble index %d\n", module_idP, CC_id, frameP, preamble_index);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_INITIATE_RA_PROC, 0);
......@@ -2090,7 +2099,10 @@ static void nr_check_Msg4_Ack(module_id_t module_id, int CC_id, frame_t frame, s
}
}
void nr_clear_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP, NR_RA_t *ra){
void nr_clear_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP, NR_RA_t *ra)
{
/* we assume that this function is mutex-protected from outside */
NR_SCHED_ENSURE_LOCKED(&RC.nrmac[module_idP]->sched_lock);
LOG_D(NR_MAC,"[gNB %d][RAPROC] CC_id %d Frame %d Clear Random access information rnti %x\n", module_idP, CC_id, frameP, ra->rnti);
ra->state = RA_IDLE;
ra->timing_offset = 0;
......@@ -2237,6 +2249,8 @@ void nr_schedule_RA(module_id_t module_idP,
nfapi_nr_tx_data_request_t *TX_req)
{
gNB_MAC_INST *mac = RC.nrmac[module_idP];
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&mac->sched_lock);
start_meas(&mac->schedule_ra);
for (int CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
......
......@@ -108,6 +108,7 @@ static void fill_ssb_vrb_map(NR_COMMON_channels_t *cc, int rbStart, int ssb_subc
void schedule_nr_mib(module_id_t module_idP, frame_t frameP, sub_frame_t slotP, nfapi_nr_dl_tti_request_t *DL_req)
{
gNB_MAC_INST *gNB = RC.nrmac[module_idP];
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_COMMON_channels_t *cc;
nfapi_nr_dl_tti_request_body_t *dl_req;
NR_MIB_t *mib = RC.nrrrc[module_idP]->carrier.mib->message.choice.mib;
......@@ -513,7 +514,6 @@ static void nr_fill_nfapi_dl_sib1_pdu(int Mod_idP,
LOG_D(MAC,"ShiftIndex: %i\n", pdcch_pdu_rel15->ShiftIndex);
LOG_D(MAC,"precoderGranularity: %i\n", pdcch_pdu_rel15->precoderGranularity);
LOG_D(MAC,"numDlDci: %i\n", pdcch_pdu_rel15->numDlDci);
}
void schedule_nr_sib1(module_id_t module_idP,
......@@ -522,6 +522,7 @@ void schedule_nr_sib1(module_id_t module_idP,
nfapi_nr_dl_tti_request_t *DL_req,
nfapi_nr_tx_data_request_t *TX_req)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
// TODO: Get these values from RRC
const int CC_id = 0;
uint8_t candidate_idx = 0;
......
......@@ -55,6 +55,7 @@
const int get_dl_tda(const gNB_MAC_INST *nrmac, const NR_ServingCellConfigCommon_t *scc, int slot) {
/* we assume that this function is mutex-protected from outside */
const NR_TDD_UL_DL_Pattern_t *tdd = scc->tdd_UL_DL_ConfigurationCommon ? &scc->tdd_UL_DL_ConfigurationCommon->pattern1 : NULL;
AssertFatal(tdd || nrmac->common_channels->frame_type == FDD, "Dynamic TDD not handled yet\n");
......@@ -76,8 +77,12 @@ int nr_write_ce_dlsch_pdu(module_id_t module_idP,
const NR_UE_sched_ctrl_t *ue_sched_ctl,
unsigned char *mac_pdu,
unsigned char drx_cmd,
unsigned char *ue_cont_res_id) {
unsigned char *ue_cont_res_id)
{
gNB_MAC_INST *gNB = RC.nrmac[module_idP];
/* already mutex protected: called below and in _RA.c */
NR_SCHED_ENSURE_LOCKED(&gNB->sched_lock);
NR_MAC_SUBHEADER_FIXED *mac_pdu_ptr = (NR_MAC_SUBHEADER_FIXED *) mac_pdu;
uint8_t last_size = 0;
int offset = 0, mac_ce_size, i, timing_advance_cmd, tag_id = 0;
......@@ -361,8 +366,9 @@ static void nr_store_dlsch_buffer(module_id_t module_id, frame_t frame, sub_fram
}
}
void abort_nr_dl_harq(NR_UE_info_t* UE, int8_t harq_pid) {
void abort_nr_dl_harq(NR_UE_info_t* UE, int8_t harq_pid)
{
/* already mutex protected through handle_dl_harq() */
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
NR_UE_harq_t *harq = &sched_ctrl->harq_processes[harq_pid];
......@@ -840,6 +846,7 @@ static void nr_fr1_dlsch_preprocessor(module_id_t module_id, frame_t frame, sub_
}
nr_pp_impl_dl nr_init_fr1_dlsch_preprocessor(int CC_id) {
/* during initialization: no mutex needed */
/* in the PF algorithm, we have to use the TBsize to compute the coefficient.
* This would include the number of DMRS symbols, which in turn depends on
* the time domain allocation. In case we are in a mixed slot, we do not want
......@@ -873,6 +880,9 @@ void nr_schedule_ue_spec(module_id_t module_id,
nfapi_nr_tx_data_request_t *TX_req)
{
gNB_MAC_INST *gNB_mac = RC.nrmac[module_id];
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
AssertFatal(pthread_mutex_trylock(&gNB_mac->sched_lock) == EBUSY,
"this function should be called with the scheduler mutex locked\n");
if (!is_xlsch_in_slot(gNB_mac->dlsch_slot_bitmap[slot / 64], slot))
return;
......
......@@ -50,6 +50,7 @@ void nr_preprocessor_phytest(module_id_t module_id,
frame_t frame,
sub_frame_t slot)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
if (!is_xlsch_in_slot(dlsch_slot_bitmap, slot))
return;
NR_UE_info_t *UE = RC.nrmac[module_id]->UE_info.list[0];
......@@ -192,6 +193,7 @@ uint64_t ulsch_slot_bitmap = (1 << 8);
bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_t slot)
{
gNB_MAC_INST *nr_mac = RC.nrmac[module_id];
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_COMMON_channels_t *cc = nr_mac->common_channels;
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
NR_UE_info_t *UE = nr_mac->UE_info.list[0];
......
......@@ -2369,7 +2369,7 @@ NR_UE_info_t *add_new_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rntiP, NR_CellGroupConf
reset_srs_stats(UE);
pthread_mutex_lock(&UE_info->mutex);
NR_SCHED_LOCK(&UE_info->mutex);
int i;
for(i=0; i<MAX_MOBILES_PER_GNB; i++) {
if (UE_info->list[i] == NULL) {
......@@ -2380,10 +2380,10 @@ NR_UE_info_t *add_new_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rntiP, NR_CellGroupConf
if (i == MAX_MOBILES_PER_GNB) {
LOG_E(NR_MAC,"Try to add UE %04x but the list is full\n", rntiP);
delete_nr_ue_data(UE, nr_mac->common_channels, &UE_info->uid_allocator);
pthread_mutex_unlock(&UE_info->mutex);
NR_SCHED_UNLOCK(&UE_info->mutex);
return NULL;
}
pthread_mutex_unlock(&UE_info->mutex);
NR_SCHED_UNLOCK(&UE_info->mutex);
LOG_D(NR_MAC, "Add NR rnti %x\n", rntiP);
dump_nr_list(UE_info->list);
......@@ -2487,8 +2487,11 @@ void reset_ul_harq_list(NR_UE_sched_ctrl_t *sched_ctrl) {
void mac_remove_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rnti)
{
/* already mutex protected */
NR_SCHED_ENSURE_LOCKED(&nr_mac->sched_lock);
NR_UEs_t *UE_info = &nr_mac->UE_info;
pthread_mutex_lock(&UE_info->mutex);
NR_SCHED_LOCK(&UE_info->mutex);
UE_iterator(UE_info->list, UE) {
if (UE->rnti==rnti)
break;
......@@ -2496,7 +2499,7 @@ void mac_remove_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rnti)
if (!UE) {
LOG_W(NR_MAC,"Call to del rnti %04x, but not existing\n", rnti);
pthread_mutex_unlock(&UE_info->mutex);
NR_SCHED_UNLOCK(&UE_info->mutex);
return;
}
......@@ -2506,7 +2509,7 @@ void mac_remove_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rnti)
if(UE_info->list[i] && UE_info->list[i]->rnti != rnti)
newUEs[newListIdx++]=UE_info->list[i];
memcpy(UE_info->list, newUEs, sizeof(UE_info->list));
pthread_mutex_unlock(&UE_info->mutex);
NR_SCHED_UNLOCK(&UE_info->mutex);
delete_nr_ue_data(UE, nr_mac->common_channels, &UE_info->uid_allocator);
}
......@@ -2525,6 +2528,7 @@ uint8_t nr_get_tpc(int target, uint8_t cqi, int incr) {
int get_pdsch_to_harq_feedback(NR_PUCCH_Config_t *pucch_Config,
nr_dci_format_t dci_format,
uint8_t *pdsch_to_harq_feedback) {
/* already mutex protected: held in nr_acknack_scheduling() */
if (dci_format == NR_DL_DCI_FORMAT_1_0) {
for (int i = 0; i < 8; i++)
......@@ -2545,6 +2549,9 @@ void nr_csirs_scheduling(int Mod_idP, frame_t frame, sub_frame_t slot, int n_slo
int CC_id = 0;
NR_UEs_t *UE_info = &RC.nrmac[Mod_idP]->UE_info;
gNB_MAC_INST *gNB_mac = RC.nrmac[Mod_idP];
NR_SCHED_ENSURE_LOCKED(&gNB_mac->sched_lock);
uint16_t *vrb_map = gNB_mac->common_channels[CC_id].vrb_map;
UE_info->sched_csirs = false;
......@@ -2770,7 +2777,10 @@ void nr_csirs_scheduling(int Mod_idP, frame_t frame, sub_frame_t slot, int n_slo
void nr_mac_update_timers(module_id_t module_id,
frame_t frame,
sub_frame_t slot) {
sub_frame_t slot)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&RC.nrmac[module_id]->sched_lock);
NR_UEs_t *UE_info = &RC.nrmac[module_id]->UE_info;
UE_iterator(UE_info->list, UE) {
......@@ -2824,7 +2834,10 @@ void nr_mac_update_timers(module_id_t module_id,
void schedule_nr_bwp_switch(module_id_t module_id,
frame_t frame,
sub_frame_t slot) {
sub_frame_t slot)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&RC.nrmac[module_id]->sched_lock);
NR_UEs_t *UE_info = &RC.nrmac[module_id]->UE_info;
......@@ -2887,6 +2900,7 @@ void send_initial_ul_rrc_message(gNB_MAC_INST *mac, int rnti, const uint8_t *sdu
LOG_W(MAC, "[RAPROC] Received SDU for CCCH length %d for UE %04x\n", sdu_len, rnti);
NR_UE_info_t *UE = (NR_UE_info_t *)rawUE;
NR_SCHED_ENSURE_LOCKED(&mac->sched_lock);
uint8_t du2cu[1024];
int encoded = encode_cellGroupConfig(UE->CellGroup, du2cu, sizeof(du2cu));
......@@ -2904,6 +2918,7 @@ void send_initial_ul_rrc_message(gNB_MAC_INST *mac, int rnti, const uint8_t *sdu
void prepare_initial_ul_rrc_message(gNB_MAC_INST *mac, NR_UE_info_t *UE)
{
NR_SCHED_ENSURE_LOCKED(&mac->sched_lock);
/* create this UE's initial CellGroup */
/* Note: relying on the RRC is a hack, as we are in the DU; there should be
* no RRC, remove in the future */
......
......@@ -54,6 +54,9 @@ void nr_srs_ri_computation(const nfapi_nr_srs_normalized_channel_iq_matrix_t *nr
const NR_UE_UL_BWP_t *current_BWP,
uint8_t *ul_ri)
{
/* already mutex protected: held in handle_nr_srs_measurements() */
NR_SCHED_ENSURE_LOCKED(&RC.nrmac[0]->sched_lock);
// If the gNB or UE has 1 antenna, the rank is always 1, i.e., *ul_ri = 0.
// For 2x2 scenario, we compute the rank of channel.
// The computation for 2x4, 4x2, 4x4, ... scenarios are not implemented yet. In these cases, the function sets *ul_ri = 0, which is always a valid value.
......@@ -240,8 +243,10 @@ static void nr_fill_nfapi_srs(int module_id,
*********************************************************************/
void nr_schedule_srs(int module_id, frame_t frame, int slot)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
gNB_MAC_INST *nrmac = RC.nrmac[module_id];
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
NR_UEs_t *UE_info = &nrmac->UE_info;
UE_iterator(UE_info->list, UE) {
......
......@@ -167,6 +167,9 @@ void nr_schedule_pucch(gNB_MAC_INST *nrmac,
frame_t frameP,
sub_frame_t slotP)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
if (!is_xlsch_in_slot(nrmac->ulsch_slot_bitmap[slotP / 64], slotP))
return;
......@@ -198,7 +201,10 @@ void nr_csi_meas_reporting(int Mod_idP,
frame_t frame,
sub_frame_t slot)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
gNB_MAC_INST *nrmac = RC.nrmac[Mod_idP];
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
UE_iterator(nrmac->UE_info.list, UE ) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
NR_UE_UL_BWP_t *ul_bwp = &UE->current_UL_BWP;
......@@ -917,9 +923,12 @@ void handle_nr_uci_pucch_0_1(module_id_t mod_id,
sub_frame_t slot,
const nfapi_nr_uci_pucch_pdu_format_0_1_t *uci_01)
{
NR_UE_info_t * UE = find_nr_UE(&RC.nrmac[mod_id]->UE_info, uci_01->rnti);
gNB_MAC_INST *nrmac = RC.nrmac[mod_id];
NR_SCHED_LOCK(&nrmac->sched_lock);
NR_UE_info_t * UE = find_nr_UE(&nrmac->UE_info, uci_01->rnti);
if (!UE) {
LOG_E(NR_MAC, "%s(): unknown RNTI %04x in PUCCH UCI\n", __func__, uci_01->rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
......@@ -929,7 +938,7 @@ void handle_nr_uci_pucch_0_1(module_id_t mod_id,
for (int harq_bit = 0; harq_bit < uci_01->harq->num_harq; harq_bit++) {
const uint8_t harq_value = uci_01->harq->harq_list[harq_bit].harq_value;
const uint8_t harq_confidence = uci_01->harq->harq_confidence_level;
NR_UE_harq_t *harq = find_harq(frame, slot, UE, RC.nrmac[mod_id]->dl_bler.harq_round_max);
NR_UE_harq_t *harq = find_harq(frame, slot, UE, nrmac->dl_bler.harq_round_max);
if (!harq) {
LOG_E(NR_MAC, "Oh no! Could not find a harq in %s!\n", __FUNCTION__);
break;
......@@ -938,13 +947,13 @@ void handle_nr_uci_pucch_0_1(module_id_t mod_id,
const int8_t pid = sched_ctrl->feedback_dl_harq.head;
remove_front_nr_list(&sched_ctrl->feedback_dl_harq);
LOG_D(NR_MAC,"%4d.%2d bit %d pid %d ack/nack %d\n",frame, slot, harq_bit,pid,harq_value);
handle_dl_harq(UE, pid, harq_value == 0 && harq_confidence == 0, RC.nrmac[mod_id]->dl_bler.harq_round_max);
handle_dl_harq(UE, pid, harq_value == 0 && harq_confidence == 0, nrmac->dl_bler.harq_round_max);
if (harq_confidence == 1) UE->mac_stats.pucch0_DTX++;
}
// tpc (power control) only if we received AckNack
if (uci_01->harq->harq_confidence_level==0)
sched_ctrl->tpc1 = nr_get_tpc(RC.nrmac[mod_id]->pucch_target_snrx10, uci_01->ul_cqi, 30);
sched_ctrl->tpc1 = nr_get_tpc(nrmac->pucch_target_snrx10, uci_01->ul_cqi, 30);
else
sched_ctrl->tpc1 = 3;
sched_ctrl->pucch_snrx10 = uci_01->ul_cqi * 5 - 640;
......@@ -962,6 +971,7 @@ void handle_nr_uci_pucch_0_1(module_id_t mod_id,
}
free(uci_01->sr);
}
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
void handle_nr_uci_pucch_2_3_4(module_id_t mod_id,
......@@ -969,15 +979,21 @@ void handle_nr_uci_pucch_2_3_4(module_id_t mod_id,
sub_frame_t slot,
const nfapi_nr_uci_pucch_pdu_format_2_3_4_t *uci_234)
{
NR_UE_info_t * UE = find_nr_UE(&RC.nrmac[mod_id]->UE_info, uci_234->rnti);
gNB_MAC_INST *nrmac = RC.nrmac[mod_id];
NR_SCHED_LOCK(&nrmac->sched_lock);
NR_UE_info_t * UE = find_nr_UE(&nrmac->UE_info, uci_234->rnti);
if (!UE) {
NR_SCHED_UNLOCK(&nrmac->sched_lock);
LOG_E(NR_MAC, "%s(): unknown RNTI %04x in PUCCH UCI\n", __func__, uci_234->rnti);
return;
}
NR_CSI_MeasConfig_t *csi_MeasConfig = UE->current_UL_BWP.csi_MeasConfig;
if (csi_MeasConfig==NULL)
if (csi_MeasConfig==NULL) {
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
// tpc (power control)
......@@ -1002,13 +1018,13 @@ void handle_nr_uci_pucch_2_3_4(module_id_t mod_id,
DevAssert(harq->is_waiting);
const int8_t pid = sched_ctrl->feedback_dl_harq.head;
remove_front_nr_list(&sched_ctrl->feedback_dl_harq);
handle_dl_harq(UE, pid, uci_234->harq.harq_crc != 1 && acknack, RC.nrmac[mod_id]->dl_bler.harq_round_max);
handle_dl_harq(UE, pid, uci_234->harq.harq_crc != 1 && acknack, nrmac->dl_bler.harq_round_max);
}
free(uci_234->harq.harq_payload);
}
if ((uci_234->pduBitmap >> 2) & 0x01) {
//API to parse the csi report and store it into sched_ctrl
extract_pucch_csi_report(csi_MeasConfig, uci_234, frame, slot, UE, RC.nrmac[mod_id]->common_channels->ServingCellConfigCommon);
extract_pucch_csi_report(csi_MeasConfig, uci_234, frame, slot, UE, nrmac->common_channels->ServingCellConfigCommon);
//TCI handling function
tci_handling(UE,frame, slot);
free(uci_234->csi_part1.csi_part1_payload);
......@@ -1017,6 +1033,7 @@ void handle_nr_uci_pucch_2_3_4(module_id_t mod_id,
//@TODO:Handle CSI Report 2
// nothing to free (yet)
}
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
static void set_pucch_allocation(const NR_UE_UL_BWP_t *ul_bwp, const int r_pucch, const int bwp_size, NR_sched_pucch_t *pucch)
......@@ -1083,6 +1100,8 @@ int nr_acknack_scheduling(gNB_MAC_INST *mac,
int r_pucch,
int is_common)
{
/* we assume that this function is mutex-protected from outside. Since it is
* called often, don't try to lock every time */
const int CC_id = 0;
const int minfbtime = mac->minRXTXTIMEpdsch;
......@@ -1187,6 +1206,9 @@ int nr_acknack_scheduling(gNB_MAC_INST *mac,
void nr_sr_reporting(gNB_MAC_INST *nrmac, frame_t SFN, sub_frame_t slot)
{
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
if (!is_xlsch_in_slot(nrmac->ulsch_slot_bitmap[slot / 64], slot))
return;
const int CC_id = 0;
......
......@@ -39,7 +39,10 @@
//#define SRS_IND_DEBUG
const int get_ul_tda(gNB_MAC_INST *nrmac, const NR_ServingCellConfigCommon_t *scc, int frame, int slot) {
const int get_ul_tda(gNB_MAC_INST *nrmac, const NR_ServingCellConfigCommon_t *scc, int frame, int slot)
{
/* we assume that this function is mutex-protected from outside */
NR_SCHED_ENSURE_LOCKED(&nrmac->sched_lock);
/* there is a mixed slot only when in TDD */
const NR_TDD_UL_DL_Pattern_t *tdd = scc->tdd_UL_DL_ConfigurationCommon ? &scc->tdd_UL_DL_ConfigurationCommon->pattern1 : NULL;
......@@ -492,17 +495,22 @@ void handle_nr_ul_harq(const int CC_idP,
sub_frame_t slot,
const nfapi_nr_crc_t *crc_pdu)
{
NR_UE_info_t *UE = find_nr_UE(&RC.nrmac[mod_id]->UE_info, crc_pdu->rnti);
bool UE_waiting_CFRA_msg3 = get_UE_waiting_CFRA_msg3(RC.nrmac[mod_id], CC_idP, frame, slot);
gNB_MAC_INST *nrmac = RC.nrmac[mod_id];
NR_SCHED_LOCK(&nrmac->sched_lock);
NR_UE_info_t *UE = find_nr_UE(&nrmac->UE_info, crc_pdu->rnti);
bool UE_waiting_CFRA_msg3 = get_UE_waiting_CFRA_msg3(nrmac, CC_idP, frame, slot);
if (!UE || UE_waiting_CFRA_msg3 == true) {
LOG_W(NR_MAC, "handle harq for rnti %04x, in RA process\n", crc_pdu->rnti);
for (int i = 0; i < NR_NB_RA_PROC_MAX; ++i) {
NR_RA_t *ra = &RC.nrmac[mod_id]->common_channels[CC_idP].ra[i];
if (ra->state >= WAIT_Msg3 &&
ra->rnti == crc_pdu->rnti)
NR_RA_t *ra = &nrmac->common_channels[CC_idP].ra[i];
if (ra->state >= WAIT_Msg3 && ra->rnti == crc_pdu->rnti) {
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
}
NR_SCHED_UNLOCK(&nrmac->sched_lock);
LOG_E(NR_MAC, "%s(): unknown RNTI 0x%04x in PUSCH\n", __func__, crc_pdu->rnti);
return;
}
......@@ -515,8 +523,10 @@ void handle_nr_ul_harq(const int CC_idP,
crc_pdu->harq_id,
harq_pid,
crc_pdu->rnti);
if (harq_pid < 0)
if (harq_pid < 0) {
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
remove_front_nr_list(&sched_ctrl->feedback_ul_harq);
sched_ctrl->ul_harq_processes[harq_pid].is_waiting = false;
......@@ -556,22 +566,23 @@ void handle_nr_ul_harq(const int CC_idP,
crc_pdu->rnti);
add_tail_nr_list(&sched_ctrl->retrans_ul_harq, harq_pid);
}
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
/*
* When data are received on PHY and transmitted to MAC
*/
void nr_rx_sdu(const module_id_t gnb_mod_idP,
const int CC_idP,
const frame_t frameP,
const sub_frame_t slotP,
const rnti_t rntiP,
uint8_t *sduP,
const uint16_t sdu_lenP,
const uint16_t timing_advance,
const uint8_t ul_cqi,
const uint16_t rssi){
static void _nr_rx_sdu(const module_id_t gnb_mod_idP,
const int CC_idP,
const frame_t frameP,
const sub_frame_t slotP,
const rnti_t rntiP,
uint8_t *sduP,
const uint16_t sdu_lenP,
const uint16_t timing_advance,
const uint8_t ul_cqi,
const uint16_t rssi)
{
gNB_MAC_INST *gNB_mac = RC.nrmac[gnb_mod_idP];
const int current_rnti = rntiP;
......@@ -835,6 +846,23 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP,
}
}
void nr_rx_sdu(const module_id_t gnb_mod_idP,
               const int CC_idP,
               const frame_t frameP,
               const sub_frame_t slotP,
               const rnti_t rntiP,
               uint8_t *sduP,
               const uint16_t sdu_lenP,
               const uint16_t timing_advance,
               const uint8_t ul_cqi,
               const uint16_t rssi)
{
  /* Public entry point called from the PHY interface: serialize access to
   * the scheduler state, then delegate all processing to _nr_rx_sdu(). */
  gNB_MAC_INST *mac = RC.nrmac[gnb_mod_idP];
  NR_SCHED_LOCK(&mac->sched_lock);
  _nr_rx_sdu(gnb_mod_idP, CC_idP, frameP, slotP, rntiP, sduP, sdu_lenP, timing_advance, ul_cqi, rssi);
  NR_SCHED_UNLOCK(&mac->sched_lock);
}
static uint32_t calc_power_complex(const int16_t *x, const int16_t *y, const uint32_t size)
{
// Real part value
......@@ -1169,6 +1197,8 @@ void handle_nr_srs_measurements(const module_id_t module_id,
const sub_frame_t slot,
nfapi_nr_srs_indication_pdu_t *srs_ind)
{
gNB_MAC_INST *nrmac = RC.nrmac[module_id];
NR_SCHED_LOCK(&nrmac->sched_lock);
LOG_D(NR_MAC, "(%d.%d) Received SRS indication for UE %04x\n", frame, slot, srs_ind->rnti);
#ifdef SRS_IND_DEBUG
......@@ -1184,11 +1214,13 @@ void handle_nr_srs_measurements(const module_id_t module_id,
NR_UE_info_t *UE = find_nr_UE(&RC.nrmac[module_id]->UE_info, srs_ind->rnti);
if (!UE) {
LOG_W(NR_MAC, "Could not find UE for RNTI %04x\n", srs_ind->rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
if (srs_ind->timing_advance_offset == 0xFFFF) {
LOG_W(NR_MAC, "Invalid timing advance offset for RNTI %04x\n", srs_ind->rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
......@@ -1206,6 +1238,7 @@ void handle_nr_srs_measurements(const module_id_t module_id,
if (nr_srs_bf_report.wide_band_snr == 0xFF) {
LOG_W(NR_MAC, "Invalid wide_band_snr for RNTI %04x\n", srs_ind->rnti);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
return;
}
......@@ -1304,12 +1337,14 @@ void handle_nr_srs_measurements(const module_id_t module_id,
default:
AssertFatal(1 == 0, "Invalid SRS usage\n");
}
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
long get_K2(NR_PUSCH_TimeDomainResourceAllocationList_t *tdaList,
int time_domain_assignment,
int mu) {
/* we assume that this function is mutex-protected from outside */
NR_PUSCH_TimeDomainResourceAllocation_t *tda = tdaList->list.array[time_domain_assignment];
if (tda->k2)
......@@ -1974,6 +2009,7 @@ static bool nr_fr1_ulsch_preprocessor(module_id_t module_id, frame_t frame, sub_
nr_pp_impl_ul nr_init_fr1_ulsch_preprocessor(int CC_id)
{
/* during initialization: no mutex needed */
/* in the PF algorithm, we have to use the TBsize to compute the coefficient.
* This would include the number of DMRS symbols, which in turn depends on
* the time domain allocation. In case we are in a mixed slot, we do not want
......@@ -2005,6 +2041,9 @@ nr_pp_impl_ul nr_init_fr1_ulsch_preprocessor(int CC_id)
void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot, nfapi_nr_ul_dci_request_t *ul_dci_req)
{
gNB_MAC_INST *nr_mac = RC.nrmac[module_id];
/* already mutex protected: held in gNB_dlsch_ulsch_scheduler() */
NR_SCHED_ENSURE_LOCKED(&nr_mac->sched_lock);
/* Uplink data ONLY can be scheduled when the current slot is downlink slot,
* because we have to schedule the DCI0 first before schedule uplink data */
if (!is_xlsch_in_slot(nr_mac->dlsch_slot_bitmap[slot / 64], slot)) {
......
......@@ -60,7 +60,9 @@ void *nrmac_stats_thread(void *arg) {
while (oai_exit == 0) {
char *p = output;
NR_SCHED_LOCK(&gNB->sched_lock);
p += dump_mac_stats(gNB, p, end - p, false);
NR_SCHED_UNLOCK(&gNB->sched_lock);
p += snprintf(p, end - p, "\n");
p += print_meas_log(&gNB->eNB_scheduler, "DL & UL scheduling timing", NULL, NULL, p, end - p);
p += print_meas_log(&gNB->schedule_dlsch, "dlsch scheduler", NULL, NULL, p, end - p);
......@@ -87,7 +89,11 @@ size_t dump_mac_stats(gNB_MAC_INST *gNB, char *output, size_t strlen, bool reset
const char *begin = output;
const char *end = output + strlen;
pthread_mutex_lock(&gNB->UE_info.mutex);
/* this function is called from gNB_dlsch_ulsch_scheduler(), so assumes the
* scheduler to be locked*/
NR_SCHED_ENSURE_LOCKED(&gNB->sched_lock);
NR_SCHED_LOCK(&gNB->UE_info.mutex);
UE_iterator(gNB->UE_info.list, UE) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
NR_mac_stats_t *stats = &UE->mac_stats;
......@@ -175,7 +181,7 @@ size_t dump_mac_stats(gNB_MAC_INST *gNB, char *output, size_t strlen, bool reset
stats->ul.lc_bytes[lc_id]);
}
}
pthread_mutex_unlock(&gNB->UE_info.mutex);
NR_SCHED_UNLOCK(&gNB->UE_info.mutex);
return output - begin;
}
......@@ -233,6 +239,8 @@ void mac_top_init_gNB(ngran_node_t node_type)
RC.nrmac[i]->first_MIB = true;
pthread_mutex_init(&RC.nrmac[i]->sched_lock, NULL);
pthread_mutex_init(&RC.nrmac[i]->UE_info.mutex, NULL);
uid_linear_allocator_init(&RC.nrmac[i]->UE_info.uid_allocator);
......
......@@ -41,6 +41,25 @@
#include <errno.h> /* EBUSY, used by NR_SCHED_ENSURE_LOCKED */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Lock the scheduler mutex; aborts the process if locking fails. */
#define NR_SCHED_LOCK(lock) \
  do { \
    int rc = pthread_mutex_lock(lock); \
    AssertFatal(rc == 0, "error while locking scheduler mutex\n"); \
  } while (0)

/* Unlock the scheduler mutex; aborts the process if unlocking fails. */
#define NR_SCHED_UNLOCK(lock) \
  do { \
    int rc = pthread_mutex_unlock(lock); \
    AssertFatal(rc == 0, "error while unlocking scheduler mutex\n"); \
  } while (0)

/* Assert that the calling thread's context already holds the scheduler
 * mutex: pthread_mutex_trylock() returns EBUSY when the (non-recursive)
 * mutex is held. If trylock unexpectedly succeeds, the mutex was NOT held;
 * we abort immediately (AssertFatal), so the stray lock is never used.
 * NOTE(review): with a default (non-errorcheck) mutex this also passes if
 * another thread holds the lock — it detects "locked", not "locked by me". */
#define NR_SCHED_ENSURE_LOCKED(lock)\
  do {\
    int rc = pthread_mutex_trylock(lock); \
    AssertFatal(rc == EBUSY, "this function should be called with the scheduler mutex locked\n");\
  } while (0)
/* Common */
#include "radio/COMMON/common_lib.h"
......@@ -795,6 +814,8 @@ typedef struct gNB_MAC_INST_s {
int16_t frame;
int16_t slot;
pthread_mutex_t sched_lock;
} gNB_MAC_INST;
#endif /*__LAYER2_NR_MAC_GNB_H__ */
......@@ -55,12 +55,17 @@ void nr_rrc_mac_remove_ue(rnti_t rntiMaybeUEid)
nr_rlc_remove_ue(rntiMaybeUEid);
gNB_MAC_INST *nrmac = RC.nrmac[0];
NR_SCHED_LOCK(&nrmac->sched_lock);
mac_remove_nr_ue(nrmac, rntiMaybeUEid);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
void nr_rrc_mac_update_cellgroup(rnti_t rntiMaybeUEid, NR_CellGroupConfig_t *cgc)
{
nr_mac_update_cellgroup(RC.nrmac[0], rntiMaybeUEid, cgc);
gNB_MAC_INST *nrmac = RC.nrmac[0];
NR_SCHED_LOCK(&nrmac->sched_lock);
nr_mac_update_cellgroup(nrmac, rntiMaybeUEid, cgc);
NR_SCHED_UNLOCK(&nrmac->sched_lock);
}
uint16_t mac_rrc_nr_data_req(const module_id_t Mod_idP,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment