Commit f9b24023 authored by mjoang

protect ul_config_pdu against race condition

parent 1adb7ccc
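
Every site that appends a PDU to the shared fapi_nr_ul_config_request_t now follows the same pattern: take ul_config->mutex_ul_config, assert that number_pdus is within FAPI_NR_UL_CONFIG_LIST_NUM, reserve the next ul_config_list entry, commit it with fill_ul_config(), configure the PDU, and only then release the mutex. The standalone sketch below models that producer pattern; the simplified types stand in for the fapi_nr_* structures and are illustrative only, not the OAI code.

/* Standalone model of the locking pattern introduced by this commit.
 * Simplified types stand in for the fapi_nr_* structures; this is an
 * illustrative sketch, not the OAI code. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

#define UL_CONFIG_LIST_NUM 10  /* stands in for FAPI_NR_UL_CONFIG_LIST_NUM */

typedef struct {
  int pdu_type;                /* PUSCH, PUCCH, PRACH, ... */
  int payload;                 /* stands in for the per-type PDU body */
} ul_pdu_t;

typedef struct {
  pthread_mutex_t mutex_ul_config;
  int sfn, slot;
  int number_pdus;
  ul_pdu_t ul_config_list[UL_CONFIG_LIST_NUM];
} ul_config_t;

/* Reserve and commit the next list entry, as fill_ul_config() does after
 * this commit: the caller must already hold mutex_ul_config. */
static ul_pdu_t *reserve_pdu(ul_config_t *c, int frame, int slot, int type)
{
  assert(c->number_pdus < UL_CONFIG_LIST_NUM);  /* AssertFatal in the OAI code */
  ul_pdu_t *pdu = &c->ul_config_list[c->number_pdus];
  pdu->pdu_type = type;
  c->sfn = frame;
  c->slot = slot;
  c->number_pdus++;
  return pdu;
}

int main(void)
{
  ul_config_t cfg = { .mutex_ul_config = PTHREAD_MUTEX_INITIALIZER };

  /* Producer side: hold the lock across both the reservation and the PDU
   * configuration, so a concurrent reader never sees a half-written entry
   * or a number_pdus that runs ahead of the list contents. */
  pthread_mutex_lock(&cfg.mutex_ul_config);
  ul_pdu_t *pdu = reserve_pdu(&cfg, 512 /*frame*/, 4 /*slot*/, 1 /*PUSCH*/);
  pdu->payload = 42;           /* stands in for nr_config_pusch_pdu() */
  pthread_mutex_unlock(&cfg.mutex_ul_config);

  printf("pdus=%d sfn=%d slot=%d\n", cfg.number_pdus, cfg.sfn, cfg.slot);
  return 0;
}
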
@@ -175,6 +175,7 @@ int8_t nr_ue_scheduled_response(nr_scheduled_response_t *scheduled_response){
pusch_config_pdu = &ul_config->ul_config_list[i].pusch_config_pdu;
current_harq_pid = pusch_config_pdu->pusch_data.harq_process_id;
NR_UL_UE_HARQ_t *harq_process_ul_ue = ulsch0->harq_processes[current_harq_pid];
if (harq_process_ul_ue){
  harq_process_ul_ue->status = 0;
......
@@ -668,7 +668,7 @@ int8_t nr_ue_process_dci(module_id_t module_id, int cc_id, uint8_t gNB_index, fr
LOG_W(MAC, "In %s: ul_config request is NULL. Probably due to unexpected UL DCI in frame.slot %d.%d. Ignoring DCI!\n", __FUNCTION__, frame, slot);
return -1;
}
pthread_mutex_lock(&ul_config->mutex_ul_config);
AssertFatal(ul_config->number_pdus<FAPI_NR_UL_CONFIG_LIST_NUM, "ul_config->number_pdus %d out of bounds\n",ul_config->number_pdus);
nfapi_nr_ue_pusch_pdu_t *pusch_config_pdu = &ul_config->ul_config_list[ul_config->number_pdus].pusch_config_pdu;
@@ -676,6 +676,7 @@ int8_t nr_ue_process_dci(module_id_t module_id, int cc_id, uint8_t gNB_index, fr
// Config PUSCH PDU
ret = nr_config_pusch_pdu(mac, pusch_config_pdu, dci, NULL, rnti, &dci_format);
pthread_mutex_unlock(&ul_config->mutex_ul_config);
}
@@ -729,12 +730,15 @@ int8_t nr_ue_process_dci(module_id_t module_id, int cc_id, uint8_t gNB_index, fr
return -1;
}
pthread_mutex_lock(&ul_config->mutex_ul_config);
AssertFatal(ul_config->number_pdus<FAPI_NR_UL_CONFIG_LIST_NUM, "ul_config->number_pdus %d out of bounds\n",ul_config->number_pdus);
nfapi_nr_ue_pusch_pdu_t *pusch_config_pdu = &ul_config->ul_config_list[ul_config->number_pdus].pusch_config_pdu;
fill_ul_config(ul_config, frame_tx, slot_tx, FAPI_NR_UL_CONFIG_TYPE_PUSCH);
// Config PUSCH PDU
ret = nr_config_pusch_pdu(mac, pusch_config_pdu, dci, NULL, rnti, &dci_format);
pthread_mutex_unlock(&ul_config->mutex_ul_config);
} else AssertFatal(1==0,"Cannot schedule PUSCH\n");
break;
@@ -3921,12 +3925,15 @@ int nr_ue_process_rar(nr_downlink_indication_t *dl_info, NR_UL_TIME_ALIGNMENT_t
rnti = ra->t_crnti;
}
pthread_mutex_lock(&ul_config->mutex_ul_config);
AssertFatal(ul_config->number_pdus<FAPI_NR_UL_CONFIG_LIST_NUM, "ul_config->number_pdus %d out of bounds\n",ul_config->number_pdus);
nfapi_nr_ue_pusch_pdu_t *pusch_config_pdu = &ul_config->ul_config_list[ul_config->number_pdus].pusch_config_pdu;
fill_ul_config(ul_config, frame_tx, slot_tx, FAPI_NR_UL_CONFIG_TYPE_PUSCH);
// Config Msg3 PDU
nr_config_pusch_pdu(mac, pusch_config_pdu, NULL, &rar_grant, rnti, NULL);
pthread_mutex_unlock(&ul_config->mutex_ul_config);
}
......
@@ -60,12 +60,10 @@ static prach_association_pattern_t prach_assoc_pattern;
static ssb_list_info_t ssb_list;
void fill_ul_config(fapi_nr_ul_config_request_t *ul_config, frame_t frame_tx, int slot_tx, uint8_t pdu_type){
pthread_mutex_lock(&ul_config->mutex_ul_config);
ul_config->ul_config_list[ul_config->number_pdus].pdu_type = pdu_type;
ul_config->slot = slot_tx;
ul_config->sfn = frame_tx;
ul_config->number_pdus++;
pthread_mutex_unlock(&ul_config->mutex_ul_config);
LOG_D(NR_MAC, "In %s: Set config request for UL transmission in [%d.%d], number of UL PDUs: %d\n", __FUNCTION__, ul_config->sfn, ul_config->slot, ul_config->number_pdus);
@@ -947,7 +945,10 @@ NR_UE_L2_STATE_t nr_ue_scheduler(nr_downlink_indication_t *dl_info, nr_uplink_in
// Schedule ULSCH only if the current frame and slot match those in ul_config_req
// AND if a UL grant (UL DCI or Msg3) has been received (as indicated by num_pdus)
if (ul_config && (ul_info->slot_tx == ul_config->slot && ul_info->frame_tx == ul_config->sfn) && ul_config->number_pdus > 0){
if (ul_config){
pthread_mutex_lock(&ul_config->mutex_ul_config);
if ((ul_info->slot_tx == ul_config->slot && ul_info->frame_tx == ul_config->sfn) && ul_config->number_pdus > 0){
pthread_mutex_unlock(&ul_config->mutex_ul_config);
LOG_D(NR_MAC, "In %s:[%d.%d]: number of UL PDUs: %d with UL transmission in [%d.%d]\n", __FUNCTION__, frame_tx, slot_tx, ul_config->number_pdus, ul_config->sfn, ul_config->slot);
@@ -958,13 +959,14 @@ NR_UE_L2_STATE_t nr_ue_scheduler(nr_downlink_indication_t *dl_info, nr_uplink_in
tx_req.sfn = frame_tx;
tx_req.number_of_pdus = 0;
pthread_mutex_lock(&ul_config->mutex_ul_config);
for (int j = 0; j < ul_config->number_pdus; j++) {
uint8_t *ulsch_input_buffer = &(ulsch_input_buffer_array[tx_req.number_of_pdus][MAX_ULSCH_PAYLOAD_BYTES]);
fapi_nr_ul_config_request_pdu_t *ulcfg_pdu = &ul_config->ul_config_list[j];
if (ulcfg_pdu->pdu_type == FAPI_NR_UL_CONFIG_TYPE_PUSCH) {
int mac_pdu_exist = 0;
uint16_t TBS_bytes = ulcfg_pdu->pusch_config_pdu.pusch_data.tb_size;
LOG_D(NR_MAC,"harq_id %d, NDI %d NDI_DCI %d, TBS_bytes %d (ra_state %d)\n",
ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id,
@@ -991,7 +993,7 @@ NR_UE_L2_STATE_t nr_ue_scheduler(nr_downlink_indication_t *dl_info, nr_uplink_in
// Getting IP traffic to be transmitted
nr_ue_get_sdu(mod_id, cc_id,frame_tx, slot_tx, gNB_index, ulsch_input_buffer, TBS_bytes);
mac_pdu_exist = 1;
}
LOG_D(NR_MAC,"Flipping NDI for harq_id %d\n",ulcfg_pdu->pusch_config_pdu.pusch_data.new_data_indicator);
@@ -1018,11 +1020,16 @@ NR_UE_L2_STATE_t nr_ue_scheduler(nr_downlink_indication_t *dl_info, nr_uplink_in
}
}
}
pthread_mutex_unlock(&ul_config->mutex_ul_config);
fill_scheduled_response(&scheduled_response, NULL, ul_config, &tx_req, mod_id, cc_id, rx_frame, rx_slot, ul_info->thread_id);
if(mac->if_module != NULL && mac->if_module->scheduled_response != NULL){
mac->if_module->scheduled_response(&scheduled_response);
}
}
else {
pthread_mutex_unlock(&ul_config->mutex_ul_config);
}
}
}
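
On the consumer side, nr_ue_scheduler now evaluates the frame/slot match and number_pdus while holding the mutex and releases it on both branches, re-acquiring it around the per-PDU loop that assembles tx_req. In terms of the simplified ul_config_t model above, the shape is roughly the following (consume_if_due() is an illustrative name, not an OAI symbol; the tx_req assembly is elided):

/* Consumer-side shape after this commit, using the ul_config_t model
 * from the sketch above. */
void consume_if_due(ul_config_t *c, int frame_tx, int slot_tx)
{
  pthread_mutex_lock(&c->mutex_ul_config);
  int due = (c->slot == slot_tx && c->sfn == frame_tx && c->number_pdus > 0);
  pthread_mutex_unlock(&c->mutex_ul_config);
  if (!due)
    return;

  /* As in the diff, the lock is re-acquired around the per-PDU loop,
   * leaving a brief unlocked window after the check. */
  pthread_mutex_lock(&c->mutex_ul_config);
  for (int j = 0; j < c->number_pdus; j++) {
    /* build a tx_req entry from c->ul_config_list[j] ... */
  }
  pthread_mutex_unlock(&c->mutex_ul_config);
}
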
@@ -2151,6 +2158,8 @@ void nr_ue_pucch_scheduler(module_id_t module_idP, frame_t frameP, int slotP, in
pucch->resource_set_id = find_pucch_resource_set(mac, N_UCI);
select_pucch_resource(mac, pucch);
fapi_nr_ul_config_request_t *ul_config = get_ul_config_request(mac, slotP);
pthread_mutex_lock(&ul_config->mutex_ul_config);
AssertFatal(ul_config->number_pdus<FAPI_NR_UL_CONFIG_LIST_NUM, "ul_config->number_pdus %d out of bounds\n",ul_config->number_pdus);
fapi_nr_ul_config_pucch_pdu *pucch_pdu = &ul_config->ul_config_list[ul_config->number_pdus].pucch_config_pdu;
nr_ue_configure_pucch(mac,
slotP,
@@ -2160,6 +2169,7 @@ void nr_ue_pucch_scheduler(module_id_t module_idP, frame_t frameP, int slotP, in
O_SR, O_ACK, O_CSI);
LOG_D(NR_MAC,"Configuring pucch, is_common = %d\n",pucch->is_common);
fill_ul_config(ul_config, frameP, slotP, FAPI_NR_UL_CONFIG_TYPE_PUCCH);
pthread_mutex_unlock(&ul_config->mutex_ul_config);
nr_scheduled_response_t scheduled_response;
fill_scheduled_response(&scheduled_response, NULL, ul_config, NULL, module_idP, 0 /*TBR fix*/, frameP, slotP, thread_id);
if(mac->if_module != NULL && mac->if_module->scheduled_response != NULL)
@@ -2220,6 +2230,8 @@ void nr_ue_prach_scheduler(module_id_t module_idP, frame_t frameP, sub_frame_t s
format0 = format & 0xff; // single PRACH format
format1 = (format >> 8) & 0xff; // dual PRACH format
pthread_mutex_lock(&ul_config->mutex_ul_config);
AssertFatal(ul_config->number_pdus<FAPI_NR_UL_CONFIG_LIST_NUM, "ul_config->number_pdus %d out of bounds\n",ul_config->number_pdus);
prach_config_pdu = &ul_config->ul_config_list[ul_config->number_pdus].prach_config_pdu;
memset(prach_config_pdu, 0, sizeof(fapi_nr_ul_config_prach_pdu));
@@ -2302,6 +2314,7 @@ void nr_ue_prach_scheduler(module_id_t module_idP, frame_t frameP, sub_frame_t s
AssertFatal(1 == 0, "Invalid PRACH format");
}
} // if format1
pthread_mutex_unlock(&ul_config->mutex_ul_config);
fill_scheduled_response(&scheduled_response, NULL, ul_config, NULL, module_idP, 0 /*TBR fix*/, frameP, slotP, thread_id);
if(mac->if_module != NULL && mac->if_module->scheduled_response != NULL)
mac->if_module->scheduled_response(&scheduled_response);
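
The PUCCH and PRACH schedulers follow the same producer pattern: lock, bounds-assert, reserve ul_config_list[number_pdus], configure the PDU under the lock, and unlock before fill_scheduled_response() hands the request to the scheduled_response callback.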
......