Commit 5accdbe1 authored by mjoang

Small change: add a mutex to protect ul_config_pdu against race conditions.

parent e6e59969
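For context, the locking pattern applied throughout this diff is: take ul_config->mutex_ul_config before reading or appending to ul_config_list/number_pdus (the DCI, RAR, PUCCH and PRACH paths all append, while nr_ue_scheduler drains), and release it once the shared state has been updated. Below is a minimal, self-contained sketch of that producer/consumer pattern with a pthread mutex; the struct, field sizes and helper names are simplified stand-ins for illustration only, not the real fapi_nr_ul_config_request_t or the OAI helpers.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-in for the shared UL config request: one list of UL PDUs
 * plus the frame/slot they are scheduled for, guarded by a single mutex. */
#define UL_CONFIG_LIST_NUM 8

typedef struct {
  int pdu_type;                       /* e.g. PUSCH, PUCCH or PRACH */
} ul_config_pdu_t;

typedef struct {
  pthread_mutex_t mutex_ul_config;    /* guards everything below */
  int number_pdus;
  int sfn;
  int slot;
  ul_config_pdu_t ul_config_list[UL_CONFIG_LIST_NUM];
} ul_config_t;

/* Producer side (DCI/RAR/PUCCH/PRACH scheduling): reserve the next entry,
 * fill it and bump number_pdus, all while holding the lock. */
static void append_pdu(ul_config_t *cfg, int pdu_type, int sfn, int slot)
{
  pthread_mutex_lock(&cfg->mutex_ul_config);
  if (cfg->number_pdus < UL_CONFIG_LIST_NUM) {
    cfg->ul_config_list[cfg->number_pdus].pdu_type = pdu_type;
    cfg->sfn = sfn;
    cfg->slot = slot;
    cfg->number_pdus++;               /* only changed under the lock */
  }
  pthread_mutex_unlock(&cfg->mutex_ul_config);
}

/* Consumer side (the scheduler): check the frame/slot match and drain the
 * list under the same lock, so number_pdus cannot change mid-iteration. */
static void drain_pdus(ul_config_t *cfg, int sfn, int slot)
{
  pthread_mutex_lock(&cfg->mutex_ul_config);
  if (cfg->sfn == sfn && cfg->slot == slot && cfg->number_pdus > 0) {
    for (int j = 0; j < cfg->number_pdus; j++)
      printf("tx PDU %d (type %d) in [%d.%d]\n",
             j, cfg->ul_config_list[j].pdu_type, sfn, slot);
    cfg->number_pdus = 0;
  }
  pthread_mutex_unlock(&cfg->mutex_ul_config);
}

int main(void)
{
  /* In the UE these two calls run on different threads; they are serialized
   * here only to keep the example self-contained. */
  ul_config_t cfg = {0};
  pthread_mutex_init(&cfg.mutex_ul_config, NULL);
  append_pdu(&cfg, 1 /* PUSCH */, 20, 8);
  drain_pdus(&cfg, 20, 8);
  pthread_mutex_destroy(&cfg.mutex_ul_config);
  return 0;
}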
@@ -673,11 +673,10 @@ int8_t nr_ue_process_dci(module_id_t module_id, int cc_id, uint8_t gNB_index, fr
nfapi_nr_ue_pusch_pdu_t *pusch_config_pdu = &ul_config->ul_config_list[ul_config->number_pdus].pusch_config_pdu;
fill_ul_config(ul_config, frame_tx, slot_tx, FAPI_NR_UL_CONFIG_TYPE_PUSCH);
pthread_mutex_unlock(&ul_config->mutex_ul_config);
// Config PUSCH PDU
ret = nr_config_pusch_pdu(mac, pusch_config_pdu, dci, NULL, rnti, &dci_format);
pthread_mutex_unlock(&ul_config->mutex_ul_config);
}
break;
@@ -735,11 +734,10 @@ int8_t nr_ue_process_dci(module_id_t module_id, int cc_id, uint8_t gNB_index, fr
nfapi_nr_ue_pusch_pdu_t *pusch_config_pdu = &ul_config->ul_config_list[ul_config->number_pdus].pusch_config_pdu;
fill_ul_config(ul_config, frame_tx, slot_tx, FAPI_NR_UL_CONFIG_TYPE_PUSCH);
pthread_mutex_unlock(&ul_config->mutex_ul_config);
// Config PUSCH PDU
ret = nr_config_pusch_pdu(mac, pusch_config_pdu, dci, NULL, rnti, &dci_format);
pthread_mutex_unlock(&ul_config->mutex_ul_config);
} else AssertFatal(1==0,"Cannot schedule PUSCH\n");
break;
}
@@ -3930,11 +3928,10 @@ int nr_ue_process_rar(nr_downlink_indication_t *dl_info, NR_UL_TIME_ALIGNMENT_t
nfapi_nr_ue_pusch_pdu_t *pusch_config_pdu = &ul_config->ul_config_list[ul_config->number_pdus].pusch_config_pdu;
fill_ul_config(ul_config, frame_tx, slot_tx, FAPI_NR_UL_CONFIG_TYPE_PUSCH);
pthread_mutex_unlock(&ul_config->mutex_ul_config);
// Config Msg3 PDU
nr_config_pusch_pdu(mac, pusch_config_pdu, NULL, &rar_grant, rnti, NULL);
pthread_mutex_unlock(&ul_config->mutex_ul_config);
}
} else {
@@ -946,90 +946,90 @@ NR_UE_L2_STATE_t nr_ue_scheduler(nr_downlink_indication_t *dl_info, nr_uplink_in
// Schedule ULSCH only if the current frame and slot match those in ul_config_req
// AND if a UL grant (UL DCI or Msg3) has been received (as indicated by num_pdus)
if (ul_config){
pthread_mutex_lock(&ul_config->mutex_ul_config);
if ((ul_info->slot_tx == ul_config->slot && ul_info->frame_tx == ul_config->sfn) && ul_config->number_pdus > 0){
pthread_mutex_unlock(&ul_config->mutex_ul_config);
pthread_mutex_lock(&ul_config->mutex_ul_config);
if ((ul_info->slot_tx == ul_config->slot && ul_info->frame_tx == ul_config->sfn) && ul_config->number_pdus > 0){
pthread_mutex_unlock(&ul_config->mutex_ul_config);
LOG_D(NR_MAC, "In %s:[%d.%d]: number of UL PDUs: %d with UL transmission in [%d.%d]\n", __FUNCTION__, frame_tx, slot_tx, ul_config->number_pdus, ul_config->sfn, ul_config->slot);
uint8_t ulsch_input_buffer_array[NFAPI_MAX_NUM_UL_PDU][MAX_ULSCH_PAYLOAD_BYTES];
nr_scheduled_response_t scheduled_response;
fapi_nr_tx_request_t tx_req;
tx_req.slot = slot_tx;
tx_req.sfn = frame_tx;
tx_req.number_of_pdus = 0;
pthread_mutex_lock(&ul_config->mutex_ul_config);
for (int j = 0; j < ul_config->number_pdus; j++) {
uint8_t *ulsch_input_buffer = &(ulsch_input_buffer_array[tx_req.number_of_pdus][MAX_ULSCH_PAYLOAD_BYTES]);
fapi_nr_ul_config_request_pdu_t *ulcfg_pdu = &ul_config->ul_config_list[j];
if (ulcfg_pdu->pdu_type == FAPI_NR_UL_CONFIG_TYPE_PUSCH) {
int mac_pdu_exist = 0;
uint16_t TBS_bytes = ulcfg_pdu->pusch_config_pdu.pusch_data.tb_size;
LOG_D(NR_MAC,"harq_id %d, NDI %d NDI_DCI %d, TBS_bytes %d (ra_state %d)\n",
ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id,
mac->UL_ndi[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id],
ulcfg_pdu->pusch_config_pdu.pusch_data.new_data_indicator,
TBS_bytes,ra->ra_state);
if (ra->ra_state == WAIT_RAR && !ra->cfra){
memcpy(ulsch_input_buffer, mac->ulsch_pdu.payload, TBS_bytes);
LOG_D(NR_MAC,"[RAPROC] Msg3 to be transmitted:\n");
for (int k = 0; k < TBS_bytes; k++) {
LOG_D(NR_MAC,"(%i): 0x%x\n",k,mac->ulsch_pdu.payload[k]);
}
LOG_D(NR_MAC,"Flipping NDI for harq_id %d (Msg3)\n",ulcfg_pdu->pusch_config_pdu.pusch_data.new_data_indicator);
mac->UL_ndi[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id] = ulcfg_pdu->pusch_config_pdu.pusch_data.new_data_indicator;
mac->first_ul_tx[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id] = 0;
mac_pdu_exist = 1;
} else {
LOG_D(NR_MAC, "In %s:[%d.%d]: number of UL PDUs: %d with UL transmission in [%d.%d]\n", __FUNCTION__, frame_tx, slot_tx, ul_config->number_pdus, ul_config->sfn, ul_config->slot);
if ((mac->UL_ndi[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id] != ulcfg_pdu->pusch_config_pdu.pusch_data.new_data_indicator ||
mac->first_ul_tx[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id]==1) &&
((get_softmodem_params()->phy_test == 1) ||
(ra->ra_state == RA_SUCCEEDED) ||
(ra->ra_state == WAIT_RAR && ra->cfra))){
uint8_t ulsch_input_buffer_array[NFAPI_MAX_NUM_UL_PDU][MAX_ULSCH_PAYLOAD_BYTES];
nr_scheduled_response_t scheduled_response;
fapi_nr_tx_request_t tx_req;
tx_req.slot = slot_tx;
tx_req.sfn = frame_tx;
tx_req.number_of_pdus = 0;
// Getting IP traffic to be transmitted
nr_ue_get_sdu(mod_id, cc_id,frame_tx, slot_tx, gNB_index, ulsch_input_buffer, TBS_bytes);
mac_pdu_exist = 1;
}
pthread_mutex_lock(&ul_config->mutex_ul_config);
for (int j = 0; j < ul_config->number_pdus; j++) {
uint8_t *ulsch_input_buffer = &(ulsch_input_buffer_array[tx_req.number_of_pdus][MAX_ULSCH_PAYLOAD_BYTES]);
fapi_nr_ul_config_request_pdu_t *ulcfg_pdu = &ul_config->ul_config_list[j];
if (ulcfg_pdu->pdu_type == FAPI_NR_UL_CONFIG_TYPE_PUSCH) {
int mac_pdu_exist = 0;
uint16_t TBS_bytes = ulcfg_pdu->pusch_config_pdu.pusch_data.tb_size;
LOG_D(NR_MAC,"harq_id %d, NDI %d NDI_DCI %d, TBS_bytes %d (ra_state %d)\n",
ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id,
mac->UL_ndi[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id],
ulcfg_pdu->pusch_config_pdu.pusch_data.new_data_indicator,
TBS_bytes,ra->ra_state);
if (ra->ra_state == WAIT_RAR && !ra->cfra){
memcpy(ulsch_input_buffer, mac->ulsch_pdu.payload, TBS_bytes);
LOG_D(NR_MAC,"[RAPROC] Msg3 to be transmitted:\n");
for (int k = 0; k < TBS_bytes; k++) {
LOG_D(NR_MAC,"(%i): 0x%x\n",k,mac->ulsch_pdu.payload[k]);
}
LOG_D(NR_MAC,"Flipping NDI for harq_id %d (Msg3)\n",ulcfg_pdu->pusch_config_pdu.pusch_data.new_data_indicator);
mac->UL_ndi[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id] = ulcfg_pdu->pusch_config_pdu.pusch_data.new_data_indicator;
mac->first_ul_tx[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id] = 0;
mac_pdu_exist = 1;
} else {
if ((mac->UL_ndi[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id] != ulcfg_pdu->pusch_config_pdu.pusch_data.new_data_indicator ||
mac->first_ul_tx[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id]==1) &&
((get_softmodem_params()->phy_test == 1) ||
(ra->ra_state == RA_SUCCEEDED) ||
(ra->ra_state == WAIT_RAR && ra->cfra))){
// Getting IP traffic to be transmitted
nr_ue_get_sdu(mod_id, cc_id,frame_tx, slot_tx, gNB_index, ulsch_input_buffer, TBS_bytes);
mac_pdu_exist = 1;
}
LOG_D(NR_MAC,"Flipping NDI for harq_id %d\n",ulcfg_pdu->pusch_config_pdu.pusch_data.new_data_indicator);
mac->UL_ndi[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id] = ulcfg_pdu->pusch_config_pdu.pusch_data.new_data_indicator;
mac->first_ul_tx[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id] = 0;
LOG_D(NR_MAC,"Flipping NDI for harq_id %d\n",ulcfg_pdu->pusch_config_pdu.pusch_data.new_data_indicator);
mac->UL_ndi[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id] = ulcfg_pdu->pusch_config_pdu.pusch_data.new_data_indicator;
mac->first_ul_tx[ulcfg_pdu->pusch_config_pdu.pusch_data.harq_process_id] = 0;
}
// Config UL TX PDU
if (mac_pdu_exist) {
tx_req.tx_request_body[tx_req.number_of_pdus].pdu_length = TBS_bytes;
tx_req.tx_request_body[tx_req.number_of_pdus].pdu_index = j;
tx_req.tx_request_body[tx_req.number_of_pdus].pdu = ulsch_input_buffer;
tx_req.number_of_pdus++;
}
if (ra->ra_state == WAIT_CONTENTION_RESOLUTION && !ra->cfra){
LOG_I(NR_MAC,"[RAPROC][%d.%d] RA-Msg3 retransmitted\n", frame_tx, slot_tx);
// 38.321 restart the ra-ContentionResolutionTimer at each HARQ retransmission in the first symbol after the end of the Msg3 transmission
nr_Msg3_transmitted(ul_info->module_id, ul_info->cc_id, ul_info->frame_tx, ul_info->slot_tx, ul_info->gNB_index);
}
if (ra->ra_state == WAIT_RAR && !ra->cfra){
LOG_I(NR_MAC,"[RAPROC][%d.%d] RA-Msg3 transmitted\n", frame_tx, slot_tx);
nr_Msg3_transmitted(ul_info->module_id, ul_info->cc_id, ul_info->frame_tx, ul_info->slot_tx, ul_info->gNB_index);
}
}
}
pthread_mutex_unlock(&ul_config->mutex_ul_config);
// Config UL TX PDU
if (mac_pdu_exist) {
tx_req.tx_request_body[tx_req.number_of_pdus].pdu_length = TBS_bytes;
tx_req.tx_request_body[tx_req.number_of_pdus].pdu_index = j;
tx_req.tx_request_body[tx_req.number_of_pdus].pdu = ulsch_input_buffer;
tx_req.number_of_pdus++;
}
if (ra->ra_state == WAIT_CONTENTION_RESOLUTION && !ra->cfra){
LOG_I(NR_MAC,"[RAPROC][%d.%d] RA-Msg3 retransmitted\n", frame_tx, slot_tx);
// 38.321 restart the ra-ContentionResolutionTimer at each HARQ retransmission in the first symbol after the end of the Msg3 transmission
nr_Msg3_transmitted(ul_info->module_id, ul_info->cc_id, ul_info->frame_tx, ul_info->slot_tx, ul_info->gNB_index);
}
if (ra->ra_state == WAIT_RAR && !ra->cfra){
LOG_I(NR_MAC,"[RAPROC][%d.%d] RA-Msg3 transmitted\n", frame_tx, slot_tx);
nr_Msg3_transmitted(ul_info->module_id, ul_info->cc_id, ul_info->frame_tx, ul_info->slot_tx, ul_info->gNB_index);
}
fill_scheduled_response(&scheduled_response, NULL, ul_config, &tx_req, mod_id, cc_id, rx_frame, rx_slot, ul_info->thread_id);
if(mac->if_module != NULL && mac->if_module->scheduled_response != NULL){
mac->if_module->scheduled_response(&scheduled_response);
}
}
pthread_mutex_unlock(&ul_config->mutex_ul_config);
fill_scheduled_response(&scheduled_response, NULL, ul_config, &tx_req, mod_id, cc_id, rx_frame, rx_slot, ul_info->thread_id);
if(mac->if_module != NULL && mac->if_module->scheduled_response != NULL){
mac->if_module->scheduled_response(&scheduled_response);
else {
pthread_mutex_unlock(&ul_config->mutex_ul_config);
}
}
else {
pthread_mutex_unlock(&ul_config->mutex_ul_config);
}
}
}
@@ -2161,6 +2161,10 @@ void nr_ue_pucch_scheduler(module_id_t module_idP, frame_t frameP, int slotP, in
pthread_mutex_lock(&ul_config->mutex_ul_config);
AssertFatal(ul_config->number_pdus<FAPI_NR_UL_CONFIG_LIST_NUM, "ul_config->number_pdus %d out of bounds\n",ul_config->number_pdus);
fapi_nr_ul_config_pucch_pdu *pucch_pdu = &ul_config->ul_config_list[ul_config->number_pdus].pucch_config_pdu;
fill_ul_config(ul_config, frameP, slotP, FAPI_NR_UL_CONFIG_TYPE_PUCCH);
pthread_mutex_unlock(&ul_config->mutex_ul_config);
nr_ue_configure_pucch(mac,
slotP,
rnti,
@@ -2168,8 +2172,6 @@ void nr_ue_pucch_scheduler(module_id_t module_idP, frame_t frameP, int slotP, in
pucch_pdu,
O_SR, O_ACK, O_CSI);
LOG_D(NR_MAC,"Configuring pucch, is_common = %d\n",pucch->is_common);
fill_ul_config(ul_config, frameP, slotP, FAPI_NR_UL_CONFIG_TYPE_PUCCH);
pthread_mutex_unlock(&ul_config->mutex_ul_config);
nr_scheduled_response_t scheduled_response;
fill_scheduled_response(&scheduled_response, NULL, ul_config, NULL, module_idP, 0 /*TBR fix*/, frameP, slotP, thread_id);
if(mac->if_module != NULL && mac->if_module->scheduled_response != NULL)
@@ -2233,12 +2235,13 @@ void nr_ue_prach_scheduler(module_id_t module_idP, frame_t frameP, sub_frame_t s
pthread_mutex_lock(&ul_config->mutex_ul_config);
AssertFatal(ul_config->number_pdus<FAPI_NR_UL_CONFIG_LIST_NUM, "ul_config->number_pdus %d out of bounds\n",ul_config->number_pdus);
prach_config_pdu = &ul_config->ul_config_list[ul_config->number_pdus].prach_config_pdu;
memset(prach_config_pdu, 0, sizeof(fapi_nr_ul_config_prach_pdu));
fill_ul_config(ul_config, frameP, slotP, FAPI_NR_UL_CONFIG_TYPE_PRACH);
pthread_mutex_unlock(&ul_config->mutex_ul_config);
LOG_D(PHY, "In %s: (%p) %d UL PDUs:\n", __FUNCTION__, ul_config, ul_config->number_pdus);
memset(prach_config_pdu, 0, sizeof(fapi_nr_ul_config_prach_pdu));
ncs = get_NCS(rach_ConfigGeneric->zeroCorrelationZoneConfig, format0, setup->restrictedSetConfig);
prach_config_pdu->phys_cell_id = mac->physCellId;
@@ -2314,7 +2317,7 @@ void nr_ue_prach_scheduler(module_id_t module_idP, frame_t frameP, sub_frame_t s
AssertFatal(1 == 0, "Invalid PRACH format");
}
} // if format1
pthread_mutex_unlock(&ul_config->mutex_ul_config);
fill_scheduled_response(&scheduled_response, NULL, ul_config, NULL, module_idP, 0 /*TBR fix*/, frameP, slotP, thread_id);
if(mac->if_module != NULL && mac->if_module->scheduled_response != NULL)
mac->if_module->scheduled_response(&scheduled_response);