Commit 90d56417 authored by Melissa Elkadi

Filling, queuing and sending UCI ind to gNB

Currently, the pull8 call in the VNF's nFAPI unpack path is failing.
parent fc79bb1c
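For context on the failure noted in the commit message: the VNF unpacks each UCI PDU by pulling fields sequentially from the packed P7 buffer, so a single failed pull8/pull16/pull32 (for example, reading past 'end' because the PNF packed fewer bytes than the unpacker expects) makes the whole UCI.indication unpack return 0. A minimal sketch of that pattern, modelled on the unpack_nr_uci_pucch_0_1 hunk further down (this is an illustrative helper, not the actual library code; it assumes the pull8/pull16/pull32 helpers defined in the nFAPI codec source are in scope):

    /* Sketch: sequential pull pattern used by the nFAPI P7 unpackers. */
    static uint8_t unpack_uci_fixed_part(nfapi_nr_uci_pucch_pdu_format_0_1_t *value,
                                         uint8_t **ppReadPackedMsg, uint8_t *end)
    {
      if (!(pull8(ppReadPackedMsg, &value->pduBitmap, end) &&
            pull32(ppReadPackedMsg, &value->handle, end) &&
            pull16(ppReadPackedMsg, &value->rnti, end)))
        return 0; /* any short read aborts the whole UCI.indication unpack */
      return 1;
    }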
......@@ -212,6 +212,12 @@ static bool sfn_slot_matcher(void *wanted, void *candidate)
return NFAPI_SFNSLOT2SFN(sfn_sf) == ind->sfn && NFAPI_SFNSLOT2SLOT(sfn_sf) == ind->slot;
}
case NFAPI_NR_PHY_MSG_TYPE_UCI_INDICATION:
{
nfapi_nr_uci_indication_t *ind = candidate;
return NFAPI_SFNSLOT2SFN(sfn_sf) == ind->sfn && NFAPI_SFNSLOT2SLOT(sfn_sf) == ind->slot;
}
default:
LOG_E(NR_MAC, "sfn_slot_match bad ID: %d\n", msg->message_id);
......@@ -226,6 +232,7 @@ static void *NRUE_phy_stub_standalone_pnf_task(void *arg)
reset_queue(&nr_rach_ind_queue);
reset_queue(&nr_rx_ind_queue);
reset_queue(&nr_crc_ind_queue);
reset_queue(&nr_uci_ind_queue);
NR_PRACH_RESOURCES_t prach_resources;
memset(&prach_resources, 0, sizeof(prach_resources));
......@@ -322,6 +329,7 @@ static void *NRUE_phy_stub_standalone_pnf_task(void *arg)
nfapi_nr_rach_indication_t *rach_ind = unqueue_matching(&nr_rach_ind_queue, MAX_QUEUE_SIZE, sfn_slot_matcher, &sfn_slot);
nfapi_nr_rx_data_indication_t *rx_ind = unqueue_matching(&nr_rx_ind_queue, MAX_QUEUE_SIZE, sfn_slot_matcher, &sfn_slot);
nfapi_nr_crc_indication_t *crc_ind = unqueue_matching(&nr_crc_ind_queue, MAX_QUEUE_SIZE, sfn_slot_matcher, &sfn_slot);
nfapi_nr_uci_indication_t *uci_ind = unqueue_matching(&nr_uci_ind_queue, MAX_QUEUE_SIZE, sfn_slot_matcher, &sfn_slot);
if (rach_ind && rach_ind->number_of_pdus > 0)
{
......@@ -352,6 +360,14 @@ static void *NRUE_phy_stub_standalone_pnf_task(void *arg)
send_nsa_standalone_msg(&UL_INFO, crc_ind->header.message_id);
free(crc_ind->crc_list);
}
if (uci_ind && uci_ind->num_ucis > 0)
{
NR_UL_IND_t UL_INFO = {
.uci_ind = *uci_ind,
};
send_nsa_standalone_msg(&UL_INFO, uci_ind->header.message_id);
free(uci_ind->uci_list);
}
}
return NULL;
}
......
......@@ -712,6 +712,18 @@ int phy_nr_rach_indication(struct nfapi_vnf_p7_config *config, nfapi_nr_rach_ind
return 1;
}
int phy_nr_uci_indication(struct nfapi_vnf_p7_config *config, nfapi_nr_uci_indication_t *ind)
{
if(NFAPI_MODE == NFAPI_MODE_VNF)
{
UL_INFO.uci_ind = *ind;
}
else {
LOG_E(NR_MAC, "NFAPI_MODE = %d not NFAPI_MODE_VNF(2)\n", nfapi_getmode());
}
return 1;
}
int phy_harq_indication(struct nfapi_vnf_p7_config *config, nfapi_harq_indication_t *ind) {
struct PHY_VARS_eNB_s *eNB = RC.eNB[0][0];
LOG_D(MAC, "%s() NFAPI SFN/SF:%d number_of_harqs:%u\n", __FUNCTION__, NFAPI_SFNSF2DEC(ind->sfn_sf), ind->harq_indication_body.number_of_harqs);
......@@ -1195,6 +1207,7 @@ void *vnf_nr_p7_thread_start(void *ptr) {
p7_vnf->config->nr_crc_indication = &phy_nr_crc_indication;
p7_vnf->config->nr_rx_indication = &phy_nr_rx_indication;
p7_vnf->config->nr_rach_indication = &phy_nr_rach_indication;
p7_vnf->config->nr_uci_indication = &phy_nr_uci_indication;
p7_vnf->config->srs_indication = &phy_srs_indication;
p7_vnf->config->sr_indication = &phy_sr_indication;
p7_vnf->config->cqi_indication = &phy_cqi_indication;
......
......@@ -3494,9 +3494,9 @@ static uint8_t pack_nr_rach_indication_body(void* tlv, uint8_t **ppWritePackedMs
return 0;
for(int i = 0; i < value->num_preamble; i++)
{
if(!(push8(value->preamble_list->preamble_index, ppWritePackedMsg, end) &&
push16(value->preamble_list->timing_advance, ppWritePackedMsg, end) &&
push32(value->preamble_list->preamble_pwr, ppWritePackedMsg, end)
if(!(push8(value->preamble_list[i].preamble_index, ppWritePackedMsg, end) &&
push16(value->preamble_list[i].timing_advance, ppWritePackedMsg, end) &&
push32(value->preamble_list[i].preamble_pwr, ppWritePackedMsg, end)
))
return 0;
}
......@@ -6524,41 +6524,38 @@ static uint8_t unpack_nr_rach_indication(uint8_t **ppReadPackedMsg, uint8_t *end
//NR UCI
static uint8_t unpack_nr_uci_pucch_0_1(void* tlv, uint8_t **ppReadPackedMsg, uint8_t *end) {
nfapi_nr_uci_pucch_pdu_format_0_1_t* value = (nfapi_nr_uci_pucch_pdu_format_0_1_t*)tlv;
static uint8_t unpack_nr_uci_pucch_0_1(nfapi_nr_uci_pucch_pdu_format_0_1_t *value,
uint8_t **ppReadPackedMsg,
uint8_t *end,
nfapi_p7_codec_config_t *config) {
if(!(pull8(ppReadPackedMsg, &value->pduBitmap, end) &&
pull32(ppReadPackedMsg, &value->handle, end) &&
pull16(ppReadPackedMsg, &value->rnti, end) &&
pull8(ppReadPackedMsg, &value->pucch_format, end) &&
pull8(ppReadPackedMsg, &value->ul_cqi, end) &&
pull16(ppReadPackedMsg, &value->timing_advance, end) &&
pull16(ppReadPackedMsg, &value->timing_advance, end) &&
pull16(ppReadPackedMsg, &value->rssi, end)
))
return 0;
return 0;
if (value->pduBitmap & 0x01) { //SR
if(!(pull8(ppReadPackedMsg, &value->sr->sr_indication, end) &&
pull8(ppReadPackedMsg, &value->sr->sr_confidence_level, end)
pull8(ppReadPackedMsg, &value->sr->sr_confidence_level, end)
))
return 0;
return 0;
}
if (((value->pduBitmap >> 1) & 0x01)) { //HARQ
value->harq = nfapi_p7_allocate(sizeof(*value->harq), config);
if (!(pull8(ppReadPackedMsg, &value->harq->harq_confidence_level, end) &&
pull8(ppReadPackedMsg, &value->harq->num_harq, end)
))
return 0;
value->harq->harq_list = nfapi_p7_allocate(sizeof(*value->harq->harq_list) * value->harq->num_harq, config);
for (int i = 0; i < value->harq->num_harq; i++) {
if (!pull8(ppReadPackedMsg, &value->harq->harq_list[i].harq_value, end))
return 0;
}
}
// if (((value->pduBitmap >> 1) & 0x01)) { //HARQ
// uint8_t* temp; //&value->harq->num_harq &value->harq->harq_confidence_level &value->harq->harq_list[0].harq_value
// temp = (uint8_t*) malloc(sizeof(uint8_t));
// //printf("value->harq->num_harq = %d \n", value->harq->num_harq);
// // if(!(pull8(ppReadPackedMsg, temp, end) &&
// // pull8(ppReadPackedMsg, temp, end)
// // ))
// // return 0;
// // for(int i=0; i<1;i++)
// // {
// // if(!(pull8(ppReadPackedMsg, temp, end) //review - gokul
// // ))
// // return 0;
// // }
// }
return 1;
}
......@@ -6649,13 +6646,14 @@ static uint8_t unpack_nr_uci_indication(uint8_t **ppReadPackedMsg, uint8_t *end,
))
return 0;
for(int i=0; i<pNfapiMsg->num_ucis;i++)
pNfapiMsg->uci_list = nfapi_p7_allocate(sizeof(*pNfapiMsg->uci_list) * pNfapiMsg->num_ucis, config);
for (int i = 0; i < pNfapiMsg->num_ucis; i++)
{
if(!unpack_nr_uci_indication_body(&pNfapiMsg->uci_list,ppReadPackedMsg,end))
if(!unpack_nr_uci_indication_body(&pNfapiMsg->uci_list[i], ppReadPackedMsg, end, config))
return 0;
}
return 1;
return 1;
}
......
......@@ -802,6 +802,20 @@ typedef struct nfapi_vnf_p7_config
*/
int (*nr_rach_indication)(struct nfapi_vnf_p7_config* config, nfapi_nr_rach_indication_t* ind);
/*! A callback for the UCI.indication
* \param config A pointer to the vnf p7 configuration
* \param ind A data structure for the decoded UCI.indication. This will
* have been allocated on the stack.
* \return not currently used.
*
* The ind may contain pointers to dynamically allocated sub structures
* such as the pdu. The dynamically allocated structures will
* be deallocated on return. If the client wishes to 'keep' the structures
* then the substructure pointers should be set to 0 and the client should
* use the codec_config.deallocate function to release them at a future point.
*/
int (*nr_uci_indication)(struct nfapi_vnf_p7_config* config, nfapi_nr_uci_indication_t* ind);
/*! A callback for the SRS.indication
* \param config A pointer to the vnf p7 configuration
* \param ind A data structure for the decoded SRS.indication This will
......
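As an illustration of the ownership rule documented in the nr_uci_indication comment above: a client that wants to keep the UCI list past the callback can detach the pointer and release it later through codec_config.deallocate. A hedged sketch (my_nr_uci_indication and kept_uci_list are hypothetical names; the deallocate hook is whatever was configured in codec_config):

    /* Sketch only: steal uci_list so the codec does not free it on return. */
    static nfapi_nr_uci_t *kept_uci_list;

    static int my_nr_uci_indication(struct nfapi_vnf_p7_config *config,
                                    nfapi_nr_uci_indication_t *ind)
    {
      kept_uci_list = ind->uci_list;  /* take ownership of the sub structure */
      ind->uci_list = 0;              /* a null pointer is skipped on deallocation */
      /* ... later, when done with it: */
      /* config->codec_config.deallocate(kept_uci_list); */
      return 1;
    }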
......@@ -1543,6 +1543,14 @@ void vnf_handle_nr_uci_indication(void *pRecvMsg, int recvMsgLen, vnf_p7_t* vnf_
{
NFAPI_TRACE(NFAPI_TRACE_ERROR, "%s: Failed to unpack message\n", __FUNCTION__);
}
else
{
NFAPI_TRACE(NFAPI_TRACE_INFO, "%s: Handling UCI Indication\n", __FUNCTION__);
if(vnf_p7->_public.nr_uci_indication)
{
(vnf_p7->_public.nr_uci_indication)(&vnf_p7->_public, &ind);
}
}
}
}
......
......@@ -46,6 +46,7 @@ const char *dl_pdu_type[]={"DCI", "DLSCH", "RA_DLSCH"};
const char *ul_pdu_type[]={"PRACH", "PUCCH", "PUSCH", "SRS"};
queue_t nr_rx_ind_queue;
queue_t nr_crc_ind_queue;
queue_t nr_uci_ind_queue;
int8_t nr_ue_scheduled_response_stub(nr_scheduled_response_t *scheduled_response) {
......@@ -136,6 +137,56 @@ int8_t nr_ue_scheduled_response_stub(nr_scheduled_response_t *scheduled_response
}
scheduled_response->ul_config->number_pdus = 0;
}
if (scheduled_response->dl_config != NULL)
{
fapi_nr_dl_config_request_t *dl_config = scheduled_response->dl_config;
AssertFatal(dl_config->number_pdus < sizeof(dl_config->dl_config_list) / sizeof(dl_config->dl_config_list[0]),
"Too many dl_config pdus %d", dl_config->number_pdus);
for (int i = 0; i < dl_config->number_pdus; ++i)
{
LOG_I(PHY, "In %s: processing %s PDU of %d total DL PDUs (dl_config %p) \n",
__FUNCTION__, dl_pdu_type[dl_config->dl_config_list[i].pdu_type - 1], dl_config->number_pdus, dl_config);
uint8_t pdu_type = dl_config->dl_config_list[i].pdu_type;
switch (pdu_type)
{
case (FAPI_NR_DL_CONFIG_TYPE_DLSCH):
{
nfapi_nr_uci_indication_t *uci_ind = CALLOC(1, sizeof(*uci_ind));
uci_ind->header.message_id = NFAPI_NR_PHY_MSG_TYPE_UCI_INDICATION;
uci_ind->sfn = scheduled_response->frame == 19 ? (scheduled_response->frame + 1) % 1024 : scheduled_response->frame;
uci_ind->slot = (scheduled_response->slot + 6) % 20;
uci_ind->num_ucis = 1;
uci_ind->uci_list = CALLOC(1, sizeof(nfapi_nr_uci_t));
uci_ind->uci_list[0].pdu_type = NFAPI_NR_UCI_FORMAT_0_1_PDU_TYPE;
uci_ind->uci_list[0].pdu_size = 46;
uci_ind->uci_list[0].pucch_pdu_format_0_1.rnti = dl_config->dl_config_list[i].dlsch_config_pdu.rnti;
uci_ind->uci_list[0].pucch_pdu_format_0_1.pduBitmap = 2; // ((pduBitmap >> 1) & 0x01) == HARQ bit, (pduBitmap & 0x01) == SR bit
uci_ind->uci_list[0].pucch_pdu_format_0_1.pucch_format = 1;
uci_ind->uci_list[0].pucch_pdu_format_0_1.harq = CALLOC(1, sizeof(nfapi_nr_harq_pdu_0_1_t));
uci_ind->uci_list[0].pucch_pdu_format_0_1.harq->harq_confidence_level = 0;
uci_ind->uci_list[0].pucch_pdu_format_0_1.harq->num_harq = 1;
uci_ind->uci_list[0].pucch_pdu_format_0_1.harq->harq_list = CALLOC(1, sizeof(nfapi_nr_harq_t));
uci_ind->uci_list[0].pucch_pdu_format_0_1.harq->harq_list[0].harq_value = 0;
LOG_I(NR_PHY, "In %s: Filled queue uci_ind which was filled by dlconfig.\n"
"uci_num %d, uci_slot %d, uci_frame %d\n",
__FUNCTION__, uci_ind->num_ucis, uci_ind->slot, uci_ind->sfn);
if (!put_queue(&nr_uci_ind_queue, uci_ind))
{
LOG_E(NR_MAC, "Put_queue failed for rx_ind\n");
free(uci_ind->uci_list[0].pucch_pdu_format_0_1.harq->harq_list);
free(uci_ind->uci_list[0].pucch_pdu_format_0_1.harq);
free(uci_ind->uci_list);
free(uci_ind);
}
break; // TODO (Melissa): figure out what to send to gNB when receiving format 1_1
}
}
}
}
}
return 0;
}
......
......@@ -818,6 +818,8 @@ void nr_schedule_ue_spec(module_id_t module_id,
pdsch_pdu->qamModOrder[0] = Qm;
pdsch_pdu->mcsIndex[0] = sched_ctrl->mcs;
pdsch_pdu->mcsTable[0] = sched_ctrl->mcsTableIdx;
AssertFatal(harq->round < sizeof(nr_rv_round_map) / sizeof(nr_rv_round_map[0]),
"harq->round %d exceeds the size of nr_rv_round_map\n", harq->round);
pdsch_pdu->rvIndex[0] = nr_rv_round_map[harq->round];
pdsch_pdu->TBSize[0] = TBS;
......
......@@ -82,11 +82,11 @@ void handle_nr_rach(NR_UL_IND_t *UL_info)
void handle_nr_uci(NR_UL_IND_t *UL_info)
{
const module_id_t mod_id = UL_info->module_id;
const frame_t frame = UL_info->frame;
const sub_frame_t slot = UL_info->slot;
int num_ucis = UL_info->uci_ind.num_ucis;
nfapi_nr_uci_t *uci_list = UL_info->uci_ind.uci_list;
const module_id_t mod_id = UL_INFO.module_id;
const frame_t frame = UL_INFO.frame;
const sub_frame_t slot = UL_INFO.slot;
int num_ucis = UL_INFO.uci_ind.num_ucis;
nfapi_nr_uci_t *uci_list = UL_INFO.uci_ind.uci_list;
for (int i = 0; i < num_ucis; i++) {
switch (uci_list[i].pdu_type) {
......@@ -107,7 +107,7 @@ void handle_nr_uci(NR_UL_IND_t *UL_info)
}
}
UL_info->uci_ind.num_ucis = 0;
UL_INFO.uci_ind.num_ucis = 0;
if(NFAPI_MODE != NFAPI_MODE_PNF)
// mark corresponding PUCCH resources as free
// NOTE: we just assume it is BWP ID 1, to be revised for multiple BWPs
......
......@@ -188,9 +188,27 @@ void send_nsa_standalone_msg(NR_UL_IND_t *UL_INFO, uint16_t msg_id)
}
break;
}
case NFAPI_NR_PHY_MSG_TYPE_SRS_INDICATION:
break;
case NFAPI_NR_PHY_MSG_TYPE_UCI_INDICATION:
{
char buffer[NFAPI_MAX_PACKED_MESSAGE_SIZE];
LOG_I(NR_MAC, "UCI header id :%d", UL_INFO->uci_ind.header.message_id);
int encoded_size = nfapi_nr_p7_message_pack(&UL_INFO->uci_ind, buffer, sizeof(buffer), NULL);
if (encoded_size <= 0)
{
LOG_E(NR_MAC, "nfapi_nr_p7_message_pack has failed. Encoded size = %d\n", encoded_size);
return;
}
LOG_I(NR_MAC, "NR_UCI_IND sent to Proxy, Size: %d Frame %d Slot %d Num PDUS %d\n", encoded_size,
UL_INFO->uci_ind.sfn, UL_INFO->uci_ind.slot, UL_INFO->uci_ind.num_ucis);
if (send(ue_tx_sock_descriptor, buffer, encoded_size, 0) < 0)
{
LOG_E(NR_MAC, "Send Proxy NR_UE failed\n");
return;
}
break;
}
case NFAPI_NR_PHY_MSG_TYPE_SRS_INDICATION:
break;
default:
break;
......@@ -217,7 +235,7 @@ static void copy_dl_tti_req_to_dl_info(nr_downlink_indication_t *dl_info, nfapi_
if (pdu_list->PDUType == NFAPI_NR_DL_TTI_PDCCH_PDU_TYPE)
{
LOG_I(NR_PHY, "[%d, %d] PDCCH PDU \n",
LOG_I(NR_PHY, "[%d, %d] PDCCH DCI PDU (Format for incoming PDSCH PDU)\n",
dl_tti_request->SFN, dl_tti_request->Slot);
uint16_t num_dcis = pdu_list->pdcch_pdu.pdcch_pdu_rel15.numDlDci;
if (num_dcis > 0)
......@@ -325,6 +343,31 @@ static void copy_ul_dci_data_req_to_dl_info(nr_downlink_indication_t *dl_info, n
dl_info->slot = ul_dci_req->Slot;
}
static void copy_ul_tti_data_req_to_dl_info(nr_downlink_indication_t *dl_info, nfapi_nr_ul_tti_request_t *ul_tti_req)
{
int num_pdus = ul_tti_req->n_pdus;
if (num_pdus <= 0)
{
LOG_E(NR_PHY, "%s: ul_tti_request number of PDUS <= 0\n", __FUNCTION__);
abort();
}
AssertFatal(num_pdus <= sizeof(ul_tti_req->pdus_list) / sizeof(ul_tti_req->pdus_list[0]),
"Too many pdus %d in ul_tti_req\n", num_pdus);
for (int i = 0; i < num_pdus; i++)
{
nfapi_nr_ul_tti_request_number_of_pdus_t *pdu_list = &ul_tti_req->pdus_list[i];
LOG_D(NR_PHY, "This is the pdu type %d in ul_tti_req\n", pdu_list->pdu_type);
if (pdu_list->pdu_type == NFAPI_NR_UL_CONFIG_PUCCH_PDU_TYPE)
{
LOG_I(NR_PHY, "This is the tx_sfn %d and tx_slot %d for ul_tti_req. Not doing anything here since"
" the slot and frame arent actually the reception values gNB specified.\n",
ul_tti_req->SFN, ul_tti_req->Slot);
}
}
}
static void fill_dci_from_dl_config(nr_downlink_indication_t*dl_ind, fapi_nr_dl_config_request_t *dl_config)
{
if (!dl_ind->dci_ind)
......@@ -374,7 +417,8 @@ static void fill_dci_from_dl_config(nr_downlink_indication_t*dl_ind, fapi_nr_dl_
static void check_and_process_dci(nfapi_nr_dl_tti_request_t *dl_tti_request,
nfapi_nr_tx_data_request_t *tx_data_request,
nfapi_nr_ul_dci_request_t *ul_dci_request)
nfapi_nr_ul_dci_request_t *ul_dci_request,
nfapi_nr_ul_tti_request_t *ul_tti_request)
{
frame_t frame = 0;
int slot = 0;
......@@ -406,6 +450,13 @@ static void check_and_process_dci(nfapi_nr_dl_tti_request_t *dl_tti_request,
LOG_I(NR_PHY, "[%d, %d] ul_dci_request\n", frame, slot);
copy_ul_dci_data_req_to_dl_info(&mac->dl_info, ul_dci_request);
}
else if (ul_tti_request)
{
frame = ul_tti_request->SFN;
slot = ul_tti_request->Slot;
LOG_I(NR_PHY, "[%d, %d] ul_tti_request\n", frame, slot);
copy_ul_tti_data_req_to_dl_info(&mac->dl_info, ul_tti_request);
}
else
{
return;
......@@ -551,9 +602,10 @@ void *nrue_standalone_pnf_task(void *context)
LOG_E(NR_PHY, "Message dl_tti_request failed to unpack\n");
break;
}
LOG_I(NR_PHY, "Received an NFAPI_NR_PHY_MSG_TYPE_DL_TTI_REQUEST message in slot %d. \n", dl_tti_request.Slot);
LOG_I(NR_PHY, "Received an NFAPI_NR_PHY_MSG_TYPE_DL_TTI_REQUEST message in sfn/slot %d %d. \n",
dl_tti_request.SFN, dl_tti_request.Slot);
save_nr_measurement_info(&dl_tti_request);
check_and_process_dci(&dl_tti_request, NULL, NULL);
check_and_process_dci(&dl_tti_request, NULL, NULL, NULL);
break;
case NFAPI_NR_PHY_MSG_TYPE_TX_DATA_REQUEST:
if (nfapi_nr_p7_message_unpack((void *)buffer, len, &tx_data_request,
......@@ -562,8 +614,9 @@ void *nrue_standalone_pnf_task(void *context)
LOG_E(NR_PHY, "Message tx_data_request failed to unpack\n");
break;
}
LOG_I(NR_PHY, "Received an NFAPI_NR_PHY_MSG_TYPE_TX_DATA_REQUEST message in slot %d. \n", tx_data_request.Slot);
check_and_process_dci(NULL, &tx_data_request, NULL);
LOG_I(NR_PHY, "Received an NFAPI_NR_PHY_MSG_TYPE_TX_DATA_REQUEST message in SFN/slot %d %d. \n",
tx_data_request.SFN, tx_data_request.Slot);
check_and_process_dci(NULL, &tx_data_request, NULL, NULL);
break;
case NFAPI_NR_PHY_MSG_TYPE_UL_DCI_REQUEST:
if (nfapi_nr_p7_message_unpack((void *)buffer, len, &ul_dci_request,
......@@ -572,8 +625,9 @@ void *nrue_standalone_pnf_task(void *context)
LOG_E(NR_PHY, "Message ul_dci_request failed to unpack\n");
break;
}
LOG_I(NR_PHY, "Received an NFAPI_NR_PHY_MSG_TYPE_UL_DCI_REQUEST message in slot %d. \n", ul_dci_request.Slot);
check_and_process_dci(NULL, NULL, &ul_dci_request);
LOG_I(NR_PHY, "Received an NFAPI_NR_PHY_MSG_TYPE_UL_DCI_REQUEST message in SFN/slot %d %d. \n",
ul_dci_request.SFN, ul_dci_request.Slot);
check_and_process_dci(NULL, NULL, &ul_dci_request, NULL);
break;
case NFAPI_NR_PHY_MSG_TYPE_UL_TTI_REQUEST:
if (nfapi_nr_p7_message_unpack((void *)buffer, len, &ul_tti_request,
......@@ -582,7 +636,9 @@ void *nrue_standalone_pnf_task(void *context)
LOG_E(NR_PHY, "Message ul_tti_request failed to unpack\n");
break;
}
LOG_I(NR_PHY, "Received an NFAPI_NR_PHY_MSG_TYPE_UL_TTI_REQUEST message in slot %d. \n", ul_tti_request.Slot);
LOG_I(NR_PHY, "Received an NFAPI_NR_PHY_MSG_TYPE_UL_TTI_REQUEST message in SFN/slot %d %d. \n",
ul_tti_request.SFN, ul_tti_request.Slot);
check_and_process_dci(NULL, NULL, NULL, &ul_tti_request);
break;
default:
LOG_E(NR_PHY, "Case Statement has no corresponding nfapi message, this is the header ID %d\n", header.message_id);
......
......@@ -2968,6 +2968,7 @@ static void start_oai_nrue_threads()
init_queue(&nr_rach_ind_queue);
init_queue(&nr_rx_ind_queue);
init_queue(&nr_crc_ind_queue);
init_queue(&nr_uci_ind_queue);
if (sem_init(&sfn_slot_semaphore, 0, 0) != 0)
{
......
......@@ -44,6 +44,7 @@
extern queue_t nr_rach_ind_queue;
extern queue_t nr_rx_ind_queue;
extern queue_t nr_crc_ind_queue;
extern queue_t nr_uci_ind_queue;
//
// main_rrc.c
//
......