Commit a5e4cbb9 authored by Melissa Elkadi

Merge branch 'episys-throughput-updates-and-downlink-siso-cm' of https://gitlab.eurecom.fr/oai/openairinterface5g into episys-throughput-updates-and-downlink-siso-cm
parents 91d42fd5 5a8b235a
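
Note: the recurring change in the hunks below is a demotion of per-slot log calls from LOG_I to LOG_D (and, for the unknown-PDU case, to LOG_W), so they no longer print on every slot of the VNF/emulated-L1 fast path unless a component's log level is deliberately raised. As a hedged, minimal sketch of why that matters (this is not OAI's actual logging implementation; log_at and component_level are placeholder names):

/* Minimal sketch of a level-gated logger, assuming an INFO default level.
 * Placeholder names, not the OAI LOG API. */
#include <stdarg.h>
#include <stdio.h>

enum log_level { LVL_ERR, LVL_WARN, LVL_INFO, LVL_DEBUG };

static enum log_level component_level = LVL_INFO;  /* assumed runtime default */

static void log_at(enum log_level lvl, const char *fmt, ...)
{
  if (lvl > component_level)   /* LVL_DEBUG calls return here by default */
    return;
  va_list ap;
  va_start(ap, fmt);
  vfprintf(stderr, fmt, ap);   /* only enabled levels pay the formatting cost */
  va_end(ap);
}

int main(void)
{
  log_at(LVL_INFO,  "printed at the default level\n");
  log_at(LVL_DEBUG, "suppressed unless component_level >= LVL_DEBUG\n");
  return 0;
}
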
@@ -244,13 +244,13 @@ void oai_create_gnb(void) {
 if (RC.gNB == NULL) {
 RC.gNB = (PHY_VARS_gNB **) calloc(1, sizeof(PHY_VARS_gNB *));
-LOG_I(PHY,"gNB L1 structure RC.gNB allocated @ %p\n",RC.gNB);
+LOG_D(PHY,"gNB L1 structure RC.gNB allocated @ %p\n",RC.gNB);
 }
 if (RC.gNB[0] == NULL) {
 RC.gNB[0] = (PHY_VARS_gNB *) calloc(1, sizeof(PHY_VARS_gNB));
-LOG_I(PHY,"[nr-gnb.c] gNB structure RC.gNB[%d] allocated @ %p\n",0,RC.gNB[0]);
+LOG_D(PHY,"[nr-gnb.c] gNB structure RC.gNB[%d] allocated @ %p\n",0,RC.gNB[0]);
 }
 PHY_VARS_gNB *gNB = RC.gNB[0];
@@ -491,7 +491,7 @@ int wake_gNB_rxtx(PHY_VARS_gNB *gNB, uint16_t sfn, uint16_t slot) {
 L1_proc->frame_tx = (L1_proc->slot_rx > (19-slot_ahead)) ? (L1_proc->frame_rx+1)&1023 : L1_proc->frame_rx;
 L1_proc->slot_tx = (L1_proc->slot_rx + slot_ahead)%20;
-//LOG_I(PHY, "sfn/sf:%d%d proc[rx:%d%d] rx:%d%d] About to wake rxtx thread\n\n", sfn, slot, proc->frame_rx, proc->slot_rx, L1_proc->frame_rx, L1_proc->slot_rx);
+//LOG_D(PHY, "sfn/sf:%d%d proc[rx:%d%d] rx:%d%d] About to wake rxtx thread\n\n", sfn, slot, proc->frame_rx, proc->slot_rx, L1_proc->frame_rx, L1_proc->slot_rx);
 //NFAPI_TRACE(NFAPI_TRACE_INFO, "\nEntering wake_gNB_rxtx sfn %d slot %d\n",L1_proc->frame_rx,L1_proc->slot_rx);
 // the thread can now be woken up
 if (pthread_cond_signal(&L1_proc->cond) != 0) {
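
For reference, the TX-timing lines kept in the hunk above advance the RX frame/slot by slot_ahead, wrapping the slot modulo 20 (30 kHz SCS) and the SFN modulo 1024. A self-contained check of that arithmetic; the helper name and example values are mine, not from the tree:

/* Sketch only: reproduces the frame/slot advance from the hunk above,
 * assuming 20 slots per frame and a 10-bit SFN (0..1023). */
#include <stdio.h>

static void advance_tx_timing(int frame_rx, int slot_rx, int slot_ahead,
                              int *frame_tx, int *slot_tx)
{
  /* carry into the next frame when slot_rx + slot_ahead crosses slot 19 */
  *frame_tx = (slot_rx > (19 - slot_ahead)) ? (frame_rx + 1) & 1023 : frame_rx;
  *slot_tx  = (slot_rx + slot_ahead) % 20;
}

int main(void)
{
  int f, s;
  advance_tx_timing(1023, 18, 6, &f, &s);   /* hypothetical slot_ahead = 6 */
  printf("rx 1023.18 -> tx %d.%d\n", f, s); /* prints "rx 1023.18 -> tx 0.4" */
  return 0;
}
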
@@ -757,7 +757,7 @@ int phy_nr_rach_indication(nfapi_nr_rach_indication_t *ind)
 int phy_nr_uci_indication(nfapi_nr_uci_indication_t *ind)
 {
-LOG_I(NR_MAC, "In %s() NFAPI SFN/SF: %d/%d number_of_pdus :%u\n",
+LOG_D(NR_MAC, "In %s() NFAPI SFN/SF: %d/%d number_of_pdus :%u\n",
 __FUNCTION__,ind->sfn, ind->slot, ind->num_ucis);
 if(NFAPI_MODE == NFAPI_MODE_VNF)
 {
@@ -950,7 +950,7 @@ int phy_crc_indication(struct nfapi_vnf_p7_config *config, nfapi_crc_indication_
 int phy_nr_crc_indication(nfapi_nr_crc_indication_t *ind) {
-LOG_I(NR_MAC, "In %s() NFAPI SFN/SF: %d/%d number_of_pdus :%u\n",
+LOG_D(NR_MAC, "In %s() NFAPI SFN/SF: %d/%d number_of_pdus :%u\n",
 __FUNCTION__,ind->sfn, ind->slot, ind->number_crcs);
 if(NFAPI_MODE == NFAPI_MODE_VNF)
@@ -973,7 +973,7 @@ int phy_nr_crc_indication(nfapi_nr_crc_indication_t *ind) {
 crc_ind->crc_list[j].tb_crc_status = ind->crc_list[j].tb_crc_status;
 crc_ind->crc_list[j].timing_advance = ind->crc_list[j].timing_advance;
 crc_ind->crc_list[j].ul_cqi = ind->crc_list[j].ul_cqi;
-LOG_I(NR_MAC, "Received crc_ind.harq_id = %d for %d index SFN SLot %u %u with rnti %x\n",
+LOG_D(NR_MAC, "Received crc_ind.harq_id = %d for %d index SFN SLot %u %u with rnti %x\n",
 ind->crc_list[j].harq_id, j, ind->sfn, ind->slot, ind->crc_list[j].rnti);
 }
 if (!put_queue(&gnb_crc_ind_queue, crc_ind))
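
The hunk above fills one crc_ind entry per CRC and hands the whole indication to the scheduler side through gnb_crc_ind_queue. A hedged sketch of that producer/consumer handoff; this is not OAI's queue_t API, and the bound and names are assumptions:

/* Illustrative bounded FIFO between the nFAPI P7 receive thread (producer)
 * and the MAC scheduler thread (consumer). Placeholder names and size. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define QUEUE_SIZE 512

typedef struct {
  void *items[QUEUE_SIZE];
  size_t head, tail, count;
  pthread_mutex_t lock;        /* init with PTHREAD_MUTEX_INITIALIZER */
} msg_queue_t;

static bool queue_put(msg_queue_t *q, void *item)
{
  pthread_mutex_lock(&q->lock);
  bool ok = (q->count < QUEUE_SIZE);   /* caller frees the item on overflow */
  if (ok) {
    q->items[q->tail] = item;
    q->tail = (q->tail + 1) % QUEUE_SIZE;
    q->count++;
  }
  pthread_mutex_unlock(&q->lock);
  return ok;
}

static void *queue_get(msg_queue_t *q)
{
  pthread_mutex_lock(&q->lock);
  void *item = NULL;
  if (q->count > 0) {                  /* NULL means "nothing queued yet" */
    item = q->items[q->head];
    q->head = (q->head + 1) % QUEUE_SIZE;
    q->count--;
  }
  pthread_mutex_unlock(&q->lock);
  return item;
}
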
@@ -1069,7 +1069,7 @@ int phy_rx_indication(struct nfapi_vnf_p7_config *config, nfapi_rx_indication_t
 int phy_nr_rx_data_indication(nfapi_nr_rx_data_indication_t *ind) {
-LOG_I(NR_MAC, "In %s() NFAPI SFN/SF: %d/%d number_of_pdus :%u, and pdu %p\n",
+LOG_D(NR_MAC, "In %s() NFAPI SFN/SF: %d/%d number_of_pdus :%u, and pdu %p\n",
 __FUNCTION__,ind->sfn, ind->slot, ind->number_of_pdus, ind->pdu_list[0].pdu);
 if(NFAPI_MODE == NFAPI_MODE_VNF)
@@ -1171,7 +1171,7 @@ static void analyze_cqi_pdus_for_duplicates(nfapi_cqi_indication_t *ind)
 {
 nfapi_cqi_indication_pdu_t *src_pdu = &ind->cqi_indication_body.cqi_pdu_list[i];
-LOG_I(MAC, "CQI_IND[PDU:%d][rnti:%x cqi:%d channel:%d]\n", i, src_pdu->rx_ue_information.rnti,
+LOG_D(MAC, "CQI_IND[PDU:%d][rnti:%x cqi:%d channel:%d]\n", i, src_pdu->rx_ue_information.rnti,
 src_pdu->ul_cqi_information.ul_cqi, src_pdu->ul_cqi_information.channel);
 for (int j = i + 1; j < num_cqis; j++)
@@ -1893,7 +1893,7 @@ int oai_nfapi_dl_config_req(nfapi_dl_config_request_t *dl_config_req) {
 nfapi_vnf_p7_config_t *p7_config = vnf.p7_vnfs[0].config;
 dl_config_req->header.phy_id = 1; // DJP HACK TODO FIXME - need to pass this around!!!!
 dl_config_req->header.message_id = NFAPI_DL_CONFIG_REQUEST;
-LOG_I(PHY, "[VNF] %s() DL_CONFIG_REQ sfn_sf:%d_%d number_of_pdus:%d\n", __FUNCTION__,
+LOG_D(PHY, "[VNF] %s() DL_CONFIG_REQ sfn_sf:%d_%d number_of_pdus:%d\n", __FUNCTION__,
 NFAPI_SFNSF2SFN(dl_config_req->sfn_sf),NFAPI_SFNSF2SF(dl_config_req->sfn_sf), dl_config_req->dl_config_request_body.number_pdu);
 if (dl_config_req->dl_config_request_body.number_pdu > 0)
 {
@@ -1904,7 +1904,7 @@ int oai_nfapi_dl_config_req(nfapi_dl_config_request_t *dl_config_req) {
 {
 uint16_t dl_rnti = dl_config_req->dl_config_request_body.dl_config_pdu_list[i].dlsch_pdu.dlsch_pdu_rel8.rnti;
 uint16_t numPDUs = dl_config_req->dl_config_request_body.number_pdu;
-LOG_I(MAC, "(OAI eNB) Sending dl_config_req at VNF during Frame: %d and Subframe: %d,"
+LOG_D(MAC, "(OAI eNB) Sending dl_config_req at VNF during Frame: %d and Subframe: %d,"
 " with a RNTI value of: %x and with number of PDUs: %u\n",
 NFAPI_SFNSF2SFN(dl_config_req->sfn_sf),NFAPI_SFNSF2SF(dl_config_req->sfn_sf), dl_rnti, numPDUs);
 }
@@ -2062,7 +2062,7 @@ int oai_nfapi_ul_config_req(nfapi_ul_config_request_t *ul_config_req) {
 {
 uint8_t pdu_type = pdu_list[i].pdu_type;
-LOG_I(MAC, "ul_config_req num_pdus: %u pdu_number: %d pdu_type: %u SFN.SF: %d.%d\n",
+LOG_D(MAC, "ul_config_req num_pdus: %u pdu_number: %d pdu_type: %u SFN.SF: %d.%d\n",
 num_pdus, i, pdu_type, ul_config_req->sfn_sf >> 4, ul_config_req->sfn_sf & 15);
 if (pdu_type != NFAPI_UL_CONFIG_ULSCH_CQI_RI_PDU_TYPE)
......
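
A note on the "SFN.SF" decode in the hunk above: in the LTE-style nFAPI P7 header, the 4-bit subframe occupies the low nibble of sfn_sf and the SFN sits above it, so ">> 4" and "& 15" compute the same values as the NFAPI_SFNSF2SFN/NFAPI_SFNSF2SF macros used in the dl_config_req hunks. A small self-contained check (the pack helper is mine):

/* Sketch: pack/unpack of the 16-bit sfn_sf field, matching the ">> 4" and
 * "& 15" in the log line above. Helper name is illustrative. */
#include <assert.h>
#include <stdint.h>

static uint16_t pack_sfn_sf(uint16_t sfn, uint8_t sf)
{
  return (uint16_t)((sfn << 4) | (sf & 0xF));  /* sfn in bits 4..15, sf in bits 0..3 */
}

int main(void)
{
  uint16_t sfn_sf = pack_sfn_sf(812, 7);
  assert((sfn_sf >> 4) == 812);  /* SFN */
  assert((sfn_sf & 15) == 7);    /* subframe */
  return 0;
}
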
@@ -117,7 +117,7 @@ int8_t nr_ue_scheduled_response_stub(nr_scheduled_response_t *scheduled_response
 "Too many ul_config pdus %d", ul_config->number_pdus);
 for (int i = 0; i < ul_config->number_pdus; ++i)
 {
-LOG_I(NR_PHY, "In %s: processing type %d PDU of %d total UL PDUs (ul_config %p) \n",
+LOG_D(NR_PHY, "In %s: processing type %d PDU of %d total UL PDUs (ul_config %p) \n",
 __FUNCTION__, ul_config->ul_config_list[i].pdu_type, ul_config->number_pdus, ul_config);
 uint8_t pdu_type = ul_config->ul_config_list[i].pdu_type;
@@ -218,7 +218,7 @@ int8_t nr_ue_scheduled_response_stub(nr_scheduled_response_t *scheduled_response
 uci_ind->uci_list = CALLOC(uci_ind->num_ucis, sizeof(*uci_ind->uci_list));
 for (int j = 0; j < uci_ind->num_ucis; j++)
 {
-LOG_I(NR_MAC, "ul_config->ul_config_list[%d].pucch_config_pdu.n_bit = %d\n", i, ul_config->ul_config_list[i].pucch_config_pdu.n_bit);
+LOG_D(NR_MAC, "ul_config->ul_config_list[%d].pucch_config_pdu.n_bit = %d\n", i, ul_config->ul_config_list[i].pucch_config_pdu.n_bit);
 if (ul_config->ul_config_list[i].pucch_config_pdu.n_bit > 3 && mac->nr_ue_emul_l1.num_csi_reports > 0)
 {
 uci_ind->uci_list[j].pdu_type = NFAPI_NR_UCI_FORMAT_2_3_4_PDU_TYPE;
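
On the branch taken in the hunk above: PUCCH formats 0 and 1 carry at most 2 HARQ-ACK bits plus SR, so when the stub sees a larger payload and CSI reporting is configured, it reports the UCI as a format 2/3/4 PDU (NFAPI_NR_UCI_FORMAT_2_3_4_PDU_TYPE). A compressed restatement of that selection, with placeholder enum values rather than the nfapi ones:

/* Sketch of the UCI PDU-type selection in the stub above.
 * Enum values are placeholders, not nfapi_nr_interface constants. */
enum uci_pdu_type { UCI_PDU_FORMAT_0_1, UCI_PDU_FORMAT_2_3_4 };

static enum uci_pdu_type pick_uci_pdu_type(int n_bit, int num_csi_reports)
{
  /* more than 3 UCI bits with CSI configured cannot ride on format 0/1 */
  return (n_bit > 3 && num_csi_reports > 0) ? UCI_PDU_FORMAT_2_3_4
                                            : UCI_PDU_FORMAT_0_1;
}
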
@@ -265,7 +265,7 @@ int8_t nr_ue_scheduled_response_stub(nr_scheduled_response_t *scheduled_response
 }
 }
-LOG_I(NR_PHY, "Sending UCI with %d PDUs in sfn.slot %d/%d\n",
+LOG_D(NR_PHY, "Sending UCI with %d PDUs in sfn.slot %d/%d\n",
 uci_ind->num_ucis, uci_ind->sfn, uci_ind->slot);
 NR_UL_IND_t ul_info = {
 .uci_ind = *uci_ind,
@@ -276,7 +276,7 @@ int8_t nr_ue_scheduled_response_stub(nr_scheduled_response_t *scheduled_response
 }
 default:
-LOG_I(NR_MAC, "Unknown ul_config->pdu_type %d\n", pdu_type);
+LOG_W(NR_MAC, "Unknown ul_config->pdu_type %d\n", pdu_type);
 break;
 }
 }
......
@@ -87,9 +87,6 @@ PHY_VARS_NR_UE *UE;
 RAN_CONTEXT_t RC;
 int32_t uplink_frequency_offset[MAX_NUM_CCs][4];
-/* dummy constant */
-NR_UE_RRC_INST_t *NR_UE_rrc_inst;
 double cpuf;
 char *uecap_file;
......
@@ -223,6 +223,8 @@ nrUE_params_t *get_nrUE_params(void) {
 return &nrUE_params;
 }
+nr_bler_struct nr_bler_data[NR_NUM_MCS];
+void processSlotTX(void *arg) {}
 int main(int argc, char **argv){
......
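
The hunk above appears to add two stub definitions (the nr_bler_data table and an empty processSlotTX) so the standalone main in this file keeps linking against code elsewhere in the merge that references those symbols. A generic, hedged illustration of that link-stub pattern, with placeholder names:

/* Sketch of the link-stub pattern: a test/simulator main supplies empty
 * definitions for symbols the code under test references, instead of
 * pulling in the full implementation. Placeholder names, not OAI symbols. */
#include <stdio.h>

float perf_table[29];                              /* referenced, never read here */
void background_tx_task(void *arg) { (void)arg; }  /* no-op in the test build */

int main(void)
{
  printf("stubbed test harness linked OK\n");
  return 0;
}
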
@@ -93,9 +93,6 @@ THREAD_STRUCT thread_struct;
 nfapi_ue_release_request_body_t release_rntis;
 uint32_t N_RB_DL = 106;
-/* dummy constant */
-NR_UE_RRC_INST_t *NR_UE_rrc_inst;
 //Fixme: Uniq dirty DU instance, by global var, datamodel need better management
 instance_t DUuniqInstance=0;
 instance_t CUuniqInstance=0;
......