Commit d908779c authored by francescomani

lowercase bwp

parent cbff9df7
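
This commit renames the generic BWP pointer variables (BWP, UBWP, UL_BWP) to lowercase, direction-specific names: dl_bwp and ul_bwp where both directions appear in the same scope, current_BWP where only one direction is in play. The diff below applies the rename across the dlsim preprocessor, the DL/UL schedulers and phytest preprocessors, the UCI handling, and the corresponding prototypes. As a minimal, self-contained sketch of the convention (the struct definitions below are simplified stand-ins for illustration only, not the real OAI types, which carry many more fields):

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the NR MAC BWP structs, illustration only */
typedef struct { uint16_t BWPSize, BWPStart; uint8_t mcsTableIdx; } NR_UE_DL_BWP_t;
typedef struct { uint16_t BWPSize, BWPStart; uint8_t scs; } NR_UE_UL_BWP_t;
typedef struct { NR_UE_DL_BWP_t current_DL_BWP; NR_UE_UL_BWP_t current_UL_BWP; } NR_UE_info_t;

int main(void)
{
  NR_UE_info_t UE = {{106, 0, 1}, {106, 0, 1}};
  /* before this commit the locals were typically called BWP and UBWP */
  NR_UE_DL_BWP_t *dl_bwp = &UE.current_DL_BWP;  /* lowercase, direction-specific */
  NR_UE_UL_BWP_t *ul_bwp = &UE.current_UL_BWP;
  /* the struct fields are unchanged; only the local variable names differ */
  printf("DL BWP size %d, UL BWP scs %d\n", dl_bwp->BWPSize, ul_bwp->scs);
  return 0;
}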
@@ -280,7 +280,7 @@ void nr_dlsim_preprocessor(module_id_t module_id,
NR_UE_info_t *UE_info = RC.nrmac[module_id]->UE_info.list[0];
AssertFatal(RC.nrmac[module_id]->UE_info.list[1]==NULL, "can have only a single UE\n");
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl;
-NR_UE_DL_BWP_t *BWP = &UE_info->current_DL_BWP;
+NR_UE_DL_BWP_t *current_BWP = &UE_info->current_DL_BWP;
NR_ServingCellConfigCommon_t *scc = RC.nrmac[0]->common_channels[0].ServingCellConfigCommon;
uint8_t nr_of_candidates = 0;
@@ -307,7 +307,7 @@ void nr_dlsim_preprocessor(module_id_t module_id,
NR_pdsch_semi_static_t *ps = &sched_ctrl->pdsch_semi_static;
-nr_set_pdsch_semi_static(BWP,
+nr_set_pdsch_semi_static(current_BWP,
scc,
/* tda = */ 0,
g_nrOfLayers,
@@ -320,10 +320,10 @@ void nr_dlsim_preprocessor(module_id_t module_id,
sched_pdsch->mcs = g_mcsIndex;
/* the following might override the table that is mandated by RRC
* configuration */
-BWP->mcsTableIdx = g_mcsTableIdx;
+current_BWP->mcsTableIdx = g_mcsTableIdx;
-sched_pdsch->Qm = nr_get_Qm_dl(sched_pdsch->mcs, BWP->mcsTableIdx);
-sched_pdsch->R = nr_get_code_rate_dl(sched_pdsch->mcs, BWP->mcsTableIdx);
+sched_pdsch->Qm = nr_get_Qm_dl(sched_pdsch->mcs, current_BWP->mcsTableIdx);
+sched_pdsch->R = nr_get_code_rate_dl(sched_pdsch->mcs, current_BWP->mcsTableIdx);
sched_pdsch->tb_size = nr_compute_tbs(sched_pdsch->Qm,
sched_pdsch->R,
sched_pdsch->rbSize,
@@ -351,7 +351,7 @@ void nr_dlsim_preprocessor(module_id_t module_id,
AssertFatal(sched_pdsch->rbStart >= 0, "invalid rbStart %d\n", sched_pdsch->rbStart);
AssertFatal(sched_pdsch->rbSize > 0, "invalid rbSize %d\n", sched_pdsch->rbSize);
AssertFatal(sched_pdsch->mcs >= 0, "invalid mcs %d\n", sched_pdsch->mcs);
-AssertFatal(BWP->mcsTableIdx >= 0 && BWP->mcsTableIdx <= 2, "invalid mcsTableIdx %d\n", BWP->mcsTableIdx);
+AssertFatal(current_BWP->mcsTableIdx >= 0 && current_BWP->mcsTableIdx <= 2, "invalid mcsTableIdx %d\n", current_BWP->mcsTableIdx);
}
typedef struct {
......
@@ -388,8 +388,8 @@ bool allocate_dl_retransmission(module_id_t module_id,
gNB_MAC_INST *nr_mac = RC.nrmac[module_id];
const NR_ServingCellConfigCommon_t *scc = nr_mac->common_channels->ServingCellConfigCommon;
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
-NR_UE_DL_BWP_t *BWP = &UE->current_DL_BWP;
-NR_UE_UL_BWP_t *UBWP = &UE->current_UL_BWP;
+NR_UE_DL_BWP_t *dl_bwp = &UE->current_DL_BWP;
+NR_UE_UL_BWP_t *ul_bwp = &UE->current_UL_BWP;
NR_sched_pdsch_t *retInfo = &sched_ctrl->harq_processes[current_harq_pid].sched_pdsch;
NR_pdsch_semi_static_t *ps = &sched_ctrl->pdsch_semi_static;
@@ -404,7 +404,7 @@ bool allocate_dl_retransmission(module_id_t module_id,
}
const int coresetid = sched_ctrl->coreset->controlResourceSetId;
-const uint16_t bwpSize = coresetid == 0 ? RC.nrmac[module_id]->cset0_bwp_size : BWP->BWPSize;
+const uint16_t bwpSize = coresetid == 0 ? RC.nrmac[module_id]->cset0_bwp_size : dl_bwp->BWPSize;
int rbStart = 0; // start wrt BWPstart
int rbSize = 0;
@@ -435,7 +435,7 @@ bool allocate_dl_retransmission(module_id_t module_id,
/* check whether we need to switch the TDA allocation since the last
* (re-)transmission */
if (ps->time_domain_allocation != tda) {
-nr_set_pdsch_semi_static(BWP,
+nr_set_pdsch_semi_static(dl_bwp,
scc,
tda,
ps->nrOfLayers,
@@ -447,7 +447,7 @@ bool allocate_dl_retransmission(module_id_t module_id,
* that we have enough resources */
NR_pdsch_semi_static_t temp_ps = *ps;
-nr_set_pdsch_semi_static(BWP,
+nr_set_pdsch_semi_static(dl_bwp,
scc,
tda,
ps->nrOfLayers,
@@ -519,7 +519,7 @@ bool allocate_dl_retransmission(module_id_t module_id,
/* Find PUCCH occasion: if it fails, undo CCE allocation (undoing PUCCH
* allocation after CCE alloc fail would be more complex) */
-int r_pucch = nr_get_pucch_resource(sched_ctrl->coreset, UBWP->pucch_Config, CCEIndex);
+int r_pucch = nr_get_pucch_resource(sched_ctrl->coreset, ul_bwp->pucch_Config, CCEIndex);
const int alloc = nr_acknack_scheduling(module_id, UE, frame, slot, r_pucch, 0);
if (alloc<0) {
LOG_D(MAC,
@@ -579,7 +579,7 @@ void pf_dl(module_id_t module_id,
continue;
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
-NR_UE_DL_BWP_t *BWP = &UE->current_DL_BWP;
+NR_UE_DL_BWP_t *current_BWP = &UE->current_DL_BWP;
if (sched_ctrl->ul_failure==1 && get_softmodem_params()->phy_test==0) continue;
@@ -619,15 +619,15 @@ void pf_dl(module_id_t module_id,
/* Calculate coeff */
const NR_bler_options_t *bo = &mac->dl_bler;
-const int max_mcs_table = BWP->mcsTableIdx == 1 ? 27 : 28;
+const int max_mcs_table = current_BWP->mcsTableIdx == 1 ? 27 : 28;
const int max_mcs = min(sched_ctrl->dl_max_mcs, max_mcs_table);
if (bo->harq_round_max == 1)
sched_pdsch->mcs = max_mcs;
else
sched_pdsch->mcs = get_mcs_from_bler(bo, stats, &sched_ctrl->dl_bler_stats, max_mcs, frame);
UE->layers = set_dl_nrOfLayers(sched_ctrl);
-const uint8_t Qm = nr_get_Qm_dl(sched_pdsch->mcs, BWP->mcsTableIdx);
-const uint16_t R = nr_get_code_rate_dl(sched_pdsch->mcs, BWP->mcsTableIdx);
+const uint8_t Qm = nr_get_Qm_dl(sched_pdsch->mcs, current_BWP->mcsTableIdx);
+const uint16_t R = nr_get_code_rate_dl(sched_pdsch->mcs, current_BWP->mcsTableIdx);
uint32_t tbs = nr_compute_tbs(Qm,
R,
1, /* rbSize */
@@ -657,13 +657,13 @@ void pf_dl(module_id_t module_id,
NR_UE_sched_ctrl_t *sched_ctrl = &iterator->UE->UE_sched_ctrl;
const uint16_t rnti = iterator->UE->rnti;
-NR_UE_DL_BWP_t *BWP = &iterator->UE->current_DL_BWP;
-NR_UE_UL_BWP_t *UBWP = &iterator->UE->current_UL_BWP;
+NR_UE_DL_BWP_t *dl_bwp = &iterator->UE->current_DL_BWP;
+NR_UE_UL_BWP_t *ul_bwp = &iterator->UE->current_UL_BWP;
const int coresetid = sched_ctrl->coreset->controlResourceSetId;
const uint16_t bwpSize = coresetid == 0 ?
RC.nrmac[module_id]->cset0_bwp_size :
-BWP->BWPSize;
+dl_bwp->BWPSize;
int rbStart = 0; // start wrt BWPstart
if (sched_ctrl->available_dl_harq.head < 0) {
@@ -703,7 +703,7 @@ void pf_dl(module_id_t module_id,
/* Find PUCCH occasion: if it fails, undo CCE allocation (undoing PUCCH
* allocation after CCE alloc fail would be more complex) */
-int r_pucch = nr_get_pucch_resource(sched_ctrl->coreset, UBWP->pucch_Config, CCEIndex);
+int r_pucch = nr_get_pucch_resource(sched_ctrl->coreset, ul_bwp->pucch_Config, CCEIndex);
const int alloc = nr_acknack_scheduling(module_id, iterator->UE, frame, slot, r_pucch, 0);
if (alloc<0) {
@@ -730,7 +730,7 @@ void pf_dl(module_id_t module_id,
NR_pdsch_semi_static_t *ps = &sched_ctrl->pdsch_semi_static;
if (ps->nrOfLayers != iterator->UE->layers || ps->time_domain_allocation != tda ) {
-nr_set_pdsch_semi_static(BWP,
+nr_set_pdsch_semi_static(dl_bwp,
scc,
tda,
iterator->UE->layers,
@@ -749,8 +749,8 @@ void pf_dl(module_id_t module_id,
while (rbStart + max_rbSize < bwpSize && (rballoc_mask[rbStart + max_rbSize] & slbitmap) == slbitmap)
max_rbSize++;
-sched_pdsch->Qm = nr_get_Qm_dl(sched_pdsch->mcs, BWP->mcsTableIdx);
-sched_pdsch->R = nr_get_code_rate_dl(sched_pdsch->mcs, BWP->mcsTableIdx);
+sched_pdsch->Qm = nr_get_Qm_dl(sched_pdsch->mcs, dl_bwp->mcsTableIdx);
+sched_pdsch->R = nr_get_code_rate_dl(sched_pdsch->mcs, dl_bwp->mcsTableIdx);
sched_pdsch->pucch_allocation = alloc;
uint32_t TBS = 0;
uint16_t rbSize;
@@ -796,18 +796,18 @@ void nr_fr1_dlsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t
/* This is temporary and it assumes all UEs have the same BWP and TDA*/
NR_UE_info_t *UE=UE_info->list[0];
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
-NR_UE_DL_BWP_t *BWP = &UE->current_DL_BWP;
+NR_UE_DL_BWP_t *current_BWP = &UE->current_DL_BWP;
const int tda = get_dl_tda(RC.nrmac[module_id], scc, slot);
int startSymbolIndex, nrOfSymbols;
-const struct NR_PDSCH_TimeDomainResourceAllocationList *tdaList = BWP->tdaList;
+const struct NR_PDSCH_TimeDomainResourceAllocationList *tdaList = current_BWP->tdaList;
AssertFatal(tda < tdaList->list.count, "time_domain_allocation %d>=%d\n", tda, tdaList->list.count);
const int startSymbolAndLength = tdaList->list.array[tda]->startSymbolAndLength;
SLIV2SL(startSymbolAndLength, &startSymbolIndex, &nrOfSymbols);
const int coresetid = sched_ctrl->coreset->controlResourceSetId;
-const uint16_t bwpSize = coresetid == 0 ? RC.nrmac[module_id]->cset0_bwp_size : BWP->BWPSize;
-const uint16_t BWPStart = coresetid == 0 ? RC.nrmac[module_id]->cset0_bwp_start : BWP->BWPStart;
+const uint16_t bwpSize = coresetid == 0 ? RC.nrmac[module_id]->cset0_bwp_size : current_BWP->BWPSize;
+const uint16_t BWPStart = coresetid == 0 ? RC.nrmac[module_id]->cset0_bwp_start : current_BWP->BWPStart;
const uint16_t slbitmap = SL_to_bitmap(startSymbolIndex, nrOfSymbols);
uint16_t *vrb_map = RC.nrmac[module_id]->common_channels[CC_id].vrb_map;
......
@@ -195,18 +195,18 @@ void nr_preprocessor_phytest(module_id_t module_id,
NR_UE_info_t *UE = RC.nrmac[module_id]->UE_info.list[0];
NR_ServingCellConfigCommon_t *scc = RC.nrmac[module_id]->common_channels[0].ServingCellConfigCommon;
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
-NR_UE_DL_BWP_t *BWP = &UE->current_DL_BWP;
+NR_UE_DL_BWP_t *dl_bwp = &UE->current_DL_BWP;
const int CC_id = 0;
const int tda = get_dl_tda(RC.nrmac[module_id], scc, slot);
NR_pdsch_semi_static_t *ps = &sched_ctrl->pdsch_semi_static;
ps->nrOfLayers = target_dl_Nl;
if (ps->time_domain_allocation != tda || ps->nrOfLayers != target_dl_Nl)
-nr_set_pdsch_semi_static(BWP, scc, tda, target_dl_Nl,sched_ctrl , ps);
+nr_set_pdsch_semi_static(dl_bwp, scc, tda, target_dl_Nl,sched_ctrl , ps);
/* find largest unallocated chunk */
-const int bwpSize = BWP->BWPSize;
-const int BWPStart = BWP->BWPStart;
+const int bwpSize = dl_bwp->BWPSize;
+const int BWPStart = dl_bwp->BWPStart;
int rbStart = 0;
int rbSize = 0;
@@ -309,8 +309,8 @@ void nr_preprocessor_phytest(module_id_t module_id,
sched_pdsch->rbSize = rbSize;
sched_pdsch->mcs = target_dl_mcs;
-sched_pdsch->Qm = nr_get_Qm_dl(sched_pdsch->mcs, BWP->mcsTableIdx);
-sched_pdsch->R = nr_get_code_rate_dl(sched_pdsch->mcs, BWP->mcsTableIdx);
+sched_pdsch->Qm = nr_get_Qm_dl(sched_pdsch->mcs, dl_bwp->mcsTableIdx);
+sched_pdsch->R = nr_get_code_rate_dl(sched_pdsch->mcs, dl_bwp->mcsTableIdx);
sched_ctrl->dl_bler_stats.mcs = target_dl_mcs; /* for logging output */
sched_pdsch->tb_size = nr_compute_tbs(sched_pdsch->Qm,
sched_pdsch->R,
@@ -351,10 +351,10 @@ bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_
const int CC_id = 0;
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
-NR_UE_UL_BWP_t *BWP = &UE->current_UL_BWP;
-const int mu = BWP->scs;
+NR_UE_UL_BWP_t *ul_bwp = &UE->current_UL_BWP;
+const int mu = ul_bwp->scs;
-const struct NR_PUSCH_TimeDomainResourceAllocationList *tdaList = BWP->tdaList;
+const struct NR_PUSCH_TimeDomainResourceAllocationList *tdaList = ul_bwp->tdaList;
const int temp_tda = get_ul_tda(nr_mac, scc, slot);
if (temp_tda < 0)
return false;
@@ -362,7 +362,7 @@ bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_
"time domain assignment %d >= %d\n",
temp_tda,
tdaList->list.count);
-int K2 = get_K2(BWP->tdaList, temp_tda, mu);
+int K2 = get_K2(ul_bwp->tdaList, temp_tda, mu);
const int sched_frame = frame + (slot + K2 >= nr_slots_per_frame[mu]);
const int sched_slot = (slot + K2) % nr_slots_per_frame[mu];
const int tda = get_ul_tda(nr_mac, scc, sched_slot);
@@ -384,13 +384,13 @@ bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_
NR_pusch_semi_static_t *ps = &sched_ctrl->pusch_semi_static;
if (ps->time_domain_allocation != tda
|| ps->nrOfLayers != target_ul_Nl)
-nr_set_pusch_semi_static(BWP, scc, tda, target_ul_Nl,ps);
+nr_set_pusch_semi_static(ul_bwp, scc, tda, target_ul_Nl,ps);
uint16_t rbStart = 0;
uint16_t rbSize;
-const int bw = BWP->BWPSize;
-const int BWPStart = BWP->BWPStart;
+const int bw = ul_bwp->BWPSize;
+const int BWPStart = ul_bwp->BWPStart;
if (target_ul_bw>bw)
rbSize = bw;
@@ -453,10 +453,10 @@ bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_
/* Calculate TBS from MCS */
ps->nrOfLayers = target_ul_Nl;
-sched_pusch->R = nr_get_code_rate_ul(mcs, BWP->mcs_table);
-sched_pusch->Qm = nr_get_Qm_ul(mcs, BWP->mcs_table);
-if (BWP->pusch_Config->tp_pi2BPSK
-&& ((BWP->mcs_table == 3 && mcs < 2) || (BWP->mcs_table == 4 && mcs < 6))) {
+sched_pusch->R = nr_get_code_rate_ul(mcs, ul_bwp->mcs_table);
+sched_pusch->Qm = nr_get_Qm_ul(mcs, ul_bwp->mcs_table);
+if (ul_bwp->pusch_Config->tp_pi2BPSK
+&& ((ul_bwp->mcs_table == 3 && mcs < 2) || (ul_bwp->mcs_table == 4 && mcs < 6))) {
sched_pusch->R >>= 1;
sched_pusch->Qm <<= 1;
}
......
@@ -165,16 +165,16 @@ void nr_csi_meas_reporting(int Mod_idP,
UE_iterator(RC.nrmac[Mod_idP]->UE_info.list, UE ) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
-NR_UE_UL_BWP_t *UL_BWP = &UE->current_UL_BWP;
-const int n_slots_frame = nr_slots_per_frame[UL_BWP->scs];
+NR_UE_UL_BWP_t *ul_bwp = &UE->current_UL_BWP;
+const int n_slots_frame = nr_slots_per_frame[ul_bwp->scs];
if ((sched_ctrl->rrc_processing_timer > 0) || (sched_ctrl->ul_failure==1 && get_softmodem_params()->phy_test==0)) {
continue;
}
-const NR_CSI_MeasConfig_t *csi_measconfig = UL_BWP->csi_MeasConfig;
+const NR_CSI_MeasConfig_t *csi_measconfig = ul_bwp->csi_MeasConfig;
if (!csi_measconfig) continue;
AssertFatal(csi_measconfig->csi_ReportConfigToAddModList->list.count > 0,
"NO CSI report configuration available");
-NR_PUCCH_Config_t *pucch_Config = UL_BWP->pucch_Config;
+NR_PUCCH_Config_t *pucch_Config = ul_bwp->pucch_Config;
for (int csi_report_id = 0; csi_report_id < csi_measconfig->csi_ReportConfigToAddModList->list.count; csi_report_id++){
NR_CSI_ReportConfig_t *csirep = csi_measconfig->csi_ReportConfigToAddModList->list.array[csi_report_id];
@@ -214,7 +214,7 @@ void nr_csi_meas_reporting(int Mod_idP,
curr_pucch->resource_indicator = res_index;
curr_pucch->csi_bits += nr_get_csi_bitlen(UE->csi_report_template, csi_report_id);
-int bwp_start = UL_BWP->BWPStart;
+int bwp_start = ul_bwp->BWPStart;
// going through the list of PUCCH resources to find the one indexed by resource_id
uint16_t *vrb_map_UL = &RC.nrmac[Mod_idP]->common_channels[0].vrb_map_UL[sched_slot * MAX_BWP_SIZE];
@@ -378,7 +378,7 @@ void tci_handling(NR_UE_info_t *UE, frame_t frame, slot_t slot) {
int ssb_index[MAX_NUM_SSB] = {0};
int ssb_rsrp[MAX_NUM_SSB] = {0};
uint8_t idx = 0;
-NR_UE_DL_BWP_t *BWP = &UE->current_DL_BWP;
+NR_UE_DL_BWP_t *dl_bwp = &UE->current_DL_BWP;
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
uint8_t nr_ssbri_cri = 0;
@@ -388,8 +388,8 @@ void tci_handling(NR_UE_info_t *UE, frame_t frame, slot_t slot) {
uint8_t i, j;
//bwp indicator
-int n_dl_bwp = BWP->n_dl_bwp;
-const int bwp_id = BWP->bwp_id;
+int n_dl_bwp = dl_bwp->n_dl_bwp;
+const int bwp_id = dl_bwp->bwp_id;
if (n_dl_bwp < 4)
pdsch_bwp_id = bwp_id;
else
@@ -655,7 +655,6 @@ void evaluate_cqi_report(uint8_t *payload,
long *cqi_Table){
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
-NR_UE_DL_BWP_t *BWP = &UE->current_DL_BWP;
//TODO sub-band CQI report not yet implemented
int cqi_bitlen = csi_report->csi_meas_bitlen.cqi_bitlen[ri];
@@ -679,7 +678,7 @@ void evaluate_cqi_report(uint8_t *payload,
// TODO for wideband case and multiple TB
const int cqi_idx = sched_ctrl->CSI_report.cri_ri_li_pmi_cqi_report.wb_cqi_1tb;
-const int mcs_table = BWP->mcsTableIdx;
+const int mcs_table = UE->current_DL_BWP.mcsTableIdx;
const int cqi_table = sched_ctrl->CSI_report.cri_ri_li_pmi_cqi_report.cqi_table;
sched_ctrl->dl_max_mcs = get_mcs_from_cqi(mcs_table, cqi_table, cqi_idx);
}
@@ -758,8 +757,8 @@ void extract_pucch_csi_report(NR_CSI_MeasConfig_t *csi_MeasConfig,
uint16_t bitlen = uci_pdu->csi_part1.csi_part1_bit_len;
NR_CSI_ReportConfig__reportQuantity_PR reportQuantity_type = NR_CSI_ReportConfig__reportQuantity_PR_NOTHING;
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
-NR_UE_UL_BWP_t *BWP = &UE->current_UL_BWP;
-const int n_slots_frame = nr_slots_per_frame[BWP->scs];
+NR_UE_UL_BWP_t *ul_bwp = &UE->current_UL_BWP;
+const int n_slots_frame = nr_slots_per_frame[ul_bwp->scs];
int cumul_bits = 0;
int r_index = -1;
for (int csi_report_id = 0; csi_report_id < csi_MeasConfig->csi_ReportConfigToAddModList->list.count; csi_report_id++ ) {
@@ -1043,8 +1042,8 @@ int nr_acknack_scheduling(int mod_id,
const int CC_id = 0;
const int minfbtime = RC.nrmac[mod_id]->minRXTXTIMEpdsch;
const NR_ServingCellConfigCommon_t *scc = RC.nrmac[mod_id]->common_channels[CC_id].ServingCellConfigCommon;
-const NR_UE_UL_BWP_t *BWP = &UE->current_UL_BWP;
-const int n_slots_frame = nr_slots_per_frame[BWP->scs];
+const NR_UE_UL_BWP_t *ul_bwp = &UE->current_UL_BWP;
+const int n_slots_frame = nr_slots_per_frame[ul_bwp->scs];
const NR_TDD_UL_DL_Pattern_t *tdd = scc->tdd_UL_DL_ConfigurationCommon ? &scc->tdd_UL_DL_ConfigurationCommon->pattern1 : NULL;
AssertFatal(tdd || RC.nrmac[mod_id]->common_channels[CC_id].frame_type == FDD, "Dynamic TDD not handled yet\n");
const int nr_slots_period = tdd ? n_slots_frame / get_nb_periods_per_frame(tdd->dl_UL_TransmissionPeriodicity) : n_slots_frame;
@@ -1059,10 +1058,10 @@ int nr_acknack_scheduling(int mod_id,
* later)
* * each UE has dedicated PUCCH Format 0 resources, and we use index 0! */
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
-NR_PUCCH_Config_t *pucch_Config = BWP->pucch_Config;
+NR_PUCCH_Config_t *pucch_Config = ul_bwp->pucch_Config;
-int bwp_start = BWP->BWPStart;
-int bwp_size = BWP->BWPSize;
+int bwp_start = ul_bwp->BWPStart;
+int bwp_size = ul_bwp->BWPSize;
NR_sched_pucch_t *pucch = &sched_ctrl->sched_pucch[0];
LOG_D(NR_MAC, "In %s: %4d.%2d Trying to allocate pucch, current DAI %d\n", __FUNCTION__, frame, slot, pucch->dai_c);
@@ -1308,10 +1307,10 @@ void nr_sr_reporting(gNB_MAC_INST *nrmac, frame_t SFN, sub_frame_t slot)
UE_iterator(nrmac->UE_info.list, UE) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
-NR_UE_UL_BWP_t *BWP = &UE->current_UL_BWP;
-const int n_slots_frame = nr_slots_per_frame[BWP->scs];
+NR_UE_UL_BWP_t *ul_bwp = &UE->current_UL_BWP;
+const int n_slots_frame = nr_slots_per_frame[ul_bwp->scs];
if (sched_ctrl->ul_failure==1 || sched_ctrl->rrc_processing_timer>0) continue;
-NR_PUCCH_Config_t *pucch_Config = BWP->pucch_Config;
+NR_PUCCH_Config_t *pucch_Config = ul_bwp->pucch_Config;
if (!pucch_Config || !pucch_Config->schedulingRequestResourceToAddModList)
continue;
......
@@ -1052,11 +1052,11 @@ void pf_ul(module_id_t module_id,
LOG_D(NR_MAC,"pf_ul: preparing UL scheduling for UE %04x\n",UE->rnti);
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
-NR_UE_UL_BWP_t *BWP = &UE->current_UL_BWP;
+NR_UE_UL_BWP_t *current_BWP = &UE->current_UL_BWP;
int rbStart = 0; // wrt BWP start
-const uint16_t bwpSize = BWP->BWPSize;
+const uint16_t bwpSize = current_BWP->BWPSize;
NR_sched_pusch_t *sched_pusch = &sched_ctrl->sched_pusch;
NR_pusch_semi_static_t *ps = &sched_ctrl->pusch_semi_static;
const NR_mac_dir_stats_t *stats = &UE->mac_stats.ul;
@@ -1149,7 +1149,7 @@ void pf_ul(module_id_t module_id,
const int tda = get_ul_tda(nrmac, scc, sched_pusch->slot);
if (ps->time_domain_allocation != tda
|| ps->nrOfLayers != nrOfLayers) {
-nr_set_pusch_semi_static(BWP,
+nr_set_pusch_semi_static(current_BWP,
scc,
tda,
nrOfLayers,
@@ -1176,7 +1176,7 @@ void pf_ul(module_id_t module_id,
NR_sched_pusch_t *sched_pusch = &sched_ctrl->sched_pusch;
sched_pusch->mcs = min(nrmac->min_grant_mcs, sched_pusch->mcs);
-update_ul_ue_R_Qm(sched_pusch, BWP->pusch_Config, BWP->mcs_table);
+update_ul_ue_R_Qm(sched_pusch, current_BWP->pusch_Config, current_BWP->mcs_table);
sched_pusch->rbStart = rbStart;
sched_pusch->rbSize = min_rb;
sched_pusch->tb_size = nr_compute_tbs(sched_pusch->Qm,
@@ -1199,7 +1199,7 @@ void pf_ul(module_id_t module_id,
/* Create UE_sched for UEs eligibale for new data transmission*/
/* Calculate coefficient*/
-const uint32_t tbs = ul_pf_tbs[BWP->mcs_table][sched_pusch->mcs];
+const uint32_t tbs = ul_pf_tbs[current_BWP->mcs_table][sched_pusch->mcs];
float coeff_ue = (float) tbs / UE->ul_thr_ue;
LOG_D(NR_MAC,"rnti %04x b %d, ul_thr_ue %f, tbs %d, coeff_ue %f\n",
UE->rnti, b, UE->ul_thr_ue, tbs, coeff_ue);
@@ -1242,9 +1242,9 @@ void pf_ul(module_id_t module_id,
}
else LOG_D(NR_MAC, "%4d.%2d free CCE for UL DCI UE %04x\n",frame,slot, iterator->UE->rnti);
-NR_UE_UL_BWP_t *BWP = &iterator->UE->current_UL_BWP;
+NR_UE_UL_BWP_t *current_BWP = &iterator->UE->current_UL_BWP;
-const uint16_t bwpSize = BWP->BWPSize;
+const uint16_t bwpSize = current_BWP->BWPSize;
NR_sched_pusch_t *sched_pusch = &sched_ctrl->sched_pusch;
NR_pusch_semi_static_t *ps = &sched_ctrl->pusch_semi_static;
@@ -1256,13 +1256,13 @@ void pf_ul(module_id_t module_id,
const int tda = get_ul_tda(nrmac, scc, sched_pusch->slot);
if (ps->time_domain_allocation != tda
|| ps->nrOfLayers != nrOfLayers) {
-nr_set_pusch_semi_static(BWP,
+nr_set_pusch_semi_static(current_BWP,
scc,
tda,
nrOfLayers,
ps);
}
-update_ul_ue_R_Qm(sched_pusch, BWP->pusch_Config, BWP->mcs_table);
+update_ul_ue_R_Qm(sched_pusch, current_BWP->pusch_Config, current_BWP->mcs_table);
int rbStart = 0;
const uint16_t slbitmap = SL_to_bitmap(ps->startSymbolIndex, ps->nrOfSymbols);
@@ -1341,16 +1341,16 @@ bool nr_fr1_ulsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t
* TDAs yet). If the TDA is negative, it means that there is no UL slot to
* schedule now (slot + k2 is not UL slot) */
NR_UE_sched_ctrl_t *sched_ctrl = &nr_mac->UE_info.list[0]->UE_sched_ctrl;
-NR_UE_UL_BWP_t *BWP = &nr_mac->UE_info.list[0]->current_UL_BWP;
-int mu = BWP->scs;
+NR_UE_UL_BWP_t *current_BWP = &nr_mac->UE_info.list[0]->current_UL_BWP;
+int mu = current_BWP->scs;
const int temp_tda = get_ul_tda(nr_mac, scc, slot);
-int K2 = get_K2(BWP->tdaList, temp_tda, mu);
+int K2 = get_K2(current_BWP->tdaList, temp_tda, mu);
const int sched_frame = (frame + (slot + K2 >= nr_slots_per_frame[mu])) & 1023;
const int sched_slot = (slot + K2) % nr_slots_per_frame[mu];
const int tda = get_ul_tda(nr_mac, scc, sched_slot);
if (tda < 0)
return false;
-DevAssert(K2 == get_K2(BWP->tdaList, tda, mu));
+DevAssert(K2 == get_K2(current_BWP->tdaList, tda, mu));
if (!is_xlsch_in_slot(nr_mac->ulsch_slot_bitmap[sched_slot / 64], sched_slot))
return false;
@@ -1364,7 +1364,7 @@ bool nr_fr1_ulsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t
is_xlsch_in_slot(nr_mac->ulsch_slot_bitmap[sched_slot / 64], sched_slot);
// FIXME: Avoid mixed slots for initialUplinkBWP
-if (BWP->bwp_id==0 && is_mixed_slot)
+if (current_BWP->bwp_id==0 && is_mixed_slot)
return false;
// Avoid slots with the SRS
@@ -1379,9 +1379,9 @@ bool nr_fr1_ulsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t
sched_ctrl->sched_pusch.frame = sched_frame;
UE_iterator(nr_mac->UE_info.list, UE2) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE2->UE_sched_ctrl;
-AssertFatal(K2 == get_K2(BWP->tdaList, tda, mu),
+AssertFatal(K2 == get_K2(current_BWP->tdaList, tda, mu),
"Different K2, %d(UE%d) != %ld(UE%04x)\n",
-K2, 0, get_K2(BWP->tdaList, tda, mu), UE2->rnti);
+K2, 0, get_K2(current_BWP->tdaList, tda, mu), UE2->rnti);
sched_ctrl->sched_pusch.slot = sched_slot;
sched_ctrl->sched_pusch.frame = sched_frame;
}
@@ -1392,10 +1392,10 @@ bool nr_fr1_ulsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t
uint16_t *vrb_map_UL =
&RC.nrmac[module_id]->common_channels[CC_id].vrb_map_UL[sched_slot * MAX_BWP_SIZE];
-const uint16_t bwpSize = BWP->BWPSize;
-const uint16_t bwpStart = BWP->BWPStart;
+const uint16_t bwpSize = current_BWP->BWPSize;
+const uint16_t bwpStart = current_BWP->BWPStart;
-const int startSymbolAndLength = BWP->tdaList->list.array[tda]->startSymbolAndLength;
+const int startSymbolAndLength = current_BWP->tdaList->list.array[tda]->startSymbolAndLength;
int startSymbolIndex, nrOfSymbols;
SLIV2SL(startSymbolAndLength, &startSymbolIndex, &nrOfSymbols);
const uint16_t symb = SL_to_bitmap(startSymbolIndex, nrOfSymbols);
......
@@ -192,7 +192,7 @@ void config_uldci(const NR_SIB1_t *sib1,
dci_pdu_rel15_t *dci_pdu_rel15,
int time_domain_assignment,
uint8_t tpc,
-NR_UE_UL_BWP_t *BWP);
+NR_UE_UL_BWP_t *ul_bwp);
void nr_schedule_pucch(gNB_MAC_INST *nrmac,
frame_t frameP,
@@ -285,7 +285,7 @@ void fill_pdcch_vrb_map(gNB_MAC_INST *mac,
void fill_dci_pdu_rel15(const NR_ServingCellConfigCommon_t *scc,
const NR_CellGroupConfig_t *CellGroup,
-const NR_UE_DL_BWP_t *BWP,
+const NR_UE_DL_BWP_t *dl_bwp,
nfapi_nr_dl_dci_pdu_t *pdcch_dci_pdu,
dci_pdu_rel15_t *dci_pdu_rel15,
int dci_formats,
@@ -296,7 +296,7 @@ void fill_dci_pdu_rel15(const NR_ServingCellConfigCommon_t *scc,
uint16_t cset0_bwp_size);
void prepare_dci(const NR_CellGroupConfig_t *CellGroup,
-const NR_UE_DL_BWP_t *BWP,
+const NR_UE_DL_BWP_t *dl_bwp,
const NR_ControlResourceSet_t *coreset,
dci_pdu_rel15_t *dci_pdu_rel15,
nr_dci_format_t format);
@@ -325,14 +325,14 @@ long get_K2(NR_PUSCH_TimeDomainResourceAllocationList_t *tdaList,
int time_domain_assignment,
int mu);
-void nr_set_pdsch_semi_static(const NR_UE_DL_BWP_t *BWP,
+void nr_set_pdsch_semi_static(const NR_UE_DL_BWP_t *dl_bwp,
const NR_ServingCellConfigCommon_t *scc,
int tda,
uint8_t layers,
NR_UE_sched_ctrl_t *sched_ctrl,
NR_pdsch_semi_static_t *ps);
-void nr_set_pusch_semi_static(const NR_UE_UL_BWP_t *BWP,
+void nr_set_pusch_semi_static(const NR_UE_UL_BWP_t *ul_bwp,
const NR_ServingCellConfigCommon_t *scc,
int tda,
uint8_t nrOfLayers,
......
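
As a side note on the scheduling arithmetic that appears unchanged in the UL preprocessor hunks above (get_K2 and the sched_frame/sched_slot computation), the following self-contained sketch reproduces the frame/slot wrap-around logic; the table and values below are simplified stand-ins for illustration, not OAI code:

#include <stdio.h>

/* simplified stand-in for OAI's nr_slots_per_frame[mu] table (mu 0..3) */
static const int nr_slots_per_frame[4] = {10, 20, 40, 80};

int main(void)
{
  const int mu = 1;                          /* 30 kHz SCS: 20 slots per frame */
  const int frame = 511, slot = 19, K2 = 3;  /* illustrative DCI slot and K2 offset */
  /* same pattern as in the preprocessors above: spill into the next frame when
   * slot + K2 crosses the frame boundary, keeping the 1024-frame wrap */
  const int sched_frame = (frame + (slot + K2 >= nr_slots_per_frame[mu])) & 1023;
  const int sched_slot = (slot + K2) % nr_slots_per_frame[mu];
  printf("PUSCH scheduled at %d.%d\n", sched_frame, sched_slot);  /* prints 512.2 */
  return 0;
}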