Commit bdbd6989 authored by francescomani

removing pusch semi-static

parent 63327ce3
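
For orientation before the hunks below: this commit drops the cached NR_pusch_semi_static_t block and instead derives the PUSCH time-domain and DMRS parameters per scheduling decision, storing them inside NR_sched_pusch_t. A minimal sketch of the resulting data model, condensed from the header hunks at the end of this diff (the struct fields are shown as they appear there; surrounding OAI types such as NR_DMRS_UplinkConfig_t and nfapi_nr_dmrs_type_e are assumed to come from the existing headers):

typedef struct NR_pusch_tda_info {
  int mapping_type;
  int startSymbolIndex;
  int nrOfSymbols;
} NR_pusch_tda_info_t;

typedef struct NR_pusch_dmrs {
  uint8_t N_PRB_DMRS;
  uint8_t num_dmrs_symb;
  uint16_t ul_dmrs_symb_pos;
  uint8_t num_dmrs_cdm_grps_no_data;
  nfapi_nr_dmrs_type_e dmrs_config_type;
  NR_DMRS_UplinkConfig_t *NR_DMRS_UplinkConfig;
} NR_pusch_dmrs_t;

/* NR_sched_pusch_t gains nrOfLayers, tda_info and dmrs_info, and these two
 * helpers replace nr_set_pusch_semi_static(): */
void nr_get_pusch_tda_info(const NR_UE_UL_BWP_t *ul_bwp, int tda, NR_pusch_tda_info_t *tda_info);
void set_ul_dmrs_params(NR_pusch_dmrs_t *dmrs, const NR_ServingCellConfigCommon_t *scc,
                        NR_UE_UL_BWP_t *ul_bwp, NR_pusch_tda_info_t *tda_info, int Layers);
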
......@@ -381,14 +381,6 @@ bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_
if (!is_xlsch_in_slot(ulsch_slot_bitmap, sched_slot))
return false;
/* we want to avoid a lengthy deduction of DMRS and other parameters in
* every TTI if we can save it, so check whether TDA, or
* num_dmrs_cdm_grps_no_data has changed and only then recompute */
NR_pusch_semi_static_t *ps = &sched_ctrl->pusch_semi_static;
if (ps->time_domain_allocation != tda
|| ps->nrOfLayers != target_ul_Nl)
nr_set_pusch_semi_static(ul_bwp, scc, tda, target_ul_Nl,ps);
uint16_t rbStart = 0;
uint16_t rbSize;
......@@ -400,10 +392,13 @@ bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_
else
rbSize = target_ul_bw;
NR_pusch_tda_info_t *tda_info = &sched_ctrl->sched_pusch.tda_info;
nr_get_pusch_tda_info(ul_bwp, tda, tda_info);
uint16_t *vrb_map_UL =
&RC.nrmac[module_id]->common_channels[CC_id].vrb_map_UL[sched_slot * MAX_BWP_SIZE];
for (int i = rbStart; i < rbStart + rbSize; ++i) {
if ((vrb_map_UL[i+BWPStart] & SL_to_bitmap(ps->startSymbolIndex, ps->nrOfSymbols)) != 0) {
if ((vrb_map_UL[i+BWPStart] & SL_to_bitmap(tda_info->startSymbolIndex, tda_info->nrOfSymbols)) != 0) {
LOG_E(MAC,
"%s(): %4d.%2d RB %d is already reserved, cannot schedule UE\n",
__func__,
......@@ -455,7 +450,7 @@ bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_
sched_pusch->ul_harq_pid = sched_ctrl->retrans_ul_harq.head;
/* Calculate TBS from MCS */
ps->nrOfLayers = target_ul_Nl;
sched_pusch->nrOfLayers = target_ul_Nl;
sched_pusch->R = nr_get_code_rate_ul(mcs, ul_bwp->mcs_table);
sched_pusch->Qm = nr_get_Qm_ul(mcs, ul_bwp->mcs_table);
if (ul_bwp->pusch_Config->tp_pi2BPSK
......@@ -463,14 +458,22 @@ bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_
sched_pusch->R >>= 1;
sched_pusch->Qm <<= 1;
}
NR_pusch_dmrs_t *dmrs = &sched_ctrl->sched_pusch.dmrs_info;
set_ul_dmrs_params(dmrs,
scc,
ul_bwp,
tda_info,
sched_pusch->nrOfLayers);
sched_pusch->tb_size = nr_compute_tbs(sched_pusch->Qm,
sched_pusch->R,
sched_pusch->rbSize,
ps->nrOfSymbols,
ps->N_PRB_DMRS * ps->num_dmrs_symb,
tda_info->nrOfSymbols,
dmrs->N_PRB_DMRS * dmrs->num_dmrs_symb,
0, // nb_rb_oh
0,
ps->nrOfLayers /* NrOfLayers */)
sched_pusch->nrOfLayers /* NrOfLayers */)
>> 3;
/* mark the corresponding RBs as used */
......@@ -481,6 +484,6 @@ bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_
sched_ctrl->aggregation_level);
for (int rb = rbStart; rb < rbStart + rbSize; rb++)
vrb_map_UL[rb+BWPStart] |= SL_to_bitmap(ps->startSymbolIndex, ps->nrOfSymbols);
vrb_map_UL[rb+BWPStart] |= SL_to_bitmap(tda_info->startSymbolIndex, tda_info->nrOfSymbols);
return true;
}
......@@ -562,55 +562,58 @@ void nr_get_pdsch_tda_info(const NR_UE_DL_BWP_t *dl_bwp,
SLIV2SL(startSymbolAndLength, &tda_info->startSymbolIndex, &tda_info->nrOfSymbols);
}
void nr_set_pusch_semi_static(const NR_UE_UL_BWP_t *ul_bwp,
const NR_ServingCellConfigCommon_t *scc,
int tda,
uint8_t nrOfLayers,
NR_pusch_semi_static_t *ps) {
void nr_get_pusch_tda_info(const NR_UE_UL_BWP_t *ul_bwp,
int tda,
NR_pusch_tda_info_t *tda_info) {
ps->time_domain_allocation = tda;
NR_PUSCH_TimeDomainResourceAllocationList_t *tdaList = ul_bwp->tdaList;
AssertFatal(tda < tdaList->list.count, "time_domain_allocation %d>=%d\n", tda, tdaList->list.count);
tda_info->mapping_type = tdaList->list.array[tda]->mappingType;
const int startSymbolAndLength = tdaList->list.array[tda]->startSymbolAndLength;
SLIV2SL(startSymbolAndLength, &tda_info->startSymbolIndex, &tda_info->nrOfSymbols);
}
const int startSymbolAndLength = ul_bwp->tdaList->list.array[tda]->startSymbolAndLength;
SLIV2SL(startSymbolAndLength,
&ps->startSymbolIndex,
&ps->nrOfSymbols);
void set_ul_dmrs_params(NR_pusch_dmrs_t *dmrs,
const NR_ServingCellConfigCommon_t *scc,
NR_UE_UL_BWP_t *ul_bwp,
NR_pusch_tda_info_t *tda_info,
int Layers) {
ps->nrOfLayers = nrOfLayers;
// TODO setting of cdm groups with no data to be redone for MIMO
if (ul_bwp->transform_precoding || nrOfLayers<3)
ps->num_dmrs_cdm_grps_no_data = (ul_bwp->dci_format == NR_UL_DCI_FORMAT_0_1) ? 1 : (ps->nrOfSymbols == 2 ? 1 : 2);
if (ul_bwp->transform_precoding || Layers<3)
dmrs->num_dmrs_cdm_grps_no_data = (ul_bwp->dci_format == NR_UL_DCI_FORMAT_0_1) ? 1 : (tda_info->nrOfSymbols == 2 ? 1 : 2);
else
ps->num_dmrs_cdm_grps_no_data = 2;
dmrs->num_dmrs_cdm_grps_no_data = 2;
/* DMRS calculations */
ps->mapping_type = ul_bwp->tdaList->list.array[tda]->mappingType;
ps->NR_DMRS_UplinkConfig = ul_bwp->pusch_Config ?
(ps->mapping_type == NR_PUSCH_TimeDomainResourceAllocation__mappingType_typeA ?
NR_DMRS_UplinkConfig_t *NR_DMRS_UplinkConfig = ul_bwp->pusch_Config ?
(tda_info->mapping_type == NR_PUSCH_TimeDomainResourceAllocation__mappingType_typeA ?
ul_bwp->pusch_Config->dmrs_UplinkForPUSCH_MappingTypeA->choice.setup :
ul_bwp->pusch_Config->dmrs_UplinkForPUSCH_MappingTypeB->choice.setup) : NULL;
ps->dmrs_config_type = ps->NR_DMRS_UplinkConfig ? ((ps->NR_DMRS_UplinkConfig->dmrs_Type == NULL ? 0 : 1)) : 0;
const pusch_dmrs_AdditionalPosition_t additional_pos =
ps->NR_DMRS_UplinkConfig ? (ps->NR_DMRS_UplinkConfig->dmrs_AdditionalPosition == NULL
? 2
: (*ps->NR_DMRS_UplinkConfig->dmrs_AdditionalPosition ==
NR_DMRS_UplinkConfig__dmrs_AdditionalPosition_pos3
? 3
: *ps->NR_DMRS_UplinkConfig->dmrs_AdditionalPosition)):2;
const pusch_maxLength_t pusch_maxLength =
ps->NR_DMRS_UplinkConfig ? (ps->NR_DMRS_UplinkConfig->maxLength == NULL ? 1 : 2) : 1;
ps->ul_dmrs_symb_pos = get_l_prime(ps->nrOfSymbols,
ps->mapping_type,
additional_pos,
pusch_maxLength,
ps->startSymbolIndex,
scc->dmrs_TypeA_Position);
dmrs->dmrs_config_type = NR_DMRS_UplinkConfig ? ((NR_DMRS_UplinkConfig->dmrs_Type == NULL ? 0 : 1)) : 0;
const pusch_dmrs_AdditionalPosition_t additional_pos = NR_DMRS_UplinkConfig ? (NR_DMRS_UplinkConfig->dmrs_AdditionalPosition == NULL ?
2 : (*NR_DMRS_UplinkConfig->dmrs_AdditionalPosition ==
NR_DMRS_UplinkConfig__dmrs_AdditionalPosition_pos3 ?
3 : *NR_DMRS_UplinkConfig->dmrs_AdditionalPosition)) : 2;
const pusch_maxLength_t pusch_maxLength = NR_DMRS_UplinkConfig ? (NR_DMRS_UplinkConfig->maxLength == NULL ? 1 : 2) : 1;
dmrs->ul_dmrs_symb_pos = get_l_prime(tda_info->nrOfSymbols,
tda_info->mapping_type,
additional_pos,
pusch_maxLength,
tda_info->startSymbolIndex,
scc->dmrs_TypeA_Position);
uint8_t num_dmrs_symb = 0;
for(int i = ps->startSymbolIndex; i < ps->startSymbolIndex + ps->nrOfSymbols; i++)
num_dmrs_symb += (ps->ul_dmrs_symb_pos >> i) & 1;
ps->num_dmrs_symb = num_dmrs_symb;
ps->N_PRB_DMRS = ps->dmrs_config_type == 0
? ps->num_dmrs_cdm_grps_no_data * 6
: ps->num_dmrs_cdm_grps_no_data * 4;
for(int i = tda_info->startSymbolIndex; i < tda_info->startSymbolIndex + tda_info->nrOfSymbols; i++)
num_dmrs_symb += (dmrs->ul_dmrs_symb_pos >> i) & 1;
dmrs->num_dmrs_symb = num_dmrs_symb;
dmrs->N_PRB_DMRS = dmrs->dmrs_config_type == 0 ?
dmrs->num_dmrs_cdm_grps_no_data * 6 :
dmrs->num_dmrs_cdm_grps_no_data * 4;
dmrs->NR_DMRS_UplinkConfig = NR_DMRS_UplinkConfig;
}
#define BLER_UPDATE_FRAME 10
......@@ -2496,7 +2499,7 @@ NR_UE_info_t *add_new_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rntiP, NR_CellGroupConf
/* set illegal time domain allocation to force recomputation of all fields */
sched_ctrl->sched_pdsch.time_domain_allocation = -1;
sched_ctrl->pusch_semi_static.time_domain_allocation = -1;
sched_ctrl->sched_pusch.time_domain_allocation = -1;
/* Set default BWPs */
sched_ctrl->next_dl_bwp_id = -1;
......
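
The scheduler entry points touched above (phytest preprocessor) and below (pf_ul, nr_schedule_ulsch) now follow the same pattern in every TTI instead of caching a semi-static block; a condensed sketch of that flow, assembled from the hunks in this commit (illustrative excerpt only, assuming the OAI NR MAC context around it):

sched_pusch->nrOfLayers = 1;
sched_pusch->time_domain_allocation = get_ul_tda(nrmac, scc, sched_pusch->slot);
nr_get_pusch_tda_info(current_BWP, sched_pusch->time_domain_allocation, &sched_pusch->tda_info);
set_ul_dmrs_params(&sched_pusch->dmrs_info, scc, current_BWP, &sched_pusch->tda_info, sched_pusch->nrOfLayers);
/* the symbol count and DMRS overhead then feed the TBS computation directly */
sched_pusch->tb_size = nr_compute_tbs(sched_pusch->Qm,
                                      sched_pusch->R,
                                      sched_pusch->rbSize,
                                      sched_pusch->tda_info.nrOfSymbols,
                                      sched_pusch->dmrs_info.N_PRB_DMRS * sched_pusch->dmrs_info.num_dmrs_symb,
                                      0, // nb_rb_oh
                                      0,
                                      sched_pusch->nrOfLayers) >> 3;
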
......@@ -911,7 +911,7 @@ void update_ul_ue_R_Qm(int mcs, int mcs_table, const NR_PUSCH_Config_t *pusch_Co
}
void nr_ue_max_mcs_min_rb(int mu, int ph_limit, NR_pusch_semi_static_t *ps, NR_UE_UL_BWP_t *ul_bwp, uint16_t minRb, uint32_t tbs, uint16_t *Rb, uint8_t *mcs)
void nr_ue_max_mcs_min_rb(int mu, int ph_limit, NR_sched_pusch_t *sched_pusch, NR_UE_UL_BWP_t *ul_bwp, uint16_t minRb, uint32_t tbs, uint16_t *Rb, uint8_t *mcs)
{
AssertFatal(*Rb >= minRb, "illegal Rb %d < minRb %d\n", *Rb, minRb);
AssertFatal(*mcs >= 0 && *mcs <= 28, "illegal MCS %d\n", *mcs);
......@@ -925,9 +925,9 @@ void nr_ue_max_mcs_min_rb(int mu, int ph_limit, NR_pusch_semi_static_t *ps, NR_U
int tx_power = compute_bw_factor(mu, *Rb) +
compute_delta_tf(tbs_bits,
*Rb,
ps->nrOfLayers,
ps->nrOfSymbols,
ps->N_PRB_DMRS*ps->num_dmrs_symb,
sched_pusch->nrOfLayers,
sched_pusch->tda_info.nrOfSymbols,
sched_pusch->dmrs_info.N_PRB_DMRS*sched_pusch->dmrs_info.num_dmrs_symb,
deltaMCS);
while (ph_limit < tx_power && *Rb >= minRb) {
......@@ -935,9 +935,9 @@ void nr_ue_max_mcs_min_rb(int mu, int ph_limit, NR_pusch_semi_static_t *ps, NR_U
tx_power = compute_bw_factor(mu, *Rb) +
compute_delta_tf(tbs_bits,
*Rb,
ps->nrOfLayers,
ps->nrOfSymbols,
ps->N_PRB_DMRS*ps->num_dmrs_symb,
sched_pusch->nrOfLayers,
sched_pusch->tda_info.nrOfSymbols,
sched_pusch->dmrs_info.N_PRB_DMRS*sched_pusch->dmrs_info.num_dmrs_symb,
deltaMCS);
}
......@@ -947,9 +947,9 @@ void nr_ue_max_mcs_min_rb(int mu, int ph_limit, NR_pusch_semi_static_t *ps, NR_U
tx_power = compute_bw_factor(mu, *Rb) +
compute_delta_tf(tbs_bits,
*Rb,
ps->nrOfLayers,
ps->nrOfSymbols,
ps->N_PRB_DMRS*ps->num_dmrs_symb,
sched_pusch->nrOfLayers,
sched_pusch->tda_info.nrOfSymbols,
sched_pusch->dmrs_info.N_PRB_DMRS*sched_pusch->dmrs_info.num_dmrs_symb,
deltaMCS);
}
......@@ -978,22 +978,11 @@ static bool allocate_ul_retransmission(gNB_MAC_INST *nrmac,
const uint8_t nrOfLayers = 1;
LOG_D(NR_MAC,"retInfo->time_domain_allocation = %d, tda = %d\n", retInfo->time_domain_allocation, tda);
LOG_D(NR_MAC,"tbs %d\n",retInfo->tb_size);
if (tda == retInfo->time_domain_allocation) {
/* check whether we need to switch the TDA allocation since tha last
* (re-)transmission */
NR_pusch_semi_static_t *ps = &sched_ctrl->pusch_semi_static;
if (ps->time_domain_allocation != tda
|| ps->nrOfLayers != nrOfLayers) {
nr_set_pusch_semi_static(&UE->current_UL_BWP,
scc,
tda,
nrOfLayers,
ps);
}
if (tda == retInfo->time_domain_allocation &&
nrOfLayers == retInfo->nrOfLayers) {
/* Check the resource is enough for retransmission */
const uint16_t slbitmap = SL_to_bitmap(ps->startSymbolIndex, ps->nrOfSymbols);
const uint16_t slbitmap = SL_to_bitmap(retInfo->tda_info.startSymbolIndex, retInfo->tda_info.nrOfSymbols);
while (rbStart < bwpSize && (rballoc_mask[rbStart] & slbitmap) != slbitmap)
rbStart++;
if (rbStart + retInfo->rbSize > bwpSize) {
......@@ -1002,15 +991,18 @@ static bool allocate_ul_retransmission(gNB_MAC_INST *nrmac,
}
LOG_D(NR_MAC, "%s(): retransmission keeping TDA %d and TBS %d\n", __func__, tda, retInfo->tb_size);
} else {
NR_pusch_semi_static_t temp_ps;
nr_set_pusch_semi_static(&UE->current_UL_BWP,
scc,
tda,
nrOfLayers,
&temp_ps);
NR_pusch_tda_info_t tda_info;
nr_get_pusch_tda_info(&UE->current_UL_BWP, tda, &tda_info);
NR_pusch_dmrs_t dmrs_info;
set_ul_dmrs_params(&dmrs_info,
scc,
&UE->current_UL_BWP,
&tda_info,
nrOfLayers);
/* the retransmission will use a different time domain allocation, check
* that we have enough resources */
const uint16_t slbitmap = SL_to_bitmap(temp_ps.startSymbolIndex, temp_ps.nrOfSymbols);
const uint16_t slbitmap = SL_to_bitmap(tda_info.startSymbolIndex, tda_info.nrOfSymbols);
while (rbStart < bwpSize && (rballoc_mask[rbStart] & slbitmap) != slbitmap)
rbStart++;
int rbSize = 0;
......@@ -1021,8 +1013,8 @@ static bool allocate_ul_retransmission(gNB_MAC_INST *nrmac,
bool success = nr_find_nb_rb(retInfo->Qm,
retInfo->R,
1, // layers
temp_ps.nrOfSymbols,
temp_ps.N_PRB_DMRS * temp_ps.num_dmrs_symb,
tda_info.nrOfSymbols,
dmrs_info.N_PRB_DMRS * dmrs_info.num_dmrs_symb,
retInfo->tb_size,
1, /* minimum of 1RB: need to find exact TBS, don't preclude any number */
rbSize,
......@@ -1038,7 +1030,8 @@ static bool allocate_ul_retransmission(gNB_MAC_INST *nrmac,
retInfo->tb_size = new_tbs;
retInfo->rbSize = new_rbSize;
retInfo->time_domain_allocation = tda;
sched_ctrl->pusch_semi_static = temp_ps;
retInfo->dmrs_info = dmrs_info;
retInfo->tda_info = tda_info;
}
/* Find a free CCE */
......@@ -1096,7 +1089,7 @@ static bool allocate_ul_retransmission(gNB_MAC_INST *nrmac,
/* Mark the corresponding RBs as used */
n_rb_sched -= sched_pusch->rbSize;
for (int rb = 0; rb < sched_ctrl->sched_pusch.rbSize; rb++)
rballoc_mask[rb + sched_ctrl->sched_pusch.rbStart] ^= SL_to_bitmap(sched_ctrl->pusch_semi_static.startSymbolIndex, sched_ctrl->pusch_semi_static.nrOfSymbols);
rballoc_mask[rb + sched_ctrl->sched_pusch.rbStart] ^= SL_to_bitmap(sched_pusch->tda_info.startSymbolIndex, sched_pusch->tda_info.nrOfSymbols);
return true;
}
......@@ -1143,7 +1136,6 @@ void pf_ul(module_id_t module_id,
const uint16_t bwpSize = current_BWP->BWPSize;
NR_sched_pusch_t *sched_pusch = &sched_ctrl->sched_pusch;
NR_pusch_semi_static_t *ps = &sched_ctrl->pusch_semi_static;
const NR_mac_dir_stats_t *stats = &UE->mac_stats.ul;
/* Calculate throughput */
......@@ -1235,24 +1227,20 @@ void pf_ul(module_id_t module_id,
if (remainUEs == 0)
return;
/* Save PUSCH field */
/* we want to avoid a lengthy deduction of DMRS and other parameters in
* every TTI if we can save it, so check whether TDA, or
* num_dmrs_cdm_grps_no_data has changed and only then recompute */
const uint8_t nrOfLayers = 1;
const int tda = get_ul_tda(nrmac, scc, sched_pusch->slot);
if (ps->time_domain_allocation != tda
|| ps->nrOfLayers != nrOfLayers) {
nr_set_pusch_semi_static(current_BWP,
scc,
tda,
nrOfLayers,
ps);
}
sched_pusch->nrOfLayers = 1;
sched_pusch->time_domain_allocation = get_ul_tda(nrmac, scc, sched_pusch->slot);
NR_pusch_tda_info_t *tda_info = &sched_pusch->tda_info;
nr_get_pusch_tda_info(current_BWP, sched_pusch->time_domain_allocation, tda_info);
NR_pusch_dmrs_t *dmrs = &sched_pusch->dmrs_info;
set_ul_dmrs_params(dmrs,
scc,
current_BWP,
tda_info,
sched_pusch->nrOfLayers);
LOG_D(NR_MAC,"Looking for min_rb %d RBs, starting at %d num_dmrs_cdm_grps_no_data %d\n",
min_rb, rbStart, ps->num_dmrs_cdm_grps_no_data);
const uint16_t slbitmap = SL_to_bitmap(ps->startSymbolIndex, ps->nrOfSymbols);
min_rb, rbStart, dmrs->num_dmrs_cdm_grps_no_data);
const uint16_t slbitmap = SL_to_bitmap(tda_info->startSymbolIndex, tda_info->nrOfSymbols);
while (rbStart < bwpSize && (rballoc_mask[rbStart] & slbitmap) != slbitmap)
rbStart++;
if (rbStart + min_rb >= bwpSize) {
......@@ -1276,11 +1264,11 @@ void pf_ul(module_id_t module_id,
sched_pusch->tb_size = nr_compute_tbs(sched_pusch->Qm,
sched_pusch->R,
sched_pusch->rbSize,
ps->nrOfSymbols,
ps->N_PRB_DMRS * ps->num_dmrs_symb,
tda_info->nrOfSymbols,
dmrs->N_PRB_DMRS * dmrs->num_dmrs_symb,
0, // nb_rb_oh
0,
ps->nrOfLayers)
sched_pusch->nrOfLayers)
>> 3;
/* Mark the corresponding RBs as used */
......@@ -1340,26 +1328,21 @@ void pf_ul(module_id_t module_id,
const uint16_t bwpSize = current_BWP->BWPSize;
NR_sched_pusch_t *sched_pusch = &sched_ctrl->sched_pusch;
NR_pusch_semi_static_t *ps = &sched_ctrl->pusch_semi_static;
/* Save PUSCH field */
/* we want to avoid a lengthy deduction of DMRS and other parameters in
* every TTI if we can save it, so check whether TDA, or
* num_dmrs_cdm_grps_no_data has changed and only then recompute */
const uint8_t nrOfLayers = 1;
const int tda = get_ul_tda(nrmac, scc, sched_pusch->slot);
if (ps->time_domain_allocation != tda
|| ps->nrOfLayers != nrOfLayers) {
nr_set_pusch_semi_static(current_BWP,
scc,
tda,
nrOfLayers,
ps);
}
sched_pusch->nrOfLayers = 1;
sched_pusch->time_domain_allocation = get_ul_tda(nrmac, scc, sched_pusch->slot);
NR_pusch_tda_info_t *tda_info = &sched_pusch->tda_info;
nr_get_pusch_tda_info(current_BWP, sched_pusch->time_domain_allocation, tda_info);
NR_pusch_dmrs_t *dmrs = &sched_pusch->dmrs_info;
set_ul_dmrs_params(dmrs,
scc,
current_BWP,
tda_info,
sched_pusch->nrOfLayers);
update_ul_ue_R_Qm(sched_pusch->mcs, current_BWP->mcs_table, current_BWP->pusch_Config, &sched_pusch->R, &sched_pusch->Qm);
int rbStart = 0;
const uint16_t slbitmap = SL_to_bitmap(ps->startSymbolIndex, ps->nrOfSymbols);
const uint16_t slbitmap = SL_to_bitmap(tda_info->startSymbolIndex, tda_info->nrOfSymbols);
while (rbStart < bwpSize && (rballoc_mask[rbStart] & slbitmap) != slbitmap)
rbStart++;
sched_pusch->rbStart = rbStart;
......@@ -1381,7 +1364,7 @@ void pf_ul(module_id_t module_id,
sched_pusch->mu = scc->uplinkConfigCommon->initialUplinkBWP->genericParameters.subcarrierSpacing;
if(sched_ctrl->pcmax!=0 ||
sched_ctrl->ph!=0) // verify if the PHR related parameter have been initialized
nr_ue_max_mcs_min_rb(sched_pusch->mu, sched_ctrl->ph, ps, current_BWP, min_rbSize, B, &max_rbSize, &sched_pusch->mcs);
nr_ue_max_mcs_min_rb(current_BWP->scs, sched_ctrl->ph, sched_pusch, current_BWP, min_rbSize, B, &max_rbSize, &sched_pusch->mcs);
if (sched_pusch->mcs < sched_ctrl->ul_bler_stats.mcs)
sched_ctrl->ul_bler_stats.mcs = sched_pusch->mcs; /* force estimated MCS down */
......@@ -1392,8 +1375,8 @@ void pf_ul(module_id_t module_id,
nr_find_nb_rb(sched_pusch->Qm,
sched_pusch->R,
1, // layers
ps->nrOfSymbols,
ps->N_PRB_DMRS * ps->num_dmrs_symb,
tda_info->nrOfSymbols,
dmrs->N_PRB_DMRS * dmrs->num_dmrs_symb,
B,
min_rbSize,
max_rbSize,
......@@ -1403,7 +1386,7 @@ void pf_ul(module_id_t module_id,
sched_pusch->rbSize = rbSize;
sched_pusch->tb_size = TBS;
LOG_D(NR_MAC,"rbSize %d (max_rbSize %d), TBS %d, est buf %d, sched_ul %d, B %d, CCE %d, num_dmrs_symb %d, N_PRB_DMRS %d\n",
rbSize, max_rbSize,sched_pusch->tb_size, sched_ctrl->estimated_ul_buffer, sched_ctrl->sched_ul_bytes, B,sched_ctrl->cce_index,ps->num_dmrs_symb,ps->N_PRB_DMRS);
rbSize, max_rbSize,sched_pusch->tb_size, sched_ctrl->estimated_ul_buffer, sched_ctrl->sched_ul_bytes, B,sched_ctrl->cce_index,dmrs->num_dmrs_symb,dmrs->N_PRB_DMRS);
/* Mark the corresponding RBs as used */
......@@ -1628,11 +1611,6 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot)
int rnti_types[2] = { NR_RNTI_C, 0 };
/* pre-computed PUSCH values that only change if time domain allocation,
* DCI format, or DMRS parameters change. Updated in the preprocessor
* through nr_set_pusch_semi_static() */
NR_pusch_semi_static_t *ps = &sched_ctrl->pusch_semi_static;
/* Statistics */
AssertFatal(cur_harq->round < nr_mac->ul_bler.harq_round_max, "Indexing ulsch_rounds[%d] is out of bounds\n", cur_harq->round);
UE->mac_stats.ul.rounds[cur_harq->round]++;
......@@ -1643,7 +1621,7 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot)
cur_harq->sched_pusch = *sched_pusch;
/* save which time allocation has been used, to be used on
* retransmissions */
cur_harq->sched_pusch.time_domain_allocation = ps->time_domain_allocation;
cur_harq->sched_pusch.time_domain_allocation = sched_pusch->time_domain_allocation;
sched_ctrl->sched_ul_bytes += sched_pusch->tb_size;
UE->mac_stats.ul.total_rbs += sched_pusch->rbSize;
......@@ -1675,12 +1653,12 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot)
sched_ctrl->aggregation_level,
sched_pusch->rbStart,
sched_pusch->rbSize,
ps->startSymbolIndex,
ps->nrOfSymbols,
ps->ul_dmrs_symb_pos,
sched_pusch->tda_info.startSymbolIndex,
sched_pusch->tda_info.nrOfSymbols,
sched_pusch->dmrs_info.ul_dmrs_symb_pos,
sched_pusch->mcs,
ps->nrOfLayers,
ps->num_dmrs_cdm_grps_no_data,
sched_pusch->nrOfLayers,
sched_pusch->dmrs_info.num_dmrs_cdm_grps_no_data,
sched_pusch->tb_size,
harq_id,
cur_harq->round,
......@@ -1734,18 +1712,19 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot)
pusch_pdu->data_scrambling_id = *current_BWP->pusch_Config->dataScramblingIdentityPUSCH;
else
pusch_pdu->data_scrambling_id = *scc->physCellId;
pusch_pdu->nrOfLayers = ps->nrOfLayers;
pusch_pdu->num_dmrs_cdm_grps_no_data = ps->num_dmrs_cdm_grps_no_data;
pusch_pdu->nrOfLayers = sched_pusch->nrOfLayers;
pusch_pdu->num_dmrs_cdm_grps_no_data = sched_pusch->dmrs_info.num_dmrs_cdm_grps_no_data;
/* FAPI: DMRS */
pusch_pdu->ul_dmrs_symb_pos = ps->ul_dmrs_symb_pos;
pusch_pdu->dmrs_config_type = ps->dmrs_config_type;
pusch_pdu->ul_dmrs_symb_pos = sched_pusch->dmrs_info.ul_dmrs_symb_pos;
pusch_pdu->dmrs_config_type = sched_pusch->dmrs_info.dmrs_config_type;
const NR_DMRS_UplinkConfig_t *NR_DMRS_UplinkConfig = sched_pusch->dmrs_info.NR_DMRS_UplinkConfig;
if (pusch_pdu->transform_precoding) { // transform precoding disabled
long *scramblingid=NULL;
if (ps->NR_DMRS_UplinkConfig && pusch_pdu->scid == 0)
scramblingid = ps->NR_DMRS_UplinkConfig->transformPrecodingDisabled->scramblingID0;
else if (ps->NR_DMRS_UplinkConfig)
scramblingid = ps->NR_DMRS_UplinkConfig->transformPrecodingDisabled->scramblingID1;
if (NR_DMRS_UplinkConfig && pusch_pdu->scid == 0)
scramblingid = NR_DMRS_UplinkConfig->transformPrecodingDisabled->scramblingID0;
else if (NR_DMRS_UplinkConfig)
scramblingid = NR_DMRS_UplinkConfig->transformPrecodingDisabled->scramblingID1;
if (scramblingid == NULL)
pusch_pdu->ul_dmrs_scrambling_id = *scc->physCellId;
else
......@@ -1753,14 +1732,14 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot)
}
else {
pusch_pdu->ul_dmrs_scrambling_id = *scc->physCellId;
if (ps->NR_DMRS_UplinkConfig && ps->NR_DMRS_UplinkConfig->transformPrecodingEnabled->nPUSCH_Identity != NULL)
pusch_pdu->pusch_identity = *ps->NR_DMRS_UplinkConfig->transformPrecodingEnabled->nPUSCH_Identity;
else if (ps->NR_DMRS_UplinkConfig)
if (NR_DMRS_UplinkConfig && NR_DMRS_UplinkConfig->transformPrecodingEnabled->nPUSCH_Identity != NULL)
pusch_pdu->pusch_identity = *NR_DMRS_UplinkConfig->transformPrecodingEnabled->nPUSCH_Identity;
else if (NR_DMRS_UplinkConfig)
pusch_pdu->pusch_identity = *scc->physCellId;
}
pusch_pdu->scid = 0; // DMRS sequence initialization [TS38.211, sec 6.4.1.1.1]
pusch_pdu->num_dmrs_cdm_grps_no_data = ps->num_dmrs_cdm_grps_no_data;
pusch_pdu->dmrs_ports = ((1<<ps->nrOfLayers) - 1);
pusch_pdu->num_dmrs_cdm_grps_no_data = sched_pusch->dmrs_info.num_dmrs_cdm_grps_no_data;
pusch_pdu->dmrs_ports = ((1<<sched_pusch->nrOfLayers) - 1);
/* FAPI: Pusch Allocation in frequency domain */
pusch_pdu->resource_alloc = 1; //type 1
......@@ -1773,8 +1752,8 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot)
pusch_pdu->frequency_hopping = 1;
/* FAPI: Resource Allocation in time domain */
pusch_pdu->start_symbol_index = ps->startSymbolIndex;
pusch_pdu->nr_of_symbols = ps->nrOfSymbols;
pusch_pdu->start_symbol_index = sched_pusch->tda_info.startSymbolIndex;
pusch_pdu->nr_of_symbols = sched_pusch->tda_info.nrOfSymbols;
/* PUSCH PDU */
AssertFatal(cur_harq->round < nr_mac->ul_bler.harq_round_max, "Indexing nr_rv_round_map[%d] is out of bounds\n", cur_harq->round%4);
......@@ -1810,8 +1789,8 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot)
pusch_pdu->dfts_ofdm.low_papr_group_number = pusch_pdu->pusch_identity % 30;
// V as specified in section 6.4.1.1.1.2 in 38.211 V = 0 if sequence hopping and group hopping are disabled
if ((ps->NR_DMRS_UplinkConfig==NULL) || ((ps->NR_DMRS_UplinkConfig->transformPrecodingEnabled->sequenceGroupHopping == NULL) &&
(ps->NR_DMRS_UplinkConfig->transformPrecodingEnabled->sequenceHopping == NULL)))
if ((NR_DMRS_UplinkConfig==NULL) || ((NR_DMRS_UplinkConfig->transformPrecodingEnabled->sequenceGroupHopping == NULL) &&
(NR_DMRS_UplinkConfig->transformPrecodingEnabled->sequenceHopping == NULL)))
pusch_pdu->dfts_ofdm.low_papr_sequence_number = 0;
else
AssertFatal(1==0,"SequenceGroupHopping or sequenceHopping are NOT Supported\n");
......@@ -1822,10 +1801,10 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot)
/*-----------------------------------------------------------------------------*/
/* PUSCH PTRS */
if (ps->NR_DMRS_UplinkConfig && ps->NR_DMRS_UplinkConfig->phaseTrackingRS != NULL) {
if (NR_DMRS_UplinkConfig && NR_DMRS_UplinkConfig->phaseTrackingRS != NULL) {
bool valid_ptrs_setup = false;
pusch_pdu->pusch_ptrs.ptrs_ports_list = (nfapi_nr_ptrs_ports_t *) malloc(2*sizeof(nfapi_nr_ptrs_ports_t));
valid_ptrs_setup = set_ul_ptrs_values(ps->NR_DMRS_UplinkConfig->phaseTrackingRS->choice.setup,
valid_ptrs_setup = set_ul_ptrs_values(NR_DMRS_UplinkConfig->phaseTrackingRS->choice.setup,
pusch_pdu->rb_size, pusch_pdu->mcs_index, pusch_pdu->mcs_table,
&pusch_pdu->pusch_ptrs.ptrs_freq_density,&pusch_pdu->pusch_ptrs.ptrs_time_density,
&pusch_pdu->pusch_ptrs.ptrs_ports_list->ptrs_re_offset,&pusch_pdu->pusch_ptrs.num_ptrs_ports,
......@@ -1881,7 +1860,7 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot)
scc,
pusch_pdu,
&uldci_payload,
ps->time_domain_allocation,
sched_pusch->time_domain_allocation,
UE->UE_sched_ctrl.tpc0,
current_BWP);
fill_dci_pdu_rel15(scc,
......
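
At the end of the nr_schedule_ulsch() hunks above, the FAPI PUSCH PDU is filled from the per-decision sched_pusch fields rather than the removed pusch_semi_static block; condensed here for reference (illustrative excerpt of assignments already visible in the hunks):

pusch_pdu->nrOfLayers                = sched_pusch->nrOfLayers;
pusch_pdu->num_dmrs_cdm_grps_no_data = sched_pusch->dmrs_info.num_dmrs_cdm_grps_no_data;
pusch_pdu->ul_dmrs_symb_pos          = sched_pusch->dmrs_info.ul_dmrs_symb_pos;
pusch_pdu->dmrs_config_type          = sched_pusch->dmrs_info.dmrs_config_type;
pusch_pdu->start_symbol_index        = sched_pusch->tda_info.startSymbolIndex;
pusch_pdu->nr_of_symbols             = sched_pusch->tda_info.nrOfSymbols;
pusch_pdu->dmrs_ports                = (1 << sched_pusch->nrOfLayers) - 1;
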
......@@ -328,11 +328,15 @@ void nr_get_pdsch_tda_info(const NR_UE_DL_BWP_t *dl_bwp,
int tda,
NR_pdsch_tda_info_t *tda_info);
void nr_set_pusch_semi_static(const NR_UE_UL_BWP_t *ul_bwp,
const NR_ServingCellConfigCommon_t *scc,
int tda,
uint8_t nrOfLayers,
NR_pusch_semi_static_t *ps);
void nr_get_pusch_tda_info(const NR_UE_UL_BWP_t *ul_bwp,
int tda,
NR_pusch_tda_info_t *tda_info);
void set_ul_dmrs_params(NR_pusch_dmrs_t *dmrs,
const NR_ServingCellConfigCommon_t *scc,
NR_UE_UL_BWP_t *ul_bwp,
NR_pusch_tda_info_t *tda_info,
int Layers);
uint8_t nr_get_tpc(int target, uint8_t cqi, int incr);
......
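
As a numeric illustration of the DMRS overhead that set_ul_dmrs_params() computes and nr_compute_tbs()/nr_find_nb_rb() consume (the configuration values below are assumed for the example, not taken from this commit):

/* DMRS configuration type 1 (dmrs_config_type == 0) with 2 CDM groups without
 * data occupies 2 * 6 = 12 REs per PRB in every DMRS symbol. */
const uint8_t  num_dmrs_cdm_grps_no_data = 2;   /* assumed */
const uint16_t dmrs_config_type = 0;            /* assumed: type 1 */
const uint8_t  N_PRB_DMRS = dmrs_config_type == 0 ? num_dmrs_cdm_grps_no_data * 6
                                                  : num_dmrs_cdm_grps_no_data * 4;  /* = 12 */
const uint8_t  num_dmrs_symb = 2;               /* assumed, counted from ul_dmrs_symb_pos */
const uint16_t dmrs_re_per_prb = N_PRB_DMRS * num_dmrs_symb;  /* = 24 REs of overhead per PRB */
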
......@@ -373,23 +373,20 @@ typedef struct NR_sched_pucch {
int start_symb;
} NR_sched_pucch_t;
/* PUSCH semi-static configuration: as long as the TDA and DCI format remain
* the same over the same uBWP and search space, there is no need to
* recalculate all S/L, MCS table, or DMRS-related parameters over and over
* again. Hence, we store them in this struct for easy reference. */
typedef struct NR_pusch_semi_static_t {
int time_domain_allocation;
uint8_t nrOfLayers;
uint8_t num_dmrs_cdm_grps_no_data;
typedef struct NR_pusch_tda_info {
int mapping_type;
int startSymbolIndex;
int nrOfSymbols;
long mapping_type;
NR_DMRS_UplinkConfig_t *NR_DMRS_UplinkConfig;
uint16_t dmrs_config_type;
uint16_t ul_dmrs_symb_pos;
uint8_t num_dmrs_symb;
} NR_pusch_tda_info_t;
typedef struct NR_pusch_dmrs {
uint8_t N_PRB_DMRS;
} NR_pusch_semi_static_t;
uint8_t num_dmrs_symb;
uint16_t ul_dmrs_symb_pos;
uint8_t num_dmrs_cdm_grps_no_data;
nfapi_nr_dmrs_type_e dmrs_config_type;
NR_DMRS_UplinkConfig_t *NR_DMRS_UplinkConfig;
} NR_pusch_dmrs_t;
typedef struct NR_sched_pusch {
int frame;
......@@ -411,10 +408,10 @@ typedef struct NR_sched_pusch {
/// UL HARQ PID to use for this UE, or -1 for "any new"
int8_t ul_harq_pid;
/// the Time Domain Allocation used for this transmission. Note that this is
/// only important for retransmissions; otherwise, the TDA in
/// NR_pusch_semi_static_t has precedence
uint8_t nrOfLayers;
int time_domain_allocation;
NR_pusch_dmrs_t dmrs_info;
NR_pusch_tda_info_t tda_info;
} NR_sched_pusch_t;
typedef struct NR_sched_srs {
......@@ -457,9 +454,6 @@ typedef struct NR_sched_pdsch {
// pucch format allocation
uint8_t pucch_allocation;
/// the Time Domain Allocation used for this transmission. Note that this is
/// only important for retransmissions; otherwise, the TDA in
/// NR_pdsch_semi_static_t has precedence
int time_domain_allocation;
uint16_t pm_index;
......@@ -578,8 +572,6 @@ typedef struct {
/// CSI in second. This order is important for nr_acknack_scheduling()!
NR_sched_pucch_t sched_pucch[2];
/// PUSCH semi-static configuration: is not cleared across TTIs
NR_pusch_semi_static_t pusch_semi_static;
/// Sched PUSCH: scheduling decisions, copied into HARQ and cleared every TTI
NR_sched_pusch_t sched_pusch;
......
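
Finally, because the TDA and DMRS parameters now live inside NR_sched_pusch_t, the copy saved per UL HARQ process is self-contained; a condensed sketch of how allocate_ul_retransmission() (hunks further up) uses it (illustrative only, retInfo denotes the saved HARQ copy):

if (tda == retInfo->time_domain_allocation && nrOfLayers == retInfo->nrOfLayers) {
  /* same allocation as the previous round: reuse the saved tda_info as-is */
  const uint16_t slbitmap = SL_to_bitmap(retInfo->tda_info.startSymbolIndex, retInfo->tda_info.nrOfSymbols);
  /* ... check rballoc_mask against slbitmap ... */
} else {
  /* different TDA: recompute both blocks and store them back for later rounds */
  NR_pusch_tda_info_t tda_info;
  nr_get_pusch_tda_info(&UE->current_UL_BWP, tda, &tda_info);
  NR_pusch_dmrs_t dmrs_info;
  set_ul_dmrs_params(&dmrs_info, scc, &UE->current_UL_BWP, &tda_info, nrOfLayers);
  /* ... nr_find_nb_rb(...) to fit the previous TBS, then: */
  retInfo->time_domain_allocation = tda;
  retInfo->tda_info = tda_info;
  retInfo->dmrs_info = dmrs_info;
}
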