Commit 02ae1692 authored by Guhan

Rebased to latest develop and removed nr_rx_acknack function

parent 89c1a9c2
...@@ -392,6 +392,8 @@ int rrc_mac_config_req_gNB(module_id_t Mod_idP,
   if (secondaryCellGroup) {
+    RC.nrmac[Mod_idP]->secondaryCellGroupCommon = secondaryCellGroup;
     NR_UE_info_t *UE_info = &RC.nrmac[Mod_idP]->UE_info;
     if (add_ue == 1 && get_softmodem_params()->phy_test) {
       const int UE_id = add_new_nr_ue(Mod_idP, rnti, secondaryCellGroup);
...
...@@ -89,8 +89,7 @@ void dump_mac_stats(gNB_MAC_INST *gNB)
   }
 }
-void clear_nr_nfapi_information(gNB_MAC_INST * gNB,
+void clear_nr_nfapi_information(gNB_MAC_INST *gNB,
                                 int CC_idP,
                                 frame_t frameP,
                                 sub_frame_t slotP){
...@@ -103,6 +102,7 @@ void clear_nr_nfapi_information(gNB_MAC_INST *gNB,
     &gNB->UL_tti_req_ahead[CC_idP][(slotP + num_slots - 1) % num_slots];
   nfapi_nr_ul_dci_request_t *UL_dci_req = &gNB->UL_dci_req[0];
   nfapi_nr_tx_data_request_t *TX_req = &gNB->TX_req[0];
   gNB->pdu_index[CC_idP] = 0;
   if (NFAPI_MODE == NFAPI_MONOLITHIC || NFAPI_MODE == NFAPI_MODE_PNF) { // monolithic or PNF
...@@ -131,6 +131,7 @@ void clear_nr_nfapi_information(gNB_MAC_INST *gNB,
     gNB->UL_tti_req[CC_idP] = &gNB->UL_tti_req_ahead[CC_idP][slotP];
     TX_req[CC_idP].Number_of_PDUs = 0;
   }
 }
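The `UL_tti_req_ahead` access above uses modular indexing so that "the previous slot" wraps around at the frame boundary. A minimal standalone sketch of that wrap-around, using assumed values rather than the gNB structures:

```c
#include <stdio.h>

/* Hypothetical stand-in for nr_slots_per_frame[mu]: 20 slots at 30 kHz SCS. */
#define NUM_SLOTS 20

/* Index of the "previous" slot in a per-slot ring buffer such as
 * UL_tti_req_ahead[CC][...]: slot 0 wraps to the last slot of the frame. */
static int prev_slot_index(int slot)
{
  return (slot + NUM_SLOTS - 1) % NUM_SLOTS;
}

int main(void)
{
  printf("prev of slot 0 -> %d\n", prev_slot_index(0)); /* 19 */
  printf("prev of slot 7 -> %d\n", prev_slot_index(7)); /* 6  */
  return 0;
}
```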
 /*
...@@ -294,11 +295,11 @@ void schedule_nr_SRS(module_id_t module_idP, frame_t frameP, sub_frame_t subfram
 }
 */
 bool is_xlsch_in_slot(uint64_t bitmap, sub_frame_t slot) {
   return (bitmap >> slot) & 0x01;
 }
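`is_xlsch_in_slot()` treats the 64-bit bitmap as one flag per slot: bit n set means the DLSCH/ULSCH may be scheduled in slot n. A small sketch with an assumed bitmap value (not taken from any OAI configuration):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same check as is_xlsch_in_slot(): test bit 'slot' of the 64-bit map. */
static bool slot_in_bitmap(uint64_t bitmap, int slot)
{
  return (bitmap >> slot) & 0x01;
}

int main(void)
{
  /* Assumed example: slots 0..6 enabled (e.g. a DL-heavy TDD pattern). */
  const uint64_t dlsch_slot_bitmap = 0x7f;
  printf("slot 3: %d\n", slot_in_bitmap(dlsch_slot_bitmap, 3)); /* 1 */
  printf("slot 9: %d\n", slot_in_bitmap(dlsch_slot_bitmap, 9)); /* 0 */
  return 0;
}
```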
 void gNB_dlsch_ulsch_scheduler(module_id_t module_idP,
                                frame_t frame,
                                sub_frame_t slot){
...@@ -358,6 +359,7 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP,
   start_meas(&RC.nrmac[module_idP]->eNB_scheduler);
   VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_DLSCH_ULSCH_SCHEDULER,VCD_FUNCTION_IN);
   pdcp_run(&ctxt);
   /* send tick to RLC and RRC every ms */
   if ((slot & ((1 << *scc->ssbSubcarrierSpacing) - 1)) == 0) {
...@@ -394,6 +396,7 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP,
   if ((slot == 0) && (frame & 127) == 0) dump_mac_stats(RC.nrmac[module_idP]);
   // This schedules MIB
   schedule_nr_mib(module_idP, frame, slot, nr_slots_per_frame[*scc->ssbSubcarrierSpacing]);
...@@ -442,5 +445,6 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP,
   nr_schedule_pucch(module_idP, frame, slot);
   stop_meas(&RC.nrmac[module_idP]->eNB_scheduler);
   VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_DLSCH_ULSCH_SCHEDULER,VCD_FUNCTION_OUT);
 }
...@@ -1638,7 +1638,7 @@ int add_new_nr_ue(module_id_t mod_idP, rnti_t rntiP, NR_CellGroupConfig_t *secon
   add_nr_list(&UE_info->list, UE_id);
   memset(&UE_info->mac_stats[UE_id], 0, sizeof(NR_mac_stats_t));
   set_Y(UE_info->Y[UE_id], rntiP);
-  compute_csi_bitlen(secondaryCellGroup, UE_info, UE_id);
+  compute_csi_bitlen (secondaryCellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig->choice.setup, UE_info, UE_id, mod_idP);
   NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
   memset(sched_ctrl, 0, sizeof(*sched_ctrl));
   sched_ctrl->ta_frame = 0;
...
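The rebased `compute_csi_bitlen()` now receives the UE's `NR_CSI_MeasConfig_t` directly. Conceptually it sums, per configured CSI report, the widths of the CRI/RI/PMI/CQI fields. A toy illustration of that accumulation with hypothetical types (not the OAI implementation; real widths depend on codebook, CSI-RS ports and report quantity):

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-report field widths. */
typedef struct {
  uint8_t cri_bitlen;
  uint8_t ri_bitlen;
  uint8_t pmi_bitlen;
  uint8_t cqi_bitlen;
} csi_report_bitlen_t;

static unsigned total_csi_bits(const csi_report_bitlen_t *reports, int n_reports)
{
  unsigned total = 0;
  for (int i = 0; i < n_reports; i++)
    total += reports[i].cri_bitlen + reports[i].ri_bitlen
           + reports[i].pmi_bitlen + reports[i].cqi_bitlen;
  return total;
}

int main(void)
{
  /* Assumed example: one report with 0-bit CRI, 1-bit RI, no PMI, 4-bit CQI. */
  const csi_report_bitlen_t reports[] = { { 0, 1, 0, 4 } };
  printf("CSI payload: %u bits\n", total_csi_bits(reports, 1)); /* 5 */
  return 0;
}
```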
...@@ -514,7 +514,7 @@ void nr_csi_meas_reporting(int Mod_idP,
       curr_pucch->ul_slot = sched_slot;
       curr_pucch->resource_indicator = res_index;
       curr_pucch->csi_bits +=
-        nr_get_csi_bitlen(&UE_info->csi_report_template[UE_id][csi_report_id]);
+        nr_get_csi_bitlen(Mod_idP,UE_id,csi_report_id);
       // going through the list of PUCCH resources to find the one indexed by resource_id
       uint16_t *vrb_map_UL = &RC.nrmac[Mod_idP]->common_channels[0].vrb_map_UL[sched_slot * MAX_BWP_SIZE];
...@@ -947,99 +947,6 @@ void extract_pucch_csi_report (NR_CSI_MeasConfig_t *csi_MeasConfig,
 }
void nr_rx_acknack(nfapi_nr_uci_pusch_pdu_t *uci_pusch,
const nfapi_nr_uci_pucch_pdu_format_0_1_t *uci_01,
const nfapi_nr_uci_pucch_pdu_format_2_3_4_t *uci_234,
slot_t slot, NR_UE_sched_ctrl_t *sched_ctrl, NR_mac_stats_t *stats) {
// TODO
int max_harq_rounds = 4; // TODO define macro
if (uci_01 != NULL) {
// handle harq
int harq_idx_s = 0;
// iterate over received harq bits
for (int harq_bit = 0; harq_bit < uci_01->harq->num_harq; harq_bit++) {
// search for the right harq process
for (int harq_idx = harq_idx_s; harq_idx < NR_MAX_NB_HARQ_PROCESSES; harq_idx++) {
// if the gNB received ack with a good confidence
if ((slot-1) == sched_ctrl->harq_processes[harq_idx].feedback_slot) {
if ((uci_01->harq->harq_list[harq_bit].harq_value == 1) &&
(uci_01->harq->harq_confidence_level == 0)) {
// toggle NDI and reset round
sched_ctrl->harq_processes[harq_idx].ndi ^= 1;
sched_ctrl->harq_processes[harq_idx].round = 0;
}
else
sched_ctrl->harq_processes[harq_idx].round++;
sched_ctrl->harq_processes[harq_idx].is_waiting = 0;
harq_idx_s = harq_idx + 1;
// if the max harq rounds was reached
if (sched_ctrl->harq_processes[harq_idx].round == max_harq_rounds) {
sched_ctrl->harq_processes[harq_idx].ndi ^= 1;
sched_ctrl->harq_processes[harq_idx].round = 0;
stats->dlsch_errors++;
}
break;
}
// if feedback slot processing is aborted
else if (((slot-1) > sched_ctrl->harq_processes[harq_idx].feedback_slot) &&
(sched_ctrl->harq_processes[harq_idx].is_waiting)) {
sched_ctrl->harq_processes[harq_idx].round++;
if (sched_ctrl->harq_processes[harq_idx].round == max_harq_rounds) {
sched_ctrl->harq_processes[harq_idx].ndi ^= 1;
sched_ctrl->harq_processes[harq_idx].round = 0;
}
sched_ctrl->harq_processes[harq_idx].is_waiting = 0;
}
}
}
}
if (uci_234 != NULL) {
int harq_idx_s = 0;
int acknack;
// iterate over received harq bits
for (int harq_bit = 0; harq_bit < uci_234->harq.harq_bit_len; harq_bit++) {
acknack = ((uci_234->harq.harq_payload[harq_bit>>3])>>harq_bit)&0x01;
for (int harq_idx = harq_idx_s; harq_idx < NR_MAX_NB_HARQ_PROCESSES-1; harq_idx++) {
// if the gNB received ack with a good confidence or if the max harq rounds was reached
if ((slot-1) == sched_ctrl->harq_processes[harq_idx].feedback_slot) {
// TODO add some confidence level for when there is no CRC
if ((uci_234->harq.harq_crc != 1) && acknack) {
// toggle NDI and reset round
sched_ctrl->harq_processes[harq_idx].ndi ^= 1;
sched_ctrl->harq_processes[harq_idx].round = 0;
}
else
sched_ctrl->harq_processes[harq_idx].round++;
sched_ctrl->harq_processes[harq_idx].is_waiting = 0;
harq_idx_s = harq_idx + 1;
// if the max harq rounds was reached
if (sched_ctrl->harq_processes[harq_idx].round == max_harq_rounds) {
sched_ctrl->harq_processes[harq_idx].ndi ^= 1;
sched_ctrl->harq_processes[harq_idx].round = 0;
stats->dlsch_errors++;
}
break;
}
// if feedback slot processing is aborted
else if (((slot-1) > sched_ctrl->harq_processes[harq_idx].feedback_slot) &&
(sched_ctrl->harq_processes[harq_idx].is_waiting)) {
sched_ctrl->harq_processes[harq_idx].round++;
if (sched_ctrl->harq_processes[harq_idx].round == max_harq_rounds) {
sched_ctrl->harq_processes[harq_idx].ndi ^= 1;
sched_ctrl->harq_processes[harq_idx].round = 0;
}
sched_ctrl->harq_processes[harq_idx].is_waiting = 0;
}
}
}
}
}
 void handle_nr_uci_pucch_0_1(module_id_t mod_id,
                              frame_t frame,
                              sub_frame_t slot,
...@@ -1058,8 +965,37 @@ void handle_nr_uci_pucch_0_1(module_id_t mod_id,
                 uci_01->ul_cqi,
                 30);
+  NR_ServingCellConfigCommon_t *scc = RC.nrmac[mod_id]->common_channels->ServingCellConfigCommon;
+  const int num_slots = nr_slots_per_frame[*scc->ssbSubcarrierSpacing];
   if (((uci_01->pduBitmap >> 1) & 0x01)) {
-    nr_rx_acknack(NULL,uci_01,NULL,slot,sched_ctrl,&UE_info->mac_stats[0]);
+    // iterate over received harq bits
+    for (int harq_bit = 0; harq_bit < uci_01->harq->num_harq; harq_bit++) {
+      const uint8_t harq_value = uci_01->harq->harq_list[harq_bit].harq_value;
+      const uint8_t harq_confidence = uci_01->harq->harq_confidence_level;
+      const int feedback_slot = (slot - 1 + num_slots) % num_slots;
+      /* In case of realtime problems: we can only identify a HARQ process by
+       * timing. If the HARQ process's feedback_slot is not the one we
+       * expected, we assume that processing has been aborted and we need to
+       * skip this HARQ process, which is what happens in the loop below. If
+       * you don't experience real-time problems, you might simply revert the
+       * commit that introduced these changes. */
+      int8_t pid = sched_ctrl->feedback_dl_harq.head;
+      DevAssert(pid >= 0);
+      while (sched_ctrl->harq_processes[pid].feedback_slot != feedback_slot) {
+        LOG_W(MAC,
+              "expected feedback slot %d, but found %d instead\n",
+              sched_ctrl->harq_processes[pid].feedback_slot,
+              feedback_slot);
+        remove_front_nr_list(&sched_ctrl->feedback_dl_harq);
+        handle_dl_harq(mod_id, UE_id, pid, 0);
+        pid = sched_ctrl->feedback_dl_harq.head;
+        DevAssert(pid >= 0);
+      }
+      remove_front_nr_list(&sched_ctrl->feedback_dl_harq);
+      NR_UE_harq_t *harq = &sched_ctrl->harq_processes[pid];
+      DevAssert(harq->is_waiting);
+      handle_dl_harq(mod_id, UE_id, pid, harq_value == 1 && harq_confidence == 0);
+    }
   }
 }
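The code that replaces `nr_rx_acknack()` identifies the HARQ process purely by timing: the head of a per-UE FIFO of processes awaiting feedback is popped until its expected feedback slot matches the slot the UCI arrived in, and mismatched entries are treated as aborted (NACKed). A minimal standalone sketch of that matching policy, with hypothetical types in place of the OAI scheduler structures:

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_HARQ 16

/* Hypothetical stand-ins for the OAI per-UE HARQ bookkeeping. */
typedef struct { int feedback_slot; bool is_waiting; } harq_proc_t;
typedef struct { int pids[MAX_HARQ]; int len; } harq_fifo_t;

static int fifo_head(const harq_fifo_t *f) { return f->len > 0 ? f->pids[0] : -1; }
static void fifo_pop(harq_fifo_t *f)
{
  for (int i = 1; i < f->len; i++) f->pids[i - 1] = f->pids[i];
  f->len--;
}

/* Pop aborted processes (wrong feedback slot -> would be NACKed), then return
 * the process whose feedback was expected in 'feedback_slot'. */
static int match_harq_by_timing(harq_fifo_t *fifo, harq_proc_t *procs, int feedback_slot)
{
  int pid = fifo_head(fifo);
  assert(pid >= 0);
  while (procs[pid].feedback_slot != feedback_slot) {
    printf("skipping aborted HARQ pid %d (expected slot %d)\n", pid, procs[pid].feedback_slot);
    procs[pid].is_waiting = false; /* in OAI, handle_dl_harq() with a NACK */
    fifo_pop(fifo);
    pid = fifo_head(fifo);
    assert(pid >= 0);
  }
  fifo_pop(fifo);
  return pid;
}

int main(void)
{
  harq_proc_t procs[MAX_HARQ] = { [0] = { 7, true }, [1] = { 9, true } };
  harq_fifo_t fifo = { .pids = { 0, 1 }, .len = 2 };
  /* UCI received in slot 10 -> feedback slot 9: pid 0 is skipped, pid 1 matches. */
  printf("matched pid %d\n", match_harq_by_timing(&fifo, procs, 9));
  return 0;
}
```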
...@@ -1082,8 +1018,36 @@ void handle_nr_uci_pucch_2_3_4(module_id_t mod_id,
                 uci_234->ul_cqi,
                 30);
+  NR_ServingCellConfigCommon_t *scc = RC.nrmac[mod_id]->common_channels->ServingCellConfigCommon;
+  const int num_slots = nr_slots_per_frame[*scc->ssbSubcarrierSpacing];
   if ((uci_234->pduBitmap >> 1) & 0x01) {
-    nr_rx_acknack(NULL,NULL,uci_234,slot,sched_ctrl,&UE_info->mac_stats[0]);
+    // iterate over received harq bits
+    for (int harq_bit = 0; harq_bit < uci_234->harq.harq_bit_len; harq_bit++) {
+      const int acknack = ((uci_234->harq.harq_payload[harq_bit >> 3]) >> harq_bit) & 0x01;
+      const int feedback_slot = (slot - 1 + num_slots) % num_slots;
+      /* In case of realtime problems: we can only identify a HARQ process by
+       * timing. If the HARQ process's feedback_slot is not the one we
+       * expected, we assume that processing has been aborted and we need to
+       * skip this HARQ process, which is what happens in the loop below. If
+       * you don't experience real-time problems, you might simply revert the
+       * commit that introduced these changes. */
+      int8_t pid = sched_ctrl->feedback_dl_harq.head;
+      DevAssert(pid >= 0);
+      while (sched_ctrl->harq_processes[pid].feedback_slot != feedback_slot) {
+        LOG_W(MAC,
+              "expected feedback slot %d, but found %d instead\n",
+              sched_ctrl->harq_processes[pid].feedback_slot,
+              feedback_slot);
+        remove_front_nr_list(&sched_ctrl->feedback_dl_harq);
+        handle_dl_harq(mod_id, UE_id, pid, 0);
+        pid = sched_ctrl->feedback_dl_harq.head;
+        DevAssert(pid >= 0);
+      }
+      remove_front_nr_list(&sched_ctrl->feedback_dl_harq);
+      NR_UE_harq_t *harq = &sched_ctrl->harq_processes[pid];
+      DevAssert(harq->is_waiting);
+      handle_dl_harq(mod_id, UE_id, pid, uci_234->harq.harq_crc != 1 && acknack);
+    }
     //API to parse the csi report and store it into sched_ctrl
     extract_pucch_csi_report (csi_MeasConfig, uci_234, frame, slot, UE_id, mod_id);
     //TCI handling function
...@@ -1091,86 +1055,214 @@ void handle_nr_uci_pucch_2_3_4(module_id_t mod_id,
   }
   if (uci_234 -> pduBitmap & 0x08) {
-    ///Handle CSI Report 2
+    //@TODO:Handle CSI Report 2
   }
 }
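For PUCCH formats 2/3/4 the HARQ bits arrive as a byte-packed payload. A small, generic sketch of unpacking such a payload; the LSB-first bit numbering used here is an assumption for illustration, not a statement about the nFAPI convention:

```c
#include <stdint.h>
#include <stdio.h>

/* Extract bit 'i' from a payload packed LSB-first, 8 bits per byte. */
static int get_acknack_bit(const uint8_t *payload, int i)
{
  return (payload[i >> 3] >> (i & 7)) & 0x01;
}

int main(void)
{
  /* Assumed example: 10 HARQ bits, pattern 1101001110 (bit 0 first). */
  const uint8_t payload[2] = { 0xcb, 0x01 }; /* 0b11001011, 0b00000001 */
  for (int i = 0; i < 10; i++)
    printf("%d", get_acknack_bit(payload, i));
  printf("\n");
  return 0;
}
```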
 // function to update pucch scheduling parameters in UE list when a USS DL is scheduled
-void nr_acknack_scheduling(int Mod_idP,
-                           int UE_id,
-                           frame_t frameP,
-                           sub_frame_t slotP,
-                           int slots_per_tdd,
-                           int *pucch_id,
-                           int *pucch_occ) {
-
-  NR_ServingCellConfigCommon_t *scc = RC.nrmac[Mod_idP]->common_channels->ServingCellConfigCommon;
-  NR_UE_info_t *UE_info = &RC.nrmac[Mod_idP]->UE_info;
-  NR_sched_pucch *curr_pucch;
-  int max_acknacks,pucch_res,first_ul_slot_tdd,k,i,l;
-  uint8_t pdsch_to_harq_feedback[8];
-  int found = 0;
-  int nr_ulmix_slots = scc->tdd_UL_DL_ConfigurationCommon->pattern1.nrofUplinkSlots;
-  if (scc->tdd_UL_DL_ConfigurationCommon->pattern1.nrofUplinkSymbols!=0)
-    nr_ulmix_slots++;
-
-  bool csi_pres=false;
-  for (k=0; k<nr_ulmix_slots; k++) {
-    if(UE_info->UE_sched_ctrl[UE_id].sched_pucch[k][0].csi_bits>0)
-      csi_pres=true;
-  }
-
-  // As a preference always schedule ack nacks in PUCCH0 (max 2 per slots)
-  // Unless there is CSI meas reporting scheduled in the period to avoid conflicts in the same slot
-  if (csi_pres)
-    max_acknacks=10;
-  else
-    max_acknacks=2;
-
-  // this is hardcoded for now as ue specific
-  NR_SearchSpace__searchSpaceType_PR ss_type = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
-  get_pdsch_to_harq_feedback(Mod_idP,UE_id,ss_type,pdsch_to_harq_feedback);
-
-  // for each possible ul or mixed slot
-  for (k=0; k<nr_ulmix_slots; k++) {
-    for (l=0; l<1; l++) { // scheduling 2 PUCCH in a single slot does not work with the phone, currently
-      curr_pucch = &UE_info->UE_sched_ctrl[UE_id].sched_pucch[k][l];
-      //if it is possible to schedule acknack in current pucch (no exclusive csi pucch)
-      if ((curr_pucch->csi_bits == 0) || (curr_pucch->simultaneous_harqcsi==true)) {
-        // if there is free room in current pucch structure
-        if (curr_pucch->dai_c<max_acknacks) {
-          pucch_res = get_pucch_resource(UE_info,UE_id,k,l);
-          if (pucch_res>-1){
-            curr_pucch->resource_indicator = pucch_res;
-            curr_pucch->frame = frameP;
-            // first pucch occasion in first UL or MIXED slot
-            first_ul_slot_tdd = scc->tdd_UL_DL_ConfigurationCommon->pattern1.nrofDownlinkSlots;
-            i = 0;
-            while (i<8 && found == 0) { // look if timing indicator is among allowed values
-              if (pdsch_to_harq_feedback[i]==(first_ul_slot_tdd+k)-(slotP % slots_per_tdd))
-                found = 1;
-              if (found == 0) i++;
-            }
-            if (found == 1) {
-              // computing slot in which pucch is scheduled
-              curr_pucch->dai_c++;
-              curr_pucch->ul_slot = first_ul_slot_tdd + k + (slotP - (slotP % slots_per_tdd));
-              curr_pucch->timing_indicator = i; // index in the list of timing indicators
-              *pucch_id = k;
-              *pucch_occ = l;
-              return;
-            }
-          }
-        }
-      }
-    }
-  }
-  AssertFatal(1==0,"No Uplink slot available in accordance to allowed timing indicator\n");
-}
+bool nr_acknack_scheduling(int mod_id,
+                           int UE_id,
+                           frame_t frame,
+                           sub_frame_t slot)
+{
+  const NR_ServingCellConfigCommon_t *scc = RC.nrmac[mod_id]->common_channels->ServingCellConfigCommon;
+  const int n_slots_frame = nr_slots_per_frame[*scc->ssbSubcarrierSpacing];
+  const NR_TDD_UL_DL_Pattern_t *tdd = &scc->tdd_UL_DL_ConfigurationCommon->pattern1;
+  const int nr_ulmix_slots = tdd->nrofUplinkSlots + (tdd->nrofUplinkSymbols != 0);
+  const int nr_mix_slots = tdd->nrofDownlinkSymbols != 0 || tdd->nrofUplinkSymbols != 0;
+  const int nr_slots_period = tdd->nrofDownlinkSlots + tdd->nrofUplinkSlots + nr_mix_slots;
+  const int first_ul_slot_tdd = tdd->nrofDownlinkSlots + nr_slots_period * (slot / nr_slots_period);
+  const int CC_id = 0;
+
+  AssertFatal(slot < first_ul_slot_tdd + (tdd->nrofUplinkSymbols != 0),
+              "cannot handle multiple TDD periods (yet): slot %d first_ul_slot_tdd %d nrofUplinkSlots %ld\n",
+              slot,
+              first_ul_slot_tdd,
+              tdd->nrofUplinkSlots);
+
+  /* for the moment, we consider:
+   * * only pucch_sched[0] holds HARQ (and SR)
+   * * we do not multiplex with CSI, which is always in pucch_sched[2]
+   * * SR uses format 0 and is allocated in the first UL (mixed) slot (and not
+   *   later)
+   * * that the PUCCH resource set 0 (for up to 2 bits) points to the first N
+   *   PUCCH resources, where N is the number of resources in the PUCCH
+   *   resource set. This is used in pucch_index_used, which counts the used
+   *   resources by index, and not by their ID! */
+  NR_UE_sched_ctrl_t *sched_ctrl = &RC.nrmac[mod_id]->UE_info.UE_sched_ctrl[UE_id];
+  NR_sched_pucch_t *pucch = &sched_ctrl->sched_pucch[0];
+  AssertFatal(pucch->csi_bits == 0,
+              "%s(): csi_bits %d in sched_pucch[0]\n",
+              __func__,
+              pucch->csi_bits);
+
+  const int max_acknacks = 2;
+  AssertFatal(pucch->dai_c + pucch->sr_flag <= max_acknacks,
+              "illegal number of bits in PUCCH of UE %d\n",
+              UE_id);
+  /* if the currently allocated PUCCH of this UE is full, allocate it */
+  if (pucch->sr_flag + pucch->dai_c == max_acknacks) {
+    /* advance the UL slot information in PUCCH by one so we won't schedule in
+     * the same slot again */
+    const int f = pucch->frame;
+    const int s = pucch->ul_slot;
+    nr_fill_nfapi_pucch(mod_id, frame, slot, pucch, UE_id);
+    memset(pucch, 0, sizeof(*pucch));
+    pucch->frame = s == n_slots_frame - 1 ? (f + 1) % 1024 : f;
+    pucch->ul_slot = (s + 1) % n_slots_frame;
+    // we assume that only two indices over the array sched_pucch exist
+    const NR_sched_pucch_t *csi_pucch = &sched_ctrl->sched_pucch[2];
+    // skip the CSI PUCCH if it is present and if in the next frame/slot
+    if (csi_pucch->csi_bits > 0
+        && csi_pucch->frame == pucch->frame
+        && csi_pucch->ul_slot == pucch->ul_slot) {
+      AssertFatal(!csi_pucch->simultaneous_harqcsi,
+                  "%s(): %d.%d cannot handle simultaneous_harqcsi, but found for UE %d\n",
+                  __func__,
+                  pucch->frame,
+                  pucch->ul_slot,
+                  UE_id);
+      nr_fill_nfapi_pucch(mod_id, frame, slot, csi_pucch, UE_id);
+      pucch->frame = s >= n_slots_frame - 2 ? (f + 1) % 1024 : f;
+      pucch->ul_slot = (s + 2) % n_slots_frame;
+    }
+  }
+
+  /* if the UE's next PUCCH occasion is after the possible UL slots (within the
+   * same frame) or wrapped around to the next frame, then we assume there is
+   * no possible PUCCH allocation anymore */
+  if ((pucch->frame == frame
+       && (pucch->ul_slot >= first_ul_slot_tdd + nr_ulmix_slots))
+      || (pucch->frame == frame + 1))
+    return false;
+
+  // this is hardcoded for now as ue specific
+  NR_SearchSpace__searchSpaceType_PR ss_type = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
+  uint8_t pdsch_to_harq_feedback[8];
+  get_pdsch_to_harq_feedback(mod_id, UE_id, ss_type, pdsch_to_harq_feedback);
+
+  /* there is a scheduled SR or HARQ. Check whether we can use it for this
+   * ACKNACK */
+  if (pucch->sr_flag + pucch->dai_c > 0) {
+    /* this UE already has a PUCCH occasion */
+    DevAssert(pucch->frame == frame);
+
+    // Find the right timing_indicator value.
+    int i = 0;
+    while (i < 8) {
+      if (pdsch_to_harq_feedback[i] == pucch->ul_slot - slot)
+        break;
+      ++i;
+    }
+    if (i >= 8) {
+      // we cannot reach this timing anymore, allocate and try again
+      const int f = pucch->frame;
+      const int s = pucch->ul_slot;
+      const int n_slots_frame = nr_slots_per_frame[*scc->ssbSubcarrierSpacing];
+      nr_fill_nfapi_pucch(mod_id, frame, slot, pucch, UE_id);
+      memset(pucch, 0, sizeof(*pucch));
+      pucch->frame = s == n_slots_frame - 1 ? (f + 1) % 1024 : f;
+      pucch->ul_slot = (s + 1) % n_slots_frame;
+      return nr_acknack_scheduling(mod_id, UE_id, frame, slot);
+    }
+
+    pucch->timing_indicator = i;
+    pucch->dai_c++;
+    // retain old resource indicator, and we are good
+    return true;
+  }
+
+  /* we need to find a new PUCCH occasion */
+  NR_PUCCH_Config_t *pucch_Config = sched_ctrl->active_ubwp->bwp_Dedicated->pucch_Config->choice.setup;
+  DevAssert(pucch_Config->resourceToAddModList->list.count > 0);
+  DevAssert(pucch_Config->resourceSetToAddModList->list.count > 0);
+  const int n_res = pucch_Config->resourceSetToAddModList->list.array[0]->resourceList.list.count;
+  int *pucch_index_used = RC.nrmac[mod_id]->pucch_index_used[sched_ctrl->active_ubwp->bwp_Id];
+
+  /* if time information is outdated (e.g., last PUCCH occasion in last frame),
+   * set to first possible UL occasion in this frame. Note that if such UE is
+   * scheduled a lot and used all AckNacks, pucch->frame might have been
+   * wrapped around to next frame */
+  if (frame != pucch->frame || pucch->ul_slot < first_ul_slot_tdd) {
+    DevAssert(pucch->sr_flag + pucch->dai_c == 0);
+    AssertFatal(frame + 1 != pucch->frame,
+                "frame wrap around not handled in %s() yet\n",
+                __func__);
+    pucch->frame = frame;
+    pucch->ul_slot = first_ul_slot_tdd;
+  }
+
+  // increase to first slot in which PUCCH resources are available
+  while (pucch_index_used[pucch->ul_slot] >= n_res) {
+    pucch->ul_slot++;
+    /* if there is no free resource anymore, abort search */
+    if ((pucch->frame == frame
+         && pucch->ul_slot >= first_ul_slot_tdd + nr_ulmix_slots)
+        || (pucch->frame == frame + 1)) {
+      LOG_E(MAC,
+            "%4d.%2d no free PUCCH resources anymore while searching for UE %d\n",
+            frame,
+            slot,
+            UE_id);
+      return false;
+    }
+  }
+
+  // advance ul_slot if it is not reachable by UE
+  pucch->ul_slot = max(pucch->ul_slot, slot + pdsch_to_harq_feedback[0]);
+
+  // Find the right timing_indicator value.
+  int i = 0;
+  while (i < 8) {
+    if (pdsch_to_harq_feedback[i] == pucch->ul_slot - slot)
+      break;
+    ++i;
+  }
+  if (i >= 8) {
+    LOG_W(MAC,
+          "%4d.%2d could not find pdsch_to_harq_feedback for UE %d: earliest "
+          "ack slot %d\n",
+          frame,
+          slot,
+          UE_id,
+          pucch->ul_slot);
+    return false;
+  }
+
+  pucch->timing_indicator = i; // index in the list of timing indicators
+  pucch->dai_c++;
+  const int pucch_res = pucch_index_used[pucch->ul_slot];
+  pucch->resource_indicator = pucch_res;
+  pucch_index_used[pucch->ul_slot] += 1;
+  AssertFatal(pucch_index_used[pucch->ul_slot] <= n_res,
+              "UE %d in %4d.%2d: pucch_index_used is %d (%d available)\n",
+              UE_id,
+              pucch->frame,
+              pucch->ul_slot,
+              pucch_index_used[pucch->ul_slot],
+              n_res);
+
+  /* verify that at that slot and symbol, resources are free. We only do this
+   * for initialCyclicShift 0 (we assume it always has that one), so other
+   * initialCyclicShifts can overlap with ICS 0!*/
+  const NR_PUCCH_Resource_t *resource =
+      pucch_Config->resourceToAddModList->list.array[pucch_res];
+  DevAssert(resource->format.present == NR_PUCCH_Resource__format_PR_format0);
+  if (resource->format.choice.format0->initialCyclicShift == 0) {
+    uint16_t *vrb_map_UL = &RC.nrmac[mod_id]->common_channels[CC_id].vrb_map_UL[pucch->ul_slot * MAX_BWP_SIZE];
+    const uint16_t symb = 1 << resource->format.choice.format0->startingSymbolIndex;
+    AssertFatal((vrb_map_UL[resource->startingPRB] & symb) == 0,
+                "symbol %x is not free for PUCCH alloc in vrb_map_UL at RB %ld and slot %d\n",
+                symb, resource->startingPRB, pucch->ul_slot);
+    vrb_map_UL[resource->startingPRB] |= symb;
+  }
+  return true;
+}
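The core of the new allocation is the `timing_indicator` search: the gap between the PUCCH slot and the current DL slot must equal one of the up-to-eight configured PDSCH-to-HARQ feedback values (K1), and the DCI then carries the index of the match. A standalone sketch of that lookup with an assumed K1 table (the real table comes from `get_pdsch_to_harq_feedback()` and the UE's PUCCH configuration):

```c
#include <stdint.h>
#include <stdio.h>

/* Return the index i such that k1_table[i] == pucch_slot - dl_slot, or -1 if
 * the gap cannot be signalled (the scheduler then moves the PUCCH occasion). */
static int find_timing_indicator(const uint8_t k1_table[8], int dl_slot, int pucch_slot)
{
  for (int i = 0; i < 8; i++)
    if (k1_table[i] == pucch_slot - dl_slot)
      return i;
  return -1;
}

int main(void)
{
  /* Assumed dl-DataToUL-ACK table. */
  const uint8_t k1[8] = { 7, 6, 5, 4, 3, 2, 1, 0 };
  printf("DL slot 3, PUCCH slot 8  -> indicator %d\n", find_timing_indicator(k1, 3, 8));  /* gap 5 -> 2  */
  printf("DL slot 3, PUCCH slot 12 -> indicator %d\n", find_timing_indicator(k1, 3, 12)); /* gap 9 -> -1 */
  return 0;
}
```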
-void csi_period_offset(NR_CSI_ReportConfig_t *csirep,
+void csi_period_offset(const NR_CSI_ReportConfig_t *csirep,
                        int *period, int *offset) {
   NR_CSI_ReportPeriodicityAndOffset_PR p_and_o = csirep->reportConfigType.choice.periodic->reportSlotConfig.present;
...@@ -1221,24 +1313,6 @@ void csi_period_offset(NR_CSI_ReportConfig_t *csirep,
   }
 }
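`csi_period_offset()` translates the RRC `reportSlotConfig` into a (period, offset) pair in slots; a periodic report is then due whenever the absolute slot count matches the offset modulo the period. A small sketch of that check with assumed numbers:

```c
#include <stdbool.h>
#include <stdio.h>

/* A periodic CSI report with 'period' and 'offset' (both in slots) is due in
 * (frame, slot) when the absolute slot index hits the offset. */
static bool csi_report_due(int frame, int slot, int n_slots_frame, int period, int offset)
{
  return ((frame * n_slots_frame + slot - offset) % period) == 0;
}

int main(void)
{
  /* Assumed example: 20 slots/frame (30 kHz SCS), period 40 slots, offset 9. */
  const int n_slots_frame = 20, period = 40, offset = 9;
  printf("frame 0 slot 9: %d\n", csi_report_due(0, 9, n_slots_frame, period, offset)); /* 1 */
  printf("frame 1 slot 9: %d\n", csi_report_due(1, 9, n_slots_frame, period, offset)); /* 0 */
  printf("frame 2 slot 9: %d\n", csi_report_due(2, 9, n_slots_frame, period, offset)); /* 1 */
  return 0;
}
```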
-int get_pucch_resource(NR_UE_info_t *UE_info,int UE_id,int k,int l) {
-  // to be updated later, for now simple implementation
-  // use the second allocation just in case there is csi in the first
-  // in that case use second resource (for a different symbol) see 9.2 in 38.213
-  if (l==1) {
-    if (UE_info->UE_sched_ctrl[UE_id].sched_pucch[k][0].csi_bits==0)
-      return -1;
-    else
-      return 1;
-  }
-  else
-    return 0;
-}
-
 uint16_t compute_pucch_prb_size(uint8_t format,
                                 uint8_t nr_prbs,
                                 uint16_t O_tot,
...@@ -1314,8 +1388,3 @@ uint16_t compute_pucch_prb_size(uint8_t format,
       AssertFatal(1==0,"Not yet implemented");
   }
 }
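`compute_pucch_prb_size()` dimensions the PUCCH so the UCI payload fits at the configured maximum code rate. The rough sizing rule, sketched below with assumed numbers and ignoring the CRC overhead that the real function must add for larger payloads, is the smallest PRB count whose capacity `n_prb * data_sc_per_prb * n_symbols * Qm * max_code_rate` covers the payload:

```c
#include <stdio.h>

/* Smallest number of PRBs (up to nr_prbs_max) whose coded capacity fits
 * 'payload_bits'. CRC bits are ignored in this sketch. */
static int min_pucch_prbs(int payload_bits, int nr_prbs_max, int data_sc_per_prb,
                          int n_symbols, int qm, double max_code_rate)
{
  for (int n = 1; n <= nr_prbs_max; n++) {
    const double capacity = n * data_sc_per_prb * n_symbols * qm * max_code_rate;
    if (capacity >= payload_bits)
      return n;
  }
  return -1; /* does not fit in the configured resource */
}

int main(void)
{
  /* Assumed format-2-like numbers: 8 data subcarriers/PRB, 2 symbols, QPSK,
   * maxCodeRate 0.35. */
  printf("PRBs for 11 bits: %d\n", min_pucch_prbs(11, 16, 8, 2, 2, 0.35)); /* 1 */
  printf("PRBs for 40 bits: %d\n", min_pucch_prbs(40, 16, 8, 2, 2, 0.35)); /* 4 */
  return 0;
}
```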
...@@ -1053,7 +1053,8 @@ void fill_default_secondaryCellGroup(NR_ServingCellConfigCommon_t *servingcellco
   secondaryCellGroup->spCellConfig->spCellConfigDedicated->pdsch_ServingCellConfig->choice.setup = pdsch_servingcellconfig;
   pdsch_servingcellconfig->codeBlockGroupTransmission = NULL;
   pdsch_servingcellconfig->xOverhead = NULL;
-  pdsch_servingcellconfig->nrofHARQ_ProcessesForPDSCH = NULL;
+  pdsch_servingcellconfig->nrofHARQ_ProcessesForPDSCH = calloc(1, sizeof(*pdsch_servingcellconfig->nrofHARQ_ProcessesForPDSCH));
+  *pdsch_servingcellconfig->nrofHARQ_ProcessesForPDSCH = NR_PDSCH_ServingCellConfig__nrofHARQ_ProcessesForPDSCH_n16;
   pdsch_servingcellconfig->pucch_Cell= NULL;
   pdsch_servingcellconfig->ext1=calloc(1,sizeof(*pdsch_servingcellconfig->ext1));
   pdsch_servingcellconfig->ext1->maxMIMO_Layers = calloc(1,sizeof(*pdsch_servingcellconfig->ext1->maxMIMO_Layers));
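Setting `nrofHARQ_ProcessesForPDSCH` follows the usual pattern for OPTIONAL ASN.1 members in asn1c-generated structs: the pointer doubles as the presence flag, so the field is calloc'd first and then dereferenced to assign the enumerator. A generic, self-contained illustration of the idiom (hypothetical struct, not the OAI type):

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical asn1c-style struct: a NULL pointer means "field absent". */
typedef enum { n2 = 0, n4, n8, n16 } harq_count_e;
typedef struct {
  long *nrofHARQ_ProcessesForPDSCH; /* OPTIONAL */
} example_serving_cell_config_t;

int main(void)
{
  example_serving_cell_config_t cfg = { 0 };

  /* allocate the optional field, then assign its value */
  cfg.nrofHARQ_ProcessesForPDSCH = calloc(1, sizeof(*cfg.nrofHARQ_ProcessesForPDSCH));
  *cfg.nrofHARQ_ProcessesForPDSCH = n16;

  printf("present: %s, value: %ld\n",
         cfg.nrofHARQ_ProcessesForPDSCH ? "yes" : "no",
         *cfg.nrofHARQ_ProcessesForPDSCH);
  free(cfg.nrofHARQ_ProcessesForPDSCH);
  return 0;
}
```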
...@@ -1287,4 +1288,3 @@ void rrc_config_dl_ptrs_params(NR_BWP_Downlink_t *bwp, int *ptrsNrb, int *ptrsMc
   *bwp->bwp_Dedicated->pdsch_Config->choice.setup->dmrs_DownlinkForPDSCH_MappingTypeA->choice.setup->phaseTrackingRS->choice.setup->resourceElementOffset = *reOffset;
 }
 #endif