Commit 6f4aa16c authored by Robert Schmidt

Rewrite PUCCH allocation (simplified, restricted)

- No dynamic allocation of PUCCH structures
- Use first PUCCH for HARQ (can only allocate for up to two slots in a
  single PUCCH; simplistic nr_acknack_scheduling!)
- Use second PUCCH for CSI, do not multiplex!
parent f2b3d4db
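
For orientation before the diff: the commit replaces the per-slot, dynamically allocated PUCCH bookkeeping with a fixed pair of NR_sched_pucch_t entries per UE, where index 0 accumulates HARQ ACK/NACK (dai_c) and index 1 carries CSI, without multiplexing the two. The following standalone sketch models that layout with simplified stand-in types (only the fields this diff touches); it is an illustration, not the actual scheduler code.

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for NR_sched_pucch_t: only the fields this commit
 * touches (frame/slot of the PUCCH, accumulated ACK/NACK count, CSI bits). */
typedef struct {
  int frame;
  int ul_slot;
  int dai_c;     /* number of HARQ ACK/NACK bits accumulated */
  int csi_bits;  /* CSI payload size, if this entry carries CSI */
} sched_pucch_t;

/* Simplified stand-in for NR_UE_sched_ctrl_t: the PUCCH state is now a
 * fixed-size array of two entries instead of a malloc'ed 2D array. */
typedef struct {
  sched_pucch_t sched_pucch[2]; /* [0]: HARQ (+SR), [1]: CSI */
} ue_sched_ctrl_t;

int main(void) {
  ue_sched_ctrl_t ue;
  memset(&ue, 0, sizeof(ue)); /* zeroing the UE context resets PUCCH state too */

  /* two DL allocations whose ACKs go into the same (first) PUCCH */
  ue.sched_pucch[0].dai_c++;
  ue.sched_pucch[0].dai_c++;

  /* a periodic CSI report goes into the second entry, never multiplexed */
  ue.sched_pucch[1].csi_bits = 11;

  printf("HARQ bits in sched_pucch[0]: %d, CSI bits in sched_pucch[1]: %d\n",
         ue.sched_pucch[0].dai_c, ue.sched_pucch[1].csi_bits);
  return 0;
}
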
@@ -173,10 +173,6 @@ void nr_dlsim_preprocessor(module_id_t module_id,
                                      sched_ctrl->active_bwp, sched_ctrl->search_space, 1 /* dedicated */);
   sched_ctrl->cce_index = 0;
-  /* set "any" value for PUCCH (simulator evaluates PDSCH only) */
-  sched_ctrl->pucch_sched_idx = 0;
-  sched_ctrl->pucch_occ_idx = 0;
   sched_ctrl->rbStart = g_rbStart;
   sched_ctrl->rbSize = g_rbSize;
   sched_ctrl->mcs = g_mcsIndex;
...
@@ -443,15 +443,10 @@ void nr_simple_dlsch_preprocessor(module_id_t module_id,
     UE_info->num_pdcch_cand[UE_id][cid]++;
     /* Find PUCCH occasion */
-    nr_acknack_scheduling(module_id,
-                          UE_id,
-                          frame,
-                          slot,
-                          num_slots_per_tdd,
-                          &sched_ctrl->pucch_sched_idx,
-                          &sched_ctrl->pucch_occ_idx);
-    AssertFatal(sched_ctrl->pucch_sched_idx >= 0, "no uplink slot for PUCCH found!\n");
+    const bool alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot);
+    AssertFatal(alloc,
+                "could not find uplink slot for PUCCH (RNTI %04x@%d.%d)!\n",
+                rnti, frame, slot);
     uint16_t *vrb_map = RC.nrmac[module_id]->common_channels[CC_id].vrb_map;
     // for now HARQ PID is fixed and should be the same as in post-processor
@@ -596,7 +591,7 @@ void nr_schedule_ue_spec(module_id_t module_id,
     const int current_harq_pid = slot % num_slots_per_tdd;
     NR_UE_harq_t *harq = &sched_ctrl->harq_processes[current_harq_pid];
-    NR_sched_pucch_t *pucch = &sched_ctrl->sched_pucch[sched_ctrl->pucch_sched_idx][sched_ctrl->pucch_occ_idx];
+    NR_sched_pucch_t *pucch = &sched_ctrl->sched_pucch[0];
     harq->feedback_slot = pucch->ul_slot;
     harq->is_waiting = 1;
     UE_info->mac_stats[UE_id].dlsch_rounds[harq->round]++;
...
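
A side note on the unchanged context above: the HARQ process ID is still derived directly from the slot number (current_harq_pid = slot % num_slots_per_tdd), which is what lets the preprocessor and the post-processor agree on the PID without passing it around. A tiny standalone sketch of that mapping, assuming a 10-slot TDD period:

#include <stdio.h>

int main(void) {
  const int num_slots_per_tdd = 10; /* assumed TDD period length, for illustration */
  for (int slot = 0; slot < 20; slot++) {
    const int current_harq_pid = slot % num_slots_per_tdd;
    printf("slot %2d -> HARQ PID %d\n", slot, current_harq_pid);
  }
  return 0;
}
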
@@ -328,14 +328,10 @@ void nr_preprocessor_phytest(module_id_t module_id,
               __func__,
               UE_id);
-  nr_acknack_scheduling(module_id,
-                        UE_id,
-                        frame,
-                        slot,
-                        num_slots_per_tdd,
-                        &sched_ctrl->pucch_sched_idx,
-                        &sched_ctrl->pucch_occ_idx);
-  AssertFatal(sched_ctrl->pucch_sched_idx >= 0, "no uplink slot for PUCCH found!\n");
+  const bool alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot);
+  AssertFatal(alloc,
+              "could not find uplink slot for PUCCH (RNTI %04x@%d.%d)!\n",
+              rnti, frame, slot);
   sched_ctrl->rbStart = rbStart;
   sched_ctrl->rbSize = rbSize;
...
@@ -1864,16 +1864,6 @@ int add_new_nr_ue(module_id_t mod_idP, rnti_t rntiP){
   UE_info->UE_sched_ctrl[UE_id].ul_rssi = 0;
   /* set illegal time domain allocation to force recomputation of all fields */
   UE_info->UE_sched_ctrl[UE_id].pusch_save.time_domain_allocation = -1;
-  UE_info->UE_sched_ctrl[UE_id].sched_pucch = (NR_sched_pucch_t **)malloc(num_slots_ul*sizeof(NR_sched_pucch_t *));
-  for (int s=0; s<num_slots_ul;s++)
-    UE_info->UE_sched_ctrl[UE_id].sched_pucch[s] = (NR_sched_pucch_t *)malloc(2*sizeof(NR_sched_pucch_t));
-  for (int k=0; k<num_slots_ul; k++) {
-    for (int l=0; l<2; l++)
-      memset((void *) &UE_info->UE_sched_ctrl[UE_id].sched_pucch[k][l],
-             0,
-             sizeof(NR_sched_pucch_t));
-  }
   LOG_I(MAC, "gNB %d] Add NR UE_id %d : rnti %x\n",
         mod_idP,
         UE_id,
@@ -1913,7 +1903,6 @@ void mac_remove_nr_ue(module_id_t mod_id, rnti_t rnti)
       UE_info->active[UE_id] = FALSE;
       UE_info->rnti[UE_id] = 0;
       remove_nr_ue_list(&UE_info->list, UE_id);
-      free(UE_info->UE_sched_ctrl[UE_id].sched_pucch);
       memset((void *) &UE_info->UE_sched_ctrl[UE_id],
              0,
              sizeof(NR_UE_sched_ctrl_t));
...
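
With sched_pucch now an array embedded in NR_UE_sched_ctrl_t (see the struct change at the end of this diff), UE creation no longer allocates and UE removal no longer frees anything for PUCCH: the existing memset over the whole control structure covers it. A small self-contained sketch of that ownership change, using simplified stand-in types:

#include <stdio.h>
#include <string.h>

typedef struct { int dai_c; int csi_bits; } sched_pucch_t;

/* after this commit: value member, owned by the UE context itself */
typedef struct {
  sched_pucch_t sched_pucch[2];
  int ul_rssi;
} ue_sched_ctrl_t;

int main(void) {
  ue_sched_ctrl_t ctrl;

  /* add_new_nr_ue(): no per-UE heap allocation needed any more */
  memset(&ctrl, 0, sizeof(ctrl));
  ctrl.sched_pucch[0].dai_c = 1;

  /* mac_remove_nr_ue(): one memset clears everything, no free() required */
  memset(&ctrl, 0, sizeof(ctrl));
  printf("after removal: dai_c=%d csi_bits=%d\n",
         ctrl.sched_pucch[0].dai_c, ctrl.sched_pucch[1].csi_bits);
  return 0;
}
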
@@ -42,9 +42,10 @@ void nr_schedule_pucch(int Mod_idP,
   NR_UE_info_t *UE_info = &RC.nrmac[Mod_idP]->UE_info;
   AssertFatal(UE_info->active[UE_id],"Cannot find UE_id %d is not active\n",UE_id);
-  for (int k=0; k<nr_ulmix_slots; k++) {
-    for (int l=0; l<2; l++) {
-      NR_sched_pucch_t *curr_pucch = &UE_info->UE_sched_ctrl[UE_id].sched_pucch[k][l];
+  NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
+  const int n = sizeof(sched_ctrl->sched_pucch) / sizeof(*sched_ctrl->sched_pucch);
+  for (int i = 0; i < n; i++) {
+    NR_sched_pucch_t *curr_pucch = &sched_ctrl->sched_pucch[i];
     const uint16_t O_ack = curr_pucch->dai_c;
     const uint16_t O_csi = curr_pucch->csi_bits;
     const uint8_t O_sr = 0; // no SR in PUCCH implemented for now
@@ -69,15 +70,14 @@ void nr_schedule_pucch(int Mod_idP,
     future_ul_tti_req->n_pdus += 1;
     LOG_I(MAC,
-          "%4d.%2d Scheduling pucch reception in %4d.%2d: bits SR %d, ACK %d, CSI %d, k %d l %d\n",
+          "%4d.%2d Scheduling pucch reception in %4d.%2d: bits SR %d, ACK %d, CSI %d\n",
           frameP,
           slotP,
           curr_pucch->frame,
           curr_pucch->ul_slot,
           O_sr,
           O_ack,
-          O_csi,
-          k, l);
+          O_csi);
     NR_ServingCellConfigCommon_t *scc = RC.nrmac[Mod_idP]->common_channels->ServingCellConfigCommon;
     nr_configure_pucch(pucch_pdu,
@@ -89,10 +89,7 @@ void nr_schedule_pucch(int Mod_idP,
                        O_ack,
                        O_sr);
-      memset(&UE_info->UE_sched_ctrl[UE_id].sched_pucch[k][l],
-             0,
-             sizeof(NR_sched_pucch_t));
-    }
+    memset(curr_pucch, 0, sizeof(*curr_pucch));
   }
 }
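
The rewritten loop derives its bound with the sizeof(array)/sizeof(*array) idiom, so it automatically follows the declared size of sched_pucch; this only works because sched_pucch is now a real array member (with the old NR_sched_pucch_t ** pointer it would not give the number of entries). A minimal standalone illustration:

#include <stdio.h>

typedef struct { int dai_c; } sched_pucch_t;
typedef struct { sched_pucch_t sched_pucch[2]; } sched_ctrl_t;

int main(void) {
  sched_ctrl_t ctrl = {0};
  /* number of elements: total array size divided by the size of one element */
  const int n = sizeof(ctrl.sched_pucch) / sizeof(*ctrl.sched_pucch);
  for (int i = 0; i < n; i++)
    printf("PUCCH occasion %d of %d: dai_c=%d\n", i, n, ctrl.sched_pucch[i].dai_c);
  return 0;
}
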
@@ -221,7 +218,8 @@ void nr_csi_meas_reporting(int Mod_idP,
     if ( (frame%(period/n_slots_frame)==(offset/n_slots_frame)) && (slot==((sched_slot/slots_per_tdd)*slots_per_tdd))) {
       // we are scheduling pucch for csi in the first pucch occasion (this comes before ack/nack)
-      curr_pucch = &UE_info->UE_sched_ctrl[UE_id].sched_pucch[(sched_slot%slots_per_tdd)-slots_per_tdd+ul_slots][0];
+      // FIXME: for the moment, we statically put it into the second sched_pucch!
+      curr_pucch = &UE_info->UE_sched_ctrl[UE_id].sched_pucch[1];
       NR_PUCCH_CSI_Resource_t *pucchcsires = csirep->reportConfigType.choice.periodic->pucch_CSI_ResourceList.list.array[0];
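
On the CSI side, the condition in the unchanged context line above decides when a CSI PUCCH has to be prepared: period and offset come in slots, so dividing by n_slots_frame turns them into a frame-level periodicity, while the slot comparison fires at the start of the TDD period containing the report slot. The sketch below only makes that arithmetic concrete; all numbers (period, offset, slots per frame, TDD period length, and the derivation of sched_slot) are assumptions for illustration, not values taken from this code.

#include <stdio.h>

int main(void) {
  /* assumed example values, not from a real RRC configuration */
  const int period = 320;        /* CSI report periodicity in slots */
  const int offset = 2;          /* CSI report offset in slots */
  const int n_slots_frame = 20;  /* e.g. 30 kHz SCS: 20 slots per frame */
  const int slots_per_tdd = 10;  /* length of one TDD period in slots */
  const int sched_slot = offset % n_slots_frame; /* assumed: report slot within its frame */

  for (int frame = 0; frame < 40; frame++) {
    for (int slot = 0; slot < n_slots_frame; slot++) {
      /* same shape of condition as in nr_csi_meas_reporting() */
      if ((frame % (period / n_slots_frame) == (offset / n_slots_frame))
          && (slot == ((sched_slot / slots_per_tdd) * slots_per_tdd)))
        printf("CSI PUCCH prepared at %d.%d for report in slot %d\n",
               frame, slot, sched_slot);
    }
  }
  return 0;
}
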
@@ -408,76 +406,82 @@ void handle_nr_uci_pucch_2_3_4(module_id_t mod_id,
 // function to update pucch scheduling parameters in UE list when a USS DL is scheduled
-void nr_acknack_scheduling(int Mod_idP,
-                           int UE_id,
-                           frame_t frameP,
-                           sub_frame_t slotP,
-                           int slots_per_tdd,
-                           int *pucch_id,
-                           int *pucch_occ) {
-  NR_ServingCellConfigCommon_t *scc = RC.nrmac[Mod_idP]->common_channels->ServingCellConfigCommon;
-  NR_UE_info_t *UE_info = &RC.nrmac[Mod_idP]->UE_info;
-  NR_sched_pucch_t *curr_pucch;
-  int max_acknacks,pucch_res,first_ul_slot_tdd,k,i,l;
+bool nr_acknack_scheduling(int mod_id,
+                           int UE_id,
+                           frame_t frame,
+                           sub_frame_t slot)
+{
+  /* FIXME: for the moment, we consider that
+   * * only pucch_sched[0] holds HARQ
+   * * a UE is not scheduled in more than two slots, and their ACKs come in the same slot!
+   * * we do not multiplex with CSI
+   * * we do not mux two UEs in the same PUCCH slot (on the two symbols)
+   * * we only use the first TDD period (5/10ms) */
+  NR_UE_sched_ctrl_t *sched_ctrl = &RC.nrmac[mod_id]->UE_info.UE_sched_ctrl[UE_id];
+  NR_sched_pucch_t *curr_pucch = &sched_ctrl->sched_pucch[0];
+  AssertFatal(curr_pucch->csi_bits == 0,
+              "%s(): csi_bits %d in sched_pucch[0]\n",
+              __func__,
+              curr_pucch->csi_bits);
+  const int max_acknacks = 2;
+  AssertFatal(curr_pucch->dai_c <= max_acknacks,
+              "%s() called but already %d dai_c in sched_pucch[0]\n",
+              __func__,
+              curr_pucch->dai_c);
+  const NR_ServingCellConfigCommon_t *scc = RC.nrmac[mod_id]->common_channels->ServingCellConfigCommon;
+  const NR_TDD_UL_DL_Pattern_t *tdd_pattern = &scc->tdd_UL_DL_ConfigurationCommon->pattern1;
+  //const int nr_ulmix_slots = tdd_pattern->nrofUplinkSlots + (tdd_pattern->nrofUplinkSymbols != 0);
+  const int first_ul_slot_tdd = tdd_pattern->nrofDownlinkSlots;
+  const int CC_id = 0;
+  // this is hardcoded for now as ue specific
+  NR_SearchSpace__searchSpaceType_PR ss_type = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
   uint8_t pdsch_to_harq_feedback[8];
-  int found = 0;
-  int nr_ulmix_slots = scc->tdd_UL_DL_ConfigurationCommon->pattern1.nrofUplinkSlots;
-  if (scc->tdd_UL_DL_ConfigurationCommon->pattern1.nrofUplinkSymbols!=0)
-    nr_ulmix_slots++;
-  bool csi_pres=false;
-  for (k=0; k<nr_ulmix_slots; k++) {
-    if(UE_info->UE_sched_ctrl[UE_id].sched_pucch[k][0].csi_bits>0)
-      csi_pres=true;
-  }
-  // As a preference always schedule ack nacks in PUCCH0 (max 2 per slots)
-  // Unless there is CSI meas reporting scheduled in the period to avoid conflicts in the same slot
-  if (csi_pres)
-    max_acknacks=10;
-  else
-    max_acknacks=2;
-  // this is hardcoded for now as ue specific
-  NR_SearchSpace__searchSpaceType_PR ss_type = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
-  get_pdsch_to_harq_feedback(Mod_idP,UE_id,ss_type,pdsch_to_harq_feedback);
-  // for each possible ul or mixed slot
-  for (k=0; k<nr_ulmix_slots; k++) {
-    for (l=0; l<1; l++) { // scheduling 2 PUCCH in a single slot does not work with the phone, currently
-      curr_pucch = &UE_info->UE_sched_ctrl[UE_id].sched_pucch[k][l];
-      //if it is possible to schedule acknack in current pucch (no exclusive csi pucch)
-      if ((curr_pucch->csi_bits == 0) || (curr_pucch->simultaneous_harqcsi==true)) {
-        // if there is free room in current pucch structure
-        if (curr_pucch->dai_c<max_acknacks) {
-          pucch_res = get_pucch_resource(UE_info,UE_id,k,l);
-          if (pucch_res>-1){
-            curr_pucch->resource_indicator = pucch_res;
-            curr_pucch->frame = frameP;
-            // first pucch occasion in first UL or MIXED slot
-            first_ul_slot_tdd = scc->tdd_UL_DL_ConfigurationCommon->pattern1.nrofDownlinkSlots;
-            i = 0;
-            while (i<8 && found == 0) { // look if timing indicator is among allowed values
-              if (pdsch_to_harq_feedback[i]==(first_ul_slot_tdd+k)-(slotP % slots_per_tdd))
-                found = 1;
-              if (found == 0) i++;
-            }
-            if (found == 1) {
-              // computing slot in which pucch is scheduled
-              curr_pucch->dai_c++;
-              curr_pucch->ul_slot = first_ul_slot_tdd + k + (slotP - (slotP % slots_per_tdd));
-              curr_pucch->timing_indicator = i; // index in the list of timing indicators
-              *pucch_id = k;
-              *pucch_occ = l;
-              return;
-            }
-          }
-        }
-      }
-    }
-  }
-  AssertFatal(1==0,"No Uplink slot available in accordance to allowed timing indicator\n");
+  get_pdsch_to_harq_feedback(mod_id, UE_id, ss_type, pdsch_to_harq_feedback);
+  NR_PUCCH_Config_t *pucch_Config = sched_ctrl->active_ubwp->bwp_Dedicated->pucch_Config->choice.setup;
+  DevAssert(pucch_Config->resourceToAddModList->list.count > 0);
+  curr_pucch->frame = frame;
+  curr_pucch->dai_c++;
+  if (curr_pucch->dai_c == 1) {
+    /* FIXME for first allocation: find free resource, here assume first PUCCH
+     * resource and first_ul_slot_tdd */
+    const int pucch_res = 0;
+    curr_pucch->resource_indicator = pucch_res;
+    curr_pucch->ul_slot = first_ul_slot_tdd;
+    /* verify that at that slot and symbol, resources are free.
+     * Note: this does not handle potential mux of PUCCH in the same symbol! */
+    const NR_PUCCH_Resource_t *resource =
+        pucch_Config->resourceToAddModList->list.array[pucch_res];
+    DevAssert(resource->format.present == NR_PUCCH_Resource__format_PR_format0);
+    uint16_t *vrb_map_UL =
+        &RC.nrmac[mod_id]->common_channels[CC_id].vrb_map_UL[first_ul_slot_tdd * 275];
+    const uint16_t symb = 1 << resource->format.choice.format0->startingSymbolIndex;
+    AssertFatal((vrb_map_UL[resource->startingPRB] & symb) == 0,
+                "symbol %x is not free for PUCCH alloc in vrb_map_UL at RB %ld and slot %d\n",
+                symb, resource->startingPRB, first_ul_slot_tdd);
+    vrb_map_UL[resource->startingPRB] |= symb;
+  }
+  /* Find the right timing_indicator value. FIXME: if previously ul_slot is not
+   * possible (anymore), we need to allocate previous HARQ feedback (since we
+   * cannot "reach" it anymore) and search a new one! */
+  int i = 0;
+  while (i < 8) {
+    if (pdsch_to_harq_feedback[i] == curr_pucch->ul_slot - slot)
+      break;
+    ++i;
+  }
+  AssertFatal(i < 8,
+              "could not find pdsch_to_harq_feedback: slot %d, ack slot %d\n",
+              slot, first_ul_slot_tdd);
+  curr_pucch->timing_indicator = i; // index in the list of timing indicators
+  return true;
 }
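
Two pieces of the rewritten nr_acknack_scheduling() deserve a closer look: the timing indicator is the index into the RRC-configured pdsch_to_harq_feedback list whose value equals the slot distance between the current DL slot and the PUCCH slot, and the chosen PUCCH symbol is reserved by setting one bit per symbol in the UL VRB map entry at the resource's starting PRB. The standalone sketch below models both steps; the feedback table, PRB, and symbol index are assumed values, and the types are simplified stand-ins rather than the OAI structures.

#include <stdint.h>
#include <stdio.h>

#define MAX_BWP_PRBS 275 /* one vrb_map_UL row per slot, 275 PRBs wide */

int main(void) {
  /* assumed dl-DataToUL-ACK list (k1 values), as it would come from RRC */
  const uint8_t pdsch_to_harq_feedback[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  const int slot = 1;              /* current DL slot */
  const int first_ul_slot_tdd = 7; /* PUCCH goes to the first UL slot */

  /* step 1: find the index whose k1 equals the DL-to-PUCCH slot distance */
  int i = 0;
  while (i < 8) {
    if (pdsch_to_harq_feedback[i] == first_ul_slot_tdd - slot)
      break;
    ++i;
  }
  if (i == 8) {
    fprintf(stderr, "no timing indicator reaches slot %d from slot %d\n",
            first_ul_slot_tdd, slot);
    return 1;
  }
  printf("timing_indicator index %d (k1=%d)\n", i, pdsch_to_harq_feedback[i]);

  /* step 2: mark the PUCCH symbol as used at the resource's starting PRB */
  static uint16_t vrb_map_UL[10 * MAX_BWP_PRBS]; /* 10 slots, zero-initialized */
  const int startingPRB = 0;           /* assumed PUCCH resource PRB */
  const int startingSymbolIndex = 13;  /* assumed format 0 starting symbol */
  uint16_t *row = &vrb_map_UL[first_ul_slot_tdd * MAX_BWP_PRBS];
  const uint16_t symb = 1 << startingSymbolIndex;
  if (row[startingPRB] & symb) {
    fprintf(stderr, "symbol %x already taken at PRB %d\n", symb, startingPRB);
    return 1;
  }
  row[startingPRB] |= symb;
  printf("reserved symbol mask %04x at PRB %d, slot %d\n",
         row[startingPRB], startingPRB, first_ul_slot_tdd);
  return 0;
}
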
@@ -532,24 +536,6 @@ void csi_period_offset(NR_CSI_ReportConfig_t *csirep,
   }
 }
-int get_pucch_resource(NR_UE_info_t *UE_info,int UE_id,int k,int l) {
-  // to be updated later, for now simple implementation
-  // use the second allocation just in case there is csi in the first
-  // in that case use second resource (for a different symbol) see 9.2 in 38.213
-  if (l==1) {
-    if (UE_info->UE_sched_ctrl[UE_id].sched_pucch[k][0].csi_bits==0)
-      return -1;
-    else
-      return 1;
-  }
-  else
-    return 0;
-}
 uint16_t compute_pucch_prb_size(uint8_t format,
                                 uint8_t nr_prbs,
                                 uint16_t O_tot,
...
@@ -215,15 +215,10 @@ void nr_csi_meas_reporting(int Mod_idP,
                            int ul_slots,
                            int n_slots_frame);
-void nr_acknack_scheduling(int Mod_idP,
+bool nr_acknack_scheduling(int Mod_idP,
                            int UE_id,
                            frame_t frameP,
-                           sub_frame_t slotP,
-                           int slots_per_tdd,
-                           int *pucch_id,
-                           int *pucch_occ);
-
-int get_pucch_resource(NR_UE_info_t *UE_info,int UE_id,int k,int l);
+                           sub_frame_t slotP);
 void get_pdsch_to_harq_feedback(int Mod_idP,
                                 int UE_id,
...
@@ -385,10 +385,12 @@ typedef struct {
   /// the currently active BWP in UL
   NR_BWP_Uplink_t *active_ubwp;
-  NR_sched_pucch_t **sched_pucch;
-  /// selected PUCCH index, if scheduled
-  int pucch_sched_idx;
-  int pucch_occ_idx;
+  /// PUCCH scheduling information. Array of two, we assume for the moment:
+  /// HARQ (and SR) in the first field, CSI in second (as fixed by RRC conf.,
+  /// i.e. if actually present). The order is important for
+  /// nr_acknack_scheduling()!
+  NR_sched_pucch_t sched_pucch[2];
   NR_sched_pusch_save_t pusch_save;
   NR_sched_pusch_t sched_pusch;
...