Commit 8f988e38 authored by ChiehChun, committed by Robert Schmidt

Add pf_ul & pseudo code

parent 5573b8e1
@@ -504,55 +504,52 @@ long get_K2(NR_BWP_Uplink_t *ubwp, int time_domain_assignment, int mu) {
  return 3;
}

void pf_ul(module_id_t module_id,
           frame_t frame,
           sub_frame_t slot,
           int num_slots_per_tdd,
           NR_list_t *UE_list,
           int n_rb_sched,
           uint8_t *rballoc_mask,
           int max_num_ue) {

  const int CC_id = 0;
  const int tda = 1;
  NR_ServingCellConfigCommon_t *scc = RC.nrmac[module_id]->common_channels[CC_id].ServingCellConfigCommon;
  NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info;
  const int min_rb = 5;

  /* Loop UE_list to calculate throughput and coeff */
  for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
    NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
    int rbStart = NRRIV2PRBOFFSET(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, MAX_BWP_SIZE);
    const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_ubwp->bwp_Common->genericParameters.locationAndBandwidth, MAX_BWP_SIZE);

    /* Calculate throughput */

    /* RETRANSMISSION: Check retransmission */
    /* RETRANSMISSION: Find free CCE */
    /* RETRANSMISSION: Allocate retransmission */

    /* Check BSR */

    /* Calculate coefficient */
  }

  /* temp: handle only the first UE of the list until the loop below is implemented */
  const int UE_id = UE_list->head;
  NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
  int rbStart = NRRIV2PRBOFFSET(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
  const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_ubwp->bwp_Common->genericParameters.locationAndBandwidth, 275);

  /* Loop UE_sched to find max coeff and allocate transmission */
  //while (UE_sched.head > 0 && max_num_ue > 0 && n_rb_sched > 0)
  if (n_rb_sched > 0) { //temp
    /* Find max coeff */

    /* Find free CCE */
    const int target_ss = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
    sched_ctrl->search_space = get_searchspace(sched_ctrl->active_bwp, target_ss);
    uint8_t nr_of_candidates;
@@ -572,11 +569,12 @@ bool nr_simple_ulsch_preprocessor(module_id_t module_id,
                                      m,
                                      nr_of_candidates);
    if (sched_ctrl->cce_index < 0) {
      LOG_E(MAC, "%s(): CCE list not empty, couldn't schedule PUSCH\n", __func__);
      return;
    }
    UE_info->num_pdcch_cand[UE_id][cid]++;

    /* Save PUSCH field */
    sched_ctrl->sched_pusch.time_domain_allocation = tda;
    const long f = sched_ctrl->search_space->searchSpaceType->choice.ue_Specific->dci_Formats;
    const int dci_format = f ? NR_UL_DCI_FORMAT_0_1 : NR_UL_DCI_FORMAT_0_0;
@@ -608,26 +606,18 @@ bool nr_simple_ulsch_preprocessor(module_id_t module_id,
      sched_pusch->Qm <<= 1;
    }

    while (rbStart < bwpSize && !rballoc_mask[rbStart]) rbStart++;
    sched_pusch->rbStart = rbStart;
    if (rbStart + min_rb >= bwpSize) {
      LOG_W(MAC, "cannot allocate UL data for UE %d/RNTI %04x: no resources\n",
            UE_id, UE_info->rnti[UE_id]);
      return;
    }

    /* Calculate the current scheduling bytes */
    const int B = cmax(sched_ctrl->estimated_ul_buffer - sched_ctrl->sched_ul_bytes, 0);
    uint16_t rbSize = min_rb - 1;
    do {
      rbSize++;
      sched_pusch->rbSize = rbSize;
@@ -645,9 +635,84 @@ bool nr_simple_ulsch_preprocessor(module_id_t module_id,
    LOG_D(MAC, "rbSize %d, TBS %d, est buf %d, sched_ul %d, B %d\n",
          rbSize, sched_pusch->tb_size, sched_ctrl->estimated_ul_buffer, sched_ctrl->sched_ul_bytes, B);

    /* Mark the corresponding RBs as used */
    for (int rb = 0; rb < sched_ctrl->sched_pusch.rbSize; rb++)
      rballoc_mask[rb + sched_ctrl->sched_pusch.rbStart] = 0;
  }
}
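The placeholder comments in pf_ul ("Calculate throughput", "Calculate coefficient", "Find max coeff") outline the usual proportional-fair metric: each UE gets a coefficient equal to its currently achievable rate divided by its long-term average throughput, and the UE with the largest coefficient is served next. The following standalone sketch shows one way that selection step could look; the names (pf_pick_ue, inst_rate, avg_thr, PF_ALPHA) are illustrative only and are not part of the OAI code.

/* Standalone sketch of the proportional-fair selection the placeholders describe.
 * All identifiers here are made up for illustration, not OAI fields. */
#include <stddef.h>

#define PF_ALPHA 0.01f /* smoothing factor of the throughput average */

/* Returns the index of the UE with the largest coefficient inst_rate[i] / avg_thr[i]
 * and updates the running averages: the scheduled UE averages in its current rate,
 * all others decay toward zero. Returns -1 if num_ue is 0. */
int pf_pick_ue(const float *inst_rate, float *avg_thr, size_t num_ue)
{
  int best = -1;
  float best_coeff = 0.0f;
  for (size_t i = 0; i < num_ue; i++) {
    const float coeff = inst_rate[i] / (avg_thr[i] > 0.0f ? avg_thr[i] : 1e-6f);
    if (coeff > best_coeff) {
      best_coeff = coeff;
      best = (int)i;
    }
  }
  for (size_t i = 0; i < num_ue; i++) {
    const float served = ((int)i == best) ? inst_rate[i] : 0.0f;
    avg_thr[i] = (1.0f - PF_ALPHA) * avg_thr[i] + PF_ALPHA * served;
  }
  return best;
}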
bool nr_simple_ulsch_preprocessor(module_id_t module_id,
                                  frame_t frame,
                                  sub_frame_t slot,
                                  int num_slots_per_tdd,
                                  uint64_t ulsch_in_slot_bitmap) {
  gNB_MAC_INST *nr_mac = RC.nrmac[module_id];
  NR_COMMON_channels_t *cc = nr_mac->common_channels;
  NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
  const int mu = scc->uplinkConfigCommon->initialUplinkBWP->genericParameters.subcarrierSpacing;
  NR_UE_info_t *UE_info = &nr_mac->UE_info;

  AssertFatal(UE_info->num_UEs <= 1,
              "%s() cannot handle more than one UE, but found %d\n",
              __func__,
              UE_info->num_UEs);
  if (UE_info->num_UEs == 0)
    return false;

  const int UE_id = 0; /* first UE; referenced by the K2-check loop below */
  const int CC_id = 0;

  /* Different K2 values are not supported here; take the K2 of the first UE */
  NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[0];
  const int tda = 1;
  const struct NR_PUSCH_TimeDomainResourceAllocationList *tdaList =
    sched_ctrl->active_ubwp->bwp_Common->pusch_ConfigCommon->choice.setup->pusch_TimeDomainAllocationList;
  AssertFatal(tda < tdaList->list.count,
              "time domain assignment %d >= %d\n",
              tda,
              tdaList->list.count);
  int K2 = get_K2(sched_ctrl->active_ubwp, tda, mu);
  const int sched_frame = frame + (slot + K2 >= num_slots_per_tdd);
  const int sched_slot = (slot + K2) % num_slots_per_tdd;
  if (!is_xlsch_in_slot(ulsch_in_slot_bitmap, sched_slot))
    return false;
  sched_ctrl->sched_pusch.slot = sched_slot;
  sched_ctrl->sched_pusch.frame = sched_frame;

  /* get the PID of a HARQ process awaiting retransmission, or -1 otherwise */
  sched_ctrl->sched_pusch.ul_harq_pid = sched_ctrl->retrans_ul_harq.head;

  /* Confirm all the UEs have the same K2 as the first UE */
  for (int UE_id = UE_info->list.next[UE_id]; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
    NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
    AssertFatal(K2 == get_K2(sched_ctrl->active_ubwp, tda, mu),
                "Different K2, %d(UE%d) != %ld(UE%d)\n", K2, 0, get_K2(sched_ctrl->active_ubwp, tda, mu), UE_id);
    sched_ctrl->sched_pusch.slot = sched_slot;
    sched_ctrl->sched_pusch.frame = sched_frame;
  }

  /* Change vrb_map_UL to rballoc_mask */
  uint16_t *vrb_map_UL =
    &RC.nrmac[module_id]->common_channels[CC_id].vrb_map_UL[sched_slot * MAX_BWP_SIZE];
  const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_ubwp->bwp_Common->genericParameters.locationAndBandwidth, MAX_BWP_SIZE);
  uint8_t rballoc_mask[bwpSize];
  int n_rb_sched = 0;
  for (int i = 0; i < bwpSize; i++) {
    // calculate mask: init with "NOT" vrb_map_UL:
    // if any RB in vrb_map_UL is blocked (1), the current RB will be 0
    rballoc_mask[i] = !vrb_map_UL[i];
    n_rb_sched += rballoc_mask[i];
  }

  /* proportional fair scheduling algorithm */
  pf_ul(module_id,
        frame,
        slot,
        num_slots_per_tdd,
        &UE_info->list,
        n_rb_sched,
        rballoc_mask,
        2);
  return true;
}
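For reference, the sched_slot/sched_frame computation above follows directly from K2, the slot offset between the UL grant and the PUSCH transmission: a grant issued in (frame, slot) schedules PUSCH in slot (slot + K2) mod num_slots_per_tdd, and the frame index advances by one when slot + K2 passes the end of the TDD period. A tiny worked example with made-up numbers (not taken from the commit):

/* Illustration only: the K2 slot arithmetic as used in nr_simple_ulsch_preprocessor */
#include <assert.h>

int main(void) {
  const int num_slots_per_tdd = 10;        /* assumed TDD period length */
  const int frame = 100, slot = 8, K2 = 3; /* example values */
  const int sched_frame = frame + (slot + K2 >= num_slots_per_tdd);
  const int sched_slot = (slot + K2) % num_slots_per_tdd;
  assert(sched_frame == 101); /* grant in frame 100 -> PUSCH in frame 101 */
  assert(sched_slot == 1);    /* slot 8 + K2 3 wraps to slot 1 */
  return 0;
}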