Commit 9f17c2d9 authored by ChiehChun

Add pf_ul & pseudo code

parent fdd8a99f
@@ -552,51 +552,51 @@ int8_t select_ul_harq_pid(NR_UE_sched_ctrl_t *sched_ctrl) {
return -1;
}
bool nr_simple_ulsch_preprocessor(module_id_t module_id,
void pf_ul(module_id_t module_id,
frame_t frame,
sub_frame_t slot,
int num_slots_per_tdd,
uint64_t ulsch_in_slot_bitmap) {
gNB_MAC_INST *nr_mac = RC.nrmac[module_id];
NR_COMMON_channels_t *cc = nr_mac->common_channels;
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
const int mu = scc->uplinkConfigCommon->initialUplinkBWP->genericParameters.subcarrierSpacing;
NR_UE_info_t *UE_info = &nr_mac->UE_info;
AssertFatal(UE_info->num_UEs <= 1,
"%s() cannot handle more than one UE, but found %d\n",
__func__,
UE_info->num_UEs);
if (UE_info->num_UEs == 0)
return false;
NR_UE_list_t *UE_list,
int n_rb_sched,
uint8_t *rballoc_mask,
int max_num_ue) {
const int UE_id = 0;
const int CC_id = 0;
/* Different K2 values are not supported here; take K2 from the first UE */
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[0];
const int tda = 1;
const struct NR_PUSCH_TimeDomainResourceAllocationList *tdaList =
sched_ctrl->active_ubwp->bwp_Common->pusch_ConfigCommon->choice.setup->pusch_TimeDomainAllocationList;
AssertFatal(tda < tdaList->list.count,
"time domain assignment %d >= %d\n",
tda,
tdaList->list.count);
int K2 = get_K2(sched_ctrl->active_ubwp, tda, mu);
const int sched_frame = frame + (slot + K2 >= num_slots_per_tdd);
const int sched_slot = (slot + K2) % num_slots_per_tdd;
if (!is_xlsch_in_slot(ulsch_in_slot_bitmap, sched_slot))
return false;
sched_ctrl->sched_pusch.slot = sched_slot;
sched_ctrl->sched_pusch.frame = sched_frame;
NR_ServingCellConfigCommon_t *scc = RC.nrmac[module_id]->common_channels[CC_id].ServingCellConfigCommon;
NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info;
/* Confirm all the UEs have the same K2 as the first UE */
for (int UE_id = UE_info->list.next[0]; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
/* Loop UE_list to calculate throughput and coeff */
for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
AssertFatal(K2 == get_K2(sched_ctrl->active_ubwp, tda, mu),
"Different K2, %d(UE%d) != %ld(UE%d)\n", K2, 0, get_K2(sched_ctrl->active_ubwp, tda, mu), UE_id);
int rbStart = NRRIV2PRBOFFSET(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_ubwp->bwp_Common->genericParameters.locationAndBandwidth,275);
/* Calculate throughput */
/* RETRANSMISSION: Check retransmission */
/* RETRANSMISSION: Find free CCE */
/* RETRANSMISSION: Allocate retransmission */
/* Check BSR */
/* Calculate coefficient */
}
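/* Illustrative sketch only, not part of this commit: one way the "Calculate
 * throughput" / "Calculate coefficient" placeholders above could be filled in
 * per UE. All names in this block (ul_thr_ewma, bytes_served, est_rate_bytes,
 * alpha) are hypothetical and local to the sketch. */
{
  float ul_thr_ewma = 1.0f;          /* long-term average of served bytes per slot */
  const uint32_t bytes_served = 0;   /* bytes scheduled for this UE in the last slot */
  const uint32_t est_rate_bytes = 0; /* bytes achievable in this slot, e.g. from the current MCS */
  const float alpha = 0.0005f;       /* EWMA forgetting factor */
  /* update the moving average and compute the proportional-fair metric:
   * instantaneous achievable rate divided by long-term average throughput */
  ul_thr_ewma = (1.0f - alpha) * ul_thr_ewma + alpha * (float)bytes_served;
  const float coeff = (float)est_rate_bytes / ul_thr_ewma;
  (void)coeff; /* sketch only: a real implementation would store this per UE */
}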
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
int rbStart = NRRIV2PRBOFFSET(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_ubwp->bwp_Common->genericParameters.locationAndBandwidth,275);
/* Loop over UE_sched to find the max coefficient and allocate the transmission
 * (a possible selection helper is sketched after this function) */
//while (UE_sched.head >= 0 && max_num_ue > 0 && n_rb_sched > 0)
if (n_rb_sched > 0) { // temporary: single-UE handling until the loop above is implemented
/* Find max coeff */
/* Find free CCE */
const int target_ss = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
sched_ctrl->search_space = get_searchspace(sched_ctrl->active_bwp, target_ss);
uint8_t nr_of_candidates;
@@ -617,10 +617,11 @@ bool nr_simple_ulsch_preprocessor(module_id_t module_id,
nr_of_candidates);
if (sched_ctrl->cce_index < 0) {
LOG_E(MAC, "%s(): CCE list not empty, couldn't schedule PUSCH\n", __func__);
return false;
return;
}
UE_info->num_pdcch_cand[UE_id][cid]++;
/* Save PUSCH fields */
sched_ctrl->sched_pusch.time_domain_allocation = tda;
const long f = sched_ctrl->search_space->searchSpaceType->choice.ue_Specific->dci_Formats;
const int dci_format = f ? NR_UL_DCI_FORMAT_0_1 : NR_UL_DCI_FORMAT_0_0;
@@ -652,26 +653,13 @@ bool nr_simple_ulsch_preprocessor(module_id_t module_id,
sched_pusch->Qm <<= 1;
}
/* Derive rballoc_mask from vrb_map_UL */
uint16_t *vrb_map_UL =
&RC.nrmac[module_id]->common_channels[CC_id].vrb_map_UL[sched_slot * 275];
int rbStart = NRRIV2PRBOFFSET(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_ubwp->bwp_Common->genericParameters.locationAndBandwidth,275);
uint8_t rballoc_mask[bwpSize];
int n_rb_sched = 0;
for (int i = 0; i < bwpSize; i++) {
// calculate mask: init with "NOT" vrb_map_UL:
// if any RB in vrb_map_UL is blocked (1), the current RB will be 0
rballoc_mask[i] = !vrb_map_UL[i];
n_rb_sched += rballoc_mask[i];
}
while (rbStart < bwpSize && vrb_map_UL[rbStart]) rbStart++;
uint16_t rbSize = 4;
while (rbStart < bwpSize && !rballoc_mask[rbStart]) rbStart++;
sched_pusch->rbStart = rbStart;
const int B = cmax(sched_ctrl->estimated_ul_buffer - sched_ctrl->sched_ul_bytes, 0);
/* Calculate the number of bytes that remain to be scheduled */
const int B = cmax(sched_ctrl->estimated_ul_buffer - sched_ctrl->sched_ul_bytes, 0);
uint16_t rbSize = 4;
do {
rbSize++;
sched_pusch->rbSize = rbSize;
@@ -689,9 +677,77 @@ bool nr_simple_ulsch_preprocessor(module_id_t module_id,
LOG_D(MAC,"rbSize %d, TBS %d, est buf %d, sched_ul %d, B %d\n",
rbSize, sched_pusch->tb_size, sched_ctrl->estimated_ul_buffer, sched_ctrl->sched_ul_bytes, B);
/* mark the corresponding RBs as used */
/* Mark the corresponding RBs as used */
for (int rb = 0; rb < sched_ctrl->sched_pusch.rbSize; rb++)
rballoc_mask[rb + sched_ctrl->sched_pusch.rbStart] = 0;
}
}
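/* Illustrative sketch, not part of this commit: the commented-out while loop
 * and the "Find max coeff" placeholder in pf_ul() point at a selection step
 * that repeatedly picks the UE with the largest proportional-fair coefficient
 * until max_num_ue or n_rb_sched is exhausted. A minimal helper for the
 * "pick the maximum" part could look like this; the coeff[] array is
 * hypothetical. */
static int pf_pick_max_coeff_sketch(const float *coeff, const NR_UE_list_t *UE_list)
{
  int best_UE = -1;
  float best_coeff = -1.0f;
  /* walk the UE list exactly as pf_ul() does and keep the best metric */
  for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
    if (coeff[UE_id] > best_coeff) {
      best_coeff = coeff[UE_id];
      best_UE = UE_id;
    }
  }
  return best_UE; /* -1 when the list is empty */
}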
bool nr_simple_ulsch_preprocessor(module_id_t module_id,
frame_t frame,
sub_frame_t slot,
int num_slots_per_tdd,
uint64_t ulsch_in_slot_bitmap) {
gNB_MAC_INST *nr_mac = RC.nrmac[module_id];
NR_COMMON_channels_t *cc = nr_mac->common_channels;
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
const int mu = scc->uplinkConfigCommon->initialUplinkBWP->genericParameters.subcarrierSpacing;
NR_UE_info_t *UE_info = &nr_mac->UE_info;
AssertFatal(UE_info->num_UEs <= 1,
"%s() cannot handle more than one UE, but found %d\n",
__func__,
UE_info->num_UEs);
if (UE_info->num_UEs == 0)
return false;
const int CC_id = 0;
/* Different K2 values are not supported here; take K2 from the first UE */
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[0];
const int tda = 1;
const struct NR_PUSCH_TimeDomainResourceAllocationList *tdaList =
sched_ctrl->active_ubwp->bwp_Common->pusch_ConfigCommon->choice.setup->pusch_TimeDomainAllocationList;
AssertFatal(tda < tdaList->list.count,
"time domain assignment %d >= %d\n",
tda,
tdaList->list.count);
int K2 = get_K2(sched_ctrl->active_ubwp, tda, mu);
const int sched_frame = frame + (slot + K2 >= num_slots_per_tdd);
const int sched_slot = (slot + K2) % num_slots_per_tdd;
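/* Worked example of the two expressions above (illustrative numbers): with
 * slot = 8, K2 = 3 and num_slots_per_tdd = 10, slot + K2 = 11 >= 10, so
 * sched_frame = frame + 1 and sched_slot = 11 % 10 = 1. */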
if (!is_xlsch_in_slot(ulsch_in_slot_bitmap, sched_slot))
return false;
sched_ctrl->sched_pusch.slot = sched_slot;
sched_ctrl->sched_pusch.frame = sched_frame;
/* Confirm all the UEs have the same K2 as the first UE */
for (int UE_id = UE_info->list.next[0]; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
AssertFatal(K2 == get_K2(sched_ctrl->active_ubwp, tda, mu),
"Different K2, %d(UE%d) != %ld(UE%d)\n", K2, 0, get_K2(sched_ctrl->active_ubwp, tda, mu), UE_id);
}
/* Derive rballoc_mask from vrb_map_UL */
uint16_t *vrb_map_UL =
&RC.nrmac[module_id]->common_channels[CC_id].vrb_map_UL[sched_slot * 275];
const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_ubwp->bwp_Common->genericParameters.locationAndBandwidth,275);
uint8_t rballoc_mask[bwpSize];
int n_rb_sched = 0;
for (int i = 0; i < bwpSize; i++) {
// calculate mask: init with "NOT" vrb_map_UL:
// if any RB in vrb_map_UL is blocked (1), the current RB will be 0
rballoc_mask[i] = !vrb_map_UL[i];
n_rb_sched += rballoc_mask[i];
}
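/* Illustrative example of the mask derivation above: if vrb_map_UL starts with
 * {1, 0, 0, 1, 0, ...}, rballoc_mask starts with {0, 1, 1, 0, 1, ...} and
 * n_rb_sched counts the 1s, i.e. the PRBs still free for PUSCH. */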
/* proportional fair scheduling algorithm */
pf_ul(module_id, frame, slot, num_slots_per_tdd,
&UE_info->list,
n_rb_sched,
rballoc_mask,
2);
return true;
}