Commit e0d92ba2 authored by ChiehChun

Create pf_dl function

parent 9fd42b3d
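In outline, this commit splits the single-UE downlink preprocessor in two: nr_simple_dlsch_preprocessor() keeps the UE sanity checks, builds the per-RB allocation bitmap from vrb_map, and queries the RLC buffer occupancy, then hands that precomputed state to the new pf_dl() helper, which performs the CCE, PUCCH, and HARQ allocation. A minimal sketch of the new interface follows; the parameter comments are inferred from how the caller computes each argument and are not part of the commit itself:

    /* New entry point created by this commit (sketch); the comments below
     * are inferred from the call site, not the author's own annotations. */
    void pf_dl(module_id_t module_id,
               frame_t frame,
               sub_frame_t slot,
               int num_slots_per_tdd,
               NR_UE_info_t *UE_info,    /* per-UE scheduling state */
               int n_rb_sched,           /* count of RBs not blocked in vrb_map */
               uint8_t *rballoc_mask,    /* per-RB availability, 1 = free */
               int max_num_ue);          /* UE budget per pass; called with 2 */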
@@ -465,63 +465,20 @@ uint8_t getN_PRB_DMRS(NR_BWP_Downlink_t *bwp, int numDmrsCdmGrpsNoData) {
     return numDmrsCdmGrpsNoData * 4;
   }
 }
 
-void nr_simple_dlsch_preprocessor(module_id_t module_id,
-                                  frame_t frame,
-                                  sub_frame_t slot,
-                                  int num_slots_per_tdd) {
-  NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info;
-  AssertFatal(UE_info->num_UEs <= 1,
-              "%s() cannot handle more than one UE, but found %d\n",
-              __func__,
-              UE_info->num_UEs);
-  if (UE_info->num_UEs == 0)
-    return;
+void pf_dl(module_id_t module_id,
+           frame_t frame,
+           sub_frame_t slot,
+           int num_slots_per_tdd,
+           NR_UE_info_t *UE_info,
+           int n_rb_sched,
+           uint8_t *rballoc_mask,
+           int max_num_ue) {
 
   const int UE_id = 0;
   const int CC_id = 0;
   NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
   sched_ctrl->rbSize = 0;
 
-  /* Calculate num of RBG and RBG size from UE_id=0 */
-  const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
-  int rbStart = NRRIV2PRBOFFSET(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
-  uint8_t *vrb_map = RC.nrmac[module_id]->common_channels[CC_id].vrb_map;
-  uint8_t rballoc_mask[bwpSize];
-  int n_rb_sched = 0;
-  for (int i = 0; i < bwpSize; i++) {
-    // calculate mask: init with "NOT" vrb_map:
-    // if any RB in vrb_map is blocked (1), the current RBG will be 0
-    rballoc_mask[i] = !vrb_map[i];
-    n_rb_sched += rballoc_mask[i];
-  }
-
-  /* Retrieve amount of data to send for this UE */
-  sched_ctrl->num_total_bytes = 0;
-  const int lcid = DL_SCH_LCID_DTCH;
-  const uint16_t rnti = UE_info->rnti[UE_id];
-  sched_ctrl->rlc_status[lcid] = mac_rlc_status_ind(module_id,
-                                                    rnti,
-                                                    module_id,
-                                                    frame,
-                                                    slot,
-                                                    ENB_FLAG_YES,
-                                                    MBMS_FLAG_NO,
-                                                    lcid,
-                                                    0,
-                                                    0);
-  sched_ctrl->num_total_bytes += sched_ctrl->rlc_status[lcid].bytes_in_buffer;
-  if (sched_ctrl->num_total_bytes == 0 && !get_softmodem_params()->phy_test)
-    return;
-
-  LOG_D(MAC,
-        "%d.%d, DTCH%d->DLSCH, RLC status %d bytes\n",
-        frame,
-        slot,
-        lcid,
-        sched_ctrl->rlc_status[lcid].bytes_in_buffer);
-
   /* Find a free CCE */
   const int target_ss = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
@@ -530,8 +487,7 @@ void nr_simple_dlsch_preprocessor(module_id_t module_id,
   find_aggregation_candidates(&sched_ctrl->aggregation_level,
                               &nr_of_candidates,
                               sched_ctrl->search_space);
-  sched_ctrl->coreset = get_coreset(
-      sched_ctrl->active_bwp, sched_ctrl->search_space, 1 /* dedicated */);
+  sched_ctrl->coreset = get_coreset(sched_ctrl->active_bwp, sched_ctrl->search_space, 1 /* dedicated */);
   int cid = sched_ctrl->coreset->controlResourceSetId;
   const uint16_t Y = UE_info->Y[UE_id][cid][slot];
   const int m = UE_info->num_pdcch_cand[UE_id][cid];
@@ -559,9 +515,12 @@ void nr_simple_dlsch_preprocessor(module_id_t module_id,
   AssertFatal(sched_ctrl->pucch_sched_idx >= 0, "no uplink slot for PUCCH found!\n");
 
+  uint8_t *vrb_map = RC.nrmac[module_id]->common_channels[CC_id].vrb_map;
   const int current_harq_pid = sched_ctrl->current_harq_pid;
   NR_UE_harq_t *harq = &sched_ctrl->harq_processes[current_harq_pid];
   NR_UE_ret_info_t *retInfo = &sched_ctrl->retInfo[current_harq_pid];
+  const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
+  int rbStart = NRRIV2PRBOFFSET(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
 
   if (harq->round != 0) { /* retransmission */
     sched_ctrl->time_domain_allocation = retInfo->time_domain_allocation;
@@ -634,6 +593,73 @@ void nr_simple_dlsch_preprocessor(module_id_t module_id,
   }
 }
 
+void nr_simple_dlsch_preprocessor(module_id_t module_id,
+                                  frame_t frame,
+                                  sub_frame_t slot,
+                                  int num_slots_per_tdd) {
+  NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info;
+
+  AssertFatal(UE_info->num_UEs <= 1,
+              "%s() cannot handle more than one UE, but found %d\n",
+              __func__,
+              UE_info->num_UEs);
+  if (UE_info->num_UEs == 0)
+    return;
+
+  const int UE_id = 0;
+  const int CC_id = 0;
+  NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
+  sched_ctrl->rbSize = 0;
+
+  /* Calculate num of RBG and RBG size from UE_id=0 */
+  const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
+  int rbStart = NRRIV2PRBOFFSET(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
+  uint8_t *vrb_map = RC.nrmac[module_id]->common_channels[CC_id].vrb_map;
+  uint8_t rballoc_mask[bwpSize];
+  int n_rb_sched = 0;
+  for (int i = 0; i < bwpSize; i++) {
+    // calculate mask: init with "NOT" vrb_map:
+    // if any RB in vrb_map is blocked (1), the current RBG will be 0
+    rballoc_mask[i] = !vrb_map[i];
+    n_rb_sched += rballoc_mask[i];
+  }
+
+  /* Retrieve amount of data to send for this UE */
+  sched_ctrl->num_total_bytes = 0;
+  const int lcid = DL_SCH_LCID_DTCH;
+  const uint16_t rnti = UE_info->rnti[UE_id];
+  sched_ctrl->rlc_status[lcid] = mac_rlc_status_ind(module_id,
+                                                    rnti,
+                                                    module_id,
+                                                    frame,
+                                                    slot,
+                                                    ENB_FLAG_YES,
+                                                    MBMS_FLAG_NO,
+                                                    lcid,
+                                                    0,
+                                                    0);
+  sched_ctrl->num_total_bytes += sched_ctrl->rlc_status[lcid].bytes_in_buffer;
+  if (sched_ctrl->num_total_bytes == 0 && !get_softmodem_params()->phy_test)
+    return;
+
+  LOG_D(MAC,
+        "%d.%d, DTCH%d->DLSCH, RLC status %d bytes\n",
+        frame,
+        slot,
+        lcid,
+        sched_ctrl->rlc_status[lcid].bytes_in_buffer);
+
+  /* pf algo */
+  pf_dl(module_id,
+        frame,
+        slot,
+        num_slots_per_tdd,
+        UE_info,
+        n_rb_sched,
+        rballoc_mask,
+        2);
+}
+
 void nr_schedule_ue_spec(module_id_t module_id,
                          frame_t frame,
                          sub_frame_t slot,
......