Commit b9faa288 authored by ChiehChun, committed by Robert Schmidt

Create pf_dl function

parent 14b17b0e
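
This commit factors the per-UE allocation logic out of nr_simple_dlsch_preprocessor() into a new pf_dl() function, a first step towards a proportional-fair downlink scheduler: the preprocessor now only builds the PRB allocation mask and queries the RLC buffer, then delegates to pf_dl(). A condensed view of the resulting call flow, with signatures taken from the diff below (bodies elided):

```c
void pf_dl(module_id_t module_id,
           frame_t frame,
           sub_frame_t slot,
           NR_list_t *UE_list,     /* UEs to consider for scheduling */
           int n_rb_sched,         /* number of free PRBs in the BWP */
           uint8_t *rballoc_mask,  /* per-PRB free (1) / blocked (0) mask */
           int max_num_ue);

void nr_simple_dlsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t slot)
{
  /* ... build rballoc_mask from vrb_map, query RLC buffer occupancy ... */
  pf_dl(module_id, frame, slot, &UE_info->list, n_rb_sched, rballoc_mask, 2);
}
```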
@@ -331,63 +331,30 @@ uint8_t getN_PRB_DMRS(NR_BWP_Downlink_t *bwp, int numDmrsCdmGrpsNoData) {
}
}
void nr_simple_dlsch_preprocessor(module_id_t module_id,
void pf_dl(module_id_t module_id,
frame_t frame,
sub_frame_t slot) {
NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info;
AssertFatal(UE_info->num_UEs <= 1,
"%s() cannot handle more than one UE, but found %d\n",
__func__,
UE_info->num_UEs);
if (UE_info->num_UEs == 0)
return;
sub_frame_t slot,
NR_list_t *UE_list,
int n_rb_sched,
uint8_t *rballoc_mask,
int max_num_ue) {
const int UE_id = 0;
const int CC_id = 0;
NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info;
/* Loop UE_info->list to check retransmission */
for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
// for now HARQ PID is fixed and should be the same as in post-processor
const int current_harq_pid = slot % 8;
NR_UE_harq_t *harq = &sched_ctrl->harq_processes[current_harq_pid];
const rnti_t rnti = UE_info->rnti[UE_id];
/* Calculate num of RBG and RBG size from UE_id=0 */
const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
int rbStart = NRRIV2PRBOFFSET(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth,275);
/* Calculate Throughput */
uint16_t *vrb_map = RC.nrmac[module_id]->common_channels[CC_id].vrb_map;
uint8_t rballoc_mask[bwpSize];
int n_rb_sched = 0;
for (int i = 0; i < bwpSize; i++) {
// calculate mask: init with "NOT" vrb_map:
// if any RB in vrb_map is blocked (1), the current RBG will be 0
rballoc_mask[i] = !vrb_map[i];
n_rb_sched += rballoc_mask[i];
}
/* Retrieve amount of data to send for this UE */
sched_ctrl->num_total_bytes = 0;
const int lcid = DL_SCH_LCID_DTCH;
const rnti_t rnti = UE_info->rnti[UE_id];
sched_ctrl->rlc_status[lcid] = mac_rlc_status_ind(module_id,
rnti,
module_id,
frame,
slot,
ENB_FLAG_YES,
MBMS_FLAG_NO,
lcid,
0,
0);
sched_ctrl->num_total_bytes += sched_ctrl->rlc_status[lcid].bytes_in_buffer;
if (sched_ctrl->num_total_bytes == 0
&& !sched_ctrl->ta_apply) /* If TA should be applied, give at least one RB */
return;
LOG_D(MAC, "[%s][%d.%d], DTCH%d->DLSCH, RLC status %d bytes TA %d\n",
__FUNCTION__,
frame,
slot,
lcid,
sched_ctrl->rlc_status[lcid].bytes_in_buffer,
sched_ctrl->ta_apply);
/* retransmission */
if (harq->round != 0) {
/* Find a free CCE */
const int target_ss = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
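
The loop above walks OAI's array-backed NR_list_t: head holds the first UE_id, next[] chains the remaining ones, and a negative index terminates the list. A minimal self-contained illustration of the idiom (the struct layout is an assumption; only the head and next fields appear in this diff):

```c
#include <stdio.h>

typedef struct {
  int head;    /* first element, or -1 if the list is empty */
  int next[8]; /* next[i] is the element after i, -1 at the tail */
} NR_list_t;

int main(void)
{
  /* a list containing UE ids 2 -> 0 -> 5 */
  NR_list_t l = { .head = 2, .next = { [2] = 0, [0] = 5, [5] = -1 } };
  for (int UE_id = l.head; UE_id >= 0; UE_id = l.next[UE_id])
    printf("scheduling UE %d\n", UE_id);
  return 0;
}
```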
@@ -396,8 +363,7 @@ void nr_simple_dlsch_preprocessor(module_id_t module_id,
find_aggregation_candidates(&sched_ctrl->aggregation_level,
&nr_of_candidates,
sched_ctrl->search_space);
sched_ctrl->coreset = get_coreset(
sched_ctrl->active_bwp, sched_ctrl->search_space, 1 /* dedicated */);
sched_ctrl->coreset = get_coreset(sched_ctrl->active_bwp, sched_ctrl->search_space, 1 /* dedicated */);
int cid = sched_ctrl->coreset->controlResourceSetId;
const uint16_t Y = UE_info->Y[UE_id][cid][slot];
const int m = UE_info->num_pdcch_cand[UE_id][cid];
@@ -409,7 +375,7 @@ void nr_simple_dlsch_preprocessor(module_id_t module_id,
m,
nr_of_candidates);
if (sched_ctrl->cce_index < 0) {
LOG_D(MAC, "%s(): could not find CCE for UE %d\n", __func__, UE_id);
LOG_E(MAC, "%s(): could not find CCE for UE %d\n", __func__, UE_id);
return;
}
UE_info->num_pdcch_cand[UE_id][cid]++;
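
Note the bookkeeping contract here: num_pdcch_cand is incremented as soon as a CCE is allocated, and if the subsequent PUCCH allocation fails, both the counter and the cce_list entries are rolled back (the comment below explains why this order was chosen; the rollback appears in full in the new-transmission path later in the diff). Condensed:

```c
/* roll back the CCE allocation when PUCCH scheduling fails afterwards */
UE_info->num_pdcch_cand[UE_id][cid]--;
int *cce_list = RC.nrmac[module_id]->cce_list[sched_ctrl->active_bwp->bwp_Id][cid];
for (int i = 0; i < sched_ctrl->aggregation_level; i++)
  cce_list[sched_ctrl->cce_index + i] = 0;
```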
@@ -418,7 +384,7 @@ void nr_simple_dlsch_preprocessor(module_id_t module_id,
* allocation after CCE alloc fail would be more complex) */
const bool alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot);
if (!alloc) {
LOG_D(MAC,
LOG_W(MAC,
"%s(): could not find PUCCH for UE %d/%04x@%d.%d\n",
__func__,
UE_id,
@@ -432,41 +398,30 @@ void nr_simple_dlsch_preprocessor(module_id_t module_id,
return;
}
/* get the PID of a HARQ process awaiting retransmission, or -1 otherwise */
sched_ctrl->dl_harq_pid = sched_ctrl->retrans_dl_harq.head;
if (sched_ctrl->dl_harq_pid >= 0) { /* retransmission */
NR_UE_ret_info_t *retInfo = &sched_ctrl->retInfo[sched_ctrl->dl_harq_pid];
/* Allocate retransmission */
NR_UE_ret_info_t *retInfo = &sched_ctrl->retInfo[current_harq_pid];
sched_ctrl->time_domain_allocation = retInfo->time_domain_allocation;
/* ensure that there is a free place for RB allocation */
int rbSize = 0;
const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
int rbStart = NRRIV2PRBOFFSET(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
while (rbSize < retInfo->rbSize) {
rbStart += rbSize; /* last iteration rbSize was not enough, skip it */
rbSize = 0;
while (rbStart < bwpSize && !rballoc_mask[rbStart]) rbStart++;
if (rbStart >= bwpSize) {
LOG_D(MAC,
"%4d.%2d cannot allocate retransmission for UE %d/RNTI %04x: no resources\n",
frame,
slot,
LOG_E(MAC,
"cannot allocate retransmission for UE %d/RNTI %04x: no resources\n",
UE_id,
rnti);
/* not enough resources: drop retransmission attempt and make a new
* transmission instead */
sched_ctrl->dl_harq_pid = -1;
break;
return;
}
while (rbStart + rbSize < bwpSize
&& rballoc_mask[rbStart + rbSize]
&& rbSize < retInfo->rbSize)
rbSize++;
}
}
if (sched_ctrl->dl_harq_pid >= 0) {
/* this retransmission can be allocated (i.e., enough resources) */
NR_UE_ret_info_t *retInfo = &sched_ctrl->retInfo[sched_ctrl->dl_harq_pid];
sched_ctrl->time_domain_allocation = retInfo->time_domain_allocation;
sched_ctrl->rbSize = retInfo->rbSize;
sched_ctrl->rbStart = rbStart;
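
A retransmission must occupy exactly as many PRBs as the original transmission so the transport block size is unchanged; the while loop above therefore searches for a contiguous run of free PRBs of the original size. Read as a standalone helper, it amounts to the following sketch (function name and signature are illustrative, not part of the commit):

```c
/* find the first run of req_rbs consecutive free PRBs in [start, bwp_size);
 * returns the starting PRB, or -1 if no such run exists */
static int find_free_run(const uint8_t *rballoc_mask, int bwp_size, int start, int req_rbs)
{
  int rbStart = start;
  while (rbStart < bwp_size) {
    while (rbStart < bwp_size && !rballoc_mask[rbStart])
      rbStart++;                 /* skip blocked PRBs */
    int rbSize = 0;
    while (rbStart + rbSize < bwp_size && rballoc_mask[rbStart + rbSize] && rbSize < req_rbs)
      rbSize++;                  /* measure the free run, capped at req_rbs */
    if (rbSize == req_rbs)
      return rbStart;            /* large enough */
    rbStart += rbSize;           /* too small: continue after this run */
  }
  return -1;
}
```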
@@ -475,6 +430,72 @@ void nr_simple_dlsch_preprocessor(module_id_t module_id,
sched_ctrl->mcs = retInfo->mcs;
sched_ctrl->numDmrsCdmGrpsNoData = retInfo->numDmrsCdmGrpsNoData;
} else {
/* Check DL buffer */
/* Calculate coeff */
/* Create UE_sched list for transmission */
}
}
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
const uint16_t rnti = UE_info->rnti[UE_id];
const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
int rbStart = NRRIV2PRBOFFSET(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
// for now HARQ PID is fixed and should be the same as in post-processor
const int current_harq_pid = slot % 8;
NR_UE_harq_t *harq = &sched_ctrl->harq_processes[current_harq_pid];
/* Loop UE_sched to find max coeff and allocate transmission */
//while(n_rb_sched > 0 && UE_sched.head >= 0){
if (harq->round == 0) { // temp
/* Find max coeff from UE_sched*/
/* Find a free CCE */
const int target_ss = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
sched_ctrl->search_space = get_searchspace(sched_ctrl->active_bwp, target_ss);
uint8_t nr_of_candidates;
find_aggregation_candidates(&sched_ctrl->aggregation_level,
&nr_of_candidates,
sched_ctrl->search_space);
sched_ctrl->coreset = get_coreset(sched_ctrl->active_bwp, sched_ctrl->search_space, 1 /* dedicated */);
int cid = sched_ctrl->coreset->controlResourceSetId;
const uint16_t Y = UE_info->Y[UE_id][cid][slot];
const int m = UE_info->num_pdcch_cand[UE_id][cid];
sched_ctrl->cce_index = allocate_nr_CCEs(RC.nrmac[module_id],
sched_ctrl->active_bwp,
sched_ctrl->coreset,
sched_ctrl->aggregation_level,
Y,
m,
nr_of_candidates);
if (sched_ctrl->cce_index < 0) {
LOG_E(MAC, "%s(): could not find CCE for UE %d\n", __func__, UE_id);
return;
}
UE_info->num_pdcch_cand[UE_id][cid]++;
/* Find PUCCH occasion: if it fails, undo CCE allocation (undoing PUCCH
* allocation after CCE alloc fail would be more complex) */
const bool alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot);
if (!alloc) {
LOG_W(MAC,
"%s(): could not find PUCCH for UE %d/%04x@%d.%d\n",
__func__,
UE_id,
rnti,
frame,
slot);
UE_info->num_pdcch_cand[UE_id][cid]--;
int *cce_list = RC.nrmac[module_id]->cce_list[sched_ctrl->active_bwp->bwp_Id][cid];
for (int i = 0; i < sched_ctrl->aggregation_level; i++)
cce_list[sched_ctrl->cce_index + i] = 0;
return;
}
/* Allocate transmission */
// Time-domain allocation
sched_ctrl->time_domain_allocation = 2;
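
The /* Find max coeff */ placeholders mark where the proportional-fair metric will eventually be computed; in this commit the single UE is still scheduled unconditionally. For reference, the textbook PF metric prefers the UE whose achievable instantaneous rate is highest relative to its averaged past throughput; a sketch of that standard formula (not code from this commit):

```c
/* proportional-fair coefficient: instantaneous rate over average throughput */
static float pf_coeff(float inst_rate_bps, float avg_thr_bps)
{
  return inst_rate_bps / (avg_thr_bps + 1e-6f); /* epsilon avoids div by zero */
}

/* after serving a UE, update its average with forgetting factor alpha */
static void update_avg_thr(float *avg_thr_bps, float served_bps, float alpha)
{
  *avg_thr_bps = (1.0f - alpha) * *avg_thr_bps + alpha * served_bps;
}
```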
@@ -486,18 +507,20 @@ void nr_simple_dlsch_preprocessor(module_id_t module_id,
// Freq-domain allocation
while (rbStart < bwpSize && !rballoc_mask[rbStart]) rbStart++;
uint8_t N_PRB_DMRS =
const uint8_t N_PRB_DMRS =
getN_PRB_DMRS(sched_ctrl->active_bwp, sched_ctrl->numDmrsCdmGrpsNoData);
int nrOfSymbols = getNrOfSymbols(sched_ctrl->active_bwp,
const int nrOfSymbols = getNrOfSymbols(sched_ctrl->active_bwp,
sched_ctrl->time_domain_allocation);
uint8_t N_DMRS_SLOT = get_num_dmrs_symbols(sched_ctrl->active_bwp->bwp_Dedicated->pdsch_Config->choice.setup,
RC.nrmac[module_id]->common_channels->ServingCellConfigCommon->dmrs_TypeA_Position ,
const NR_ServingCellConfigCommon_t *scc = RC.nrmac[module_id]->common_channels->ServingCellConfigCommon;
const uint8_t N_DMRS_SLOT = get_num_dmrs_symbols(
sched_ctrl->active_bwp->bwp_Dedicated->pdsch_Config->choice.setup,
scc->dmrs_TypeA_Position,
nrOfSymbols);
int rbSize = 0;
uint32_t TBS = 0;
const int oh = 2 + (sched_ctrl->num_total_bytes >= 256)
+ 2 * (frame == (sched_ctrl->ta_frame + 10) % 1024);
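/* oh: a 2- or 3-byte MAC subheader (the L field grows to 16 bits once the
 * buffer holds >= 256 bytes), plus 2 bytes for a timing advance CE when one
 * is due in this frame */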
uint32_t TBS = 0;
do {
rbSize++;
TBS = nr_compute_tbs(nr_get_Qm_dl(sched_ctrl->mcs, sched_ctrl->mcsTableIdx),
@@ -512,13 +535,81 @@ void nr_simple_dlsch_preprocessor(module_id_t module_id,
} while (rbStart + rbSize < bwpSize && rballoc_mask[rbStart + rbSize] && TBS < sched_ctrl->num_total_bytes + oh);
sched_ctrl->rbSize = rbSize;
sched_ctrl->rbStart = rbStart;
}
n_rb_sched -= sched_ctrl->rbSize;
/* mark the corresponding RBs as used */
for (int rb = 0; rb < sched_ctrl->rbSize; rb++) {
vrb_map[rb + sched_ctrl->rbStart] = 1;
//vrb_map[rb + sched_ctrl->rbStart] = 1;
rballoc_mask[rb + sched_ctrl->rbStart] = 0;
}
}
}
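
The size computation in pf_dl() grows the allocation one PRB at a time until the transport block covers the buffered bytes plus MAC overhead, or the contiguous free run ends. Its skeleton, with tbs_for() as a hypothetical stand-in for nr_compute_tbs() and its modulation/DMRS parameters:

```c
/* smallest rbSize whose TBS fits bytes + oh, capped by the free run */
static int size_allocation(const uint8_t *rballoc_mask, int bwpSize, int rbStart,
                           uint32_t bytes, uint32_t oh)
{
  int rbSize = 0;
  uint32_t TBS = 0;
  do {
    rbSize++;
    TBS = tbs_for(rbSize); /* hypothetical: TBS in bytes for rbSize PRBs */
  } while (rbStart + rbSize < bwpSize
           && rballoc_mask[rbStart + rbSize]
           && TBS < bytes + oh);
  return rbSize;
}
```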
void nr_simple_dlsch_preprocessor(module_id_t module_id,
frame_t frame,
sub_frame_t slot) {
NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info;
AssertFatal(UE_info->num_UEs <= 1,
"%s() cannot handle more than one UE, but found %d\n",
__func__,
UE_info->num_UEs);
if (UE_info->num_UEs == 0)
return;
const int UE_id = 0;
const int CC_id = 0;
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
/* Calculate num of RBG and RBG size from UE_id=0 */
const uint16_t bwpSize = NRRIV2BW(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, 275);
uint16_t *vrb_map = RC.nrmac[module_id]->common_channels[CC_id].vrb_map;
uint8_t rballoc_mask[bwpSize];
int n_rb_sched = 0;
for (int i = 0; i < bwpSize; i++) {
// calculate mask: init with "NOT" vrb_map:
// if any RB in vrb_map is blocked (1), the current RBG will be 0
rballoc_mask[i] = !vrb_map[i];
n_rb_sched += rballoc_mask[i];
}
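/* rballoc_mask[i] == 1 marks PRB i as free; n_rb_sched counts the free
 * PRBs that pf_dl() may distribute among the UEs */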
/* Retrieve amount of data to send for this UE */
sched_ctrl->num_total_bytes = 0;
const int lcid = DL_SCH_LCID_DTCH;
const rnti_t rnti = UE_info->rnti[UE_id];
sched_ctrl->rlc_status[lcid] = mac_rlc_status_ind(module_id,
rnti,
module_id,
frame,
slot,
ENB_FLAG_YES,
MBMS_FLAG_NO,
lcid,
0,
0);
sched_ctrl->num_total_bytes += sched_ctrl->rlc_status[lcid].bytes_in_buffer;
if (sched_ctrl->num_total_bytes == 0
&& !sched_ctrl->ta_apply) /* If TA should be applied, give at least one RB */
return;
LOG_D(MAC, "[%s][%d.%d], DTCH%d->DLSCH, RLC status %d bytes TA %d\n",
__FUNCTION__,
frame,
slot,
lcid,
sched_ctrl->rlc_status[lcid].bytes_in_buffer,
sched_ctrl->ta_apply);
/* proportional fair scheduling algorithm */
pf_dl(module_id,
frame,
slot,
&UE_info->list,
n_rb_sched,
rballoc_mask,
2);
}
void nr_schedule_ue_spec(module_id_t module_id,