Commit a1d5fbea authored by Stefan

SSR 21/12/18 ulsch_scheduler_pre_processor2

parent 32eafc13
......@@ -91,7 +91,6 @@ store_dlsch_buffer(module_id_t Mod_id,
int slice_idx,
frame_t frameP,
sub_frame_t subframeP) {
int UE_id, lcid;
rnti_t rnti;
mac_rlc_status_resp_t rlc_status;
......@@ -106,7 +105,6 @@ store_dlsch_buffer(module_id_t Mod_id,
continue;
UE_template = &UE_list->UE_template[UE_PCCID(Mod_id, UE_id)][UE_id];
// clear logical channel interface variables
UE_template->dl_buffer_total = 0;
UE_template->dl_pdus_total = 0;
......@@ -121,12 +119,11 @@ store_dlsch_buffer(module_id_t Mod_id,
rnti = UE_RNTI(Mod_id, UE_id);
for (lcid = 0; lcid < MAX_NUM_LCID; ++lcid) { // loop over all the logical channels
rlc_status = mac_rlc_status_ind(Mod_id, rnti, Mod_id, frameP, subframeP,
ENB_FLAG_YES, MBMS_FLAG_NO, lcid, 0
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
,0, 0
#endif
#endif
);
UE_template->dl_buffer_info[lcid] = rlc_status.bytes_in_buffer; //storing the dlsch buffer for each logical channel
UE_template->dl_pdus_in_buffer[lcid] = rlc_status.pdus_in_buffer;
......@@ -138,8 +135,7 @@ store_dlsch_buffer(module_id_t Mod_id,
UE_template->dl_buffer_total += UE_template->dl_buffer_info[lcid]; //storing the total dlsch buffer
UE_template->dl_pdus_total += UE_template->dl_pdus_in_buffer[lcid];
#ifdef DEBUG_eNB_SCHEDULER
#ifdef DEBUG_eNB_SCHEDULER
/* note for dl_buffer_head_sdu_remaining_size_to_send[lcid] :
* 0 if head SDU has not been segmented (yet), else remaining size not already segmented and sent
*/
......@@ -152,9 +148,7 @@ store_dlsch_buffer(module_id_t Mod_id,
UE_template->dl_buffer_head_sdu_creation_time[lcid],
UE_template->dl_buffer_head_sdu_remaining_size_to_send[lcid],
UE_template->dl_buffer_head_sdu_is_segmented[lcid]);
#endif
#endif
}
......@@ -178,11 +172,8 @@ assign_rbs_required(module_id_t Mod_id,
frame_t frameP,
sub_frame_t subframe,
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
int min_rb_unit[NFAPI_CC_MAX])
{
int min_rb_unit[NFAPI_CC_MAX]) {
uint16_t TBS = 0;
int UE_id, n, i, j, CC_id, pCCid, tmp;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
slice_info_t *sli = &RC.mac[Mod_id]->slice_info;
......@@ -192,25 +183,27 @@ assign_rbs_required(module_id_t Mod_id,
// clear rb allocations across all CC_id
for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) {
if (UE_list->active[UE_id] != TRUE) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
pCCid = UE_PCCID(Mod_id, UE_id);
//update CQI information across component carriers
for (n = 0; n < UE_list->numactiveCCs[UE_id]; n++) {
CC_id = UE_list->ordered_CCids[n][UE_id];
eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
// eNB_UE_stats->dlsch_mcs1 = cmin(cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]], sli->dl[slice_idx].maxmcs);
// eNB_UE_stats->dlsch_mcs1 = cmin(cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]], sli->dl[slice_idx].maxmcs);
eNB_UE_stats->dlsch_mcs1 = cmin(cqi2mcs(UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]), sli->dl[slice_idx].maxmcs);
}
// provide the list of CCs sorted according to MCS
for (i = 0; i < UE_list->numactiveCCs[UE_id]; ++i) {
eNB_UE_stats_i = &UE_list->eNB_UE_stats[UE_list->ordered_CCids[i][UE_id]][UE_id];
for (j = i + 1; j < UE_list->numactiveCCs[UE_id]; j++) {
DevAssert(j < NFAPI_CC_MAX);
eNB_UE_stats_j = &UE_list->eNB_UE_stats[UE_list->ordered_CCids[j][UE_id]][UE_id];
if (eNB_UE_stats_j->dlsch_mcs1 > eNB_UE_stats_i->dlsch_mcs1) {
tmp = UE_list->ordered_CCids[i][UE_id];
UE_list->ordered_CCids[i][UE_id] = UE_list->ordered_CCids[j][UE_id];
......@@ -233,16 +226,13 @@ assign_rbs_required(module_id_t Mod_id,
}
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_rbs_required[CC_id][UE_id]);
LOG_D(MAC,
"[preprocessor] start RB assignement for UE %d CC_id %d dl buffer %d (RB unit %d, MCS %d, TBS %d) \n",
UE_id, CC_id,
UE_list->UE_template[pCCid][UE_id].dl_buffer_total,
nb_rbs_required[CC_id][UE_id],
eNB_UE_stats->dlsch_mcs1, TBS);
N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);
UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx] =
nb_rbs_allowed_slice(sli->dl[slice_idx].pct, N_RB_DL);
......@@ -255,6 +245,7 @@ assign_rbs_required(module_id_t Mod_id,
nb_rbs_required[CC_id][UE_id] = UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx];
break;
}
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_rbs_required[CC_id][UE_id]);
} // end of while
......@@ -263,7 +254,6 @@ assign_rbs_required(module_id_t Mod_id,
Mod_id, frameP, UE_id, CC_id, min_rb_unit[CC_id],
nb_rbs_required[CC_id][UE_id], TBS,
eNB_UE_stats->dlsch_mcs1);
sli->pre_processor_results[slice_idx].mcs[CC_id][UE_id] = eNB_UE_stats->dlsch_mcs1;
}
}
......@@ -275,7 +265,6 @@ assign_rbs_required(module_id_t Mod_id,
int
maxround(module_id_t Mod_id, uint16_t rnti, int frame,
sub_frame_t subframe, uint8_t ul_flag) {
uint8_t round, round_max = 0, UE_id;
int CC_id, harq_pid;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
......@@ -283,11 +272,10 @@ maxround(module_id_t Mod_id, uint16_t rnti, int frame,
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) {
cc = &RC.mac[Mod_id]->common_channels[CC_id];
UE_id = find_UE_id(Mod_id, rnti);
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frame ,subframe);
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frame,subframe);
round = UE_list->UE_sched_ctrl[UE_id].round[CC_id][harq_pid];
if (round > round_max) {
round_max = round;
}
......@@ -335,38 +323,33 @@ struct sort_ue_dl_params {
int slice_idx;
};
static int ue_dl_compare(const void *_a, const void *_b, void *_params)
{
static int ue_dl_compare(const void *_a, const void *_b, void *_params) {
struct sort_ue_dl_params *params = _params;
UE_list_t *UE_list = &RC.mac[params->Mod_idP]->UE_list;
int i;
int slice_idx = params->slice_idx;
int UE_id1 = *(const int *) _a;
int UE_id2 = *(const int *) _b;
int rnti1 = UE_RNTI(params->Mod_idP, UE_id1);
int pCC_id1 = UE_PCCID(params->Mod_idP, UE_id1);
int round1 = maxround(params->Mod_idP, rnti1, params->frameP, params->subframeP, 1);
int rnti2 = UE_RNTI(params->Mod_idP, UE_id2);
int pCC_id2 = UE_PCCID(params->Mod_idP, UE_id2);
int round2 = maxround(params->Mod_idP, rnti2, params->frameP, params->subframeP, 1);
int cqi1 = maxcqi(params->Mod_idP, UE_id1);
int cqi2 = maxcqi(params->Mod_idP, UE_id2);
long lcgid1 = min_lcgidpriority(params->Mod_idP, UE_id1);
long lcgid2 = min_lcgidpriority(params->Mod_idP, UE_id2);
for (i = 0; i < CR_NUM; ++i) {
switch (UE_list->sorting_criteria[slice_idx][i]) {
case CR_ROUND :
if (round1 > round2)
return -1;
if (round1 < round2)
return 1;
break;
case CR_SRB12 :
......@@ -375,41 +358,50 @@ static int ue_dl_compare(const void *_a, const void *_b, void *_params)
UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] +
UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])
return -1;
if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] +
UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2] <
UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] +
UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])
return 1;
break;
case CR_HOL :
if (UE_list-> UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max >
UE_list-> UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max)
return -1;
if (UE_list-> UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max <
UE_list-> UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max)
return 1;
break;
case CR_LC :
if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total >
UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total)
return -1;
if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total <
UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total)
return 1;
break;
case CR_CQI :
if (cqi1 > cqi2)
return -1;
if (cqi1 < cqi2)
return 1;
break;
case CR_LCP :
if (lcgid1 < lcgid2)
return -1;
if (lcgid1 > lcgid2)
return 1;
......@@ -423,7 +415,6 @@ static int ue_dl_compare(const void *_a, const void *_b, void *_params)
void decode_sorting_policy(module_id_t Mod_idP, int slice_idx) {
int i;
UE_list_t *UE_list = &RC.mac[Mod_idP]->UE_list;
uint32_t policy = RC.mac[Mod_idP]->slice_info.dl[slice_idx].sorting;
uint32_t mask = 0x0000000F;
......@@ -431,6 +422,7 @@ void decode_sorting_policy(module_id_t Mod_idP, int slice_idx) {
for (i = 0; i < CR_NUM; ++i) {
criterion = (uint16_t) (policy >> 4 * (CR_NUM - 1 - i) & mask);
if (criterion >= CR_NUM) {
LOG_W(MAC,
"Invalid criterion in slice index %d ID %d policy, revert to default policy \n",
......@@ -438,14 +430,14 @@ void decode_sorting_policy(module_id_t Mod_idP, int slice_idx) {
RC.mac[Mod_idP]->slice_info.dl[slice_idx].sorting = 0x12345;
break;
}
UE_list->sorting_criteria[slice_idx][i] = criterion;
}
}
void decode_slice_positioning(module_id_t Mod_idP,
int slice_idx,
uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX])
{
uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX]) {
uint8_t CC_id;
int RBG, start_frequency, end_frequency;
......@@ -458,6 +450,7 @@ void decode_slice_positioning(module_id_t Mod_idP,
start_frequency = RC.mac[Mod_idP]->slice_info.dl[slice_idx].pos_low;
end_frequency = RC.mac[Mod_idP]->slice_info.dl[slice_idx].pos_high;
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_idP]; ++CC_id) {
for (RBG = start_frequency; RBG <= end_frequency; ++RBG) {
slice_allocation_mask[CC_id][RBG] = 1;
......@@ -467,20 +460,20 @@ void decode_slice_positioning(module_id_t Mod_idP,
// This function sorts the UEs according to their DLSCH buffer and CQI
void sort_UEs(module_id_t Mod_idP, int slice_idx, int frameP, sub_frame_t subframeP)
{
void sort_UEs(module_id_t Mod_idP, int slice_idx, int frameP, sub_frame_t subframeP) {
int i;
int list[MAX_MOBILES_PER_ENB];
int list_size = 0;
struct sort_ue_dl_params params = {Mod_idP, frameP, subframeP, slice_idx};
UE_list_t *UE_list = &RC.mac[Mod_idP]->UE_list;
for (i = 0; i < MAX_MOBILES_PER_ENB; i++) {
if (UE_list->active[i] == FALSE) continue;
if (UE_RNTI(Mod_idP, i) == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_idP, i, slice_idx)) continue;
list[list_size] = i;
......@@ -488,12 +481,12 @@ void sort_UEs(module_id_t Mod_idP, int slice_idx, int frameP, sub_frame_t subfra
}
decode_sorting_policy(Mod_idP, slice_idx);
qsort_r(list, list_size, sizeof(int), ue_dl_compare, &params);
if (list_size) {
for (i = 0; i < list_size - 1; ++i)
UE_list->next[list[i]] = list[i + 1];
UE_list->next[list[list_size - 1]] = -1;
UE_list->head = list[0];
} else {
......@@ -503,17 +496,17 @@ void sort_UEs(module_id_t Mod_idP, int slice_idx, int frameP, sub_frame_t subfra
void dlsch_scheduler_pre_processor_partitioning(module_id_t Mod_id,
int slice_idx,
const uint8_t rbs_retx[NFAPI_CC_MAX])
{
const uint8_t rbs_retx[NFAPI_CC_MAX]) {
int UE_id, CC_id, N_RB_DL, i;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
UE_sched_ctrl *ue_sched_ctl;
uint16_t available_rbs;
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
if (UE_RNTI(Mod_id, UE_id) == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
......@@ -522,6 +515,7 @@ void dlsch_scheduler_pre_processor_partitioning(module_id_t Mod_id,
CC_id = UE_list->ordered_CCids[i][UE_id];
N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);
available_rbs = nb_rbs_allowed_slice(RC.mac[Mod_id]->slice_info.dl[slice_idx].pct, N_RB_DL);
if (rbs_retx[CC_id] < available_rbs)
ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_idx] = available_rbs - rbs_retx[CC_id];
else
......@@ -536,11 +530,9 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
sub_frame_t subframeP,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB])
{
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]) {
int UE_id, CC_id;
int i;
rnti_t rnti;
uint8_t harq_pid, round;
uint16_t available_rbs[NFAPI_CC_MAX];
......@@ -550,7 +542,6 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
int ue_count_newtx[NFAPI_CC_MAX];
int ue_count_retx[NFAPI_CC_MAX];
//uint8_t ue_retx_flag[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB];
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
UE_sched_ctrl *ue_sched_ctl;
COMMON_channels_t *cc;
......@@ -571,15 +562,18 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
// Find total UE count, and account the RBs required for retransmissions
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
rnti = UE_RNTI(Mod_id, UE_id);
if (rnti == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); ++i) {
CC_id = UE_list->ordered_CCids[i][UE_id];
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
cc = &RC.mac[Mod_id]->common_channels[CC_id];
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP ,subframeP);
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP);
round = ue_sched_ctl->round[CC_id][harq_pid];
if (nb_rbs_required[CC_id][UE_id] > 0) {
......@@ -603,46 +597,53 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; ++CC_id) {
if (UE_list->head < 0) continue; // no UEs in list
// max_rbs_allowed_slice is saved in every UE, so take it from the first one
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_list->head];
available_rbs[CC_id] = ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_idx];
}
switch (RC.mac[Mod_id]->slice_info.dl[slice_idx].accounting) {
// If greedy scheduling, try to account all the required RBs
case POL_GREEDY:
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
rnti = UE_RNTI(Mod_id, UE_id);
if (rnti == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
if (available_rbs[CC_id] == 0) continue;
nb_rbs_accounted[CC_id][UE_id] = cmin(nb_rbs_required[CC_id][UE_id], available_rbs[CC_id]);
available_rbs[CC_id] -= nb_rbs_accounted[CC_id][UE_id];
}
}
break;
// Use the old, fair algorithm
// Loop over all active UEs and account the avg number of RBs to each UE, based on all non-retx UEs.
// case POL_FAIR:
default:
// FIXME: This is not ideal, why loop on UEs to find average_rbs_per_user[], that is per-CC?
// TODO: Look how to loop on active CCs only without using the UE_num_active_CC() function.
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
rnti = UE_RNTI(Mod_id, UE_id);
if (rnti == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); ++i) {
CC_id = UE_list->ordered_CCids[i][UE_id];
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
available_rbs[CC_id] = ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_idx];
......@@ -662,8 +663,11 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
// extend nb_rbs_required to capture per LCID RB required
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
rnti = UE_RNTI(Mod_id, UE_id);
if (rnti == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) {
......@@ -671,25 +675,26 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
nb_rbs_accounted[CC_id][UE_id] = cmin(average_rbs_per_user[CC_id], nb_rbs_required[CC_id][UE_id]);
}
}
break;
}
// Check retransmissions
// TODO: Do this once at the beginning
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
rnti = UE_RNTI(Mod_id, UE_id);
if (rnti == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
cc = &RC.mac[Mod_id]->common_channels[CC_id];
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP ,subframeP);
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP);
round = ue_sched_ctl->round[CC_id][harq_pid];
// control channel or retransmission
......@@ -708,17 +713,17 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX])
{
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]) {
int UE_id, CC_id;
int i;
#ifdef TM5
#ifdef TM5
uint8_t transmission_mode;
#endif
#endif
uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX];
int N_RBG[NFAPI_CC_MAX];
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
decode_slice_positioning(Mod_id, slice_idx, slice_allocation_mask);
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) {
......@@ -728,17 +733,19 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
// Try to allocate accounted RBs
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
if (UE_RNTI(Mod_id, UE_id) == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
nb_rbs_remaining[CC_id][UE_id] = nb_rbs_accounted[CC_id][UE_id];
#ifdef TM5
#ifdef TM5
transmission_mode = get_tmode(Mod_id, CC_id, UE_id);
#endif
#endif
if (nb_rbs_required[CC_id][UE_id] > 0)
LOG_D(MAC,
......@@ -764,14 +771,12 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
slice_allocation_mask,
MIMO_mode_indicator);
#ifdef TM5
#ifdef TM5
// data chanel TM5: to be revisited
if ((round == 0) &&
(transmission_mode == 5) &&
(ue_sched_ctl->dl_pow_off[CC_id] != 1)) {
for (j = 0; j < N_RBG[CC_id]; j += 2) {
if ((((j == (N_RBG[CC_id] - 1))
&& (rballoc_sub[CC_id][j] == 0)
&& (ue_sched_ctl->
......@@ -783,17 +788,17 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
[CC_id][j + 1] == 0)))
&& (nb_rbs_remaining[CC_id][UE_id]
> 0)) {
for (i = UE_list->next[UE_id + 1]; i >= 0;
i = UE_list->next[i]) {
UE_id2 = i;
rnti2 = UE_RNTI(Mod_id, UE_id2);
ue_sched_ctl2 =
&UE_list->UE_sched_ctrl[UE_id2];
round2 = ue_sched_ctl2->round[CC_id];
if (rnti2 == NOT_A_RNTI)
continue;
if (UE_list->
UE_sched_ctrl
[UE_id2].ul_out_of_sync == 1)
......@@ -813,7 +818,6 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
== 5)
&& (ue_sched_ctl->
dl_pow_off[CC_id] != 1)) {
if ((((j == (N_RBG[CC_id] - 1))
&&
(ue_sched_ctl->rballoc_sub_UE
......@@ -828,13 +832,11 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
(nb_rbs_remaining
[CC_id]
[UE_id2] > 0)) {
if ((((eNB_UE_stats2->
DL_pmi_single ^
eNB_UE_stats1->
DL_pmi_single)
<< (14 - j)) & 0xc000) == 0x4000) { //MU-MIMO only for 25 RBs configuration
rballoc_sub[CC_id][j] = 1;
ue_sched_ctl->
rballoc_sub_UE[CC_id]
......@@ -866,12 +868,10 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
dl_pow_off[CC_id]
= 0;
if ((j == N_RBG[CC_id] - 1)
&& ((N_RB_DL == 25)
|| (N_RB_DL ==
50))) {
nb_rbs_remaining
[CC_id][UE_id] =
nb_rbs_remaining
......@@ -897,7 +897,6 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
min_rb_unit[CC_id]
- 1;
} else {
nb_rbs_remaining
[CC_id][UE_id] =
nb_rbs_remaining
......@@ -916,7 +915,6 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
ue_sched_ctl2->pre_nb_available_rbs
[CC_id] + 4;
}
break;
}
}
......@@ -925,7 +923,8 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
}
}
}
#endif
#endif
}
}
}
......@@ -937,18 +936,18 @@ void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX])
{
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]) {
int UE_id, CC_id;
int i;
#ifdef TM5
#ifdef TM5
uint8_t transmission_mode;
#endif
#endif
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
int N_RBG[NFAPI_CC_MAX];
slice_info_t *sli = &RC.mac[Mod_id]->slice_info;
uint8_t (*slice_allocation_mask)[N_RBG_MAX] = sli->pre_processor_results[slice_idx].slice_allocation_mask;
decode_slice_positioning(Mod_id, slice_idx, slice_allocation_mask);
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) {
......@@ -958,20 +957,23 @@ void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
// Remaining RBs are allocated to high priority UEs
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
if (UE_RNTI(Mod_id, UE_id) == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
nb_rbs_remaining[CC_id][UE_id] =
nb_rbs_required[CC_id][UE_id] - nb_rbs_accounted[CC_id][UE_id] + nb_rbs_remaining[CC_id][UE_id];
if (nb_rbs_remaining[CC_id][UE_id] < 0)
abort();
#ifdef TM5
#ifdef TM5
transmission_mode = get_tmode(Mod_id, CC_id, UE_id);
#endif
#endif
if (nb_rbs_required[CC_id][UE_id] > 0)
LOG_D(MAC,
......@@ -997,14 +999,12 @@ void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
slice_allocation_mask,
MIMO_mode_indicator);
#ifdef TM5
#ifdef TM5
// data chanel TM5: to be revisited
if ((round == 0) &&
(transmission_mode == 5) &&
(ue_sched_ctl->dl_pow_off[CC_id] != 1)) {
for (j = 0; j < N_RBG[CC_id]; j += 2) {
if ((((j == (N_RBG[CC_id] - 1))
&& (rballoc_sub[CC_id][j] == 0)
&& (ue_sched_ctl->
......@@ -1016,17 +1016,17 @@ void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
[CC_id][j + 1] == 0)))
&& (nb_rbs_remaining[CC_id][UE_id]
> 0)) {
for (i = UE_list->next[UE_id + 1]; i >= 0;
i = UE_list->next[i]) {
UE_id2 = i;
rnti2 = UE_RNTI(Mod_id, UE_id2);
ue_sched_ctl2 =
&UE_list->UE_sched_ctrl[UE_id2];
round2 = ue_sched_ctl2->round[CC_id];
if (rnti2 == NOT_A_RNTI)
continue;
if (UE_list->
UE_sched_ctrl
[UE_id2].ul_out_of_sync == 1)
......@@ -1046,7 +1046,6 @@ void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
== 5)
&& (ue_sched_ctl->
dl_pow_off[CC_id] != 1)) {
if ((((j == (N_RBG[CC_id] - 1))
&&
(ue_sched_ctl->rballoc_sub_UE
......@@ -1061,13 +1060,11 @@ void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
(nb_rbs_remaining
[CC_id]
[UE_id2] > 0)) {
if ((((eNB_UE_stats2->
DL_pmi_single ^
eNB_UE_stats1->
DL_pmi_single)
<< (14 - j)) & 0xc000) == 0x4000) { //MU-MIMO only for 25 RBs configuration
rballoc_sub[CC_id][j] = 1;
ue_sched_ctl->
rballoc_sub_UE[CC_id]
......@@ -1099,12 +1096,10 @@ void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
dl_pow_off[CC_id]
= 0;
if ((j == N_RBG[CC_id] - 1)
&& ((N_RB_DL == 25)
|| (N_RB_DL ==
50))) {
nb_rbs_remaining
[CC_id][UE_id] =
nb_rbs_remaining
......@@ -1130,7 +1125,6 @@ void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
min_rb_unit[CC_id]
- 1;
} else {
nb_rbs_remaining
[CC_id][UE_id] =
nb_rbs_remaining
......@@ -1158,7 +1152,8 @@ void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
}
}
}
#endif
#endif
}
}
}
......@@ -1170,24 +1165,19 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
frame_t frameP,
sub_frame_t subframeP,
int *mbsfn_flag,
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX])
{
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]) {
int UE_id;
uint8_t CC_id;
uint16_t i, j;
int min_rb_unit[NFAPI_CC_MAX];
slice_info_t *sli = &RC.mac[Mod_id]->slice_info;
uint16_t (*nb_rbs_required)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_idx].nb_rbs_required;
uint16_t (*nb_rbs_accounted)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_idx].nb_rbs_accounted;
uint16_t (*nb_rbs_remaining)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_idx].nb_rbs_remaining;
uint8_t (*MIMO_mode_indicator)[N_RBG_MAX] = sli->pre_processor_results[slice_idx].MIMO_mode_indicator;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
UE_sched_ctrl *ue_sched_ctl;
// int rrc_status = RRC_IDLE;
// int rrc_status = RRC_IDLE;
#ifdef TM5
int harq_pid1 = 0;
int round1 = 0, round2 = 0;
......@@ -1198,7 +1188,6 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
LTE_eNB_UE_stats *eNB_UE_stats2 = NULL;
UE_sched_ctrl *ue_sched_ctl1, *ue_sched_ctl2;
#endif
// Initialize scheduling information for all active UEs
memset(&sli->pre_processor_results[slice_idx], 0, sizeof(sli->pre_processor_results[slice_idx]));
// FIXME: After the memset above, some of the resets in reset() are redundant
......@@ -1208,17 +1197,13 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
rballoc_sub,
MIMO_mode_indicator,
mbsfn_flag); // FIXME: Not sure if useful
// STATUS
// Store the DLSCH buffer for each logical channel
store_dlsch_buffer(Mod_id, slice_idx, frameP, subframeP);
// Calculate the number of RBs required by each UE on the basis of logical channel's buffer
assign_rbs_required(Mod_id, slice_idx, frameP, subframeP, nb_rbs_required, min_rb_unit);
// Sorts the user on the basis of dlsch logical channel buffer and CQI
sort_UEs(Mod_id, slice_idx, frameP, subframeP);
// ACCOUNTING
// This procedure decides the number of RBs to allocate
dlsch_scheduler_pre_processor_accounting(Mod_id, slice_idx, frameP, subframeP,
......@@ -1248,6 +1233,7 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
}
#ifdef TM5
// This has to be revisited!!!!
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) {
COMMON_channels_t *cc = &RC.mac[Mod_id]->common_channels[CC_id];
......@@ -1287,13 +1273,13 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions =
PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions +
1;
}
#endif
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
//PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].dl_pow_off = dl_pow_off[UE_id];
......@@ -1330,9 +1316,7 @@ dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX],
int *mbsfn_flag)
{
int *mbsfn_flag) {
int UE_id;
uint8_t CC_id;
int i, j;
......@@ -1340,17 +1324,15 @@ dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
UE_sched_ctrl *ue_sched_ctl;
int N_RB_DL, RBGsize, RBGsize_last;
int N_RBG[NFAPI_CC_MAX];
#ifdef SF0_LIMIT
int sf0_lower, sf0_upper;
#endif
rnti_t rnti;
uint8_t *vrb_map;
COMMON_channels_t *cc;
//
for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; CC_id++) {
//
for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; CC_id++) {
LOG_D(MAC, "Running preprocessor for UE %d (%x)\n", UE_id,(int)(UE_RNTI(module_idP, UE_id)));
// initialize harq_pid and round
cc = &RC.mac[module_idP]->common_channels[CC_id];
......@@ -1361,13 +1343,10 @@ dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
continue;
for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; ++UE_id) {
UE_list = &RC.mac[module_idP]->UE_list;
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
rnti = UE_RNTI(module_idP, UE_id);
if (rnti == NOT_A_RNTI)
continue;
......@@ -1378,11 +1357,11 @@ dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
continue;
LOG_D(MAC, "Running preprocessor for UE %d (%x)\n", UE_id, rnti);
// initialize harq_pid and round
if (ue_sched_ctl->ta_timer)
ue_sched_ctl->ta_timer--;
/*
eNB_UE_stats *eNB_UE_stats;
......@@ -1440,7 +1419,6 @@ dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_TIMING_ADVANCE,ue_sched_ctl->ta_update);
}
*/
nb_rbs_required[CC_id][UE_id] = 0;
ue_sched_ctl->pre_nb_available_rbs[CC_id] = 0;
ue_sched_ctl->dl_pow_off[CC_id] = 2;
......@@ -1451,32 +1429,38 @@ dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
}
N_RB_DL = to_prb(RC.mac[module_idP]->common_channels[CC_id].mib->message.dl_Bandwidth);
#ifdef SF0_LIMIT
switch (N_RBG[CC_id]) {
case 6:
sf0_lower = 0;
sf0_upper = 5;
break;
case 8:
sf0_lower = 2;
sf0_upper = 5;
break;
case 13:
sf0_lower = 4;
sf0_upper = 7;
break;
case 17:
sf0_lower = 7;
sf0_upper = 9;
break;
case 25:
sf0_lower = 11;
sf0_upper = 13;
break;
default:
AssertFatal(1 == 0, "unsupported RBs (%d)\n", N_RB_DL);
}
#endif
switch (N_RB_DL) {
......@@ -1484,37 +1468,43 @@ dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
RBGsize = 1;
RBGsize_last = 1;
break;
case 15:
RBGsize = 2;
RBGsize_last = 1;
break;
case 25:
RBGsize = 2;
RBGsize_last = 1;
break;
case 50:
RBGsize = 3;
RBGsize_last = 2;
break;
case 75:
RBGsize = 4;
RBGsize_last = 3;
break;
case 100:
RBGsize = 4;
RBGsize_last = 4;
break;
default:
AssertFatal(1 == 0, "unsupported RBs (%d)\n", N_RB_DL);
}
vrb_map = RC.mac[module_idP]->common_channels[CC_id].vrb_map;
// Initialize Subbands according to VRB map
for (i = 0; i < N_RBG[CC_id]; i++) {
int rb_size = i == N_RBG[CC_id] - 1 ? RBGsize_last : RBGsize;
#ifdef SF0_LIMIT
// for avoiding 6+ PRBs around DC in subframe 0 (avoid excessive errors)
/* TODO: make it proper - allocate those RBs, do not "protect" them, but
* compute number of available REs and limit MCS according to the
......@@ -1522,6 +1512,7 @@ dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
*/
if (subframeP == 0 && i >= sf0_lower && i <= sf0_upper)
rballoc_sub[CC_id][i] = 1;
#endif
// for SI-RNTI,RA-RNTI and P-RNTI allocations
......@@ -1532,6 +1523,7 @@ dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
break;
}
}
//LOG_D(MAC, "Frame %d Subframe %d CC_id %d RBG %i : rb_alloc %d\n",
//frameP, subframeP, CC_id, i, rballoc_sub[CC_id][i]);
MIMO_mode_indicator[CC_id][i] = 2;
......@@ -1550,8 +1542,7 @@ dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id,
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX])
{
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]) {
int i;
int tm = get_tmode(Mod_id, CC_id, UE_id);
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
......@@ -1559,12 +1550,16 @@ dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id,
int N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);
for (i = 0; i < N_RBG; i++) {
if (rballoc_sub[CC_id][i] != 0) continue;
if (ue_sched_ctl->rballoc_sub_UE[CC_id][i] != 0) continue;
if (nb_rbs_remaining[CC_id][UE_id] <= 0) continue;
if (ue_sched_ctl->pre_nb_available_rbs[CC_id] >= nb_rbs_required[CC_id][UE_id]) continue;
if (ue_sched_ctl->dl_pow_off[CC_id] == 0) continue;
if (slice_allocation_mask[CC_id][i] == 0) continue;
if ((i == N_RBG - 1) && ((N_RB_DL == 25) || (N_RB_DL == 50))) {
......@@ -1573,9 +1568,11 @@ dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id,
rballoc_sub[CC_id][i] = 1;
ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 1;
MIMO_mode_indicator[CC_id][i] = 1;
if (tm == 5) {
ue_sched_ctl->dl_pow_off[CC_id] = 1;
}
nb_rbs_remaining[CC_id][UE_id] = nb_rbs_remaining[CC_id][UE_id] - min_rb_unit + 1;
ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit - 1;
}
......@@ -1585,9 +1582,11 @@ dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id,
rballoc_sub[CC_id][i] = 1;
ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 1;
MIMO_mode_indicator[CC_id][i] = 1;
if (tm == 5) {
ue_sched_ctl->dl_pow_off[CC_id] = 1;
}
nb_rbs_remaining[CC_id][UE_id] = nb_rbs_remaining[CC_id][UE_id] - min_rb_unit;
ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit;
}
......@@ -1603,9 +1602,9 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
int frameP,
sub_frame_t subframeP,
unsigned char sched_subframeP,
uint16_t *first_rb)
{
uint16_t UE_id, n;
uint16_t *first_rb) {
int UE_id;
uint16_t n;
uint8_t CC_id, harq_pid;
uint16_t nb_allocated_rbs[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB];
uint16_t total_allocated_rbs[NFAPI_CC_MAX];
......@@ -1621,19 +1620,16 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
rnti_t rntiTable[MAX_MOBILES_PER_ENB]; // Rnti array => Add SSR 12-2018
bool continueTable[MAX_MOBILES_PER_ENB]; // Loop continue flag array => Add SSR 12-2018
bool sliceMember; // Slice membership flag => Add SSR 12-2018
LOG_D(MAC, "In ulsch_preprocessor: assign max mcs min rb\n");
// maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB
assign_max_mcs_min_rb(module_idP, slice_idx, frameP, subframeP, first_rb);
LOG_D(MAC, "In ulsch_preprocessor: sort ue \n");
// sort ues
sort_ue_ul(module_idP, frameP, subframeP);
// we need to distribute RBs among UEs
// step1: reset the vars
uint8_t CC_nb = (uint8_t) RC.nb_mac_CC[module_idP];
for (CC_id = 0; CC_id < CC_nb; CC_id++) {
total_allocated_rbs[CC_id] = 0;
total_remaining_rbs[CC_id] = 0;
......@@ -1655,11 +1651,13 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
rntiTable[UE_id] = UE_RNTI(module_idP, UE_id);
sliceMember = ue_ul_slice_membership(module_idP, UE_id, slice_idx);
continueTable[UE_id] = (rntiTable[UE_id] == NOT_A_RNTI || UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1 || !sliceMember);
// This is not the actual CC_id in the list
if (sliceMember) {
for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
CC_id = UE_list->ordered_ULCCids[n][UE_id];
UE_template = &UE_list->UE_template[CC_id][UE_id];
if (UE_template->pre_allocated_nb_rb_ul[slice_idx] > 0) {
total_ue_count[CC_id]++;
}
......@@ -1668,30 +1666,31 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
}
LOG_D(MAC, "In ulsch_preprocessor: step2 \n");
// step 2: calculate the average rb per UE
for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
if (continueTable[UE_id]) continue;
LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x\n", UE_id,
rntiTable[UE_id]);
for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x CCid %d\n", UE_id, rntiTable[UE_id], CC_id);
/*
if((mac_xface->get_nCCE_max(module_idP,CC_id,3,subframeP) - nCCE_to_be_used[CC_id]) > (1<<aggregation)) {
nCCE_to_be_used[CC_id] = nCCE_to_be_used[CC_id] + (1<<aggregation);
max_num_ue_to_be_scheduled+=1;
} */
N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth);
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx] =
nb_rbs_allowed_slice(sli->ul[slice_idx].pct, N_RB_UL);
first_rb_offset = UE_list->first_rb_offset[CC_id][slice_idx];
available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx],
N_RB_UL - first_rb[CC_id] - first_rb_offset);
if (available_rbs < 0)
available_rbs = 0;
......@@ -1707,6 +1706,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
"[eNB %d] frame %d subframe %d: UE %d CC %d: can't get average rb per user (should not be here)\n",
module_idP, frameP, subframeP, UE_id, CC_id);
}
if (total_ue_count[CC_id] > 0) {
LOG_D(MAC, "[eNB %d] Frame %d subframe %d: total ue to be scheduled %d\n",
module_idP, frameP, subframeP, total_ue_count[CC_id]);
......@@ -1747,6 +1747,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
if (continueTable[UE_id]) continue;
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
......@@ -1775,14 +1776,13 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
module_idP, UE_id, CC_id, UE_template->pre_allocated_nb_rb_ul[slice_idx]);
}
}
return;
}
void
assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP,
sub_frame_t subframeP, uint16_t * first_rb)
{
sub_frame_t subframeP, uint16_t *first_rb) {
int i;
uint16_t n, UE_id;
uint8_t CC_id;
......@@ -1792,7 +1792,6 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP,
eNB_MAC_INST *eNB = RC.mac[module_idP];
UE_list_t *UE_list = &eNB->UE_list;
slice_info_t *sli = &RC.mac[module_idP]->slice_info;
UE_TEMPLATE *UE_template;
UE_sched_ctrl *ue_sched_ctl;
int Ncp;
......@@ -1807,8 +1806,10 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP,
if (rnti == NOT_A_RNTI)
continue;
if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
continue;
if (!ue_ul_slice_membership(module_idP, i, slice_idx))
continue;
......@@ -1826,32 +1827,27 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP,
for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
AssertFatal(CC_id < RC.nb_mac_CC[module_idP],
"CC_id %u should be < %u, loop n=%u < numactiveULCCs[%u]=%u",
CC_id, NFAPI_CC_MAX, n, UE_id,
UE_list->numactiveULCCs[UE_id]);
UE_template = &UE_list->UE_template[CC_id][UE_id];
UE_template->pre_assigned_mcs_ul = mcs;
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
Ncp = RC.mac[module_idP]->common_channels[CC_id].Ncp;
N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth);
ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx] =
nb_rbs_allowed_slice(sli->ul[slice_idx].pct, N_RB_UL);
int bytes_to_schedule = UE_template->estimated_ul_buffer - UE_template->scheduled_ul_bytes;
if (bytes_to_schedule < 0) bytes_to_schedule = 0;
int bits_to_schedule = bytes_to_schedule * 8;
// if this UE has UL traffic
if (bits_to_schedule > 0) {
tbs = get_TBS_UL(UE_template->pre_assigned_mcs_ul, 3) << 3; // 1 or 2 PRB with cqi enabled does not work well!
rb_table_index = 2;
// fixme: set use_srs flag
tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0);
......@@ -1880,6 +1876,7 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP,
if (rb_table[rb_table_index] > (available_rbs - 1)) {
rb_table_index--;
}
// 1 or 2 PRB with cqi enabled does not work well
if (rb_table[rb_table_index] < 3) {
rb_table_index = 2; //3PRB
......@@ -1918,19 +1915,15 @@ struct sort_ue_ul_params {
int subframeP;
};
static int ue_ul_compare(const void *_a, const void *_b, void *_params)
{
static int ue_ul_compare(const void *_a, const void *_b, void *_params) {
struct sort_ue_ul_params *params = _params;
UE_list_t *UE_list = &RC.mac[params->module_idP]->UE_list;
int UE_id1 = *(const int *) _a;
int UE_id2 = *(const int *) _b;
int rnti1 = UE_RNTI(params->module_idP, UE_id1);
int pCCid1 = UE_PCCID(params->module_idP, UE_id1);
int round1 = maxround(params->module_idP, rnti1, params->frameP,
params->subframeP, 1);
int rnti2 = UE_RNTI(params->module_idP, UE_id2);
int pCCid2 = UE_PCCID(params->module_idP, UE_id2);
int round2 = maxround(params->module_idP, rnti2, params->frameP,
......@@ -1938,29 +1931,36 @@ static int ue_ul_compare(const void *_a, const void *_b, void *_params)
if (round1 > round2)
return -1;
if (round1 < round2)
return 1;
if (UE_list->UE_template[pCCid1][UE_id1].ul_buffer_info[LCGID0] >
UE_list->UE_template[pCCid2][UE_id2].ul_buffer_info[LCGID0])
return -1;
if (UE_list->UE_template[pCCid1][UE_id1].ul_buffer_info[LCGID0] <
UE_list->UE_template[pCCid2][UE_id2].ul_buffer_info[LCGID0])
return 1;
int bytes_to_schedule1 = UE_list->UE_template[pCCid1][UE_id1].estimated_ul_buffer - UE_list->UE_template[pCCid1][UE_id1].scheduled_ul_bytes;
if (bytes_to_schedule1 < 0) bytes_to_schedule1 = 0;
int bytes_to_schedule2 = UE_list->UE_template[pCCid2][UE_id2].estimated_ul_buffer - UE_list->UE_template[pCCid2][UE_id2].scheduled_ul_bytes;
if (bytes_to_schedule2 < 0) bytes_to_schedule2 = 0;
if (bytes_to_schedule1 > bytes_to_schedule2)
return -1;
if (bytes_to_schedule1 < bytes_to_schedule2)
return 1;
if (UE_list->UE_template[pCCid1][UE_id1].pre_assigned_mcs_ul >
UE_list->UE_template[pCCid2][UE_id2].pre_assigned_mcs_ul)
return -1;
if (UE_list->UE_template[pCCid1][UE_id1].pre_assigned_mcs_ul <
UE_list->UE_template[pCCid2][UE_id2].pre_assigned_mcs_ul)
return 1;
......@@ -1968,21 +1968,21 @@ static int ue_ul_compare(const void *_a, const void *_b, void *_params)
return 0;
}
void sort_ue_ul(module_id_t module_idP, int frameP, sub_frame_t subframeP)
{
void sort_ue_ul(module_id_t module_idP, int frameP, sub_frame_t subframeP) {
int i;
int list[MAX_MOBILES_PER_ENB];
int list_size = 0;
int rnti;
struct sort_ue_ul_params params = { module_idP, frameP, subframeP };
UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;
for (i = 0; i < MAX_MOBILES_PER_ENB; i++) {
if (UE_list->active[i] == FALSE)
continue;
if ((rnti = UE_RNTI(module_idP, i)) == NOT_A_RNTI)
continue;
if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
continue;
......@@ -1995,6 +1995,7 @@ void sort_ue_ul(module_id_t module_idP, int frameP, sub_frame_t subframeP)
if (list_size) {
for (i = 0; i < list_size - 1; i++)
UE_list->next_ul[list[i]] = list[i + 1];
UE_list->next_ul[list[list_size - 1]] = -1;
UE_list->head_ul = list[0];
} else {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment