Commit 8f2d7970 authored by Robert Schmidt

MAC: change all slice_id_t to slice index variables

parent 87b8b6e8
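The rename is easier to follow with the two notions spelled out: the slice arrays in slice_info_t (sli->dl[], sli->ul[]) are walked with a plain integer index running from 0 to n_dl-1 (or n_ul-1), while the slice_id_t value now lives in an explicit id field of each entry and is only used to identify the slice, for example in log output. The sketch below is not part of the commit; the struct is trimmed down and the helper find_dl_slice_idx() is a hypothetical illustration of how an external slice ID would be mapped back to an array index.

/* Minimal standalone sketch (not OAI code): slices are addressed by index,
 * each entry carries its ID only for identification/logging. */
typedef int slice_id_t;

typedef struct {
  slice_id_t id;   /* externally visible slice identifier */
  float pct;       /* RB share of this slice */
} slice_dl_entry_t;

typedef struct {
  int n_dl;                  /* number of configured DL slices */
  slice_dl_entry_t dl[10];   /* indexed by slice index, not by slice ID */
} slice_info_t;

/* Hypothetical helper: return the index of the DL slice with the given ID,
 * or -1 if no such slice is configured. */
static int find_dl_slice_idx(const slice_info_t *sli, slice_id_t id)
{
  for (int i = 0; i < sli->n_dl; ++i)
    if (sli->dl[i].id == id)
      return i;
  return -1;
}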
......@@ -423,8 +423,8 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
}
for (i = 0; i < sli->n_dl; i++) {
if (sli->dl[i].pct < 0) {
LOG_W(MAC, "[eNB %d] frame %d subframe %d:invalid slice %d percentage %f. resetting to zero",
module_idP, frameP, subframeP, i, sli->dl[i].pct);
LOG_W(MAC, "[eNB %d][SLICE %d][DL] frame %d subframe %d: invalid percentage %f. resetting to zero",
module_idP, sli->dl[i].id, frameP, subframeP, sli->dl[i].pct);
sli->dl[i].pct = 0;
}
sli->tot_pct_dl += sli->dl[i].pct;
......@@ -465,7 +465,7 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
sli->dl[i].sched_cb = dlsym(NULL, sli->dl[i].sched_name);
sli->dl[i].update_sched = 0;
sli->dl[i].update_sched_current = 0;
LOG_I(MAC, "update dl scheduler slice %d\n", i);
LOG_I(MAC, "update dl scheduler slice index %d ID %d\n", i, sli->dl[i].id);
}
if (sli->tot_pct_dl <= 1.0) { // the new total RB share is within the range
......@@ -487,7 +487,8 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
if (sli->dl[i].pct_current != sli->dl[i].pct) { // new slice percentage
LOG_I(MAC,
"[eNB %d][SLICE %d][DL] frame %d subframe %d: total percentage %f-->%f, slice RB percentage has changed: %f-->%f\n",
module_idP, i, frameP, subframeP, sli->tot_pct_dl_current, sli->tot_pct_dl,
module_idP, sli->dl[i].id, frameP, subframeP,
sli->tot_pct_dl_current, sli->tot_pct_dl,
sli->dl[i].pct_current, sli->dl[i].pct);
sli->tot_pct_dl_current = sli->tot_pct_dl;
sli->dl[i].pct_current = sli->dl[i].pct;
......@@ -497,11 +498,12 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
if (sli->dl[i].maxmcs_current != sli->dl[i].maxmcs) {
if ((sli->dl[i].maxmcs >= 0) && (sli->dl[i].maxmcs < 29)) {
LOG_I(MAC, "[eNB %d][SLICE %d][DL] frame %d subframe %d: slice MAX MCS has changed: %d-->%d\n",
module_idP, i, frameP, subframeP, sli->dl[i].maxmcs_current, sli->dl[i].maxmcs);
module_idP, sli->dl[i].id, frameP, subframeP,
sli->dl[i].maxmcs_current, sli->dl[i].maxmcs);
sli->dl[i].maxmcs_current = sli->dl[i].maxmcs;
} else {
LOG_W(MAC, "[eNB %d][SLICE %d][DL] invalid slice max mcs %d, revert the previous value %d\n",
module_idP, i, sli->dl[i].maxmcs, sli->dl[i].maxmcs_current);
module_idP, sli->dl[i].id, sli->dl[i].maxmcs, sli->dl[i].maxmcs_current);
sli->dl[i].maxmcs = sli->dl[i].maxmcs_current;
}
}
......@@ -509,7 +511,7 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
// check if there is a new scheduler, and log to the console
if (sli->dl[i].update_sched_current != sli->dl[i].update_sched) {
LOG_I(MAC, "[eNB %d][SLICE %d][DL] frame %d subframe %d: DL scheduler for this slice is updated: %s \n",
module_idP, i, frameP, subframeP, sli->dl[i].sched_name);
module_idP, sli->dl[i].id, frameP, subframeP, sli->dl[i].sched_name);
sli->dl[i].update_sched_current = sli->dl[i].update_sched;
}
......@@ -519,7 +521,7 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
if (sli->n_dl == sli->n_dl_current) {
LOG_W(MAC,
"[eNB %d][SLICE %d][DL] invalid total RB share (%f->%f), reduce proportionally the RB share by 0.1\n",
module_idP, i, sli->tot_pct_dl_current, sli->tot_pct_dl);
module_idP, sli->dl[i].id, sli->tot_pct_dl_current, sli->tot_pct_dl);
if (sli->dl[i].pct >= sli->avg_pct_dl) {
sli->dl[i].pct -= 0.1;
sli->tot_pct_dl -= 0.1;
......@@ -527,7 +529,7 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
} else {
LOG_W(MAC,
"[eNB %d][SLICE %d][DL] invalid total RB share (%f->%f), revert the number of slice to its previous value (%d->%d)\n",
module_idP, i, sli->tot_pct_dl_current, sli->tot_pct_dl,
module_idP, sli->dl[i].id, sli->tot_pct_dl_current, sli->tot_pct_dl,
sli->n_dl, sli->n_dl_current);
sli->n_dl = sli->n_dl_current;
sli->dl[i].pct = sli->dl[i].pct_current;
......@@ -539,7 +541,7 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
sli->dl[i].pos_low < 0 ||
sli->dl[i].pos_high > N_RBG_MAX) {
LOG_W(MAC, "[eNB %d][SLICE %d][DL] invalid slicing position (%d-%d), using previous values (%d-%d)\n",
module_idP, i,
module_idP, sli->dl[i].id,
sli->dl[i].pos_low, sli->dl[i].pos_high,
sli->dl[i].pos_low_current, sli->dl[i].pos_high_current);
sli->dl[i].pos_low = sli->dl[i].pos_low_current;
......@@ -547,12 +549,12 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
} else {
if (sli->dl[i].pos_low_current != sli->dl[i].pos_low) {
LOG_N(MAC, "[eNB %d][SLICE %d][DL] frame %d subframe %d: start frequency has changed (%d-->%d)\n",
module_idP, i, frameP, subframeP, sli->dl[i].pos_low_current, sli->dl[i].pos_low);
module_idP, sli->dl[i].id, frameP, subframeP, sli->dl[i].pos_low_current, sli->dl[i].pos_low);
sli->dl[i].pos_low_current = sli->dl[i].pos_low;
}
if (sli->dl[i].pos_high_current != sli->dl[i].pos_high) {
LOG_N(MAC, "[eNB %d][SLICE %d][DL] frame %d subframe %d: end frequency has changed (%d-->%d)\n",
module_idP, i, frameP, subframeP, sli->dl[i].pos_high_current, sli->dl[i].pos_high);
module_idP, sli->dl[i].id, frameP, subframeP, sli->dl[i].pos_high_current, sli->dl[i].pos_high);
sli->dl[i].pos_high_current = sli->dl[i].pos_high;
}
}
......@@ -560,7 +562,7 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
// Check for new sorting policy
if (sli->dl[i].sorting_current != sli->dl[i].sorting) {
LOG_I(MAC, "[eNB %d][SLICE %d][DL] frame %d subframe %d: UE sorting policy has changed (%x-->%x)\n",
module_idP, i, frameP, subframeP, sli->dl[i].sorting_current, sli->dl[i].sorting);
module_idP, sli->dl[i].id, frameP, subframeP, sli->dl[i].sorting_current, sli->dl[i].sorting);
sli->dl[i].sorting_current = sli->dl[i].sorting;
}
......@@ -569,11 +571,11 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
if (sli->dl[i].isol != 1 && sli->dl[i].isol != 0) {
LOG_W(MAC,
"[eNB %d][SLICE %d][DL] frame %d subframe %d: invalid slice isolation setting (%d), revert to its previous value (%d)\n",
module_idP, i, frameP, subframeP, sli->dl[i].isol, sli->dl[i].isol_current);
module_idP, sli->dl[i].id, frameP, subframeP, sli->dl[i].isol, sli->dl[i].isol_current);
sli->dl[i].isol = sli->dl[i].isol_current;
} else {
LOG_I(MAC, "[eNB %d][SLICE %d][DL] frame %d subframe %d: slice isolation setting has changed (%x-->%x)\n",
module_idP, i, frameP, subframeP, sli->dl[i].isol_current, sli->dl[i].isol);
module_idP, sli->dl[i].id, frameP, subframeP, sli->dl[i].isol_current, sli->dl[i].isol);
sli->dl[i].isol_current = sli->dl[i].isol;
}
}
......@@ -581,7 +583,7 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
// Check for new slice priority
if (sli->dl[i].prio_current != sli->dl[i].prio) {
LOG_I(MAC, "[eNB %d][SLICE %d][DL] frame %d subframe %d: slice priority setting has changed (%d-->%d)\n",
module_idP, i, frameP, subframeP, sli->dl[i].prio_current, sli->dl[i].prio);
module_idP, sli->dl[i].id, frameP, subframeP, sli->dl[i].prio_current, sli->dl[i].prio);
sli->dl[i].prio_current = sli->dl[i].prio;
}
......@@ -590,11 +592,13 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
if (sli->dl[i].accounting > 1 || sli->dl[i].accounting < 0) {
LOG_W(MAC,
"[eNB %d][SLICE %d][DL] frame %d subframe %d: invalid accounting policy (%d), revert to its previous value (%d)\n",
module_idP, i, frameP, subframeP, sli->dl[i].accounting, sli->dl[i].accounting_current);
module_idP, sli->dl[i].id, frameP, subframeP,
sli->dl[i].accounting, sli->dl[i].accounting_current);
sli->dl[i].accounting = sli->dl[i].accounting_current;
} else {
LOG_N(MAC, "[eNB %d][SLICE %d][DL] frame %d subframe %d: UE sorting policy has changed (%x-->%x)\n",
module_idP, i, frameP, subframeP, sli->dl[i].accounting_current, sli->dl[i].accounting);
module_idP, sli->dl[i].id, frameP, subframeP,
sli->dl[i].accounting_current, sli->dl[i].accounting);
sli->dl[i].accounting_current = sli->dl[i].accounting;
}
}
......@@ -609,7 +613,7 @@ schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, in
//------------------------------------------------------------------------------
void
schedule_ue_spec(module_id_t module_idP, slice_id_t slice_idP,
schedule_ue_spec(module_id_t module_idP, int slice_idxP,
frame_t frameP, sub_frame_t subframeP, int *mbsfn_flag)
//------------------------------------------------------------------------------
{
......@@ -719,7 +723,7 @@ schedule_ue_spec(module_id_t module_idP, slice_id_t slice_idP,
start_meas(&eNB->schedule_dlsch_preprocessor);
dlsch_scheduler_pre_processor(module_idP,
slice_idP,
slice_idxP,
frameP,
subframeP,
mbsfn_flag);
......@@ -763,7 +767,7 @@ schedule_ue_spec(module_id_t module_idP, slice_id_t slice_idP,
continue_flag = 1;
}
if (!ue_dl_slice_membership(module_idP, UE_id, slice_idP))
if (!ue_dl_slice_membership(module_idP, UE_id, slice_idxP))
continue;
if (continue_flag != 1) {
......@@ -838,7 +842,7 @@ schedule_ue_spec(module_id_t module_idP, slice_id_t slice_idP,
eNB_UE_stats->dlsch_mcs1 = 10; // cqi_to_mcs[ue_sched_ctl->dl_cqi[CC_id]];
} else { // this operation is also done in the preprocessor
eNB_UE_stats->dlsch_mcs1 = cmin(eNB_UE_stats->dlsch_mcs1,
RC.mac[module_idP]->slice_info.dl[slice_idP].maxmcs); // cmin(eNB_UE_stats->dlsch_mcs1, openair_daq_vars.target_ue_dl_mcs);
RC.mac[module_idP]->slice_info.dl[slice_idxP].maxmcs); // cmin(eNB_UE_stats->dlsch_mcs1, openair_daq_vars.target_ue_dl_mcs);
}
// Store stats
......@@ -1618,7 +1622,8 @@ void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id, int frameP, sub
COMMON_channels_t *cc;
int N_RBG[NFAPI_CC_MAX];
int slice_sorted_list[MAX_NUM_SLICES], slice_id;
int slice_sorted_list[MAX_NUM_SLICES];
int slice_idx;
int8_t free_rbgs_map[NFAPI_CC_MAX][N_RBG_MAX];
int has_traffic[NFAPI_CC_MAX][MAX_NUM_SLICES];
uint8_t allocation_mask[NFAPI_CC_MAX][N_RBG_MAX];
......@@ -1672,9 +1677,9 @@ void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id, int frameP, sub
min_rb_unit = get_min_rb_unit(Mod_id, CC_id);
for (i = 0; i < sli->n_dl; ++i) {
slice_id = slice_sorted_list[i];
slice_idx = slice_sorted_list[i];
if (has_traffic[CC_id][slice_id] == 0) continue;
if (has_traffic[CC_id][slice_idx] == 0) continue;
// Build an ad-hoc allocation mask for the slice
for (rbg = 0; rbg < N_RBG[CC_id]; ++rbg) {
......@@ -1695,12 +1700,12 @@ void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id, int frameP, sub
// Sort UE again
// (UE list gets sorted every time pre_processor is called so it is probably dirty at this point)
// FIXME: There is only one UE_list for all slices, so it must be sorted again each time we use it
sort_UEs(Mod_id, (slice_id_t) slice_id, frameP, subframeP);
sort_UEs(Mod_id, slice_idx, frameP, subframeP);
nb_rbs_remaining = sli->pre_processor_results[slice_id].nb_rbs_remaining;
nb_rbs_required = sli->pre_processor_results[slice_id].nb_rbs_required;
rballoc_sub = sli->pre_processor_results[slice_id].slice_allocated_rbgs;
MIMO_mode_indicator = sli->pre_processor_results[slice_id].MIMO_mode_indicator;
nb_rbs_remaining = sli->pre_processor_results[slice_idx].nb_rbs_remaining;
nb_rbs_required = sli->pre_processor_results[slice_idx].nb_rbs_required;
rballoc_sub = sli->pre_processor_results[slice_idx].slice_allocated_rbgs;
MIMO_mode_indicator = sli->pre_processor_results[slice_idx].MIMO_mode_indicator;
// Allocation
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
......@@ -1764,7 +1769,7 @@ void dlsch_scheduler_qos_multiplexing(module_id_t Mod_id, int frameP, sub_frame_
// Sort UE again
// FIXME: There is only one UE_list for all slices, so it must be sorted again each time we use it
sort_UEs(Mod_id, (slice_id_t) i, frameP, subframeP);
sort_UEs(Mod_id, (uint8_t)i, frameP, subframeP);
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
......
......@@ -1962,8 +1962,8 @@ int add_new_ue(module_id_t mod_idP, int cc_idP, rnti_t rntiP, int harq_pidP
UE_list->UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 0;
/* default slice in case there was something different */
UE_list->assoc_dl_slice[UE_id] = 0;
UE_list->assoc_ul_slice[UE_id] = 0;
UE_list->assoc_dl_slice_idx[UE_id] = 0;
UE_list->assoc_ul_slice_idx[UE_id] = 0;
UE_list->UE_sched_ctrl[UE_id].ta_update = 31;
......@@ -4542,24 +4542,26 @@ uint16_t nb_rbs_allowed_slice(float rb_percentage, int total_rbs)
return (uint16_t) floor(rb_percentage * total_rbs);
}
int ue_dl_slice_membership(module_id_t mod_id, int UE_id, slice_id_t slice_id)
int ue_dl_slice_membership(module_id_t mod_id, int UE_id, int slice_idx)
{
if ((slice_id < 0)
|| (slice_id >= RC.mac[mod_id]->slice_info.n_dl)) {
LOG_W(MAC, "out of range slice id %d\n", slice_id);
if ((slice_idx < 0)
|| (slice_idx >= RC.mac[mod_id]->slice_info.n_dl)) {
LOG_W(MAC, "out of range slice index %d (slice ID %d)\n",
slice_idx, RC.mac[mod_id]->slice_info.dl[slice_idx].id);
return 0;
}
return RC.mac[mod_id]->UE_list.active[UE_id] == TRUE
&& RC.mac[mod_id]->UE_list.assoc_dl_slice[UE_id] == slice_id;
&& RC.mac[mod_id]->UE_list.assoc_dl_slice_idx[UE_id] == slice_idx;
}
int ue_ul_slice_membership(module_id_t mod_id, int UE_id, slice_id_t slice_id)
int ue_ul_slice_membership(module_id_t mod_id, int UE_id, int slice_idx)
{
if ((slice_id < 0)
|| (slice_id >= RC.mac[mod_id]->slice_info.n_ul)) {
LOG_W(MAC, "out of range slice id %d\n", slice_id);
if ((slice_idx < 0)
|| (slice_idx >= RC.mac[mod_id]->slice_info.n_ul)) {
LOG_W(MAC, "out of range slice index %d (slice ID %d)\n",
slice_idx, RC.mac[mod_id]->slice_info.dl[slice_idx].id);
return 0;
}
return RC.mac[mod_id]->UE_list.active[UE_id] == TRUE
&& RC.mac[mod_id]->UE_list.assoc_ul_slice[UE_id] == slice_id;
&& RC.mac[mod_id]->UE_list.assoc_ul_slice_idx[UE_id] == slice_idx;
}
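As a quick worked example of the per-slice RB budget that the helpers in the hunk above compute: nb_rbs_allowed_slice() is taken verbatim from this diff, while the bandwidth and slice shares below are assumed values for illustration only (a 10 MHz cell has N_RB_DL = 50, so a slice with pct = 0.3 may use floor(0.3 * 50) = 15 RBs). Callers such as the DL pre-processor then gate every UE with ue_dl_slice_membership(mod_id, UE_id, slice_idx) before spending that budget.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Copied from the diff above. */
static uint16_t nb_rbs_allowed_slice(float rb_percentage, int total_rbs)
{
  return (uint16_t) floor(rb_percentage * total_rbs);
}

int main(void)
{
  const int N_RB_DL = 50;                  /* assumed: 10 MHz cell */
  const float pct[] = {0.3f, 0.5f, 0.2f};  /* assumed slice shares */

  for (int slice_idx = 0; slice_idx < 3; ++slice_idx)
    printf("slice index %d: up to %d RBs\n",
           slice_idx, (int) nb_rbs_allowed_slice(pct[slice_idx], N_RB_DL));
  return 0;
}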
......@@ -1049,8 +1049,9 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP,
}
for (i = 0; i < sli->n_ul; i++) {
if (sli->ul[i].pct < 0 ){
LOG_W(MAC, "[eNB %d] frame %d subframe %d:invalid slice %d percentage %f. resetting to zero",
module_idP, frameP, subframeP, i, sli->ul[i].pct);
LOG_W(MAC,
"[eNB %d][SLICE %d][UL] frame %d subframe %d: invalid percentage %f. resetting to zero",
module_idP, sli->ul[i].id, frameP, subframeP, sli->ul[i].pct);
sli->ul[i].pct = 0;
}
sli->tot_pct_ul += sli->ul[i].pct;
......@@ -1088,7 +1089,7 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP,
// check if the slice rb share has changed, and log to the console
if (sli->ul[i].pct_current != sli->ul[i].pct){
LOG_I(MAC,"[eNB %d][SLICE %d][UL] frame %d subframe %d: total percentage %f-->%f, slice RB percentage has changed: %f-->%f\n",
module_idP, i, frameP, subframeP, sli->tot_pct_ul_current,
module_idP, sli->ul[i].id, frameP, subframeP, sli->tot_pct_ul_current,
sli->tot_pct_ul, sli->ul[i].pct_current, sli->ul[i].pct);
sli->tot_pct_ul_current = sli->tot_pct_ul;
sli->ul[i].pct_current = sli->ul[i].pct;
......@@ -1098,11 +1099,12 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP,
if (sli->ul[i].maxmcs_current != sli->ul[i].maxmcs){
if ((sli->ul[i].maxmcs >= 0) && (sli->ul[i].maxmcs <= 16)){
LOG_I(MAC,"[eNB %d][SLICE %d][UL] frame %d subframe %d: slice MAX MCS has changed: %d-->%d\n",
module_idP, i, frameP, subframeP, sli->ul[i].maxmcs_current, sli->ul[i].maxmcs);
module_idP, sli->ul[i].id, frameP, subframeP,
sli->ul[i].maxmcs_current, sli->ul[i].maxmcs);
sli->ul[i].maxmcs_current = sli->ul[i].maxmcs;
} else {
LOG_W(MAC,"[eNB %d][SLICE %d][UL] invalid slice max mcs %d, revert the previous value %d\n",
module_idP, i, sli->ul[i].maxmcs, sli->ul[i].maxmcs_current);
module_idP, sli->ul[i].id, sli->ul[i].maxmcs, sli->ul[i].maxmcs_current);
sli->ul[i].maxmcs = sli->ul[i].maxmcs_current;
}
}
......@@ -1110,11 +1112,13 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP,
if (sli->ul[i].first_rb_current != sli->ul[i].first_rb){
if (sli->ul[i].first_rb >= 0){ // FIXME: Max limit is checked in the scheduler
LOG_N(MAC,"[eNB %d][SLICE %d][UL] frame %d subframe %d: slice first rb has changed: %d-->%d\n",
module_idP, i, frameP, subframeP, sli->ul[i].first_rb_current, sli->ul[i].first_rb);
module_idP, sli->ul[i].id, frameP, subframeP,
sli->ul[i].first_rb_current, sli->ul[i].first_rb);
sli->ul[i].first_rb_current = sli->ul[i].first_rb;
} else {
LOG_W(MAC,"[eNB %d][SLICE %d][UL] invalid slice first rb %d, revert the previous value %d\n",
module_idP, i, sli->ul[i].first_rb, sli->ul[i].first_rb_current);
module_idP, sli->ul[i].id, sli->ul[i].first_rb,
sli->ul[i].first_rb_current);
sli->ul[i].first_rb = sli->ul[i].first_rb_current;
}
}
......@@ -1122,13 +1126,13 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP,
// check if there is a new scheduler, and log to the console
if (sli->ul[i].update_sched_current != sli->ul[i].update_sched) {
LOG_I(MAC,"[eNB %d][SLICE %d][UL] frame %d subframe %d: UL scheduler for this slice is updated: %s \n",
module_idP, i, frameP, subframeP, sli->ul[i].sched_name);
module_idP, sli->ul[i].id, frameP, subframeP, sli->ul[i].sched_name);
sli->ul[i].update_sched_current = sli->ul[i].update_sched;
}
} else {
if (sli->n_ul == sli->n_ul_current) {
LOG_W(MAC,"[eNB %d][SLICE %d][UL] invalid total RB share (%f->%f), reduce proportionally the RB share by 0.1\n",
module_idP, i, sli->tot_pct_ul_current, sli->tot_pct_ul);
module_idP, sli->ul[i].id, sli->tot_pct_ul_current, sli->tot_pct_ul);
if (sli->ul[i].pct > sli->avg_pct_ul) {
sli->ul[i].pct -= 0.1;
sli->tot_pct_ul -= 0.1;
......@@ -1136,9 +1140,8 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP,
} else {
// here we can correct the values, e.g. reduce proportionally
LOG_W(MAC,"[eNB %d][SLICE %d][UL] invalid total RB share (%f->%f), revert the number of slice to its previous value (%d->%d)\n",
module_idP, i, sli->tot_pct_ul_current,
sli->tot_pct_ul, sli->n_ul,
sli->n_ul_current);
module_idP, sli->ul[i].id, sli->tot_pct_ul_current,
sli->tot_pct_ul, sli->n_ul, sli->n_ul_current);
sli->n_ul = sli->n_ul_current;
sli->ul[i].pct = sli->ul[i].pct_current;
}
......@@ -1153,7 +1156,7 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP,
void
schedule_ulsch_rnti(module_id_t module_idP,
slice_id_t slice_id,
int slice_idx,
frame_t frameP,
sub_frame_t subframeP,
unsigned char sched_subframeP, uint16_t * first_rb)
......@@ -1197,14 +1200,14 @@ schedule_ulsch_rnti(module_id_t module_idP,
for (CC_id = 0; CC_id < NFAPI_CC_MAX; ++CC_id) {
N_RB_UL = to_prb(cc[CC_id].ul_Bandwidth);
UE_list->first_rb_offset[CC_id][slice_id] = cmin(N_RB_UL, sli->ul[slice_id].first_rb);
UE_list->first_rb_offset[CC_id][slice_idx] = cmin(N_RB_UL, sli->ul[slice_idx].first_rb);
}
//LOG_D(MAC, "entering ulsch preprocesor\n");
ulsch_scheduler_pre_processor(module_idP, slice_id, frameP, subframeP, sched_subframeP, first_rb);
ulsch_scheduler_pre_processor(module_idP, slice_idx, frameP, subframeP, sched_subframeP, first_rb);
for (CC_id = 0; CC_id < NFAPI_CC_MAX; ++CC_id) {
first_rb_slice[CC_id] = first_rb[CC_id] + UE_list->first_rb_offset[CC_id][slice_id];
first_rb_slice[CC_id] = first_rb[CC_id] + UE_list->first_rb_offset[CC_id][slice_idx];
}
//LOG_D(MAC, "exiting ulsch preprocesor\n");
......@@ -1214,7 +1217,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
for (UE_id = UE_list->head_ul; UE_id >= 0;
UE_id = UE_list->next_ul[UE_id]) {
if (!ue_ul_slice_membership(module_idP, UE_id, slice_id))
if (!ue_ul_slice_membership(module_idP, UE_id, slice_idx))
continue;
// don't schedule if Msg4 is not received yet
......@@ -1402,7 +1405,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
UE_template->oldNDI_UL[harq_pid] = ndi;
UE_list->eNB_UE_stats[CC_id][UE_id].normalized_rx_power = normalized_rx_power;
UE_list->eNB_UE_stats[CC_id][UE_id].target_rx_power = target_rx_power;
UE_template->mcs_UL[harq_pid] = cmin(UE_template->pre_assigned_mcs_ul, sli->ul[slice_id].maxmcs);
UE_template->mcs_UL[harq_pid] = cmin(UE_template->pre_assigned_mcs_ul, sli->ul[slice_idx].maxmcs);
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1= UE_template->mcs_UL[harq_pid];
//cmin (UE_template->pre_assigned_mcs_ul, openair_daq_vars.target_ue_ul_mcs); // adjust, based on user-defined MCS
if (UE_template->pre_allocated_rb_table_index_ul >= 0) {
......
......@@ -1104,8 +1104,8 @@ typedef struct {
uint16_t sorting_criteria[MAX_NUM_SLICES][CR_NUM];
uint16_t first_rb_offset[NFAPI_CC_MAX][MAX_NUM_SLICES];
slice_id_t assoc_dl_slice[MAX_MOBILES_PER_ENB];
slice_id_t assoc_ul_slice[MAX_MOBILES_PER_ENB];
int assoc_dl_slice_idx[MAX_MOBILES_PER_ENB];
int assoc_ul_slice_idx[MAX_MOBILES_PER_ENB];
} UE_list_t;
......@@ -1144,12 +1144,14 @@ typedef struct {
* slice specific scheduler for the DL
*/
typedef void (*slice_scheduler_dl)(module_id_t mod_id,
slice_id_t slice_id,
int slice_idx,
frame_t frame,
sub_frame_t subframe,
int *mbsfn_flag);
typedef struct {
slice_id_t id;
/// RB share for each slice for past and current time
float pct;
float pct_current;
......@@ -1192,13 +1194,15 @@ typedef struct {
} slice_sched_conf_dl_t;
typedef void (*slice_scheduler_ul)(module_id_t mod_id,
slice_id_t slice_id,
int slice_idx,
frame_t frame,
sub_frame_t subframe,
unsigned char sched_subframe,
uint16_t *first_rb);
typedef struct {
slice_id_t id;
/// RB share for each slice for past and current time
float pct;
float pct_current;
......
......@@ -103,12 +103,12 @@ void schedule_ulsch(module_id_t module_idP, frame_t frameP,
/** \brief ULSCH Scheduling per RNTI
@param Mod_id Instance ID of eNB
@param slice_id Instance slice for this eNB
@param slice_idx Slice instance index for this eNB
@param frame Frame index
@param subframe Subframe number on which to act
@param sched_subframe Subframe number where PUSCH is transmitted (for DAI lookup)
*/
void schedule_ulsch_rnti(module_id_t module_idP, slice_id_t slice_idP, frame_t frameP,
void schedule_ulsch_rnti(module_id_t module_idP, int slice_idx, frame_t frameP,
sub_frame_t subframe,
unsigned char sched_subframe,
uint16_t * first_rb);
......@@ -131,7 +131,7 @@ void fill_DLSCH_dci(module_id_t module_idP,frame_t frameP,sub_frame_t subframe,i
void schedule_dlsch(module_id_t module_idP, frame_t frameP,
sub_frame_t subframe, int *mbsfn_flag);
void schedule_ue_spec(module_id_t module_idP, slice_id_t slice_idP,
void schedule_ue_spec(module_id_t module_idP, int slice_idxP,
frame_t frameP,sub_frame_t subframe, int *mbsfn_flag);
void schedule_ue_spec_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t subframe,int *mbsfn_flag);
......@@ -192,6 +192,7 @@ void clear_nfapi_information(eNB_MAC_INST * eNB, int CC_idP,
// eNB functions
/* \brief This function assigns pre-available RBS to each UE in specified sub-bands before scheduling is done
@param Mod_id Instance ID of eNB
@param slice_idxP Slice instance index for the slice in which scheduling happens
@param frame Index of frame
@param subframe Index of current subframe
@param N_RBS Number of resource block groups
......@@ -199,13 +200,13 @@ void clear_nfapi_information(eNB_MAC_INST * eNB, int CC_idP,
void dlsch_scheduler_pre_processor(module_id_t module_idP,
slice_id_t slice_idP,
int slice_idxP,
frame_t frameP,
sub_frame_t subframe,
int *mbsfn_flag);
void dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
slice_id_t slice_id,
int slice_idx,
frame_t frameP,
sub_frame_t subframeP,
int min_rb_unit[NFAPI_CC_MAX],
......@@ -215,11 +216,11 @@ void dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
int *mbsfn_flag);
void dlsch_scheduler_pre_processor_partitioning(module_id_t Mod_id,
slice_id_t slice_id,
int slice_idx,
const uint8_t rbs_retx[NFAPI_CC_MAX]);
void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
slice_id_t slice_id,
int slice_idx,
frame_t frameP,
sub_frame_t subframeP,
int min_rb_unit[NFAPI_CC_MAX],
......@@ -227,7 +228,7 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]);
void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
slice_id_t slice_id,
int slice_idx,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
......@@ -236,7 +237,7 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]);
void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
slice_id_t slice_id,
int slice_idx,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
......@@ -713,20 +714,20 @@ void set_ul_DAI(int module_idP,
int frameP,
int subframeP);
void ulsch_scheduler_pre_processor(module_id_t module_idP, slice_id_t slice_id, int frameP,
void ulsch_scheduler_pre_processor(module_id_t module_idP, int slice_idx, int frameP,
sub_frame_t subframeP,
unsigned char sched_subframeP,
uint16_t * first_rb);
void store_ulsch_buffer(module_id_t module_idP, int frameP,
sub_frame_t subframeP);
void sort_ue_ul(module_id_t module_idP, int frameP, sub_frame_t subframeP);
void assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP,
void assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP,
sub_frame_t subframeP, uint16_t * first_rb);
void adjust_bsr_info(int buffer_occupancy, uint16_t TBS,
UE_TEMPLATE * UE_template);
int phy_stats_exist(module_id_t Mod_id, int rnti);
void sort_UEs(module_id_t Mod_idP, slice_id_t slice_id, int frameP, sub_frame_t subframeP);
void sort_UEs(module_id_t Mod_idP, int slice_idx, int frameP, sub_frame_t subframeP);
/*! \fn UE_L2_state_t ue_scheduler(const module_id_t module_idP,const frame_t frameP, const sub_frame_t subframe, const lte_subframe_t direction,const uint8_t eNB_index)
\brief UE scheduler where all the ue background tasks are done. This function performs the following: 1) Trigger PDCP every 5ms 2) Call RRC for link status return to PHY3) Perform SR/BSR procedures for scheduling feedback 4) Perform PHR procedures.
......@@ -1262,8 +1263,8 @@ void pre_scd_nb_rbs_required( module_id_t module_idP,
/*Slice related functions */
uint16_t nb_rbs_allowed_slice(float rb_percentage, int total_rbs);
int ue_dl_slice_membership(module_id_t mod_id, int UE_id, slice_id_t slice_id);
int ue_ul_slice_membership(module_id_t mod_id, int UE_id, slice_id_t slice_id);
int ue_dl_slice_membership(module_id_t mod_id, int UE_id, int slice_idx);
int ue_ul_slice_membership(module_id_t mod_id, int UE_id, int slice_idx);
/* from here: prototypes to get rid of compilation warnings: doc to be written by function author */
uint8_t ul_subframe2_k_phich(COMMON_channels_t * cc, sub_frame_t ul_subframe);
......
......@@ -88,7 +88,7 @@ int phy_stats_exist(module_id_t Mod_id, int rnti)
// This function stores the downlink buffer for all the logical channels
void
store_dlsch_buffer(module_id_t Mod_id,
slice_id_t slice_id,
int slice_idx,
frame_t frameP,
sub_frame_t subframeP) {
......@@ -102,7 +102,7 @@ store_dlsch_buffer(module_id_t Mod_id,
if (UE_list->active[UE_id] != TRUE)
continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_id))
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx))
continue;
UE_template = &UE_list->UE_template[UE_PCCID(Mod_id, UE_id)][UE_id];
......@@ -146,8 +146,8 @@ store_dlsch_buffer(module_id_t Mod_id,
if (UE_template->dl_buffer_info[lcid] > 0)
LOG_D(MAC,
"[eNB %d][SLICE %d] Frame %d Subframe %d : RLC status for UE %d in LCID%d: total of %d pdus and size %d, head sdu queuing time %d, remaining size %d, is segmeneted %d \n",
Mod_id, slice_id, frameP, subframeP, UE_id,
lcid, UE_template->dl_pdus_in_buffer[lcid],
Mod_id, RC.mac[Mod_id]->slice_info.dl[slice_idx].id, frameP,
subframeP, UE_id, lcid, UE_template->dl_pdus_in_buffer[lcid],
UE_template->dl_buffer_info[lcid],
UE_template->dl_buffer_head_sdu_creation_time[lcid],
UE_template->dl_buffer_head_sdu_remaining_size_to_send[lcid],
......@@ -174,7 +174,7 @@ int cqi2mcs(int cqi) {
// This function returns the estimated number of RBs required by each UE for downlink scheduling
void
assign_rbs_required(module_id_t Mod_id,
slice_id_t slice_id,
int slice_idx,
frame_t frameP,
sub_frame_t subframe,
int min_rb_unit[NFAPI_CC_MAX],
......@@ -192,7 +192,7 @@ assign_rbs_required(module_id_t Mod_id,
// clear rb allocations across all CC_id
for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) {
if (UE_list->active[UE_id] != TRUE) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_id)) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
pCCid = UE_PCCID(Mod_id, UE_id);
//update CQI information across component carriers
......@@ -200,8 +200,8 @@ assign_rbs_required(module_id_t Mod_id,
CC_id = UE_list->ordered_CCids[n][UE_id];
eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
// eNB_UE_stats->dlsch_mcs1 = cmin(cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]], sli->dl[slice_id].maxmcs);
eNB_UE_stats->dlsch_mcs1 = cmin(cqi2mcs(UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]), sli->dl[slice_id].maxmcs);
// eNB_UE_stats->dlsch_mcs1 = cmin(cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]], sli->dl[slice_idx].maxmcs);
eNB_UE_stats->dlsch_mcs1 = cmin(cqi2mcs(UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]), sli->dl[slice_idx].maxmcs);
}
......@@ -243,16 +243,16 @@ assign_rbs_required(module_id_t Mod_id,
N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);
UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_id] =
nb_rbs_allowed_slice(sli->dl[slice_id].pct, N_RB_DL);
UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx] =
nb_rbs_allowed_slice(sli->dl[slice_idx].pct, N_RB_DL);
/* calculating required number of RBs for each UE */
while (TBS < UE_list->UE_template[pCCid][UE_id].dl_buffer_total) {
nb_rbs_required[CC_id][UE_id] += min_rb_unit[CC_id];
if (nb_rbs_required[CC_id][UE_id] > UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_id]) {
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_id]);
nb_rbs_required[CC_id][UE_id] = UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_id];
if (nb_rbs_required[CC_id][UE_id] > UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx]) {
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx]);
nb_rbs_required[CC_id][UE_id] = UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx];
break;
}
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_rbs_required[CC_id][UE_id]);
......@@ -264,7 +264,7 @@ assign_rbs_required(module_id_t Mod_id,
nb_rbs_required[CC_id][UE_id], TBS,
eNB_UE_stats->dlsch_mcs1);
sli->pre_processor_results[slice_id].mcs[CC_id][UE_id] = eNB_UE_stats->dlsch_mcs1;
sli->pre_processor_results[slice_idx].mcs[CC_id][UE_id] = eNB_UE_stats->dlsch_mcs1;
}
}
}
......@@ -332,7 +332,7 @@ struct sort_ue_dl_params {
int Mod_idP;
int frameP;
int subframeP;
int slice_id;
int slice_idx;
};
static int ue_dl_compare(const void *_a, const void *_b, void *_params)
......@@ -341,7 +341,7 @@ static int ue_dl_compare(const void *_a, const void *_b, void *_params)
UE_list_t *UE_list = &RC.mac[params->Mod_idP]->UE_list;
int i;
int slice_id = params->slice_id;
int slice_idx = params->slice_idx;
int UE_id1 = *(const int *) _a;
int UE_id2 = *(const int *) _b;
......@@ -360,7 +360,7 @@ static int ue_dl_compare(const void *_a, const void *_b, void *_params)
long lcgid2 = min_lcgidpriority(params->Mod_idP, UE_id2);
for (i = 0; i < CR_NUM; ++i) {
switch (UE_list->sorting_criteria[slice_id][i]) {
switch (UE_list->sorting_criteria[slice_idx][i]) {
case CR_ROUND :
if (round1 > round2)
......@@ -421,28 +421,31 @@ static int ue_dl_compare(const void *_a, const void *_b, void *_params)
return 0;
}
void decode_sorting_policy(module_id_t Mod_idP, slice_id_t slice_id) {
void decode_sorting_policy(module_id_t Mod_idP, int slice_idx) {
int i;
UE_list_t *UE_list = &RC.mac[Mod_idP]->UE_list;
uint32_t policy = RC.mac[Mod_idP]->slice_info.dl[slice_id].sorting;
uint32_t policy = RC.mac[Mod_idP]->slice_info.dl[slice_idx].sorting;
uint32_t mask = 0x0000000F;
uint16_t criterion;
for (i = 0; i < CR_NUM; ++i) {
criterion = (uint16_t) (policy >> 4 * (CR_NUM - 1 - i) & mask);
if (criterion >= CR_NUM) {
LOG_W(MAC, "Invalid criterion in slice %d policy, revert to default policy \n", slice_id);
RC.mac[Mod_idP]->slice_info.dl[slice_id].sorting = 0x12345;
LOG_W(MAC,
"Invalid criterion in slice index %d ID %d policy, revert to default policy \n",
slice_idx, RC.mac[Mod_idP]->slice_info.dl[slice_idx].id);
RC.mac[Mod_idP]->slice_info.dl[slice_idx].sorting = 0x12345;
break;
}
UE_list->sorting_criteria[slice_id][i] = criterion;
UE_list->sorting_criteria[slice_idx][i] = criterion;
}
}
void decode_slice_positioning(module_id_t Mod_idP,
slice_id_t slice_id,
uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX]) {
int slice_idx,
uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX])
{
uint8_t CC_id;
int RBG, start_frequency, end_frequency;
......@@ -453,8 +456,8 @@ void decode_slice_positioning(module_id_t Mod_idP,
}
}
start_frequency = RC.mac[Mod_idP]->slice_info.dl[slice_id].pos_low;
end_frequency = RC.mac[Mod_idP]->slice_info.dl[slice_id].pos_high;
start_frequency = RC.mac[Mod_idP]->slice_info.dl[slice_idx].pos_low;
end_frequency = RC.mac[Mod_idP]->slice_info.dl[slice_idx].pos_high;
for (CC_id = 0; CC_id < NFAPI_CC_MAX; ++CC_id) {
for (RBG = start_frequency; RBG <= end_frequency; ++RBG) {
slice_allocation_mask[CC_id][RBG] = 1;
......@@ -464,12 +467,12 @@ void decode_slice_positioning(module_id_t Mod_idP,
// This function sorts the UEs in order of their dlsch buffer and CQI
void sort_UEs(module_id_t Mod_idP, slice_id_t slice_id, int frameP, sub_frame_t subframeP)
void sort_UEs(module_id_t Mod_idP, int slice_idx, int frameP, sub_frame_t subframeP)
{
int i;
int list[MAX_MOBILES_PER_ENB];
int list_size = 0;
struct sort_ue_dl_params params = {Mod_idP, frameP, subframeP, slice_id};
struct sort_ue_dl_params params = {Mod_idP, frameP, subframeP, slice_idx};
UE_list_t *UE_list = &RC.mac[Mod_idP]->UE_list;
......@@ -478,13 +481,13 @@ void sort_UEs(module_id_t Mod_idP, slice_id_t slice_id, int frameP, sub_frame_t
if (UE_list->active[i] == FALSE) continue;
if (UE_RNTI(Mod_idP, i) == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_idP, i, slice_id)) continue;
if (!ue_dl_slice_membership(Mod_idP, i, slice_idx)) continue;
list[list_size] = i;
list_size++;
}
decode_sorting_policy(Mod_idP, slice_id);
decode_sorting_policy(Mod_idP, slice_idx);
qsort_r(list, list_size, sizeof(int), ue_dl_compare, &params);
......@@ -571,8 +574,9 @@ void sort_UEs(module_id_t Mod_idP, slice_id_t slice_id, int frameP, sub_frame_t
}
void dlsch_scheduler_pre_processor_partitioning(module_id_t Mod_id,
slice_id_t slice_id,
const uint8_t rbs_retx[NFAPI_CC_MAX]) {
int slice_idx,
const uint8_t rbs_retx[NFAPI_CC_MAX])
{
int UE_id, CC_id, N_RB_DL, i;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
UE_sched_ctrl *ue_sched_ctl;
......@@ -582,24 +586,24 @@ void dlsch_scheduler_pre_processor_partitioning(module_id_t Mod_id,
if (UE_RNTI(Mod_id, UE_id) == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_id)) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); ++i) {
CC_id = UE_list->ordered_CCids[i][UE_id];
N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);
available_rbs = nb_rbs_allowed_slice(RC.mac[Mod_id]->slice_info.dl[slice_id].pct, N_RB_DL);
available_rbs = nb_rbs_allowed_slice(RC.mac[Mod_id]->slice_info.dl[slice_idx].pct, N_RB_DL);
if (rbs_retx[CC_id] < available_rbs)
ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id] = available_rbs - rbs_retx[CC_id];
ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_idx] = available_rbs - rbs_retx[CC_id];
else
ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id] = 0;
ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_idx] = 0;
}
}
}
void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
slice_id_t slice_id,
int slice_idx,
frame_t frameP,
sub_frame_t subframeP,
int min_rb_unit[NFAPI_CC_MAX],
......@@ -640,7 +644,7 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
rnti = UE_RNTI(Mod_id, UE_id);
if (rnti == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_id)) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); ++i) {
CC_id = UE_list->ordered_CCids[i][UE_id];
......@@ -670,9 +674,9 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
// PARTITIONING
// Reduces the available RBs according to slicing configuration
dlsch_scheduler_pre_processor_partitioning(Mod_id, slice_id, rbs_retx);
dlsch_scheduler_pre_processor_partitioning(Mod_id, slice_idx, rbs_retx);
switch (RC.mac[Mod_id]->slice_info.dl[slice_id].accounting) {
switch (RC.mac[Mod_id]->slice_info.dl[slice_idx].accounting) {
// If greedy scheduling, try to account all the required RBs
case POL_GREEDY:
......@@ -680,7 +684,7 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
rnti = UE_RNTI(Mod_id, UE_id);
if (rnti == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_id)) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
......@@ -700,13 +704,13 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
if (rnti == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_id)) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); ++i) {
CC_id = UE_list->ordered_CCids[i][UE_id];
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
available_rbs = ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id];
available_rbs = ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_idx];
if (ue_count_newtx[CC_id] == 0) {
average_rbs_per_user[CC_id] = 0;
......@@ -725,7 +729,7 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
rnti = UE_RNTI(Mod_id, UE_id);
if (rnti == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_id)) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
......@@ -744,7 +748,7 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
rnti = UE_RNTI(Mod_id, UE_id);
if (rnti == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_id)) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
......@@ -763,7 +767,7 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
}
void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
slice_id_t slice_id,
int slice_idx,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
......@@ -780,7 +784,7 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
decode_slice_positioning(Mod_id, slice_id, slice_allocation_mask);
decode_slice_positioning(Mod_id, slice_idx, slice_allocation_mask);
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) {
COMMON_channels_t *cc = &RC.mac[Mod_id]->common_channels[CC_id];
......@@ -792,7 +796,7 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
if (UE_RNTI(Mod_id, UE_id) == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_id)) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
......@@ -990,23 +994,23 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
}
void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
slice_id_t slice_id,
int slice_idx,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]) {
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX])
{
int UE_id, CC_id;
int i;
uint8_t transmission_mode;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
int N_RBG[NFAPI_CC_MAX];
slice_info_t *sli = &RC.mac[Mod_id]->slice_info;
uint8_t (*slice_allocation_mask)[N_RBG_MAX] = sli->pre_processor_results[slice_id].slice_allocation_mask;
uint8_t (*slice_allocation_mask)[N_RBG_MAX] = sli->pre_processor_results[slice_idx].slice_allocation_mask;
decode_slice_positioning(Mod_id, slice_id, slice_allocation_mask);
decode_slice_positioning(Mod_id, slice_idx, slice_allocation_mask);
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) {
COMMON_channels_t *cc = &RC.mac[Mod_id]->common_channels[CC_id];
......@@ -1018,7 +1022,7 @@ void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
if (UE_RNTI(Mod_id, UE_id) == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_id)) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
......@@ -1221,7 +1225,7 @@ void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
// This function assigns pre-available RBS to each UE in specified sub-bands before scheduling is done
void
dlsch_scheduler_pre_processor(module_id_t Mod_id,
slice_id_t slice_id,
int slice_idx,
frame_t frameP,
sub_frame_t subframeP,
int *mbsfn_flag)
......@@ -1233,11 +1237,11 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
int min_rb_unit[NFAPI_CC_MAX];
slice_info_t *sli = &RC.mac[Mod_id]->slice_info;
uint16_t (*nb_rbs_required)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_id].nb_rbs_required;
uint16_t (*nb_rbs_accounted)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_id].nb_rbs_accounted;
uint16_t (*nb_rbs_remaining)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_id].nb_rbs_remaining;
uint8_t (*rballoc_sub)[N_RBG_MAX] = sli->pre_processor_results[slice_id].slice_allocated_rbgs;
uint8_t (*MIMO_mode_indicator)[N_RBG_MAX] = sli->pre_processor_results[slice_id].MIMO_mode_indicator;
uint16_t (*nb_rbs_required)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_idx].nb_rbs_required;
uint16_t (*nb_rbs_accounted)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_idx].nb_rbs_accounted;
uint16_t (*nb_rbs_remaining)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_idx].nb_rbs_remaining;
uint8_t (*rballoc_sub)[N_RBG_MAX] = sli->pre_processor_results[slice_idx].slice_allocated_rbgs;
uint8_t (*MIMO_mode_indicator)[N_RBG_MAX] = sli->pre_processor_results[slice_idx].MIMO_mode_indicator;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
UE_sched_ctrl *ue_sched_ctl;
......@@ -1255,9 +1259,9 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
#endif
// Initialize scheduling information for all active UEs
memset(&sli->pre_processor_results[slice_id], 0, sizeof(sli->pre_processor_results));
memset(&sli->pre_processor_results[slice_idx], 0, sizeof(sli->pre_processor_results));
// FIXME: After the memset above, some of the resets in reset() are redundant
dlsch_scheduler_pre_processor_reset(Mod_id, slice_id, frameP, subframeP,
dlsch_scheduler_pre_processor_reset(Mod_id, slice_idx, frameP, subframeP,
min_rb_unit,
nb_rbs_required,
rballoc_sub,
......@@ -1266,25 +1270,25 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
// STATUS
// Store the DLSCH buffer for each logical channel
store_dlsch_buffer(Mod_id, slice_id, frameP, subframeP);
store_dlsch_buffer(Mod_id, slice_idx, frameP, subframeP);
// Calculate the number of RBs required by each UE on the basis of logical channel's buffer
assign_rbs_required(Mod_id, slice_id, frameP, subframeP,
assign_rbs_required(Mod_id, slice_idx, frameP, subframeP,
min_rb_unit,
nb_rbs_required);
// Sorts the user on the basis of dlsch logical channel buffer and CQI
sort_UEs(Mod_id, slice_id, frameP, subframeP);
sort_UEs(Mod_id, slice_idx, frameP, subframeP);
// ACCOUNTING
// This procedure decides the number of RBs to allocate
dlsch_scheduler_pre_processor_accounting(Mod_id, slice_id, frameP, subframeP,
dlsch_scheduler_pre_processor_accounting(Mod_id, slice_idx, frameP, subframeP,
min_rb_unit,
nb_rbs_required,
nb_rbs_accounted);
// POSITIONING
// This procedure does the main allocation of the RBs
dlsch_scheduler_pre_processor_positioning(Mod_id, slice_id,
dlsch_scheduler_pre_processor_positioning(Mod_id, slice_idx,
min_rb_unit,
nb_rbs_required,
nb_rbs_accounted,
......@@ -1295,7 +1299,7 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
// SHARING
// If there are available RBs left in the slice, allocate them to the highest priority UEs
if (RC.mac[Mod_id]->slice_info.intraslice_share_active) {
dlsch_scheduler_pre_processor_intraslice_sharing(Mod_id, slice_id,
dlsch_scheduler_pre_processor_intraslice_sharing(Mod_id, slice_idx,
min_rb_unit,
nb_rbs_required,
nb_rbs_accounted,
......@@ -1369,7 +1373,8 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
//PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = pre_nb_available_rbs[CC_id][UE_id];
LOG_D(MAC, "[eNB %d][SLICE %d]Total RBs allocated for UE%d = %d\n",
Mod_id, slice_id, UE_id, ue_sched_ctl->pre_nb_available_rbs[CC_id]);
Mod_id, RC.mac[Mod_id]->slice_info.dl[slice_idx].id, UE_id,
ue_sched_ctl->pre_nb_available_rbs[CC_id]);
}
}
}
......@@ -1379,7 +1384,7 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
void
dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
slice_id_t slice_id,
int slice_idx,
frame_t frameP,
sub_frame_t subframeP,
int min_rb_unit[NFAPI_CC_MAX],
......@@ -1432,7 +1437,7 @@ dlsch_scheduler_pre_processor_reset(module_id_t module_idP,
if (UE_list->active[UE_id] != TRUE)
continue;
if (!ue_dl_slice_membership(module_idP, UE_id, slice_id))
if (!ue_dl_slice_membership(module_idP, UE_id, slice_idx))
continue;
vrb_map = RC.mac[module_idP]->common_channels[CC_id].vrb_map;
......@@ -1660,7 +1665,7 @@ dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id,
/// ULSCH PRE_PROCESSOR
void ulsch_scheduler_pre_processor(module_id_t module_idP,
slice_id_t slice_id,
int slice_idx,
int frameP,
sub_frame_t subframeP,
unsigned char sched_subframeP,
......@@ -1684,7 +1689,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
LOG_D(MAC, "In ulsch_preprocessor: assign max mcs min rb\n");
// maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB
assign_max_mcs_min_rb(module_idP, slice_id, frameP, subframeP, first_rb);
assign_max_mcs_min_rb(module_idP, slice_idx, frameP, subframeP, first_rb);
LOG_D(MAC, "In ulsch_preprocessor: sort ue \n");
// sort ues
......@@ -1706,9 +1711,9 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][i];
UE_template = &UE_list->UE_template[CC_id][i];
if (!ue_ul_slice_membership(module_idP, i, slice_id))
if (!ue_ul_slice_membership(module_idP, i, slice_idx))
continue;
if (UE_template->pre_allocated_nb_rb_ul[slice_id] > 0) {
if (UE_template->pre_allocated_nb_rb_ul[slice_idx] > 0) {
total_ue_count[CC_id] += 1;
}
}
......@@ -1727,7 +1732,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
continue;
if (!ue_ul_slice_membership(module_idP, UE_id, slice_id))
if (!ue_ul_slice_membership(module_idP, UE_id, slice_idx))
continue;
LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x\n", UE_id,
......@@ -1751,11 +1756,11 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth);
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] =
nb_rbs_allowed_slice(sli->ul[slice_id].pct, N_RB_UL);
ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx] =
nb_rbs_allowed_slice(sli->ul[slice_idx].pct, N_RB_UL);
first_rb_offset = UE_list->first_rb_offset[CC_id][slice_id];
available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id],
first_rb_offset = UE_list->first_rb_offset[CC_id][slice_idx];
available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx],
N_RB_UL - first_rb[CC_id] - first_rb_offset);
if (total_ue_count[CC_id] == 0) {
......@@ -1784,7 +1789,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
continue;
if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
continue;
if (!ue_ul_slice_membership(module_idP, i, slice_id))
if (!ue_ul_slice_membership(module_idP, i, slice_idx))
continue;
......@@ -1803,7 +1808,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
nb_allocated_rbs[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb_ul[harq_pid];
} else {
nb_allocated_rbs[CC_id][UE_id] =
cmin(UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[slice_id],
cmin(UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[slice_idx],
average_rbs_per_user[CC_id]);
}
......@@ -1825,7 +1830,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
continue;
if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
continue;
if (!ue_ul_slice_membership(module_idP, i, slice_id))
if (!ue_ul_slice_membership(module_idP, i, slice_idx))
continue;
UE_id = i;
......@@ -1836,8 +1841,8 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
CC_id = UE_list->ordered_ULCCids[n][UE_id];
UE_template = &UE_list->UE_template[CC_id][UE_id];
N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth);
first_rb_offset = UE_list->first_rb_offset[CC_id][slice_id];
available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id],
first_rb_offset = UE_list->first_rb_offset[CC_id][slice_idx];
available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx],
N_RB_UL - first_rb[CC_id] - first_rb_offset);
total_remaining_rbs[CC_id] = available_rbs - total_allocated_rbs[CC_id];
......@@ -1846,17 +1851,17 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
}
if (r == 0) {
while ((UE_template->pre_allocated_nb_rb_ul[slice_id] > 0)
&& (nb_allocated_rbs[CC_id][UE_id] < UE_template->pre_allocated_nb_rb_ul[slice_id])
while ((UE_template->pre_allocated_nb_rb_ul[slice_idx] > 0)
&& (nb_allocated_rbs[CC_id][UE_id] < UE_template->pre_allocated_nb_rb_ul[slice_idx])
&& (total_remaining_rbs[CC_id] > 0)) {
nb_allocated_rbs[CC_id][UE_id] =
cmin(nb_allocated_rbs[CC_id][UE_id] + 1,
UE_template->pre_allocated_nb_rb_ul[slice_id]);
UE_template->pre_allocated_nb_rb_ul[slice_idx]);
total_remaining_rbs[CC_id]--;
total_allocated_rbs[CC_id]++;
}
} else {
UE_template->pre_allocated_nb_rb_ul[slice_id] =
UE_template->pre_allocated_nb_rb_ul[slice_idx] =
nb_allocated_rbs[CC_id][UE_id];
LOG_D(MAC,
"******************UL Scheduling Information for UE%d CC_id %d ************************\n",
......@@ -1864,7 +1869,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
LOG_D(MAC,
"[eNB %d] total RB allocated for UE%d CC_id %d = %d\n",
module_idP, UE_id, CC_id,
UE_template->pre_allocated_nb_rb_ul[slice_id]);
UE_template->pre_allocated_nb_rb_ul[slice_idx]);
}
}
}
......@@ -1872,7 +1877,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
}
void
assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP,
assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP,
sub_frame_t subframeP, uint16_t * first_rb)
{
......@@ -1902,16 +1907,16 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP,
continue;
if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
continue;
if (!ue_ul_slice_membership(module_idP, i, slice_id))
if (!ue_ul_slice_membership(module_idP, i, slice_idx))
continue;
if (UE_list->UE_sched_ctrl[i].phr_received == 1) {
/* if we've received the power headroom information from the UE, we can go to
* maximum mcs */
mcs = cmin(20, sli->ul[slice_id].maxmcs);
mcs = cmin(20, sli->ul[slice_idx].maxmcs);
} else {
/* otherwise, limit to QPSK PUSCH */
mcs = cmin(10, sli->ul[slice_id].maxmcs);
mcs = cmin(10, sli->ul[slice_idx].maxmcs);
}
UE_id = i;
......@@ -1933,8 +1938,8 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP,
Ncp = RC.mac[module_idP]->common_channels[CC_id].Ncp;
N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth);
ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] =
nb_rbs_allowed_slice(sli->ul[slice_id].pct, N_RB_UL);
ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx] =
nb_rbs_allowed_slice(sli->ul[slice_idx].pct, N_RB_UL);
int bytes_to_schedule = UE_template->estimated_ul_buffer - UE_template->scheduled_ul_bytes;
if (bytes_to_schedule < 0) bytes_to_schedule = 0;
......@@ -1957,8 +1962,8 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP,
tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0); // fixme: set use_srs
}
first_rb_offset = UE_list->first_rb_offset[CC_id][slice_id];
available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id],
first_rb_offset = UE_list->first_rb_offset[CC_id][slice_idx];
available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx],
N_RB_UL - first_rb[CC_id] - first_rb_offset);
while ((tbs < bits_to_schedule)
......@@ -1979,13 +1984,13 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP,
}
UE_template->pre_allocated_rb_table_index_ul = rb_table_index;
UE_template->pre_allocated_nb_rb_ul[slice_id] = rb_table[rb_table_index];
UE_template->pre_allocated_nb_rb_ul[slice_idx] = rb_table[rb_table_index];
LOG_D(MAC,
"[eNB %d] frame %d subframe %d: for UE %d CC %d: pre-assigned mcs %d, pre-allocated rb_table[%d]=%d RBs (phr %d, tx power %d)\n",
module_idP, frameP, subframeP, UE_id, CC_id,
UE_template->pre_assigned_mcs_ul,
UE_template->pre_allocated_rb_table_index_ul,
UE_template->pre_allocated_nb_rb_ul[slice_id],
UE_template->pre_allocated_nb_rb_ul[slice_idx],
UE_template->phr_info, tx_power);
} else {
/* if UE has pending scheduling request then pre-allocate 3 RBs */
......@@ -1994,11 +1999,11 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP,
/* use QPSK mcs */
UE_template->pre_assigned_mcs_ul = 10;
UE_template->pre_allocated_rb_table_index_ul = 2;
UE_template->pre_allocated_nb_rb_ul[slice_id] = 3;
UE_template->pre_allocated_nb_rb_ul[slice_idx] = 3;
} else {
UE_template->pre_assigned_mcs_ul = 0;
UE_template->pre_allocated_rb_table_index_ul = -1;
UE_template->pre_allocated_nb_rb_ul[slice_id] = 0;
UE_template->pre_allocated_nb_rb_ul[slice_idx] = 0;
}
}
}
......