Commit 4462e2d7 authored by Robert Schmidt's avatar Robert Schmidt

Remove unused/broken code in preprocessor

* Remove MIMO_mode_indicator: it is simply not used ATM
* Remove min_rb_unit as parameter: function auto-detects
* Remove commented code
* Remove slice sharing/multiplexing: it is broken
* Remove UE sorting, add UE with add_new_ue() in MAC
  - UE sorting is useless overhead on every iteration, instead it should
    be governed by a scheduling algorithm (e.g., RR or PF)
  - The MAC keeps a UE list and automatically adds a UE
  - When the UE_list is empty, its head is set to -1
* Remove slice_positioning: the slicing functionality is broken
* Remove unused/untested code
parent 8d3fa4dc
......@@ -577,19 +577,6 @@ schedule_ue_spec(module_id_t module_idP,
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR,
VCD_FUNCTION_OUT);
if (RC.mac[module_idP]->slice_info.interslice_share_active) {
dlsch_scheduler_interslice_multiplexing(module_idP,
frameP,
subframeP,
eNB->slice_info.rballoc_sub);
/* the interslice multiplexing re-sorts the UE_list for the slices it tries
* to multiplex, so we need to sort it for the current slice again */
sort_UEs(module_idP,
0,//slice_idxP,
frameP,
subframeP);
}
LOG_D(MAC, "doing schedule_ue_spec for CC_id %d\n",
CC_id);
dl_req = &eNB->DL_req[CC_id].dl_config_request_body;
......@@ -1627,204 +1614,6 @@ schedule_ue_spec(module_id_t module_idP,
VCD_FUNCTION_OUT);
}
//------------------------------------------------------------------------------
/*
 * Inter-slice multiplexing: RBGs that a slice owns but left unused in this TTI
 * are handed to UEs of other slices, unless the owning slice is isolated.
 * Works on the rballoc_sub bitmap produced by the per-slice preprocessor and
 * on the per-slice pre_processor_results kept in eNB->slice_info.
 */
void
dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id,
int frameP,
sub_frame_t subframeP,
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX])
//------------------------------------------------------------------------------
{
// FIXME: prototype code; some arrays and variables below duplicate
// information already kept in the pre_processor_results struct.
int UE_id, CC_id, rbg, i;
int N_RB_DL, min_rb_unit, tm;
int owned, used;
eNB_MAC_INST *eNB = RC.mac[Mod_id];
int nb_mac_CC = RC.nb_mac_CC[Mod_id];
UE_list_t *UE_list = &eNB->UE_list;
slice_info_t *sli = &eNB->slice_info;
UE_sched_ctrl_t *ue_sched_ctl;
COMMON_channels_t *cc;
int N_RBG[NFAPI_CC_MAX];
int slice_sorted_list[MAX_NUM_SLICES];
int slice_idx;
// free_rbgs_map[CC][rbg]: -1 if the RBG is already allocated, otherwise the
// index of the slice owning it (ownership kept to honor isolation below).
int8_t free_rbgs_map[NFAPI_CC_MAX][N_RBG_MAX];
int has_traffic[NFAPI_CC_MAX][MAX_NUM_SLICES];
uint8_t allocation_mask[NFAPI_CC_MAX][N_RBG_MAX];
// Views into the pre_processor_results of the slice currently being served.
uint16_t (*nb_rbs_remaining)[MAX_MOBILES_PER_ENB];
uint16_t (*nb_rbs_required)[MAX_MOBILES_PER_ENB];
uint8_t (*MIMO_mode_indicator)[N_RBG_MAX];
// Initialize the free RBGs map
// free_rbgs_map[CC_id][rbg] = -1 if RBG is allocated,
// otherwise it contains the id of the slice it belongs to.
// (Information about slicing must be retained to deal with isolation).
// FIXME: This method does not consider RBGs that are free and belong to no
// slices: such entries are never written and stay uninitialized.
for (CC_id = 0; CC_id < nb_mac_CC; CC_id++) {
cc = &eNB->common_channels[CC_id];
N_RBG[CC_id] = to_rbg(cc->mib->message.dl_Bandwidth);
for (rbg = 0; rbg < N_RBG[CC_id]; rbg++) {
for (i = 0; i < sli->n_dl; ++i) {
owned = sli->pre_processor_results[i].slice_allocation_mask[CC_id][rbg];
if (owned) {
used = rballoc_sub[CC_id][rbg];
free_rbgs_map[CC_id][rbg] = used ? -1 : i;
break;
}
}
}
}
// Find out which slices still need resources (any UE with RBs remaining).
// FIXME: probably redundant since nb_rbs_remaining is checked again below.
for (CC_id = 0; CC_id < nb_mac_CC; CC_id++) {
for (i = 0; i < sli->n_dl; i++) {
has_traffic[CC_id][i] = 0;
for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) {
if (sli->pre_processor_results[i].nb_rbs_remaining[CC_id][UE_id] > 0) {
has_traffic[CC_id][i] = 1;
break;
}
}
}
}
// Serve slices in priority order.
slice_priority_sort(Mod_id,
slice_sorted_list);
// MULTIPLEXING
// This part is an adaptation of dlsch_scheduler_pre_processor_allocate() code
for (CC_id = 0; CC_id < nb_mac_CC; ++CC_id) {
N_RB_DL = to_prb(eNB->common_channels[CC_id].mib->message.dl_Bandwidth);
min_rb_unit = get_min_rb_unit(Mod_id,
CC_id);
for (i = 0; i < sli->n_dl; ++i) {
slice_idx = slice_sorted_list[i];
if (has_traffic[CC_id][slice_idx] == 0) continue;
// Build an ad-hoc allocation mask for the slice: an RBG may be given
// away only if it is unused AND its owning slice is not isolated.
for (rbg = 0; rbg < N_RBG[CC_id]; ++rbg) {
if (free_rbgs_map[CC_id][rbg] == -1) {
// RBG is already allocated
allocation_mask[CC_id][rbg] = 0;
continue;
}
if (sli->dl[free_rbgs_map[CC_id][rbg]].isol == 1) {
// RBG belongs to an isolated slice
allocation_mask[CC_id][rbg] = 0;
continue;
}
// RBG is free
allocation_mask[CC_id][rbg] = 1;
}
// Sort UE again
// (UE list gets sorted every time pre_processor is called so it is probably dirty at this point)
// FIXME: There is only one UE_list for all slices, so it must be sorted again each time we use it
sort_UEs(Mod_id,
slice_idx,
frameP,
subframeP);
nb_rbs_remaining = sli->pre_processor_results[slice_idx].nb_rbs_remaining;
nb_rbs_required = sli->pre_processor_results[slice_idx].nb_rbs_required;
MIMO_mode_indicator = sli->pre_processor_results[slice_idx].MIMO_mode_indicator;
// Allocation: walk the (just sorted) UE list and hand out eligible RBGs.
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
tm = get_tmode(Mod_id,
CC_id,
UE_id);
for (rbg = 0; rbg < N_RBG[CC_id]; ++rbg) {
// FIXME: I think that some of these checks are redundant
if (allocation_mask[CC_id][rbg] == 0) continue;
if (rballoc_sub[CC_id][rbg] != 0) continue;
if (ue_sched_ctl->rballoc_sub_UE[CC_id][rbg] != 0) continue;
if (nb_rbs_remaining[CC_id][UE_id] <= 0) continue;
if (ue_sched_ctl->pre_nb_available_rbs[CC_id] >= nb_rbs_required[CC_id][UE_id]) continue;
if (ue_sched_ctl->dl_pow_off[CC_id] == 0) continue;
if ((rbg == N_RBG[CC_id] - 1) && ((N_RB_DL == 25) || (N_RB_DL == 50))) {
// Allocating the last RBG, which holds one RB less (min_rb_unit - 1)
// for the 25/50 PRB bandwidths
if (nb_rbs_remaining[CC_id][UE_id] >= min_rb_unit - 1) {
rballoc_sub[CC_id][rbg] = 1;
free_rbgs_map[CC_id][rbg] = -1;
ue_sched_ctl->rballoc_sub_UE[CC_id][rbg] = 1;
MIMO_mode_indicator[CC_id][rbg] = 1;
if (tm == 5) {
ue_sched_ctl->dl_pow_off[CC_id] = 1;
}
nb_rbs_remaining[CC_id][UE_id] -= (min_rb_unit - 1);
ue_sched_ctl->pre_nb_available_rbs[CC_id] += (min_rb_unit - 1);
}
} else {
// Allocating a standard-sized RBG
if (nb_rbs_remaining[CC_id][UE_id] >= min_rb_unit) {
rballoc_sub[CC_id][rbg] = 1;
free_rbgs_map[CC_id][rbg] = -1;
ue_sched_ctl->rballoc_sub_UE[CC_id][rbg] = 1;
MIMO_mode_indicator[CC_id][rbg] = 1;
if (tm == 5) {
ue_sched_ctl->dl_pow_off[CC_id] = 1;
}
nb_rbs_remaining[CC_id][UE_id] -= min_rb_unit;
ue_sched_ctl->pre_nb_available_rbs[CC_id] += min_rb_unit;
}
}
}
}
}
}
return;
}
//------------------------------------------------------------------------------
void dlsch_scheduler_qos_multiplexing(module_id_t Mod_id, int frameP, sub_frame_t subframeP)
//------------------------------------------------------------------------------
{
  slice_info_t *sli = &RC.mac[Mod_id]->slice_info;

  /* For every CC and every DL slice, re-sort the shared UE list.
   * NOTE(review): no QoS multiplexing is actually performed here yet;
   * the body only triggers the per-slice sort. */
  for (int CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) {
    for (int s = 0; s < sli->n_dl; s++) {
      /* There is only one UE_list for all slices, so it must be sorted
       * again each time we use it. */
      sort_UEs(Mod_id, (uint8_t)s, frameP, subframeP);
    }
  }
}
//------------------------------------------------------------------------------
/*
* Default DLSCH scheduler for LTE-M
......
......@@ -738,9 +738,8 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
uint16_t temp_total_rbs_count;
unsigned char temp_total_ue_count;
unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX];
uint8_t slice_allocation[MAX_NUM_CCs][N_RBG_MAX];
int UE_id, i;
uint16_t j,c;
uint16_t j;
uint16_t nb_rbs_required[MAX_NUM_CCs][MAX_MOBILES_PER_ENB];
uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][MAX_MOBILES_PER_ENB];
// uint16_t nb_rbs_required_remaining_1[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
......@@ -795,8 +794,7 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
// Store the DLSCH buffer for each logical channel
store_dlsch_buffer(Mod_id,0, frameP, subframeP);
// Calculate the number of RBs required by each UE on the basis of logical channel's buffer
assign_rbs_required(Mod_id, 0, frameP, subframeP, nb_rbs_required,
min_rb_unit);
assign_rbs_required(Mod_id, 0, frameP, subframeP, nb_rbs_required);
#else
memcpy(nb_rbs_required, pre_nb_rbs_required[dlsch_ue_select_tbl_in_use], sizeof(uint16_t)*MAX_NUM_CCs*MAX_MOBILES_PER_ENB);
#endif
......@@ -856,23 +854,14 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
nb_rbs_required_remaining[CC_id][UE_id] = cmin(average_rbs_per_user[CC_id], dlsch_ue_select[CC_id].list[i].nb_rb);
}
/* slicing support has been introduced into the scheduler. Provide dummy
* data so that the preprocessor "simply works" */
for (c = 0; c < MAX_NUM_CCs; ++c)
for (j = 0; j < N_RBG_MAX; ++j)
slice_allocation[c][j] = 1;
LOG_T(MAC,"calling dlsch_scheduler_pre_processor_allocate .. \n ");
dlsch_scheduler_pre_processor_allocate (Mod_id,
UE_id,
CC_id,
N_RBG[CC_id],
min_rb_unit[CC_id],
(uint16_t (*)[MAX_MOBILES_PER_ENB])nb_rbs_required,
(uint16_t (*)[MAX_MOBILES_PER_ENB])nb_rbs_required_remaining,
rballoc_sub,
slice_allocation,
MIMO_mode_indicator);
(uint16_t (*)[NUMBER_OF_UE_MAX])nb_rbs_required,
(uint16_t (*)[NUMBER_OF_UE_MAX])nb_rbs_required_remaining,
rballoc_sub);
temp_total_rbs_count -= ue_sched_ctl->pre_nb_available_rbs[CC_id];
temp_total_ue_count--;
......
......@@ -2146,6 +2146,37 @@ dump_ue_list(UE_list_t *listP,
return;
}
//------------------------------------------------------------------------------
/*
 * Append UE_id to the tail of the DL (ul_flag == 0) or UL (ul_flag != 0)
 * UE_list listP. An empty list is indicated by head == -1 / head_ul == -1;
 * the appended node always becomes the new tail (next == -1).
 * The caller must ensure UE_id is not already in the list.
 */
void
add_ue_list(UE_list_t *listP, int UE_id, int ul_flag) {
  if (ul_flag == 0) {
    if (listP->head == -1) {
      listP->head = UE_id;
      listP->next[UE_id] = -1;
    } else {
      int i = listP->head;
      while (listP->next[i] >= 0)
        i = listP->next[i];
      listP->next[i] = UE_id;
      listP->next[UE_id] = -1;
    }
  } else {
    if (listP->head_ul == -1) {
      listP->head_ul = UE_id;
      listP->next_ul[UE_id] = -1;
    } else {
      /* BUG FIX: the UL branch must traverse the UL list (head_ul/next_ul).
       * The original code started from listP->head and advanced through
       * listP->next[], so the UE was linked after a DL-list node and the
       * UL list was corrupted. */
      int i = listP->head_ul;
      while (listP->next_ul[i] >= 0)
        i = listP->next_ul[i];
      listP->next_ul[i] = UE_id;
      listP->next_ul[UE_id] = -1;
    }
  }
}
//------------------------------------------------------------------------------
int
add_new_ue(module_id_t mod_idP,
......@@ -2165,7 +2196,6 @@ add_new_ue(module_id_t mod_idP,
rntiP,
UE_list->avail,
UE_list->num_UEs);
dump_ue_list(UE_list, 0);
for (i = 0; i < MAX_MOBILES_PER_ENB; i++) {
if (UE_list->active[i] == TRUE)
......@@ -2182,6 +2212,10 @@ add_new_ue(module_id_t mod_idP,
UE_list->ordered_ULCCids[0][UE_id] = cc_idP;
UE_list->num_UEs++;
UE_list->active[UE_id] = TRUE;
add_ue_list(UE_list, UE_id, 0);
dump_ue_list(UE_list, 0);
add_ue_list(UE_list, UE_id, 1);
dump_ue_list(UE_list, 1);
if (IS_SOFTMODEM_IQPLAYER)// not specific to record/playback ?
UE_list->UE_template[cc_idP][UE_id].pre_assigned_mcs_ul = 0;
UE_list->UE_template[cc_idP][UE_id].rach_resource_type = rach_resource_type;
......@@ -2211,8 +2245,6 @@ add_new_ue(module_id_t mod_idP,
UE_id,
cc_idP,
rntiP);
dump_ue_list(UE_list,
0);
return (UE_id);
}
......@@ -2248,10 +2280,11 @@ rrc_mac_remove_ue(module_id_t mod_idP,
UE_id,
pCC_id,
rntiP);
dump_ue_list(UE_list, 0); // DL list displayed in LOG_T(MAC)
UE_list->active[UE_id] = FALSE;
UE_list->num_UEs--;
UE_list->next[UE_id] = -1;
UE_list->next_ul[UE_id] = -1;
/* If present, remove UE from DL list */
if (UE_list->head == UE_id) {
UE_list->head = UE_list->next[UE_id];
......@@ -2391,130 +2424,6 @@ prev(UE_list_t *listP,
return -1;
}
//------------------------------------------------------------------------------
/*
 * Swap the positions of UE nodes nodeiP and nodejP inside the DL
 * (ul_flag == 0) or UL (ul_flag != 0) linked list of listP.
 * Both nodes must be present: prev() must find each of them
 * (AssertFatal otherwise). Three cases are handled per direction:
 * i directly before j, j directly before i, and i/j non-adjacent.
 */
void
swap_UEs(UE_list_t *listP,
int nodeiP,
int nodejP,
int ul_flag)
//------------------------------------------------------------------------------
{
int prev_i, prev_j, next_i, next_j;
LOG_T(MAC, "Swapping UE %d,%d\n",
nodeiP,
nodejP);
dump_ue_list(listP,
ul_flag);
// Locate the predecessors of both nodes (the list is singly linked).
prev_i = prev(listP,
nodeiP,
ul_flag);
prev_j = prev(listP,
nodejP,
ul_flag);
AssertFatal((prev_i >= 0) && (prev_j >= 0), "swap_UEs: problem");
if (ul_flag == 0) {
next_i = listP->next[nodeiP];
next_j = listP->next[nodejP];
} else {
next_i = listP->next_ul[nodeiP];
next_j = listP->next_ul[nodejP];
}
// NOTE(review): the format string repeats "next_i" where "next_j" was
// probably meant, and prints listP->head even in the UL case.
LOG_T(MAC, "[%s] next_i %d, next_i, next_j %d, head %d \n",
(ul_flag == 0) ? "DL" : "UL",
next_i,
next_j,
listP->head);
if (ul_flag == 0) {
if (next_i == nodejP) { // case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...
LOG_T(MAC, "Case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...\n");
listP->next[nodeiP] = next_j;
listP->next[nodejP] = nodeiP;
if (nodeiP == listP->head) { // case i j n(j)
listP->head = nodejP;
} else {
listP->next[prev_i] = nodejP;
}
} else if (next_j == nodeiP) { // case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...
LOG_T(MAC, "Case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...\n");
listP->next[nodejP] = next_i;
listP->next[nodeiP] = nodejP;
if (nodejP == listP->head) { // case j i n(i)
listP->head = nodeiP;
} else {
listP->next[prev_j] = nodeiP;
}
} else { // case ... p(i) i n(i) ... p(j) j n(j) ...
listP->next[nodejP] = next_i;
listP->next[nodeiP] = next_j;
if (nodeiP == listP->head) {
LOG_T(MAC, "changing head to %d\n",
nodejP);
listP->head = nodejP;
listP->next[prev_j] = nodeiP;
} else if (nodejP == listP->head) {
// NOTE(review): LOG_D here while the symmetric branches use LOG_T.
LOG_D(MAC, "changing head to %d\n",
nodeiP);
listP->head = nodeiP;
listP->next[prev_i] = nodejP;
} else {
listP->next[prev_i] = nodejP;
listP->next[prev_j] = nodeiP;
}
}
} else { // ul_flag: identical logic on the UL list (head_ul/next_ul)
if (next_i == nodejP) { // case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...
LOG_T(MAC, "[UL] Case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...\n");
listP->next_ul[nodeiP] = next_j;
listP->next_ul[nodejP] = nodeiP;
if (nodeiP == listP->head_ul) { // case i j n(j)
listP->head_ul = nodejP;
} else {
listP->next_ul[prev_i] = nodejP;
}
} else if (next_j == nodeiP) { // case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...
LOG_T(MAC, "[UL]Case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...\n");
listP->next_ul[nodejP] = next_i;
listP->next_ul[nodeiP] = nodejP;
if (nodejP == listP->head_ul) { // case j i n(i)
listP->head_ul = nodeiP;
} else {
listP->next_ul[prev_j] = nodeiP;
}
} else { // case ... p(i) i n(i) ... p(j) j n(j) ...
listP->next_ul[nodejP] = next_i;
listP->next_ul[nodeiP] = next_j;
if (nodeiP == listP->head_ul) {
LOG_T(MAC, "[UL]changing head to %d\n",
nodejP);
listP->head_ul = nodejP;
listP->next_ul[prev_j] = nodeiP;
} else if (nodejP == listP->head_ul) {
LOG_T(MAC, "[UL]changing head to %d\n",
nodeiP);
listP->head_ul = nodeiP;
listP->next_ul[prev_i] = nodejP;
} else {
listP->next_ul[prev_i] = nodejP;
listP->next_ul[prev_j] = nodeiP;
}
}
}
LOG_T(MAC, "After swap\n");
dump_ue_list(listP,
ul_flag);
return;
}
// This has to be updated to include BSR information
//------------------------------------------------------------------------------
uint8_t
......
......@@ -1190,7 +1190,6 @@ typedef struct {
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB];
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB];
uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX];
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX];
uint32_t bytes_lcid[MAX_MOBILES_PER_ENB][MAX_NUM_LCID];
uint32_t wb_pmi[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB];
......
......@@ -248,49 +248,23 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
int slice_idx,
frame_t frameP,
sub_frame_t subframeP,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]);
void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
int slice_idx,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]);
void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
int slice_idx,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]);
void slice_priority_sort(module_id_t Mod_id, int slice_list[MAX_NUM_SLICES]);
void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id,
int frameP,
sub_frame_t subframeP,
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]);
void dlsch_scheduler_qos_multiplexing(module_id_t Mod_id,
int frameP,
sub_frame_t subframeP);
void dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id,
int UE_id,
uint8_t CC_id,
int N_RBG,
int min_rb_unit,
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]);
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]);
/* \brief Function to trigger the eNB scheduling procedure. It is called by PHY at the beginning of each subframe, \f$n$\f
and generates all DLSCH allocations for subframe \f$n\f$ and ULSCH allocations for subframe \f$n+k$\f.
......@@ -715,10 +689,10 @@ int add_new_ue(module_id_t Mod_id, int CC_id, rnti_t rnti, int harq_pid, uint8_t
int rrc_mac_remove_ue(module_id_t Mod_id, rnti_t rntiP);
void store_dlsch_buffer(module_id_t Mod_id, int slice_idx, frame_t frameP, sub_frame_t subframeP);
void assign_rbs_required(module_id_t Mod_id, int slice_idx, frame_t frameP, sub_frame_t subframe, uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], int min_rb_unit[NFAPI_CC_MAX]);
void assign_rbs_required(module_id_t Mod_id, int slice_idx, frame_t frameP, sub_frame_t subframe, uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]);
void swap_UEs(UE_list_t *listP, int nodeiP, int nodejP, int ul_flag);
int prev(UE_list_t *listP, int nodeP, int ul_flag);
void add_ue_list(UE_list_t *listP, int UE_id, int ul_flag);
void dump_ue_list(UE_list_t *listP, int ul_flag);
int UE_num_active_CC(UE_list_t *listP, int ue_idP);
int UE_PCCID(module_id_t mod_idP, int ue_idP);
......@@ -745,7 +719,6 @@ void adjust_bsr_info(int buffer_occupancy, uint16_t TBS,
UE_TEMPLATE *UE_template);
int phy_stats_exist(module_id_t Mod_id, int rnti);
void sort_UEs(module_id_t Mod_idP, int slice_idx, int frameP, sub_frame_t subframeP);
/*! \fn UE_L2_state_t ue_scheduler(const module_id_t module_idP,const frame_t frameP, const sub_frame_t subframe, const lte_subframe_t direction,const uint8_t eNB_index)
\brief UE scheduler where all the ue background tasks are done. This function performs the following: 1) Trigger PDCP every 5ms 2) Call RRC for link status return to PHY3) Perform SR/BSR procedures for scheduling feedback 4) Perform PHR procedures.
......
......@@ -50,12 +50,10 @@ void init_UE_list(UE_list_t *UE_list)
UE_list->head = -1;
UE_list->head_ul = -1;
UE_list->avail = 0;
for (list_el = 0; list_el < MAX_MOBILES_PER_ENB - 1; list_el++) {
UE_list->next[list_el] = list_el + 1;
UE_list->next_ul[list_el] = list_el + 1;
}
for (list_el = 0; list_el < MAX_MOBILES_PER_ENB; list_el++) {
UE_list->next[list_el] = -1;
UE_list->next_ul[list_el] = -1;
}
memset(UE_list->DLSCH_pdu, 0, sizeof(UE_list->DLSCH_pdu));
memset(UE_list->UE_template, 0, sizeof(UE_list->UE_template));
memset(UE_list->eNB_UE_stats, 0, sizeof(UE_list->eNB_UE_stats));
......
......@@ -52,9 +52,7 @@ extern RAN_CONTEXT_t RC;
#define DEBUG_eNB_SCHEDULER 1
#define DEBUG_HEADER_PARSING 1
//#define DEBUG_PACKET_TRACE 1
//#define ICIC 0
void
sort_ue_ul(module_id_t module_idP,
......@@ -63,35 +61,6 @@ sort_ue_ul(module_id_t module_idP,
sub_frame_t sched_subframeP,
rnti_t *rntiTable);
/* this function checks that get_eNB_UE_stats returns
* a non-NULL pointer for all the active CCs of an UE
*/
/*
int phy_stats_exist(module_id_t Mod_id, int rnti)
{
int CC_id;
int i;
int UE_id = find_UE_id(Mod_id, rnti);
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
if (UE_id == -1) {
LOG_W(MAC, "[eNB %d] UE %x not found, should be there (in phy_stats_exist)\n",
Mod_id, rnti);
return 0;
}
if (UE_list->numactiveCCs[UE_id] == 0) {
LOG_W(MAC, "[eNB %d] UE %x has no active CC (in phy_stats_exist)\n",
Mod_id, rnti);
return 0;
}
for (i = 0; i < UE_list->numactiveCCs[UE_id]; i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
if (mac_xface->get_eNB_UE_stats(Mod_id, CC_id, rnti) == NULL)
return 0;
}
return 1;
}
*/
// This function stores the downlink buffer for all the logical channels
void
store_dlsch_buffer(module_id_t Mod_id,
......@@ -175,8 +144,7 @@ assign_rbs_required(module_id_t Mod_id,
int slice_idx,
frame_t frameP,
sub_frame_t subframe,
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
int min_rb_unit[NFAPI_CC_MAX]) {
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]) {
uint16_t TBS = 0;
int UE_id, n, i, j, CC_id, pCCid, tmp;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
......@@ -222,11 +190,12 @@ assign_rbs_required(module_id_t Mod_id,
for (i = 0; i < UE_list->numactiveCCs[UE_id]; i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
const int min_rb_unit = get_min_rb_unit(Mod_id, CC_id);
if (eNB_UE_stats->dlsch_mcs1 == 0) {
nb_rbs_required[CC_id][UE_id] = 4; // don't let the TBS get too small
} else {
nb_rbs_required[CC_id][UE_id] = min_rb_unit[CC_id];
nb_rbs_required[CC_id][UE_id] = min_rb_unit;
}
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_rbs_required[CC_id][UE_id]);
......@@ -242,7 +211,7 @@ assign_rbs_required(module_id_t Mod_id,
/* calculating required number of RBs for each UE */
while (TBS < UE_list->UE_template[pCCid][UE_id].dl_buffer_total) {
nb_rbs_required[CC_id][UE_id] += min_rb_unit[CC_id];
nb_rbs_required[CC_id][UE_id] += min_rb_unit;
if (nb_rbs_required[CC_id][UE_id] > UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx]) {
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx]);
......@@ -255,7 +224,7 @@ assign_rbs_required(module_id_t Mod_id,
LOG_D(MAC,
"[eNB %d] Frame %d: UE %d on CC %d: RB unit %d, nb_required RB %d (TBS %d, mcs %d)\n",
Mod_id, frameP, UE_id, CC_id, min_rb_unit[CC_id],
Mod_id, frameP, UE_id, CC_id, min_rb_unit,
nb_rbs_required[CC_id][UE_id], TBS,
eNB_UE_stats->dlsch_mcs1);
sli->pre_processor_results[slice_idx].mcs[CC_id][UE_id] = eNB_UE_stats->dlsch_mcs1;
......@@ -265,33 +234,6 @@ assign_rbs_required(module_id_t Mod_id,
}
// This function scans all CC_ids for a particular UE to find the maximum round index of its HARQ processes
int
maxround(module_id_t Mod_id, uint16_t rnti, int frame,
         sub_frame_t subframe) {
  uint8_t round, round_max = 0;
  /* BUG FIX: UE_id must be int, not uint8_t. find_UE_id() signals "not
   * found" with -1; stored in a uint8_t it became 255, the `== -1` check
   * could never be true (integer promotion), and index 255 was then used
   * to read UE_sched_ctrl out of bounds. */
  int UE_id;
  int CC_id, harq_pid;
  UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
  COMMON_channels_t *cc;
  /* The UE id does not depend on CC_id: look it up once, outside the loop. */
  UE_id = find_UE_id(Mod_id, rnti);

  if (UE_id == -1)
    return round_max;

  for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) {
    cc = &RC.mac[Mod_id]->common_channels[CC_id];
    harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config, frame, subframe);
    round = UE_list->UE_sched_ctrl[UE_id].round[CC_id][harq_pid];

    if (round > round_max) {
      round_max = round;
    }
  }

  return round_max;
}
int
maxround_ul(module_id_t Mod_id, uint16_t rnti, int sched_frame,
sub_frame_t sched_subframe) {
......@@ -318,229 +260,6 @@ maxround_ul(module_id_t Mod_id, uint16_t rnti, int sched_frame,
return round_max;
}
// Scan every active CC of UE_id and return the largest DL CQI reported;
// returns 0 when the UE has no active CCs (or all CQIs are 0).
int maxcqi(module_id_t Mod_id, int32_t UE_id) {
  UE_list_t *ue_list = &RC.mac[Mod_id]->UE_list;
  int best_cqi = 0;

  for (int n = 0; n < ue_list->numactiveCCs[UE_id]; ++n) {
    const int cc_id = ue_list->ordered_CCids[n][UE_id];
    const int cqi = ue_list->UE_sched_ctrl[UE_id].dl_cqi[cc_id];

    if (cqi > best_cqi)
      best_cqi = cqi;
  }

  return best_cqi;
}
/* Return the smallest (numerically lowest) lcgidpriority value configured
 * for UE_id on its primary CC, scanning entries 0..10 of the array. */
long min_lcgidpriority(module_id_t Mod_id, int32_t UE_id) {
  UE_list_t *ue_list = &RC.mac[Mod_id]->UE_list;
  const int pcc = UE_PCCID(Mod_id, UE_id);
  long minimum = ue_list->UE_template[pcc][UE_id].lcgidpriority[0];

  for (int idx = 1; idx < 11; ++idx) {
    const long prio = ue_list->UE_template[pcc][UE_id].lcgidpriority[idx];

    if (prio < minimum)
      minimum = prio;
  }

  return minimum;
}
/* Context handed to ue_dl_compare() through qsort_r()'s extra argument. */
struct sort_ue_dl_params {
int Mod_idP;    // eNB module instance
int frameP;     // current frame
int subframeP;  // current subframe
int slice_idx;  // DL slice whose sorting policy is applied
};
/*
 * qsort_r() comparison callback ordering two UE ids (pointed to by _a/_b)
 * for DL scheduling. _params is a struct sort_ue_dl_params. Criteria are
 * applied in the order stored in UE_list->sorting_criteria[slice_idx][];
 * the first criterion that discriminates decides (-1: a first, 1: b first).
 * Returns 0 when all criteria tie.
 */
static int ue_dl_compare(const void *_a, const void *_b, void *_params) {
  struct sort_ue_dl_params *params = _params;
  UE_list_t *UE_list = &RC.mac[params->Mod_idP]->UE_list;
  int i;
  int slice_idx = params->slice_idx;
  int UE_id1 = *(const int *) _a;
  int UE_id2 = *(const int *) _b;
  int rnti1 = UE_RNTI(params->Mod_idP, UE_id1);
  int pCC_id1 = UE_PCCID(params->Mod_idP, UE_id1);
  int round1 = maxround(params->Mod_idP, rnti1, params->frameP, params->subframeP);
  int rnti2 = UE_RNTI(params->Mod_idP, UE_id2);
  int pCC_id2 = UE_PCCID(params->Mod_idP, UE_id2);
  int round2 = maxround(params->Mod_idP, rnti2, params->frameP, params->subframeP);
  int cqi1 = maxcqi(params->Mod_idP, UE_id1);
  int cqi2 = maxcqi(params->Mod_idP, UE_id2);
  long lcgid1 = min_lcgidpriority(params->Mod_idP, UE_id1);
  long lcgid2 = min_lcgidpriority(params->Mod_idP, UE_id2);

  for (i = 0; i < CR_NUM; ++i) {
    switch (UE_list->sorting_criteria[slice_idx][i]) {
      case CR_ROUND : // higher HARQ round first (retransmissions take priority)
        if (round1 > round2)
          return -1;

        if (round1 < round2)
          return 1;

        break;

      case CR_SRB12 : // more SRB1+SRB2 buffered data first
        if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] +
            UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2] >
            UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] +
            UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])
          return -1;

        if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] +
            UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2] <
            UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] +
            UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])
          return 1;

        break;

      case CR_HOL : // older head-of-line SDU first
        if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max >
            UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max)
          return -1;

        if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max <
            UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max)
          return 1;

        break;

      case CR_LC : // larger total DL buffer first
        if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total >
            UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total)
          return -1;

        if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total <
            UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total)
          return 1;

        break;

      case CR_CQI : // better channel quality first
        if (cqi1 > cqi2)
          return -1;

        if (cqi1 < cqi2)
          return 1;

        break;

      case CR_LCP : // lower logical-channel-group priority value first
        if (lcgid1 < lcgid2)
          return -1;

        if (lcgid1 > lcgid2)
          return 1;

        /* BUG FIX: the original fell through into default here. Harmless
         * today (default only breaks), but any case inserted in between
         * would silently change the ordering. Make the break explicit. */
        break;

      default :
        break;
    }
  }

  return 0;
}
/*
 * Decode the 4-bit-per-criterion DL sorting policy of slice_idx into
 * UE_list->sorting_criteria[slice_idx][0..CR_NUM-1] (most significant
 * nibble first). If any nibble is not a valid criterion (>= CR_NUM), the
 * stored policy is reverted to the default 0x12345 and decoding restarts
 * from that default.
 */
void decode_sorting_policy(module_id_t Mod_idP, int slice_idx) {
  int i;
  UE_list_t *UE_list = &RC.mac[Mod_idP]->UE_list;
  uint32_t policy = RC.mac[Mod_idP]->slice_info.dl[slice_idx].sorting;
  uint32_t mask = 0x0000000F;
  uint16_t criterion;
  int reverted = 0;

  for (i = 0; i < CR_NUM; ++i) {
    criterion = (uint16_t) (policy >> 4 * (CR_NUM - 1 - i) & mask);

    if (criterion >= CR_NUM) {
      LOG_W(MAC,
            "Invalid criterion in slice index %d ID %d policy, revert to default policy \n",
            slice_idx, RC.mac[Mod_idP]->slice_info.dl[slice_idx].id);
      RC.mac[Mod_idP]->slice_info.dl[slice_idx].sorting = 0x12345;

      /* BUG FIX: the original simply broke out here, leaving
       * sorting_criteria[slice_idx][i..CR_NUM-1] stale for this TTI.
       * Restart decoding from the default policy instead (at most once,
       * to guard against an invalid default). */
      if (reverted)
        break;

      reverted = 1;
      policy = 0x12345;
      i = -1;
      continue;
    }

    UE_list->sorting_criteria[slice_idx][i] = criterion;
  }
}
/*
 * Fill slice_allocation_mask with 1 for every RBG inside the frequency
 * range [pos_low, pos_high] configured for slice_idx (on every CC), and 0
 * elsewhere. The configured bounds are clamped to [0, N_RBG_MAX-1] so a
 * bad configuration cannot write outside the mask.
 */
void decode_slice_positioning(module_id_t Mod_idP,
                              int slice_idx,
                              uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX]) {
  uint8_t CC_id;
  int RBG, start_frequency, end_frequency;

  // Init slice_alloc_mask: everything outside the slice range stays 0
  for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_idP]; ++CC_id) {
    for (RBG = 0; RBG < N_RBG_MAX; ++RBG) {
      slice_allocation_mask[CC_id][RBG] = 0;
    }
  }

  start_frequency = RC.mac[Mod_idP]->slice_info.dl[slice_idx].pos_low;
  end_frequency = RC.mac[Mod_idP]->slice_info.dl[slice_idx].pos_high;

  /* BUG FIX: pos_low/pos_high come from (external) slice configuration and
   * were used unclamped as indices; an out-of-range pos_high overwrote
   * memory past slice_allocation_mask. Clamp both bounds. */
  if (start_frequency < 0)
    start_frequency = 0;

  if (end_frequency >= N_RBG_MAX)
    end_frequency = N_RBG_MAX - 1;

  for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_idP]; ++CC_id) {
    for (RBG = start_frequency; RBG <= end_frequency; ++RBG) {
      slice_allocation_mask[CC_id][RBG] = 1;
    }
  }
}
//-----------------------------------------------------------------------------
/*
 * Rebuild the DL UE_list (head/next) for slice slice_idx, ordered by the
 * slice's sorting policy (HARQ round, buffer sizes, HOL delay, CQI, LCG
 * priority - see ue_dl_compare()). Only active, in-sync UEs with a valid
 * RNTI that belong to the slice are included; CDRX-configured UEs outside
 * their active time are skipped. An empty result sets head to -1.
 * NOTE(review): uses GNU qsort_r() argument order (comparator last).
 */
void sort_UEs(module_id_t Mod_idP,
int slice_idx,
int frameP,
sub_frame_t subframeP)
//-----------------------------------------------------------------------------
{
int list[MAX_MOBILES_PER_ENB];
int list_size = 0;
struct sort_ue_dl_params params = {Mod_idP, frameP, subframeP, slice_idx};
UE_list_t *UE_list = &(RC.mac[Mod_idP]->UE_list);
UE_sched_ctrl_t *UE_scheduling_control = NULL;
// Collect the schedulable UEs of this slice into a flat array.
for (int i = 0; i < MAX_MOBILES_PER_ENB; i++) {
UE_scheduling_control = &(UE_list->UE_sched_ctrl[i]);
/* Check CDRX configuration and if UE is in active time for this subframe */
if (UE_scheduling_control->cdrx_configured == TRUE) {
if (UE_scheduling_control->in_active_time == FALSE) {
continue;
}
}
if (UE_list->active[i] == TRUE &&
UE_RNTI(Mod_idP, i) != NOT_A_RNTI &&
UE_list->UE_sched_ctrl[i].ul_out_of_sync != 1 &&
ue_dl_slice_membership(Mod_idP, i, slice_idx)) {
list[list_size++] = i;
}
}
// Refresh sorting_criteria from the slice policy, then sort.
decode_sorting_policy(Mod_idP, slice_idx);
qsort_r(list, list_size, sizeof(int), ue_dl_compare, &params);
// Relink the DL list in sorted order; -1 terminates the list.
if (list_size) {
for (int i = 0; i < list_size - 1; ++i) {
UE_list->next[list[i]] = list[i + 1];
}
UE_list->next[list[list_size - 1]] = -1;
UE_list->head = list[0];
} else {
UE_list->head = -1;
}
}
void dlsch_scheduler_pre_processor_partitioning(module_id_t Mod_id,
int slice_idx,
const uint8_t rbs_retx[NFAPI_CC_MAX]) {
......@@ -575,7 +294,6 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
int slice_idx,
frame_t frameP,
sub_frame_t subframeP,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]) {
int UE_id, CC_id;
......@@ -694,14 +412,15 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
CC_id = UE_list->ordered_CCids[i][UE_id];
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
available_rbs[CC_id] = ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_idx];
const int min_rb_unit = get_min_rb_unit(Mod_id, CC_id);
if (ue_count_newtx[CC_id] == 0) {
average_rbs_per_user[CC_id] = 0;
} else if (min_rb_unit[CC_id]*ue_count_newtx[CC_id] <= available_rbs[CC_id]) {
} else if (min_rb_unit*ue_count_newtx[CC_id] <= available_rbs[CC_id]) {
average_rbs_per_user[CC_id] = (uint16_t)floor(available_rbs[CC_id]/ue_count_newtx[CC_id]);
} else {
// consider the total number of use that can be scheduled UE
average_rbs_per_user[CC_id] = (uint16_t)min_rb_unit[CC_id];
average_rbs_per_user[CC_id] = (uint16_t)min_rb_unit;
}
}
}
......@@ -755,18 +474,14 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
int slice_idx,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]) {
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]) {
int UE_id, CC_id;
int i;
uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX];
int N_RBG[NFAPI_CC_MAX];
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
decode_slice_positioning(Mod_id, slice_idx, slice_allocation_mask);
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) {
COMMON_channels_t *cc = &RC.mac[Mod_id]->common_channels[CC_id];
......@@ -787,91 +502,23 @@ void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
if (nb_rbs_required[CC_id][UE_id] > 0)
LOG_D(MAC,
"Step 1: nb_rbs_remaining[%d][%d]= %d (accounted %d, required %d, pre_nb_available_rbs %d, N_RBG %d, rb_unit %d)\n",
"Step 1: nb_rbs_remaining[%d][%d]= %d (accounted %d, required %d, pre_nb_available_rbs %d, N_RBG %d)\n",
CC_id,
UE_id,
nb_rbs_remaining[CC_id][UE_id],
nb_rbs_accounted[CC_id][UE_id],
nb_rbs_required[CC_id][UE_id],
UE_list->UE_sched_ctrl[UE_id].pre_nb_available_rbs[CC_id],
N_RBG[CC_id],
min_rb_unit[CC_id]);
N_RBG[CC_id]);
LOG_T(MAC, "calling dlsch_scheduler_pre_processor_allocate .. \n ");
dlsch_scheduler_pre_processor_allocate(Mod_id,
UE_id,
CC_id,
N_RBG[CC_id],
min_rb_unit[CC_id],
nb_rbs_required,
nb_rbs_remaining,
rballoc_sub,
slice_allocation_mask,
MIMO_mode_indicator);
}
}
}
void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
int slice_idx,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]) {
int UE_id, CC_id;
int i;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
int N_RBG[NFAPI_CC_MAX];
slice_info_t *sli = &RC.mac[Mod_id]->slice_info;
uint8_t (*slice_allocation_mask)[N_RBG_MAX] = sli->pre_processor_results[slice_idx].slice_allocation_mask;
decode_slice_positioning(Mod_id, slice_idx, slice_allocation_mask);
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) {
COMMON_channels_t *cc = &RC.mac[Mod_id]->common_channels[CC_id];
N_RBG[CC_id] = to_rbg(cc->mib->message.dl_Bandwidth);
}
// Remaining RBs are allocated to high priority UEs
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
if (UE_RNTI(Mod_id, UE_id) == NOT_A_RNTI) continue;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
if (!ue_dl_slice_membership(Mod_id, UE_id, slice_idx)) continue;
for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) {
CC_id = UE_list->ordered_CCids[i][UE_id];
nb_rbs_remaining[CC_id][UE_id] =
nb_rbs_required[CC_id][UE_id] - nb_rbs_accounted[CC_id][UE_id] + nb_rbs_remaining[CC_id][UE_id];
if (nb_rbs_remaining[CC_id][UE_id] < 0)
abort();
if (nb_rbs_required[CC_id][UE_id] > 0)
LOG_D(MAC,
"Step 2: nb_rbs_remaining[%d][%d]= %d (accounted %d, required %d, pre_nb_available_rbs %d, N_RBG %d, rb_unit %d)\n",
CC_id,
UE_id,
nb_rbs_remaining[CC_id][UE_id],
nb_rbs_accounted[CC_id][UE_id],
nb_rbs_required[CC_id][UE_id],
UE_list->UE_sched_ctrl[UE_id].pre_nb_available_rbs[CC_id],
N_RBG[CC_id],
min_rb_unit[CC_id]);
LOG_T(MAC, "calling dlsch_scheduler_pre_processor_allocate .. \n ");
dlsch_scheduler_pre_processor_allocate(Mod_id,
UE_id,
CC_id,
N_RBG[CC_id],
min_rb_unit[CC_id],
nb_rbs_required,
nb_rbs_remaining,
rballoc_sub,
slice_allocation_mask,
MIMO_mode_indicator);
rballoc_sub);
}
}
}
......@@ -885,8 +532,6 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
int UE_id;
uint16_t i, j;
int slice_idx = 0;
int min_rb_unit[NFAPI_CC_MAX];
min_rb_unit[CC_id] = get_min_rb_unit(Mod_id, CC_id);
// TODO: remove NFAPI_CC_MAX, here for compatibility for the moment
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX];
memset(rballoc_sub, 0, sizeof(rballoc_sub));
......@@ -896,9 +541,6 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
uint16_t (*nb_rbs_required)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_idx].nb_rbs_required;
uint16_t (*nb_rbs_accounted)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_idx].nb_rbs_accounted;
uint16_t (*nb_rbs_remaining)[MAX_MOBILES_PER_ENB] = sli->pre_processor_results[slice_idx].nb_rbs_remaining;
// TODO remove this
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX];
memset(MIMO_mode_indicator, 0, sizeof(MIMO_mode_indicator));
UE_list_t *UE_list = &eNB->UE_list;
UE_sched_ctrl_t *ue_sched_ctl;
......@@ -919,45 +561,24 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
slice_idx,
frameP,
subframeP,
nb_rbs_required,
min_rb_unit);
// Sorts the user on the basis of dlsch logical channel buffer and CQI
sort_UEs(Mod_id,
slice_idx,
frameP,
subframeP);
nb_rbs_required);
// ACCOUNTING
// This procedure decides the number of RBs to allocate
dlsch_scheduler_pre_processor_accounting(Mod_id,
slice_idx,
frameP,
subframeP,
min_rb_unit,
nb_rbs_required,
nb_rbs_accounted);
// POSITIONING
// This procedure does the main allocation of the RBs
dlsch_scheduler_pre_processor_positioning(Mod_id,
slice_idx,
min_rb_unit,
nb_rbs_required,
nb_rbs_accounted,
nb_rbs_remaining,
rballoc_sub,
MIMO_mode_indicator);
// SHARING
// If there are available RBs left in the slice, allocate them to the highest priority UEs
if (eNB->slice_info.intraslice_share_active) {
dlsch_scheduler_pre_processor_intraslice_sharing(Mod_id,
slice_idx,
min_rb_unit,
nb_rbs_required,
nb_rbs_accounted,
nb_rbs_remaining,
rballoc_sub,
MIMO_mode_indicator);
}
rballoc_sub);
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
......@@ -1042,16 +663,14 @@ dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id,
int UE_id,
uint8_t CC_id,
int N_RBG,
int min_rb_unit,
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]) {
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]) {
int i;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
UE_sched_ctrl_t *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
int N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);
const int min_rb_unit = get_min_rb_unit(Mod_id, CC_id);
for (i = 0; i < N_RBG; i++) {
if (rballoc_sub[CC_id][i] != 0) continue;
......@@ -1064,14 +683,11 @@ dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id,
if (ue_sched_ctl->dl_pow_off[CC_id] == 0) continue;
if (slice_allocation_mask[CC_id][i] == 0) continue;
if ((i == N_RBG - 1) && ((N_RB_DL == 25) || (N_RB_DL == 50))) {
// Allocating last, smaller RBG
if (nb_rbs_remaining[CC_id][UE_id] >= min_rb_unit - 1) {
rballoc_sub[CC_id][i] = 1;
ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 1;
MIMO_mode_indicator[CC_id][i] = 1;
nb_rbs_remaining[CC_id][UE_id] = nb_rbs_remaining[CC_id][UE_id] - min_rb_unit + 1;
ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit - 1;
......@@ -1081,7 +697,6 @@ dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id,
if (nb_rbs_remaining[CC_id][UE_id] >= min_rb_unit) {
rballoc_sub[CC_id][i] = 1;
ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 1;
MIMO_mode_indicator[CC_id][i] = 1;
nb_rbs_remaining[CC_id][UE_id] = nb_rbs_remaining[CC_id][UE_id] - min_rb_unit;
ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment