Commit 4462e2d7 authored by Robert Schmidt

Remove unused/broken code in preprocessor

* Remove MIMO_mode_indicator: it is simply not used at the moment
* Remove min_rb_unit as a parameter: the function auto-detects it
* Remove commented-out code
* Remove slice sharing/multiplexing: it is broken
* Remove UE sorting, add UEs through add_new_ue() in the MAC
  - UE sorting is useless overhead on every iteration; instead, the order
    should be governed by a scheduling algorithm (e.g., RR or PF)
  - The MAC keeps a UE list and automatically adds a UE in add_new_ue()
    (see the sketch below)
  - An empty UE_list has its head set to -1
* Remove slice_positioning: the slicing functionality is broken
* Remove unused/untested code
parent 8d3fa4dc
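A minimal sketch of the new UE bookkeeping, not the literal patch: UE_list_t, add_ue_list() and the head/next fields appear in the diff below, while the simplified struct, helper names and the list size used here are assumptions for illustration only. The DL UE list is a singly-linked list stored in arrays, an empty list has head == -1, and a new UE is appended at the tail.

    #include <stdio.h>

    #define MAX_MOBILES_PER_ENB 16

    /* Simplified stand-in for the MAC's UE_list_t (illustration only). */
    typedef struct {
      int head;                        /* first UE in the DL list, -1 if empty       */
      int next[MAX_MOBILES_PER_ENB];   /* next[UE_id] = following UE, -1 at the tail */
    } ue_list_t;

    static void ue_list_init(ue_list_t *l) {
      l->head = -1;                    /* empty list: head is -1 */
      for (int i = 0; i < MAX_MOBILES_PER_ENB; i++)
        l->next[i] = -1;               /* no UE linked anywhere yet */
    }

    /* Append UE_id at the tail, mirroring what add_ue_list() does for the DL list. */
    static void ue_list_add(ue_list_t *l, int UE_id) {
      l->next[UE_id] = -1;
      if (l->head == -1) {             /* first UE becomes the head */
        l->head = UE_id;
        return;
      }
      int i = l->head;
      while (l->next[i] >= 0)          /* walk to the current tail */
        i = l->next[i];
      l->next[i] = UE_id;
    }

    int main(void) {
      ue_list_t l;
      ue_list_init(&l);
      ue_list_add(&l, 3);
      ue_list_add(&l, 0);
      for (int i = l.head; i >= 0; i = l.next[i])
        printf("UE %d\n", i);          /* prints 3, then 0: insertion order */
      return 0;
    }

With the list maintained this way, the per-TTI sort_UEs() pass becomes unnecessary; the order in which UEs are served is left to the scheduling algorithm (e.g., RR or PF).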
@@ -577,19 +577,6 @@ schedule_ue_spec(module_id_t module_idP,
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR,
VCD_FUNCTION_OUT);
if (RC.mac[module_idP]->slice_info.interslice_share_active) {
dlsch_scheduler_interslice_multiplexing(module_idP,
frameP,
subframeP,
eNB->slice_info.rballoc_sub);
/* the interslice multiplexing re-sorts the UE_list for the slices it tries
* to multiplex, so we need to sort it for the current slice again */
sort_UEs(module_idP,
0,//slice_idxP,
frameP,
subframeP);
}
LOG_D(MAC, "doing schedule_ue_spec for CC_id %d\n",
CC_id);
dl_req = &eNB->DL_req[CC_id].dl_config_request_body;
@@ -1627,204 +1614,6 @@ schedule_ue_spec(module_id_t module_idP,
VCD_FUNCTION_OUT);
}
//------------------------------------------------------------------------------
void
dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id,
int frameP,
sub_frame_t subframeP,
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX])
//------------------------------------------------------------------------------
{
// FIXME: I'm prototyping the algorithm, so there may be arrays and variables that carry redundant information here and in pre_processor_results struct.
int UE_id, CC_id, rbg, i;
int N_RB_DL, min_rb_unit, tm;
int owned, used;
eNB_MAC_INST *eNB = RC.mac[Mod_id];
int nb_mac_CC = RC.nb_mac_CC[Mod_id];
UE_list_t *UE_list = &eNB->UE_list;
slice_info_t *sli = &eNB->slice_info;
UE_sched_ctrl_t *ue_sched_ctl;
COMMON_channels_t *cc;
int N_RBG[NFAPI_CC_MAX];
int slice_sorted_list[MAX_NUM_SLICES];
int slice_idx;
int8_t free_rbgs_map[NFAPI_CC_MAX][N_RBG_MAX];
int has_traffic[NFAPI_CC_MAX][MAX_NUM_SLICES];
uint8_t allocation_mask[NFAPI_CC_MAX][N_RBG_MAX];
uint16_t (*nb_rbs_remaining)[MAX_MOBILES_PER_ENB];
uint16_t (*nb_rbs_required)[MAX_MOBILES_PER_ENB];
uint8_t (*MIMO_mode_indicator)[N_RBG_MAX];
// Initialize the free RBGs map
// free_rbgs_map[CC_id][rbg] = -1 if RBG is allocated,
// otherwise it contains the id of the slice it belongs to.
// (Information about slicing must be retained to deal with isolation).
// FIXME: This method does not consider RBGs that are free and belong to no slices
for (CC_id = 0; CC_id < nb_mac_CC; CC_id++) {
cc = &eNB->common_channels[CC_id];
N_RBG[CC_id] = to_rbg(cc->mib->message.dl_Bandwidth);
for (rbg = 0; rbg < N_RBG[CC_id]; rbg++) {
for (i = 0; i < sli->n_dl; ++i) {
owned = sli->pre_processor_results[i].slice_allocation_mask[CC_id][rbg];
if (owned) {
used = rballoc_sub[CC_id][rbg];
free_rbgs_map[CC_id][rbg] = used ? -1 : i;
break;
}
}
}
}
// Find out which slices need other resources.
// FIXME: I don't think it is really needed since we check nb_rbs_remaining later
for (CC_id = 0; CC_id < nb_mac_CC; CC_id++) {
for (i = 0; i < sli->n_dl; i++) {
has_traffic[CC_id][i] = 0;
for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) {
if (sli->pre_processor_results[i].nb_rbs_remaining[CC_id][UE_id] > 0) {
has_traffic[CC_id][i] = 1;
break;
}
}
}
}
slice_priority_sort(Mod_id,
slice_sorted_list);
// MULTIPLEXING
// This part is an adaptation of dlsch_scheduler_pre_processor_allocate() code
for (CC_id = 0; CC_id < nb_mac_CC; ++CC_id) {
N_RB_DL = to_prb(eNB->common_channels[CC_id].mib->message.dl_Bandwidth);
min_rb_unit = get_min_rb_unit(Mod_id,
CC_id);
for (i = 0; i < sli->n_dl; ++i) {
slice_idx = slice_sorted_list[i];
if (has_traffic[CC_id][slice_idx] == 0) continue;
// Build an ad-hoc allocation mask for the slice
for (rbg = 0; rbg < N_RBG[CC_id]; ++rbg) {
if (free_rbgs_map[CC_id][rbg] == -1) {
// RBG is already allocated
allocation_mask[CC_id][rbg] = 0;
continue;
}
if (sli->dl[free_rbgs_map[CC_id][rbg]].isol == 1) {
// RBG belongs to an isolated slice
allocation_mask[CC_id][rbg] = 0;
continue;
}
// RBG is free
allocation_mask[CC_id][rbg] = 1;
}
// Sort UE again
// (UE list gets sorted every time pre_processor is called so it is probably dirty at this point)
// FIXME: There is only one UE_list for all slices, so it must be sorted again each time we use it
sort_UEs(Mod_id,
slice_idx,
frameP,
subframeP);
nb_rbs_remaining = sli->pre_processor_results[slice_idx].nb_rbs_remaining;
nb_rbs_required = sli->pre_processor_results[slice_idx].nb_rbs_required;
MIMO_mode_indicator = sli->pre_processor_results[slice_idx].MIMO_mode_indicator;
// Allocation
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
tm = get_tmode(Mod_id,
CC_id,
UE_id);
for (rbg = 0; rbg < N_RBG[CC_id]; ++rbg) {
// FIXME: I think that some of these checks are redundant
if (allocation_mask[CC_id][rbg] == 0) continue;
if (rballoc_sub[CC_id][rbg] != 0) continue;
if (ue_sched_ctl->rballoc_sub_UE[CC_id][rbg] != 0) continue;
if (nb_rbs_remaining[CC_id][UE_id] <= 0) continue;
if (ue_sched_ctl->pre_nb_available_rbs[CC_id] >= nb_rbs_required[CC_id][UE_id]) continue;
if (ue_sched_ctl->dl_pow_off[CC_id] == 0) continue;
if ((rbg == N_RBG[CC_id] - 1) && ((N_RB_DL == 25) || (N_RB_DL == 50))) {
// Allocating last, smaller RBG
if (nb_rbs_remaining[CC_id][UE_id] >= min_rb_unit - 1) {
rballoc_sub[CC_id][rbg] = 1;
free_rbgs_map[CC_id][rbg] = -1;
ue_sched_ctl->rballoc_sub_UE[CC_id][rbg] = 1;
MIMO_mode_indicator[CC_id][rbg] = 1;
if (tm == 5) {
ue_sched_ctl->dl_pow_off[CC_id] = 1;
}
nb_rbs_remaining[CC_id][UE_id] -= (min_rb_unit - 1);
ue_sched_ctl->pre_nb_available_rbs[CC_id] += (min_rb_unit - 1);
}
} else {
// Allocating a standard-sized RBG
if (nb_rbs_remaining[CC_id][UE_id] >= min_rb_unit) {
rballoc_sub[CC_id][rbg] = 1;
free_rbgs_map[CC_id][rbg] = -1;
ue_sched_ctl->rballoc_sub_UE[CC_id][rbg] = 1;
MIMO_mode_indicator[CC_id][rbg] = 1;
if (tm == 5) {
ue_sched_ctl->dl_pow_off[CC_id] = 1;
}
nb_rbs_remaining[CC_id][UE_id] -= min_rb_unit;
ue_sched_ctl->pre_nb_available_rbs[CC_id] += min_rb_unit;
}
}
}
}
}
}
return;
}
//------------------------------------------------------------------------------
void dlsch_scheduler_qos_multiplexing(module_id_t Mod_id, int frameP, sub_frame_t subframeP)
//------------------------------------------------------------------------------
{
// int UE_id;
int CC_id, i;
// UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
slice_info_t *sli = &RC.mac[Mod_id]->slice_info;
//UE_sched_ctrl *ue_sched_ctl;
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; CC_id++) {
for (i = 0; i < sli->n_dl; i++) {
// Sort UE again
// FIXME: There is only one UE_list for all slices, so it must be sorted again each time we use it
sort_UEs(Mod_id,
(uint8_t)i,
frameP,
subframeP);
/*
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
//ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
// TODO: Do something here
// ue_sched_ctl->pre_nb_available_rbs[CC_id];
}
*/
}
}
}
//------------------------------------------------------------------------------
/*
* Default DLSCH scheduler for LTE-M
...
@@ -738,9 +738,8 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
uint16_t temp_total_rbs_count;
unsigned char temp_total_ue_count;
unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX];
uint8_t slice_allocation[MAX_NUM_CCs][N_RBG_MAX];
int UE_id, i;
uint16_t j,c;
uint16_t j;
uint16_t nb_rbs_required[MAX_NUM_CCs][MAX_MOBILES_PER_ENB];
uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][MAX_MOBILES_PER_ENB];
// uint16_t nb_rbs_required_remaining_1[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
@@ -795,8 +794,7 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
// Store the DLSCH buffer for each logical channel
store_dlsch_buffer(Mod_id,0, frameP, subframeP);
// Calculate the number of RBs required by each UE on the basis of logical channel's buffer
assign_rbs_required(Mod_id, 0, frameP, subframeP, nb_rbs_required,
min_rb_unit);
assign_rbs_required(Mod_id, 0, frameP, subframeP, nb_rbs_required);
#else
memcpy(nb_rbs_required, pre_nb_rbs_required[dlsch_ue_select_tbl_in_use], sizeof(uint16_t)*MAX_NUM_CCs*MAX_MOBILES_PER_ENB);
#endif
@@ -856,23 +854,14 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
nb_rbs_required_remaining[CC_id][UE_id] = cmin(average_rbs_per_user[CC_id], dlsch_ue_select[CC_id].list[i].nb_rb);
}
/* slicing support has been introduced into the scheduler. Provide dummy
* data so that the preprocessor "simply works" */
for (c = 0; c < MAX_NUM_CCs; ++c)
for (j = 0; j < N_RBG_MAX; ++j)
slice_allocation[c][j] = 1;
LOG_T(MAC,"calling dlsch_scheduler_pre_processor_allocate .. \n ");
dlsch_scheduler_pre_processor_allocate (Mod_id,
UE_id,
CC_id,
N_RBG[CC_id],
min_rb_unit[CC_id],
(uint16_t (*)[MAX_MOBILES_PER_ENB])nb_rbs_required,
(uint16_t (*)[MAX_MOBILES_PER_ENB])nb_rbs_required_remaining,
rballoc_sub,
slice_allocation,
MIMO_mode_indicator);
(uint16_t (*)[NUMBER_OF_UE_MAX])nb_rbs_required,
(uint16_t (*)[NUMBER_OF_UE_MAX])nb_rbs_required_remaining,
rballoc_sub);
temp_total_rbs_count -= ue_sched_ctl->pre_nb_available_rbs[CC_id];
temp_total_ue_count--;
...
@@ -2146,6 +2146,37 @@ dump_ue_list(UE_list_t *listP,
return;
}
//------------------------------------------------------------------------------
/*
* Add a UE to the UL or DL UE_list listP
*/
void
add_ue_list(UE_list_t *listP, int UE_id, int ul_flag) {
if (ul_flag == 0) {
if (listP->head == -1) {
listP->head = UE_id;
listP->next[UE_id] = -1;
} else {
int i = listP->head;
while (listP->next[i] >= 0)
i = listP->next[i];
listP->next[i] = UE_id;
listP->next[UE_id] = -1;
}
} else {
if (listP->head_ul == -1) {
listP->head_ul = UE_id;
listP->next_ul[UE_id] = -1;
} else {
int i = listP->head_ul;
while (listP->next_ul[i] >= 0)
i = listP->next_ul[i];
listP->next_ul[i] = UE_id;
listP->next_ul[UE_id] = -1;
}
}
}
//------------------------------------------------------------------------------
int
add_new_ue(module_id_t mod_idP,
@@ -2165,7 +2196,6 @@ add_new_ue(module_id_t mod_idP,
rntiP,
UE_list->avail,
UE_list->num_UEs);
dump_ue_list(UE_list, 0);
for (i = 0; i < MAX_MOBILES_PER_ENB; i++) {
if (UE_list->active[i] == TRUE)
@@ -2182,6 +2212,10 @@ add_new_ue(module_id_t mod_idP,
UE_list->ordered_ULCCids[0][UE_id] = cc_idP;
UE_list->num_UEs++;
UE_list->active[UE_id] = TRUE;
add_ue_list(UE_list, UE_id, 0);
dump_ue_list(UE_list, 0);
add_ue_list(UE_list, UE_id, 1);
dump_ue_list(UE_list, 1);
if (IS_SOFTMODEM_IQPLAYER)// not specific to record/playback ?
UE_list->UE_template[cc_idP][UE_id].pre_assigned_mcs_ul = 0;
UE_list->UE_template[cc_idP][UE_id].rach_resource_type = rach_resource_type;
@@ -2211,8 +2245,6 @@ add_new_ue(module_id_t mod_idP,
UE_id,
cc_idP,
rntiP);
dump_ue_list(UE_list,
0);
return (UE_id);
}
@@ -2248,10 +2280,11 @@ rrc_mac_remove_ue(module_id_t mod_idP,
UE_id,
pCC_id,
rntiP);
dump_ue_list(UE_list, 0); // DL list displayed in LOG_T(MAC)
UE_list->active[UE_id] = FALSE;
UE_list->num_UEs--;
UE_list->next[UE_id] = -1;
UE_list->next_ul[UE_id] = -1;
/* If present, remove UE from DL list */
if (UE_list->head == UE_id) {
UE_list->head = UE_list->next[UE_id];
@@ -2391,130 +2424,6 @@ prev(UE_list_t *listP,
return -1;
}
//------------------------------------------------------------------------------
void
swap_UEs(UE_list_t *listP,
int nodeiP,
int nodejP,
int ul_flag)
//------------------------------------------------------------------------------
{
int prev_i, prev_j, next_i, next_j;
LOG_T(MAC, "Swapping UE %d,%d\n",
nodeiP,
nodejP);
dump_ue_list(listP,
ul_flag);
prev_i = prev(listP,
nodeiP,
ul_flag);
prev_j = prev(listP,
nodejP,
ul_flag);
AssertFatal((prev_i >= 0) && (prev_j >= 0), "swap_UEs: problem");
if (ul_flag == 0) {
next_i = listP->next[nodeiP];
next_j = listP->next[nodejP];
} else {
next_i = listP->next_ul[nodeiP];
next_j = listP->next_ul[nodejP];
}
LOG_T(MAC, "[%s] next_i %d, next_i, next_j %d, head %d \n",
(ul_flag == 0) ? "DL" : "UL",
next_i,
next_j,
listP->head);
if (ul_flag == 0) {
if (next_i == nodejP) { // case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...
LOG_T(MAC, "Case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...\n");
listP->next[nodeiP] = next_j;
listP->next[nodejP] = nodeiP;
if (nodeiP == listP->head) { // case i j n(j)
listP->head = nodejP;
} else {
listP->next[prev_i] = nodejP;
}
} else if (next_j == nodeiP) { // case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...
LOG_T(MAC, "Case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...\n");
listP->next[nodejP] = next_i;
listP->next[nodeiP] = nodejP;
if (nodejP == listP->head) { // case j i n(i)
listP->head = nodeiP;
} else {
listP->next[prev_j] = nodeiP;
}
} else { // case ... p(i) i n(i) ... p(j) j n(j) ...
listP->next[nodejP] = next_i;
listP->next[nodeiP] = next_j;
if (nodeiP == listP->head) {
LOG_T(MAC, "changing head to %d\n",
nodejP);
listP->head = nodejP;
listP->next[prev_j] = nodeiP;
} else if (nodejP == listP->head) {
LOG_D(MAC, "changing head to %d\n",
nodeiP);
listP->head = nodeiP;
listP->next[prev_i] = nodejP;
} else {
listP->next[prev_i] = nodejP;
listP->next[prev_j] = nodeiP;
}
}
} else { // ul_flag
if (next_i == nodejP) { // case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...
LOG_T(MAC, "[UL] Case ... p(i) i j n(j) ... => ... p(j) j i n(i) ...\n");
listP->next_ul[nodeiP] = next_j;
listP->next_ul[nodejP] = nodeiP;
if (nodeiP == listP->head_ul) { // case i j n(j)
listP->head_ul = nodejP;
} else {
listP->next_ul[prev_i] = nodejP;
}
} else if (next_j == nodeiP) { // case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...
LOG_T(MAC, "[UL]Case ... p(j) j i n(i) ... => ... p(i) i j n(j) ...\n");
listP->next_ul[nodejP] = next_i;
listP->next_ul[nodeiP] = nodejP;
if (nodejP == listP->head_ul) { // case j i n(i)
listP->head_ul = nodeiP;
} else {
listP->next_ul[prev_j] = nodeiP;
}
} else { // case ... p(i) i n(i) ... p(j) j n(j) ...
listP->next_ul[nodejP] = next_i;
listP->next_ul[nodeiP] = next_j;
if (nodeiP == listP->head_ul) {
LOG_T(MAC, "[UL]changing head to %d\n",
nodejP);
listP->head_ul = nodejP;
listP->next_ul[prev_j] = nodeiP;
} else if (nodejP == listP->head_ul) {
LOG_T(MAC, "[UL]changing head to %d\n",
nodeiP);
listP->head_ul = nodeiP;
listP->next_ul[prev_i] = nodejP;
} else {
listP->next_ul[prev_i] = nodejP;
listP->next_ul[prev_j] = nodeiP;
}
}
}
LOG_T(MAC, "After swap\n");
dump_ue_list(listP,
ul_flag);
return;
}
// This has to be updated to include BSR information
//------------------------------------------------------------------------------
uint8_t
...
@@ -1190,7 +1190,6 @@ typedef struct {
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB];
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB];
uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX];
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX];
uint32_t bytes_lcid[MAX_MOBILES_PER_ENB][MAX_NUM_LCID];
uint32_t wb_pmi[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB];
...
@@ -248,49 +248,23 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
int slice_idx,
frame_t frameP,
sub_frame_t subframeP,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]);
void dlsch_scheduler_pre_processor_positioning(module_id_t Mod_id,
int slice_idx,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]);
void dlsch_scheduler_pre_processor_intraslice_sharing(module_id_t Mod_id,
int slice_idx,
int min_rb_unit[NFAPI_CC_MAX],
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_accounted[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]);
void slice_priority_sort(module_id_t Mod_id, int slice_list[MAX_NUM_SLICES]);
void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id,
int frameP,
sub_frame_t subframeP,
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]);
void dlsch_scheduler_qos_multiplexing(module_id_t Mod_id,
int frameP,
sub_frame_t subframeP);
void dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id,
int UE_id,
uint8_t CC_id,
int N_RBG,
int min_rb_unit,
uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint16_t nb_rbs_remaining[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB],
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t slice_allocation_mask[NFAPI_CC_MAX][N_RBG_MAX],
uint8_t MIMO_mode_indicator[NFAPI_CC_MAX][N_RBG_MAX]);
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX]);
/* \brief Function to trigger the eNB scheduling procedure. It is called by PHY at the beginning of each subframe, \f$n$\f
and generates all DLSCH allocations for subframe \f$n\f$ and ULSCH allocations for subframe \f$n+k$\f.
@@ -715,10 +689,10 @@ int add_new_ue(module_id_t Mod_id, int CC_id, rnti_t rnti, int harq_pid, uint8_t
int rrc_mac_remove_ue(module_id_t Mod_id, rnti_t rntiP);
void store_dlsch_buffer(module_id_t Mod_id, int slice_idx, frame_t frameP, sub_frame_t subframeP);
void assign_rbs_required(module_id_t Mod_id, int slice_idx, frame_t frameP, sub_frame_t subframe, uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB], int min_rb_unit[NFAPI_CC_MAX]);
void assign_rbs_required(module_id_t Mod_id, int slice_idx, frame_t frameP, sub_frame_t subframe, uint16_t nb_rbs_required[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]);
void swap_UEs(UE_list_t *listP, int nodeiP, int nodejP, int ul_flag);
int prev(UE_list_t *listP, int nodeP, int ul_flag);
void add_ue_list(UE_list_t *listP, int UE_id, int ul_flag);
void dump_ue_list(UE_list_t *listP, int ul_flag);
int UE_num_active_CC(UE_list_t *listP, int ue_idP);
int UE_PCCID(module_id_t mod_idP, int ue_idP);
@@ -745,7 +719,6 @@ void adjust_bsr_info(int buffer_occupancy, uint16_t TBS,
UE_TEMPLATE *UE_template);
int phy_stats_exist(module_id_t Mod_id, int rnti);
void sort_UEs(module_id_t Mod_idP, int slice_idx, int frameP, sub_frame_t subframeP);
/*! \fn UE_L2_state_t ue_scheduler(const module_id_t module_idP,const frame_t frameP, const sub_frame_t subframe, const lte_subframe_t direction,const uint8_t eNB_index)
\brief UE scheduler where all the ue background tasks are done. This function performs the following: 1) Trigger PDCP every 5ms 2) Call RRC for link status return to PHY3) Perform SR/BSR procedures for scheduling feedback 4) Perform PHR procedures.
...
@@ -50,12 +50,10 @@ void init_UE_list(UE_list_t *UE_list)
UE_list->head = -1;
UE_list->head_ul = -1;
UE_list->avail = 0;
for (list_el = 0; list_el < MAX_MOBILES_PER_ENB - 1; list_el++) {
UE_list->next[list_el] = list_el + 1;
UE_list->next_ul[list_el] = list_el + 1;
}
UE_list->next[list_el] = -1;
UE_list->next_ul[list_el] = -1;
for (list_el = 0; list_el < MAX_MOBILES_PER_ENB; list_el++) {
UE_list->next[list_el] = -1;
UE_list->next_ul[list_el] = -1;
}
memset(UE_list->DLSCH_pdu, 0, sizeof(UE_list->DLSCH_pdu));
memset(UE_list->UE_template, 0, sizeof(UE_list->UE_template));
memset(UE_list->eNB_UE_stats, 0, sizeof(UE_list->eNB_UE_stats));
...