Commit 555630de authored by Niccolò Iardella, committed by Robert Schmidt

Add first_rb_offset

parent c0152e3c
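
This commit adds a per-slice uplink starting-RB offset: the configured slice_first_rb value is clamped to the UL bandwidth, stored per CC and slice in UE_list->first_rb_offset, added to first_rb to obtain first_rb_slice in schedule_ulsch_rnti, and subtracted from the resource blocks the UL pre-processor may distribute. Below is a minimal standalone sketch of that arithmetic, assuming plain integers in place of the OAI structures and helpers (cmin, to_prb, UE_list); it illustrates the logic in the diff and is not the committed code.

```c
/* Minimal standalone sketch of the first_rb_offset arithmetic added in this
 * commit; plain ints stand in for UE_list->first_rb_offset[CC_id][slice_id],
 * cmin() and to_prb(). Illustration only, not OAI code. */
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

/* Offset actually applied: the configured slice_first_rb clamped to the
 * UL bandwidth (in PRBs) of this component carrier. */
static int slice_rb_offset(int n_rb_ul, int cfg_slice_first_rb)
{
    return min_int(n_rb_ul, cfg_slice_first_rb);
}

/* First RB the slice may allocate: the per-CC first_rb already reserved by
 * the scheduler plus the slice offset -- first_rb_slice[CC_id] in the diff. */
static int slice_first_usable_rb(int first_rb, int offset)
{
    return first_rb + offset;
}

/* RBs the pre-processor may distribute for this slice on this CC. */
static int slice_available_rbs(int max_rbs_allowed_slice, int n_rb_ul,
                               int first_rb, int offset)
{
    return min_int(max_rbs_allowed_slice, n_rb_ul - first_rb - offset);
}

int main(void)
{
    int n_rb_ul = 50, first_rb = 2, cfg = 10, max_allowed = 25;
    int off = slice_rb_offset(n_rb_ul, cfg);
    printf("slice starts at RB %d, %d RBs available\n",
           slice_first_usable_rb(first_rb, off),
           slice_available_rbs(max_allowed, n_rb_ul, first_rb, off));
    return 0;
}
```
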
@@ -401,10 +401,10 @@ rx_sdu(const module_id_t enb_mod_idP,
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[lcgid] = BSR_TABLE[bsr];
UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer =
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[0] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[1] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[2] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[3];
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID0] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID1] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID2] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID3];
//UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer += UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer / 4;
RC.eNB[enb_mod_idP][CC_idP]->pusch_stats_bsr[UE_id][(frameP * 10) + subframeP] = (payload_ptr[0] & 0x3f);
@@ -435,10 +435,10 @@ rx_sdu(const module_id_t enb_mod_idP,
int bsr2 = ((payload_ptr[1] & 0x0F) << 2) | ((payload_ptr[2] & 0xC0) >> 6);
int bsr3 = payload_ptr[2] & 0x3F;
lcgid_updated[0] = 1;
lcgid_updated[1] = 1;
lcgid_updated[2] = 1;
lcgid_updated[3] = 1;
lcgid_updated[LCGID0] = 1;
lcgid_updated[LCGID1] = 1;
lcgid_updated[LCGID2] = 1;
lcgid_updated[LCGID3] = 1;
// update buffer info
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID0] = BSR_TABLE[bsr0];
@@ -447,10 +447,10 @@ rx_sdu(const module_id_t enb_mod_idP,
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID3] = BSR_TABLE[bsr3];
UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer =
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[0] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[1] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[2] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[3];
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID0] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID1] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID2] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID3];
//UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer += UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer / 4;
LOG_D(MAC,
@@ -920,7 +920,7 @@ void
schedule_ulsch(module_id_t module_idP, frame_t frameP,
sub_frame_t subframeP)
{
uint16_t first_rb[MAX_NUM_CCs], i;
uint16_t first_rb[NFAPI_CC_MAX], i;
int CC_id;
eNB_MAC_INST *mac = RC.mac[module_idP];
COMMON_channels_t *cc;
@@ -1005,7 +1005,7 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP,
}
if (sched_subframe < subframeP) sched_frame++;
for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
for (CC_id = 0; CC_id < NFAPI_CC_MAX; CC_id++) {
@@ -1104,6 +1104,18 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP,
}
}
if (slice_first_rb_current[i] != slice_first_rb[i]){
if (slice_first_rb[i] >= 0){ // FIXME: Max limit is checked in the scheduler
LOG_N(MAC,"[eNB %d][SLICE %d][UL] frame %d subframe %d: slice first rb has changed: %d-->%d\n",
module_idP, i, frameP, subframeP, slice_first_rb_current[i], slice_first_rb[i]);
slice_first_rb_current[i] = slice_first_rb[i];
} else {
LOG_W(MAC,"[eNB %d][SLICE %d][UL] invalid slice first rb %d, revert the previous value %d\n",
module_idP, i, slice_first_rb[i],slice_first_rb_current[i]);
slice_first_rb[i] = slice_first_rb_current[i];
}
}
// check if a new scheduler, and log the console
if (update_ul_scheduler_current[i] != update_ul_scheduler[i]){
LOG_I(MAC,"[eNB %d][SLICE %d][UL] frame %d subframe %d: UL scheduler for this slice is updated: %s \n",
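
The new slice_first_rb check in the hunk above guards runtime updates: a changed non-negative value is logged and applied, while a negative value is rejected and rolled back to the previously applied one (the upper bound is checked later in the scheduler, as the FIXME notes). A condensed standalone sketch of that guard, with printf standing in for LOG_N/LOG_W:

```c
/* Condensed sketch of the slice_first_rb update guard in schedule_ulsch();
 * printf replaces LOG_N/LOG_W and the slice index is passed in explicitly. */
#include <stdio.h>

void apply_slice_first_rb_update(int slice, int *current, int *requested)
{
    if (*current == *requested)
        return;                          /* no change for this slice */

    if (*requested >= 0) {               /* max limit is checked later in the scheduler */
        printf("slice %d: first rb %d --> %d\n", slice, *current, *requested);
        *current = *requested;           /* accept and remember the new offset */
    } else {
        printf("slice %d: invalid first rb %d, reverting to %d\n",
               slice, *requested, *current);
        *requested = *current;           /* roll the bad value back */
    }
}
```
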
@@ -1166,6 +1178,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
int rvidx_tab[4] = { 0, 2, 3, 1 };
uint16_t ul_req_index;
uint8_t dlsch_flag;
int first_rb_slice[NFAPI_CC_MAX];
if (sched_subframeP < subframeP)
sched_frame++;
@@ -1177,9 +1190,18 @@ schedule_ulsch_rnti(module_id_t module_idP,
nfapi_ul_config_request_t *ul_req_tmp = &mac->UL_req_tmp[CC_id][sched_subframeP];
nfapi_ul_config_request_body_t *ul_req_tmp_body = &ul_req_tmp->ul_config_request_body;
nfapi_ul_config_ulsch_harq_information *ulsch_harq_information;
for (CC_id = 0; CC_id < NFAPI_CC_MAX; ++CC_id) {
N_RB_UL = to_prb(cc[CC_id].ul_Bandwidth);
UE_list->first_rb_offset[CC_id][slice_id] = cmin(N_RB_UL, slice_first_rb[slice_id]);
}
//LOG_D(MAC, "entering ulsch preprocesor\n");
ulsch_scheduler_pre_processor(module_idP, slice_id, frameP, subframeP, sched_subframeP, first_rb);
for (CC_id = 0; CC_id < NFAPI_CC_MAX; ++CC_id) {
first_rb_slice[CC_id] = first_rb[CC_id] + UE_list->first_rb_offset[CC_id][slice_id];
}
//LOG_D(MAC, "exiting ulsch preprocesor\n");
hi_dci0_req->sfn_sf = (frameP << 4) + subframeP;
@@ -1265,7 +1287,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
}
/* be sure that there are some free RBs */
if (first_rb[CC_id] >= N_RB_UL - 1) {
if (first_rb_slice[CC_id] >= N_RB_UL - 1) {
LOG_W(MAC,
"[eNB %d] frame %d subframe %d, UE %d/%x CC %d: dropping, not enough RBs\n",
module_idP, frameP, subframeP, UE_id, rnti, CC_id);
@@ -1390,7 +1412,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
// buffer_occupancy = UE_template->ul_total_buffer;
while (((rb_table[rb_table_index] > (N_RB_UL - 1 - first_rb[CC_id]))
while (((rb_table[rb_table_index] > (N_RB_UL - 1 - first_rb_slice[CC_id]))
|| (rb_table[rb_table_index] > 45))
&& (rb_table_index > 0)) {
rb_table_index--;
@@ -1407,7 +1429,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
T_INT(CC_id), T_INT(rnti), T_INT(frameP),
T_INT(subframeP), T_INT(harq_pid),
T_INT(UE_template->mcs_UL[harq_pid]),
T_INT(first_rb[CC_id]),
T_INT(first_rb_slice[CC_id]),
T_INT(rb_table[rb_table_index]),
T_INT(UE_template->TBS_UL[harq_pid]), T_INT(ndi));
@@ -1417,14 +1439,14 @@ schedule_ulsch_rnti(module_id_t module_idP,
module_idP, harq_pid, rnti, CC_id, frameP,
subframeP, UE_id,
UE_template->mcs_UL[harq_pid],
first_rb[CC_id], rb_table[rb_table_index],
first_rb_slice[CC_id], rb_table[rb_table_index],
rb_table_index,
UE_template->TBS_UL[harq_pid], harq_pid);
// bad indices : 20 (40 PRB), 21 (45 PRB), 22 (48 PRB)
//store for possible retransmission
UE_template->nb_rb_ul[harq_pid] = rb_table[rb_table_index];
UE_template->first_rb_ul[harq_pid] = first_rb[CC_id];
UE_template->first_rb_ul[harq_pid] = first_rb_slice[CC_id];
UE_sched_ctrl->ul_scheduled |= (1 << harq_pid);
if (UE_id == UE_list->head)
@@ -1456,7 +1478,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.aggregation_level = aggregation;
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.rnti = rnti;
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.transmission_power = 6000;
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.resource_block_start = first_rb[CC_id];
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.resource_block_start = first_rb_slice[CC_id];
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.number_of_resource_block = rb_table[rb_table_index];
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.mcs_1 = UE_template->mcs_UL[harq_pid];
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.cyclic_shift_2_for_drms = cshift;
@@ -1554,13 +1576,13 @@ schedule_ulsch_rnti(module_id_t module_idP,
LOG_D(MAC,"[PUSCH %d] SFN/SF:%04d%d UL_CFG:SFN/SF:%04d%d CQI:%d for UE %d/%x\n", harq_pid,frameP,subframeP,ul_sched_frame,ul_sched_subframeP,cqi_req,UE_id,rnti);
// increment first rb for next UE allocation
first_rb[CC_id] += rb_table[rb_table_index];
first_rb_slice[CC_id] += rb_table[rb_table_index];
} else { // round > 0 => retransmission
T(T_ENB_MAC_UE_UL_SCHEDULE_RETRANSMISSION,
T_INT(module_idP), T_INT(CC_id), T_INT(rnti),
T_INT(frameP), T_INT(subframeP), T_INT(harq_pid),
T_INT(UE_template->mcs_UL[harq_pid]),
T_INT(first_rb[CC_id]),
T_INT(first_rb_slice[CC_id]),
T_INT(rb_table[rb_table_index]), T_INT(round));
// Add UL_config PDUs
......
@@ -52,6 +52,9 @@ float total_slice_percentage_current_uplink = 0;
int slice_maxmcs_uplink[MAX_NUM_SLICES] = {20, 20, 20, 20};
int slice_maxmcs_current_uplink[MAX_NUM_SLICES] = {20,20,20,20};
int slice_first_rb[MAX_NUM_SLICES] = {0, 0, 0, 0};
int slice_first_rb_current[MAX_NUM_SLICES] = {0,0,0,0};
/*resource blocks allowed*/
uint16_t nb_rbs_allowed_slice_uplink[NFAPI_CC_MAX][MAX_NUM_SLICES];
/*Slice Update */
......
@@ -1102,6 +1102,7 @@ typedef struct {
/// Sorting criteria for the UE list in the MAC preprocessor
uint16_t sorting_criteria[MAX_NUM_SLICES][CR_NUM];
uint16_t first_rb_offset[NFAPI_CC_MAX][MAX_NUM_SLICES];
} UE_list_t;
......
@@ -178,6 +178,9 @@ store_dlsch_buffer(module_id_t Mod_id,
}
}
int cqi2mcs(int cqi) {
return cqi_to_mcs[cqi];
}
// This function returns the estimated number of RBs required by each UE for downlink scheduling
void
@@ -412,6 +415,7 @@ static int ue_dl_compare(const void *_a, const void *_b, void *_params)
return -1;
if (cqi1 < cqi2)
return 1;
break;
case CR_LCP :
if (lcgid1 < lcgid2)
@@ -439,7 +443,7 @@ void decode_sorting_policy(module_id_t Mod_idP, slice_id_t slice_id) {
criterion = (uint16_t) (policy >> 4 * (CR_NUM - 1 - i) & mask);
if (criterion >= CR_NUM) {
LOG_W(MAC, "Invalid criterion in slice %d policy, revert to default policy \n", slice_id);
slice_sorting[slice_id] = 0x1234;
slice_sorting[slice_id] = 0x12345;
break;
}
UE_list->sorting_criteria[slice_id][i] = criterion;
@@ -1684,6 +1688,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
UE_TEMPLATE *UE_template = 0;
UE_sched_ctrl *ue_sched_ctl;
int N_RB_UL = 0;
uint16_t available_rbs, first_rb_offset;
LOG_D(MAC, "In ulsch_preprocessor: assign max mcs min rb\n");
// maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB
@@ -1757,12 +1762,16 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] =
nb_rbs_allowed_slice(slice_percentage_uplink[slice_id], N_RB_UL);
first_rb_offset = UE_list->first_rb_offset[CC_id][slice_id];
available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id],
N_RB_UL - first_rb[CC_id] - first_rb_offset);
if (total_ue_count[CC_id] == 0) {
average_rbs_per_user[CC_id] = 0;
} else if (total_ue_count[CC_id] == 1) { // increase the available RBs, special case,
average_rbs_per_user[CC_id] = ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id] + 1;
} else if (total_ue_count[CC_id] <= (ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id])) {
average_rbs_per_user[CC_id] = (uint16_t) floor((ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id]) / total_ue_count[CC_id]);
average_rbs_per_user[CC_id] = (uint16_t) (available_rbs + 1);
} else if (total_ue_count[CC_id] <= available_rbs) {
average_rbs_per_user[CC_id] = (uint16_t) floor(available_rbs / total_ue_count[CC_id]);
} else {
average_rbs_per_user[CC_id] = 1;
LOG_W(MAC,
@@ -1834,8 +1843,11 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
UE_template = &UE_list->UE_template[CC_id][UE_id];
total_remaining_rbs[CC_id] =
ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id] - total_allocated_rbs[CC_id];
N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth);
first_rb_offset = UE_list->first_rb_offset[CC_id][slice_id];
available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id],
N_RB_UL - first_rb[CC_id] - first_rb_offset);
total_remaining_rbs[CC_id] = available_rbs - total_allocated_rbs[CC_id];
if (total_ue_count[CC_id] == 1) {
total_remaining_rbs[CC_id] += 1;
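
With the offset in place, the pre-processor derives both the per-UE share (average_rbs_per_user) and the leftover RBs (total_remaining_rbs) from the slice-limited available_rbs, i.e. max_rbs_allowed_slice_uplink clamped against N_RB_UL - first_rb - first_rb_offset. A simplified standalone version of those rules, assuming the same variable meanings as in the diff, is sketched here:

```c
/* Simplified standalone version of the updated pre-processor rules; the real
 * code operates on per-CC arrays and UE_sched_ctrl state. Illustration only. */

int min_int(int a, int b) { return a < b ? a : b; }

/* RBs this slice may still use on the CC once first_rb and the slice offset
 * are reserved. */
int slice_available_rbs(int max_rbs_allowed_slice_uplink, int n_rb_ul,
                        int first_rb, int first_rb_offset)
{
    return min_int(max_rbs_allowed_slice_uplink,
                   n_rb_ul - first_rb - first_rb_offset);
}

/* Per-UE share: everything (plus one) for a single UE, an even split when the
 * RBs suffice, otherwise one RB each. Integer division matches the floor()
 * used in the original code. */
int average_rbs_per_user(int total_ue_count, int available_rbs)
{
    if (total_ue_count == 0) return 0;
    if (total_ue_count == 1) return available_rbs + 1;
    if (total_ue_count <= available_rbs) return available_rbs / total_ue_count;
    return 1;
}

/* Leftover RBs redistributed in the second pass of the pre-processor. */
int total_remaining_rbs(int available_rbs, int total_allocated_rbs)
{
    return available_rbs - total_allocated_rbs;
}
```
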
@@ -1885,6 +1897,7 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP,
UE_sched_ctrl *ue_sched_ctl;
int Ncp;
int N_RB_UL;
int first_rb_offset, available_rbs;
for (i = 0; i < MAX_MOBILES_PER_ENB; i++) {
if (UE_list->active[i] != TRUE)
@@ -1951,18 +1964,20 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP,
tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0); // fixme: set use_srs
}
first_rb_offset = UE_list->first_rb_offset[CC_id][slice_id];
available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id],
N_RB_UL - first_rb[CC_id] - first_rb_offset);
while ((tbs < bits_to_schedule)
&& (rb_table[rb_table_index] < (ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id]))
&& ((UE_template->phr_info - tx_power) > 0)
&& (rb_table_index < 32)) {
&& (rb_table[rb_table_index] < available_rbs)
&& ((UE_template->phr_info - tx_power) > 0)
&& (rb_table_index < 32)) {
rb_table_index++;
tbs = get_TBS_UL(UE_template->pre_assigned_mcs_ul, rb_table[rb_table_index]) << 3;
tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0);
}
UE_template->ue_tx_power = tx_power;
if (rb_table[rb_table_index] > (ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id] - 1)) {
if (rb_table[rb_table_index] > (available_rbs - 1)) {
rb_table_index--;
}
// 1 or 2 PRB with cqi enabled does not work well
@@ -2086,7 +2101,3 @@ void sort_ue_ul(module_id_t module_idP, int frameP, sub_frame_t subframeP)
UE_list->head_ul = -1;
}
}
int cqi2mcs(int cqi) {
return cqi_to_mcs[cqi];
}