Commit 485e83f9 authored by Stefan

Modify scheduler_dlsch + primitives

parent a36510d3
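
The bulk of this commit replaces repeated multi-dimensional indexing (eNB_dlsch_info[module_idP][CC_id][UE_id]..., UE_list->UE_template[CC_id][UE_id]..., UE_list->eNB_UE_stats[CC_id][UE_id]...) with pointers cached once per iteration, plus re-wrapping of long argument lists. A minimal self-contained sketch of the pattern, with illustrative types and names rather than the actual OAI structs:

#include <stdint.h>

#define MAX_CC 2
#define MAX_UE 4

typedef struct {
  uint16_t rnti;
  int      subframe;
  int      status;
  int      serving_num;
} dlsch_info_t;                         /* stand-in for eNB_DLSCH_INFO */

static dlsch_info_t dlsch_info[MAX_CC][MAX_UE];

void add_info(int cc, int ue, uint16_t rnti, int subframe, int status) {
  /* cache the element once instead of re-indexing the array on every access */
  dlsch_info_t *info = &dlsch_info[cc][ue];
  info->rnti     = rnti;
  info->subframe = subframe;
  info->status   = status;
  info->serving_num++;
}
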
......@@ -76,12 +76,14 @@ add_ue_dlsch_info(module_id_t module_idP,
rnti_t rnti)
//------------------------------------------------------------------------------
{
//LOG_D(MAC, "%s(module_idP:%d, CC_id:%d, UE_id:%d, subframeP:%d, status:%d) serving_num:%d rnti:%x\n", __FUNCTION__, module_idP, CC_id, UE_id, subframeP, status, eNB_dlsch_info[module_idP][CC_id][UE_id].serving_num, UE_RNTI(module_idP,UE_id));
eNB_dlsch_info[module_idP][CC_id][UE_id].rnti = rnti;
// eNB_dlsch_info[module_idP][CC_id][ue_mod_idP].weight = weight;
eNB_dlsch_info[module_idP][CC_id][UE_id].subframe = subframeP;
eNB_dlsch_info[module_idP][CC_id][UE_id].status = status;
eNB_dlsch_info[module_idP][CC_id][UE_id].serving_num++;
eNB_DLSCH_INFO *info = &eNB_dlsch_info[module_idP][CC_id][UE_id];
// LOG_D(MAC, "%s(module_idP:%d, CC_id:%d, UE_id:%d, subframeP:%d, status:%d) serving_num:%d rnti:%x\n", __FUNCTION__, module_idP, CC_id, UE_id, subframeP, status, eNB_dlsch_info[module_idP][CC_id][UE_id].serving_num, UE_RNTI(module_idP,UE_id));
info->rnti = rnti;
// info->weight = weight;
info->subframe = subframeP;
info->status = status;
info->serving_num++;
return;
}
//------------------------------------------------------------------------------
......@@ -176,10 +178,11 @@ generate_dlsch_header(unsigned char *mac_header,
last_size = 1;
// msg("last_size %d,mac_header_ptr %p\n",last_size,mac_header_ptr);
((TIMING_ADVANCE_CMD *) ce_ptr)->R = 0;
AssertFatal(timing_advance_cmd < 64,
"timing_advance_cmd %d > 63\n", timing_advance_cmd);
AssertFatal(timing_advance_cmd < 64, "timing_advance_cmd %d > 63\n",
timing_advance_cmd);
((TIMING_ADVANCE_CMD *) ce_ptr)->TA = timing_advance_cmd; //(timing_advance_cmd+31)&0x3f;
LOG_D(MAC, "timing advance =%d (%d)\n", timing_advance_cmd,
LOG_D(MAC, "timing advance =%d (%d)\n",
timing_advance_cmd,
((TIMING_ADVANCE_CMD *) ce_ptr)->TA);
ce_ptr += sizeof(TIMING_ADVANCE_CMD);
//msg("offset %d\n",ce_ptr-mac_header_control_elements);
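/* Editor's sketch: the Timing Advance control element written above is a single
 * octet, two reserved bits plus a 6-bit TA command (hence the
 * timing_advance_cmd < 64 assert). The exact TIMING_ADVANCE_CMD bitfield layout
 * is assumed here, not taken from the OAI headers, and bit order inside the
 * octet is compiler-dependent. */
typedef struct {
  uint8_t TA:6;   /* timing advance command, 0..63 */
  uint8_t R:2;    /* reserved, set to 0 */
} ta_cmd_sketch_t;
/* usage sketch: pack one command and advance the CE pointer by one octet
 *   ta_cmd_sketch_t ta = { .TA = timing_advance_cmd & 0x3f, .R = 0 };
 *   memcpy(ce_ptr, &ta, sizeof ta);  ce_ptr += sizeof ta;                  */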
......@@ -203,11 +206,15 @@ generate_dlsch_header(unsigned char *mac_header,
mac_header_ptr->E = 0;
mac_header_ptr->LCID = UE_CONT_RES;
last_size = 1;
LOG_T(MAC,
"[eNB ][RAPROC] Generate contention resolution msg: %x.%x.%x.%x.%x.%x\n",
ue_cont_res_id[0], ue_cont_res_id[1], ue_cont_res_id[2],
ue_cont_res_id[3], ue_cont_res_id[4], ue_cont_res_id[5]);
memcpy(ce_ptr, ue_cont_res_id, 6);
LOG_T(MAC, "[eNB ][RAPROC] Generate contention resolution msg: %x.%x.%x.%x.%x.%x\n",
ue_cont_res_id[0],
ue_cont_res_id[1],
ue_cont_res_id[2],
ue_cont_res_id[3],
ue_cont_res_id[4],
ue_cont_res_id[5]);
memcpy(ce_ptr,
ue_cont_res_id, 6);
ce_ptr += 6;
// msg("(cont_res) : offset %d\n",ce_ptr-mac_header_control_elements);
}
......@@ -216,7 +223,8 @@ generate_dlsch_header(unsigned char *mac_header,
for (i = 0; i < num_sdus; i++) {
LOG_T(MAC, "[eNB] Generate DLSCH header num sdu %d len sdu %d\n",
num_sdus, sdu_lengths[i]);
num_sdus,
sdu_lengths[i]);
if (first_element > 0) {
mac_header_ptr->E = 1;
......@@ -248,8 +256,7 @@ generate_dlsch_header(unsigned char *mac_header,
((SCH_SUBHEADER_LONG *) mac_header_ptr)->padding = 0x00;
last_size = 3;
#ifdef DEBUG_HEADER_PARSING
LOG_D(MAC,
"[eNB] generate long sdu, size %x (MSB %x, LSB %x)\n",
LOG_D(MAC, "[eNB] generate long sdu, size %x (MSB %x, LSB %x)\n",
sdu_lengths[i],
((SCH_SUBHEADER_LONG *) mac_header_ptr)->L_MSB,
((SCH_SUBHEADER_LONG *) mac_header_ptr)->L_LSB);
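/* Editor's sketch: SCH_SUBHEADER_LONG is used once an SDU is 128 bytes or
 * longer; the 15-bit length is carried as a 7-bit MSB part and an 8-bit LSB
 * part (standard LTE MAC subheader format; the field widths are assumed here,
 * not read from the OAI struct definition). */
static inline void split_l_field_sketch(uint16_t len, uint8_t *msb, uint8_t *lsb) {
  *msb = (uint8_t)((len >> 8) & 0x7f);  /* upper 7 bits of the 15-bit length */
  *lsb = (uint8_t)(len & 0xff);         /* lower 8 bits                      */
}
/* e.g. len = 300 (0x012c) -> L_MSB = 0x01, L_LSB = 0x2c */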
......@@ -291,10 +298,10 @@ generate_dlsch_header(unsigned char *mac_header,
if ((ce_ptr - mac_header_control_elements) > 0) {
// printf("Copying %d bytes for control elements\n",ce_ptr-mac_header_control_elements);
memcpy((void *) mac_header_ptr, mac_header_control_elements,
memcpy((void *) mac_header_ptr,
mac_header_control_elements,
ce_ptr - mac_header_control_elements);
mac_header_ptr +=
(unsigned char) (ce_ptr - mac_header_control_elements);
mac_header_ptr += (unsigned char) (ce_ptr - mac_header_control_elements);
}
//msg("After CEs %d\n",(uint8_t*)mac_header_ptr - mac_header);
......@@ -449,7 +456,14 @@ schedule_ue_spec(module_id_t module_idP,
unsigned char dlsch_buffer[MAX_DLSCH_PAYLOAD_BYTES];
int round = 0;
int harq_pid = 0;
uint16_t release_num;
uint8_t ra_ii;
eNB_UE_STATS *eNB_UE_stats = NULL;
UE_TEMPLATE *ue_template = NULL;
eNB_STATS *eNB_stats = NULL;
RRC_release_ctrl *release_ctrl = NULL;
DLSCH_PDU *dlsch_pdu = NULL;
RA_t *ra = NULL;
int sdu_length_total = 0;
eNB_MAC_INST *eNB = RC.mac[module_idP];
COMMON_channels_t *cc = eNB->common_channels;
......@@ -457,7 +471,7 @@ schedule_ue_spec(module_id_t module_idP,
int continue_flag = 0;
int32_t normalized_rx_power, target_rx_power;
int tpc = 1;
UE_sched_ctrl *ue_sched_ctl;
UE_sched_ctrl *ue_sched_ctrl;
int mcs;
int i;
int min_rb_unit[NFAPI_CC_MAX];
......@@ -472,7 +486,8 @@ schedule_ue_spec(module_id_t module_idP,
int header_length_total;
rrc_eNB_ue_context_t *ue_contextP = NULL;
start_meas(&eNB->schedule_dlsch);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH, VCD_FUNCTION_IN);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH,
VCD_FUNCTION_IN);
// for TDD: check that we have to act here, otherwise return
if (cc[0].tdd_Config) {
......@@ -526,7 +541,7 @@ schedule_ue_spec(module_id_t module_idP,
//weight = get_ue_weight(module_idP,UE_id);
aggregation = 2;
for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; CC_id++) {
for (CC_id = 0, eNB_stats = &eNB->eNB_stats[0]; CC_id < RC.nb_mac_CC[module_idP]; CC_id++, eNB_stats++) {
N_RB_DL[CC_id] = to_prb(cc[CC_id].mib->message.dl_Bandwidth);
min_rb_unit[CC_id] = get_min_rb_unit(module_idP, CC_id);
// get number of PRBs less those used by common channels
......@@ -538,16 +553,17 @@ schedule_ue_spec(module_id_t module_idP,
N_RBG[CC_id] = to_rbg(cc[CC_id].mib->message.dl_Bandwidth);
// store the global enb stats:
eNB->eNB_stats[CC_id].num_dlactive_UEs = UE_list->num_UEs;
eNB->eNB_stats[CC_id].available_prbs = total_nb_available_rb[CC_id];
eNB->eNB_stats[CC_id].total_available_prbs += total_nb_available_rb[CC_id];
eNB->eNB_stats[CC_id].dlsch_bytes_tx = 0;
eNB->eNB_stats[CC_id].dlsch_pdus_tx = 0;
eNB_stats->num_dlactive_UEs = UE_list->num_UEs;
eNB_stats->available_prbs = total_nb_available_rb[CC_id];
eNB_stats->total_available_prbs += total_nb_available_rb[CC_id];
eNB_stats->dlsch_bytes_tx = 0;
eNB_stats->dlsch_pdus_tx = 0;
}
// CALLING Pre_Processor for downlink scheduling
// (Returns estimation of RBs required by each UE and the allocation on sub-band)
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR, VCD_FUNCTION_IN);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR,
VCD_FUNCTION_IN);
start_meas(&eNB->schedule_dlsch_preprocessor);
dlsch_scheduler_pre_processor(module_idP,
slice_idxP,
......@@ -556,7 +572,8 @@ schedule_ue_spec(module_id_t module_idP,
mbsfn_flag,
eNB->slice_info.rballoc_sub);
stop_meas(&eNB->schedule_dlsch_preprocessor);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR, VCD_FUNCTION_OUT);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR,
VCD_FUNCTION_OUT);
//RC.mac[module_idP]->slice_info.slice_counter--;
// Do the multiplexing and actual allocation only when all slices have been pre-processed.
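/* Editor's sketch of the kind of per-UE estimate the pre-processor returns
 * ("RBs required by each UE"): divide the queued bytes by the transport block
 * size of one minimum allocation unit at the UE's current MCS, then round up
 * to whole allocation units. Purely illustrative; bytes_per_unit is assumed to
 * come from the TBS tables (e.g. get_TBS_DL(mcs, min_rb_unit)) and this is not
 * the actual dlsch_scheduler_pre_processor() algorithm. */
static inline uint16_t estimate_rbs_required_sketch(uint32_t buffer_bytes,
                                                    uint32_t bytes_per_unit,
                                                    int min_rb_unit) {
  if (buffer_bytes == 0 || bytes_per_unit == 0)
    return 0;
  uint32_t units = (buffer_bytes + bytes_per_unit - 1) / bytes_per_unit;
  return (uint16_t)(units * min_rb_unit);  /* allocations grow in min_rb_unit steps */
}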
......@@ -594,56 +611,52 @@ schedule_ue_spec(module_id_t module_idP,
continue_flag = 0; // reset the flag to allow allocation for the remaining UEs
rnti = UE_RNTI(module_idP, UE_id);
eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
ue_sched_ctrl = &UE_list->UE_sched_ctrl[UE_id];
ue_template = &UE_list->UE_template[CC_id][UE_id];
/*
if (rnti == NOT_A_RNTI) {
LOG_D(MAC, "Cannot find rnti for UE_id %d (num_UEs %d)\n", UE_id, UE_list->num_UEs);
continue_flag = 1;
}
*/
if (eNB_UE_stats == NULL) {
LOG_D(MAC, "[eNB] Cannot find eNB_UE_stats\n");
continue_flag = 1;
}
// if (!ue_dl_slice_membership(module_idP, UE_id, slice_idxP)) {
// LOG_D(MAC, "UE%d is not part of slice %d ID %d\n",
// UE_id,
// slice_idxP,
// RC.mac[module_idP]->slice_info.dl[slice_idxP].id);
// /* prevent execution of add_ue_dlsch_info(), it is done by the other
// * slice */
// continue;
// }
if (continue_flag != 1) {
switch (get_tmode(module_idP, CC_id, UE_id)) {
switch (get_tmode(module_idP,
CC_id,
UE_id)) {
case 1:
case 2:
case 7:
aggregation = get_aggregation(get_bw_index(module_idP, CC_id),
ue_sched_ctl->dl_cqi[CC_id],
aggregation = get_aggregation(get_bw_index(module_idP,
CC_id),
ue_sched_ctrl->dl_cqi[CC_id],
format1);
break;
case 3:
aggregation = get_aggregation(get_bw_index(module_idP, CC_id),
ue_sched_ctl->dl_cqi[CC_id],
aggregation = get_aggregation(get_bw_index(module_idP,
CC_id),
ue_sched_ctrl->dl_cqi[CC_id],
format2A);
break;
default:
LOG_W(MAC, "Unsupported transmission mode %d\n",
get_tmode(module_idP, CC_id, UE_id));
get_tmode(module_idP,
CC_id,
UE_id));
aggregation = 2;
break;
}
}
/* if (continue_flag != 1 */
if (ue_sched_ctl->pre_nb_available_rbs[CC_id] == 0 || // no RBs allocated
CCE_allocation_infeasible(module_idP, CC_id, 1, subframeP, aggregation, rnti)) {
if (ue_sched_ctrl->pre_nb_available_rbs[CC_id] == 0 || // no RBs allocated
CCE_allocation_infeasible(module_idP,
CC_id,
1,
subframeP,
aggregation,
rnti)) {
LOG_D(MAC, "[eNB %d] Frame %d : no RB allocated for UE %d on CC_id %d: continue \n",
module_idP,
frameP,
......@@ -677,15 +690,18 @@ schedule_ue_spec(module_id_t module_idP,
continue;
}
nb_available_rb = ue_sched_ctl->pre_nb_available_rbs[CC_id];
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP);
round = ue_sched_ctl->round[CC_id][harq_pid];
UE_list->eNB_UE_stats[CC_id][UE_id].crnti = rnti;
UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status = mac_eNB_get_rrc_status(module_idP, rnti);
UE_list->eNB_UE_stats[CC_id][UE_id].harq_pid = harq_pid;
UE_list->eNB_UE_stats[CC_id][UE_id].harq_round = round;
if (UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status < RRC_CONNECTED) {
nb_available_rb = ue_sched_ctrl->pre_nb_available_rbs[CC_id];
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,
frameP,
subframeP);
round = ue_sched_ctrl->round[CC_id][harq_pid];
eNB_UE_stats->crnti = rnti;
eNB_UE_stats->rrc_status = mac_eNB_get_rrc_status(module_idP,
rnti);
eNB_UE_stats->harq_pid = harq_pid;
eNB_UE_stats->harq_round = round;
if (eNB_UE_stats->rrc_status < RRC_CONNECTED) {
LOG_D(MAC, "UE %d is not in RRC_CONNECTED\n",
UE_id);
continue;
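/* Editor's sketch: frame_subframe2_dl_harq_pid() above maps (frame, subframe)
 * to a DL HARQ process id; the TDD mapping depends on tdd_Config. For FDD a
 * simple modulo-8 of the absolute subframe number is the usual choice in this
 * scheduler. This helper is an assumption for illustration, not the OAI
 * implementation. */
static inline int fdd_dl_harq_pid_sketch(int frame, int subframe) {
  return ((frame * 10) + subframe) & 7;   /* 8 HARQ processes, ids 0..7 */
}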
......@@ -701,18 +717,18 @@ schedule_ue_spec(module_id_t module_idP,
eNB_UE_stats->dl_cqi, MIN_CQI_VALUE, MAX_CQI_VALUE);
*/
if (nfapi_mode) {
eNB_UE_stats->dlsch_mcs1 = 10; // cqi_to_mcs[ue_sched_ctl->dl_cqi[CC_id]];
eNB_UE_stats->dlsch_mcs1 = 10; // cqi_to_mcs[ue_sched_ctrl->dl_cqi[CC_id]];
} else { // this operation is also done in the preprocessor
eNB_UE_stats->dlsch_mcs1 = cmin(eNB_UE_stats->dlsch_mcs1,
eNB->slice_info.dl[slice_idxP].maxmcs); // cmin(eNB_UE_stats->dlsch_mcs1, openair_daq_vars.target_ue_dl_mcs);
}
// Store stats
// UE_list->eNB_UE_stats[CC_id][UE_id].dl_cqi= eNB_UE_stats->dl_cqi;
// eNB_UE_stats->dl_cqi= eNB_UE_stats->dl_cqi;
// Initializing the rb allocation indicator for each UE
for (j = 0; j < N_RBG[CC_id]; j++) {
UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = 0;
ue_template->rballoc_subband[harq_pid][j] = 0;
}
LOG_D(MAC, "[eNB %d] Frame %d: Scheduling UE %d on CC_id %d (rnti %x, harq_pid %d, round %d, rb %d, cqi %d, mcs %d, rrc %d)\n",
......@@ -723,45 +739,46 @@ schedule_ue_spec(module_id_t module_idP,
harq_pid,
round,
nb_available_rb,
ue_sched_ctl->dl_cqi[CC_id],
ue_sched_ctrl->dl_cqi[CC_id],
eNB_UE_stats->dlsch_mcs1,
UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status);
eNB_UE_stats->rrc_status);
/* process retransmission */
if (round != 8) {
// get freq_allocation
nb_rb = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
TBS = get_TBS_DL(UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid], nb_rb);
nb_rb = ue_template->nb_rb[harq_pid];
TBS = get_TBS_DL(ue_template->oldmcs1[harq_pid],
nb_rb);
if (nb_rb <= nb_available_rb) {
if (cc[CC_id].tdd_Config != NULL) {
UE_list->UE_template[CC_id][UE_id].DAI++;
ue_template->DAI++;
update_ul_dci(module_idP,
CC_id,
rnti,
UE_list->UE_template[CC_id][UE_id].DAI,
ue_template->DAI,
subframeP);
LOG_D(MAC, "DAI update: CC_id %d subframeP %d: UE %d, DAI %d\n",
CC_id,
subframeP,
UE_id,
UE_list->UE_template[CC_id][UE_id].DAI);
ue_template->DAI);
}
if (nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) {
if (nb_rb == ue_sched_ctrl->pre_nb_available_rbs[CC_id]) {
for (j = 0; j < N_RBG[CC_id]; ++j) { // for indicating the rballoc for each sub-band
UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
ue_template->rballoc_subband[harq_pid][j] = ue_sched_ctrl->rballoc_sub_UE[CC_id][j];
}
} else {
nb_rb_temp = nb_rb;
j = 0;
while ((nb_rb_temp > 0) && (j < N_RBG[CC_id])) {
if (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 1) {
if (UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j])
if (ue_sched_ctrl->rballoc_sub_UE[CC_id][j] == 1) {
if (ue_template->rballoc_subband[harq_pid][j])
LOG_W(MAC, "WARN: rballoc_subband not free for retrans?\n");
UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
ue_template->rballoc_subband[harq_pid][j] = ue_sched_ctrl->rballoc_sub_UE[CC_id][j];
nb_rb_temp -= min_rb_unit[CC_id];
if ((j == N_RBG[CC_id] - 1) && (N_RB_DL[CC_id] == 25 || N_RB_DL[CC_id] == 50))
......@@ -774,10 +791,10 @@ schedule_ue_spec(module_id_t module_idP,
nb_available_rb -= nb_rb;
/*
eNB->mu_mimo_mode[UE_id].pre_nb_available_rbs = nb_rb;
eNB->mu_mimo_mode[UE_id].dl_pow_off = ue_sched_ctl->dl_pow_off[CC_id];
eNB->mu_mimo_mode[UE_id].dl_pow_off = ue_sched_ctrl->dl_pow_off[CC_id];
for(j = 0; j < N_RBG[CC_id]; ++j) {
eNB->mu_mimo_mode[UE_id].rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j];
eNB->mu_mimo_mode[UE_id].rballoc_sub[j] = ue_template->rballoc_subband[harq_pid][j];
}
*/
......@@ -789,41 +806,44 @@ schedule_ue_spec(module_id_t module_idP,
LOG_D(MAC, "retransmission DL_REQ: rnti:%x\n",
rnti);
dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu];
memset((void *) dl_config_pdu, 0, sizeof(nfapi_dl_config_request_pdu_t));
memset((void *) dl_config_pdu,
0,
sizeof(nfapi_dl_config_request_pdu_t));
dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE;
dl_config_pdu->pdu_size = (uint8_t) (2 + sizeof(nfapi_dl_config_dci_dl_pdu));
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level =
get_aggregation(get_bw_index(module_idP, CC_id),
ue_sched_ctl->dl_cqi[CC_id],
get_aggregation(get_bw_index(module_idP,
CC_id),
ue_sched_ctrl->dl_cqi[CC_id],
format1);
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI: see Table 4-10 from SCF082 - nFAPI specifications
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = 1; // Don't adjust power when retransmitting
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = ue_template->oldNDI[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = ue_template->oldmcs1[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = round & 3;
// TDD
if (cc[CC_id].tdd_Config != NULL) {
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (UE_list->UE_template[CC_id][UE_id].DAI - 1) & 3;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (ue_template->DAI - 1) & 3;
LOG_D(MAC, "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, dai %d, mcs %d\n",
module_idP,
CC_id,
harq_pid,
round,
UE_list->UE_template[CC_id][UE_id].DAI - 1,
UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid]);
ue_template->DAI - 1,
ue_template->oldmcs1[harq_pid]);
} else {
LOG_D(MAC, "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, mcs %d\n",
module_idP,
CC_id,
harq_pid,
round,
UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid]);
ue_template->oldmcs1[harq_pid]);
}
if (!CCE_allocation_infeasible(module_idP,
......@@ -845,7 +865,7 @@ schedule_ue_spec(module_id_t module_idP,
0, // type 0 allocation from 7.1.6 in 36.213
0, // virtual_resource_block_assignment_flag, unused here
0, // resource_block_coding, to be filled in later
getQm(UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid]),
getQm(ue_template->oldmcs1[harq_pid]),
round & 3, // redundancy version
1, // transport blocks
0, // transport block to codeword swap flag
......@@ -854,7 +874,7 @@ schedule_ue_spec(module_id_t module_idP,
1, // number of subbands
// uint8_t codebook_index,
4, // UE category capacity
UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a,
ue_template->physicalConfigDedicated->pdsch_ConfigDedicated->p_a,
0, // delta_power_offset for TM5
0, // ngap
0, // nprb
......@@ -865,45 +885,59 @@ schedule_ue_spec(module_id_t module_idP,
LOG_D(MAC, "Filled NFAPI configuration for DCI/DLSCH %d, retransmission round %d\n",
eNB->pdu_index[CC_id],
round);
program_dlsch_acknak(module_idP, CC_id, UE_id, frameP, subframeP,
program_dlsch_acknak(module_idP,
CC_id,
UE_id,
frameP,
subframeP,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx);
// No TX request for retransmission (check if null request for FAPI)
} else {
LOG_W(MAC,
"Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d\%x, infeasible CCE allocation\n",
frameP, subframeP, UE_id, rnti);
LOG_W(MAC, "Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d\%x, infeasible CCE allocation\n",
frameP,
subframeP,
UE_id,
rnti);
}
}
add_ue_dlsch_info(module_idP, CC_id, UE_id, subframeP, S_DL_SCHEDULED, rnti);
add_ue_dlsch_info(module_idP,
CC_id, UE_id,
subframeP,
S_DL_SCHEDULED,
rnti);
//eNB_UE_stats->dlsch_trials[round]++;
UE_list->eNB_UE_stats[CC_id][UE_id].num_retransmission += 1;
UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used_retx = nb_rb;
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used_retx += nb_rb;
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1 = eNB_UE_stats->dlsch_mcs1;
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs2 = eNB_UE_stats->dlsch_mcs1;
eNB_UE_stats->num_retransmission += 1;
eNB_UE_stats->rbs_used_retx = nb_rb;
eNB_UE_stats->total_rbs_used_retx += nb_rb;
eNB_UE_stats->dlsch_mcs1 = eNB_UE_stats->dlsch_mcs1;
eNB_UE_stats->dlsch_mcs2 = eNB_UE_stats->dlsch_mcs1;
} else {
LOG_D(MAC,
"[eNB %d] Frame %d CC_id %d : don't schedule UE %d, its retransmission takes more resources than we have\n",
module_idP, frameP, CC_id, UE_id);
module_idP,
frameP,
CC_id,
UE_id);
}
} else { /* This is a potentially new SDU opportunity */
rlc_status.bytes_in_buffer = 0;
// Now check RLC information to compute number of required RBs
// get maximum TBS size for RLC request
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_available_rb);
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1,
nb_available_rb);
// add the length for all the control elements (timing adv, drx, etc) : header + payload
if (ue_sched_ctl->ta_timer == 0) {
ta_update = ue_sched_ctl->ta_update;
if (ue_sched_ctrl->ta_timer == 0) {
ta_update = ue_sched_ctrl->ta_update;
/* if we send TA then set timer to not send it for a while */
if (ta_update != 31)
ue_sched_ctl->ta_timer = 20;
if (ta_update != 31) {
ue_sched_ctrl->ta_timer = 20;
}
/* reset ta_update */
ue_sched_ctl->ta_update = 31;
ue_sched_ctrl->ta_update = 31;
} else {
ta_update = 31;
}
......@@ -912,55 +946,83 @@ schedule_ue_spec(module_id_t module_idP,
// RLC data on DCCH
if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) {
rlc_status = mac_rlc_status_ind(module_idP, rnti, module_idP, frameP, subframeP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH,
rlc_status = mac_rlc_status_ind(module_idP,
rnti,
module_idP,
frameP,
subframeP,
ENB_FLAG_YES,
MBMS_FLAG_NO,
DCCH,
TBS - ta_len - header_length_total - sdu_length_total - 3
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
,0, 0
, 0
, 0
#endif
);
sdu_lengths[0] = 0;
if (rlc_status.bytes_in_buffer > 0) {
LOG_D(MAC, "[eNB %d] SFN/SF %d.%d, DL-DCCH->DLSCH CC_id %d, Requesting %d bytes from RLC (RRC message)\n",
module_idP, frameP, subframeP, CC_id,
module_idP,
frameP,
subframeP,
CC_id,
TBS - ta_len - header_length_total - sdu_length_total - 3);
sdu_lengths[0] = mac_rlc_data_req(module_idP, rnti, module_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH,
sdu_lengths[0] = mac_rlc_data_req(module_idP,
rnti,
module_idP,
frameP,
ENB_FLAG_YES,
MBMS_FLAG_NO,
DCCH,
TBS, //not used
(char *)&dlsch_buffer[0]
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
,0, 0
, 0
, 0
#endif
);
);
pthread_mutex_lock(&rrc_release_freelist);
if((rrc_release_info.num_UEs > 0) && (rlc_am_mui.rrc_mui_num > 0)) {
uint16_t release_total = 0;
for(uint16_t release_num = 0; release_num < NUMBER_OF_UE_MAX; release_num++) {
if(rrc_release_info.RRC_release_ctrl[release_num].flag > 0) {
for (release_num = 0, release_ctrl = &rrc_release_info.RRC_release_ctrl[0];
release_num < NUMBER_OF_UE_MAX;
release_num++, release_ctrl++) {
if(release_ctrl->flag > 0) {
release_total++;
} else {
continue;
}
if(rrc_release_info.RRC_release_ctrl[release_num].flag == 1) {
if(rrc_release_info.RRC_release_ctrl[release_num].rnti == rnti) {
if(release_ctrl->flag == 1) {
if(release_ctrl->rnti == rnti) {
for(uint16_t mui_num = 0; mui_num < rlc_am_mui.rrc_mui_num; mui_num++) {
if(rrc_release_info.RRC_release_ctrl[release_num].rrc_eNB_mui == rlc_am_mui.rrc_mui[mui_num]) {
rrc_release_info.RRC_release_ctrl[release_num].flag = 3;
LOG_D(MAC,"DLSCH Release send:index %d rnti %x mui %d mui_num %d flag 1->3\n",release_num,rnti,rlc_am_mui.rrc_mui[mui_num],mui_num);
if(release_ctrl->rrc_eNB_mui == rlc_am_mui.rrc_mui[mui_num]) {
release_ctrl->flag = 3;
LOG_D(MAC,"DLSCH Release send:index %d rnti %x mui %d mui_num %d flag 1->3\n",
release_num,
rnti,
rlc_am_mui.rrc_mui[mui_num],
mui_num);
break;
}
}
}
}
if(rrc_release_info.RRC_release_ctrl[release_num].flag == 2) {
if(rrc_release_info.RRC_release_ctrl[release_num].rnti == rnti) {
for(uint16_t mui_num = 0; mui_num < rlc_am_mui.rrc_mui_num; mui_num++) {
if(rrc_release_info.RRC_release_ctrl[release_num].rrc_eNB_mui == rlc_am_mui.rrc_mui[mui_num]) {
rrc_release_info.RRC_release_ctrl[release_num].flag = 4;
LOG_D(MAC,"DLSCH Release send:index %d rnti %x mui %d mui_num %d flag 2->4\n",release_num,rnti,rlc_am_mui.rrc_mui[mui_num],mui_num);
if(release_ctrl->flag == 2) {
if(release_ctrl->rnti == rnti) {
for (uint16_t mui_num = 0; mui_num < rlc_am_mui.rrc_mui_num; mui_num++) {
if(release_ctrl->rrc_eNB_mui == rlc_am_mui.rrc_mui[mui_num]) {
release_ctrl->flag = 4;
LOG_D(MAC, "DLSCH Release send:index %d rnti %x mui %d mui_num %d flag 2->4\n",
release_num,
rnti,
rlc_am_mui.rrc_mui[mui_num],
mui_num);
break;
}
}
......@@ -973,44 +1035,50 @@ schedule_ue_spec(module_id_t module_idP,
}
pthread_mutex_unlock(&rrc_release_freelist);
RA_t *ra = &eNB->common_channels[CC_id].ra[0];
for (uint8_t ra_ii = 0; ra_ii < NB_RA_PROC_MAX; ra_ii++) {
if((ra[ra_ii].rnti == rnti) && (ra[ra_ii].state == MSGCRNTI)) {
for(uint16_t mui_num = 0; mui_num < rlc_am_mui.rrc_mui_num; mui_num++) {
if(ra[ra_ii].crnti_rrc_mui == rlc_am_mui.rrc_mui[mui_num]) {
ra[ra_ii].crnti_harq_pid = harq_pid;
ra[ra_ii].state = MSGCRNTI_ACK;
for (ra_ii = 0, ra = &eNB->common_channels[CC_id].ra[0]; ra_ii < NB_RA_PROC_MAX; ra_ii++, ra++) {
if ((ra->rnti == rnti) && (ra->state == MSGCRNTI)) {
for (uint16_t mui_num = 0; mui_num < rlc_am_mui.rrc_mui_num; mui_num++) {
if (ra->crnti_rrc_mui == rlc_am_mui.rrc_mui[mui_num]) {
ra->crnti_harq_pid = harq_pid;
ra->state = MSGCRNTI_ACK;
break;
}
}
}
}
T(T_ENB_MAC_UE_DL_SDU, T_INT(module_idP),
T_INT(CC_id), T_INT(rnti), T_INT(frameP),
T_INT(subframeP), T_INT(harq_pid), T_INT(DCCH),
T(T_ENB_MAC_UE_DL_SDU,
T_INT(module_idP),
T_INT(CC_id),
T_INT(rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_INT(DCCH),
T_INT(sdu_lengths[0]));
LOG_D(MAC, "[eNB %d][DCCH] CC_id %d Got %d bytes from RLC\n",
module_idP, CC_id, sdu_lengths[0]);
module_idP,
CC_id,
sdu_lengths[0]);
sdu_length_total = sdu_lengths[0];
sdu_lcids[0] = DCCH;
UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[0] = DCCH;
UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[DCCH] = sdu_lengths[0];
UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH] += 1;
UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH] += sdu_lengths[0];
eNB_UE_stats->lcid_sdu[0] = DCCH;
eNB_UE_stats->sdu_length_tx[DCCH] = sdu_lengths[0];
eNB_UE_stats->num_pdu_tx[DCCH] += 1;
eNB_UE_stats->num_bytes_tx[DCCH] += sdu_lengths[0];
header_length_last = 1 + 1 + (sdu_lengths[0] >= 128);
header_length_total += header_length_last;
num_sdus = 1;
#ifdef DEBUG_eNB_SCHEDULER
LOG_T(MAC,
"[eNB %d][DCCH] CC_id %d Got %d bytes :",
module_idP, CC_id, sdu_lengths[0]);
LOG_T(MAC, "[eNB %d][DCCH] CC_id %d Got %d bytes :",
module_idP,
CC_id,
sdu_lengths[0]);
for (j = 0; j < sdu_lengths[0]; ++j) {
LOG_T(MAC, "%x ", dlsch_buffer[j]);
LOG_T(MAC, "%x ",
dlsch_buffer[j]);
}
LOG_T(MAC, "\n");
#endif
}
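/* Editor's sketch of the header accounting used above: a MAC subheader with a
 * length field costs 2 octets (R/R/E/LCID + F/L) for SDUs shorter than 128
 * bytes and 3 octets once the 15-bit length format is needed, which is exactly
 * what header_length_last = 1 + 1 + (sdu_length >= 128) computes. */
static inline int mac_subheader_len_sketch(int sdu_length) {
  return 2 + (sdu_length >= 128);   /* e.g. 90 -> 2 octets, 300 -> 3 octets */
}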
......@@ -1018,12 +1086,20 @@ schedule_ue_spec(module_id_t module_idP,
// RLC data on DCCH1
if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) {
rlc_status = mac_rlc_status_ind(module_idP, rnti, module_idP, frameP, subframeP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH + 1,
rlc_status = mac_rlc_status_ind(module_idP,
rnti,
module_idP,
frameP,
subframeP,
ENB_FLAG_YES,
MBMS_FLAG_NO,
DCCH + 1,
TBS - ta_len - header_length_total - sdu_length_total - 3
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
,0, 0
, 0
, 0
#endif
);
);
// DCCH SDU
sdu_lengths[num_sdus] = 0;
......@@ -1031,35 +1107,46 @@ schedule_ue_spec(module_id_t module_idP,
LOG_D(MAC, "[eNB %d], Frame %d, DCCH1->DLSCH, CC_id %d, Requesting %d bytes from RLC (RRC message)\n",
module_idP, frameP, CC_id,
TBS - ta_len - header_length_total - sdu_length_total - 3);
sdu_lengths[num_sdus] += mac_rlc_data_req(module_idP, rnti, module_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH + 1,
TBS, //not used
(char *)&dlsch_buffer[sdu_length_total]
sdu_lengths[num_sdus] += mac_rlc_data_req(module_idP,
rnti,
module_idP,
frameP,
ENB_FLAG_YES,
MBMS_FLAG_NO, DCCH + 1,
TBS, //not used
(char *) &dlsch_buffer[sdu_length_total]
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
,0, 0
, 0
, 0
#endif
);
T(T_ENB_MAC_UE_DL_SDU, T_INT(module_idP),
T_INT(CC_id), T_INT(rnti), T_INT(frameP),
T_INT(subframeP), T_INT(harq_pid),
T_INT(DCCH + 1), T_INT(sdu_lengths[num_sdus]));
);
T(T_ENB_MAC_UE_DL_SDU,
T_INT(module_idP),
T_INT(CC_id),
T_INT(rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_INT(DCCH + 1),
T_INT(sdu_lengths[num_sdus]));
sdu_lcids[num_sdus] = DCCH1;
sdu_length_total += sdu_lengths[num_sdus];
UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[num_sdus] = DCCH1;
UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[DCCH1] = sdu_lengths[num_sdus];
UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH1] += 1;
UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH1] += sdu_lengths[num_sdus];
eNB_UE_stats->lcid_sdu[num_sdus] = DCCH1;
eNB_UE_stats->sdu_length_tx[DCCH1] = sdu_lengths[num_sdus];
eNB_UE_stats->num_pdu_tx[DCCH1] += 1;
eNB_UE_stats->num_bytes_tx[DCCH1] += sdu_lengths[num_sdus];
header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128);
header_length_total += header_length_last;
num_sdus++;
#ifdef DEBUG_eNB_SCHEDULER
LOG_T(MAC,
"[eNB %d][DCCH1] CC_id %d Got %d bytes :",
module_idP, CC_id, sdu_lengths[num_sdus]);
LOG_T(MAC, "[eNB %d][DCCH1] CC_id %d Got %d bytes :",
module_idP,
CC_id,
sdu_lengths[num_sdus]);
for (j = 0; j < sdu_lengths[num_sdus]; ++j) {
LOG_T(MAC, "%x ", dlsch_buffer[j]);
LOG_T(MAC, "%x ",
dlsch_buffer[j]);
}
LOG_T(MAC, "\n");
#endif
}
......@@ -1086,9 +1173,10 @@ schedule_ue_spec(module_id_t module_idP,
lcid,
TBS - ta_len - header_length_total - sdu_length_total - 3
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
, 0, 0
, 0
, 0
#endif
);
);
if (rlc_status.bytes_in_buffer > 0) {
LOG_D(MAC, "[eNB %d][USER-PLANE DEFAULT DRB] Frame %d : DTCH->DLSCH, Requesting %d bytes from RLC (lcid %d total hdr len %d)\n",
......@@ -1098,18 +1186,19 @@ schedule_ue_spec(module_id_t module_idP,
lcid,
header_length_total);
sdu_lengths[num_sdus] = mac_rlc_data_req(module_idP,
rnti,
module_idP,
frameP,
ENB_FLAG_YES,
MBMS_FLAG_NO,
lcid,
TBS, //not used
(char *)&dlsch_buffer[sdu_length_total]
rnti,
module_idP,
frameP,
ENB_FLAG_YES,
MBMS_FLAG_NO,
lcid,
TBS, //not used
(char *) &dlsch_buffer[sdu_length_total]
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
, 0, 0
, 0
, 0
#endif
);
);
T(T_ENB_MAC_UE_DL_SDU,
T_INT(module_idP),
T_INT(CC_id),
......@@ -1125,14 +1214,14 @@ schedule_ue_spec(module_id_t module_idP,
lcid);
sdu_lcids[num_sdus] = lcid;
sdu_length_total += sdu_lengths[num_sdus];
UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[lcid]++;
UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[num_sdus] = lcid;
UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[lcid] = sdu_lengths[num_sdus];
UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[lcid] += sdu_lengths[num_sdus];
eNB_UE_stats->num_pdu_tx[lcid]++;
eNB_UE_stats->lcid_sdu[num_sdus] = lcid;
eNB_UE_stats->sdu_length_tx[lcid] = sdu_lengths[num_sdus];
eNB_UE_stats->num_bytes_tx[lcid] += sdu_lengths[num_sdus];
header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128);
header_length_total += header_length_last;
num_sdus++;
UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
ue_sched_ctrl->uplane_inactivity_timer = 0;
// reset RRC inactivity timer after uplane activity
ue_contextP = rrc_eNB_get_ue_context(RC.rrc[module_idP], rnti);
......@@ -1176,25 +1265,27 @@ schedule_ue_spec(module_id_t module_idP,
if (nb_rb > nb_available_rb) { // if we've gone beyond the maximum number of RBs
// (can happen if N_RB_DL is odd)
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_available_rb);
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1,
nb_available_rb);
nb_rb = nb_available_rb;
break;
}
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_rb);
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1,
nb_rb);
}
if (nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) {
if (nb_rb == ue_sched_ctrl->pre_nb_available_rbs[CC_id]) {
for (j = 0; j < N_RBG[CC_id]; ++j) { // for indicating the rballoc for each sub-band
UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
ue_template->rballoc_subband[harq_pid][j] = ue_sched_ctrl->rballoc_sub_UE[CC_id][j];
}
} else {
nb_rb_temp = nb_rb;
j = 0;
while ((nb_rb_temp > 0) && (j < N_RBG[CC_id])) {
if (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 1) {
UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
if (ue_sched_ctrl->rballoc_sub_UE[CC_id][j] == 1) {
ue_template->rballoc_subband[harq_pid][j] = ue_sched_ctrl->rballoc_sub_UE[CC_id][j];
if ((j == N_RBG[CC_id] - 1) && ((N_RB_DL[CC_id] == 25) || (N_RB_DL[CC_id] == 50))) {
nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id] + 1;
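/* Editor's note (sketch): for 25- and 50-PRB carriers the PRB count is not a
 * multiple of the RBG size (25 = 12*2 + 1, 50 = 16*3 + 2), so the last RBG
 * holds min_rb_unit - 1 PRBs. That is why the allocation loops in this file
 * subtract min_rb_unit - 1 instead of min_rb_unit when j == N_RBG - 1.
 * Illustrative helper, assuming min_rb_unit equals the RBG size: */
static inline int rbg_prbs_sketch(int j, int n_rbg, int n_rb_dl, int rbg_size) {
  int last_is_short = (n_rb_dl % rbg_size) != 0;
  return (j == n_rbg - 1 && last_is_short) ? n_rb_dl % rbg_size : rbg_size;
}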
......@@ -1203,33 +1294,34 @@ schedule_ue_spec(module_id_t module_idP,
}
}
j = j + 1;
j++;
}
}
// decrease mcs until TBS falls below required length
while ((TBS > sdu_length_total + header_length_total + ta_len) && (mcs > 0)) {
mcs--;
TBS = get_TBS_DL(mcs, nb_rb);
TBS = get_TBS_DL(mcs,
nb_rb);
}
// if we have decreased too much or we don't have enough RBs, increase MCS
while ((TBS < sdu_length_total + header_length_total + ta_len)
&& (((ue_sched_ctl->dl_pow_off[CC_id] > 0)
&& (mcs < 28))
|| ((ue_sched_ctl->dl_pow_off[CC_id] == 0)
&& (mcs <= 15)))) {
while (TBS < sdu_length_total + header_length_total + ta_len &&
((ue_sched_ctrl->dl_pow_off[CC_id] > 0 && mcs < 28) || (ue_sched_ctrl->dl_pow_off[CC_id] == 0 && mcs <= 15))) {
mcs++;
TBS = get_TBS_DL(mcs, nb_rb);
TBS = get_TBS_DL(mcs,
nb_rb);
}
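/* Editor's sketch of the two loops above: first lower the MCS while the TBS
 * still exceeds what the queued SDUs plus headers need, then raise it again if
 * it fell short, capped at MCS 28 (or 15 when dl_pow_off == 0, mirroring the
 * mcs < 28 / mcs <= 15 conditions). tbs_bytes is a hypothetical stand-in for
 * get_TBS_DL(mcs, nb_rb). */
static int fit_mcs_sketch(int mcs, int nb_rb, int needed_bytes, int mcs_cap,
                          int (*tbs_bytes)(int mcs, int nb_rb)) {
  while (tbs_bytes(mcs, nb_rb) > needed_bytes && mcs > 0)
    mcs--;                                   /* shrink the TBS towards the payload */
  while (tbs_bytes(mcs, nb_rb) < needed_bytes && mcs < mcs_cap)
    mcs++;                                   /* but never end up below it          */
  return mcs;
}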
LOG_D(MAC,
"dlsch_mcs before and after the rate matching = (%d, %d)\n",
eNB_UE_stats->dlsch_mcs1, mcs);
LOG_D(MAC, "dlsch_mcs before and after the rate matching = (%d, %d)\n",
eNB_UE_stats->dlsch_mcs1,
mcs);
#ifdef DEBUG_eNB_SCHEDULER
LOG_D(MAC,
"[eNB %d] CC_id %d Generated DLSCH header (mcs %d, TBS %d, nb_rb %d)\n",
module_idP, CC_id, mcs, TBS, nb_rb);
LOG_D(MAC, "[eNB %d] CC_id %d Generated DLSCH header (mcs %d, TBS %d, nb_rb %d)\n",
module_idP,
CC_id,
mcs, TBS,
nb_rb);
// msg("[MAC][eNB ] Reminder of DLSCH with random data %d %d %d %d \n",
// TBS, sdu_length_total, offset, TBS-sdu_length_total-offset);
#endif
......@@ -1245,97 +1337,132 @@ schedule_ue_spec(module_id_t module_idP,
offset = generate_dlsch_header((unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0],
num_sdus, //num_sdus
sdu_lengths, //
sdu_lcids, 255, // no drx
sdu_lcids,
255, // no drx
ta_update, // timing advance
NULL, // contention res id
padding, post_padding);
padding,
post_padding);
//#ifdef DEBUG_eNB_SCHEDULER
if (ta_update != 31) {
LOG_D(MAC,
LOG_D(MAC,
"[eNB %d][DLSCH] Frame %d Generate header for UE_id %d on CC_id %d: sdu_length_total %d, num_sdus %d, sdu_lengths[0] %d, sdu_lcids[0] %d => payload offset %d,timing advance value : %d, padding %d,post_padding %d,(mcs %d, TBS %d, nb_rb %d),header_length %d\n",
module_idP, frameP, UE_id, CC_id,
sdu_length_total, num_sdus, sdu_lengths[0],
sdu_lcids[0], offset, ta_update, padding,
post_padding, mcs, TBS, nb_rb,
module_idP,
frameP,
UE_id,
CC_id,
sdu_length_total,
num_sdus,
sdu_lengths[0],
sdu_lcids[0],
offset,
ta_update,
padding,
post_padding,
mcs,
TBS,
nb_rb,
header_length_total);
}
//#endif
#ifdef DEBUG_eNB_SCHEDULER
LOG_T(MAC, "[eNB %d] First 16 bytes of DLSCH : \n");
for (i = 0; i < 16; ++i) {
LOG_T(MAC, "%x.", dlsch_buffer[i]);
for (i = 0; i < 16; i++) {
LOG_T(MAC, "%x.",
dlsch_buffer[i]);
}
LOG_T(MAC, "\n");
#endif
// cycle through SDUs and place in dlsch_buffer
memcpy(&UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset], dlsch_buffer, sdu_length_total);
dlsch_pdu = &UE_list->DLSCH_pdu[CC_id][0][UE_id];
memcpy(&dlsch_pdu->payload[0][offset],
dlsch_buffer,
sdu_length_total);
// memcpy(RC.mac[0].DLSCH_pdu[0][0].payload[0][offset],dcch_buffer,sdu_lengths[0]);
// fill remainder of DLSCH with 0
for (j = 0; j < (TBS - sdu_length_total - offset); j++) {
UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset + sdu_length_total + j] = 0;
dlsch_pdu->payload[0][offset + sdu_length_total + j] = 0;
}
if (opt_enabled == 1) {
trace_pdu(DIRECTION_DOWNLINK,
(uint8_t *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0],
TBS, module_idP, WS_C_RNTI,
UE_RNTI(module_idP, UE_id), eNB->frame,
eNB->subframe, 0, 0);
LOG_D(OPT,
"[eNB %d][DLSCH] CC_id %d Frame %d rnti %x with size %d\n",
module_idP, CC_id, frameP,
UE_RNTI(module_idP, UE_id), TBS);
(uint8_t *) dlsch_pdu->payload[0],
TBS,
module_idP,
WS_C_RNTI,
UE_RNTI(module_idP,
UE_id),
eNB->frame,
eNB->subframe,
0,
0);
LOG_D(OPT, "[eNB %d][DLSCH] CC_id %d Frame %d rnti %x with size %d\n",
module_idP,
CC_id,
frameP,
UE_RNTI(module_idP,
UE_id),
TBS);
}
T(T_ENB_MAC_UE_DL_PDU_WITH_DATA, T_INT(module_idP),
T_INT(CC_id), T_INT(rnti), T_INT(frameP),
T_INT(subframeP), T_INT(harq_pid),
T_BUFFER(UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0], TBS));
UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid] = nb_rb;
add_ue_dlsch_info(module_idP, CC_id, UE_id, subframeP, S_DL_SCHEDULED, rnti);
T(T_ENB_MAC_UE_DL_PDU_WITH_DATA,
T_INT(module_idP),
T_INT(CC_id),
T_INT(rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_BUFFER(dlsch_pdu->payload[0],
TBS));
ue_template->nb_rb[harq_pid] = nb_rb;
add_ue_dlsch_info(module_idP,
CC_id,
UE_id,
subframeP,
S_DL_SCHEDULED,
rnti);
// store stats
eNB->eNB_stats[CC_id].dlsch_bytes_tx += sdu_length_total;
eNB->eNB_stats[CC_id].dlsch_pdus_tx += 1;
UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used = nb_rb;
UE_list->eNB_UE_stats[CC_id][UE_id].num_mac_sdu_tx = num_sdus;
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used += nb_rb;
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1 = eNB_UE_stats->dlsch_mcs1;
UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs2 = mcs;
UE_list->eNB_UE_stats[CC_id][UE_id].TBS = TBS;
UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes = TBS - sdu_length_total;
UE_list->eNB_UE_stats[CC_id][UE_id].total_sdu_bytes += sdu_length_total;
UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes += TBS;
UE_list->eNB_UE_stats[CC_id][UE_id].total_num_pdus += 1;
eNB_UE_stats->rbs_used = nb_rb;
eNB_UE_stats->num_mac_sdu_tx = num_sdus;
eNB_UE_stats->total_rbs_used += nb_rb;
eNB_UE_stats->dlsch_mcs1 = eNB_UE_stats->dlsch_mcs1;
eNB_UE_stats->dlsch_mcs2 = mcs;
eNB_UE_stats->TBS = TBS;
eNB_UE_stats->overhead_bytes = TBS - sdu_length_total;
eNB_UE_stats->total_sdu_bytes += sdu_length_total;
eNB_UE_stats->total_pdu_bytes += TBS;
eNB_UE_stats->total_num_pdus += 1;
if (cc[CC_id].tdd_Config != NULL) { // TDD
UE_list->UE_template[CC_id][UE_id].DAI++;
update_ul_dci(module_idP, CC_id, rnti,
UE_list->UE_template[CC_id][UE_id].DAI,
ue_template->DAI++;
update_ul_dci(module_idP,
CC_id,
rnti,
ue_template->DAI,
subframeP);
}
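/* Editor's sketch: for TDD the DAI counts DL assignments within the HARQ
 * bundling window; the 2-bit DCI field carries it modulo 4, which is the
 * (DAI - 1) & 3 written into downlink_assignment_index elsewhere in this
 * function. */
static inline uint8_t dai_to_dci_field_sketch(uint8_t dai_counter) {
  return (uint8_t)((dai_counter - 1) & 3);  /* 1st assignment -> 0, 2nd -> 1, ... */
}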
// do PUCCH power control
// this is the normalized RX power
eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
/* unit is not dBm, it's special from nfapi */
// unit is not dBm, it's special from nfapi
// converting to dBm: ToDo: Noise power hard coded to 30
normalized_rx_power = (5*ue_sched_ctl->pucch1_snr[CC_id]-640)/10+30;
target_rx_power= eNB->puCch10xSnr/10 + 30;
normalized_rx_power = (((5 * ue_sched_ctrl->pucch1_snr[CC_id]) - 640) / 10) + 30;
target_rx_power= (eNB->puCch10xSnr / 10) + 30;
// this assumes accumulated tpc
// make sure that we are only sending a tpc update once a frame, otherwise the control loop will freak out
int32_t framex10psubframe = UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame * 10 + UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe;
int32_t framex10psubframe = ue_template->pucch_tpc_tx_frame * 10 + ue_template->pucch_tpc_tx_subframe;
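/* Editor's sketch of the gating applied just below: a PUCCH TPC command is
 * issued at most once per frame (10 subframes), with the comparison done in
 * frame*10+subframe units and the 10240-subframe wrap of the SFN space handled
 * explicitly. */
static inline int tpc_update_due_sketch(int32_t last_fsf, int32_t now_fsf) {
  return (last_fsf + 10 <= now_fsf) ||                             /* normal case  */
         (last_fsf > now_fsf && 10240 - last_fsf + now_fsf >= 10); /* wrap-around  */
}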
if (((framex10psubframe + 10) <= (frameP * 10 + subframeP)) || //normal case
((framex10psubframe > (frameP * 10 + subframeP)) && (((10240 - framex10psubframe + frameP * 10 + subframeP) >= 10)))) //frame wrap-around
if (ue_sched_ctl->pucch1_cqi_update[CC_id] == 1) {
ue_sched_ctl->pucch1_cqi_update[CC_id] = 0;
UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame = frameP;
UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe = subframeP;
if (framex10psubframe + 10 <= (frameP * 10) + subframeP || //normal case
(framex10psubframe > (frameP * 10) + subframeP && 10240 - framex10psubframe + (frameP * 10) + subframeP >= 10)) //frame wrap-around
if (ue_sched_ctrl->pucch1_cqi_update[CC_id] == 1) {
ue_sched_ctrl->pucch1_cqi_update[CC_id] = 0;
ue_template->pucch_tpc_tx_frame = frameP;
ue_template->pucch_tpc_tx_subframe = subframeP;
if (normalized_rx_power > (target_rx_power + 4)) {
tpc = 0; //-1
......@@ -1345,10 +1472,14 @@ schedule_ue_spec(module_id_t module_idP,
tpc = 1; //0
}
LOG_D(MAC,
"[eNB %d] DLSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, normalized/target rx power %d/%d\n",
module_idP, frameP, subframeP, harq_pid, tpc,
normalized_rx_power, target_rx_power);
LOG_D(MAC, "[eNB %d] DLSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, normalized/target rx power %d/%d\n",
module_idP,
frameP,
subframeP,
harq_pid,
tpc,
normalized_rx_power,
target_rx_power);
} // Po_PUCCH has been updated
else {
tpc = 1; //0
......@@ -1358,20 +1489,24 @@ schedule_ue_spec(module_id_t module_idP,
}
dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu];
memset((void *) dl_config_pdu, 0, sizeof(nfapi_dl_config_request_pdu_t));
memset((void *) dl_config_pdu,
0,
sizeof(nfapi_dl_config_request_pdu_t));
dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE;
dl_config_pdu->pdu_size = (uint8_t) (2 + sizeof(nfapi_dl_config_dci_dl_pdu));
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level =
get_aggregation(get_bw_index(module_idP, CC_id), ue_sched_ctl->dl_cqi[CC_id], format1);
get_aggregation(get_bw_index(module_idP,
CC_id),
ue_sched_ctrl->dl_cqi[CC_id],
format1);
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI : see Table 4-10 from SCF082 - nFAPI specifications
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = tpc; // TPC command from the PUCCH power control above
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 =
1 - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1 - ue_template->oldNDI[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = mcs;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 0;
//deactivate second codeword
......@@ -1379,47 +1514,61 @@ schedule_ue_spec(module_id_t module_idP,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = 1;
if (cc[CC_id].tdd_Config != NULL) { //TDD
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index =
(UE_list->UE_template[CC_id][UE_id].DAI - 1) & 3;
LOG_D(MAC,
"[eNB %d] Initial transmission CC_id %d : harq_pid %d, dai %d, mcs %d\n",
module_idP, CC_id, harq_pid,
(UE_list->UE_template[CC_id][UE_id].DAI - 1),
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (ue_template->DAI - 1) & 3;
LOG_D(MAC, "[eNB %d] Initial transmission CC_id %d : harq_pid %d, dai %d, mcs %d\n",
module_idP,
CC_id,
harq_pid,
(ue_template->DAI - 1),
mcs);
} else {
LOG_D(MAC,
"[eNB %d] Initial transmission CC_id %d : harq_pid %d, mcs %d\n",
module_idP, CC_id, harq_pid, mcs);
LOG_D(MAC, "[eNB %d] Initial transmission CC_id %d : harq_pid %d, mcs %d\n",
module_idP,
CC_id,
harq_pid,
mcs);
}
LOG_D(MAC, "Checking feasibility pdu %d (new sdu)\n",
dl_req->number_pdu);
if (!CCE_allocation_infeasible(module_idP, CC_id, 1, subframeP,
dl_config_pdu->dci_dl_pdu.
dci_dl_pdu_rel8.aggregation_level, rnti)) {
ue_sched_ctl->round[CC_id][harq_pid] = 0;
if (!CCE_allocation_infeasible(module_idP,
CC_id,
1,
subframeP,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
rnti)) {
ue_sched_ctrl->round[CC_id][harq_pid] = 0;
dl_req->number_dci++;
dl_req->number_pdu++;
dl_req->tl.tag = NFAPI_DL_CONFIG_REQUEST_BODY_TAG;
eNB->DL_req[CC_id].sfn_sf = frameP << 4 | subframeP;
eNB->DL_req[CC_id].header.message_id = NFAPI_DL_CONFIG_REQUEST;
// Toggle NDI for next time
LOG_D(MAC,
"CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n",
CC_id, frameP, subframeP, UE_id, rnti, harq_pid,
UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]);
UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid] = 1 - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid] = mcs;
UE_list->UE_template[CC_id][UE_id].oldmcs2[harq_pid] = 0;
AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated != NULL,
"physicalConfigDedicated is NULL\n");
AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated != NULL,
LOG_D(MAC, "CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n",
CC_id,
frameP,
subframeP,
UE_id,
rnti,
harq_pid,
ue_template->oldNDI[harq_pid]);
ue_template->oldNDI[harq_pid] = 1 - ue_template->oldNDI[harq_pid];
ue_template->oldmcs1[harq_pid] = mcs;
ue_template->oldmcs2[harq_pid] = 0;
AssertFatal(ue_template->physicalConfigDedicated != NULL, "physicalConfigDedicated is NULL\n");
AssertFatal(ue_template->physicalConfigDedicated->pdsch_ConfigDedicated != NULL,
"physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n");
fill_nfapi_dlsch_config(eNB, dl_req, TBS, eNB->pdu_index[CC_id], rnti, 0, // type 0 allocation from 7.1.6 in 36.213
fill_nfapi_dlsch_config(eNB,
dl_req,
TBS,
eNB->pdu_index[CC_id],
rnti,
0, // type 0 allocation from 7.1.6 in 36.213
0, // virtual_resource_block_assignment_flag, unused here
0, // resource_block_coding, to be filled in later
getQm(mcs), 0, // redundancy version
getQm(mcs),
0, // redundancy version
1, // transport blocks
0, // transport block to codeword swap flag
cc[CC_id].p_eNB == 1 ? 0 : 1, // transmission_scheme
......@@ -1427,57 +1576,74 @@ schedule_ue_spec(module_id_t module_idP,
1, // number of subbands
// uint8_t codebook_index,
4, // UE category capacity
UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a, 0, // delta_power_offset for TM5
ue_template->physicalConfigDedicated->pdsch_ConfigDedicated->p_a,
0, // delta_power_offset for TM5
0, // ngap
0, // nprb
cc[CC_id].p_eNB == 1 ? 1 : 2, // transmission mode
0, //number of PRBs treated as one subband, not used here
0 // number of beamforming vectors, not used here
);
0); // number of beamforming vectors, not used here
eNB->TX_req[CC_id].sfn_sf = fill_nfapi_tx_req(&eNB->TX_req[CC_id].tx_request_body,
(frameP * 10) + subframeP,
TBS, eNB->pdu_index[CC_id],
eNB->UE_list.DLSCH_pdu[CC_id][0][UE_id].payload[0]);
LOG_D(MAC,
"Filled NFAPI configuration for DCI/DLSCH/TXREQ %d, new SDU\n",
(frameP * 10) + subframeP,
TBS,
eNB->pdu_index[CC_id],
dlsch_pdu->payload[0]);
LOG_D(MAC, "Filled NFAPI configuration for DCI/DLSCH/TXREQ %d, new SDU\n",
eNB->pdu_index[CC_id]);
eNB->pdu_index[CC_id]++;
program_dlsch_acknak(module_idP, CC_id, UE_id,
frameP, subframeP,
program_dlsch_acknak(module_idP,
CC_id,
UE_id,
frameP,
subframeP,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx);
} else {
LOG_W(MAC,
"Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d/%x, infeasible CCE allocations\n",
frameP, subframeP, UE_id, rnti);
LOG_W(MAC, "Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d/%x, infeasible CCE allocations\n",
frameP,
subframeP,
UE_id,
rnti);
}
} else { // There is no data from RLC or MAC header, so don't schedule
}
}
if (cc[CC_id].tdd_Config != NULL) { // TDD
set_ul_DAI(module_idP, UE_id, CC_id, frameP, subframeP);
set_ul_DAI(module_idP,
UE_id,
CC_id,
frameP,
subframeP);
}
} // UE_id loop
} // CC_id loop
fill_DLSCH_dci(module_idP, frameP, subframeP, mbsfn_flag);
fill_DLSCH_dci(module_idP,
frameP,
subframeP,
mbsfn_flag);
stop_meas(&eNB->schedule_dlsch);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH, VCD_FUNCTION_OUT);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH,
VCD_FUNCTION_OUT);
}
//------------------------------------------------------------------------------
void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id,
int frameP,
sub_frame_t subframeP,
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX])
void
dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id,
int frameP,
sub_frame_t subframeP,
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX])
//------------------------------------------------------------------------------
{
// FIXME: I'm prototyping the algorithm, so there may be arrays and variables that carry redundant information here and in pre_processor_results struct.
int UE_id, CC_id, rbg, i;
int N_RB_DL, min_rb_unit, tm;
int owned, used;
UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
slice_info_t *sli = &RC.mac[Mod_id]->slice_info;
eNB_MAC_INST *eNB = RC.mac[Mod_id];
int nb_mac_CC = RC.nb_mac_CC[Mod_id];
UE_list_t *UE_list = &eNB->UE_list;
slice_info_t *sli = &eNB->slice_info;
UE_sched_ctrl *ue_sched_ctl;
COMMON_channels_t *cc;
int N_RBG[NFAPI_CC_MAX];
......@@ -1489,17 +1655,17 @@ void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id,
uint16_t (*nb_rbs_remaining)[MAX_MOBILES_PER_ENB];
uint16_t (*nb_rbs_required)[MAX_MOBILES_PER_ENB];
uint8_t (*MIMO_mode_indicator)[N_RBG_MAX];
// Initialize the free RBGs map
// free_rbgs_map[CC_id][rbg] = -1 if RBG is allocated,
// otherwise it contains the id of the slice it belongs to.
// (Information about slicing must be retained to deal with isolation).
// FIXME: This method does not consider RBGs that are free and belong to no slices
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; ++CC_id) {
cc = &RC.mac[Mod_id]->common_channels[CC_id];
for (CC_id = 0; CC_id < nb_mac_CC; CC_id++) {
cc = &eNB->common_channels[CC_id];
N_RBG[CC_id] = to_rbg(cc->mib->message.dl_Bandwidth);
for (rbg = 0; rbg < N_RBG[CC_id]; ++rbg) {
for (rbg = 0; rbg < N_RBG[CC_id]; rbg++) {
for (i = 0; i < sli->n_dl; ++i) {
owned = sli->pre_processor_results[i].slice_allocation_mask[CC_id][rbg];
......@@ -1514,11 +1680,11 @@ void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id,
// Find out which slices need other resources.
// FIXME: I don't think is really needed since we check nb_rbs_remaining later
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; ++CC_id) {
for (i = 0; i < sli->n_dl; ++i) {
for (CC_id = 0; CC_id < nb_mac_CC; CC_id++) {
for (i = 0; i < sli->n_dl; i++) {
has_traffic[CC_id][i] = 0;
for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; ++UE_id) {
for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) {
if (sli->pre_processor_results[i].nb_rbs_remaining[CC_id][UE_id] > 0) {
has_traffic[CC_id][i] = 1;
break;
......@@ -1527,12 +1693,13 @@ void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id,
}
}
slice_priority_sort(Mod_id, slice_sorted_list);
slice_priority_sort(Mod_id,
slice_sorted_list);
// MULTIPLEXING
// This part is an adaptation of dlsch_scheduler_pre_processor_allocate() code
for (CC_id = 0; CC_id < RC.nb_mac_CC[Mod_id]; ++CC_id) {
N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);
for (CC_id = 0; CC_id < nb_mac_CC; ++CC_id) {
N_RB_DL = to_prb(eNB->common_channels[CC_id].mib->message.dl_Bandwidth);
min_rb_unit = get_min_rb_unit(Mod_id, CC_id);
for (i = 0; i < sli->n_dl; ++i) {
......@@ -1561,7 +1728,10 @@ void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id,
// Sort UE again
// (UE list gets sorted every time pre_processor is called so it is probably dirty at this point)
// FIXME: There is only one UE_list for all slices, so it must be sorted again each time we use it
sort_UEs(Mod_id, slice_idx, frameP, subframeP);
sort_UEs(Mod_id,
slice_idx,
frameP,
subframeP);
nb_rbs_remaining = sli->pre_processor_results[slice_idx].nb_rbs_remaining;
nb_rbs_required = sli->pre_processor_results[slice_idx].nb_rbs_required;
MIMO_mode_indicator = sli->pre_processor_results[slice_idx].MIMO_mode_indicator;
......@@ -1569,7 +1739,9 @@ void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id,
// Allocation
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
tm = get_tmode(Mod_id, CC_id, UE_id);
tm = get_tmode(Mod_id,
CC_id,
UE_id);
for (rbg = 0; rbg < N_RBG[CC_id]; ++rbg) {
// FIXME: I think that some of these checks are redundant
......@@ -1597,8 +1769,8 @@ void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id,
ue_sched_ctl->dl_pow_off[CC_id] = 1;
}
nb_rbs_remaining[CC_id][UE_id] = nb_rbs_remaining[CC_id][UE_id] - min_rb_unit + 1;
ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit - 1;
nb_rbs_remaining[CC_id][UE_id] -= (min_rb_unit - 1);
ue_sched_ctl->pre_nb_available_rbs[CC_id] += (min_rb_unit - 1);
}
} else {
// Allocating a standard-sized RBG
......@@ -1612,14 +1784,15 @@ void dlsch_scheduler_interslice_multiplexing(module_id_t Mod_id,
ue_sched_ctl->dl_pow_off[CC_id] = 1;
}
nb_rbs_remaining[CC_id][UE_id] = nb_rbs_remaining[CC_id][UE_id] - min_rb_unit;
ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit;
nb_rbs_remaining[CC_id][UE_id] -= min_rb_unit;
ue_sched_ctl->pre_nb_available_rbs[CC_id] += min_rb_unit;
}
}
}
}
}
}
return;
}
//------------------------------------------------------------------------------
......@@ -1650,7 +1823,9 @@ void dlsch_scheduler_qos_multiplexing(module_id_t Mod_id, int frameP, sub_frame_
//------------------------------------------------------------------------------
void
fill_DLSCH_dci(module_id_t module_idP,
frame_t frameP, sub_frame_t subframeP, int *mbsfn_flagP)
frame_t frameP,
sub_frame_t subframeP,
int *mbsfn_flagP)
//------------------------------------------------------------------------------
{
// loop over all allocated UEs and compute frequency allocations for PDSCH
......@@ -1669,12 +1844,16 @@ fill_DLSCH_dci(module_id_t module_idP,
int N_RBG;
int N_RB_DL;
COMMON_channels_t *cc;
eNB_DLSCH_INFO *dlsch_info;
UE_TEMPLATE *ue_template;
start_meas(&eNB->fill_DLSCH_dci);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME
(VCD_SIGNAL_DUMPER_FUNCTIONS_FILL_DLSCH_DCI, VCD_FUNCTION_IN);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_FILL_DLSCH_DCI,
VCD_FUNCTION_IN);
for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; CC_id++) {
LOG_D(MAC, "Doing fill DCI for CC_id %d\n", CC_id);
LOG_D(MAC, "Doing fill DCI for CC_id %d\n",
CC_id);
if (mbsfn_flagP[CC_id] > 0)
continue;
......@@ -1684,42 +1863,48 @@ fill_DLSCH_dci(module_id_t module_idP,
N_RB_DL = to_prb(cc->mib->message.dl_Bandwidth);
// UE specific DCIs
for (UE_id = UE_list->head; UE_id >= 0;
UE_id = UE_list->next[UE_id]) {
LOG_T(MAC, "CC_id %d, UE_id: %d => status %d\n", CC_id, UE_id,
eNB_dlsch_info[module_idP][CC_id][UE_id].status);
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
dlsch_info = &eNB_dlsch_info[module_idP][CC_id][UE_id];
LOG_T(MAC, "CC_id %d, UE_id: %d => status %d\n",
CC_id,
UE_id,
dlsch_info->status);
if (eNB_dlsch_info[module_idP][CC_id][UE_id].status == S_DL_SCHEDULED) {
if (dlsch_info->status == S_DL_SCHEDULED) {
// clear scheduling flag
eNB_dlsch_info[module_idP][CC_id][UE_id].status = S_DL_WAITING;
dlsch_info->status = S_DL_WAITING;
rnti = UE_RNTI(module_idP, UE_id);
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP);
nb_rb = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,
subframeP);
ue_template = &UE_list->UE_template[CC_id][UE_id];
nb_rb = ue_template->nb_rb[harq_pid];
/// Synchronizing rballoc with rballoc_sub
for (i = 0; i < N_RBG; i++) {
rballoc_sub[i] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][i];
rballoc_sub[i] = ue_template->rballoc_subband[harq_pid][i];
}
nfapi_dl_config_request_t *DL_req = &RC.mac[module_idP]->DL_req[0];
nfapi_dl_config_request_body_t *dl_config_request_body = &RC.mac[module_idP]->DL_req[CC_id].dl_config_request_body;
nfapi_dl_config_request_pdu_t *dl_config_pdu;
for (i = 0;
i < DL_req[CC_id].dl_config_request_body.number_pdu;
i++) {
dl_config_pdu = &DL_req[CC_id].dl_config_request_body.dl_config_pdu_list[i];
for (i = 0; i < dl_config_request_body->number_pdu; i++) {
dl_config_pdu = &dl_config_request_body->dl_config_pdu_list[i];
if ((dl_config_pdu->pdu_type == NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE)
&& (dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti == rnti)
&& (dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format != 1)) {
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_block_coding = allocate_prbs_sub(nb_rb, N_RB_DL, N_RBG,
rballoc_sub);
if (dl_config_pdu->pdu_type == NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE &&
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti == rnti &&
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format != 1) {
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_block_coding = allocate_prbs_sub(nb_rb,
N_RB_DL,
N_RBG,
rballoc_sub);
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_allocation_type = 0;
} else if ((dl_config_pdu->pdu_type == NFAPI_DL_CONFIG_DLSCH_PDU_TYPE)
&& (dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.rnti == rnti)
&& (dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_allocation_type == 0)) {
dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_block_coding = allocate_prbs_sub(nb_rb, N_RB_DL, N_RBG,
rballoc_sub);
} else if (dl_config_pdu->pdu_type == NFAPI_DL_CONFIG_DLSCH_PDU_TYPE &&
dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.rnti == rnti &&
dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_allocation_type == 0) {
dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.resource_block_coding = allocate_prbs_sub(nb_rb,
N_RB_DL,
N_RBG,
rballoc_sub);
}
}
}
......@@ -1727,7 +1912,8 @@ fill_DLSCH_dci(module_id_t module_idP,
}
stop_meas(&eNB->fill_DLSCH_dci);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_FILL_DLSCH_DCI, VCD_FUNCTION_OUT);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_FILL_DLSCH_DCI,
VCD_FUNCTION_OUT);
}
//------------------------------------------------------------------------------
......@@ -2230,13 +2416,20 @@ static int slice_priority_compare(const void *_a, const void *_b, void *_c) {
return 1;
}
void slice_priority_sort(module_id_t Mod_id, int slice_list[MAX_NUM_SLICES]) {
void
slice_priority_sort(module_id_t Mod_id,
int slice_list[MAX_NUM_SLICES])
{
int i;
for (i = 0; i < RC.mac[Mod_id]->slice_info.n_dl; ++i) {
int n_dl = RC.mac[Mod_id]->slice_info.n_dl;
for (i = 0; i < n_dl; i++) {
slice_list[i] = i;
}
qsort_r(slice_list, RC.mac[Mod_id]->slice_info.n_dl, sizeof(int),
slice_priority_compare, &Mod_id);
qsort_r(slice_list,
n_dl,
sizeof(int),
slice_priority_compare,
&Mod_id);
return;
}
......@@ -1270,6 +1270,7 @@ program_dlsch_acknak(module_id_t module_idP,
if (harq_information) {
fill_nfapi_harq_information(module_idP, CC_idP, rnti, harq_information, cce_idx);
}
return;
}
uint8_t get_V_UL_DAI(module_id_t module_idP, int CC_idP, uint16_t rntiP,sub_frame_t subframeP) {
......@@ -1388,7 +1389,8 @@ fill_nfapi_harq_information(module_id_t module_idP,
int CC_idP,
uint16_t rntiP,
nfapi_ul_config_harq_information *harq_information,
uint8_t cce_idxP) {
uint8_t cce_idxP)
{
eNB_MAC_INST *eNB = RC.mac[module_idP];
COMMON_channels_t *cc = &eNB->common_channels[CC_idP];
UE_list_t *UE_list = &eNB->UE_list;
......@@ -1399,7 +1401,7 @@ fill_nfapi_harq_information(module_id_t module_idP,
harq_information->harq_information_rel11.tl.tag = NFAPI_UL_CONFIG_REQUEST_HARQ_INFORMATION_REL11_TAG;
harq_information->harq_information_rel11.num_ant_ports = 1;
LTE_PhysicalConfigDedicated_t *physicalConfigDedicated = UE_list->UE_template[CC_idP][UE_id].physicalConfigDedicated;
struct LTE_PUCCH_ConfigDedicated *pucch_ConfigDedicated = NULL;
if (physicalConfigDedicated != NULL) pucch_ConfigDedicated = physicalConfigDedicated->pucch_ConfigDedicated;
......@@ -1572,7 +1574,8 @@ fill_nfapi_tx_req(nfapi_tx_request_body_t *tx_req_body,
uint8_t *pdu) {
nfapi_tx_request_pdu_t *TX_req = &tx_req_body->tx_pdu_list[tx_req_body->number_of_pdus];
LOG_D(MAC, "Filling TX_req %d for pdu length %d\n",
tx_req_body->number_of_pdus, pdu_length);
tx_req_body->number_of_pdus,
pdu_length);
TX_req->pdu_length = pdu_length;
TX_req->pdu_index = pdu_index;
TX_req->num_segments = 1;
......@@ -2323,7 +2326,8 @@ UE_is_to_be_scheduled(module_id_t module_idP, int CC_id, uint8_t UE_id) {
uint8_t
get_tmode(module_id_t module_idP,
int CC_idP,
int UE_idP) {
int UE_idP)
{
eNB_MAC_INST *eNB = RC.mac[module_idP];
COMMON_channels_t *cc = &eNB->common_channels[CC_idP];
LTE_PhysicalConfigDedicated_t *physicalConfigDedicated = eNB->UE_list.physicalConfigDedicated[CC_idP][UE_idP];
......@@ -2516,7 +2520,8 @@ get_bw_index(module_id_t module_id,
int
get_min_rb_unit(module_id_t module_id,
uint8_t CC_id) {
uint8_t CC_id)
{
int min_rb_unit = 0;
int N_RB_DL = to_prb(RC.mac[module_id]->common_channels[CC_id].mib->message.dl_Bandwidth);
......@@ -2550,12 +2555,21 @@ get_min_rb_unit(module_id_t module_id,
}
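/* Editor's sketch: min_rb_unit presumably follows the RBG size P of 36.213
 * Table 7.1.6.1-1 (the body of get_min_rb_unit is elided between hunks, so the
 * exact mapping below is an assumption, not a copy of the OAI code). */
static inline int rbg_size_sketch(int n_rb_dl) {
  if (n_rb_dl <= 10) return 1;    /*   6 PRBs -> P = 1 */
  if (n_rb_dl <= 26) return 2;    /*  25 PRBs -> P = 2 */
  if (n_rb_dl <= 63) return 3;    /*  50 PRBs -> P = 3 */
  return 4;                       /* 100 PRBs -> P = 4 */
}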
uint32_t
allocate_prbs_sub(int nb_rb, int N_RB_DL, int N_RBG, uint8_t *rballoc) {
allocate_prbs_sub(int nb_rb,
int N_RB_DL,
int N_RBG,
uint8_t *rballoc)
{
int check = 0; //check1=0,check2=0;
uint32_t rballoc_dci = 0;
//uint8_t number_of_subbands=13;
LOG_T(MAC, "*****Check1RBALLOC****: %d%d%d%d (nb_rb %d,N_RBG %d)\n",
rballoc[3], rballoc[2], rballoc[1], rballoc[0], nb_rb, N_RBG);
rballoc[3],
rballoc[2],
rballoc[1],
rballoc[0],
nb_rb,
N_RBG);
while ((nb_rb > 0) && (check < N_RBG)) {
//printf("rballoc[%d] %d\n",check,rballoc[check]);
......@@ -2592,12 +2606,13 @@ allocate_prbs_sub(int nb_rb, int N_RB_DL, int N_RBG, uint8_t *rballoc) {
}
// printf("rb_alloc %x\n",rballoc_dci);
check = check + 1;
check++;
// check1 = check1+2;
}
// rballoc_dci = (rballoc_dci)&(0x1fff);
LOG_T(MAC, "*********RBALLOC : %x\n", rballoc_dci);
LOG_T(MAC, "*********RBALLOC : %x\n",
rballoc_dci);
// exit(-1);
return (rballoc_dci);
}
......