Commit 99301f43 authored by Robert Schmidt

Call schedule_ue_spec() once for each CC

parent 81fb7628
...@@ -420,17 +420,8 @@ set_ul_DAI(int module_idP, ...@@ -420,17 +420,8 @@ set_ul_DAI(int module_idP,
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
//------------------------------------------------------------------------------
/* Top-level DLSCH scheduling entry point.
 *
 * Invokes the per-CC UE-specific scheduler once for every configured MAC
 * component carrier of this eNB instance.
 *
 * module_idP  - eNB module instance index into RC.mac / RC.nb_mac_CC
 * frameP      - current system frame number
 * subframeP   - current subframe within the frame
 * mbsfn_flag  - per-CC MBSFN indication array, forwarded to the CC scheduler
 */
void
schedule_dlsch(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP, int *mbsfn_flag) {
  const int n_cc = RC.nb_mac_CC[module_idP];

  /* One independent scheduling pass per component carrier */
  for (int cc = 0; cc < n_cc; cc++)
    schedule_ue_spec(module_idP, cc, frameP, subframeP, mbsfn_flag);
}
...@@ -454,13 +445,12 @@ void getRepetition(UE_TEMPLATE *pue_template,unsigned int *maxRep, unsigned int ...@@ -454,13 +445,12 @@ void getRepetition(UE_TEMPLATE *pue_template,unsigned int *maxRep, unsigned int
*/ */
void void
schedule_ue_spec(module_id_t module_idP, schedule_ue_spec(module_id_t module_idP,
int slice_idxP, int CC_id,
frame_t frameP, frame_t frameP,
sub_frame_t subframeP, sub_frame_t subframeP,
int *mbsfn_flag) int *mbsfn_flag)
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
{ {
int CC_id;
int UE_id; int UE_id;
int aggregation; int aggregation;
mac_rlc_status_resp_t rlc_status; mac_rlc_status_resp_t rlc_status;
...@@ -478,7 +468,6 @@ schedule_ue_spec(module_id_t module_idP, ...@@ -478,7 +468,6 @@ schedule_ue_spec(module_id_t module_idP,
uint8_t ra_ii; uint8_t ra_ii;
eNB_UE_STATS *eNB_UE_stats = NULL; eNB_UE_STATS *eNB_UE_stats = NULL;
UE_TEMPLATE *ue_template = NULL; UE_TEMPLATE *ue_template = NULL;
eNB_STATS *eNB_stats = NULL;
RRC_release_ctrl_t *release_ctrl = NULL; RRC_release_ctrl_t *release_ctrl = NULL;
DLSCH_PDU *dlsch_pdu = NULL; DLSCH_PDU *dlsch_pdu = NULL;
RA_t *ra = NULL; RA_t *ra = NULL;
...@@ -491,11 +480,11 @@ schedule_ue_spec(module_id_t module_idP, ...@@ -491,11 +480,11 @@ schedule_ue_spec(module_id_t module_idP,
int tpc = 1; int tpc = 1;
UE_sched_ctrl_t *ue_sched_ctrl; UE_sched_ctrl_t *ue_sched_ctrl;
int mcs; int mcs;
int i; const int min_rb_unit = get_min_rb_unit(module_idP, CC_id);
int min_rb_unit[NFAPI_CC_MAX]; const int dl_Bandwidth = cc[CC_id].mib->message.dl_Bandwidth;
int N_RB_DL[NFAPI_CC_MAX]; const int N_RB_DL = to_prb(dl_Bandwidth);
int total_nb_available_rb[NFAPI_CC_MAX]; const int N_RBG = to_rbg(dl_Bandwidth);
int N_RBG[NFAPI_CC_MAX]; int total_nb_available_rb = N_RB_DL;
nfapi_dl_config_request_body_t *dl_req; nfapi_dl_config_request_body_t *dl_req;
nfapi_dl_config_request_pdu_t *dl_config_pdu; nfapi_dl_config_request_pdu_t *dl_config_pdu;
int tdd_sfa; int tdd_sfa;
...@@ -503,22 +492,19 @@ schedule_ue_spec(module_id_t module_idP, ...@@ -503,22 +492,19 @@ schedule_ue_spec(module_id_t module_idP,
int header_length_last; int header_length_last;
int header_length_total; int header_length_total;
rrc_eNB_ue_context_t *ue_contextP = NULL; rrc_eNB_ue_context_t *ue_contextP = NULL;
int nb_mac_CC = RC.nb_mac_CC[module_idP];
long dl_Bandwidth;
if(is_pmch_subframe(frameP,subframeP,&RC.eNB[module_idP][0]->frame_parms)){ if(is_pmch_subframe(frameP,subframeP,&RC.eNB[module_idP][0]->frame_parms)){
//LOG_E(MAC,"Frame[%d] SF:%d This SF should not be allocated\n",frameP,subframeP); //LOG_E(MAC,"Frame[%d] SF:%d This SF should not be allocated\n",frameP,subframeP);
return ; return ;
} }
start_meas(&eNB->schedule_dlsch); start_meas(&eNB->schedule_dlsch);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH, VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH,
VCD_FUNCTION_IN); VCD_FUNCTION_IN);
// for TDD: check that we have to act here, otherwise return // for TDD: check that we have to act here, otherwise return
if (cc[0].tdd_Config) { if (cc[CC_id].tdd_Config) {
tdd_sfa = cc[0].tdd_Config->subframeAssignment; tdd_sfa = cc[CC_id].tdd_Config->subframeAssignment;
switch (subframeP) { switch (subframeP) {
case 0: case 0:
...@@ -567,33 +553,25 @@ schedule_ue_spec(module_id_t module_idP, ...@@ -567,33 +553,25 @@ schedule_ue_spec(module_id_t module_idP,
aggregation = 2; aggregation = 2;
for (CC_id = 0, eNB_stats = &eNB->eNB_stats[0]; CC_id < nb_mac_CC; CC_id++, eNB_stats++) { for (int i = 0; i < N_RB_DL; i++)
dl_Bandwidth = cc[CC_id].mib->message.dl_Bandwidth; if (cc[CC_id].vrb_map[i] != 0)
N_RB_DL[CC_id] = to_prb(dl_Bandwidth); total_nb_available_rb--;
min_rb_unit[CC_id] = get_min_rb_unit(module_idP, CC_id);
// get number of PRBs less those used by common channels // store the global enb stats:
total_nb_available_rb[CC_id] = N_RB_DL[CC_id]; eNB->eNB_stats[CC_id].num_dlactive_UEs = UE_list->num_UEs;
eNB->eNB_stats[CC_id].available_prbs = total_nb_available_rb;
for (i = 0; i < N_RB_DL[CC_id]; i++) eNB->eNB_stats[CC_id].total_available_prbs += total_nb_available_rb;
if (cc[CC_id].vrb_map[i] != 0) eNB->eNB_stats[CC_id].dlsch_bytes_tx = 0;
total_nb_available_rb[CC_id]--; eNB->eNB_stats[CC_id].dlsch_pdus_tx = 0;
N_RBG[CC_id] = to_rbg(dl_Bandwidth);
// store the global enb stats:
eNB_stats->num_dlactive_UEs = UE_list->num_UEs;
eNB_stats->available_prbs = total_nb_available_rb[CC_id];
eNB_stats->total_available_prbs += total_nb_available_rb[CC_id];
eNB_stats->dlsch_bytes_tx = 0;
eNB_stats->dlsch_pdus_tx = 0;
}
// CALLING Pre_Processor for downlink scheduling // CALLING Pre_Processor for downlink scheduling
// (Returns estimation of RBs required by each UE and the allocation on sub-band) // (Returns estimation of RBs required by each UE and the allocation on sub-band)
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR, VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR,
VCD_FUNCTION_IN); VCD_FUNCTION_IN);
start_meas(&eNB->schedule_dlsch_preprocessor); start_meas(&eNB->schedule_dlsch_preprocessor);
memset(eNB->slice_info.rballoc_sub, 0, sizeof(eNB->slice_info.rballoc_sub));
dlsch_scheduler_pre_processor(module_idP, dlsch_scheduler_pre_processor(module_idP,
slice_idxP, 0, //slice_idxP,
frameP, frameP,
subframeP, subframeP,
mbsfn_flag, mbsfn_flag,
...@@ -610,372 +588,587 @@ schedule_ue_spec(module_id_t module_idP, ...@@ -610,372 +588,587 @@ schedule_ue_spec(module_id_t module_idP,
/* the interslice multiplexing re-sorts the UE_list for the slices it tries /* the interslice multiplexing re-sorts the UE_list for the slices it tries
* to multiplex, so we need to sort it for the current slice again */ * to multiplex, so we need to sort it for the current slice again */
sort_UEs(module_idP, sort_UEs(module_idP,
slice_idxP, 0,//slice_idxP,
frameP, frameP,
subframeP); subframeP);
} }
for (CC_id = 0; CC_id < nb_mac_CC; CC_id++) { LOG_D(MAC, "doing schedule_ue_spec for CC_id %d\n",
LOG_D(MAC, "doing schedule_ue_spec for CC_id %d\n", CC_id);
CC_id); dl_req = &eNB->DL_req[CC_id].dl_config_request_body;
dl_req = &eNB->DL_req[CC_id].dl_config_request_body;
if (mbsfn_flag[CC_id] > 0) //if (mbsfn_flag[CC_id] > 0)
continue; // return;
for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
LOG_D(MAC, "doing schedule_ue_spec for CC_id %d UE %d\n", LOG_D(MAC, "doing schedule_ue_spec for CC_id %d UE %d\n",
CC_id, CC_id,
UE_id); UE_id);
continue_flag = 0; // reset the flag to allow allocation for the remaining UEs continue_flag = 0; // reset the flag to allow allocation for the remaining UEs
rnti = UE_RNTI(module_idP, UE_id); rnti = UE_RNTI(module_idP, UE_id);
ue_sched_ctrl = &UE_list->UE_sched_ctrl[UE_id]; ue_sched_ctrl = &UE_list->UE_sched_ctrl[UE_id];
ue_template = &UE_list->UE_template[CC_id][UE_id]; ue_template = &UE_list->UE_template[CC_id][UE_id];
if (ue_template->rach_resource_type > 0) {
continue_flag = 1;
}
if (&(UE_list->eNB_UE_stats[CC_id][UE_id]) == NULL) { if (ue_template->rach_resource_type > 0) {
LOG_D(MAC, "[eNB] Cannot find eNB_UE_stats\n"); continue_flag = 1;
continue_flag = 1; }
} else {
eNB_UE_stats = &(UE_list->eNB_UE_stats[CC_id][UE_id]);
}
if (continue_flag != 1) { if (&(UE_list->eNB_UE_stats[CC_id][UE_id]) == NULL) {
switch (get_tmode(module_idP, LOG_D(MAC, "[eNB] Cannot find eNB_UE_stats\n");
CC_id, continue_flag = 1;
UE_id)) { } else {
case 1: eNB_UE_stats = &(UE_list->eNB_UE_stats[CC_id][UE_id]);
case 2: }
case 7:
aggregation = get_aggregation(get_bw_index(module_idP,
CC_id),
ue_sched_ctrl->dl_cqi[CC_id],
format1);
break;
case 3: if (continue_flag != 1) {
aggregation = get_aggregation(get_bw_index(module_idP, switch (get_tmode(module_idP,
CC_id), CC_id,
ue_sched_ctrl->dl_cqi[CC_id], UE_id)) {
format2A); case 1:
break; case 2:
case 7:
aggregation = get_aggregation(get_bw_index(module_idP,
CC_id),
ue_sched_ctrl->dl_cqi[CC_id],
format1);
break;
default: case 3:
AssertFatal(1==0,"Unsupported transmission mode %d\n", get_tmode(module_idP, CC_id, UE_id)); aggregation = get_aggregation(get_bw_index(module_idP,
aggregation = 2; CC_id),
break; ue_sched_ctrl->dl_cqi[CC_id],
} format2A);
} break;
/* if (continue_flag != 1 */ default:
if (ue_sched_ctrl->pre_nb_available_rbs[CC_id] == 0 || // no RBs allocated AssertFatal(1==0,"Unsupported transmission mode %d\n", get_tmode(module_idP, CC_id, UE_id));
CCE_allocation_infeasible(module_idP, aggregation = 2;
CC_id, break;
1,
subframeP,
aggregation,
rnti)) {
LOG_D(MAC, "[eNB %d] Frame %d : no RB allocated for UE %d on CC_id %d: continue \n",
module_idP,
frameP,
UE_id,
CC_id);
continue_flag = 1; //to next user (there might be rbs availiable for other UEs in TM5
} }
}
// If TDD /* if (continue_flag != 1 */
if (cc[CC_id].tdd_Config != NULL) { //TDD if (ue_sched_ctrl->pre_nb_available_rbs[CC_id] == 0 || // no RBs allocated
set_ue_dai(subframeP, CCE_allocation_infeasible(module_idP,
UE_id, CC_id,
CC_id, 1,
cc[CC_id].tdd_Config->subframeAssignment, subframeP,
UE_list); aggregation,
// update UL DAI after DLSCH scheduling rnti)) {
set_ul_DAI(module_idP, LOG_D(MAC, "[eNB %d] Frame %d : no RB allocated for UE %d on CC_id %d: continue \n",
UE_id, module_idP,
CC_id, frameP,
frameP, UE_id,
subframeP); CC_id);
} continue_flag = 1; //to next user (there might be rbs availiable for other UEs in TM5
}
if (continue_flag == 1) { // If TDD
add_ue_dlsch_info(module_idP, if (cc[CC_id].tdd_Config != NULL) { //TDD
CC_id, set_ue_dai(subframeP,
UE_id, UE_id,
subframeP, CC_id,
S_DL_NONE, cc[CC_id].tdd_Config->subframeAssignment,
rnti); UE_list);
continue; // update UL DAI after DLSCH scheduling
} set_ul_DAI(module_idP,
UE_id,
CC_id,
frameP,
subframeP);
}
nb_available_rb = ue_sched_ctrl->pre_nb_available_rbs[CC_id]; if (continue_flag == 1) {
harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config, add_ue_dlsch_info(module_idP,
frameP, CC_id,
subframeP); UE_id,
round_DL = ue_sched_ctrl->round[CC_id][harq_pid]; subframeP,
eNB_UE_stats->crnti = rnti; S_DL_NONE,
eNB_UE_stats->rrc_status = mac_eNB_get_rrc_status(module_idP, rnti); rnti);
eNB_UE_stats->harq_pid = harq_pid; continue;
eNB_UE_stats->harq_round = round_DL; }
if (eNB_UE_stats->rrc_status < RRC_RECONFIGURED) {
ue_sched_ctrl->uplane_inactivity_timer = 0;
}
if (eNB_UE_stats->rrc_status < RRC_CONNECTED) { nb_available_rb = ue_sched_ctrl->pre_nb_available_rbs[CC_id];
LOG_D(MAC, "UE %d is not in RRC_CONNECTED\n", UE_id); harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,
continue; frameP,
} subframeP);
round_DL = ue_sched_ctrl->round[CC_id][harq_pid];
eNB_UE_stats->crnti = rnti;
eNB_UE_stats->rrc_status = mac_eNB_get_rrc_status(module_idP, rnti);
eNB_UE_stats->harq_pid = harq_pid;
eNB_UE_stats->harq_round = round_DL;
if (eNB_UE_stats->rrc_status < RRC_RECONFIGURED) {
ue_sched_ctrl->uplane_inactivity_timer = 0;
}
header_length_total = 0; if (eNB_UE_stats->rrc_status < RRC_CONNECTED) {
sdu_length_total = 0; LOG_D(MAC, "UE %d is not in RRC_CONNECTED\n", UE_id);
num_sdus = 0; continue;
}
/* header_length_total = 0;
DevCheck(((eNB_UE_stats->dl_cqi < MIN_CQI_VALUE) || sdu_length_total = 0;
(eNB_UE_stats->dl_cqi > MAX_CQI_VALUE)), num_sdus = 0;
eNB_UE_stats->dl_cqi, MIN_CQI_VALUE, MAX_CQI_VALUE);
*/ /*
if (NFAPI_MODE != NFAPI_MONOLITHIC) { DevCheck(((eNB_UE_stats->dl_cqi < MIN_CQI_VALUE) ||
eNB_UE_stats->dlsch_mcs1 = cqi_to_mcs[ue_sched_ctrl->dl_cqi[CC_id]]; (eNB_UE_stats->dl_cqi > MAX_CQI_VALUE)),
} else { // this operation is also done in the preprocessor eNB_UE_stats->dl_cqi, MIN_CQI_VALUE, MAX_CQI_VALUE);
eNB_UE_stats->dlsch_mcs1 = cmin(eNB_UE_stats->dlsch_mcs1, */
eNB->slice_info.dl[slice_idxP].maxmcs); // cmin(eNB_UE_stats->dlsch_mcs1, openair_daq_vars.target_ue_dl_mcs); if (NFAPI_MODE != NFAPI_MONOLITHIC) {
} eNB_UE_stats->dlsch_mcs1 = cqi_to_mcs[ue_sched_ctrl->dl_cqi[CC_id]];
} else { // this operation is also done in the preprocessor
eNB_UE_stats->dlsch_mcs1 = cmin(eNB_UE_stats->dlsch_mcs1,
eNB->slice_info.dl[0/*slice_idxP*/].maxmcs); // cmin(eNB_UE_stats->dlsch_mcs1, openair_daq_vars.target_ue_dl_mcs);
}
// Store stats // Store stats
// eNB_UE_stats->dl_cqi= eNB_UE_stats->dl_cqi; // eNB_UE_stats->dl_cqi= eNB_UE_stats->dl_cqi;
// Initializing the rb allocation indicator for each UE // Initializing the rb allocation indicator for each UE
for (j = 0; j < N_RBG[CC_id]; j++) { for (j = 0; j < N_RBG; j++) {
ue_template->rballoc_subband[harq_pid][j] = 0; ue_template->rballoc_subband[harq_pid][j] = 0;
} }
LOG_D(MAC, "[eNB %d] Frame %d: Scheduling UE %d on CC_id %d (rnti %x, harq_pid %d, round %d, rb %d, cqi %d, mcs %d, rrc %d)\n", LOG_D(MAC, "[eNB %d] Frame %d: Scheduling UE %d on CC_id %d (rnti %x, harq_pid %d, round %d, rb %d, cqi %d, mcs %d, rrc %d)\n",
module_idP, module_idP,
frameP, frameP,
UE_id, UE_id,
CC_id, CC_id,
rnti, rnti,
harq_pid, harq_pid,
round_DL, round_DL,
nb_available_rb, nb_available_rb,
ue_sched_ctrl->dl_cqi[CC_id], ue_sched_ctrl->dl_cqi[CC_id],
eNB_UE_stats->dlsch_mcs1, eNB_UE_stats->dlsch_mcs1,
eNB_UE_stats->rrc_status); eNB_UE_stats->rrc_status);
/* Process retransmission */ /* Process retransmission */
if (round_DL != 8) { if (round_DL != 8) {
// get freq_allocation // get freq_allocation
nb_rb = ue_template->nb_rb[harq_pid]; nb_rb = ue_template->nb_rb[harq_pid];
TBS = get_TBS_DL(ue_template->oldmcs1[harq_pid], TBS = get_TBS_DL(ue_template->oldmcs1[harq_pid],
nb_rb); nb_rb);
if (nb_rb <= nb_available_rb) {
/* CDRX */
ue_sched_ctrl->harq_rtt_timer[CC_id][harq_pid] = 1; // restart HARQ RTT timer
if (ue_sched_ctrl->cdrx_configured) {
ue_sched_ctrl->drx_retransmission_timer[harq_pid] = 0; // stop drx retransmission
/*
* Note: contrary to the spec drx_retransmission_timer[harq_pid] is reset not stop.
*/
if (harq_pid == 0) {
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_DRX_RETRANSMISSION_HARQ0, (unsigned long) ue_sched_ctrl->drx_retransmission_timer[0]);
}
}
if (nb_rb <= nb_available_rb) { if (cc[CC_id].tdd_Config != NULL) {
/* CDRX */ ue_template->DAI++;
ue_sched_ctrl->harq_rtt_timer[CC_id][harq_pid] = 1; // restart HARQ RTT timer update_ul_dci(module_idP,
CC_id,
rnti,
ue_template->DAI,
subframeP);
LOG_D(MAC, "DAI update: CC_id %d subframeP %d: UE %d, DAI %d\n",
CC_id,
subframeP,
UE_id,
ue_template->DAI);
}
if (ue_sched_ctrl->cdrx_configured) { if (nb_rb == ue_sched_ctrl->pre_nb_available_rbs[CC_id]) {
ue_sched_ctrl->drx_retransmission_timer[harq_pid] = 0; // stop drx retransmission for (j = 0; j < N_RBG; j++) { // for indicating the rballoc for each sub-band
ue_template->rballoc_subband[harq_pid][j] = ue_sched_ctrl->rballoc_sub_UE[CC_id][j];
}
} else {
nb_rb_temp = nb_rb;
j = 0;
/* while ((nb_rb_temp > 0) && (j < N_RBG)) {
* Note: contrary to the spec drx_retransmission_timer[harq_pid] is reset not stop. if (ue_sched_ctrl->rballoc_sub_UE[CC_id][j] == 1) {
*/ if (ue_template->rballoc_subband[harq_pid][j])
if (harq_pid == 0) { LOG_W(MAC, "WARN: rballoc_subband not free for retrans?\n");
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_DRX_RETRANSMISSION_HARQ0, (unsigned long) ue_sched_ctrl->drx_retransmission_timer[0]);
ue_template->rballoc_subband[harq_pid][j] = ue_sched_ctrl->rballoc_sub_UE[CC_id][j];
nb_rb_temp -= min_rb_unit;
if ((j == N_RBG - 1) && (N_RB_DL == 25 || N_RB_DL == 50))
nb_rb_temp++;
} }
}
if (cc[CC_id].tdd_Config != NULL) { j++;
ue_template->DAI++;
update_ul_dci(module_idP,
CC_id,
rnti,
ue_template->DAI,
subframeP);
LOG_D(MAC, "DAI update: CC_id %d subframeP %d: UE %d, DAI %d\n",
CC_id,
subframeP,
UE_id,
ue_template->DAI);
} }
}
if (nb_rb == ue_sched_ctrl->pre_nb_available_rbs[CC_id]) { nb_available_rb -= nb_rb;
for (j = 0; j < N_RBG[CC_id]; j++) { // for indicating the rballoc for each sub-band
ue_template->rballoc_subband[harq_pid][j] = ue_sched_ctrl->rballoc_sub_UE[CC_id][j]; switch (get_tmode(module_idP, CC_id, UE_id)) {
case 1:
case 2:
case 7:
default:
LOG_D(MAC, "retransmission DL_REQ: rnti:%x\n",
rnti);
dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu];
memset((void *) dl_config_pdu,
0,
sizeof(nfapi_dl_config_request_pdu_t));
dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE;
dl_config_pdu->pdu_size = (uint8_t) (2 + sizeof(nfapi_dl_config_dci_dl_pdu));
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level =
get_aggregation(get_bw_index(module_idP,
CC_id),
ue_sched_ctrl->dl_cqi[CC_id],
format1);
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI: see Table 4-10 from SCF082 - nFAPI specifications
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = 1; // Don't adjust power when retransmitting
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = ue_template->oldNDI[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = ue_template->oldmcs1[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = round_DL & 3;
// TDD
if (cc[CC_id].tdd_Config != NULL) {
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (ue_template->DAI - 1) & 3;
LOG_D(MAC, "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, dai %d, mcs %d\n",
module_idP,
CC_id,
harq_pid,
round_DL,
ue_template->DAI - 1,
ue_template->oldmcs1[harq_pid]);
} else {
LOG_D(MAC, "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, mcs %d\n",
module_idP,
CC_id,
harq_pid,
round_DL,
ue_template->oldmcs1[harq_pid]);
} }
} else {
nb_rb_temp = nb_rb;
j = 0;
while ((nb_rb_temp > 0) && (j < N_RBG[CC_id])) { if (!CCE_allocation_infeasible(module_idP,
if (ue_sched_ctrl->rballoc_sub_UE[CC_id][j] == 1) { CC_id,
if (ue_template->rballoc_subband[harq_pid][j]) 1,
LOG_W(MAC, "WARN: rballoc_subband not free for retrans?\n"); subframeP,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
rnti)) {
dl_req->number_dci++;
dl_req->number_pdu++;
dl_req->tl.tag = NFAPI_DL_CONFIG_REQUEST_BODY_TAG;
eNB->DL_req[CC_id].sfn_sf = frameP<<4 | subframeP;
eNB->DL_req[CC_id].header.message_id = NFAPI_DL_CONFIG_REQUEST;
fill_nfapi_dlsch_config(eNB,
dl_req,
TBS,
-1, // retransmission, no pdu_index
rnti,
0, // type 0 allocation from 7.1.6 in 36.213
0, // virtual_resource_block_assignment_flag, unused here
0, // resource_block_coding, to be filled in later
getQm(ue_template->oldmcs1[harq_pid]),
round_DL & 3, // redundancy version
1, // transport blocks
0, // transport block to codeword swap flag
cc[CC_id].p_eNB == 1 ? 0 : 1, // transmission_scheme
1, // number of layers
1, // number of subbands
// uint8_t codebook_index,
4, // UE category capacity
ue_template->physicalConfigDedicated->pdsch_ConfigDedicated->p_a,
0, // delta_power_offset for TM5
0, // ngap
0, // nprb
cc[CC_id].p_eNB == 1 ? 1 : 2, // transmission mode
0, //number of PRBs treated as one subband, not used here
0); // number of beamforming vectors, not used here
LOG_D(MAC, "Filled NFAPI configuration for DCI/DLSCH %d, retransmission round %d\n",
eNB->pdu_index[CC_id],
round_DL);
program_dlsch_acknak(module_idP,
CC_id,
UE_id,
frameP,
subframeP,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx);
// No TX request for retransmission (check if null request for FAPI)
} else {
LOG_W(MAC, "Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d\%x, infeasible CCE allocation\n",
frameP,
subframeP,
UE_id,
rnti);
}
}
add_ue_dlsch_info(module_idP,
CC_id, UE_id,
subframeP,
S_DL_SCHEDULED,
rnti);
//eNB_UE_stats->dlsch_trials[round]++;
eNB_UE_stats->num_retransmission += 1;
eNB_UE_stats->rbs_used_retx = nb_rb;
eNB_UE_stats->total_rbs_used_retx += nb_rb;
eNB_UE_stats->dlsch_mcs2 = eNB_UE_stats->dlsch_mcs1;
} else {
LOG_D(MAC,
"[eNB %d] Frame %d CC_id %d : don't schedule UE %d, its retransmission takes more resources than we have\n",
module_idP,
frameP,
CC_id,
UE_id);
}
} else {
/* This is a potentially new SDU opportunity */
rlc_status.bytes_in_buffer = 0;
// Now check RLC information to compute number of required RBs
// get maximum TBS size for RLC request
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1,
nb_available_rb);
// add the length for all the control elements (timing adv, drx, etc) : header + payload
ue_template->rballoc_subband[harq_pid][j] = ue_sched_ctrl->rballoc_sub_UE[CC_id][j]; if (ue_sched_ctrl->ta_timer == 0) {
nb_rb_temp -= min_rb_unit[CC_id]; ta_update = ue_sched_ctrl->ta_update;
if ((j == N_RBG[CC_id] - 1) && (N_RB_DL[CC_id] == 25 || N_RB_DL[CC_id] == 50)) /* if we send TA then set timer to not send it for a while */
nb_rb_temp++; if (ta_update != 31) {
} ue_sched_ctrl->ta_timer = 20;
}
/* reset ta_update */
ue_sched_ctrl->ta_update = 31;
} else {
ta_update = 31;
}
ta_len = (ta_update != 31) ? 2 : 0;
// RLC data on DCCH
if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) {
rlc_status = mac_rlc_status_ind(module_idP,
rnti,
module_idP,
frameP,
subframeP,
ENB_FLAG_YES,
MBMS_FLAG_NO,
DCCH,
TBS - ta_len - header_length_total - sdu_length_total - 3,
0,
0
);
sdu_lengths[0] = 0;
if (rlc_status.bytes_in_buffer > 0) {
LOG_D(MAC, "[eNB %d] SFN/SF %d.%d, DL-DCCH->DLSCH CC_id %d, Requesting %d bytes from RLC (RRC message)\n",
module_idP,
frameP,
subframeP,
CC_id,
TBS - ta_len - header_length_total - sdu_length_total - 3);
sdu_lengths[0] = mac_rlc_data_req(module_idP,
rnti,
module_idP,
frameP,
ENB_FLAG_YES,
MBMS_FLAG_NO,
DCCH,
TBS, //not used
(char *)&dlsch_buffer[0],
0,
0
);
j++; if((rrc_release_info.num_UEs > 0) && (rlc_am_mui.rrc_mui_num > 0)) {
while(pthread_mutex_trylock(&rrc_release_freelist)) {
/* spin... */
} }
}
nb_available_rb -= nb_rb; uint16_t release_total = 0;
switch (get_tmode(module_idP, CC_id, UE_id)) { for (release_num = 0, release_ctrl = &rrc_release_info.RRC_release_ctrl[0];
case 1: release_num < NUMBER_OF_UE_MAX;
case 2: release_num++, release_ctrl++) {
case 7: if(release_ctrl->flag > 0) {
default: release_total++;
LOG_D(MAC, "retransmission DL_REQ: rnti:%x\n",
rnti);
dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu];
memset((void *) dl_config_pdu,
0,
sizeof(nfapi_dl_config_request_pdu_t));
dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE;
dl_config_pdu->pdu_size = (uint8_t) (2 + sizeof(nfapi_dl_config_dci_dl_pdu));
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level =
get_aggregation(get_bw_index(module_idP,
CC_id),
ue_sched_ctrl->dl_cqi[CC_id],
format1);
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI: see Table 4-10 from SCF082 - nFAPI specifications
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = 1; // Don't adjust power when retransmitting
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = ue_template->oldNDI[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = ue_template->oldmcs1[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = round_DL & 3;
// TDD
if (cc[CC_id].tdd_Config != NULL) {
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (ue_template->DAI - 1) & 3;
LOG_D(MAC, "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, dai %d, mcs %d\n",
module_idP,
CC_id,
harq_pid,
round_DL,
ue_template->DAI - 1,
ue_template->oldmcs1[harq_pid]);
} else { } else {
LOG_D(MAC, "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, mcs %d\n", continue;
module_idP,
CC_id,
harq_pid,
round_DL,
ue_template->oldmcs1[harq_pid]);
} }
if (!CCE_allocation_infeasible(module_idP, if(release_ctrl->flag == 1) {
CC_id, if(release_ctrl->rnti == rnti) {
1, for(uint16_t mui_num = 0; mui_num < rlc_am_mui.rrc_mui_num; mui_num++) {
subframeP, if(release_ctrl->rrc_eNB_mui == rlc_am_mui.rrc_mui[mui_num]) {
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level, release_ctrl->flag = 3;
rnti)) { LOG_D(MAC,"DLSCH Release send:index %d rnti %x mui %d mui_num %d flag 1->3\n",
dl_req->number_dci++; release_num,
dl_req->number_pdu++; rnti,
dl_req->tl.tag = NFAPI_DL_CONFIG_REQUEST_BODY_TAG; rlc_am_mui.rrc_mui[mui_num],
eNB->DL_req[CC_id].sfn_sf = frameP<<4 | subframeP; mui_num);
eNB->DL_req[CC_id].header.message_id = NFAPI_DL_CONFIG_REQUEST; break;
fill_nfapi_dlsch_config(eNB, }
dl_req, }
TBS, }
-1, // retransmission, no pdu_index }
rnti,
0, // type 0 allocation from 7.1.6 in 36.213 if(release_ctrl->flag == 2) {
0, // virtual_resource_block_assignment_flag, unused here if(release_ctrl->rnti == rnti) {
0, // resource_block_coding, to be filled in later for (uint16_t mui_num = 0; mui_num < rlc_am_mui.rrc_mui_num; mui_num++) {
getQm(ue_template->oldmcs1[harq_pid]), if(release_ctrl->rrc_eNB_mui == rlc_am_mui.rrc_mui[mui_num]) {
round_DL & 3, // redundancy version release_ctrl->flag = 4;
1, // transport blocks LOG_D(MAC, "DLSCH Release send:index %d rnti %x mui %d mui_num %d flag 2->4\n",
0, // transport block to codeword swap flag release_num,
cc[CC_id].p_eNB == 1 ? 0 : 1, // transmission_scheme rnti,
1, // number of layers rlc_am_mui.rrc_mui[mui_num],
1, // number of subbands mui_num);
// uint8_t codebook_index, break;
4, // UE category capacity }
ue_template->physicalConfigDedicated->pdsch_ConfigDedicated->p_a, }
0, // delta_power_offset for TM5 }
0, // ngap
0, // nprb
cc[CC_id].p_eNB == 1 ? 1 : 2, // transmission mode
0, //number of PRBs treated as one subband, not used here
0); // number of beamforming vectors, not used here
LOG_D(MAC, "Filled NFAPI configuration for DCI/DLSCH %d, retransmission round %d\n",
eNB->pdu_index[CC_id],
round_DL);
program_dlsch_acknak(module_idP,
CC_id,
UE_id,
frameP,
subframeP,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx);
// No TX request for retransmission (check if null request for FAPI)
} else {
LOG_W(MAC, "Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d\%x, infeasible CCE allocation\n",
frameP,
subframeP,
UE_id,
rnti);
} }
if(release_total >= rrc_release_info.num_UEs)
break;
}
pthread_mutex_unlock(&rrc_release_freelist);
} }
add_ue_dlsch_info(module_idP, for (ra_ii = 0, ra = &eNB->common_channels[CC_id].ra[0]; ra_ii < NB_RA_PROC_MAX; ra_ii++, ra++) {
CC_id, UE_id, if ((ra->rnti == rnti) && (ra->state == MSGCRNTI)) {
subframeP, for (uint16_t mui_num = 0; mui_num < rlc_am_mui.rrc_mui_num; mui_num++) {
S_DL_SCHEDULED, if (ra->crnti_rrc_mui == rlc_am_mui.rrc_mui[mui_num]) {
rnti); ra->crnti_harq_pid = harq_pid;
//eNB_UE_stats->dlsch_trials[round]++; ra->state = MSGCRNTI_ACK;
eNB_UE_stats->num_retransmission += 1; break;
eNB_UE_stats->rbs_used_retx = nb_rb; }
eNB_UE_stats->total_rbs_used_retx += nb_rb; }
eNB_UE_stats->dlsch_mcs2 = eNB_UE_stats->dlsch_mcs1; }
} else { }
LOG_D(MAC,
"[eNB %d] Frame %d CC_id %d : don't schedule UE %d, its retransmission takes more resources than we have\n", T(T_ENB_MAC_UE_DL_SDU,
T_INT(module_idP),
T_INT(CC_id),
T_INT(rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_INT(DCCH),
T_INT(sdu_lengths[0]));
LOG_D(MAC, "[eNB %d][DCCH] CC_id %d Got %d bytes from RLC\n",
module_idP, module_idP,
frameP,
CC_id, CC_id,
UE_id); sdu_lengths[0]);
} sdu_length_total = sdu_lengths[0];
} else { sdu_lcids[0] = DCCH;
/* This is a potentially new SDU opportunity */ eNB_UE_stats->lcid_sdu[0] = DCCH;
rlc_status.bytes_in_buffer = 0; eNB_UE_stats->sdu_length_tx[DCCH] = sdu_lengths[0];
// Now check RLC information to compute number of required RBs eNB_UE_stats->num_pdu_tx[DCCH] += 1;
// get maximum TBS size for RLC request eNB_UE_stats->num_bytes_tx[DCCH] += sdu_lengths[0];
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, header_length_last = 1 + 1 + (sdu_lengths[0] >= 128);
nb_available_rb); header_length_total += header_length_last;
num_sdus = 1;
#ifdef DEBUG_eNB_SCHEDULER
LOG_T(MAC, "[eNB %d][DCCH] CC_id %d Got %d bytes :",
module_idP,
CC_id,
sdu_lengths[0]);
// add the length for all the control elements (timing adv, drx, etc) : header + payload for (j = 0; j < sdu_lengths[0]; ++j) {
LOG_T(MAC, "%x ",
dlsch_buffer[j]);
}
if (ue_sched_ctrl->ta_timer == 0) { LOG_T(MAC, "\n");
ta_update = ue_sched_ctrl->ta_update; #endif
}
}
/* if we send TA then set timer to not send it for a while */ // RLC data on DCCH1
if (ta_update != 31) { if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) {
ue_sched_ctrl->ta_timer = 20; rlc_status = mac_rlc_status_ind(module_idP,
rnti,
module_idP,
frameP,
subframeP,
ENB_FLAG_YES,
MBMS_FLAG_NO,
DCCH + 1,
TBS - ta_len - header_length_total - sdu_length_total - 3,
0,
0
);
// DCCH SDU
sdu_lengths[num_sdus] = 0;
if (rlc_status.bytes_in_buffer > 0) {
LOG_D(MAC, "[eNB %d], Frame %d, DCCH1->DLSCH, CC_id %d, Requesting %d bytes from RLC (RRC message)\n",
module_idP, frameP, CC_id,
TBS - ta_len - header_length_total - sdu_length_total - 3);
sdu_lengths[num_sdus] += mac_rlc_data_req(module_idP,
rnti,
module_idP,
frameP,
ENB_FLAG_YES,
MBMS_FLAG_NO, DCCH + 1,
TBS, //not used
(char *) &dlsch_buffer[sdu_length_total],
0,
0
);
T(T_ENB_MAC_UE_DL_SDU,
T_INT(module_idP),
T_INT(CC_id),
T_INT(rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_INT(DCCH + 1),
T_INT(sdu_lengths[num_sdus]));
sdu_lcids[num_sdus] = DCCH1;
sdu_length_total += sdu_lengths[num_sdus];
eNB_UE_stats->lcid_sdu[num_sdus] = DCCH1;
eNB_UE_stats->sdu_length_tx[DCCH1] = sdu_lengths[num_sdus];
eNB_UE_stats->num_pdu_tx[DCCH1] += 1;
eNB_UE_stats->num_bytes_tx[DCCH1] += sdu_lengths[num_sdus];
header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128);
header_length_total += header_length_last;
num_sdus++;
#ifdef DEBUG_eNB_SCHEDULER
LOG_T(MAC, "[eNB %d][DCCH1] CC_id %d Got %d bytes :",
module_idP,
CC_id,
sdu_lengths[num_sdus]);
for (j = 0; j < sdu_lengths[num_sdus]; ++j) {
LOG_T(MAC, "%x ",
dlsch_buffer[j]);
} }
/* reset ta_update */ LOG_T(MAC, "\n");
ue_sched_ctrl->ta_update = 31; #endif
} else {
ta_update = 31;
} }
}
ta_len = (ta_update != 31) ? 2 : 0; // TODO: lcid has to be sorted before the actual allocation (similar struct as ue_list).
for (lcid = NB_RB_MAX - 1; lcid >= DTCH; lcid--) {
// TODO: check if the lcid is active
LOG_D(MAC, "[eNB %d], Frame %d, DTCH%d->DLSCH, Checking RLC status (tbs %d, len %d)\n",
module_idP,
frameP,
lcid,
TBS,
TBS - ta_len - header_length_total - sdu_length_total - 3);
// RLC data on DCCH
if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) { if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) {
rlc_status = mac_rlc_status_ind(module_idP, rlc_status = mac_rlc_status_ind(module_idP,
rnti, rnti,
...@@ -984,660 +1177,453 @@ schedule_ue_spec(module_id_t module_idP, ...@@ -984,660 +1177,453 @@ schedule_ue_spec(module_id_t module_idP,
subframeP, subframeP,
ENB_FLAG_YES, ENB_FLAG_YES,
MBMS_FLAG_NO, MBMS_FLAG_NO,
DCCH, lcid,
TBS - ta_len - header_length_total - sdu_length_total - 3, 0, 0 TBS - ta_len - header_length_total - sdu_length_total - 3, 0, 0
); );
sdu_lengths[0] = 0;
if (rlc_status.bytes_in_buffer > 0) { if (rlc_status.bytes_in_buffer > 0) {
LOG_D(MAC, "[eNB %d] SFN/SF %d.%d, DL-DCCH->DLSCH CC_id %d, Requesting %d bytes from RLC (RRC message)\n", LOG_D(MAC, "[eNB %d][USER-PLANE DEFAULT DRB] Frame %d : DTCH->DLSCH, Requesting %d bytes from RLC (lcid %d total hdr len %d)\n",
module_idP, module_idP,
frameP, frameP,
subframeP, TBS - ta_len - header_length_total - sdu_length_total - 3,
CC_id, lcid,
TBS - ta_len - header_length_total - sdu_length_total - 3); header_length_total);
sdu_lengths[0] = mac_rlc_data_req(module_idP, sdu_lengths[num_sdus] = mac_rlc_data_req(module_idP,
rnti, rnti,
module_idP, module_idP,
frameP, frameP,
ENB_FLAG_YES, ENB_FLAG_YES,
MBMS_FLAG_NO, MBMS_FLAG_NO,
DCCH, lcid,
TBS, //not used TBS, //not used
(char *)&dlsch_buffer[0], 0, 0 (char *) &dlsch_buffer[sdu_length_total],
); 0,
0
if((rrc_release_info.num_UEs > 0) && (rlc_am_mui.rrc_mui_num > 0)) { );
while(pthread_mutex_trylock(&rrc_release_freelist)) { T(T_ENB_MAC_UE_DL_SDU,
/* spin... */ T_INT(module_idP),
} T_INT(CC_id),
T_INT(rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_INT(lcid),
T_INT(sdu_lengths[num_sdus]));
LOG_D(MAC, "[eNB %d][USER-PLANE DEFAULT DRB] Got %d bytes for DTCH %d \n",
module_idP,
sdu_lengths[num_sdus],
lcid);
sdu_lcids[num_sdus] = lcid;
sdu_length_total += sdu_lengths[num_sdus];
eNB_UE_stats->num_pdu_tx[lcid]++;
eNB_UE_stats->lcid_sdu[num_sdus] = lcid;
eNB_UE_stats->sdu_length_tx[lcid] = sdu_lengths[num_sdus];
eNB_UE_stats->num_bytes_tx[lcid] += sdu_lengths[num_sdus];
header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128);
header_length_total += header_length_last;
num_sdus++;
ue_sched_ctrl->uplane_inactivity_timer = 0;
// reset RRC inactivity timer after uplane activity
ue_contextP = rrc_eNB_get_ue_context(RC.rrc[module_idP], rnti);
if (ue_contextP != NULL) {
ue_contextP->ue_context.ue_rrc_inactivity_timer = 1;
} else {
LOG_E(MAC, "[eNB %d] CC_id %d Couldn't find the context associated to UE (RNTI %d) and reset RRC inactivity timer\n",
module_idP,
CC_id,
rnti);
}
} // end if (rlc_status.bytes_in_buffer > 0)
} else { // no TBS left
break; // break for (lcid = NB_RB_MAX - 1; lcid >= DTCH; lcid--)
}
}
uint16_t release_total = 0; /* Last header does not have length field */
if (header_length_total) {
header_length_total -= header_length_last;
header_length_total++;
}
for (release_num = 0, release_ctrl = &rrc_release_info.RRC_release_ctrl[0]; // there is at least one SDU or TA command
release_num < NUMBER_OF_UE_MAX; // if (num_sdus > 0 ){
release_num++, release_ctrl++) { if (ta_len + sdu_length_total + header_length_total > 0) {
if(release_ctrl->flag > 0) { // Now compute number of required RBs for total sdu length
release_total++; // Assume RAH format 2
} else { mcs = eNB_UE_stats->dlsch_mcs1;
continue;
}
if(release_ctrl->flag == 1) { if (mcs == 0) {
if(release_ctrl->rnti == rnti) { nb_rb = 4; // don't let the TBS get too small
for(uint16_t mui_num = 0; mui_num < rlc_am_mui.rrc_mui_num; mui_num++) { } else {
if(release_ctrl->rrc_eNB_mui == rlc_am_mui.rrc_mui[mui_num]) { nb_rb = min_rb_unit;
release_ctrl->flag = 3; }
LOG_D(MAC,"DLSCH Release send:index %d rnti %x mui %d mui_num %d flag 1->3\n",
release_num,
rnti,
rlc_am_mui.rrc_mui[mui_num],
mui_num);
break;
}
}
}
}
if(release_ctrl->flag == 2) { TBS = get_TBS_DL(mcs, nb_rb);
if(release_ctrl->rnti == rnti) {
for (uint16_t mui_num = 0; mui_num < rlc_am_mui.rrc_mui_num; mui_num++) {
if(release_ctrl->rrc_eNB_mui == rlc_am_mui.rrc_mui[mui_num]) {
release_ctrl->flag = 4;
LOG_D(MAC, "DLSCH Release send:index %d rnti %x mui %d mui_num %d flag 2->4\n",
release_num,
rnti,
rlc_am_mui.rrc_mui[mui_num],
mui_num);
break;
}
}
}
}
if(release_total >= rrc_release_info.num_UEs) while (TBS < sdu_length_total + header_length_total + ta_len) {
break; nb_rb += min_rb_unit; //
}
pthread_mutex_unlock(&rrc_release_freelist); if (nb_rb > nb_available_rb) { // if we've gone beyond the maximum number of RBs
} // (can happen if N_RB_DL is odd)
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1,
nb_available_rb);
nb_rb = nb_available_rb;
break;
}
for (ra_ii = 0, ra = &eNB->common_channels[CC_id].ra[0]; ra_ii < NB_RA_PROC_MAX; ra_ii++, ra++) { TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1,
if ((ra->rnti == rnti) && (ra->state == MSGCRNTI)) { nb_rb);
for (uint16_t mui_num = 0; mui_num < rlc_am_mui.rrc_mui_num; mui_num++) { }
if (ra->crnti_rrc_mui == rlc_am_mui.rrc_mui[mui_num]) {
ra->crnti_harq_pid = harq_pid;
ra->state = MSGCRNTI_ACK;
break;
}
}
}
}
T(T_ENB_MAC_UE_DL_SDU, if (nb_rb == ue_sched_ctrl->pre_nb_available_rbs[CC_id]) {
T_INT(module_idP), for (j = 0; j < N_RBG; ++j) { // for indicating the rballoc for each sub-band
T_INT(CC_id), ue_template->rballoc_subband[harq_pid][j] = ue_sched_ctrl->rballoc_sub_UE[CC_id][j];
T_INT(rnti), }
T_INT(frameP), } else {
T_INT(subframeP), nb_rb_temp = nb_rb;
T_INT(harq_pid), j = 0;
T_INT(DCCH),
T_INT(sdu_lengths[0]));
LOG_D(MAC, "[eNB %d][DCCH] CC_id %d Got %d bytes from RLC\n",
module_idP,
CC_id,
sdu_lengths[0]);
sdu_length_total = sdu_lengths[0];
sdu_lcids[0] = DCCH;
eNB_UE_stats->lcid_sdu[0] = DCCH;
eNB_UE_stats->sdu_length_tx[DCCH] = sdu_lengths[0];
eNB_UE_stats->num_pdu_tx[DCCH] += 1;
eNB_UE_stats->num_bytes_tx[DCCH] += sdu_lengths[0];
header_length_last = 1 + 1 + (sdu_lengths[0] >= 128);
header_length_total += header_length_last;
num_sdus = 1;
#ifdef DEBUG_eNB_SCHEDULER
LOG_T(MAC, "[eNB %d][DCCH] CC_id %d Got %d bytes :",
module_idP,
CC_id,
sdu_lengths[0]);
for (j = 0; j < sdu_lengths[0]; ++j) { while ((nb_rb_temp > 0) && (j < N_RBG)) {
LOG_T(MAC, "%x ", if (ue_sched_ctrl->rballoc_sub_UE[CC_id][j] == 1) {
dlsch_buffer[j]); ue_template->rballoc_subband[harq_pid][j] = ue_sched_ctrl->rballoc_sub_UE[CC_id][j];
if ((j == N_RBG - 1) && ((N_RB_DL == 25) || (N_RB_DL == 50))) {
nb_rb_temp = nb_rb_temp - min_rb_unit + 1;
} else {
nb_rb_temp = nb_rb_temp - min_rb_unit;
}
} }
LOG_T(MAC, "\n"); j++;
#endif
} }
} }
// RLC data on DCCH1 // decrease mcs until TBS falls below required length
if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) { while ((TBS > sdu_length_total + header_length_total + ta_len) && (mcs > 0)) {
rlc_status = mac_rlc_status_ind(module_idP, mcs--;
rnti, TBS = get_TBS_DL(mcs,
module_idP, nb_rb);
frameP, }
subframeP,
ENB_FLAG_YES,
MBMS_FLAG_NO,
DCCH + 1,
TBS - ta_len - header_length_total - sdu_length_total - 3, 0, 0
);
// DCCH SDU
sdu_lengths[num_sdus] = 0;
if (rlc_status.bytes_in_buffer > 0) {
LOG_D(MAC, "[eNB %d], Frame %d, DCCH1->DLSCH, CC_id %d, Requesting %d bytes from RLC (RRC message)\n",
module_idP, frameP, CC_id,
TBS - ta_len - header_length_total - sdu_length_total - 3);
sdu_lengths[num_sdus] += mac_rlc_data_req(module_idP,
rnti,
module_idP,
frameP,
ENB_FLAG_YES,
MBMS_FLAG_NO, DCCH + 1,
TBS, //not used
(char *) &dlsch_buffer[sdu_length_total], 0, 0
);
T(T_ENB_MAC_UE_DL_SDU,
T_INT(module_idP),
T_INT(CC_id),
T_INT(rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_INT(DCCH + 1),
T_INT(sdu_lengths[num_sdus]));
sdu_lcids[num_sdus] = DCCH1;
sdu_length_total += sdu_lengths[num_sdus];
eNB_UE_stats->lcid_sdu[num_sdus] = DCCH1;
eNB_UE_stats->sdu_length_tx[DCCH1] = sdu_lengths[num_sdus];
eNB_UE_stats->num_pdu_tx[DCCH1] += 1;
eNB_UE_stats->num_bytes_tx[DCCH1] += sdu_lengths[num_sdus];
header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128);
header_length_total += header_length_last;
num_sdus++;
#ifdef DEBUG_eNB_SCHEDULER
LOG_T(MAC, "[eNB %d][DCCH1] CC_id %d Got %d bytes :",
module_idP,
CC_id,
sdu_lengths[num_sdus]);
for (j = 0; j < sdu_lengths[num_sdus]; ++j) { // if we have decreased too much or we don't have enough RBs, increase MCS
LOG_T(MAC, "%x ", while (TBS < sdu_length_total + header_length_total + ta_len &&
dlsch_buffer[j]); ((ue_sched_ctrl->dl_pow_off[CC_id] > 0 && mcs < 28) || (ue_sched_ctrl->dl_pow_off[CC_id] == 0 && mcs <= 15))) {
} mcs++;
TBS = get_TBS_DL(mcs,
nb_rb);
}
LOG_T(MAC, "\n"); LOG_D(MAC, "dlsch_mcs before and after the rate matching = (%d, %d)\n",
eNB_UE_stats->dlsch_mcs1,
mcs);
#ifdef DEBUG_eNB_SCHEDULER
LOG_D(MAC, "[eNB %d] CC_id %d Generated DLSCH header (mcs %d, TBS %d, nb_rb %d)\n",
module_idP,
CC_id,
mcs, TBS,
nb_rb);
// msg("[MAC][eNB ] Reminder of DLSCH with random data %d %d %d %d \n",
// TBS, sdu_length_total, offset, TBS-sdu_length_total-offset);
#endif #endif
}
if (TBS - header_length_total - sdu_length_total - ta_len <= 2) {
padding = TBS - header_length_total - sdu_length_total - ta_len;
post_padding = 0;
} else {
padding = 0;
post_padding = 1;
} }
// TODO: lcid has to be sorted before the actual allocation (similar struct as ue_list). offset = generate_dlsch_header((unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0],
for (lcid = NB_RB_MAX - 1; lcid >= DTCH; lcid--) { num_sdus, //num_sdus
// TODO: check if the lcid is active sdu_lengths, //
LOG_D(MAC, "[eNB %d], Frame %d, DTCH%d->DLSCH, Checking RLC status (tbs %d, len %d)\n", sdu_lcids,
255, // no drx
ta_update, // timing advance
NULL, // contention res id
padding,
post_padding);
//#ifdef DEBUG_eNB_SCHEDULER
if (ta_update != 31) {
LOG_D(MAC,
"[eNB %d][DLSCH] Frame %d Generate header for UE_id %d on CC_id %d: sdu_length_total %d, num_sdus %d, sdu_lengths[0] %d, sdu_lcids[0] %d => payload offset %d,timing advance value : %d, padding %d,post_padding %d,(mcs %d, TBS %d, nb_rb %d),header_length %d\n",
module_idP, module_idP,
frameP, frameP,
lcid, UE_id,
CC_id,
sdu_length_total,
num_sdus,
sdu_lengths[0],
sdu_lcids[0],
offset,
ta_update,
padding,
post_padding,
mcs,
TBS, TBS,
TBS - ta_len - header_length_total - sdu_length_total - 3); nb_rb,
header_length_total);
if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) { }
rlc_status = mac_rlc_status_ind(module_idP,
rnti,
module_idP,
frameP,
subframeP,
ENB_FLAG_YES,
MBMS_FLAG_NO,
lcid,
TBS - ta_len - header_length_total - sdu_length_total - 3, 0, 0
);
if (rlc_status.bytes_in_buffer > 0) { //#endif
LOG_D(MAC, "[eNB %d][USER-PLANE DEFAULT DRB] Frame %d : DTCH->DLSCH, Requesting %d bytes from RLC (lcid %d total hdr len %d)\n", #ifdef DEBUG_eNB_SCHEDULER
module_idP, LOG_T(MAC, "[eNB %d] First 16 bytes of DLSCH : \n");
frameP,
TBS - ta_len - header_length_total - sdu_length_total - 3,
lcid,
header_length_total);
sdu_lengths[num_sdus] = mac_rlc_data_req(module_idP,
rnti,
module_idP,
frameP,
ENB_FLAG_YES,
MBMS_FLAG_NO,
lcid,
TBS, //not used
(char *) &dlsch_buffer[sdu_length_total], 0, 0
);
T(T_ENB_MAC_UE_DL_SDU,
T_INT(module_idP),
T_INT(CC_id),
T_INT(rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_INT(lcid),
T_INT(sdu_lengths[num_sdus]));
LOG_D(MAC, "[eNB %d][USER-PLANE DEFAULT DRB] Got %d bytes for DTCH %d \n",
module_idP,
sdu_lengths[num_sdus],
lcid);
sdu_lcids[num_sdus] = lcid;
sdu_length_total += sdu_lengths[num_sdus];
eNB_UE_stats->num_pdu_tx[lcid]++;
eNB_UE_stats->lcid_sdu[num_sdus] = lcid;
eNB_UE_stats->sdu_length_tx[lcid] = sdu_lengths[num_sdus];
eNB_UE_stats->num_bytes_tx[lcid] += sdu_lengths[num_sdus];
header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128);
header_length_total += header_length_last;
num_sdus++;
ue_sched_ctrl->uplane_inactivity_timer = 0;
// reset RRC inactivity timer after uplane activity
ue_contextP = rrc_eNB_get_ue_context(RC.rrc[module_idP], rnti);
if (ue_contextP != NULL) { for (int i = 0; i < 16; i++) {
ue_contextP->ue_context.ue_rrc_inactivity_timer = 1; LOG_T(MAC, "%x.",
} else { dlsch_buffer[i]);
LOG_E(MAC, "[eNB %d] CC_id %d Couldn't find the context associated to UE (RNTI %d) and reset RRC inactivity timer\n",
module_idP,
CC_id,
rnti);
}
} // end if (rlc_status.bytes_in_buffer > 0)
} else { // no TBS left
break; // break for (lcid = NB_RB_MAX - 1; lcid >= DTCH; lcid--)
}
} }
/* Last header does not have length field */ LOG_T(MAC, "\n");
if (header_length_total) { #endif
header_length_total -= header_length_last; // cycle through SDUs and place in dlsch_buffer
header_length_total++; dlsch_pdu = &UE_list->DLSCH_pdu[CC_id][0][UE_id];
memcpy(&dlsch_pdu->payload[0][offset],
dlsch_buffer,
sdu_length_total);
// memcpy(RC.mac[0].DLSCH_pdu[0][0].payload[0][offset],dcch_buffer,sdu_lengths[0]);
// fill remainder of DLSCH with 0
for (j = 0; j < (TBS - sdu_length_total - offset); j++) {
dlsch_pdu->payload[0][offset + sdu_length_total + j] = 0;
} }
// there is at least one SDU or TA command trace_pdu(DIRECTION_DOWNLINK,
// if (num_sdus > 0 ){ (uint8_t *) dlsch_pdu->payload[0],
if (ta_len + sdu_length_total + header_length_total > 0) { TBS,
// Now compute number of required RBs for total sdu length module_idP,
// Assume RAH format 2 WS_C_RNTI,
mcs = eNB_UE_stats->dlsch_mcs1; UE_RNTI(module_idP,
UE_id),
if (mcs == 0) { eNB->frame,
nb_rb = 4; // don't let the TBS get too small eNB->subframe,
} else { 0,
nb_rb = min_rb_unit[CC_id]; 0);
} T(T_ENB_MAC_UE_DL_PDU_WITH_DATA,
T_INT(module_idP),
TBS = get_TBS_DL(mcs, nb_rb); T_INT(CC_id),
T_INT(rnti),
while (TBS < sdu_length_total + header_length_total + ta_len) { T_INT(frameP),
nb_rb += min_rb_unit[CC_id]; // T_INT(subframeP),
T_INT(harq_pid),
if (nb_rb > nb_available_rb) { // if we've gone beyond the maximum number of RBs T_BUFFER(dlsch_pdu->payload[0],
// (can happen if N_RB_DL is odd) TBS));
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, ue_template->nb_rb[harq_pid] = nb_rb;
nb_available_rb); add_ue_dlsch_info(module_idP,
nb_rb = nb_available_rb; CC_id,
break; UE_id,
} subframeP,
S_DL_SCHEDULED,
TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, rnti);
nb_rb); // store stats
} eNB->eNB_stats[CC_id].dlsch_bytes_tx += sdu_length_total;
eNB->eNB_stats[CC_id].dlsch_pdus_tx += 1;
if (nb_rb == ue_sched_ctrl->pre_nb_available_rbs[CC_id]) { eNB_UE_stats->rbs_used = nb_rb;
for (j = 0; j < N_RBG[CC_id]; ++j) { // for indicating the rballoc for each sub-band eNB_UE_stats->num_mac_sdu_tx = num_sdus;
ue_template->rballoc_subband[harq_pid][j] = ue_sched_ctrl->rballoc_sub_UE[CC_id][j]; eNB_UE_stats->total_rbs_used += nb_rb;
} eNB_UE_stats->dlsch_mcs2 = mcs;
} else { eNB_UE_stats->TBS = TBS;
nb_rb_temp = nb_rb; eNB_UE_stats->overhead_bytes = TBS - sdu_length_total;
j = 0; eNB_UE_stats->total_sdu_bytes += sdu_length_total;
eNB_UE_stats->total_pdu_bytes += TBS;
while ((nb_rb_temp > 0) && (j < N_RBG[CC_id])) { eNB_UE_stats->total_num_pdus += 1;
if (ue_sched_ctrl->rballoc_sub_UE[CC_id][j] == 1) {
ue_template->rballoc_subband[harq_pid][j] = ue_sched_ctrl->rballoc_sub_UE[CC_id][j]; if (cc[CC_id].tdd_Config != NULL) { // TDD
ue_template->DAI++;
if ((j == N_RBG[CC_id] - 1) && ((N_RB_DL[CC_id] == 25) || (N_RB_DL[CC_id] == 50))) { update_ul_dci(module_idP,
nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id] + 1; CC_id,
} else { rnti,
nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id]; ue_template->DAI,
} subframeP);
} }
j++; // do PUCCH power control
// this is the snr
// unit is not dBm, it's special from nfapi
// converting to dBm
snr = (5 * ue_sched_ctrl->pucch1_snr[CC_id] - 640) / 10;
target_snr = eNB->puCch10xSnr / 10;
// this assumes accumulated tpc
// make sure that we are only sending a tpc update once a frame, otherwise the control loop will freak out
int32_t framex10psubframe = ue_template->pucch_tpc_tx_frame * 10 + ue_template->pucch_tpc_tx_subframe;
if (framex10psubframe + 10 <= (frameP * 10) + subframeP || //normal case
(framex10psubframe > (frameP * 10) + subframeP && 10240 - framex10psubframe + (frameP * 10) + subframeP >= 10)) //frame wrap-around
if (ue_sched_ctrl->pucch1_cqi_update[CC_id] == 1) {
ue_sched_ctrl->pucch1_cqi_update[CC_id] = 0;
ue_template->pucch_tpc_tx_frame = frameP;
ue_template->pucch_tpc_tx_subframe = subframeP;
if (snr > target_snr + 4) {
tpc = 0; //-1
} else if (snr < target_snr - 4) {
tpc = 2; //+1
} else {
tpc = 1; //0
} }
}
// decrease mcs until TBS falls below required length LOG_D(MAC, "[eNB %d] DLSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, snr/target snr %d/%d (normal case)\n",
while ((TBS > sdu_length_total + header_length_total + ta_len) && (mcs > 0)) { module_idP,
mcs--; frameP,
TBS = get_TBS_DL(mcs, subframeP,
nb_rb); harq_pid,
} tpc,
snr,
// if we have decreased too much or we don't have enough RBs, increase MCS target_snr);
while (TBS < sdu_length_total + header_length_total + ta_len && } // Po_PUCCH has been updated
((ue_sched_ctrl->dl_pow_off[CC_id] > 0 && mcs < 28) || (ue_sched_ctrl->dl_pow_off[CC_id] == 0 && mcs <= 15))) { else {
mcs++; tpc = 1; //0
TBS = get_TBS_DL(mcs, } // time to do TPC update
nb_rb); else {
} tpc = 1; //0
}
LOG_D(MAC, "dlsch_mcs before and after the rate matching = (%d, %d)\n", dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu];
eNB_UE_stats->dlsch_mcs1, memset((void *) dl_config_pdu,
0,
sizeof(nfapi_dl_config_request_pdu_t));
dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE;
dl_config_pdu->pdu_size = (uint8_t) (2 + sizeof(nfapi_dl_config_dci_dl_pdu));
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level =
get_aggregation(get_bw_index(module_idP,
CC_id),
ue_sched_ctrl->dl_cqi[CC_id],
format1);
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI : see Table 4-10 from SCF082 - nFAPI specifications
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = tpc; // dont adjust power when retransmitting
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1 - ue_template->oldNDI[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = mcs;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 0;
//deactivate second codeword
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_2 = 0;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = 1;
if (cc[CC_id].tdd_Config != NULL) { //TDD
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (ue_template->DAI - 1) & 3;
LOG_D(MAC, "[eNB %d] Initial transmission CC_id %d : harq_pid %d, dai %d, mcs %d\n",
module_idP,
CC_id,
harq_pid,
(ue_template->DAI - 1),
mcs); mcs);
#ifdef DEBUG_eNB_SCHEDULER } else {
LOG_D(MAC, "[eNB %d] CC_id %d Generated DLSCH header (mcs %d, TBS %d, nb_rb %d)\n", LOG_D(MAC, "[eNB %d] Initial transmission CC_id %d : harq_pid %d, mcs %d\n",
module_idP, module_idP,
CC_id, CC_id,
mcs, TBS, harq_pid,
nb_rb); mcs);
// msg("[MAC][eNB ] Reminder of DLSCH with random data %d %d %d %d \n", }
// TBS, sdu_length_total, offset, TBS-sdu_length_total-offset);
#endif
if (TBS - header_length_total - sdu_length_total - ta_len <= 2) {
padding = TBS - header_length_total - sdu_length_total - ta_len;
post_padding = 0;
} else {
padding = 0;
post_padding = 1;
}
offset = generate_dlsch_header((unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0],
num_sdus, //num_sdus
sdu_lengths, //
sdu_lcids,
255, // no drx
ta_update, // timing advance
NULL, // contention res id
padding,
post_padding);
//#ifdef DEBUG_eNB_SCHEDULER
if (ta_update != 31) {
LOG_D(MAC,
"[eNB %d][DLSCH] Frame %d Generate header for UE_id %d on CC_id %d: sdu_length_total %d, num_sdus %d, sdu_lengths[0] %d, sdu_lcids[0] %d => payload offset %d,timing advance value : %d, padding %d,post_padding %d,(mcs %d, TBS %d, nb_rb %d),header_length %d\n",
module_idP,
frameP,
UE_id,
CC_id,
sdu_length_total,
num_sdus,
sdu_lengths[0],
sdu_lcids[0],
offset,
ta_update,
padding,
post_padding,
mcs,
TBS,
nb_rb,
header_length_total);
}
//#endif
#ifdef DEBUG_eNB_SCHEDULER
LOG_T(MAC, "[eNB %d] First 16 bytes of DLSCH : \n");
for (i = 0; i < 16; i++) {
LOG_T(MAC, "%x.",
dlsch_buffer[i]);
}
LOG_T(MAC, "\n");
#endif
// cycle through SDUs and place in dlsch_buffer
dlsch_pdu = &UE_list->DLSCH_pdu[CC_id][0][UE_id];
memcpy(&dlsch_pdu->payload[0][offset],
dlsch_buffer,
sdu_length_total);
// memcpy(RC.mac[0].DLSCH_pdu[0][0].payload[0][offset],dcch_buffer,sdu_lengths[0]);
// fill remainder of DLSCH with 0
for (j = 0; j < (TBS - sdu_length_total - offset); j++) {
dlsch_pdu->payload[0][offset + sdu_length_total + j] = 0;
}
trace_pdu(DIRECTION_DOWNLINK,
(uint8_t *) dlsch_pdu->payload[0],
TBS,
module_idP,
WS_C_RNTI,
UE_RNTI(module_idP,
UE_id),
eNB->frame,
eNB->subframe,
0,
0);
T(T_ENB_MAC_UE_DL_PDU_WITH_DATA,
T_INT(module_idP),
T_INT(CC_id),
T_INT(rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_BUFFER(dlsch_pdu->payload[0],
TBS));
ue_template->nb_rb[harq_pid] = nb_rb;
add_ue_dlsch_info(module_idP,
CC_id,
UE_id,
subframeP,
S_DL_SCHEDULED,
rnti);
// store stats
eNB->eNB_stats[CC_id].dlsch_bytes_tx += sdu_length_total;
eNB->eNB_stats[CC_id].dlsch_pdus_tx += 1;
eNB_UE_stats->rbs_used = nb_rb;
eNB_UE_stats->num_mac_sdu_tx = num_sdus;
eNB_UE_stats->total_rbs_used += nb_rb;
eNB_UE_stats->dlsch_mcs2 = mcs;
eNB_UE_stats->TBS = TBS;
eNB_UE_stats->overhead_bytes = TBS - sdu_length_total;
eNB_UE_stats->total_sdu_bytes += sdu_length_total;
eNB_UE_stats->total_pdu_bytes += TBS;
eNB_UE_stats->total_num_pdus += 1;
if (cc[CC_id].tdd_Config != NULL) { // TDD
ue_template->DAI++;
update_ul_dci(module_idP,
CC_id,
rnti,
ue_template->DAI,
subframeP);
}
// do PUCCH power control
// this is the snr
// unit is not dBm, it's special from nfapi
// converting to dBm
snr = (5 * ue_sched_ctrl->pucch1_snr[CC_id] - 640) / 10;
target_snr = eNB->puCch10xSnr / 10;
// this assumes accumulated tpc
// make sure that we are only sending a tpc update once a frame, otherwise the control loop will freak out
int32_t framex10psubframe = ue_template->pucch_tpc_tx_frame * 10 + ue_template->pucch_tpc_tx_subframe;
if (framex10psubframe + 10 <= (frameP * 10) + subframeP || //normal case
(framex10psubframe > (frameP * 10) + subframeP && 10240 - framex10psubframe + (frameP * 10) + subframeP >= 10)) //frame wrap-around
if (ue_sched_ctrl->pucch1_cqi_update[CC_id] == 1) {
ue_sched_ctrl->pucch1_cqi_update[CC_id] = 0;
ue_template->pucch_tpc_tx_frame = frameP;
ue_template->pucch_tpc_tx_subframe = subframeP;
if (snr > target_snr + 4) { LOG_D(MAC, "Checking feasibility pdu %d (new sdu)\n",
tpc = 0; //-1 dl_req->number_pdu);
} else if (snr < target_snr - 4) {
tpc = 2; //+1
} else {
tpc = 1; //0
}
LOG_D(MAC, "[eNB %d] DLSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, snr/target snr %d/%d (normal case)\n", if (!CCE_allocation_infeasible(module_idP,
module_idP, CC_id,
frameP, 1,
subframeP, subframeP,
harq_pid, dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
tpc, rnti)) {
snr, ue_sched_ctrl->round[CC_id][harq_pid] = 0;
target_snr); dl_req->number_dci++;
} // Po_PUCCH has been updated dl_req->number_pdu++;
else { dl_req->tl.tag = NFAPI_DL_CONFIG_REQUEST_BODY_TAG;
tpc = 1; //0 eNB->DL_req[CC_id].sfn_sf = frameP << 4 | subframeP;
} // time to do TPC update eNB->DL_req[CC_id].header.message_id = NFAPI_DL_CONFIG_REQUEST;
else { /* CDRX */
tpc = 1; //0 ue_sched_ctrl->harq_rtt_timer[CC_id][harq_pid] = 1; // restart HARQ RTT timer
}
dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu]; if (ue_sched_ctrl->cdrx_configured) {
memset((void *) dl_config_pdu, ue_sched_ctrl->drx_inactivity_timer = 1; // restart drx inactivity timer when new transmission
0, ue_sched_ctrl->drx_retransmission_timer[harq_pid] = 0; // stop drx retransmission
sizeof(nfapi_dl_config_request_pdu_t)); /*
dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE; * Note: contrary to the spec drx_retransmission_timer[harq_pid] is reset not stop.
dl_config_pdu->pdu_size = (uint8_t) (2 + sizeof(nfapi_dl_config_dci_dl_pdu)); */
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1; VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_DRX_INACTIVITY, (unsigned long) ue_sched_ctrl->drx_inactivity_timer);
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level =
get_aggregation(get_bw_index(module_idP,
CC_id),
ue_sched_ctrl->dl_cqi[CC_id],
format1);
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI : see Table 4-10 from SCF082 - nFAPI specifications
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = tpc; // dont adjust power when retransmitting
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1 - ue_template->oldNDI[harq_pid];
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = mcs;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 0;
//deactivate second codeword
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_2 = 0;
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = 1;
if (cc[CC_id].tdd_Config != NULL) { //TDD
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (ue_template->DAI - 1) & 3;
LOG_D(MAC, "[eNB %d] Initial transmission CC_id %d : harq_pid %d, dai %d, mcs %d\n",
module_idP,
CC_id,
harq_pid,
(ue_template->DAI - 1),
mcs);
} else {
LOG_D(MAC, "[eNB %d] Initial transmission CC_id %d : harq_pid %d, mcs %d\n",
module_idP,
CC_id,
harq_pid,
mcs);
}
LOG_D(MAC, "Checking feasibility pdu %d (new sdu)\n", if (harq_pid == 0) {
dl_req->number_pdu); VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_DRX_RETRANSMISSION_HARQ0, (unsigned long) ue_sched_ctrl->drx_retransmission_timer[0]);
if (!CCE_allocation_infeasible(module_idP,
CC_id,
1,
subframeP,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
rnti)) {
ue_sched_ctrl->round[CC_id][harq_pid] = 0;
dl_req->number_dci++;
dl_req->number_pdu++;
dl_req->tl.tag = NFAPI_DL_CONFIG_REQUEST_BODY_TAG;
eNB->DL_req[CC_id].sfn_sf = frameP << 4 | subframeP;
eNB->DL_req[CC_id].header.message_id = NFAPI_DL_CONFIG_REQUEST;
/* CDRX */
ue_sched_ctrl->harq_rtt_timer[CC_id][harq_pid] = 1; // restart HARQ RTT timer
if (ue_sched_ctrl->cdrx_configured) {
ue_sched_ctrl->drx_inactivity_timer = 1; // restart drx inactivity timer when new transmission
ue_sched_ctrl->drx_retransmission_timer[harq_pid] = 0; // stop drx retransmission
/*
* Note: contrary to the spec drx_retransmission_timer[harq_pid] is reset not stop.
*/
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_DRX_INACTIVITY, (unsigned long) ue_sched_ctrl->drx_inactivity_timer);
if (harq_pid == 0) {
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_DRX_RETRANSMISSION_HARQ0, (unsigned long) ue_sched_ctrl->drx_retransmission_timer[0]);
}
} }
// Toggle NDI for next time
LOG_D(MAC, "CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n",
CC_id,
frameP,
subframeP,
UE_id,
rnti,
harq_pid,
ue_template->oldNDI[harq_pid]);
ue_template->oldNDI[harq_pid] = 1 - ue_template->oldNDI[harq_pid];
ue_template->oldmcs1[harq_pid] = mcs;
ue_template->oldmcs2[harq_pid] = 0;
AssertFatal(ue_template->physicalConfigDedicated != NULL, "physicalConfigDedicated is NULL\n");
AssertFatal(ue_template->physicalConfigDedicated->pdsch_ConfigDedicated != NULL,
"physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n");
fill_nfapi_dlsch_config(eNB,
dl_req,
TBS,
eNB->pdu_index[CC_id],
rnti,
0, // type 0 allocation from 7.1.6 in 36.213
0, // virtual_resource_block_assignment_flag, unused here
0, // resource_block_coding, to be filled in later
getQm(mcs),
0, // redundancy version
1, // transport blocks
0, // transport block to codeword swap flag
cc[CC_id].p_eNB == 1 ? 0 : 1, // transmission_scheme
1, // number of layers
1, // number of subbands
// uint8_t codebook_index,
4, // UE category capacity
ue_template->physicalConfigDedicated->pdsch_ConfigDedicated->p_a,
0, // delta_power_offset for TM5
0, // ngap
0, // nprb
cc[CC_id].p_eNB == 1 ? 1 : 2, // transmission mode
0, //number of PRBs treated as one subband, not used here
0); // number of beamforming vectors, not used here
eNB->TX_req[CC_id].sfn_sf = fill_nfapi_tx_req(&eNB->TX_req[CC_id].tx_request_body,
(frameP * 10) + subframeP,
TBS,
eNB->pdu_index[CC_id],
dlsch_pdu->payload[0]);
LOG_D(MAC, "Filled NFAPI configuration for DCI/DLSCH/TXREQ %d, new SDU\n",
eNB->pdu_index[CC_id]);
eNB->pdu_index[CC_id]++;
program_dlsch_acknak(module_idP,
CC_id,
UE_id,
frameP,
subframeP,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx);
} else {
LOG_W(MAC, "Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d/%x, infeasible CCE allocations\n",
frameP,
subframeP,
UE_id,
rnti);
} }
} else { // There is no data from RLC or MAC header, so don't schedule
// Toggle NDI for next time
LOG_D(MAC, "CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n",
CC_id,
frameP,
subframeP,
UE_id,
rnti,
harq_pid,
ue_template->oldNDI[harq_pid]);
ue_template->oldNDI[harq_pid] = 1 - ue_template->oldNDI[harq_pid];
ue_template->oldmcs1[harq_pid] = mcs;
ue_template->oldmcs2[harq_pid] = 0;
AssertFatal(ue_template->physicalConfigDedicated != NULL, "physicalConfigDedicated is NULL\n");
AssertFatal(ue_template->physicalConfigDedicated->pdsch_ConfigDedicated != NULL,
"physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n");
fill_nfapi_dlsch_config(eNB,
dl_req,
TBS,
eNB->pdu_index[CC_id],
rnti,
0, // type 0 allocation from 7.1.6 in 36.213
0, // virtual_resource_block_assignment_flag, unused here
0, // resource_block_coding, to be filled in later
getQm(mcs),
0, // redundancy version
1, // transport blocks
0, // transport block to codeword swap flag
cc[CC_id].p_eNB == 1 ? 0 : 1, // transmission_scheme
1, // number of layers
1, // number of subbands
// uint8_t codebook_index,
4, // UE category capacity
ue_template->physicalConfigDedicated->pdsch_ConfigDedicated->p_a,
0, // delta_power_offset for TM5
0, // ngap
0, // nprb
cc[CC_id].p_eNB == 1 ? 1 : 2, // transmission mode
0, //number of PRBs treated as one subband, not used here
0); // number of beamforming vectors, not used here
eNB->TX_req[CC_id].sfn_sf = fill_nfapi_tx_req(&eNB->TX_req[CC_id].tx_request_body,
(frameP * 10) + subframeP,
TBS,
eNB->pdu_index[CC_id],
dlsch_pdu->payload[0]);
LOG_D(MAC, "Filled NFAPI configuration for DCI/DLSCH/TXREQ %d, new SDU\n",
eNB->pdu_index[CC_id]);
eNB->pdu_index[CC_id]++;
program_dlsch_acknak(module_idP,
CC_id,
UE_id,
frameP,
subframeP,
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx);
} else {
LOG_W(MAC, "Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d/%x, infeasible CCE allocations\n",
frameP,
subframeP,
UE_id,
rnti);
} }
} else { // There is no data from RLC or MAC header, so don't schedule
} }
}
if (cc[CC_id].tdd_Config != NULL) { // TDD if (cc[CC_id].tdd_Config != NULL) { // TDD
set_ul_DAI(module_idP, set_ul_DAI(module_idP,
UE_id, UE_id,
CC_id, CC_id,
frameP, frameP,
subframeP); subframeP);
} }
} // UE_id loop } // UE_id loop
} // CC_id loop
fill_DLSCH_dci(module_idP, fill_DLSCH_dci(module_idP,
frameP, frameP,
......
...@@ -156,8 +156,11 @@ void fill_DLSCH_dci(module_id_t module_idP,frame_t frameP,sub_frame_t subframe,i ...@@ -156,8 +156,11 @@ void fill_DLSCH_dci(module_id_t module_idP,frame_t frameP,sub_frame_t subframe,i
void schedule_dlsch(module_id_t module_idP, frame_t frameP, void schedule_dlsch(module_id_t module_idP, frame_t frameP,
sub_frame_t subframe, int *mbsfn_flag); sub_frame_t subframe, int *mbsfn_flag);
void schedule_ue_spec(module_id_t module_idP, int slice_idxP, void schedule_ue_spec(module_id_t module_idP,
frame_t frameP,sub_frame_t subframe, int *mbsfn_flag); int CC_id,
frame_t frameP,
sub_frame_t subframe,
int *mbsfn_flag);
void schedule_ue_spec_br(module_id_t module_idP, void schedule_ue_spec_br(module_id_t module_idP,
frame_t frameP, frame_t frameP,
sub_frame_t subframeP); sub_frame_t subframeP);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment