Commit 9805b053 authored by Louis Adrien Dufrene

formatting eNB_scheduler_ulsch.c

parent 17e70e1d
@@ -87,7 +87,7 @@ extern mui_t rrc_eNB_mui;
//-----------------------------------------------------------------------------
/*
*
* When data are received on PHY and transmitted to MAC
*/
void
rx_sdu(const module_id_t enb_mod_idP,
@@ -101,48 +101,65 @@ rx_sdu(const module_id_t enb_mod_idP,
const uint8_t ul_cqi)
//-----------------------------------------------------------------------------
{
int current_rnti = rntiP;
unsigned char rx_ces[MAX_NUM_CE], num_ce, num_sdu, i, *payload_ptr;
int current_rnti = 0;
int UE_id = -1;
int RA_id = 0;
int old_rnti = -1;
int old_UE_id = -1;
int crnti_rx = 0;
int harq_pid = 0;
int first_rb = 0;
unsigned char num_ce = 0;
unsigned char num_sdu = 0;
unsigned char *payload_ptr = NULL;
unsigned char rx_ces[MAX_NUM_CE];
unsigned char rx_lcids[NB_RB_MAX];
unsigned short rx_lengths[NB_RB_MAX];
int UE_id = find_UE_id(enb_mod_idP, current_rnti);
int RA_id;
int ii, j;
eNB_MAC_INST *mac = RC.mac[enb_mod_idP];
int harq_pid =
subframe2harqpid(&mac->common_channels[CC_idP], frameP, subframeP);
uint8_t lcgid = 0;
int lcgid_updated[4] = {0, 0, 0, 0};
UE_list_t *UE_list = &mac->UE_list;
int crnti_rx = 0;
RA_t *ra =
(RA_t *) & RC.mac[enb_mod_idP]->common_channels[CC_idP].ra[0];
int first_rb = 0;
eNB_MAC_INST *mac = NULL;
UE_list_t *UE_list = NULL;
RA_t *ra = NULL;
rrc_eNB_ue_context_t *ue_contextP = NULL;
start_meas(&mac->rx_ulsch_sdu);
if ((UE_id > MAX_MOBILES_PER_ENB) || (UE_id == -1))
for (ii = 0; ii < NB_RB_MAX; ii++) {
rx_lengths[ii] = 0;
}
/* Init */
current_rnti = rntiP;
UE_id = find_UE_id(enb_mod_idP, current_rnti);
mac = RC.mac[enb_mod_idP];
harq_pid = subframe2harqpid(&mac->common_channels[CC_idP], frameP, subframeP);
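/* The UL HARQ process id follows from the frame/subframe in which this PUSCH was received */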
UE_list = &mac->UE_list;
ra = (RA_t *) &RC.mac[enb_mod_idP]->common_channels[CC_idP].ra[0];
memset(rx_ces, 0, MAX_NUM_CE * sizeof(unsigned char));
memset(rx_lcids, 0, NB_RB_MAX * sizeof(unsigned char));
memset(rx_lengths, 0, NB_RB_MAX * sizeof(unsigned short));
start_meas(&mac->rx_ulsch_sdu);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME
(VCD_SIGNAL_DUMPER_FUNCTIONS_RX_SDU, 1);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RX_SDU, 1);
if (opt_enabled == 1) {
trace_pdu(DIRECTION_UPLINK, sduP, sdu_lenP, 0, WS_C_RNTI, current_rnti, frameP, subframeP,
0, 0);
trace_pdu(DIRECTION_UPLINK, sduP, sdu_lenP, 0, WS_C_RNTI, current_rnti, frameP, subframeP, 0, 0);
LOG_D(OPT, "[eNB %d][ULSCH] Frame %d rnti %x with size %d\n",
enb_mod_idP, frameP, current_rnti, sdu_lenP);
enb_mod_idP,
frameP,
current_rnti,
sdu_lenP);
}
if (UE_id != -1) {
LOG_D(MAC,
"[eNB %d][PUSCH %d] CC_id %d %d.%d Received ULSCH sdu round %d from PHY (rnti %x, UE_id %d) ul_cqi %d\n",
enb_mod_idP, harq_pid, CC_idP,frameP,subframeP,
LOG_D(MAC, "[eNB %d][PUSCH %d] CC_id %d %d.%d Received ULSCH sdu round %d from PHY (rnti %x, UE_id %d) ul_cqi %d\n",
enb_mod_idP,
harq_pid,
CC_idP,
frameP,
subframeP,
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid],
current_rnti, UE_id, ul_cqi);
AssertFatal(UE_list->UE_sched_ctrl[UE_id].
round_UL[CC_idP][harq_pid] < 8, "round >= 8\n");
current_rnti,
UE_id,
ul_cqi);
AssertFatal(UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid] < 8, "round >= 8\n");
if (sduP != NULL) {
UE_list->UE_sched_ctrl[UE_id].ul_inactivity_timer = 0;
@@ -156,62 +173,75 @@ rx_sdu(const module_id_t enb_mod_idP,
UE_list->UE_sched_ctrl[UE_id].ta_update = (UE_list->UE_sched_ctrl[UE_id].ta_update * 3 + timing_advance) / 4;
UE_list->UE_sched_ctrl[UE_id].pusch_snr[CC_idP] = ul_cqi;
UE_list->UE_sched_ctrl[UE_id].ul_consecutive_errors = 0;
first_rb = UE_list->UE_template[CC_idP][UE_id].first_rb_ul[harq_pid];
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync > 0) {
UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync = 0;
mac_eNB_rrc_ul_in_sync(enb_mod_idP, CC_idP, frameP,
subframeP, UE_RNTI(enb_mod_idP,
UE_id));
mac_eNB_rrc_ul_in_sync(enb_mod_idP, CC_idP, frameP, subframeP, UE_RNTI(enb_mod_idP, UE_id)); // replace UE_RNTI(enb_mod_idP, UE_id) by current_rnti ??
}
/* update scheduled bytes */
/* update bytes to schedule */
UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes -= UE_list->UE_template[CC_idP][UE_id].TBS_UL[harq_pid];
if (UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes < 0)
if (UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes < 0) {
UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes = 0;
} else { // we've got an error
LOG_I(MAC,
"[eNB %d][PUSCH %d] CC_id %d %d.%d ULSCH in error in round %d, ul_cqi %d\n",
enb_mod_idP, harq_pid, CC_idP,frameP,subframeP,
}
} else { // sduP == NULL => error
LOG_I(MAC, "[eNB %d][PUSCH %d] CC_id %d %d.%d ULSCH in error in round %d, ul_cqi %d\n",
enb_mod_idP,
harq_pid,
CC_idP,
frameP,
subframeP,
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid],
ul_cqi);
if(ul_cqi>200) { // too high energy pattern
if (ul_cqi > 200) { // too high energy pattern
UE_list->UE_sched_ctrl[UE_id].pusch_snr[CC_idP] = ul_cqi;
}
// AssertFatal(1==0,"ulsch in error\n");
if (UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid] == 3) {
UE_list->UE_sched_ctrl[UE_id].ul_scheduled &= (~(1 << harq_pid));
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid] = 0;
if (UE_list->UE_sched_ctrl[UE_id].ul_consecutive_errors++ == 10)
if (UE_list->UE_sched_ctrl[UE_id].ul_consecutive_errors++ == 10) {
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer = 1;
}
/* update scheduled bytes */
/* Update scheduled bytes */
UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes -= UE_list->UE_template[CC_idP][UE_id].TBS_UL[harq_pid];
if (UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes < 0)
if (UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes < 0) {
UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes = 0;
}
if (find_RA_id(enb_mod_idP, CC_idP, current_rnti) != -1)
if (find_RA_id(enb_mod_idP, CC_idP, current_rnti) != -1) {
cancel_ra_proc(enb_mod_idP, CC_idP, frameP, current_rnti);
} else
}
} else {
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid]++;
}
first_rb = UE_list->UE_template[CC_idP][UE_id].first_rb_ul[harq_pid];
// Program NACK for PHICH
LOG_D(MAC,
"Programming PHICH NACK for rnti %x harq_pid %d (first_rb %d)\n",
current_rnti, harq_pid, first_rb);
nfapi_hi_dci0_request_t *hi_dci0_req;
/* Program NACK for PHICH */
LOG_D(MAC, "Programming PHICH NACK for rnti %x harq_pid %d (first_rb %d)\n",
current_rnti,
harq_pid,
first_rb);
nfapi_hi_dci0_request_t *hi_dci0_req = NULL;
uint8_t sf_ahead_dl = ul_subframe2_k_phich(&mac->common_channels[CC_idP], subframeP);
hi_dci0_req = &mac->HI_DCI0_req[CC_idP][(subframeP+sf_ahead_dl)%10];
hi_dci0_req = &mac->HI_DCI0_req[CC_idP][(subframeP + sf_ahead_dl) % 10];
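/* Note: the PHICH carrying this HARQ feedback is transmitted k_PHICH subframes after the PUSCH
 * (4 for FDD), hence the HI_DCI0 request of subframe (subframeP + sf_ahead_dl) % 10 is used
 */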
nfapi_hi_dci0_request_body_t *hi_dci0_req_body = &hi_dci0_req->hi_dci0_request_body;
nfapi_hi_dci0_request_pdu_t *hi_dci0_pdu =
&hi_dci0_req_body->hi_dci0_pdu_list[hi_dci0_req_body->number_of_dci + hi_dci0_req_body->number_of_hi];
nfapi_hi_dci0_request_pdu_t *hi_dci0_pdu = &hi_dci0_req_body->hi_dci0_pdu_list[hi_dci0_req_body->number_of_dci + hi_dci0_req_body->number_of_hi];
memset((void *) hi_dci0_pdu, 0, sizeof(nfapi_hi_dci0_request_pdu_t));
hi_dci0_pdu->pdu_type = NFAPI_HI_DCI0_HI_PDU_TYPE;
hi_dci0_pdu->pdu_size = 2 + sizeof(nfapi_hi_dci0_hi_pdu);
hi_dci0_pdu->hi_pdu.hi_pdu_rel8.tl.tag = NFAPI_HI_DCI0_REQUEST_HI_PDU_REL8_TAG;
@@ -225,38 +255,43 @@ rx_sdu(const module_id_t enb_mod_idP,
hi_dci0_req->header.message_id = NFAPI_HI_DCI0_REQUEST;
return;
}
// if UE_id == -1
} else if ((RA_id = find_RA_id(enb_mod_idP, CC_idP, current_rnti)) != -1) { // Check if this is an RA process for the rnti
AssertFatal(mac->common_channels[CC_idP].
radioResourceConfigCommon->rach_ConfigCommon.
maxHARQ_Msg3Tx > 1,
AssertFatal(mac->common_channels[CC_idP].radioResourceConfigCommon->rach_ConfigCommon.maxHARQ_Msg3Tx > 1,
"maxHARQ %d should be greater than 1\n",
(int) mac->common_channels[CC_idP].
radioResourceConfigCommon->rach_ConfigCommon.
maxHARQ_Msg3Tx);
LOG_D(MAC,
"[eNB %d][PUSCH %d] CC_id %d [RAPROC Msg3] Received ULSCH sdu round %d from PHY (rnti %x, RA_id %d) ul_cqi %d\n",
enb_mod_idP, harq_pid, CC_idP, ra[RA_id].msg3_round,
current_rnti, RA_id, ul_cqi);
first_rb = ra->msg3_first_rb;
(int) mac->common_channels[CC_idP].radioResourceConfigCommon->rach_ConfigCommon.maxHARQ_Msg3Tx);
LOG_D(MAC, "[eNB %d][PUSCH %d] CC_id %d [RAPROC Msg3] Received ULSCH sdu round %d from PHY (rnti %x, RA_id %d) ul_cqi %d\n",
enb_mod_idP,
harq_pid,
CC_idP,
ra[RA_id].msg3_round,
current_rnti,
RA_id,
ul_cqi);
first_rb = ra->msg3_first_rb; // Should it be ra[RA_id]???
if (sduP == NULL) { // we've got an error on Msg3
LOG_D(MAC,
"[eNB %d] CC_id %d, RA %d ULSCH in error in round %d/%d\n",
enb_mod_idP, CC_idP, RA_id,
LOG_D(MAC, "[eNB %d] CC_id %d, RA %d ULSCH in error in round %d/%d\n",
enb_mod_idP,
CC_idP,
RA_id,
ra[RA_id].msg3_round,
(int) mac->common_channels[CC_idP].
radioResourceConfigCommon->rach_ConfigCommon.
maxHARQ_Msg3Tx);
(int) mac->common_channels[CC_idP].radioResourceConfigCommon->rach_ConfigCommon.maxHARQ_Msg3Tx);
if (ra[RA_id].msg3_round >= mac->common_channels[CC_idP].radioResourceConfigCommon->rach_ConfigCommon.maxHARQ_Msg3Tx - 1) {
cancel_ra_proc(enb_mod_idP, CC_idP, frameP, current_rnti);
} else {
first_rb = UE_list->UE_template[CC_idP][UE_id].first_rb_ul[harq_pid];
ra[RA_id].msg3_round++;
// prepare handling of retransmission
/* Prepare handling of retransmission */
get_Msg3allocret(&mac->common_channels[CC_idP],
ra[RA_id].Msg3_subframe, ra[RA_id].Msg3_frame,
&ra[RA_id].Msg3_frame, &ra[RA_id].Msg3_subframe);
add_msg3(enb_mod_idP, CC_idP, &ra[RA_id], frameP, subframeP);
}
@@ -264,71 +299,101 @@ rx_sdu(const module_id_t enb_mod_idP,
return;
}
} else {
LOG_W(MAC,
"Cannot find UE or RA corresponding to ULSCH rnti %x, dropping it\n",
current_rnti);
LOG_W(MAC, "Cannot find UE or RA corresponding to ULSCH rnti %x, dropping it\n", current_rnti);
return;
}
payload_ptr = parse_ulsch_header(sduP, &num_ce, &num_sdu, rx_ces, rx_lcids, rx_lengths, sdu_lenP);
if(payload_ptr == NULL) {
if (payload_ptr == NULL) {
LOG_E(MAC,"[eNB %d][PUSCH %d] CC_id %d ulsch header unknown lcid(rnti %x, UE_id %d)\n",
enb_mod_idP, harq_pid, CC_idP,current_rnti, UE_id);
enb_mod_idP,
harq_pid,
CC_idP,
current_rnti,
UE_id);
return;
}
T(T_ENB_MAC_UE_UL_PDU, T_INT(enb_mod_idP), T_INT(CC_idP),
T_INT(current_rnti), T_INT(frameP), T_INT(subframeP),
T_INT(harq_pid), T_INT(sdu_lenP), T_INT(num_ce), T_INT(num_sdu));
T(T_ENB_MAC_UE_UL_PDU_WITH_DATA, T_INT(enb_mod_idP), T_INT(CC_idP),
T_INT(current_rnti), T_INT(frameP), T_INT(subframeP),
T_INT(harq_pid), T_INT(sdu_lenP), T_INT(num_ce), T_INT(num_sdu),
T(T_ENB_MAC_UE_UL_PDU,
T_INT(enb_mod_idP),
T_INT(CC_idP),
T_INT(current_rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_INT(sdu_lenP),
T_INT(num_ce),
T_INT(num_sdu));
T(T_ENB_MAC_UE_UL_PDU_WITH_DATA,
T_INT(enb_mod_idP),
T_INT(CC_idP),
T_INT(current_rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_INT(sdu_lenP),
T_INT(num_ce),
T_INT(num_sdu),
T_BUFFER(sduP, sdu_lenP));
mac->eNB_stats[CC_idP].ulsch_bytes_rx = sdu_lenP;
mac->eNB_stats[CC_idP].total_ulsch_bytes_rx += sdu_lenP;
mac->eNB_stats[CC_idP].total_ulsch_pdus_rx += 1;
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid] = 0;
// control element
for (i = 0; i < num_ce; i++) {
T(T_ENB_MAC_UE_UL_CE, T_INT(enb_mod_idP), T_INT(CC_idP),
T_INT(current_rnti), T_INT(frameP), T_INT(subframeP),
/* Control element */
for (int i = 0; i < num_ce; i++) {
T(T_ENB_MAC_UE_UL_CE,
T_INT(enb_mod_idP),
T_INT(CC_idP),
T_INT(current_rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(rx_ces[i]));
switch (rx_ces[i]) { // implement and process BSR + CRNTI +
switch (rx_ces[i]) { // implement and process BSR + CRNTI
case POWER_HEADROOM:
if (UE_id != -1) {
UE_list->UE_template[CC_idP][UE_id].phr_info =
(payload_ptr[0] & 0x3f) - PHR_MAPPING_OFFSET + (int8_t)(hundred_times_log10_NPRB[UE_list->UE_template[CC_idP][UE_id].nb_rb_ul[harq_pid]-1]/100);
UE_list->UE_template[CC_idP][UE_id].phr_info = (payload_ptr[0] & 0x3f) - PHR_MAPPING_OFFSET + (int8_t)(hundred_times_log10_NPRB[UE_list->UE_template[CC_idP][UE_id].nb_rb_ul[harq_pid] - 1] / 100);
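/* The 6-bit PH field of the PHR CE maps to roughly -23..40 dB (36.321/36.133), hence the cap at 40 below;
 * the table term adds an N_PRB-dependent correction based on the size of the grant the report was computed on
 */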
if(UE_list->UE_template[CC_idP][UE_id].phr_info > 40)
if (UE_list->UE_template[CC_idP][UE_id].phr_info > 40) {
UE_list->UE_template[CC_idP][UE_id].phr_info = 40;
}
LOG_D(MAC,
"[eNB %d] CC_id %d MAC CE_LCID %d : Received PHR PH = %d (db)\n",
enb_mod_idP, CC_idP, rx_ces[i],
LOG_D(MAC, "[eNB %d] CC_id %d MAC CE_LCID %d : Received PHR PH = %d (db)\n",
enb_mod_idP,
CC_idP,
rx_ces[i],
UE_list->UE_template[CC_idP][UE_id].phr_info);
UE_list->UE_template[CC_idP][UE_id].phr_info_configured =
1;
UE_list->UE_template[CC_idP][UE_id].phr_info_configured = 1;
UE_list->UE_sched_ctrl[UE_id].phr_received = 1;
}
payload_ptr += sizeof(POWER_HEADROOM_CMD);
break;
case CRNTI: {
int old_rnti =
(((uint16_t) payload_ptr[0]) << 8) + payload_ptr[1];
int old_UE_id = find_UE_id(enb_mod_idP, old_rnti);
LOG_D(MAC,
"[eNB %d] Frame %d, Subframe %d CC_id %d MAC CE_LCID %d (ce %d/%d): CRNTI %x (UE_id %d) in Msg3\n",
enb_mod_idP, frameP, subframeP, CC_idP, rx_ces[i], i,
num_ce, old_rnti, old_UE_id);
case CRNTI:
old_rnti = (((uint16_t) payload_ptr[0]) << 8) + payload_ptr[1];
old_UE_id = find_UE_id(enb_mod_idP, old_rnti);
LOG_D(MAC, "[eNB %d] Frame %d, Subframe %d CC_id %d MAC CE_LCID %d (ce %d/%d): CRNTI %x (UE_id %d) in Msg3\n",
enb_mod_idP,
frameP,
subframeP,
CC_idP,
rx_ces[i],
i,
num_ce,
old_rnti,
old_UE_id);
/* receiving CRNTI means that the current rnti has to go away */
//cancel_ra_proc(enb_mod_idP, CC_idP, frameP,
// current_rnti);
/* Receiving CRNTI means that the current rnti has to go away */
if (old_UE_id != -1) {
/* TODO: if the UE did random access (followed by a MAC uplink with
* CRNTI) because none of its scheduling request was granted, then
@@ -341,8 +406,8 @@ rx_sdu(const module_id_t enb_mod_idP,
* We have to take care of this. As the code is, nothing is done and
* the UE state in the eNB is wrong.
*/
for (ii = 0; ii < NB_RA_PROC_MAX; ii++) {
ra = &mac->common_channels[CC_idP].ra[ii];
for (int ii = 0; ii < NB_RA_PROC_MAX; ii++) {
ra = &mac->common_channels[CC_idP].ra[ii]; // Replace with find_RA_id => ((RA_id = find_RA_id(enb_mod_idP, CC_idP, current_rnti)) != -1)
if ((ra->rnti == current_rnti) && (ra->state != IDLE)) {
mac_rrc_data_ind(enb_mod_idP,
@@ -353,29 +418,37 @@ rx_sdu(const module_id_t enb_mod_idP,
(uint8_t *) payload_ptr,
rx_lengths[i],
0);
// prepare transmission of Msg4(RRCConnectionReconfiguration)
/* Prepare transmission of Msg4(RRCConnectionReconfiguration) */
ra->state = MSGCRNTI;
LOG_I(MAC,
"[eNB %d] Frame %d, Subframe %d CC_id %d : (rnti %x UE_id %d) RRCConnectionReconfiguration(Msg4)\n",
enb_mod_idP, frameP, subframeP, CC_idP, old_rnti, old_UE_id);
LOG_I(MAC, "[eNB %d] Frame %d, Subframe %d CC_id %d : (rnti %x UE_id %d) RRCConnectionReconfiguration(Msg4)\n",
enb_mod_idP,
frameP,
subframeP,
CC_idP,
old_rnti,
old_UE_id);
UE_id = old_UE_id;
current_rnti = old_rnti;
ra->rnti = old_rnti;
ra->crnti_rrc_mui = rrc_eNB_mui-1;
ra->crnti_harq_pid = -1;
//clear timer
/* Clear timer */
UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
UE_list->UE_sched_ctrl[UE_id].ul_inactivity_timer = 0;
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer = 0;
if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync > 0) {
UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync = 0;
mac_eNB_rrc_ul_in_sync(enb_mod_idP, CC_idP, frameP,
subframeP, old_rnti);
mac_eNB_rrc_ul_in_sync(enb_mod_idP, CC_idP, frameP, subframeP, old_rnti);
}
UE_list->UE_template[CC_idP][UE_id].ul_SR = 1;
UE_list->UE_sched_ctrl[UE_id].crnti_reconfigurationcomplete_flag = 1;
break;
}
}
@@ -385,57 +458,60 @@ rx_sdu(const module_id_t enb_mod_idP,
crnti_rx = 1;
payload_ptr += 2;
break;
}
case TRUNCATED_BSR:
case SHORT_BSR: {
uint8_t lcgid;
case SHORT_BSR:
lcgid = (payload_ptr[0] >> 6);
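/* Short/Truncated BSR CE is a single octet: 2-bit LCG id in the two MSBs, 6-bit buffer size index below (36.321 6.1.3.1) */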
LOG_D(MAC,
"[eNB %d] CC_id %d MAC CE_LCID %d : Received short BSR LCGID = %u bsr = %d\n",
enb_mod_idP, CC_idP, rx_ces[i], lcgid,
payload_ptr[0] & 0x3f);
if (crnti_rx == 1)
LOG_D(MAC,
"[eNB %d] CC_id %d MAC CE_LCID %d : Received short BSR LCGID = %u bsr = %d\n",
enb_mod_idP, CC_idP, rx_ces[i], lcgid,
LOG_D(MAC, "[eNB %d] CC_id %d MAC CE_LCID %d : Received short BSR LCGID = %u bsr = %d\n",
enb_mod_idP,
CC_idP,
rx_ces[i],
lcgid,
payload_ptr[0] & 0x3f);
if (UE_id != -1) {
int bsr = payload_ptr[0] & 0x3f;
int bsr = 0;
bsr = payload_ptr[0] & 0x3f;
lcgid_updated[lcgid] = 1;
// update buffer info
/* Update buffer info */
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[lcgid] = BSR_TABLE[bsr];
UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer =
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID0] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID1] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID2] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID3];
//UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer += UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer / 4;
RC.eNB[enb_mod_idP][CC_idP]->pusch_stats_bsr[UE_id][(frameP * 10) + subframeP] = (payload_ptr[0] & 0x3f);
if (UE_id == UE_list->head)
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_BSR,
RC.eNB[enb_mod_idP][CC_idP]->pusch_stats_bsr
[UE_id][(frameP * 10) + subframeP]);
if (UE_id == UE_list->head) {
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_BSR, RC.eNB[enb_mod_idP][CC_idP]->pusch_stats_bsr[UE_id][(frameP * 10) + subframeP]);
}
if (UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[lcgid] == 0) {
UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[lcgid] = frameP;
}
if (mac_eNB_get_rrc_status(enb_mod_idP,UE_RNTI(enb_mod_idP, UE_id)) < RRC_CONNECTED)
LOG_D(MAC,
"[eNB %d] CC_id %d MAC CE_LCID %d : estimated_ul_buffer = %d (lcg increment %d)\n",
enb_mod_idP, CC_idP, rx_ces[i],
if (mac_eNB_get_rrc_status(enb_mod_idP,UE_RNTI(enb_mod_idP, UE_id)) < RRC_CONNECTED) {
LOG_D(MAC, "[eNB %d] CC_id %d MAC CE_LCID %d : estimated_ul_buffer = %d (lcg increment %d)\n",
enb_mod_idP,
CC_idP,
rx_ces[i],
UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer,
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[lcgid]);
}
} else {
/* Need error message */
}
payload_ptr += 1; //sizeof(SHORT_BSR); // fixme
}
break;
case LONG_BSR:
@@ -444,39 +520,43 @@ rx_sdu(const module_id_t enb_mod_idP,
int bsr1 = ((payload_ptr[0] & 0x03) << 4) | ((payload_ptr[1] & 0xF0) >> 4);
int bsr2 = ((payload_ptr[1] & 0x0F) << 2) | ((payload_ptr[2] & 0xC0) >> 6);
int bsr3 = payload_ptr[2] & 0x3F;
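/* Long BSR CE packs four 6-bit buffer size indices (one per LCG) into three octets, hence the bit shuffling above */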
lcgid_updated[LCGID0] = 1;
lcgid_updated[LCGID1] = 1;
lcgid_updated[LCGID2] = 1;
lcgid_updated[LCGID3] = 1;
// update buffer info
/* Update buffer info */
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID0] = BSR_TABLE[bsr0];
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID1] = BSR_TABLE[bsr1];
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID2] = BSR_TABLE[bsr2];
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID3] = BSR_TABLE[bsr3];
UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer =
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID0] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID1] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID2] +
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID3];
//UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer += UE_list->UE_template[CC_idP][UE_id].estimated_ul_buffer / 4;
LOG_D(MAC,
"[eNB %d] CC_id %d MAC CE_LCID %d: Received long BSR. Size is LCGID0 = %u LCGID1 = "
"%u LCGID2 = %u LCGID3 = %u\n", enb_mod_idP, CC_idP,
LOG_D(MAC, "[eNB %d] CC_id %d MAC CE_LCID %d: Received long BSR. Size is LCGID0 = %u LCGID1 = %u LCGID2 = %u LCGID3 = %u\n",
enb_mod_idP,
CC_idP,
rx_ces[i],
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID0],
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID1],
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID2],
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID3]);
if (crnti_rx == 1)
LOG_D(MAC,
"[eNB %d] CC_id %d MAC CE_LCID %d: Received long BSR. Size is LCGID0 = %u LCGID1 = "
"%u LCGID2 = %u LCGID3 = %u\n", enb_mod_idP,
CC_idP, rx_ces[i],
if (crnti_rx == 1) {
LOG_D(MAC, "[eNB %d] CC_id %d MAC CE_LCID %d: Received long BSR. Size is LCGID0 = %u LCGID1 = %u LCGID2 = %u LCGID3 = %u\n",
enb_mod_idP,
CC_idP,
rx_ces[i],
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID0],
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID1],
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID2],
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID3]);
}
if (UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[LCGID0] == 0) {
UE_list->UE_template[CC_idP][UE_id].ul_buffer_creation_time[LCGID0] = 0;
@@ -507,83 +587,114 @@ rx_sdu(const module_id_t enb_mod_idP,
break;
default:
LOG_E(MAC,
"[eNB %d] CC_id %d Received unknown MAC header (0x%02x)\n",
enb_mod_idP, CC_idP, rx_ces[i]);
LOG_E(MAC, "[eNB %d] CC_id %d Received unknown MAC header (0x%02x)\n",
enb_mod_idP,
CC_idP,
rx_ces[i]);
break;
}
}
for (i = 0; i < num_sdu; i++) {
} // end switch on control element
} // end for loop on control element
for (int i = 0; i < num_sdu; i++) {
LOG_D(MAC, "SDU Number %d MAC Subheader SDU_LCID %d, length %d\n",
i, rx_lcids[i], rx_lengths[i]);
T(T_ENB_MAC_UE_UL_SDU, T_INT(enb_mod_idP), T_INT(CC_idP),
T_INT(current_rnti), T_INT(frameP), T_INT(subframeP),
T_INT(rx_lcids[i]), T_INT(rx_lengths[i]));
T(T_ENB_MAC_UE_UL_SDU_WITH_DATA, T_INT(enb_mod_idP), T_INT(CC_idP),
T_INT(current_rnti), T_INT(frameP), T_INT(subframeP),
T_INT(rx_lcids[i]), T_INT(rx_lengths[i]), T_BUFFER(payload_ptr,
rx_lengths
[i]));
i,
rx_lcids[i],
rx_lengths[i]);
T(T_ENB_MAC_UE_UL_SDU,
T_INT(enb_mod_idP),
T_INT(CC_idP),
T_INT(current_rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(rx_lcids[i]),
T_INT(rx_lengths[i]));
T(T_ENB_MAC_UE_UL_SDU_WITH_DATA,
T_INT(enb_mod_idP),
T_INT(CC_idP),
T_INT(current_rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(rx_lcids[i]),
T_INT(rx_lengths[i]),
T_BUFFER(payload_ptr, rx_lengths[i]));
switch (rx_lcids[i]) {
case CCCH:
if (rx_lengths[i] > CCCH_PAYLOAD_SIZE_MAX) {
LOG_E(MAC,
"[eNB %d/%d] frame %d received CCCH of size %d (too big, maximum allowed is %d, sdu_len %d), dropping packet\n",
enb_mod_idP, CC_idP, frameP, rx_lengths[i],
CCCH_PAYLOAD_SIZE_MAX, sdu_lenP);
LOG_E(MAC, "[eNB %d/%d] frame %d received CCCH of size %d (too big, maximum allowed is %d, sdu_len %d), dropping packet\n",
enb_mod_idP,
CC_idP,
frameP,
rx_lengths[i],
CCCH_PAYLOAD_SIZE_MAX,
sdu_lenP);
break;
}
LOG_D(MAC,
"[eNB %d][RAPROC] CC_id %d Frame %d, Received CCCH: %x.%x.%x.%x.%x.%x, Terminating RA procedure for UE rnti %x\n",
enb_mod_idP, CC_idP, frameP, payload_ptr[0],
payload_ptr[1], payload_ptr[2], payload_ptr[3],
payload_ptr[4], payload_ptr[5], current_rnti);
LOG_D(MAC, "[eNB %d][RAPROC] CC_id %d Frame %d, Received CCCH: %x.%x.%x.%x.%x.%x, Terminating RA procedure for UE rnti %x\n",
enb_mod_idP,
CC_idP,
frameP,
payload_ptr[0], payload_ptr[1], payload_ptr[2], payload_ptr[3], payload_ptr[4], payload_ptr[5],
current_rnti);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_TERMINATE_RA_PROC, 1);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_TERMINATE_RA_PROC, 0);
for (ii = 0; ii < NB_RA_PROC_MAX; ii++) {
RA_t *ra = &mac->common_channels[CC_idP].ra[ii];
LOG_D(MAC,
"[mac %d][RAPROC] CC_id %d Checking proc %d : rnti (%x, %x), state %d\n",
enb_mod_idP, CC_idP, ii, ra->rnti,
current_rnti, ra->state);
for (int ii = 0; ii < NB_RA_PROC_MAX; ii++) {
ra = &mac->common_channels[CC_idP].ra[ii];
LOG_D(MAC, "[mac %d][RAPROC] CC_id %d Checking proc %d : rnti (%x, %x), state %d\n",
enb_mod_idP,
CC_idP,
ii,
ra->rnti,
current_rnti,
ra->state);
if ((ra->rnti == current_rnti) && (ra->state != IDLE)) {
//payload_ptr = parse_ulsch_header(msg3,&num_ce,&num_sdu,rx_ces,rx_lcids,rx_lengths,msg3_len);
if (UE_id < 0) {
memcpy(&ra->cont_res_id[0], payload_ptr, 6);
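/* Keep the first 48 bits of the UL CCCH SDU: they are echoed back in the UE Contention Resolution Identity MAC CE of Msg4 */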
LOG_D(MAC,
"[eNB %d][RAPROC] CC_id %d Frame %d CCCH: Received Msg3: length %d, offset %ld\n",
enb_mod_idP, CC_idP, frameP, rx_lengths[i],
LOG_D(MAC, "[eNB %d][RAPROC] CC_id %d Frame %d CCCH: Received Msg3: length %d, offset %ld\n",
enb_mod_idP,
CC_idP,
frameP,
rx_lengths[i],
payload_ptr - sduP);
if ((UE_id = add_new_ue(enb_mod_idP, CC_idP,
mac->common_channels[CC_idP].
ra[ii].rnti, harq_pid
if ((UE_id = add_new_ue(enb_mod_idP, CC_idP, mac->common_channels[CC_idP].ra[ii].rnti, harq_pid
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
,
mac->common_channels[CC_idP].
ra[ii].rach_resource_type
, mac->common_channels[CC_idP].ra[ii].rach_resource_type
#endif
)) == -1) {
LOG_E(MAC,"[MAC][eNB] Max user count reached\n");
cancel_ra_proc(enb_mod_idP, CC_idP, frameP,current_rnti);
cancel_ra_proc(enb_mod_idP, CC_idP, frameP, current_rnti); // send Connection Reject ???
break;
// kill RA procedure
} else
LOG_D(MAC,
"[eNB %d][RAPROC] CC_id %d Frame %d Added user with rnti %x => UE %d\n",
enb_mod_idP, CC_idP, frameP, ra->rnti,
} else {
LOG_D(MAC, "[eNB %d][RAPROC] CC_id %d Frame %d Added user with rnti %x => UE %d\n",
enb_mod_idP,
CC_idP,
frameP,
ra->rnti,
UE_id);
}
} else {
LOG_D(MAC,
"[eNB %d][RAPROC] CC_id %d Frame %d CCCH: Received Msg3 from already registered UE %d: length %d, offset %ld\n",
enb_mod_idP, CC_idP, frameP, UE_id,
rx_lengths[i], payload_ptr - sduP);
// kill RA procedure
LOG_D(MAC, "[eNB %d][RAPROC] CC_id %d Frame %d CCCH: Received Msg3 from already registered UE %d: length %d, offset %ld\n",
enb_mod_idP,
CC_idP,
frameP,
UE_id,
rx_lengths[i],
payload_ptr - sduP);
}
mac_rrc_data_ind(enb_mod_idP,
@@ -602,7 +713,7 @@ rx_sdu(const module_id_t enb_mod_idP,
// prepare transmission of Msg4
ra->state = MSG4;
if(mac->common_channels[CC_idP].tdd_Config!=NULL) {
if(mac->common_channels[CC_idP].tdd_Config != NULL) {
switch(mac->common_channels[CC_idP].tdd_Config->subframeAssignment) {
case 1:
ra->Msg4_frame = frameP + ((subframeP > 2) ? 1 : 0);
@@ -615,25 +726,27 @@ rx_sdu(const module_id_t enb_mod_idP,
// TODO need to be complete for other tdd configs.
}
} else {
// Program Msg4 PDCCH+DLSCH/MPDCCH transmission 4 subframes from now, // Check if this is ok for BL/CE, or if the rule is different
/* Program Msg4 PDCCH+DLSCH/MPDCCH transmission 4 subframes from now,
* Check if this is ok for BL/CE, or if the rule is different
*/
ra->Msg4_frame = frameP + ((subframeP > 5) ? 1 : 0);
ra->Msg4_subframe = (subframeP + 4) % 10;
}
UE_list->UE_sched_ctrl[UE_id].crnti_reconfigurationcomplete_flag = 0;
} // if process is active
} // if RA process is active
} // loop on RA processes
break;
case DCCH:
case DCCH1:
// if(eNB_mac_inst[module_idP][CC_idP].Dcch_lchan[UE_id].Active==1){
#if defined(ENABLE_MAC_PAYLOAD_DEBUG)
LOG_T(MAC, "offset: %d\n",
(unsigned char) ((unsigned char *) payload_ptr - sduP));
LOG_T(MAC, "offset: %d\n", (unsigned char) ((unsigned char *) payload_ptr - sduP));
for (j = 0; j < 32; j++) {
for (int j = 0; j < 32; j++) {
LOG_T(MAC, "%x ", payload_ptr[j]);
}
@@ -642,7 +755,7 @@ rx_sdu(const module_id_t enb_mod_idP,
if (UE_id != -1) {
if (lcgid_updated[UE_list->UE_template[CC_idP][UE_id].lcgidmap[rx_lcids[i]]] == 0) {
// adjust buffer occupancy of the corresponding logical channel group
/* Adjust buffer occupancy of the corresponding logical channel group */
if (UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[UE_list->UE_template[CC_idP][UE_id].lcgidmap[rx_lcids[i]]] >= rx_lengths[i])
UE_list->UE_template[CC_idP][UE_id].ul_buffer_info[UE_list->UE_template[CC_idP][UE_id].lcgidmap[rx_lcids[i]]] -= rx_lengths[i];
else
@@ -676,7 +789,7 @@ rx_sdu(const module_id_t enb_mod_idP,
LOG_T(MAC, "offset: %d\n",
(unsigned char) ((unsigned char *) payload_ptr - sduP));
for (j = 0; j < 32; j++) {
for (int j = 0; j < 32; j++) {
LOG_T(MAC, "%x ", payload_ptr[j]);
}
@@ -693,7 +806,7 @@ rx_sdu(const module_id_t enb_mod_idP,
rx_lcids[i]);
if (UE_id != -1) {
// adjust buffer occupancy of the corresponding logical channel group
/* Adjust buffer occupancy of the corresponding logical channel group */
LOG_D(MAC, "[eNB %d] CC_id %d Frame %d : ULSCH -> UL-DTCH, received %d bytes from UE %d for lcid %d, removing from LCGID %ld, %d\n",
enb_mod_idP,
CC_idP,
@@ -722,9 +835,11 @@ rx_sdu(const module_id_t enb_mod_idP,
mac_rlc_data_ind(enb_mod_idP, current_rnti, enb_mod_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, rx_lcids[i], (char *) payload_ptr, rx_lengths[i], 1, NULL);
UE_list->eNB_UE_stats[CC_idP][UE_id].num_pdu_rx[rx_lcids[i]] += 1;
UE_list->eNB_UE_stats[CC_idP][UE_id].num_bytes_rx[rx_lcids[i]] += rx_lengths[i];
//clear uplane_inactivity_timer
/* Clear uplane_inactivity_timer */
UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0;
// reset RRC inactivity timer after uplane activity
/* Reset RRC inactivity timer after uplane activity */
ue_contextP = rrc_eNB_get_ue_context(RC.rrc[enb_mod_idP], current_rnti);
if (ue_contextP != NULL) {
ue_contextP->ue_context.ue_rrc_inactivity_timer = 1;
@@ -734,8 +849,10 @@ rx_sdu(const module_id_t enb_mod_idP,
CC_idP,
current_rnti);
}
} else { /* rx_length[i] */
} else { /* rx_lengths[i]: max transport block size reached */
UE_list->eNB_UE_stats[CC_idP][UE_id].num_errors_rx += 1;
LOG_E(MAC, "[eNB %d] CC_id %d Frame %d : Max size of transport block reached LCID %d from UE %d ",
enb_mod_idP,
CC_idP,
@@ -752,24 +869,29 @@ rx_sdu(const module_id_t enb_mod_idP,
UE_id);
}
}
break;
}
payload_ptr += rx_lengths[i];
}
// Program ACK for PHICH
LOG_D(MAC,
"Programming PHICH ACK for rnti %x harq_pid %d (first_rb %d)\n",
current_rnti, harq_pid, first_rb);
/* Program ACK for PHICH */
LOG_D(MAC, "Programming PHICH ACK for rnti %x harq_pid %d (first_rb %d)\n",
current_rnti,
harq_pid,
first_rb);
nfapi_hi_dci0_request_t *hi_dci0_req;
uint8_t sf_ahead_dl = ul_subframe2_k_phich(&mac->common_channels[CC_idP], subframeP);
hi_dci0_req = &mac->HI_DCI0_req[CC_idP][(subframeP+sf_ahead_dl)%10];
nfapi_hi_dci0_request_body_t *hi_dci0_req_body = &hi_dci0_req->hi_dci0_request_body;
nfapi_hi_dci0_request_pdu_t *hi_dci0_pdu =
&hi_dci0_req_body->hi_dci0_pdu_list[hi_dci0_req_body->number_of_dci + hi_dci0_req_body->number_of_hi];
nfapi_hi_dci0_request_pdu_t *hi_dci0_pdu = &hi_dci0_req_body->hi_dci0_pdu_list[hi_dci0_req_body->number_of_dci +
hi_dci0_req_body->number_of_hi];
memset((void *) hi_dci0_pdu, 0, sizeof(nfapi_hi_dci0_request_pdu_t));
hi_dci0_pdu->pdu_type = NFAPI_HI_DCI0_HI_PDU_TYPE;
hi_dci0_pdu->pdu_size = 2 + sizeof(nfapi_hi_dci0_hi_pdu);
hi_dci0_pdu->hi_pdu.hi_pdu_rel8.tl.tag = NFAPI_HI_DCI0_REQUEST_HI_PDU_REL8_TAG;
......@@ -787,13 +909,6 @@ rx_sdu(const module_id_t enb_mod_idP,
if (UE_id != -1)
UE_list->eNB_UE_stats[CC_idP][UE_id].total_num_errors_rx += 1;
/*
if (msg3_flagP != NULL) {
if( *msg3_flagP == 1 ) {
LOG_I(MAC,"[eNB %d] CC_id %d frame %d : false msg3 detection: signal phy to canceling RA and remove the UE\n", enb_mod_idP, CC_idP, frameP);
*msg3_flagP=0;
}
} */
} else {
if (UE_id != -1) {
UE_list->eNB_UE_stats[CC_idP][UE_id].pdu_bytes_rx = sdu_lenP;
@@ -1110,7 +1225,7 @@ schedule_ulsch(module_id_t module_idP,
/* Run each enabled slice-specific schedulers one by one */
for (int i = 0; i < sli->n_ul; i++) {
/* By default it is schedule_ulsch_rnti (see below) */
/* By default the scheduler is schedule_ulsch_rnti (see below) */
sli->ul[i].sched_cb(module_idP, i, frameP, subframeP, sched_subframe, first_rb);
}
@@ -1144,6 +1259,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
uint32_t tpc = 0;
int32_t normalized_rx_power = 0;
int32_t target_rx_power = 0;
int32_t framex10psubframe = 0;
static int32_t tpc_accumulated = 0;
int sched_frame = 0;
int CC_id = 0;
@@ -1182,7 +1298,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
/* Note: RC.nb_mac_CC[module_idP] should be lower than or equal to NFAPI_CC_MAX */
for (CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; CC_id++) {
n_rb_ul_tab[CC_id] = to_prb(cc[CC_id].ul_Bandwidth);
n_rb_ul_tab[CC_id] = to_prb(cc[CC_id].ul_Bandwidth); // return total number of PRB
UE_list->first_rb_offset[CC_id][slice_idx] = cmin(n_rb_ul_tab[CC_id], sli->ul[slice_idx].first_rb);
}
@@ -1197,7 +1313,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
first_rb_slice[CC_id] = first_rb[CC_id] + UE_list->first_rb_offset[CC_id][slice_idx];
}
// loop over all active UEs
// loop over all active UEs until end of function
for (int UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
if (!ue_ul_slice_membership(module_idP, UE_id, slice_idx)) {
continue;
@@ -1226,13 +1342,13 @@ schedule_ulsch_rnti(module_id_t module_idP,
continue;
}
// loop over all active UL CC_ids for this UE
// loop over all active UL CC_ids for this UE until end of function
for (int n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
// This is the actual CC_id in the list
/* This is the actual CC_id in the list */
CC_id = UE_list->ordered_ULCCids[n][UE_id];
/* should format_flag be 2 in CCE_allocation_infeasible??? */
/* this test seems to be way too long, can we provide an optimization? */
/* Should format_flag be 2 in CCE_allocation_infeasible??? */
/* This test seems to be way too long, can we provide an optimization? */
if (CCE_allocation_infeasible(module_idP, CC_id, 1, subframeP, aggregation, rnti)) {
LOG_W(MAC, "[eNB %d] frame %d, subframe %d, UE %d/%x CC %d: not enough CCE\n",
module_idP,
@@ -1245,7 +1361,8 @@ schedule_ulsch_rnti(module_id_t module_idP,
continue;
}
/* be sure that there are some free RBs */
/* Be sure that there are some free RBs */
/* Is this not done in CCE_allocation_infeasible()??? */
if (first_rb_slice[CC_id] >= n_rb_ul_tab[CC_id] - 1) {
LOG_W(MAC, "[eNB %d] frame %d, subframe %d, UE %d/%x CC %d: dropping, not enough RBs\n",
module_idP,
@@ -1258,6 +1375,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
continue;
}
/* UE is active and can be scheduled, setting up struct */
UE_template_ptr = &(UE_list->UE_template[CC_id][UE_id]);
UE_sched_ctrl_ptr = &(UE_list->UE_sched_ctrl[UE_id]);
harq_pid = subframe2harqpid(&cc[CC_id], sched_frame, sched_subframeP);
@@ -1281,6 +1399,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
aggregation,
n_rb_ul_tab[CC_id]);
/* Seems unused, only for debug */
RC.eNB[module_idP][CC_id]->pusch_stats_BO[UE_id][(frameP * 10) + subframeP] = UE_template_ptr->estimated_ul_buffer;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_BO, RC.eNB[module_idP][CC_id]->pusch_stats_BO[UE_id][(frameP * 10) + subframeP]);
@@ -1289,8 +1408,8 @@ schedule_ulsch_rnti(module_id_t module_idP,
* If there is information on bsr of DCCH, DTCH or if there is UL_SR,
* or if there is a packet to retransmit, or we want to schedule a periodic feedback
*/
if (UE_is_to_be_scheduled(module_idP, CC_id, UE_id) > 0 || round_index > 0)
{
/* Shouldn't this test be done earlier?? */
if (UE_is_to_be_scheduled(module_idP, CC_id, UE_id) > 0 || round_index > 0) {
LOG_D(MAC, "[eNB %d][PUSCH %d] Frame %d subframe %d Scheduling UE %d/%x in round %d(SR %d,UL_inactivity timer %d,UL_failure timer %d,cqi_req_timer %d)\n",
module_idP,
harq_pid,
@@ -1309,6 +1428,8 @@ schedule_ulsch_rnti(module_id_t module_idP,
status = mac_eNB_get_rrc_status(module_idP, rnti);
cqi_req = 0;
/* Handle the aperiodic CQI report */
/* These aperiodic reports behave as periodic ones... */
if (status >= RRC_CONNECTED && UE_sched_ctrl_ptr->cqi_req_timer > 30) {
if (UE_sched_ctrl_ptr->cqi_received == 0) {
if (nfapi_mode) {
@@ -1316,17 +1437,19 @@ schedule_ulsch_rnti(module_id_t module_idP,
} else {
cqi_req = 1;
// To be safe , do not ask CQI in special Subframes:36.213/7.2.3 CQI definition
/* TDD: to be safe, do not ask CQI in special Subframes:36.213/7.2.3 CQI definition */
if (cc[CC_id].tdd_Config) {
switch (cc[CC_id].tdd_Config->subframeAssignment) {
case 1:
if( subframeP == 1 || subframeP == 6 ) cqi_req=0;
if(subframeP == 1 || subframeP == 6) {
cqi_req=0;
}
break;
case 3:
if( subframeP == 1 ) cqi_req=0;
if(subframeP == 1) {
cqi_req=0;
}
break;
default:
@@ -1335,125 +1458,153 @@ schedule_ulsch_rnti(module_id_t module_idP,
}
}
if(cqi_req == 1) UE_sched_ctrl_ptr->cqi_req_flag |= 1 << sched_subframeP;
if(cqi_req == 1) {
UE_sched_ctrl_ptr->cqi_req_flag |= 1 << sched_subframeP;
}
}
} else if (UE_sched_ctrl_ptr->cqi_received == 1) {
} else {
UE_sched_ctrl_ptr->cqi_req_flag = 0;
UE_sched_ctrl_ptr->cqi_received = 0;
UE_sched_ctrl_ptr->cqi_req_timer = 0;
}
}
//power control
//compute the expected ULSCH RX power (for the stats)
// this is the normalized RX power and this should be constant (regardless of mcs
//is not in dBm, unit from nfapi, converting to dBm: ToDo: Noise power hard coded to 30
normalized_rx_power = (5*UE_sched_ctrl_ptr->pusch_snr[CC_id]-640)/10+30;
target_rx_power= mac->puSch10xSnr/10 + 30;
//printf("\n mac->puSch10xSnr = %d, normalized_rx_power = %d, target_rx_power = %d \n",mac->puSch10xSnr,normalized_rx_power,target_rx_power);
// this assumes accumulated tpc
// make sure that we are only sending a tpc update once a frame, otherwise the control loop will freak out
int32_t framex10psubframe = UE_template_ptr->pusch_tpc_tx_frame * 10 + UE_template_ptr->pusch_tpc_tx_subframe;
/* Power control */
/*
* Compute the expected ULSCH RX power (for the stats)
* This is the normalized RX power and this should be constant (regardless of mcs)
* Is not in dBm, unit from nfapi, converting to dBm
* ToDo: Noise power hard coded to 30
*/
normalized_rx_power = ((5 * UE_sched_ctrl_ptr->pusch_snr[CC_id] - 640) / 10) + 30;
target_rx_power = (mac->puSch10xSnr / 10) + 30;
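/* (5 * snr - 640) / 10 is (snr - 128) / 2, i.e. the nfapi SNR field (0.5 dB steps) converted back to dB;
 * the +30 is the assumed noise floor mentioned in the ToDo above
 */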
/*
* This assumes accumulated tpc
* Make sure that we are only sending a tpc update once a frame, otherwise the control loop will freak out
*/
framex10psubframe = (UE_template_ptr->pusch_tpc_tx_frame * 10) + UE_template_ptr->pusch_tpc_tx_subframe;
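/* framex10psubframe is the absolute subframe of the last TPC command; the test below only sends a new
 * command once at least a full frame (10 subframes) has elapsed, the second clause handling the
 * 10240-subframe (1024-frame) SFN wrap-around
 */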
if (((framex10psubframe + 10) <= (frameP * 10 + subframeP)) || //normal case
if (((framex10psubframe + 10) <= (frameP * 10 + subframeP)) || // normal case
((framex10psubframe > (frameP * 10 + subframeP)) && (((10240 - framex10psubframe + frameP * 10 + subframeP) >= 10)))) { //frame wrap-around
UE_template_ptr->pusch_tpc_tx_frame = frameP;
UE_template_ptr->pusch_tpc_tx_subframe = subframeP;
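/* Accumulated TPC commands of DCI format 0: field 0 -> -1 dB, 1 -> 0 dB, 2 -> +1 dB (36.213 Table 5.1.1.1-2),
 * applied with a +/- 4 dB window around the target RX power
 */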
if (normalized_rx_power > (target_rx_power + 4)) {
tpc = 0; //-1
tpc = 0; // -1
tpc_accumulated--;
} else if (normalized_rx_power < (target_rx_power - 4)) {
tpc = 2; //+1
tpc = 2; // +1
tpc_accumulated++;
} else {
tpc = 1; //0
tpc = 1; // 0
}
} else {
tpc = 1; //0
tpc = 1; // 0
}
//tpc = 1;
if (tpc != 1) {
LOG_D(MAC,
"[eNB %d] ULSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, accumulated %d, normalized/target rx power %d/%d\n",
module_idP, frameP, subframeP, harq_pid, tpc,
tpc_accumulated, normalized_rx_power,
LOG_D(MAC, "[eNB %d] ULSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, accumulated %d, normalized/target rx power %d/%d\n",
module_idP,
frameP,
subframeP,
harq_pid,
tpc,
tpc_accumulated,
normalized_rx_power,
target_rx_power);
}
// new transmission
/* New transmission */
if (round_index == 0) {
ndi = 1 - UE_template_ptr->oldNDI_UL[harq_pid];
ndi = 1 - UE_template_ptr->oldNDI_UL[harq_pid]; // NDI: new data indicator
UE_template_ptr->oldNDI_UL[harq_pid] = ndi;
UE_list->eNB_UE_stats[CC_id][UE_id].normalized_rx_power = normalized_rx_power;
UE_list->eNB_UE_stats[CC_id][UE_id].target_rx_power = target_rx_power;
UE_template_ptr->mcs_UL[harq_pid] = cmin(UE_template_ptr->pre_assigned_mcs_ul, sli->ul[slice_idx].maxmcs);
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1= UE_template_ptr->mcs_UL[harq_pid];
//cmin (UE_template_ptr->pre_assigned_mcs_ul, openair_daq_vars.target_ue_ul_mcs); // adjust, based on user-defined MCS
if (UE_template_ptr->pre_allocated_rb_table_index_ul >= 0) {
rb_table_index = UE_template_ptr->pre_allocated_rb_table_index_ul;
} else {
UE_template_ptr->mcs_UL[harq_pid] = 10; //cmin (10, openair_daq_vars.target_ue_ul_mcs);
UE_template_ptr->mcs_UL[harq_pid] = 10;
rb_table_index = 5; // for PHR
}
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2 = UE_template_ptr->mcs_UL[harq_pid];
// buffer_occupancy = UE_template_ptr->ul_total_buffer;
while (((rb_table[rb_table_index] > (n_rb_ul_tab[CC_id] - first_rb_slice[CC_id]))
|| (rb_table[rb_table_index] > 45))
&& (rb_table_index > 0)) {
while (((rb_table[rb_table_index] > (n_rb_ul_tab[CC_id] - first_rb_slice[CC_id])) ||
(rb_table[rb_table_index] > 45)) && (rb_table_index > 0)) {
rb_table_index--;
}
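/* rb_table[] lists the allowed UL allocation sizes: back off until the allocation fits in the
 * remaining PRBs of the slice and does not exceed 45 PRBs
 */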
UE_template_ptr->TBS_UL[harq_pid] = get_TBS_UL(UE_template_ptr->mcs_UL[harq_pid],
rb_table[rb_table_index]);
UE_template_ptr->TBS_UL[harq_pid] = get_TBS_UL(UE_template_ptr->mcs_UL[harq_pid], rb_table[rb_table_index]);
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx += rb_table[rb_table_index];
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_TBS = UE_template_ptr->TBS_UL[harq_pid];
UE_list->eNB_UE_stats[CC_id][UE_id].total_ulsch_TBS += UE_template_ptr->TBS_UL[harq_pid];
// buffer_occupancy -= TBS;
T(T_ENB_MAC_UE_UL_SCHEDULE, T_INT(module_idP),
T_INT(CC_id), T_INT(rnti), T_INT(frameP),
T_INT(subframeP), T_INT(harq_pid),
T(T_ENB_MAC_UE_UL_SCHEDULE,
T_INT(module_idP),
T_INT(CC_id),
T_INT(rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_INT(UE_template_ptr->mcs_UL[harq_pid]),
T_INT(first_rb_slice[CC_id]),
T_INT(rb_table[rb_table_index]),
T_INT(UE_template_ptr->TBS_UL[harq_pid]), T_INT(ndi));
T_INT(UE_template_ptr->TBS_UL[harq_pid]),
T_INT(ndi));
if (mac_eNB_get_rrc_status(module_idP, rnti) < RRC_CONNECTED)
LOG_D(MAC,
"[eNB %d][PUSCH %d/%x] CC_id %d Frame %d subframeP %d Scheduled UE %d (mcs %d, first rb %d, nb_rb %d, rb_table_index %d, TBS %d, harq_pid %d)\n",
module_idP, harq_pid, rnti, CC_id, frameP,
subframeP, UE_id,
/* What is this test ? */
if (mac_eNB_get_rrc_status(module_idP, rnti) < RRC_CONNECTED) {
LOG_D(MAC, "[eNB %d][PUSCH %d/%x] CC_id %d Frame %d subframeP %d Scheduled UE %d (mcs %d, first rb %d, nb_rb %d, rb_table_index %d, TBS %d, harq_pid %d)\n",
module_idP,
harq_pid,
rnti,
CC_id,
frameP,
subframeP,
UE_id,
UE_template_ptr->mcs_UL[harq_pid],
first_rb_slice[CC_id], rb_table[rb_table_index],
first_rb_slice[CC_id],
rb_table[rb_table_index],
rb_table_index,
UE_template_ptr->TBS_UL[harq_pid], harq_pid);
UE_template_ptr->TBS_UL[harq_pid],
harq_pid);
}
// bad indices : 20 (40 PRB), 21 (45 PRB), 22 (48 PRB)
//store for possible retransmission
/* Store information for possible retransmission */
UE_template_ptr->nb_rb_ul[harq_pid] = rb_table[rb_table_index];
UE_template_ptr->first_rb_ul[harq_pid] = first_rb_slice[CC_id];
UE_sched_ctrl_ptr->ul_scheduled |= (1 << harq_pid);
if (UE_id == UE_list->head)
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_SCHEDULED,
UE_sched_ctrl_ptr->ul_scheduled);
if (UE_id == UE_list->head) {
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_SCHEDULED, UE_sched_ctrl_ptr->ul_scheduled);
}
// adjust scheduled UL bytes by TBS, wait for UL sdus to do final update
LOG_D(MAC,
"[eNB %d] CC_id %d UE %d/%x : adjusting scheduled_ul_bytes, old %d, TBS %d\n",
module_idP, CC_id, UE_id, rnti,
/* Adjust scheduled UL bytes by TBS, wait for UL sdus to do final update */
LOG_D(MAC, "[eNB %d] CC_id %d UE %d/%x : adjusting scheduled_ul_bytes, old %d, TBS %d\n",
module_idP,
CC_id,
UE_id,
rnti,
UE_template_ptr->scheduled_ul_bytes,
UE_template_ptr->TBS_UL[harq_pid]);
UE_template_ptr->scheduled_ul_bytes += UE_template_ptr->TBS_UL[harq_pid];
LOG_D(MAC, "scheduled_ul_bytes, new %d\n", UE_template_ptr->scheduled_ul_bytes);
// Cyclic shift for DM RS
LOG_D(MAC, "scheduled_ul_bytes, new %d\n",
UE_template_ptr->scheduled_ul_bytes);
/* Cyclic shift for DMRS */
cshift = 0; // values from 0 to 7 can be used for mapping the cyclic shift (36.211 , Table 5.5.2.1.1-1)
// save it for a potential retransmission
/* Save it for a potential retransmission */
UE_template_ptr->cshift[harq_pid] = cshift;
/* Setting DCI0 NFAPI struct */
hi_dci0_pdu = &hi_dci0_req_body->hi_dci0_pdu_list[hi_dci0_req_body->number_of_dci + hi_dci0_req_body->number_of_hi];
memset((void *) hi_dci0_pdu, 0,sizeof(nfapi_hi_dci0_request_pdu_t));
hi_dci0_pdu->pdu_type = NFAPI_HI_DCI0_DCI_PDU_TYPE;
@@ -1474,31 +1625,51 @@ schedule_ulsch_rnti(module_id_t module_idP,
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.dl_assignment_index = UE_template_ptr->DAI_ul[sched_subframeP];
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.harq_pid = harq_pid;
hi_dci0_req_body->number_of_dci++;
hi_dci0_req_body->sfnsf = sfnsf_add_subframe(sched_frame, sched_subframeP, 0); //(frameP, subframeP, 4);
hi_dci0_req_body->sfnsf = sfnsf_add_subframe(sched_frame, sched_subframeP, 0);
hi_dci0_req_body->tl.tag = NFAPI_HI_DCI0_REQUEST_BODY_TAG;
hi_dci0_req->sfn_sf = frameP<<4|subframeP; // sfnsf_add_subframe(sched_frame, sched_subframeP, 0); // sunday!
hi_dci0_req->sfn_sf = frameP << 4 | subframeP;
hi_dci0_req->header.message_id = NFAPI_HI_DCI0_REQUEST;
LOG_D(MAC,
"[PUSCH %d] Frame %d, Subframe %d: Adding UL CONFIG.Request for UE %d/%x, ulsch_frame %d, ulsch_subframe %d\n",
harq_pid, frameP, subframeP, UE_id, rnti,
sched_frame, sched_subframeP);
LOG_D(MAC, "[PUSCH %d] Frame %d, Subframe %d: Adding UL CONFIG.Request for UE %d/%x, ulsch_frame %d, ulsch_subframe %d\n",
harq_pid,
frameP,
subframeP,
UE_id,
rnti,
sched_frame,
sched_subframeP);
ul_req_index = 0;
dlsch_flag = 0;
for(ul_req_index = 0; ul_req_index < ul_req_tmp_body->number_of_pdus; ul_req_index++) {
if(ul_req_tmp_body->ul_config_pdu_list[ul_req_index].pdu_type == NFAPI_UL_CONFIG_UCI_HARQ_PDU_TYPE &&
ul_req_tmp_body->ul_config_pdu_list[ul_req_index].uci_harq_pdu.ue_information.ue_information_rel8.rnti == rnti) {
dlsch_flag = 1;
LOG_D(MAC,"Frame %d, Subframe %d:rnti %x ul_req_index %d Switched UCI HARQ to ULSCH HARQ(first)\n",frameP,subframeP,rnti,ul_req_index);
LOG_D(MAC, "Frame %d, Subframe %d:rnti %x ul_req_index %d Switched UCI HARQ to ULSCH HARQ(first)\n",
frameP,
subframeP,
rnti,
ul_req_index);
break;
}
}
// Add UL_config PDUs
fill_nfapi_ulsch_config_request_rel8(&ul_req_tmp_body->ul_config_pdu_list[ul_req_index], cqi_req, cc, UE_template_ptr->physicalConfigDedicated, get_tmode(module_idP, CC_id, UE_id), mac->ul_handle, rnti,
/* Add UL_config PDUs */
fill_nfapi_ulsch_config_request_rel8(&ul_req_tmp_body->ul_config_pdu_list[ul_req_index],
cqi_req,
cc,
UE_template_ptr->physicalConfigDedicated,
get_tmode(module_idP, CC_id, UE_id),
mac->ul_handle,
rnti,
first_rb_slice[CC_id], // resource_block_start
rb_table[rb_table_index], // number_of_resource_blocks
UE_template_ptr->mcs_UL[harq_pid], cshift, // cyclic_shift_2_for_drms
UE_template_ptr->mcs_UL[harq_pid],
cshift, // cyclic_shift_2_for_drms
0, // frequency_hopping_enabled_flag
0, // frequency_hopping_bits
ndi, // new_data_indication
@@ -1507,28 +1678,25 @@ schedule_ulsch_rnti(module_id_t module_idP,
0, // ul_tx_mode
0, // current_tx_nb
0, // n_srs
get_TBS_UL
(UE_template_ptr->
mcs_UL[harq_pid],
rb_table
[rb_table_index]));
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
get_TBS_UL(UE_template_ptr->mcs_UL[harq_pid], rb_table[rb_table_index]));
if (UE_template_ptr->rach_resource_type > 0) { // This is a BL/CE UE allocation
fill_nfapi_ulsch_config_request_emtc(&ul_req_tmp_body->ul_config_pdu_list[ul_req_index], UE_template_ptr->rach_resource_type > 2 ? 2 : 1, 1, //total_number_of_repetitions
1, //repetition_number
(frameP *
10) +
subframeP);
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
/* This is a BL/CE UE allocation */
if (UE_template_ptr->rach_resource_type > 0) {
fill_nfapi_ulsch_config_request_emtc(&ul_req_tmp_body->ul_config_pdu_list[ul_req_index],
UE_template_ptr->rach_resource_type > 2 ? 2 : 1,
1, // total_number_of_repetitions
1, // repetition_number
(frameP * 10) + subframeP);
}
#endif
if(dlsch_flag == 1) {
if(cqi_req == 1) {
if (dlsch_flag == 1) {
if (cqi_req == 1) {
ul_req_tmp_body->ul_config_pdu_list[ul_req_index].pdu_type = NFAPI_UL_CONFIG_ULSCH_CQI_HARQ_RI_PDU_TYPE;
ulsch_harq_information = &ul_req_tmp_body->ul_config_pdu_list[ul_req_index].ulsch_cqi_harq_ri_pdu.harq_information;
ul_req_tmp_body->ul_config_pdu_list[ul_req_index].ulsch_cqi_harq_ri_pdu.initial_transmission_parameters.initial_transmission_parameters_rel8.tl.tag=
ul_req_tmp_body->ul_config_pdu_list[ul_req_index].ulsch_cqi_harq_ri_pdu.initial_transmission_parameters.initial_transmission_parameters_rel8.tl.tag =
NFAPI_UL_CONFIG_REQUEST_INITIAL_TRANSMISSION_PARAMETERS_REL8_TAG;
ul_req_tmp_body->ul_config_pdu_list[ul_req_index].ulsch_cqi_harq_ri_pdu.initial_transmission_parameters.initial_transmission_parameters_rel8.n_srs_initial = 0; // last symbol not punctured
ul_req_tmp_body->ul_config_pdu_list[ul_req_index].ulsch_cqi_harq_ri_pdu.initial_transmission_parameters.initial_transmission_parameters_rel8.initial_number_of_resource_blocks =
@@ -1543,6 +1711,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
}
fill_nfapi_ulsch_harq_information(module_idP, CC_id,rnti, ulsch_harq_information,subframeP);
} else {
ul_req_tmp_body->number_of_pdus++;
}
@@ -1550,45 +1719,83 @@ schedule_ulsch_rnti(module_id_t module_idP,
ul_req_tmp->header.message_id = NFAPI_UL_CONFIG_REQUEST;
ul_req_tmp_body->tl.tag = NFAPI_UL_CONFIG_REQUEST_BODY_TAG;
mac->ul_handle++;
uint16_t ul_sched_frame = sched_frame;
uint16_t ul_sched_subframeP = sched_subframeP;
//add_subframe(&ul_sched_frame, &ul_sched_subframeP, 2);
ul_req_tmp->sfn_sf = ul_sched_frame<<4|ul_sched_subframeP;
add_ue_ulsch_info(module_idP,
CC_id, UE_id, subframeP,
S_UL_SCHEDULED);
LOG_D(MAC, "[eNB %d] CC_id %d Frame %d, subframeP %d: Generated ULSCH DCI for next UE_id %d, format 0\n", module_idP, CC_id, frameP, subframeP, UE_id);
LOG_D(MAC,"[PUSCH %d] SFN/SF:%04d%d UL_CFG:SFN/SF:%04d%d CQI:%d for UE %d/%x\n", harq_pid,frameP,subframeP,ul_sched_frame,ul_sched_subframeP,cqi_req,UE_id,rnti);
// increment first rb for next UE allocation
ul_req_tmp->sfn_sf = sched_frame << 4 | sched_subframeP;
add_ue_ulsch_info(module_idP, CC_id, UE_id, subframeP, S_UL_SCHEDULED);
LOG_D(MAC, "[eNB %d] CC_id %d Frame %d, subframeP %d: Generated ULSCH DCI for next UE_id %d, format 0\n",
module_idP,
CC_id,
frameP,
subframeP,
UE_id);
LOG_D(MAC, "[PUSCH %d] SFN/SF:%04d%d UL_CFG:SFN/SF:%04d%d CQI:%d for UE %d/%x\n",
harq_pid,
frameP,
subframeP,
sched_frame,
sched_subframeP,
cqi_req,
UE_id,
rnti);
/* Increment first rb for next UE allocation */
first_rb_slice[CC_id] += rb_table[rb_table_index];
} else { // round_index > 0 => retransmission
T(T_ENB_MAC_UE_UL_SCHEDULE_RETRANSMISSION,
T_INT(module_idP), T_INT(CC_id), T_INT(rnti),
T_INT(frameP), T_INT(subframeP), T_INT(harq_pid),
T_INT(module_idP),
T_INT(CC_id),
T_INT(rnti),
T_INT(frameP),
T_INT(subframeP),
T_INT(harq_pid),
T_INT(UE_template_ptr->mcs_UL[harq_pid]),
T_INT(first_rb_slice[CC_id]),
T_INT(rb_table[rb_table_index]), T_INT(round_index));
// Add UL_config PDUs
LOG_D(MAC,
"[PUSCH %d] Frame %d, Subframe %d: Adding UL CONFIG.Request for UE %d/%x, ulsch_frame %d, ulsch_subframe %d\n",
harq_pid, frameP, subframeP, UE_id, rnti,
sched_frame, sched_subframeP);
T_INT(rb_table[rb_table_index]),
T_INT(round_index));
/* Add UL_config PDUs */
LOG_D(MAC, "[PUSCH %d] Frame %d, Subframe %d: Adding UL CONFIG.Request for UE %d/%x, ulsch_frame %d, ulsch_subframe %d\n",
harq_pid,
frameP,
subframeP,
UE_id,
rnti,
sched_frame,
sched_subframeP);
ul_req_index = 0;
dlsch_flag = 0;
for(ul_req_index = 0; ul_req_index < ul_req_tmp_body->number_of_pdus; ul_req_index++) {
if(ul_req_tmp_body->ul_config_pdu_list[ul_req_index].pdu_type == NFAPI_UL_CONFIG_UCI_HARQ_PDU_TYPE &&
ul_req_tmp_body->ul_config_pdu_list[ul_req_index].uci_harq_pdu.ue_information.ue_information_rel8.rnti == rnti) {
dlsch_flag = 1;
LOG_D(MAC,"Frame %d, Subframe %d:rnti %x ul_req_index %d Switched UCI HARQ to ULSCH HARQ(first)\n",frameP,subframeP,rnti,ul_req_index);
LOG_D(MAC, "Frame %d, Subframe %d:rnti %x ul_req_index %d Switched UCI HARQ to ULSCH HARQ(first)\n",
frameP,
subframeP,
rnti,
ul_req_index);
break;
}
}
fill_nfapi_ulsch_config_request_rel8(&ul_req_tmp_body->ul_config_pdu_list[ul_req_index], cqi_req, cc, UE_template_ptr->physicalConfigDedicated, get_tmode(module_idP, CC_id, UE_id), mac->ul_handle, rnti,
fill_nfapi_ulsch_config_request_rel8(&ul_req_tmp_body->ul_config_pdu_list[ul_req_index],
cqi_req,
cc,
UE_template_ptr->physicalConfigDedicated,
get_tmode(module_idP, CC_id, UE_id),
mac->ul_handle,
rnti,
UE_template_ptr->first_rb_ul[harq_pid], // resource_block_start
UE_template_ptr->nb_rb_ul[harq_pid], // number_of_resource_blocks
UE_template_ptr->mcs_UL[harq_pid], cshift, // cyclic_shift_2_for_drms
UE_template_ptr->mcs_UL[harq_pid],
cshift, // cyclic_shift_2_for_drms
0, // frequency_hopping_enabled_flag
0, // frequency_hopping_bits
UE_template_ptr->oldNDI_UL[harq_pid], // new_data_indication
......@@ -1597,16 +1804,17 @@ schedule_ulsch_rnti(module_id_t module_idP,
0, // ul_tx_mode
0, // current_tx_nb
0, // n_srs
UE_template_ptr->
TBS_UL[harq_pid]);
UE_template_ptr->TBS_UL[harq_pid]);
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
if (UE_template_ptr->rach_resource_type > 0) { // This is a BL/CE UE allocation
fill_nfapi_ulsch_config_request_emtc(&ul_req_tmp_body->ul_config_pdu_list[ul_req_index], UE_template_ptr->rach_resource_type > 2 ? 2 : 1, 1, //total_number_of_repetitions
1, //repetition_number
(frameP *
10) +
subframeP);
/* This is a BL/CE UE allocation */
if (UE_template_ptr->rach_resource_type > 0) {
fill_nfapi_ulsch_config_request_emtc(&ul_req_tmp_body->ul_config_pdu_list[ul_req_index],
UE_template_ptr->rach_resource_type > 2 ? 2 : 1,
1, // total_number_of_repetitions
1, // repetition_number
(frameP * 10) + subframeP);
}
#endif
@@ -1631,6 +1839,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
}
fill_nfapi_ulsch_harq_information(module_idP, CC_id,rnti, ulsch_harq_information,subframeP);
} else {
ul_req_tmp_body->number_of_pdus++;
}
@@ -1639,38 +1848,17 @@ schedule_ulsch_rnti(module_id_t module_idP,
ul_req_tmp_body->tl.tag = NFAPI_UL_CONFIG_REQUEST_BODY_TAG;
ul_req_tmp->sfn_sf = sched_frame<<4|sched_subframeP;
ul_req_tmp->header.message_id = NFAPI_UL_CONFIG_REQUEST;
LOG_D(MAC,"[PUSCH %d] Frame %d, Subframe %d: Adding UL CONFIG.Request for UE %d/%x, ulsch_frame %d, ulsch_subframe %d cqi_req %d\n",
harq_pid,frameP,subframeP,UE_id,rnti,sched_frame,sched_subframeP,cqi_req);
} /*
else if (round_index > 0) { //we schedule a retransmission
ndi = UE_template_ptr->oldNDI_UL[harq_pid];
if ((round_index&3)==0) {
mcs = openair_daq_vars.target_ue_ul_mcs;
} else {
mcs = rvidx_tab[round_index&3] + 28; //not correct for round_index==4!
}
LOG_I(MAC,"[eNB %d][PUSCH %d/%x] CC_id %d Frame %d subframeP %d Scheduled UE retransmission (mcs %d, first rb %d, nb_rb %d, harq_pid %d, round_index %d)\n",
module_idP,UE_id,rnti,CC_id,frameP,subframeP,mcs,
first_rb[CC_id],UE_template_ptr->nb_rb_ul[harq_pid],
harq_pid, round_index);
rballoc = mac_xface->computeRIV(frame_parms->n_rb_ul_tab[CC_id],
first_rb[CC_id],
UE_template_ptr->nb_rb_ul[harq_pid]);
first_rb[CC_id]+=UE_template_ptr->nb_rb_ul[harq_pid]; // increment for next UE allocation
UE_list->eNB_UE_stats[CC_id][UE_id].num_retransmission_rx+=1;
UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used_retx_rx=UE_template_ptr->nb_rb_ul[harq_pid];
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx+=UE_template_ptr->nb_rb_ul[harq_pid];
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1=mcs;
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2=mcs;
}
*/
LOG_D(MAC, "[PUSCH %d] Frame %d, Subframe %d: Adding UL CONFIG.Request for UE %d/%x, ulsch_frame %d, ulsch_subframe %d cqi_req %d\n",
harq_pid,
frameP,
subframeP,
UE_id,
rnti,
sched_frame,
sched_subframeP,
cqi_req);
} // end of round > 0
} // UE_is_to_be_scheduled
} // loop over all active CC_ids
} // loop over UE_ids
......