From c2fe241d09783fb17f62160662bda11539ed283e Mon Sep 17 00:00:00 2001
From: Wu Jing <wu.jing@cn.fujitsu.com>
Date: Wed, 15 May 2019 18:11:10 +0900
Subject: [PATCH] merge rrc and mac-scd from develop_multi_cw for TM2/TM3 into oai develop

---
 .../CONTROL_MODULES/MAC/flexran_agent_mac.c | 2 +-
 openair2/ENB_APP/flexran_agent_ran_api.c | 8 +-
 openair2/LAYER2/MAC/eNB_scheduler_RA.c | 26 +-
 openair2/LAYER2/MAC/eNB_scheduler_dlsch.c | 74 +-
 openair2/LAYER2/MAC/eNB_scheduler_fairRR.c | 1566 +++++++++++++----
 openair2/LAYER2/MAC/eNB_scheduler_phytest.c | 2 +-
 .../LAYER2/MAC/eNB_scheduler_primitives.c | 571 +++---
 openair2/LAYER2/MAC/mac.h | 43 +-
 openair2/LAYER2/MAC/mac_proto.h | 1 +
 openair2/LAYER2/MAC/pre_processor.c | 30 +-
 openair2/LAYER2/openair2_proc.c | 12 +-
 openair2/RRC/LTE/MESSAGES/asn1_msg.c | 14 +-
 openair2/RRC/LTE/rrc_eNB.c | 69 +-
 openair2/X2AP/x2ap_eNB_generate_messages.c | 12 +-
 openair2/X2AP/x2ap_eNB_handler.c | 6 +-
 15 files changed, 1760 insertions(+), 676 deletions(-)

diff --git a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.c b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.c
index 7110f681d7..fcb25dbd7d 100644
--- a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.c
+++ b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.c
@@ -934,7 +934,7 @@ int flexran_agent_mac_sf_trigger(mid_t mod_id, const void *params, Protocol__Fle
 dl_info[i]->n_harq_status = 2; dl_info[i]->harq_status = malloc(sizeof(uint32_t) * dl_info[i]->n_harq_status); for (j = 0; j < dl_info[i]->n_harq_status; j++) { - dl_info[i]->harq_status[j] = RC.mac[mod_id]->UE_list.UE_sched_ctrl[UE_id].round[UE_PCCID(mod_id, UE_id)][j]; + dl_info[i]->harq_status[j] = RC.mac[mod_id]->UE_list.UE_sched_ctrl[UE_id].round[UE_PCCID(mod_id, UE_id)][j][TB1]; // TODO: This should be different per TB } // LOG_I(FLEXRAN_AGENT, "Sending subframe trigger for frame %d and subframe %d and harq %d (round %d)\n", flexran_get_current_frame(mod_id), (flexran_get_current_subframe(mod_id) + 1) % 10, dl_info[i]->harq_process_id, dl_info[i]->harq_status[0]);
diff --git a/openair2/ENB_APP/flexran_agent_ran_api.c b/openair2/ENB_APP/flexran_agent_ran_api.c
index 500f4f1f3a..44e75fa460 100644
--- a/openair2/ENB_APP/flexran_agent_ran_api.c
+++ b/openair2/ENB_APP/flexran_agent_ran_api.c
@@ -270,7 +270,7 @@ uint32_t flexran_get_total_size_ul_mac_sdus(mid_t mod_id, mid_t ue_id, int cc_id
 uint32_t flexran_get_TBS_dl(mid_t mod_id, mid_t ue_id, int cc_id) { if (!mac_is_present(mod_id)) return 0; - return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].TBS; + return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].TBS[TB1]; } uint32_t flexran_get_TBS_ul(mid_t mod_id, mid_t ue_id, int cc_id)
@@ -312,13 +312,13 @@ uint8_t flexran_get_ue_wpmi(mid_t mod_id, mid_t ue_id, uint8_t cc_id)
 uint8_t flexran_get_mcs1_dl(mid_t mod_id, mid_t ue_id, int cc_id) { if (!mac_is_present(mod_id)) return 0; - return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].dlsch_mcs1; + return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].dlsch_mcs[TB1]; } uint8_t flexran_get_mcs2_dl(mid_t mod_id, mid_t ue_id, int cc_id) { if (!mac_is_present(mod_id)) return 0; - return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].dlsch_mcs2; + return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].dlsch_mcs[TB2]; } uint8_t flexran_get_mcs1_ul(mid_t mod_id, mid_t ue_id, int cc_id)
@@ -372,7 +372,7 @@ uint64_t flexran_get_total_TBS_ul(mid_t mod_id, mid_t ue_id, int cc_id)
 int flexran_get_harq_round(mid_t mod_id, uint8_t cc_id, mid_t ue_id) { if
(!mac_is_present(mod_id)) return 0; - return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].harq_round; + return RC.mac[mod_id]->UE_list.eNB_UE_stats[cc_id][ue_id].harq_round[TB1]; } uint32_t flexran_get_num_mac_sdu_tx(mid_t mod_id, mid_t ue_id, int cc_id) diff --git a/openair2/LAYER2/MAC/eNB_scheduler_RA.c b/openair2/LAYER2/MAC/eNB_scheduler_RA.c index 90afc0a00c..82c2ad21f7 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_RA.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_RA.c @@ -824,7 +824,7 @@ generate_Msg4(module_id_t module_idP, dl_req_body->number_pdu++; ra->state = WAITMSG4ACK; lcid = 0; - UE_list->UE_sched_ctrl[UE_id].round[CC_idP][ra->harq_pid] = 0; + UE_list->UE_sched_ctrl[UE_id].round[CC_idP][ra->harq_pid][TB1] = 0; msg4_header = 1 + 6 + 1; // CR header, CR CE, SDU header AssertFatal((ra->msg4_TBsize - ra->msg4_rrc_sdu_length - msg4_header)>=0, "msg4_TBS %d is too small, change mcs to increase by %d bytes\n",ra->msg4_TBsize,ra->msg4_rrc_sdu_length+msg4_header-ra->msg4_TBsize); @@ -841,7 +841,7 @@ generate_Msg4(module_id_t module_idP, module_idP, CC_idP, frameP, subframeP, ra->msg4_TBsize, ra->msg4_rrc_sdu_length, msg4_header, msg4_padding, msg4_post_padding); DevAssert (UE_id != UE_INDEX_INVALID); // FIXME not sure how to gracefully return // CHECK THIS: &cc[CC_idP].CCCH_pdu.payload[0] - offset = generate_dlsch_header ((unsigned char *) mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0], 1, //num_sdus + offset = generate_dlsch_header ((unsigned char *) mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0][TB1], 1, //num_sdus (unsigned short *) &ra->msg4_rrc_sdu_length, // &lcid, // sdu_lcid 255, // no drx @@ -849,7 +849,7 @@ generate_Msg4(module_id_t module_idP, ra->cont_res_id, // contention res id msg4_padding, // no padding msg4_post_padding); - memcpy ((void *) &mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0][(unsigned char) offset], &cc[CC_idP].CCCH_pdu.payload[0], ra->msg4_rrc_sdu_length); + memcpy ((void *) &mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0][TB1][(unsigned char) offset], &cc[CC_idP].CCCH_pdu.payload[0], ra->msg4_rrc_sdu_length); // DL request mac->TX_req[CC_idP].sfn_sf = (frameP << 4) + subframeP; TX_req = &mac->TX_req[CC_idP].tx_request_body.tx_pdu_list[mac->TX_req[CC_idP].tx_request_body.number_of_pdus]; @@ -857,7 +857,7 @@ generate_Msg4(module_id_t module_idP, TX_req->pdu_index = mac->pdu_index[CC_idP]++; TX_req->num_segments = 1; TX_req->segments[0].segment_length = ra->msg4_TBsize; - TX_req->segments[0].segment_data = mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0]; + TX_req->segments[0].segment_data = mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0][TB1]; mac->TX_req[CC_idP].tx_request_body.number_of_pdus++; // Program ACK/NAK for Msg4 PDSCH int absSF = (frameP * 10) + subframeP; @@ -890,10 +890,10 @@ generate_Msg4(module_id_t module_idP, ul_req_body->number_of_pdus++; T (T_ENB_MAC_UE_DL_PDU_WITH_DATA, T_INT (module_idP), T_INT (CC_idP), T_INT (ra->rnti), T_INT (frameP), T_INT (subframeP), - T_INT (0 /*harq_pid always 0? */ ), T_BUFFER (&mac->UE_list.DLSCH_pdu[CC_idP][0][UE_id].payload[0], ra->msg4_TBsize)); + T_INT (0 /*harq_pid always 0? 
*/ ), T_BUFFER (&mac->UE_list.DLSCH_pdu[CC_idP][0][UE_id].payload[0][TB1], ra->msg4_TBsize)); if (opt_enabled == 1) { - trace_pdu (1, (uint8_t *) mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0], ra->msg4_rrc_sdu_length, UE_id, 3, UE_RNTI (module_idP, UE_id), mac->frame, mac->subframe, 0, 0); + trace_pdu (1, (uint8_t *) mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0][TB1], ra->msg4_rrc_sdu_length, UE_id, 3, UE_RNTI (module_idP, UE_id), mac->frame, mac->subframe, 0, 0); LOG_D (OPT, "[eNB %d][DLSCH] CC_id %d Frame %d trace pdu for rnti %x with size %d\n", module_idP, CC_idP, frameP, UE_RNTI (module_idP, UE_id), ra->msg4_rrc_sdu_length); } } // Msg4 frame/subframe @@ -993,7 +993,7 @@ generate_Msg4(module_id_t module_idP, lcid = 0; // put HARQ process round to 0 ra->harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP); - UE_list->UE_sched_ctrl[UE_id].round[CC_idP][ra->harq_pid] = 0; + UE_list->UE_sched_ctrl[UE_id].round[CC_idP][ra->harq_pid][TB1] = 0; if ((ra->msg4_TBsize - rrc_sdu_length - msg4_header) <= 2) { msg4_padding = ra->msg4_TBsize - rrc_sdu_length - msg4_header; @@ -1011,7 +1011,7 @@ generate_Msg4(module_id_t module_idP, DevAssert(UE_id != UE_INDEX_INVALID); // FIXME not sure how to gracefully return // CHECK THIS: &cc[CC_idP].CCCH_pdu.payload[0] int num_sdus = rrc_sdu_length > 0 ? 1 : 0; - offset = generate_dlsch_header((unsigned char *) mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0], + offset = generate_dlsch_header((unsigned char *) mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char) UE_id].payload[0][TB1], num_sdus, //num_sdus (unsigned short *) &rrc_sdu_length, // &lcid, // sdu_lcid @@ -1020,7 +1020,7 @@ generate_Msg4(module_id_t module_idP, ra->cont_res_id, // contention res id msg4_padding, // no padding msg4_post_padding); - memcpy((void *) &mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char)UE_id].payload[0][(unsigned char)offset], + memcpy((void *) &mac->UE_list.DLSCH_pdu[CC_idP][0][(unsigned char)UE_id].payload[0][TB1][(unsigned char)offset], &cc[CC_idP].CCCH_pdu.payload[0], rrc_sdu_length); // DLSCH Config fill_nfapi_dlsch_config(mac, dl_req_body, ra->msg4_TBsize, mac->pdu_index[CC_idP], ra->rnti, 2, // resource_allocation_type : format 1A/1B/1D @@ -1052,7 +1052,7 @@ generate_Msg4(module_id_t module_idP, rrc_sdu_length, mac->pdu_index[CC_idP], mac->UE_list. - DLSCH_pdu[CC_idP][0][(unsigned char)UE_id].payload[0]); + DLSCH_pdu[CC_idP][0][(unsigned char)UE_id].payload[0][TB1]); mac->pdu_index[CC_idP]++; dl_req->sfn_sf = mac->TX_req[CC_idP].sfn_sf; LOG_D(MAC, "Filling UCI ACK/NAK information, cce_idx %d\n", @@ -1068,12 +1068,12 @@ generate_Msg4(module_id_t module_idP, T_INT(CC_idP), T_INT(ra->rnti), T_INT(frameP), T_INT(subframeP), T_INT(0 /*harq_pid always 0? */ ), T_BUFFER(&mac->UE_list.DLSCH_pdu[CC_idP][0][UE_id]. 
- payload[0], ra->msg4_TBsize)); + payload[0][TB1], ra->msg4_TBsize)); if (opt_enabled == 1) { trace_pdu(DIRECTION_DOWNLINK, (uint8_t *) mac-> - UE_list.DLSCH_pdu[CC_idP][0][(unsigned char)UE_id].payload[0], + UE_list.DLSCH_pdu[CC_idP][0][(unsigned char)UE_id].payload[0][TB1], rrc_sdu_length, UE_id, WS_C_RNTI, UE_RNTI(module_idP, UE_id), mac->frame, mac->subframe, 0, 0); @@ -1157,7 +1157,7 @@ check_Msg4_retransmission(module_id_t module_idP, int CC_idP, // check HARQ status and retransmit if necessary UE_id = find_UE_id(module_idP, ra->rnti); AssertFatal(UE_id >= 0, "Can't find UE for t-crnti\n"); - round = UE_list->UE_sched_ctrl[UE_id].round[CC_idP][ra->harq_pid]; + round = UE_list->UE_sched_ctrl[UE_id].round[CC_idP][ra->harq_pid][TB1]; vrb_map = cc[CC_idP].vrb_map; dl_req = &mac->DL_req[CC_idP]; dl_req_body = &dl_req->dl_config_request_body; diff --git a/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c b/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c index 95e2bf59e8..e41b6225f1 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c @@ -711,11 +711,11 @@ schedule_ue_spec(module_id_t module_idP, harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config, frameP, subframeP); - round_DL = ue_sched_ctrl->round[CC_id][harq_pid]; + round_DL = ue_sched_ctrl->round[CC_id][harq_pid][TB1]; eNB_UE_stats->crnti = rnti; eNB_UE_stats->rrc_status = mac_eNB_get_rrc_status(module_idP, rnti); eNB_UE_stats->harq_pid = harq_pid; - eNB_UE_stats->harq_round = round_DL; + eNB_UE_stats->harq_round[TB1] = round_DL; if (eNB_UE_stats->rrc_status < RRC_CONNECTED) { LOG_D(MAC, "UE %d is not in RRC_CONNECTED\n", @@ -733,9 +733,9 @@ schedule_ue_spec(module_id_t module_idP, eNB_UE_stats->dl_cqi, MIN_CQI_VALUE, MAX_CQI_VALUE); */ if (NFAPI_MODE != NFAPI_MONOLITHIC) { - eNB_UE_stats->dlsch_mcs1 = 10; // cqi_to_mcs[ue_sched_ctrl->dl_cqi[CC_id]]; + eNB_UE_stats->dlsch_mcs[TB1] = cqi_to_mcs[ue_sched_ctrl->dl_cqi[CC_id]]; } else { // this operation is also done in the preprocessor - eNB_UE_stats->dlsch_mcs1 = cmin(eNB_UE_stats->dlsch_mcs1, + eNB_UE_stats->dlsch_mcs[TB1] = cmin(eNB_UE_stats->dlsch_mcs[TB1], eNB->slice_info.dl[slice_idxP].maxmcs); // cmin(eNB_UE_stats->dlsch_mcs1, openair_daq_vars.target_ue_dl_mcs); } @@ -757,14 +757,14 @@ schedule_ue_spec(module_id_t module_idP, round_DL, nb_available_rb, ue_sched_ctrl->dl_cqi[CC_id], - eNB_UE_stats->dlsch_mcs1, + eNB_UE_stats->dlsch_mcs[TB1], eNB_UE_stats->rrc_status); /* Process retransmission */ if (round_DL != 8) { // get freq_allocation nb_rb = ue_template->nb_rb[harq_pid]; - TBS = get_TBS_DL(ue_template->oldmcs1[harq_pid], + TBS = get_TBS_DL(ue_template->oldmcs[harq_pid][TB1], nb_rb); if (nb_rb <= nb_available_rb) { @@ -846,8 +846,8 @@ schedule_ue_spec(module_id_t module_idP, dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid; dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = 1; // Don't adjust power when retransmitting - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = ue_template->oldNDI[harq_pid]; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = ue_template->oldmcs1[harq_pid]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = ue_template->oldNDI[harq_pid][TB1]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = ue_template->oldmcs[harq_pid][TB1]; dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = round_DL & 3; // TDD @@ -859,14 +859,14 @@ schedule_ue_spec(module_id_t module_idP, 
harq_pid, round_DL, ue_template->DAI - 1, - ue_template->oldmcs1[harq_pid]); + ue_template->oldmcs[harq_pid][TB1]); } else { LOG_D(MAC, "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, mcs %d\n", module_idP, CC_id, harq_pid, round_DL, - ue_template->oldmcs1[harq_pid]); + UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][TB1]); } if (!CCE_allocation_infeasible(module_idP, @@ -888,7 +888,7 @@ schedule_ue_spec(module_id_t module_idP, 0, // type 0 allocation from 7.1.6 in 36.213 0, // virtual_resource_block_assignment_flag, unused here 0, // resource_block_coding, to be filled in later - getQm(ue_template->oldmcs1[harq_pid]), + getQm(UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][TB1]), round_DL & 3, // redundancy version 1, // transport blocks 0, // transport block to codeword swap flag @@ -932,7 +932,7 @@ schedule_ue_spec(module_id_t module_idP, eNB_UE_stats->num_retransmission += 1; eNB_UE_stats->rbs_used_retx = nb_rb; eNB_UE_stats->total_rbs_used_retx += nb_rb; - eNB_UE_stats->dlsch_mcs2 = eNB_UE_stats->dlsch_mcs1; + eNB_UE_stats->dlsch_mcs[TB2] = eNB_UE_stats->dlsch_mcs[TB1]; } else { LOG_D(MAC, "[eNB %d] Frame %d CC_id %d : don't schedule UE %d, its retransmission takes more resources than we have\n", @@ -946,7 +946,7 @@ schedule_ue_spec(module_id_t module_idP, rlc_status.bytes_in_buffer = 0; // Now check RLC information to compute number of required RBs // get maximum TBS size for RLC request - TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, + TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs[TB1], nb_available_rb); // add the length for all the control elements (timing adv, drx, etc) : header + payload @@ -1278,7 +1278,7 @@ schedule_ue_spec(module_id_t module_idP, if (ta_len + sdu_length_total + header_length_total > 0) { // Now compute number of required RBs for total sdu length // Assume RAH format 2 - mcs = eNB_UE_stats->dlsch_mcs1; + mcs = eNB_UE_stats->dlsch_mcs[TB1]; if (mcs == 0) { nb_rb = 4; // don't let the TBS get too small @@ -1293,13 +1293,13 @@ schedule_ue_spec(module_id_t module_idP, if (nb_rb > nb_available_rb) { // if we've gone beyond the maximum number of RBs // (can happen if N_RB_DL is odd) - TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, + TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs[TB1], nb_available_rb); nb_rb = nb_available_rb; break; } - TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, + TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs[TB1], nb_rb); } @@ -1342,7 +1342,7 @@ schedule_ue_spec(module_id_t module_idP, } LOG_D(MAC, "dlsch_mcs before and after the rate matching = (%d, %d)\n", - eNB_UE_stats->dlsch_mcs1, + eNB_UE_stats->dlsch_mcs[TB1], mcs); #ifdef DEBUG_eNB_SCHEDULER LOG_D(MAC, "[eNB %d] CC_id %d Generated DLSCH header (mcs %d, TBS %d, nb_rb %d)\n", @@ -1362,7 +1362,7 @@ schedule_ue_spec(module_id_t module_idP, post_padding = 1; } - offset = generate_dlsch_header((unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0], + offset = generate_dlsch_header((unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][TB1], num_sdus, //num_sdus sdu_lengths, // sdu_lcids, @@ -1407,19 +1407,19 @@ schedule_ue_spec(module_id_t module_idP, #endif // cycle through SDUs and place in dlsch_buffer dlsch_pdu = &UE_list->DLSCH_pdu[CC_id][0][UE_id]; - memcpy(&dlsch_pdu->payload[0][offset], + memcpy(&dlsch_pdu->payload[0][TB1][offset], dlsch_buffer, sdu_length_total); // memcpy(RC.mac[0].DLSCH_pdu[0][0].payload[0][offset],dcch_buffer,sdu_lengths[0]); // fill remainder of DLSCH with 0 for (j = 0; j < (TBS - sdu_length_total - offset); j++) { - dlsch_pdu->payload[0][offset + 
sdu_length_total + j] = 0; + dlsch_pdu->payload[0][TB1][offset + sdu_length_total + j] = 0; } if (opt_enabled == 1) { trace_pdu(DIRECTION_DOWNLINK, - (uint8_t *) dlsch_pdu->payload[0], + (uint8_t *) dlsch_pdu->payload[0][TB1], TBS, module_idP, WS_C_RNTI, @@ -1460,9 +1460,9 @@ schedule_ue_spec(module_id_t module_idP, eNB_UE_stats->rbs_used = nb_rb; eNB_UE_stats->num_mac_sdu_tx = num_sdus; eNB_UE_stats->total_rbs_used += nb_rb; - eNB_UE_stats->dlsch_mcs2 = mcs; - eNB_UE_stats->TBS = TBS; - eNB_UE_stats->overhead_bytes = TBS - sdu_length_total; + eNB_UE_stats->dlsch_mcs[TB2] = mcs; + eNB_UE_stats->TBS[TB1] = TBS; + eNB_UE_stats->overhead_bytes[TB1] = TBS - sdu_length_total; eNB_UE_stats->total_sdu_bytes += sdu_length_total; eNB_UE_stats->total_pdu_bytes += TBS; eNB_UE_stats->total_num_pdus += 1; @@ -1535,7 +1535,7 @@ schedule_ue_spec(module_id_t module_idP, dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid; dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = tpc; // dont adjust power when retransmitting - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1 - ue_template->oldNDI[harq_pid]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1 - ue_template->oldNDI[harq_pid][TB1]; dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = mcs; dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 0; //deactivate second codeword @@ -1567,7 +1567,7 @@ schedule_ue_spec(module_id_t module_idP, subframeP, dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level, rnti)) { - ue_sched_ctrl->round[CC_id][harq_pid] = 0; + ue_sched_ctrl->round[CC_id][harq_pid][TB1] = 0; dl_req->number_dci++; dl_req->number_pdu++; dl_req->tl.tag = NFAPI_DL_CONFIG_REQUEST_BODY_TAG; @@ -1597,10 +1597,10 @@ schedule_ue_spec(module_id_t module_idP, UE_id, rnti, harq_pid, - ue_template->oldNDI[harq_pid]); - ue_template->oldNDI[harq_pid] = 1 - ue_template->oldNDI[harq_pid]; - ue_template->oldmcs1[harq_pid] = mcs; - ue_template->oldmcs2[harq_pid] = 0; + ue_template->oldNDI[harq_pid][TB1]); + ue_template->oldNDI[harq_pid][TB1] = 1 - ue_template->oldNDI[harq_pid][TB1]; + ue_template->oldmcs[harq_pid][TB1] = mcs; + ue_template->oldmcs[harq_pid][TB2] = 0; AssertFatal(ue_template->physicalConfigDedicated != NULL, "physicalConfigDedicated is NULL\n"); AssertFatal(ue_template->physicalConfigDedicated->pdsch_ConfigDedicated != NULL, "physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n"); @@ -1632,7 +1632,7 @@ schedule_ue_spec(module_id_t module_idP, (frameP * 10) + subframeP, TBS, eNB->pdu_index[CC_id], - dlsch_pdu->payload[0]); + dlsch_pdu->payload[0][TB1]); LOG_D(MAC, "Filled NFAPI configuration for DCI/DLSCH/TXREQ %d, new SDU\n", eNB->pdu_index[CC_id]); eNB->pdu_index[CC_id]++; @@ -1998,7 +1998,7 @@ schedule_ue_spec_br(module_id_t module_idP, continue; } - round_DL = ue_sched_ctl->round[CC_id][harq_pid]; + round_DL = ue_sched_ctl->round[CC_id][harq_pid][TB1]; AssertFatal (UE_template->physicalConfigDedicated != NULL, "UE_template->physicalConfigDedicated is null\n"); AssertFatal (UE_template->physicalConfigDedicated->ext4 != NULL, "UE_template->physicalConfigDedicated->ext4 is null\n"); AssertFatal (UE_template->physicalConfigDedicated->ext4->epdcch_Config_r11 != NULL, "UE_template->physicalConfigDedicated->ext4->epdcch_Config_r11 is null\n"); @@ -2337,7 +2337,7 @@ schedule_ue_spec_br(module_id_t module_idP, /* Fill remainder of DLSCH with random data */ for (j = 0; j < (TBS - sdu_length_total - 
offset); j++) { - UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset + sdu_length_total + j] = (char)(taus()&0xff); + UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][TB1][offset + sdu_length_total + j] = (char)(taus()&0xff); } if (opt_enabled == 1) { @@ -2408,8 +2408,8 @@ schedule_ue_spec_br(module_id_t module_idP, } // Toggle NDI in first round - UE_template->oldNDI[harq_pid] = 1 - UE_template->oldNDI[harq_pid]; - ue_sched_ctl->round[CC_id][harq_pid] = 0; + UE_template->oldNDI[harq_pid][TB1] = 1 - UE_template->oldNDI[harq_pid][TB1]; + ue_sched_ctl->round[CC_id][harq_pid][TB1] = 0; round_DL = 0; } // if ((sdu_length_total + header_len_dcch + header_len_dtch) > 0) } @@ -2439,7 +2439,7 @@ schedule_ue_spec_br(module_id_t module_idP, dl_config_pdu->mpdcch_pdu.mpdcch_pdu_rel13.mcs = mcs; // adjust according to size of RAR, 208 bits with N1A_PRB=3 dl_config_pdu->mpdcch_pdu.mpdcch_pdu_rel13.pdsch_reptition_levels = 0; // fix to 4 for now dl_config_pdu->mpdcch_pdu.mpdcch_pdu_rel13.redundancy_version = rvseq[round_DL&3]; - dl_config_pdu->mpdcch_pdu.mpdcch_pdu_rel13.new_data_indicator = UE_template->oldNDI[harq_pid]; + dl_config_pdu->mpdcch_pdu.mpdcch_pdu_rel13.new_data_indicator = UE_template->oldNDI[harq_pid][TB1]; dl_config_pdu->mpdcch_pdu.mpdcch_pdu_rel13.harq_process = 0; dl_config_pdu->mpdcch_pdu.mpdcch_pdu_rel13.tpmi_length = 0; dl_config_pdu->mpdcch_pdu.mpdcch_pdu_rel13.tpmi = 0; @@ -2516,7 +2516,7 @@ schedule_ue_spec_br(module_id_t module_idP, TX_req->pdu_index = mac->pdu_index[CC_id]++; TX_req->num_segments = 1; TX_req->segments[0].segment_length = TX_req->pdu_length; - TX_req->segments[0].segment_data = mac->UE_list.DLSCH_pdu[CC_id][0][(unsigned char) UE_id].payload[0]; + TX_req->segments[0].segment_data = mac->UE_list.DLSCH_pdu[CC_id][0][(unsigned char) UE_id].payload[0][TB1]; mac->TX_req[CC_id].tx_request_body.number_of_pdus++; ackNAK_absSF = absSF + 4; ul_req = &mac->UL_req_tmp[CC_id][ackNAK_absSF % 10].ul_config_request_body; diff --git a/openair2/LAYER2/MAC/eNB_scheduler_fairRR.c b/openair2/LAYER2/MAC/eNB_scheduler_fairRR.c index 2626a04664..551d75a5a0 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_fairRR.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_fairRR.c @@ -113,6 +113,9 @@ void pre_scd_nb_rbs_required( module_id_t module_idP, rnti_t rnti; mac_rlc_status_resp_t rlc_status; uint16_t step_size=2; + int header_length_last; + int header_length_total; + N_RB_DL = to_prb(RC.mac[module_idP]->common_channels[CC_id].mib->message.dl_Bandwidth); if(N_RB_DL==50) step_size=3; @@ -125,6 +128,7 @@ void pre_scd_nb_rbs_required( module_id_t module_idP, for (UE_id = 0; UE_id <NUMBER_OF_UE_MAX; UE_id++) { if (pre_scd_activeUE[UE_id] != TRUE) continue; + header_length_total = 0; // store dlsch buffer // clear logical channel interface variables @@ -140,17 +144,26 @@ void pre_scd_nb_rbs_required( module_id_t module_idP, #endif ); UE_template.dl_buffer_total += rlc_status.bytes_in_buffer; //storing the total dlsch buffer + if(rlc_status.bytes_in_buffer > 0){ + header_length_last = 1 + 1 + (rlc_status.bytes_in_buffer >= 128); + header_length_total += header_length_last; + } } - + if (header_length_total) { + header_length_total -= header_length_last; + header_length_total++; + } + UE_template.dl_buffer_total += header_length_total; // end of store dlsch buffer // assgin rbs required // Calculate the number of RBs required by each UE on the basis of logical channel's buffer //update CQI information across component carriers eNB_UE_stats = &pre_scd_eNB_UE_stats[CC_id][UE_id]; - eNB_UE_stats->dlsch_mcs1 = 
cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]]; + eNB_UE_stats->dlsch_mcs[TB1] = cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]]; if (UE_template.dl_buffer_total > 0) { - nb_rbs_required[CC_id][UE_id] = search_rbs_required(eNB_UE_stats->dlsch_mcs1, UE_template.dl_buffer_total, N_RB_DL, step_size); + UE_template.dl_buffer_total += 3; + nb_rbs_required[CC_id][UE_id] = search_rbs_required(eNB_UE_stats->dlsch_mcs[TB1], UE_template.dl_buffer_total, N_RB_DL, step_size); } } } @@ -182,7 +195,7 @@ void dlsch_scheduler_pre_ue_select_fairRR( UE_sched_ctrl *ue_sched_ctl; uint8_t CC_id; int UE_id; - unsigned char round = 0; + //unsigned char round = 0; unsigned char harq_pid = 0; rnti_t rnti; uint16_t i; @@ -193,6 +206,8 @@ void dlsch_scheduler_pre_ue_select_fairRR( uint16_t dlsch_ue_max_num[MAX_NUM_CCs] = {0}; uint16_t saved_dlsch_dci[MAX_NUM_CCs] = {0}; uint8_t end_flag[MAX_NUM_CCs] = {0}; + unsigned char round_1 = 0; + unsigned char round_2 = 0; // Initialization for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) { @@ -227,9 +242,10 @@ void dlsch_scheduler_pre_ue_select_fairRR( ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; harq_pid = frame_subframe2_dl_harq_pid(cc[CC_id].tdd_Config,frameP,subframeP); - round = ue_sched_ctl->round[CC_id][harq_pid]; + round_1 = ue_sched_ctl->round[CC_id][harq_pid][TB1]; + round_2 = ue_sched_ctl->round[CC_id][harq_pid][TB2]; - if (round != 8) { // retransmission + if ((round_1 != 8) || (round_2 != 8)) { // retransmission if(UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid] == 0) { continue; } @@ -277,6 +293,12 @@ void dlsch_scheduler_pre_ue_select_fairRR( dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].nb_rb = nb_rbs_required[CC_id][UE_id]; dlsch_ue_select[CC_id].ue_num++; + if (round_1 != 8) { + ue_sched_ctl->select_tb[CC_id][harq_pid] = TB1; + }else if(round_2 != 8){ + ue_sched_ctl->select_tb[CC_id][harq_pid] = TB2; + } + if (dlsch_ue_select[CC_id].ue_num == dlsch_ue_max_num[CC_id]) { end_flag[CC_id] = 1; break; @@ -352,9 +374,10 @@ void dlsch_scheduler_pre_ue_select_fairRR( continue; harq_pid = frame_subframe2_dl_harq_pid(cc[CC_id].tdd_Config,frameP,subframeP); - round = ue_sched_ctl->round[CC_id][harq_pid]; + round_1 = ue_sched_ctl->round[CC_id][harq_pid][TB1]; + round_2 = ue_sched_ctl->round[CC_id][harq_pid][TB2]; - if (round == 8) { + if ((round_1 == 8) && (round_2 == 8)){ if (nb_rbs_required[CC_id][UE_id] == 0) { continue; } @@ -401,6 +424,8 @@ void dlsch_scheduler_pre_ue_select_fairRR( dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].rnti = rnti; dlsch_ue_select[CC_id].ue_num++; + ue_sched_ctl->select_tb[CC_id][harq_pid] = TB1; + if (dlsch_ue_select[CC_id].ue_num == dlsch_ue_max_num[CC_id]) { end_flag[CC_id] = 1; break; @@ -476,9 +501,10 @@ void dlsch_scheduler_pre_ue_select_fairRR( continue; harq_pid = frame_subframe2_dl_harq_pid(cc[CC_id].tdd_Config,frameP,subframeP); - round = ue_sched_ctl->round[CC_id][harq_pid]; + round_1 = ue_sched_ctl->round[CC_id][harq_pid][TB1]; + round_2 = ue_sched_ctl->round[CC_id][harq_pid][TB2]; - if (round == 8) { + if ((round_1 == 8) && (round_2 == 8)){ if (nb_rbs_required[CC_id][UE_id] == 0) { continue; } @@ -525,6 +551,8 @@ void dlsch_scheduler_pre_ue_select_fairRR( dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].rnti = rnti; dlsch_ue_select[CC_id].ue_num++; + ue_sched_ctl->select_tb[CC_id][harq_pid] = TB1; + if (dlsch_ue_select[CC_id].ue_num == dlsch_ue_max_num[CC_id]) { end_flag[CC_id] = 1; break; @@ -600,7 +628,7 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id, 
#endif memset(rballoc_sub[0],0,(MAX_NUM_CCs)*(N_RBG_MAX)*sizeof(unsigned char)); memset(min_rb_unit,0,sizeof(min_rb_unit)); - + memset(MIMO_mode_indicator[0], 0, MAX_NUM_CCs*N_RBG_MAX*sizeof(unsigned char)); for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) { if (mbsfn_flag[CC_id] > 0) // If this CC is allocated for MBSFN skip it here continue; @@ -668,7 +696,7 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id, rnti = dlsch_ue_select[CC_id].list[i].rnti; ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP); - Round = ue_sched_ctl->round[CC_id][harq_pid]; + Round = ue_sched_ctl->round[CC_id][harq_pid][TB1]; //if (mac_eNB_get_rrc_status(Mod_id, rnti) < RRC_RECONFIGURED || round > 0) { if (mac_eNB_get_rrc_status(Mod_id, rnti) < RRC_RECONFIGURED || Round != 8) { // FIXME @@ -809,8 +837,8 @@ schedule_ue_spec_fairRR(module_id_t module_idP, int UE_id; // unsigned char aggregation; mac_rlc_status_resp_t rlc_status; - unsigned char header_len_dcch = 0, header_len_dcch_tmp = 0; - unsigned char header_len_dtch = 0, header_len_dtch_tmp = 0, header_len_dtch_last = 0; + int header_length_last = 0; + int header_length_total = 0; unsigned char ta_len = 0; unsigned char sdu_lcids[NB_RB_MAX], lcid, offset, num_sdus = 0; uint16_t nb_rb, nb_rb_temp, nb_available_rb; @@ -839,6 +867,9 @@ schedule_ue_spec_fairRR(module_id_t module_idP, nfapi_dl_config_request_pdu_t *dl_config_pdu; int tdd_sfa; int ta_update; + uint8_t select_tb; + uint8_t oppose_tb; + uint8_t first_TB_pdu_create_flg = 0; #ifdef DEBUG_eNB_SCHEDULER int k; #endif @@ -994,17 +1025,21 @@ schedule_ue_spec_fairRR(module_id_t module_idP, nb_available_rb = ue_sched_ctl->pre_nb_available_rbs[CC_id]; harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP); - round = ue_sched_ctl->round[CC_id][harq_pid]; + select_tb = ue_sched_ctl->select_tb[CC_id][harq_pid]; + oppose_tb = select_tb ^ 0x1; + round = ue_sched_ctl->round[CC_id][harq_pid][select_tb]; UE_list->eNB_UE_stats[CC_id][UE_id].crnti = rnti; UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status = mac_eNB_get_rrc_status(module_idP, rnti); UE_list->eNB_UE_stats[CC_id][UE_id].harq_pid = harq_pid; - UE_list->eNB_UE_stats[CC_id][UE_id].harq_round = round; + UE_list->eNB_UE_stats[CC_id][UE_id].harq_round[TB1] = ue_sched_ctl->round[CC_id][harq_pid][TB1]; + UE_list->eNB_UE_stats[CC_id][UE_id].harq_round[TB2] = ue_sched_ctl->round[CC_id][harq_pid][TB2]; if (UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status < RRC_CONNECTED) continue; + header_length_total = 0; sdu_length_total = 0; num_sdus = 0; @@ -1013,12 +1048,15 @@ schedule_ue_spec_fairRR(module_id_t module_idP, eNB_UE_stats->dl_cqi, MIN_CQI_VALUE, MAX_CQI_VALUE); */ if (NFAPI_MODE != NFAPI_MONOLITHIC) { - eNB_UE_stats->dlsch_mcs1 = 10;//cqi_to_mcs[ue_sched_ctl->dl_cqi[CC_id]]; + eNB_UE_stats->dlsch_mcs[TB1] = cqi_to_mcs[ue_sched_ctl->dl_cqi[CC_id]]; + eNB_UE_stats->dlsch_mcs[TB2] = cqi_to_mcs[ue_sched_ctl->dl_cqi[CC_id]]; } else { - eNB_UE_stats->dlsch_mcs1 = cqi_to_mcs[ue_sched_ctl->dl_cqi[CC_id]]; + eNB_UE_stats->dlsch_mcs[TB1] = cqi_to_mcs[ue_sched_ctl->dl_cqi[CC_id]]; + eNB_UE_stats->dlsch_mcs[TB2] = cqi_to_mcs[ue_sched_ctl->dl_cqi[CC_id]]; } - //eNB_UE_stats->dlsch_mcs1 = cmin(eNB_UE_stats->dlsch_mcs1, openair_daq_vars.target_ue_dl_mcs); + eNB_UE_stats->dlsch_mcs[TB1] = eNB_UE_stats->dlsch_mcs[TB1]; //cmin(eNB_UE_stats->dlsch_mcs[TB1], openair_daq_vars.target_ue_dl_mcs); + eNB_UE_stats->dlsch_mcs[TB2] = eNB_UE_stats->dlsch_mcs[TB2]; // store stats 
//UE_list->eNB_UE_stats[CC_id][UE_id].dl_cqi= eNB_UE_stats->dl_cqi; @@ -1034,7 +1072,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP, "[eNB %d] Frame %d: Scheduling UE %d on CC_id %d (rnti %x, harq_pid %d, round %d, rb %d, cqi %d, mcs %d, rrc %d)\n", module_idP, frameP, UE_id, CC_id, rnti, harq_pid, round, nb_available_rb, ue_sched_ctl->dl_cqi[CC_id], - eNB_UE_stats->dlsch_mcs1, + eNB_UE_stats->dlsch_mcs[TB1], UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status); /* process retransmission */ @@ -1044,7 +1082,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP, nb_rb = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid]; TBS = get_TBS_DL(UE_list-> - UE_template[CC_id][UE_id].oldmcs1[harq_pid], + UE_template[CC_id][UE_id].oldmcs[harq_pid][select_tb], nb_rb); if (nb_rb <= nb_available_rb) { @@ -1109,7 +1147,519 @@ schedule_ue_spec_fairRR(module_id_t module_idP, } */ + //TBS set + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[select_tb] = TBS; + + //CW_num set + ue_sched_ctl->cw_num[CC_id][harq_pid] = SINGLE_CW; + + //swap_flg set + ue_sched_ctl->swap_flag[CC_id][harq_pid] = select_tb; + + if(ue_sched_ctl->aperiodic_ri_received[CC_id] == MULTI_RI){ + //2nd TB start + + if(ue_sched_ctl->round[CC_id][harq_pid][oppose_tb] == 8){ + //new SDU + sdu_length_total = 0; + num_sdus = 0; + rlc_status.bytes_in_buffer = 0; + + //GET TBS + TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs[oppose_tb], nb_rb); + + if (ue_sched_ctl->ta_timer == 0) { + ta_update = ue_sched_ctl->ta_update; + + /* if we send TA then set timer to not send it for a while */ + if (ta_update != 31) + ue_sched_ctl->ta_timer = 20; + + /* reset ta_update */ + ue_sched_ctl->ta_update = 31; + } else { + ta_update = 31; + } + + ta_len = (ta_update != 31) ? 2 : 0; + + if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) { + rlc_status = mac_rlc_status_ind( module_idP, + rnti, + module_idP, + frameP, + subframeP, + ENB_FLAG_YES, + MBMS_FLAG_NO, + DCCH, + (TBS - ta_len - header_length_total - sdu_length_total - 3) +#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) + ,0, + 0 +#endif + ); // transport block set size + + sdu_lengths[0] = 0; + + if (rlc_status.bytes_in_buffer > 0) { // There is DCCH to transmit + sdu_lengths[0] = mac_rlc_data_req( module_idP, + rnti, + module_idP, + frameP, + ENB_FLAG_YES, + MBMS_FLAG_NO, + DCCH, + TBS, + (char *) &dlsch_buffer[0] +#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) + ,0, + 0 +#endif + ); + + sdu_length_total = sdu_lengths[0]; + sdu_lcids[0] = DCCH; + UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[0] = DCCH; + UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[DCCH] = sdu_lengths[0]; + UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH] += 1; + UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH] += sdu_lengths[0]; + num_sdus = 1; + header_length_last = 1 + 1 + (sdu_lengths[0] >= 128); + header_length_total += header_length_last; + } + } + + // check for DCCH1 and update header information (assume 2 byte sub-header) + if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) { + rlc_status = mac_rlc_status_ind( module_idP, + rnti, + module_idP, + frameP, + subframeP, + ENB_FLAG_YES, + MBMS_FLAG_NO, + DCCH + 1, + (TBS - ta_len - header_length_total - sdu_length_total - 3) +#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) + ,0, + 0 +#endif + ); // transport block set size less allocations for timing advance and + + // DCCH SDU + sdu_lengths[num_sdus] = 0; + + if (rlc_status.bytes_in_buffer > 0) { + sdu_lengths[num_sdus] += mac_rlc_data_req( module_idP, + rnti, + module_idP, + frameP, + ENB_FLAG_YES, 
+ MBMS_FLAG_NO, + DCCH + 1, + TBS, + (char *)&dlsch_buffer[sdu_length_total] +#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) + ,0, + 0 +#endif + ); + + sdu_lcids[num_sdus] = DCCH1; + sdu_length_total += sdu_lengths[num_sdus]; + UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[num_sdus] = DCCH1; + UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[DCCH1] = sdu_lengths[num_sdus]; + UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH1] += 1; + UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH1] += sdu_lengths[num_sdus]; + header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128); + header_length_total += header_length_last; + num_sdus++; + } + } + + // lcid has to be sorted before the actual allocation (similar struct as ue_list). + /* TODO limited lcid for performance */ + for (lcid = DTCH; lcid >= DTCH; lcid--) { + // TBD: check if the lcid is active + + if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) { // NN: > 2 ? + rlc_status = mac_rlc_status_ind( module_idP, + rnti, + module_idP, + frameP, + subframeP, + ENB_FLAG_YES, + MBMS_FLAG_NO, + lcid, + TBS - ta_len - header_length_total - sdu_length_total - 3 +#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) + ,0, + 0 +#endif + ); + + if (rlc_status.bytes_in_buffer > 0) { + sdu_lengths[num_sdus] = mac_rlc_data_req( module_idP, + rnti, + module_idP, + frameP, + ENB_FLAG_YES, + MBMS_FLAG_NO, + lcid, + TBS, + (char*)&dlsch_buffer[sdu_length_total] +#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) + ,0, + 0 +#endif + ); + + sdu_lcids[num_sdus] = lcid; + sdu_length_total += sdu_lengths[num_sdus]; + UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[num_sdus] = lcid; + UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[lcid] = sdu_lengths[num_sdus]; + UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[lcid] += 1; + UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[lcid] += sdu_lengths[num_sdus]; + + header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128); + header_length_total += header_length_last; + num_sdus++; + UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0; + } // no data for this LCID + } // no TBS left + else { + break; + } + } + /* last header does not have length field */ + if (header_length_total) { + header_length_total -= header_length_last; + header_length_total++; + } + // there is at least one SDU + // if (num_sdus > 0 ){ + if (ta_len + sdu_length_total + header_length_total > 0) { + // Now compute number of required RBs for total sdu length + // Assume RAH format 2 + + //GET 2nd TB MCS + mcs = eNB_UE_stats->dlsch_mcs[oppose_tb]; + + // decrease mcs until TBS falls below required length + while ( (TBS > (sdu_length_total + header_length_total + ta_len)) + && (mcs > 0)) { + mcs--; + TBS = get_TBS_DL(mcs, nb_rb); + } + + // if we have decreased too much or we don't have enough RBs, increase MCS + while ( (TBS < (sdu_length_total + header_length_total + ta_len)) + && ( ((ue_sched_ctl->dl_pow_off[CC_id] > 0) && (mcs < 28)) + ||((ue_sched_ctl->dl_pow_off[CC_id] == 0) && (mcs <= 15)))) { + mcs++; + TBS = get_TBS_DL(mcs, nb_rb); + } + + if ((TBS - header_length_total - sdu_length_total - ta_len) <= 2) { + padding = (TBS - header_length_total - sdu_length_total - ta_len); + post_padding = 0; + } else { + padding = 0; + + post_padding = 1; + } + + offset = generate_dlsch_header( (unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][oppose_tb], + num_sdus, + sdu_lengths, // + sdu_lcids, + 255, // no drx + ta_update, // timing advance + NULL, // contention res id + padding, + post_padding + ); + + // cycle through 
SDUs and place in dlsch_buffer + memcpy(&UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][oppose_tb][offset],dlsch_buffer,sdu_length_total); + // memcpy(RC.mac[0].DLSCH_pdu[0][0].payload[0][offset],dcch_buffer,sdu_lengths[0]); + + // fill remainder of DLSCH with random data + for (j=0; j<(TBS-sdu_length_total-offset); j++) { + UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][oppose_tb][offset+sdu_length_total+j] = (char)(taus()&0xff); + } + + // store stats + eNB->eNB_stats[CC_id].dlsch_bytes_tx += sdu_length_total; + eNB->eNB_stats[CC_id].dlsch_pdus_tx += 1; + UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used = nb_rb; + UE_list->eNB_UE_stats[CC_id][UE_id].num_mac_sdu_tx = num_sdus; + UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used += nb_rb; + UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[oppose_tb] = mcs; + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[oppose_tb] = TBS; + UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes[oppose_tb] = TBS- sdu_length_total; + UE_list->eNB_UE_stats[CC_id][UE_id].total_sdu_bytes += sdu_length_total; + UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes += TBS; + UE_list->eNB_UE_stats[CC_id][UE_id].total_num_pdus +=1; + + //CW_num set + ue_sched_ctl->cw_num[CC_id][harq_pid] = MULTI_CW; + + //swap_flg set + ue_sched_ctl->swap_flag[CC_id][harq_pid] = 0; + } + }else{ + //RETX + + //GET TBS + TBS = get_TBS_DL(UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][oppose_tb], nb_rb); + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[oppose_tb] = TBS; + + //CW_num set + ue_sched_ctl->cw_num[CC_id][harq_pid] = MULTI_CW; + + //swap_flg set + ue_sched_ctl->swap_flag[CC_id][harq_pid] = 0; + } + //2nd TB end + } + switch (get_tmode(module_idP, CC_id, UE_id)) { + case 3: + + if((UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][select_tb] == 0) && ((UE_list->UE_sched_ctrl[UE_id].rsn[CC_id][harq_pid][select_tb] & 3) == 1)){ + UE_list->UE_sched_ctrl[UE_id].rsn[CC_id][harq_pid][select_tb]++; + } + + if((UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][oppose_tb] == 0) && ((UE_list->UE_sched_ctrl[UE_id].rsn[CC_id][harq_pid][oppose_tb] & 3) == 1)){ + UE_list->UE_sched_ctrl[UE_id].rsn[CC_id][harq_pid][oppose_tb]++; + } + + dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu]; + + memset((void *) dl_config_pdu, 0, sizeof(nfapi_dl_config_request_pdu_t)); + + dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE; + dl_config_pdu->pdu_size = (uint8_t) (2 + sizeof(nfapi_dl_config_dci_dl_pdu)); + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_2A; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = get_aggregation( get_bw_index(module_idP, CC_id), + ue_sched_ctl->dl_cqi[CC_id], + format2A + ); + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI : see Table 4-10 from SCF082 - nFAPI specifications + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = 1; // dont adjust power when retransmitting + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.precoding_information = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transport_block_to_codeword_swap_flag = 0; + + if(ue_sched_ctl->cw_num[CC_id][harq_pid] == MULTI_CW){ + if(select_tb == TB1){ + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 
UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB1]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][TB1]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = UE_list->UE_sched_ctrl[UE_id].rsn[CC_id][harq_pid][TB1] & 3; + + if(ue_sched_ctl->round[CC_id][harq_pid][oppose_tb] == 8){ + //2nd TB new SDU + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_2 = 1 - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB2]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_2 = UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[TB2]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = 0; + }else{ + //2nd TB RETX + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_2 = UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB2]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_2 = UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][TB2]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = UE_list->UE_sched_ctrl[UE_id].rsn[CC_id][harq_pid][TB2] & 3; + } + }else{ + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_2 = UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB2]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_2 = UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][TB2]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = UE_list->UE_sched_ctrl[UE_id].rsn[CC_id][harq_pid][TB2] & 3; + + if(ue_sched_ctl->round[CC_id][harq_pid][oppose_tb] == 8){ + //2nd TB new SDU + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1 - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB1]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[TB1]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 0; + }else{ + //2nd TB RETX + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB1]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][TB1]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = UE_list->UE_sched_ctrl[UE_id].rsn[CC_id][harq_pid][TB1] & 3; + } + } + }else{ + if(select_tb == TB1){ + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB1]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][TB1]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = UE_list->UE_sched_ctrl[UE_id].rsn[CC_id][harq_pid][TB1] & 3; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_2 = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_2 = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = 1; + }else{ + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 1; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_2 = UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB2]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_2 = UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][TB2]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = UE_list->UE_sched_ctrl[UE_id].rsn[CC_id][harq_pid][TB2] & 3; + } + } + + if (cc[CC_id].tdd_Config != NULL) { //TDD + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (UE_list->UE_template[CC_id][UE_id].DAI - 1) & 
3; + + LOG_D(MAC, + "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, dai %d, mcs %d\n", + module_idP, CC_id, harq_pid, round, + (UE_list->UE_template[CC_id][UE_id].DAI - 1), + UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][TB1] + ); + } else { + LOG_D(MAC, + "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, mcs %d\n", + module_idP, CC_id, harq_pid, round, + UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][TB1]); + } + + if (!CCE_allocation_infeasible(module_idP, CC_id, 1, subframeP, dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level, rnti)) { + //SET 1st TB + + dl_req->number_dci++; + dl_req->number_pdu++; + dl_req->tl.tag = NFAPI_DL_CONFIG_REQUEST_BODY_TAG; + + eNB->DL_req[CC_id].sfn_sf = frameP<<4 | subframeP; + eNB->DL_req[CC_id].header.message_id = NFAPI_DL_CONFIG_REQUEST; + + fill_nfapi_dlsch_config( eNB, + dl_req, + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[select_tb], + -1, /* retransmission, no pdu_index */ + rnti, + 0, // type 0 allocation from 7.1.6 in 36.213 + 0, // virtual_resource_block_assignment_flag, unused here + 0, // resource_block_coding, to be filled in later + getQm(UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][select_tb]), + UE_list->UE_sched_ctrl[UE_id].rsn[CC_id][harq_pid][select_tb] & 3, // redundancy version + (select_tb + 1), // transport blocks + ue_sched_ctl->swap_flag[CC_id][harq_pid], // transport block to codeword swap flag + ue_sched_ctl->cw_num[CC_id][harq_pid] == SINGLE_CW ? TX_DIVERSITY : LARGE_DELAY_CDD, // transmission_scheme + 2, // number of layers + 1, // number of subbands + // uint8_t codebook_index, + 4, // UE category capacity + UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a, + 0, // delta_power_offset for TM5 + 0, // ngap + 0, // nprb + 3, // transmission mode + 0, //number of PRBs treated as one subband, not used here + 0 // number of beamforming vectors, not used here + ); + + //SET 2nd TB + if(ue_sched_ctl->cw_num[CC_id][harq_pid] == MULTI_CW){ + if(ue_sched_ctl->round[CC_id][harq_pid][oppose_tb] == 8){ + //new_SDU + + ue_sched_ctl->round[CC_id][harq_pid][oppose_tb] = 0; + + dl_req->tl.tag = NFAPI_DL_CONFIG_REQUEST_BODY_TAG; + + UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][oppose_tb] = 1 - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][oppose_tb]; + UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][oppose_tb] = UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[oppose_tb]; + + fill_nfapi_dlsch_config( eNB, + dl_req, + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[oppose_tb], + eNB->pdu_index[CC_id], + rnti, + 0, // type 0 allocation from 7.1.6 in 36.213 + 0, // virtual_resource_block_assignment_flag, unused here + 0, // resource_block_coding, to be filled in later + getQm(UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[oppose_tb]), + 0, // redundancy version + (oppose_tb + 1), // transport blocks + 0, // transport block to codeword swap flag + ue_sched_ctl->cw_num[CC_id][harq_pid] == SINGLE_CW ? 
TX_DIVERSITY : LARGE_DELAY_CDD, // transmission_scheme + 2, // number of layers + 1, // number of subbands + // uint8_t codebook_index, + 4, // UE category capacity + UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a, + 0, // delta_power_offset for TM5 + 0, // ngap + 0, // nprb + 3, // transmission mode + 0, //number of PRBs treated as one subband, not used here + 0 // number of beamforming vectors, not used here + ); + + eNB->TX_req[CC_id].sfn_sf = fill_nfapi_tx_req( &eNB->TX_req[CC_id].tx_request_body, + (frameP*10)+subframeP, + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[oppose_tb], + eNB->pdu_index[CC_id], + eNB->UE_list.DLSCH_pdu[CC_id][0][(unsigned char)UE_id].payload[0][oppose_tb] + ); + + LOG_D(MAC,"Filled NFAPI configuration for DCI/DLSCH/TXREQ %d, new SDU\n",eNB->pdu_index[CC_id]); + + eNB->pdu_index[CC_id]++; + } + else{ + //RETX + + fill_nfapi_dlsch_config( eNB, + dl_req, + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[oppose_tb], + -1, /* retransmission, no pdu_index */ + rnti, + 0, // type 0 allocation from 7.1.6 in 36.213 + 0, // virtual_resource_block_assignment_flag, unused here + 0, // resource_block_coding, to be filled in later + getQm(UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][oppose_tb]), + UE_list->UE_sched_ctrl[UE_id].rsn[CC_id][harq_pid][oppose_tb] & 3, // redundancy version + (oppose_tb + 1), // transport blocks + 0, // transport block to codeword swap flag + ue_sched_ctl->cw_num[CC_id][harq_pid] == SINGLE_CW ? TX_DIVERSITY : LARGE_DELAY_CDD, // transmission_scheme + 2, // number of layers + 1, // number of subbands + // uint8_t codebook_index, + 4, // UE category capacity + UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a, + 0, // delta_power_offset for TM5 + 0, // ngap + 0, // nprb + 3, // transmission mode + 0, //number of PRBs treated as one subband, not used here + 0 // number of beamforming vectors, not used here + ); + } + } + + LOG_D(MAC, + "Filled NFAPI configuration for DCI/DLSCH %d, retransmission round %d\n", + eNB->pdu_index[CC_id], round); + + program_dlsch_acknak( module_idP, + CC_id, + UE_id, + frameP, + subframeP, + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx + ); + + // No TX request for retransmission (check if null request for FAPI) + } else { + LOG_W( MAC, + "Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d\%x, infeasible CCE allocation\n", + frameP, subframeP, UE_id, rnti); + } + + break; + case 1: case 2: case 7: @@ -1144,10 +1694,10 @@ schedule_ue_spec_fairRR(module_id_t module_idP, dl_config_pdu->dci_dl_pdu. dci_dl_pdu_rel8.new_data_indicator_1 = UE_list->UE_template[CC_id][UE_id]. - oldNDI[harq_pid]; + oldNDI[harq_pid][TB1]; dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = UE_list->UE_template[CC_id][UE_id]. - oldmcs1[harq_pid]; + oldmcs[harq_pid][TB1]; dl_config_pdu->dci_dl_pdu. 
dci_dl_pdu_rel8.redundancy_version_1 = round & 3; @@ -1163,15 +1713,15 @@ schedule_ue_spec_fairRR(module_id_t module_idP, (UE_list->UE_template[CC_id][UE_id].DAI - 1), UE_list-> - UE_template[CC_id][UE_id].oldmcs1 - [harq_pid]); + UE_template[CC_id][UE_id].oldmcs + [harq_pid][TB1]); } else { LOG_D(MAC, "[eNB %d] Retransmission CC_id %d : harq_pid %d, round %d, mcs %d\n", module_idP, CC_id, harq_pid, round, UE_list-> - UE_template[CC_id][UE_id].oldmcs1 - [harq_pid]); + UE_template[CC_id][UE_id].oldmcs + [harq_pid][TB1]); } if (!CCE_allocation_infeasible @@ -1188,7 +1738,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP, , rnti, 0, // type 0 allocation from 7.1.6 in 36.213 0, // virtual_resource_block_assignment_flag, unused here 0, // resource_block_coding, to be filled in later - getQm(UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid]), round & 3, // redundancy version + getQm(UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][TB1]), round & 3, // redundancy version 1, // transport blocks 0, // transport block to codeword swap flag cc[CC_id].p_eNB == 1 ? 0 : 1, // transmission_scheme @@ -1232,21 +1782,25 @@ schedule_ue_spec_fairRR(module_id_t module_idP, nb_rb; UE_list->eNB_UE_stats[CC_id][UE_id]. total_rbs_used_retx += nb_rb; - UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1 = - eNB_UE_stats->dlsch_mcs1; - UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs2 = - eNB_UE_stats->dlsch_mcs1; + UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[TB1] = + eNB_UE_stats->dlsch_mcs[TB1]; + UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[TB2] = + eNB_UE_stats->dlsch_mcs[TB2]; } else { LOG_D(MAC, "[eNB %d] Frame %d CC_id %d : don't schedule UE %d, its retransmission takes more resources than we have\n", module_idP, frameP, CC_id, UE_id); } } else { /* This is a potentially new SDU opportunity */ + // 1st TB + + ue_sched_ctl->swap_flag[CC_id][harq_pid] = 0; + rlc_status.bytes_in_buffer = 0; // Now check RLC information to compute number of required RBs // get maximum TBS size for RLC request TBS = - get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_available_rb); + get_TBS_DL(eNB_UE_stats->dlsch_mcs[select_tb], nb_available_rb); // check first for RLC data on DCCH // add the length for all the control elements (timing adv, drx, etc) : header + payload @@ -1264,10 +1818,10 @@ schedule_ue_spec_fairRR(module_id_t module_idP, } ta_len = (ta_update != 31) ? 
2 : 0; - header_len_dcch = 2; // 2 bytes DCCH SDU subheader - if (TBS - ta_len - header_len_dcch > 0) { - rlc_status = mac_rlc_status_ind(module_idP, rnti, module_idP, frameP, subframeP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH, (TBS - ta_len - header_len_dcch) + // RLC data on DCCH + if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) { + rlc_status = mac_rlc_status_ind(module_idP, rnti, module_idP, frameP, subframeP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH, (TBS - ta_len - header_length_total - sdu_length_total - 3) #if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) ,0, 0 #endif @@ -1277,8 +1831,8 @@ schedule_ue_spec_fairRR(module_id_t module_idP, if (rlc_status.bytes_in_buffer > 0) { // There is DCCH to transmit LOG_D(MAC, "[eNB %d] SFN/SF %d.%d, DL-DCCH->DLSCH CC_id %d, Requesting %d bytes from RLC (RRC message)\n", - module_idP, frameP, subframeP, CC_id, - TBS - header_len_dcch); + module_idP, frameP, subframeP, CC_id, TBS - ta_len - header_length_total - sdu_length_total - 3); + sdu_lengths[0] = mac_rlc_data_req(module_idP, rnti, module_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH, TBS, //not used (char *) &dlsch_buffer @@ -1350,14 +1904,18 @@ schedule_ue_spec_fairRR(module_id_t module_idP, LOG_D(MAC, "[eNB %d][DCCH] CC_id %d frame %d subframe %d UE_id %d/%x Got %d bytes bytes_in_buffer %d from release_num %d\n", module_idP, CC_id, frameP, subframeP, UE_id, rnti, sdu_lengths[0],rlc_status.bytes_in_buffer,rrc_release_info.num_UEs); + if(sdu_lengths[0] > 0){ sdu_length_total = sdu_lengths[0]; sdu_lcids[0] = DCCH; - UE_list->eNB_UE_stats[CC_id][UE_id]. - num_pdu_tx[DCCH] += 1; - UE_list-> - eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH] - += sdu_lengths[0]; + UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[0] = DCCH; + UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[DCCH] = sdu_lengths[0]; + UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH] += 1; + UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH] += sdu_lengths[0]; + + header_length_last = 1 + 1 + (sdu_lengths[0] >= 128); + header_length_total += header_length_last; num_sdus = 1; + } #ifdef DEBUG_eNB_SCHEDULER LOG_T(MAC, "[eNB %d][DCCH] CC_id %d Got %d bytes :", @@ -1369,15 +1927,12 @@ schedule_ue_spec_fairRR(module_id_t module_idP, LOG_T(MAC, "\n"); #endif - } else { - header_len_dcch = 0; - sdu_length_total = 0; } } // check for DCCH1 and update header information (assume 2 byte sub-header) - if (TBS - ta_len - header_len_dcch - sdu_length_total > 0) { - rlc_status = mac_rlc_status_ind(module_idP, rnti, module_idP, frameP, subframeP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH + 1, (TBS - ta_len - header_len_dcch - sdu_length_total) + if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) { + rlc_status = mac_rlc_status_ind(module_idP, rnti, module_idP, frameP, subframeP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH + 1, (TBS - ta_len - header_length_total - sdu_length_total - 3) #if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) ,0, 0 #endif @@ -1388,12 +1943,9 @@ schedule_ue_spec_fairRR(module_id_t module_idP, if (rlc_status.bytes_in_buffer > 0) { LOG_D(MAC, "[eNB %d], Frame %d, DCCH1->DLSCH, CC_id %d, Requesting %d bytes from RLC (RRC message)\n", - module_idP, frameP, CC_id, - TBS - header_len_dcch - sdu_length_total); + module_idP, frameP, CC_id, TBS - ta_len - header_length_total - sdu_length_total - 3); sdu_lengths[num_sdus] += mac_rlc_data_req(module_idP, rnti, module_idP, frameP, ENB_FLAG_YES, MBMS_FLAG_NO, DCCH + 1, TBS, //not used - (char *) - &dlsch_buffer - [sdu_length_total] + (char *)&dlsch_buffer[sdu_length_total] #if 
(LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) ,0, 0 #endif @@ -1404,12 +1956,12 @@ schedule_ue_spec_fairRR(module_id_t module_idP, T_INT(DCCH + 1), T_INT(sdu_lengths[num_sdus])); sdu_lcids[num_sdus] = DCCH1; sdu_length_total += sdu_lengths[num_sdus]; - header_len_dcch += 2; - UE_list->eNB_UE_stats[CC_id][UE_id]. - num_pdu_tx[DCCH1] += 1; - UE_list-> - eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH1] - += sdu_lengths[num_sdus]; + UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[num_sdus] = DCCH1; + UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[DCCH1] = sdu_lengths[num_sdus]; + UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH1] += 1; + UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH1] += sdu_lengths[num_sdus]; + header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128); + header_length_total += header_length_last; num_sdus++; #ifdef DEBUG_eNB_SCHEDULER LOG_T(MAC, @@ -1425,24 +1977,16 @@ schedule_ue_spec_fairRR(module_id_t module_idP, } } - // assume the max dtch header size, and adjust it later - header_len_dtch = 0; - header_len_dtch_last = 0; // the header length of the last mac sdu // lcid has to be sorted before the actual allocation (similar struct as ue_list). /* TODO limited lcid for performance */ for (lcid = DTCH; lcid >= DTCH; lcid--) { // TBD: check if the lcid is active - header_len_dtch += 3; - header_len_dtch_last = 3; + LOG_D(MAC, "[eNB %d], Frame %d, DTCH%d->DLSCH, Checking RLC status (tbs %d, len %d)\n", - module_idP, - frameP, - lcid, - TBS, - TBS - ta_len - header_len_dcch - sdu_length_total - header_len_dtch); + module_idP, frameP, lcid, TBS, TBS - ta_len - header_length_total - sdu_length_total - 3); - if (TBS - ta_len - header_len_dcch - sdu_length_total - header_len_dtch > 0) { // NN: > 2 ? + if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) { rlc_status = mac_rlc_status_ind(module_idP, rnti, module_idP, @@ -1451,7 +1995,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP, ENB_FLAG_YES, MBMS_FLAG_NO, lcid, - TBS - ta_len - header_len_dcch - sdu_length_total - header_len_dtch + TBS - ta_len - header_length_total - sdu_length_total - 3 #if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) , 0, 0 #endif @@ -1459,11 +2003,8 @@ schedule_ue_spec_fairRR(module_id_t module_idP, if (rlc_status.bytes_in_buffer > 0) { LOG_D(MAC,"[eNB %d][USER-PLANE DEFAULT DRB] Frame %d : DTCH->DLSCH, Requesting %d bytes from RLC (lcid %d total hdr len %d)\n", - module_idP, - frameP, - TBS - header_len_dcch - sdu_length_total - header_len_dtch, - lcid, - header_len_dtch); + module_idP, frameP, TBS - ta_len - header_length_total - sdu_length_total - 3, lcid, header_length_total); + sdu_lengths[num_sdus] = mac_rlc_data_req(module_idP, rnti, module_idP, @@ -1493,69 +2034,53 @@ schedule_ue_spec_fairRR(module_id_t module_idP, sdu_lcids[num_sdus] = lcid; sdu_length_total += sdu_lengths[num_sdus]; UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[lcid] += 1; + UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[num_sdus] = lcid; + UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[lcid] = sdu_lengths[num_sdus]; UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[lcid] += sdu_lengths[num_sdus]; - if (sdu_lengths[num_sdus] < 128) { - header_len_dtch--; - header_len_dtch_last--; - } - + header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128); + header_length_total += header_length_last; num_sdus++; UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0; - } else { // no data for this LCID - header_len_dtch -= 3; } - } else { // no TBS left - header_len_dtch -= 3; + } else { + // no TBS left 
break; } } - if (header_len_dtch == 0) - header_len_dtch_last = 0; + /* last header does not have length field */ + if (header_length_total) { + header_length_total -= header_length_last; + header_length_total++; + } // there is at least one SDU // if (num_sdus > 0 ){ - if ((sdu_length_total + header_len_dcch + - header_len_dtch) > 0) { + if (ta_len + sdu_length_total + header_length_total > 0) { // Now compute number of required RBs for total sdu length // Assume RAH format 2 - // adjust header lengths - header_len_dcch_tmp = header_len_dcch; - header_len_dtch_tmp = header_len_dtch; - - if (header_len_dtch == 0) { - header_len_dcch = (header_len_dcch > 0) ? 1 : 0; //header_len_dcch; // remove length field - } else { - header_len_dtch_last -= 1; // now use it to find how many bytes has to be removed for the last MAC SDU - header_len_dtch = (header_len_dtch > 0) ? header_len_dtch - header_len_dtch_last : header_len_dtch; // remove length field for the last SDU - } - mcs = eNB_UE_stats->dlsch_mcs1; + mcs = eNB_UE_stats->dlsch_mcs[TB1]; nb_rb = min_rb_unit[CC_id]; TBS = get_TBS_DL(mcs, nb_rb); - while (TBS < - (sdu_length_total + header_len_dcch + - header_len_dtch + ta_len)) { + while (TBS < sdu_length_total + header_length_total + ta_len) { nb_rb += min_rb_unit[CC_id]; // if (nb_rb > nb_available_rb) { // if we've gone beyond the maximum number of RBs // (can happen if N_RB_DL is odd) - TBS = - get_TBS_DL(eNB_UE_stats->dlsch_mcs1, - nb_available_rb); + TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs[TB1], nb_available_rb); nb_rb = nb_available_rb; break; } - TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_rb); + TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs[TB1], nb_rb); } if (nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) { for (j = 0; j < N_RBG[CC_id]; j++) { // for indicating the rballoc for each sub-band - UE_list->UE_template[CC_id][UE_id]. 
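/* Illustrative sketch, not part of the patch: the per-SDU MAC subheader
 * size used above. Each R/R/E/LCID subheader carries a 7-bit L field
 * (1 octet) for SDUs shorter than 128 bytes and a 15-bit L field
 * (2 octets) otherwise, which is the "1 + 1 + (len >= 128)" term; the
 * last subheader of the PDU has no L field at all, which is what the
 * "header_length_total -= header_length_last; header_length_total++;"
 * adjustment accounts for. */
static int mac_subheader_len(int sdu_len, int is_last)
{
  if (is_last)
    return 1;                      /* R/R/E/LCID octet only, no L field   */
  return 1 + 1 + (sdu_len >= 128); /* subheader octet + 7/15-bit L field  */
}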
- rballoc_subband[harq_pid][j] = + UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j]; } } else { @@ -1565,17 +2090,14 @@ schedule_ue_spec_fairRR(module_id_t module_idP, while ((nb_rb_temp > 0) && (j < N_RBG[CC_id])) { if (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 1) { - UE_list-> - UE_template[CC_id] - [UE_id].rballoc_subband[harq_pid][j] = + UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j]; if ((j == N_RBG[CC_id] - 1) && ((N_RB_DL[CC_id] == 25) || (N_RB_DL[CC_id] == 50))) { nb_rb_temp = - nb_rb_temp - min_rb_unit[CC_id] + - 1; + nb_rb_temp - min_rb_unit[CC_id] + 1; } else { nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id]; @@ -1587,28 +2109,24 @@ schedule_ue_spec_fairRR(module_id_t module_idP, } // decrease mcs until TBS falls below required length - while ((TBS > - (sdu_length_total + header_len_dcch + - header_len_dtch + ta_len)) && (mcs > 0)) { + while ((TBS > sdu_length_total + header_length_total + ta_len) && (mcs > 0)) { mcs--; TBS = get_TBS_DL(mcs, nb_rb); } // if we have decreased too much or we don't have enough RBs, increase MCS - while ((TBS < - (sdu_length_total + header_len_dcch + - header_len_dtch + ta_len)) - && (((ue_sched_ctl->dl_pow_off[CC_id] > 0) - && (mcs < 28)) - || ((ue_sched_ctl->dl_pow_off[CC_id] == 0) - && (mcs <= 15)))) { + while ((TBS < sdu_length_total + header_length_total + ta_len) + && (((ue_sched_ctl->dl_pow_off[CC_id] > 0) + && (mcs < 28)) + || ((ue_sched_ctl->dl_pow_off[CC_id] == 0) + && (mcs <= 15)))) { mcs++; TBS = get_TBS_DL(mcs, nb_rb); } LOG_D(MAC, "dlsch_mcs before and after the rate matching = (%d, %d)\n", - eNB_UE_stats->dlsch_mcs1, mcs); + eNB_UE_stats->dlsch_mcs[TB1], mcs); #ifdef DEBUG_eNB_SCHEDULER LOG_D(MAC, "[eNB %d] CC_id %d Generated DLSCH header (mcs %d, TBS %d, nb_rb %d)\n", @@ -1617,24 +2135,13 @@ schedule_ue_spec_fairRR(module_id_t module_idP, // TBS, sdu_length_total, offset, TBS-sdu_length_total-offset); #endif - if ((TBS - header_len_dcch - header_len_dtch - - sdu_length_total - ta_len) <= 2) { - padding = - (TBS - header_len_dcch - header_len_dtch - - sdu_length_total - ta_len); - post_padding = 0; - } else { - padding = 0; - - // adjust the header len - if (header_len_dtch == 0) { - header_len_dcch = header_len_dcch_tmp; - } else { //if (( header_len_dcch==0)&&((header_len_dtch==1)||(header_len_dtch==2))) - header_len_dtch = header_len_dtch_tmp; - } - - post_padding = TBS - sdu_length_total - header_len_dcch - header_len_dtch - ta_len; // 1 is for the postpadding header - } + if (TBS - header_length_total - sdu_length_total - ta_len <= 2) { + padding = TBS - header_length_total - sdu_length_total - ta_len; + post_padding = 0; + } else { + padding = 0; + post_padding = 1; + } #ifdef PHY_TX_THREAD struct timespec time_req, time_rem; @@ -1647,7 +2154,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP, } #endif - offset = generate_dlsch_header((unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0], num_sdus, //num_sdus + offset = generate_dlsch_header((unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][select_tb], num_sdus, //num_sdus sdu_lengths, // sdu_lcids, 255, // no drx ta_update, // timing advance @@ -1657,12 +2164,12 @@ schedule_ue_spec_fairRR(module_id_t module_idP, //#ifdef DEBUG_eNB_SCHEDULER if (ta_update != 31) { LOG_D(MAC, - "[eNB %d][DLSCH] Frame %d Generate header for UE_id %d on CC_id %d: sdu_length_total %d, num_sdus %d, sdu_lengths[0] %d, sdu_lcids[0] %d => payload offset %d,timing 
advance value : %d, padding %d,post_padding %d,(mcs %d, TBS %d, nb_rb %d),header_dcch %d, header_dtch %d\n", + "[eNB %d][DLSCH] Frame %d Generate header for UE_id %d on CC_id %d: sdu_length_total %d, num_sdus %d, sdu_lengths[0] %d, sdu_lcids[0] %d => payload offset %d,timing advance value : %d, padding %d,post_padding %d,(mcs %d, TBS %d, nb_rb %d),header_length %d\n", module_idP, frameP, UE_id, CC_id, sdu_length_total, num_sdus, sdu_lengths[0], sdu_lcids[0], offset, ta_update, padding, post_padding, mcs, TBS, nb_rb, - header_len_dcch, header_len_dtch); + header_length_total); } //#endif @@ -1676,16 +2183,16 @@ schedule_ue_spec_fairRR(module_id_t module_idP, LOG_T(MAC, "\n"); #endif // cycle through SDUs and place in dlsch_buffer - memcpy(&UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset],dlsch_buffer,sdu_length_total); + memcpy(&UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][select_tb][offset],dlsch_buffer,sdu_length_total); // memcpy(RC.mac[0].DLSCH_pdu[0][0].payload[0][offset],dcch_buffer,sdu_lengths[0]); // fill remainder of DLSCH with random data for (j=0; j<(TBS-sdu_length_total-offset); j++) { - UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][offset+sdu_length_total+j] = (char)(taus()&0xff); + UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][select_tb][offset+sdu_length_total+j] = (char)(taus()&0xff); } if (opt_enabled == 1) { - trace_pdu(DIRECTION_DOWNLINK, (uint8_t *)UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0], + trace_pdu(DIRECTION_DOWNLINK, (uint8_t *)UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][select_tb], TBS, module_idP, WS_RA_RNTI, UE_RNTI(module_idP, UE_id), eNB->frame, eNB->subframe,0,0); LOG_D(OPT,"[eNB %d][DLSCH] CC_id %d Frame %d rnti %x with size %d\n", @@ -1693,7 +2200,7 @@ schedule_ue_spec_fairRR(module_id_t module_idP, } T(T_ENB_MAC_UE_DL_PDU_WITH_DATA, T_INT(module_idP), T_INT(CC_id), T_INT(rnti), T_INT(frameP), T_INT(subframeP), - T_INT(harq_pid), T_BUFFER(UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0], TBS)); + T_INT(harq_pid), T_BUFFER(UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][TB1], TBS)); UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid] = nb_rb; add_ue_dlsch_info(module_idP, CC_id, @@ -1705,11 +2212,11 @@ schedule_ue_spec_fairRR(module_id_t module_idP, eNB->eNB_stats[CC_id].dlsch_bytes_tx+=sdu_length_total; eNB->eNB_stats[CC_id].dlsch_pdus_tx+=1; UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used = nb_rb; + UE_list->eNB_UE_stats[CC_id][UE_id].num_mac_sdu_tx = num_sdus; UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used += nb_rb; - UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1=eNB_UE_stats->dlsch_mcs1; - UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs2=mcs; - UE_list->eNB_UE_stats[CC_id][UE_id].TBS = TBS; - UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes= TBS- sdu_length_total; + UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[select_tb]=mcs; + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[select_tb] = TBS; + UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes[select_tb]= TBS- sdu_length_total; UE_list->eNB_UE_stats[CC_id][UE_id].total_sdu_bytes+= sdu_length_total; UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes+= TBS; UE_list->eNB_UE_stats[CC_id][UE_id].total_num_pdus+=1; @@ -1730,32 +2237,454 @@ schedule_ue_spec_fairRR(module_id_t module_idP, int32_t framex10psubframe = UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame*10+UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe; if (((framex10psubframe+10)<=(frameP*10+subframeP)) || //normal case - ((framex10psubframe>(frameP*10+subframeP)) && (((10240-framex10psubframe+frameP*10+subframeP)>=10)))) 
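/* Illustrative sketch, assuming only the OAI helper get_TBS_DL(mcs, nb_rb):
 * the two loops above fit the MCS to the payload for a fixed RB allocation.
 * The MCS is first stepped down while the TBS still exceeds
 * sdu_length_total + header_length_total + ta_len, then stepped back up
 * (capped at 28, or at 15 when dl_pow_off == 0) if it was lowered too far;
 * afterwards a remainder of at most 2 bytes is absorbed as header padding,
 * anything larger becomes post-padding. */
static int fit_mcs_to_payload(int mcs, int nb_rb, int needed_bytes, int dl_pow_off)
{
  int tbs = get_TBS_DL(mcs, nb_rb);
  while (tbs > needed_bytes && mcs > 0)
    tbs = get_TBS_DL(--mcs, nb_rb);       /* shrink TBS towards the payload */
  while (tbs < needed_bytes &&
         ((dl_pow_off > 0 && mcs < 28) || (dl_pow_off == 0 && mcs <= 15)))
    tbs = get_TBS_DL(++mcs, nb_rb);       /* grow again if we undershot     */
  return mcs;
}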
//frame wrap-around - if (ue_sched_ctl->pucch1_cqi_update[CC_id] == 1) { - ue_sched_ctl->pucch1_cqi_update[CC_id] = 0; - UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame=frameP; - UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe=subframeP; - - if (normalized_rx_power>(target_rx_power+4)) { - tpc = 0; //-1 - tpc_accumulated--; - } else if (normalized_rx_power<(target_rx_power-4)) { - tpc = 2; //+1 - tpc_accumulated++; - } else { - tpc = 1; //0 - } - - LOG_D(MAC,"[eNB %d] DLSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, accumulated %d, normalized/target rx power %d/%d\n", - module_idP,frameP, subframeP,harq_pid,tpc, - tpc_accumulated,normalized_rx_power,target_rx_power); - } // Po_PUCCH has been updated - else { - tpc = 1; //0 - } // time to do TPC update - else { - tpc = 1; //0 - } + ((framex10psubframe>(frameP*10+subframeP)) && (((10240-framex10psubframe+frameP*10+subframeP)>=10)))) //frame wrap-around + if (ue_sched_ctl->pucch1_cqi_update[CC_id] == 1) { + ue_sched_ctl->pucch1_cqi_update[CC_id] = 0; + + UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame=frameP; + UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe=subframeP; + + if (normalized_rx_power>(target_rx_power+4)) { + tpc = 0; //-1 + tpc_accumulated--; + } else if (normalized_rx_power<(target_rx_power-4)) { + tpc = 2; //+1 + tpc_accumulated++; + } else { + tpc = 1; //0 + } + + LOG_D(MAC,"[eNB %d] DLSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, accumulated %d, normalized/target rx power %d/%d\n", + module_idP,frameP, subframeP,harq_pid,tpc, + tpc_accumulated,normalized_rx_power,target_rx_power); + + } // Po_PUCCH has been updated + else { + tpc = 1; //0 + } // time to do TPC update + else { + tpc = 1; //0 + } + + //CW_num set + ue_sched_ctl->cw_num[CC_id][harq_pid] = SINGLE_CW; + + //swap_flg set + ue_sched_ctl->swap_flag[CC_id][harq_pid] = 0; + + first_TB_pdu_create_flg = 1; + + }//1st TB end + + if( (first_TB_pdu_create_flg == 1) + && (ue_sched_ctl->aperiodic_ri_received[CC_id] == MULTI_RI) + && (UE_list->UE_template[CC_id][UE_id].dl_buffer_total > UE_list->eNB_UE_stats[CC_id][UE_id].TBS[select_tb])){ + //2nd TB start + + sdu_length_total = 0; + num_sdus = 0; + rlc_status.bytes_in_buffer = 0; + + //GET 1st TB TBS + TBS = UE_list->eNB_UE_stats[CC_id][UE_id].TBS[select_tb]; + + // check first for RLC data on DCCH + // add the length for all the control elements (timing adv, drx, etc) : header + payload + + if (ue_sched_ctl->ta_timer == 0) { + ta_update = ue_sched_ctl->ta_update; + /* if we send TA then set timer to not send it for a while */ + if (ta_update != 31) + ue_sched_ctl->ta_timer = 20; + /* reset ta_update */ + ue_sched_ctl->ta_update = 31; + } else { + ta_update = 31; + } + + ta_len = (ta_update != 31) ? 
2 : 0; + + if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) { + rlc_status = mac_rlc_status_ind( module_idP, + rnti, + module_idP, + frameP, + subframeP, + ENB_FLAG_YES, + MBMS_FLAG_NO, + DCCH, + (TBS - ta_len - header_length_total - sdu_length_total - 3) +#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) + ,0, + 0 +#endif + ); // transport block set size + + sdu_lengths[0] = 0; + + if (rlc_status.bytes_in_buffer > 0) { // There is DCCH to transmit + sdu_lengths[0] = mac_rlc_data_req( module_idP, + rnti, + module_idP, + frameP, + ENB_FLAG_YES, + MBMS_FLAG_NO, + DCCH, + TBS, + (char *) &dlsch_buffer[0] +#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) + ,0, + 0 +#endif + ); + + sdu_length_total = sdu_lengths[0]; + sdu_lcids[0] = DCCH; + UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[0] = DCCH; + UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[DCCH] = sdu_lengths[0]; + UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH] += 1; + UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH] += sdu_lengths[0]; + header_length_last = 1 + 1 + (sdu_lengths[0] >= 128); + header_length_total += header_length_last; + num_sdus = 1; + } + } + + // check for DCCH1 and update header information (assume 2 byte sub-header) + if(TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) { + rlc_status = mac_rlc_status_ind( module_idP, + rnti, + module_idP, + frameP, + subframeP, + ENB_FLAG_YES, + MBMS_FLAG_NO, + DCCH + 1, + (TBS - ta_len - header_length_total - sdu_length_total - 3) +#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) + ,0, + 0 +#endif + );// transport block set size less allocations for timing advance and + // DCCH SDU + sdu_lengths[num_sdus] = 0; + + if (rlc_status.bytes_in_buffer > 0) { + sdu_lengths[num_sdus] += mac_rlc_data_req( module_idP, + rnti, + module_idP, + frameP, + ENB_FLAG_YES, + MBMS_FLAG_NO, + DCCH + 1, + TBS, //not used + (char *)&dlsch_buffer[sdu_length_total] +#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) + ,0, + 0 +#endif + ); + sdu_lcids[num_sdus] = DCCH1; + sdu_length_total += sdu_lengths[num_sdus]; + UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[num_sdus] = DCCH1; + UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[DCCH1] = sdu_lengths[num_sdus]; + UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[DCCH1] += 1; + UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[DCCH1] += sdu_lengths[num_sdus]; + header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128); + header_length_total += header_length_last; + num_sdus++; + } + } + + // lcid has to be sorted before the actual allocation (similar struct as ue_list). + /* TODO limited lcid for performance */ + for (lcid = DTCH; lcid >= DTCH; lcid--) { + // TBD: check if the lcid is active + + if (TBS - ta_len - header_length_total - sdu_length_total - 3 > 0) { // NN: > 2 ? 
+ rlc_status = mac_rlc_status_ind( module_idP, + rnti, + module_idP, + frameP, + subframeP, + ENB_FLAG_YES, + MBMS_FLAG_NO, + lcid, + TBS - ta_len - header_length_total - sdu_length_total - 3 +#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) + ,0, + 0 +#endif + ); + + if (rlc_status.bytes_in_buffer > 0) { + sdu_lengths[num_sdus] = mac_rlc_data_req( module_idP, + rnti, + module_idP, + frameP, + ENB_FLAG_YES, + MBMS_FLAG_NO, + lcid, + TBS, //not used + (char*)&dlsch_buffer[sdu_length_total] +#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) + ,0, + 0 +#endif + ); + + sdu_lcids[num_sdus] = lcid; + sdu_length_total += sdu_lengths[num_sdus]; + UE_list->eNB_UE_stats[CC_id][UE_id].num_pdu_tx[lcid] += 1; + UE_list->eNB_UE_stats[CC_id][UE_id].lcid_sdu[num_sdus] = lcid; + UE_list->eNB_UE_stats[CC_id][UE_id].sdu_length_tx[lcid] = sdu_lengths[num_sdus]; + UE_list->eNB_UE_stats[CC_id][UE_id].num_bytes_tx[lcid] += sdu_lengths[num_sdus]; + + header_length_last = 1 + 1 + (sdu_lengths[num_sdus] >= 128); + header_length_total += header_length_last; + num_sdus++; + UE_list->UE_sched_ctrl[UE_id].uplane_inactivity_timer = 0; + } // no data for this LCID + } // no TBS left + else { + break; + } + } + + /* last header does not have length field */ + if (header_length_total) { + header_length_total -= header_length_last; + header_length_total++; + } + // there is at least one SDU + // if (num_sdus > 0 ){ + if (ta_len + sdu_length_total + header_length_total > 0) { + // Now compute number of required RBs for total sdu length + // Assume RAH format 2 + + //GET 1st TB MCS and RBs + mcs = UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[select_tb]; + nb_rb = UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used; + + // decrease mcs until TBS falls below required length + while ((TBS > (sdu_length_total + header_length_total + ta_len)) && (mcs > 0)) { + mcs--; + TBS = get_TBS_DL(mcs, nb_rb); + } + + // if we have decreased too much or we don't have enough RBs, increase MCS + while ( (TBS < (sdu_length_total + header_length_total + ta_len)) + && ( ((ue_sched_ctl->dl_pow_off[CC_id] > 0) && (mcs < 28)) + || ((ue_sched_ctl->dl_pow_off[CC_id] == 0) && (mcs <= 15))) + ) { + mcs++; + TBS = get_TBS_DL(mcs, nb_rb); + } + + if ((TBS - header_length_total - sdu_length_total - ta_len) <= 2) { + padding = (TBS - header_length_total - sdu_length_total - ta_len); + post_padding = 0; + } else { + padding = 0; + post_padding = 1; // 1 is for the postpadding header + } + + offset = generate_dlsch_header( (unsigned char *) UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][oppose_tb], + num_sdus, //num_sdus + sdu_lengths, // + sdu_lcids, + 255, // no drx + ta_update, // timing advance + NULL, // contention res id + padding, + post_padding); + + // cycle through SDUs and place in dlsch_buffer + memcpy(&UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][oppose_tb][offset],dlsch_buffer,sdu_length_total); + // memcpy(RC.mac[0].DLSCH_pdu[0][0].payload[0][offset],dcch_buffer,sdu_lengths[0]); + + // fill remainder of DLSCH with random data + for (j=0; j<(TBS-sdu_length_total-offset); j++) { + UE_list->DLSCH_pdu[CC_id][0][UE_id].payload[0][oppose_tb][offset+sdu_length_total+j] = (char)(taus()&0xff); + } + + // store stats + eNB->eNB_stats[CC_id].dlsch_bytes_tx += sdu_length_total; + eNB->eNB_stats[CC_id].dlsch_pdus_tx += 1; + UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used = nb_rb; + UE_list->eNB_UE_stats[CC_id][UE_id].num_mac_sdu_tx = num_sdus; + UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used += nb_rb; + UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[oppose_tb] = 
mcs; + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[oppose_tb] = TBS; + UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes[oppose_tb] = TBS- sdu_length_total; + UE_list->eNB_UE_stats[CC_id][UE_id].total_sdu_bytes += sdu_length_total; + UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes += TBS; + UE_list->eNB_UE_stats[CC_id][UE_id].total_num_pdus +=1; + + //CW_num set + ue_sched_ctl->cw_num[CC_id][harq_pid] = MULTI_CW; + } + } //2nd TB end + + if(first_TB_pdu_create_flg == 1){ + + switch (get_tmode(module_idP, CC_id, UE_id)) { + case 3: + + dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu]; + memset((void*)dl_config_pdu,0,sizeof(nfapi_dl_config_request_pdu_t)); + dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE; + dl_config_pdu->pdu_size = (uint8_t)(2+sizeof(nfapi_dl_config_dci_dl_pdu)); + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_2A; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = get_aggregation(get_bw_index(module_idP,CC_id),ue_sched_ctl->dl_cqi[CC_id],format2A); + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI : see Table 4-10 from SCF082 - nFAPI specifications + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = tpc; // dont adjust power when retransmitting + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1-UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB1]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[TB1]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 0 ; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.precoding_information = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transport_block_to_codeword_swap_flag = 0; + + if(ue_sched_ctl->cw_num[CC_id][harq_pid] == MULTI_CW){ + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_2 = 1-UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB2]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_2 = UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[TB2]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = 0; + }else{ + //deactivate second codeword + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_2 = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_2 = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = 1; + } + + if (cc[CC_id].tdd_Config != NULL) { //TDD + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3; + LOG_D(MAC,"[eNB %d] Initial transmission CC_id %d : harq_pid %d, dai %d, mcs %d\n", + module_idP,CC_id,harq_pid, + (UE_list->UE_template[CC_id][UE_id].DAI-1), + mcs); + } else { + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = 0; + LOG_D(MAC,"[eNB %d] Initial transmission CC_id %d : harq_pid %d, mcs %d\n", + module_idP,CC_id,harq_pid,mcs); + } + LOG_D(MAC,"Checking feasibility pdu %d (new sdu)\n",dl_req->number_pdu); + + if (!CCE_allocation_infeasible(module_idP,CC_id,1,subframeP,dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,rnti)) { + //SET 1st TB + ue_sched_ctl->round[CC_id][harq_pid][select_tb] = 0; + dl_req->number_dci++; + dl_req->number_pdu++; + dl_req->tl.tag = NFAPI_DL_CONFIG_REQUEST_BODY_TAG; + 
+ eNB->DL_req[CC_id].sfn_sf = frameP<<4 | subframeP; + eNB->DL_req[CC_id].header.message_id = NFAPI_DL_CONFIG_REQUEST; + // Toggle NDI for next time + LOG_D(MAC,"CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n", + CC_id, frameP,subframeP,UE_id, + rnti,harq_pid,UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][select_tb]); + + UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][select_tb] = 1 - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][select_tb]; + UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][select_tb] = UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[select_tb]; + AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated!=NULL,"physicalConfigDedicated is NULL\n"); + AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated!=NULL,"physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n"); + + fill_nfapi_dlsch_config( eNB, + dl_req, + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[select_tb], + eNB->pdu_index[CC_id], + rnti, + 0, // type 0 allocation from 7.1.6 in 36.213 + 0, // virtual_resource_block_assignment_flag, unused here + 0, // resource_block_coding, to be filled in later + getQm(UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[select_tb]), + 0, // redundancy version + (select_tb + 1), // transport blocks + 0, // transport block to codeword swap flag + ue_sched_ctl->cw_num[CC_id][harq_pid] == SINGLE_CW ? TX_DIVERSITY : LARGE_DELAY_CDD, // transmission_scheme + 2, // number of layers + 1, // number of subbands + // uint8_t codebook_index, + 4, // UE category capacity + UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a, + 0, // delta_power_offset for TM5 + 0, // ngap + 0, // nprb + 3, // transmission mode + 0, //number of PRBs treated as one subband, not used here + 0 // number of beamforming vectors, not used here + ); + eNB->TX_req[CC_id].sfn_sf = fill_nfapi_tx_req( &eNB->TX_req[CC_id].tx_request_body, + (frameP*10)+subframeP, + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[select_tb], + eNB->pdu_index[CC_id], + eNB->UE_list.DLSCH_pdu[CC_id][0][(unsigned char)UE_id].payload[0][select_tb] + ); + + LOG_D(MAC,"Filled NFAPI configuration for DCI/DLSCH/TXREQ %d, new SDU\n",eNB->pdu_index[CC_id]); + + eNB->pdu_index[CC_id]++; + + //SET 2nd TB + if(ue_sched_ctl->cw_num[CC_id][harq_pid] == MULTI_CW){ + ue_sched_ctl->round[CC_id][harq_pid][oppose_tb] = 0; + dl_req->tl.tag = NFAPI_DL_CONFIG_REQUEST_BODY_TAG; + + UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][oppose_tb] = 1 - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][oppose_tb]; + UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][oppose_tb] = UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[oppose_tb]; + AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated!=NULL,"physicalConfigDedicated is NULL\n"); + AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated!=NULL,"physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n"); + + fill_nfapi_dlsch_config( eNB, + dl_req, + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[oppose_tb], + eNB->pdu_index[CC_id], + rnti, + 0, // type 0 allocation from 7.1.6 in 36.213 + 0, // virtual_resource_block_assignment_flag, unused here + 0, // resource_block_coding, to be filled in later + getQm(UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[oppose_tb]), + 0, // redundancy version + (oppose_tb + 1), // transport blocks + 0, // transport block to codeword swap flag + ue_sched_ctl->cw_num[CC_id][harq_pid] == SINGLE_CW ? 
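/* Illustrative sketch (hypothetical helper; the struct type is assumed to
 * be the nFAPI rel8 DCI PDU used above): when only one codeword is
 * scheduled in DCI format 2A, the second transport block is disabled the
 * 36.213 way, i.e. I_MCS = 0 together with redundancy version 1, exactly
 * as in the SINGLE_CW branch above. */
static void disable_second_codeword(nfapi_dl_config_dci_dl_pdu_rel8_t *rel8)
{
  rel8->new_data_indicator_2 = 0;
  rel8->mcs_2                = 0;
  rel8->redundancy_version_2 = 1;  /* mcs 0 + rv 1 => TB disabled */
}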
TX_DIVERSITY : LARGE_DELAY_CDD, // transmission_scheme + 2, // number of layers + 1, // number of subbands + // uint8_t codebook_index, + 4, // UE category capacity + UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a, + 0, // delta_power_offset for TM5 + 0, // ngap + 0, // nprb + 3, // transmission mode + 0, //number of PRBs treated as one subband, not used here + 0 // number of beamforming vectors, not used here + ); + eNB->TX_req[CC_id].sfn_sf = fill_nfapi_tx_req( &eNB->TX_req[CC_id].tx_request_body, + (frameP*10)+subframeP, + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[oppose_tb], + eNB->pdu_index[CC_id], + eNB->UE_list.DLSCH_pdu[CC_id][0][(unsigned char)UE_id].payload[0][oppose_tb] + ); + + LOG_D(MAC,"Filled NFAPI configuration for DCI/DLSCH/TXREQ %d, new SDU\n",eNB->pdu_index[CC_id]); + + eNB->pdu_index[CC_id]++; + } + + program_dlsch_acknak(module_idP,CC_id,UE_id,frameP,subframeP,dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx); + last_dlsch_ue_id[CC_id] = UE_id; + } + + break; + + case 1: + case 2: + case 7: + default: dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu]; memset((void *)dl_config_pdu,0,sizeof(nfapi_dl_config_request_pdu_t)); @@ -1768,13 +2697,13 @@ schedule_ue_spec_fairRR(module_id_t module_idP, dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid; dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = tpc; // dont adjust power when retransmitting - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1-UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]; - dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = mcs; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = 1-UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB1]; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[TB1]; dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 0; //deactivate second codeword dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_2 = 0; dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = 1; - + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tl.tag = NFAPI_DL_CONFIG_REQUEST_DCI_DL_PDU_REL8_TAG; if (cc[CC_id].tdd_Config != NULL) { //TDD dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3; LOG_D(MAC,"[eNB %d] Initial transmission CC_id %d : harq_pid %d, dai %d, mcs %d\n", @@ -1789,60 +2718,67 @@ schedule_ue_spec_fairRR(module_id_t module_idP, LOG_D(MAC,"Checking feasibility pdu %d (new sdu)\n",dl_req->number_pdu); if (!CCE_allocation_infeasible(module_idP,CC_id,1,subframeP,dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,rnti)) { - ue_sched_ctl->round[CC_id][harq_pid] = 0; - dl_req->number_dci++; - dl_req->number_pdu++; - dl_req->tl.tag = NFAPI_DL_CONFIG_REQUEST_BODY_TAG; - eNB->DL_req[CC_id].sfn_sf = frameP<<4 | subframeP; - eNB->DL_req[CC_id].header.message_id = NFAPI_DL_CONFIG_REQUEST; - // Toggle NDI for next time - LOG_D(MAC,"CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n", - CC_id, frameP,subframeP,UE_id, - rnti,harq_pid,UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]); - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]=1-UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]; - UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid] = mcs; - UE_list->UE_template[CC_id][UE_id].oldmcs2[harq_pid] = 0; - 
AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated!=NULL,"physicalConfigDedicated is NULL\n"); - AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated!=NULL,"physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n"); - fill_nfapi_dlsch_config(eNB,dl_req, - TBS, - eNB->pdu_index[CC_id], - rnti, - 0, // type 0 allocation from 7.1.6 in 36.213 - 0, // virtual_resource_block_assignment_flag, unused here - 0, // resource_block_coding, to be filled in later - getQm(mcs), - 0, // redundancy version - 1, // transport blocks - 0, // transport block to codeword swap flag - cc[CC_id].p_eNB == 1 ? 0 : 1, // transmission_scheme - 1, // number of layers - 1, // number of subbands - // uint8_t codebook_index, - 4, // UE category capacity - UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a, - 0, // delta_power_offset for TM5 - 0, // ngap - 0, // nprb - cc[CC_id].p_eNB == 1 ? 1 : 2, // transmission mode - 0, //number of PRBs treated as one subband, not used here - 0 // number of beamforming vectors, not used here - ); - eNB->TX_req[CC_id].sfn_sf = fill_nfapi_tx_req(&eNB->TX_req[CC_id].tx_request_body, - (frameP*10)+subframeP, - TBS, - eNB->pdu_index[CC_id], - eNB->UE_list.DLSCH_pdu[CC_id][0][(unsigned char)UE_id].payload[0]); - LOG_D(MAC,"Filled NFAPI configuration for DCI/DLSCH/TXREQ %d, new SDU\n",eNB->pdu_index[CC_id]); - eNB->pdu_index[CC_id]++; - program_dlsch_acknak(module_idP,CC_id,UE_id,frameP,subframeP,dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx); - last_dlsch_ue_id[CC_id] = UE_id; - } else { - LOG_W(MAC,"Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d/%x, infeasible CCE allocations\n", - frameP,subframeP,UE_id,rnti); - } - } else { // There is no data from RLC or MAC header, so don't schedule - } + ue_sched_ctl->round[CC_id][harq_pid][TB1] = 0; + dl_req->number_dci++; + dl_req->number_pdu++; + dl_req->tl.tag = NFAPI_DL_CONFIG_REQUEST_BODY_TAG; + + eNB->DL_req[CC_id].sfn_sf = frameP<<4 | subframeP; + eNB->DL_req[CC_id].header.message_id = NFAPI_DL_CONFIG_REQUEST; + // Toggle NDI for next time + LOG_D(MAC,"CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n", + CC_id, frameP,subframeP,UE_id, + rnti,harq_pid,UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB1]); + + UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB1]=1-UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid][TB1]; + UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][TB1] = UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[TB1]; + UE_list->UE_template[CC_id][UE_id].oldmcs[harq_pid][TB2] = 0; + AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated!=NULL,"physicalConfigDedicated is NULL\n"); + AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated!=NULL,"physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n"); + + fill_nfapi_dlsch_config(eNB,dl_req, + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[TB1], + eNB->pdu_index[CC_id], + rnti, + 0, // type 0 allocation from 7.1.6 in 36.213 + 0, // virtual_resource_block_assignment_flag, unused here + 0, // resource_block_coding, to be filled in later + getQm(UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[TB1]), + 0, // redundancy version + 1, // transport blocks + 0, // transport block to codeword swap flag + cc[CC_id].p_eNB == 1 ? 
0 : 1, // transmission_scheme + 1, // number of layers + 1, // number of subbands + // uint8_t codebook_index, + 4, // UE category capacity + UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated->p_a, + 0, // delta_power_offset for TM5 + 0, // ngap + 0, // nprb + cc[CC_id].p_eNB == 1 ? 1 : 2, // transmission mode + 0, //number of PRBs treated as one subband, not used here + 0 // number of beamforming vectors, not used here + ); + eNB->TX_req[CC_id].sfn_sf = fill_nfapi_tx_req(&eNB->TX_req[CC_id].tx_request_body, + (frameP*10)+subframeP, + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[TB1], + eNB->pdu_index[CC_id], + eNB->UE_list.DLSCH_pdu[CC_id][0][(unsigned char)UE_id].payload[0][select_tb]); + + LOG_D(MAC,"Filled NFAPI configuration for DCI/DLSCH/TXREQ %d, new SDU\n",eNB->pdu_index[CC_id]); + + eNB->pdu_index[CC_id]++; + program_dlsch_acknak(module_idP,CC_id,UE_id,frameP,subframeP,dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx); + last_dlsch_ue_id[CC_id] = UE_id; + } + else { + LOG_W(MAC,"Frame %d, Subframe %d: Dropping DLSCH allocation for UE %d/%x, infeasible CCE allocations\n", + frameP,subframeP,UE_id,rnti); + } + + } + } } if (cc[CC_id].tdd_Config != NULL) { // TDD @@ -2402,8 +3338,8 @@ void ulsch_scheduler_pre_processor_fairRR(module_id_t module_idP, while ( (tbs < bytes_to_schedule) && (rb_table[rb_table_index]<(frame_parms->N_RB_UL-num_pucch_rb-first_rb[CC_id])) && ((UE_template->phr_info - tx_power) > 0) && (rb_table_index < 32 )) { rb_table_index++; - tbs = get_TBS_UL(mcs,rb_table[rb_table_index])<<3; - tx_power= estimate_ue_tx_power(tbs,rb_table[rb_table_index],0,frame_parms->Ncp,0); + tbs = get_TBS_UL(mcs,rb_table[rb_table_index]); + tx_power= estimate_ue_tx_power(tbs*8,rb_table[rb_table_index],0,frame_parms->Ncp,0); } if ( rb_table[rb_table_index]<3 ) { @@ -2757,12 +3693,12 @@ void schedule_ulsch_rnti_fairRR(module_id_t module_idP, if (cc->tdd_Config) { switch (cc->tdd_Config->subframeAssignment) { case 1: - if( subframeP == 1 || subframeP == 6 ) cqi_req=0; + if( sched_subframeP == 1 || sched_subframeP == 6 ) cqi_req=0; break; case 3: - if( subframeP == 1 ) cqi_req=0; + if( sched_subframeP == 1 ) cqi_req=0; break; @@ -2857,78 +3793,81 @@ void schedule_ulsch_rnti_fairRR(module_id_t module_idP, if (UE_id == UE_list->head) VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_SCHEDULED,UE_sched_ctrl->ul_scheduled); - // adjust total UL buffer status by TBS, wait for UL sdus to do final update - /*LOG_D(MAC,"[eNB %d] CC_id %d UE %d/%x : adjusting ul_total_buffer, old %d, TBS %d\n", module_idP,CC_id,UE_id,rnti,UE_template->ul_total_buffer,UE_template->TBS_UL[harq_pid]); - if (UE_template->ul_total_buffer > UE_template->TBS_UL[harq_pid]) - UE_template->ul_total_buffer -= UE_template->TBS_UL[harq_pid]; - else - UE_template->ul_total_buffer = 0; - LOG_D(MAC,"ul_total_buffer, new %d\n", UE_template->ul_total_buffer);*/ - // Cyclic shift for DM RS - cshift = 0;// values from 0 to 7 can be used for mapping the cyclic shift (36.211 , Table 5.5.2.1.1-1) - // save it for a potential retransmission - UE_template->cshift[harq_pid] = cshift; - hi_dci0_pdu = &hi_dci0_req->hi_dci0_pdu_list[hi_dci0_req->number_of_dci+hi_dci0_req->number_of_hi]; - memset((void *)hi_dci0_pdu,0,sizeof(nfapi_hi_dci0_request_pdu_t)); - hi_dci0_pdu->pdu_type = NFAPI_HI_DCI0_DCI_PDU_TYPE; - hi_dci0_pdu->pdu_size = 2+sizeof(nfapi_hi_dci0_dci_pdu); - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.dci_format = NFAPI_UL_DCI_FORMAT_0; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.aggregation_level = 
aggregation; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.rnti = rnti; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.transmission_power = 6000; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.resource_block_start = first_rb[CC_id]; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.number_of_resource_block = rb_table[rb_table_index]; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.mcs_1 = UE_template->mcs_UL[harq_pid]; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.cyclic_shift_2_for_drms = cshift; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.frequency_hopping_enabled_flag = 0; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.new_data_indication_1 = ndi; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.tpc = tpc; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.cqi_csi_request = cqi_req; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.dl_assignment_index = UE_template->DAI_ul[sched_subframeP]; - hi_dci0_pdu->dci_pdu.dci_pdu_rel8.harq_pid = harq_pid; - hi_dci0_req->number_of_dci++; - hi_dci0_req->sfnsf = sfnsf_add_subframe(sched_frame, sched_subframeP, 0); //(frameP, subframeP, 4) - hi_dci0_req->tl.tag = NFAPI_HI_DCI0_REQUEST_BODY_TAG; - nfapi_hi_dci0_request_t *nfapi_hi_dci0_req = &eNB->HI_DCI0_req[CC_id][subframeP]; - nfapi_hi_dci0_req->sfn_sf = frameP<<4|subframeP; // sfnsf_add_subframe(sched_frame, sched_subframeP, 0); // sunday! - nfapi_hi_dci0_req->header.message_id = NFAPI_HI_DCI0_REQUEST; - LOG_D(MAC,"[PUSCH %d] Frame %d, Subframe %d: Adding UL CONFIG.Request for UE %d/%x, ulsch_frame %d, ulsch_subframe %d\n", - harq_pid,frameP,subframeP,UE_id,rnti,sched_frame,sched_subframeP); - ul_req_index = 0; - dlsch_flag = 0; - - for(ul_req_index = 0; ul_req_index < ul_req_tmp->number_of_pdus; ul_req_index++) { - if((ul_req_tmp->ul_config_pdu_list[ul_req_index].pdu_type == NFAPI_UL_CONFIG_UCI_HARQ_PDU_TYPE) && - (ul_req_tmp->ul_config_pdu_list[ul_req_index].uci_harq_pdu.ue_information.ue_information_rel8.rnti == rnti)) { - dlsch_flag = 1; - LOG_D(MAC,"Frame %d, Subframe %d:rnti %x ul_req_index %d Switched UCI HARQ to ULSCH HARQ(first)\n",frameP,subframeP,rnti,ul_req_index); - break; - } - } - - // Add UL_config PDUs - fill_nfapi_ulsch_config_request_rel8(&ul_req_tmp->ul_config_pdu_list[ul_req_index], - cqi_req, - cc, - UE_template->physicalConfigDedicated, - get_tmode(module_idP,CC_id,UE_id), - eNB->ul_handle, - rnti, - first_rb[CC_id], // resource_block_start - rb_table[rb_table_index], // number_of_resource_blocks - UE_template->mcs_UL[harq_pid], - cshift, // cyclic_shift_2_for_drms - 0, // frequency_hopping_enabled_flag - 0, // frequency_hopping_bits - ndi, // new_data_indication - 0, // redundancy_version - harq_pid, // harq_process_number - 0, // ul_tx_mode - 0, // current_tx_nb - 0, // n_srs - get_TBS_UL(UE_template->mcs_UL[harq_pid], - rb_table[rb_table_index]) - ); + // adjust total UL buffer status by TBS, wait for UL sdus to do final update + /*LOG_D(MAC,"[eNB %d] CC_id %d UE %d/%x : adjusting ul_total_buffer, old %d, TBS %d\n", module_idP,CC_id,UE_id,rnti,UE_template->ul_total_buffer,UE_template->TBS_UL[harq_pid]); + if (UE_template->ul_total_buffer > UE_template->TBS_UL[harq_pid]) + UE_template->ul_total_buffer -= UE_template->TBS_UL[harq_pid]; + else + UE_template->ul_total_buffer = 0; + LOG_D(MAC,"ul_total_buffer, new %d\n", UE_template->ul_total_buffer);*/ + // Cyclic shift for DM RS + cshift = 0;// values from 0 to 7 can be used for mapping the cyclic shift (36.211 , Table 5.5.2.1.1-1) + // save it for a potential retransmission + UE_template->cshift[harq_pid] = cshift; + + hi_dci0_pdu = &hi_dci0_req->hi_dci0_pdu_list[hi_dci0_req->number_of_dci+hi_dci0_req->number_of_hi]; + 
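/* Illustrative note on the ulsch_scheduler_pre_processor_fairRR change
 * above (assuming, as the new call implies, that get_TBS_UL() returns
 * bytes and estimate_ue_tx_power() expects a size in bits): the RB-growing
 * loop compares tbs against bytes_to_schedule, so both must be in bytes,
 * while the transmit-power estimate is fed tbs*8 bits. A minimal sketch,
 * reusing the surrounding variables: */
int tbs_bytes = get_TBS_UL(mcs, rb_table[rb_table_index]);            /* bytes */
int tx_power  = estimate_ue_tx_power(tbs_bytes * 8,                   /* bits  */
                                     rb_table[rb_table_index],
                                     0, frame_parms->Ncp, 0);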
memset((void*)hi_dci0_pdu,0,sizeof(nfapi_hi_dci0_request_pdu_t)); + hi_dci0_pdu->pdu_type = NFAPI_HI_DCI0_DCI_PDU_TYPE; + hi_dci0_pdu->pdu_size = 2+sizeof(nfapi_hi_dci0_dci_pdu); + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.tl.tag = NFAPI_HI_DCI0_REQUEST_DCI_PDU_REL8_TAG; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.dci_format = NFAPI_UL_DCI_FORMAT_0; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.aggregation_level = aggregation; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.rnti = rnti; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.transmission_power = 6000; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.resource_block_start = first_rb[CC_id]; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.number_of_resource_block = rb_table[rb_table_index]; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.mcs_1 = UE_template->mcs_UL[harq_pid]; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.cyclic_shift_2_for_drms = cshift; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.frequency_hopping_enabled_flag = 0; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.new_data_indication_1 = ndi; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.tpc = tpc; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.cqi_csi_request = cqi_req; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.dl_assignment_index = UE_template->DAI_ul[sched_subframeP]; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.harq_pid = harq_pid; + + hi_dci0_req->number_of_dci++; + hi_dci0_req->sfnsf = sfnsf_add_subframe(sched_frame, sched_subframeP, 0); //(frameP, subframeP, 4) + hi_dci0_req->tl.tag = NFAPI_HI_DCI0_REQUEST_BODY_TAG; + nfapi_hi_dci0_request_t *nfapi_hi_dci0_req = &eNB->HI_DCI0_req[CC_id][subframeP]; + nfapi_hi_dci0_req->sfn_sf = frameP<<4|subframeP; // sfnsf_add_subframe(sched_frame, sched_subframeP, 0); // sunday! + nfapi_hi_dci0_req->header.message_id = NFAPI_HI_DCI0_REQUEST; + + LOG_D(MAC,"[PUSCH %d] Frame %d, Subframe %d: Adding UL CONFIG.Request for UE %d/%x, ulsch_frame %d, ulsch_subframe %d\n", + harq_pid,frameP,subframeP,UE_id,rnti,sched_frame,sched_subframeP); + + ul_req_index = 0; + dlsch_flag = 0; + for(ul_req_index = 0;ul_req_index < ul_req_tmp->number_of_pdus;ul_req_index++){ + if((ul_req_tmp->ul_config_pdu_list[ul_req_index].pdu_type == NFAPI_UL_CONFIG_UCI_HARQ_PDU_TYPE) && + (ul_req_tmp->ul_config_pdu_list[ul_req_index].uci_harq_pdu.ue_information.ue_information_rel8.rnti == rnti)){ + dlsch_flag = 1; + LOG_D(MAC,"Frame %d, Subframe %d:rnti %x ul_req_index %d Switched UCI HARQ to ULSCH HARQ(first)\n",frameP,subframeP,rnti,ul_req_index); + break; + } + } + // Add UL_config PDUs + fill_nfapi_ulsch_config_request_rel8(&ul_req_tmp->ul_config_pdu_list[ul_req_index], + cqi_req, + cc, + UE_template->physicalConfigDedicated, + get_tmode(module_idP,CC_id,UE_id), + eNB->ul_handle, + rnti, + first_rb[CC_id], // resource_block_start + rb_table[rb_table_index], // number_of_resource_blocks + UE_template->mcs_UL[harq_pid], + cshift, // cyclic_shift_2_for_drms + 0, // frequency_hopping_enabled_flag + 0, // frequency_hopping_bits + ndi, // new_data_indication + 0, // redundancy_version + harq_pid, // harq_process_number + 0, // ul_tx_mode + 0, // current_tx_nb + 0, // n_srs + get_TBS_UL(UE_template->mcs_UL[harq_pid], + rb_table[rb_table_index]) + ); #if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0)) if (UE_template->rach_resource_type>0) { // This is a BL/CE UE allocation @@ -3028,6 +3967,7 @@ void schedule_ulsch_rnti_fairRR(module_id_t module_idP, memset((void *)hi_dci0_pdu,0,sizeof(nfapi_hi_dci0_request_pdu_t)); hi_dci0_pdu->pdu_type = NFAPI_HI_DCI0_DCI_PDU_TYPE; hi_dci0_pdu->pdu_size = 2+sizeof(nfapi_hi_dci0_dci_pdu); + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.tl.tag = NFAPI_HI_DCI0_REQUEST_DCI_PDU_REL8_TAG; 
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.dci_format = NFAPI_UL_DCI_FORMAT_0; hi_dci0_pdu->dci_pdu.dci_pdu_rel8.aggregation_level = aggregation; hi_dci0_pdu->dci_pdu.dci_pdu_rel8.rnti = rnti; diff --git a/openair2/LAYER2/MAC/eNB_scheduler_phytest.c b/openair2/LAYER2/MAC/eNB_scheduler_phytest.c index 2f566c0d7a..46411cd7cc 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_phytest.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_phytest.c @@ -182,7 +182,7 @@ schedule_ue_spec_phy_test( (frameP*10)+subframeP, TBS, eNB->pdu_index[CC_id], - eNB->UE_list.DLSCH_pdu[CC_id][0][(unsigned char)UE_id].payload[0]); + eNB->UE_list.DLSCH_pdu[CC_id][0][(unsigned char)UE_id].payload[0][TB1]); } else { LOG_W(MAC,"[eNB_scheduler_phytest] DCI allocation infeasible!\n"); diff --git a/openair2/LAYER2/MAC/eNB_scheduler_primitives.c b/openair2/LAYER2/MAC/eNB_scheduler_primitives.c index 0385ae6730..e4c5ff1331 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_primitives.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_primitives.c @@ -1335,6 +1335,7 @@ program_dlsch_acknak(module_id_t module_idP, fill_nfapi_harq_information(module_idP, CC_idP, rnti, + (frameP * 10) + subframeP, harq_information, cce_idx); } @@ -1482,15 +1483,26 @@ void fill_nfapi_harq_information(module_id_t module_idP, int CC_idP, uint16_t rntiP, + uint16_t absSFP, nfapi_ul_config_harq_information *harq_information, uint8_t cce_idxP) //------------------------------------------------------------------------------ { + UE_sched_ctrl *ue_sched_ctl; + uint8_t harq_pid; + frame_t frameP; + sub_frame_t subframeP; eNB_MAC_INST *eNB = RC.mac[module_idP]; COMMON_channels_t *cc = &eNB->common_channels[CC_idP]; UE_list_t *UE_list = &eNB->UE_list; - int UE_id = find_UE_id(module_idP, - rntiP); + + int UE_id = find_UE_id(module_idP, rntiP); + + frameP = absSFP / 10; + subframeP = absSFP % 10; + ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; + harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config, frameP, subframeP); + AssertFatal(UE_id >= 0, "UE_id cannot be found, impossible\n"); AssertFatal(UE_list != NULL, "UE_list is null\n"); harq_information->harq_information_rel11.tl.tag = NFAPI_UL_CONFIG_REQUEST_HARQ_INFORMATION_REL11_TAG; @@ -1546,14 +1558,22 @@ fill_nfapi_harq_information(module_id_t module_idP, } harq_information->harq_information_rel10_tdd.tl.tag = NFAPI_UL_CONFIG_REQUEST_HARQ_INFORMATION_REL10_TDD_TAG; + if(ue_sched_ctl->cw_num[CC_idP][harq_pid] == MULTI_CW){ harq_information->harq_information_rel10_tdd.harq_size = 2; + }else{ + harq_information->harq_information_rel10_tdd.harq_size = 1; + } harq_information->harq_information_rel10_tdd.n_pucch_1_0 = cc->radioResourceConfigCommon->pucch_ConfigCommon.n1PUCCH_AN + cce_idxP; harq_information->harq_information_rel10_tdd.number_of_pucch_resources = 1; } else { harq_information->harq_information_rel9_fdd.tl.tag = NFAPI_UL_CONFIG_REQUEST_HARQ_INFORMATION_REL9_FDD_TAG; harq_information->harq_information_rel9_fdd.number_of_pucch_resources = 1; harq_information->harq_information_rel9_fdd.ack_nack_mode = 0; // 1a/b + if(ue_sched_ctl->cw_num[CC_idP][harq_pid] == MULTI_CW){ harq_information->harq_information_rel9_fdd.harq_size = 2; + }else{ + harq_information->harq_information_rel9_fdd.harq_size = 1; + } harq_information->harq_information_rel9_fdd.n_pucch_1_0 = cc->radioResourceConfigCommon->pucch_ConfigCommon.n1PUCCH_AN + cce_idxP; } @@ -1587,6 +1607,7 @@ fill_nfapi_uci_acknak(module_id_t module_idP, fill_nfapi_harq_information(module_idP, CC_idP, rntiP, + absSFP, &ul_config_pdu->uci_harq_pdu.harq_information, cce_idxP); LOG_D(MAC, 
"Filled in UCI HARQ request for rnti %x SF %d.%d acknakSF %d.%d, cce_idxP %d-> n1_pucch %d\n", @@ -1634,6 +1655,7 @@ fill_nfapi_dlsch_config(eNB_MAC_INST *eNB, uint8_t num_bf_vector) //------------------------------------------------------------------------------ { + uint8_t subband_num; nfapi_dl_config_request_pdu_t *dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu]; memset((void *) dl_config_pdu, 0, sizeof(nfapi_dl_config_request_pdu_t)); dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DLSCH_PDU_TYPE; @@ -1652,7 +1674,11 @@ fill_nfapi_dlsch_config(eNB_MAC_INST *eNB, dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.transmission_scheme = transmission_scheme; dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.number_of_layers = number_of_layers; dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.number_of_subbands = number_of_subbands; - // dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.codebook_index = codebook_index; + + for(subband_num = 0; subband_num < number_of_subbands; subband_num++){ + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.codebook_index[subband_num] = 0; + } + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.ue_category_capacity = ue_category_capacity; dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.pa = pa; dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.delta_power_offset_index = delta_power_offset_index; @@ -1661,6 +1687,18 @@ fill_nfapi_dlsch_config(eNB_MAC_INST *eNB, dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.transmission_mode = transmission_mode; dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.num_bf_prb_per_subband = num_bf_prb_per_subband; dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.num_bf_vector = num_bf_vector; + // Rel10 fields +#if (LTE_RRC_VERSION >= MAKE_VERSION(10, 0, 0)) + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel10.tl.tag = NFAPI_DL_CONFIG_REQUEST_DLSCH_PDU_REL10_TAG; + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel10.pdsch_start = 3; +#endif + // Rel13 fields +#if (LTE_RRC_VERSION >= MAKE_VERSION(13, 0, 0)) + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel13.tl.tag = NFAPI_DL_CONFIG_REQUEST_DLSCH_PDU_REL13_TAG; + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel13.ue_type = 0; // regular UE + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel13.pdsch_payload_type = 2; // not BR + dl_config_pdu->dlsch_pdu.dlsch_pdu_rel13.initial_transmission_sf_io = 0xFFFF; // absolute SF +#endif dl_req->number_pdu++; return; } @@ -2193,10 +2231,16 @@ add_new_ue(module_id_t mod_idP, UE_list->UE_sched_ctrl[UE_id].ta_update = 31; for (j = 0; j < 8; j++) { - UE_list->UE_template[cc_idP][UE_id].oldNDI[j] = (j == 0) ? 1 : 0; // 1 because first transmission is with format1A (Msg4) for harq_pid 0 - UE_list->UE_template[cc_idP][UE_id].oldNDI_UL[j] = (j == harq_pidP) ? 0 : 1; // 1st transmission is with Msg3; - UE_list->UE_sched_ctrl[UE_id].round[cc_idP][j] = 8; + UE_list->UE_template[cc_idP][UE_id].oldNDI[j][TB1] = (j == 0) ? 1 : 0; // 1 because first transmission is with format1A (Msg4) for harq_pid 0 + UE_list->UE_template[cc_idP][UE_id].oldNDI[j][TB2] = 1; + UE_list->UE_template[cc_idP][UE_id].oldNDI_UL[j] = (j == harq_pidP) ? 
0 : 1; // 1st transmission is with Msg3; + UE_list->UE_sched_ctrl[UE_id].round[cc_idP][j][TB1] = 8; + UE_list->UE_sched_ctrl[UE_id].round[cc_idP][j][TB2] = 8; UE_list->UE_sched_ctrl[UE_id].round_UL[cc_idP][j] = 0; + UE_list->UE_sched_ctrl[UE_id].rsn[cc_idP][j][TB1] = 0; + UE_list->UE_sched_ctrl[UE_id].rsn[cc_idP][j][TB2] = 0; + UE_list->eNB_UE_stats[cc_idP][UE_id].TBS[TB1] = 0; + UE_list->eNB_UE_stats[cc_idP][UE_id].TBS[TB2] = 0; } eNB_ulsch_info[mod_idP][cc_idP][UE_id].status = S_UL_WAITING; @@ -3943,6 +3987,10 @@ extract_harq(module_id_t mod_idP, sub_frame_t subframe_tx; int frame_tx; uint8_t harq_pid; + uint8_t select_tb; + uint8_t oppose_tb; + uint8_t swap_flg; + #if (LTE_RRC_VERSION >= MAKE_VERSION(13, 0, 0)) LTE_PhysicalConfigDedicated_t *physicalConfigDedicated = UE_list->UE_template[pCCid][UE_id].physicalConfigDedicated; @@ -3978,106 +4026,144 @@ extract_harq(module_id_t mod_idP, subframeP, m); - if (frameP==1023&&subframeP>5) frame_tx=-1; - else frame_tx = subframeP < 4 ? frameP -1 : frameP; + if(frameP==1023&&subframeP>5) + frame_tx=-1; + else + frame_tx = subframeP < 4 ? frameP -1 : frameP; + harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frame_tx,subframe_tx); + + select_tb = sched_ctl->select_tb[CC_idP][harq_pid]; + oppose_tb = select_tb ^ 0x1; + + swap_flg = sched_ctl->swap_flag[CC_idP][harq_pid]; + + if (tmode[0] == 1 || tmode[0] == 2 || tmode[0] == 5 || tmode[0] == 6 || tmode[0] == 7) { - harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config, - frame_tx, - subframe_tx); RA_t *ra = &eNB->common_channels[CC_idP].ra[0]; if(num_ack_nak == 1) { if (harq_indication_tdd->harq_data[0].bundling.value_0 == 1) { //ack - sched_ctl->round[CC_idP][harq_pid] = 8; // release HARQ process + sched_ctl->round[CC_idP][harq_pid][TB1] = 8; // release HARQ process sched_ctl->tbcnt[CC_idP][harq_pid] = 0; - LOG_D(MAC, "frame %d subframe %d Acking (%d,%d) harq_pid %d round %d\n", - frameP, - subframeP, - frame_tx, - subframe_tx, - harq_pid, - sched_ctl->round[CC_idP][harq_pid]); + LOG_D(MAC,"frame %d subframe %d Acking (%d,%d) harq_pid %d round %d\n",frameP,subframeP,frame_tx,subframe_tx,harq_pid,sched_ctl->round[CC_idP][harq_pid][TB1]); } else { //nack - if (sched_ctl->round[CC_idP][harq_pid] < 8) sched_ctl->round[CC_idP][harq_pid]++; - - if (sched_ctl->round[CC_idP][harq_pid] == 4) { - sched_ctl->round[CC_idP][harq_pid] = 8; // release HARQ process - sched_ctl->tbcnt[CC_idP][harq_pid] = 0; - } - - LOG_D(MAC,"frame %d subframe %d Nacking (%d,%d) harq_pid %d round %d\n", - frameP, - subframeP, - frame_tx, - subframe_tx, - harq_pid, - sched_ctl->round[CC_idP][harq_pid]); - - if (sched_ctl->round[CC_idP][harq_pid] == 8) { - for (uint8_t ra_i = 0; ra_i < NB_RA_PROC_MAX; ra_i++) { - if (ra[ra_i].rnti == rnti && ra[ra_i].state == WAITMSG4ACK) { - //Msg NACK num to MAC ,remove UE - // add UE info to freeList - LOG_I(RRC, "put UE %x into freeList\n", - rnti); - put_UE_in_freelist(mod_idP, - rnti, - 1); - } - } - } - } - } - - for (uint8_t ra_i = 0; ra_i < NB_RA_PROC_MAX; ra_i++) { - if (ra[ra_i].rnti == rnti && ra[ra_i].state == MSGCRNTI_ACK && ra[ra_i].crnti_harq_pid == harq_pid) { - LOG_D(MAC,"CRNTI Reconfiguration: ACK %d rnti %x round %d frame %d subframe %d \n", - harq_indication_tdd->harq_data[0].bundling.value_0, - rnti, - sched_ctl->round[CC_idP][harq_pid], - frameP, - subframeP); - - if (num_ack_nak == 1 && harq_indication_tdd->harq_data[0].bundling.value_0 == 1) { - cancel_ra_proc(mod_idP, - CC_idP, - frameP, - ra[ra_i].rnti); - } else { - if(sched_ctl->round[CC_idP][harq_pid] == 7) { - 
cancel_ra_proc(mod_idP, - CC_idP, - frameP, - ra[ra_i].rnti); - } - } - - break; - } + if( sched_ctl->round[CC_idP][harq_pid][TB1]<8) sched_ctl->round[CC_idP][harq_pid][TB1]++; + if (sched_ctl->round[CC_idP][harq_pid][TB1] == 4) { + sched_ctl->round[CC_idP][harq_pid][TB1] = 8; // release HARQ process + sched_ctl->tbcnt[CC_idP][harq_pid] = 0; + } + LOG_D(MAC,"frame %d subframe %d Nacking (%d,%d) harq_pid %d round %d\n",frameP,subframeP,frame_tx,subframe_tx,harq_pid,sched_ctl->round[CC_idP][harq_pid][TB1]); + if(sched_ctl->round[CC_idP][harq_pid][TB1] == 8){ + for (uint8_t ra_i = 0; ra_i < NB_RA_PROC_MAX; ra_i++) { + if((ra[ra_i].rnti == rnti) && (ra[ra_i].state == WAITMSG4ACK)){ + //Msg NACK num to MAC ,remove UE + // add UE info to freeList + LOG_I(RRC, "put UE %x into freeList\n", rnti); + put_UE_in_freelist(mod_idP, rnti, 1); + } + } + } + } + } + for (uint8_t ra_i = 0; ra_i < NB_RA_PROC_MAX; ra_i++) { + if ((ra[ra_i].rnti == rnti) && (ra[ra_i].state == MSGCRNTI_ACK) && (ra[ra_i].crnti_harq_pid == harq_pid)) { + LOG_D(MAC,"CRNTI Reconfiguration: ACK %d rnti %x round %d frame %d subframe %d \n",harq_indication_tdd->harq_data[0].bundling.value_0,rnti,sched_ctl->round[CC_idP][harq_pid][TB1],frameP,subframeP); + if(num_ack_nak == 1 && harq_indication_tdd->harq_data[0].bundling.value_0 == 1) { + cancel_ra_proc(mod_idP, CC_idP, frameP, ra[ra_i].rnti); + }else{ + if(sched_ctl->round[CC_idP][harq_pid][TB1] == 7){ + cancel_ra_proc(mod_idP, CC_idP, frameP, ra[ra_i].rnti); } + } + break; } - - break; - - case 1: // Channel Selection - case 2: // Format 3 - case 3: // Format 4 - case 4: // Format 5 - break; + } + }else{ + if((num_ack_nak==2) + && (harq_indication_tdd->harq_data[select_tb].bundling.value_0==1) + && (harq_indication_tdd->harq_data[oppose_tb].bundling.value_0==1)){ + sched_ctl->round[CC_idP][harq_pid][select_tb] = 8; + sched_ctl->round[CC_idP][harq_pid][oppose_tb] = 8; + sched_ctl->rsn[CC_idP][harq_pid][select_tb] = 0; + sched_ctl->rsn[CC_idP][harq_pid][oppose_tb] = 0; + }else if( (num_ack_nak==2) + && ((harq_indication_tdd->harq_data[select_tb].bundling.value_0==2) || (harq_indication_tdd->harq_data[select_tb].bundling.value_0==4)) + && ((harq_indication_tdd->harq_data[oppose_tb].bundling.value_0==2) || (harq_indication_tdd->harq_data[oppose_tb].bundling.value_0==4))){ + sched_ctl->round[CC_idP][harq_pid][select_tb]++; + sched_ctl->round[CC_idP][harq_pid][oppose_tb]++; + sched_ctl->rsn[CC_idP][harq_pid][select_tb]++; + sched_ctl->rsn[CC_idP][harq_pid][oppose_tb]++; + + if(sched_ctl->round[CC_idP][harq_pid][select_tb] == 4){ + sched_ctl->round[CC_idP][harq_pid][select_tb] = 8; // release HARQ process + sched_ctl->rsn[CC_idP][harq_pid][select_tb] = 0; + } + if(sched_ctl->round[CC_idP][harq_pid][oppose_tb] == 4){ + sched_ctl->round[CC_idP][harq_pid][oppose_tb] = 8; // release HARQ process + sched_ctl->rsn[CC_idP][harq_pid][oppose_tb] = 0; + } + }else if( (num_ack_nak==2) + && (harq_indication_tdd->harq_data[select_tb].bundling.value_0==1) + && ((harq_indication_tdd->harq_data[oppose_tb].bundling.value_0==2) || (harq_indication_tdd->harq_data[oppose_tb].bundling.value_0==4))){ + sched_ctl->round[CC_idP][harq_pid][select_tb] = 8; + sched_ctl->round[CC_idP][harq_pid][oppose_tb]++; + sched_ctl->rsn[CC_idP][harq_pid][select_tb] = 0; + sched_ctl->rsn[CC_idP][harq_pid][oppose_tb]++; + + if(sched_ctl->round[CC_idP][harq_pid][oppose_tb] == 4){ + sched_ctl->round[CC_idP][harq_pid][oppose_tb] = 8; // release HARQ process + sched_ctl->rsn[CC_idP][harq_pid][oppose_tb] = 0; + } + }else if( 
(num_ack_nak==2) + && ((harq_indication_tdd->harq_data[select_tb].bundling.value_0==2) || (harq_indication_tdd->harq_data[select_tb].bundling.value_0==4)) + && (harq_indication_tdd->harq_data[oppose_tb].bundling.value_0==1)){ + sched_ctl->round[CC_idP][harq_pid][select_tb]++; + sched_ctl->round[CC_idP][harq_pid][oppose_tb] = 8; + sched_ctl->rsn[CC_idP][harq_pid][select_tb]++; + sched_ctl->rsn[CC_idP][harq_pid][oppose_tb] = 0; + + if(sched_ctl->round[CC_idP][harq_pid][select_tb] == 4){ + sched_ctl->round[CC_idP][harq_pid][select_tb] = 8; // release HARQ process + sched_ctl->rsn[CC_idP][harq_pid][select_tb] = 0; + } + }else if( (num_ack_nak==1) + && (harq_indication_tdd->harq_data[TB1].bundling.value_0==1)){ + sched_ctl->round[CC_idP][harq_pid][swap_flg] = 8; + sched_ctl->rsn[CC_idP][harq_pid][swap_flg] = 0; + }else if( (num_ack_nak==1) + && ((harq_indication_tdd->harq_data[TB1].bundling.value_0==2) || (harq_indication_tdd->harq_data[TB1].bundling.value_0==4))){ + sched_ctl->round[CC_idP][harq_pid][swap_flg]++; + sched_ctl->rsn[CC_idP][harq_pid][swap_flg]++; + + if(sched_ctl->round[CC_idP][harq_pid][swap_flg] == 4){ + sched_ctl->round[CC_idP][harq_pid][swap_flg] = 8; // release HARQ process + sched_ctl->rsn[CC_idP][harq_pid][swap_flg] = 0; + } + } + } + + } + break; + case 1: // Channel Selection + break; + case 2: // Format 3 + break; + case 3: // Format 4 + break; + case 4: // Format 5 + break; } } else { harq_indication_fdd = (nfapi_harq_indication_fdd_rel13_t *) harq_indication; num_ack_nak = harq_indication_fdd->number_of_ack_nack; pdu = &harq_indication_fdd->harq_tb_n[0]; harq_pid = ((10 * frameP) + subframeP + 10236) & 7; - LOG_D(MAC, "frame %d subframe %d harq_pid %d mode %d tmode[0] %d num_ack_nak %d round %d\n", - frameP, - subframeP, - harq_pid, - harq_indication_fdd->mode, - tmode[0], - num_ack_nak, - sched_ctl->round[CC_idP][harq_pid]); + select_tb = sched_ctl->select_tb[CC_idP][harq_pid]; + oppose_tb = select_tb ^ 0x1; + + swap_flg = sched_ctl->swap_flag[CC_idP][harq_pid]; + + LOG_D(MAC,"frame %d subframe %d harq_pid %d mode %d tmode[0] %d num_ack_nak %d round %d\n",frameP,subframeP,harq_pid,harq_indication_fdd->mode,tmode[0],num_ack_nak,sched_ctl->round[CC_idP][harq_pid][TB1]); // use 1 HARQ proces of BL/CE UE for now if (UE_list->UE_template[pCCid][UE_id].rach_resource_type > 0) harq_pid = 0; @@ -4097,16 +4183,12 @@ extract_harq(module_id_t mod_idP, // In case of nFAPI, sometimes timing of eNB and UE become different. // So if nfapi_mode == 2(VNF), this function don't check assertion to avoid process exit. 
if (NFAPI_MODE != NFAPI_MODE_VNF) { - AssertFatal(sched_ctl->round[CC_idP][harq_pid] < 8, "Got ACK/NAK for inactive harq_pid %d for UE %d/%x\n", - harq_pid, - UE_id, - rnti); + AssertFatal(sched_ctl->round[CC_idP][harq_pid][TB1] < 8, + "Got ACK/NAK for inactive harq_pid %d for UE %d/%x\n", + harq_pid, UE_id, rnti); } else { - if (sched_ctl->round[CC_idP][harq_pid] == 8) { - LOG_E(MAC,"Got ACK/NAK for inactive harq_pid %d for UE %d/%x\n", - harq_pid, - UE_id, - rnti); + if(sched_ctl->round[CC_idP][harq_pid][TB1] == 8){ + LOG_E(MAC,"Got ACK/NAK for inactive harq_pid %d for UE %d/%x\n",harq_pid, UE_id, rnti); return; } } @@ -4126,7 +4208,7 @@ extract_harq(module_id_t mod_idP, LOG_D(MAC,"CRNTI Reconfiguration: ACK %d rnti %x round %d frame %d subframe %d \n", pdu[0], rnti, - sched_ctl->round[CC_idP][harq_pid], + sched_ctl->round[CC_idP][harq_pid][TB1], frameP, subframeP); @@ -4136,7 +4218,7 @@ extract_harq(module_id_t mod_idP, frameP, ra[ra_i].rnti); } else { - if (sched_ctl->round[CC_idP][harq_pid] == 7) { + if(sched_ctl->round[CC_idP][harq_pid][TB1] == 7){ cancel_ra_proc(mod_idP, CC_idP, frameP, @@ -4151,24 +4233,24 @@ extract_harq(module_id_t mod_idP, LOG_D(MAC, "In extract_harq(): pdu[0] = %d for harq_pid = %d\n", pdu[0], harq_pid); if (pdu[0] == 1) { // ACK - sched_ctl->round[CC_idP][harq_pid] = 8; // release HARQ process + sched_ctl->round[CC_idP][harq_pid][TB1] = 8; // release HARQ process sched_ctl->tbcnt[CC_idP][harq_pid] = 0; /* CDRX: PUCCH gives an ACK, so reset corresponding HARQ RTT */ sched_ctl->harq_rtt_timer[CC_idP][harq_pid] = 0; } else if (pdu[0] == 2 || pdu[0] == 4) { // NAK (treat DTX as NAK) - sched_ctl->round[CC_idP][harq_pid]++; // increment round + sched_ctl->round[CC_idP][harq_pid][TB1]++; // increment round - if (sched_ctl->round[CC_idP][harq_pid] == 4) { - sched_ctl->round[CC_idP][harq_pid] = 8; // release HARQ process + if (sched_ctl->round[CC_idP][harq_pid][TB1] == 4) { + sched_ctl->round[CC_idP][harq_pid][TB1] = 8; // release HARQ process sched_ctl->tbcnt[CC_idP][harq_pid] = 0; /* CDRX: PUCCH gives an NACK and max number of repetitions reached so reset corresponding HARQ RTT */ sched_ctl->harq_rtt_timer[CC_idP][harq_pid] = 0; } - if (sched_ctl->round[CC_idP][harq_pid] == 8) { + if (sched_ctl->round[CC_idP][harq_pid][TB1] == 8){ for (uint8_t ra_i = 0; ra_i < NB_RA_PROC_MAX; ra_i++) { if((ra[ra_i].rnti == rnti) && (ra[ra_i].state == WAITMSG4ACK)) { // Msg NACK num to MAC ,remove UE @@ -4186,64 +4268,86 @@ extract_harq(module_id_t mod_idP, // one or two ACK/NAK bits AssertFatal(num_ack_nak <= 2, "num_ack_nak %d > 2 for 1 CC and TM3/4/8/9/10\n", num_ack_nak); - - if (num_ack_nak == 2 && sched_ctl->round[CC_idP][harq_pid] < 8 && sched_ctl->tbcnt[CC_idP][harq_pid] == 1 && pdu[0] == 1 && pdu[1] == 1) { - sched_ctl->round[CC_idP][harq_pid] = 8; - sched_ctl->tbcnt[CC_idP][harq_pid] = 0; - + if ((num_ack_nak == 2) + && (sched_ctl->round[CC_idP][harq_pid][select_tb] < 8) + && (sched_ctl->round[CC_idP][harq_pid][oppose_tb] < 8) + && (pdu[select_tb] == 1) && (pdu[oppose_tb] == 1)) { + sched_ctl->round[CC_idP][harq_pid][select_tb] = 8; + sched_ctl->round[CC_idP][harq_pid][oppose_tb] = 8; + sched_ctl->rsn[CC_idP][harq_pid][select_tb] = 0; + sched_ctl->rsn[CC_idP][harq_pid][oppose_tb] = 0; /* CDRX: PUCCH gives an ACK, so reset corresponding HARQ RTT */ sched_ctl->harq_rtt_timer[CC_idP][harq_pid] = 0; } - if ((num_ack_nak == 2) - && (sched_ctl->round[CC_idP][harq_pid] < 8) - && (sched_ctl->tbcnt[CC_idP][harq_pid] == 1) - && (pdu[0] == 2) && (pdu[1] == 2)) { - 
sched_ctl->round[CC_idP][harq_pid]++; - - if (sched_ctl->round[CC_idP][harq_pid] == 4) { - sched_ctl->round[CC_idP][harq_pid] = 8; // release HARQ process - sched_ctl->tbcnt[CC_idP][harq_pid] = 0; - - /* CDRX: PUCCH gives an NACK and max number of repetitions reached so reset corresponding HARQ RTT */ - sched_ctl->harq_rtt_timer[CC_idP][harq_pid] = 0; - } - } else if (((num_ack_nak == 2) - && (sched_ctl->round[CC_idP][harq_pid] < 8) - && (sched_ctl->tbcnt[0][harq_pid] == 2) - && (pdu[0] == 1) && (pdu[1] == 2)) - || ((num_ack_nak == 2) - && (sched_ctl->round[CC_idP][harq_pid] < 8) - && (sched_ctl->tbcnt[CC_idP][harq_pid] == 2) - && (pdu[0] == 2) && (pdu[1] == 1))) { - sched_ctl->round[CC_idP][harq_pid]++; - sched_ctl->tbcnt[CC_idP][harq_pid] = 1; - - if (sched_ctl->round[CC_idP][harq_pid] == 4) { - sched_ctl->round[CC_idP][harq_pid] = 8; // release HARQ process - sched_ctl->tbcnt[CC_idP][harq_pid] = 0; /* TODO: do we have to set it to 0? */ + else if( (num_ack_nak == 2) + && (sched_ctl->round[CC_idP][harq_pid][select_tb] < 8) + && (sched_ctl->round[CC_idP][harq_pid][oppose_tb] < 8) + && ((pdu[select_tb] == 2) || (pdu[select_tb] == 4)) + && ((pdu[oppose_tb] == 2) || (pdu[oppose_tb] == 4))) { + sched_ctl->round[CC_idP][harq_pid][select_tb]++; + sched_ctl->round[CC_idP][harq_pid][oppose_tb]++; + sched_ctl->rsn[CC_idP][harq_pid][select_tb]++; + sched_ctl->rsn[CC_idP][harq_pid][oppose_tb]++; + if (sched_ctl->round[CC_idP][harq_pid][select_tb] == 4) { + sched_ctl->round[CC_idP][harq_pid][select_tb] = 8; // release HARQ process + sched_ctl->rsn[CC_idP][harq_pid][select_tb] = 0; /* CDRX: PUCCH gives an NACK and max number of repetitions reached so reset corresponding HARQ RTT */ sched_ctl->harq_rtt_timer[CC_idP][harq_pid] = 0; } - } else if ((num_ack_nak == 2) - && (sched_ctl->round[CC_idP][harq_pid] < 8) - && (sched_ctl->tbcnt[CC_idP][harq_pid] == 2) - && (pdu[0] == 2) && (pdu[1] == 2)) { - sched_ctl->round[CC_idP][harq_pid]++; - - if (sched_ctl->round[CC_idP][harq_pid] == 4) { - sched_ctl->round[CC_idP][harq_pid] = 8; // release HARQ process - sched_ctl->tbcnt[CC_idP][harq_pid] = 0; - - /* CDRX: PUCCH gives an NACK and max number of repetitions reached so reset corresponding HARQ RTT */ - sched_ctl->harq_rtt_timer[CC_idP][harq_pid] = 0; - } - } else + if (sched_ctl->round[CC_idP][harq_pid][oppose_tb] == 4) { + sched_ctl->round[CC_idP][harq_pid][oppose_tb] = 8; // release HARQ process + sched_ctl->rsn[CC_idP][harq_pid][oppose_tb] = 0; + sched_ctl->harq_rtt_timer[CC_idP][harq_pid] = 0; + } + } else if ((num_ack_nak == 2) + && (sched_ctl->round[CC_idP][harq_pid][select_tb] < 8) + && (sched_ctl->round[CC_idP][harq_pid][oppose_tb] < 8) + && (pdu[select_tb] == 1) + && ((pdu[oppose_tb] == 2) || pdu[oppose_tb] == 4)){ + sched_ctl->round[CC_idP][harq_pid][select_tb] = 8; + sched_ctl->round[CC_idP][harq_pid][oppose_tb]++; + sched_ctl->rsn[CC_idP][harq_pid][select_tb] = 0; + sched_ctl->rsn[CC_idP][harq_pid][oppose_tb]++; + + if (sched_ctl->round[CC_idP][harq_pid][oppose_tb] == 4) { + sched_ctl->round[CC_idP][harq_pid][oppose_tb] = 8; // release HARQ process + sched_ctl->rsn[CC_idP][harq_pid][oppose_tb] = 0; + /* CDRX: PUCCH gives an NACK and max number of repetitions reached so reset corresponding HARQ RTT */ + sched_ctl->harq_rtt_timer[CC_idP][harq_pid] = 0; + } + } + else if ((num_ack_nak == 2) + && (sched_ctl->round[CC_idP][harq_pid][select_tb] < 8) + && (sched_ctl->round[CC_idP][harq_pid][oppose_tb] < 8) + && ((pdu[select_tb] == 2) || (pdu[select_tb] == 4)) + && (pdu[oppose_tb] == 1)){ + 
sched_ctl->round[CC_idP][harq_pid][select_tb]++; + sched_ctl->round[CC_idP][harq_pid][oppose_tb] = 8; + sched_ctl->rsn[CC_idP][harq_pid][select_tb]++; + sched_ctl->rsn[CC_idP][harq_pid][oppose_tb] = 0; + + if (sched_ctl->round[CC_idP][harq_pid][select_tb] == 4) { + sched_ctl->round[CC_idP][harq_pid][select_tb] = 8; // release HARQ process + sched_ctl->rsn[CC_idP][harq_pid][select_tb] = 0; + /* CDRX: PUCCH gives an NACK and max number of repetitions reached so reset corresponding HARQ RTT */ + sched_ctl->harq_rtt_timer[CC_idP][harq_pid] = 0; + } + } else if( (num_ack_nak == 1) + && (pdu[TB1] == 2 || pdu[TB1] == 4)) { // NAK (treat DTX as NAK) + sched_ctl->round[CC_idP][harq_pid][swap_flg]++; // increment round + sched_ctl->rsn[CC_idP][harq_pid][swap_flg]++; + + if (sched_ctl->round[CC_idP][harq_pid][swap_flg] == 4) { + sched_ctl->round[CC_idP][harq_pid][swap_flg] = 8; // release HARQ process + sched_ctl->rsn[CC_idP][harq_pid][swap_flg] = 0; + } + } else AssertFatal(1 == 0, "Illegal ACK/NAK/round combination (%d,%d,%d,%d,%d) for harq_pid %d, UE %d/%x\n", num_ack_nak, - sched_ctl->round[CC_idP][harq_pid], - sched_ctl->round[CC_idP][harq_pid], + sched_ctl->round[CC_idP][harq_pid][TB1], + sched_ctl->round[CC_idP][harq_pid][TB1], pdu[0], pdu[0], pdu[1], harq_pid, @@ -4258,8 +4362,8 @@ extract_harq(module_id_t mod_idP, numCC); if ((num_ack_nak == 2) - && (sched_ctl->round[pCCid][harq_pid] < 8) - && (sched_ctl->round[1 - pCCid][harq_pid] < 8) + && (sched_ctl->round[pCCid][harq_pid][TB1] < 8) + && (sched_ctl->round[1 - pCCid][harq_pid][TB1] < 8) && (sched_ctl->tbcnt[pCCid][harq_pid] == 1) && (sched_ctl->tbcnt[1 - pCCid][harq_pid] == 1)) { AssertFatal(pdu[0] <= 3, "pdu[0] %d is not ACK/NAK/DTX\n", @@ -4268,27 +4372,26 @@ extract_harq(module_id_t mod_idP, pdu[1]); if (pdu[0] == 1) - sched_ctl->round[pCCid][harq_pid] = 8; + sched_ctl->round[pCCid][harq_pid][TB1] = 8; else { - sched_ctl->round[pCCid][harq_pid]++; + sched_ctl->round[pCCid][harq_pid][TB1]++; - if (sched_ctl->round[pCCid][harq_pid] == 4) - sched_ctl->round[pCCid][harq_pid] = 8; + if (sched_ctl->round[pCCid][harq_pid][TB1] == 4) + sched_ctl->round[pCCid][harq_pid][TB1] = 8; } if (pdu[1] == 1) - sched_ctl->round[1 - pCCid][harq_pid] = 8; - else { - sched_ctl->round[1 - pCCid][harq_pid]++; - - if (sched_ctl->round[1 - pCCid][harq_pid] == 4) - sched_ctl->round[1 - pCCid][harq_pid] = 8; + sched_ctl->round[1 - pCCid][harq_pid][TB1] = 8; + else { + sched_ctl->round[1 - pCCid][harq_pid][TB1]++; + if (sched_ctl->round[1 - pCCid][harq_pid][TB1] == 4) + sched_ctl->round[1 - pCCid][harq_pid][TB1] = 8; } } // A=2 else if ((num_ack_nak == 3) - && (sched_ctl->round[pCCid][harq_pid] < 8) - && (sched_ctl->tbcnt[pCCid][harq_pid] == 2) - && (sched_ctl->round[1 - pCCid][harq_pid] < 8) + && (sched_ctl->round[pCCid][harq_pid][TB1] < 8) + && (sched_ctl->tbcnt[pCCid][harq_pid] == 2) + && (sched_ctl->round[1 - pCCid][harq_pid][TB1] < 8) && (sched_ctl->tbcnt[1 - pCCid][harq_pid] == 1)) { AssertFatal(pdu[0] <= 3, "pdu[0] %d is not ACK/NAK/DTX\n", pdu[0]); @@ -4308,38 +4411,36 @@ extract_harq(module_id_t mod_idP, rnti); if (pdu[0] == 1 && pdu[1] == 1) { // both ACK - sched_ctl->round[pCCid][harq_pid] = 8; + sched_ctl->round[pCCid][harq_pid][TB1] = 8; sched_ctl->tbcnt[pCCid][harq_pid] = 0; } else if ((pdu[0] == 2 && pdu[1] == 1) || (pdu[0] == 1 && pdu[1] == 2)) { - sched_ctl->round[pCCid][harq_pid]++; + sched_ctl->round[pCCid][harq_pid][TB1]++; sched_ctl->tbcnt[pCCid][harq_pid] = 1; - if (sched_ctl->round[pCCid][harq_pid] == 4) { - sched_ctl->round[pCCid][harq_pid] = 
8; + if (sched_ctl->round[pCCid][harq_pid][TB1] == 4) { + sched_ctl->round[pCCid][harq_pid][TB1] = 8; sched_ctl->tbcnt[pCCid][harq_pid] = 0; /* TODO: do we have to set it to 0? */ } } else { - sched_ctl->round[pCCid][harq_pid]++; - - if (sched_ctl->round[pCCid][harq_pid] == 4) { - sched_ctl->round[pCCid][harq_pid] = 8; + sched_ctl->round[pCCid][harq_pid][TB1]++; + if (sched_ctl->round[pCCid][harq_pid][TB1] == 4) { + sched_ctl->round[pCCid][harq_pid][TB1] = 8; sched_ctl->tbcnt[pCCid][harq_pid] = 0; } } - if (pdu[2] == 1) sched_ctl->round[1 - pCCid][harq_pid] = 8; + if (pdu[2] == 1) sched_ctl->round[1 - pCCid][harq_pid][TB1] = 8; else { - sched_ctl->round[1 - pCCid][harq_pid]++; - - if (sched_ctl->round[1 - pCCid][harq_pid] == 4) { - sched_ctl->round[1 - pCCid][harq_pid] = 8; - } + sched_ctl->round[1 - pCCid][harq_pid][TB1]++; + if (sched_ctl->round[1 - pCCid][harq_pid][TB1] == 4) { + sched_ctl->round[1 - pCCid][harq_pid][TB1] = 8; } - } // A=3 primary cell has 2 TBs - else if ((num_ack_nak == 3) - && (sched_ctl->round[1 - pCCid][harq_pid] < 8) - && (sched_ctl->round[pCCid][harq_pid] < 8) - && (sched_ctl->tbcnt[1 - pCCid][harq_pid] == 2) + } + } // A=3 primary cell has 2 TBs + else if ((num_ack_nak == 3) + && (sched_ctl->round[1 - pCCid][harq_pid][TB1] < 8) + && (sched_ctl->round[pCCid][harq_pid][TB1] < 8) + && (sched_ctl->tbcnt[1 - pCCid][harq_pid] == 2) && (sched_ctl->tbcnt[pCCid][harq_pid] == 1)) { AssertFatal(pdu[0] <= 3, "pdu[0] %d is not ACK/NAK/DTX\n", pdu[0]); @@ -4359,39 +4460,39 @@ extract_harq(module_id_t mod_idP, rnti); if (pdu[0] == 1 && pdu[1] == 1) { // both ACK - sched_ctl->round[1 - pCCid][harq_pid] = 8; + sched_ctl->round[1 - pCCid][harq_pid][TB1] = 8; sched_ctl->tbcnt[1 - pCCid][harq_pid] = 0; } else if ((pdu[0] >= 2 && pdu[1] == 1) || (pdu[0] == 1 && pdu[1] >= 2)) { // one ACK - sched_ctl->round[1 - pCCid][harq_pid]++; + sched_ctl->round[1 - pCCid][harq_pid][TB1]++; sched_ctl->tbcnt[1 - pCCid][harq_pid] = 1; - if (sched_ctl->round[1 - pCCid][harq_pid] == 4) { - sched_ctl->round[1 - pCCid][harq_pid] = 8; + if (sched_ctl->round[1 - pCCid][harq_pid][TB1] == 4) { + sched_ctl->round[1 - pCCid][harq_pid][TB1] = 8; sched_ctl->tbcnt[1 - pCCid][harq_pid] = 0; } } else { // both NAK/DTX - sched_ctl->round[1 - pCCid][harq_pid]++; + sched_ctl->round[1 - pCCid][harq_pid][TB1]++; - if (sched_ctl->round[1 - pCCid][harq_pid] == 4) { - sched_ctl->round[1 - pCCid][harq_pid] = 8; + if (sched_ctl->round[1 - pCCid][harq_pid][TB1] == 4) { + sched_ctl->round[1 - pCCid][harq_pid][TB1] = 8; sched_ctl->tbcnt[1 - pCCid][harq_pid] = 0; } } - if (pdu[2] == 1) sched_ctl->round[pCCid][harq_pid] = 8; + if (pdu[2] == 1) sched_ctl->round[pCCid][harq_pid][TB1] = 8; else { - sched_ctl->round[pCCid][harq_pid]++; + sched_ctl->round[pCCid][harq_pid][TB1]++; - if (sched_ctl->round[pCCid][harq_pid] == 4) { - sched_ctl->round[pCCid][harq_pid] = 8; + if (sched_ctl->round[pCCid][harq_pid][TB1] == 4) { + sched_ctl->round[pCCid][harq_pid][TB1] = 8; } } } // A=3 secondary cell has 2 TBs #if MAX_NUM_CCs>1 else if ((num_ack_nak == 4) - && (sched_ctl->round[0][harq_pid] < 8) - && (sched_ctl->round[1][harq_pid] < 8) + && (sched_ctl->round[0][harq_pid][TB1] < 8) + && (sched_ctl->round[1][harq_pid][TB1] < 8) && (sched_ctl->tbcnt[1 - pCCid][harq_pid] == 2) && (sched_ctl->tbcnt[pCCid][harq_pid] == 2)) { AssertFatal(pdu[0] <= 3, "pdu[0] %d is not ACK/NAK/DTX\n", @@ -4412,41 +4513,41 @@ extract_harq(module_id_t mod_idP, rnti); if (pdu[0] == 1 && pdu[1] == 1) { // both ACK - sched_ctl->round[0][harq_pid] = 8; + 
sched_ctl->round[0][harq_pid][TB1] = 8; sched_ctl->tbcnt[0][harq_pid] = 0; } else if ((pdu[0] >= 2 && pdu[1] == 1) || (pdu[0] == 1 && pdu[1] >= 2)) { // one ACK - sched_ctl->round[0][harq_pid]++; + sched_ctl->round[0][harq_pid][TB1]++; sched_ctl->tbcnt[0][harq_pid] = 1; - if (sched_ctl->round[0][harq_pid] == 4) { - sched_ctl->round[0][harq_pid] = 8; + if (sched_ctl->round[0][harq_pid][TB1] == 4) { + sched_ctl->round[0][harq_pid][TB1] = 8; sched_ctl->tbcnt[0][harq_pid] = 0; } } else { // both NAK/DTX - sched_ctl->round[0][harq_pid]++; + sched_ctl->round[0][harq_pid][TB1]++; - if (sched_ctl->round[0][harq_pid] == 4) { - sched_ctl->round[0][harq_pid] = 8; + if (sched_ctl->round[0][harq_pid][TB1] == 4) { + sched_ctl->round[0][harq_pid][TB1] = 8; sched_ctl->tbcnt[0][harq_pid] = 0; } } if (pdu[2] == 1 && pdu[3] == 1) { // both ACK - sched_ctl->round[1][harq_pid] = 8; + sched_ctl->round[1][harq_pid][TB1] = 8; sched_ctl->tbcnt[1][harq_pid] = 0; } else if ((pdu[2] >= 2 && pdu[3] == 1) || (pdu[2] == 1 && pdu[3] >= 2)) { // one ACK - sched_ctl->round[1][harq_pid]++; + sched_ctl->round[1][harq_pid][TB1]++; sched_ctl->tbcnt[1][harq_pid] = 1; - if (sched_ctl->round[1][harq_pid] == 4) { - sched_ctl->round[1][harq_pid] = 8; + if (sched_ctl->round[1][harq_pid][TB1] == 4) { + sched_ctl->round[1][harq_pid][TB1] = 8; sched_ctl->tbcnt[1][harq_pid] = 0; } } else { // both NAK/DTX - sched_ctl->round[1][harq_pid]++; + sched_ctl->round[1][harq_pid][TB1]++; - if (sched_ctl->round[1][harq_pid] == 4) { - sched_ctl->round[1][harq_pid] = 8; + if (sched_ctl->round[1][harq_pid][TB1] == 4) { + sched_ctl->round[1][harq_pid][TB1] = 8; sched_ctl->tbcnt[1][harq_pid] = 0; } } @@ -4460,16 +4561,16 @@ extract_harq(module_id_t mod_idP, numCC); for (i = 0, j = 0; i < numCC; i++) { - if (sched_ctl->round[i][harq_pid] < 8) { + if ((sched_ctl->round[i][harq_pid][TB1] < 8)) { if (tmode[i] == 1 || tmode[i] == 2 || tmode[0] == 5 || tmode[0] == 6 || tmode[0] == 7) { if (pdu[j] == 1) { - sched_ctl->round[i][harq_pid] = 8; + sched_ctl->round[i][harq_pid][TB1] = 8; sched_ctl->tbcnt[i][harq_pid] = 0; } else if (pdu[j] == 2) { - sched_ctl->round[i][harq_pid]++; + sched_ctl->round[i][harq_pid][TB1]++; - if (sched_ctl->round[i][harq_pid] == 4) { - sched_ctl->round[i][harq_pid] = 8; + if (sched_ctl->round[i][harq_pid][TB1] == 4) { + sched_ctl->round[i][harq_pid][TB1] = 8; sched_ctl->tbcnt[i][harq_pid] = 0; } } else @@ -4483,29 +4584,29 @@ extract_harq(module_id_t mod_idP, j++; } else if (spatial_bundling == 0) { if (sched_ctl->tbcnt[i][harq_pid] == 2 && pdu[j] == 1 && pdu[j + 1] == 1) { - sched_ctl->round[i][harq_pid] = 8; + sched_ctl->round[i][harq_pid][TB1] = 8; sched_ctl->tbcnt[i][harq_pid] = 0; } else if (sched_ctl->tbcnt[i][harq_pid] == 2 && pdu[j] == 1 && pdu[j + 1] == 2) { - sched_ctl->round[i][harq_pid]++; + sched_ctl->round[i][harq_pid][TB1]++; sched_ctl->tbcnt[i][harq_pid] = 1; - if (sched_ctl->round[i][harq_pid] == 4) { - sched_ctl->round[i][harq_pid] = 8; + if (sched_ctl->round[i][harq_pid][TB1] == 4) { + sched_ctl->round[i][harq_pid][TB1] = 8; sched_ctl->tbcnt[i][harq_pid] = 0; } } else if (sched_ctl->tbcnt[i][harq_pid] == 2 && pdu[j] == 2 && pdu[j + 1] == 1) { - sched_ctl->round[i][harq_pid]++; + sched_ctl->round[i][harq_pid][TB1]++; sched_ctl->tbcnt[i][harq_pid] = 1; - if (sched_ctl->round[i][harq_pid] == 4) { - sched_ctl->round[i][harq_pid] = 8; + if (sched_ctl->round[i][harq_pid][TB1] == 4) { + sched_ctl->round[i][harq_pid][TB1] = 8; sched_ctl->tbcnt[i][harq_pid] = 0; } } else if (sched_ctl->tbcnt[i][harq_pid] == 2 && pdu[j] == 
2 && pdu[j + 1] == 2) { - sched_ctl->round[i][harq_pid]++; + sched_ctl->round[i][harq_pid][TB1]++; - if (sched_ctl->round[i][harq_pid] == 4) { - sched_ctl->round[i][harq_pid] = 8; + if (sched_ctl->round[i][harq_pid][TB1] == 4) { + sched_ctl->round[i][harq_pid][TB1] = 8; sched_ctl->tbcnt[i][harq_pid] = 0; } } else @@ -4521,13 +4622,13 @@ extract_harq(module_id_t mod_idP, j += 2; } else if (spatial_bundling == 1) { if (pdu[j] == 1) { - sched_ctl->round[i][harq_pid] = 8; + sched_ctl->round[i][harq_pid][TB1] = 8; sched_ctl->tbcnt[i][harq_pid] = 0; } else if (pdu[j] == 2) { - sched_ctl->round[i][harq_pid]++; + sched_ctl->round[i][harq_pid][TB1]++; - if (sched_ctl->round[i][harq_pid] == 4) { - sched_ctl->round[i][harq_pid] = 8; + if (sched_ctl->round[i][harq_pid][TB1] == 4) { + sched_ctl->round[i][harq_pid][TB1] = 8; sched_ctl->tbcnt[i][harq_pid] = 0; } } else { diff --git a/openair2/LAYER2/MAC/mac.h b/openair2/LAYER2/MAC/mac.h index 0ac3a32fa8..947b37331b 100644 --- a/openair2/LAYER2/MAC/mac.h +++ b/openair2/LAYER2/MAC/mac.h @@ -471,9 +471,19 @@ typedef struct { #define BSR_TRIGGER_PADDING (4) /* For Padding BSR Trigger */ +#define MAX_NUM_TB 2 +#define TB1 0 +#define TB2 1 +#define SINGLE_RI 1 +#define MULTI_RI 2 +#define SINGLE_CW 1 +#define MULTI_CW 2 +#define TX_DIVERSITY 1 +#define LARGE_DELAY_CDD 2 + /*! \brief Downlink SCH PDU Structure */ typedef struct { - uint8_t payload[8][SCH_PAYLOAD_SIZE_MAX]; + uint8_t payload[8][MAX_NUM_TB][SCH_PAYLOAD_SIZE_MAX]; uint16_t Pdu_size[8]; } __attribute__ ((__packed__)) DLSCH_PDU; @@ -647,7 +657,7 @@ typedef struct { /// harq pid uint8_t harq_pid; /// harq rounf - uint8_t harq_round; + uint8_t harq_round[MAX_NUM_TB]; /// total available number of PRBs for a new transmission uint16_t rbs_used; /// total available number of PRBs for a retransmission @@ -657,12 +667,10 @@ typedef struct { /// total avilable nccc for a retransmission: num control channel element uint16_t ncce_used_retx; - // mcs1 before the rate adaptaion - uint8_t dlsch_mcs1; - /// Target mcs2 after rate-adaptation - uint8_t dlsch_mcs2; - // current TBS with mcs2 - uint32_t TBS; + // mcs + uint8_t dlsch_mcs[MAX_NUM_TB]; + // current TBS + uint32_t TBS[MAX_NUM_TB]; // total TBS with mcs2 // uint32_t total_TBS; // total rb used for a new transmission @@ -693,7 +701,7 @@ typedef struct { //total uint32_t total_dlsch_bitrate; /// headers+ CE + padding bytes for a MAC PDU - uint64_t overhead_bytes; + uint64_t overhead_bytes[MAX_NUM_TB]; /// headers+ CE + padding bytes for a MAC PDU uint64_t total_overhead_bytes; /// headers+ CE + padding bytes for a MAC PDU @@ -786,11 +794,9 @@ typedef struct { /// C-RNTI of UE rnti_t rnti; /// NDI from last scheduling - uint8_t oldNDI[8]; - /// mcs1 from last scheduling - uint8_t oldmcs1[8]; - /// mcs2 from last scheduling - uint8_t oldmcs2[8]; + uint8_t oldNDI[8][MAX_NUM_TB]; + /// mcs from last scheduling + uint8_t oldmcs[8][MAX_NUM_TB]; /// NDI from last UL scheduling uint8_t oldNDI_UL[8]; /// mcs from last UL scheduling @@ -947,7 +953,7 @@ typedef struct { // resource scheduling information /// Current DL harq round per harq_pid on each CC - uint8_t round[NFAPI_CC_MAX][10]; + uint8_t round[NFAPI_CC_MAX][10][MAX_NUM_TB]; /// Current Active TBs per harq_pid on each CC uint8_t tbcnt[NFAPI_CC_MAX][10]; /// Current UL harq round per harq_pid on each CC @@ -1003,6 +1009,11 @@ typedef struct { uint8_t crnti_reconfigurationcomplete_flag; uint8_t cqi_req_flag; + uint8_t cw_num[NFAPI_CC_MAX][10]; + uint8_t select_tb[NFAPI_CC_MAX][10]; + uint8_t 
swap_flag[NFAPI_CC_MAX][10]; + uint8_t rsn[NFAPI_CC_MAX][10][MAX_NUM_TB]; + /* HARQ RRT Timers */ /// (UL) HARQ RTT timers, especially used for CDRX operations, one timer per cell per harq process (and per user) uint8_t harq_rtt_timer[NFAPI_CC_MAX][8]; @@ -1199,7 +1210,7 @@ typedef struct { uint32_t bytes_lcid[MAX_MOBILES_PER_ENB][MAX_NUM_LCID]; uint32_t wb_pmi[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; - uint8_t mcs[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; + uint8_t mcs[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB][MAX_NUM_TB]; } pre_processor_results_t; diff --git a/openair2/LAYER2/MAC/mac_proto.h b/openair2/LAYER2/MAC/mac_proto.h index 457fb89031..3ed23b132a 100644 --- a/openair2/LAYER2/MAC/mac_proto.h +++ b/openair2/LAYER2/MAC/mac_proto.h @@ -1207,6 +1207,7 @@ void fill_nfapi_dlsch_config(eNB_MAC_INST * eNB, void fill_nfapi_harq_information(module_id_t module_idP, int CC_idP, uint16_t rntiP, + uint16_t absSFP, nfapi_ul_config_harq_information * harq_information, uint8_t cce_idxP); diff --git a/openair2/LAYER2/MAC/pre_processor.c b/openair2/LAYER2/MAC/pre_processor.c index e70ed082f9..71d54f3c7b 100644 --- a/openair2/LAYER2/MAC/pre_processor.c +++ b/openair2/LAYER2/MAC/pre_processor.c @@ -186,6 +186,7 @@ assign_rbs_required(module_id_t Mod_id, slice_info_t *sli = &RC.mac[Mod_id]->slice_info; eNB_UE_STATS *eNB_UE_stats, *eNB_UE_stats_i, *eNB_UE_stats_j; int N_RB_DL; + int ri; // clear rb allocations across all CC_id for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) { @@ -200,7 +201,8 @@ assign_rbs_required(module_id_t Mod_id, CC_id = UE_list->ordered_CCids[n][UE_id]; eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id]; // eNB_UE_stats->dlsch_mcs1 = cmin(cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]], sli->dl[slice_idx].maxmcs); - eNB_UE_stats->dlsch_mcs1 = cmin(cqi2mcs(UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]), sli->dl[slice_idx].maxmcs); + eNB_UE_stats->dlsch_mcs[TB1] = cmin(cqi2mcs(UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]), sli->dl[slice_idx].maxmcs); + eNB_UE_stats->dlsch_mcs[TB2] = eNB_UE_stats->dlsch_mcs[TB1]; } // provide the list of CCs sorted according to MCS @@ -211,7 +213,7 @@ assign_rbs_required(module_id_t Mod_id, DevAssert(j < NFAPI_CC_MAX); eNB_UE_stats_j = &UE_list->eNB_UE_stats[UE_list->ordered_CCids[j][UE_id]][UE_id]; - if (eNB_UE_stats_j->dlsch_mcs1 > eNB_UE_stats_i->dlsch_mcs1) { + if (eNB_UE_stats_j->dlsch_mcs[TB1] > eNB_UE_stats_i->dlsch_mcs[TB1]) { tmp = UE_list->ordered_CCids[i][UE_id]; UE_list->ordered_CCids[i][UE_id] = UE_list->ordered_CCids[j][UE_id]; UE_list->ordered_CCids[j][UE_id] = tmp; @@ -226,42 +228,44 @@ assign_rbs_required(module_id_t Mod_id, CC_id = UE_list->ordered_CCids[i][UE_id]; eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id]; - if (eNB_UE_stats->dlsch_mcs1 == 0) { + if (eNB_UE_stats->dlsch_mcs[TB1] == 0) { nb_rbs_required[CC_id][UE_id] = 4; // don't let the TBS get too small } else { nb_rbs_required[CC_id][UE_id] = min_rb_unit[CC_id]; } - TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_rbs_required[CC_id][UE_id]); + TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs[TB1], nb_rbs_required[CC_id][UE_id]); LOG_D(MAC, "[preprocessor] start RB assignement for UE %d CC_id %d dl buffer %d (RB unit %d, MCS %d, TBS %d) \n", UE_id, CC_id, UE_list->UE_template[pCCid][UE_id].dl_buffer_total, nb_rbs_required[CC_id][UE_id], - eNB_UE_stats->dlsch_mcs1, TBS); + eNB_UE_stats->dlsch_mcs[TB1], TBS); N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth); UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx] = 
nb_rbs_allowed_slice(sli->dl[slice_idx].pct, N_RB_DL); /* calculating required number of RBs for each UE */ - while (TBS < UE_list->UE_template[pCCid][UE_id].dl_buffer_total) { + ri = UE_list->UE_sched_ctrl[UE_id].aperiodic_ri_received[CC_id] - 1; + while ((TBS + ri * TBS) < UE_list->UE_template[pCCid][UE_id].dl_buffer_total) { nb_rbs_required[CC_id][UE_id] += min_rb_unit[CC_id]; if (nb_rbs_required[CC_id][UE_id] > UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx]) { - TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx]); + TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs[TB1], UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx]); nb_rbs_required[CC_id][UE_id] = UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_idx]; break; } - TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1, nb_rbs_required[CC_id][UE_id]); + TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs[TB1], nb_rbs_required[CC_id][UE_id]); } // end of while LOG_D(MAC, "[eNB %d] Frame %d: UE %d on CC %d: RB unit %d, nb_required RB %d (TBS %d, mcs %d)\n", Mod_id, frameP, UE_id, CC_id, min_rb_unit[CC_id], nb_rbs_required[CC_id][UE_id], TBS, - eNB_UE_stats->dlsch_mcs1); - sli->pre_processor_results[slice_idx].mcs[CC_id][UE_id] = eNB_UE_stats->dlsch_mcs1; + eNB_UE_stats->dlsch_mcs[TB1]); + sli->pre_processor_results[slice_idx].mcs[CC_id][UE_id][TB1] = eNB_UE_stats->dlsch_mcs[TB1]; + sli->pre_processor_results[slice_idx].mcs[CC_id][UE_id][TB2] = eNB_UE_stats->dlsch_mcs[TB2]; } } } @@ -281,7 +285,7 @@ maxround(module_id_t Mod_id, uint16_t rnti, int frame, cc = &RC.mac[Mod_id]->common_channels[CC_id]; UE_id = find_UE_id(Mod_id, rnti); harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frame,subframe); - round = UE_list->UE_sched_ctrl[UE_id].round[CC_id][harq_pid]; + round = UE_list->UE_sched_ctrl[UE_id].round[CC_id][harq_pid][TB1]; if (round > round_max) { round_max = round; @@ -617,7 +621,7 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id, ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; cc = &RC.mac[Mod_id]->common_channels[CC_id]; harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP); - round = ue_sched_ctl->round[CC_id][harq_pid]; + round = ue_sched_ctl->round[CC_id][harq_pid][TB1]; if (nb_rbs_required[CC_id][UE_id] > 0) { total_ue_count[CC_id]++; @@ -738,7 +742,7 @@ void dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id, ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id]; cc = &RC.mac[Mod_id]->common_channels[CC_id]; harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config,frameP,subframeP); - round = ue_sched_ctl->round[CC_id][harq_pid]; + round = ue_sched_ctl->round[CC_id][harq_pid][TB1]; // control channel or retransmission /* TODO: do we have to check for retransmission? 
*/ diff --git a/openair2/LAYER2/openair2_proc.c b/openair2/LAYER2/openair2_proc.c index 2251344c47..6a3ce417e9 100644 --- a/openair2/LAYER2/openair2_proc.c +++ b/openair2/LAYER2/openair2_proc.c @@ -144,9 +144,9 @@ int dump_eNB_l2_stats(char *buffer, int length) { for (UE_id=UE_list->head; UE_id>=0; UE_id=UE_list->next[UE_id]) { for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) { CC_id=UE_list->ordered_CCids[i][UE_id]; - UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_bitrate=((UE_list->eNB_UE_stats[CC_id][UE_id].TBS*8)/((eNB->frame + 1)*10)); + UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_bitrate=(((UE_list->eNB_UE_stats[CC_id][UE_id].TBS[TB1] + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[TB2])*8)/((eNB->frame + 1)*10)); UE_list->eNB_UE_stats[CC_id][UE_id].total_dlsch_bitrate= ((UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes*8)/((eNB->frame + 1)*10)); - UE_list->eNB_UE_stats[CC_id][UE_id].total_overhead_bytes+= UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes; + UE_list->eNB_UE_stats[CC_id][UE_id].total_overhead_bytes+= UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes[TB1]; UE_list->eNB_UE_stats[CC_id][UE_id].avg_overhead_bytes=((UE_list->eNB_UE_stats[CC_id][UE_id].total_overhead_bytes*8)/((eNB->frame + 1)*10)); UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_bitrate=((UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_TBS*8)/((eNB->frame + 1)*10)); UE_list->eNB_UE_stats[CC_id][UE_id].total_ulsch_bitrate= ((UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes_rx*8)/((eNB->frame + 1)*10)); @@ -155,8 +155,8 @@ int dump_eNB_l2_stats(char *buffer, int length) { map_int_to_str(rrc_status_names, UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status), UE_list->eNB_UE_stats[CC_id][UE_id].crnti, UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id], - UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs1, - UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs2, + UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[TB1], + UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_mcs[TB2], UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used, UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used_retx, UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used, @@ -169,10 +169,10 @@ int dump_eNB_l2_stats(char *buffer, int length) { "(TTI %"PRIu64", total %"PRIu64", avg %"PRIu64")\n", UE_list->eNB_UE_stats[CC_id][UE_id].dlsch_bitrate, UE_list->eNB_UE_stats[CC_id][UE_id].total_dlsch_bitrate, - UE_list->eNB_UE_stats[CC_id][UE_id].TBS, + UE_list->eNB_UE_stats[CC_id][UE_id].TBS[TB1], UE_list->eNB_UE_stats[CC_id][UE_id].total_pdu_bytes, UE_list->eNB_UE_stats[CC_id][UE_id].total_num_pdus, - UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes, + UE_list->eNB_UE_stats[CC_id][UE_id].overhead_bytes[TB1], UE_list->eNB_UE_stats[CC_id][UE_id].total_overhead_bytes, UE_list->eNB_UE_stats[CC_id][UE_id].avg_overhead_bytes ); diff --git a/openair2/RRC/LTE/MESSAGES/asn1_msg.c b/openair2/RRC/LTE/MESSAGES/asn1_msg.c index 9f12e99d9c..a9f92ab719 100644 --- a/openair2/RRC/LTE/MESSAGES/asn1_msg.c +++ b/openair2/RRC/LTE/MESSAGES/asn1_msg.c @@ -2715,7 +2715,7 @@ do_RRCConnectionSetup( break; */ } - + LOG_I(RRC,"UE %x Transmission mode is set to %ld at RRCConnectionSetup because antenna port is %d!\n", ue_context_pP->ue_id_rnti, physicalConfigDedicated2->antennaInfo->choice.explicitValue.transmissionMode+1, carrier->p_eNB); physicalConfigDedicated2->antennaInfo->choice.explicitValue.ue_TransmitAntennaSelection.present = LTE_AntennaInfoDedicated__ue_TransmitAntennaSelection_PR_release; physicalConfigDedicated2->antennaInfo->choice.explicitValue.ue_TransmitAntennaSelection.choice.release = 0; // SchedulingRequestConfig @@ -3101,7 
+3101,7 @@ uint8_t do_RRCConnectionSetup_BR( */ } - + LOG_I(RRC,"UE %x Transmission mode is set to %ld at RRCConnectionSetup_RB because antenna port is %d!\n", ue_context_pP->ue_id_rnti, physicalConfigDedicated2->antennaInfo->choice.explicitValue.transmissionMode+1, carrier->p_eNB); physicalConfigDedicated2->antennaInfo->choice.explicitValue.ue_TransmitAntennaSelection.present = LTE_AntennaInfoDedicated__ue_TransmitAntennaSelection_PR_release; physicalConfigDedicated2->antennaInfo->choice.explicitValue.ue_TransmitAntennaSelection.choice.release = 0; @@ -3600,6 +3600,11 @@ uint16_t do_RRCConnectionReconfiguration(const protocol_ctxt_t *const ctxt_pP, rrcConnectionReconfiguration->criticalExtensions.choice.c1.choice.rrcConnectionReconfiguration_r8.radioResourceConfigDedicated->drb_ToReleaseList = DRB_list2; rrcConnectionReconfiguration->criticalExtensions.choice.c1.choice.rrcConnectionReconfiguration_r8.radioResourceConfigDedicated->sps_Config = sps_Config; rrcConnectionReconfiguration->criticalExtensions.choice.c1.choice.rrcConnectionReconfiguration_r8.radioResourceConfigDedicated->physicalConfigDedicated = physicalConfigDedicated; + if (physicalConfigDedicated && physicalConfigDedicated->antennaInfo) { + LOG_I(RRC,"UE %x Transmission mode is set to %ld at this RRCConnectionReconfiguration!\n", ctxt_pP->rnti, physicalConfigDedicated->antennaInfo->choice.explicitValue.transmissionMode+1); + } else { + LOG_I(RRC,"UE %x Transmission mode is not defined at this RRCConnectionReconfiguration!\n", ctxt_pP->rnti); + } #ifdef CBA rrcConnectionReconfiguration->criticalExtensions.choice.c1.choice.rrcConnectionReconfiguration_r8.radioResourceConfigDedicated->cba_RNTI_vlola= cba_rnti; #endif @@ -3860,6 +3865,11 @@ do_RRCConnectionReestablishment( rrcConnectionReestablishment->criticalExtensions.choice.c1.choice.rrcConnectionReestablishment_r8.radioResourceConfigDedicated.sps_Config = NULL; rrcConnectionReestablishment->criticalExtensions.choice.c1.choice.rrcConnectionReestablishment_r8.radioResourceConfigDedicated.physicalConfigDedicated = physicalConfigDedicated2; rrcConnectionReestablishment->criticalExtensions.choice.c1.choice.rrcConnectionReestablishment_r8.radioResourceConfigDedicated.mac_MainConfig = NULL; + if (physicalConfigDedicated2 && physicalConfigDedicated2->antennaInfo) { + LOG_I(RRC,"UE %x Transmission mode is set to %ld at RRCConnectionReestablishment!\n", ue_context_pP->ue_id_rnti, physicalConfigDedicated2->antennaInfo->choice.explicitValue.transmissionMode+1); + } else { + LOG_I(RRC,"UE %x Transmission mode is not defined at RRCConnectionReestablishment!\n", ue_context_pP->ue_id_rnti); + } uint8_t KeNB_star[32] = { 0 }; uint16_t pci = rrc->carrier[CC_id].physCellId; uint32_t earfcn_dl = (uint32_t)freq_to_arfcn10(RC.mac[ctxt_pP->module_id]->common_channels[CC_id].eutra_band, diff --git a/openair2/RRC/LTE/rrc_eNB.c b/openair2/RRC/LTE/rrc_eNB.c index 60a9f76a14..7599c6903a 100644 --- a/openair2/RRC/LTE/rrc_eNB.c +++ b/openair2/RRC/LTE/rrc_eNB.c @@ -3036,17 +3036,21 @@ void rrc_eNB_generate_defaultRRCConnectionReconfiguration(const protocol_ctxt_t if (*physicalConfigDedicated) { if ((*physicalConfigDedicated)->antennaInfo) { (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.transmissionMode = rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode; - LOG_D(RRC,"Setting transmission mode to %ld+1\n",rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode); + LOG_D(RRC,"Setting transmission mode to %ld+1 ue_Category 
%ld\n",rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode,ue_context_pP->ue_context.UE_Capability->ue_Category); if (rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode==LTE_AntennaInfoDedicated__transmissionMode_tm3) { - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction= - CALLOC(1,sizeof(LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR)); - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->present = - LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR_n2TxAntenna_tm3; - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.buf= MALLOC(1); - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.buf[0] = 0xc0; - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.size=1; - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.bits_unused=6; + if (ue_context_pP->ue_context.UE_Capability->ue_Category >= 2) { + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction= + CALLOC(1,sizeof(LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR)); + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->present = + LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR_n2TxAntenna_tm3; + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.buf= MALLOC(1); + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.buf[0] = 0xc0; + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.size=1; + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.bits_unused=6; + } else { + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.transmissionMode = LTE_AntennaInfoDedicated__transmissionMode_tm2; + } } else if (rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode==LTE_AntennaInfoDedicated__transmissionMode_tm4) { (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction= CALLOC(1,sizeof(LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR)); @@ -3075,6 +3079,8 @@ void rrc_eNB_generate_defaultRRCConnectionReconfiguration(const protocol_ctxt_t (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm6.size=1; (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm6.bits_unused=4; } + LOG_I(RRC,"UE %x Transmission mode is set to %ld at defaultRRCConnectionReconfiguration because ue_Category is %ld and ue_TransmissionMode in configfile is %ld!\n", + ue_context_pP->ue_id_rnti, (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.transmissionMode+1, ue_context_pP->ue_context.UE_Capability->ue_Category, rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode+1); } else { LOG_E(RRC,"antenna_info not present in physical_config_dedicated. 
Not reconfiguring!\n"); } @@ -3660,14 +3666,18 @@ flexran_rrc_eNB_generate_defaultRRCConnectionReconfiguration(const protocol_ctxt LOG_D(RRC,"Setting transmission mode to %ld+1\n",rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode); if (rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode==LTE_AntennaInfoDedicated__transmissionMode_tm3) { - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction= - CALLOC(1,sizeof(LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR)); - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->present = - LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR_n2TxAntenna_tm3; - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.buf= MALLOC(1); - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.buf[0] = 0xc0; - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.size=1; - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.bits_unused=6; + if (ue_context_pP->ue_context.UE_Capability->ue_Category >= 2) { + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction= + CALLOC(1,sizeof(LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR)); + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->present = + LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR_n2TxAntenna_tm3; + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.buf= MALLOC(1); + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.buf[0] = 0xc0; + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.size=1; + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.bits_unused=6; + } else { + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.transmissionMode = LTE_AntennaInfoDedicated__transmissionMode_tm2; + } } else if (rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode==LTE_AntennaInfoDedicated__transmissionMode_tm4) { (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction= CALLOC(1,sizeof(LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR)); @@ -3696,6 +3706,8 @@ flexran_rrc_eNB_generate_defaultRRCConnectionReconfiguration(const protocol_ctxt (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm6.size=1; (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm6.bits_unused=4; } + LOG_I(RRC,"UE %x Transmission mode is set to %ld at flexran defaultRRCConnectionReconfiguration because ue_Category is %ld and ue_TransmissionMode in configfile is %ld!\n", + ue_context_pP->ue_id_rnti, (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.transmissionMode+1, ue_context_pP->ue_context.UE_Capability->ue_Category, rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode+1); } else { LOG_E(RRC,"antenna_info not present in physical_config_dedicated. 
Not reconfiguring!\n"); } @@ -4796,14 +4808,18 @@ rrc_eNB_generate_HO_RRCConnectionReconfiguration(const protocol_ctxt_t *const ct LOG_D(RRC,"Setting transmission mode to %ld+1\n",rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode); if (rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode==LTE_AntennaInfoDedicated__transmissionMode_tm3) { - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction= - CALLOC(1,sizeof(LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR)); - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->present = - LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR_n2TxAntenna_tm3; - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.buf= MALLOC(1); - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.buf[0] = 0xc0; - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.size=1; - (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.bits_unused=6; + if (ue_context_pP->ue_context.UE_Capability->ue_Category >= 2) { + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction= + CALLOC(1,sizeof(LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR)); + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->present = + LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR_n2TxAntenna_tm3; + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.buf= MALLOC(1); + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.buf[0] = 0xc0; + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.size=1; + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm3.bits_unused=6; + } else { + (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.transmissionMode = LTE_AntennaInfoDedicated__transmissionMode_tm2; + } } else if (rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode==LTE_AntennaInfoDedicated__transmissionMode_tm4) { (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction= CALLOC(1,sizeof(LTE_AntennaInfoDedicated__codebookSubsetRestriction_PR)); @@ -4832,7 +4848,8 @@ rrc_eNB_generate_HO_RRCConnectionReconfiguration(const protocol_ctxt_t *const ct (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm6.size=1; (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.codebookSubsetRestriction->choice.n2TxAntenna_tm6.bits_unused=4; } - + LOG_I(RRC,"UE %x Transmission mode is set to %ld at handover RRCConnectionReconfiguration because ue_Category is %ld and ue_TransmissionMode in configfile is %ld!\n", + ue_context_pP->ue_id_rnti, (*physicalConfigDedicated)->antennaInfo->choice.explicitValue.transmissionMode+1, ue_context_pP->ue_context.UE_Capability->ue_Category, rrc_inst->configuration.radioresourceconfig[0].ue_TransmissionMode+1); physicalConfigDedicated2->antennaInfo->choice.explicitValue.ue_TransmitAntennaSelection.present = LTE_AntennaInfoDedicated__ue_TransmitAntennaSelection_PR_release; 
physicalConfigDedicated2->antennaInfo->choice.explicitValue.ue_TransmitAntennaSelection.choice.release = 0; diff --git a/openair2/X2AP/x2ap_eNB_generate_messages.c b/openair2/X2AP/x2ap_eNB_generate_messages.c index 53ec8574fd..a9c01d134e 100644 --- a/openair2/X2AP/x2ap_eNB_generate_messages.c +++ b/openair2/X2AP/x2ap_eNB_generate_messages.c @@ -608,8 +608,8 @@ int x2ap_eNB_generate_x2_handover_request_ack (x2ap_eNB_instance_t *instance_p, ie = (X2AP_HandoverRequestAcknowledge_IEs_t *)calloc(1, sizeof(X2AP_HandoverRequestAcknowledge_IEs_t)); ie->id = X2AP_ProtocolIE_ID_id_New_eNB_UE_X2AP_ID; ie->criticality = X2AP_Criticality_ignore; - ie->value.present = X2AP_HandoverRequestAcknowledge_IEs__value_PR_UE_X2AP_ID_1; - ie->value.choice.UE_X2AP_ID_1 = id_target; + ie->value.present = X2AP_HandoverRequestAcknowledge_IEs__value_PR_UE_X2AP_ID; + ie->value.choice.UE_X2AP_ID = id_target; ASN_SEQUENCE_ADD(&out->protocolIEs.list, ie); /* mandatory */ @@ -702,8 +702,8 @@ int x2ap_eNB_generate_x2_ue_context_release (x2ap_eNB_instance_t *instance_p, x2 ie = (X2AP_UEContextRelease_IEs_t *)calloc(1, sizeof(X2AP_UEContextRelease_IEs_t)); ie->id = X2AP_ProtocolIE_ID_id_New_eNB_UE_X2AP_ID; ie->criticality = X2AP_Criticality_reject; - ie->value.present = X2AP_UEContextRelease_IEs__value_PR_UE_X2AP_ID_1; - ie->value.choice.UE_X2AP_ID_1 = id_target; + ie->value.present = X2AP_UEContextRelease_IEs__value_PR_UE_X2AP_ID; + ie->value.choice.UE_X2AP_ID = id_target; ASN_SEQUENCE_ADD(&out->protocolIEs.list, ie); if (x2ap_eNB_encode_pdu(&pdu, &buffer, &len) < 0) { @@ -762,8 +762,8 @@ int x2ap_eNB_generate_x2_handover_cancel (x2ap_eNB_instance_t *instance_p, x2ap_ ie = (X2AP_HandoverCancel_IEs_t *)calloc(1, sizeof(X2AP_HandoverCancel_IEs_t)); ie->id = X2AP_ProtocolIE_ID_id_New_eNB_UE_X2AP_ID; ie->criticality = X2AP_Criticality_ignore; - ie->value.present = X2AP_HandoverCancel_IEs__value_PR_UE_X2AP_ID_1; - ie->value.choice.UE_X2AP_ID_1 = id_target; + ie->value.present = X2AP_HandoverCancel_IEs__value_PR_UE_X2AP_ID; + ie->value.choice.UE_X2AP_ID = id_target; ASN_SEQUENCE_ADD(&out->protocolIEs.list, ie); } diff --git a/openair2/X2AP/x2ap_eNB_handler.c b/openair2/X2AP/x2ap_eNB_handler.c index 96ffac162e..cc5f01586f 100644 --- a/openair2/X2AP/x2ap_eNB_handler.c +++ b/openair2/X2AP/x2ap_eNB_handler.c @@ -778,7 +778,7 @@ int x2ap_eNB_handle_handover_response (instance_t instance, return -1; } - id_target = ie->value.choice.UE_X2AP_ID_1; + id_target = ie->value.choice.UE_X2AP_ID; ue_id = id_source; @@ -867,7 +867,7 @@ int x2ap_eNB_handle_ue_context_release (instance_t instance, return -1; } - id_target = ie->value.choice.UE_X2AP_ID_1; + id_target = ie->value.choice.UE_X2AP_ID; ue_id = id_source; if (ue_id != x2ap_find_id_from_id_source(&instance_p->id_manager, id_source)) { @@ -944,7 +944,7 @@ int x2ap_eNB_handle_handover_cancel (instance_t instance, X2AP_INFO("%s %d: ie is a NULL pointer \n",__FILE__,__LINE__); id_target = -1; } else - id_target = ie->value.choice.UE_X2AP_ID_1; + id_target = ie->value.choice.UE_X2AP_ID; X2AP_FIND_PROTOCOLIE_BY_ID(X2AP_HandoverCancel_IEs_t, ie, x2HandoverCancel, X2AP_ProtocolIE_ID_id_Cause, true); -- 2.26.2
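
Editor's note (not part of the patch): the recurring pattern in the extract_harq() changes above is per-transport-block HARQ bookkeeping: round == 8 marks a released process, an ACK releases it immediately, and a NACK (value 2, or DTX value 4 treated as NACK) advances the round until the fourth attempt forces a release. The sketch below restates that behaviour as a self-contained C fragment; tb_harq_t, MAX_ROUNDS, HARQ_RELEASED, update_tb_harq and update_two_tb are hypothetical helper names, not symbols from the OAI code, which keeps this state in sched_ctl->round[CC][harq_pid][TB] and sched_ctl->rsn[CC][harq_pid][TB].

#include <stdint.h>

#define HARQ_RELEASED 8   /* same sentinel the patch uses for a free HARQ process */
#define MAX_ROUNDS    4   /* after the 4th failed attempt the process is dropped */

typedef struct {
  uint8_t round; /* current retransmission round, HARQ_RELEASED when idle */
  uint8_t rsn;   /* retransmission sequence number for the next retx */
} tb_harq_t;

/* Apply one ACK/NAK value to one transport block (1 = ACK, 2 = NACK, 4 = DTX). */
static void update_tb_harq(tb_harq_t *tb, uint8_t ack_nak)
{
  if (ack_nak == 1) {               /* ACK: release the process */
    tb->round = HARQ_RELEASED;
    tb->rsn   = 0;
  } else {                          /* NACK or DTX: schedule a retransmission */
    tb->round++;
    tb->rsn++;
    if (tb->round == MAX_ROUNDS) {  /* retransmission limit reached: give up */
      tb->round = HARQ_RELEASED;
      tb->rsn   = 0;
    }
  }
}

/* With two codewords (TM3/TM4), the two feedback bits are applied independently,
 * using select_tb / oppose_tb to map feedback positions to transport blocks. */
static void update_two_tb(tb_harq_t harq[2], uint8_t select_tb, const uint8_t pdu[2])
{
  uint8_t oppose_tb = select_tb ^ 0x1;
  update_tb_harq(&harq[select_tb], pdu[select_tb]);
  update_tb_harq(&harq[oppose_tb], pdu[oppose_tb]);
}

Keeping round and rsn per TB is what lets one codeword be released while the other keeps retransmitting, which the single round[CC_id][harq_pid] counter of the original code could not express.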
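Editor's note (not part of the patch): the rrc_eNB.c changes gate the TM3 configuration, including its n2TxAntenna_tm3 codebook subset restriction (one byte 0xc0 with 6 unused bits), on ue_Category >= 2, presumably because a category-1 UE supports only a single downlink spatial layer; a UE that does not qualify is kept on TM2 instead. A minimal sketch of that decision under those assumptions, with hypothetical stand-in enum values rather than the real LTE_AntennaInfoDedicated__transmissionMode constants:

enum tx_mode { TM2_DIVERSITY = 2, TM3_OPEN_LOOP = 3 };  /* illustrative values only */

/* Pick the transmission mode actually configured for the UE, falling back to
 * TM2 when the configured mode is TM3 but the UE category cannot carry two layers. */
static enum tx_mode pick_dl_tx_mode(long ue_category, enum tx_mode configured)
{
  if (configured == TM3_OPEN_LOOP && ue_category < 2)
    return TM2_DIVERSITY;
  return configured;
}

The added LOG_I lines in the patch make this per-UE outcome visible at RRCConnectionSetup, default reconfiguration, reestablishment and handover, which is useful when verifying the TM2/TM3 behaviour end to end.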