Commit 8bf7a13f authored by Robert Schmidt

Remove first_rb from ULSCH scheduler

parent 8a84e2d9
@@ -1099,14 +1099,12 @@ schedule_ulsch(module_id_t module_idP,
                sub_frame_t subframeP)
 //-----------------------------------------------------------------------------
 {
-  uint16_t first_rb[NFAPI_CC_MAX];
   eNB_MAC_INST *mac = NULL;
   COMMON_channels_t *cc = NULL;
   int sched_subframe;
   int sched_frame;
   /* Init */
   mac = RC.mac[module_idP];
-  memset(first_rb, 0, NFAPI_CC_MAX * sizeof(uint16_t));
   start_meas(&(mac->schedule_ulsch));
   sched_subframe = (subframeP + 4) % 10;
   sched_frame = frameP;
@@ -1213,9 +1211,6 @@ schedule_ulsch(module_id_t module_idP,
   /* Note: RC.nb_mac_CC[module_idP] should be lower than or equal to NFAPI_CC_MAX */
   for (int CC_id = 0; CC_id < RC.nb_mac_CC[module_idP]; CC_id++, cc++) {
-    first_rb[CC_id] = (emtc_active[CC_id] == 1) ? 7 : 1;
-    RA_t *ra_ptr = cc->ra;
     /* From Louis-Adrien to François:
      * The comment bloc below is to configure with a command line.
      * I took it from the equivalent part in the fairRR scheduler (around line 2578 in eNB_scheduler_fairRR.c).
@@ -1226,6 +1221,7 @@ schedule_ulsch(module_id_t module_idP,
      * I think it should be sched_frame instead. This parameter has only impacts in case TDD and preamble format 4.
      * To confirm.
      */
+    /* TODO: update vrb_map_UL here? */
     /*
     int start_rb = 0;
     int nb_rb = 6;
@@ -1242,27 +1238,41 @@ schedule_ulsch(module_id_t module_idP,
     }
     */
-    /*
-     * Check if RA (Msg3) is active in this subframeP, if so skip the PRB used for Msg3
-     * Msg3 is using 1 PRB so we need to increase first_rb accordingly
-     * Not sure about the break (can there be more than 1 active RA procedure per CC_id and per subframe?)
+    /* HACK: let's remove the PUCCH from available RBs
+     * we suppose PUCCH size is:
+     * - for 25 RBs: 1 RB (top and bottom of ressource grid)
+     * - for 50: 2 RBs
+     * - for 100: 3 RBs
+     * This is totally arbitrary and might even be wrong.
      */
-    for (int ra_index = 0; ra_index < NB_RA_PROC_MAX; ra_index++, ra_ptr++) {
-      if ((ra_ptr->state == WAITMSG3) && (ra_ptr->Msg3_subframe == sched_subframe)) {
-        if (first_rb[CC_id] < ra_ptr->msg3_first_rb + ra_ptr->msg3_nb_rb) {
-          first_rb[CC_id] = ra_ptr->msg3_first_rb + ra_ptr->msg3_nb_rb;
-        }
-        /* Louis-Adrien: I couldn't find an interdiction of multiple Msg3 scheduling
-         * on the same time resources. Also the performance improvement of breaking is low,
-         * since we will loop until the end, most of the time.
-         * I'm letting the break as a reminder, in case of misunderstanding the spec.
-         */
-        // break;
-      }
+    switch (to_prb(cc[CC_id].ul_Bandwidth)) {
+      case 25:
+        cc[CC_id].vrb_map_UL[0] = 1;
+        cc[CC_id].vrb_map_UL[24] = 1;
+        break;
+      case 50:
+        cc[CC_id].vrb_map_UL[0] = 1;
+        cc[CC_id].vrb_map_UL[1] = 1;
+        cc[CC_id].vrb_map_UL[48] = 1;
+        cc[CC_id].vrb_map_UL[49] = 1;
+        break;
+      case 100:
+        cc[CC_id].vrb_map_UL[0] = 1;
+        cc[CC_id].vrb_map_UL[1] = 1;
+        cc[CC_id].vrb_map_UL[2] = 1;
+        cc[CC_id].vrb_map_UL[97] = 1;
+        cc[CC_id].vrb_map_UL[98] = 1;
+        cc[CC_id].vrb_map_UL[99] = 1;
+        break;
+      default:
+        LOG_E(MAC, "RBs setting not handled. Todo.\n");
+        exit(1);
     }
-    schedule_ulsch_rnti(module_idP, CC_id, frameP, subframeP, sched_subframe, first_rb);
+    schedule_ulsch_rnti(module_idP, CC_id, frameP, subframeP, sched_subframe);
   }
   stop_meas(&mac->schedule_ulsch);
@@ -1277,8 +1287,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
                     int CC_id,
                     frame_t frameP,
                     sub_frame_t subframeP,
-                    unsigned char sched_subframeP,
-                    uint16_t *first_rb) {
+                    unsigned char sched_subframeP) {
   const uint8_t aggregation = 2;
   /* TODO: does this need to be static? */
   static int32_t tpc_accumulated = 0;
@@ -1306,43 +1315,13 @@ schedule_ulsch_rnti(module_id_t module_idP,
   nfapi_ul_config_ulsch_harq_information *ulsch_harq_information;
   hi_dci0_req->sfn_sf = (frameP << 4) + subframeP;
-  int n_rb_ul_tab = to_prb(cc[CC_id].ul_Bandwidth);
-  /* HACK: let's remove the PUCCH from available RBs
-   * we suppose PUCCH size is:
-   * - for 25 RBs: 1 RB (top and bottom of ressource grid)
-   * - for 50: 2 RBs
-   * - for 100: 3 RBs
-   * This is totally arbitrary and might even be wrong.
-   * We suppose 'first_rb[]' has been correctly populated by the caller,
-   * so we only remove the top part of the resource grid.
-   */
-  switch (n_rb_ul_tab) {
-    case 25:
-      n_rb_ul_tab -= 1;
-      break;
-    case 50:
-      n_rb_ul_tab -= 2;
-      break;
-    case 100:
-      n_rb_ul_tab -= 3;
-      break;
-    default:
-      LOG_E(MAC, "RBs setting not handled. Todo.\n");
-      exit(1);
-  }
-  UE_info->first_rb_offset[CC_id] = n_rb_ul_tab;
   /*
    * ULSCH preprocessor: set UE_template->
    * pre_allocated_nb_rb_ul[slice_idx]
    * pre_assigned_mcs_ul
    * pre_allocated_rb_table_index_ul
    */
-  ulsch_scheduler_pre_processor(module_idP, CC_id, frameP, subframeP, sched_frame, sched_subframeP, first_rb);
+  ulsch_scheduler_pre_processor(module_idP, CC_id, frameP, subframeP, sched_frame, sched_subframeP);
   for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
     if (UE_info->UE_template[CC_id][UE_id].rach_resource_type > 0)
@@ -1382,9 +1361,8 @@ schedule_ulsch_rnti(module_id_t module_idP,
           UE_id,
           rnti);
     LOG_D(MAC,
-          "[eNB %d] %d.%d (sched_frame %d, sched_subframe %d), "
-          "Checking PUSCH %d for UE %d/%x CC %d : aggregation level %d, "
-          "N_RB_UL %d\n",
+          "[eNB %d] %d.%d (sched %d.%d), "
+          "Checking PUSCH %d for UE %d/%x CC %d : aggregation level %d\n",
           module_idP,
           frameP,
           subframeP,
@@ -1394,8 +1372,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
           UE_id,
           rnti,
           CC_id,
-          aggregation,
-          n_rb_ul_tab);
+          aggregation);
     /* Seems unused, only for debug */
     RC.eNB[module_idP][CC_id]->pusch_stats_BO[UE_id][(frameP * 10) + subframeP] =
       UE_template_ptr->estimated_ul_buffer;
@@ -1570,11 +1547,8 @@ schedule_ulsch_rnti(module_id_t module_idP,
       UE_info->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2 = UE_template_ptr->mcs_UL[harq_pid];
-      while (((rb_table[rb_table_index] > (n_rb_ul_tab - first_rb[CC_id]))
-              || (rb_table[rb_table_index] > 45))
-             && (rb_table_index > 0)) {
+      while (rb_table[rb_table_index] > 45 && rb_table_index > 0)
         rb_table_index--;
-      }
       UE_template_ptr->TBS_UL[harq_pid] = get_TBS_UL(UE_template_ptr->mcs_UL[harq_pid], rb_table[rb_table_index]);
       UE_info->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx += rb_table[rb_table_index];
@@ -1588,13 +1562,12 @@ schedule_ulsch_rnti(module_id_t module_idP,
         T_INT(subframeP),
         T_INT(harq_pid),
         T_INT(UE_template_ptr->mcs_UL[harq_pid]),
-        T_INT(first_rb[CC_id]),
         T_INT(rb_table[rb_table_index]),
         T_INT(UE_template_ptr->TBS_UL[harq_pid]),
         T_INT(ndi));
       /* Store information for possible retransmission */
       UE_template_ptr->nb_rb_ul[harq_pid] = rb_table[rb_table_index];
-      UE_template_ptr->first_rb_ul[harq_pid] = first_rb[CC_id];
+      UE_template_ptr->first_rb_ul[harq_pid] = UE_template_ptr->pre_first_nb_rb_ul;
       UE_template_ptr->cqi_req[harq_pid] = cqi_req;
       UE_sched_ctrl_ptr->ul_scheduled |= (1 << harq_pid);
@@ -1632,7 +1605,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
       hi_dci0_pdu->dci_pdu.dci_pdu_rel8.aggregation_level = aggregation;
       hi_dci0_pdu->dci_pdu.dci_pdu_rel8.rnti = rnti;
       hi_dci0_pdu->dci_pdu.dci_pdu_rel8.transmission_power = 6000;
-      hi_dci0_pdu->dci_pdu.dci_pdu_rel8.resource_block_start = first_rb[CC_id];
+      hi_dci0_pdu->dci_pdu.dci_pdu_rel8.resource_block_start = UE_template_ptr->pre_first_nb_rb_ul;
       hi_dci0_pdu->dci_pdu.dci_pdu_rel8.number_of_resource_block =
         rb_table[rb_table_index];
       hi_dci0_pdu->dci_pdu.dci_pdu_rel8.mcs_1 =
@@ -1688,7 +1661,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
         get_tmode(module_idP, CC_id, UE_id),
         mac->ul_handle,
         rnti,
-        first_rb[CC_id], // resource_block_start
+        UE_template_ptr->pre_first_nb_rb_ul, // resource_block_start
         rb_table[rb_table_index], // number_of_resource_blocks
         UE_template_ptr->mcs_UL[harq_pid],
         cshift, // cyclic_shift_2_for_drms
@@ -1757,8 +1730,6 @@ schedule_ulsch_rnti(module_id_t module_idP,
         cqi_req,
         UE_id,
         rnti);
-      /* Increment first rb for next UE allocation */
-      first_rb[CC_id] += rb_table[rb_table_index];
     } else { // round_index > 0 => retransmission
       T(T_ENB_MAC_UE_UL_SCHEDULE_RETRANSMISSION,
         T_INT(module_idP),
@@ -1868,20 +1839,6 @@ schedule_ulsch_rnti(module_id_t module_idP,
         sched_frame,
         sched_subframeP,
         cqi_req);
-      /* HACK: RBs used by retransmission have to be reserved.
-       * The current mechanism uses the notion of 'first_rb', so
-       * we skip all RBs below the ones retransmitted. This is
-       * not correct. Imagine only RB 23 is retransmitted, then all
-       * RBs < 23 will be marked unusable for new transmissions (case where
-       * round == 0). Note also that this code works only if the preprocessor
-       * orders UEs with retransmission with higher priority than UEs with new
-       * transmission.
-       * All this should be cleaned up properly.
-       */
-      if (first_rb[CC_id] < UE_template_ptr->first_rb_ul[harq_pid] + UE_template_ptr->nb_rb_ul[harq_pid])
-        first_rb[CC_id] = UE_template_ptr->first_rb_ul[harq_pid]
-                          + UE_template_ptr->nb_rb_ul[harq_pid];
     } // end of round > 0
   } // loop over UE_ids
 }
@@ -2018,6 +1975,12 @@ void schedule_ulsch_rnti_emtc(module_id_t module_idP,
             UE_sched_ctrl->cqi_req_timer);
       /* Reset the scheduling request */
       emtc_active[CC_id] = 1;
+      cc[CC_id].vrb_map_UL[1] = 1;
+      cc[CC_id].vrb_map_UL[2] = 1;
+      cc[CC_id].vrb_map_UL[3] = 1;
+      cc[CC_id].vrb_map_UL[4] = 1;
+      cc[CC_id].vrb_map_UL[5] = 1;
+      cc[CC_id].vrb_map_UL[6] = 1;
      UE_template->ul_SR = 0;
      status = mac_eNB_get_rrc_status(module_idP,rnti);
      cqi_req = 0;
......
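For orientation, the change above replaces the per-CC `first_rb` offset with markings in `cc[CC_id].vrb_map_UL`: every PRB the scheduler must not use (the assumed PUCCH regions here, the eMTC RBs further down) is flagged in that map before `schedule_ulsch_rnti()` runs, and the preprocessor later works out what is left. The sketch below only illustrates that idea under the same bandwidth assumptions as the new `switch`; `reserve_pucch_rbs()`, `N_RB` and the `main()` driver are invented for this example and are not OAI identifiers.

```c
/* Standalone illustration; names like N_RB and reserve_pucch_rbs are made up
 * for this sketch and are not part of the OAI code base. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define N_RB 25  /* assume a 5 MHz cell (25 UL RBs), as in the "case 25" branch */

/* Mark the RBs assumed to carry PUCCH as busy, mirroring the switch added in
 * schedule_ulsch(): 1 RB at each edge for 25 RBs, 2 for 50, 3 for 100. */
static void reserve_pucch_rbs(uint8_t *vrb_map_UL, int n_rb_ul) {
  int guard;
  switch (n_rb_ul) {
    case 25:  guard = 1; break;
    case 50:  guard = 2; break;
    case 100: guard = 3; break;
    default:  guard = 0; break; /* unhandled bandwidths left untouched here */
  }
  for (int i = 0; i < guard; ++i) {
    vrb_map_UL[i] = 1;               /* bottom of the resource grid */
    vrb_map_UL[n_rb_ul - 1 - i] = 1; /* top of the resource grid */
  }
}

int main(void) {
  uint8_t vrb_map_UL[N_RB];
  memset(vrb_map_UL, 0, sizeof(vrb_map_UL));
  reserve_pucch_rbs(vrb_map_UL, N_RB);

  /* Any other reservation (Msg3, eMTC, retransmissions) would simply set more entries. */
  for (int i = 0; i < N_RB; ++i)
    printf("%d", vrb_map_UL[i]);
  printf("\n"); /* prints 1, then 23 zeros, then 1 for 25 RBs */
  return 0;
}
```

Keeping the reservations in one shared map means later consumers can set or read entries directly instead of threading a `first_rb` offset through every call in the chain.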
@@ -825,6 +825,8 @@ typedef struct {
   /// Number of Allocated RBs by the ulsch preprocessor
   uint8_t pre_allocated_nb_rb_ul;
+  /// Start of Allocated RBs by the USLCH preprocessor
+  uint8_t pre_first_nb_rb_ul;
   /// index of Allocated RBs by the ulsch preprocessor
   int8_t pre_allocated_rb_table_index_ul;
@@ -1147,7 +1149,6 @@ typedef struct {
   /// Sorting criteria for the UE list in the MAC preprocessor
   uint16_t sorting_criteria[MAX_NUM_SLICES][CR_NUM];
-  uint16_t first_rb_offset[NFAPI_CC_MAX];
 } UE_info_t;
 /*! \brief deleting control information*/
......
@@ -129,8 +129,7 @@ void schedule_ulsch(module_id_t module_idP, frame_t frameP,
  */
 void schedule_ulsch_rnti(module_id_t module_idP, int CC_id, frame_t frameP,
                          sub_frame_t subframe,
-                         unsigned char sched_subframe,
-                         uint16_t *first_rb);
+                         unsigned char sched_subframe);
 void schedule_ulsch_rnti_emtc(module_id_t module_idP,
                               frame_t frameP,
@@ -674,15 +673,14 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
                                    int frameP,
                                    sub_frame_t subframeP,
                                    int sched_frameP,
-                                   unsigned char sched_subframeP,
-                                   uint16_t *first_rb);
+                                   unsigned char sched_subframeP);
 void store_ulsch_buffer(module_id_t module_idP, int frameP,
                         sub_frame_t subframeP);
 void assign_max_mcs_min_rb(module_id_t module_idP,
                            int CC_id,
                            int frameP,
                            sub_frame_t subframeP,
-                           uint16_t *first_rb);
+                           int available_rbs);
 void adjust_bsr_info(int buffer_occupancy, uint16_t TBS,
                      UE_TEMPLATE *UE_template);
......
@@ -391,21 +391,30 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
                                    int frameP,
                                    sub_frame_t subframeP,
                                    int sched_frameP,
-                                   unsigned char sched_subframeP,
-                                   uint16_t *first_rb) {
+                                   unsigned char sched_subframeP) {
   uint16_t nb_allocated_rbs[MAX_MOBILES_PER_ENB];
   uint16_t total_allocated_rbs = 0;
   uint16_t average_rbs_per_user = 0;
   int16_t total_remaining_rbs = 0;
   uint16_t total_ue_count = 0;
-  eNB_MAC_INST *eNB = RC.mac[module_idP];
-  UE_info_t *UE_info = &eNB->UE_info;
-  const int N_RB_UL = to_prb(eNB->common_channels[CC_id].ul_Bandwidth);
-  uint16_t available_rbs = N_RB_UL - 2 * first_rb[CC_id]; // top and bottom // - UE_info->first_rb_offset[CC_id];
-  // maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB
+  UE_info_t *UE_info = &RC.mac[module_idP]->UE_info;
+  const int N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth);
+  const COMMON_channels_t *cc = &RC.mac[module_idP]->common_channels[CC_id];
+  int available_rbs = 0;
+  int first_rb = -1;
+  for (int i = 0; i < N_RB_UL; ++i) {
+    if (cc->vrb_map_UL[i] == 0) {
+      available_rbs++;
+      if (first_rb < 0)
+        first_rb = i;
+    }
+  }
+  // maximize MCS and then allocate required RB according to the buffer
+  // occupancy with the limit of max available UL RB
   LOG_D(MAC, "In ulsch_preprocessor: assign max mcs min rb\n");
-  assign_max_mcs_min_rb(module_idP, CC_id, frameP, subframeP, first_rb);
+  assign_max_mcs_min_rb(module_idP, CC_id, frameP, subframeP, available_rbs);
   for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
     if (UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul > 0) {
@@ -462,7 +471,9 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
       total_allocated_rbs++;
     }
+    UE_template->pre_first_nb_rb_ul = first_rb;
     UE_template->pre_allocated_nb_rb_ul = nb_allocated_rbs[UE_id];
+    first_rb += nb_allocated_rbs[UE_id];
     LOG_D(MAC, "******************UL Scheduling Information for UE%d CC_id %d ************************\n",
           UE_id,
          CC_id);
@@ -524,9 +535,7 @@ assign_max_mcs_min_rb(module_id_t module_idP,
                       int CC_id,
                       int frameP,
                       sub_frame_t subframeP,
-                      uint16_t *first_rb) {
-  const int N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth);
-  const int available_rbs = N_RB_UL - 2 * first_rb[CC_id]; // top and bottom - UE_info->first_rb_offset[CC_id];
+                      int available_rbs) {
   const int Ncp = RC.mac[module_idP]->common_channels[CC_id].Ncp;
   UE_info_t *UE_info = &RC.mac[module_idP]->UE_info;
......
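To make the new control flow in `ulsch_scheduler_pre_processor()` easier to follow, here is a minimal, self-contained sketch of the two steps it now performs: scan `vrb_map_UL` to count the free RBs and remember the first free one, then hand out contiguous chunks per UE while recording the start (stored as `pre_first_nb_rb_ul` in the real code) and the length (`pre_allocated_nb_rb_ul`). The UE list, the made-up `ue_demand[]` numbers and the clamping are simplifications for illustration (the real code derives per-UE shares from buffer status and averages); like the committed code, it assumes the free RBs form one contiguous block.

```c
/* Self-contained sketch; the UE list, ue_demand[] and the local array names
 * are invented here to illustrate the flow, they are not OAI identifiers. */
#include <stdio.h>
#include <stdint.h>

#define N_RB_UL 25
#define NUM_UE  3

int main(void) {
  /* vrb_map_UL as left by schedule_ulsch(): PUCCH edges already reserved */
  uint8_t vrb_map_UL[N_RB_UL] = {0};
  vrb_map_UL[0] = 1;
  vrb_map_UL[N_RB_UL - 1] = 1;

  /* Step 1: count free RBs and remember the first free one,
   * like the loop added to ulsch_scheduler_pre_processor(). */
  int available_rbs = 0;
  int first_rb = -1;
  for (int i = 0; i < N_RB_UL; ++i) {
    if (vrb_map_UL[i] == 0) {
      available_rbs++;
      if (first_rb < 0)
        first_rb = i;
    }
  }

  /* Step 2: hand out contiguous chunks; per UE, store the start and the
   * length, then advance first_rb for the next UE. */
  int ue_demand[NUM_UE] = {8, 8, 8};  /* made-up per-UE RB needs */
  int pre_first_nb_rb_ul[NUM_UE];
  int pre_allocated_nb_rb_ul[NUM_UE];
  for (int ue = 0; ue < NUM_UE; ++ue) {
    int nb = ue_demand[ue];
    if (nb > available_rbs)
      nb = available_rbs;              /* clamp to what is left */
    pre_first_nb_rb_ul[ue] = first_rb;
    pre_allocated_nb_rb_ul[ue] = nb;
    first_rb += nb;
    available_rbs -= nb;
  }

  for (int ue = 0; ue < NUM_UE; ++ue)
    printf("UE%d: start RB %d, %d RBs\n",
           ue, pre_first_nb_rb_ul[ue], pre_allocated_nb_rb_ul[ue]);
  return 0;
}
```

With 25 RBs and the two edge RBs reserved, the sketch allocates UE0 RBs 1-8, UE1 RBs 9-16 and UE2 the remaining 7 RBs, which is the same start/length bookkeeping that later feeds `resource_block_start` in the DCI0 and UL config PDUs.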