Commit 8de28502 authored by shahab SHARIATBAGHERI

Merge branch 'feature-68-uplink' into feature-68-enb-agent

parents e2ac251c 9e7e57de
......@@ -1148,6 +1148,7 @@ if (FLEXRAN_AGENT_SB_IF)
set (MAC_SRC ${MAC_SRC}
${MAC_DIR}/flexran_agent_scheduler_dlsch_ue.c
${MAC_DIR}/flexran_agent_scheduler_ulsch_ue.c
${MAC_DIR}/flexran_agent_scheduler_dataplane.c
${MAC_DIR}/flexran_agent_scheduler_dlsch_ue_remote.c
)
......@@ -1171,6 +1172,9 @@ if (FLEXRAN_AGENT_SB_IF)
add_library(default_sched SHARED ${MAC_DIR}/flexran_agent_scheduler_dlsch_ue.c)
add_library(remote_sched SHARED ${MAC_DIR}/flexran_agent_scheduler_dlsch_ue_remote.c)
add_library(default_ul_sched SHARED ${MAC_DIR}/flexran_agent_scheduler_ulsch_ue.c)
endif()
# L3 Libs
......
......@@ -1044,6 +1044,78 @@ int flexran_agent_mac_destroy_dl_config(Protocol__FlexranMessage *msg) {
return -1;
}
int flexran_agent_mac_create_empty_ul_config(mid_t mod_id, Protocol__FlexranMessage **msg) {
int xid = 0;
Protocol__FlexHeader *header;
if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_UL_MAC_CONFIG, &header) != 0)
goto error;
Protocol__FlexUlMacConfig *ul_mac_config_msg;
ul_mac_config_msg = malloc(sizeof(Protocol__FlexUlMacConfig));
if (ul_mac_config_msg == NULL) {
goto error;
}
protocol__flex_ul_mac_config__init(ul_mac_config_msg);
ul_mac_config_msg->header = header;
ul_mac_config_msg->has_sfn_sf = 1;
ul_mac_config_msg->sfn_sf = flexran_get_sfn_sf(mod_id);
*msg = malloc(sizeof(Protocol__FlexranMessage));
if(*msg == NULL)
goto error;
protocol__flexran_message__init(*msg);
(*msg)->msg_case = PROTOCOL__FLEXRAN_MESSAGE__MSG_UL_MAC_CONFIG_MSG;
(*msg)->msg_dir = PROTOCOL__FLEXRAN_DIRECTION__INITIATING_MESSAGE;
(*msg)->ul_mac_config_msg = ul_mac_config_msg;
return 0;
error:
return -1;
}
int flexran_agent_mac_destroy_ul_config(Protocol__FlexranMessage *msg) {
int i,j, k;
if(msg->msg_case != PROTOCOL__FLEXRAN_MESSAGE__MSG_UL_MAC_CONFIG_MSG)
goto error;
Protocol__FlexUlDci *ul_dci;
free(msg->ul_mac_config_msg->header);
for (i = 0; i < msg->ul_mac_config_msg->n_ul_ue_data; i++) {
// TODO uplink rlc ...
// free(msg->ul_mac_config_msg->dl_ue_data[i]->ce_bitmap);
// for (j = 0; j < msg->ul_mac_config_msg->ul_ue_data[i]->n_rlc_pdu; j++) {
// for (k = 0; k < msg->ul_mac_config_msg->ul_ue_data[i]->rlc_pdu[j]->n_rlc_pdu_tb; k++) {
// free(msg->ul_mac_config_msg->dl_ue_data[i]->rlc_pdu[j]->rlc_pdu_tb[k]);
// }
// free(msg->ul_mac_config_msg->ul_ue_data[i]->rlc_pdu[j]->rlc_pdu_tb);
// free(msg->ul_mac_config_msg->ul_ue_data[i]->rlc_pdu[j]);
// }
// free(msg->ul_mac_config_msg->ul_ue_data[i]->rlc_pdu);
ul_dci = msg->ul_mac_config_msg->ul_ue_data[i]->ul_dci;
// free(dl_dci->tbs_size);
// free(ul_dci->mcs);
// free(ul_dci->ndi);
// free(ul_dci->rv);
// free(ul_dci);
free(msg->ul_mac_config_msg->ul_ue_data[i]);
}
free(msg->ul_mac_config_msg->ul_ue_data);
free(msg->ul_mac_config_msg);
free(msg);
return 0;
error:
return -1;
}
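A minimal usage sketch of this new constructor/destructor pair (the mod_id value 0 is only illustrative; a scheduler would normally append flex_ul_data entries before handing the message over):

    Protocol__FlexranMessage *ul_msg = NULL;
    if (flexran_agent_mac_create_empty_ul_config(0 /* mod_id */, &ul_msg) == 0) {
      /* ... fill ul_msg->ul_mac_config_msg->ul_ue_data here ... */
      flexran_agent_mac_destroy_ul_config(ul_msg);
    }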
void flexran_agent_get_pending_dl_mac_config(mid_t mod_id, Protocol__FlexranMessage **msg) {
struct lfds700_misc_prng_state ls;
......@@ -1176,12 +1248,13 @@ int flexran_agent_register_mac_xface(mid_t mod_id, AGENT_MAC_xface *xface) {
xface->flexran_agent_send_sr_info = flexran_agent_send_sr_info;
xface->flexran_agent_send_sf_trigger = flexran_agent_send_sf_trigger;
//xface->flexran_agent_send_update_mac_stats = flexran_agent_send_update_mac_stats;
xface->flexran_agent_schedule_ue_spec = flexran_schedule_ue_spec_default;
xface->flexran_agent_schedule_ue_spec = flexran_schedule_ue_dl_spec_default;
xface->flexran_agent_schedule_ul_spec = flexran_schedule_ue_ul_spec_default;
//xface->flexran_agent_schedule_ue_spec = flexran_schedule_ue_spec_remote;
xface->flexran_agent_get_pending_dl_mac_config = flexran_agent_get_pending_dl_mac_config;
xface->dl_scheduler_loaded_lib = NULL;
xface->ul_scheduler_loaded_lib = NULL;
mac_agent_registered[mod_id] = 1;
agent_mac_xface[mod_id] = xface;
......@@ -1199,7 +1272,7 @@ int flexran_agent_unregister_mac_xface(mid_t mod_id, AGENT_MAC_xface *xface) {
xface->dl_scheduler_loaded_lib = NULL;
xface->ul_scheduler_loaded_lib = NULL;
mac_agent_registered[mod_id] = 0;
agent_mac_xface[mod_id] = NULL;
......
......@@ -57,6 +57,10 @@ int flexran_agent_mac_destroy_stats_reply(Protocol__FlexranMessage *msg);
int flexran_agent_mac_create_empty_dl_config(mid_t mod_id, Protocol__FlexranMessage **msg);
int flexran_agent_mac_destroy_dl_config(Protocol__FlexranMessage *msg);
/* UL MAC scheduling decision protocol message constructor (empty command) and destructor */
int flexran_agent_mac_create_empty_ul_config(mid_t mod_id, Protocol__FlexranMessage **msg);
int flexran_agent_mac_destroy_ul_config(Protocol__FlexranMessage *msg);
int flexran_agent_mac_handle_dl_mac_config(mid_t mod_id, const void *params, Protocol__FlexranMessage **msg);
......
......@@ -61,6 +61,9 @@ typedef struct {
int *mbsfn_flag, Protocol__FlexranMessage **dl_info);
void (*flexran_agent_schedule_ul_spec)(mid_t module_idP, uint32_t frameP, unsigned char cooperation_flag,
uint32_t subframeP, unsigned char sched_subframe, Protocol__FlexranMessage **ul_info);
/// Notify the controller of a state change of a particular UE, by sending the proper
/// UE state change message (ACTIVATION, DEACTIVATION, HANDOVER)
// int (*flexran_agent_notify_ue_state_change)(mid_t mod_id, uint32_t rnti,
......@@ -68,6 +71,7 @@ typedef struct {
void *dl_scheduler_loaded_lib;
void *ul_scheduler_loaded_lib;
/*TODO: Fill in with the rest of the MAC layer technology specific callbacks (UL/DL scheduling, RACH info etc)*/
} AGENT_MAC_xface;
......
......@@ -607,7 +607,10 @@ int parse_mac_config(mid_t mod_id, yaml_parser_t *parser) {
} else if (strcmp((char *) event.data.scalar.value, "ul_scheduler") == 0) {
// Call the proper handler
LOG_D(ENB_APP, "This is for the ul_scheduler subsystem\n");
goto error;
if (parse_ul_scheduler_config(mod_id, parser) == -1) {
LOG_D(ENB_APP, "An error occured\n");
goto error;
}
// TODO
} else if (strcmp((char *) event.data.scalar.value, "ra_scheduler") == 0) {
// Call the proper handler
......@@ -700,6 +703,56 @@ int parse_dl_scheduler_config(mid_t mod_id, yaml_parser_t *parser) {
return -1;
}
int parse_ul_scheduler_config(mid_t mod_id, yaml_parser_t *parser) {
yaml_event_t event;
int done = 0;
int mapping_started = 0;
while (!done) {
if (!yaml_parser_parse(parser, &event))
goto error;
switch (event.type) {
// We are expecting a mapping (behavior and parameters)
case YAML_MAPPING_START_EVENT:
LOG_D(ENB_APP, "The mapping of the subsystem started\n");
mapping_started = 1;
break;
case YAML_MAPPING_END_EVENT:
LOG_D(ENB_APP, "The mapping of the subsystem ended\n");
mapping_started = 0;
break;
case YAML_SCALAR_EVENT:
if (!mapping_started) {
goto error;
}
// Check what key needs to be set
if (strcmp((char *) event.data.scalar.value, "parameters") == 0) {
LOG_D(ENB_APP, "Now it is time to set the parameters for this subsystem\n");
if (parse_ul_scheduler_parameters(mod_id, parser) == -1) {
goto error;
}
}
break;
default:
goto error;
}
done = (event.type == YAML_MAPPING_END_EVENT);
yaml_event_delete(&event);
}
return 0;
error:
yaml_event_delete(&event);
return -1;
}
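For orientation, the mapping walked above only reacts to a "parameters" key; a hypothetical fragment of the agent policy file that it would accept is sketched below as a C string (the ul_rb_share name is purely illustrative; real parameter names must match symbols exported by the loaded UL scheduler library, see parse_ul_scheduler_parameters further down):

    /* Hypothetical YAML fragment consumed by parse_ul_scheduler_config() */
    static const char *example_ul_sched_yaml =
        "ul_scheduler:\n"
        "  parameters:\n"
        "    ul_rb_share:\n"
        "      - 0.5\n"
        "      - 0.5\n";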
int parse_dl_scheduler_parameters(mid_t mod_id, yaml_parser_t *parser) {
yaml_event_t event;
......@@ -755,6 +808,61 @@ int parse_dl_scheduler_parameters(mid_t mod_id, yaml_parser_t *parser) {
return -1;
}
int parse_ul_scheduler_parameters(mid_t mod_id, yaml_parser_t *parser) {
yaml_event_t event;
void *param;
int done = 0;
int mapping_started = 0;
while (!done) {
if (!yaml_parser_parse(parser, &event))
goto error;
switch (event.type) {
// We are expecting a mapping of parameters
case YAML_MAPPING_START_EVENT:
LOG_D(ENB_APP, "The mapping of the parameters started\n");
mapping_started = 1;
break;
case YAML_MAPPING_END_EVENT:
LOG_D(ENB_APP, "The mapping of the parameters ended\n");
mapping_started = 0;
break;
case YAML_SCALAR_EVENT:
if (!mapping_started) {
goto error;
}
// Check what key needs to be set
if (mac_agent_registered[mod_id]) {
LOG_D(ENB_APP, "Setting parameter %s\n", event.data.scalar.value);
param = dlsym(agent_mac_xface[mod_id]->ul_scheduler_loaded_lib,
(char *) event.data.scalar.value);
if (param == NULL) {
goto error;
}
apply_parameter_modification(param, parser);
} else {
goto error;
}
break;
default:
goto error;
}
done = (event.type == YAML_MAPPING_END_EVENT);
yaml_event_delete(&event);
}
return 0;
error:
yaml_event_delete(&event);
return -1;
}
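The parameter update above relies on the UL scheduler being a shared object whose tunables are exported as global symbols. A stand-alone sketch of that dlsym technique, with an assumed library name (libdefault_ul_sched.so, matching the default_ul_sched CMake target added in this commit) and a hypothetical symbol name:

    #include <dlfcn.h>
    #include <stdio.h>

    int main(void) {
      void *lib = dlopen("libdefault_ul_sched.so", RTLD_NOW | RTLD_GLOBAL);
      if (lib == NULL) { fprintf(stderr, "%s\n", dlerror()); return 1; }
      int *knob = dlsym(lib, "ul_sched_example_knob");  /* hypothetical exported global */
      if (knob != NULL)
        *knob = 42;  /* the YAML-provided value would be written here */
      dlclose(lib);
      return 0;
    }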
int load_dl_scheduler_function(mid_t mod_id, const char *function_name) {
void *lib;
......
......@@ -101,6 +101,10 @@ int parse_dl_scheduler_config(mid_t mod_id, yaml_parser_t *parser);
int parse_dl_scheduler_parameters(mid_t mod_id, yaml_parser_t *parser);
int parse_ul_scheduler_config(mid_t mod_id, yaml_parser_t *parser);
int parse_ul_scheduler_parameters(mid_t mod_id, yaml_parser_t *parser);
int load_dl_scheduler_function(mid_t mod_id, const char *function_name);
#endif /*FLEXRAN_AGENT_MAC_INTERNAL_H_*/
......@@ -3,7 +3,7 @@ package protocol;
import "mac_primitives.proto";
//
// Body of UE DL MAC scheduling configuration info
// Body of UE DL/UL MAC scheduling configuration info
//
message flex_dl_data {
......@@ -15,6 +15,12 @@ message flex_dl_data {
optional uint32 act_deact_ce = 6; //Hex content of MAC CE for Activation/Deactivation in CA
}
message flex_ul_data {
optional uint32 rnti = 1;
optional flex_ul_dci ul_dci = 2;
}
//
// Body of the RAR scheduler configuration
//
......@@ -55,4 +61,4 @@ message flex_pdcch_ofdm_sym_count {
enum flex_broadcast_type {
FLBT_BCCH = 0;
FLBT_PCCH = 1;
}
\ No newline at end of file
}
......@@ -24,11 +24,12 @@ message flexran_message {
flex_ue_config_reply ue_config_reply_msg = 11;
flex_lc_config_request lc_config_request_msg = 12;
flex_lc_config_reply lc_config_reply_msg = 13;
flex_dl_mac_config dl_mac_config_msg = 14;
flex_dl_mac_config dl_mac_config_msg = 14;
flex_ue_state_change ue_state_change_msg = 15;
flex_control_delegation control_delegation_msg = 16;
flex_agent_reconfiguration agent_reconfiguration_msg = 17;
flex_rrc_triggering rrc_triggering = 18;
flex_ul_mac_config ul_mac_config_msg = 19;
}
}
......@@ -164,6 +165,14 @@ message flex_dl_mac_config {
repeated flex_pdcch_ofdm_sym_count ofdm_sym = 6; // OFDM symbol count for each CC
}
message flex_ul_mac_config {
optional flex_header header = 1;
optional uint32 sfn_sf = 2;
repeated flex_ul_data ul_ue_data = 3;
}
message flex_rrc_triggering {
optional flex_header header = 1;
......@@ -171,6 +180,7 @@ message flex_rrc_triggering {
}
//
// UE state change message
//
......
......@@ -32,7 +32,7 @@ enum flex_type {
//Controller command messages
FLPT_DL_MAC_CONFIG = 13;
// UE state change messages
FLPT_UE_STATE_CHANGE = 14;
......@@ -40,6 +40,6 @@ enum flex_type {
FLPT_DELEGATE_CONTROL = 15;
FLPT_RECONFIGURE_AGENT = 16;
FLPT_RRC_TRIGGERING = 17;
FLPT_UL_MAC_CONFIG = 18;
}
......@@ -33,6 +33,38 @@ message flex_dl_dci {
optional uint32 pdcch_power_offset = 25; // DL PDCCH power boosting in dB
optional uint32 cif_present = 26; // Boolean. Indication of CIF field
optional uint32 cif = 27; // CIF for cross-carrier scheduling
}
message flex_ul_dci {
optional uint32 rnti = 1;
optional uint32 rb_start = 2; // The start RB allocated to the UE
optional uint32 rb_len = 3; // The number of RBs allocated to the UE
optional uint32 mcs = 4; // Modulation and coding scheme
optional uint32 cyclic_shift2 = 5; // match DCI format 0/4 PDU
optional uint32 freq_hop_flag = 6; // 0 no hopping, 1 hopping
optional uint32 freq_hop_map = 7; // Frequency hopping bits (0..4)
optional uint32 ndi = 8; // New data indicator
optional uint32 rv = 9; // Redundancy version
optional uint32 harq_pid = 10; // The harq process id
optional uint32 ultx_mode = 11; // A FLULM_* value
optional uint32 tbs_size = 12; // The size of each TBS
optional uint32 n_srs = 13; // Overlap indication with srs
optional uint32 res_alloc = 14; // Type of resource allocation
optional uint32 size = 15; // Size of the ULSCH PDU in bytes for UL Grant.
optional uint32 dai = 16; // TDD only
// optional uint32 tb_swap = 17; // Boolean. TB to codeword swap flag
// optional uint32 pdcch_order = 19;
// optional uint32 preamble_index = 20; // Only valid if pdcch_order = 1
// optional uint32 prach_mask_index = 21; // Only valid if pdcch_order = 1
// optional uint32 tbs_idx = 23; // The TBS index for Format 1A
}
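A sketch of how a scheduler could populate one of these DCIs and attach it to the UL config message built by flexran_agent_mac_create_empty_ul_config(), assuming the usual protobuf-c generated names (Protocol__FlexUlData, protocol__flex_ul_dci__init, ...) and illustrative values for rnti and the resource allocation:

    Protocol__FlexUlDci *ul_dci = malloc(sizeof(Protocol__FlexUlDci));
    protocol__flex_ul_dci__init(ul_dci);
    ul_dci->has_rnti = 1;     ul_dci->rnti = rnti;
    ul_dci->has_rb_start = 1; ul_dci->rb_start = first_rb;
    ul_dci->has_rb_len = 1;   ul_dci->rb_len = 4;
    ul_dci->has_mcs = 1;      ul_dci->mcs = 10;

    Protocol__FlexUlData *ue_data = malloc(sizeof(Protocol__FlexUlData));
    protocol__flex_ul_data__init(ue_data);
    ue_data->has_rnti = 1; ue_data->rnti = rnti;
    ue_data->ul_dci = ul_dci;

    ul_mac_config_msg->n_ul_ue_data = 1;
    ul_mac_config_msg->ul_ue_data = malloc(sizeof(Protocol__FlexUlData *));
    ul_mac_config_msg->ul_ue_data[0] = ue_data;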
//
......@@ -73,4 +105,10 @@ enum flex_vrb_format {
enum flex_ngap_val {
FLNGV_1 = 0;
FLNGV_2 = 1;
}
\ No newline at end of file
}
enum flex_mod_type {
FLMOD_QPSK = 2;
FLMOD_16QAM = 4;
FLMOD_64QAM = 6;
}
......@@ -299,8 +299,9 @@ int flexran_get_harq(const mid_t mod_id,
const mid_t ue_id,
const int frame,
const uint8_t subframe,
uint8_t *id,
uint8_t *round) { //flag_id_status = 0 then id, else status
uint8_t *pid,
uint8_t *round,
const uint8_t harq_flag) { // harq_flag selects the HARQ direction: openair_harq_DL or openair_harq_UL
/*TODO: Add int TB in function parameters to get the status of the second TB. This can be done to by editing in
* get_ue_active_harq_pid function in line 272 file: phy_procedures_lte_eNB.c to add
* DLSCH_ptr = PHY_vars_eNB_g[Mod_id][CC_id]->dlsch_eNB[(uint32_t)UE_id][1];*/
......@@ -310,10 +311,21 @@ int flexran_get_harq(const mid_t mod_id,
uint16_t rnti = flexran_get_ue_crnti(mod_id,ue_id);
if (harq_flag == openair_harq_DL){
mac_xface->get_ue_active_harq_pid(mod_id,CC_id,rnti,frame,subframe,&harq_pid,&harq_round,openair_harq_DL);
mac_xface->get_ue_active_harq_pid(mod_id,CC_id,rnti,frame,subframe,&harq_pid,&harq_round,openair_harq_DL);
*id = harq_pid;
} else if (harq_flag == openair_harq_UL){
mac_xface->get_ue_active_harq_pid(mod_id,CC_id,rnti,frame,subframe,&harq_pid,&harq_round,openair_harq_UL);
}
else {
LOG_W(FLEXRAN_AGENT,"harq_flag is not recongnized");
}
*pid = harq_pid;
*round = harq_round;
/* if (round > 0) { */
/* *status = 1; */
......@@ -643,6 +655,21 @@ int flexran_get_meas_gap_config_offset(mid_t mod_id, mid_t ue_id) {
return -1;
}
int flexran_get_rrc_status(const mid_t mod_id, const rnti_t rntiP){
struct rrc_eNB_ue_context_s* ue_context_p = NULL;
ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
if (ue_context_p != NULL) {
return(ue_context_p->ue_context.Status);
} else {
return RRC_INACTIVE;
}
}
int flexran_get_ue_aggregated_max_bitrate_dl (mid_t mod_id, mid_t ue_id) {
return ((UE_list_t *)enb_ue[mod_id])->UE_sched_ctrl[ue_id].ue_AggregatedMaximumBitrateDL;
}
......
......@@ -187,7 +187,7 @@ int flexran_get_ue_pmi(mid_t mod_id);
a designated frame and subframe. Returns 0 for success. The id and the
round of the HARQ process are stored in id and round respectively */
int flexran_get_harq(const mid_t mod_id, const uint8_t CC_id, const mid_t ue_id,
const int frame, const uint8_t subframe, unsigned char *id, unsigned char *round);
const int frame, const uint8_t subframe, unsigned char *id, unsigned char *round, const uint8_t harq_flag);
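Call sites now pass the extra direction flag selecting the DL or UL HARQ process; a representative DL query (variable names follow the scheduler code in this changeset):

    uint8_t harq_pid = 0, harq_round = 0;
    flexran_get_harq(mod_id, CC_id, UE_id, frame, subframe, &harq_pid, &harq_round, openair_harq_DL);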
/* Uplink power control management*/
int flexran_get_p0_pucch_dbm(mid_t mod_id, mid_t ue_id, int CC_id);
......
......@@ -386,11 +386,30 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
if (mac_xface->frame_parms->frame_type == FDD) { //FDD
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,0,4);//,calibration_flag);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,0,4, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
} else if ((mac_xface->frame_parms->tdd_config == 0) || //TDD
(mac_xface->frame_parms->tdd_config == 3) ||
(mac_xface->frame_parms->tdd_config == 6)) {
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,subframeP,4);//,calibration_flag);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,subframeP,4, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
}
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
......@@ -404,7 +423,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -422,14 +441,36 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
switch (mac_xface->frame_parms->tdd_config) {
case 0:
case 1:
schedule_ulsch(module_idP,frameP,cooperation_flag,subframeP,7);
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,subframeP,7);//,calibration_flag);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,subframeP,7, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
#ifndef FLEXRAN_AGENT_SB_IF
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
#endif
break;
case 6:
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,subframeP,8);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,subframeP,8, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
#ifndef FLEXRAN_AGENT_SB_IF
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
#endif
......@@ -439,7 +480,19 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
break;
}
} else { //FDD
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,1,5);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,1,5, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
......@@ -452,7 +505,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -469,7 +522,20 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
// TDD, nothing
// FDD, normal UL/DLSCH
if (mac_xface->frame_parms->frame_type == FDD) { //FDD
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,2,6);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,2,6, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
......@@ -482,7 +548,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -502,7 +568,18 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
if (mac_xface->frame_parms->frame_type == TDD) {
switch (mac_xface->frame_parms->tdd_config) {
case 2:
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,subframeP,7);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,subframeP,7, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
// no break here!
case 5:
......@@ -518,7 +595,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -532,8 +609,20 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
break;
}
} else { //FDD
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,3,7);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,3,7, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
......@@ -546,7 +635,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -567,7 +656,18 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
switch (mac_xface->frame_parms->tdd_config) {
case 1:
// schedule_RA(module_idP,frameP,subframeP);
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,subframeP,8);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,subframeP,8, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
// no break here!
case 2:
......@@ -589,7 +689,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -604,8 +704,19 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
}
} else {
if (mac_xface->frame_parms->frame_type == FDD) { //FDD
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP, frameP, cooperation_flag, 4, 8);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,4,8, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP, frameP, subframeP, mbsfn_status);
fill_DLSCH_dci(module_idP, frameP, subframeP, mbsfn_status);
......@@ -618,7 +729,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -641,7 +752,19 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
//schedule_RA(module_idP,frameP,subframeP,5);
if (mac_xface->frame_parms->frame_type == FDD) {
schedule_RA(module_idP,frameP,subframeP,1);
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,5,9);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,5,9, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP, frameP, subframeP, mbsfn_status);
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
......@@ -654,7 +777,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -681,7 +804,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -704,7 +827,18 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
break;
case 1:
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,subframeP,2);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,subframeP,2, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
// schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
#ifndef FLEXRAN_AGENT_SB_IF
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
......@@ -712,7 +846,18 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
break;
case 6:
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,subframeP,3);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,subframeP,3, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
// schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
#ifndef FLEXRAN_AGENT_SB_IF
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
......@@ -733,7 +878,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -757,7 +902,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -771,7 +916,19 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
break;
}
} else { //FDD
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,6,0);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,6,0, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
......@@ -784,7 +941,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -817,7 +974,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -840,7 +997,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -854,7 +1011,19 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
break;
}
} else { //FDD
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,7,1);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,7,1, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
......@@ -867,7 +1036,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -892,7 +1061,18 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
case 5:
// schedule_RA(module_idP,subframeP);
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,subframeP,2);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,subframeP,2, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
......@@ -905,7 +1085,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -919,7 +1099,19 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
break;
}
} else { //FDD
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,8,2);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,8,2, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
......@@ -932,7 +1124,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -950,7 +1142,18 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
if (mac_xface->frame_parms->frame_type == TDD) {
switch (mac_xface->frame_parms->tdd_config) {
case 1:
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,subframeP,3);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,subframeP,3, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
schedule_RA(module_idP,frameP,subframeP,7); // 7 = Msg3 subframeP, not
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
......@@ -964,7 +1167,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -976,7 +1179,19 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
case 3:
case 4:
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,subframeP,3);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,subframeP,3, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
......@@ -989,7 +1204,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -1000,7 +1215,18 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
break;
case 6:
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,subframeP,4);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,subframeP,4, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
//schedule_RA(module_idP,frameP,subframeP);
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
......@@ -1014,7 +1240,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -1039,7 +1265,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......@@ -1053,7 +1279,19 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
break;
}
} else { //FDD
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ulsch(module_idP,frameP,cooperation_flag,9,3);
#else
if (mac_agent_registered[module_idP]){
agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,9,3, &msg);
}
flexran_agent_mac_destroy_ul_config(msg);
#endif
#ifndef FLEXRAN_AGENT_SB_IF
schedule_ue_spec(module_idP,frameP,subframeP,mbsfn_status);
fill_DLSCH_dci(module_idP,frameP,subframeP,mbsfn_status);
......@@ -1066,7 +1304,7 @@ void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,uint8_t cooperation_flag,
mbsfn_status,
&msg);
flexran_apply_dl_scheduling_decisions(module_idP,
flexran_apply_scheduling_decisions(module_idP,
frameP,
subframeP,
mbsfn_status,
......
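Every schedule_ulsch() call site in eNB_dlsch_ulsch_scheduler() above now follows the same dispatch shape: without the southbound interface the legacy scheduler runs, otherwise the registered agent callback produces the UL config message, which is then released. With sched_subframe standing for the per-case constant, the recurring pattern is:

    #ifndef FLEXRAN_AGENT_SB_IF
      schedule_ulsch(module_idP, frameP, cooperation_flag, subframeP, sched_subframe);
    #else
      if (mac_agent_registered[module_idP]) {
        agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP, frameP,
            cooperation_flag, subframeP, sched_subframe, &msg);
      }
      flexran_agent_mac_destroy_ul_config(msg);
    #endif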
......@@ -885,7 +885,7 @@ abort();
rb_table_index=UE_template->pre_allocated_rb_table_index_ul;
} else {
mcs=10;//cmin (10, openair_daq_vars.target_ue_ul_mcs);
rb_table_index=5; // for PHR
rb_table_index=13; // for PHR
}
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2=mcs;
......
......@@ -39,19 +39,25 @@
/*
* slice specific scheduler
*/
typedef void (*slice_scheduler)(module_id_t mod_id,
typedef void (*slice_scheduler_dl)(module_id_t mod_id,
int slice_id,
uint32_t frame,
uint32_t subframe,
int *mbsfn_flag,
Protocol__FlexranMessage **dl_info);
typedef void (*slice_scheduler_ul)(module_id_t mod_id,
frame_t frame,
unsigned char cooperation_flag,
uint32_t subframe,
unsigned char sched_subframe,
Protocol__FlexranMessage **ul_info);
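Mirroring the DL slice schedulers, an UL scheduler symbol can be resolved into this function-pointer type at run time; a minimal sketch, assuming flexran_schedule_ue_ul_spec_default is linked into the running binary (as the default DL scheduler is):

    slice_scheduler_ul ul_sched = dlsym(NULL, "flexran_schedule_ue_ul_spec_default");
    if (ul_sched != NULL)
      ul_sched(mod_id, frame, cooperation_flag, subframe, sched_subframe, &ul_info);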
/*
* top level flexran scheduler used by the eNB scheduler
*/
void flexran_schedule_ue_spec_default(mid_t mod_id,
void flexran_schedule_ue_dl_spec_default(mid_t mod_id,
uint32_t frame,
uint32_t subframe,
int *mbsfn_flag,
......@@ -102,13 +108,22 @@ flexran_schedule_ue_spec_be(mid_t mod_id,
* common flexran scheduler function
*/
void
flexran_schedule_ue_spec_common(mid_t mod_id,
flexran_schedule_ue_dl_spec_common(mid_t mod_id,
int slice_id,
uint32_t frame,
uint32_t subframe,
int *mbsfn_flag,
Protocol__FlexranMessage **dl_info);
void
flexran_schedule_ue_ul_spec_default(mid_t mod_id,
uint32_t frame,
uint32_t cooperation_flag,
int subframe,
unsigned char sched_subframe,
Protocol__FlexranMessage **ul_info);
uint16_t flexran_nb_rbs_allowed_slice(float rb_percentage,
int total_rbs);
......@@ -117,6 +132,8 @@ int flexran_slice_member(int UE_id,
int flexran_slice_maxmcs(int slice_id) ;
/* Downlink Primitives */
void _store_dlsch_buffer (module_id_t Mod_id,
int slice_id,
frame_t frameP,
......@@ -131,6 +148,21 @@ void _assign_rbs_required (module_id_t Mod_id,
uint16_t nb_rbs_allowed_slice[MAX_NUM_CCs][MAX_NUM_SLICES],
int min_rb_unit[MAX_NUM_CCs]);
/* Uplink Primitives */
// void _sort_ue_ul (module_id_t module_idP,int frameP, sub_frame_t subframeP);
void _assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP, sub_frame_t subframeP, uint16_t *first_rb);
void _ulsch_scheduler_pre_processor(module_id_t module_idP,
int slice_id,
int frameP,
sub_frame_t subframeP,
uint16_t *first_rb);
void _dlsch_scheduler_pre_processor (module_id_t Mod_id,
int slice_id,
frame_t frameP,
......@@ -165,13 +197,21 @@ void _dlsch_scheduler_pre_processor_allocate (module_id_t Mod_id,
/*
* Default scheduler used by the eNB agent
*/
void flexran_schedule_ue_spec_default(mid_t mod_id, uint32_t frame, uint32_t subframe,
void flexran_schedule_ue_dl_spec_default(mid_t mod_id, uint32_t frame, uint32_t subframe,
int *mbsfn_flag, Protocol__FlexranMessage **dl_info);
/*
Uplink scheduler used by MAC agent
*/
void flexran_agent_schedule_ulsch_ue_spec(module_id_t module_idP, frame_t frameP, unsigned char cooperation_flag,
sub_frame_t subframeP,
unsigned char sched_subframe, Protocol__FlexranMessage **ul_info);
/*
* Data plane function for applying the DL decisions of the scheduler
*/
void flexran_apply_dl_scheduling_decisions(mid_t mod_id, uint32_t frame, uint32_t subframe, int *mbsfn_flag,
void flexran_apply_scheduling_decisions(mid_t mod_id, uint32_t frame, uint32_t subframe, int *mbsfn_flag,
Protocol__FlexranMessage *dl_scheduling_info);
/*
......
......@@ -59,28 +59,28 @@
#include "SIMULATION/TOOLS/defs.h" // for taus
void flexran_apply_dl_scheduling_decisions(mid_t mod_id,
void flexran_apply_scheduling_decisions(mid_t mod_id,
uint32_t frame,
uint32_t subframe,
int *mbsfn_flag,
Protocol__FlexranMessage *dl_scheduling_info) {
Protocol__FlexDlMacConfig *mac_config = dl_scheduling_info->dl_mac_config_msg;
Protocol__FlexDlMacConfig *mac_dl_config = dl_scheduling_info->dl_mac_config_msg;
// Check if there is anything to schedule for random access
if (mac_config->n_dl_rar > 0) {
if (mac_dl_config->n_dl_rar > 0) {
/*TODO: call the random access data plane function*/
}
// Check if there is anything to schedule for paging/broadcast
if (mac_config->n_dl_broadcast > 0) {
if (mac_dl_config->n_dl_broadcast > 0) {
/*TODO: call the broadcast/paging data plane function*/
}
// Check if there is anything to schedule for the UEs
if (mac_config->n_dl_ue_data > 0) {
if (mac_dl_config->n_dl_ue_data > 0) {
flexran_apply_ue_spec_scheduling_decisions(mod_id, frame, subframe, mbsfn_flag,
mac_config->n_dl_ue_data, mac_config->dl_ue_data);
mac_dl_config->n_dl_ue_data, mac_dl_config->dl_ue_data);
}
}
......
......@@ -88,16 +88,16 @@ typedef enum {
// number of active slices for past and current time
int n_active_slices = 1;
int n_active_slices_current = 1;
int n_active_slices = 2;
int n_active_slices_current = 2;
// ue to slice mapping
int slicing_strategy = UEID_TO_SLICEID;
int slicing_strategy_current = UEID_TO_SLICEID;
// RB share for each slice for past and current time
float slice_percentage[MAX_NUM_SLICES] = {1.0, 0.0, 0.0, 0.0};
float slice_percentage_current[MAX_NUM_SLICES] = {1.0, 0.0, 0.0, 0.0};
float slice_percentage[MAX_NUM_SLICES] = {0.5, 0.5, 0.0, 0.0};
float slice_percentage_current[MAX_NUM_SLICES] = {0.5, 0.5, 0.0, 0.0};
float total_slice_percentage = 0;
// MAX MCS for each slice for past and current time
......@@ -115,7 +115,7 @@ char *dl_scheduler_type[MAX_NUM_SLICES] = {"flexran_schedule_ue_spec_embb",
};
// pointer to the slice specific scheduler
slice_scheduler slice_sched[MAX_NUM_SLICES] = {0};
slice_scheduler_dl slice_sched_dl[MAX_NUM_SLICES] = {0};
/**
......@@ -495,7 +495,7 @@ void _dlsch_scheduler_pre_processor (module_id_t Mod_id,
}
}
// Store the DLSCH buffer for each logical channel
/* Store the DLSCH buffer for each logical channel for each UE */
_store_dlsch_buffer (Mod_id,slice_id,frameP,subframeP);
// Calculate the number of RBs required by each UE on the basis of logical channel's buffer
......@@ -526,7 +526,7 @@ void _dlsch_scheduler_pre_processor (module_id_t Mod_id,
CC_id = UE_list->ordered_CCids[ii][UE_id];
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
ue_sched_ctl->max_allowed_rbs[CC_id]=nb_rbs_allowed_slice[CC_id][slice_id];
flexran_get_harq(Mod_id, CC_id, UE_id, frameP, subframeP, &harq_pid, &round);
flexran_get_harq(Mod_id, CC_id, UE_id, frameP, subframeP, &harq_pid, &round, openair_harq_DL);
// if there is no available harq_process, skip the UE
if (UE_list->UE_sched_ctrl[UE_id].harq_pid[CC_id]<0)
......@@ -640,7 +640,7 @@ void _dlsch_scheduler_pre_processor (module_id_t Mod_id,
for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
CC_id = UE_list->ordered_CCids[ii][UE_id];
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
flexran_get_harq(Mod_id, CC_id, UE_id, frameP, subframeP, &harq_pid, &round);
flexran_get_harq(Mod_id, CC_id, UE_id, frameP, subframeP, &harq_pid, &round, openair_harq_DL);
rnti = UE_RNTI(Mod_id,UE_id);
// LOG_D(MAC,"UE %d rnti 0x\n", UE_id, rnti );
......@@ -709,12 +709,12 @@ void _dlsch_scheduler_pre_processor (module_id_t Mod_id,
#define SF05_LIMIT 1
/*
* Main scheduling functions to support slicing
* Main Downlink Slicing
*
*/
void
flexran_schedule_ue_spec_default(mid_t mod_id,
flexran_schedule_ue_dl_spec_default(mid_t mod_id,
uint32_t frame,
uint32_t subframe,
int *mbsfn_flag,
......@@ -729,7 +729,7 @@ flexran_schedule_ue_spec_default(mid_t mod_id,
// Load any updated functions
if (update_dl_scheduler[i] > 0 ) {
slice_sched[i] = dlsym(NULL, dl_scheduler_type[i]);
slice_sched_dl[i] = dlsym(NULL, dl_scheduler_type[i]);
update_dl_scheduler[i] = 0;
update_dl_scheduler_current[i] = 0;
slice_percentage_current[i]= slice_percentage[i];
......@@ -742,7 +742,9 @@ flexran_schedule_ue_spec_default(mid_t mod_id,
if ((n_active_slices > 0) && (n_active_slices <= MAX_NUM_SLICES)) {
LOG_N(MAC,"[eNB %d]frame %d subframe %d: number of active slices has changed: %d-->%d\n",
mod_id, frame, subframe, n_active_slices_current, n_active_slices);
n_active_slices_current = n_active_slices;
} else {
LOG_W(MAC,"invalid number of slices %d, revert to the previous value %d\n",n_active_slices, n_active_slices_current);
n_active_slices = n_active_slices_current;
......@@ -751,26 +753,30 @@ flexran_schedule_ue_spec_default(mid_t mod_id,
// check if the slice rb share has changed, and log the console
if (slice_percentage_current[i] != slice_percentage[i]){
if ((slice_percentage[i] >= 0.0) && (slice_percentage[i] <= 1.0)){
if ((total_slice_percentage - slice_percentage_current[i] + slice_percentage[i]) <= 1.0) {
total_slice_percentage=total_slice_percentage - slice_percentage_current[i] + slice_percentage[i];
// if ((slice_percentage[i] >= 0.0) && (slice_percentage[i] <= 1.0)){
// if ((total_slice_percentage - slice_percentage_current[i] + slice_percentage[i]) <= 1.0) {
// total_slice_percentage=total_slice_percentage - slice_percentage_current[i] + slice_percentage[i];
LOG_N(MAC,"[eNB %d][SLICE %d] frame %d subframe %d: total percentage %f, slice RB percentage has changed: %f-->%f\n",
mod_id, i, frame, subframe, total_slice_percentage, slice_percentage_current[i], slice_percentage[i]);
slice_percentage_current[i] = slice_percentage[i];
} else {
LOG_W(MAC,"[eNB %d][SLICE %d] invalid total RB share (%f->%f), revert the previous value (%f->%f)\n",
mod_id,i,
total_slice_percentage,
total_slice_percentage - slice_percentage_current[i] + slice_percentage[i],
slice_percentage[i],slice_percentage_current[i]);
slice_percentage[i]= slice_percentage_current[i];
}
} else {
LOG_W(MAC,"[eNB %d][SLICE %d] invalid slice RB share, revert the previous value (%f->%f)\n",mod_id, i, slice_percentage[i],slice_percentage_current[i]);
slice_percentage[i]= slice_percentage_current[i];
// } else {
// LOG_W(MAC,"[eNB %d][SLICE %d] invalid total RB share (%f->%f), revert the previous value (%f->%f)\n",
// mod_id,i,
// total_slice_percentage,
// total_slice_percentage - slice_percentage_current[i] + slice_percentage[i],
// slice_percentage[i],slice_percentage_current[i]);
}
// slice_percentage[i]= slice_percentage_current[i];
// }
// } else {
// LOG_W(MAC,"[eNB %d][SLICE %d] invalid slice RB share, revert the previous value (%f->%f)\n",mod_id, i, slice_percentage[i],slice_percentage_current[i]);
// slice_percentage[i]= slice_percentage_current[i];
// }
}
// check if the slice max MCS, and log the console
......@@ -778,10 +784,14 @@ flexran_schedule_ue_spec_default(mid_t mod_id,
if ((slice_maxmcs[i] >= 0) && (slice_maxmcs[i] < 29)){
LOG_N(MAC,"[eNB %d][SLICE %d] frame %d subframe %d: slice MAX MCS has changed: %d-->%d\n",
mod_id, i, frame, subframe, slice_maxmcs_current[i], slice_maxmcs[i]);
slice_maxmcs_current[i] = slice_maxmcs[i];
} else {
LOG_W(MAC,"[eNB %d][SLICE %d] invalid slice max mcs %d, revert the previous value %d\n",mod_id, i, slice_percentage[i],slice_percentage[i]);
slice_maxmcs[i]= slice_maxmcs_current[i];
}
}
......@@ -789,12 +799,14 @@ flexran_schedule_ue_spec_default(mid_t mod_id,
if (update_dl_scheduler_current[i] != update_dl_scheduler[i]){
LOG_N(MAC,"[eNB %d][SLICE %d] frame %d subframe %d: DL scheduler for this slice is updated: %s \n",
mod_id, i, frame, subframe, dl_scheduler_type[i]);
update_dl_scheduler_current[i] = update_dl_scheduler[i];
}
// Run each enabled slice-specific schedulers one by one
//LOG_N(MAC,"[eNB %d]frame %d subframe %d slice %d: calling the scheduler\n", mod_id, frame, subframe,i);
slice_sched[i](mod_id, i, frame, subframe, mbsfn_flag,dl_info);
slice_sched_dl[i](mod_id, i, frame, subframe, mbsfn_flag,dl_info);
}
......@@ -864,7 +876,7 @@ flexran_schedule_ue_spec_embb(mid_t mod_id,
Protocol__FlexranMessage **dl_info)
{
flexran_schedule_ue_spec_common(mod_id,
flexran_schedule_ue_dl_spec_common(mod_id,
slice_id,
frame,
subframe,
......@@ -882,7 +894,7 @@ flexran_schedule_ue_spec_urllc(mid_t mod_id,
Protocol__FlexranMessage **dl_info)
{
flexran_schedule_ue_spec_common(mod_id,
flexran_schedule_ue_dl_spec_common(mod_id,
slice_id,
frame,
subframe,
......@@ -901,7 +913,7 @@ flexran_schedule_ue_spec_mmtc(mid_t mod_id,
{
flexran_schedule_ue_spec_common(mod_id,
flexran_schedule_ue_dl_spec_common(mod_id,
slice_id,
frame,
subframe,
......@@ -920,7 +932,7 @@ flexran_schedule_ue_spec_be(mid_t mod_id,
{
flexran_schedule_ue_spec_common(mod_id,
flexran_schedule_ue_dl_spec_common(mod_id,
slice_id,
frame,
subframe,
......@@ -931,7 +943,7 @@ flexran_schedule_ue_spec_be(mid_t mod_id,
//------------------------------------------------------------------------------
void
flexran_schedule_ue_spec_common(mid_t mod_id,
flexran_schedule_ue_dl_spec_common(mid_t mod_id,
int slice_id,
uint32_t frame,
uint32_t subframe,
......@@ -1032,9 +1044,10 @@ flexran_schedule_ue_spec_common(mid_t mod_id,
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
if (eNB_UE_stats==NULL) {
LOG_D(MAC,"[eNB] Cannot find eNB_UE_stats\n");
// mac_xface->macphy_exit("[MAC][eNB] Cannot find eNB_UE_stats\n");
continue;
LOG_D(MAC,"[eNB] Cannot find eNB_UE_stats\n");
// mac_xface->macphy_exit("[MAC][eNB] Cannot find eNB_UE_stats\n");
continue;
}
if (flexran_slice_member(UE_id, slice_id) == 0)
......@@ -1050,28 +1063,29 @@ flexran_schedule_ue_spec_common(mid_t mod_id,
case 1:
case 2:
case 7:
aggregation = get_aggregation(get_bw_index(mod_id,CC_id),
aggregation = get_aggregation(get_bw_index(mod_id,CC_id),
eNB_UE_stats->DL_cqi[0],
format1);
break;
break;
case 3:
aggregation = get_aggregation(get_bw_index(mod_id,CC_id),
aggregation = get_aggregation(get_bw_index(mod_id,CC_id),
eNB_UE_stats->DL_cqi[0],
format2A);
break;
break;
default:
LOG_W(MAC,"Unsupported transmission mode %d\n", mac_xface->get_transmission_mode(mod_id,CC_id,rnti));
aggregation = 2;
LOG_W(MAC,"Unsupported transmission mode %d\n", mac_xface->get_transmission_mode(mod_id,CC_id,rnti));
aggregation = 2;
}
if ((ue_sched_ctl->pre_nb_available_rbs[CC_id] == 0) || // no RBs allocated
CCE_allocation_infeasible(mod_id, CC_id, 0, subframe, aggregation, rnti)) {
LOG_D(MAC,"[eNB %d] Frame %d : no RB allocated for UE %d on CC_id %d: continue \n",
mod_id, frame, UE_id, CC_id);
//if(mac_xface->get_transmission_mode(module_idP,rnti)==5)
continue; //to next user (there might be rbs available for other UEs in TM5
// else
// break;
LOG_D(MAC,"[eNB %d] Frame %d : no RB allocated for UE %d on CC_id %d: continue \n",
mod_id, frame, UE_id, CC_id);
//if(mac_xface->get_transmission_mode(module_idP,rnti)==5)
continue; //to next user (there might be rbs available for other UEs in TM5
// else
// break;
}
if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
......@@ -1094,26 +1108,28 @@ flexran_schedule_ue_spec_common(mid_t mod_id,
dl_data[num_ues_added]->n_rlc_pdu = 0;
dl_data[num_ues_added]->has_serv_cell_index = 1;
dl_data[num_ues_added]->serv_cell_index = CC_id;
nb_available_rb = ue_sched_ctl->pre_nb_available_rbs[CC_id];
flexran_get_harq(mod_id, CC_id, UE_id, frame, subframe, &harq_pid, &round);
flexran_get_harq(mod_id, CC_id, UE_id, frame, subframe, &harq_pid, &round, openair_harq_DL);
sdu_length_total=0;
mcs = cqi_to_mcs[flexran_get_ue_wcqi(mod_id, UE_id)];
// LOG_I(FLEXRAN_AGENT, "The MCS is %d\n", mcs);
mcs = cmin(mcs,flexran_slice_maxmcs(slice_id));
#ifdef EXMIMO
// #ifdef EXMIMO
if (mac_xface->get_transmission_mode(mod_id, CC_id, rnti) == 5) {
mcs = cqi_to_mcs[flexran_get_ue_wcqi(mod_id, UE_id)];
mcs = cmin(mcs,16);
}
// if (mac_xface->get_transmission_mode(mod_id, CC_id, rnti) == 5) {
// mcs = cqi_to_mcs[flexran_get_ue_wcqi(mod_id, UE_id)];
// mcs = cmin(mcs,16);
// }
#endif
// #endif
/*Get pre available resource blocks based on buffers*/
nb_available_rb = ue_sched_ctl->pre_nb_available_rbs[CC_id];
// initializing the rb allocation indicator for each UE
for(j = 0; j < flexran_get_N_RBG(mod_id, CC_id); j++) {
UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = 0;
rballoc_sub[j] = 0;
UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = 0;
rballoc_sub[j] = 0;
}
/* LOG_D(MAC,"[eNB %d] Frame %d: Scheduling UE %d on CC_id %d (rnti %x, harq_pid %d, round %d, rb %d, cqi %d, mcs %d, rrc %d)\n", */
......@@ -1134,72 +1150,74 @@ flexran_schedule_ue_spec_common(mid_t mod_id,
/* process retransmission */
if (round > 0) {
LOG_D(FLEXRAN_AGENT, "There was a retransmission just now and the round was %d\n", round);
if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
UE_list->UE_template[CC_id][UE_id].DAI++;
update_ul_dci(mod_id, CC_id, rnti, UE_list->UE_template[CC_id][UE_id].DAI);
LOG_D(MAC,"DAI update: CC_id %d subframeP %d: UE %d, DAI %d\n",
CC_id, subframe,UE_id,UE_list->UE_template[CC_id][UE_id].DAI);
}
mcs = UE_list->UE_template[CC_id][UE_id].mcs[harq_pid];
// get freq_allocation
nb_rb = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
/*TODO: Must add this to FlexRAN agent API */
dci_tbs = mac_xface->get_TBS_DL(mcs, nb_rb);
if (nb_rb <= nb_available_rb) {
if(nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) {
for(j = 0; j < flexran_get_N_RBG(mod_id, CC_id); j++) { // for indicating the rballoc for each sub-band
UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
}
} else {
nb_rb_temp = nb_rb;
j = 0;
while((nb_rb_temp > 0) && (j < flexran_get_N_RBG(mod_id, CC_id))) {
if(ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 1) {
UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
if((j == flexran_get_N_RBG(mod_id, CC_id) - 1) &&
((flexran_get_N_RB_DL(mod_id, CC_id) == 25)||
(flexran_get_N_RB_DL(mod_id, CC_id) == 50))) {
nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id]+1;
} else {
nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id];
}
}
j = j + 1;
}
}
nb_available_rb -= nb_rb;
PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = nb_rb;
PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].dl_pow_off = ue_sched_ctl->dl_pow_off[CC_id];
for(j=0; j < flexran_get_N_RBG(mod_id, CC_id); j++) {
PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j];
rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j];
}
// Keep the old NDI, do not toggle
ndi = UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
tpc = UE_list->UE_template[CC_id][UE_id].oldTPC[harq_pid];
UE_list->UE_template[CC_id][UE_id].mcs[harq_pid] = mcs;
LOG_D(FLEXRAN_AGENT, "There was a retransmission just now and the round was %d\n", round);
if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
UE_list->UE_template[CC_id][UE_id].DAI++;
update_ul_dci(mod_id, CC_id, rnti, UE_list->UE_template[CC_id][UE_id].DAI);
LOG_D(MAC,"DAI update: CC_id %d subframeP %d: UE %d, DAI %d\n",
CC_id, subframe,UE_id,UE_list->UE_template[CC_id][UE_id].DAI);
}
mcs = UE_list->UE_template[CC_id][UE_id].mcs[harq_pid];
// get freq_allocation
nb_rb = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
/*TODO: Must add this to FlexRAN agent API */
dci_tbs = mac_xface->get_TBS_DL(mcs, nb_rb);
if (nb_rb <= nb_available_rb) {
if(nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) {
for(j = 0; j < flexran_get_N_RBG(mod_id, CC_id); j++) { // for indicating the rballoc for each sub-band
UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
}
} else {
nb_rb_temp = nb_rb;
j = 0;
while((nb_rb_temp > 0) && (j < flexran_get_N_RBG(mod_id, CC_id))) {
if(ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 1) {
UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
if((j == flexran_get_N_RBG(mod_id, CC_id) - 1) &&
((flexran_get_N_RB_DL(mod_id, CC_id) == 25)||
(flexran_get_N_RB_DL(mod_id, CC_id) == 50))) {
nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id]+1;
} else {
nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id];
}
}
j = j + 1;
}
}
nb_available_rb -= nb_rb;
PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = nb_rb;
PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].dl_pow_off = ue_sched_ctl->dl_pow_off[CC_id];
for(j=0; j < flexran_get_N_RBG(mod_id, CC_id); j++) {
PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j];
rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j];
}
// Keep the old NDI, do not toggle
ndi = UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
tpc = UE_list->UE_template[CC_id][UE_id].oldTPC[harq_pid];
UE_list->UE_template[CC_id][UE_id].mcs[harq_pid] = mcs;
ue_has_transmission = 1;
num_ues_added++;
} else {
LOG_D(MAC,"[eNB %d] Frame %d CC_id %d : don't schedule UE %d, its retransmission takes more resources than we have\n",
mod_id, frame, CC_id, UE_id);
ue_has_transmission = 0;
}
//End of retransmission
}
ue_has_transmission = 1;
num_ues_added++;
} else {
LOG_D(MAC,"[eNB %d] Frame %d CC_id %d : don't schedule UE %d, its retransmission takes more resources than we have\n",
mod_id, frame, CC_id, UE_id);
ue_has_transmission = 0;
}
//End of retransmission
} else { /* This is a potentially new SDU opportunity */
else { /* This is a potentially new SDU opportunity */
rlc_status.bytes_in_buffer = 0;
// Now check RLC information to compute number of required RBs
// get maximum TBS size for RLC request
......@@ -1253,11 +1271,14 @@ flexran_schedule_ue_spec_common(mid_t mod_id,
//Fill in as much as possible
data_to_request = cmin(dci_tbs - ta_len - header_len - sdu_length_total, rlc_status.bytes_in_buffer);
LOG_D(FLEXRAN_AGENT, "Will request %d bytes from channel %d\n", data_to_request, j);
if (data_to_request < 128) { //The header will be one byte less
header_len--;
header_len_last = 2;
} else {
header_len_last = 3;
header_len--;
header_len_last = 2;
}
else {
header_len_last = 3;
}
/* if (j == 1 || j == 2) {
data_to_request+=0;
......@@ -1352,7 +1373,8 @@ flexran_schedule_ue_spec_common(mid_t mod_id,
for(j = 0; j < flexran_get_N_RBG(mod_id, CC_id); j++) { // for indicating the rballoc for each sub-band
UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
}
} else {
}
else {
nb_rb_temp = nb_rb;
j = 0;
LOG_D(MAC, "[TEST]Will only partially fill the bitmap\n");
......@@ -1400,12 +1422,12 @@ flexran_schedule_ue_spec_common(mid_t mod_id,
UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid] = nb_rb;
if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
UE_list->UE_template[CC_id][UE_id].DAI++;
// printf("DAI update: subframeP %d: UE %d, DAI %d\n",subframeP,UE_id,UE_list->UE_template[CC_id][UE_id].DAI);
// if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
// UE_list->UE_template[CC_id][UE_id].DAI++;
// // printf("DAI update: subframeP %d: UE %d, DAI %d\n",subframeP,UE_id,UE_list->UE_template[CC_id][UE_id].DAI);
//#warning only for 5MHz channel
update_ul_dci(mod_id, CC_id, rnti, UE_list->UE_template[CC_id][UE_id].DAI);
}
// update_ul_dci(mod_id, CC_id, rnti, UE_list->UE_template[CC_id][UE_id].DAI);
// }
// do PUCCH power control
// this is the normalized RX power
......@@ -1463,189 +1485,189 @@ flexran_schedule_ue_spec_common(mid_t mod_id,
ue_has_transmission = 0;
}
} // End of new scheduling
// If we has transmission or retransmission
if (ue_has_transmission) {
switch (mac_xface->get_transmission_mode(mod_id, CC_id, rnti)) {
case 1:
case 2:
default:
dl_dci->has_res_alloc = 1;
dl_dci->res_alloc = 0;
dl_dci->has_vrb_format = 1;
dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
dl_dci->has_format = 1;
dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_1;
dl_dci->has_rb_bitmap = 1;
dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
dl_dci->has_rb_shift = 1;
dl_dci->rb_shift = 0;
dl_dci->n_ndi = 1;
dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
dl_dci->ndi[0] = ndi;
dl_dci->n_rv = 1;
dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
dl_dci->rv[0] = round & 3;
dl_dci->has_tpc = 1;
dl_dci->tpc = tpc;
dl_dci->n_mcs = 1;
dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
dl_dci->mcs[0] = mcs;
dl_dci->n_tbs_size = 1;
dl_dci->tbs_size = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_tbs_size);
dl_dci->tbs_size[0] = dci_tbs;
if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
dl_dci->has_dai = 1;
dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
}
break;
case 3:
dl_dci->has_res_alloc = 1;
dl_dci->res_alloc = 0;
dl_dci->has_vrb_format = 1;
dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
dl_dci->has_format = 1;
dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_2A;
dl_dci->has_rb_bitmap = 1;
dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
dl_dci->has_rb_shift = 1;
dl_dci->rb_shift = 0;
dl_dci->n_ndi = 2;
dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
dl_dci->ndi[0] = ndi;
dl_dci->ndi[1] = ndi;
dl_dci->n_rv = 2;
dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
dl_dci->rv[0] = round & 3;
dl_dci->rv[1] = round & 3;
dl_dci->has_tpc = 1;
dl_dci->tpc = tpc;
dl_dci->n_mcs = 2;
dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
dl_dci->mcs[0] = mcs;
dl_dci->mcs[1] = mcs;
dl_dci->n_tbs_size = 2;
dl_dci->tbs_size = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_tbs_size);
dl_dci->tbs_size[0] = dci_tbs;
dl_dci->tbs_size[1] = dci_tbs;
if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
dl_dci->has_dai = 1;
dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
}
break;
case 4:
dl_dci->has_res_alloc = 1;
dl_dci->res_alloc = 0;
dl_dci->has_vrb_format = 1;
dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
dl_dci->has_format = 1;
dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_2A;
dl_dci->has_rb_bitmap = 1;
dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
dl_dci->has_rb_shift = 1;
dl_dci->rb_shift = 0;
dl_dci->n_ndi = 2;
dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
dl_dci->ndi[0] = ndi;
dl_dci->ndi[1] = ndi;
dl_dci->n_rv = 2;
dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
dl_dci->rv[0] = round & 3;
dl_dci->rv[1] = round & 3;
dl_dci->has_tpc = 1;
dl_dci->tpc = tpc;
dl_dci->n_mcs = 2;
dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
dl_dci->mcs[0] = mcs;
dl_dci->mcs[1] = mcs;
dl_dci->n_tbs_size = 2;
dl_dci->tbs_size = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_tbs_size);
dl_dci->tbs_size[0] = dci_tbs;
dl_dci->tbs_size[1] = dci_tbs;
if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
dl_dci->has_dai = 1;
dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
}
break;
case 5:
dl_dci->has_res_alloc = 1;
dl_dci->res_alloc = 0;
dl_dci->has_vrb_format = 1;
dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
dl_dci->has_format = 1;
dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_1D;
dl_dci->has_rb_bitmap = 1;
dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
dl_dci->has_rb_shift = 1;
dl_dci->rb_shift = 0;
dl_dci->n_ndi = 1;
dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
dl_dci->ndi[0] = ndi;
dl_dci->n_rv = 1;
dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
dl_dci->rv[0] = round & 3;
dl_dci->has_tpc = 1;
dl_dci->tpc = tpc;
dl_dci->n_mcs = 1;
dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
dl_dci->mcs[0] = mcs;
dl_dci->n_tbs_size = 1;
dl_dci->tbs_size = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_tbs_size);
dl_dci->tbs_size[0] = dci_tbs;
if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
dl_dci->has_dai = 1;
dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
}
if(ue_sched_ctl->dl_pow_off[CC_id] == 2) {
ue_sched_ctl->dl_pow_off[CC_id] = 1;
}
dl_dci->has_dl_power_offset = 1;
dl_dci->dl_power_offset = ue_sched_ctl->dl_pow_off[CC_id];
dl_dci->has_precoding_info = 1;
dl_dci->precoding_info = 5; // Is this right??
break;
case 6:
dl_dci->has_res_alloc = 1;
dl_dci->res_alloc = 0;
dl_dci->has_vrb_format = 1;
dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
dl_dci->has_format = 1;
dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_1D;
dl_dci->has_rb_bitmap = 1;
dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
dl_dci->has_rb_shift = 1;
dl_dci->rb_shift = 0;
dl_dci->n_ndi = 1;
dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
dl_dci->ndi[0] = ndi;
dl_dci->n_rv = 1;
dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
dl_dci->rv[0] = round & 3;
dl_dci->has_tpc = 1;
dl_dci->tpc = tpc;
dl_dci->n_mcs = 1;
dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
dl_dci->mcs[0] = mcs;
if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
dl_dci->has_dai = 1;
dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
}
dl_dci->has_dl_power_offset = 1;
dl_dci->dl_power_offset = ue_sched_ctl->dl_pow_off[CC_id];
dl_dci->has_precoding_info = 1;
dl_dci->precoding_info = 5; // Is this right??
break;
}
}
if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
  /* TODO */
  //set_ul_DAI(mod_id, UE_id, CC_id, frame, subframe, frame_parms);
}
} // UE_id loop
} // CC_id loop
......
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.0 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
/*! \file eNB_scheduler_ulsch.c
* \brief FlexRAN eNB procedures for the ULSCH transport channel
* \author shahab SHARIAT BAGHERI
* \date 2017
* \version 1.0
* @ingroup _mac
*/
#include "assertions.h"
#include "PHY/defs.h"
#include "PHY/extern.h"
#include "SCHED/defs.h"
#include "SCHED/extern.h"
#include "LAYER2/MAC/flexran_agent_mac_proto.h"
#include "LAYER2/MAC/defs.h"
#include "LAYER2/MAC/proto.h"
#include "LAYER2/MAC/extern.h"
#include "UTIL/LOG/log.h"
#include "UTIL/LOG/vcd_signal_dumper.h"
#include "UTIL/OPT/opt.h"
#include "OCG.h"
#include "OCG_extern.h"
#include "RRC/LITE/extern.h"
#include "RRC/L2_INTERFACE/openair_rrc_L2_interface.h"
//#include "LAYER2/MAC/pre_processor.c"
#include "pdcp.h"
#if defined(ENABLE_ITTI)
# include "intertask_interface.h"
#endif
#include "T.h"
/* number of active slices for past and current time*/
int n_active_slices_uplink = 2;
int n_active_slices_uplink_current = 2;
/* RB share for each slice for past and current time*/
float slice_percentage_uplink[MAX_NUM_SLICES] = {0.5, 0.5, 0.0, 0.0};
float slice_percentage_current_uplink[MAX_NUM_SLICES] = {0.5, 0.5, 0.0, 0.0};
float total_slice_percentage_uplink = 0;
// MAX MCS for each slice for past and current time
int slice_maxmcs_uplink[MAX_NUM_SLICES] = {16, 16, 16, 16};
int slice_maxmcs_current_uplink[MAX_NUM_SLICES] = {28, 28, 28, 28};
/*resource blocks allowed*/
uint16_t nb_rbs_allowed_slice_uplink[MAX_NUM_CCs][MAX_NUM_SLICES];
/*Slice Update */
int update_ul_scheduler[MAX_NUM_SLICES] = {1, 1, 1, 1};
int update_ul_scheduler_current[MAX_NUM_SLICES] = {1, 1, 1, 1};
/* Slice Function Pointer */
slice_scheduler_ul slice_sched_ul[MAX_NUM_SLICES] = {0};
/* name of available scheduler*/
char *ul_scheduler_type[MAX_NUM_SLICES] = {"flexran_schedule_ue_ul_spec_embb",
"flexran_schedule_ue_ul_spec_urllc",
"flexran_schedule_ue_ul_spec_mmtc",
"flexran_schedule_ue_ul_spec_be" // best effort
};
uint16_t flexran_nb_rbs_allowed_slice_uplink(float rb_percentage, int total_rbs){
return (uint16_t) floor(rb_percentage * total_rbs);
}
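/* Example (illustration only): with a 50% slice share and 25 UL PRBs (5 MHz),
 * this yields floor(0.5 * 25) = 12 PRBs allowed for the slice. */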
void _assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP, sub_frame_t subframeP, uint16_t *first_rb)
{
int i;
uint16_t n,UE_id;
uint8_t CC_id;
rnti_t rnti = -1;
int mcs;
int rb_table_index=0,tbs,tx_power;
eNB_MAC_INST *eNB = &eNB_mac_inst[module_idP];
UE_list_t *UE_list = &eNB->UE_list;
UE_TEMPLATE *UE_template;
LTE_DL_FRAME_PARMS *frame_parms;
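/* For each active UE with pending UL data: start from the highest MCS allowed by
 * the PHR knowledge, lower the MCS until the estimated UE TX power fits the
 * reported PHR and the TBS no longer exceeds the buffer, then grow the RB
 * allocation (rb_table index) until the TBS covers the buffer, the slice's UL RB
 * budget is reached, or the power headroom is exhausted. The result is stored in
 * pre_assigned_mcs_ul / pre_allocated_nb_rb_ul for the allocation step. */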
for (i = 0; i < NUMBER_OF_UE_MAX; i++) {
if (UE_list->active[i] != TRUE) continue;
rnti = UE_RNTI(module_idP,i);
if (rnti==NOT_A_RNTI)
continue;
if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
continue;
if (!phy_stats_exist(module_idP, rnti))
continue;
if (UE_list->UE_sched_ctrl[i].phr_received == 1)
mcs = 20; // if we've received the power headroom information from the UE, we can go to the maximum MCS
else
mcs = 10; // otherwise, limit to QPSK PUSCH
UE_id = i;
for (n=0; n<UE_list->numactiveULCCs[UE_id]; n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
if (CC_id >= MAX_NUM_CCs) {
LOG_E( MAC, "CC_id %u should be < %u, loop n=%u < numactiveULCCs[%u]=%u",
CC_id,
MAX_NUM_CCs,
n,
UE_id,
UE_list->numactiveULCCs[UE_id]);
}
AssertFatal( CC_id < MAX_NUM_CCs, "CC_id %u should be < %u, loop n=%u < numactiveULCCs[%u]=%u",
CC_id,
MAX_NUM_CCs,
n,
UE_id,
UE_list->numactiveULCCs[UE_id]);
frame_parms=mac_xface->get_lte_frame_parms(module_idP,CC_id);
UE_template = &UE_list->UE_template[CC_id][UE_id];
nb_rbs_allowed_slice_uplink[CC_id][UE_id] = flexran_nb_rbs_allowed_slice_uplink(slice_percentage_uplink[UE_id], flexran_get_N_RB_UL(module_idP, CC_id));
// if this UE has UL traffic
if (UE_template->ul_total_buffer > 0 ) {
tbs = mac_xface->get_TBS_UL(mcs,3); // 1 or 2 PRB with cqi enabled does not work well!
// fixme: set use_srs flag
tx_power= mac_xface->estimate_ue_tx_power(tbs,rb_table[rb_table_index],0,frame_parms->Ncp,0);
while ((((UE_template->phr_info - tx_power) < 0 ) || (tbs > UE_template->ul_total_buffer))&&
(mcs > 3)) {
// LOG_I(MAC,"UE_template->phr_info %d tx_power %d mcs %d\n", UE_template->phr_info,tx_power, mcs);
mcs--;
tbs = mac_xface->get_TBS_UL(mcs,rb_table[rb_table_index]);
tx_power = mac_xface->estimate_ue_tx_power(tbs,rb_table[rb_table_index],0,frame_parms->Ncp,0); // fixme: set use_srs
}
while ((tbs < UE_template->ul_total_buffer) &&
(rb_table[rb_table_index]<(nb_rbs_allowed_slice_uplink[CC_id][UE_id]-first_rb[CC_id])) &&
((UE_template->phr_info - tx_power) > 0) &&
(rb_table_index < 32 )) {
// LOG_I(MAC,"tbs %d ul buffer %d rb table %d max ul rb %d\n", tbs, UE_template->ul_total_buffer, rb_table[rb_table_index], frame_parms->N_RB_UL-first_rb[CC_id]);
rb_table_index++;
tbs = mac_xface->get_TBS_UL(mcs,rb_table[rb_table_index]);
tx_power = mac_xface->estimate_ue_tx_power(tbs,rb_table[rb_table_index],0,frame_parms->Ncp,0);
}
UE_template->ue_tx_power = tx_power;
if (rb_table[rb_table_index]>(nb_rbs_allowed_slice_uplink[CC_id][UE_id]-first_rb[CC_id]-1)) {
rb_table_index--;
}
// 1 or 2 PRB with cqi enabled does not work well!
if (rb_table[rb_table_index]<3) {
rb_table_index=2; //3PRB
}
UE_template->pre_assigned_mcs_ul=mcs;
UE_template->pre_allocated_rb_table_index_ul=rb_table_index;
UE_template->pre_allocated_nb_rb_ul= rb_table[rb_table_index];
LOG_D(MAC,"[eNB %d] frame %d subframe %d: for UE %d CC %d: pre-assigned mcs %d, pre-allocated rb_table[%d]=%d RBs (phr %d, tx power %d)\n",
module_idP, frameP, subframeP, UE_id, CC_id,
UE_template->pre_assigned_mcs_ul,
UE_template->pre_allocated_rb_table_index_ul,
UE_template->pre_allocated_nb_rb_ul,
UE_template->phr_info,tx_power);
} else {
UE_template->pre_allocated_rb_table_index_ul=-1;
UE_template->pre_allocated_nb_rb_ul=0;
}
}
}
}
void _ulsch_scheduler_pre_processor(module_id_t module_idP,
int slice_id,
int frameP,
sub_frame_t subframeP,
uint16_t *first_rb)
{
int16_t i;
uint16_t UE_id,n,r;
uint8_t CC_id, round, harq_pid;
uint16_t nb_allocated_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX],total_allocated_rbs[MAX_NUM_CCs],average_rbs_per_user[MAX_NUM_CCs];
uint16_t nb_rbs_allowed_slice_uplink[MAX_NUM_CCs][MAX_NUM_SLICES];
int16_t total_remaining_rbs[MAX_NUM_CCs];
uint16_t max_num_ue_to_be_scheduled=0,total_ue_count=0;
rnti_t rnti= -1;
UE_list_t *UE_list = &eNB_mac_inst[module_idP].UE_list;
UE_TEMPLATE *UE_template = 0;
LTE_DL_FRAME_PARMS *frame_parms = 0;
UE_sched_ctrl *ue_sched_ctl;
//LOG_I(MAC,"assign max mcs min rb\n");
// maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB
_assign_max_mcs_min_rb(module_idP, slice_id, frameP, subframeP, first_rb);
//LOG_I(MAC,"sort ue \n");
// sort ues
sort_ue_ul (module_idP,frameP, subframeP);
// we need to distribute RBs among UEs
// step1: reset the vars
for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
total_allocated_rbs[CC_id]=0;
total_remaining_rbs[CC_id]=0;
average_rbs_per_user[CC_id]=0;
for (i=UE_list->head_ul; i>=0; i=UE_list->next_ul[i]) {
nb_allocated_rbs[CC_id][i]=0;
}
}
//LOG_I(MAC,"step2 \n");
// step 2: calculate the average rb per UE
total_ue_count =0;
max_num_ue_to_be_scheduled=0;
for (i=UE_list->head_ul; i>=0; i=UE_list->next_ul[i]) {
rnti = UE_RNTI(module_idP,i);
if (rnti==NOT_A_RNTI)
continue;
if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
continue;
if (!phy_stats_exist(module_idP, rnti))
continue;
UE_id = i;
for (n=0; n<UE_list->numactiveULCCs[UE_id]; n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
UE_template = &UE_list->UE_template[CC_id][UE_id];
average_rbs_per_user[CC_id]=0;
frame_parms = mac_xface->get_lte_frame_parms(module_idP,CC_id);
if (UE_template->pre_allocated_nb_rb_ul > 0) {
total_ue_count+=1;
}
/*
if((mac_xface->get_nCCE_max(module_idP,CC_id,3,subframeP) - nCCE_to_be_used[CC_id]) > (1<<aggregation)) {
nCCE_to_be_used[CC_id] = nCCE_to_be_used[CC_id] + (1<<aggregation);
max_num_ue_to_be_scheduled+=1;
}*/
max_num_ue_to_be_scheduled+=1;
nb_rbs_allowed_slice_uplink[CC_id][UE_id] = flexran_nb_rbs_allowed_slice_uplink(slice_percentage_uplink[UE_id], flexran_get_N_RB_UL(module_idP, CC_id));
if (total_ue_count == 0) {
average_rbs_per_user[CC_id] = 0;
} else if (total_ue_count == 1 ) { // increase the available RBs, special case,
average_rbs_per_user[CC_id] = nb_rbs_allowed_slice_uplink[CC_id][i]-first_rb[CC_id]+1;
} else if( (total_ue_count <= (nb_rbs_allowed_slice_uplink[CC_id][i]-first_rb[CC_id])) &&
(total_ue_count <= max_num_ue_to_be_scheduled)) {
average_rbs_per_user[CC_id] = (uint16_t) floor((nb_rbs_allowed_slice_uplink[CC_id][i]-first_rb[CC_id])/total_ue_count);
} else if (max_num_ue_to_be_scheduled > 0 ) {
average_rbs_per_user[CC_id] = (uint16_t) floor((nb_rbs_allowed_slice_uplink[CC_id][i]-first_rb[CC_id])/max_num_ue_to_be_scheduled);
} else {
average_rbs_per_user[CC_id]=1;
LOG_W(MAC,"[eNB %d] frame %d subframe %d: UE %d CC %d: can't get average rb per user (should not be here)\n",
module_idP,frameP,subframeP,UE_id,CC_id);
}
}
}
if (total_ue_count > 0)
LOG_D(MAC,"[eNB %d] Frame %d subframe %d: total ue to be scheduled %d/%d\n",
module_idP, frameP, subframeP,total_ue_count, max_num_ue_to_be_scheduled);
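// Example (illustration only): with 25 UL PRBs allowed for the slice, first_rb = 1
// and 3 schedulable UEs, each UE is initially given floor((25 - 1) / 3) = 8 PRBs
// before the remaining-RB pass below.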
//LOG_D(MAC,"step3\n");
// step 3: assign RBs
for (i=UE_list->head_ul; i>=0; i=UE_list->next_ul[i]) {
rnti = UE_RNTI(module_idP,i);
if (rnti==NOT_A_RNTI)
continue;
if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
continue;
if (!phy_stats_exist(module_idP, rnti))
continue;
UE_id = i;
for (n=0; n<UE_list->numactiveULCCs[UE_id]; n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
ue_sched_ctl->max_allowed_rbs[CC_id]=nb_rbs_allowed_slice_uplink[CC_id][UE_id];
// mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,frameP,subframeP,&harq_pid,&round,openair_harq_UL);
flexran_get_harq(module_idP, CC_id, UE_id, frameP, subframeP, &harq_pid, &round, openair_harq_UL);
if(round>0) {
nb_allocated_rbs[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb_ul[harq_pid];
} else {
nb_allocated_rbs[CC_id][UE_id] = cmin(UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul, average_rbs_per_user[CC_id]);
}
total_allocated_rbs[CC_id]+= nb_allocated_rbs[CC_id][UE_id];
}
}
// step 4: assign the remaining RBs and set the pre_allocated RBs accordingly
for(r=0; r<2; r++) {
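/* two passes: r == 0 tops up each UE towards its pre_allocated_nb_rb_ul while RBs
 * remain; r == 1 writes the final figure back into pre_allocated_nb_rb_ul. */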
for (i=UE_list->head_ul; i>=0; i=UE_list->next_ul[i]) {
rnti = UE_RNTI(module_idP,i);
if (rnti==NOT_A_RNTI)
continue;
if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
continue;
if (!phy_stats_exist(module_idP, rnti))
continue;
UE_id = i;
for (n=0; n<UE_list->numactiveULCCs[UE_id]; n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
UE_template = &UE_list->UE_template[CC_id][UE_id];
frame_parms = mac_xface->get_lte_frame_parms(module_idP,CC_id);
total_remaining_rbs[CC_id]=nb_rbs_allowed_slice_uplink[CC_id][UE_id] - first_rb[CC_id] - total_allocated_rbs[CC_id];
if (total_ue_count == 1 ) {
total_remaining_rbs[CC_id]+=1;
}
if ( r == 0 ) {
while ( (UE_template->pre_allocated_nb_rb_ul > 0 ) &&
(nb_allocated_rbs[CC_id][UE_id] < UE_template->pre_allocated_nb_rb_ul) &&
(total_remaining_rbs[CC_id] > 0)) {
nb_allocated_rbs[CC_id][UE_id] = cmin(nb_allocated_rbs[CC_id][UE_id]+1,UE_template->pre_allocated_nb_rb_ul);
total_remaining_rbs[CC_id]--;
total_allocated_rbs[CC_id]++;
}
} else {
UE_template->pre_allocated_nb_rb_ul= nb_allocated_rbs[CC_id][UE_id];
LOG_D(MAC,"******************UL Scheduling Information for UE%d CC_id %d ************************\n",UE_id, CC_id);
LOG_D(MAC,"[eNB %d] total RB allocated for UE%d CC_id %d = %d\n", module_idP, UE_id, CC_id, UE_template->pre_allocated_nb_rb_ul);
}
}
}
}
for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
frame_parms= mac_xface->get_lte_frame_parms(module_idP,CC_id);
if (total_allocated_rbs[CC_id]>0) {
LOG_D(MAC,"[eNB %d] total RB allocated for all UEs = %d/%d\n", module_idP, total_allocated_rbs[CC_id], nb_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id]);
}
}
}
/*
* Main Uplink Slicing
*
*/
void
flexran_schedule_ue_ul_spec_default(mid_t mod_id,
uint32_t frame,
uint32_t cooperation_flag,
int subframe,
unsigned char sched_subframe,
Protocol__FlexranMessage **ul_info)
//------------------------------------------------------------------------------
{
int i=0;
flexran_agent_mac_create_empty_ul_config(mod_id, ul_info);
for (i = 0; i < n_active_slices_uplink; i++) {
// Load any updated functions
if (update_ul_scheduler[i] > 0 ) {
slice_sched_ul[i] = dlsym(NULL, ul_scheduler_type[i]);
update_ul_scheduler[i] = 0;
update_ul_scheduler_current[i] = 0;
slice_percentage_current_uplink[i]= slice_percentage_uplink[i];
total_slice_percentage_uplink+=slice_percentage_uplink[i];
LOG_N(MAC,"update ul scheduler slice %d\n", i);
}
// check if the number of slices has changed, and log
if (n_active_slices_uplink_current != n_active_slices_uplink ){
if ((n_active_slices_uplink > 0) && (n_active_slices_uplink <= MAX_NUM_SLICES)) {
LOG_N(MAC,"[eNB %d]frame %d subframe %d: number of active slices has changed: %d-->%d\n",
mod_id, frame, subframe, n_active_slices_uplink_current, n_active_slices_uplink);
n_active_slices_uplink_current = n_active_slices_uplink;
} else {
LOG_W(MAC,"invalid number of slices %d, revert to the previous value %d\n",n_active_slices_uplink, n_active_slices_uplink_current);
n_active_slices_uplink = n_active_slices_uplink_current;
}
}
// check if the slice RB share has changed, and log to the console
if (slice_percentage_current_uplink[i] != slice_percentage_uplink[i]){
// if ((slice_percentage_uplink[i] >= 0.0) && (slice_percentage_uplink[i] <= 1.0)){
// if ((total_slice_percentage_uplink - slice_percentage_current_uplink[i] + slice_percentage_uplink[i]) <= 1.0) {
// total_slice_percentage_uplink=total_slice_percentage_uplink - slice_percentage_current_uplink[i] + slice_percentage_uplink[i];
LOG_N(MAC,"[eNB %d][SLICE %d] frame %d subframe %d: total percentage %f, slice RB percentage has changed: %f-->%f\n",
mod_id, i, frame, subframe, total_slice_percentage_uplink, slice_percentage_current_uplink[i], slice_percentage_uplink[i]);
slice_percentage_current_uplink[i] = slice_percentage_uplink[i];
// } else {
// LOG_W(MAC,"[eNB %d][SLICE %d] invalid total RB share (%f->%f), revert the previous value (%f->%f)\n",
// mod_id,i,
// total_slice_percentage_uplink,
// total_slice_percentage_uplink - slice_percentage_current_uplink[i] + slice_percentage_uplink[i],
// slice_percentage_uplink[i],slice_percentage_current_uplink[i]);
// slice_percentage_uplink[i]= slice_percentage_current_uplink[i];
// }
// } else {
// LOG_W(MAC,"[eNB %d][SLICE %d] invalid slice RB share, revert the previous value (%f->%f)\n",mod_id, i, slice_percentage_uplink[i],slice_percentage_current_uplink[i]);
// slice_percentage_uplink[i]= slice_percentage_current_uplink[i];
// }
}
// check if a new scheduler has been loaded for this slice, and log to the console
if (update_ul_scheduler_current[i] != update_ul_scheduler[i]){
LOG_N(MAC,"[eNB %d][SLICE %d] frame %d subframe %d: DL scheduler for this slice is updated: %s \n",
mod_id, i, frame, subframe, ul_scheduler_type[i]);
update_ul_scheduler_current[i] = update_ul_scheduler[i];
}
// Run each enabled slice-specific scheduler one by one
//LOG_N(MAC,"[eNB %d]frame %d subframe %d slice %d: calling the scheduler\n", mod_id, frame, subframe,i);
}
slice_sched_ul[0](mod_id, frame, cooperation_flag, subframe, sched_subframe,ul_info);
}
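/* A minimal illustrative sketch (not part of this commit): the update path above is
 * driven by overwriting the scheduler name for a slice and raising its update flag,
 * so that the next call to flexran_schedule_ue_ul_spec_default() re-resolves the
 * symbol via dlsym(). The helper name, slice index and scheduler name below are
 * hypothetical examples only. */
static void example_switch_ul_slice_scheduler(void)
{
  int slice = 1;                                                 /* example slice index */
  ul_scheduler_type[slice] = "flexran_schedule_ue_ul_spec_mmtc"; /* must match an exported symbol */
  update_ul_scheduler[slice] = 1;                                /* picked up on the next scheduling call */
}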
void
flexran_schedule_ue_ul_spec_embb(mid_t mod_id,
frame_t frame,
unsigned char cooperation_flag,
uint32_t subframe,
unsigned char sched_subframe,
Protocol__FlexranMessage **ul_info)
{
flexran_agent_schedule_ulsch_ue_spec(mod_id,
frame,
cooperation_flag,
subframe,
sched_subframe,
ul_info);
}
void flexran_agent_schedule_ulsch_ue_spec(mid_t module_idP,
frame_t frameP,
unsigned char cooperation_flag,
sub_frame_t subframeP,
unsigned char sched_subframe,
Protocol__FlexranMessage **ul_info) {
uint16_t first_rb[MAX_NUM_CCs],i;
int CC_id;
eNB_MAC_INST *eNB=&eNB_mac_inst[module_idP];
start_meas(&eNB->schedule_ulsch);
for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
//leave out first RB for PUCCH
first_rb[CC_id] = 1;
// UE data info;
// check which UE has data to transmit
// function to decide the scheduling
// e.g. scheduling_rslt = Greedy(granted_UEs, nb_RB)
// default function for default scheduling
//
// output of scheduling, the UE numbers in RBs, where it is in the code???
// check if RA (Msg3) is active in this subframeP, if so skip the PRBs used for Msg3
// Msg3 is using 1 PRB so we need to increase first_rb accordingly
// not sure about the break (can there be more than 1 active RA procedure?)
for (i=0; i<NB_RA_PROC_MAX; i++) {
if ((eNB->common_channels[CC_id].RA_template[i].RA_active == TRUE) &&
(eNB->common_channels[CC_id].RA_template[i].generate_rar == 0) &&
(eNB->common_channels[CC_id].RA_template[i].generate_Msg4 == 0) &&
(eNB->common_channels[CC_id].RA_template[i].wait_ack_Msg4 == 0) &&
(eNB->common_channels[CC_id].RA_template[i].Msg3_subframe == sched_subframe)) {
first_rb[CC_id]++;
eNB->common_channels[CC_id].RA_template[i].Msg3_subframe = -1;
break;
}
}
/*
if (mac_xface->is_prach_subframe(&(mac_xface->lte_frame_parms),frameP,subframeP)) {
first_rb[CC_id] = (mac_xface->get_prach_prb_offset(&(mac_xface->lte_frame_parms),
*/
}
flexran_agent_schedule_ulsch_rnti(module_idP, cooperation_flag, frameP, subframeP, sched_subframe,first_rb);
stop_meas(&eNB->schedule_ulsch);
}
void flexran_agent_schedule_ulsch_rnti(module_id_t module_idP,
unsigned char cooperation_flag,
frame_t frameP,
sub_frame_t subframeP,
unsigned char sched_subframe,
uint16_t *first_rb)
{
int UE_id;
uint8_t aggregation = 2;
rnti_t rnti = -1;
uint8_t round = 0;
uint8_t harq_pid = 0;
void *ULSCH_dci = NULL;
LTE_eNB_UE_stats *eNB_UE_stats = NULL;
DCI_PDU *DCI_pdu;
uint8_t status = 0;
uint8_t rb_table_index = -1;
uint16_t TBS = 0;
// int32_t buffer_occupancy=0;
uint32_t cqi_req,cshift,ndi,mcs=0,rballoc,tpc;
int32_t normalized_rx_power, target_rx_power=-90;
static int32_t tpc_accumulated=0;
int n,CC_id = 0;
eNB_MAC_INST *eNB=&eNB_mac_inst[module_idP];
UE_list_t *UE_list=&eNB->UE_list;
UE_TEMPLATE *UE_template;
UE_sched_ctrl *UE_sched_ctrl;
// int rvidx_tab[4] = {0,2,3,1};
LTE_DL_FRAME_PARMS *frame_parms;
int drop_ue=0;
// LOG_I(MAC,"entering ulsch preprocesor\n");
/*TODO*/
int slice_id = 0;
_ulsch_scheduler_pre_processor(module_idP,
slice_id,
frameP,
subframeP,
first_rb);
// LOG_I(MAC,"exiting ulsch preprocesor\n");
// loop over all active UEs
for (UE_id=UE_list->head_ul; UE_id>=0; UE_id=UE_list->next_ul[UE_id]) {
// don't schedule if Msg4 is not received yet
if (UE_list->UE_template[UE_PCCID(module_idP,UE_id)][UE_id].configured==FALSE) {
LOG_I(MAC,"[eNB %d] frame %d subfarme %d, UE %d: not configured, skipping UE scheduling \n",
module_idP,frameP,subframeP,UE_id);
continue;
}
rnti = flexran_get_ue_crnti(module_idP, UE_id);
if (rnti==NOT_A_RNTI) {
LOG_W(MAC,"[eNB %d] frame %d subfarme %d, UE %d: no RNTI \n", module_idP,frameP,subframeP,UE_id);
continue;
}
/* let's drop the UE if get_eNB_UE_stats returns NULL when calling it with any of the UE's active UL CCs */
/* TODO: refine? */
drop_ue = 0;
for (n=0; n<UE_list->numactiveULCCs[UE_id]; n++) {
CC_id = UE_list->ordered_ULCCids[n][UE_id];
if (mac_xface->get_eNB_UE_stats(module_idP,CC_id,rnti) == NULL) {
LOG_W(MAC,"[eNB %d] frame %d subframe %d, UE %d/%x CC %d: no PHY context\n", module_idP,frameP,subframeP,UE_id,rnti,CC_id);
drop_ue = 1;
break;
}
}
if (drop_ue == 1) {
/* we can't come here, ulsch_scheduler_pre_processor won't put in the list a UE with no PHY context */
abort();
/* TODO: this is a hack. Sometimes the UE has no PHY context but
* is still present in the MAC with 'ul_failure_timer' = 0 and
* 'ul_out_of_sync' = 0. It seems wrong and the UE stays there forever. Let's
* start an UL out of sync procedure in this case.
* The root cause of this problem has to be found and corrected.
* In the meantime, this hack...
*/
if (UE_list->UE_sched_ctrl[UE_id].ul_failure_timer == 0 &&
UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 0) {
LOG_W(MAC,"[eNB %d] frame %d subframe %d, UE %d/%x CC %d: UE in weird state, let's put it 'out of sync'\n",
module_idP,frameP,subframeP,UE_id,rnti,CC_id);
// inform RRC of failure and clear timer
mac_eNB_rrc_ul_failure(module_idP,CC_id,frameP,subframeP,rnti);
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer=0;
UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync=1;
}
continue;
}
// loop over all active UL CC_ids for this UE
for (n=0; n<UE_list->numactiveULCCs[UE_id]; n++) {
// This is the actual CC_id in the list
CC_id = UE_list->ordered_ULCCids[n][UE_id];
frame_parms = mac_xface->get_lte_frame_parms(module_idP,CC_id);
eNB_UE_stats = mac_xface->get_eNB_UE_stats(module_idP,CC_id,rnti);
aggregation=get_aggregation(get_bw_index(module_idP,CC_id),
eNB_UE_stats->DL_cqi[0],
format0);
if (CCE_allocation_infeasible(module_idP,CC_id,0,subframeP,aggregation,rnti)) {
LOG_W(MAC,"[eNB %d] frame %d subframe %d, UE %d/%x CC %d: not enough nCCE\n", module_idP,frameP,subframeP,UE_id,rnti,CC_id);
continue; // break;
} else{
LOG_D(MAC,"[eNB %d] frame %d subframe %d, UE %d/%x CC %d mode %s: aggregation level %d\n",
module_idP,frameP,subframeP,UE_id,rnti,CC_id, mode_string[eNB_UE_stats->mode], 1<<aggregation);
}
if (eNB_UE_stats->mode == PUSCH) { // ue has a ulsch channel
DCI_pdu = &eNB->common_channels[CC_id].DCI_pdu;
UE_template = &UE_list->UE_template[CC_id][UE_id];
UE_sched_ctrl = &UE_list->UE_sched_ctrl[UE_id];
if (flexran_get_harq(module_idP, CC_id, UE_id, frameP, subframeP, &harq_pid, &round, openair_harq_UL) == -1 ) {
LOG_W(MAC,"[eNB %d] Scheduler Frame %d, subframeP %d: candidate harq_pid from PHY for UE %d CC %d RNTI %x\n",
module_idP,frameP,subframeP, UE_id, CC_id, rnti);
continue;
} else
LOG_T(MAC,"[eNB %d] Frame %d, subframeP %d, UE %d CC %d : got harq pid %d round %d (rnti %x,mode %s)\n",
module_idP,frameP,subframeP,UE_id,CC_id, harq_pid, round,rnti,mode_string[eNB_UE_stats->mode]);
PHY_vars_eNB_g[module_idP][CC_id]->pusch_stats_BO[UE_id][(frameP*10)+subframeP] = UE_template->ul_total_buffer;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_BO,PHY_vars_eNB_g[module_idP][CC_id]->pusch_stats_BO[UE_id][(frameP*10)+subframeP]);
if (((UE_is_to_be_scheduled(module_idP,CC_id,UE_id)>0)) || (round>0))// || ((frameP%10)==0))
// if there is information on bsr of DCCH, DTCH or if there is UL_SR, or if there is a packet to retransmit, or we want to schedule a periodic feedback every 10 frames
{
LOG_D(MAC,"[eNB %d][PUSCH] Frame %d subframe %d Scheduling UE %d/%x in round %d(SR %d,UL_inactivity timer %d,UL_failure timer %d)\n",
module_idP,frameP,subframeP,UE_id,rnti,round,UE_template->ul_SR,
UE_sched_ctrl->ul_inactivity_timer,
UE_sched_ctrl->ul_failure_timer);
// reset the scheduling request
UE_template->ul_SR = 0;
// status = mac_eNB_get_rrc_status(module_idP,rnti);
status = flexran_get_rrc_status(module_idP, rnti);
if (status < RRC_CONNECTED)
cqi_req = 0;
else if (UE_sched_ctrl->cqi_req_timer>30) {
cqi_req = 1;
UE_sched_ctrl->cqi_req_timer=0;
}
else
cqi_req = 0;
//power control
//compute the expected ULSCH RX power (for the stats)
// this is the normalized RX power and this should be constant (regardless of the mcs)
normalized_rx_power = eNB_UE_stats->UL_rssi[0];
target_rx_power = mac_xface->get_target_pusch_rx_power(module_idP,CC_id);
// this assumes accumulated tpc
// make sure that we are only sending a tpc update once a frame, otherwise the control loop will freak out
int32_t framex10psubframe = UE_template->pusch_tpc_tx_frame*10+UE_template->pusch_tpc_tx_subframe;
if (((framex10psubframe+10)<=(frameP*10+subframeP)) || //normal case
((framex10psubframe>(frameP*10+subframeP)) && (((10240-framex10psubframe+frameP*10+subframeP)>=10)))) //frame wrap-around
{
UE_template->pusch_tpc_tx_frame=frameP;
UE_template->pusch_tpc_tx_subframe=subframeP;
if (normalized_rx_power>(target_rx_power+1)) {
tpc = 0; //-1
tpc_accumulated--;
} else if (normalized_rx_power<(target_rx_power-1)) {
tpc = 2; //+1
tpc_accumulated++;
} else {
tpc = 1; //0
}
} else {
tpc = 1; //0
}
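/* tpc is the DCI format 0 TPC command (accumulated mode, TS 36.213 §5.1.1.1):
 * 0 -> -1 dB, 1 -> 0 dB, 2 -> +1 dB, 3 -> +3 dB; only -1/0/+1 are used here. */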
if (tpc!=1) {
LOG_D(MAC,"[eNB %d] ULSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, accumulated %d, normalized/target rx power %d/%d\n",
module_idP,frameP,subframeP,harq_pid,tpc,
tpc_accumulated,normalized_rx_power,target_rx_power);
}
// new transmission
if (round==0) {
ndi = 1-UE_template->oldNDI_UL[harq_pid];
UE_template->oldNDI_UL[harq_pid]=ndi;
UE_list->eNB_UE_stats[CC_id][UE_id].normalized_rx_power=normalized_rx_power;
UE_list->eNB_UE_stats[CC_id][UE_id].target_rx_power=target_rx_power;
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1=UE_template->pre_assigned_mcs_ul;
mcs = UE_template->pre_assigned_mcs_ul;//cmin (UE_template->pre_assigned_mcs_ul, openair_daq_vars.target_ue_ul_mcs); // adjust, based on user-defined MCS
if (UE_template->pre_allocated_rb_table_index_ul >=0) {
rb_table_index=UE_template->pre_allocated_rb_table_index_ul;
} else {
mcs=10;//cmin (10, openair_daq_vars.target_ue_ul_mcs);
rb_table_index=5; // for PHR
}
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2=mcs;
// buffer_occupancy = UE_template->ul_total_buffer;
while (((rb_table[rb_table_index]>(nb_rbs_allowed_slice_uplink[CC_id][UE_id]-1-first_rb[CC_id])) ||
(rb_table[rb_table_index]>45)) &&
(rb_table_index>0)) {
rb_table_index--;
}
TBS = mac_xface->get_TBS_UL(mcs,rb_table[rb_table_index]);
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx+=rb_table[rb_table_index];
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_TBS=TBS;
// buffer_occupancy -= TBS;
rballoc = mac_xface->computeRIV(frame_parms->N_RB_UL,
first_rb[CC_id],
rb_table[rb_table_index]);
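/* computeRIV() encodes the contiguous allocation as the uplink resource indication
 * value (TS 36.213 §8.1): RIV = N_RB_UL*(L-1) + RB_start when (L-1) <= floor(N_RB_UL/2),
 * otherwise N_RB_UL*(N_RB_UL-L+1) + (N_RB_UL-1-RB_start).
 * E.g. N_RB_UL = 25, RB_start = 1, L = 4 PRBs -> RIV = 25*3 + 1 = 76. */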
T(T_ENB_MAC_UE_UL_SCHEDULE, T_INT(module_idP), T_INT(CC_id), T_INT(rnti), T_INT(frameP),
T_INT(subframeP), T_INT(harq_pid), T_INT(mcs), T_INT(first_rb[CC_id]), T_INT(rb_table[rb_table_index]),
T_INT(TBS), T_INT(ndi));
if (mac_eNB_get_rrc_status(module_idP,rnti) < RRC_CONNECTED)
LOG_I(MAC,"[eNB %d][PUSCH %d/%x] CC_id %d Frame %d subframeP %d Scheduled UE %d (mcs %d, first rb %d, nb_rb %d, rb_table_index %d, TBS %d, harq_pid %d)\n",
module_idP,harq_pid,rnti,CC_id,frameP,subframeP,UE_id,mcs,
first_rb[CC_id],rb_table[rb_table_index],
rb_table_index,TBS,harq_pid);
// bad indices : 20 (40 PRB), 21 (45 PRB), 22 (48 PRB)
// increment for next UE allocation
first_rb[CC_id]+=rb_table[rb_table_index];
//store for possible retransmission
UE_template->nb_rb_ul[harq_pid] = rb_table[rb_table_index];
UE_sched_ctrl->ul_scheduled |= (1<<harq_pid);
if (UE_id == UE_list->head)
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_SCHEDULED,UE_sched_ctrl->ul_scheduled);
// adjust total UL buffer status by TBS, wait for UL sdus to do final update
LOG_D(MAC,"[eNB %d] CC_id %d UE %d/%x : adjusting ul_total_buffer, old %d, TBS %d\n", module_idP,CC_id,UE_id,rnti,UE_template->ul_total_buffer,TBS);
if (UE_template->ul_total_buffer > TBS)
UE_template->ul_total_buffer -= TBS;
else
UE_template->ul_total_buffer = 0;
LOG_D(MAC,"ul_total_buffer, new %d\n", UE_template->ul_total_buffer);
// Cyclic shift for DM RS
cshift = 0;// values from 0 to 7 can be used for mapping the cyclic shift (36.211 , Table 5.5.2.1.1-1)
if (frame_parms->frame_type == TDD) {
switch (frame_parms->N_RB_UL) {
case 6:
ULSCH_dci = UE_template->ULSCH_DCI[harq_pid];
((DCI0_1_5MHz_TDD_1_6_t *)ULSCH_dci)->type = 0;
((DCI0_1_5MHz_TDD_1_6_t *)ULSCH_dci)->hopping = 0;
((DCI0_1_5MHz_TDD_1_6_t *)ULSCH_dci)->rballoc = rballoc;
((DCI0_1_5MHz_TDD_1_6_t *)ULSCH_dci)->mcs = mcs;
((DCI0_1_5MHz_TDD_1_6_t *)ULSCH_dci)->ndi = ndi;
((DCI0_1_5MHz_TDD_1_6_t *)ULSCH_dci)->TPC = tpc;
((DCI0_1_5MHz_TDD_1_6_t *)ULSCH_dci)->cshift = cshift;
((DCI0_1_5MHz_TDD_1_6_t *)ULSCH_dci)->padding = 0;
((DCI0_1_5MHz_TDD_1_6_t *)ULSCH_dci)->dai = UE_template->DAI_ul[sched_subframe];
((DCI0_1_5MHz_TDD_1_6_t *)ULSCH_dci)->cqi_req = cqi_req;
add_ue_spec_dci(DCI_pdu,
ULSCH_dci,
rnti,
sizeof(DCI0_1_5MHz_TDD_1_6_t),
aggregation,
sizeof_DCI0_1_5MHz_TDD_1_6_t,
format0,
0);
break;
default:
case 25:
ULSCH_dci = UE_template->ULSCH_DCI[harq_pid];
((DCI0_5MHz_TDD_1_6_t *)ULSCH_dci)->type = 0;
((DCI0_5MHz_TDD_1_6_t *)ULSCH_dci)->hopping = 0;
((DCI0_5MHz_TDD_1_6_t *)ULSCH_dci)->rballoc = rballoc;
((DCI0_5MHz_TDD_1_6_t *)ULSCH_dci)->mcs = mcs;
((DCI0_5MHz_TDD_1_6_t *)ULSCH_dci)->ndi = ndi;
((DCI0_5MHz_TDD_1_6_t *)ULSCH_dci)->TPC = tpc;
((DCI0_5MHz_TDD_1_6_t *)ULSCH_dci)->cshift = cshift;
((DCI0_5MHz_TDD_1_6_t *)ULSCH_dci)->padding = 0;
((DCI0_5MHz_TDD_1_6_t *)ULSCH_dci)->dai = UE_template->DAI_ul[sched_subframe];
((DCI0_5MHz_TDD_1_6_t *)ULSCH_dci)->cqi_req = cqi_req;
add_ue_spec_dci(DCI_pdu,
ULSCH_dci,
rnti,
sizeof(DCI0_5MHz_TDD_1_6_t),
aggregation,
sizeof_DCI0_5MHz_TDD_1_6_t,
format0,
0);
break;
case 50:
ULSCH_dci = UE_template->ULSCH_DCI[harq_pid];
((DCI0_10MHz_TDD_1_6_t *)ULSCH_dci)->type = 0;
((DCI0_10MHz_TDD_1_6_t *)ULSCH_dci)->hopping = 0;
((DCI0_10MHz_TDD_1_6_t *)ULSCH_dci)->rballoc = rballoc;
((DCI0_10MHz_TDD_1_6_t *)ULSCH_dci)->mcs = mcs;
((DCI0_10MHz_TDD_1_6_t *)ULSCH_dci)->ndi = ndi;
((DCI0_10MHz_TDD_1_6_t *)ULSCH_dci)->TPC = tpc;
((DCI0_10MHz_TDD_1_6_t *)ULSCH_dci)->cshift = cshift;
((DCI0_10MHz_TDD_1_6_t *)ULSCH_dci)->padding = 0;
((DCI0_10MHz_TDD_1_6_t *)ULSCH_dci)->dai = UE_template->DAI_ul[sched_subframe];
((DCI0_10MHz_TDD_1_6_t *)ULSCH_dci)->cqi_req = cqi_req;
add_ue_spec_dci(DCI_pdu,
ULSCH_dci,
rnti,
sizeof(DCI0_10MHz_TDD_1_6_t),
aggregation,
sizeof_DCI0_10MHz_TDD_1_6_t,
format0,
0);
break;
case 100:
ULSCH_dci = UE_template->ULSCH_DCI[harq_pid];
((DCI0_20MHz_TDD_1_6_t *)ULSCH_dci)->type = 0;
((DCI0_20MHz_TDD_1_6_t *)ULSCH_dci)->hopping = 0;
((DCI0_20MHz_TDD_1_6_t *)ULSCH_dci)->rballoc = rballoc;
((DCI0_20MHz_TDD_1_6_t *)ULSCH_dci)->mcs = mcs;
((DCI0_20MHz_TDD_1_6_t *)ULSCH_dci)->ndi = ndi;
((DCI0_20MHz_TDD_1_6_t *)ULSCH_dci)->TPC = tpc;
((DCI0_20MHz_TDD_1_6_t *)ULSCH_dci)->cshift = cshift;
((DCI0_20MHz_TDD_1_6_t *)ULSCH_dci)->padding = 0;
((DCI0_20MHz_TDD_1_6_t *)ULSCH_dci)->dai = UE_template->DAI_ul[sched_subframe];
((DCI0_20MHz_TDD_1_6_t *)ULSCH_dci)->cqi_req = cqi_req;
add_ue_spec_dci(DCI_pdu,
ULSCH_dci,
rnti,
sizeof(DCI0_20MHz_TDD_1_6_t),
aggregation,
sizeof_DCI0_20MHz_TDD_1_6_t,
format0,
0);
break;
}
} // TDD
else { //FDD
switch (frame_parms->N_RB_UL) {
case 25:
default:
ULSCH_dci = UE_template->ULSCH_DCI[harq_pid];
((DCI0_5MHz_FDD_t *)ULSCH_dci)->type = 0;
((DCI0_5MHz_FDD_t *)ULSCH_dci)->hopping = 0;
((DCI0_5MHz_FDD_t *)ULSCH_dci)->rballoc = rballoc;
((DCI0_5MHz_FDD_t *)ULSCH_dci)->mcs = mcs;
((DCI0_5MHz_FDD_t *)ULSCH_dci)->ndi = ndi;
((DCI0_5MHz_FDD_t *)ULSCH_dci)->TPC = tpc;
((DCI0_5MHz_FDD_t *)ULSCH_dci)->cshift = cshift;
((DCI0_5MHz_FDD_t *)ULSCH_dci)->padding = 0;
((DCI0_5MHz_FDD_t *)ULSCH_dci)->cqi_req = cqi_req;
add_ue_spec_dci(DCI_pdu,
ULSCH_dci,
rnti,
sizeof(DCI0_5MHz_FDD_t),
aggregation,
sizeof_DCI0_5MHz_FDD_t,
format0,
0);
break;
case 6:
ULSCH_dci = UE_template->ULSCH_DCI[harq_pid];
((DCI0_1_5MHz_FDD_t *)ULSCH_dci)->type = 0;
((DCI0_1_5MHz_FDD_t *)ULSCH_dci)->hopping = 0;
((DCI0_1_5MHz_FDD_t *)ULSCH_dci)->rballoc = rballoc;
((DCI0_1_5MHz_FDD_t *)ULSCH_dci)->mcs = mcs;
((DCI0_1_5MHz_FDD_t *)ULSCH_dci)->ndi = ndi;
((DCI0_1_5MHz_FDD_t *)ULSCH_dci)->TPC = tpc;
((DCI0_1_5MHz_FDD_t *)ULSCH_dci)->cshift = cshift;
((DCI0_1_5MHz_FDD_t *)ULSCH_dci)->padding = 0;
((DCI0_1_5MHz_FDD_t *)ULSCH_dci)->cqi_req = cqi_req;
add_ue_spec_dci(DCI_pdu,
ULSCH_dci,
rnti,
sizeof(DCI0_1_5MHz_FDD_t),
aggregation,
sizeof_DCI0_1_5MHz_FDD_t,
format0,
0);
break;
case 50:
ULSCH_dci = UE_template->ULSCH_DCI[harq_pid];
((DCI0_10MHz_FDD_t *)ULSCH_dci)->type = 0;
((DCI0_10MHz_FDD_t *)ULSCH_dci)->hopping = 0;
((DCI0_10MHz_FDD_t *)ULSCH_dci)->rballoc = rballoc;
((DCI0_10MHz_FDD_t *)ULSCH_dci)->mcs = mcs;
((DCI0_10MHz_FDD_t *)ULSCH_dci)->ndi = ndi;
((DCI0_10MHz_FDD_t *)ULSCH_dci)->TPC = tpc;
((DCI0_10MHz_FDD_t *)ULSCH_dci)->padding = 0;
((DCI0_10MHz_FDD_t *)ULSCH_dci)->cshift = cshift;
((DCI0_10MHz_FDD_t *)ULSCH_dci)->cqi_req = cqi_req;
add_ue_spec_dci(DCI_pdu,
ULSCH_dci,
rnti,
sizeof(DCI0_10MHz_FDD_t),
aggregation,
sizeof_DCI0_10MHz_FDD_t,
format0,
0);
break;
case 100:
ULSCH_dci = UE_template->ULSCH_DCI[harq_pid];
((DCI0_20MHz_FDD_t *)ULSCH_dci)->type = 0;
((DCI0_20MHz_FDD_t *)ULSCH_dci)->hopping = 0;
((DCI0_20MHz_FDD_t *)ULSCH_dci)->rballoc = rballoc;
((DCI0_20MHz_FDD_t *)ULSCH_dci)->mcs = mcs;
((DCI0_20MHz_FDD_t *)ULSCH_dci)->ndi = ndi;
((DCI0_20MHz_FDD_t *)ULSCH_dci)->TPC = tpc;
((DCI0_20MHz_FDD_t *)ULSCH_dci)->padding = 0;
((DCI0_20MHz_FDD_t *)ULSCH_dci)->cshift = cshift;
((DCI0_20MHz_FDD_t *)ULSCH_dci)->cqi_req = cqi_req;
add_ue_spec_dci(DCI_pdu,
ULSCH_dci,
rnti,
sizeof(DCI0_20MHz_FDD_t),
aggregation,
sizeof_DCI0_20MHz_FDD_t,
format0,
0);
break;
}
}
add_ue_ulsch_info(module_idP,
CC_id,
UE_id,
subframeP,
S_UL_SCHEDULED);
LOG_D(MAC,"[eNB %d] CC_id %d Frame %d, subframeP %d: Generated ULSCH DCI for next UE_id %d, format 0\n", module_idP,CC_id,frameP,subframeP,UE_id);
#ifdef DEBUG
dump_dci(frame_parms, &DCI_pdu->dci_alloc[DCI_pdu->Num_common_dci+DCI_pdu->Num_ue_spec_dci-1]);
#endif
}
else {
T(T_ENB_MAC_UE_UL_SCHEDULE_RETRANSMISSION, T_INT(module_idP), T_INT(CC_id), T_INT(rnti), T_INT(frameP),
T_INT(subframeP), T_INT(harq_pid), T_INT(mcs), T_INT(first_rb[CC_id]), T_INT(rb_table[rb_table_index]),
T_INT(round));
LOG_D(MAC,"[eNB %d][PUSCH %d/%x] CC_id %d Frame %d subframeP %d Scheduled (PHICH) UE %d (mcs %d, first rb %d, nb_rb %d, rb_table_index %d, TBS %d, harq_pid %d,round %d)\n",
module_idP,harq_pid,rnti,CC_id,frameP,subframeP,UE_id,mcs,
first_rb[CC_id],rb_table[rb_table_index],
rb_table_index,TBS,harq_pid,round);
}/*
else if (round > 0) { //we schedule a retransmission
ndi = UE_template->oldNDI_UL[harq_pid];
if ((round&3)==0) {
mcs = openair_daq_vars.target_ue_ul_mcs;
} else {
mcs = rvidx_tab[round&3] + 28; //not correct for round==4!
}
LOG_I(MAC,"[eNB %d][PUSCH %d/%x] CC_id %d Frame %d subframeP %d Scheduled UE retransmission (mcs %d, first rb %d, nb_rb %d, harq_pid %d, round %d)\n",
module_idP,UE_id,rnti,CC_id,frameP,subframeP,mcs,
first_rb[CC_id],UE_template->nb_rb_ul[harq_pid],
harq_pid, round);
rballoc = mac_xface->computeRIV(frame_parms->N_RB_UL,
first_rb[CC_id],
UE_template->nb_rb_ul[harq_pid]);
first_rb[CC_id]+=UE_template->nb_rb_ul[harq_pid]; // increment for next UE allocation
UE_list->eNB_UE_stats[CC_id][UE_id].num_retransmission_rx+=1;
UE_list->eNB_UE_stats[CC_id][UE_id].rbs_used_retx_rx=UE_template->nb_rb_ul[harq_pid];
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx+=UE_template->nb_rb_ul[harq_pid];
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1=mcs;
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2=mcs;
}
*/
} // UE_is_to_be_scheduled
} // UE is in PUSCH
} // loop over UE_id
} // loop of CC_id
}