Commit 83e067dc, authored Jul 10, 2020 by Haruki NAOI
scd_optimization.
(cherry picked from commit 442c24f4e95e531ed258009355d9db238d9c54f5)
Parent: ab4a1806
Showing 5 changed files with 591 additions and 725 deletions (+591, -725)
openair2/LAYER2/MAC/eNB_scheduler.c          +148  -130
openair2/LAYER2/MAC/eNB_scheduler_fairRR.c   +427  -593
openair2/LAYER2/MAC/eNB_scheduler_fairRR.h     +9    -0
openair2/LAYER2/MAC/mac_proto.h                +2    -0
openair2/PHY_INTERFACE/IF_Module.c             +5    -2
openair2/LAYER2/MAC/eNB_scheduler.c  (view file @ 83e067dc)
...
...
@@ -425,6 +425,7 @@ schedule_SR (module_id_t module_idP,
    ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].pdu_type = NFAPI_UL_CONFIG_UCI_SR_PDU_TYPE;
    ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].uci_sr_pdu.ue_information.ue_information_rel8.tl.tag = NFAPI_UL_CONFIG_REQUEST_UE_INFORMATION_REL8_TAG;
    ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].uci_sr_pdu.ue_information.ue_information_rel8.rnti = UE_list->UE_template[CC_id][UE_id].rnti;
    ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].uci_sr_pdu.ue_information.ue_information_rel8.ue_id = UE_id;
    ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].uci_sr_pdu.ue_information.ue_information_rel11.tl.tag = 0;
    ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].uci_sr_pdu.ue_information.ue_information_rel13.tl.tag = 0;
    ul_req_body->ul_config_pdu_list[ul_req_body->number_of_pdus].uci_sr_pdu.sr_information = sr;
...
...
@@ -589,16 +590,11 @@ eNB_dlsch_ulsch_scheduler(module_id_t module_idP,
                          sub_frame_t subframeP) {
  int mbsfn_status[MAX_NUM_CCs];
  protocol_ctxt_t ctxt;
  rnti_t rnti = 0;
  int CC_id = 0;
  int UE_id = -1;
  eNB_MAC_INST *eNB = RC.mac[module_idP];
  UE_list_t *UE_list = &(eNB->UE_list);
  COMMON_channels_t *cc = eNB->common_channels;
  UE_sched_ctrl_t *UE_scheduling_control = NULL;
  uint8_t volte_ul_cycle[MAX_NUM_CCs];
  uint8_t volte_ul_buffersize[MAX_NUM_CCs];
  start_meas(&(eNB->eNB_scheduler));
...
...
@@ -618,7 +614,136 @@ eNB_dlsch_ulsch_scheduler(module_id_t module_idP,
      cc[CC_id].mcch_active = 0;
#endif
    clear_nfapi_information(RC.mac[module_idP], CC_id, frameP, subframeP);
  }

  for (UE_id = 0; UE_id < MAX_MOBILES_PER_ENB; UE_id++) {
    if (UE_list->active[UE_id]) {
      CC_id = UE_PCCID(module_idP, UE_id);
      check_ul_failure(module_idP, CC_id, UE_id, frameP, subframeP);
    }
  }

#if (!defined(PRE_SCD_THREAD))
  PROTOCOL_CTXT_SET_BY_MODULE_ID(&ctxt, module_idP, ENB_FLAG_YES, NOT_A_RNTI, frameP, subframeP, module_idP);
  pdcp_run(&ctxt);
  rrc_rx_tx(&ctxt, CC_id);
#endif
#if (LTE_RRC_VERSION >= MAKE_VERSION(10, 0, 0))

  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    if (cc[CC_id].MBMS_flag > 0) {
      start_meas(&RC.mac[module_idP]->schedule_mch);
      mbsfn_status[CC_id] = schedule_MBMS(module_idP, CC_id, frameP, subframeP);

      if (mbsfn_status[CC_id] < 0) {
        LOG_E(MAC, "mbsfn_status[%d] < 0\n", CC_id);
        return;
      }

      stop_meas(&RC.mac[module_idP]->schedule_mch);
    }
  }

#endif
  static int debug_flag = 0;
  void (*schedule_ulsch_p)(module_id_t module_idP, frame_t frameP, sub_frame_t subframe) = NULL;
  void (*schedule_ue_spec_p)(module_id_t module_idP, frame_t frameP, sub_frame_t subframe, int *mbsfn_flag) = NULL;

  if (eNB->scheduler_mode == SCHED_MODE_DEFAULT) {
    schedule_ulsch_p = schedule_ulsch;
    schedule_ue_spec_p = schedule_dlsch;
  } else if (eNB->scheduler_mode == SCHED_MODE_FAIR_RR) {
    memset(dlsch_ue_select, 0, sizeof(dlsch_ue_select));
    schedule_ulsch_p = schedule_ulsch_fairRR;
    schedule_ue_spec_p = schedule_ue_spec_fairRR;
  }

  if (debug_flag == 0) {
    LOG_E(MAC, "SCHED_MODE = %d\n", eNB->scheduler_mode);
    debug_flag = 1;
  }

  /* This schedules MIB */
  if ((subframeP == 0) && (frameP & 3) == 0)
    schedule_mib(module_idP, frameP, subframeP);

  if (get_softmodem_params()->phy_test == 0) {
    /* This schedules SI for legacy LTE and eMTC starting in subframeP */
    schedule_SI(module_idP, frameP, subframeP);
    /* This schedules Paging in subframeP */
    schedule_PCH(module_idP, frameP, subframeP);
    /* This schedules Random-Access for legacy LTE and eMTC starting in subframeP */
    schedule_RA(module_idP, frameP, subframeP);
    /* Copy previously scheduled UL resources (ULSCH + HARQ) */
    copy_ulreq(module_idP, frameP, subframeP);
    /* This schedules SRS in subframeP */
    schedule_SRS(module_idP, frameP, subframeP);

    /* This schedules ULSCH in subframeP (dci0) */
    if (schedule_ulsch_p != NULL) {
      schedule_ulsch_p(module_idP, frameP, subframeP);
    } else {
      LOG_E(MAC, " %s %d: schedule_ulsch_p is NULL, function not called\n", __FILE__, __LINE__);
    }

    /* This schedules UCI_SR in subframeP */
    schedule_SR(module_idP, frameP, subframeP);
    /* This schedules UCI_CSI in subframeP */
    schedule_CSI(module_idP, frameP, subframeP);
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
    /* This schedules DLSCH in subframeP for BR UE*/
    schedule_ue_spec_br(module_idP, frameP, subframeP);
#endif

    /* This schedules DLSCH in subframeP */
    if (schedule_ue_spec_p != NULL) {
      schedule_ue_spec_p(module_idP, frameP, subframeP, mbsfn_status);
    } else {
      LOG_E(MAC, " %s %d: schedule_ue_spec_p is NULL, function not called\n", __FILE__, __LINE__);
    }
  } else {
    schedule_ulsch_phy_test(module_idP, frameP, subframeP);
    schedule_ue_spec_phy_test(module_idP, frameP, subframeP, mbsfn_status);
  }

  /* Allocate CCEs for good after scheduling is done */
  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    if (is_UL_sf(&cc[CC_id], subframeP) < 0) {
      LOG_E(MAC, "is_UL_sf(&cc[%d],subframeP) < 0\n", CC_id);
      return;
    }

    if (cc[CC_id].tdd_Config == NULL || !(is_UL_sf(&cc[CC_id], subframeP))) {
      if (allocate_CCEs(module_idP, CC_id, frameP, subframeP, 2) == -1) {
        LOG_E(MAC, "allocate_CCEs failed.\n");
        return;
      }
    }
  }

  if (flexran_agent_get_mac_xface(module_idP) && subframeP == 9) {
    flexran_agent_slice_update(module_idP);
  }

  stop_meas(&(eNB->eNB_scheduler));
  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_ENB_DLSCH_ULSCH_SCHEDULER, VCD_FUNCTION_OUT);
}
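Reviewer note: the hunk above selects the ULSCH/DLSCH scheduling routines once per TTI through function pointers keyed on eNB->scheduler_mode and only calls them if they were set. For readers new to the pattern, a minimal standalone C sketch of that dispatch; the types, mode constants and scheduler functions below are simplified stand-ins for illustration, not the OAI definitions.

#include <stdio.h>

/* Simplified stand-ins for the OAI types used above (illustrative only). */
typedef int module_id_t;
typedef int frame_t;
typedef int sub_frame_t;
enum { SCHED_MODE_DEFAULT = 0, SCHED_MODE_FAIR_RR = 1 };

static void schedule_ulsch_default(module_id_t m, frame_t f, sub_frame_t sf) {
  printf("default ULSCH scheduler: %d/%d\n", f, sf);
}
static void schedule_ulsch_fair_rr(module_id_t m, frame_t f, sub_frame_t sf) {
  printf("fair-RR ULSCH scheduler: %d/%d\n", f, sf);
}

/* Pick the scheduler for this TTI through a function pointer, as the diff does. */
static void run_tti(int scheduler_mode, module_id_t m, frame_t f, sub_frame_t sf) {
  void (*schedule_ulsch_p)(module_id_t, frame_t, sub_frame_t) = NULL;

  if (scheduler_mode == SCHED_MODE_DEFAULT)
    schedule_ulsch_p = schedule_ulsch_default;
  else if (scheduler_mode == SCHED_MODE_FAIR_RR)
    schedule_ulsch_p = schedule_ulsch_fair_rr;

  if (schedule_ulsch_p != NULL)
    schedule_ulsch_p(m, f, sf);          /* dispatch through the pointer */
  else
    fprintf(stderr, "no scheduler configured, nothing called\n");
}

int main(void) {
  run_tti(SCHED_MODE_FAIR_RR, 0, 100, 3);
  return 0;
}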
void update_ue_timers(module_id_t module_idP,
                      frame_t frameP,
                      sub_frame_t subframeP) {
  protocol_ctxt_t ctxt;
  rnti_t rnti = 0;
  int CC_id = 0;
  int UE_id = -1;
  eNB_MAC_INST *eNB = RC.mac[module_idP];
  UE_list_t *UE_list = &(eNB->UE_list);
  UE_sched_ctrl_t *UE_scheduling_control = NULL;
  uint8_t volte_ul_cycle[MAX_NUM_CCs];
  uint8_t volte_ul_buffersize[MAX_NUM_CCs];

  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    volte_ul_cycle[CC_id] = eNB->volte_ul_cycle[CC_id];

    if (volte_ul_cycle[CC_id] != 0) {
      volte_ul_buffersize[CC_id] = (RC.rrc[module_idP]->configuration.radioresourceconfig[CC_id].volte_ul_buffersize * (volte_ul_cycle[CC_id] / 20));
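Reviewer note, a worked example with made-up numbers: if the configured radioresourceconfig volte_ul_buffersize is 80 and volte_ul_cycle[CC_id] is 40, the assignment above yields 80 * (40 / 20) = 160, i.e. the per-cycle buffer estimate scales with how many 20-unit periods fit into the configured VoLTE UL cycle.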
...
...
@@ -913,43 +1038,12 @@ eNB_dlsch_ulsch_scheduler(module_id_t module_idP,
      UE_scheduling_control->ul_inactivity_timer++;
      UE_scheduling_control->cqi_req_timer++;
      // various timers update
      struct rrc_eNB_ue_context_s *ue_context_p = NULL;
      RB_FOREACH(ue_context_p, rrc_ue_tree_s, &(RC.rrc[module_idP]->rrc_ue_head)) {
        if (ue_context_p->ue_context.ul_failure_timer > 0) {
          ue_context_p->ue_context.ul_failure_timer++;
        }

        if (ue_context_p->ue_context.ue_release_timer_s1 > 0) {
          ue_context_p->ue_context.ue_release_timer_s1++;
        }

        if (ue_context_p->ue_context.ue_release_timer_rrc > 0) {
          ue_context_p->ue_context.ue_release_timer_rrc++;
        }

        if ((ue_context_p->ue_context.ue_rrc_inactivity_timer > 0) && (RC.rrc[module_idP]->configuration.rrc_inactivity_timer_thres > 0)) {
          ue_context_p->ue_context.ue_rrc_inactivity_timer++;
        }

        if (ue_context_p->ue_context.ue_reestablishment_timer > 0) {
          ue_context_p->ue_context.ue_reestablishment_timer++;
        }

        if (ue_context_p->ue_context.ue_release_timer > 0) {
          ue_context_p->ue_context.ue_release_timer++;
        }
      } // end RB_FOREACH

      LOG_D(MAC, "UE %d/%x : ul_inactivity %d, cqi_req %d\n", UE_id, rnti, UE_scheduling_control->ul_inactivity_timer, UE_scheduling_control->cqi_req_timer);
      check_ul_failure(module_idP, CC_id, UE_id, frameP, subframeP);

      if (UE_scheduling_control->ue_reestablishment_reject_timer > 0) {
        UE_scheduling_control->ue_reestablishment_reject_timer++;
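Reviewer note: the RB_FOREACH pass above advances each per-UE RRC timer only when it is already non-zero, i.e. a timer is armed by being set to a non-zero value elsewhere and then counts up on every call. A self-contained sketch of that idiom; the struct below is hypothetical, not the OAI ue_context.

#include <stdio.h>

/* Hypothetical, minimal stand-in for the per-UE context (illustration only). */
struct ue_context_sketch {
  unsigned ul_failure_timer;
  unsigned ue_release_timer;
};

/* Advance only the timers that are currently armed (non-zero). */
static void tick(struct ue_context_sketch *ue) {
  if (ue->ul_failure_timer > 0) ue->ul_failure_timer++;
  if (ue->ue_release_timer > 0) ue->ue_release_timer++;
}

int main(void) {
  struct ue_context_sketch ue = { .ul_failure_timer = 1, .ue_release_timer = 0 };
  tick(&ue);
  printf("%u %u\n", ue.ul_failure_timer, ue.ue_release_timer);  /* prints "2 0" */
  return 0;
}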
...
...
@@ -1033,113 +1127,37 @@ eNB_dlsch_ulsch_scheduler(module_id_t module_idP,
      }  // end if UE active
    }  // end for loop on UE_id

#if (!defined(PRE_SCD_THREAD))
  PROTOCOL_CTXT_SET_BY_MODULE_ID(&ctxt, module_idP, ENB_FLAG_YES, NOT_A_RNTI, frameP, subframeP, module_idP);
  pdcp_run(&ctxt);
  rrc_rx_tx(&ctxt, CC_id);
  // various timers update
  struct rrc_eNB_ue_context_s *ue_context_p = NULL;
  RB_FOREACH(ue_context_p, rrc_ue_tree_s, &(RC.rrc[module_idP]->rrc_ue_head)) {
#endif
#if (LTE_RRC_VERSION >= MAKE_VERSION(10, 0, 0))

  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    if (cc[CC_id].MBMS_flag > 0) {
      start_meas(&RC.mac[module_idP]->schedule_mch);
      mbsfn_status[CC_id] = schedule_MBMS(module_idP, CC_id, frameP, subframeP);

      if (mbsfn_status[CC_id] < 0) {
        LOG_E(MAC, "mbsfn_status[%d] < 0\n", CC_id);
        return;
      }

      stop_meas(&RC.mac[module_idP]->schedule_mch);
    }

    if (ue_context_p->ue_context.ul_failure_timer > 0) {
      ue_context_p->ue_context.ul_failure_timer++;
    }

#endif
  static int debug_flag = 0;
  void (*schedule_ulsch_p)(module_id_t module_idP, frame_t frameP, sub_frame_t subframe) = NULL;
  void (*schedule_ue_spec_p)(module_id_t module_idP, frame_t frameP, sub_frame_t subframe, int *mbsfn_flag) = NULL;

  if (eNB->scheduler_mode == SCHED_MODE_DEFAULT) {
    schedule_ulsch_p = schedule_ulsch;
    schedule_ue_spec_p = schedule_dlsch;
  } else if (eNB->scheduler_mode == SCHED_MODE_FAIR_RR) {
    memset(dlsch_ue_select, 0, sizeof(dlsch_ue_select));
    schedule_ulsch_p = schedule_ulsch_fairRR;
    schedule_ue_spec_p = schedule_ue_spec_fairRR;

    if (ue_context_p->ue_context.ue_release_timer_s1 > 0) {
      ue_context_p->ue_context.ue_release_timer_s1++;
    }

  if (debug_flag == 0) {
    LOG_E(MAC, "SCHED_MODE = %d\n", eNB->scheduler_mode);
    debug_flag = 1;
  }

  /* This schedules MIB */
  if ((subframeP == 0) && (frameP & 3) == 0)
    schedule_mib(module_idP, frameP, subframeP);

  if (get_softmodem_params()->phy_test == 0) {
    /* This schedules SI for legacy LTE and eMTC starting in subframeP */
    schedule_SI(module_idP, frameP, subframeP);
    /* This schedules Paging in subframeP */
    schedule_PCH(module_idP, frameP, subframeP);
    /* This schedules Random-Access for legacy LTE and eMTC starting in subframeP */
    schedule_RA(module_idP, frameP, subframeP);
    /* Copy previously scheduled UL resources (ULSCH + HARQ) */
    copy_ulreq(module_idP, frameP, subframeP);
    /* This schedules SRS in subframeP */
    schedule_SRS(module_idP, frameP, subframeP);

    /* This schedules ULSCH in subframeP (dci0) */
    if (schedule_ulsch_p != NULL) {
      schedule_ulsch_p(module_idP, frameP, subframeP);
    } else {
      LOG_E(MAC, " %s %d: schedule_ulsch_p is NULL, function not called\n", __FILE__, __LINE__);

    if (ue_context_p->ue_context.ue_release_timer_rrc > 0) {
      ue_context_p->ue_context.ue_release_timer_rrc++;
    }

    /* This schedules UCI_SR in subframeP */
    schedule_SR(module_idP, frameP, subframeP);
    /* This schedules UCI_CSI in subframeP */
    schedule_CSI(module_idP, frameP, subframeP);
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
    /* This schedules DLSCH in subframeP for BR UE*/
    schedule_ue_spec_br(module_idP, frameP, subframeP);
#endif

    /* This schedules DLSCH in subframeP */
    if (schedule_ue_spec_p != NULL) {
      schedule_ue_spec_p(module_idP, frameP, subframeP, mbsfn_status);
    } else {
      LOG_E(MAC, " %s %d: schedule_ue_spec_p is NULL, function not called\n", __FILE__, __LINE__);

    if ((ue_context_p->ue_context.ue_rrc_inactivity_timer > 0) && (RC.rrc[module_idP]->configuration.rrc_inactivity_timer_thres > 0)) {
      ue_context_p->ue_context.ue_rrc_inactivity_timer++;
    }
  } else {
    schedule_ulsch_phy_test(module_idP, frameP, subframeP);
    schedule_ue_spec_phy_test(module_idP, frameP, subframeP, mbsfn_status);
  }

  /* Allocate CCEs for good after scheduling is done */
  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    if (is_UL_sf(&cc[CC_id], subframeP) < 0) {
      LOG_E(MAC, "is_UL_sf(&cc[%d],subframeP) < 0\n", CC_id);
      return;
    }

    if (cc[CC_id].tdd_Config == NULL || !(is_UL_sf(&cc[CC_id], subframeP))) {
      if (allocate_CCEs(module_idP, CC_id, frameP, subframeP, 2) == -1) {
        LOG_E(MAC, "allocate_CCEs failed.\n");
        return;
      }
    }

    if (ue_context_p->ue_context.ue_reestablishment_timer > 0) {
      ue_context_p->ue_context.ue_reestablishment_timer++;
    }

  if (flexran_agent_get_mac_xface(module_idP) && subframeP == 9) {
    flexran_agent_slice_update(module_idP);

    if (ue_context_p->ue_context.ue_release_timer > 0) {
      ue_context_p->ue_context.ue_release_timer++;
    }
  } // end RB_FOREACH

  stop_meas(&(eNB->eNB_scheduler));
  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_ENB_DLSCH_ULSCH_SCHEDULER, VCD_FUNCTION_OUT);
  select_ul_ue_candidate(module_idP, frameP, subframeP);
  select_dl_ue_candidate(module_idP, frameP, subframeP);
}
openair2/LAYER2/MAC/eNB_scheduler_fairRR.c  (view file @ 83e067dc)
...
...
@@ -116,6 +116,7 @@ int dl_dtch_num;
  UE_list_t *UE_list = &(eNB->UE_list);
  UE_sched_ctrl_t *UE_scheduling_control = NULL;
  int dl_buffer = 0;
  uint8_t volte_dl_cycle[MAX_NUM_CCs];

  for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
    if (pre_scd_activeUE[UE_id] != TRUE)
...
...
@@ -133,8 +134,12 @@ int dl_dtch_num;
      continue;

    for (lc_id = DCCH; lc_id < MAX_NUM_LCID; lc_id++) {
      /* VoLTE Scheduling timing cal */
      if (UE_scheduling_control->volte_configured == TRUE) {
        volte_dl_cycle[CC_id] = eNB->volte_dl_cycle[CC_id];

        if (lc_id == UE_scheduling_control->volte_lcid) {
          if (UE_scheduling_control->dl_periodic_timer < volte_dl_cycle[CC_id] && UE_scheduling_control->dl_volte_ue_select_flag == FALSE) {
            /* VoLTE no Scheduling */
            continue;
          }
        }
...
...
@@ -165,6 +170,9 @@ int dl_dtch_num;
        if (lc_id >= DTCH) {
          dl_dtch_list[dl_dtch_num] = lc_id;
          dl_dtch_num++;

          if (lc_id == UE_scheduling_control->volte_lcid) {
            UE_scheduling_control->dl_volte_ue_select_flag = TRUE;  /* VoLTE UE select flag set */
          }
        }
      }
    }
...
...
@@ -181,88 +189,6 @@ int dl_dtch_num;
  }
}

void dlsch_scheduler_nb_rbs_required_lcid(module_id_t module_idP,
                                          frame_t frameP,
                                          sub_frame_t subframeP) {
  int CC_id = 0, UE_id, lc_id, drb_id;
  rnti_t rnti;
  mac_rlc_status_resp_t rlc_status;
  rrc_eNB_ue_context_t *ue_contextP = NULL;
  int header_length_last;
  int header_length_total;
  int dl_dtch_num;
  int dl_dtch_list[MAX_NUM_DTCH];
  eNB_MAC_INST *eNB = RC.mac[module_idP];
  UE_list_t *UE_list = &(eNB->UE_list);
  UE_sched_ctrl_t *UE_scheduling_control = NULL;
  uint8_t volte_dl_cycle[MAX_NUM_CCs];
  int dl_buffer;

  for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
    if (pre_scd_activeUE[UE_id] != TRUE) {
      continue;
    }

    /* VoLTE Scheduling timing cal */
    UE_scheduling_control = &(UE_list->UE_sched_ctrl[UE_id]);
    lc_id = UE_scheduling_control->volte_lcid;

    if (UE_scheduling_control->volte_configured == TRUE) {
      /* VoLTE Check */
      volte_dl_cycle[CC_id] = eNB->volte_dl_cycle[CC_id];

      if (UE_scheduling_control->dl_periodic_timer < volte_dl_cycle[CC_id] && UE_scheduling_control->dl_volte_ue_select_flag == FALSE) {
        /* VoLTE no Scheduling */
        continue;
      } else {
        /* VoLTE Scheduling */
        UE_scheduling_control->dl_periodic_timer = 0;
      }
    } else {
      continue;  /* volte_configured = FALSE */
    }

    header_length_total = 0;
    dl_dtch_num = 0;
    rnti = UE_RNTI(module_idP, UE_id);
    ue_contextP = rrc_eNB_get_ue_context(RC.rrc[module_idP], rnti);

    if (ue_contextP == NULL) {
      continue;
    }

    drb_id = lc_id - 2;

    if (ue_contextP->ue_context.DRB_active[drb_id] == 0) {
      continue;
    }

    rlc_status = mac_rlc_status_ind(module_idP, rnti, module_idP, frameP, subframeP, ENB_FLAG_YES, MBMS_FLAG_NO, lc_id, 0
#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
                                    , 0, 0
#endif
                                   );
    dl_buffer = dl_buffer_total[CC_id][UE_id];
    dl_buffer += rlc_status.bytes_in_buffer;  //storing the total dlsch buffer

    if (rlc_status.bytes_in_buffer > 0) {
      header_length_last = 1 + 1 + (rlc_status.bytes_in_buffer >= 128);
      header_length_total += header_length_last;
      dl_dtch_list[dl_dtch_num] = lc_id;
      dl_dtch_num++;
      UE_scheduling_control->dl_volte_ue_select_flag = TRUE;  /* VoLTE UE select flag set */
    }

    sort_lcid_priority(module_idP, UE_id, dl_dtch_num, &dl_dtch_list[0]);

    if (header_length_total) {
      header_length_total -= header_length_last;
      header_length_total++;
    }

    dl_buffer_total[CC_id][UE_id] = dl_buffer + header_length_total;
  }
}
#endif

int cc_id_end(uint8_t *cc_id_flag) {
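Reviewer note: the removed dlsch_scheduler_nb_rbs_required_lcid() above estimates the per-SDU MAC subheader overhead as 1 + 1 + (bytes_in_buffer >= 128), i.e. two bytes plus a third once the length field needs the longer format. A standalone sketch of that estimate, for illustration only (not OAI code):

#include <stdio.h>

/* Rough subheader size estimate mirroring the expression in the hunk above:
   2 bytes, plus 1 extra byte when the buffered RLC payload is >= 128 bytes. */
static int mac_subheader_bytes(int sdu_bytes) {
  return 1 + 1 + (sdu_bytes >= 128);
}

int main(void) {
  printf("%d\n", mac_subheader_bytes(40));   /* prints 2 */
  printf("%d\n", mac_subheader_bytes(300));  /* prints 3 */
  return 0;
}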
...
...
@@ -278,209 +204,103 @@ int cc_id_end(uint8_t *cc_id_flag ) {
  return end_flag;
}

void dlsch_scheduler_pre_ue_select_fairRR(
#define MAX_UE_MULTIPLEX 16
int16_t dl_ue_candidate[MAX_NUM_CCs][MAX_UE_MULTIPLEX];
int16_t dl_ue_candidate_volte[MAX_NUM_CCs][MAX_UE_MULTIPLEX];
int16_t ul_ue_candidate[MAX_UE_MULTIPLEX];
int16_t ul_ue_candidate_volte[MAX_UE_MULTIPLEX];
void select_dl_ue_candidate(module_id_t module_idP,
                            frame_t frameP,
                            sub_frame_t subframeP,
                            int *mbsfn_flag,
                            uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
                            DLSCH_UE_SELECT dlsch_ue_select[MAX_NUM_CCs]) {
                            sub_frame_t subframeP) {
  eNB_MAC_INST *eNB = RC.mac[module_idP];
  COMMON_channels_t *cc = eNB->common_channels;
  UE_list_t *UE_list = &eNB->UE_list;
  UE_sched_ctrl_t *ue_sched_ctl;
  uint8_t CC_id;
  int UE_id;
  int last_UE_id[MAX_NUM_CCs];
  unsigned char harq_pid = 0;
  rnti_t rnti;
  uint16_t i;
  unsigned char aggregation;
  int format_flag;
  nfapi_dl_config_request_body_t *DL_req;
  nfapi_dl_config_request_pdu_t *dl_config_pdu;
  uint16_t dlsch_ue_max_num[MAX_NUM_CCs] = {0};
  uint16_t dlsch_ue_max_num_volte[MAX_NUM_CCs] = {0};
  uint16_t dlsch_ue_max_num_normal[MAX_NUM_CCs] = {0};
  uint16_t dlsch_ue_num_volte[MAX_NUM_CCs] = {0};
  uint16_t saved_dlsch_dci[MAX_NUM_CCs] = {0};
  uint8_t end_flag[MAX_NUM_CCs] = {0};
  uint8_t dlsch_ue_max_flag[MAX_NUM_CCs] = {0};
  unsigned char round_1 = 0;
  unsigned char round_2 = 0;
  UE_sched_ctrl_t *UE_scheduling_control = NULL;
  // Initialization
  uint16_t dlsch_ue_max_num[MAX_NUM_CCs] = {0};
  uint16_t dlsch_ue_max_num_volte[MAX_NUM_CCs] = {0};
  int index = 0, i;
  int UE_id_idx;
  int tdd_sfa;

  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    dlsch_ue_max_num[CC_id] = (uint16_t) RC.rrc[module_idP]->configuration.radioresourceconfig[CC_id].ue_multiple_max;

    if (eNB->volte_dl_cycle[CC_id] != 0) {
      dlsch_ue_max_num_volte[CC_id] = (uint16_t)(dlsch_ue_max_num[CC_id] / 2);
      dlsch_ue_max_num_normal[CC_id] = dlsch_ue_max_num[CC_id] - dlsch_ue_max_num_volte[CC_id];
    } else {
      dlsch_ue_max_num_normal[CC_id] = dlsch_ue_max_num[CC_id];

    for (i = 0; i < MAX_UE_MULTIPLEX; i++) {
      dl_ue_candidate[CC_id][i] = -1;
      dl_ue_candidate_volte[CC_id][i] = -1;
    }

    // tmp last_dlsch_ue_id_volte
    last_UE_id[CC_id] = last_dlsch_ue_id_volte[CC_id];
    // save origin DL PDU number
    DL_req = &eNB->DL_req[CC_id].dl_config_request_body;
    saved_dlsch_dci[CC_id] = DL_req->number_pdu;
  }

  // for TDD: check that we have to act here, otherwise return
  if (cc[0].tdd_Config) {
    tdd_sfa = cc[0].tdd_Config->subframeAssignment;

  // Insert DLSCH(retransmission) UE into selected UE list
  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    if (mbsfn_flag[CC_id] > 0) {
      continue;
    }

    DL_req = &eNB->DL_req[CC_id].dl_config_request_body;

    for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
      if (UE_list->active[UE_id] == FALSE) {
        continue;
      }

      rnti = UE_RNTI(module_idP, UE_id);

      if (rnti == NOT_A_RNTI) {
        continue;
      }

    switch (subframeP) {
      case 0:
        // always continue
        break;

      if (mac_eNB_get_rrc_status(module_idP, rnti) < RRC_CONNECTED) {
        continue;
      }

      case 1:
        return;
        break;

      if (dlsch_ue_select[CC_id].ue_num == dlsch_ue_max_num_normal[CC_id]) {

      case 2:
        return;
        break;
      }

      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
      harq_pid = frame_subframe2_dl_harq_pid(cc[CC_id].tdd_Config, frameP, subframeP);
      round_1 = ue_sched_ctl->round[CC_id][harq_pid][TB1];
      round_2 = ue_sched_ctl->round[CC_id][harq_pid][TB2];

      case 3:
        if ((tdd_sfa != 2) && (tdd_sfa != 5))
          return;

      if ((round_1 != 8) || (round_2 != 8)) {
        // retransmission
        if (UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid] == 0) {
          continue;
        }

        break;

        switch (get_tmode(module_idP, CC_id, UE_id)) {
          case 0:
            LOG_E(MAC, "get_tmode failed\n");
            return;

          case 1:
          case 2:
          case 7:
            if (get_bw_index(module_idP, CC_id) == -1) {
              LOG_E(MAC, "get_bw_index failed\n");
              return;
            }

            aggregation = get_aggregation(get_bw_index(module_idP, CC_id), ue_sched_ctl->dl_cqi[CC_id], format1);
            break;

      case 4:
        if ((tdd_sfa != 1) && (tdd_sfa != 2) && (tdd_sfa != 4) && (tdd_sfa != 5))
          return;

          case 3:
            if (get_bw_index(module_idP, CC_id) == -1) {
              LOG_E(MAC, "get_bw_index failed\n");
              return;
            }

            aggregation = get_aggregation(get_bw_index(module_idP, CC_id), ue_sched_ctl->dl_cqi[CC_id], format2A);
            break;

        break;

          default:
            LOG_W(MAC, "Unsupported transmission mode %d\n", get_tmode(module_idP, CC_id, UE_id));
            aggregation = 2;
            break;
        }

        if (UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status == RRC_HO_EXECUTION) {
          aggregation = 4;
        }

      case 5:
        break;

        format_flag = 1;

      case 6:
      case 7:
        if ((tdd_sfa != 3) && (tdd_sfa != 4) && (tdd_sfa != 5))
          return;

        if (!CCE_allocation_infeasible(module_idP, CC_id, format_flag, subframeP, aggregation, rnti)) {
          dl_config_pdu = &DL_req->dl_config_pdu_list[DL_req->number_pdu];
          dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE;
          dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti;
          dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = (format_flag == 0) ? 2 : 1;
          dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = aggregation;
          DL_req->number_pdu++;
          nb_rbs_required[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
          // Insert DLSCH(retransmission) UE into selected UE list
          dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].UE_id = UE_id;
          dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].ue_priority = SCH_DL_RETRANS;
          dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].rnti = rnti;
          dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].nb_rb = nb_rbs_required[CC_id][UE_id];
          dlsch_ue_select[CC_id].ue_num++;

        break;

          if (round_1 != 8) {
            ue_sched_ctl->select_tb[CC_id][harq_pid] = TB1;
          } else if (round_2 != 8) {
            ue_sched_ctl->select_tb[CC_id][harq_pid] = TB2;
          }

      case 8:
        if ((tdd_sfa != 2) && (tdd_sfa != 3) && (tdd_sfa != 4) && (tdd_sfa != 5))
          return;

          if (dlsch_ue_select[CC_id].ue_num == dlsch_ue_max_num[CC_id]) {
            end_flag[CC_id] = 1;
            break;
          }
        } else {
          // if (cc[CC_id].tdd_Config != NULL) { //TDD
          //   set_ue_dai (subframeP,
          //               UE_id,
          //               CC_id,
          //               cc[CC_id].tdd_Config->subframeAssignment,
          //               UE_list);
          //   // update UL DAI after DLSCH scheduling
          //   set_ul_DAI(module_idP,UE_id,CC_id,frameP,subframeP);
          // }

        break;

          add_ue_dlsch_info(module_idP, CC_id, UE_id, subframeP, S_DL_NONE, rnti);
          end_flag[CC_id] = 1;
          break;
        }
      }
    }
  }

      case 9:
        if (tdd_sfa == 0)
          return;

  if (cc_id_end(end_flag) == 1) {
    for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
      DL_req = &eNB->DL_req[CC_id].dl_config_request_body;
      DL_req->number_pdu = saved_dlsch_dci[CC_id];
      break;
    }

    return;
  }

  // Insert DLSCH(VoLTE) UE into selected UE list
  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    if (mbsfn_flag[CC_id] > 0) {
      continue;
    }

    DL_req = &eNB->DL_req[CC_id].dl_config_request_body;

    // (UE_id > last_dlsch_ue_id[CC_id])
    for (UE_id = (last_dlsch_ue_id_volte[CC_id] + 1); UE_id < NUMBER_OF_UE_MAX; UE_id++) {
      if (end_flag[CC_id] == 1) {
        break;
      }

      if (dlsch_ue_max_flag[CC_id] == 1) {
        break;
      }

    // 1loop last_dlsch_ue_id_volte+1 -> last_dlsch_ue_id_volte
    for (UE_id_idx = 0; UE_id_idx < NUMBER_OF_UE_MAX; UE_id_idx++) {
      UE_id = (last_dlsch_ue_id_volte[CC_id] + 1 + UE_id_idx) % NUMBER_OF_UE_MAX;

      if (UE_list->active[UE_id] == FALSE) {
        continue;
...
...
@@ -506,109 +326,340 @@ void dlsch_scheduler_pre_ue_select_fairRR(
      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];

      for (i = 0; i < dlsch_ue_select[CC_id].ue_num; i++) {
        if (dlsch_ue_select[CC_id].list[i].UE_id == UE_id) {
          break;
        }
      }

      if (i < dlsch_ue_select[CC_id].ue_num)
        continue;

      harq_pid = frame_subframe2_dl_harq_pid(cc[CC_id].tdd_Config, frameP, subframeP);
      round_1 = ue_sched_ctl->round[CC_id][harq_pid][TB1];
      round_2 = ue_sched_ctl->round[CC_id][harq_pid][TB2];

      if ((round_1 == 8) && (round_2 == 8)) {
        if (nb_rbs_required[CC_id][UE_id] == 0) {
          if (dl_buffer_total[CC_id][UE_id] == 0) {
            continue;
          }

          dl_ue_candidate_volte[CC_id][index] = UE_id;
          index++;

          if (index == dlsch_ue_max_num_volte[CC_id])
            break;
        }
      }
    }
    // CC_id
    index = 0;

  // Insert DLSCH(first transmission) UE into selected UE list (UE_id > last_dlsch_ue_id[CC_id])
  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
      switch (get_tmode(module_idP, CC_id, UE_id)) {
        case 1:
        case 2:
        case 7:
          aggregation = get_aggregation(get_bw_index(module_idP, CC_id), ue_sched_ctl->dl_cqi[CC_id], format1);
          break;

    for (UE_id_idx = 0; UE_id_idx < NUMBER_OF_UE_MAX; UE_id_idx++) {
      UE_id = (last_dlsch_ue_id[CC_id] + 1 + UE_id_idx) % NUMBER_OF_UE_MAX;

        case 3:
          aggregation = get_aggregation(get_bw_index(module_idP, CC_id), ue_sched_ctl->dl_cqi[CC_id], format2A);
          break;

      if (UE_list->active[UE_id] == FALSE) {
        continue;
      }

        default:
          LOG_W(MAC, "Unsupported transmission mode %d\n", get_tmode(module_idP, CC_id, UE_id));
          aggregation = 2;
          break;

      rnti = UE_RNTI(module_idP, UE_id);

      if (rnti == NOT_A_RNTI) {
        continue;
      }

      if (mac_eNB_get_rrc_status(module_idP, rnti) < RRC_CONNECTED) {
        continue;

      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
      harq_pid = frame_subframe2_dl_harq_pid(cc[CC_id].tdd_Config, frameP, subframeP);
      round_1 = ue_sched_ctl->round[CC_id][harq_pid][TB1];
      round_2 = ue_sched_ctl->round[CC_id][harq_pid][TB2];

      if ((round_1 == 8) && (round_2 == 8)) {
        if (dl_buffer_total[CC_id][UE_id] == 0) {
          continue;
        }

        dl_ue_candidate[CC_id][index] = UE_id;
        index++;

        if (ue_sched_ctl->dl_volte_ue_select_flag == TRUE) {
          ue_sched_ctl->dl_volte_ue_select_flag = FALSE;  /* VoLTE UE select flag set -> init */
        }

        if (index == dlsch_ue_max_num[CC_id])
          break;
      }
    }
  }
}

          format_flag = 1;

void select_ul_ue_candidate(module_id_t module_idP,
                            frame_t frameP,
                            sub_frame_t subframeP) {
  eNB_MAC_INST *eNB = RC.mac[module_idP];
  COMMON_channels_t *cc = &eNB->common_channels[0];
  int CC_id, UE_id;
  UE_list_t *UE_list = &eNB->UE_list;
  UE_sched_ctrl_t *UE_sched_ctl = NULL;
  int ret;
  uint16_t i;
  int index = 0;
  int index_volte = 0;
  uint8_t volte_lcg;
  int UE_id_idx;
  rnti_t rnti;
  uint8_t ulsch_ue_max_num[MAX_NUM_CCs];
  uint8_t cc_id_flag[MAX_NUM_CCs] = {0};
  int rrc_status;
  int tdd_sfa;
  int sched_frame = frameP;
  int sched_subframe = (subframeP + 4) % 10;

  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    ulsch_ue_max_num[CC_id] = RC.rrc[module_idP]->configuration.radioresourceconfig[CC_id].ue_multiple_max;
  }

  for (i = 0; i < MAX_UE_MULTIPLEX; i++) {
    ul_ue_candidate[i] = -1;
    ul_ue_candidate_volte[i] = -1;
  }

  // for TDD: check subframes where we have to act and return if nothing should be done now
  if (cc->tdd_Config) {
    tdd_sfa = cc->tdd_Config->subframeAssignment;

          if (!CCE_allocation_infeasible(module_idP, CC_id, format_flag, subframeP, aggregation, rnti)) {
            dl_config_pdu = &DL_req->dl_config_pdu_list[DL_req->number_pdu];
            dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE;
            dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti;
            dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = (format_flag == 0) ? 2 : 1;
            dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = aggregation;
            DL_req->number_pdu++;
            // Insert DLSCH(first transmission) UE into selected selected UE list
            dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].ue_priority = SCH_DL_FIRST;
            dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].nb_rb = nb_rbs_required[CC_id][UE_id];
            dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].UE_id = UE_id;
            dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].rnti = rnti;
            dlsch_ue_select[CC_id].ue_num++;

    switch (subframeP) {
      case 0:
        if ((tdd_sfa == 0) || (tdd_sfa == 3))
          sched_subframe = 4;
        else if (tdd_sfa == 6)
          sched_subframe = 7;
        else
          return;

            dlsch_ue_num_volte[CC_id]++;

        break;

            ue_sched_ctl->select_tb[CC_id][harq_pid] = TB1;
            last_UE_id[CC_id] = UE_id;

      case 1:
        if ((tdd_sfa == 0) || (tdd_sfa == 1))
          sched_subframe = 7;
        else if (tdd_sfa == 6)
          sched_subframe = 8;
        else
          return;

            if (dlsch_ue_num_volte[CC_id] == dlsch_ue_max_num_volte[CC_id]) {
              dlsch_ue_max_flag[CC_id] = 1;
              break;
            }

        break;

      case 2:
        // Don't schedule UL in subframe 2 for TDD
        return;

      case 3:
        if (tdd_sfa == 2)
          sched_subframe = 7;
        else
          return;

        break;

      case 4:
        if (tdd_sfa == 1)
          sched_subframe = 8;
        else
          return;

        break;

      case 5:
        if (tdd_sfa == 0)
          sched_subframe = 9;
        else if (tdd_sfa == 6)
          sched_subframe = 2;
        else
          return;

        break;

      case 6:
        if (tdd_sfa == 0 || tdd_sfa == 1)
          sched_subframe = 2;
        else if (tdd_sfa == 6)
          sched_subframe = 3;
        else
          return;

        break;

      case 7:
        return;

      case 8:
        if ((tdd_sfa >= 2) && (tdd_sfa <= 5))
          sched_subframe = 2;
        else
          return;

        break;

      case 9:
        if ((tdd_sfa == 1) || (tdd_sfa == 3) || (tdd_sfa == 4))
          sched_subframe = 3;
        else if (tdd_sfa == 6)
          sched_subframe = 4;
        else
          return;

        break;
    }
  }

  if (sched_subframe < subframeP)
    sched_frame++;

  // Preparation of VoLTE UE
  for (UE_id_idx = 0; UE_id_idx < NUMBER_OF_UE_MAX; UE_id_idx++) {
    UE_id = (UE_id_idx + last_ulsch_ue_id_volte[CC_id] + 1) % NUMBER_OF_UE_MAX;

    if (UE_list->active[UE_id] == FALSE)
      continue;

    rnti = UE_RNTI(module_idP, UE_id);

    if (rnti == NOT_A_RNTI)
      continue;

    CC_id = UE_PCCID(module_idP, UE_id);

    if (UE_list->UE_template[CC_id][UE_id].configured == FALSE)
      continue;

    if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
      continue;

    if ((index_volte >= ulsch_ue_max_num[CC_id]) || (cc_id_flag[CC_id] == 1)) {
      cc_id_flag[CC_id] = 1;
      ret = cc_id_end(cc_id_flag);

      if (ret == 0) {
        continue;
      }

      if (ret == 1) {
        return;
      }
    }

    int bytes_to_schedule = UE_list->UE_template[CC_id][UE_id].estimated_ul_buffer - UE_list->UE_template[CC_id][UE_id].scheduled_ul_bytes;

    if (bytes_to_schedule < 0) {
      bytes_to_schedule = 0;
      UE_list->UE_template[CC_id][UE_id].scheduled_ul_bytes = 0;
    }

    UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];

    if ((UE_sched_ctl->volte_configured == TRUE) && (index_volte < ulsch_ue_max_num[CC_id]) && (UE_sched_ctl->ul_periodic_timer_exp_flag == TRUE)) {
      volte_lcg = UE_sched_ctl->volte_lcg;

      if ((UE_list->UE_template[CC_id][UE_id].ul_buffer_info[volte_lcg] > 0) && (bytes_to_schedule > 0)) {
        ul_ue_candidate_volte[index_volte] = UE_id;
        index_volte++;
        continue;
      }
    }
  }

  // Preparation of Newtx UE List
  for (UE_id_idx = 0; UE_id_idx < NUMBER_OF_UE_MAX; UE_id_idx++) {
    UE_id = (UE_id_idx + last_ulsch_ue_id[CC_id] + 1) % NUMBER_OF_UE_MAX;

    if (UE_list->active[UE_id] == FALSE)
      continue;

    rnti = UE_RNTI(module_idP, UE_id);

    if (rnti == NOT_A_RNTI)
      continue;

    CC_id = UE_PCCID(module_idP, UE_id);

    if (UE_list->UE_template[CC_id][UE_id].configured == FALSE)
      continue;

    if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
      continue;

    if ((index >= ulsch_ue_max_num[CC_id]) || (cc_id_flag[CC_id] == 1)) {
      cc_id_flag[CC_id] = 1;
      ret = cc_id_end(cc_id_flag);

      if (ret == 0) {
        continue;
      }

      if (ret == 1) {
        return;
      }
    }

    int bytes_to_schedule = UE_list->UE_template[CC_id][UE_id].estimated_ul_buffer - UE_list->UE_template[CC_id][UE_id].scheduled_ul_bytes;

    if (bytes_to_schedule < 0) {
      bytes_to_schedule = 0;
      UE_list->UE_template[CC_id][UE_id].scheduled_ul_bytes = 0;
    }

    if ((index + index_volte < ulsch_ue_max_num[CC_id])) {
      if (bytes_to_schedule > 0) {
        ul_ue_candidate[index] = UE_id;
        index++;
        continue;
      }

      if (UE_list->UE_template[CC_id][UE_id].ul_SR > 0) {
        ul_ue_candidate[index] = UE_id;
        index++;
        continue;
      }

      rrc_status = mac_eNB_get_rrc_status(module_idP, rnti);
      UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];

      if (((UE_sched_ctl->ul_inactivity_timer > 64) && (UE_sched_ctl->ul_scheduled == 0)) ||
          ((UE_sched_ctl->ul_inactivity_timer > 10) && (UE_sched_ctl->ul_scheduled == 0) && (rrc_status < RRC_CONNECTED)) ||
          ((UE_sched_ctl->cqi_req_timer > 64) && (UE_sched_ctl->ul_scheduled == 0) && (!(is_S_sf(&eNB->common_channels[CC_id], subframeP))) && (rrc_status >= RRC_CONNECTED))) {
        ul_ue_candidate[index] = UE_id;
        index++;
        continue;
      }
    }
  }
}

void dlsch_scheduler_pre_ue_select_fairRR(module_id_t module_idP,
                                          frame_t frameP,
                                          sub_frame_t subframeP,
                                          int *mbsfn_flag,
                                          uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
                                          DLSCH_UE_SELECT dlsch_ue_select[MAX_NUM_CCs]) {
  eNB_MAC_INST *eNB = RC.mac[module_idP];
  COMMON_channels_t *cc = eNB->common_channels;
  UE_list_t *UE_list = &eNB->UE_list;
  UE_sched_ctrl_t *ue_sched_ctl;
  uint8_t CC_id;
  int UE_id;
  int last_UE_id[MAX_NUM_CCs];
  unsigned char harq_pid = 0;
  rnti_t rnti;
  uint16_t i;
  unsigned char aggregation;
  int format_flag;
  nfapi_dl_config_request_body_t *DL_req;
  nfapi_dl_config_request_pdu_t *dl_config_pdu;
  uint16_t dlsch_ue_max_num[MAX_NUM_CCs] = {0};
  uint16_t dlsch_ue_max_num_volte[MAX_NUM_CCs] = {0};
  uint16_t dlsch_ue_max_num_normal[MAX_NUM_CCs] = {0};
  uint16_t dlsch_ue_num_volte[MAX_NUM_CCs] = {0};
  uint16_t saved_dlsch_dci[MAX_NUM_CCs] = {0};
  uint8_t end_flag[MAX_NUM_CCs] = {0};
  uint8_t dlsch_ue_max_flag[MAX_NUM_CCs] = {0};
  unsigned char round_1 = 0;
  unsigned char round_2 = 0;
  int index;
  // Initialization

  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    dlsch_ue_max_num[CC_id] = (uint16_t) RC.rrc[module_idP]->configuration.radioresourceconfig[CC_id].ue_multiple_max;

    if (eNB->volte_dl_cycle[CC_id] != 0) {
      dlsch_ue_max_num_volte[CC_id] = (uint16_t)(dlsch_ue_max_num[CC_id] / 2);
      dlsch_ue_max_num_normal[CC_id] = dlsch_ue_max_num[CC_id] - dlsch_ue_max_num_volte[CC_id];
    } else {
      dlsch_ue_max_num_normal[CC_id] = dlsch_ue_max_num[CC_id];
    }

    // tmp last_dlsch_ue_id_volte
    last_UE_id[CC_id] = last_dlsch_ue_id_volte[CC_id];
    // save origin DL PDU number
    DL_req = &eNB->DL_req[CC_id].dl_config_request_body;
    saved_dlsch_dci[CC_id] = DL_req->number_pdu;
  }

          if (dlsch_ue_select[CC_id].ue_num == dlsch_ue_max_num[CC_id]) {
            end_flag[CC_id] = 1;
            break;
          }
        }
      } else {
        add_ue_dlsch_info(module_idP, CC_id, UE_id, subframeP, S_DL_NONE, rnti);
        end_flag[CC_id] = 1;
        break;
      }
    }

  // Insert DLSCH(retransmission) UE into selected UE list
  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    if (mbsfn_flag[CC_id] > 0) {
      continue;
    }

    // (UE_id <= last_dlsch_ue_id[CC_id])
    for (UE_id = 0; UE_id <= last_dlsch_ue_id_volte[CC_id]; UE_id++) {
      if (end_flag[CC_id] == 1) {
        break;
      }

      if (dlsch_ue_max_flag[CC_id] == 1) {
        break;
      }

    DL_req = &eNB->DL_req[CC_id].dl_config_request_body;

    for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
      if (UE_list->active[UE_id] == FALSE) {
        continue;
      }

      rnti = UE_RNTI(module_idP, UE_id);
      rnti = UE_RNTI(module_idP, UE_id);

      if (rnti == NOT_A_RNTI) {
      if (rnti == NOT_A_RNTI) {
        continue;
      }
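Reviewer note: both select_dl_ue_candidate() and select_ul_ue_candidate() above walk the UE array starting just after the UE picked on the previous call and wrap around with a modulo, which is what keeps the round-robin fair and prevents starvation. A minimal, self-contained sketch of that iteration pattern; the NUMBER_OF_UE_MAX value here is chosen arbitrarily for illustration.

#include <stdio.h>

#define NUMBER_OF_UE_MAX 16   /* illustrative value, not OAI's */

/* Fair round-robin walk: start just after the last-selected UE and wrap,
   so every UE is visited exactly once per call. */
static void walk_candidates(int last_selected_ue_id) {
  for (int idx = 0; idx < NUMBER_OF_UE_MAX; idx++) {
    int ue_id = (last_selected_ue_id + 1 + idx) % NUMBER_OF_UE_MAX;
    printf("visit UE %d\n", ue_id);
  }
}

int main(void) {
  walk_candidates(13);   /* visits 14, 15, 0, 1, ..., 13 */
  return 0;
}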
...
...
@@ -616,31 +667,19 @@ void dlsch_scheduler_pre_ue_select_fairRR(
        continue;
      }

      /* VoLTE Judge */
      UE_scheduling_control = &(UE_list->UE_sched_ctrl[UE_id]);

      if ((UE_scheduling_control->volte_configured == FALSE) ||      /* VoLTE Check */
          (UE_scheduling_control->dl_volte_ue_select_flag == FALSE)) {  /* VoLTE UE select flag check */
        continue;

      if (dlsch_ue_select[CC_id].ue_num == dlsch_ue_max_num_normal[CC_id]) {
        break;
      }

      UE_scheduling_control->dl_volte_ue_select_flag = FALSE;  /* VoLTE UE select flag set -> init */
      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];

      for (i = 0; i < dlsch_ue_select[CC_id].ue_num; i++) {
        if (dlsch_ue_select[CC_id].list[i].UE_id == UE_id) {
          break;
        }
      }

      if (i < dlsch_ue_select[CC_id].ue_num)
        continue;

      harq_pid = frame_subframe2_dl_harq_pid(cc[CC_id].tdd_Config, frameP, subframeP);
      round_1 = ue_sched_ctl->round[CC_id][harq_pid][TB1];
      round_2 = ue_sched_ctl->round[CC_id][harq_pid][TB2];

      if ((round_1 == 8) && (round_2 == 8)) {
        if (nb_rbs_required[CC_id][UE_id] == 0) {

      if (((UE_sched_ctl->ul_inactivity_timer > 64) && (UE_sched_ctl->ul_scheduled == 0)) ||
          ((UE_sched_ctl->ul_inactivity_timer > 10) && (UE_sched_ctl->ul_scheduled == 0) && (rrc_status < RRC_CONNECTED)) ||
          ((UE_sched_ctl->cqi_req_timer > 64) && (UE_sched_ctl->ul_scheduled == 0) && (!(is_S_sf(&eNB->common_channels[CC_id], subframeP))) && (rrc_status >= RRC_CONNECTED))) {
        continue;
      }
...
...
@@ -724,7 +763,12 @@ void dlsch_scheduler_pre_ue_select_fairRR(
    DL_req = &eNB->DL_req[CC_id].dl_config_request_body;

    for (UE_id = (last_dlsch_ue_id[CC_id] + 1); UE_id < NUMBER_OF_UE_MAX; UE_id++) {
    // (UE_id > last_dlsch_ue_id[CC_id])
    for (index = 0; index < dlsch_ue_max_num_volte[CC_id]; index++) {
      UE_id = dl_ue_candidate_volte[CC_id][index];

      if (UE_id == -1) {
        break;
      }

      if (end_flag[CC_id] == 1) {
        break;
      }
...
...
@@ -742,7 +786,6 @@ void dlsch_scheduler_pre_ue_select_fairRR(
      if (mac_eNB_get_rrc_status(module_idP, rnti) < RRC_CONNECTED) {
        continue;
      }

      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];

      for (i = 0; i < dlsch_ue_select[CC_id].ue_num; i++) {
...
...
@@ -850,8 +893,8 @@ void dlsch_scheduler_pre_ue_select_fairRR(
          break;
        }
      }
    }
  }
    last_dlsch_ue_id_volte[CC_id] = last_UE_id[CC_id];
  }  // CC_id

  if (cc_id_end(end_flag) == 1) {
    for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
...
...
@@ -900,13 +943,6 @@ void dlsch_scheduler_pre_ue_select_fairRR(
        continue;

      harq_pid = frame_subframe2_dl_harq_pid(cc[CC_id].tdd_Config, frameP, subframeP);
      round_1 = ue_sched_ctl->round[CC_id][harq_pid][TB1];
      round_2 = ue_sched_ctl->round[CC_id][harq_pid][TB2];

      if ((round_1 == 8) && (round_2 == 8)) {
        if (nb_rbs_required[CC_id][UE_id] == 0) {
          continue;
        }

        switch (get_tmode(module_idP, CC_id, UE_id)) {
          case 0:
...
...
@@ -967,7 +1003,6 @@ void dlsch_scheduler_pre_ue_select_fairRR(
          dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].UE_id = UE_id;
          dlsch_ue_select[CC_id].list[dlsch_ue_select[CC_id].ue_num].rnti = rnti;
          dlsch_ue_select[CC_id].ue_num++;
          ue_sched_ctl->select_tb[CC_id][harq_pid] = TB1;

          if (dlsch_ue_select[CC_id].ue_num == dlsch_ue_max_num[CC_id]) {
...
...
@@ -975,16 +1010,6 @@ void dlsch_scheduler_pre_ue_select_fairRR(
            break;
          }
        } else {
          // if (cc[CC_id].tdd_Config != NULL) { //TDD
          //   set_ue_dai (subframeP,
          //               UE_id,
          //               CC_id,
          //               cc[CC_id].tdd_Config->subframeAssignment,
          //               UE_list);
          //   // update UL DAI after DLSCH scheduling
          //   set_ul_DAI(module_idP,UE_id,CC_id,frameP,subframeP);
          // }
          add_ue_dlsch_info(module_idP, CC_id, UE_id,
...
...
@@ -996,7 +1021,6 @@ void dlsch_scheduler_pre_ue_select_fairRR(
        }
      }
    }
  }

  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    DL_req = &eNB->DL_req[CC_id].dl_config_request_body;
...
...
@@ -1053,6 +1077,16 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
  memset(min_rb_unit, 0, sizeof(min_rb_unit));
  memset(MIMO_mode_indicator[0], 0, MAX_NUM_CCs * N_RBG_MAX * sizeof(unsigned char));
  // Initialize scheduling information for all active UEs
  dlsch_scheduler_pre_processor_reset(Mod_id, 0, frameP, subframeP, min_rb_unit,
                                      (uint16_t (*)[NUMBER_OF_UE_MAX]) nb_rbs_required,
                                      rballoc_sub, MIMO_mode_indicator, mbsfn_flag);

  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    if (mbsfn_flag[CC_id] > 0)  // If this CC is allocated for MBSFN skip it here
      continue;
...
...
@@ -1068,16 +1102,6 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
      continue;

    UE_id = i;
    // Initialize scheduling information for all active UEs
    dlsch_scheduler_pre_processor_reset(Mod_id, 0, frameP, subframeP, min_rb_unit,
                                        (uint16_t (*)[NUMBER_OF_UE_MAX]) nb_rbs_required,
                                        rballoc_sub, MIMO_mode_indicator, mbsfn_flag);

    if (cc[CC_id].tdd_Config != NULL) {  //TDD
      set_ue_dai(subframeP,
                 UE_id,
...
...
@@ -1107,9 +1131,6 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
    step_size = 2;
  }

  /* VoLTE scheduling timing cal and get buffer size */
  dlsch_scheduler_nb_rbs_required_lcid(Mod_id, frameP, subframeP);
  memset(nb_rbs_required, 0, sizeof(uint16_t) * MAX_NUM_CCs * NUMBER_OF_UE_MAX);

  for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
    if (pre_scd_activeUE[UE_id] != TRUE)
...
...
@@ -1130,7 +1151,6 @@ void dlsch_scheduler_pre_processor_fairRR (module_id_t Mod_id,
  }
#endif
  dlsch_scheduler_pre_ue_select_fairRR(Mod_id, frameP, subframeP, mbsfn_flag, nb_rbs_required, dlsch_ue_select);

  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    average_rbs_per_user[CC_id] = 0;
    cc = &RC.mac[Mod_id]->common_channels[CC_id];
...
...
@@ -3610,15 +3630,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
  int CC_id, UE_id;
  int ret;
  uint16_t i;
  uint8_t ue_first_num[MAX_NUM_CCs];
  uint8_t first_ue_total[MAX_NUM_CCs][20];
  uint8_t first_ue_id[MAX_NUM_CCs][20];
  uint8_t ue_volte_num[MAX_NUM_CCs];
  uint8_t volte_ue_total[MAX_NUM_CCs][20];
  uint8_t volte_ue_id[MAX_NUM_CCs][20];
  uint8_t volte_lcg;
  uint8_t ul_inactivity_num[MAX_NUM_CCs];
  uint8_t ul_inactivity_id[MAX_NUM_CCs][20];
  // LTE_DL_FRAME_PARMS *frame_parms;
  uint8_t ulsch_ue_max_num[MAX_NUM_CCs];
  uint8_t ulsch_ue_max_num_volte[MAX_NUM_CCs];
...
...
@@ -3636,6 +3648,7 @@ void ulsch_scheduler_pre_ue_select_fairRR(
  int rrc_status;
  uint8_t selected_volte_ue_num = 0;
  uint8_t l_last_ulsch_ue_id_volte[MAX_NUM_CCs];
  int UE_id_idx;

  // Initialization
  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
...
...
@@ -3651,9 +3664,6 @@ void ulsch_scheduler_pre_ue_select_fairRR(
    }

    ulsch_ue_max_num_normal[CC_id] = ulsch_ue_max_num[CC_id] - ulsch_ue_max_num_volte[CC_id];
    cc_id_flag[CC_id] = 0;
    ue_first_num[CC_id] = 0;
    ue_volte_num[CC_id] = 0;
    ul_inactivity_num[CC_id] = 0;
    l_last_ulsch_ue_id_volte[CC_id] = last_ulsch_ue_id_volte[CC_id];
  }
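Reviewer note: as in the DL pre-selection earlier in this file (where dlsch_ue_max_num_volte is set to ue_multiple_max / 2 when a VoLTE cycle is configured), the UL pre-selection splits the per-CC multiplexing budget into a VoLTE share and a normal share (ulsch_ue_max_num_normal = ulsch_ue_max_num - ulsch_ue_max_num_volte). A standalone sketch of that split under the same halving assumption; the helper and values below are illustrative, not OAI code.

#include <stdio.h>
#include <stdint.h>

/* Reserve part of the per-TTI UE multiplexing budget for VoLTE when enabled,
   leave the remainder for normal traffic (assumed half/half split as in the DL path). */
static void split_budget(uint16_t ue_multiple_max, int volte_enabled,
                         uint16_t *max_volte, uint16_t *max_normal) {
  if (volte_enabled) {
    *max_volte = ue_multiple_max / 2;
    *max_normal = ue_multiple_max - *max_volte;
  } else {
    *max_volte = 0;
    *max_normal = ue_multiple_max;
  }
}

int main(void) {
  uint16_t v, n;
  split_budget(5, 1, &v, &n);
  printf("volte=%u normal=%u\n", v, n);  /* prints "volte=2 normal=3" */
  return 0;
}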
...
...
@@ -3732,113 +3742,17 @@ void ulsch_scheduler_pre_ue_select_fairRR(
      }
    }
  }
    //
    int bytes_to_schedule = UE_list->UE_template[CC_id][UE_id].estimated_ul_buffer - UE_list->UE_template[CC_id][UE_id].scheduled_ul_bytes;

    if (bytes_to_schedule < 0) {
      bytes_to_schedule = 0;
      UE_list->UE_template[CC_id][UE_id].scheduled_ul_bytes = 0;
    }

    UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];

    if ((UE_sched_ctl->volte_configured == TRUE) &&
        ((ulsch_ue_select[CC_id].ue_num + ue_volte_num[CC_id]) < ulsch_ue_max_num[CC_id]) &&
        (UE_id > l_last_ulsch_ue_id_volte[CC_id]) &&
        (UE_sched_ctl->ul_periodic_timer_exp_flag == TRUE)) {
      volte_lcg = UE_sched_ctl->volte_lcg;

      if ((UE_list->UE_template[CC_id][UE_id].ul_buffer_info[volte_lcg] > 0) && (bytes_to_schedule > 0)) {
        volte_ue_id[CC_id][ue_volte_num[CC_id]] = UE_id;
        volte_ue_total[CC_id][ue_volte_num[CC_id]] = bytes_to_schedule;
        ue_volte_num[CC_id]++;
        continue;
      }
    }

    if (UE_id > last_ulsch_ue_id[CC_id] &&
        ((ulsch_ue_select[CC_id].ue_num + ue_first_num[CC_id] + ue_volte_num[CC_id]) < ulsch_ue_max_num[CC_id])) {
      if (bytes_to_schedule > 0) {
        first_ue_id[CC_id][ue_first_num[CC_id]] = UE_id;
        first_ue_total[CC_id][ue_first_num[CC_id]] = bytes_to_schedule;
        ue_first_num[CC_id]++;
        continue;
      }

      if (UE_list->UE_template[CC_id][UE_id].ul_SR > 0) {
        first_ue_id[CC_id][ue_first_num[CC_id]] = UE_id;
        first_ue_total[CC_id][ue_first_num[CC_id]] = 0;
        ue_first_num[CC_id]++;
        continue;
      }

      rrc_status = mac_eNB_get_rrc_status(module_idP, rnti);

      if (((UE_sched_ctl->ul_inactivity_timer > 64) && (UE_sched_ctl->ul_scheduled == 0)) ||
          ((UE_sched_ctl->ul_inactivity_timer > 10) && (UE_sched_ctl->ul_scheduled == 0) && (rrc_status < RRC_CONNECTED)) ||
          ((UE_sched_ctl->cqi_req_timer > 64) && (UE_sched_ctl->ul_scheduled == 0) && (!(is_S_sf(&eNB->common_channels[CC_id], subframeP))) && (rrc_status >= RRC_CONNECTED))) {
        first_ue_id[CC_id][ue_first_num[CC_id]] = UE_id;
        first_ue_total[CC_id][ue_first_num[CC_id]] = 0;
        ue_first_num[CC_id]++;
        continue;
      }

      /*if ( (ulsch_ue_select[CC_id].ue_num+ul_inactivity_num[CC_id] ) < ulsch_ue_max_num[CC_id] ) {
        UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
        uint8_t ul_period = 0;
        if (cc->tdd_Config) {
          ul_period = 50;
        } else {
          ul_period = 20;
        }
        if ( ((UE_sched_ctl->ul_inactivity_timer>ul_period)&&(UE_sched_ctl->ul_scheduled==0)) ||
          ((UE_sched_ctl->ul_inactivity_timer>10)&&(UE_sched_ctl->ul_scheduled==0)&&(mac_eNB_get_rrc_status(module_idP,UE_RNTI(module_idP,UE_id)) < RRC_CONNECTED))) {
          ul_inactivity_id[CC_id][ul_inactivity_num[CC_id]]= UE_id;
          ul_inactivity_num[CC_id] ++;
          continue;
        }
      }*/
    }
  }

  // Selection of VoLTE UEs(LastSelectedUE - LastUE)
  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    HI_DCI0_req = &eNB->HI_DCI0_req[CC_id][subframeP].hi_dci0_request_body;

    for (int temp = 0; temp < ue_volte_num[CC_id]; temp++) {
      if ((ulsch_ue_select[CC_id].ue_num >= ulsch_ue_max_num[CC_id]) ||
          (selected_volte_ue_num >= ulsch_ue_max_num_volte[CC_id]) ||
          (cc_id_flag[CC_id] == 1)) {
        cc_id_flag[CC_id] = 1;
        HI_DCI0_req->number_of_dci = saved_ulsch_dci[CC_id];
        break;
      }

      hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[HI_DCI0_req->number_of_dci + HI_DCI0_req->number_of_hi];
      format_flag = 2;
      rnti = UE_RNTI(module_idP, volte_ue_id[CC_id][temp]);

      if (CCE_allocation_infeasible(module_idP, CC_id, format_flag, subframeP, aggregation, rnti) == 1) {
        cc_id_flag[CC_id] = 1;
        break;
      } else {
        hi_dci0_pdu->pdu_type = NFAPI_HI_DCI0_DCI_PDU_TYPE;
        hi_dci0_pdu->dci_pdu.dci_pdu_rel8.rnti = rnti;
        hi_dci0_pdu->dci_pdu.dci_pdu_rel8.aggregation_level = aggregation;
        HI_DCI0_req->number_of_dci++;
        ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].ue_priority = SCH_UL_FIRST;
        ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].ul_total_buffer = volte_ue_total[CC_id][temp];
        ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].UE_id = volte_ue_id[CC_id][temp];
        ulsch_ue_select[CC_id].ue_num++;
        selected_volte_ue_num++;
        last_ulsch_ue_id_volte[CC_id] = volte_ue_id[CC_id][temp];
        UE_list->UE_sched_ctrl[volte_ue_id[CC_id][temp]].ul_periodic_timer_exp_flag = FALSE;
      }
    }
  }

  // Selection of VoLTE UEs
  for (UE_id_idx = 0; UE_id_idx < MAX_UE_MULTIPLEX; UE_id_idx++) {
    UE_id = ul_ue_candidate_volte[UE_id_idx];

    if (UE_id == -1)
      break;

  // Selection of VoLTE UEs(0 - LastSelectedUE)
  for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
    if (UE_list->active[UE_id] == FALSE)
      continue;
...
...
@@ -3849,15 +3763,15 @@ void ulsch_scheduler_pre_ue_select_fairRR(
    CC_id = UE_PCCID(module_idP, UE_id);

    if (UE_id > l_last_ulsch_ue_id_volte[CC_id])
      continue;

    if (UE_list->UE_template[CC_id][UE_id].configured == FALSE)
      continue;

    if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
      continue;

    // UL DCI
    HI_DCI0_req = &eNB->HI_DCI0_req[CC_id][subframeP].hi_dci0_request_body;

    if ((ulsch_ue_select[CC_id].ue_num >= ulsch_ue_max_num[CC_id]) || (cc_id_flag[CC_id] == 1)) {
      cc_id_flag[CC_id] = 1;
      HI_DCI0_req->number_of_dci = saved_ulsch_dci[CC_id];
...
...
@@ -3927,49 +3841,10 @@ void ulsch_scheduler_pre_ue_select_fairRR(
      }
    }

  // Selection of First Send UEs
  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    HI_DCI0_req = &eNB->HI_DCI0_req[CC_id][subframeP].hi_dci0_request_body;

    for (int temp = 0; temp < ue_first_num[CC_id]; temp++) {
      if ((ulsch_ue_select[CC_id].ue_num >= ulsch_ue_max_num[CC_id]) || (cc_id_flag[CC_id] == 1)) {
        cc_id_flag[CC_id] = 1;
        HI_DCI0_req->number_of_dci = saved_ulsch_dci[CC_id];
        break;
      }

      for (i = 0; i < ulsch_ue_select[CC_id].ue_num; i++) {
        if (ulsch_ue_select[CC_id].list[i].UE_id == first_ue_id[CC_id][temp]) {
          break;
        }
      }

      if (i < ulsch_ue_select[CC_id].ue_num) {
        ulsch_ue_select[CC_id].list[i].ul_total_buffer += first_ue_total[CC_id][temp];
        LOG_D(MAC, "ulsch_scheduler_pre_ue_select_fairRR: UE volte select(last_ulsch_ue_id %d l_last_ulsch_ue_id_volte %d) add nonVoLTE buffer to scheduling %d %d\n",
              last_ulsch_ue_id[CC_id], l_last_ulsch_ue_id_volte[CC_id], ulsch_ue_select[CC_id].list[i].ul_total_buffer, first_ue_total[CC_id][temp]);
        continue;
      }

      hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[HI_DCI0_req->number_of_dci + HI_DCI0_req->number_of_hi];
      format_flag = 2;
      rnti = UE_RNTI(module_idP, first_ue_id[CC_id][temp]);

      if (CCE_allocation_infeasible(module_idP, CC_id, format_flag, subframeP, aggregation, rnti) == 1) {
        cc_id_flag[CC_id] = 1;
        break;
      } else {
        hi_dci0_pdu->pdu_type = NFAPI_HI_DCI0_DCI_PDU_TYPE;
        hi_dci0_pdu->dci_pdu.dci_pdu_rel8.rnti = rnti;
        hi_dci0_pdu->dci_pdu.dci_pdu_rel8.aggregation_level = aggregation;
        HI_DCI0_req->number_of_dci++;
        ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].ue_priority = SCH_UL_FIRST;
        ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].ul_total_buffer = first_ue_total[CC_id][temp];
        ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].UE_id = first_ue_id[CC_id][temp];
        ulsch_ue_select[CC_id].ue_num++;
      }
    }
  }

  // Selection of UEs
  for (UE_id_idx = 0; UE_id_idx < MAX_UE_MULTIPLEX; UE_id_idx++) {
    UE_id = ul_ue_candidate[UE_id_idx];

    if (UE_id == -1)
      break;

  // Selection of UEs after Last Selected UE
  for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
...
...
@@ -3983,15 +3858,15 @@ void ulsch_scheduler_pre_ue_select_fairRR(
    CC_id = UE_PCCID(module_idP, UE_id);

    if (UE_id > last_ulsch_ue_id[CC_id])
      continue;

    if (UE_list->UE_template[CC_id][UE_id].configured == FALSE)
      continue;

    if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
      continue;

    // UL DCI
    HI_DCI0_req = &eNB->HI_DCI0_req[CC_id][subframeP].hi_dci0_request_body;

    if ((ulsch_ue_select[CC_id].ue_num >= ulsch_ue_max_num[CC_id]) || (cc_id_flag[CC_id] == 1)) {
      cc_id_flag[CC_id] = 1;
      HI_DCI0_req->number_of_dci = saved_ulsch_dci[CC_id];
...
...
@@ -4006,15 +3881,6 @@ void ulsch_scheduler_pre_ue_select_fairRR(
      }
    }

    for (i = 0; i < ulsch_ue_select[CC_id].ue_num; i++) {
      if (ulsch_ue_select[CC_id].list[i].UE_id == UE_id) {
        break;
      }
    }

    if (i < ulsch_ue_select[CC_id].ue_num)
      continue;

    HI_DCI0_req = &eNB->HI_DCI0_req[CC_id][subframeP].hi_dci0_request_body;
    //SR BSR
    UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
...
...
@@ -4024,6 +3890,20 @@ void ulsch_scheduler_pre_ue_select_fairRR(
      bytes_to_schedule = 0;
      UE_list->UE_template[CC_id][UE_id].scheduled_ul_bytes = 0;
    }

    for (i = 0; i < ulsch_ue_select[CC_id].ue_num; i++) {
      if (ulsch_ue_select[CC_id].list[i].UE_id == UE_id) {
        break;
      }
    }

    if (i < ulsch_ue_select[CC_id].ue_num) {
      ulsch_ue_select[CC_id].list[i].ul_total_buffer += bytes_to_schedule;
      LOG_D(MAC, "ulsch_scheduler_pre_ue_select_fairRR: UE volte select(last_ulsch_ue_id %d l_last_ulsch_ue_id_volte %d) add nonVoLTE buffer to scheduling %d %d\n",
            last_ulsch_ue_id[CC_id], l_last_ulsch_ue_id_volte[CC_id], ulsch_ue_select[CC_id].list[i].ul_total_buffer, bytes_to_schedule);
      continue;
    }

    rrc_status = mac_eNB_get_rrc_status(module_idP, rnti);

    if ((bytes_to_schedule > 0) || (UE_list->UE_template[CC_id][UE_id].ul_SR > 0) ||
...
...
@@ -4053,57 +3933,11 @@ void ulsch_scheduler_pre_ue_select_fairRR(
      continue;
    }
  }
  //inactivity UE
  /* if ( (ulsch_ue_select[CC_id].ue_num+ul_inactivity_num[CC_id]) < ulsch_ue_max_num[CC_id] ) {
    UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
    uint8_t ul_period = 0;
    if (cc->tdd_Config) {
      ul_period = 50;
    } else {
      ul_period = 20;
    }
    if ( ((UE_sched_ctl->ul_inactivity_timer>ul_period)&&(UE_sched_ctl->ul_scheduled==0)) ||
      ((UE_sched_ctl->ul_inactivity_timer>10)&&(UE_sched_ctl->ul_scheduled==0)&&(mac_eNB_get_rrc_status(module_idP,UE_RNTI(module_idP,UE_id)) < RRC_CONNECTED))) {
      ul_inactivity_id[CC_id][ul_inactivity_num[CC_id]]= UE_id;
      ul_inactivity_num[CC_id]++;
      continue;
    }
  }*/
  }

  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    HI_DCI0_req = &eNB->HI_DCI0_req[CC_id][subframeP].hi_dci0_request_body;

    for (int temp = 0; temp < ul_inactivity_num[CC_id]; temp++) {
      if ((ulsch_ue_select[CC_id].ue_num >= ulsch_ue_max_num[CC_id]) || (cc_id_flag[CC_id] == 1)) {
        HI_DCI0_req = &eNB->HI_DCI0_req[CC_id][subframeP].hi_dci0_request_body;
        cc_id_flag[CC_id] = 1;
        break;
      }

      hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[HI_DCI0_req->number_of_dci + HI_DCI0_req->number_of_hi];
      format_flag = 2;
      rnti = UE_RNTI(module_idP, ul_inactivity_id[CC_id][temp]);

      if (CCE_allocation_infeasible(module_idP, CC_id, format_flag, subframeP, aggregation, rnti) == 1) {
        cc_id_flag[CC_id] = 1;
        continue;
      } else {
        hi_dci0_pdu->pdu_type = NFAPI_HI_DCI0_DCI_PDU_TYPE;
        hi_dci0_pdu->dci_pdu.dci_pdu_rel8.rnti = rnti;
        hi_dci0_pdu->dci_pdu.dci_pdu_rel8.aggregation_level = aggregation;
        HI_DCI0_req->number_of_dci++;
        ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].ue_priority = SCH_UL_INACTIVE;
        ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].ul_total_buffer = 0;
        ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].UE_id = ul_inactivity_id[CC_id][temp];
        ulsch_ue_select[CC_id].ue_num++;
      }
    }

    HI_DCI0_req->number_of_dci = saved_ulsch_dci[CC_id];
  }

  return;
}
...
...
openair2/LAYER2/MAC/eNB_scheduler_fairRR.h  (view file @ 83e067dc)
...
...
@@ -122,6 +122,15 @@ void schedule_ulsch_rnti_fairRR(module_id_t module_idP,
                                unsigned char sched_subframeP,
                                ULSCH_UE_SELECT ulsch_ue_select[MAX_NUM_CCs]);
void select_dl_ue_candidate(module_id_t module_idP,
                            frame_t frameP,
                            sub_frame_t subframeP);
void select_ul_ue_candidate(module_id_t module_idP,
                            frame_t frameP,
                            sub_frame_t subframeP);
/* extern */
extern DLSCH_UE_SELECT dlsch_ue_select[MAX_NUM_CCs];
...
...
openair2/LAYER2/MAC/mac_proto.h  (view file @ 83e067dc)
...
...
@@ -299,6 +299,8 @@ void dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id,
*/
void eNB_dlsch_ulsch_scheduler(module_id_t module_idP,
                               frame_t frameP,
                               sub_frame_t subframeP);  //, int calibration_flag);
void update_ue_timers(module_id_t module_idP,
                      frame_t frameP,
                      sub_frame_t subframeP);
/* \brief Function to indicate a received preamble on PRACH. It initiates the RA procedure.
@param Mod_id Instance ID of eNB
@param preamble_index index of the received RA request
...
...
openair2/PHY_INTERFACE/IF_Module.c  (view file @ 83e067dc)
...
...
@@ -773,8 +773,8 @@ void UL_indication(UL_IND_t *UL_info) {
      sched_info->HI_DCI0_req = &mac->HI_DCI0_req[CC_id][sched_info->subframe];

      if (is_UL_sf(&mac->common_channels[CC_id], sched_info->subframe) < 0) {
        return;
      }

      //      return;
    } else {
      if ((mac->common_channels[CC_id].tdd_Config == NULL) ||
          (is_UL_sf(&mac->common_channels[CC_id], sched_info->subframe) > 0))
        sched_info->UL_req = &mac->UL_req[CC_id];
...
...
@@ -799,6 +799,9 @@ void UL_indication(UL_IND_t *UL_info) {
      }

      ifi->schedule_response(sched_info);
    }
  }

  update_ue_timers(module_id,
                   (UL_info->frame + ((UL_info->subframe > (9 - sf_ahead - 1)) ? 1 : 0)) % 1024,
                   (UL_info->subframe + sf_ahead + 1) % 10);
  LOG_D(PHY, "Schedule_response: SFN_SF:%d%d dl_pdus:%d\n", sched_info->frame, sched_info->subframe, sched_info->DL_req->dl_config_request_body.number_pdu);
}
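Reviewer note: the new update_ue_timers() call above advances the SFN/SF by sf_ahead + 1 subframes, carrying into the frame number modulo 1024. A self-contained sketch of that arithmetic; the sf_ahead value used in main() is illustrative only.

#include <stdio.h>

/* Advance (frame, subframe) by sf_ahead + 1 subframes, wrapping the subframe
   at 10 and the frame at 1024, exactly as the expression in the call above. */
static void advance_sfn_sf(int frame, int subframe, int sf_ahead,
                           int *out_frame, int *out_subframe) {
  *out_frame    = (frame + ((subframe > (9 - sf_ahead - 1)) ? 1 : 0)) % 1024;
  *out_subframe = (subframe + sf_ahead + 1) % 10;
}

int main(void) {
  int f, sf;
  advance_sfn_sf(1023, 8, 4, &f, &sf);
  printf("SFN %d, SF %d\n", f, sf);  /* prints "SFN 0, SF 3" */
  return 0;
}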
...
...