spbro/OpenXG-RAN, commit 25418e98
Authored Jan 16, 2019 by Louis Adrien Dufrene

merge from local dev branch

Parents: 7aa89331, 0aff1045

Showing 5 changed files with 240 additions and 316 deletions (+240, -316)
openair2/LAYER2/MAC/eNB_scheduler_fairRR.c        +1    -1
openair2/LAYER2/MAC/eNB_scheduler_primitives.c    +132  -99
openair2/LAYER2/MAC/eNB_scheduler_ulsch.c         +13   -112
openair2/LAYER2/MAC/mac_proto.h                   +2    -2
openair2/LAYER2/MAC/pre_processor.c               +92   -102
openair2/LAYER2/MAC/eNB_scheduler_fairRR.c

@@ -2582,7 +2582,7 @@ schedule_ulsch_fairRR(module_id_t module_idP, frame_t frameP,
                                        frame_parms->prach_config_common.prach_ConfigInfo.prach_ConfigIndex,
                                        frame_parms->prach_config_common.prach_ConfigInfo.prach_FreqOffset,
                                        0,  //tdd_mapindex
-                                       frameP); //Nf
+                                       frameP); //Nf --> shouldn't it be sched_frame ???
       ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].nb_rb = 6;
       ulsch_ue_select[CC_id].list[ulsch_ue_select[CC_id].ue_num].UE_id = -1;
       ulsch_ue_select[CC_id].ue_num++;
openair2/LAYER2/MAC/eNB_scheduler_primitives.c
@@ -305,7 +305,6 @@ subframe2harqpid(COMMON_channels_t *cc,
   } else {
     switch (cc->tdd_Config->subframeAssignment) {
       case 1:
         if ((subframe == 2) || (subframe == 3) || (subframe == 7) || (subframe == 8))
           switch (subframe) {
             case 2:
             case 3:
@@ -322,7 +321,6 @@ subframe2harqpid(COMMON_channels_t *cc,
                           subframe, (int) cc->tdd_Config->subframeAssignment);
               break;
           }
         break;
       case 2:
@@ -2622,11 +2620,19 @@ int get_nb_subband(int N_RB_DL) {
   return nb_sb;
 }

-void
-init_CCE_table(int module_idP,
-               int CC_idP)
-{
-  memset(RC.mac[module_idP]->CCE_table[CC_idP], 0, 800 * sizeof(int));
-}
+/*
+void
+init_CCE_table(int module_idP,
+               int CC_idP)
+{
+  memset(RC.mac[module_idP]->CCE_table[CC_idP], 0, 800 * sizeof(int));
+}
+*/
+
+void
+init_CCE_table(int *CCE_table)
+{
+  memset(CCE_table, 0, 800 * sizeof(int));
+}

 int
 get_nCCE_offset(int *CCE_table,
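As a side illustration (not part of the commit): the refactored init_CCE_table() now takes the CCE table pointer directly instead of (module_idP, CC_idP). Below is a minimal stand-alone sketch of that calling shape, assuming only the 800-entry size visible in the memset above; the OAI container types (RC, eNB_MAC_INST, per-CC indexing) are deliberately left out.

    #include <string.h>

    /* Sketch only: mirrors the new init_CCE_table(int *) shape from this commit.
     * The 800-slot size comes from the memset in the diff; everything else is a
     * simplification for illustration. */
    #define CCE_TABLE_SIZE 800

    static void init_CCE_table(int *CCE_table)
    {
      /* zero every CCE slot before a new allocation pass */
      memset(CCE_table, 0, CCE_TABLE_SIZE * sizeof(int));
    }

    int main(void)
    {
      int CCE_table[CCE_TABLE_SIZE];
      init_CCE_table(CCE_table);   /* caller passes the per-CC table it owns */
      return CCE_table[0];         /* 0 after initialisation */
    }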
@@ -2895,19 +2901,21 @@ allocate_CCEs(int module_idP,
               sub_frame_t subframeP,
               int test_onlyP)
 {
-  int *CCE_table = RC.mac[module_idP]->CCE_table[CC_idP];
-  nfapi_dl_config_request_body_t *DL_req = &RC.mac[module_idP]->DL_req[CC_idP].dl_config_request_body;
-  nfapi_hi_dci0_request_body_t *HI_DCI0_req = &RC.mac[module_idP]->HI_DCI0_req[CC_idP][subframeP].hi_dci0_request_body;
+  eNB_MAC_INST *eNB = RC.mac[module_idP];
+  int *CCE_table = eNB->CCE_table[CC_idP];
+  nfapi_dl_config_request_body_t *DL_req = &eNB->DL_req[CC_idP].dl_config_request_body;
+  nfapi_hi_dci0_request_body_t *HI_DCI0_req = &eNB->HI_DCI0_req[CC_idP][subframeP].hi_dci0_request_body;
   nfapi_dl_config_request_pdu_t *dl_config_pdu = &DL_req->dl_config_pdu_list[0];
   nfapi_hi_dci0_request_pdu_t *hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[0];
-  int nCCE_max = get_nCCE_max(&RC.mac[module_idP]->common_channels[CC_idP], 1, subframeP);
+  COMMON_channels_t *cc = &eNB->common_channels[CC_idP];
+  int nCCE_max = get_nCCE_max(cc, 1, subframeP);
   int fCCE;
   int i, j, idci;
   int nCCE = 0;
   int max_symbol;
-  eNB_MAC_INST *eNB = RC.mac[module_idP];
-  COMMON_channels_t *cc = &eNB->common_channels[CC_idP];
   int ackNAK_absSF = get_pucch1_absSF(cc, (frameP * 10 + subframeP));
+  nfapi_dl_config_request_pdu_t *dl_config_pduLoop;
+  nfapi_hi_dci0_request_pdu_t *hi_dci0_pduLoop;

   if (cc->tdd_Config != NULL && is_S_sf(cc, subframeP) > 0)
     max_symbol = 2;
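The hunk above replaces repeated RC.mac[module_idP] lookups with one cached eNB pointer and a cc pointer derived from it. A small self-contained sketch of that caching pattern follows; the struct and array names here are simplified stand-ins, not the OAI definitions.

    #include <stdio.h>

    /* Stand-in types: simplified placeholders for eNB_MAC_INST / COMMON_channels_t. */
    typedef struct { int prach_config; } common_channels_t;
    typedef struct { common_channels_t common_channels[2]; int CCE_table[2][800]; } enb_mac_t;

    static enb_mac_t mac_instances[1];
    static enb_mac_t *mac[1] = { &mac_instances[0] };   /* plays the role of RC.mac[] */

    static void allocate_sketch(int module_id, int CC_id)
    {
      /* fetch the instance once, then derive everything from the local pointers */
      enb_mac_t *eNB = mac[module_id];
      int *CCE_table = eNB->CCE_table[CC_id];
      common_channels_t *cc = &eNB->common_channels[CC_id];

      CCE_table[0] = 0;
      printf("cc->prach_config = %d\n", cc->prach_config);
    }

    int main(void)
    {
      allocate_sketch(0, 0);
      return 0;
    }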
@@ -2916,83 +2924,94 @@ allocate_CCEs(int module_idP,
   nfapi_ul_config_request_body_t *ul_req = &eNB->UL_req_tmp[CC_idP][ackNAK_absSF % 10].ul_config_request_body;
   LOG_D(MAC, "Allocate CCEs subframe %d, test %d : (DL PDU %d, DL DCI %d, UL %d)\n",
         subframeP, test_onlyP, DL_req->number_pdu, DL_req->number_dci,
         HI_DCI0_req->number_of_dci);
   DL_req->number_pdcch_ofdm_symbols = 1;

try_again:
-  init_CCE_table(module_idP, CC_idP);
+  init_CCE_table(CCE_table);
   nCCE = 0;

   for (i = 0, idci = 0; i < DL_req->number_pdu; i++) {
+    dl_config_pduLoop = &dl_config_pdu[i];

     // allocate DL common DCIs first
-    if ((dl_config_pdu[i].pdu_type == NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE)
-        && (dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.rnti_type == 2)) {
+    if (dl_config_pduLoop->pdu_type == NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE
+        && dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type == 2) {
       LOG_D(MAC, "Trying to allocate COMMON DCI %d/%d (%d,%d) : rnti %x, aggreg %d nCCE %d / %d (num_pdcch_symbols %d)\n",
             idci, DL_req->number_dci + HI_DCI0_req->number_of_dci,
             DL_req->number_dci, HI_DCI0_req->number_of_dci,
-            dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.rnti,
-            dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
+            dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.rnti,
+            dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
             nCCE, nCCE_max, DL_req->number_pdcch_ofdm_symbols);

-      if (nCCE + (dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level) > nCCE_max) {
+      if (nCCE + (dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level) > nCCE_max) {
        if (DL_req->number_pdcch_ofdm_symbols == max_symbol)
          return -1;

         LOG_D(MAC, "Can't fit DCI allocations with %d PDCCH symbols, increasing by 1\n",
               DL_req->number_pdcch_ofdm_symbols);
         DL_req->number_pdcch_ofdm_symbols++;
-        nCCE_max = get_nCCE_max(&RC.mac[module_idP]->common_channels[CC_idP], DL_req->number_pdcch_ofdm_symbols, subframeP);
+        nCCE_max = get_nCCE_max(cc, DL_req->number_pdcch_ofdm_symbols, subframeP);
         goto try_again;
       }

       // number of CCEs left can potentially hold this allocation
       fCCE = get_nCCE_offset(CCE_table,
-                             dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
-                             nCCE_max, 1,
-                             dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.rnti,
+                             dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
+                             nCCE_max, 1,
+                             dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.rnti,
                              subframeP);

       if (fCCE == -1) {
         if (DL_req->number_pdcch_ofdm_symbols == max_symbol) {
           LOG_D(MAC, "subframe %d: Dropping Allocation for RNTI %x\n",
-                subframeP, dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.rnti);
+                subframeP, dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.rnti);

           for (j = 0; j <= i; j++) {
-            if (dl_config_pdu[j].pdu_type == NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE)
+            dl_config_pduLoop = &dl_config_pdu[j];
+
+            if (dl_config_pduLoop->pdu_type == NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE)
               LOG_D(MAC, "DCI %d/%d (%d,%d) : rnti %x dci format %d, aggreg %d nCCE %d / %d (num_pdcch_symbols %d)\n",
                     j, DL_req->number_dci + HI_DCI0_req->number_of_dci,
                     DL_req->number_dci, HI_DCI0_req->number_of_dci,
-                    dl_config_pdu[j].dci_dl_pdu.dci_dl_pdu_rel8.rnti,
-                    dl_config_pdu[j].dci_dl_pdu.dci_dl_pdu_rel8.dci_format,
-                    dl_config_pdu[j].dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
+                    dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.rnti,
+                    dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.dci_format,
+                    dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
                     nCCE, nCCE_max, DL_req->number_pdcch_ofdm_symbols);
           }

           //dump_CCE_table(CCE_table,nCCE_max,subframeP,dci_alloc->rnti,1<<dci_alloc->L);
           return -1;
         }

         LOG_D(MAC, "Can't fit DCI allocations with %d PDCCH symbols (rnti condition), increasing by 1\n",
               DL_req->number_pdcch_ofdm_symbols);
         DL_req->number_pdcch_ofdm_symbols++;
-        nCCE_max = get_nCCE_max(&RC.mac[module_idP]->common_channels[CC_idP], DL_req->number_pdcch_ofdm_symbols, subframeP);
+        nCCE_max = get_nCCE_max(cc, DL_req->number_pdcch_ofdm_symbols, subframeP);
         goto try_again;
       }  // fCCE==-1

       // the allocation is feasible, rnti rule passes
-      nCCE += dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level;
+      nCCE += dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level;
       LOG_D(MAC, "Allocating at nCCE %d\n", fCCE);

       if ((test_onlyP % 2) == 0) {
-        dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.cce_idx = fCCE;
+        dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx = fCCE;
         LOG_D(MAC, "Allocate COMMON DCI CCEs subframe %d, test %d => L %d fCCE %d\n",
-              subframeP, test_onlyP, dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level, fCCE);
+              subframeP, test_onlyP, dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level, fCCE);
       }

       idci++;
@@ -3001,50 +3020,55 @@ try_again:
   // no try to allocate UL DCIs
   for (i = 0; i < HI_DCI0_req->number_of_dci + HI_DCI0_req->number_of_hi; i++) {
+    hi_dci0_pduLoop = &hi_dci0_pdu[i];

     // allocate UL DCIs
     if (hi_dci0_pdu[i].pdu_type == NFAPI_HI_DCI0_DCI_PDU_TYPE) {
       LOG_D(MAC, "Trying to allocate format 0 DCI %d/%d (%d,%d) : rnti %x, aggreg %d nCCE %d / %d (num_pdcch_symbols %d)\n",
             idci, DL_req->number_dci + HI_DCI0_req->number_of_dci,
             DL_req->number_dci, HI_DCI0_req->number_of_dci,
-            hi_dci0_pdu[i].dci_pdu.dci_pdu_rel8.rnti,
-            hi_dci0_pdu[i].dci_pdu.dci_pdu_rel8.aggregation_level,
+            hi_dci0_pduLoop->dci_pdu.dci_pdu_rel8.rnti,
+            hi_dci0_pduLoop->dci_pdu.dci_pdu_rel8.aggregation_level,
             nCCE, nCCE_max, DL_req->number_pdcch_ofdm_symbols);

-      if (nCCE + (hi_dci0_pdu[i].dci_pdu.dci_pdu_rel8.aggregation_level) > nCCE_max) {
+      if (nCCE + hi_dci0_pduLoop->dci_pdu.dci_pdu_rel8.aggregation_level > nCCE_max) {
        if (DL_req->number_pdcch_ofdm_symbols == max_symbol)
          return -1;

         LOG_D(MAC, "Can't fit DCI allocations with %d PDCCH symbols, increasing by 1\n",
               DL_req->number_pdcch_ofdm_symbols);
         DL_req->number_pdcch_ofdm_symbols++;
-        nCCE_max = get_nCCE_max(&RC.mac[module_idP]->common_channels[CC_idP], DL_req->number_pdcch_ofdm_symbols, subframeP);
+        nCCE_max = get_nCCE_max(cc, DL_req->number_pdcch_ofdm_symbols, subframeP);
         goto try_again;
       }

       // number of CCEs left can potentially hold this allocation
       fCCE = get_nCCE_offset(CCE_table,
-                             hi_dci0_pdu[i].dci_pdu.dci_pdu_rel8.aggregation_level,
-                             nCCE_max, 0,
-                             hi_dci0_pdu[i].dci_pdu.dci_pdu_rel8.rnti,
+                             hi_dci0_pduLoop->dci_pdu.dci_pdu_rel8.aggregation_level,
+                             nCCE_max, 0,
+                             hi_dci0_pduLoop->dci_pdu.dci_pdu_rel8.rnti,
                              subframeP);

       if (fCCE == -1) {
         if (DL_req->number_pdcch_ofdm_symbols == max_symbol) {
           LOG_D(MAC, "subframe %d: Dropping Allocation for RNTI %x\n",
-                subframeP, hi_dci0_pdu[i].dci_pdu.dci_pdu_rel8.rnti);
+                subframeP, hi_dci0_pduLoop->dci_pdu.dci_pdu_rel8.rnti);

           for (j = 0; j <= i; j++) {
+            hi_dci0_pduLoop = &hi_dci0_pdu[j];

             if (hi_dci0_pdu[j].pdu_type == NFAPI_HI_DCI0_DCI_PDU_TYPE)
               LOG_D(MAC, "DCI %d/%d (%d,%d) : rnti %x dci format %d, aggreg %d nCCE %d / %d (num_pdcch_symbols %d)\n",
                     j, DL_req->number_dci + HI_DCI0_req->number_of_dci,
                     DL_req->number_dci, HI_DCI0_req->number_of_dci,
-                    hi_dci0_pdu[j].dci_pdu.dci_pdu_rel8.rnti,
-                    hi_dci0_pdu[j].dci_pdu.dci_pdu_rel8.dci_format,
-                    hi_dci0_pdu[j].dci_pdu.dci_pdu_rel8.aggregation_level,
+                    hi_dci0_pduLoop->dci_pdu.dci_pdu_rel8.rnti,
+                    hi_dci0_pduLoop->dci_pdu.dci_pdu_rel8.dci_format,
+                    hi_dci0_pduLoop->dci_pdu.dci_pdu_rel8.aggregation_level,
                     nCCE, nCCE_max, DL_req->number_pdcch_ofdm_symbols);
           }

           //dump_CCE_table(CCE_table,nCCE_max,subframeP,dci_alloc->rnti,1<<dci_alloc->L);
@@ -3054,18 +3078,20 @@ try_again:
         LOG_D(MAC, "Can't fit DCI allocations with %d PDCCH symbols (rnti condition), increasing by 1\n",
               DL_req->number_pdcch_ofdm_symbols);
         DL_req->number_pdcch_ofdm_symbols++;
-        nCCE_max = get_nCCE_max(&RC.mac[module_idP]->common_channels[CC_idP], DL_req->number_pdcch_ofdm_symbols, subframeP);
+        nCCE_max = get_nCCE_max(cc, DL_req->number_pdcch_ofdm_symbols, subframeP);
         goto try_again;
       }  // fCCE==-1

       // the allocation is feasible, rnti rule passes
-      nCCE += hi_dci0_pdu[i].dci_pdu.dci_pdu_rel8.aggregation_level;
-      LOG_D(MAC, "Allocating at nCCE %d\n", fCCE);
+      nCCE += hi_dci0_pduLoop->dci_pdu.dci_pdu_rel8.aggregation_level;
+      LOG_D(MAC, "Allocating at nCCE %d\n", fCCE);

       if ((test_onlyP % 2) == 0) {
-        hi_dci0_pdu[i].dci_pdu.dci_pdu_rel8.cce_index = fCCE;
-        LOG_D(MAC, "Allocate CCEs subframe %d, test %d\n", subframeP, test_onlyP);
+        hi_dci0_pduLoop->dci_pdu.dci_pdu_rel8.cce_index = fCCE;
+        LOG_D(MAC, "Allocate CCEs subframe %d, test %d\n", subframeP, test_onlyP);
       }

       idci++;
@@ -3073,51 +3099,56 @@ try_again:
     }
   }  // for i = 0 ... num_UL_DCIs

   for (i = 0; i < DL_req->number_pdu; i++) {
+    dl_config_pduLoop = &dl_config_pdu[i];

     // allocate DL UE specific DCIs
     if ((dl_config_pdu[i].pdu_type == NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE)
-        && (dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.rnti_type == 1)) {
+        && (dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type == 1)) {
       LOG_D(MAC, "Trying to allocate DL UE-SPECIFIC DCI %d/%d (%d,%d) : rnti %x, aggreg %d nCCE %d / %d (num_pdcch_symbols %d)\n",
             idci, DL_req->number_dci + HI_DCI0_req->number_of_dci,
             DL_req->number_dci, HI_DCI0_req->number_of_dci,
-            dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.rnti,
-            dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
+            dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.rnti,
+            dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
             nCCE, nCCE_max, DL_req->number_pdcch_ofdm_symbols);

-      if (nCCE + (dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level) > nCCE_max) {
+      if (nCCE + (dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level) > nCCE_max) {
        if (DL_req->number_pdcch_ofdm_symbols == max_symbol)
          return -1;

         LOG_D(MAC, "Can't fit DCI allocations with %d PDCCH symbols, increasing by 1\n",
               DL_req->number_pdcch_ofdm_symbols);
         DL_req->number_pdcch_ofdm_symbols++;
-        nCCE_max = get_nCCE_max(&RC.mac[module_idP]->common_channels[CC_idP], DL_req->number_pdcch_ofdm_symbols, subframeP);
+        nCCE_max = get_nCCE_max(cc, DL_req->number_pdcch_ofdm_symbols, subframeP);
         goto try_again;
       }

       // number of CCEs left can potentially hold this allocation
       fCCE = get_nCCE_offset(CCE_table,
-                             dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
-                             nCCE_max, 0,
-                             dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.rnti,
+                             dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
+                             nCCE_max, 0,
+                             dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.rnti,
                              subframeP);

       if (fCCE == -1) {
         if (DL_req->number_pdcch_ofdm_symbols == max_symbol) {
           LOG_I(MAC, "subframe %d: Dropping Allocation for RNTI %x\n",
-                subframeP, dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.rnti);
+                subframeP, dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.rnti);

           for (j = 0; j <= i; j++) {
-            if (dl_config_pdu[j].pdu_type == NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE)
+            dl_config_pduLoop = &dl_config_pdu[j];
+
+            if (dl_config_pduLoop->pdu_type == NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE)
               LOG_D(MAC, "DCI %d/%d (%d,%d) : rnti %x dci format %d, aggreg %d nCCE %d / %d (num_pdcch_symbols %d)\n",
                     j, DL_req->number_dci + HI_DCI0_req->number_of_dci,
                     DL_req->number_dci, HI_DCI0_req->number_of_dci,
-                    dl_config_pdu[j].dci_dl_pdu.dci_dl_pdu_rel8.rnti,
-                    dl_config_pdu[j].dci_dl_pdu.dci_dl_pdu_rel8.dci_format,
-                    dl_config_pdu[j].dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
+                    dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.rnti,
+                    dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.dci_format,
+                    dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,
                     nCCE, nCCE_max, DL_req->number_pdcch_ofdm_symbols);
@@ -3129,18 +3160,20 @@ try_again:
         LOG_D(MAC, "Can't fit DCI allocations with %d PDCCH symbols (rnti condition), increasing by 1\n",
               DL_req->number_pdcch_ofdm_symbols);
         DL_req->number_pdcch_ofdm_symbols++;
-        nCCE_max = get_nCCE_max(&RC.mac[module_idP]->common_channels[CC_idP], DL_req->number_pdcch_ofdm_symbols, subframeP);
+        nCCE_max = get_nCCE_max(cc, DL_req->number_pdcch_ofdm_symbols, subframeP);
         goto try_again;
       }  // fCCE==-1

       // the allocation is feasible, rnti rule passes
-      nCCE += dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level;
-      LOG_D(MAC, "Allocating at nCCE %d\n", fCCE);
+      nCCE += dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level;
+      LOG_D(MAC, "Allocating at nCCE %d\n", fCCE);

       if ((test_onlyP % 2) == 0) {
-        dl_config_pdu[i].dci_dl_pdu.dci_dl_pdu_rel8.cce_idx = fCCE;
-        LOG_D(MAC, "Allocate CCEs subframe %d, test %d\n", subframeP, test_onlyP);
+        dl_config_pduLoop->dci_dl_pdu.dci_dl_pdu_rel8.cce_idx = fCCE;
+        LOG_D(MAC, "Allocate CCEs subframe %d, test %d\n", subframeP, test_onlyP);
       }

       if ((test_onlyP / 2) == 1) {
openair2/LAYER2/MAC/eNB_scheduler_ulsch.c
@@ -133,31 +133,6 @@ rx_sdu(const module_id_t enb_mod_idP,
   memset(rx_lcids, 0, NB_RB_MAX * sizeof(unsigned char));
   memset(rx_lengths, 0, NB_RB_MAX * sizeof(unsigned short));
-  // LAD
-  /*
-  if (UE_id == -1) {
-  LOG_E(MAC, "Step 1\n");
-  LOG_W(MAC, "[MAC] UE_id = -1 ; RNTI = %x ; frame = %d ; subframe = %d ; sdu_length = %d ; sdu = %d\n",
-  rntiP,
-  frameP,
-  subframeP,
-  sdu_lenP,
-  *sduP);
-  }
-  */
-  // LAD
-  /*
-  first_rb = UE_list->UE_template[CC_idP][UE_id].first_rb_ul[harq_pid];
-  LOG_W(MAC, "[MAC] UE_id = %d : first_rb = %d ; scheduled_ul_bytes = %d ; TBS_UL = %d ; frame = %d ; subframe = %d\n",
-  UE_id,
-  first_rb,
-  UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes,
-  UE_list->UE_template[CC_idP][UE_id].TBS_UL[harq_pid],
-  frameP,
-  subframeP);
-  */
   start_meas(&mac->rx_ulsch_sdu);
   VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RX_SDU, 1);
@@ -206,17 +181,7 @@ rx_sdu(const module_id_t enb_mod_idP,
       mac_eNB_rrc_ul_in_sync(enb_mod_idP, CC_idP, frameP, subframeP, current_rnti);
     }

-    /* update bytes to schedule */
-    // LAD
-    /*
-    LOG_E(MAC, "Step 2\n");
-    LOG_W(MAC, "[MAC] UE_id != -1 and sduP != NULL : first_rb = %d ; scheduled_ul_bytes = %d ; TBS_UL = %d\n",
-    first_rb,
-    UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes,
-    UE_list->UE_template[CC_idP][UE_id].TBS_UL[harq_pid]);
-    */
+    /* Update bytes to schedule */
     UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes -= UE_list->UE_template[CC_idP][UE_id].TBS_UL[harq_pid];

     if (UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes < 0) {
@@ -310,15 +275,6 @@ rx_sdu(const module_id_t enb_mod_idP,
     first_rb = ra[RA_id].msg3_first_rb;
-    // LAD
-    /*
-    LOG_E(MAC, "Step 3\n");
-    LOG_W(MAC, "[MAC] UE_id == -1 : first_rb = %d ; scheduled_ul_bytes = %d ; TBS_UL = %d\n",
-    first_rb,
-    UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes,
-    UE_list->UE_template[CC_idP][UE_id].TBS_UL[harq_pid]);
-    */

     if (sduP == NULL) {
       // we've got an error on Msg3
       LOG_W(MAC, "[eNB %d] CC_id %d, RA %d ULSCH in error in round %d/%d\n",
             enb_mod_idP,
@@ -333,26 +289,6 @@ rx_sdu(const module_id_t enb_mod_idP,
       // first_rb = UE_list->UE_template[CC_idP][UE_id].first_rb_ul[harq_pid]; // UE_id = -1 !!!!
       ra[RA_id].msg3_round++;
-      // LAD
-      /*
-      LOG_E(MAC, "Step 4\n");
-      LOG_W(MAC, "[MAC] [UEINFO1] UE_id = %d ; RNTI_ue_template = %x ; RNTI_sdu = %x\n",
-      UE_id,
-      UE_list->UE_template[CC_idP][UE_id].rnti,
-      current_rnti);
-      // LAD
-      LOG_W(MAC, "[MAC] UE_id == -1 and sduP == NULL : first_rb = %d ; scheduled_ul_bytes = %d ; TBS_UL = %d\n",
-      first_rb,
-      UE_list->UE_template[CC_idP][UE_id].scheduled_ul_bytes,
-      UE_list->UE_template[CC_idP][UE_id].TBS_UL[harq_pid]);
-      // LAD
-      LOG_W(MAC, "[MAC] [RAPROC] msg3_subframe = %d ; msg3_frame = %d\n",
-      ra[RA_id].Msg3_subframe,
-      ra[RA_id].Msg3_frame);
-      */
+      /* Prepare handling of retransmission */
       get_Msg3allocret(&mac->common_channels[CC_idP],
                        ra[RA_id].Msg3_subframe,
@@ -360,13 +296,6 @@ rx_sdu(const module_id_t enb_mod_idP,
                        &ra[RA_id].Msg3_frame,
                        &ra[RA_id].Msg3_subframe);
-      // LAD
-      /*
-      LOG_W(MAC, "[MAC] [RAPROC] After update: msg3_subframe = %d ; msg3_frame = %d\n",
-      ra[RA_id].Msg3_subframe,
-      ra[RA_id].Msg3_frame);
-      */
       add_msg3(enb_mod_idP, CC_idP, &ra[RA_id], frameP, subframeP);
     }
@@ -422,17 +351,6 @@ rx_sdu(const module_id_t enb_mod_idP,
     UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid] = 0;
   }

-  // LAD
-  /*
-  LOG_E(MAC, "Step 5\n");
-  LOG_W(MAC, "[MAC] [UEINFO2] UE_id = %d ; RNTI_ue_template = %x ; RNTI_sdu = %x ; frame = %d ; subframe = %d\n",
-  UE_id,
-  UE_list->UE_template[CC_idP][UE_id].rnti,
-  current_rnti,
-  frameP,
-  subframeP);
-  */
   /* Control element */
   for (int i = 0; i < num_ce; i++) {
     T(T_ENB_MAC_UE_UL_CE,
@@ -1278,44 +1196,27 @@ schedule_ulsch(module_id_t module_idP,
       LTE_DL_FRAME_PARMS *frame_parms = &(RC.eNB[module_idP][CC_id]->frame_parms);
       RA_t *ra_ptr = cc->ra;
-      /* Louis-Adrien: Only set for FDD (for the moment)
-       * Hard coded for prach-ConfigIndex = 0 and prach-Freqoffset = 2
-       * ToDo: The PRACH resources should be added with modularity (here?)
-       */
+      /* From Louis-Adrien to François:
+       * The comment bloc below is to configure with a command line.
+       * I took it from the equivalent part in the fairRR scheduler (around line 2578 in eNB_scheduler_fairRR.c).
+       * As said in the meeting, it seems to work only for small TBS.
+       * The cause of false RA still present with this fix is to investigate.
+       *
+       * Note: in the get_prach_prb_offset() function below, the last argument is frameP in eNB_scheduler_fairRR.c
+       * I think it should be sched_frame instead. This parameter has only impacts in case TDD and preamble format 4.
+       * To confirm.
+       */
+      /*
+      if (cc->tdd_Config == NULL) { // FDD
+        if (((sched_frame %2) == 0) && sched_subframe == 1) { // RACH frame and subframe
+          if (first_rb[CC_id] < 8) {
+            n_rb_ul_tab = to_prb(cc->ul_Bandwidth); // return total number of PRB
+            if (n_rb_ul_tab >= 8) {
+              first_rb[CC_id] = 8;
+            } else {
+              return;
+            }
+          }
+        }
+      }
+      */
+      /*
       if (is_prach_subframe(frame_parms, sched_frame, sched_subframe) == 1) {
         start_rb = get_prach_prb_offset(frame_parms,
                                         frame_parms->prach_config_common.prach_ConfigInfo.prach_ConfigIndex,
                                         frame_parms->prach_config_common.prach_ConfigInfo.prach_FreqOffset,
                                         0, // tdd_mapindex
-                                        frameP); // Nf --> shouldn't it be sched_frame ???
+                                        sched_frame); // Nf
         first_rb[CC_id] = start_rb + nb_rb;
-        // LAD
-        /*
-        LOG_W(MAC, "[MAC] Config Index = %d ; Freq_offset = %d ; first_rb = %d ; subframe = %d ; sched_subframe = %d\n",
-        frame_parms->prach_config_common.prach_ConfigInfo.prach_ConfigIndex,
-        frame_parms->prach_config_common.prach_ConfigInfo.prach_FreqOffset,
-        first_rb[CC_id],
-        subframeP,
-        sched_subframe);
-        */
       }
+      */

      /*
       * Check if RA (Msg3) is active in this subframeP, if so skip the PRB used for Msg3
@@ -1328,7 +1229,7 @@ schedule_ulsch(module_id_t module_idP,
        first_rb[CC_id] = ra_ptr->msg3_first_rb + ra_ptr->msg3_nb_rb;
      }

      /* Louis-Adrien: I couldn't find an interdiction of multiple Msg3 scheduling
-      * on the same resources. Also the performance improvement of breaking is low,
+      * on the same time resources. Also the performance improvement of breaking is low,
       * since we will loop until the end, most of the time.
       * I'm letting the break as a reminder, in case of misunderstanding the spec.
       */
openair2/LAYER2/MAC/mac_proto.h
@@ -444,7 +444,7 @@ uint8_t get_aggregation(uint8_t bw_index, uint8_t cqi, uint8_t dci_fmt);
 int8_t find_active_UEs_with_traffic(module_id_t module_idP);
-void init_CCE_table(int module_idP, int CC_idP);
+void init_CCE_table(int *CCE_table);
 int get_nCCE_offset(int *CCE_table,
                     const unsigned char L,
@@ -726,7 +726,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, int slice_idx, int fr
                                    uint16_t *first_rb);
 void store_ulsch_buffer(module_id_t module_idP, int frameP, sub_frame_t subframeP);
-void sort_ue_ul(module_id_t module_idP, int frameP, sub_frame_t subframeP);
+void sort_ue_ul(module_id_t module_idP, int slice_idx, int frameP, sub_frame_t subframeP, rnti_t *rntiTable);
 void assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP, sub_frame_t subframeP, uint16_t *first_rb);
 void adjust_bsr_info(int buffer_occupancy,
                      uint16_t TBS,
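The new sort_ue_ul() prototype expects the caller to hand in an RNTI array that the function fills (the pre_processor.c hunks below do this with rntiTable[MAX_MOBILES_PER_ENB]). The following is a hedged, self-contained illustration of that calling convention only; the typedefs are plain-C stand-ins and the stub body is not the real implementation.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the OAI typedefs used by the new prototype. */
    typedef uint8_t module_id_t;
    typedef uint8_t sub_frame_t;
    typedef uint16_t rnti_t;
    #define MAX_MOBILES_PER_ENB 16

    /* Same shape as the new declaration in mac_proto.h; the body is a stub. */
    static void sort_ue_ul(module_id_t module_idP, int slice_idx, int frameP,
                           sub_frame_t subframeP, rnti_t *rntiTable)
    {
      (void)module_idP; (void)slice_idx; (void)frameP; (void)subframeP;
      rntiTable[0] = 0x1234;   /* the real code stores UE_RNTI() per UE index */
    }

    int main(void)
    {
      rnti_t rntiTable[MAX_MOBILES_PER_ENB];   /* caller-owned, filled by sort_ue_ul */
      sort_ue_ul(0, 0, 0, 0, rntiTable);
      printf("rntiTable[0] = 0x%x\n", (unsigned)rntiTable[0]);
      return 0;
    }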
openair2/LAYER2/MAC/pre_processor.c
@@ -1618,14 +1618,13 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
   int N_RB_UL = 0;
   uint16_t available_rbs, first_rb_offset;
+  rnti_t rntiTable[MAX_MOBILES_PER_ENB];    // Rnti array => Add SSR 12-2018
+  bool continueTable[MAX_MOBILES_PER_ENB];  // Loop continue flag array => Add SSR 12-2018
+  bool sliceMember;                         // Slice membership flag => Add SSR 12-2018

-  LOG_D(MAC, "In ulsch_preprocessor: assign max mcs min rb\n");
+  // sort ues
+  LOG_D(MAC, "In ulsch_preprocessor: sort ue\n");
+  sort_ue_ul(module_idP, slice_idx, frameP, subframeP, rntiTable);
   // maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB
+  LOG_D(MAC, "In ulsch_preprocessor: assign max mcs min rb\n");
   assign_max_mcs_min_rb(module_idP, slice_idx, frameP, subframeP, first_rb);
-  LOG_D(MAC, "In ulsch_preprocessor: sort ue\n");
-  // sort ues
-  sort_ue_ul(module_idP, frameP, subframeP);
   // we need to distribute RBs among UEs
   // step1: reset the vars
   uint8_t CC_nb = (uint8_t) RC.nb_mac_CC[module_idP];
@@ -1639,21 +1638,7 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
   // Step 1.5: Calculate total_ue_count
   for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
+    // Calculate continue condition
+    /*
+    if (UE_RNTI(module_idP, UE_id) == NOT_A_RNTI)
+    continue;
+    if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
+    continue;
+    if (!ue_ul_slice_membership(module_idP, UE_id, slice_idx))
+    continue;
+    */
+    rntiTable[UE_id] = UE_RNTI(module_idP, UE_id);
+    sliceMember = ue_ul_slice_membership(module_idP, UE_id, slice_idx);
+    continueTable[UE_id] = (rntiTable[UE_id] == NOT_A_RNTI
+                            || UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1
+                            || !sliceMember);

     // This is not the actual CC_id in the list
+    if (sliceMember) {
       for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
         CC_id = UE_list->ordered_ULCCids[n][UE_id];
         UE_template = &UE_list->UE_template[CC_id][UE_id];
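The hunk above evaluates the per-UE skip condition once and stores it in continueTable[UE_id], so the later step 2/3/4 loops only test the cached flag instead of re-running the RNTI, sync and slice-membership checks. Below is a compact stand-alone sketch of that precompute-then-reuse shape; the three predicates are dummies standing in for UE_RNTI(), the out-of-sync flag and ue_ul_slice_membership().

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_UE 8

    /* Dummy predicates standing in for the real per-UE checks. */
    static bool has_rnti(int ue)    { return ue != 3; }
    static bool out_of_sync(int ue) { return ue == 5; }
    static bool in_slice(int ue)    { return ue % 2 == 0; }

    int main(void)
    {
      bool continueTable[MAX_UE];

      /* step 1.5: evaluate the skip condition once per UE */
      for (int ue = 0; ue < MAX_UE; ue++)
        continueTable[ue] = (!has_rnti(ue) || out_of_sync(ue) || !in_slice(ue));

      /* later steps only test the cached flag */
      for (int ue = 0; ue < MAX_UE; ue++) {
        if (continueTable[ue]) continue;
        printf("scheduling UE %d\n", ue);
      }
      return 0;
    }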
@@ -1663,21 +1648,23 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
         }
       }
     }
   }

-  LOG_D(MAC, "In ulsch_preprocessor: step2\n");
   // step 2: calculate the average rb per UE
+  LOG_D(MAC, "In ulsch_preprocessor: step2\n");

   for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
+    if (continueTable[UE_id]) continue;
+    // if (continueTable[UE_id]) continue;
     LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x\n", UE_id,
+          rntiTable[UE_id]);

     for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
       // This is the actual CC_id in the list
       CC_id = UE_list->ordered_ULCCids[n][UE_id];
       LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x CCid %d\n", UE_id, rntiTable[UE_id], CC_id);
       /*
       if((mac_xface->get_nCCE_max(module_idP,CC_id,3,subframeP) - nCCE_to_be_used[CC_id]) > (1<<aggregation)) {
       nCCE_to_be_used[CC_id] = nCCE_to_be_used[CC_id] + (1<<aggregation);
@@ -1688,8 +1675,8 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
       ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx] = nb_rbs_allowed_slice(sli->ul[slice_idx].pct, N_RB_UL);
       first_rb_offset = UE_list->first_rb_offset[CC_id][slice_idx];
       available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx], N_RB_UL - first_rb[CC_id] - first_rb_offset);

       if (available_rbs < 0)
         available_rbs = 0;
@@ -1702,21 +1689,27 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
         average_rbs_per_user[CC_id] = (uint16_t) floor(available_rbs / total_ue_count[CC_id]);
       } else {
         average_rbs_per_user[CC_id] = 1;
         LOG_W(MAC, "[eNB %d] frame %d subframe %d: UE %d CC %d: can't get average rb per user (should not be here)\n",
               module_idP, frameP, subframeP, UE_id, CC_id);
       }

       if (total_ue_count[CC_id] > 0) {
         LOG_D(MAC, "[eNB %d] Frame %d subframe %d: total ue to be scheduled %d\n",
               module_idP, frameP, subframeP, total_ue_count[CC_id]);
       }
     }
   }

   // step 3: assigne RBS
   for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
+    if (continueTable[UE_id]) continue;
+    // if (continueTable[UE_id]) continue;

     for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
       // This is the actual CC_id in the list
@@ -1735,16 +1728,18 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
       }

       total_allocated_rbs[CC_id] += nb_allocated_rbs[CC_id][UE_id];
       LOG_D(MAC, "In ulsch_preprocessor: assigning %d RBs for UE %d/%x CCid %d, harq_pid %d\n",
             nb_allocated_rbs[CC_id][UE_id], UE_id, rntiTable[UE_id], CC_id,
             harq_pid);
     }
   }

   // step 4: assigne the remaining RBs and set the pre_allocated rbs accordingly
   for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
+    if (continueTable[UE_id]) continue;
+    // if (continueTable[UE_id]) continue;

     ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
@@ -1761,9 +1756,9 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
         total_remaining_rbs[CC_id]++;
       }

-      while ((UE_template->pre_allocated_nb_rb_ul[slice_idx] > 0)
-             && (nb_allocated_rbs[CC_id][UE_id] < UE_template->pre_allocated_nb_rb_ul[slice_idx])
-             && (total_remaining_rbs[CC_id] > 0)) {
+      while (UE_template->pre_allocated_nb_rb_ul[slice_idx] > 0
+             && nb_allocated_rbs[CC_id][UE_id] < UE_template->pre_allocated_nb_rb_ul[slice_idx]
+             && total_remaining_rbs[CC_id] > 0) {
         nb_allocated_rbs[CC_id][UE_id] = cmin(nb_allocated_rbs[CC_id][UE_id] + 1, UE_template->pre_allocated_nb_rb_ul[slice_idx]);
         total_remaining_rbs[CC_id]--;
         total_allocated_rbs[CC_id]++;
@@ -1771,9 +1766,13 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
       UE_template->pre_allocated_nb_rb_ul[slice_idx] = nb_allocated_rbs[CC_id][UE_id];
       LOG_D(MAC, "******************UL Scheduling Information for UE%d CC_id %d ************************\n",
             UE_id, CC_id);
       LOG_D(MAC, "[eNB %d] total RB allocated for UE%d CC_id %d = %d\n",
             module_idP, UE_id, CC_id, UE_template->pre_allocated_nb_rb_ul[slice_idx]);
     }
   }
@@ -1781,38 +1780,26 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP,
 }

 void
 assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP,
                       sub_frame_t subframeP, uint16_t *first_rb)
 {
   int i;
   uint16_t n, UE_id;
   uint8_t CC_id;
   rnti_t rnti = -1;
   int mcs;
   int rb_table_index = 0, tbs, tx_power;
   eNB_MAC_INST *eNB = RC.mac[module_idP];
   UE_list_t *UE_list = &eNB->UE_list;
-  slice_info_t *sli = &RC.mac[module_idP]->slice_info;
+  slice_info_t *sli = &eNB->slice_info;
   UE_TEMPLATE *UE_template;
   UE_sched_ctrl *ue_sched_ctl;
   int Ncp;
   int N_RB_UL;
   int first_rb_offset, available_rbs;

-  for (i = 0; i < MAX_MOBILES_PER_ENB; i++) {
-    if (UE_list->active[i] != TRUE)
-      continue;
-
-    rnti = UE_RNTI(module_idP, i);
-
-    if (rnti == NOT_A_RNTI)
-      continue;
-
-    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
-      continue;
-
-    if (!ue_ul_slice_membership(module_idP, i, slice_idx))
-      continue;
-
+  for (i = UE_list->head_ul; i >= 0; i = UE_list->next_ul[i]) {
     if (UE_list->UE_sched_ctrl[i].phr_received == 1) {
       /* if we've received the power headroom information the UE, we can go to
        * maximum mcs */
@@ -1827,21 +1814,21 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP,
     for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
       // This is the actual CC_id in the list
      CC_id = UE_list->ordered_ULCCids[n][UE_id];
       AssertFatal(CC_id < RC.nb_mac_CC[module_idP], "CC_id %u should be < %u, loop n=%u < numactiveULCCs[%u]=%u",
                   CC_id, NFAPI_CC_MAX, n, UE_id,
                   UE_list->numactiveULCCs[UE_id]);
       UE_template = &UE_list->UE_template[CC_id][UE_id];
       UE_template->pre_assigned_mcs_ul = mcs;
       ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
-      Ncp = RC.mac[module_idP]->common_channels[CC_id].Ncp;
-      N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth);
+      Ncp = eNB->common_channels[CC_id].Ncp;
+      N_RB_UL = to_prb(eNB->common_channels[CC_id].ul_Bandwidth);
       ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx] = nb_rbs_allowed_slice(sli->ul[slice_idx].pct, N_RB_UL);
       int bytes_to_schedule = UE_template->estimated_ul_buffer - UE_template->scheduled_ul_bytes;

       if (bytes_to_schedule < 0)
         bytes_to_schedule = 0;

       int bits_to_schedule = bytes_to_schedule * 8;

       // if this UE has UL traffic
@@ -1851,9 +1838,7 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP,
         // fixme: set use_srs flag
         tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0);

-        while ((((UE_template->phr_info - tx_power) < 0) || (tbs > bits_to_schedule))
-               && (UE_template->pre_assigned_mcs_ul > 3)) {
+        while ((UE_template->phr_info - tx_power < 0 || tbs > bits_to_schedule)
+               && UE_template->pre_assigned_mcs_ul > 3) {
           // LOG_I(MAC,"UE_template->phr_info %d tx_power %d mcs %d\n", UE_template->phr_info,tx_power, mcs);
           UE_template->pre_assigned_mcs_ul--;
           tbs = get_TBS_UL(UE_template->pre_assigned_mcs_ul, rb_table[rb_table_index]) << 3;
@@ -1861,13 +1846,13 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP,
         }

         first_rb_offset = UE_list->first_rb_offset[CC_id][slice_idx];
         available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx], N_RB_UL - first_rb[CC_id] - first_rb_offset);

-        while ((tbs < bits_to_schedule)
-               && (rb_table[rb_table_index] < available_rbs)
-               && ((UE_template->phr_info - tx_power) > 0)
-               && (rb_table_index < 32)) {
+        while (tbs < bits_to_schedule
+               && rb_table[rb_table_index] < available_rbs
+               && UE_template->phr_info - tx_power > 0
+               && rb_table_index < 32) {
           rb_table_index++;
           tbs = get_TBS_UL(UE_template->pre_assigned_mcs_ul, rb_table[rb_table_index]) << 3;
           tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0);
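The loop above grows the RB allocation (rb_table_index) until the uplink TBS covers the buffered bits, the available RBs are used up, the power headroom runs out, or the table ends. A stand-alone sketch of that growth loop follows; get_TBS_UL() and estimate_ue_tx_power() are replaced by made-up stand-ins with arbitrary numbers, so only the control flow mirrors the diff.

    #include <stdio.h>

    /* Stand-ins for get_TBS_UL() / estimate_ue_tx_power(); the real functions
     * live in the OAI MAC/PHY code and are not reproduced here. */
    static int tbs_bits(int mcs, int nb_rb) { return mcs * nb_rb * 16; }
    static int tx_power_needed(int bits)    { return bits / 64; }

    int main(void)
    {
      const int rb_table[] = { 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 20, 25 };
      int rb_table_index = 0, mcs = 16, phr = 20, bits_to_schedule = 3000;
      int tbs = tbs_bits(mcs, rb_table[rb_table_index]);
      int tx_power = tx_power_needed(tbs);

      /* grow the RB allocation while the TBS is still short of the buffer,
       * power headroom allows it, and the table is not exhausted */
      while (tbs < bits_to_schedule && rb_table_index < 11 && phr - tx_power > 0) {
        rb_table_index++;
        tbs = tbs_bits(mcs, rb_table[rb_table_index]);
        tx_power = tx_power_needed(tbs);
      }

      printf("chosen nb_rb = %d, tbs = %d bits\n", rb_table[rb_table_index], tbs);
      return 0;
    }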
@@ -1884,9 +1869,12 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP,
       UE_template->pre_allocated_rb_table_index_ul = rb_table_index;
       UE_template->pre_allocated_nb_rb_ul[slice_idx] = rb_table[rb_table_index];
       LOG_D(MAC, "[eNB %d] frame %d subframe %d: for UE %d CC %d: pre-assigned mcs %d, pre-allocated rb_table[%d]=%d RBs (phr %d, tx power %d)\n",
             module_idP, frameP, subframeP, UE_id, CC_id,
             UE_template->pre_assigned_mcs_ul,
             UE_template->pre_allocated_rb_table_index_ul,
             UE_template->pre_allocated_nb_rb_ul[slice_idx],
@@ -1968,37 +1956,39 @@ static int ue_ul_compare(const void *_a, const void *_b, void *_params) {
   return 0;
 }

-void sort_ue_ul(module_id_t module_idP, int frameP, sub_frame_t subframeP)
+void sort_ue_ul(module_id_t module_idP, int slice_idx, int frameP, sub_frame_t subframeP, rnti_t *rntiTable)
 {
   int i;
   int list[MAX_MOBILES_PER_ENB];
   int list_size = 0;
-  int rnti;
   struct sort_ue_ul_params params = { module_idP, frameP, subframeP };
   UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;

   for (i = 0; i < MAX_MOBILES_PER_ENB; i++) {
-    if (UE_list->active[i] == FALSE)
-      continue;
-
-    if ((rnti = UE_RNTI(module_idP, i)) == NOT_A_RNTI)
-      continue;
-
-    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
-      continue;
-
-    list[list_size] = i;
-    list_size++;
+    rntiTable[i] = UE_RNTI(module_idP, i);
+
+    // Valid element and is not the actual CC_id in the list
+    if (UE_list->active[i] == TRUE
+        && rntiTable[i] != NOT_A_RNTI
+        && UE_list->UE_sched_ctrl[i].ul_out_of_sync != 1
+        && ue_ul_slice_membership(module_idP, i, slice_idx)) {
+      list[list_size++] = i;  // Add to list
+    }
   }

   qsort_r(list, list_size, sizeof(int), ue_ul_compare, &params);

-  if (list_size) {
+  if (list_size) {  // At mimimum one list element
     for (i = 0; i < list_size - 1; i++)
       UE_list->next_ul[list[i]] = list[i + 1];

     UE_list->next_ul[list[list_size - 1]] = -1;
     UE_list->head_ul = list[0];
-  } else {
+  } else {  // No element
     UE_list->head_ul = -1;
   }
 }
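After filtering and sorting, sort_ue_ul() threads the surviving UE indices into the next_ul / head_ul singly linked order that the preprocessor loops walk. Below is a minimal stand-alone sketch of that list-rebuild step only; the filter and comparator are trivialised and the example indices are invented.

    #include <stdio.h>

    #define MAX_UE 6

    int main(void)
    {
      int next_ul[MAX_UE];
      int head_ul = -1;
      int list[MAX_UE];
      int list_size = 0;

      /* pretend these indices survived the active/RNTI/sync/slice filter, already sorted */
      int sorted[] = { 4, 1, 3 };
      for (int i = 0; i < 3; i++)
        list[list_size++] = sorted[i];

      if (list_size) {                       /* at minimum one list element */
        for (int i = 0; i < list_size - 1; i++)
          next_ul[list[i]] = list[i + 1];
        next_ul[list[list_size - 1]] = -1;
        head_ul = list[0];
      } else {
        head_ul = -1;                        /* no element */
      }

      for (int ue = head_ul; ue >= 0; ue = next_ul[ue])
        printf("UE %d\n", ue);               /* prints 4, 1, 3 */
      return 0;
    }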