wangwenhui / OpenXG-RAN · Commits

Commit fc4d2fb2
Authored Mar 27, 2018 by Niccolò Iardella

    Add sorting policy to UL pre-processor

Parent: 73dbcb1a
Showing 6 changed files with 383 additions and 262 deletions (+383 -262)
Files changed:
  openair2/COMMON/platform_types.h            +8    -0
  openair2/LAYER2/MAC/defs.h                  +1    -0
  openair2/LAYER2/MAC/eNB_scheduler_ulsch.c   +7    -0
  openair2/LAYER2/MAC/eNB_scheduler_ulsch.h   +4    -0
  openair2/LAYER2/MAC/pre_processor.c         +343  -261
  openair2/LAYER2/MAC/proto.h                 +20   -1
openair2/COMMON/platform_types.h

@@ -110,6 +110,14 @@ typedef enum {
   CR_NUM = 6
 } sorting_criterion_t;
 
+typedef enum {
+  CRU_ROUND = 0,
+  CRU_BUF = 1,
+  CRU_BTS = 2,
+  CRU_MCS = 3,
+  CRU_NUM = 4
+} sorting_criterion_ul_t;
+
 typedef enum {
   POL_FAIR = 0,
   POL_GREEDY = 1,
...
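The new sorting_criterion_ul_t values are the four keys the uplink comparator introduced later in this commit can use: HARQ retransmission round (CRU_ROUND), the LCGID0 buffer report (CRU_BUF), the estimated bytes still to schedule (CRU_BTS) and the pre-assigned uplink MCS (CRU_MCS). A per-slice policy word packs one criterion per hex nibble, highest priority in the most significant nibble, so the default 0x0123 used elsewhere in the commit means "round, then buffer, then bytes-to-schedule, then MCS". A minimal standalone sketch (not part of the commit) of how such a word unpacks, using the same shift/mask arithmetic as decode_sorting_policy_ul in pre_processor.c:

/* Illustrative sketch only: unpack a nibble-packed UL sorting policy word.
 * CRU_NUM criteria, one per 4-bit nibble, most significant nibble first. */
#include <stdint.h>
#include <stdio.h>

enum { CRU_ROUND = 0, CRU_BUF = 1, CRU_BTS = 2, CRU_MCS = 3, CRU_NUM = 4 };

int main(void)
{
  uint32_t policy = 0x0123;          /* default used by this commit */
  uint32_t mask = 0x0000000F;

  for (int i = 0; i < CRU_NUM; ++i) {
    uint16_t criterion = (uint16_t) (policy >> 4 * (CRU_NUM - 1 - i) & mask);
    printf("priority %d -> criterion %d\n", i, criterion);
    /* prints 0 (CRU_ROUND), 1 (CRU_BUF), 2 (CRU_BTS), 3 (CRU_MCS) */
  }
  return 0;
}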
openair2/LAYER2/MAC/defs.h

@@ -982,6 +982,7 @@ typedef struct {
   /// Sorting criteria for the UE list in the MAC preprocessor
   uint16_t sorting_criteria[MAX_NUM_SLICES][CR_NUM];
+  uint16_t sorting_criteria_ul[MAX_NUM_SLICES][CR_NUM];
 } UE_list_t;
...
openair2/LAYER2/MAC/eNB_scheduler_ulsch.c

@@ -1009,6 +1009,13 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP,
       }
     }
 
+    // Check for new sorting policy
+    if (slice_sorting_uplink_current[i] != slice_sorting_uplink[i]) {
+      LOG_N(MAC, "[eNB %d][SLICE %d][UL] frame %d subframe %d: UE sorting policy has changed (%x-->%x)\n",
+            module_idP, i, frameP, subframeP,
+            slice_sorting_uplink_current[i], slice_sorting_uplink[i]);
+      slice_sorting_uplink_current[i] = slice_sorting_uplink[i];
+    }
+
     // check if a new scheduler, and log the console
     if (update_ul_scheduler_current[i] != update_ul_scheduler[i]) {
       LOG_N(MAC, "[eNB %d][SLICE %d][UL] frame %d subframe %d: UL scheduler for this slice is updated: %s\n",
...
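Both slice_sorting_uplink[] and slice_sorting_uplink_current[] are plain per-slice globals (initialised in eNB_scheduler_ulsch.h below), so switching a slice's UL sorting order at run time only means overwriting the former; the next pass of schedule_ulsch() notices the mismatch, logs it and latches the new value. A minimal sketch under that assumption, with a hypothetical helper name:

/* Illustrative only: set_ul_sorting_policy() is a hypothetical helper, not part
 * of this commit. It just overwrites the per-slice global from the MAC headers;
 * schedule_ulsch() picks the change up on its next invocation. */
#include <stdint.h>

extern uint32_t slice_sorting_uplink[MAX_NUM_SLICES];  /* from eNB_scheduler_ulsch.h */

static void set_ul_sorting_policy(int slice_id, uint32_t policy)
{
  slice_sorting_uplink[slice_id] = policy;  /* e.g. 0x2301: bytes-to-schedule first */
}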
openair2/LAYER2/MAC/eNB_scheduler_ulsch.h

@@ -52,6 +52,10 @@ float total_slice_percentage_current_uplink = 0;
 int slice_maxmcs_uplink[MAX_NUM_SLICES] = { 20, 20, 20, 20 };
 int slice_maxmcs_current_uplink[MAX_NUM_SLICES] = { 20, 20, 20, 20 };
 
+// The lists of criteria that enforce the sorting policies of the slices
+uint32_t slice_sorting_uplink[MAX_NUM_SLICES] = { 0x0123, 0x0123, 0x0123, 0x0123 };
+uint32_t slice_sorting_uplink_current[MAX_NUM_SLICES] = { 0x0123, 0x0123, 0x0123, 0x0123 };
+
 /*resource blocks allowed*/
 uint16_t nb_rbs_allowed_slice_uplink[MAX_NUM_CCs][MAX_NUM_SLICES];
 
 /*Slice Update */
...
openair2/LAYER2/MAC/pre_processor.c

@@ -61,6 +61,7 @@ extern float slice_percentage[MAX_NUM_SLICES];
 extern float slice_percentage_uplink[MAX_NUM_SLICES];
 extern int slice_position[MAX_NUM_SLICES*2];
 extern uint32_t slice_sorting[MAX_NUM_SLICES];
+extern uint32_t slice_sorting_uplink[MAX_NUM_SLICES];
 extern int slice_accounting[MAX_NUM_SLICES];
 extern int slice_maxmcs[MAX_NUM_SLICES];
 extern int slice_maxmcs_uplink[MAX_NUM_SLICES];
...
@@ -176,6 +177,9 @@ store_dlsch_buffer(module_id_t Mod_id,
   }
 }
 
+int cqi2mcs(int cqi) {
+  return cqi_to_mcs[cqi];
+}
 
 // This function returns the estimated number of RBs required by each UE for downlink scheduling
 void
...
@@ -471,13 +475,32 @@ void decode_sorting_policy(module_id_t Mod_idP, slice_id_t slice_id) {
     criterion = (uint16_t) (policy >> 4 * (CR_NUM - 1 - i) & mask);
     if (criterion >= CR_NUM) {
       LOG_W(MAC, "Invalid criterion in slice %d policy, revert to default policy\n", slice_id);
-      slice_sorting[slice_id] = 0x1234;
+      slice_sorting[slice_id] = 0x12345;
       break;
     }
     UE_list->sorting_criteria[slice_id][i] = criterion;
   }
 }
 
+void decode_sorting_policy_ul(module_id_t Mod_idP, slice_id_t slice_id) {
+  int i;
+  UE_list_t *UE_list = &RC.mac[Mod_idP]->UE_list;
+  uint32_t policy = slice_sorting_uplink[slice_id];
+  uint32_t mask = 0x0000000F;
+  uint16_t criterion;
+
+  for (i = 0; i < CRU_NUM; ++i) {
+    criterion = (uint16_t) (policy >> 4 * (CRU_NUM - 1 - i) & mask);
+    if (criterion >= CRU_NUM) {
+      LOG_W(MAC, "Invalid criterion in uplink slice %d policy, revert to default policy\n", slice_id);
+      slice_sorting_uplink[slice_id] = 0x123;
+      break;
+    }
+    UE_list->sorting_criteria_ul[slice_id][i] = criterion;
+  }
+}
+
 void decode_slice_positioning(module_id_t Mod_idP,
                               slice_id_t slice_id,
                               uint8_t slice_allocation_mask[MAX_NUM_CCs][N_RBG_MAX]) {
...
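decode_sorting_policy_ul() fills UE_list->sorting_criteria_ul[slice_id][], which the rewritten ue_ul_compare() later walks in priority order. The fallback path is worth spelling out; an illustrative trace (numbers chosen here, not taken from the commit):

/* Illustrative trace: what happens for an invalid policy word.
 * Assume policy = 0x0523 for some slice; CRU_NUM is 4.
 *
 *   i = 0: nibble = 0            -> sorting_criteria_ul[slice][0] = CRU_ROUND
 *   i = 1: nibble = 5 >= CRU_NUM -> LOG_W(...), slice_sorting_uplink[slice] = 0x123, break
 *
 * The remaining entries keep whatever they held before; the corrected default
 * 0x123 is only decoded on the next call to decode_sorting_policy_ul(),
 * i.e. on the next sort_ue_ul() run. */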
@@ -1688,19 +1711,22 @@ ulsch_scheduler_pre_processor(module_id_t module_idP,
                               sub_frame_t subframeP,
                               uint16_t *first_rb)
 {
-  int16_t i;
-  uint16_t UE_id, n, r;
-  uint8_t CC_id, harq_pid;
-  uint16_t nb_allocated_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
-           total_allocated_rbs[MAX_NUM_CCs],
-           average_rbs_per_user[MAX_NUM_CCs];
-  int16_t total_remaining_rbs[MAX_NUM_CCs];
-  uint16_t total_ue_count[MAX_NUM_CCs];
-  rnti_t rnti = -1;
-  UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;
-  UE_TEMPLATE *UE_template = 0;
-  UE_sched_ctrl *ue_sched_ctl;
-  int N_RB_UL = 0;
+  uint16_t UE_id, n;
+  uint8_t CC_id;
+  uint16_t total_ue_count[MAX_NUM_CCs];
+  uint16_t total_allocated_rbs[MAX_NUM_CCs];
+  uint16_t nb_allocated_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
+  UE_TEMPLATE *UE_template;
+  UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;
+
+  // Reset
+  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
+    total_allocated_rbs[CC_id] = 0;
+    total_ue_count[CC_id] = 0;
+  }
 
   LOG_D(MAC, "In ulsch_preprocessor: assign max mcs min rb\n");
 
   // maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB
...
@@ -1708,178 +1734,45 @@ ulsch_scheduler_pre_processor(module_id_t module_idP,
   LOG_D(MAC, "In ulsch_preprocessor: sort ue\n");
   // sort ues
-  sort_ue_ul(module_idP, frameP, subframeP);
-
-  // we need to distribute RBs among UEs
-  // step1: reset the vars
-  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
-    total_allocated_rbs[CC_id] = 0;
-    total_remaining_rbs[CC_id] = 0;
-    average_rbs_per_user[CC_id] = 0;
-    total_ue_count[CC_id] = 0;
-  }
-
-  // Step 1.5: Calculate total_ue_count
-  for (i = UE_list->head_ul; i >= 0; i = UE_list->next_ul[i]) {
-    for (n = 0; n < UE_list->numactiveULCCs[i]; n++) {
-      // This is the actual CC_id in the list
-      CC_id = UE_list->ordered_ULCCids[n][i];
-      UE_template = &UE_list->UE_template[CC_id][i];
-      if (!ue_slice_membership(i, slice_id)) continue;
-      if (UE_template->pre_allocated_nb_rb_ul[slice_id] > 0) {
-        total_ue_count[CC_id] += 1;
-      }
-    }
-  }
-
-  LOG_D(MAC, "In ulsch_preprocessor: step2\n");
-  // step 2: calculate the average rb per UE
-  for (i = UE_list->head_ul; i >= 0; i = UE_list->next_ul[i]) {
-    rnti = UE_RNTI(module_idP, i);
-    UE_id = i;
-    if (rnti == NOT_A_RNTI) continue;
-    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue;
-    if (!ue_slice_membership(UE_id, slice_id)) continue;
-
-    LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x\n", UE_id, rnti);
-    for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
-      // This is the actual CC_id in the list
-      CC_id = UE_list->ordered_ULCCids[n][UE_id];
-      LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x CCid %d\n", UE_id, rnti, CC_id);
-      average_rbs_per_user[CC_id] = 0;
-      /*
-         if((mac_xface->get_nCCE_max(module_idP,CC_id,3,subframeP) - nCCE_to_be_used[CC_id]) > (1<<aggregation)) {
-         nCCE_to_be_used[CC_id] = nCCE_to_be_used[CC_id] + (1<<aggregation);
-         max_num_ue_to_be_scheduled+=1;
-         } */
-
-      N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth);
-      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
-      ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] =
-          nb_rbs_allowed_slice(slice_percentage_uplink[slice_id], N_RB_UL);
-
-      if (total_ue_count[CC_id] == 0) {
-        average_rbs_per_user[CC_id] = 0;
-      } else if (total_ue_count[CC_id] == 1) {
-        // increase the available RBs, special case,
-        average_rbs_per_user[CC_id] =
-            ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id] + 1;
-      } else if (total_ue_count[CC_id] <=
-                 (ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id])) {
-        average_rbs_per_user[CC_id] =
-            (uint16_t) floor((ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id]) / total_ue_count[CC_id]);
-      } else {
-        average_rbs_per_user[CC_id] = 1;
-        LOG_W(MAC, "[eNB %d] frame %d subframe %d: UE %d CC %d: can't get average rb per user (should not be here)\n",
-              module_idP, frameP, subframeP, UE_id, CC_id);
-      }
-      if (total_ue_count[CC_id] > 0)
-        LOG_D(MAC, "[eNB %d] Frame %d subframe %d: total ue to be scheduled %d\n",
-              module_idP, frameP, subframeP, total_ue_count[CC_id]);
-    }
-  }
-
-  // step 3: assigne RBS
-  for (i = UE_list->head_ul; i >= 0; i = UE_list->next_ul[i]) {
-    rnti = UE_RNTI(module_idP, i);
-    if (rnti == NOT_A_RNTI) continue;
-    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue;
-    if (!ue_slice_membership(i, slice_id)) continue;
-    UE_id = i;
-
-    for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
-      // This is the actual CC_id in the list
-      CC_id = UE_list->ordered_ULCCids[n][UE_id];
-      UE_template = &UE_list->UE_template[CC_id][UE_id];
-      harq_pid = subframe2harqpid(&RC.mac[module_idP]->common_channels[CC_id], frameP, subframeP);
-      // mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,frameP,subframeP,&harq_pid,&round,openair_harq_UL);
-
-      if (UE_list->UE_sched_ctrl[UE_id].round_UL[CC_id] > 0) {
-        nb_allocated_rbs[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb_ul[harq_pid];
-      } else {
-        nb_allocated_rbs[CC_id][UE_id] =
-            cmin(UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[slice_id], average_rbs_per_user[CC_id]);
-      }
-
-      total_allocated_rbs[CC_id] += nb_allocated_rbs[CC_id][UE_id];
-      LOG_D(MAC, "In ulsch_preprocessor: assigning %d RBs for UE %d/%x CCid %d, harq_pid %d\n",
-            nb_allocated_rbs[CC_id][UE_id], UE_id, rnti, CC_id, harq_pid);
-    }
-  }
-
-  // step 4: assigne the remaining RBs and set the pre_allocated rbs accordingly
-  for (r = 0; r < 2; r++) {
-    for (i = UE_list->head_ul; i >= 0; i = UE_list->next_ul[i]) {
-      rnti = UE_RNTI(module_idP, i);
-      if (rnti == NOT_A_RNTI) continue;
-      if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue;
-      if (!ue_slice_membership(i, slice_id)) continue;
-      UE_id = i;
-      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
-
-      for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
-        // This is the actual CC_id in the list
-        CC_id = UE_list->ordered_ULCCids[n][UE_id];
-        UE_template = &UE_list->UE_template[CC_id][UE_id];
-        total_remaining_rbs[CC_id] =
-            ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id] - total_allocated_rbs[CC_id];
-
-        if (total_ue_count[CC_id] == 1) {
-          total_remaining_rbs[CC_id] += 1;
-        }
-
-        if (r == 0) {
-          while ((UE_template->pre_allocated_nb_rb_ul[slice_id] > 0) &&
-                 (nb_allocated_rbs[CC_id][UE_id] < UE_template->pre_allocated_nb_rb_ul[slice_id]) &&
-                 (total_remaining_rbs[CC_id] > 0)) {
-            nb_allocated_rbs[CC_id][UE_id] =
-                cmin(nb_allocated_rbs[CC_id][UE_id] + 1, UE_template->pre_allocated_nb_rb_ul[slice_id]);
-            total_remaining_rbs[CC_id]--;
-            total_allocated_rbs[CC_id]++;
-          }
-        } else {
-          UE_template->pre_allocated_nb_rb_ul[slice_id] = nb_allocated_rbs[CC_id][UE_id];
-          LOG_D(MAC, "******************UL Scheduling Information for UE%d CC_id %d ************************\n", UE_id, CC_id);
-          LOG_D(MAC, "[eNB %d] total RB allocated for UE%d CC_id %d = %d\n",
-                module_idP, UE_id, CC_id, UE_template->pre_allocated_nb_rb_ul[slice_id]);
-        }
-      }
-    }
-  }
+  sort_ue_ul(module_idP, slice_id, frameP, subframeP);
+
+  ulsch_scheduler_pre_processor_accounting(module_idP, slice_id, frameP, subframeP,
+                                           first_rb, total_ue_count, nb_allocated_rbs, total_allocated_rbs);
+
+  ulsch_scheduler_pre_processor_allocation(module_idP, slice_id, frameP, subframeP,
+                                           first_rb, total_ue_count, nb_allocated_rbs, total_allocated_rbs);
+
+  // LOG
+  for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
+    if (UE_RNTI(module_idP, UE_id) == NOT_A_RNTI) continue;
+    if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
+    if (!ue_slice_membership(UE_id, slice_id)) continue;
+
+    for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
+      // This is the actual CC_id in the list
+      CC_id = UE_list->ordered_ULCCids[n][UE_id];
+      UE_template = &UE_list->UE_template[CC_id][UE_id];
+      LOG_D(MAC, "******************UL Scheduling Information for UE%d CC_id %d ************************\n", UE_id, CC_id);
+      LOG_D(MAC, "[eNB %d] total RB allocated for UE%d CC_id %d = %d\n",
+            module_idP, UE_id, CC_id, UE_template->pre_allocated_nb_rb_ul[slice_id]);
+    }
+  }
...
@@ -2018,60 +1911,254 @@ assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP,
   }
 }
 
+void ulsch_scheduler_pre_processor_accounting(module_id_t module_idP, slice_id_t slice_id,
+                                              int frameP, sub_frame_t subframeP,
+                                              uint16_t *first_rb,
+                                              uint16_t total_ue_count[MAX_NUM_CCs],
+                                              uint16_t nb_allocated_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+                                              uint16_t total_allocated_rbs[MAX_NUM_CCs])
+{
+  int n;
+  int CC_id, UE_id, N_RB_UL, harq_pid;
+  uint16_t available_rbs;
+  UE_TEMPLATE *UE_template;
+  UE_sched_ctrl *ue_sched_ctl;
+  rnti_t rnti;
+  UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;
+  uint16_t average_rbs_per_user[MAX_NUM_CCs];
+
+  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
+    average_rbs_per_user[CC_id] = 0;
+  }
+
+  // Step 1.5: Calculate total_ue_count
+  for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
+    for (n = 0; n < UE_list->numactiveULCCs[UE_id]; ++n) {
+      // This is the actual CC_id in the list
+      CC_id = UE_list->ordered_ULCCids[n][UE_id];
+      UE_template = &UE_list->UE_template[CC_id][UE_id];
+      if (!ue_slice_membership(UE_id, slice_id)) continue;
+      if (UE_template->pre_allocated_nb_rb_ul[slice_id] > 0) {
+        total_ue_count[CC_id] += 1;
+      }
+    }
+  }
+
+  LOG_D(MAC, "In ulsch_preprocessor: step2\n");
+  // step 2: calculate the average rb per UE
+  for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
+    rnti = UE_RNTI(module_idP, UE_id);
+    if (rnti == NOT_A_RNTI) continue;
+    if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
+    if (!ue_slice_membership(UE_id, slice_id)) continue;
+
+    LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x\n", UE_id, rnti);
+    for (n = 0; n < UE_list->numactiveULCCs[UE_id]; ++n) {
+      // This is the actual CC_id in the list
+      CC_id = UE_list->ordered_ULCCids[n][UE_id];
+      LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x CCid %d\n", UE_id, rnti, CC_id);
+      average_rbs_per_user[CC_id] = 0;
+
+      N_RB_UL = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth);
+      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
+      ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] =
+          nb_rbs_allowed_slice(slice_percentage_uplink[slice_id], N_RB_UL);
+      available_rbs = ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id];
+
+      if (total_ue_count[CC_id] == 0) {
+        average_rbs_per_user[CC_id] = 0;
+      } else if (total_ue_count[CC_id] == 1) {
+        // increase the available RBs, special case,
+        average_rbs_per_user[CC_id] = (uint16_t) (available_rbs + 1);
+      } else if (total_ue_count[CC_id] <= available_rbs) {
+        average_rbs_per_user[CC_id] = (uint16_t) floor(available_rbs / total_ue_count[CC_id]);
+      } else {
+        average_rbs_per_user[CC_id] = 1;
+        LOG_W(MAC, "[eNB %d] frame %d subframe %d: UE %d CC %d: can't get average rb per user (should not be here)\n",
+              module_idP, frameP, subframeP, UE_id, CC_id);
+      }
+      if (total_ue_count[CC_id] > 0)
+        LOG_D(MAC, "[eNB %d] Frame %d subframe %d: total ue to be scheduled %d\n",
+              module_idP, frameP, subframeP, total_ue_count[CC_id]);
+    }
+  }
+
+  // step 3: assign RBS
+  for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
+    rnti = UE_RNTI(module_idP, UE_id);
+    if (rnti == NOT_A_RNTI) continue;
+    if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
+    if (!ue_slice_membership(UE_id, slice_id)) continue;
+
+    for (n = 0; n < UE_list->numactiveULCCs[UE_id]; ++n) {
+      // This is the actual CC_id in the list
+      CC_id = UE_list->ordered_ULCCids[n][UE_id];
+      // UE_template = &UE_list->UE_template[CC_id][UE_id];
+      harq_pid = subframe2harqpid(&RC.mac[module_idP]->common_channels[CC_id], frameP, subframeP);
+
+      if (UE_list->UE_sched_ctrl[UE_id].round_UL[CC_id] > 0) {
+        nb_allocated_rbs[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb_ul[harq_pid];
+      } else {
+        nb_allocated_rbs[CC_id][UE_id] =
+            cmin(UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[slice_id], average_rbs_per_user[CC_id]);
+      }
+
+      total_allocated_rbs[CC_id] += nb_allocated_rbs[CC_id][UE_id];
+      LOG_D(MAC, "In ulsch_preprocessor: assigning %d RBs for UE %d/%x CCid %d, harq_pid %d\n",
+            nb_allocated_rbs[CC_id][UE_id], UE_id, rnti, CC_id, harq_pid);
+    }
+  }
+}
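The accounting step spreads the RBs available to the slice on each CC (max_rbs_allowed_slice_uplink minus first_rb) over the UEs that pre-allocated anything. A worked example with illustrative numbers, assuming nb_rbs_allowed_slice() grants this slice 25 uplink PRBs and first_rb[CC_id] = 3, so available_rbs = 22:

/* Worked example (illustrative numbers, not from the commit):
 *
 *   total_ue_count = 0  -> average_rbs_per_user = 0
 *   total_ue_count = 1  -> average_rbs_per_user = 22 + 1 = 23        (special case)
 *   total_ue_count = 5  -> average_rbs_per_user = floor(22 / 5) = 4
 *   total_ue_count = 30 -> average_rbs_per_user = 1, plus the LOG_W
 *                          "can't get average rb per user" warning.
 */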
+void ulsch_scheduler_pre_processor_allocation(module_id_t module_idP, slice_id_t slice_id,
+                                              int frameP, sub_frame_t subframeP,
+                                              uint16_t *first_rb,
+                                              uint16_t total_ue_count[MAX_NUM_CCs],
+                                              uint16_t nb_allocated_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+                                              uint16_t total_allocated_rbs[MAX_NUM_CCs])
+{
+  int n;
+  int CC_id, UE_id;
+  uint16_t available_rbs;
+  UE_TEMPLATE *UE_template;
+  UE_sched_ctrl *ue_sched_ctl;
+  UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;
+  int16_t total_remaining_rbs[MAX_NUM_CCs];
+
+  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
+    total_remaining_rbs[CC_id] = 0;
+  }
+
+  // step 4: assigne the remaining RBs and set the pre_allocated rbs accordingly
+  for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
+    if (UE_RNTI(module_idP, UE_id) == NOT_A_RNTI) continue;
+    if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1) continue;
+    if (!ue_slice_membership(UE_id, slice_id)) continue;
+
+    ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
+    for (n = 0; n < UE_list->numactiveULCCs[UE_id]; n++) {
+      // This is the actual CC_id in the list
+      CC_id = UE_list->ordered_ULCCids[n][UE_id];
+      UE_template = &UE_list->UE_template[CC_id][UE_id];
+      available_rbs = ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_id] - first_rb[CC_id];
+      total_remaining_rbs[CC_id] = available_rbs - total_allocated_rbs[CC_id];
+
+      if (total_ue_count[CC_id] == 1) {
+        total_remaining_rbs[CC_id] += 1;
+      }
+
+      while ((UE_template->pre_allocated_nb_rb_ul[slice_id] > 0) &&
+             (nb_allocated_rbs[CC_id][UE_id] < UE_template->pre_allocated_nb_rb_ul[slice_id]) &&
+             (total_remaining_rbs[CC_id] > 0)) {
+        nb_allocated_rbs[CC_id][UE_id] =
+            cmin(nb_allocated_rbs[CC_id][UE_id] + 1, UE_template->pre_allocated_nb_rb_ul[slice_id]);
+        total_remaining_rbs[CC_id]--;
+        total_allocated_rbs[CC_id]++;
+      }
+
+      UE_template->pre_allocated_nb_rb_ul[slice_id] = nb_allocated_rbs[CC_id][UE_id];
+      LOG_D(MAC, "******************UL Scheduling Information for UE%d CC_id %d ************************\n", UE_id, CC_id);
+      LOG_D(MAC, "[eNB %d] total RB allocated for UE%d CC_id %d = %d\n",
+            module_idP, UE_id, CC_id, UE_template->pre_allocated_nb_rb_ul[slice_id]);
+    }
+  }
+}
 struct sort_ue_ul_params {
   int module_idP;
   int frameP;
   int subframeP;
+  int slice_id;
 };
 
-static int ue_ul_compare(const void *_a, const void *_b, void *_params)
-{
-  struct sort_ue_ul_params *params = _params;
-  UE_list_t *UE_list = &RC.mac[params->module_idP]->UE_list;
-
-  int UE_id1 = *(const int *) _a;
-  int UE_id2 = *(const int *) _b;
-
-  int rnti1 = UE_RNTI(params->module_idP, UE_id1);
-  int pCCid1 = UE_PCCID(params->module_idP, UE_id1);
-  int round1 = maxround(params->module_idP, rnti1, params->frameP, params->subframeP, 1);
-
-  int rnti2 = UE_RNTI(params->module_idP, UE_id2);
-  int pCCid2 = UE_PCCID(params->module_idP, UE_id2);
-  int round2 = maxround(params->module_idP, rnti2, params->frameP, params->subframeP, 1);
-
-  if (round1 > round2) return -1;
-  if (round1 < round2) return 1;
-
-  if (UE_list->UE_template[pCCid1][UE_id1].ul_buffer_info[LCGID0] >
-      UE_list->UE_template[pCCid2][UE_id2].ul_buffer_info[LCGID0]) return -1;
-  if (UE_list->UE_template[pCCid1][UE_id1].ul_buffer_info[LCGID0] <
-      UE_list->UE_template[pCCid2][UE_id2].ul_buffer_info[LCGID0]) return 1;
-
-  int bytes_to_schedule1 = UE_list->UE_template[pCCid1][UE_id1].estimated_ul_buffer -
-                           UE_list->UE_template[pCCid1][UE_id1].scheduled_ul_bytes;
-  if (bytes_to_schedule1 < 0) bytes_to_schedule1 = 0;
-  int bytes_to_schedule2 = UE_list->UE_template[pCCid2][UE_id2].estimated_ul_buffer -
-                           UE_list->UE_template[pCCid2][UE_id2].scheduled_ul_bytes;
-  if (bytes_to_schedule2 < 0) bytes_to_schedule2 = 0;
-
-  if (bytes_to_schedule1 > bytes_to_schedule2) return -1;
-  if (bytes_to_schedule1 < bytes_to_schedule2) return 1;
-
-  if (UE_list->UE_template[pCCid1][UE_id1].pre_assigned_mcs_ul >
-      UE_list->UE_template[pCCid2][UE_id2].pre_assigned_mcs_ul) return -1;
-  if (UE_list->UE_template[pCCid1][UE_id1].pre_assigned_mcs_ul <
-      UE_list->UE_template[pCCid2][UE_id2].pre_assigned_mcs_ul) return 1;
-
-  return 0;
+static int ue_ul_compare(const void *_a, const void *_b, void *_params)
+{
+  struct sort_ue_ul_params *params = _params;
+  UE_list_t *UE_list = &RC.mac[params->module_idP]->UE_list;
+
+  int i;
+  int slice_id = params->slice_id;
+
+  int UE_id1 = *(const int *) _a;
+  int UE_id2 = *(const int *) _b;
+
+  int rnti1 = UE_RNTI(params->module_idP, UE_id1);
+  int pCCid1 = UE_PCCID(params->module_idP, UE_id1);
+  int round1 = maxround(params->module_idP, rnti1, params->frameP, params->subframeP, 1);
+
+  int rnti2 = UE_RNTI(params->module_idP, UE_id2);
+  int pCCid2 = UE_PCCID(params->module_idP, UE_id2);
+  int round2 = maxround(params->module_idP, rnti2, params->frameP, params->subframeP, 1);
+
+  int bytes_to_schedule1 = UE_list->UE_template[pCCid1][UE_id1].estimated_ul_buffer -
+                           UE_list->UE_template[pCCid1][UE_id1].scheduled_ul_bytes;
+  int bytes_to_schedule2 = UE_list->UE_template[pCCid2][UE_id2].estimated_ul_buffer -
+                           UE_list->UE_template[pCCid2][UE_id2].scheduled_ul_bytes;
+  if (bytes_to_schedule1 < 0) bytes_to_schedule1 = 0;
+  if (bytes_to_schedule2 < 0) bytes_to_schedule2 = 0;
+
+  for (i = 0; i < CRU_NUM; ++i) {
+    switch (UE_list->sorting_criteria_ul[slice_id][i]) {
+
+      case CRU_ROUND:
+        if (round1 > round2) return -1;
+        if (round1 < round2) return 1;
+        break;
+
+      case CRU_BUF:
+        if (UE_list->UE_template[pCCid1][UE_id1].ul_buffer_info[LCGID0] >
+            UE_list->UE_template[pCCid2][UE_id2].ul_buffer_info[LCGID0]) return -1;
+        if (UE_list->UE_template[pCCid1][UE_id1].ul_buffer_info[LCGID0] <
+            UE_list->UE_template[pCCid2][UE_id2].ul_buffer_info[LCGID0]) return 1;
+        break;
+
+      case CRU_BTS:
+        if (bytes_to_schedule1 > bytes_to_schedule2) return -1;
+        if (bytes_to_schedule1 < bytes_to_schedule2) return 1;
+        break;
+
+      case CRU_MCS:
+        if (UE_list->UE_template[pCCid1][UE_id1].pre_assigned_mcs_ul >
+            UE_list->UE_template[pCCid2][UE_id2].pre_assigned_mcs_ul) return -1;
+        if (UE_list->UE_template[pCCid1][UE_id1].pre_assigned_mcs_ul <
+            UE_list->UE_template[pCCid2][UE_id2].pre_assigned_mcs_ul) return 1;
+        break;
+
+      default:
+        break;
+    }
+  }
+
+  return 0;
 
 #if 0
   /* The above order derives from the following.
...
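The rewritten ue_ul_compare() is a lexicographic comparator: the slice's decoded criteria are tried in priority order, and a later criterion only matters when the earlier ones tie. A minimal standalone sketch of the same pattern outside OAI (hypothetical struct ue and a fixed policy, usable with the standard qsort()):

/* Minimal sketch of the lexicographic-comparator pattern; illustrative only. */
struct ue { int round; int buffer; int bytes; int mcs; };

static int cmp_desc(int a, int b) { return (a > b) ? -1 : (a < b) ? 1 : 0; }

static int ue_compare(const void *_a, const void *_b)
{
  const struct ue *a = _a, *b = _b;
  static const int policy[4] = { 0, 1, 2, 3 };   /* like 0x0123: round, buffer, bytes, mcs */

  for (int i = 0; i < 4; ++i) {
    int r = 0;
    switch (policy[i]) {
      case 0: r = cmp_desc(a->round,  b->round);  break;
      case 1: r = cmp_desc(a->buffer, b->buffer); break;
      case 2: r = cmp_desc(a->bytes,  b->bytes);  break;
      case 3: r = cmp_desc(a->mcs,    b->mcs);    break;
    }
    if (r) return r;   /* later criteria only break ties */
  }
  return 0;
}

Returning -1 when the first element is the more urgent one makes the sort place the highest-priority UE first, which is what sort_ue_ul() below relies on.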
@@ -2099,38 +2186,37 @@ static int ue_ul_compare(const void *_a, const void *_b, void *_params)
 #endif
 }
 
-void sort_ue_ul(module_id_t module_idP, int frameP, sub_frame_t subframeP)
-{
-  int i;
-  int list[NUMBER_OF_UE_MAX];
-  int list_size = 0;
-  int rnti;
-  struct sort_ue_ul_params params = { module_idP, frameP, subframeP };
-  UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;
-
-  for (i = 0; i < NUMBER_OF_UE_MAX; i++) {
-    if (UE_list->active[i] == FALSE) continue;
-    if ((rnti = UE_RNTI(module_idP, i)) == NOT_A_RNTI) continue;
-    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue;
-
-    list[list_size] = i;
-    list_size++;
-  }
-
-  qsort_r(list, list_size, sizeof(int), ue_ul_compare, &params);
-
-  if (list_size) {
-    for (i = 0; i < list_size - 1; i++)
-      UE_list->next_ul[list[i]] = list[i + 1];
-    UE_list->next_ul[list[list_size - 1]] = -1;
-    UE_list->head_ul = list[0];
-  } else {
-    UE_list->head_ul = -1;
-  }
+void sort_ue_ul(module_id_t module_idP, slice_id_t slice_id, int frameP, sub_frame_t subframeP)
+{
+  int i;
+  int list[NUMBER_OF_UE_MAX];
+  int list_size = 0;
+  struct sort_ue_ul_params params = { module_idP, frameP, subframeP, slice_id };
+  UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;
+
+  for (i = 0; i < NUMBER_OF_UE_MAX; i++) {
+    if (UE_list->active[i] == FALSE) continue;
+    if (UE_RNTI(module_idP, i) == NOT_A_RNTI) continue;
+    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue;
+    if (!ue_slice_membership(i, slice_id)) continue;
+
+    list[list_size] = i;
+    list_size++;
+  }
+
+  decode_sorting_policy_ul(module_idP, slice_id);
+
+  qsort_r(list, list_size, sizeof(int), ue_ul_compare, &params);
+
+  if (list_size) {
+    for (i = 0; i < list_size - 1; i++)
+      UE_list->next_ul[list[i]] = list[i + 1];
+    UE_list->next_ul[list[list_size - 1]] = -1;
+    UE_list->head_ul = list[0];
+  } else {
+    UE_list->head_ul = -1;
+  }
 
 #if 0
   int UE_id1, UE_id2;
...
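sort_ue_ul() hands &params to qsort_r() (here the GNU/glibc form, whose comparator receives that extra argument), which is how slice_id reaches ue_ul_compare(); the sorted array is then rebuilt into the head_ul/next_ul linked list, so downstream loops see the slice's UEs in policy order:

/* Illustrative traversal (the pattern used by the pre-processor loops above):
 * after sort_ue_ul(), head_ul/next_ul yield the slice's UEs best-first,
 * and head_ul is -1 when no UE is currently schedulable. */
for (UE_id = UE_list->head_ul; UE_id >= 0; UE_id = UE_list->next_ul[UE_id]) {
  /* highest-priority UE of the slice comes first */
}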
@@ -2199,7 +2285,3 @@ void sort_ue_ul(module_id_t module_idP, int frameP, sub_frame_t subframeP)
   }
 #endif
 }
-
-int cqi2mcs(int cqi) {
-  return cqi_to_mcs[cqi];
-}
\ No newline at end of file
openair2/LAYER2/MAC/proto.h

@@ -691,9 +691,28 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, slice_id_t slice_id,
                                    uint16_t *first_rb);
 
 void store_ulsch_buffer(module_id_t module_idP, int frameP, sub_frame_t subframeP);
 
-void sort_ue_ul(module_id_t module_idP, int frameP, sub_frame_t subframeP);
+void sort_ue_ul(module_id_t module_idP, slice_id_t slice_id, int frameP, sub_frame_t subframeP);
 
 void assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP, sub_frame_t subframeP, uint16_t *first_rb);
 
+void ulsch_scheduler_pre_processor_accounting(module_id_t module_idP, slice_id_t slice_id,
+                                              int frameP, sub_frame_t subframeP,
+                                              uint16_t *first_rb,
+                                              uint16_t total_ue_count[MAX_NUM_CCs],
+                                              uint16_t nb_allocated_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+                                              uint16_t total_allocated_rbs[MAX_NUM_CCs]);
+
+void ulsch_scheduler_pre_processor_allocation(module_id_t module_idP, slice_id_t slice_id,
+                                              int frameP, sub_frame_t subframeP,
+                                              uint16_t *first_rb,
+                                              uint16_t total_ue_count[MAX_NUM_CCs],
+                                              uint16_t nb_allocated_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+                                              uint16_t total_allocated_rbs[MAX_NUM_CCs]);
+
 void adjust_bsr_info(int buffer_occupancy, uint16_t TBS, UE_TEMPLATE *UE_template);
 
 int phy_stats_exist(module_id_t Mod_id, int rnti);
...