常顺宇 / OpenXG-RAN · Commits · 0773229b

Commit 0773229b authored Dec 23, 2020 by Robert Schmidt

Implement UL HARQ using NR_list_t

parent a0d47c50

Showing 4 changed files with 72 additions and 67 deletions (+72 -67)
doc/SW_archi.md                                       +1   -1
openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_phytest.c    +2   -0
openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_ulsch.c      +62  -64
openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h               +7   -2
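The whole commit hinges on NR_list_t, the gNB MAC's fixed-capacity list of integer IDs (here: UL HARQ process IDs) with O(1) access to its head. The block below is an illustrative, self-contained re-implementation for readers who do not have the repository open: the type layout and helper names mirror the ones used in the diffs (head, add_tail_nr_list, remove_front_nr_list, remove_nr_list), but the bodies are a simplified sketch and should not be read as the project's own definitions.

/* Illustrative re-implementation of an NR_list_t-style ID list (simplified,
 * not the OAI code): an intrusive singly-linked list over a fixed range of
 * integer IDs, with O(1) head access and tail insertion. */
#include <assert.h>
#include <stdlib.h>

typedef struct {
  int *next;  /* next[id] is the ID following "id", or -1 */
  int head;   /* first ID in the list, or -1 if empty */
  int tail;   /* last ID in the list, or -1 if empty */
  int len;    /* capacity: IDs range over 0 .. len-1 */
} NR_list_t;

static void create_nr_list(NR_list_t *list, int len)
{
  list->next = malloc(len * sizeof(*list->next));
  assert(list->next);
  for (int i = 0; i < len; ++i)
    list->next[i] = -1;
  list->head = -1;
  list->tail = -1;
  list->len = len;
}

static void add_tail_nr_list(NR_list_t *list, int id)
{
  list->next[id] = -1;
  if (list->tail < 0)
    list->head = id;          /* list was empty */
  else
    list->next[list->tail] = id;
  list->tail = id;
}

static void remove_front_nr_list(NR_list_t *list)
{
  assert(list->head >= 0);
  const int front = list->head;
  list->head = list->next[front];
  list->next[front] = -1;
  if (list->head < 0)
    list->tail = -1;          /* list is now empty */
}

static void remove_nr_list(NR_list_t *list, int id)
{
  int *cur = &list->head;
  int prev = -1;
  while (*cur >= 0 && *cur != id) {  /* walk until the requested ID */
    prev = *cur;
    cur = &list->next[*cur];
  }
  assert(*cur == id);
  *cur = list->next[id];             /* unlink it */
  list->next[id] = -1;
  if (list->tail == id)
    list->tail = prev;
}

With this, "the PID to use next" is simply the head of the relevant list, and -1 falls out naturally as "no such PID".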
doc/SW_archi.md

@@ -186,7 +186,7 @@ development], for FR2 does not exist yet.
 3) allocate a CCE for the UE (and return if it is not possible)
 4) Calculate DMRS stuff (nr_save_pusch_fields()) and the TBS.
 5) Mark used resources in vrb_map_UL.
-* loop through all users: get a free HARQ PID using select_ul_harq_pid() and
+* loop through all users: get a free HARQ PID and
 update statistics. Fill nFAPI structures directly for PUSCH, and call
 config_uldci() and fill_dci_pdu_rel15() for DCI filling and PDCCH messages.
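Concretely, the documented step "get a free HARQ PID" is now split across two places, as the C hunks below show: the UL preprocessor only records whether a retransmission is pending, and nr_schedule_ulsch resolves the value -1 into a fresh process. A minimal sketch of that hand-off, written as a hypothetical helper on top of the NR_list_t sketch above (the real code does this inline and additionally distinguishes the two cases via harq->round when a specific PID was pre-selected):

#include <stdint.h>

/* Hypothetical helper condensing the two-stage UL HARQ PID selection.
 * Stage 1 (preprocessor): preferred = retrans_ul_harq.head, i.e. a PID
 * awaiting retransmission, or -1 for "any new".
 * Stage 2 (nr_schedule_ulsch): resolve -1 against the free list. */
static int8_t resolve_ul_harq_pid(NR_list_t *available_ul_harq,
                                  NR_list_t *retrans_ul_harq,
                                  int8_t preferred)
{
  if (preferred < 0) {                        /* no retransmission pending */
    const int8_t pid = available_ul_harq->head;
    if (pid < 0)
      return -1;                              /* no free process: caller must skip this UE */
    remove_front_nr_list(available_ul_harq);  /* PID is now in flight */
    return pid;
  }
  remove_nr_list(retrans_ul_harq, preferred); /* reschedule the failed PID */
  return preferred;
}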
openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_phytest.c

@@ -477,6 +477,8 @@ void nr_ul_preprocessor_phytest(module_id_t module_id,
   sched_pusch->mcs = mcs;
   sched_pusch->rbStart = rbStart;
   sched_pusch->rbSize = rbSize;
+  /* get the PID of a HARQ process awaiting retransmission, or -1 for "any new" */
+  sched_pusch->ul_harq_pid = sched_ctrl->retrans_ul_harq.head;

   /* Calculate TBS from MCS */
   sched_pusch->R = nr_get_code_rate_ul(mcs, ps->mcs_table);
openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_ulsch.c

@@ -259,45 +259,47 @@ void handle_nr_ul_harq(module_id_t mod_id,
   NR_UE_info_t *UE_info = &RC.nrmac[mod_id]->UE_info;
   NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
-  int max_harq_rounds = 4; // TODO define macro
-  uint8_t hrq_id = crc_pdu->harq_id;
-  NR_UE_ul_harq_t *cur_harq = &sched_ctrl->ul_harq_processes[hrq_id];
-  if (cur_harq->state == ACTIVE_SCHED) {
-    if (!crc_pdu->tb_crc_status) {
-      cur_harq->ndi ^= 1;
-      cur_harq->round = 0;
-      cur_harq->state = INACTIVE; // passed -> make inactive. can be used by scheduder for next grant
-      LOG_D(MAC, "Ulharq id %d crc passed for RNTI %04x\n", hrq_id, crc_pdu->rnti);
-    } else {
-      cur_harq->round++;
-      cur_harq->state = ACTIVE_NOT_SCHED;
-      LOG_D(MAC, "Ulharq id %d crc failed for RNTI %04x\n", hrq_id, crc_pdu->rnti);
-    }
-    if (!(cur_harq->round < max_harq_rounds)) {
-      cur_harq->ndi ^= 1;
-      cur_harq->state = INACTIVE; // failed after 4 rounds -> make inactive
-      cur_harq->round = 0;
-      LOG_D(MAC, "RNTI %04x: Ulharq id %d crc failed in all rounds\n", crc_pdu->rnti, hrq_id);
-      UE_info->mac_stats[UE_id].ulsch_errors++;
-    }
-    return;
-  } else
-    LOG_W(MAC,
-          "Incorrect ULSCH HARQ PID %d or invalid state %d for RNTI %04x "
-          "(ignore this warning for RA)\n",
-          hrq_id,
-          cur_harq->state,
-          crc_pdu->rnti);
+  const int8_t harq_pid = sched_ctrl->feedback_ul_harq.head;
+  if (crc_pdu->harq_id != harq_pid && harq_pid < 0) {
+    LOG_W(MAC,
+          "Unexpected ULSCH HARQ PID %d (have %d) for RNTI %04x (ignore this warning for RA)\n",
+          crc_pdu->harq_id,
+          harq_pid,
+          crc_pdu->rnti);
+    return;
+  }
+  DevAssert(harq_pid == crc_pdu->harq_id);
+  remove_front_nr_list(&sched_ctrl->feedback_ul_harq);
+  NR_UE_ul_harq_t *harq = &sched_ctrl->ul_harq_processes[harq_pid];
+  DevAssert(harq->feedback_slot == slot - 1);
+  DevAssert(harq->is_waiting);
+  harq->feedback_slot = -1;
+  harq->is_waiting = false;
+  if (!crc_pdu->tb_crc_status) {
+    harq->ndi ^= 1;
+    harq->round = 0;
+    LOG_D(MAC, "Ulharq id %d crc passed for RNTI %04x\n", harq_pid, crc_pdu->rnti);
+    add_tail_nr_list(&sched_ctrl->available_ul_harq, harq_pid);
+  } else if (harq->round == MAX_HARQ_ROUNDS) {
+    harq->ndi ^= 1;
+    harq->round = 0;
+    LOG_D(MAC, "RNTI %04x: Ulharq id %d crc failed in all rounds\n", crc_pdu->rnti, harq_pid);
+    UE_info->mac_stats[UE_id].ulsch_errors++;
+    add_tail_nr_list(&sched_ctrl->available_ul_harq, harq_pid);
+  } else {
+    harq->round++;
+    LOG_D(MAC, "Ulharq id %d crc failed for RNTI %04x\n", harq_pid, crc_pdu->rnti);
+    add_tail_nr_list(&sched_ctrl->retrans_ul_harq, harq_pid);
+  }
 }
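Read as a state machine, the hunk above moves each HARQ PID between three lists instead of flipping an explicit state enum: feedback_ul_harq holds PIDs with a transmitted PUSCH, and a CRC indication moves the front PID either back to available_ul_harq (decoded, or given up after MAX_HARQ_ROUNDS) or to retrans_ul_harq (failed, retry). A condensed sketch of exactly that outcome logic, stripped of the logging and asserts, again on top of the illustrative NR_list_t above (the small struct and the MAX value are stand-ins, not the OAI definitions):

enum { SKETCH_MAX_HARQ_ROUNDS = 4 };  /* stand-in for the MAX_HARQ_ROUNDS macro */

typedef struct {
  int ndi;    /* toggled whenever the process starts carrying a new transport block */
  int round;  /* number of failed rounds so far */
} ul_harq_sketch_t;

/* Apply one CRC indication to the PID at the head of the feedback list,
 * mirroring the three branches of handle_nr_ul_harq above. */
static void on_ul_crc(NR_list_t *feedback, NR_list_t *available, NR_list_t *retrans,
                      ul_harq_sketch_t *harq, int tb_crc_ok)
{
  const int pid = feedback->head;
  if (pid < 0)
    return;                               /* nothing is awaiting feedback */
  remove_front_nr_list(feedback);
  ul_harq_sketch_t *h = &harq[pid];
  if (tb_crc_ok) {                        /* decoded: PID is free again */
    h->ndi ^= 1;
    h->round = 0;
    add_tail_nr_list(available, pid);
  } else if (h->round == SKETCH_MAX_HARQ_ROUNDS) {  /* all rounds used: give up, free the PID */
    h->ndi ^= 1;
    h->round = 0;
    add_tail_nr_list(available, pid);
  } else {                                /* failed: queue a retransmission */
    h->round++;
    add_tail_nr_list(retrans, pid);
  }
}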
@@ -432,29 +434,6 @@ long get_K2(NR_BWP_Uplink_t *ubwp, int time_domain_assignment, int mu) {
   return 3;
 }

-int8_t select_ul_harq_pid(NR_UE_sched_ctrl_t *sched_ctrl) {
-  const uint8_t max_ul_harq_pids = 3; // temp: for testing
-  // schedule active harq processes
-  for (uint8_t hrq_id = 0; hrq_id < max_ul_harq_pids; hrq_id++) {
-    NR_UE_ul_harq_t *cur_harq = &sched_ctrl->ul_harq_processes[hrq_id];
-    if (cur_harq->state == ACTIVE_NOT_SCHED) {
-      LOG_D(MAC, "Found ulharq id %d, scheduling it for retransmission\n", hrq_id);
-      return hrq_id;
-    }
-  }
-  // schedule new harq processes
-  for (uint8_t hrq_id = 0; hrq_id < max_ul_harq_pids; hrq_id++) {
-    NR_UE_ul_harq_t *cur_harq = &sched_ctrl->ul_harq_processes[hrq_id];
-    if (cur_harq->state == INACTIVE) {
-      LOG_D(MAC, "Found new ulharq id %d, scheduling it\n", hrq_id);
-      return hrq_id;
-    }
-  }
-  LOG_E(MAC, "All UL HARQ processes are busy. Cannot schedule ULSCH\n");
-  return -1;
-}
-
 void nr_simple_ulsch_preprocessor(module_id_t module_id,
                                   frame_t frame,
                                   sub_frame_t slot,
@@ -502,6 +481,8 @@ void nr_simple_ulsch_preprocessor(module_id_t module_id,
   sched_ctrl->sched_pusch.slot = sched_slot;
   sched_ctrl->sched_pusch.frame = sched_frame;
+  /* get the PID of a HARQ process awaiting retransmission, or -1 otherwise */
+  sched_ctrl->sched_pusch.ul_harq_pid = sched_ctrl->retrans_ul_harq.head;

   const int target_ss = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
   sched_ctrl->search_space = get_searchspace(sched_ctrl->active_bwp, target_ss);
@@ -605,11 +586,28 @@ void nr_schedule_ulsch(module_id_t module_id,
     uint16_t rnti = UE_info->rnti[UE_id];

-    int8_t harq_id = select_ul_harq_pid(sched_ctrl);
-    if (harq_id < 0) return;
+    int8_t harq_id = sched_pusch->ul_harq_pid;
+    if (harq_id < 0) {
+      /* PP has not selected a specific HARQ Process, get a new one */
+      harq_id = sched_ctrl->available_ul_harq.head;
+      AssertFatal(harq_id >= 0,
+                  "no free HARQ process available for UE %d\n",
+                  UE_id);
+      remove_front_nr_list(&sched_ctrl->available_ul_harq);
+    } else {
+      /* PP selected a specific HARQ process. Check whether it will be a new
+       * transmission or a retransmission, and remove from the corresponding
+       * list */
+      if (sched_ctrl->ul_harq_processes[harq_id].round == 0)
+        remove_nr_list(&sched_ctrl->available_ul_harq, harq_id);
+      else
+        remove_nr_list(&sched_ctrl->retrans_ul_harq, harq_id);
+    }
     NR_UE_ul_harq_t *cur_harq = &sched_ctrl->ul_harq_processes[harq_id];
-    cur_harq->state = ACTIVE_SCHED;
-    cur_harq->last_tx_slot = sched_pusch->slot;
+    DevAssert(!cur_harq->is_waiting);
+    add_tail_nr_list(&sched_ctrl->feedback_ul_harq, harq_id);
+    cur_harq->feedback_slot = sched_pusch->slot;
+    cur_harq->is_waiting = true;

     int rnti_types[2] = { NR_RNTI_C, 0 };
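A timing detail made explicit by the hunk above: nr_schedule_ulsch stores the PUSCH slot in cur_harq->feedback_slot, and handle_nr_ul_harq asserts feedback_slot == slot - 1, so this version expects the CRC indication exactly one slot after the scheduled PUSCH. A tiny hypothetical check that states the same assumption (slot wrap-around at the frame boundary is ignored here, just as it is in the DevAssert):

/* Hypothetical helper: the CRC indication for a PUSCH sent in slot S is
 * expected in slot S + 1 (no wrap-around handling, matching the DevAssert). */
static int ul_feedback_on_time(int pusch_slot, int crc_slot)
{
  return pusch_slot == crc_slot - 1;
}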
openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h

@@ -336,6 +336,9 @@ typedef struct NR_sched_pusch {
   uint16_t R;
   uint8_t Qm;
   uint32_t tb_size;
+
+  /// UL HARQ PID to use for this UE, or -1 for "any new"
+  int8_t ul_harq_pid;
 } NR_sched_pusch_t;

 typedef struct NR_UE_harq {

@@ -360,10 +363,12 @@ typedef enum {
 } NR_UL_harq_states_t;

 typedef struct NR_UE_ul_harq {
+  bool is_waiting;
   uint8_t ndi;
   uint8_t round;
-  uint16_t last_tx_slot;
-  NR_UL_harq_states_t state;
+  uint16_t feedback_slot;
+  /* TODO PUSCH of last transmission */
 } NR_UE_ul_harq_t;
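For orientation: the three lists referenced throughout (available_ul_harq, feedback_ul_harq, retrans_ul_harq) live in the per-UE scheduler control structure, whose declaration is not part of this diff. The sketch below shows how the pieces plausibly fit together, using the illustrative NR_list_t from the top of this page; the struct name, the NR_MAX_UL_HARQ value and the init helper are assumptions for illustration, not code from this commit.

#define NR_MAX_UL_HARQ 16  /* assumption: number of UL HARQ processes per UE */

/* Sketch of the per-UE scheduler state this commit relies on (simplified). */
typedef struct {
  NR_UE_ul_harq_t ul_harq_processes[NR_MAX_UL_HARQ];
  NR_list_t available_ul_harq;  /* free PIDs, usable for a new transmission */
  NR_list_t feedback_ul_harq;   /* PIDs with a PUSCH sent, awaiting CRC feedback */
  NR_list_t retrans_ul_harq;    /* PIDs whose last round failed, to be rescheduled */
  /* ... remaining per-UE scheduler state ... */
} NR_UE_sched_ctrl_sketch_t;

/* Hypothetical initialization: every PID starts out free. */
static void init_ul_harq_lists(NR_UE_sched_ctrl_sketch_t *sc)
{
  create_nr_list(&sc->available_ul_harq, NR_MAX_UL_HARQ);
  create_nr_list(&sc->feedback_ul_harq, NR_MAX_UL_HARQ);
  create_nr_list(&sc->retrans_ul_harq, NR_MAX_UL_HARQ);
  for (int pid = 0; pid < NR_MAX_UL_HARQ; pid++)
    add_tail_nr_list(&sc->available_ul_harq, pid);
}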