OpenXG / OpenXG UE / Commits
Commit 6dd0e0cc authored Sep 04, 2020 by Sakthivel Velumani
fixed dlsch_errors stats
parent cb1d714b
Showing 4 changed files with 67 additions and 61 deletions
openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_dlsch.c    +65  -0
openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_ulsch.c     +0  -59
openair2/LAYER2/NR_MAC_gNB/mac_proto.h               +1  -1
openair2/NR_PHY_INTERFACE/NR_IF_Module.c             +1  -1
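In short: handle_nr_uci() moves from gNB_scheduler_ulsch.c into gNB_scheduler_dlsch.c, gains a NR_mac_stats_t *stats parameter, and now increments stats->dlsch_errors when a DL HARQ process exhausts max_harq_rounds after its feedback is processed. The self-contained sketch below illustrates only that accounting rule; the struct and function names in it are simplified, hypothetical stand-ins, not the actual OAI types.

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the OAI HARQ process and stats structs. */
typedef struct { int round; int ndi; int is_waiting; } harq_proc_t;
typedef struct { unsigned dlsch_errors; } mac_stats_t;

/* Mirrors the new rule in handle_nr_uci(): an ACK resets the process, a NACK
 * advances the round counter, and exhausting max_rounds resets the process
 * and charges one DLSCH error to the stats. */
static void on_dl_harq_feedback(harq_proc_t *h, mac_stats_t *s, int acked, int max_rounds) {
  if (acked) {
    h->ndi ^= 1;       /* toggle NDI and reset round */
    h->round = 0;
  } else {
    h->round++;
  }
  h->is_waiting = 0;
  if (h->round == max_rounds) {
    h->ndi ^= 1;
    h->round = 0;
    s->dlsch_errors++; /* the counter this commit fixes */
  }
}

int main(void) {
  harq_proc_t h = {0};
  mac_stats_t s = {0};
  for (int i = 0; i < 4; i++)
    on_dl_harq_feedback(&h, &s, /*acked=*/0, /*max_rounds=*/4);
  printf("dlsch_errors = %u\n", s.dlsch_errors); /* prints 1 */
  return 0;
}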
openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_dlsch.c
...
@@ -38,6 +38,7 @@
#include "NR_MAC_gNB/nr_mac_gNB.h"
#include "NR_MAC_COMMON/nr_mac_extern.h"
#include "LAYER2/MAC/mac.h"
#include "LAYER2/NR_MAC_gNB/mac_proto.h"
/*NFAPI*/
#include "nfapi_nr_interface.h"
...
@@ -347,6 +348,70 @@ int nr_generate_dlsch_pdu(module_id_t module_idP,
  return offset;
}

void handle_nr_uci(NR_UL_IND_t *UL_info,
                   NR_UE_sched_ctrl_t *sched_ctrl,
                   NR_mac_stats_t *stats,
                   int target_snrx10) {
  // TODO
  int max_harq_rounds = 4; // TODO define macro
  int num_ucis = UL_info->uci_ind.num_ucis;
  nfapi_nr_uci_t *uci_list = UL_info->uci_ind.uci_list;

  for (int i = 0; i < num_ucis; i++) {
    switch (uci_list[i].pdu_type) {
      case NFAPI_NR_UCI_PDCCH_PDU_TYPE:
        break;

      case NFAPI_NR_UCI_FORMAT_0_1_PDU_TYPE: {
        //if (get_softmodem_params()->phy_test == 0) {
        nfapi_nr_uci_pucch_pdu_format_0_1_t *uci_pdu = &uci_list[i].pucch_pdu_format_0_1;
        // handle harq
        int harq_idx_s = 0;
        // tpc (power control)
        sched_ctrl->tpc1 = nr_get_tpc(target_snrx10, uci_pdu->ul_cqi, 30);
        // iterate over received harq bits
        for (int harq_bit = 0; harq_bit < uci_pdu->harq->num_harq; harq_bit++) {
          // search for the right harq process
          for (int harq_idx = harq_idx_s; harq_idx < NR_MAX_NB_HARQ_PROCESSES; harq_idx++) {
            // if the gNB received ack with a good confidence
            if ((UL_info->slot - 1) == sched_ctrl->harq_processes[harq_idx].feedback_slot) {
              if ((uci_pdu->harq->harq_list[harq_bit].harq_value == 1) &&
                  (uci_pdu->harq->harq_confidence_level == 0)) {
                // toggle NDI and reset round
                sched_ctrl->harq_processes[harq_idx].ndi ^= 1;
                sched_ctrl->harq_processes[harq_idx].round = 0;
              } else
                sched_ctrl->harq_processes[harq_idx].round++;
              sched_ctrl->harq_processes[harq_idx].is_waiting = 0;
              harq_idx_s = harq_idx + 1;
              // if the max harq rounds was reached
              if (sched_ctrl->harq_processes[harq_idx].round == max_harq_rounds) {
                sched_ctrl->harq_processes[harq_idx].ndi ^= 1;
                sched_ctrl->harq_processes[harq_idx].round = 0;
                stats->dlsch_errors++;
              }
              break;
            }
            // if feedback slot processing is aborted
            else if (((UL_info->slot - 1) > sched_ctrl->harq_processes[harq_idx].feedback_slot) &&
                     (sched_ctrl->harq_processes[harq_idx].is_waiting)) {
              sched_ctrl->harq_processes[harq_idx].round++;
              if (sched_ctrl->harq_processes[harq_idx].round == max_harq_rounds) {
                sched_ctrl->harq_processes[harq_idx].ndi ^= 1;
                sched_ctrl->harq_processes[harq_idx].round = 0;
              }
              sched_ctrl->harq_processes[harq_idx].is_waiting = 0;
            }
          }
        }
        //}
        break;
      }

      case NFAPI_NR_UCI_FORMAT_2_3_4_PDU_TYPE:
        break;
    }
  }
  UL_info->uci_ind.num_ucis = 0;
}

/* functionalities of this function have been moved to nr_schedule_uss_dlsch_phytest */
void nr_schedule_ue_spec(module_id_t module_idP,
                         frame_t frameP,
                         sub_frame_t slotP) {
}
openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_ulsch.c
...
@@ -298,65 +298,6 @@ void handle_nr_ul_harq(uint16_t slot, NR_UE_sched_ctrl_t *sched_ctrl, NR_mac_sta
    LOG_E(MAC, "Incorrect ULSCH HARQ process %d or invalid state %d\n", hrq_id, cur_harq->state);
}

void handle_nr_uci(NR_UL_IND_t *UL_info,
                   NR_UE_sched_ctrl_t *sched_ctrl,
                   int target_snrx10) {
  // TODO
  int max_harq_rounds = 4; // TODO define macro
  int num_ucis = UL_info->uci_ind.num_ucis;
  nfapi_nr_uci_t *uci_list = UL_info->uci_ind.uci_list;

  for (int i = 0; i < num_ucis; i++) {
    switch (uci_list[i].pdu_type) {
      case NFAPI_NR_UCI_PDCCH_PDU_TYPE:
        break;

      case NFAPI_NR_UCI_FORMAT_0_1_PDU_TYPE: {
        //if (get_softmodem_params()->phy_test == 0) {
        nfapi_nr_uci_pucch_pdu_format_0_1_t *uci_pdu = &uci_list[i].pucch_pdu_format_0_1;
        // handle harq
        int harq_idx_s = 0;
        // tpc (power control)
        sched_ctrl->tpc1 = nr_get_tpc(target_snrx10, uci_pdu->ul_cqi, 30);
        // iterate over received harq bits
        for (int harq_bit = 0; harq_bit < uci_pdu->harq->num_harq; harq_bit++) {
          // search for the right harq process
          for (int harq_idx = harq_idx_s; harq_idx < NR_MAX_NB_HARQ_PROCESSES; harq_idx++) {
            // if the gNB received ack with a good confidence or if the max harq rounds was reached
            if ((UL_info->slot - 1) == sched_ctrl->harq_processes[harq_idx].feedback_slot) {
              if (((uci_pdu->harq->harq_list[harq_bit].harq_value == 1) &&
                   (uci_pdu->harq->harq_confidence_level == 0)) ||
                  (sched_ctrl->harq_processes[harq_idx].round == max_harq_rounds)) {
                // toggle NDI and reset round
                sched_ctrl->harq_processes[harq_idx].ndi ^= 1;
                sched_ctrl->harq_processes[harq_idx].round = 0;
              } else
                sched_ctrl->harq_processes[harq_idx].round++;
              sched_ctrl->harq_processes[harq_idx].is_waiting = 0;
              harq_idx_s = harq_idx + 1;
              break;
            }
            // if feedback slot processing is aborted
            else if (((UL_info->slot - 1) > sched_ctrl->harq_processes[harq_idx].feedback_slot) &&
                     (sched_ctrl->harq_processes[harq_idx].is_waiting)) {
              sched_ctrl->harq_processes[harq_idx].round++;
              if (sched_ctrl->harq_processes[harq_idx].round == max_harq_rounds) {
                sched_ctrl->harq_processes[harq_idx].ndi ^= 1;
                sched_ctrl->harq_processes[harq_idx].round = 0;
              }
              sched_ctrl->harq_processes[harq_idx].is_waiting = 0;
            }
          }
        }
        //}
        break;
      }

      case NFAPI_NR_UCI_FORMAT_2_3_4_PDU_TYPE:
        break;
    }
  }
  UL_info->uci_ind.num_ucis = 0;
}
/*
* When data are received on PHY and transmitted to MAC
*/
...
openair2/LAYER2/NR_MAC_gNB/mac_proto.h
...
@@ -342,5 +342,5 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP,
void handle_nr_ul_harq(uint16_t slot, NR_UE_sched_ctrl_t *sched_ctrl, NR_mac_stats_t *stats, nfapi_nr_crc_t crc_pdu);

void handle_nr_uci(NR_UL_IND_t *UL_info, NR_UE_sched_ctrl_t *sched_ctrl, int target_snrx10);

void handle_nr_uci(NR_UL_IND_t *UL_info, NR_UE_sched_ctrl_t *sched_ctrl, NR_mac_stats_t *stats, int target_snrx10);

#endif /*__LAYER2_NR_MAC_PROTO_H__*/
openair2/NR_PHY_INTERFACE/NR_IF_Module.c
...
@@ -181,7 +181,7 @@ void NR_UL_indication(NR_UL_IND_t *UL_info) {
    clear_nr_nfapi_information(mac, CC_id, UL_info->frame, UL_info->slot);
    handle_nr_rach(UL_info);
    handle_nr_uci(UL_info, &mac->UE_list.UE_sched_ctrl[0], mac->pucch_target_snrx10);
    handle_nr_uci(UL_info, &mac->UE_list.UE_sched_ctrl[0], &mac->UE_list.mac_stats[0], mac->pucch_target_snrx10);
    // clear HI prior to handling ULSCH
    mac->UL_dci_req[CC_id].numPdus = 0;
    handle_nr_ulsch(UL_info, &mac->UE_list.UE_sched_ctrl[0], &mac->UE_list.mac_stats[0]);
...
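With the stats pointer now threaded through the UCI path, the DLSCH error count accumulated in handle_nr_uci() can be read back wherever per-UE MAC statistics are reported. A minimal sketch follows, assuming a LOG_I macro with the same shape as the LOG_E call in gNB_scheduler_ulsch.c above; the helper itself is hypothetical and not part of this commit.

/* Hypothetical helper: dump the DLSCH error counter maintained by handle_nr_uci(). */
static void dump_dlsch_errors(const NR_mac_stats_t *stats) {
  LOG_I(MAC, "dlsch_errors %d\n", (int)stats->dlsch_errors);
}

It could be invoked, for example, right after handle_nr_uci() in NR_UL_indication() with &mac->UE_list.mac_stats[0], mirroring the argument already passed above.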