lizhongxiao / OpenXG-RAN · Commits

Commit 7fa69c51
authored Feb 17, 2024 by Robert Schmidt
nr_schedule_response is MONOLITHIC only
parent e53bb08e

Showing 1 changed file with 113 additions and 75 deletions:

openair1/SCHED_NR/fapi_nr_l1.c  (+113, -75)
@@ -155,9 +155,10 @@ void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO)
   uint8_t number_ul_tti_pdu = (UL_tti_req == NULL) ? 0 : UL_tti_req->n_pdus;
   uint8_t number_tx_data_pdu = (TX_req == NULL) ? 0 : TX_req->Number_of_PDUs;
-  if (NFAPI_MODE == NFAPI_MONOLITHIC){
+  DevAssert(NFAPI_MODE == NFAPI_MONOLITHIC);
   if (slot_type == NR_DOWNLINK_SLOT || slot_type == NR_MIXED_SLOT) {
     processingData_L1tx_t *msgTx = NULL;
     msgTx = gNB->msgDataTx;
     msgTx->num_pdsch_slot = 0;
     msgTx->num_dl_pdcch = 0;
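In line with the commit title, the hunk above appears to swap the runtime `if (NFAPI_MODE == NFAPI_MONOLITHIC)` wrapper for an assertion: the function is only ever expected to run in monolithic mode, so a mode mismatch is treated as a programming error rather than a branch to skip. A minimal standalone sketch of that guard-to-assert pattern; the `DEV_ASSERT` macro, mode values, and `schedule_response` function below are illustrative stand-ins, not the OAI definitions:

    #include <assert.h>
    #include <stdio.h>

    /* Stand-ins for the nFAPI mode flag; names mirror the diff, values are illustrative. */
    enum { NFAPI_MONOLITHIC = 0, NFAPI_MODE_VNF = 1 };
    static int NFAPI_MODE = NFAPI_MONOLITHIC;

    /* Hypothetical DevAssert-like macro: fail loudly on a violated invariant. */
    #define DEV_ASSERT(cond) assert(cond)

    static void schedule_response(void)
    {
      /* Before: the whole body sat inside `if (NFAPI_MODE == NFAPI_MONOLITHIC) { ... }`.
       * After: the precondition is asserted once and the body runs unconditionally. */
      DEV_ASSERT(NFAPI_MODE == NFAPI_MONOLITHIC);
      printf("handling scheduler response in monolithic mode\n");
    }

    int main(void)
    {
      schedule_response();
      return 0;
    }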
@@ -167,42 +168,58 @@ void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO)
     /* store the sched_response_id for the TX thread to release it when done */
     msgTx->sched_response_id = Sched_INFO->sched_response_id;
     for (int i = 0; i < number_dl_pdu; i++) {
       nfapi_nr_dl_tti_request_pdu_t *dl_tti_pdu = &DL_req->dl_tti_request_body.dl_tti_pdu_list[i];
       LOG_D(NR_PHY, "NFAPI: dl_pdu %d : type %d\n", i, dl_tti_pdu->PDUType);
       switch (dl_tti_pdu->PDUType) {
         case NFAPI_NR_DL_TTI_SSB_PDU_TYPE:
           handle_nr_nfapi_ssb_pdu(msgTx, frame, slot, dl_tti_pdu);
           break;
         case NFAPI_NR_DL_TTI_PDCCH_PDU_TYPE:
           LOG_D(NR_PHY, "frame %d, slot %d, Got NFAPI_NR_DL_TTI_PDCCH_PDU_TYPE for %d.%d\n", frame, slot, DL_req->SFN, DL_req->Slot);
           msgTx->pdcch_pdu[msgTx->num_dl_pdcch] = dl_tti_pdu->pdcch_pdu;
           msgTx->num_dl_pdcch++;
           break;
         case NFAPI_NR_DL_TTI_CSI_RS_PDU_TYPE:
           LOG_D(NR_PHY, "frame %d, slot %d, Got NFAPI_NR_DL_TTI_CSI_RS_PDU_TYPE for %d.%d\n", frame, slot, DL_req->SFN, DL_req->Slot);
           handle_nfapi_nr_csirs_pdu(msgTx, frame, slot, &dl_tti_pdu->csi_rs_pdu);
           break;
         case NFAPI_NR_DL_TTI_PDSCH_PDU_TYPE:
           LOG_D(NR_PHY, "frame %d, slot %d, Got NFAPI_NR_DL_TTI_PDSCH_PDU_TYPE for %d.%d\n", frame, slot, DL_req->SFN, DL_req->Slot);
           nfapi_nr_dl_tti_pdsch_pdu_rel15_t *pdsch_pdu_rel15 = &dl_tti_pdu->pdsch_pdu.pdsch_pdu_rel15;
           uint16_t pduIndex = pdsch_pdu_rel15->pduIndex;
           AssertFatal(TX_req->pdu_list[pduIndex].num_TLV == 1, "TX_req->pdu_list[%d].num_TLV %d != 1\n", pduIndex, TX_req->pdu_list[pduIndex].num_TLV);
           uint8_t *sdu = (uint8_t *)TX_req->pdu_list[pduIndex].TLVs[0].value.direct;
           AssertFatal(msgTx->num_pdsch_slot < gNB->max_nb_pdsch, "Number of PDSCH PDUs %d exceeded the limit %d\n", msgTx->num_pdsch_slot, gNB->max_nb_pdsch);
           handle_nr_nfapi_pdsch_pdu(msgTx, &dl_tti_pdu->pdsch_pdu, sdu);
       }
     }
     for (int i = 0; i < number_ul_dci_pdu; i++)
       msgTx->ul_pdcch_pdu[i] = UL_dci_req->ul_dci_pdu_list[i];
     /* Both the current thread and the TX thread will access the sched_info
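The DL path above walks `DL_req->dl_tti_request_body.dl_tti_pdu_list` and dispatches each entry on its `PDUType`. A self-contained sketch of that dispatch shape, with all types, enum values, and handlers below being hypothetical placeholders rather than the nFAPI definitions:

    #include <stdio.h>

    /* Hypothetical PDU type tags, mirroring the switch in the hunk above. */
    typedef enum { PDU_SSB, PDU_PDCCH, PDU_CSI_RS, PDU_PDSCH } dl_pdu_type_t;

    typedef struct {
      dl_pdu_type_t type;
      int payload;              /* placeholder for the per-type PDU body */
    } dl_tti_pdu_t;

    static void handle_ssb(const dl_tti_pdu_t *p)   { printf("SSB pdu %d\n", p->payload); }
    static void handle_pdcch(const dl_tti_pdu_t *p) { printf("PDCCH pdu %d\n", p->payload); }

    static void dispatch_dl_pdus(const dl_tti_pdu_t *list, int n)
    {
      for (int i = 0; i < n; i++) {
        switch (list[i].type) {
          case PDU_SSB:   handle_ssb(&list[i]);   break;
          case PDU_PDCCH: handle_pdcch(&list[i]); break;
          default:        printf("pdu %d: type %d not handled in this sketch\n", i, list[i].type); break;
        }
      }
    }

    int main(void)
    {
      dl_tti_pdu_t pdus[] = { { PDU_SSB, 1 }, { PDU_PDCCH, 2 }, { PDU_CSI_RS, 3 } };
      dispatch_dl_pdus(pdus, 3);
      return 0;
    }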
@@ -215,28 +232,48 @@ void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO)
   for (int i = 0; i < number_ul_tti_pdu; i++) {
     switch (UL_tti_req->pdus_list[i].pdu_type) {
       case NFAPI_NR_UL_CONFIG_PUSCH_PDU_TYPE:
         LOG_D(NR_PHY, "frame %d, slot %d, Got NFAPI_NR_UL_TTI_PUSCH_PDU_TYPE for %d.%d\n", frame, slot, UL_tti_req->SFN, UL_tti_req->Slot);
         nr_fill_ulsch(gNB, UL_tti_req->SFN, UL_tti_req->Slot, &UL_tti_req->pdus_list[i].pusch_pdu);
         break;
       case NFAPI_NR_UL_CONFIG_PUCCH_PDU_TYPE:
         LOG_D(NR_PHY, "frame %d, slot %d, Got NFAPI_NR_UL_TTI_PUCCH_PDU_TYPE for %d.%d\n", frame, slot, UL_tti_req->SFN, UL_tti_req->Slot);
         nr_fill_pucch(gNB, UL_tti_req->SFN, UL_tti_req->Slot, &UL_tti_req->pdus_list[i].pucch_pdu);
         break;
       case NFAPI_NR_UL_CONFIG_PRACH_PDU_TYPE:
         LOG_D(NR_PHY, "frame %d, slot %d, Got NFAPI_NR_UL_TTI_PRACH_PDU_TYPE for %d.%d\n", frame, slot, UL_tti_req->SFN, UL_tti_req->Slot);
         nfapi_nr_prach_pdu_t *prach_pdu = &UL_tti_req->pdus_list[i].prach_pdu;
         nr_fill_prach(gNB, UL_tti_req->SFN, UL_tti_req->Slot, prach_pdu);
         if (gNB->RU_list[0]->if_south == LOCAL_RF || gNB->RU_list[0]->if_south == REMOTE_IF5)
           nr_fill_prach_ru(gNB->RU_list[0], UL_tti_req->SFN, UL_tti_req->Slot, prach_pdu);
         break;
       case NFAPI_NR_UL_CONFIG_SRS_PDU_TYPE:
         LOG_D(NR_PHY, "frame %d, slot %d, Got NFAPI_NR_UL_CONFIG_SRS_PDU_TYPE for %d.%d\n", frame, slot, UL_tti_req->SFN, UL_tti_req->Slot);
         nr_fill_srs(gNB, UL_tti_req->SFN, UL_tti_req->Slot, &UL_tti_req->pdus_list[i].srs_pdu);
         break;
     }
   }
 }
 /*
 if (NFAPI_MODE == NFAPI_MODE_VNF) { //If VNF, oai_nfapi functions send respective p7 msgs to PNF for which nPDUs is greater than 0
 // order wrong!!
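The UL hunk dispatches `UL_tti_req->pdus_list[i]` by `pdu_type` in the same way, and for PRACH it additionally forwards the PDU to the first RU when that RU's southbound interface is local RF or IF5. A small sketch of that conditional forwarding step; the RU struct, interface enum, and fill functions here are illustrative stand-ins, not the OAI ones:

    #include <stdio.h>

    /* Hypothetical southbound interface tags, mirroring the check in the hunk above. */
    typedef enum { IF_LOCAL_RF, IF_REMOTE_IF5, IF_OTHER } if_south_t;

    typedef struct { if_south_t if_south; } ru_t;
    typedef struct { int preamble; } prach_pdu_t;

    static void fill_prach_gnb(const prach_pdu_t *p) { printf("gNB PRACH preamble %d\n", p->preamble); }
    static void fill_prach_ru(const ru_t *ru, const prach_pdu_t *p)
    {
      printf("RU (if_south=%d) PRACH preamble %d\n", ru->if_south, p->preamble);
    }

    static void handle_prach(const ru_t *ru0, const prach_pdu_t *pdu)
    {
      fill_prach_gnb(pdu);
      /* Forward to the RU only for interface types that process PRACH locally,
       * as in the `if_south == LOCAL_RF || if_south == REMOTE_IF5` check above. */
      if (ru0->if_south == IF_LOCAL_RF || ru0->if_south == IF_REMOTE_IF5)
        fill_prach_ru(ru0, pdu);
    }

    int main(void)
    {
      ru_t ru = { IF_LOCAL_RF };
      prach_pdu_t pdu = { 7 };
      handle_prach(&ru, &pdu);
      return 0;
    }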
@@ -252,6 +289,7 @@ void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO)
   if (number_dl_pdu>0)
     oai_nfapi_dl_tti_req(DL_req);
 }
 */
 /* this thread is done with the sched_info, decrease the reference counter */
 if ((slot_type == NR_DOWNLINK_SLOT || slot_type == NR_MIXED_SLOT) && NFAPI_MODE == NFAPI_MONOLITHIC)
 {