ZhouShuya / OpenXG-RAN - Commits

Commit 7854133b
authored Feb 10, 2021 by Sakthivel Velumani
Removing some of the old pthreads
Leaving ru_thread and L1 stats thread
parent 31fb7f31
Showing 5 changed files with 71 additions and 1290 deletions
executables/nr-gnb.c            +52  -648
executables/nr-ru.c             +0   -642
executables/softmodem-common.h  +4   -0
openair1/PHY/defs_RU.h          +6   -0
openair1/PHY/defs_gNB.h         +9   -0
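The bulk of the change removes dedicated per-function pthreads (gNB_L1_thread and gNB_L1_thread_tx on the gNB side; ru_thread_tx, ru_thread_prach and ru_thread_asynch_rxtx with their wakeup helpers on the RU side). The per-slot work visible in the diff is instead carried by tx_func() and rx_func(), which exchange processingData_L1_t / processingData_RU_t messages through the softmodem thread pool (pullTpool / pushTpool), while ru_thread and the L1 stats thread stay as plain pthreads, as the commit message says. The fragment below is a minimal, self-contained sketch of that kind of hand-off, written with a plain condition-variable work queue; the queue, job struct and function names are illustrative stand-ins, not the OAI tpool / notifiedFIFO API.

/* Minimal sketch (not OAI code): one worker thread consumes per-slot jobs
 * from a condition-variable protected queue, mirroring how rx_func/tx_func
 * are dispatched through the softmodem thread pool after this commit. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct slot_job {          /* analogue of processingData_L1_t */
  int frame_tx;
  int slot_tx;
  struct slot_job *next;
} slot_job_t;

static struct {
  pthread_mutex_t lock;
  pthread_cond_t  nonempty;
  slot_job_t     *head, *tail;
  int             shutdown;
} q = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, NULL, NULL, 0 };

static void push_job(slot_job_t *j) {
  pthread_mutex_lock(&q.lock);
  j->next = NULL;
  if (q.tail) q.tail->next = j; else q.head = j;
  q.tail = j;
  pthread_cond_signal(&q.nonempty);
  pthread_mutex_unlock(&q.lock);
}

static slot_job_t *pull_job(void) {
  pthread_mutex_lock(&q.lock);
  while (!q.head && !q.shutdown)
    pthread_cond_wait(&q.nonempty, &q.lock);
  slot_job_t *j = q.head;
  if (j) { q.head = j->next; if (!q.head) q.tail = NULL; }
  pthread_mutex_unlock(&q.lock);
  return j;                        /* NULL only once drained and shut down */
}

static void *worker(void *arg) {
  (void)arg;
  slot_job_t *j;
  while ((j = pull_job()) != NULL) {
    /* stand-in for phy_procedures_gNB_TX(gNB, frame_tx, slot_tx, 1) */
    printf("TX processing for %d.%d\n", j->frame_tx, j->slot_tx);
    free(j);
  }
  return NULL;
}

int main(void) {
  pthread_t tid;
  pthread_create(&tid, NULL, worker, NULL);

  for (int slot = 0; slot < 4; slot++) {   /* producer: the RX side */
    slot_job_t *j = malloc(sizeof(*j));
    j->frame_tx = 0;
    j->slot_tx = slot;
    push_job(j);
  }

  pthread_mutex_lock(&q.lock);             /* ordered shutdown */
  q.shutdown = 1;
  pthread_cond_signal(&q.nonempty);
  pthread_mutex_unlock(&q.lock);
  pthread_join(tid, NULL);
  return 0;
}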
executables/nr-gnb.c
@@ -81,7 +81,7 @@
#include "s1ap_eNB.h"
#include "SIMULATION/ETH_TRANSPORT/proto.h"
#include <executables/softmodem-common.h>
#include "T.h"
@@ -111,30 +111,15 @@ time_stats_t softmodem_stats_rxtx_sf; // total tx time
time_stats_t nfapi_meas; // total tx time
time_stats_t softmodem_stats_rx_sf; // total rx time

/* mutex, cond and variable to serialize phy proc TX calls
 * (this mechanism may be relaxed in the future for better
 * performances)
 */
static struct {
  pthread_mutex_t mutex_phy_proc_tx;
  pthread_cond_t cond_phy_proc_tx;
  volatile uint8_t phy_proc_CC_id;
} sync_phy_proc;

extern double cpuf;

void init_gNB(int, int);
void stop_gNB(int nb_inst);
int wakeup_txfh(PHY_VARS_gNB *gNB, gNB_L1_rxtx_proc_t *proc, int frame_tx, int slot_tx, uint64_t timestamp_tx);
int wakeup_tx(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int frame_tx, int slot_tx, uint64_t timestamp_tx);

#include "executables/thread-common.h"
//extern PARALLEL_CONF_t get_thread_parallel_conf(void);
//extern WORKER_CONF_t get_thread_worker_conf(void);

void wakeup_prach_gNB(PHY_VARS_gNB *gNB, RU_t *ru, int frame, int subframe);

extern uint8_t nfapi_mode;
extern void oai_subframe_ind(uint16_t sfn, uint16_t sf);
extern void add_subframe(uint16_t *frameP, uint16_t *subframeP, int offset);
@@ -142,41 +127,42 @@ extern void add_subframe(uint16_t *frameP, uint16_t *subframeP, int offset);
//#define TICK_TO_US(ts) (ts.diff)
#define TICK_TO_US(ts) (ts.trials==0?0:ts.diff/ts.trials)

static inline int rxtx(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int frame_tx, int slot_tx, char *thread_name) {
extern void init_td_thread(PHY_VARS_gNB *);
extern void init_te_thread(PHY_VARS_gNB *);

void tx_func(void *param) {
  processingData_L1_t *info = (processingData_L1_t *) param;
  PHY_VARS_gNB *gNB = info->gNB;
  int frame_tx = info->frame_tx;
  int slot_tx = info->slot_tx;
  phy_procedures_gNB_TX(gNB, frame_tx, slot_tx, 1);
  // start FH TX processing
  notifiedFIFO_elt_t *res;
  res = pullTpool(gNB->resp_RU_tx, gNB->threadPool);
  processingData_RU_t *syncMsg = (processingData_RU_t *) NotifiedFifoData(res);
  syncMsg->frame_tx = frame_tx;
  syncMsg->slot_tx = slot_tx;
  syncMsg->timestamp_tx = info->timestamp_tx;
  syncMsg->ru = gNB->RU_list[0];
  res->key = slot_tx;
  pushTpool(gNB->threadPool, res);
}

void rx_func(void *param) {
  processingData_L1_t *info = (processingData_L1_t *) param;
  PHY_VARS_gNB *gNB = info->gNB;
  int frame_rx = info->frame_rx;
  int slot_rx = info->slot_rx;
  int frame_tx = info->frame_tx;
  int slot_tx = info->slot_tx;
  sl_ahead = sf_ahead * gNB->frame_parms.slots_per_subframe;
  nfapi_nr_config_request_scf_t *cfg = &gNB->gNB_config;
  start_meas(&softmodem_stats_rxtx_sf);
  //start_meas(&softmodem_stats_rxtx_sf);
  // *******************************************************************
  // NFAPI not yet supported for NR - this code has to be revised
  if (nfapi_mode == 1) {
    // I am a PNF and I need to let nFAPI know that we have a (sub)frame tick
    //add_subframe(&frame, &subframe, 4);
    //oai_subframe_ind(proc->frame_tx, proc->subframe_tx);
    //LOG_D(PHY, "oai_subframe_ind(frame:%u, subframe:%d) - NOT CALLED ********\n", frame, subframe);
    start_meas(&nfapi_meas);
    oai_subframe_ind(frame_rx, slot_rx);
    stop_meas(&nfapi_meas);
/*if (gNB->UL_INFO.rx_ind.rx_indication_body.number_of_pdus||
gNB->UL_INFO.harq_ind.harq_indication_body.number_of_harqs ||
gNB->UL_INFO.crc_ind.crc_indication_body.number_of_crcs ||
gNB->UL_INFO.rach_ind.number_of_pdus ||
gNB->UL_INFO.cqi_ind.number_of_cqis
) {
LOG_D(PHY, "UL_info[rx_ind:%05d:%d harqs:%05d:%d crcs:%05d:%d rach_pdus:%0d.%d:%d cqis:%d] RX:%04d%d TX:%04d%d \n",
NFAPI_SFNSF2DEC(gNB->UL_INFO.rx_ind.sfn_sf), gNB->UL_INFO.rx_ind.rx_indication_body.number_of_pdus,
NFAPI_SFNSF2DEC(gNB->UL_INFO.harq_ind.sfn_sf), gNB->UL_INFO.harq_ind.harq_indication_body.number_of_harqs,
NFAPI_SFNSF2DEC(gNB->UL_INFO.crc_ind.sfn_sf), gNB->UL_INFO.crc_ind.crc_indication_body.number_of_crcs,
gNB->UL_INFO.rach_ind.sfn, gNB->UL_INFO.rach_ind.slot,gNB->UL_INFO.rach_ind.number_of_pdus,
gNB->UL_INFO.cqi_ind.number_of_cqis,
frame_rx, slot_rx,
frame_tx, slot_tx);
}*/
  }

  // ****************************************
  T(T_GNB_PHY_DL_TICK, T_INT(gNB->Mod_id), T_INT(frame_tx), T_INT(slot_tx));
@@ -233,12 +219,6 @@ static inline int rxtx(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int frame_t
  rnti_to_remove_count = 0;
  if (pthread_mutex_unlock(&rnti_to_remove_mutex)) exit(1);
/*
// if this is IF5 or 3GPP_gNB
if (gNB && gNB->RU_list && gNB->RU_list[0] && gNB->RU_list[0]->function < NGFI_RAU_IF4p5) {
wakeup_prach_gNB(gNB,NULL,proc->frame_rx,proc->slot_rx);
}
*/
  // Call the scheduler
  pthread_mutex_lock(&gNB->UL_INFO_mutex);
@@ -273,21 +253,24 @@ static inline int rxtx(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int frame_t
  if (oai_exit) return(-1);

  // *****************************************
  //stop_meas( &softmodem_stats_rxtx_sf );
  // TX processing for subframe n+sf_ahead
  LOG_D(PHY, "%s() Exit proc[rx:%d%d tx:%d%d]\n", __FUNCTION__, frame_rx, slot_rx, frame_tx, slot_tx);
  // run PHY TX procedures the one after the other for all CCs to avoid race conditions
  // (may be relaxed in the future for performance reasons)
  // *****************************************
  if (tx_slot_type == NR_DOWNLINK_SLOT || tx_slot_type == NR_MIXED_SLOT) {
    notifiedFIFO_elt_t *res;

    if (get_thread_parallel_conf() != PARALLEL_RU_L1_TRX_SPLIT) {
      if (tx_slot_type == NR_DOWNLINK_SLOT || tx_slot_type == NR_MIXED_SLOT) {
        phy_procedures_gNB_TX(gNB, frame_tx, slot_tx, 1);
        res = pullTpool(gNB->resp_L1_tx, gNB->threadPool);
      }

      processingData_L1_t *syncMsg = (processingData_L1_t *) NotifiedFifoData(res);
      syncMsg->gNB = gNB;
      syncMsg->frame_rx = frame_rx;
      syncMsg->slot_rx = slot_rx;
      syncMsg->frame_tx = frame_tx;
      syncMsg->slot_tx = slot_tx;
      syncMsg->timestamp_tx = info->timestamp_tx;
      res->key = slot_tx;
      pushTpool(gNB->threadPool, res);
    }
  }

  stop_meas(&softmodem_stats_rxtx_sf);
  LOG_D(PHY, "%s() Exit proc[rx:%d%d tx:%d%d]\n", __FUNCTION__, frame_rx, slot_rx, frame_tx, slot_tx);
#if 0
  LOG_D(PHY, "rxtx:%lld nfapi:%lld phy:%lld tx:%lld rx:%lld prach:%lld ofdm:%lld ",
        softmodem_stats_rxtx_sf.diff_now, nfapi_meas.diff_now,
@@ -327,466 +310,7 @@ static inline int rxtx(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int frame_t
        TICK_TO_US(gNB->ulsch_tc_intl2_stats)
       );
#endif
  return(0);
}

static void *gNB_L1_thread_tx(void *param) {
  PHY_VARS_gNB *gNB = (PHY_VARS_gNB *) param;
  gNB_L1_proc_t *gNB_proc = &gNB->proc;
  gNB_L1_rxtx_proc_t *L1_proc_tx = &gNB_proc->L1_proc_tx;
  //PHY_VARS_gNB *gNB = RC.gNB[0][proc->CC_id];
  char thread_name[100];
  sprintf(thread_name, "gNB_L1_thread_tx\n");

  while (!oai_exit) {
    if (wait_on_condition(&L1_proc_tx->mutex, &L1_proc_tx->cond, &L1_proc_tx->instance_cnt, thread_name) < 0) break;

    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX1, 1);

    if (oai_exit) break;

    // *****************************************
    // TX processing for subframe n+4
    // run PHY TX procedures the one after the other for all CCs to avoid race conditions
    // (may be relaxed in the future for performance reasons)
    // *****************************************
    int frame_tx = L1_proc_tx->frame_tx;
    int slot_tx = L1_proc_tx->slot_tx;
    uint64_t timestamp_tx = L1_proc_tx->timestamp_tx;
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SLOT_NUMBER_TX1_GNB, slot_tx);
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX1_GNB, frame_tx);
    phy_procedures_gNB_TX(gNB, frame_tx, slot_tx, 1);
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_WAKEUP_TXFH, 1);
    pthread_mutex_lock(&L1_proc_tx->mutex);
    L1_proc_tx->instance_cnt = -1;

    // the thread can now be woken up
    if (pthread_cond_signal(&L1_proc_tx->cond) != 0) {
      LOG_E(PHY, "[gNB] ERROR pthread_cond_signal for gNB TXnp4 thread\n");
      exit_fun("ERROR pthread_cond_signal");
    }

    pthread_mutex_unlock(&L1_proc_tx->mutex);
    wakeup_txfh(gNB, L1_proc_tx, frame_tx, slot_tx, timestamp_tx);
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_WAKEUP_TXFH, 0);
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX1, 0);
  }

  return 0;
}
/*!
* \brief The RX UE-specific and TX thread of gNB.
* \param param is a \ref gNB_L1_proc_t structure which contains the info what to process.
* \returns a pointer to an int. The storage is not on the heap and must not be freed.
*/
static void *gNB_L1_thread(void *param) {
  static int gNB_thread_rxtx_status;
  PHY_VARS_gNB *gNB = (PHY_VARS_gNB *) param;
  gNB_L1_proc_t *gNB_proc = &gNB->proc;
  gNB_L1_rxtx_proc_t *L1_proc = &gNB_proc->L1_proc;
  //PHY_VARS_gNB *gNB = RC.gNB[0][proc->CC_id];
  char thread_name[100];
  // set default return value
  gNB_thread_rxtx_status = 0;
  sprintf(thread_name, "gNB_L1_thread");

  while (!oai_exit) {
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX0, 0);

    if (wait_on_condition(&L1_proc->mutex, &L1_proc->cond, &L1_proc->instance_cnt, thread_name) < 0) break;

    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX0, 1);
    int frame_rx = L1_proc->frame_rx;
    int slot_rx = L1_proc->slot_rx;
    int frame_tx = L1_proc->frame_tx;
    int slot_tx = L1_proc->slot_tx;
    uint64_t timestamp_tx = L1_proc->timestamp_tx;
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SLOT_NUMBER_TX0_GNB, slot_tx);
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SLOT_NUMBER_RX0_GNB, slot_rx);
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_GNB, frame_tx);
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_GNB, frame_rx);

    if (oai_exit) break;

    if (gNB->CC_id == 0) {
      if (rxtx(gNB, frame_rx, slot_rx, frame_tx, slot_tx, thread_name) < 0) break;
    }

    if (release_thread(&L1_proc->mutex, &L1_proc->instance_cnt, thread_name) < 0) break;

    if (get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) wakeup_tx(gNB, frame_rx, slot_rx, frame_tx, slot_tx, timestamp_tx);
    else if (get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT) wakeup_txfh(gNB, L1_proc, frame_tx, slot_tx, timestamp_tx);
  } // while !oai_exit

  LOG_D(PHY, " *** Exiting gNB thread RXn_TXnp4\n");
  gNB_thread_rxtx_status = 0;
  return &gNB_thread_rxtx_status;
}
#if 0
// Wait for gNB application initialization to be complete (gNB registration to MME)
static void wait_system_ready (char *message, volatile int *start_flag) {
static char *indicator[] = {". ", ".. ", "... ", ".... ", ".....",
" ....", " ...", " ..", " .", " "
};
int i = 0;
while ((!oai_exit) && (*start_flag == 0)) {
LOG_N(EMU, message, indicator[i]);
fflush(stdout);
i = (i + 1) % (sizeof(indicator) / sizeof(indicator[0]));
usleep(200000);
}
LOG_D(EMU,"\n");
}
#endif
void gNB_top(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, char *string, struct RU_t_s *ru) {
  gNB_L1_proc_t *proc = &gNB->proc;
  gNB_L1_rxtx_proc_t *L1_proc = &proc->L1_proc;
  NR_DL_FRAME_PARMS *fp = ru->nr_frame_parms;
  RU_proc_t *ru_proc = &ru->proc;
  proc->frame_rx = frame_rx;
  proc->slot_rx = slot_rx;
  sl_ahead = sf_ahead * fp->slots_per_subframe;

  if (!oai_exit) {
    T(T_ENB_MASTER_TICK, T_INT(0), T_INT(proc->frame_rx), T_INT(proc->slot_rx));
    L1_proc->timestamp_tx = ru_proc->timestamp_rx + (sf_ahead * fp->samples_per_subframe);
    L1_proc->frame_rx = ru_proc->frame_rx;
    L1_proc->slot_rx = ru_proc->tti_rx;
    L1_proc->frame_tx = (L1_proc->slot_rx > (fp->slots_per_frame - 1 - (fp->slots_per_subframe * sf_ahead))) ? (L1_proc->frame_rx + 1) & 1023 : L1_proc->frame_rx;
    L1_proc->slot_tx = (L1_proc->slot_rx + (fp->slots_per_subframe * sf_ahead)) % fp->slots_per_frame;

    if (rxtx(gNB, L1_proc->frame_rx, L1_proc->slot_rx, L1_proc->frame_tx, L1_proc->slot_tx, string) < 0) LOG_E(PHY, "gNB %d CC_id %d failed during execution\n", gNB->Mod_id, gNB->CC_id);

    ru_proc->timestamp_tx = L1_proc->timestamp_tx;
    ru_proc->tti_tx = L1_proc->slot_tx;
    ru_proc->frame_tx = L1_proc->frame_tx;
  }
}
int wakeup_txfh(PHY_VARS_gNB *gNB, gNB_L1_rxtx_proc_t *proc, int frame_tx, int slot_tx, uint64_t timestamp_tx) {
  RU_t *ru;
  RU_proc_t *ru_proc;
  int waitret = 0, ret = 0, time_ns = 1000*1000;
  struct timespec now, abstime;
  // note this should depend on the numerology used by the TX L1 thread, set here for 500us slot time
  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_GAIN_CONTROL, 1);
  time_ns = time_ns / gNB->frame_parms.slots_per_subframe;
  AssertFatal((ret = pthread_mutex_lock(&proc->mutex_RUs_tx)) == 0, "mutex_lock returns %d\n", ret);

  while (proc->instance_cnt_RUs < 0) {
    clock_gettime(CLOCK_REALTIME, &now);
    abstime.tv_sec = now.tv_sec;
    abstime.tv_nsec = now.tv_nsec + time_ns;

    if (abstime.tv_nsec >= 1000*1000*1000) {
      abstime.tv_nsec -= 1000*1000*1000;
      abstime.tv_sec += 1;
    }

    if ((waitret = pthread_cond_timedwait(&proc->cond_RUs, &proc->mutex_RUs_tx, &abstime)) == 0) break; // this unlocks mutex_rxtx while waiting and then locks it again
  }

  proc->instance_cnt_RUs = -1;
  VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_UE, proc->instance_cnt_RUs);
  AssertFatal((ret = pthread_mutex_unlock(&proc->mutex_RUs_tx)) == 0, "mutex_unlock returns %d\n", ret);
  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_GAIN_CONTROL, 0);

  if (waitret == ETIMEDOUT) {
    LOG_W(PHY, "Dropping TX slot (%d.%d) because FH is blocked more than 1 slot times (500us)\n", frame_tx, slot_tx);
    AssertFatal((ret = pthread_mutex_lock(&gNB->proc.mutex_RU_tx)) == 0, "mutex_lock returns %d\n", ret);
    gNB->proc.RU_mask_tx = 0;
    AssertFatal((ret = pthread_mutex_unlock(&gNB->proc.mutex_RU_tx)) == 0, "mutex_unlock returns %d\n", ret);
    AssertFatal((ret = pthread_mutex_lock(&proc->mutex_RUs_tx)) == 0, "mutex_lock returns %d\n", ret);
    proc->instance_cnt_RUs = 0;
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_UE, proc->instance_cnt_RUs);
    AssertFatal((ret = pthread_mutex_unlock(&proc->mutex_RUs_tx)) == 0, "mutex_unlock returns %d\n", ret);
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_UE, 1);
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_UE, 0);
    return(-1);
  }

  for (int i = 0; i < gNB->num_RU; i++) {
    ru = gNB->RU_list[i];
    ru_proc = &ru->proc;

    if (ru_proc->instance_cnt_gNBs == 0) {
      VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_TRX_TST_UE, 1);
      LOG_E(PHY, "Frame %d, subframe %d: TX FH thread busy, dropping Frame %d, subframe %d\n", ru_proc->frame_tx, ru_proc->tti_tx, proc->frame_rx, proc->slot_rx);
      AssertFatal((ret = pthread_mutex_lock(&gNB->proc.mutex_RU_tx)) == 0, "mutex_lock returns %d\n", ret);
      gNB->proc.RU_mask_tx = 0;
      AssertFatal((ret = pthread_mutex_unlock(&gNB->proc.mutex_RU_tx)) == 0, "mutex_unlock returns %d\n", ret);
      VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_TRX_TST_UE, 0);
      return(-1);
    }

    AssertFatal((ret = pthread_mutex_lock(&ru_proc->mutex_gNBs)) == 0, "ERROR pthread_mutex_lock failed on mutex_gNBs L1_thread_tx with ret=%d\n", ret);
    ru_proc->instance_cnt_gNBs = 0;
    ru_proc->timestamp_tx = timestamp_tx;
    ru_proc->tti_tx = slot_tx;
    ru_proc->frame_tx = frame_tx;
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX1_UE, ru_proc->instance_cnt_gNBs);
    LOG_D(PHY, "Signaling tx_thread_fh for %d.%d\n", frame_tx, slot_tx);
    // the thread can now be woken up
    AssertFatal(pthread_cond_signal(&ru_proc->cond_gNBs) == 0, "[gNB] ERROR pthread_cond_signal for gNB TXnp4 thread\n");
    AssertFatal((ret = pthread_mutex_unlock(&ru_proc->mutex_gNBs)) == 0, "mutex_unlock returned %d\n", ret);
  }

  return(0);
}
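A quick sanity check of the timeout used above (an illustrative calculation, not part of the patch): time_ns starts at 1000*1000 ns, i.e. one 1 ms subframe, and is divided by frame_parms.slots_per_subframe, so with 30 kHz subcarrier spacing (2 slots per subframe) the pthread_cond_timedwait budget is 500 000 ns, matching the 500 us slot time mentioned in the comment; with 120 kHz spacing (8 slots per subframe) it would shrink to 125 us.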
int wakeup_tx(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int frame_tx, int slot_tx, uint64_t timestamp_tx) {
  gNB_L1_rxtx_proc_t *L1_proc_tx = &gNB->proc.L1_proc_tx;
  int ret;
  AssertFatal((ret = pthread_mutex_lock(&L1_proc_tx->mutex)) == 0, "mutex_lock returns %d\n", ret);

  while (L1_proc_tx->instance_cnt == 0) {
    pthread_cond_wait(&L1_proc_tx->cond, &L1_proc_tx->mutex);
  }

  L1_proc_tx->instance_cnt = 0;
  L1_proc_tx->slot_rx = slot_rx;
  L1_proc_tx->frame_rx = frame_rx;
  L1_proc_tx->slot_tx = slot_tx;
  L1_proc_tx->frame_tx = frame_tx;
  L1_proc_tx->timestamp_tx = timestamp_tx;
  VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX1_UE, 1);
  VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX1_UE, 0);
  // the thread can now be woken up
  AssertFatal(pthread_cond_signal(&L1_proc_tx->cond) == 0, "ERROR pthread_cond_signal for gNB L1 thread\n");
  AssertFatal((ret = pthread_mutex_unlock(&L1_proc_tx->mutex)) == 0, "mutex_unlock returns %d\n", ret);
  return(0);
}
int wakeup_rxtx(PHY_VARS_gNB *gNB, RU_t *ru) {
  gNB_L1_proc_t *proc = &gNB->proc;
  gNB_L1_rxtx_proc_t *L1_proc = &proc->L1_proc;
  NR_DL_FRAME_PARMS *fp = &gNB->frame_parms;
  RU_proc_t *ru_proc = &ru->proc;
  int ret;
  int i;
  struct timespec abstime;
  int time_ns = 50000;
  int wait_timer = 0;
  bool do_last_check = 1;
  AssertFatal((ret = pthread_mutex_lock(&proc->mutex_RU)) == 0, "mutex_lock returns %d\n", ret);

  for (i = 0; i < gNB->num_RU; i++) {
    if (ru == gNB->RU_list[i]) {
      if ((proc->RU_mask & (1 << i)) > 0)
        LOG_E(PHY, "gNB %d frame %d, subframe %d : previous information from RU %d (num_RU %d,mask %x) has not been served yet!\n",
              gNB->Mod_id, proc->frame_rx, proc->slot_rx, ru->idx, gNB->num_RU, proc->RU_mask);

      proc->RU_mask |= (1 << i);
    }
  }

  if (proc->RU_mask != (1 << gNB->num_RU) - 1) { // not all RUs have provided their information so return
    LOG_E(PHY, "Not all RUs have provided their info\n");
    AssertFatal((ret = pthread_mutex_unlock(&proc->mutex_RU)) == 0, "mutex_unlock returns %d\n", ret);
    return(0);
  } else { // all RUs have provided their information so continue on and wakeup gNB processing
    proc->RU_mask = 0;
    AssertFatal((ret = pthread_mutex_unlock(&proc->mutex_RU)) == 0, "muex_unlock returns %d\n", ret);
  }

  // wake up TX for subframe n+sf_ahead
  // lock the TX mutex and make sure the thread is ready
  while (wait_timer < 200) {
    clock_gettime(CLOCK_REALTIME, &abstime);
    abstime.tv_nsec = abstime.tv_nsec + time_ns;

    if (abstime.tv_nsec >= 1000*1000*1000) {
      abstime.tv_nsec -= 1000*1000*1000;
      abstime.tv_sec += 1;
    }

    AssertFatal((ret = pthread_mutex_timedlock(&L1_proc->mutex, &abstime)) == 0, "mutex_lock returns %d\n", ret);

    if (L1_proc->instance_cnt == 0) { // L1_thread is busy so wait for a bit
      AssertFatal((ret = pthread_mutex_unlock(&L1_proc->mutex)) == 0, "muex_unlock return %d\n", ret);
      wait_timer += 50;
      usleep(50);
    } else {
      do_last_check = 0;
      break;
    }
  }

  if (do_last_check) {
    AssertFatal((ret = pthread_mutex_timedlock(&L1_proc->mutex, &abstime)) == 0, "mutex_lock returns %d\n", ret);

    if (L1_proc->instance_cnt == 0) { // L1_thread is busy so abort the subframe
      AssertFatal((ret = pthread_mutex_unlock(&L1_proc->mutex)) == 0, "muex_unlock return %d\n", ret);
      LOG_W(PHY, "L1_thread isn't ready in %d.%d, aborting RX processing\n", ru_proc->frame_rx, ru_proc->tti_rx);
      return(-1);
    }
  }

  ++L1_proc->instance_cnt;
// We have just received and processed the common part of a subframe, say n.
// TS_rx is the last received timestamp (start of 1st slot), TS_tx is the desired
// transmitted timestamp of the next TX slot (first).
// The last (TS_rx mod samples_per_frame) was n*samples_per_tti,
// we want to generate subframe (n+sf_ahead), so TS_tx = TX_rx+sf_ahead*samples_per_tti,
// and proc->slot_tx = proc->slot_rx+sf_ahead
  L1_proc->timestamp_tx = ru_proc->timestamp_rx + (sf_ahead * fp->samples_per_subframe);
  L1_proc->frame_rx = ru_proc->frame_rx;
  L1_proc->slot_rx = ru_proc->tti_rx;
  L1_proc->frame_tx = (L1_proc->slot_rx > (fp->slots_per_frame - 1 - (fp->slots_per_subframe * sf_ahead))) ? (L1_proc->frame_rx + 1) & 1023 : L1_proc->frame_rx;
  L1_proc->slot_tx = (L1_proc->slot_rx + (fp->slots_per_subframe * sf_ahead)) % fp->slots_per_frame;
  LOG_D(PHY, "wakeupL1: passing parameter IC = %d, RX: %d.%d, TX: %d.%d to L1 sf_ahead = %d\n", L1_proc->instance_cnt, L1_proc->frame_rx, L1_proc->slot_rx, L1_proc->frame_tx, L1_proc->slot_tx, sf_ahead);
  pthread_mutex_unlock(&L1_proc->mutex);

  // the thread can now be woken up
  if (pthread_cond_signal(&L1_proc->cond) != 0) {
    LOG_E(PHY, "[gNB] ERROR pthread_cond_signal for gNB RXn-TXnp4 thread\n");
    exit_fun("ERROR pthread_cond_signal");
    return(-1);
  }

  return(0);
}
/*
void wakeup_prach_gNB(PHY_VARS_gNB *gNB,RU_t *ru,int frame,int subframe) {
gNB_L1_proc_t *proc = &gNB->proc;
LTE_DL_FRAME_PARMS *fp=&gNB->frame_parms;
int i;
if (ru!=NULL) {
pthread_mutex_lock(&proc->mutex_RU_PRACH);
for (i=0;i<gNB->num_RU;i++) {
if (ru == gNB->RU_list[i]) {
LOG_D(PHY,"frame %d, subframe %d: RU %d for gNB %d signals PRACH (mask %x, num_RU %d)\n",frame,subframe,i,gNB->Mod_id,proc->RU_mask_prach,gNB->num_RU);
if ((proc->RU_mask_prach&(1<<i)) > 0)
LOG_E(PHY,"gNB %d frame %d, subframe %d : previous information (PRACH) from RU %d (num_RU %d, mask %x) has not been served yet!\n",
gNB->Mod_id,frame,subframe,ru->idx,gNB->num_RU,proc->RU_mask_prach);
proc->RU_mask_prach |= (1<<i);
}
}
if (proc->RU_mask_prach != (1<<gNB->num_RU)-1) { // not all RUs have provided their information so return
pthread_mutex_unlock(&proc->mutex_RU_PRACH);
return;
}
else { // all RUs have provided their information so continue on and wakeup gNB processing
proc->RU_mask_prach = 0;
pthread_mutex_unlock(&proc->mutex_RU_PRACH);
}
}
// check if we have to detect PRACH first
if (is_prach_subframe(fp,frame,subframe)>0) {
LOG_D(PHY,"Triggering prach processing, frame %d, subframe %d\n",frame,subframe);
if (proc->instance_cnt_prach == 0) {
LOG_W(PHY,"[gNB] Frame %d Subframe %d, dropping PRACH\n", frame,subframe);
return;
}
// wake up thread for PRACH RX
if (pthread_mutex_lock(&proc->mutex_prach) != 0) {
LOG_E( PHY, "[gNB] ERROR pthread_mutex_lock for gNB PRACH thread %d (IC %d)\n", proc->thread_index, proc->instance_cnt_prach);
exit_fun( "error locking mutex_prach" );
return;
}
++proc->instance_cnt_prach;
// set timing for prach thread
proc->frame_prach = frame;
proc->subframe_prach = subframe;
// the thread can now be woken up
if (pthread_cond_signal(&proc->cond_prach) != 0) {
LOG_E( PHY, "[gNB] ERROR pthread_cond_signal for gNB PRACH thread %d\n", proc->thread_index);
exit_fun( "ERROR pthread_cond_signal" );
return;
}
pthread_mutex_unlock( &proc->mutex_prach );
}
}*/
/*!
* \brief The prach receive thread of gNB.
* \param param is a \ref gNB_L1_proc_t structure which contains the info what to process.
* \returns a pointer to an int. The storage is not on the heap and must not be freed.
*/
/*
static void* gNB_thread_prach( void* param ) {
static int gNB_thread_prach_status;
PHY_VARS_gNB *gNB= (PHY_VARS_gNB *)param;
gNB_L1_proc_t *proc = &gNB->proc;
// set default return value
gNB_thread_prach_status = 0;
while (!oai_exit) {
if (oai_exit) break;
if (wait_on_condition(&proc->mutex_prach,&proc->cond_prach,&proc->instance_cnt_prach,"gNB_prach_thread") < 0) break;
LOG_D(PHY,"Running gNB prach procedures\n");
prach_procedures(gNB ,0);
if (release_thread(&proc->mutex_prach,&proc->instance_cnt_prach,"gNB_prach_thread") < 0) break;
}
LOG_I(PHY, "Exiting gNB thread PRACH\n");
gNB_thread_prach_status = 0;
return &gNB_thread_prach_status;
}
}
*/
extern void init_td_thread(PHY_VARS_gNB *);
extern void init_te_thread(PHY_VARS_gNB *);

static void *process_stats_thread(void *param) {
  PHY_VARS_gNB *gNB = (PHY_VARS_gNB *) param;
@@ -807,90 +331,10 @@ static void *process_stats_thread(void *param) {
  return(NULL);
}
void init_gNB_proc(int inst) {
  int i = 0;
  int CC_id = 0;
  PHY_VARS_gNB *gNB;
  gNB_L1_proc_t *proc;
  gNB_L1_rxtx_proc_t *L1_proc, *L1_proc_tx;
  // LOG_I(PHY,"%s(inst:%d) RC.nb_nr_CC[inst]:%d \n",__FUNCTION__,inst,RC.nb_nr_CC[inst]);
  gNB = RC.gNB[inst];
  LOG_I(PHY, "Initializing gNB processes instance:%d CC_id %d \n", inst, CC_id);
  proc = &gNB->proc;
  L1_proc = &proc->L1_proc;
  L1_proc_tx = &proc->L1_proc_tx;
  L1_proc->instance_cnt = -1;
  L1_proc_tx->instance_cnt = -1;
  L1_proc->instance_cnt_RUs = 0;
  L1_proc_tx->instance_cnt_RUs = 0;
  proc->instance_cnt_prach = -1;
  proc->instance_cnt_asynch_rxtx = -1;
  proc->CC_id = CC_id;
  proc->first_rx = 1;
  proc->first_tx = 1;
  proc->RU_mask = 0;
  proc->RU_mask_tx = (1 << gNB->num_RU) - 1;
  proc->RU_mask_prach = 0;
  pthread_mutex_init(&gNB->UL_INFO_mutex, NULL);
  pthread_mutex_init(&L1_proc->mutex, NULL);
  pthread_mutex_init(&L1_proc_tx->mutex, NULL);
  pthread_cond_init(&L1_proc->cond, NULL);
  pthread_cond_init(&L1_proc_tx->cond, NULL);
  pthread_mutex_init(&proc->mutex_prach, NULL);
  pthread_mutex_init(&proc->mutex_asynch_rxtx, NULL);
  pthread_mutex_init(&proc->mutex_RU, NULL);
  pthread_mutex_init(&proc->mutex_RU_tx, NULL);
  pthread_mutex_init(&proc->mutex_RU_PRACH, NULL);
  pthread_cond_init(&proc->cond_prach, NULL);
  pthread_cond_init(&proc->cond_asynch_rxtx, NULL);
  LOG_I(PHY, "gNB->single_thread_flag:%d\n", gNB->single_thread_flag);

  if (get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT || get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) {
    threadCreate(&L1_proc->pthread, gNB_L1_thread, gNB, "L1_proc", -1, OAI_PRIORITY_RT);
    threadCreate(&L1_proc_tx->pthread, gNB_L1_thread_tx, gNB, "L1_proc_tx", -1, OAI_PRIORITY_RT);
  }

  if (opp_enabled == 1) threadCreate(&proc->L1_stats_thread, process_stats_thread, (void *)gNB, "time_meas", -1, OAI_PRIORITY_RT_LOW);

  //pthread_create( &proc->pthread_prach, attr_prach, gNB_thread_prach, gNB );
  char name[16];

  if (gNB->single_thread_flag == 0) {
    snprintf(name, sizeof(name), "L1 %d", i);
    pthread_setname_np(L1_proc->pthread, name);
    snprintf(name, sizeof(name), "L1TX %d", i);
    pthread_setname_np(L1_proc_tx->pthread, name);
  }

  AssertFatal(proc->instance_cnt_prach == -1, "instance_cnt_prach = %d\n", proc->instance_cnt_prach);
  /* setup PHY proc TX sync mechanism */
  pthread_mutex_init(&sync_phy_proc.mutex_phy_proc_tx, NULL);
  pthread_cond_init(&sync_phy_proc.cond_phy_proc_tx, NULL);
  sync_phy_proc.phy_proc_CC_id = 0;
  gNB->threadPool = (tpool_t *) malloc(sizeof(tpool_t));
  gNB->respDecode = (notifiedFIFO_t *) malloc(sizeof(notifiedFIFO_t));
  int numCPU = sysconf(_SC_NPROCESSORS_ONLN);
  uint32_t num_threads_pusch;
  paramdef_t PUSCHThreads[] = NUM_THREADS_DESC;
  config_get(PUSCHThreads, sizeof(PUSCHThreads)/sizeof(paramdef_t), NULL);
  int threadCnt = min(numCPU, num_threads_pusch);
  char ul_pool[80];
  sprintf(ul_pool, "-1");
  int s_offset = 0;

  for (int icpu = 1; icpu < threadCnt; icpu++) {
    sprintf(ul_pool + 2 + s_offset, ",-1");
    s_offset += 3;
  }

  initTpool(ul_pool, gNB->threadPool, false);
  initNotifiedFIFO(gNB->respDecode);
}

void init_gNB_Tpool(int inst) {
  PHY_VARS_gNB *gNB;
  gNB = RC.gNB[inst];
  gNB_L1_proc_t *proc = &gNB->proc;
  // ULSCH decoding threadpool
  gNB->threadPool = (tpool_t *) malloc(sizeof(tpool_t));
@@ -922,6 +366,9 @@ void init_gNB_Tpool(int inst) {
  // RU TX result FIFO
  gNB->resp_RU_tx = (notifiedFIFO_t *) malloc(sizeof(notifiedFIFO_t));
  initNotifiedFIFO(gNB->resp_RU_tx);
  // Stats measurement thread
  if (opp_enabled == 1) threadCreate(&proc->L1_stats_thread, process_stats_thread, (void *)gNB, "time_meas", -1, OAI_PRIORITY_RT_LOW);
}
@@ -929,56 +376,13 @@ void init_gNB_Tpool(int inst) {
 * \brief Terminate gNB TX and RX threads.
 */
void kill_gNB_proc(int inst) {
  int *status;
  PHY_VARS_gNB *gNB;
  gNB_L1_proc_t *proc;
  gNB_L1_rxtx_proc_t *L1_proc, *L1_proc_tx;
  gNB = RC.gNB[inst];
  proc = &gNB->proc;
  L1_proc = &proc->L1_proc;
  L1_proc_tx = &proc->L1_proc_tx;
  LOG_I(PHY, "Killing TX inst %d\n", inst);

  if (get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT || get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) {
    pthread_mutex_lock(&L1_proc->mutex);
    L1_proc->instance_cnt = 0;
    pthread_cond_signal(&L1_proc->cond);
    pthread_mutex_unlock(&L1_proc->mutex);
    pthread_mutex_lock(&L1_proc_tx->mutex);
    L1_proc_tx->instance_cnt = 0;
    pthread_cond_signal(&L1_proc_tx->cond);
    pthread_mutex_unlock(&L1_proc_tx->mutex);
  }

  proc->instance_cnt_prach = 0;
  pthread_cond_signal(&proc->cond_prach);
  pthread_cond_signal(&proc->cond_asynch_rxtx);
  pthread_cond_broadcast(&sync_phy_proc.cond_phy_proc_tx);
  // LOG_D(PHY, "joining pthread_prach\n");
  // pthread_join( proc->pthread_prach, (void**)&status );
  LOG_I(PHY, "Destroying prach mutex/cond\n");
  pthread_mutex_destroy(&proc->mutex_prach);
  pthread_cond_destroy(&proc->cond_prach);
  LOG_I(PHY, "Destroying UL_INFO mutex\n");
  pthread_mutex_destroy(&gNB->UL_INFO_mutex);

  if (get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT || get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) {
    LOG_I(PHY, "Joining L1_proc mutex/cond\n");
    pthread_join(L1_proc->pthread, (void **)&status);
    LOG_I(PHY, "Joining L1_proc_tx mutex/cond\n");
    pthread_join(L1_proc_tx->pthread, (void **)&status);
  }

  LOG_I(PHY, "Destroying L1_proc mutex/cond\n");
  pthread_mutex_destroy(&L1_proc->mutex);
  pthread_cond_destroy(&L1_proc->cond);
  LOG_I(PHY, "Destroying L1_proc_tx mutex/cond\n");
  pthread_mutex_destroy(&L1_proc_tx->mutex);
  pthread_cond_destroy(&L1_proc_tx->cond);
  pthread_mutex_destroy(&proc->mutex_RU);
  pthread_mutex_destroy(&proc->mutex_RU_tx);
}
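Both gNB_top() and wakeup_rxtx() above derive the TX timing from the RX timing with the same expressions: timestamp_tx = timestamp_rx + sf_ahead * samples_per_subframe, slot_tx = (slot_rx + slots_per_subframe * sf_ahead) % slots_per_frame, and a frame number that wraps at 1024 when the TX slot spills into the next frame. The small program below only evaluates those expressions for one assumed numerology (30 kHz SCS: 20 slots per frame, 2 slots per subframe) and an assumed sf_ahead of 6; the concrete numbers are illustrative and not taken from the patch.

/* Illustrative only: evaluates the slot/frame advance used by gNB_top()
 * and wakeup_rxtx() for an assumed numerology and sf_ahead value. */
#include <stdio.h>

int main(void) {
  const int slots_per_frame    = 20; /* 30 kHz SCS (mu = 1) */
  const int slots_per_subframe = 2;
  const int sf_ahead           = 6;  /* assumed TX advance, in subframes */

  for (int slot_rx = 0; slot_rx < slots_per_frame; slot_rx++) {
    int frame_rx = 100;
    /* same ternary as in the patch: wrap the frame if the TX slot spills over */
    int frame_tx = (slot_rx > (slots_per_frame - 1 - (slots_per_subframe * sf_ahead)))
                     ? (frame_rx + 1) & 1023
                     : frame_rx;
    int slot_tx = (slot_rx + (slots_per_subframe * sf_ahead)) % slots_per_frame;
    printf("RX %d.%2d -> TX %d.%2d\n", frame_rx, slot_rx, frame_tx, slot_tx);
  }
  return 0;
}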
executables/nr-ru.c
@@ -120,22 +120,6 @@ extern int emulate_rf;
extern int numerology;
extern int usrp_tx_thread;

typedef struct processingData_L1 {
  int frame_rx;
  int frame_tx;
  int slot_rx;
  int slot_tx;
  openair0_timestamp timestamp_tx;
  PHY_VARS_gNB *gNB;
} processingData_L1_t;

typedef struct processingData_RU {
  int frame_tx;
  int slot_tx;
  openair0_timestamp timestamp_tx;
  RU_t *ru;
} processingData_RU_t;

/*************************************************************/
/* Functions to attach and configure RRU                     */
@@ -805,101 +789,6 @@ void tx_rf(RU_t *ru,int frame,int slot, uint64_t timestamp) {
/*!
 * \brief The Asynchronous RX/TX FH thread of RAU/RCC/gNB/RRU.
 * This handles the RX FH for an asynchronous RRU/UE
 * \param param is a \ref gNB_L1_proc_t structure which contains the info what to process.
 * \returns a pointer to an int. The storage is not on the heap and must not be freed.
 */
void *ru_thread_asynch_rxtx(void *param) {
  static int ru_thread_asynch_rxtx_status;
  RU_t *ru = (RU_t *) param;
  RU_proc_t *proc = &ru->proc;
  nfapi_nr_config_request_scf_t *cfg = &ru->config;
  int slot = 0, frame = 0;
  // wait for top-level synchronization and do one acquisition to get timestamp for setting frame/subframe
  wait_sync("ru_thread_asynch_rxtx");
  printf("waiting for devices (ru_thread_asynch_rx)\n");
  wait_on_condition(&proc->mutex_asynch_rxtx, &proc->cond_asynch_rxtx, &proc->instance_cnt_asynch_rxtx, "thread_asynch");
  printf("devices ok (ru_thread_asynch_rx)\n");

  while (!oai_exit) {
    if (slot == ru->nr_frame_parms->slots_per_frame) {
      slot = 0;
      frame++;
      frame &= 1023;
    } else {
      slot++;
    }

    LOG_D(PHY, "ru_thread_asynch_rxtx: Waiting on incoming fronthaul\n");

    // asynchronous receive from north (RRU IF4/IF5)
    if (ru->fh_north_asynch_in) {
      if ((nr_slot_select(cfg, frame, slot) & NR_DOWNLINK_SLOT) > 0)
        ru->fh_north_asynch_in(ru, &frame, &slot);
    } else AssertFatal(1==0, "Unknown function in ru_thread_asynch_rxtx\n");
  }

  ru_thread_asynch_rxtx_status = 0;
  return(&ru_thread_asynch_rxtx_status);
}

/*!
 * \brief The prach receive thread of RU.
 * \param param is a \ref RU_proc_t structure which contains the info what to process.
 * \returns a pointer to an int. The storage is not on the heap and must not be freed.
 */
void *ru_thread_prach(void *param) {
  static int ru_thread_prach_status;
  RU_t *ru = (RU_t *) param;
  RU_proc_t *proc = (RU_proc_t *) &ru->proc;
  // set default return value
  ru_thread_prach_status = 0;

  while (RC.ru_mask > 0) {
    usleep(1e6);
    LOG_I(PHY, "%s() RACH waiting for RU to be configured\n", __FUNCTION__);
  }

  LOG_I(PHY, "%s() RU configured - RACH processing thread running\n", __FUNCTION__);

  while (!oai_exit) {
    if (wait_on_condition(&proc->mutex_prach, &proc->cond_prach, &proc->instance_cnt_prach, "ru_prach_thread") < 0) break;
/*VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_RU_PRACH_RX, 1 );
if (ru->gNB_list[0]){
prach_procedures(
ru->gNB_list[0],0
);
}
else {
rx_prach(NULL,
ru,
NULL,
NULL,
NULL,
proc->frame_prach,
0,0
);
}
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_RU_PRACH_RX, 0 );*/
    if (release_thread(&proc->mutex_prach, &proc->instance_cnt_prach, "ru_prach_thread") < 0) break;
  }

  LOG_I(PHY, "Exiting RU thread PRACH\n");
  ru_thread_prach_status = 0;
  return &ru_thread_prach_status;
}

int wakeup_synch(RU_t *ru) {
  struct timespec wait;
@@ -999,63 +888,6 @@ void do_ru_synch(RU_t *ru) {
    ru->rfdevice.trx_set_freq_func(&ru->rfdevice, ru->rfdevice.openair0_cfg, 0);
}

void wakeup_gNB_L1s(RU_t *ru) {
  int i;
  PHY_VARS_gNB **gNB_list = ru->gNB_list;
  LOG_D(PHY, "wakeup_gNB_L1s (num %d) for RU %d ru->gNB_top:%p\n", ru->num_gNB, ru->idx, ru->gNB_top);

  if (ru->num_gNB == 1 && ru->gNB_top != 0 && get_thread_parallel_conf() == PARALLEL_SINGLE_THREAD) {
    // call gNB function directly
    char string[20];
    sprintf(string, "Incoming RU %u", ru->idx);
    LOG_D(PHY, "RU %d Call gNB_top\n", ru->idx);
    ru->gNB_top(gNB_list[0], ru->proc.frame_rx, ru->proc.tti_rx, string, ru);
  } else {
    LOG_D(PHY, "ru->num_gNB:%d\n", ru->num_gNB);

    for (i = 0; i < ru->num_gNB; i++) {
      LOG_D(PHY, "ru->wakeup_rxtx:%p\n", ru->nr_wakeup_rxtx);

      if (ru->nr_wakeup_rxtx != 0 && ru->nr_wakeup_rxtx(gNB_list[i], ru) < 0) {
        LOG_E(PHY, "could not wakeup gNB rxtx process for subframe %d\n", ru->proc.tti_rx);
      }
    }
  }
}

int wakeup_prach_ru(RU_t *ru) {
  struct timespec wait;
  wait.tv_sec = 0;
  wait.tv_nsec = 5000000L;

  if (pthread_mutex_timedlock(&ru->proc.mutex_prach, &wait) != 0) {
    LOG_E(PHY, "[RU] ERROR pthread_mutex_lock for RU prach thread (IC %d)\n", ru->proc.instance_cnt_prach);
    exit_fun("error locking mutex_rxtx");
    return(-1);
  }

  if (ru->proc.instance_cnt_prach == -1) {
    ++ru->proc.instance_cnt_prach;
    ru->proc.frame_prach = ru->proc.frame_rx;
    ru->proc.subframe_prach = ru->proc.tti_rx;

    // DJP - think prach_procedures() is looking at gNB frame_prach
    if (ru->gNB_list[0]) {
      ru->gNB_list[0]->proc.frame_prach = ru->proc.frame_rx;
      ru->gNB_list[0]->proc.slot_prach = ru->proc.tti_rx;
    }

    LOG_I(PHY, "RU %d: waking up PRACH thread\n", ru->idx);
    // the thread can now be woken up
    AssertFatal(pthread_cond_signal(&ru->proc.cond_prach) == 0, "ERROR pthread_cond_signal for RU prach thread\n");
  } else LOG_W(PHY, "RU prach thread busy, skipping\n");

  pthread_mutex_unlock(&ru->proc.mutex_prach);
  return(0);
}

// this is for RU with local RF unit
void fill_rf_config(RU_t *ru, char *rf_config_file) {
  int i;
@@ -1371,320 +1203,6 @@ void ru_tx_func(void *param) {
  } //else emulate_rf
}

void *ru_thread_tx(void *param) {
  RU_t *ru = (RU_t *) param;
  RU_proc_t *proc = &ru->proc;
  NR_DL_FRAME_PARMS *fp = ru->nr_frame_parms;
  PHY_VARS_gNB *gNB;
  gNB_L1_proc_t *gNB_proc;
  gNB_L1_rxtx_proc_t *L1_proc;
  char filename[40];
  int print_frame = 8;
  int i = 0;
  int ret;
  wait_on_condition(&proc->mutex_FH1, &proc->cond_FH1, &proc->instance_cnt_FH1, "ru_thread_tx");
  printf("ru_thread_tx ready\n");

  while (!oai_exit) {
    LOG_D(PHY, "ru_thread_tx: Waiting for TX processing\n");
    // wait until eNBs are finished subframe RX n and TX n+4
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RU_TX_WAIT, 1);
    wait_on_condition(&proc->mutex_gNBs, &proc->cond_gNBs, &proc->instance_cnt_gNBs, "ru_thread_tx");
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RU_TX_WAIT, 0);
    ret = pthread_mutex_lock(&proc->mutex_gNBs);
    AssertFatal(ret == 0, "mutex_lock return %d\n", ret);
    int frame_tx = proc->frame_tx;
    int tti_tx = proc->tti_tx;
    uint64_t timestamp_tx = proc->timestamp_tx;
    ret = pthread_mutex_unlock(&proc->mutex_gNBs);
    AssertFatal(ret == 0, "mutex_lock returns %d\n", ret);

    if (oai_exit) break;

    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_RU, frame_tx);
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_TTI_NUMBER_TX0_RU, tti_tx);

    // do TX front-end processing if needed (precoding and/or IDFTs)
    if (ru->feptx_prec) ru->feptx_prec(ru, frame_tx, tti_tx);

    // do OFDM with/without TX front-end processing if needed
    if ((ru->fh_north_asynch_in == NULL) && (ru->feptx_ofdm)) ru->feptx_ofdm(ru, frame_tx, tti_tx);

    if (!emulate_rf) {
      // do outgoing fronthaul (south) if needed
      if ((ru->fh_north_asynch_in == NULL) && (ru->fh_south_out)) ru->fh_south_out(ru, frame_tx, tti_tx, timestamp_tx);

      if (ru->fh_north_out) ru->fh_north_out(ru);
    } else {
      if (proc->frame_tx == print_frame) {
        for (i = 0; i < ru->nb_tx; i++) {
          if (proc->tti_tx == 0) {
            sprintf(filename, "gNBdataF_frame%d_sl%d.m", print_frame, proc->tti_tx);
            LOG_M(filename, "txdataF_frame", &ru->gNB_list[0]->common_vars.txdataF[i][0], fp->samples_per_frame_wCP, 1, 1);
            sprintf(filename, "tx%ddataF_frame%d_sl%d.m", i, print_frame, proc->tti_tx);
            LOG_M(filename, "txdataF_frame", &ru->common.txdataF[i][0], fp->samples_per_frame_wCP, 1, 1);
            sprintf(filename, "tx%ddataF_BF_frame%d_sl%d.m", i, print_frame, proc->tti_tx);
            LOG_M(filename, "txdataF_BF_frame", &ru->common.txdataF_BF[i][0], fp->samples_per_subframe_wCP, 1, 1);
          }

          if (proc->tti_tx == 9) {
            sprintf(filename, "tx%ddata_frame%d.m", i, print_frame);
            LOG_M(filename, "txdata_frame", &ru->common.txdata[i][0], fp->samples_per_frame, 1, 1);
            sprintf(filename, "tx%ddata_frame%d.dat", i, print_frame);
            FILE *output_fd = fopen(filename, "w");

            if (output_fd) {
              fwrite(&ru->common.txdata[i][0], sizeof(int32_t), fp->samples_per_frame, output_fd);
              fclose(output_fd);
            } else {
              LOG_E(PHY, "Cannot write to file %s\n", filename);
            }
          } //if(proc->tti_tx == 9)
        } //for (i=0; i<ru->nb_tx; i++)
      } //if(proc->frame_tx == print_frame)
    } //else emulate_rf

    release_thread(&proc->mutex_gNBs, &proc->instance_cnt_gNBs, "ru_thread_tx");
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX1_UE, proc->instance_cnt_gNBs);

    for (i = 0; i < ru->num_gNB; i++) {
      gNB = ru->gNB_list[i];
      gNB_proc = &gNB->proc;
      L1_proc = (get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) ? &gNB_proc->L1_proc_tx : &gNB_proc->L1_proc;
      ret = pthread_mutex_lock(&gNB_proc->mutex_RU_tx);
      AssertFatal(ret == 0, "mutex_lock returns %d\n", ret);

      for (int j = 0; j < gNB->num_RU; j++) {
        if (ru == gNB->RU_list[j]) {
          if ((gNB_proc->RU_mask_tx & (1 << j)) > 0)
            LOG_E(PHY, "gNB %d frame %d, subframe %d : previous information from RU tx %d (num_RU %d,mask %x) has not been served yet!\n",
                  gNB->Mod_id, gNB_proc->frame_rx, gNB_proc->slot_rx, ru->idx, gNB->num_RU, gNB_proc->RU_mask_tx);

          gNB_proc->RU_mask_tx |= (1 << j);
        }
      }

      if (gNB_proc->RU_mask_tx != (1 << gNB->num_RU) - 1) { // not all RUs have provided their information so return
        ret = pthread_mutex_unlock(&gNB_proc->mutex_RU_tx);
        AssertFatal(ret == 0, "mutex_unlock returns %d\n", ret);
      } else { // all RUs TX are finished so send the ready signal to gNB processing
        gNB_proc->RU_mask_tx = 0;
        ret = pthread_mutex_unlock(&gNB_proc->mutex_RU_tx);
        AssertFatal(ret == 0, "mutex_unlock returns %d\n", ret);
        ret = pthread_mutex_lock(&L1_proc->mutex_RUs_tx);
        AssertFatal(ret == 0, "mutex_lock returns %d\n", ret);

        // the thread can now be woken up
        if (L1_proc->instance_cnt_RUs == -1) {
          L1_proc->instance_cnt_RUs = 0;
          VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_UE, L1_proc->instance_cnt_RUs);
          AssertFatal(pthread_cond_signal(&L1_proc->cond_RUs) == 0, "ERROR pthread_cond_signal for gNB_L1_thread\n");
        } //else AssertFatal(1==0,"gNB TX thread is not ready\n");

        ret = pthread_mutex_unlock(&L1_proc->mutex_RUs_tx);
        AssertFatal(ret == 0, "mutex_unlock returns %d\n", ret);
      }
    }
  }

  release_thread(&proc->mutex_FH1, &proc->instance_cnt_FH1, "ru_thread_tx");
  return 0;
}
void tx_func(void *param) {
  processingData_L1_t *info = (processingData_L1_t *) param;
  PHY_VARS_gNB *gNB = info->gNB;
  int frame_tx = info->frame_tx;
  int slot_tx = info->slot_tx;
  phy_procedures_gNB_TX(gNB, frame_tx, slot_tx, 1);
  // start FH TX processing
  notifiedFIFO_elt_t *res;
  res = pullTpool(gNB->resp_RU_tx, gNB->threadPool);
  processingData_RU_t *syncMsg = (processingData_RU_t *) NotifiedFifoData(res);
  syncMsg->frame_tx = frame_tx;
  syncMsg->slot_tx = slot_tx;
  syncMsg->timestamp_tx = info->timestamp_tx;
  syncMsg->ru = gNB->RU_list[0];
  res->key = slot_tx;
  pushTpool(gNB->threadPool, res);
}
void rx_func(void *param) {
  processingData_L1_t *info = (processingData_L1_t *) param;
  PHY_VARS_gNB *gNB = info->gNB;
  int frame_rx = info->frame_rx;
  int slot_rx = info->slot_rx;
  int frame_tx = info->frame_tx;
  int slot_tx = info->slot_tx;
  sl_ahead = sf_ahead * gNB->frame_parms.slots_per_subframe;
  nfapi_nr_config_request_scf_t *cfg = &gNB->gNB_config;
  //start_meas(&softmodem_stats_rxtx_sf);
  T(T_GNB_PHY_DL_TICK, T_INT(gNB->Mod_id), T_INT(frame_tx), T_INT(slot_tx));

  /* hack to remove UEs */
  extern int rnti_to_remove[10];
  extern volatile int rnti_to_remove_count;
  extern pthread_mutex_t rnti_to_remove_mutex;

  if (pthread_mutex_lock(&rnti_to_remove_mutex)) exit(1);

  int up_removed = 0;
  int down_removed = 0;
  int pucch_removed = 0;

  for (int i = 0; i < rnti_to_remove_count; i++) {
    LOG_W(PHY, "to remove rnti %d\n", rnti_to_remove[i]);
    void clean_gNB_ulsch(NR_gNB_ULSCH_t *ulsch);
    void clean_gNB_dlsch(NR_gNB_DLSCH_t *dlsch);
    int j;

    for (j = 0; j < NUMBER_OF_NR_ULSCH_MAX; j++)
      if (gNB->ulsch[j][0]->rnti == rnti_to_remove[i]) {
        gNB->ulsch[j][0]->rnti = 0;
        gNB->ulsch[j][0]->harq_mask = 0;
        //clean_gNB_ulsch(gNB->ulsch[j][0]);
        int h;

        for (h = 0; h < NR_MAX_ULSCH_HARQ_PROCESSES; h++) {
          gNB->ulsch[j][0]->harq_processes[h]->status = SCH_IDLE;
          gNB->ulsch[j][0]->harq_processes[h]->round = 0;
          gNB->ulsch[j][0]->harq_processes[h]->handled = 0;
        }

        up_removed++;
      }

    for (j = 0; j < NUMBER_OF_NR_DLSCH_MAX; j++)
      if (gNB->dlsch[j][0]->rnti == rnti_to_remove[i]) {
        gNB->dlsch[j][0]->rnti = 0;
        gNB->dlsch[j][0]->harq_mask = 0;
        //clean_gNB_dlsch(gNB->dlsch[j][0]);
        down_removed++;
      }

    for (j = 0; j < NUMBER_OF_NR_PUCCH_MAX; j++)
      if (gNB->pucch[j]->active > 0 && gNB->pucch[j]->pucch_pdu.rnti == rnti_to_remove[i]) {
        gNB->pucch[j]->active = 0;
        gNB->pucch[j]->pucch_pdu.rnti = 0;
        pucch_removed++;
      }
#if 0
for (j = 0; j < NUMBER_OF_NR_PDCCH_MAX; j++)
gNB->pdcch_pdu[j].frame = -1;
for (j = 0; j < NUMBER_OF_NR_PDCCH_MAX; j++)
gNB->ul_pdcch_pdu[j].frame = -1;
for (j = 0; j < NUMBER_OF_NR_PRACH_MAX; j++)
gNB->prach_vars.list[j].frame = -1;
#endif
  }

  if (rnti_to_remove_count) LOG_W(PHY, "to remove rnti_to_remove_count=%d, up_removed=%d down_removed=%d pucch_removed=%d\n", rnti_to_remove_count, up_removed, down_removed, pucch_removed);

  rnti_to_remove_count = 0;

  if (pthread_mutex_unlock(&rnti_to_remove_mutex)) exit(1);

  // Call the scheduler
  pthread_mutex_lock(&gNB->UL_INFO_mutex);
  gNB->UL_INFO.frame = frame_rx;
  gNB->UL_INFO.slot = slot_rx;
  gNB->UL_INFO.module_id = gNB->Mod_id;
  gNB->UL_INFO.CC_id = gNB->CC_id;
  gNB->if_inst->NR_UL_indication(&gNB->UL_INFO);
  pthread_mutex_unlock(&gNB->UL_INFO_mutex);
  // RX processing
  int tx_slot_type = nr_slot_select(cfg, frame_tx, slot_tx);
  int rx_slot_type = nr_slot_select(cfg, frame_rx, slot_rx);

  if (rx_slot_type == NR_UPLINK_SLOT || rx_slot_type == NR_MIXED_SLOT) {
    // UE-specific RX processing for subframe n
    // TODO: check if this is correct for PARALLEL_RU_L1_TRX_SPLIT
    // Do PRACH RU processing
    L1_nr_prach_procedures(gNB, frame_rx, slot_rx);
    //apply the rx signal rotation here
    apply_nr_rotation_ul(&gNB->frame_parms, gNB->common_vars.rxdataF[0], slot_rx, 0, gNB->frame_parms.Ncp == EXTENDED ? 12 : 14, gNB->frame_parms.ofdm_symbol_size);
    phy_procedures_gNB_uespec_RX(gNB, frame_rx, slot_rx);
  }

  if (oai_exit) return(-1);

  //stop_meas( &softmodem_stats_rxtx_sf );
  LOG_D(PHY, "%s() Exit proc[rx:%d%d tx:%d%d]\n", __FUNCTION__, frame_rx, slot_rx, frame_tx, slot_tx);
  notifiedFIFO_elt_t *res;

  if (tx_slot_type == NR_DOWNLINK_SLOT || tx_slot_type == NR_MIXED_SLOT) {
    res = pullTpool(gNB->resp_L1_tx, gNB->threadPool);
    processingData_L1_t *syncMsg = (processingData_L1_t *) NotifiedFifoData(res);
    syncMsg->gNB = gNB;
    syncMsg->frame_rx = frame_rx;
    syncMsg->slot_rx = slot_rx;
    syncMsg->frame_tx = frame_tx;
    syncMsg->slot_tx = slot_tx;
    syncMsg->timestamp_tx = info->timestamp_tx;
    res->key = slot_tx;
    pushTpool(gNB->threadPool, res);
  }
LOG_D(PHY, "rxtx:%lld nfapi:%lld phy:%lld tx:%lld rx:%lld prach:%lld ofdm:%lld ",
softmodem_stats_rxtx_sf.diff_now, nfapi_meas.diff_now,
TICK_TO_US(gNB->phy_proc),
TICK_TO_US(gNB->phy_proc_tx),
TICK_TO_US(gNB->phy_proc_rx),
TICK_TO_US(gNB->rx_prach),
TICK_TO_US(gNB->ofdm_mod_stats),
softmodem_stats_rxtx_sf.diff_now, nfapi_meas.diff_now);
LOG_D(PHY,
"dlsch[enc:%lld mod:%lld scr:%lld rm:%lld t:%lld i:%lld] rx_dft:%lld ",
TICK_TO_US(gNB->dlsch_encoding_stats),
TICK_TO_US(gNB->dlsch_modulation_stats),
TICK_TO_US(gNB->dlsch_scrambling_stats),
TICK_TO_US(gNB->dlsch_rate_matching_stats),
TICK_TO_US(gNB->dlsch_turbo_encoding_stats),
TICK_TO_US(gNB->dlsch_interleaving_stats),
TICK_TO_US(gNB->rx_dft_stats));
LOG_D(PHY," ulsch[ch:%lld freq:%lld dec:%lld demod:%lld ru:%lld ",
TICK_TO_US(gNB->ulsch_channel_estimation_stats),
TICK_TO_US(gNB->ulsch_freq_offset_estimation_stats),
TICK_TO_US(gNB->ulsch_decoding_stats),
TICK_TO_US(gNB->ulsch_demodulation_stats),
TICK_TO_US(gNB->ulsch_rate_unmatching_stats));
LOG_D(PHY, "td:%lld dei:%lld dem:%lld llr:%lld tci:%lld ",
TICK_TO_US(gNB->ulsch_turbo_decoding_stats),
TICK_TO_US(gNB->ulsch_deinterleaving_stats),
TICK_TO_US(gNB->ulsch_demultiplexing_stats),
TICK_TO_US(gNB->ulsch_llr_stats),
TICK_TO_US(gNB->ulsch_tc_init_stats));
LOG_D(PHY, "tca:%lld tcb:%lld tcg:%lld tce:%lld l1:%lld l2:%lld]\n\n",
TICK_TO_US(gNB->ulsch_tc_alpha_stats),
TICK_TO_US(gNB->ulsch_tc_beta_stats),
TICK_TO_US(gNB->ulsch_tc_gamma_stats),
TICK_TO_US(gNB->ulsch_tc_ext_stats),
TICK_TO_US(gNB->ulsch_tc_intl1_stats),
TICK_TO_US(gNB->ulsch_tc_intl2_stats)
);
#endif
}
void *ru_thread( void *param ) {
  static int ru_thread_status;
  RU_t *ru = (RU_t *) param;
@@ -1771,16 +1289,6 @@ void *ru_thread( void *param ) {
      else LOG_I(PHY, "RU %d rf device ready\n", ru->idx);
    } else LOG_I(PHY, "RU %d no rf device\n", ru->idx);

    // if an asnych_rxtx thread exists
    // wakeup the thread because the devices are ready at this point
    if ((ru->fh_south_asynch_in) || (ru->fh_north_asynch_in)) {
      pthread_mutex_lock(&proc->mutex_asynch_rxtx);
      proc->instance_cnt_asynch_rxtx = 0;
      pthread_mutex_unlock(&proc->mutex_asynch_rxtx);
      pthread_cond_signal(&proc->cond_asynch_rxtx);
    } else LOG_I(PHY, "RU %d no asynch_south interface\n", ru->idx);

  // if this is a slave RRU, try to synchronize on the DL frequency
  if ((ru->is_slave) && (ru->if_south == LOCAL_RF)) do_ru_synch(ru);
@@ -1797,11 +1305,6 @@ void *ru_thread( void *param ) {
    }
  }

  pthread_mutex_lock(&proc->mutex_FH1);
  proc->instance_cnt_FH1 = 0;
  pthread_mutex_unlock(&proc->mutex_FH1);
  pthread_cond_signal(&proc->cond_FH1);

  // This is a forever while loop, it loops over subframes which are scheduled by incoming samples from HW devices
  while (!oai_exit) {
    // these are local subframe/frame counters to check that we are in synch with the fronthaul timing.
@@ -1829,18 +1332,7 @@ void *ru_thread( void *param ) {
          proc->frame_tx, proc->tti_tx,
          RC.gNB[0]->proc.frame_rx, RC.gNB[0]->proc.slot_rx,
          RC.gNB[0]->proc.frame_tx);
/*
LOG_D(PHY,"RU thread (do_prach %d, is_prach_subframe %d), received frame %d, subframe %d\n",
ru->do_prach,
is_prach_subframe(fp, proc->frame_rx, proc->tti_rx),
proc->frame_rx,proc->tti_rx);
if ((ru->do_prach>0) && (is_prach_subframe(fp, proc->frame_rx, proc->tti_rx)==1)) {
wakeup_prach_ru(ru);
}*/
// adjust for timing offset between RU
//printf("~~~~~~~~~~~~~~~~~~~~~~~~~~%d.%d in ru_thread is in process\n", proc->frame_rx, proc->tti_rx);
    if (ru->idx != 0) proc->frame_tx = (proc->frame_tx + proc->frame_offset) & 1023;
    // do RX front-end processing (frequency-shift, dft) if needed
...
@@ -1924,82 +1416,6 @@ void *ru_thread( void *param ) {
  ru_thread_status = 0;
  return &ru_thread_status;
}
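The wake-up pattern that appears repeatedly in ru_thread (FH1, asynch_rxtx) always follows the same idiom: take the mutex, set the instance counter, release the mutex, then signal the condition variable, while the target thread waits until the counter becomes non-negative. A minimal, self-contained sketch of that idiom, with illustrative names only (not the OAI types):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cnd = PTHREAD_COND_INITIALIZER;
static int instance_cnt = -1;            /* -1: idle, >=0: work pending */

static void *waiter(void *arg) {
  (void)arg;
  pthread_mutex_lock(&mtx);
  while (instance_cnt < 0)               /* loop guards against spurious wake-ups */
    pthread_cond_wait(&cnd, &mtx);
  printf("woken, instance_cnt=%d\n", instance_cnt);
  instance_cnt = -1;                     /* consume the event */
  pthread_mutex_unlock(&mtx);
  return NULL;
}

int main(void) {
  pthread_t t;
  pthread_create(&t, NULL, waiter, NULL);
  pthread_mutex_lock(&mtx);              /* producer side, same shape as in ru_thread above */
  instance_cnt = 0;
  pthread_mutex_unlock(&mtx);
  pthread_cond_signal(&cnd);
  pthread_join(t, NULL);
  return 0;
}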
/*
// This thread run the initial synchronization like a UE
void *ru_thread_synch(void *arg) {
RU_t *ru = (RU_t*)arg;
NR_DL_FRAME_PARMS *fp=ru->nr_frame_parms;
int32_t sync_pos,sync_pos2;
uint32_t peak_val;
uint32_t sync_corr[307200] __attribute__((aligned(32)));
static int ru_thread_synch_status;
wait_sync("ru_thread_synch");
// initialize variables for PSS detection
lte_sync_time_init(ru->nr_frame_parms);
while (!oai_exit) {
// wait to be woken up
if (wait_on_condition(&ru->proc.mutex_synch,&ru->proc.cond_synch,&ru->proc.instance_cnt_synch,"ru_thread_synch")<0) break;
// if we're not in synch, then run initial synch
if (ru->in_synch == 0) {
// run intial synch like UE
LOG_I(PHY,"Running initial synchronization\n");
sync_pos = lte_sync_time_gNB(ru->common.rxdata,
fp,
fp->samples_per_subframe*5,
&peak_val,
sync_corr);
LOG_I(PHY,"RU synch: %d, val %d\n",sync_pos,peak_val);
if (sync_pos >= 0) {
if (sync_pos >= fp->nb_prefix_samples)
sync_pos2 = sync_pos - fp->nb_prefix_samples;
else
sync_pos2 = sync_pos + (fp->samples_per_subframe*10) - fp->nb_prefix_samples;
if (fp->frame_type == FDD) {
// PSS is hypothesized in last symbol of first slot in Frame
int sync_pos_slot = (fp->samples_per_subframe>>1) - fp->ofdm_symbol_size - fp->nb_prefix_samples;
if (sync_pos2 >= sync_pos_slot)
ru->rx_offset = sync_pos2 - sync_pos_slot;
else
ru->rx_offset = (fp->samples_per_subframe*10) + sync_pos2 - sync_pos_slot;
}
else {
}
LOG_I(PHY,"Estimated sync_pos %d, peak_val %d => timing offset %d\n",sync_pos,peak_val,ru->rx_offset);
if ((peak_val > 300000) && (sync_pos > 0)) {
// if (sync_pos++ > 3) {
write_output("ru_sync.m","sync",(void*)&sync_corr[0],fp->samples_per_subframe*5,1,2);
write_output("ru_rx.m","rxs",(void*)ru->ru_time.rxdata[0][0],fp->samples_per_subframe*10,1,1);
exit(-1);
}
ru->in_synch=1;
}
}
if (release_thread(&ru->proc.mutex_synch,&ru->proc.instance_cnt_synch,"ru_synch_thread") < 0) break;
} // oai_exit
ru_thread_synch_status = 0;
return &ru_thread_synch_status;
}
*/
int nr_start_if( struct RU_t_s *ru, struct PHY_VARS_gNB_s *gNB ) {
  return(ru->ifdevice.trx_start_func(&ru->ifdevice));
...
@@ -2026,12 +1442,7 @@ void init_RU_proc(RU_t *ru) {
  proc = &ru->proc;
  memset((void *)proc,0,sizeof(RU_proc_t));
  proc->ru                       = ru;
  proc->instance_cnt_prach       = -1;
  proc->instance_cnt_synch       = -1;
  proc->instance_cnt_FH          = -1;
  proc->instance_cnt_FH1         = -1;
  proc->instance_cnt_gNBs        = -1;
  proc->instance_cnt_asynch_rxtx = -1;
  proc->instance_cnt_emulateRF   = -1;
  proc->first_rx                 = 1;
  proc->first_tx                 = 1;
...
@@ -2042,44 +1453,15 @@ void init_RU_proc(RU_t *ru) {
  for (i=0; i<10; i++) proc->symbol_mask[i]=0;
  pthread_mutex_init(&proc->mutex_prach, NULL);
  pthread_mutex_init(&proc->mutex_asynch_rxtx, NULL);
  pthread_mutex_init(&proc->mutex_synch, NULL);
  pthread_mutex_init(&proc->mutex_FH, NULL);
  pthread_mutex_init(&proc->mutex_FH1, NULL);
  pthread_mutex_init(&proc->mutex_emulateRF, NULL);
  pthread_mutex_init(&proc->mutex_gNBs, NULL);
  pthread_cond_init(&proc->cond_prach, NULL);
  pthread_cond_init(&proc->cond_FH, NULL);
  pthread_cond_init(&proc->cond_FH1, NULL);
  pthread_cond_init(&proc->cond_emulateRF, NULL);
  pthread_cond_init(&proc->cond_asynch_rxtx, NULL);
  pthread_cond_init(&proc->cond_synch, NULL);
  pthread_cond_init(&proc->cond_gNBs, NULL);
  threadCreate(&proc->pthread_FH, ru_thread, (void *)ru, "thread_FH", -1, OAI_PRIORITY_RT_MAX);

  if (get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT || get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT)
    threadCreate(&proc->pthread_FH1, ru_thread_tx, (void *)ru, "thread_FH1", -1, OAI_PRIORITY_RT);

  if (emulate_rf)
    threadCreate(&proc->pthread_emulateRF, emulatedRF_thread, (void *)proc, "emulateRF", -1, OAI_PRIORITY_RT);

  if (ru->function == NGFI_RRU_IF4p5) {
    threadCreate(&proc->pthread_prach, ru_thread_prach, (void *)ru, "RACH", -1, OAI_PRIORITY_RT);
    ///tmp deactivation of synch thread
    // if (ru->is_slave == 1) pthread_create( &proc->pthread_synch, attr_synch, ru_thread_synch, (void*)ru);

    if ((ru->if_timing == synch_to_other) || (ru->function == NGFI_RRU_IF5) || (ru->function == NGFI_RRU_IF4p5))
      threadCreate(&proc->pthread_asynch_rxtx, ru_thread_asynch_rxtx, (void *)ru, "asynch_rxtx", -1, OAI_PRIORITY_RT);

    snprintf(name, sizeof(name), "ru_thread_FH %d", ru->idx);
    pthread_setname_np(proc->pthread_FH, name);
  } else if (ru->function == gNodeB_3GPP && ru->if_south == LOCAL_RF) {
    // DJP - need something else to distinguish between monolithic and PNF
    LOG_I(PHY, "%s() DJP - added creation of pthread_prach\n", __FUNCTION__);
    threadCreate(&proc->pthread_prach, ru_thread_prach, (void *)ru, "RACH", -1, OAI_PRIORITY_RT);
  }

  if (get_thread_worker_conf() == WORKER_ENABLE) {
    if (ru->feprx) nr_init_feprx_thread(ru);
...
@@ -2092,26 +1474,10 @@ void init_RU_proc(RU_t *ru) {
void kill_NR_RU_proc(int inst) {
  RU_t *ru = RC.ru[inst];
  RU_proc_t *proc = &ru->proc;
  pthread_mutex_lock(&proc->mutex_FH);
  proc->instance_cnt_FH = 0;
  pthread_mutex_unlock(&proc->mutex_FH);
  pthread_cond_signal(&proc->cond_FH);
  pthread_mutex_lock(&proc->mutex_prach);
  proc->instance_cnt_prach = 0;
  pthread_mutex_unlock(&proc->mutex_prach);
  pthread_cond_signal(&proc->cond_prach);
  pthread_mutex_lock(&proc->mutex_synch);
  proc->instance_cnt_synch = 0;
  pthread_mutex_unlock(&proc->mutex_synch);
  pthread_cond_signal(&proc->cond_synch);
  pthread_mutex_lock(&proc->mutex_gNBs);
  proc->instance_cnt_gNBs = 0;
  pthread_mutex_unlock(&proc->mutex_gNBs);
  pthread_cond_signal(&proc->cond_gNBs);
  pthread_mutex_lock(&proc->mutex_asynch_rxtx);
  proc->instance_cnt_asynch_rxtx = 0;
  pthread_mutex_unlock(&proc->mutex_asynch_rxtx);
  pthread_cond_signal(&proc->cond_asynch_rxtx);
  LOG_D(PHY, "Joining pthread_FH\n");
  pthread_join(proc->pthread_FH, NULL);
...
@@ -2161,16 +1527,8 @@ void kill_NR_RU_proc(int inst) {
    pthread_join(ru->ru_stats_thread, NULL);
  }

  pthread_mutex_destroy(&proc->mutex_prach);
  pthread_mutex_destroy(&proc->mutex_asynch_rxtx);
  pthread_mutex_destroy(&proc->mutex_synch);
  pthread_mutex_destroy(&proc->mutex_FH);
  pthread_mutex_destroy(&proc->mutex_gNBs);
  pthread_cond_destroy(&proc->cond_prach);
  pthread_cond_destroy(&proc->cond_FH);
  pthread_cond_destroy(&proc->cond_asynch_rxtx);
  pthread_cond_destroy(&proc->cond_synch);
  pthread_cond_destroy(&proc->cond_gNBs);
}

int check_capabilities(RU_t *ru, RRU_capabilities_t *cap)
...
executables/softmodem-common.h
View file @
7854133b
...
@@ -240,6 +240,10 @@ extern char *get_softmodem_function(uint64_t *sofmodemfunc_mask_ptr);
extern void set_softmodem_sighandler(void);
extern uint64_t downlink_frequency[MAX_NUM_CCs][4];
extern int32_t uplink_frequency_offset[MAX_NUM_CCs][4];
void tx_func(void *param);
void rx_func(void *param);
void ru_tx_func(void *param);
#ifdef __cplusplus
}
#endif
...
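tx_func(), rx_func() and ru_tx_func() are each declared with a single void *param, so the caller packs all timing information into one message and the callee casts it back. A minimal sketch of that pattern, using a purely hypothetical message type rather than the actual OAI structs:

#include <stdio.h>

typedef struct {            /* hypothetical message; the real code uses its own structs */
  int frame;
  int slot;
} slot_msg_t;

static void rx_func_sketch(void *param) {   /* same signature shape as rx_func(void *param) */
  slot_msg_t *msg = (slot_msg_t *)param;
  printf("processing frame %d slot %d\n", msg->frame, msg->slot);
}

int main(void) {
  slot_msg_t msg = { .frame = 101, .slot = 7 };
  rx_func_sketch(&msg);     /* in practice this call would come from a worker thread */
  return 0;
}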
openair1/PHY/defs_RU.h
View file @
7854133b
...
@@ -741,4 +741,10 @@ typedef struct RRU_config_s {
  MBSFN_config_t MBSFN_config[8];
} RRU_config_t;

typedef struct processingData_RU {
  int frame_tx;
  int slot_tx;
  openair0_timestamp timestamp_tx;
  RU_t *ru;
} processingData_RU_t;

#endif //__PHY_DEFS_RU__H__
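processingData_RU_t bundles what the RU needs to transmit one slot: the TX frame/slot pair, the hardware timestamp, and the owning RU. A hedged sketch of how such a message could be filled; RU_t_sketch and the field values are illustrative stand-ins, and ru_tx_func() as the eventual consumer is an assumption:

#include <stdio.h>

typedef long openair0_timestamp;      /* stand-in for the real typedef */
typedef struct { int idx; } RU_t_sketch;

typedef struct {                      /* mirrors processingData_RU_t above */
  int frame_tx;
  int slot_tx;
  openair0_timestamp timestamp_tx;
  RU_t_sketch *ru;
} ru_msg_t;

int main(void) {
  RU_t_sketch ru = { .idx = 0 };
  ru_msg_t msg = { .frame_tx = 512, .slot_tx = 3,
                   .timestamp_tx = 987654321L, .ru = &ru };
  printf("RU %d: TX %d.%d at ts %ld\n",
         msg.ru->idx, msg.frame_tx, msg.slot_tx, (long)msg.timestamp_tx);
  return 0;
}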
openair1/PHY/defs_gNB.h
View file @
7854133b
...
@@ -875,4 +875,13 @@ union ldpcReqUnion {
  uint64_t p;
};

typedef struct processingData_L1 {
  int frame_rx;
  int frame_tx;
  int slot_rx;
  int slot_tx;
  openair0_timestamp timestamp_tx;
  PHY_VARS_gNB *gNB;
} processingData_L1_t;

#endif
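processingData_L1_t carries both the RX timing (the slot just received) and the TX timing (the slot L1 is preparing), so whoever fills it has to advance the slot counters consistently with the 1024-frame wrap seen in ru_thread (frame & 1023). A small sketch of that arithmetic; the slot-ahead offset and slots-per-frame are chosen purely for illustration:

#include <stdio.h>

#define SLOTS_PER_FRAME 20   /* illustrative value; depends on numerology */
#define TX_AHEAD        6    /* illustrative RX-to-TX slot advance */

static void advance(int frame_rx, int slot_rx, int *frame_tx, int *slot_tx) {
  int abs_slot = slot_rx + TX_AHEAD;
  *slot_tx  = abs_slot % SLOTS_PER_FRAME;
  *frame_tx = (frame_rx + abs_slot / SLOTS_PER_FRAME) & 1023;  /* same wrap as in ru_thread */
}

int main(void) {
  int frame_tx, slot_tx;
  advance(1023, 18, &frame_tx, &slot_tx);
  printf("RX 1023.18 -> TX %d.%d\n", frame_tx, slot_tx);   /* prints TX 0.4 */
  return 0;
}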