Commit 377bbaef authored by Florian Kaltenberger's avatar Florian Kaltenberger

Merge branch 'develop-nr-merge2-parallel' into 'develop-nr'

Develop nr merge2 parallel

See merge request oai/openairinterface5g!443
parents 01aead2b d849dbd0
@@ -189,7 +189,17 @@ const char* eurecomVariablesNames[] = {
  "ue0_trx_write_ns_missing",
  "enb_thread_rxtx_CPUID",
  "ru_thread_CPUID",
  "ru_thread_tx_CPUID",
/*signal for NR*/
"frame_number_TX0_gNB",
"frame_number_TX1_gNB",
"frame_number_RX0_gNB",
"frame_number_RX1_gNB",
"subframe_number_TX0_gNB",
"subframe_number_TX1_gNB",
"subframe_number_RX0_gNB",
"subframe_number_RX1_gNB"
};
const char* eurecomFunctionsNames[] = {
@@ -408,6 +418,10 @@ const char* eurecomFunctionsNames[] = {
  "pdcch_interleaving",
  "pdcch_tx",
/*NR softmodem signal*/
"gNB_thread_rxtx0",
"gNB_thread_rxtx1"
};
struct vcd_module_s vcd_modules[] = {
...
@@ -167,6 +167,17 @@ typedef enum {
  VCD_SIGNAL_DUMPER_VARIABLES_CPUID_ENB_THREAD_RXTX,
  VCD_SIGNAL_DUMPER_VARIABLES_CPUID_RU_THREAD,
  VCD_SIGNAL_DUMPER_VARIABLES_CPUID_RU_THREAD_TX,
/*signal for NR*/
VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_GNB,
VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX1_GNB,
VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_GNB,
VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX1_GNB,
VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_TX0_GNB,
VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_TX1_GNB,
VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_RX0_GNB,
VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_RX1_GNB,
  VCD_SIGNAL_DUMPER_VARIABLES_END
} vcd_signal_dump_variables;
@@ -389,6 +400,10 @@ typedef enum {
  VCD_SIGNAL_DUMPER_FUNCTIONS_PDCCH_INTERLEAVING,
  VCD_SIGNAL_DUMPER_FUNCTIONS_PDCCH_TX,
/*NR softmodem signal*/
VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX0,
VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX1,
  VCD_SIGNAL_DUMPER_FUNCTIONS_END
} vcd_signal_dump_functions;
...
@@ -41,10 +41,10 @@ typedef struct {
} T_cache_t;
/* number of VCD functions (to be kept up to date! see in T_messages.txt) */
#define VCD_NUM_FUNCTIONS 192 //190
/* number of VCD variables (to be kept up to date! see in T_messages.txt) */
#define VCD_NUM_VARIABLES 136 //128
/* first VCD function (to be kept up to date! see in T_messages.txt) */
#define VCD_FIRST_FUNCTION ((uintptr_t)T_VCD_FUNCTION_RT_SLEEP)
...
@@ -1680,6 +1680,48 @@ ID = VCD_VARIABLE_CPUID_RU_THREAD_TX
FORMAT = ulong,value
VCD_NAME = ru_thread_tx_CPUID
#variable for gNB
ID = VCD_VARIABLE_FRAME_NUMBER_TX0_GNB
DESC = VCD variable FRAME_NUMBER_TX0_GNB
GROUP = ALL:VCD:ENB:VCD_VARIABLE
FORMAT = ulong,value
VCD_NAME = frame_number_TX0_gNB
ID = VCD_VARIABLE_FRAME_NUMBER_TX1_GNB
DESC = VCD variable FRAME_NUMBER_TX1_GNB
GROUP = ALL:VCD:ENB:VCD_VARIABLE
FORMAT = ulong,value
VCD_NAME = frame_number_TX1_gNB
ID = VCD_VARIABLE_FRAME_NUMBER_RX0_GNB
DESC = VCD variable FRAME_NUMBER_RX0_GNB
GROUP = ALL:VCD:ENB:VCD_VARIABLE
FORMAT = ulong,value
VCD_NAME = frame_number_RX0_gNB
ID = VCD_VARIABLE_FRAME_NUMBER_RX1_GNB
DESC = VCD variable FRAME_NUMBER_RX1_GNB
GROUP = ALL:VCD:ENB:VCD_VARIABLE
FORMAT = ulong,value
VCD_NAME = frame_number_RX1_gNB
ID = VCD_VARIABLE_SUBFRAME_NUMBER_TX0_GNB
DESC = VCD variable SUBFRAME_NUMBER_TX0_GNB
GROUP = ALL:VCD:ENB:VCD_VARIABLE
FORMAT = ulong,value
VCD_NAME = subframe_number_TX0_gNB
ID = VCD_VARIABLE_SUBFRAME_NUMBER_TX1_GNB
DESC = VCD variable SUBFRAME_NUMBER_TX1_GNB
GROUP = ALL:VCD:ENB:VCD_VARIABLE
FORMAT = ulong,value
VCD_NAME = subframe_number_TX1_gNB
ID = VCD_VARIABLE_SUBFRAME_NUMBER_RX0_GNB
DESC = VCD variable SUBFRAME_NUMBER_RX0_GNB
GROUP = ALL:VCD:ENB:VCD_VARIABLE
FORMAT = ulong,value
VCD_NAME = subframe_number_RX0_gNB
ID = VCD_VARIABLE_SUBFRAME_NUMBER_RX1_GNB
DESC = VCD variable SUBFRAME_NUMBER_RX1_GNB
GROUP = ALL:VCD:ENB:VCD_VARIABLE
FORMAT = ulong,value
VCD_NAME = subframe_number_RX1_gNB
#functions
ID = VCD_FUNCTION_RT_SLEEP
@@ -2637,3 +2679,15 @@ ID = VCD_FUNCTION_PDCCH_TX
GROUP = ALL:VCD:ENB:VCD_FUNCTION
FORMAT = int,value
VCD_NAME = pdcch_tx
#function for gNB
ID = VCD_FUNCTION_gNB_PROC_RXTX0
DESC = VCD function gNB_PROC_RXTX0
GROUP = ALL:VCD:ENB:VCD_FUNCTION
FORMAT = int,value
VCD_NAME = gNB_thread_rxtx0
ID = VCD_FUNCTION_gNB_PROC_RXTX1
DESC = VCD function gNB_PROC_RXTX1
GROUP = ALL:VCD:ENB:VCD_FUNCTION
FORMAT = int,value
VCD_NAME = gNB_thread_rxtx1
@@ -49,7 +49,7 @@ void nr_pdcch_scrambling(uint32_t *in,
void nr_fill_dci_and_dlsch(PHY_VARS_gNB *gNB,
                           int frame,
                           int subframe,
                           gNB_L1_rxtx_proc_t *proc,
                           NR_gNB_DCI_ALLOC_t *dci_alloc,
                           nfapi_nr_dl_config_request_pdu_t *pdu);
...
@@ -108,7 +108,7 @@ void nr_fill_cce_list(NR_gNB_DCI_ALLOC_t* dci_alloc, uint16_t n_shift, uint8_t m
void nr_fill_dci_and_dlsch(PHY_VARS_gNB *gNB,
                           int frame,
                           int subframe,
                           gNB_L1_rxtx_proc_t *proc,
                           NR_gNB_DCI_ALLOC_t *dci_alloc,
                           nfapi_nr_dl_config_request_pdu_t *pdu)
{
...
@@ -99,22 +99,29 @@ typedef struct {
  int frame_rx;
  /// \brief Instance count for RXn-TXnp4 processing thread.
  /// \internal This variable is protected by \ref mutex_rxtx.
  int instance_cnt;
  /// pthread structure for RXn-TXnp4 processing thread
  pthread_t pthread;
  /// pthread attributes for RXn-TXnp4 processing thread
  pthread_attr_t attr;
  /// condition variable for tx processing thread
  pthread_cond_t cond;
  /// mutex for RXn-TXnp4 processing thread
  pthread_mutex_t mutex;
  /// scheduling parameters for RXn-TXnp4 thread
  struct sched_param sched_param_rxtx;
} gNB_rxtx_proc_t;
/// \internal This variable is protected by \ref mutex_RUs.
int instance_cnt_RUs;
/// condition variable for tx processing thread
pthread_cond_t cond_RUs;
/// mutex for RXn-TXnp4 processing thread
pthread_mutex_t mutex_RUs;
} gNB_L1_rxtx_proc_t;
/// Context data structure for eNB subframe processing
typedef struct gNB_L1_proc_t_s {
  /// Component Carrier index
  uint8_t CC_id;
  /// thread index
@@ -181,17 +188,19 @@ typedef struct gNB_proc_t_s {
  pthread_mutex_t mutex_asynch_rxtx;
  /// mutex for RU access to eNB processing (PDSCH/PUSCH)
  pthread_mutex_t mutex_RU;
  /// mutex for RU_tx access to eNB_tx processing (PDSCH/PUSCH)
  pthread_mutex_t mutex_RU_tx;
  /// mutex for RU access to eNB processing (PRACH)
  pthread_mutex_t mutex_RU_PRACH;
  /// mutex for RU access to eNB processing (PRACH BR)
  pthread_mutex_t mutex_RU_PRACH_br;
  /// mask for RUs serving eNB (PDSCH/PUSCH)
  int RU_mask, RU_mask_tx;
  /// mask for RUs serving eNB (PRACH)
  int RU_mask_prach;
  /// set of scheduling variables RXn-TXnp4 threads
  gNB_L1_rxtx_proc_t L1_proc, L1_proc_tx;
} gNB_L1_proc_t;
@@ -251,7 +260,7 @@ typedef struct PHY_VARS_gNB_s {
  module_id_t Mod_id;
  uint8_t CC_id;
  uint8_t configured;
  gNB_L1_proc_t proc;
  int single_thread_flag;
  int abstraction_flag;
  int num_RU;
...
@@ -37,7 +37,7 @@ int oai_nfapi_tx_req(nfapi_tx_request_t *tx_req);
extern uint8_t nfapi_mode;
void handle_nr_nfapi_bch_pdu(PHY_VARS_gNB *gNB,
                             gNB_L1_rxtx_proc_t *proc,
                             nfapi_nr_dl_config_request_pdu_t *dl_config_pdu,
                             uint8_t *sdu)
{
@@ -56,7 +56,7 @@ void handle_nr_nfapi_bch_pdu(PHY_VARS_gNB *gNB,
void handle_nfapi_nr_dci_dl_pdu(PHY_VARS_gNB *gNB,
                                int frame, int subframe,
                                gNB_L1_rxtx_proc_t *proc,
                                nfapi_nr_dl_config_request_pdu_t *dl_config_pdu)
{
  int idx = subframe&1;
@@ -73,7 +73,7 @@ void handle_nfapi_nr_dci_dl_pdu(PHY_VARS_gNB *gNB,
void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO){
  PHY_VARS_gNB *gNB;
  gNB_L1_rxtx_proc_t *proc;
  // copy data from L2 interface into L1 structures
  module_id_t Mod_id = Sched_INFO->module_id;
  uint8_t CC_id = Sched_INFO->CC_id;
@@ -87,7 +87,7 @@ void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO){
  AssertFatal(RC.gNB[Mod_id][CC_id]!=NULL,"RC.gNB[%d][%d] is null\n",Mod_id,CC_id);
  gNB = RC.gNB[Mod_id][CC_id];
  proc = &gNB->proc.L1_proc;
  uint8_t number_dl_pdu = DL_req->dl_config_request_body.number_pdu;
...
@@ -41,5 +41,5 @@
void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO);
void handle_nfapi_nr_dci_dl_pdu(PHY_VARS_gNB *gNB,
                                int frame, int subframe,
                                gNB_L1_rxtx_proc_t *proc,
                                nfapi_nr_dl_config_request_pdu_t *dl_config_pdu);
@@ -152,7 +152,7 @@ void nr_common_signal_procedures (PHY_VARS_gNB *gNB,int frame, int subframe) {
}
void phy_procedures_gNB_TX(PHY_VARS_gNB *gNB,
                           gNB_L1_rxtx_proc_t *proc,
                           int do_meas)
{
  int aa;
...
@@ -36,7 +36,7 @@
lte_subframe_t nr_subframe_select (nfapi_nr_config_request_t *cfg, unsigned char subframe);
void nr_set_ssb_first_subcarrier(nfapi_nr_config_request_t *cfg, NR_DL_FRAME_PARMS *fp);
void phy_procedures_gNB_TX(PHY_VARS_gNB *gNB, gNB_L1_rxtx_proc_t *proc, int do_meas);
void nr_common_signal_procedures (PHY_VARS_gNB *gNB,int frame, int subframe);
void nr_init_feptx_thread(RU_t *ru,pthread_attr_t *attr_feptx);
void nr_feptx_ofdm(RU_t *ru);
...
@@ -77,6 +77,10 @@
#include "NR_EUTRA-MBSFN-SubframeConfig.h"
extern uint16_t sf_ahead;
extern void set_parallel_conf(char *parallel_conf);
extern void set_worker_conf(char *worker_conf);
extern PARALLEL_CONF_t get_thread_parallel_conf(void);
extern WORKER_CONF_t get_thread_worker_conf(void);
void RCconfig_nr_flexran()
{
@@ -290,7 +294,7 @@ void RCconfig_NR_L1(void) {
  }
  if(strcmp(*(L1_ParamList.paramarray[j][L1_TRANSPORT_N_PREFERENCE_IDX].strptr), "local_mac") == 0) {
    //sf_ahead = 2; // Need 4 subframe gap between RX and TX
  }else if (strcmp(*(L1_ParamList.paramarray[j][L1_TRANSPORT_N_PREFERENCE_IDX].strptr), "nfapi") == 0) {
    RC.gNB[j][0]->eth_params_n.local_if_name = strdup(*(L1_ParamList.paramarray[j][L1_LOCAL_N_IF_NAME_IDX].strptr));
    RC.gNB[j][0]->eth_params_n.my_addr = strdup(*(L1_ParamList.paramarray[j][L1_LOCAL_N_ADDRESS_IDX].strptr));
@@ -301,7 +305,7 @@ void RCconfig_NR_L1(void) {
    RC.gNB[j][0]->eth_params_n.remote_portd = *(L1_ParamList.paramarray[j][L1_REMOTE_N_PORTD_IDX].iptr);
    RC.gNB[j][0]->eth_params_n.transp_preference = ETH_UDP_MODE;
    //sf_ahead = 2; // Cannot cope with 4 subframes between RX and TX - set it to 2
    RC.nb_nr_macrlc_inst = 1; // This is used by mac_top_init_gNB()
@@ -400,7 +404,7 @@ void RCconfig_nr_macrlc() {
    RC.nrmac[j]->eth_params_s.remote_portd = *(MacRLC_ParamList.paramarray[j][MACRLC_REMOTE_S_PORTD_IDX].iptr);
    RC.nrmac[j]->eth_params_s.transp_preference = ETH_UDP_MODE;
    //sf_ahead = 2; // Cannot cope with 4 subframes between RX and TX - set it to 2
    printf("**************** vnf_port:%d\n", RC.mac[j]->eth_params_s.my_portc);
    configure_nfapi_vnf(RC.nrmac[j]->eth_params_s.my_addr, RC.nrmac[j]->eth_params_s.my_portc);
@@ -795,6 +799,10 @@ void RCconfig_NRRRC(MessageDef *msg_p, uint32_t i, gNB_RRC_INST *rrc) {
  }
  NRRRC_CONFIGURATION_REQ (msg_p).N_RB_DL[j]= N_RB_DL;
if(N_RB_DL == 217) sf_ahead = 2;
else if(N_RB_DL == 106) sf_ahead = 4;
else AssertFatal (0,"Failed to parse gNB configuration file %s, gnb %d unknown value \"%d\" for N_RB_DL choice: 106, 217 !\n",
RC.config_file_name, i, N_RB_DL);
/*
if ((N_RB_DL!=6) && (N_RB_DL!=15) && (N_RB_DL!=25) && (N_RB_DL!=50) && (N_RB_DL!=75) && (N_RB_DL!=100)) {
@@ -2976,6 +2984,35 @@ int RCconfig_NR_S1(MessageDef *msg_p, uint32_t i) {
  return 0;
}
int RCconfig_nr_parallel(void) {
char *parallel_conf = NULL;
char *worker_conf = NULL;
extern char *parallel_config;
extern char *worker_config;
paramdef_t ThreadParams[] = THREAD_CONF_DESC;
paramlist_def_t THREADParamList = {THREAD_CONFIG_STRING_THREAD_STRUCT,NULL,0};
config_getlist( &THREADParamList,NULL,0,NULL);
if(THREADParamList.numelt>0) {
config_getlist( &THREADParamList,ThreadParams,sizeof(ThreadParams)/sizeof(paramdef_t),NULL);
parallel_conf = strdup(*(THREADParamList.paramarray[0][THREAD_PARALLEL_IDX].strptr));
} else {
parallel_conf = strdup("PARALLEL_RU_L1_TRX_SPLIT");
}
if(THREADParamList.numelt>0) {
config_getlist( &THREADParamList,ThreadParams,sizeof(ThreadParams)/sizeof(paramdef_t),NULL);
worker_conf = strdup(*(THREADParamList.paramarray[0][THREAD_WORKER_IDX].strptr));
} else {
worker_conf = strdup("WORKER_ENABLE");
}
if(parallel_config == NULL) set_parallel_conf(parallel_conf);
if(worker_config == NULL) set_worker_conf(worker_conf);
return 0;
}
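The helpers called above are only declared extern in this file. As a rough illustration of what the string-to-enum mapping behind set_parallel_conf()/set_worker_conf() and get_thread_parallel_conf()/get_thread_worker_conf() amounts to, here is a minimal hypothetical sketch; the real implementations live in the common softmodem code and may differ, only the option strings and the PARALLEL_* / WORKER_* names are taken from this patch:

#include <string.h>

/* Hypothetical stand-in types; the project defines PARALLEL_CONF_t / WORKER_CONF_t itself. */
typedef enum { PARALLEL_SINGLE_THREAD, PARALLEL_RU_L1_SPLIT, PARALLEL_RU_L1_TRX_SPLIT } PARALLEL_CONF_t;
typedef enum { WORKER_DISABLE, WORKER_ENABLE } WORKER_CONF_t;

/* Defaults chosen here match the fallbacks used in RCconfig_nr_parallel() above. */
static PARALLEL_CONF_t example_parallel_mode = PARALLEL_RU_L1_TRX_SPLIT;
static WORKER_CONF_t   example_worker_mode   = WORKER_ENABLE;

/* Map the THREAD_STRUCT parallel_config string onto an enum value. */
static void example_set_parallel_conf(const char *s) {
  if      (strcmp(s, "PARALLEL_SINGLE_THREAD") == 0) example_parallel_mode = PARALLEL_SINGLE_THREAD;
  else if (strcmp(s, "PARALLEL_RU_L1_SPLIT")   == 0) example_parallel_mode = PARALLEL_RU_L1_SPLIT;
  else                                               example_parallel_mode = PARALLEL_RU_L1_TRX_SPLIT;
}

/* Map the THREAD_STRUCT worker_config string onto an enum value. */
static void example_set_worker_conf(const char *s) {
  example_worker_mode = (strcmp(s, "WORKER_DISABLE") == 0) ? WORKER_DISABLE : WORKER_ENABLE;
}

static PARALLEL_CONF_t example_get_thread_parallel_conf(void) { return example_parallel_mode; }
static WORKER_CONF_t   example_get_thread_worker_conf(void)   { return example_worker_mode;   }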
void NRRCConfig(void) {
  paramlist_def_t MACRLCParamList = {CONFIG_STRING_MACRLC_LIST,NULL,0};
@@ -3016,7 +3053,7 @@ void NRRCConfig(void) {
  config_getlist( &RUParamList,NULL,0, NULL);
  RC.nb_RU = RUParamList.numelt;
  RCconfig_nr_parallel();
}
@@ -1099,3 +1099,29 @@ typedef enum {
#define MACRLC_REMOTE_S_PORTD_IDX 16
#define MACRLC_SCHED_MODE_IDX 17
#define MACRLC_PHY_TEST_IDX 18
/* thread configuration parameters section name */
#define THREAD_CONFIG_STRING_THREAD_STRUCT "THREAD_STRUCT"
/* thread configuration parameters names */
#define THREAD_CONFIG_STRING_PARALLEL "parallel_config"
#define THREAD_CONFIG_STRING_WORKER "worker_config"
#define THREAD_PARALLEL_IDX 0
#define THREAD_WORKER_IDX 1
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------*/
/* thread configuration parameters */
/* optname helpstr paramflags XXXptr defXXXval type numelt */
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------*/
#define THREAD_CONF_DESC { \
{THREAD_CONFIG_STRING_PARALLEL, CONFIG_HLP_PARALLEL, 0, strptr:NULL, defstrval:"PARALLEL_RU_L1_TRX_SPLIT", TYPE_STRING, 0}, \
{THREAD_CONFIG_STRING_WORKER, CONFIG_HLP_WORKER, 0, strptr:NULL, defstrval:"WORKER_ENABLE", TYPE_STRING, 0} \
}
#define CONFIG_HLP_WORKER "coding and FEP worker thread WORKER_DISABLE or WORKER_ENABLE\n"
#define CONFIG_HLP_PARALLEL "PARALLEL_SINGLE_THREAD, PARALLEL_RU_L1_SPLIT, or PARALLEL_RU_L1_TRX_SPLIT (PARALLEL_RU_L1_TRX_SPLIT by default)\n"
/*-------------------------------------------------------------------------------------------------------------------------------------------------------------*/
@@ -259,6 +259,15 @@ RUs = (
  }
);
THREAD_STRUCT = (
{
    # three options for the level of parallelism: "PARALLEL_SINGLE_THREAD", "PARALLEL_RU_L1_SPLIT", or "PARALLEL_RU_L1_TRX_SPLIT"
    parallel_config = "PARALLEL_RU_L1_TRX_SPLIT";
    # two options for the worker threads: "WORKER_DISABLE" or "WORKER_ENABLE"
worker_config = "WORKER_DISABLE";
}
);
NETWORK_CONTROLLER :
{
  FLEXRAN_ENABLED = "no";
...
Active_gNBs = ( "gNB-Eurecom-5GNRBox");
# Asn1_verbosity, choice in: none, info, annoying
Asn1_verbosity = "none";
gNBs =
(
{
////////// Identification parameters:
gNB_ID = 0xe00;
cell_type = "CELL_MACRO_GNB";
gNB_name = "gNB-Eurecom-5GNRBox";
// Tracking area code, 0x0000 and 0xfffe are reserved values
tracking_area_code = 1;
plmn_list = ({mcc = 208; mnc = 93; mnc_length = 2;});
    tr_s_preference = "local_mac";
////////// Physical parameters:
component_carriers = (
{
node_function = "3GPP_gNODEB";
node_timing = "synch_to_ext_device";
node_synch_ref = 0;
frame_type = "FDD";
DL_prefix_type = "NORMAL";
UL_prefix_type = "NORMAL";
eutra_band = 22;
downlink_frequency = 3510000000L;
uplink_frequency_offset = -120000000;
Nid_cell = 0;
N_RB_DL = 217;
nb_antenna_ports = 1;
nb_antennas_tx = 1;
nb_antennas_rx = 1;
tx_gain = 90;
rx_gain = 125;
MIB_subCarrierSpacingCommon = 30;
MIB_ssb_SubcarrierOffset = 0;
MIB_dmrs_TypeA_Position = 2;
pdcch_ConfigSIB1 = 0;
SIB1_frequencyOffsetSSB = "khz5";
SIB1_ssb_PeriodicityServingCell = 5;
SIB1_ss_PBCH_BlockPower = -60;
absoluteFrequencySSB = 0;
DL_FreqBandIndicatorNR = 15;
DL_absoluteFrequencyPointA = 15;
DL_offsetToCarrier = 15;
DL_SCS_SubcarrierSpacing = "kHz30";
DL_SCS_SpecificCarrier_k0 = 0;
DL_carrierBandwidth = 15;
DL_locationAndBandwidth = 15;
DL_BWP_SubcarrierSpacing = "kHz30";
DL_BWP_prefix_type = "NORMAL";
UL_FreqBandIndicatorNR = 15;
UL_absoluteFrequencyPointA = 13;
UL_additionalSpectrumEmission = 3;
UL_p_Max = -1;
UL_frequencyShift7p5khz = "TRUE";
UL_offsetToCarrier = 10;
UL_SCS_SubcarrierSpacing = "kHz30";
UL_SCS_SpecificCarrier_k0 = 0;
UL_carrierBandwidth = 15;
UL_locationAndBandwidth = 15;
UL_BWP_SubcarrierSpacing = "kHz30";
UL_BWP_prefix_type = "NORMAL";
UL_timeAlignmentTimerCommon = "infinity";
        ServingCellConfigCommon_n_TimingAdvanceOffset = "n0";
ServingCellConfigCommon_ssb_PositionsInBurst_PR = "shortBitmap";
ServingCellConfigCommon_ssb_periodicityServingCell = 10;
ServingCellConfigCommon_dmrs_TypeA_Position = 2;
NIA_SubcarrierSpacing = "kHz15";
ServingCellConfigCommon_ss_PBCH_BlockPower = -60;
referenceSubcarrierSpacing = "kHz15";
dl_UL_TransmissionPeriodicity = "ms0p5";
nrofDownlinkSlots = 10;
nrofDownlinkSymbols = 10;
nrofUplinkSlots = 10;
nrofUplinkSymbols = 10;
rach_totalNumberOfRA_Preambles = 63;
rach_ssb_perRACH_OccasionAndCB_PreamblesPerSSB_choice = "oneEighth";
rach_ssb_perRACH_OccasionAndCB_PreamblesPerSSB_oneEighth = 4;
rach_ssb_perRACH_OccasionAndCB_PreamblesPerSSB_oneFourth = 8;
rach_ssb_perRACH_OccasionAndCB_PreamblesPerSSB_oneHalf = 16;
rach_ssb_perRACH_OccasionAndCB_PreamblesPerSSB_one = 24;
rach_ssb_perRACH_OccasionAndCB_PreamblesPerSSB_two = 32;
rach_ssb_perRACH_OccasionAndCB_PreamblesPerSSB_four = 8;
rach_ssb_perRACH_OccasionAndCB_PreamblesPerSSB_eight = 4;
rach_ssb_perRACH_OccasionAndCB_PreamblesPerSSB_sixteen = 2;
rach_groupBconfigured = "ENABLE";
rach_ra_Msg3SizeGroupA = 56;
rach_messagePowerOffsetGroupB = "dB0";
rach_numberOfRA_PreamblesGroupA = 32;
rach_ra_ContentionResolutionTimer = 8;
rsrp_ThresholdSSB = 64;
rsrp_ThresholdSSB_SUL = 64;
prach_RootSequenceIndex_choice = "l839";
prach_RootSequenceIndex_l839 = 0;
prach_RootSequenceIndex_l139 = 0;
prach_msg1_SubcarrierSpacing = "kHz30";
restrictedSetConfig = "unrestrictedSet";
msg3_transformPrecoding = "ENABLE";
prach_ConfigurationIndex = 10;
prach_msg1_FDM = "one";
prach_msg1_FrequencyStart = 10;
zeroCorrelationZoneConfig = 10;
preambleReceivedTargetPower = -150;
preambleTransMax = 6;
powerRampingStep = "dB0";
ra_ResponseWindow = 8;
groupHoppingEnabledTransformPrecoding = "ENABLE";
msg3_DeltaPreamble = 0;
p0_NominalWithGrant = 0;
PUSCH_TimeDomainResourceAllocation_k2 = 0;
PUSCH_TimeDomainResourceAllocation_mappingType = "typeA";
PUSCH_TimeDomainResourceAllocation_startSymbolAndLength = 0;
pucch_ResourceCommon = 0;
pucch_GroupHopping = "neither";
hoppingId = 0;
p0_nominal = -30;
PDSCH_TimeDomainResourceAllocation_k0 = 2;
PDSCH_TimeDomainResourceAllocation_mappingType = "typeA";
PDSCH_TimeDomainResourceAllocation_startSymbolAndLength = 0;
rateMatchPatternId = 0;
RateMatchPattern_patternType = "bitmaps";
symbolsInResourceBlock = "oneSlot";
periodicityAndPattern = 2;
RateMatchPattern_controlResourceSet = 5;
RateMatchPattern_subcarrierSpacing = "kHz30";
RateMatchPattern_mode = "dynamic";
controlResourceSetZero = 0;
searchSpaceZero = 0;
searchSpaceSIB1 = 10;
searchSpaceOtherSystemInformation = 10;
pagingSearchSpace = 10;
ra_SearchSpace = 10;
PDCCH_common_controlResourceSetId = 5;
PDCCH_common_ControlResourceSet_duration = 2;
PDCCH_cce_REG_MappingType = "nonInterleaved";
PDCCH_reg_BundleSize = 3;
PDCCH_interleaverSize = 3;
PDCCH_shiftIndex = 10;
PDCCH_precoderGranularity = "sameAsREG-bundle";
PDCCH_TCI_StateId = 32;
tci_PresentInDCI = "ENABLE";
PDCCH_DMRS_ScramblingID = 0;
SearchSpaceId = 10;
commonSearchSpaces_controlResourceSetId = 5;
SearchSpace_monitoringSlotPeriodicityAndOffset_choice = "sl1";
SearchSpace_monitoringSlotPeriodicityAndOffset_value = 0;
SearchSpace_duration = 2;
SearchSpace_nrofCandidates_aggregationLevel1 = 0;
SearchSpace_nrofCandidates_aggregationLevel2 = 0;
SearchSpace_nrofCandidates_aggregationLevel4 = 0;
SearchSpace_nrofCandidates_aggregationLevel8 = 0;
SearchSpace_nrofCandidates_aggregationLevel16 = 0;
SearchSpace_searchSpaceType = "common";
Common_dci_Format2_0_nrofCandidates_SFI_aggregationLevel1 = 1;
Common_dci_Format2_0_nrofCandidates_SFI_aggregationLevel2 = 1;
Common_dci_Format2_0_nrofCandidates_SFI_aggregationLevel4 = 1;
Common_dci_Format2_0_nrofCandidates_SFI_aggregationLevel8 = 1;
Common_dci_Format2_0_nrofCandidates_SFI_aggregationLevel16 = 1;
Common_dci_Format2_3_monitoringPeriodicity = 1;
Common_dci_Format2_3_nrofPDCCH_Candidates = 1;
ue_Specific__dci_Formats = "formats0-0-And-1-0";
RateMatchPatternLTE_CRS_carrierFreqDL = 6;
RateMatchPatternLTE_CRS_carrierBandwidthDL = 6;
RateMatchPatternLTE_CRS_nrofCRS_Ports = 1;
RateMatchPatternLTE_CRS_v_Shift = 0;
RateMatchPatternLTE_CRS_radioframeAllocationPeriod = 1;
RateMatchPatternLTE_CRS_radioframeAllocationOffset = 0;
RateMatchPatternLTE_CRS_subframeAllocation_choice = "oneFrame";
}
);
srb1_parameters :
{
# timer_poll_retransmit = (ms) [5, 10, 15, 20,... 250, 300, 350, ... 500]
timer_poll_retransmit = 80;
# timer_reordering = (ms) [0,5, ... 100, 110, 120, ... ,200]
timer_reordering = 35;
# timer_reordering = (ms) [0,5, ... 250, 300, 350, ... ,500]
timer_status_prohibit = 0;
# poll_pdu = [4, 8, 16, 32 , 64, 128, 256, infinity(>10000)]
poll_pdu = 4;
# poll_byte = (kB) [25,50,75,100,125,250,375,500,750,1000,1250,1500,2000,3000,infinity(>10000)]
poll_byte = 99999;
# max_retx_threshold = [1, 2, 3, 4 , 6, 8, 16, 32]
max_retx_threshold = 4;
}
# ------- SCTP definitions
SCTP :
{
# Number of streams to use in input/output
SCTP_INSTREAMS = 2;
SCTP_OUTSTREAMS = 2;
};
////////// MME parameters:
mme_ip_address = ( { ipv4 = "192.168.12.26";
ipv6 = "192:168:30::17";
active = "yes";
preference = "ipv4";
}
);
NETWORK_INTERFACES :
{
GNB_INTERFACE_NAME_FOR_S1_MME = "eth0";
GNB_IPV4_ADDRESS_FOR_S1_MME = "192.168.12.111/24";
GNB_INTERFACE_NAME_FOR_S1U = "eth0";
GNB_IPV4_ADDRESS_FOR_S1U = "192.168.12.111/24";
GNB_PORT_FOR_S1U = 2152; # Spec 2152
};
}
);
MACRLCs = (
{
num_cc = 1;
tr_s_preference = "local_L1";
tr_n_preference = "local_RRC";
}
);
L1s = (
{
num_cc = 1;
tr_n_preference = "local_mac";
}
);
RUs = (
{
local_rf = "yes"
nb_tx = 1
nb_rx = 1
att_tx = 0
att_rx = 0;
bands = [7];
max_pdschReferenceSignalPower = -27;
max_rxgain = 114;
eNB_instances = [0];
sdr_addrs = "addr=192.168.10.2,mgmt_addr=192.168.100.8";
clock_src = "external";
}
);
THREAD_STRUCT = (
{
    # three options for the level of parallelism: "PARALLEL_SINGLE_THREAD", "PARALLEL_RU_L1_SPLIT", or "PARALLEL_RU_L1_TRX_SPLIT"
    parallel_config = "PARALLEL_RU_L1_TRX_SPLIT";
    # two options for the worker threads: "WORKER_DISABLE" or "WORKER_ENABLE"
worker_config = "WORKER_ENABLE";
}
);
log_config :
{
global_log_level ="info";
global_log_verbosity ="medium";
hw_log_level ="info";
hw_log_verbosity ="medium";
phy_log_level ="info";
phy_log_verbosity ="medium";
mac_log_level ="info";
mac_log_verbosity ="high";
rlc_log_level ="info";
rlc_log_verbosity ="medium";
pdcp_log_level ="info";
pdcp_log_verbosity ="medium";
rrc_log_level ="info";
rrc_log_verbosity ="medium";
};
[*]
[*] GTKWave Analyzer v3.3.58 (w)1999-2014 BSI
[*] Tue Nov 6 14:54:14 2018
[*]
[dumpfile] "/tmp/openair_dump_eNB.vcd"
[dumpfile_mtime] "Tue Nov 6 14:50:45 2018"
[dumpfile_size] 1894074
[savefile] "/homes/wangts/openairinterface5g/targets/RT/USER/gNB_usrp.gtkw"
[timestart] 1238454000
[size] 1920 1018
[pos] -159 -155
*-20.848083 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1
[sst_width] 386
[signals_width] 344
[sst_expanded] 1
[sst_vpaned_height] 421
@28
functions.trx_read
functions.trx_write
@24
variables.trx_ts[63:0]
variables.trx_tst[63:0]
@28
functions.eNB_thread_rxtx0
@24
variables.frame_number_RX0_RU[63:0]
variables.subframe_number_RX0_RU[63:0]
variables.frame_number_TX0_RU[63:0]
variables.subframe_number_TX0_RU[63:0]
@28
functions.mac_schedule_dlsch
functions.macxface_eNB_dlsch_ulsch_scheduler
functions.macxface_ue_scheduler
functions.phy_eNB_ofdm_mod_l
@24
variables.frame_number_RX0_gNB[63:0]
variables.subframe_number_RX0_gNB[63:0]
variables.frame_number_TX0_gNB[63:0]
variables.subframe_number_TX0_gNB[63:0]
@28
functions.gNB_thread_rxtx0
@24
variables.frame_number_RX1_gNB[63:0]
variables.subframe_number_RX1_gNB[63:0]
variables.frame_number_TX1_gNB[63:0]
variables.subframe_number_TX1_gNB[63:0]
@28
functions.gNB_thread_rxtx1
functions.phy_procedures_ru_feprx0
functions.phy_procedures_ru_feprx0
functions.phy_procedures_ru_feprx1
functions.phy_procedures_ru_feptx_ofdm0
functions.phy_procedures_ru_feptx_ofdm1
functions.phy_procedures_ru_feptx_prec0
functions.phy_procedures_ru_feptx_prec1
[pattern_trace] 1
[pattern_trace] 0
@@ -143,6 +143,11 @@ extern double cpuf;
void init_gNB(int,int);
void stop_gNB(int nb_inst);
int wakeup_txfh(gNB_L1_rxtx_proc_t *proc,PHY_VARS_gNB *gNB);
int wakeup_tx(PHY_VARS_gNB *gNB);
extern PARALLEL_CONF_t get_thread_parallel_conf(void);
extern WORKER_CONF_t get_thread_worker_conf(void);
void wakeup_prach_gNB(PHY_VARS_gNB *gNB,RU_t *ru,int frame,int subframe);
@@ -154,7 +159,7 @@ extern void add_subframe(uint16_t *frameP, uint16_t *subframeP, int offset);
#define TICK_TO_US(ts) (ts.trials==0?0:ts.diff/ts.trials)
static inline int rxtx(PHY_VARS_gNB *gNB,gNB_L1_rxtx_proc_t *proc, char *thread_name) {
  start_meas(&softmodem_stats_rxtx_sf);
  // *******************************************************************
@@ -227,7 +232,9 @@ static inline int rxtx(PHY_VARS_gNB *gNB,gNB_rxtx_proc_t *proc, char *thread_nam
  //if (wait_CCs(proc)<0) return(-1);
  if (oai_exit) return(-1);
  if(get_thread_parallel_conf() == PARALLEL_SINGLE_THREAD){
    phy_procedures_gNB_TX(gNB, proc, 1);
  }
  stop_meas( &softmodem_stats_rxtx_sf );
@@ -278,36 +285,89 @@ static inline int rxtx(PHY_VARS_gNB *gNB,gNB_rxtx_proc_t *proc, char *thread_nam
  return(0);
}
static void* gNB_L1_thread_tx(void* param) {
PHY_VARS_gNB *gNB = (PHY_VARS_gNB*)param;
gNB_L1_proc_t *gNB_proc = &gNB->proc;
gNB_L1_rxtx_proc_t *proc = &gNB_proc->L1_proc_tx;
//PHY_VARS_gNB *gNB = RC.gNB[0][proc->CC_id];
char thread_name[100];
sprintf(thread_name,"TXnp4_%d\n",&gNB->proc.L1_proc == proc ? 0 : 1);
//thread_top_init(thread_name,1,470000,500000,500000);
//wait_sync("tx_thread");
while (!oai_exit) {
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX1, 0 );
if (wait_on_condition(&proc->mutex,&proc->cond,&proc->instance_cnt,thread_name)<0) break;
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX1, 1 );
if (oai_exit) break;
// *****************************************
// TX processing for subframe n+4
// run PHY TX procedures the one after the other for all CCs to avoid race conditions
// (may be relaxed in the future for performance reasons)
// *****************************************
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_TX1_GNB,proc->subframe_tx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_RX1_GNB,proc->subframe_rx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX1_GNB,proc->frame_tx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX1_GNB,proc->frame_rx);
phy_procedures_gNB_TX(gNB, proc, 1);
pthread_mutex_lock( &proc->mutex );
proc->instance_cnt = -1;
// the thread can now be woken up
if (pthread_cond_signal(&proc->cond) != 0) {
LOG_E( PHY, "[gNB] ERROR pthread_cond_signal for gNB TXnp4 thread\n");
exit_fun( "ERROR pthread_cond_signal" );
}
pthread_mutex_unlock( &proc->mutex );
wakeup_txfh(proc,gNB);
}
return 0;
}
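The new gNB_L1_thread_tx above, the gNB_L1_thread below, and the wakeup_tx()/wakeup_txfh() helpers all synchronize through the same counted wake-up handshake: the waker raises instance_cnt under the mutex and signals the condition variable, the worker waits for it, processes one subframe, then resets the counter. A minimal, self-contained illustration of that pattern with toy names (not part of this patch), built with -pthread:

#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t mutex;
  pthread_cond_t  cond;
  int instance_cnt;   /* -1: idle, 0: one job pending */
} toy_proc_t;

static void *toy_worker(void *arg) {
  toy_proc_t *proc = (toy_proc_t *)arg;

  /* wait_on_condition() equivalent: sleep until a job is flagged */
  pthread_mutex_lock(&proc->mutex);
  while (proc->instance_cnt < 0)
    pthread_cond_wait(&proc->cond, &proc->mutex);
  pthread_mutex_unlock(&proc->mutex);

  printf("worker: processing one subframe\n");   /* rxtx()/TX processing would run here */

  /* release_thread() equivalent: mark ourselves idle again and signal back */
  pthread_mutex_lock(&proc->mutex);
  proc->instance_cnt = -1;
  pthread_cond_signal(&proc->cond);
  pthread_mutex_unlock(&proc->mutex);
  return NULL;
}

int main(void) {
  toy_proc_t proc = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, -1 };
  pthread_t tid;
  pthread_create(&tid, NULL, toy_worker, &proc);

  /* wakeup_rxtx()/wakeup_tx() equivalent: flag one pending job and signal */
  pthread_mutex_lock(&proc.mutex);
  proc.instance_cnt++;          /* -1 -> 0 */
  pthread_cond_signal(&proc.cond);
  pthread_mutex_unlock(&proc.mutex);

  pthread_join(tid, NULL);
  return 0;
}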
/*!
 * \brief The RX UE-specific and TX thread of gNB.
 * \param param is a \ref gNB_L1_proc_t structure which contains the info what to process.
 * \returns a pointer to an int. The storage is not on the heap and must not be freed.
 */
static void* gNB_L1_thread( void* param ) {
  static int gNB_thread_rxtx_status;
PHY_VARS_gNB *gNB = (PHY_VARS_gNB*)param;
gNB_L1_proc_t *gNB_proc = &gNB->proc;
gNB_L1_rxtx_proc_t *proc = &gNB_proc->L1_proc;
//PHY_VARS_gNB *gNB = RC.gNB[0][proc->CC_id];
gNB_rxtx_proc_t *proc = (gNB_rxtx_proc_t*)param;
PHY_VARS_gNB *gNB = RC.gNB[0][proc->CC_id];
  char thread_name[100];
  // set default return value
  gNB_thread_rxtx_status = 0;
  sprintf(thread_name,"RXn_TXnp4_%d",&gNB->proc.L1_proc == proc ? 0 : 1);
  //thread_top_init(thread_name,1,850000L,1000000L,2000000L);
  while (!oai_exit) {
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_eNB_PROC_RXTX0+(proc->subframe_rx&1), 0 );
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX0, 0 );
    if (wait_on_condition(&proc->mutex,&proc->cond,&proc->instance_cnt,thread_name)<0) break;
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX0, 1 );
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_TX0_GNB,proc->subframe_tx);
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_RX0_GNB,proc->subframe_rx);
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_GNB,proc->frame_tx);
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_GNB,proc->frame_rx);
    if (oai_exit) break;
@@ -316,12 +376,19 @@ static void* gNB_thread_rxtx( void* param ) {
    if (rxtx(gNB,proc,thread_name) < 0) break;
    }
    if(get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT){
      phy_procedures_gNB_TX(gNB, proc, 1);
}
if (release_thread(&proc->mutex,&proc->instance_cnt,thread_name)<0) break;
if(get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT){
wakeup_tx(gNB);
}
else if(get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT){
wakeup_txfh(proc,gNB);
}
  } // while !oai_exit
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_eNB_PROC_RXTX0+(proc->subframe_rx&1), 0 );
  LOG_D(PHY, " *** Exiting gNB thread RXn_TXnp4\n");
@@ -355,8 +422,8 @@ static void wait_system_ready (char *message, volatile int *start_flag) {
void gNB_top(PHY_VARS_gNB *gNB, int frame_rx, int subframe_rx, char *string, struct RU_t_s *ru)
{
  gNB_L1_proc_t *proc = &gNB->proc;
  gNB_L1_rxtx_proc_t *L1_proc = &proc->L1_proc;
  NR_DL_FRAME_PARMS *fp = ru->nr_frame_parms;
  RU_proc_t *ru_proc=&ru->proc;
@@ -366,27 +433,121 @@ void gNB_top(PHY_VARS_gNB *gNB, int frame_rx, int subframe_rx, char *string, str
  if (!oai_exit) {
    T(T_ENB_MASTER_TICK, T_INT(0), T_INT(proc->frame_rx), T_INT(proc->subframe_rx));
    L1_proc->timestamp_tx = ru_proc->timestamp_rx + (sf_ahead*fp->samples_per_subframe);
    L1_proc->frame_rx = ru_proc->frame_rx;
    L1_proc->subframe_rx = ru_proc->subframe_rx;
    L1_proc->frame_tx = (L1_proc->subframe_rx > (9-sf_ahead)) ? (L1_proc->frame_rx+1)&1023 : L1_proc->frame_rx;
    L1_proc->subframe_tx = (L1_proc->subframe_rx + sf_ahead)%10;
    if (rxtx(gNB,L1_proc,string) < 0) LOG_E(PHY,"gNB %d CC_id %d failed during execution\n",gNB->Mod_id,gNB->CC_id);
    ru_proc->timestamp_tx = L1_proc->timestamp_tx;
    ru_proc->subframe_tx = L1_proc->subframe_tx;
    ru_proc->frame_tx = L1_proc->frame_tx;
  }
}
int wakeup_txfh(gNB_L1_rxtx_proc_t *proc,PHY_VARS_gNB *gNB) {
  RU_t *ru;
RU_proc_t *ru_proc;
struct timespec wait;
wait.tv_sec=0;
wait.tv_nsec=5000000L;
//printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~inside wakeup_txfh %d.%d IC_RU = %d\n", proc->frame_tx, proc->subframe_tx, proc->instance_cnt_RUs);
if(wait_on_condition(&proc->mutex_RUs,&proc->cond_RUs,&proc->instance_cnt_RUs,"wakeup_txfh")<0) {
LOG_E(PHY,"Frame %d, subframe %d: TX FH not ready\n", proc->frame_tx, proc->subframe_tx);
return(-1);
}
pthread_mutex_lock(&gNB->proc.mutex_RU_tx);
gNB->proc.RU_mask_tx = 0;
pthread_mutex_unlock(&gNB->proc.mutex_RU_tx);
if (release_thread(&proc->mutex_RUs,&proc->instance_cnt_RUs,"wakeup_txfh")<0) return(-1);
for(int i=0; i<gNB->num_RU; i++)
{
ru = gNB->RU_list[i];
ru_proc = &ru->proc;
if (ru_proc->instance_cnt_gNBs == 0) {
LOG_E(PHY,"Frame %d, subframe %d: TX FH thread busy, dropping Frame %d, subframe %d\n", ru_proc->frame_tx, ru_proc->subframe_tx, proc->frame_rx, proc->subframe_rx);
return(-1);
}
if (pthread_mutex_timedlock(&ru_proc->mutex_gNBs,&wait) != 0) {
LOG_E( PHY, "[eNB] ERROR pthread_mutex_lock for eNB TX1 thread %d (IC %d)\n", ru_proc->subframe_rx&1,ru_proc->instance_cnt_gNBs );
exit_fun( "error locking mutex_gNB" );
return(-1);
}
ru_proc->instance_cnt_gNBs = 0;
ru_proc->timestamp_tx = proc->timestamp_tx;
ru_proc->subframe_tx = proc->subframe_tx;
ru_proc->frame_tx = proc->frame_tx;
// the thread can now be woken up
if (pthread_cond_signal(&ru_proc->cond_gNBs) != 0) {
LOG_E( PHY, "[gNB] ERROR pthread_cond_signal for gNB TXnp4 thread\n");
exit_fun( "ERROR pthread_cond_signal" );
return(-1);
}
pthread_mutex_unlock( &ru_proc->mutex_gNBs );
}
return(0);
}
int wakeup_tx(PHY_VARS_gNB *gNB) {
gNB_L1_proc_t *proc=&gNB->proc;
gNB_L1_rxtx_proc_t *L1_proc_tx = &proc->L1_proc_tx;
gNB_L1_rxtx_proc_t *L1_proc = &proc->L1_proc;
struct timespec wait;
wait.tv_sec=0;
wait.tv_nsec=5000000L;
if (pthread_mutex_timedlock(&L1_proc_tx->mutex,&wait) != 0) {
LOG_E(PHY, "[SCHED][eNB] ERROR locking mutex for eNB L1_thread_tx\n");
exit_fun("ERROR pthread_lock");
return(-1);
}
while(L1_proc_tx->instance_cnt == 0){
pthread_cond_wait(&L1_proc_tx->cond,&L1_proc_tx->mutex);
}
L1_proc_tx->instance_cnt = 0;
gNB_proc_t *proc=&gNB->proc;
L1_proc_tx->subframe_rx = L1_proc->subframe_rx;
L1_proc_tx->frame_rx = L1_proc->frame_rx;
L1_proc_tx->subframe_tx = L1_proc->subframe_tx;
L1_proc_tx->frame_tx = L1_proc->frame_tx;
L1_proc_tx->timestamp_tx = L1_proc->timestamp_tx;
// the thread can now be woken up
if (pthread_cond_signal(&L1_proc_tx->cond) != 0) {
LOG_E( PHY, "[eNB] ERROR pthread_cond_signal for eNB TXnp4 thread\n");
exit_fun( "ERROR pthread_cond_signal" );
return(-1);
}
pthread_mutex_unlock( &L1_proc_tx->mutex);
return(0);
}
int wakeup_rxtx(PHY_VARS_gNB *gNB,RU_t *ru) {
  gNB_L1_proc_t *proc=&gNB->proc;
  gNB_L1_rxtx_proc_t *L1_proc=&proc->L1_proc;
  NR_DL_FRAME_PARMS *fp = &gNB->frame_parms;
  RU_proc_t *ru_proc=&ru->proc;
  int i;
  struct timespec wait;
@@ -415,24 +576,24 @@ int wakeup_rxtx(PHY_VARS_gNB *gNB,RU_t *ru) {
  wait.tv_nsec=5000000L;
  /* accept some delay in processing - up to 5ms */
  for (i = 0; i < 10 && L1_proc->instance_cnt == 0; i++) {
    LOG_W( PHY,"[gNB] Frame %d Subframe %d, gNB RXn-TXnp4 thread busy!! (cnt %i)\n", L1_proc->frame_tx, L1_proc->subframe_tx, L1_proc->instance_cnt);
    usleep(500);
  }
  if (L1_proc->instance_cnt == 0) {
    exit_fun( "TX thread busy" );
    return(-1);
  }
  // wake up TX for subframe n+sf_ahead
  // lock the TX mutex and make sure the thread is ready
  if (pthread_mutex_timedlock(&L1_proc->mutex,&wait) != 0) {
    LOG_E( PHY, "[gNB] ERROR pthread_mutex_lock for gNB RXTX thread %d (IC %d)\n", L1_proc->subframe_rx&1,L1_proc->instance_cnt );
    exit_fun( "error locking mutex" );
    return(-1);
  }
  ++L1_proc->instance_cnt;
  // We have just received and processed the common part of a subframe, say n.
  // TS_rx is the last received timestamp (start of 1st slot), TS_tx is the desired
@@ -440,27 +601,29 @@ int wakeup_rxtx(PHY_VARS_gNB *gNB,RU_t *ru) {
  // The last (TS_rx mod samples_per_frame) was n*samples_per_tti,
  // we want to generate subframe (n+sf_ahead), so TS_tx = TX_rx+sf_ahead*samples_per_tti,
  // and proc->subframe_tx = proc->subframe_rx+sf_ahead
  L1_proc->timestamp_tx = ru_proc->timestamp_rx + (sf_ahead*fp->samples_per_subframe);
  L1_proc->frame_rx = ru_proc->frame_rx;
  L1_proc->subframe_rx = ru_proc->subframe_rx;
  L1_proc->frame_tx = (L1_proc->subframe_rx > (9-sf_ahead)) ? (L1_proc->frame_rx+1)&1023 : L1_proc->frame_rx;
  L1_proc->subframe_tx = (L1_proc->subframe_rx + sf_ahead)%10;
  //printf("~~~~~~~~~~~~~~~~~~~~~~passing parameter IC = %d, RX: %d.%d, TX: %d.%d to L1 sf_ahead = %d\n", L1_proc->instance_cnt, L1_proc->frame_rx, L1_proc->subframe_rx, L1_proc->frame_tx, L1_proc->subframe_tx, sf_ahead);
  // the thread can now be woken up
  if (pthread_cond_signal(&L1_proc->cond) != 0) {
    LOG_E( PHY, "[gNB] ERROR pthread_cond_signal for gNB RXn-TXnp4 thread\n");
    exit_fun( "ERROR pthread_cond_signal" );
    return(-1);
  }
  pthread_mutex_unlock( &L1_proc->mutex );
  return(0);
}
/*
void wakeup_prach_gNB(PHY_VARS_gNB *gNB,RU_t *ru,int frame,int subframe) {
  gNB_L1_proc_t *proc = &gNB->proc;
  LTE_DL_FRAME_PARMS *fp=&gNB->frame_parms;
  int i;
@@ -519,7 +682,7 @@ void wakeup_prach_gNB(PHY_VARS_gNB *gNB,RU_t *ru,int frame,int subframe) {
/*!
 * \brief The prach receive thread of gNB.
 * \param param is a \ref gNB_L1_proc_t structure which contains the info what to process.
 * \returns a pointer to an int. The storage is not on the heap and must not be freed.
 */
/*
@@ -528,7 +691,7 @@ static void* gNB_thread_prach( void* param ) {
  PHY_VARS_gNB *gNB= (PHY_VARS_gNB *)param;
  gNB_L1_proc_t *proc = &gNB->proc;
  // set default return value
  gNB_thread_prach_status = 0;
@@ -568,8 +731,8 @@ void init_gNB_proc(int inst) {
  int i=0;
  int CC_id;
  PHY_VARS_gNB *gNB;
  gNB_L1_proc_t *proc;
  gNB_L1_rxtx_proc_t *L1_proc,*L1_proc_tx;
  pthread_attr_t *attr0=NULL,*attr1=NULL;
  //*attr_prach=NULL;
@@ -582,27 +745,32 @@ void init_gNB_proc(int inst) {
#endif
  proc = &gNB->proc;
  L1_proc = &proc->L1_proc;
  L1_proc_tx = &proc->L1_proc_tx;
  L1_proc->instance_cnt = -1;
  L1_proc_tx->instance_cnt = -1;
  L1_proc->instance_cnt_RUs = 0;
  L1_proc_tx->instance_cnt_RUs = 0;
  proc->instance_cnt_prach = -1;
  proc->instance_cnt_asynch_rxtx = -1;
  proc->CC_id = CC_id;
  proc->first_rx =1;
  proc->first_tx =1;
  proc->RU_mask =0;
  proc->RU_mask_tx = (1<<gNB->num_RU)-1;
  proc->RU_mask_prach =0;
  pthread_mutex_init( &gNB->UL_INFO_mutex, NULL);
  pthread_mutex_init( &L1_proc->mutex, NULL);
  pthread_mutex_init( &L1_proc_tx->mutex, NULL);
  pthread_cond_init( &L1_proc->cond, NULL);
  pthread_cond_init( &L1_proc_tx->cond, NULL);
  pthread_mutex_init( &proc->mutex_prach, NULL);
  pthread_mutex_init( &proc->mutex_asynch_rxtx, NULL);
  pthread_mutex_init( &proc->mutex_RU,NULL);
  pthread_mutex_init( &proc->mutex_RU_tx,NULL);
  pthread_mutex_init( &proc->mutex_RU_PRACH,NULL);
  pthread_cond_init( &proc->cond_prach, NULL);
@@ -612,23 +780,25 @@ void init_gNB_proc(int inst) {
  pthread_attr_init( &proc->attr_asynch_rxtx);
  // pthread_attr_init( &proc->attr_td);
  // pthread_attr_init( &proc->attr_te);
  pthread_attr_init( &L1_proc->attr);
  pthread_attr_init( &L1_proc_tx->attr);
  attr0 = &L1_proc->attr;
  attr1 = &L1_proc_tx->attr;
  LOG_I(PHY,"gNB->single_thread_flag:%d\n", gNB->single_thread_flag);
  if (get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT || get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) {
    pthread_create( &L1_proc->pthread, attr0, gNB_L1_thread, gNB );
    pthread_create( &L1_proc_tx->pthread, attr1, gNB_L1_thread_tx, gNB);
  }
  //pthread_create( &proc->pthread_prach, attr_prach, gNB_thread_prach, gNB );
  char name[16];
  if (gNB->single_thread_flag==0) {
    snprintf( name, sizeof(name), "L1 %d", i );
    pthread_setname_np( L1_proc->pthread, name );
    snprintf( name, sizeof(name), "L1TX %d", i );
    pthread_setname_np( L1_proc_tx->pthread, name );
  }
  AssertFatal(proc->instance_cnt_prach == -1,"instance_cnt_prach = %d\n",proc->instance_cnt_prach);
@@ -651,23 +821,27 @@ void kill_gNB_proc(int inst) {
  int *status;
  PHY_VARS_gNB *gNB;
  gNB_L1_proc_t *proc;
  gNB_L1_rxtx_proc_t *L1_proc, *L1_proc_tx;
  for (int CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
    gNB=RC.gNB[inst][CC_id];
    proc = &gNB->proc;
    L1_proc = &proc->L1_proc;
    L1_proc_tx = &proc->L1_proc_tx;
    LOG_I(PHY, "Killing TX CC_id %d inst %d\n", CC_id, inst );
    if (get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT || get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) {
      pthread_mutex_lock(&L1_proc->mutex);
      L1_proc->instance_cnt = 0;
      pthread_cond_signal(&L1_proc->cond);
      pthread_mutex_unlock(&L1_proc->mutex);
proc_rxtx[1].instance_cnt_rxtx = 0;
      pthread_mutex_lock(&L1_proc_tx->mutex);
L1_proc_tx->instance_cnt = 0;
pthread_cond_signal(&L1_proc_tx->cond);
pthread_mutex_unlock(&L1_proc_tx->mutex);
    }
    proc->instance_cnt_prach = 0;
    pthread_cond_signal( &proc->cond_prach );
@@ -684,16 +858,22 @@ void kill_gNB_proc(int inst) {
    LOG_I(PHY, "Destroying UL_INFO mutex\n");
    pthread_mutex_destroy(&gNB->UL_INFO_mutex);
    if (get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT || get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) {
      LOG_I(PHY, "Joining L1_proc mutex/cond\n");
      pthread_join( L1_proc->pthread, (void**)&status );
      LOG_I(PHY, "Joining L1_proc_tx mutex/cond\n");
      pthread_join( L1_proc_tx->pthread, (void**)&status );
LOG_I(PHY, "Destroying rxtx[%d] mutex/cond\n",i);
pthread_mutex_destroy( &proc_rxtx[i].mutex_rxtx );
pthread_cond_destroy( &proc_rxtx[i].cond_rxtx );
}
    }
LOG_I(PHY, "Destroying L1_proc mutex/cond\n");
pthread_mutex_destroy( &L1_proc->mutex );
pthread_cond_destroy( &L1_proc->cond );
LOG_I(PHY, "Destroying L1_proc_tx mutex/cond\n");
pthread_mutex_destroy( &L1_proc_tx->mutex );
pthread_cond_destroy( &L1_proc_tx->cond );
pthread_mutex_destroy( &proc->mutex_RU );
pthread_mutex_destroy( &proc->mutex_RU_tx );
  }
}
...
@@ -118,6 +118,9 @@ extern void nr_phy_init_RU(RU_t*);
extern void nr_phy_free_RU(RU_t*);
extern void nr_phy_config_request(NR_PHY_Config_t *gNB);
extern PARALLEL_CONF_t get_thread_parallel_conf(void);
extern WORKER_CONF_t get_thread_worker_conf(void);
void init_RU(char*);
void stop_RU(int nb_ru);
void do_ru_sync(RU_t *ru);
@@ -768,8 +771,6 @@ void rx_rf(RU_t *ru,int *frame,int *subframe) {
  if (ru == RC.ru[0]) {
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_RU, proc->frame_rx );
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_RX0_RU, proc->subframe_rx );
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_RU, proc->frame_tx );
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_TX0_RU, proc->subframe_tx );
} }
if (proc->first_rx == 0) { if (proc->first_rx == 0) {
...@@ -841,6 +842,8 @@ void tx_rf(RU_t *ru) { ...@@ -841,6 +842,8 @@ void tx_rf(RU_t *ru) {
flags = 4; // start of burst and end of burst (only one DL SF between two UL) flags = 4; // start of burst and end of burst (only one DL SF between two UL)
sf_extension = ru->N_TA_offset<<1; sf_extension = ru->N_TA_offset<<1;
} */ } */
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_RU, proc->frame_tx );
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_TX0_RU, proc->subframe_tx );
for (i=0; i<ru->nb_tx; i++) for (i=0; i<ru->nb_tx; i++)
txp[i] = (void*)&ru->common.txdata[i][(proc->subframe_tx*fp->samples_per_subframe)-sf_extension]; txp[i] = (void*)&ru->common.txdata[i][(proc->subframe_tx*fp->samples_per_subframe)-sf_extension];
...@@ -874,7 +877,7 @@ void tx_rf(RU_t *ru) { ...@@ -874,7 +877,7 @@ void tx_rf(RU_t *ru) {
/*! /*!
* \brief The Asynchronous RX/TX FH thread of RAU/RCC/gNB/RRU. * \brief The Asynchronous RX/TX FH thread of RAU/RCC/gNB/RRU.
* This handles the RX FH for an asynchronous RRU/UE * This handles the RX FH for an asynchronous RRU/UE
* \param param is a \ref gNB_proc_t structure which contains the info what to process. * \param param is a \ref gNB_L1_proc_t structure which contains the info what to process.
* \returns a pointer to an int. The storage is not on the heap and must not be freed. * \returns a pointer to an int. The storage is not on the heap and must not be freed.
*/ */
static void* ru_thread_asynch_rxtx( void* param ) { static void* ru_thread_asynch_rxtx( void* param ) {
...@@ -1131,14 +1134,14 @@ void do_ru_synch(RU_t *ru) { ...@@ -1131,14 +1134,14 @@ void do_ru_synch(RU_t *ru) {
void wakeup_gNBs(RU_t *ru) { void wakeup_gNB_L1s(RU_t *ru) {
int i; int i;
PHY_VARS_gNB **gNB_list = ru->gNB_list; PHY_VARS_gNB **gNB_list = ru->gNB_list;
LOG_D(PHY,"wakeup_gNBs (num %d) for RU %d ru->gNB_top:%p\n",ru->num_gNB,ru->idx, ru->gNB_top); LOG_D(PHY,"wakeup_gNB_L1s (num %d) for RU %d ru->gNB_top:%p\n",ru->num_gNB,ru->idx, ru->gNB_top);
if (ru->num_gNB==1 && ru->gNB_top!=0) { if (ru->num_gNB==1 && ru->gNB_top!=0 && get_thread_parallel_conf() == PARALLEL_SINGLE_THREAD) {
// call gNB function directly // call gNB function directly
char string[20]; char string[20];
...@@ -1153,7 +1156,6 @@ void wakeup_gNBs(RU_t *ru) { ...@@ -1153,7 +1156,6 @@ void wakeup_gNBs(RU_t *ru) {
for (i=0;i<ru->num_gNB;i++) for (i=0;i<ru->num_gNB;i++)
{ {
LOG_D(PHY,"ru->wakeup_rxtx:%p\n", ru->nr_wakeup_rxtx); LOG_D(PHY,"ru->wakeup_rxtx:%p\n", ru->nr_wakeup_rxtx);
if (ru->nr_wakeup_rxtx!=0 && ru->nr_wakeup_rxtx(gNB_list[i],ru) < 0) if (ru->nr_wakeup_rxtx!=0 && ru->nr_wakeup_rxtx(gNB_list[i],ru) < 0)
{ {
LOG_E(PHY,"could not wakeup gNB rxtx process for subframe %d\n", ru->proc.subframe_rx); LOG_E(PHY,"could not wakeup gNB rxtx process for subframe %d\n", ru->proc.subframe_rx);
...@@ -1378,6 +1380,120 @@ static void* ru_stats_thread(void* param) { ...@@ -1378,6 +1380,120 @@ static void* ru_stats_thread(void* param) {
return(NULL); return(NULL);
} }
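The `ru_thread_tx` routine added below relies on the `wait_on_condition()`/`release_thread()` pair for every hand-off. Their bodies are not part of this diff; the sketch below reconstructs the assumed semantics from the call sites (block until the instance counter becomes non-negative, then mark it consumed) and is only an approximation of the real helpers.

```c
#include <pthread.h>

/* assumed semantics, reconstructed from the call sites; not the OAI implementation */
static int wait_on_condition(pthread_mutex_t *mutex, pthread_cond_t *cond,
                             int *instance_cnt, const char *name) {
  (void)name;                        /* the real helper uses it for logging/asserts */
  pthread_mutex_lock(mutex);
  while (*instance_cnt < 0)          /* block until a producer sets it >= 0 */
    pthread_cond_wait(cond, mutex);
  pthread_mutex_unlock(mutex);
  return 0;
}

static int release_thread(pthread_mutex_t *mutex, int *instance_cnt, const char *name) {
  (void)name;
  pthread_mutex_lock(mutex);
  *instance_cnt = -1;                /* mark the work item as consumed */
  pthread_mutex_unlock(mutex);
  return 0;
}
```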
static void* ru_thread_tx( void* param ) {
RU_t *ru = (RU_t*)param;
RU_proc_t *proc = &ru->proc;
PHY_VARS_gNB *gNB;
gNB_L1_proc_t *gNB_proc;
gNB_L1_rxtx_proc_t *L1_proc;
NR_DL_FRAME_PARMS *fp = ru->nr_frame_parms;
char filename[40];
int print_frame = 2;
int i = 0;
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
thread_top_init("ru_thread_tx",1,400000,500000,500000);
//CPU_SET(5, &cpuset);
//pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
//wait_sync("ru_thread_tx");
wait_on_condition(&proc->mutex_FH1,&proc->cond_FH1,&proc->instance_cnt_FH1,"ru_thread_tx");
printf( "ru_thread_tx ready\n");
while (!oai_exit) {
if (oai_exit) break;
LOG_I(PHY,"ru_thread_tx: Waiting for TX processing\n");
// wait until the gNBs have finished subframe RX n and TX n+4
wait_on_condition(&proc->mutex_gNBs,&proc->cond_gNBs,&proc->instance_cnt_gNBs,"ru_thread_tx");
if (oai_exit) break;
//printf("~~~~~~~~~~~~~~~~start process for ru_thread_tx %d.%d\n", proc->frame_tx, proc->subframe_tx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_RU, proc->frame_tx );
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_TX0_RU, proc->subframe_tx );
// do TX front-end processing if needed (precoding and/or IDFTs)
if (ru->feptx_prec) ru->feptx_prec(ru);
// do OFDM if needed
if ((ru->fh_north_asynch_in == NULL) && (ru->feptx_ofdm)) ru->feptx_ofdm(ru);
if(!emulate_rf){
// do outgoing fronthaul (south) if needed
if ((ru->fh_north_asynch_in == NULL) && (ru->fh_south_out)) ru->fh_south_out(ru);
if (ru->fh_north_out) ru->fh_north_out(ru);
}
else
{
if(proc->frame_tx == print_frame)
{
for (i=0; i<ru->nb_tx; i++)
{
sprintf(filename,"tx%ddataF_frame%d_sf%d.m", i, print_frame, proc->subframe_tx);
LOG_M(filename,"txdataF_frame",&ru->common.txdataF_BF[i][0],fp->samples_per_subframe_wCP, 1, 1);
if(proc->subframe_tx == 9)
{
sprintf(filename,"tx%ddata_frame%d.m", i, print_frame);
LOG_M(filename,"txdata_frame",&ru->common.txdata[i][0],fp->samples_per_frame, 1, 1);
sprintf(filename,"tx%ddata_frame%d.dat", i, print_frame);
FILE *output_fd = fopen(filename,"w");
if (output_fd) {
fwrite(&ru->common.txdata[i][0],
sizeof(int32_t),
fp->samples_per_frame,
output_fd);
fclose(output_fd);
}
else {
LOG_E(PHY,"Cannot write to file %s\n",filename);
}
}//if(proc->subframe_tx == 9)
}//for (i=0; i<ru->nb_tx; i++)
}//if(proc->frame_tx == print_frame)
}//else emulate_rf
release_thread(&proc->mutex_gNBs,&proc->instance_cnt_gNBs,"ru_thread_tx");
for(i = 0; i<ru->num_gNB; i++)
{
gNB = ru->gNB_list[i];
gNB_proc = &gNB->proc;
L1_proc = (get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT)? &gNB_proc->L1_proc_tx : &gNB_proc->L1_proc;
pthread_mutex_lock(&gNB_proc->mutex_RU_tx);
for (int j=0;j<gNB->num_RU;j++) {
if (ru == gNB->RU_list[j]) {
if ((gNB_proc->RU_mask_tx&(1<<j)) > 0)
LOG_E(PHY,"gNB %d frame %d, subframe %d : previous information from RU tx %d (num_RU %d,mask %x) has not been served yet!\n",
gNB->Mod_id,gNB_proc->frame_rx,gNB_proc->subframe_rx,ru->idx,gNB->num_RU,gNB_proc->RU_mask_tx);
gNB_proc->RU_mask_tx |= (1<<j);
}
}
if (gNB_proc->RU_mask_tx != (1<<gNB->num_RU)-1) { // not all RUs have provided their information so return
pthread_mutex_unlock(&gNB_proc->mutex_RU_tx);
}
else { // all RUs TX are finished so send the ready signal to eNB processing
gNB_proc->RU_mask_tx = 0;
pthread_mutex_unlock(&gNB_proc->mutex_RU_tx);
pthread_mutex_lock( &L1_proc->mutex_RUs);
L1_proc->instance_cnt_RUs = 0;
// the thread can now be woken up
if (pthread_cond_signal(&L1_proc->cond_RUs) != 0) {
LOG_E( PHY, "[gNB] ERROR pthread_cond_signal for gNB TXnp4 thread\n");
exit_fun( "ERROR pthread_cond_signal" );
}
pthread_mutex_unlock( &L1_proc->mutex_RUs );
}
}
}
release_thread(&proc->mutex_FH1,&proc->instance_cnt_FH1,"ru_thread_tx");
return 0;
}
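The loop at the end of `ru_thread_tx` keeps a per-gNB bitmask (`RU_mask_tx`) so the L1 TX thread is signalled exactly once per subframe, by whichever RU finishes last. A stripped-down sketch of that aggregation follows; the `gnb_sketch_t` type and `ru_tx_done()` function are hypothetical stand-ins, not OAI code.

```c
#include <pthread.h>

typedef struct {
  pthread_mutex_t mask_mutex, l1_mutex;
  pthread_cond_t  l1_cond;
  unsigned        ru_mask;          /* bit i set once RU i has delivered its TX data */
  int             num_ru;
  int             l1_instance_cnt;  /* -1 = L1 TX thread idle, 0 = work pending */
} gnb_sketch_t;

static void ru_tx_done(gnb_sketch_t *g, int ru_index) {
  pthread_mutex_lock(&g->mask_mutex);
  g->ru_mask |= (1u << ru_index);
  int all_done = (g->ru_mask == (1u << g->num_ru) - 1u);
  if (all_done) g->ru_mask = 0;     /* re-arm the mask for the next subframe */
  pthread_mutex_unlock(&g->mask_mutex);

  if (all_done) {                   /* last RU wakes the L1 TX thread exactly once */
    pthread_mutex_lock(&g->l1_mutex);
    g->l1_instance_cnt = 0;
    pthread_cond_signal(&g->l1_cond);
    pthread_mutex_unlock(&g->l1_mutex);
  }
}
```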
static void* ru_thread( void* param ) { static void* ru_thread( void* param ) {
static int ru_thread_status; static int ru_thread_status;
...@@ -1468,6 +1584,10 @@ static void* ru_thread( void* param ) { ...@@ -1468,6 +1584,10 @@ static void* ru_thread( void* param ) {
if ((ru->is_slave) && (ru->if_south == LOCAL_RF)) do_ru_synch(ru); if ((ru->is_slave) && (ru->if_south == LOCAL_RF)) do_ru_synch(ru);
} }
pthread_mutex_lock(&proc->mutex_FH1);
proc->instance_cnt_FH1 = 0;
pthread_mutex_unlock(&proc->mutex_FH1);
pthread_cond_signal(&proc->cond_FH1);
// This is a forever while loop, it loops over subframes which are scheduled by incoming samples from HW devices // This is a forever while loop, it loops over subframes which are scheduled by incoming samples from HW devices
while (!oai_exit) { while (!oai_exit) {
...@@ -1503,6 +1623,7 @@ static void* ru_thread( void* param ) { ...@@ -1503,6 +1623,7 @@ static void* ru_thread( void* param ) {
}*/ }*/
// adjust for timing offset between RU // adjust for timing offset between RU
//printf("~~~~~~~~~~~~~~~~~~~~~~~~~~%d.%d in ru_thread is in process\n", proc->frame_rx, proc->subframe_rx);
if (ru->idx!=0) proc->frame_tx = (proc->frame_tx+proc->frame_offset)&1023; if (ru->idx!=0) proc->frame_tx = (proc->frame_tx+proc->frame_offset)&1023;
...@@ -1514,53 +1635,52 @@ static void* ru_thread( void* param ) { ...@@ -1514,53 +1635,52 @@ static void* ru_thread( void* param ) {
wakeup_slaves(proc); wakeup_slaves(proc);
// wakeup all gNB processes waiting for this RU // wakeup all gNB processes waiting for this RU
if (ru->num_gNB>0) wakeup_gNBs(ru); if (ru->num_gNB>0) wakeup_gNB_L1s(ru);
// wait until gNBs are finished subframe RX n and TX n+sf_ahead if(get_thread_parallel_conf() == PARALLEL_SINGLE_THREAD && ru->num_eNB==0)
wait_on_condition(&proc->mutex_gNBs,&proc->cond_gNBs,&proc->instance_cnt_gNBs,"ru_thread");
// do TX front-end processing if needed (precoding and/or IDFTs)
if (ru->feptx_prec) ru->feptx_prec(ru);
// do OFDM if needed
if ((ru->fh_north_asynch_in == NULL) && (ru->feptx_ofdm)) ru->feptx_ofdm(ru);
if(!emulate_rf)
{ {
// do outgoing fronthaul (south) if needed // do TX front-end processing if needed (precoding and/or IDFTs)
if ((ru->fh_north_asynch_in == NULL) && (ru->fh_south_out)) ru->fh_south_out(ru); if (ru->feptx_prec) ru->feptx_prec(ru);
// do OFDM if needed
if ((ru->fh_north_asynch_in == NULL) && (ru->feptx_ofdm)) ru->feptx_ofdm(ru);
if(!emulate_rf)
{
// do outgoing fronthaul (south) if needed
if ((ru->fh_north_asynch_in == NULL) && (ru->fh_south_out)) ru->fh_south_out(ru);
if (ru->fh_north_out) ru->fh_north_out(ru); if (ru->fh_north_out) ru->fh_north_out(ru);
} }
else else
{
if(proc->frame_tx == print_frame)
{ {
for (i=0; i<ru->nb_tx; i++) if(proc->frame_tx == print_frame)
{ {
sprintf(filename,"tx%ddataF_frame%d_sf%d.m", i, print_frame, proc->subframe_tx); for (i=0; i<ru->nb_tx; i++)
LOG_M(filename,"txdataF_frame",&ru->common.txdataF_BF[i][0],fp->samples_per_subframe_wCP, 1, 1);
if(proc->subframe_tx == 9)
{ {
sprintf(filename,"tx%ddata_frame%d.m", i, print_frame); sprintf(filename,"tx%ddataF_frame%d_sf%d.m", i, print_frame, proc->subframe_tx);
LOG_M(filename,"txdata_frame",&ru->common.txdata[i][0],fp->samples_per_frame, 1, 1); LOG_M(filename,"txdataF_frame",&ru->common.txdataF_BF[i][0],fp->samples_per_subframe_wCP, 1, 1);
sprintf(filename,"tx%ddata_frame%d.dat", i, print_frame); if(proc->subframe_tx == 9)
FILE *output_fd = fopen(filename,"w"); {
if (output_fd) { sprintf(filename,"tx%ddata_frame%d.m", i, print_frame);
fwrite(&ru->common.txdata[i][0], LOG_M(filename,"txdata_frame",&ru->common.txdata[i][0],fp->samples_per_frame, 1, 1);
sizeof(int32_t), sprintf(filename,"tx%ddata_frame%d.dat", i, print_frame);
fp->samples_per_frame, FILE *output_fd = fopen(filename,"w");
output_fd); if (output_fd) {
fclose(output_fd); fwrite(&ru->common.txdata[i][0],
} sizeof(int32_t),
else { fp->samples_per_frame,
LOG_E(PHY,"Cannot write to file %s\n",filename); output_fd);
} fclose(output_fd);
} }
} else {
} LOG_E(PHY,"Cannot write to file %s\n",filename);
//else if (proc->frame_tx > print_frame) oai_exit = 1; }
} }//if(proc->subframe_tx == 9)
}//for (i=0; i<ru->nb_tx; i++)
}//if(proc->frame_tx == print_frame)
}//else emulate_rf
proc->emulate_rf_busy = 0;
}//if(get_thread_parallel_conf() == PARALLEL_SINGLE_THREAD)
} }
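In single-thread mode the TX front-end work stays inline in `ru_thread`, because `wakeup_gNB_L1s()` runs the gNB L1 synchronously in that configuration; in the split modes it is deferred to `ru_thread_tx`. A condensed sketch of that branching is shown below; the stub functions and the `ru_sketch_t` type are assumptions made for illustration only.

```c
#include <pthread.h>

typedef enum { PARALLEL_SINGLE_THREAD, PARALLEL_RU_L1_SPLIT, PARALLEL_RU_L1_TRX_SPLIT } parallel_conf_t;
typedef struct ru_sketch ru_sketch_t;   /* opaque stand-in for RU_t */

/* stubs standing in for the real RU operations */
static void wakeup_gnb_l1(ru_sketch_t *ru) { (void)ru; }  /* direct call or cond_signal */
static void tx_frontend(ru_sketch_t *ru)   { (void)ru; }  /* precoding, OFDM, southbound FH */

static void ru_subframe_end(ru_sketch_t *ru, parallel_conf_t conf) {
  wakeup_gnb_l1(ru);              /* single-thread: runs the gNB L1 synchronously;
                                     split modes: only signals the L1 RX/TX thread */
  if (conf == PARALLEL_SINGLE_THREAD)
    tx_frontend(ru);              /* TX processing done inline, as in the branch above */
  /* otherwise ru_thread_tx performs the TX processing once the gNBs signal completion */
}
```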
...@@ -1680,7 +1800,7 @@ void init_RU_proc(RU_t *ru) { ...@@ -1680,7 +1800,7 @@ void init_RU_proc(RU_t *ru) {
int i=0; int i=0;
RU_proc_t *proc; RU_proc_t *proc;
pthread_attr_t *attr_FH=NULL,*attr_prach=NULL,*attr_asynch=NULL, *attr_emulateRF=NULL;// *attr_synch=NULL; pthread_attr_t *attr_FH=NULL, *attr_FH1=NULL,*attr_prach=NULL,*attr_asynch=NULL, *attr_emulateRF=NULL;// *attr_synch=NULL;
//pthread_attr_t *attr_fep=NULL; //pthread_attr_t *attr_fep=NULL;
#if (RRC_VERSION >= MAKE_VERSION(14, 0, 0)) #if (RRC_VERSION >= MAKE_VERSION(14, 0, 0))
//pthread_attr_t *attr_prach_br=NULL; //pthread_attr_t *attr_prach_br=NULL;
...@@ -1697,6 +1817,8 @@ void init_RU_proc(RU_t *ru) { ...@@ -1697,6 +1817,8 @@ void init_RU_proc(RU_t *ru) {
proc->instance_cnt_prach = -1; proc->instance_cnt_prach = -1;
proc->instance_cnt_synch = -1; ; proc->instance_cnt_synch = -1; ;
proc->instance_cnt_FH = -1; proc->instance_cnt_FH = -1;
proc->instance_cnt_FH1 = -1;
proc->instance_cnt_gNBs = -1;
proc->instance_cnt_asynch_rxtx = -1; proc->instance_cnt_asynch_rxtx = -1;
proc->instance_cnt_emulateRF = -1; proc->instance_cnt_emulateRF = -1;
proc->first_rx = 1; proc->first_rx = 1;
...@@ -1711,17 +1833,20 @@ void init_RU_proc(RU_t *ru) { ...@@ -1711,17 +1833,20 @@ void init_RU_proc(RU_t *ru) {
pthread_mutex_init( &proc->mutex_asynch_rxtx, NULL); pthread_mutex_init( &proc->mutex_asynch_rxtx, NULL);
pthread_mutex_init( &proc->mutex_synch,NULL); pthread_mutex_init( &proc->mutex_synch,NULL);
pthread_mutex_init( &proc->mutex_FH,NULL); pthread_mutex_init( &proc->mutex_FH,NULL);
pthread_mutex_init( &proc->mutex_FH1,NULL);
pthread_mutex_init( &proc->mutex_emulateRF,NULL); pthread_mutex_init( &proc->mutex_emulateRF,NULL);
pthread_mutex_init( &proc->mutex_gNBs, NULL); pthread_mutex_init( &proc->mutex_gNBs, NULL);
pthread_cond_init( &proc->cond_prach, NULL); pthread_cond_init( &proc->cond_prach, NULL);
pthread_cond_init( &proc->cond_FH, NULL); pthread_cond_init( &proc->cond_FH, NULL);
pthread_cond_init( &proc->cond_FH1, NULL);
pthread_cond_init( &proc->cond_emulateRF, NULL); pthread_cond_init( &proc->cond_emulateRF, NULL);
pthread_cond_init( &proc->cond_asynch_rxtx, NULL); pthread_cond_init( &proc->cond_asynch_rxtx, NULL);
pthread_cond_init( &proc->cond_synch,NULL); pthread_cond_init( &proc->cond_synch,NULL);
pthread_cond_init( &proc->cond_gNBs, NULL); pthread_cond_init( &proc->cond_gNBs, NULL);
pthread_attr_init( &proc->attr_FH); pthread_attr_init( &proc->attr_FH);
pthread_attr_init( &proc->attr_FH1);
pthread_attr_init( &proc->attr_emulateRF); pthread_attr_init( &proc->attr_emulateRF);
pthread_attr_init( &proc->attr_prach); pthread_attr_init( &proc->attr_prach);
pthread_attr_init( &proc->attr_synch); pthread_attr_init( &proc->attr_synch);
...@@ -1731,6 +1856,7 @@ void init_RU_proc(RU_t *ru) { ...@@ -1731,6 +1856,7 @@ void init_RU_proc(RU_t *ru) {
#ifndef DEADLINE_SCHEDULER #ifndef DEADLINE_SCHEDULER
attr_FH = &proc->attr_FH; attr_FH = &proc->attr_FH;
attr_FH1 = &proc->attr_FH1;
attr_emulateRF = &proc->attr_emulateRF; attr_emulateRF = &proc->attr_emulateRF;
attr_prach = &proc->attr_prach; attr_prach = &proc->attr_prach;
//attr_synch = &proc->attr_synch; //attr_synch = &proc->attr_synch;
...@@ -1738,6 +1864,8 @@ void init_RU_proc(RU_t *ru) { ...@@ -1738,6 +1864,8 @@ void init_RU_proc(RU_t *ru) {
#endif #endif
pthread_create( &proc->pthread_FH, attr_FH, ru_thread, (void*)ru ); pthread_create( &proc->pthread_FH, attr_FH, ru_thread, (void*)ru );
if (get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT || get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT)
pthread_create( &proc->pthread_FH1, attr_FH1, ru_thread_tx, (void*)ru );
if(emulate_rf) if(emulate_rf)
pthread_create( &proc->pthread_emulateRF, attr_emulateRF, emulatedRF_thread, (void*)proc ); pthread_create( &proc->pthread_emulateRF, attr_emulateRF, emulatedRF_thread, (void*)proc );
if (ru->function == NGFI_RRU_IF4p5) { if (ru->function == NGFI_RRU_IF4p5) {
......
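`init_RU_proc` now creates the extra FH1/TX thread only when the configuration asks for it. The sketch below shows that conditional start-up in isolation; the `ru_threads_t` type and the placeholder loop bodies are assumptions, not the OAI definitions.

```c
#include <pthread.h>

typedef enum { PARALLEL_SINGLE_THREAD, PARALLEL_RU_L1_SPLIT, PARALLEL_RU_L1_TRX_SPLIT } parallel_conf_t;
typedef struct { pthread_t fh_thread, fh1_thread; } ru_threads_t;

static void *ru_rx_loop(void *arg) { (void)arg; return NULL; }  /* placeholder for ru_thread    */
static void *ru_tx_loop(void *arg) { (void)arg; return NULL; }  /* placeholder for ru_thread_tx */

static void start_ru_threads(ru_threads_t *ru, parallel_conf_t conf) {
  pthread_create(&ru->fh_thread, NULL, ru_rx_loop, ru);          /* fronthaul/RX thread always runs */
  if (conf == PARALLEL_RU_L1_SPLIT || conf == PARALLEL_RU_L1_TRX_SPLIT)
    pthread_create(&ru->fh1_thread, NULL, ru_tx_loop, ru);       /* TX thread only when RU/L1 is split */
}
```

Keeping the TX thread optional avoids paying the extra context switch and condition-variable hand-off in the single-thread configuration.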
...@@ -86,7 +86,10 @@ ...@@ -86,7 +86,10 @@
#define CONFIG_HLP_TNOFORK "to ease debugging with gdb\n" #define CONFIG_HLP_TNOFORK "to ease debugging with gdb\n"
#define CONFIG_HLP_DISABLNBIOT "disable nb-iot, even if defined in config\n" #define CONFIG_HLP_DISABLNBIOT "disable nb-iot, even if defined in config\n"
#define CONFIG_HLP_NUMEROLOGY "adding numerology for 5G\n"
#define CONFIG_HLP_EMULATE_RF "Emulated RF enabled (disabled by default)\n" #define CONFIG_HLP_EMULATE_RF "Emulated RF enabled (disabled by default)\n"
#define CONFIG_HLP_PARALLEL_CMD "three configurations for the level of parallelism: 'PARALLEL_SINGLE_THREAD', 'PARALLEL_RU_L1_SPLIT', or 'PARALLEL_RU_L1_TRX_SPLIT'\n"
#define CONFIG_HLP_WORKER_CMD "two options for the worker threads: 'WORKER_DISABLE' or 'WORKER_ENABLE'\n"
/***************************************************************************************************************************************/ /***************************************************************************************************************************************/
/* command line options definitions, CMDLINE_XXXX_DESC macros are used to initialize paramdef_t arrays which are then used as argument /* command line options definitions, CMDLINE_XXXX_DESC macros are used to initialize paramdef_t arrays which are then used as argument
...@@ -167,8 +170,10 @@ ...@@ -167,8 +170,10 @@
{"P" , CONFIG_HLP_L2MONP, 0, strptr:(char **)&in_path, defstrval:"/tmp/oai_opt.pcap", TYPE_STRING, sizeof(in_path)}, \ {"P" , CONFIG_HLP_L2MONP, 0, strptr:(char **)&in_path, defstrval:"/tmp/oai_opt.pcap", TYPE_STRING, sizeof(in_path)}, \
{"q" , CONFIG_HLP_STMON, PARAMFLAG_BOOL, iptr:&opp_enabled, defintval:0, TYPE_INT, 0}, \ {"q" , CONFIG_HLP_STMON, PARAMFLAG_BOOL, iptr:&opp_enabled, defintval:0, TYPE_INT, 0}, \
{"S" , CONFIG_HLP_MSLOTS, PARAMFLAG_BOOL, u8ptr:&exit_missed_slots, defintval:1, TYPE_UINT8, 0}, \ {"S" , CONFIG_HLP_MSLOTS, PARAMFLAG_BOOL, u8ptr:&exit_missed_slots, defintval:1, TYPE_UINT8, 0}, \
{"T" , CONFIG_HLP_TDD, PARAMFLAG_BOOL, iptr:&tddflag, defintval:0, TYPE_INT, 0}, \ {"T" , CONFIG_HLP_TDD, PARAMFLAG_BOOL, iptr:&tddflag, defintval:0, TYPE_INT, 0}, {"numerology" , CONFIG_HLP_NUMEROLOGY, PARAMFLAG_BOOL, iptr:&numerology, defintval:0, TYPE_INT, 0}, \
{"emulate-rf" , CONFIG_HLP_EMULATE_RF, PARAMFLAG_BOOL, iptr:&emulate_rf, defintval:0, TYPE_INT, 0}, \ {"emulate-rf" , CONFIG_HLP_EMULATE_RF, PARAMFLAG_BOOL, iptr:&emulate_rf, defintval:0, TYPE_INT, 0}, \
{"parallel-config", CONFIG_HLP_PARALLEL_CMD,0, strptr:(char **)&parallel_config, defstrval:NULL, TYPE_STRING, 0}, \
{"worker-config", CONFIG_HLP_WORKER_CMD, 0, strptr:(char **)&worker_config, defstrval:NULL, TYPE_STRING, 0}, \
{"nbiot-disable", CONFIG_HLP_DISABLNBIOT,PARAMFLAG_BOOL, iptr:&nonbiotflag, defintval:0, TYPE_INT, 0} \ {"nbiot-disable", CONFIG_HLP_DISABLNBIOT,PARAMFLAG_BOOL, iptr:&nonbiotflag, defintval:0, TYPE_INT, 0} \
} }
......
...@@ -165,9 +165,31 @@ uint8_t nb_antenna_rx = 1; ...@@ -165,9 +165,31 @@ uint8_t nb_antenna_rx = 1;
char ref[128] = "internal"; char ref[128] = "internal";
char channels[128] = "0"; char channels[128] = "0";
int codingw = 0; char *parallel_config = NULL;
int fepw = 0; char *worker_config = NULL;
static THREAD_STRUCT thread_struct;
void set_parallel_conf(char *parallel_conf)
{
if(strcmp(parallel_conf,"PARALLEL_SINGLE_THREAD")==0) thread_struct.parallel_conf = PARALLEL_SINGLE_THREAD;
else if(strcmp(parallel_conf,"PARALLEL_RU_L1_SPLIT")==0) thread_struct.parallel_conf = PARALLEL_RU_L1_SPLIT;
else if(strcmp(parallel_conf,"PARALLEL_RU_L1_TRX_SPLIT")==0) thread_struct.parallel_conf = PARALLEL_RU_L1_TRX_SPLIT;
printf("[CONFIG] parallel conf is set to %d\n",thread_struct.parallel_conf);
}
void set_worker_conf(char *worker_conf)
{
if(strcmp(worker_conf,"WORKER_DISABLE")==0) thread_struct.worker_conf = WORKER_DISABLE;
else if(strcmp(worker_conf,"WORKER_ENABLE")==0) thread_struct.worker_conf = WORKER_ENABLE;
printf("[CONFIG] worker conf is set to %d\n",thread_struct.worker_conf);
}
PARALLEL_CONF_t get_thread_parallel_conf(void)
{
return thread_struct.parallel_conf;
}
WORKER_CONF_t get_thread_worker_conf(void)
{
return thread_struct.worker_conf;
}
int rx_input_level_dBm; int rx_input_level_dBm;
//static int online_log_messages=0; //static int online_log_messages=0;
......
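The parsers above fall through silently when the option string does not match any known value, leaving the static `thread_struct` with whatever value it already holds. Below is a hedged variant that makes the fallback explicit; the `parse_parallel_conf` helper name, the warning message, and the assumption that `PARALLEL_SINGLE_THREAD` is the enum's first value are additions for illustration, not part of the patch.

```c
#include <stdio.h>
#include <string.h>

typedef enum { PARALLEL_SINGLE_THREAD = 0, PARALLEL_RU_L1_SPLIT, PARALLEL_RU_L1_TRX_SPLIT } PARALLEL_CONF_t;

static PARALLEL_CONF_t parse_parallel_conf(const char *s) {
  if      (strcmp(s, "PARALLEL_SINGLE_THREAD")   == 0) return PARALLEL_SINGLE_THREAD;
  else if (strcmp(s, "PARALLEL_RU_L1_SPLIT")     == 0) return PARALLEL_RU_L1_SPLIT;
  else if (strcmp(s, "PARALLEL_RU_L1_TRX_SPLIT") == 0) return PARALLEL_RU_L1_TRX_SPLIT;
  fprintf(stderr, "[CONFIG] unknown parallel conf '%s', falling back to PARALLEL_SINGLE_THREAD\n", s);
  return PARALLEL_SINGLE_THREAD;
}
```

On the command line these options would presumably be passed as, for example, `--parallel-config PARALLEL_RU_L1_TRX_SPLIT --worker-config WORKER_ENABLE`, assuming the config module maps the long option names directly to the entries in the option tables above.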
...@@ -85,9 +85,9 @@ ...@@ -85,9 +85,9 @@
#define CONFIG_HLP_TNOFORK "to ease debugging with gdb\n" #define CONFIG_HLP_TNOFORK "to ease debugging with gdb\n"
#define CONFIG_HLP_NUMEROLOGY "adding numerology for 5G\n" #define CONFIG_HLP_NUMEROLOGY "adding numerology for 5G\n"
#define CONFIG_HLP_CODINGW "coding worker thread enable(disable by defult)\n"
#define CONFIG_HLP_FEPW "FEP worker thread enabled(disable by defult)\n"
#define CONFIG_HLP_EMULATE_RF "Emulated RF enabled (disabled by default)\n" #define CONFIG_HLP_EMULATE_RF "Emulated RF enabled (disabled by default)\n"
#define CONFIG_HLP_PARALLEL_CMD "three configurations for the level of parallelism: 'PARALLEL_SINGLE_THREAD', 'PARALLEL_RU_L1_SPLIT', or 'PARALLEL_RU_L1_TRX_SPLIT'\n"
#define CONFIG_HLP_WORKER_CMD "two options for the worker threads: 'WORKER_DISABLE' or 'WORKER_ENABLE'\n"
#define CONFIG_HLP_DISABLNBIOT "disable nb-iot, even if defined in config\n" #define CONFIG_HLP_DISABLNBIOT "disable nb-iot, even if defined in config\n"
...@@ -179,8 +179,8 @@ ...@@ -179,8 +179,8 @@
{"T" , CONFIG_HLP_TDD, PARAMFLAG_BOOL, iptr:&tddflag, defintval:0, TYPE_INT, 0}, \ {"T" , CONFIG_HLP_TDD, PARAMFLAG_BOOL, iptr:&tddflag, defintval:0, TYPE_INT, 0}, \
{"numerology" , CONFIG_HLP_NUMEROLOGY, PARAMFLAG_BOOL, iptr:&numerology, defintval:0, TYPE_INT, 0}, \ {"numerology" , CONFIG_HLP_NUMEROLOGY, PARAMFLAG_BOOL, iptr:&numerology, defintval:0, TYPE_INT, 0}, \
{"emulate-rf" , CONFIG_HLP_EMULATE_RF, PARAMFLAG_BOOL, iptr:&emulate_rf, defintval:0, TYPE_INT, 0}, \ {"emulate-rf" , CONFIG_HLP_EMULATE_RF, PARAMFLAG_BOOL, iptr:&emulate_rf, defintval:0, TYPE_INT, 0}, \
{"codingw" , CONFIG_HLP_CODINGW, PARAMFLAG_BOOL, iptr:&codingw, defintval:0, TYPE_INT, 0}, \ {"parallel-config", CONFIG_HLP_PARALLEL_CMD,0, strptr:(char **)&parallel_config, defstrval:NULL, TYPE_STRING, 0}, \
{"fepw" , CONFIG_HLP_FEPW, PARAMFLAG_BOOL, iptr:&fepw, defintval:0, TYPE_INT, 0}, \ {"worker-config", CONFIG_HLP_WORKER_CMD, 0, strptr:(char **)&worker_config, defstrval:NULL, TYPE_STRING, 0}, \
{"nbiot-disable", CONFIG_HLP_DISABLNBIOT, PARAMFLAG_BOOL, iptr:&nonbiotflag, defintval:0, TYPE_INT, 0} \ {"nbiot-disable", CONFIG_HLP_DISABLNBIOT, PARAMFLAG_BOOL, iptr:&nonbiotflag, defintval:0, TYPE_INT, 0} \
} }
......