Commit 51c55f6f authored by Remi Hardy

Merge remote-tracking branch 'origin/gnb-threadpool' into integration_2021_wk10

parents 1142acf0 62a7165c
......@@ -234,6 +234,7 @@ L1s = (
{
num_cc = 1;
tr_n_preference = "local_mac";
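# pusch_proc_threads sizes the gNB processing thread pool introduced by this merge
# (ULSCH decoding and slot processing); at init it is clamped to the number of online CPUs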
pusch_proc_threads = 8;
}
);
......
......@@ -88,7 +88,8 @@ int write_file_matlab(const char *fname,
void *data,
int length,
int dec,
unsigned int format)
unsigned int format,
int multiVec)
{
FILE *fp=NULL;
int i;
......@@ -100,8 +101,7 @@ int write_file_matlab(const char *fname,
//printf("Writing %d elements of type %d to %s\n",length,format,fname);
if (format == 10 || format ==11 || format == 12 || format == 13 || format == 14) {
if (format == 10 || format ==11 || format == 12 || format == 13 || format == 14 || multiVec) {
fp = fopen(fname,"a+");
} else if (format != 10 && format !=11 && format != 12 && format != 13 && format != 14) {
fp = fopen(fname,"w+");
......@@ -137,8 +137,7 @@ int write_file_matlab(const char *fname,
return(0);
}
if (format != 10 && format !=11 && format != 12 && format != 13 && format != 14)
if ((format != 10 && format !=11 && format != 12 && format != 13 && format != 14) || multiVec)
fprintf(fp,"%s = [",vname);
switch (format) {
......@@ -247,7 +246,7 @@ int write_file_matlab(const char *fname,
AssertFatal(false, "unknown dump format: %d\n", format);
}
if (format != 10 && format !=11 && format !=12 && format != 13 && format != 15) {
if ((format != 10 && format !=11 && format !=12 && format != 13 && format != 15) || multiVec) {
fprintf(fp,"];\n");
fclose(fp);
return(0);
......
......@@ -335,6 +335,7 @@ typedef struct {
@param length length of data vector to output
@param dec decimation level
@param format data format (0 = real 16-bit, 1 = complex 16-bit, 2 = real 32-bit, 3 = complex 32-bit, 4 = real 8-bit, 5 = complex 8-bit)
@param multiVec when set to 1, append to an existing file instead of creating a new one (useful for writing multiple vectors to the same file: call the function repeatedly with the same file name and this parameter set to 1)
*/
#define MATLAB_RAW (1<<31)
#define MATLAB_SHORT 0
......@@ -354,7 +355,7 @@ typedef struct {
#define MATLAB_CSHORT_BRACKET2 14
#define MATLAB_CSHORT_BRACKET3 15
int32_t write_file_matlab(const char *fname, const char *vname, void *data, int length, int dec, unsigned int format);
int32_t write_file_matlab(const char *fname, const char *vname, void *data, int length, int dec, unsigned int format, int multiVec);
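/* Editorial sketch (not part of this change): the new trailing multiVec argument selects
 * append mode, so calling write_file_matlab() several times with the same file name and
 * multiVec = 1 collects several vectors in one .m dump instead of overwriting it.
 * Buffer sizes and names below are made up. */
static inline void dump_two_vectors_example(int16_t *chan_est, int16_t *rx_sig)
{
  write_file_matlab("slot_dump.m", "chan_est", chan_est, 1024, 1, MATLAB_SHORT, 1); // appended
  write_file_matlab("slot_dump.m", "rx_sig", rx_sig, 1024, 1, MATLAB_SHORT, 1);     // appended to the same file
}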
/*----------------macro definitions for reading log configuration from the config module */
#define CONFIG_STRING_LOG_PREFIX "log_config"
......@@ -414,7 +415,7 @@ int32_t write_file_matlab(const char *fname, const char *vname, void *data, int
/* bitmask dependent macros, to generate debug file such as matlab file or message dump */
# define LOG_DUMPFLAG(D) (g_log->dump_mask & D)
# define LOG_M(file, vector, data, len, dec, format) do { write_file_matlab(file, vector, data, len, dec, format);} while(0)/* */
# define LOG_M(file, vector, data, len, dec, format) do { write_file_matlab(file, vector, data, len, dec, format, 0);} while(0)/* */
/* define variable only used in LOG macro's */
# define LOG_VAR(A,B) A B
# else /* T_TRACER: remove all debugging and tracing messages, except errors */
......@@ -431,7 +432,7 @@ int32_t write_file_matlab(const char *fname, const char *vname, void *data, int
# define LOG_DUMPFLAG(D) (g_log->debug_mask & D)
# define LOG_DUMPMSG(c, f, b, s, x...) do { if(g_log->dump_mask & f) log_dump(c, b, s, LOG_DUMP_CHAR, x) ;} while (0) /* */
# define LOG_M(file, vector, data, len, dec, format) do { write_file_matlab(file, vector, data, len, dec, format);} while(0)
# define LOG_M(file, vector, data, len, dec, format) do { write_file_matlab(file, vector, data, len, dec, format, 0);} while(0)
# define LOG_VAR(A,B) A B
# endif /* T_TRACER */
/* avoid warnings for variables only used in LOG macro's but set outside debug section */
......@@ -439,9 +440,10 @@ int32_t write_file_matlab(const char *fname, const char *vname, void *data, int
#define LOG_USEDINLOG_VAR(A,B) GCC_NOTUSED A B
/* unfiltered macros, useful for simulators or messages at init time, before log is configured */
#define LOG_UM(file, vector, data, len, dec, format) do { write_file_matlab(file, vector, data, len, dec, format);} while(0)
#define LOG_UM(file, vector, data, len, dec, format) do { write_file_matlab(file, vector, data, len, dec, format, 0);} while(0)
#define LOG_UI(c, x...) do {logRecord_mt(__FILE__, __FUNCTION__, __LINE__,c, OAILOG_INFO, x) ; } while(0)
#define LOG_UDUMPMSG(c, b, s, f, x...) do { log_dump(c, b, s, f, x) ;} while (0) /* */
# define LOG_MM(file, vector, data, len, dec, format) do { write_file_matlab(file, vector, data, len, dec, format, 1);} while(0)
/* @}*/
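/* Editorial sketch: for the plain (non-bracket) formats LOG_M recreates its target file on
 * every call, whereas the new LOG_MM variant passes multiVec = 1 and therefore appends,
 * so several vectors end up in one .m file. File and variable names are made up. */
static inline void log_mm_usage_example(int16_t *rxdata0, int16_t *rxdata1)
{
  LOG_MM("ulsim_debug.m", "rxdata0", rxdata0, 1024, 1, MATLAB_SHORT);
  LOG_MM("ulsim_debug.m", "rxdata1", rxdata1, 1024, 1, MATLAB_SHORT);
}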
......
......@@ -81,10 +81,11 @@
#include "s1ap_eNB.h"
#include "SIMULATION/ETH_TRANSPORT/proto.h"
#include <executables/softmodem-common.h>
#include "T.h"
#include "nfapi/oai_integration/vendor_ext.h"
#include <nfapi/oai_integration/nfapi_pnf.h>
//#define DEBUG_THREADS 1
//#define USRP_DEBUG 1
......@@ -92,17 +93,6 @@
// extern openair0_device openair0;
extern volatile int start_gNB;
extern volatile int start_UE;
extern volatile int oai_exit;
extern openair0_config_t openair0_cfg[MAX_CARDS];
extern int transmission_mode;
extern uint16_t sf_ahead;
extern uint16_t sl_ahead;
//pthread_t main_gNB_thread;
time_stats_t softmodem_stats_mt; // main thread
......@@ -111,40 +101,43 @@ time_stats_t softmodem_stats_rxtx_sf; // total tx time
time_stats_t nfapi_meas; // total tx time
time_stats_t softmodem_stats_rx_sf; // total rx time
/* mutex, cond and variable to serialize phy proc TX calls
* (this mechanism may be relaxed in the future for better
* performance)
*/
static struct {
pthread_mutex_t mutex_phy_proc_tx;
pthread_cond_t cond_phy_proc_tx;
volatile uint8_t phy_proc_CC_id;
} sync_phy_proc;
extern double cpuf;
#include "executables/thread-common.h"
void init_gNB(int,int);
void stop_gNB(int nb_inst);
int wakeup_txfh(PHY_VARS_gNB *gNB, gNB_L1_rxtx_proc_t *proc, int frame_tx, int slot_tx, uint64_t timestamp_tx);
int wakeup_tx(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int frame_tx, int slot_tx, uint64_t timestamp_tx);
#include "executables/thread-common.h"
//extern PARALLEL_CONF_t get_thread_parallel_conf(void);
//extern WORKER_CONF_t get_thread_worker_conf(void);
//#define TICK_TO_US(ts) (ts.diff)
#define TICK_TO_US(ts) (ts.trials==0?0:ts.diff/ts.trials)
void wakeup_prach_gNB(PHY_VARS_gNB *gNB, RU_t *ru, int frame, int subframe);
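/* Editorial summary of the new flow introduced by this merge: the dedicated gNB L1 threads
 * are replaced by a chain of thread-pool jobs. ru_thread() fills a processingData_L1_t
 * element from gNB->resp_L1 and pushes it to the pool, where rx_func() runs the UL receive
 * and scheduler part of the slot; for DL/mixed slots rx_func() queues tx_func() through
 * gNB->resp_L1_tx, which runs phy_procedures_gNB_TX() and in turn queues ru_tx_func()
 * through gNB->resp_RU_tx for TX front-end processing and fronthaul output. */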
void tx_func(void *param) {
extern uint8_t nfapi_mode;
extern void oai_subframe_ind(uint16_t sfn, uint16_t sf);
extern void oai_slot_ind(uint16_t sfn, uint16_t slot);
extern void add_subframe(uint16_t *frameP, uint16_t *subframeP, int offset);
processingData_L1_t *info = (processingData_L1_t *) param;
PHY_VARS_gNB *gNB = info->gNB;
int frame_tx = info->frame_tx;
int slot_tx = info->slot_tx;
//#define TICK_TO_US(ts) (ts.diff)
#define TICK_TO_US(ts) (ts.trials==0?0:ts.diff/ts.trials)
phy_procedures_gNB_TX(gNB, frame_tx,slot_tx, 1);
static inline int rxtx(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int frame_tx, int slot_tx, char *thread_name) {
// start FH TX processing
notifiedFIFO_elt_t *res;
res = pullTpool(gNB->resp_RU_tx, gNB->threadPool);
processingData_RU_t *syncMsg = (processingData_RU_t *)NotifiedFifoData(res);
syncMsg->frame_tx = frame_tx;
syncMsg->slot_tx = slot_tx;
syncMsg->timestamp_tx = info->timestamp_tx;
syncMsg->ru = gNB->RU_list[0];
res->key = slot_tx;
pushTpool(gNB->threadPool, res);
}
void rx_func(void *param) {
processingData_L1_t *info = (processingData_L1_t *) param;
PHY_VARS_gNB *gNB = info->gNB;
int frame_rx = info->frame_rx;
int slot_rx = info->slot_rx;
int frame_tx = info->frame_tx;
int slot_tx = info->slot_tx;
sl_ahead = sf_ahead*gNB->frame_parms.slots_per_subframe;
nfapi_nr_config_request_scf_t *cfg = &gNB->gNB_config;
......@@ -235,12 +228,6 @@ static inline int rxtx(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int frame_t
rnti_to_remove_count = 0;
if (pthread_mutex_unlock(&rnti_to_remove_mutex)) exit(1);
/*
// if this is IF5 or 3GPP_gNB
if (gNB && gNB->RU_list && gNB->RU_list[0] && gNB->RU_list[0]->function < NGFI_RAU_IF4p5) {
wakeup_prach_gNB(gNB,NULL,proc->frame_rx,proc->slot_rx);
}
*/
// Call the scheduler
pthread_mutex_lock(&gNB->UL_INFO_mutex);
......@@ -273,23 +260,24 @@ static inline int rxtx(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int frame_t
phy_procedures_gNB_uespec_RX(gNB, frame_rx, slot_rx);
}
if (oai_exit) return(-1);
stop_meas( &softmodem_stats_rxtx_sf );
LOG_D(PHY,"%s() Exit proc[rx:%d%d tx:%d%d]\n", __FUNCTION__, frame_rx, slot_rx, frame_tx, slot_tx);
// *****************************************
// TX processing for subframe n+sf_ahead
// run PHY TX procedures one after the other for all CCs to avoid race conditions
// (may be relaxed in the future for performance reasons)
// *****************************************
notifiedFIFO_elt_t *res;
if (tx_slot_type == NR_DOWNLINK_SLOT || tx_slot_type == NR_MIXED_SLOT) {
if(get_thread_parallel_conf() != PARALLEL_RU_L1_TRX_SPLIT) {
phy_procedures_gNB_TX(gNB, frame_tx,slot_tx, 1);
}
res = pullTpool(gNB->resp_L1_tx, gNB->threadPool);
processingData_L1_t *syncMsg = (processingData_L1_t *)NotifiedFifoData(res);
syncMsg->gNB = gNB;
syncMsg->frame_rx = frame_rx;
syncMsg->slot_rx = slot_rx;
syncMsg->frame_tx = frame_tx;
syncMsg->slot_tx = slot_tx;
syncMsg->timestamp_tx = info->timestamp_tx;
res->key = slot_tx;
pushTpool(gNB->threadPool, res);
}
stop_meas( &softmodem_stats_rxtx_sf );
LOG_D(PHY,"%s() Exit proc[rx:%d%d tx:%d%d]\n", __FUNCTION__, frame_rx, slot_rx, frame_tx, slot_tx);
#if 0
LOG_D(PHY, "rxtx:%lld nfapi:%lld phy:%lld tx:%lld rx:%lld prach:%lld ofdm:%lld ",
softmodem_stats_rxtx_sf.diff_now, nfapi_meas.diff_now,
......@@ -329,466 +317,7 @@ static inline int rxtx(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, int frame_t
TICK_TO_US(gNB->ulsch_tc_intl2_stats)
);
#endif
return(0);
}
static void *gNB_L1_thread_tx(void *param) {
PHY_VARS_gNB *gNB = (PHY_VARS_gNB *)param;
gNB_L1_proc_t *gNB_proc = &gNB->proc;
gNB_L1_rxtx_proc_t *L1_proc_tx = &gNB_proc->L1_proc_tx;
//PHY_VARS_gNB *gNB = RC.gNB[0][proc->CC_id];
char thread_name[100];
sprintf(thread_name,"gNB_L1_thread_tx\n");
while (!oai_exit) {
if (wait_on_condition(&L1_proc_tx->mutex,&L1_proc_tx->cond,&L1_proc_tx->instance_cnt,thread_name)<0) break;
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX1, 1 );
if (oai_exit) break;
// *****************************************
// TX processing for subframe n+4
// run PHY TX procedures one after the other for all CCs to avoid race conditions
// (may be relaxed in the future for performance reasons)
// *****************************************
int frame_tx = L1_proc_tx->frame_tx;
int slot_tx = L1_proc_tx->slot_tx;
uint64_t timestamp_tx = L1_proc_tx->timestamp_tx;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SLOT_NUMBER_TX1_GNB,slot_tx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX1_GNB,frame_tx);
phy_procedures_gNB_TX(gNB, frame_tx,slot_tx, 1);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_WAKEUP_TXFH, 1 );
pthread_mutex_lock( &L1_proc_tx->mutex );
L1_proc_tx->instance_cnt = -1;
// the thread can now be woken up
if (pthread_cond_signal(&L1_proc_tx->cond) != 0) {
LOG_E( PHY, "[gNB] ERROR pthread_cond_signal for gNB TXnp4 thread\n");
exit_fun( "ERROR pthread_cond_signal" );
}
pthread_mutex_unlock(&L1_proc_tx->mutex);
wakeup_txfh(gNB,L1_proc_tx,frame_tx,slot_tx,timestamp_tx);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_WAKEUP_TXFH, 0 );
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX1, 0 );
}
return 0;
}
/*!
* \brief The RX UE-specific and TX thread of gNB.
* \param param is a \ref gNB_L1_proc_t structure which contains the information on what to process.
* \returns a pointer to an int. The storage is not on the heap and must not be freed.
*/
static void *gNB_L1_thread( void *param ) {
static int gNB_thread_rxtx_status;
PHY_VARS_gNB *gNB = (PHY_VARS_gNB *)param;
gNB_L1_proc_t *gNB_proc = &gNB->proc;
gNB_L1_rxtx_proc_t *L1_proc = &gNB_proc->L1_proc;
//PHY_VARS_gNB *gNB = RC.gNB[0][proc->CC_id];
char thread_name[100];
// set default return value
gNB_thread_rxtx_status = 0;
sprintf(thread_name,"gNB_L1_thread");
while (!oai_exit) {
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX0, 0 );
if (wait_on_condition(&L1_proc->mutex,&L1_proc->cond,&L1_proc->instance_cnt,thread_name)<0) break;
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX0, 1 );
int frame_rx = L1_proc->frame_rx;
int slot_rx = L1_proc->slot_rx;
int frame_tx = L1_proc->frame_tx;
int slot_tx = L1_proc->slot_tx;
uint64_t timestamp_tx = L1_proc->timestamp_tx;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SLOT_NUMBER_TX0_GNB,slot_tx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SLOT_NUMBER_RX0_GNB,slot_rx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_GNB,frame_tx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_GNB,frame_rx);
if (oai_exit) break;
if (gNB->CC_id==0) {
if (rxtx(gNB,frame_rx,slot_rx,frame_tx,slot_tx,thread_name) < 0) break;
}
if (release_thread(&L1_proc->mutex,&L1_proc->instance_cnt,thread_name)<0) break;
if(get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) wakeup_tx(gNB,frame_rx,slot_rx,frame_tx,slot_tx,timestamp_tx);
else if(get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT) wakeup_txfh(gNB,L1_proc,frame_tx,slot_tx,timestamp_tx);
} // while !oai_exit
LOG_D(PHY, " *** Exiting gNB thread RXn_TXnp4\n");
gNB_thread_rxtx_status = 0;
return &gNB_thread_rxtx_status;
}
#if 0
// Wait for gNB application initialization to be complete (gNB registration to MME)
static void wait_system_ready (char *message, volatile int *start_flag) {
static char *indicator[] = {". ", ".. ", "... ", ".... ", ".....",
" ....", " ...", " ..", " .", " "
};
int i = 0;
while ((!oai_exit) && (*start_flag == 0)) {
LOG_N(EMU, message, indicator[i]);
fflush(stdout);
i = (i + 1) % (sizeof(indicator) / sizeof(indicator[0]));
usleep(200000);
}
LOG_D(EMU,"\n");
}
#endif
void gNB_top(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, char *string, struct RU_t_s *ru) {
gNB_L1_proc_t *proc = &gNB->proc;
gNB_L1_rxtx_proc_t *L1_proc = &proc->L1_proc;
NR_DL_FRAME_PARMS *fp = ru->nr_frame_parms;
RU_proc_t *ru_proc=&ru->proc;
proc->frame_rx = frame_rx;
proc->slot_rx = slot_rx;
sl_ahead = sf_ahead*fp->slots_per_subframe;
if (!oai_exit) {
T(T_ENB_MASTER_TICK, T_INT(0), T_INT(proc->frame_rx), T_INT(proc->slot_rx));
L1_proc->timestamp_tx = ru_proc->timestamp_rx + (sf_ahead*fp->samples_per_subframe);
L1_proc->frame_rx = ru_proc->frame_rx;
L1_proc->slot_rx = ru_proc->tti_rx;
L1_proc->frame_tx = (L1_proc->slot_rx > (fp->slots_per_frame-1-(fp->slots_per_subframe*sf_ahead))) ? (L1_proc->frame_rx+1)&1023 : L1_proc->frame_rx;
L1_proc->slot_tx = (L1_proc->slot_rx + (fp->slots_per_subframe*sf_ahead))%fp->slots_per_frame;
if (rxtx(gNB,L1_proc->frame_rx,L1_proc->slot_rx,L1_proc->frame_tx,L1_proc->slot_tx,string) < 0) LOG_E(PHY,"gNB %d CC_id %d failed during execution\n",gNB->Mod_id,gNB->CC_id);
ru_proc->timestamp_tx = L1_proc->timestamp_tx;
ru_proc->tti_tx = L1_proc->slot_tx;
ru_proc->frame_tx = L1_proc->frame_tx;
}
}
int wakeup_txfh(PHY_VARS_gNB *gNB,gNB_L1_rxtx_proc_t *proc,int frame_tx,int slot_tx,uint64_t timestamp_tx) {
RU_t *ru;
RU_proc_t *ru_proc;
int waitret = 0, ret = 0, time_ns = 1000*1000;
struct timespec now, abstime;
// note this should depend on the numerology used by the TX L1 thread, set here for 500us slot time
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_GAIN_CONTROL,1);
time_ns = time_ns/gNB->frame_parms.slots_per_subframe;
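/* e.g. with 30 kHz SCS, slots_per_subframe = 2, so the 1 ms budget becomes 1000000/2 ns = 500 us,
   matching the "500us slot time" note above (illustrative value) */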
AssertFatal((ret = pthread_mutex_lock(&proc->mutex_RUs_tx))==0,"mutex_lock returns %d\n",ret);
while (proc->instance_cnt_RUs < 0) {
clock_gettime(CLOCK_REALTIME, &now);
abstime.tv_sec = now.tv_sec;
abstime.tv_nsec = now.tv_nsec + time_ns;
if (abstime.tv_nsec >= 1000*1000*1000) {
abstime.tv_nsec -= 1000*1000*1000;
abstime.tv_sec += 1;
}
if((waitret = pthread_cond_timedwait(&proc->cond_RUs,&proc->mutex_RUs_tx,&abstime)) == 0) break; // this unlocks mutex_rxtx while waiting and then locks it again
}
proc->instance_cnt_RUs = -1;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_UE,proc->instance_cnt_RUs);
AssertFatal((ret = pthread_mutex_unlock(&proc->mutex_RUs_tx))==0,"mutex_unlock returns %d\n",ret);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_GAIN_CONTROL,0);
if (waitret == ETIMEDOUT) {
LOG_W(PHY,"Dropping TX slot (%d.%d) because FH is blocked more than 1 slot times (500us)\n",frame_tx,slot_tx);
AssertFatal((ret=pthread_mutex_lock(&gNB->proc.mutex_RU_tx))==0,"mutex_lock returns %d\n",ret);
gNB->proc.RU_mask_tx = 0;
AssertFatal((ret=pthread_mutex_unlock(&gNB->proc.mutex_RU_tx))==0,"mutex_unlock returns %d\n",ret);
AssertFatal((ret=pthread_mutex_lock(&proc->mutex_RUs_tx))==0,"mutex_lock returns %d\n",ret);
proc->instance_cnt_RUs = 0;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_UE,proc->instance_cnt_RUs);
AssertFatal((ret=pthread_mutex_unlock(&proc->mutex_RUs_tx))==0,"mutex_unlock returns %d\n",ret);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_UE,1);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_UE,0);
return(-1);
}
for(int i=0; i<gNB->num_RU; i++)
{
ru = gNB->RU_list[i];
ru_proc = &ru->proc;
if (ru_proc->instance_cnt_gNBs == 0) {
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_TRX_TST_UE, 1);
LOG_E(PHY,"Frame %d, subframe %d: TX FH thread busy, dropping Frame %d, subframe %d\n", ru_proc->frame_tx, ru_proc->tti_tx, proc->frame_rx, proc->slot_rx);
AssertFatal((ret=pthread_mutex_lock(&gNB->proc.mutex_RU_tx))==0,"mutex_lock returns %d\n",ret);
gNB->proc.RU_mask_tx = 0;
AssertFatal((ret=pthread_mutex_unlock(&gNB->proc.mutex_RU_tx))==0,"mutex_unlock returns %d\n",ret);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_TRX_TST_UE, 0);
return(-1);
}
AssertFatal((ret = pthread_mutex_lock(&ru_proc->mutex_gNBs))==0,"ERROR pthread_mutex_lock failed on mutex_gNBs L1_thread_tx with ret=%d\n",ret);
ru_proc->instance_cnt_gNBs = 0;
ru_proc->timestamp_tx = timestamp_tx;
ru_proc->tti_tx = slot_tx;
ru_proc->frame_tx = frame_tx;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX1_UE, ru_proc->instance_cnt_gNBs);
LOG_D(PHY,"Signaling tx_thread_fh for %d.%d\n",frame_tx,slot_tx);
// the thread can now be woken up
AssertFatal(pthread_cond_signal(&ru_proc->cond_gNBs) == 0,
"[gNB] ERROR pthread_cond_signal for gNB TXnp4 thread\n");
AssertFatal((ret=pthread_mutex_unlock(&ru_proc->mutex_gNBs))==0,"mutex_unlock returned %d\n",ret);
}
return(0);
}
int wakeup_tx(PHY_VARS_gNB *gNB,int frame_rx,int slot_rx,int frame_tx,int slot_tx,uint64_t timestamp_tx) {
gNB_L1_rxtx_proc_t *L1_proc_tx = &gNB->proc.L1_proc_tx;
int ret;
AssertFatal((ret = pthread_mutex_lock(&L1_proc_tx->mutex))==0,"mutex_lock returns %d\n",ret);
while(L1_proc_tx->instance_cnt == 0) {
pthread_cond_wait(&L1_proc_tx->cond,&L1_proc_tx->mutex);
}
L1_proc_tx->instance_cnt = 0;
L1_proc_tx->slot_rx = slot_rx;
L1_proc_tx->frame_rx = frame_rx;
L1_proc_tx->slot_tx = slot_tx;
L1_proc_tx->frame_tx = frame_tx;
L1_proc_tx->timestamp_tx = timestamp_tx;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX1_UE,1);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX1_UE,0);
// the thread can now be woken up
AssertFatal(pthread_cond_signal(&L1_proc_tx->cond) == 0, "ERROR pthread_cond_signal for gNB L1 thread\n");
AssertFatal((ret=pthread_mutex_unlock(&L1_proc_tx->mutex))==0,"mutex_unlock returns %d\n",ret);
return(0);
}
int wakeup_rxtx(PHY_VARS_gNB *gNB,RU_t *ru) {
gNB_L1_proc_t *proc=&gNB->proc;
gNB_L1_rxtx_proc_t *L1_proc=&proc->L1_proc;
NR_DL_FRAME_PARMS *fp = &gNB->frame_parms;
RU_proc_t *ru_proc=&ru->proc;
int ret;
int i;
struct timespec abstime;
int time_ns = 50000;
int wait_timer = 0;
bool do_last_check = 1;
AssertFatal((ret=pthread_mutex_lock(&proc->mutex_RU))==0,"mutex_lock returns %d\n",ret);
for (i=0; i<gNB->num_RU; i++) {
if (ru == gNB->RU_list[i]) {
if ((proc->RU_mask&(1<<i)) > 0)
LOG_E(PHY,"gNB %d frame %d, subframe %d : previous information from RU %d (num_RU %d,mask %x) has not been served yet!\n",
gNB->Mod_id,proc->frame_rx,proc->slot_rx,ru->idx,gNB->num_RU,proc->RU_mask);
proc->RU_mask |= (1<<i);
}
}
if (proc->RU_mask != (1<<gNB->num_RU)-1) { // not all RUs have provided their information so return
LOG_E(PHY,"Not all RUs have provided their info\n");
AssertFatal((ret=pthread_mutex_unlock(&proc->mutex_RU))==0,"mutex_unlock returns %d\n",ret);
return(0);
}
else { // all RUs have provided their information so continue on and wakeup gNB processing
proc->RU_mask = 0;
AssertFatal((ret=pthread_mutex_unlock(&proc->mutex_RU))==0,"mutex_unlock returns %d\n",ret);
}
// wake up TX for subframe n+sf_ahead
// lock the TX mutex and make sure the thread is ready
while (wait_timer < 200) {
clock_gettime(CLOCK_REALTIME, &abstime);
abstime.tv_nsec = abstime.tv_nsec + time_ns;
if (abstime.tv_nsec >= 1000*1000*1000) {
abstime.tv_nsec -= 1000*1000*1000;
abstime.tv_sec += 1;
}
AssertFatal((ret=pthread_mutex_timedlock(&L1_proc->mutex, &abstime)) == 0,"mutex_lock returns %d\n", ret);
if (L1_proc->instance_cnt == 0) { // L1_thread is busy so wait for a bit
AssertFatal((ret=pthread_mutex_unlock( &L1_proc->mutex))==0,"mutex_unlock returns %d\n",ret);
wait_timer += 50;
usleep(50);
}
else {
do_last_check = 0;
break;
}
}
if (do_last_check) {
AssertFatal((ret=pthread_mutex_timedlock(&L1_proc->mutex, &abstime)) == 0,"mutex_lock returns %d\n", ret);
if (L1_proc->instance_cnt == 0) { // L1_thread is busy so abort the subframe
AssertFatal((ret=pthread_mutex_unlock( &L1_proc->mutex))==0,"mutex_unlock returns %d\n",ret);
LOG_W(PHY,"L1_thread isn't ready in %d.%d, aborting RX processing\n",ru_proc->frame_rx,ru_proc->tti_rx);
return (-1);
}
}
++L1_proc->instance_cnt;
// We have just received and processed the common part of a subframe, say n.
// TS_rx is the last received timestamp (start of 1st slot), TS_tx is the desired
// transmitted timestamp of the next TX slot (first).
// The last (TS_rx mod samples_per_frame) was n*samples_per_tti,
// we want to generate subframe (n+sf_ahead), so TS_tx = TS_rx+sf_ahead*samples_per_tti,
// and proc->slot_tx = proc->slot_rx+sf_ahead
L1_proc->timestamp_tx = ru_proc->timestamp_rx + (sf_ahead*fp->samples_per_subframe);
L1_proc->frame_rx = ru_proc->frame_rx;
L1_proc->slot_rx = ru_proc->tti_rx;
L1_proc->frame_tx = (L1_proc->slot_rx > (fp->slots_per_frame-1-(fp->slots_per_subframe*sf_ahead))) ? (L1_proc->frame_rx+1)&1023 : L1_proc->frame_rx;
L1_proc->slot_tx = (L1_proc->slot_rx + (fp->slots_per_subframe*sf_ahead))%fp->slots_per_frame;
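// Illustrative example (actual values depend on the configuration): with slots_per_subframe = 2
// (30 kHz SCS), slots_per_frame = 20 and sf_ahead = 4, this gives slot_tx = (slot_rx + 8) % 20,
// and frame_tx advances to frame_rx + 1 whenever slot_rx > 11, i.e. when the TX slot wraps into
// the next radio frame.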
LOG_D(PHY,"wakeupL1: passing parameter IC = %d, RX: %d.%d, TX: %d.%d to L1 sf_ahead = %d\n", L1_proc->instance_cnt, L1_proc->frame_rx, L1_proc->slot_rx, L1_proc->frame_tx, L1_proc->slot_tx, sf_ahead);
pthread_mutex_unlock( &L1_proc->mutex );
// the thread can now be woken up
if (pthread_cond_signal(&L1_proc->cond) != 0) {
LOG_E( PHY, "[gNB] ERROR pthread_cond_signal for gNB RXn-TXnp4 thread\n");
exit_fun( "ERROR pthread_cond_signal" );
return(-1);
}
return(0);
}
/*
void wakeup_prach_gNB(PHY_VARS_gNB *gNB,RU_t *ru,int frame,int subframe) {
gNB_L1_proc_t *proc = &gNB->proc;
LTE_DL_FRAME_PARMS *fp=&gNB->frame_parms;
int i;
if (ru!=NULL) {
pthread_mutex_lock(&proc->mutex_RU_PRACH);
for (i=0;i<gNB->num_RU;i++) {
if (ru == gNB->RU_list[i]) {
LOG_D(PHY,"frame %d, subframe %d: RU %d for gNB %d signals PRACH (mask %x, num_RU %d)\n",frame,subframe,i,gNB->Mod_id,proc->RU_mask_prach,gNB->num_RU);
if ((proc->RU_mask_prach&(1<<i)) > 0)
LOG_E(PHY,"gNB %d frame %d, subframe %d : previous information (PRACH) from RU %d (num_RU %d, mask %x) has not been served yet!\n",
gNB->Mod_id,frame,subframe,ru->idx,gNB->num_RU,proc->RU_mask_prach);
proc->RU_mask_prach |= (1<<i);
}
}
if (proc->RU_mask_prach != (1<<gNB->num_RU)-1) { // not all RUs have provided their information so return
pthread_mutex_unlock(&proc->mutex_RU_PRACH);
return;
}
else { // all RUs have provided their information so continue on and wakeup gNB processing
proc->RU_mask_prach = 0;
pthread_mutex_unlock(&proc->mutex_RU_PRACH);
}
}
// check if we have to detect PRACH first
if (is_prach_subframe(fp,frame,subframe)>0) {
LOG_D(PHY,"Triggering prach processing, frame %d, subframe %d\n",frame,subframe);
if (proc->instance_cnt_prach == 0) {
LOG_W(PHY,"[gNB] Frame %d Subframe %d, dropping PRACH\n", frame,subframe);
return;
}
// wake up thread for PRACH RX
if (pthread_mutex_lock(&proc->mutex_prach) != 0) {
LOG_E( PHY, "[gNB] ERROR pthread_mutex_lock for gNB PRACH thread %d (IC %d)\n", proc->thread_index, proc->instance_cnt_prach);
exit_fun( "error locking mutex_prach" );
return;
}
++proc->instance_cnt_prach;
// set timing for prach thread
proc->frame_prach = frame;
proc->subframe_prach = subframe;
// the thread can now be woken up
if (pthread_cond_signal(&proc->cond_prach) != 0) {
LOG_E( PHY, "[gNB] ERROR pthread_cond_signal for gNB PRACH thread %d\n", proc->thread_index);
exit_fun( "ERROR pthread_cond_signal" );
return;
}
pthread_mutex_unlock( &proc->mutex_prach );
}
}*/
/*!
* \brief The prach receive thread of gNB.
* \param param is a \ref gNB_L1_proc_t structure which contains the information on what to process.
* \returns a pointer to an int. The storage is not on the heap and must not be freed.
*/
/*
static void* gNB_thread_prach( void* param ) {
static int gNB_thread_prach_status;
PHY_VARS_gNB *gNB= (PHY_VARS_gNB *)param;
gNB_L1_proc_t *proc = &gNB->proc;
// set default return value
gNB_thread_prach_status = 0;
while (!oai_exit) {
if (oai_exit) break;
if (wait_on_condition(&proc->mutex_prach,&proc->cond_prach,&proc->instance_cnt_prach,"gNB_prach_thread") < 0) break;
LOG_D(PHY,"Running gNB prach procedures\n");
prach_procedures(gNB ,0);
if (release_thread(&proc->mutex_prach,&proc->instance_cnt_prach,"gNB_prach_thread") < 0) break;
}
LOG_I(PHY, "Exiting gNB thread PRACH\n");
gNB_thread_prach_status = 0;
return &gNB_thread_prach_status;
}
*/
extern void init_td_thread(PHY_VARS_gNB *);
extern void init_te_thread(PHY_VARS_gNB *);
static void *process_stats_thread(void *param) {
PHY_VARS_gNB *gNB = (PHY_VARS_gNB *)param;
......@@ -809,71 +338,13 @@ static void *process_stats_thread(void *param) {
return(NULL);
}
void init_gNB_proc(int inst) {
int i=0;
int CC_id = 0;
void init_gNB_Tpool(int inst) {
PHY_VARS_gNB *gNB;
gNB_L1_proc_t *proc;
gNB_L1_rxtx_proc_t *L1_proc,*L1_proc_tx;
// LOG_I(PHY,"%s(inst:%d) RC.nb_nr_CC[inst]:%d \n",__FUNCTION__,inst,RC.nb_nr_CC[inst]);
gNB = RC.gNB[inst];
LOG_I(PHY,"Initializing gNB processes instance:%d CC_id %d \n",inst,CC_id);
proc = &gNB->proc;
L1_proc = &proc->L1_proc;
L1_proc_tx = &proc->L1_proc_tx;
L1_proc->instance_cnt = -1;
L1_proc_tx->instance_cnt = -1;
L1_proc->instance_cnt_RUs = 0;
L1_proc_tx->instance_cnt_RUs = 0;
proc->instance_cnt_prach = -1;
proc->instance_cnt_asynch_rxtx = -1;
proc->CC_id = CC_id;
proc->first_rx =1;
proc->first_tx =1;
proc->RU_mask =0;
proc->RU_mask_tx = (1<<gNB->num_RU)-1;
proc->RU_mask_prach =0;
pthread_mutex_init( &gNB->UL_INFO_mutex, NULL);
pthread_mutex_init( &L1_proc->mutex, NULL);
pthread_mutex_init( &L1_proc_tx->mutex, NULL);
pthread_cond_init( &L1_proc->cond, NULL);
pthread_cond_init( &L1_proc_tx->cond, NULL);
pthread_mutex_init( &proc->mutex_prach, NULL);
pthread_mutex_init( &proc->mutex_asynch_rxtx, NULL);
pthread_mutex_init( &proc->mutex_RU,NULL);
pthread_mutex_init( &proc->mutex_RU_tx,NULL);
pthread_mutex_init( &proc->mutex_RU_PRACH,NULL);
pthread_cond_init( &proc->cond_prach, NULL);
pthread_cond_init( &proc->cond_asynch_rxtx, NULL);
LOG_I(PHY,"gNB->single_thread_flag:%d\n", gNB->single_thread_flag);
if (get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT || get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) {
threadCreate( &L1_proc->pthread, gNB_L1_thread, gNB, "L1_proc", -1, OAI_PRIORITY_RT );
threadCreate( &L1_proc_tx->pthread, gNB_L1_thread_tx, gNB,"L1_proc_tx", -1, OAI_PRIORITY_RT);
}
if(opp_enabled == 1) threadCreate(&proc->L1_stats_thread, process_stats_thread,(void *)gNB, "time_meas", -1, OAI_PRIORITY_RT_LOW);
//pthread_create( &proc->pthread_prach, attr_prach, gNB_thread_prach, gNB );
char name[16];
if (gNB->single_thread_flag==0) {
snprintf( name, sizeof(name), "L1 %d", i );
pthread_setname_np( L1_proc->pthread, name );
snprintf( name, sizeof(name), "L1TX %d", i );
pthread_setname_np( L1_proc_tx->pthread, name );
}
AssertFatal(proc->instance_cnt_prach == -1,"instance_cnt_prach = %d\n",proc->instance_cnt_prach);
/* setup PHY proc TX sync mechanism */
pthread_mutex_init(&sync_phy_proc.mutex_phy_proc_tx, NULL);
pthread_cond_init(&sync_phy_proc.cond_phy_proc_tx, NULL);
sync_phy_proc.phy_proc_CC_id = 0;
gNB_L1_proc_t *proc = &gNB->proc;
// ULSCH decoding threadpool
gNB->threadPool = (tpool_t*)malloc(sizeof(tpool_t));
gNB->respDecode = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
int numCPU = sysconf(_SC_NPROCESSORS_ONLN);
int threadCnt = min(numCPU, gNB->pusch_proc_threads);
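// pool size: the pusch_proc_threads value from the gNB configuration (e.g. 8 in the sample
// config above) clamped to the number of online CPUs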
char ul_pool[80];
......@@ -884,70 +355,40 @@ void init_gNB_proc(int inst) {
s_offset += 3;
}
initTpool(ul_pool, gNB->threadPool, false);
// ULSCH decoder result FIFO
gNB->respDecode = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
initNotifiedFIFO(gNB->respDecode);
}
// L1 RX result FIFO
gNB->resp_L1 = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
initNotifiedFIFO(gNB->resp_L1);
// L1 TX result FIFO
gNB->resp_L1_tx = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
initNotifiedFIFO(gNB->resp_L1_tx);
// RU TX result FIFO
gNB->resp_RU_tx = (notifiedFIFO_t*) malloc(sizeof(notifiedFIFO_t));
initNotifiedFIFO(gNB->resp_RU_tx);
// Stats measurement thread
if(opp_enabled == 1) threadCreate(&proc->L1_stats_thread, process_stats_thread,(void *)gNB, "time_meas", -1, OAI_PRIORITY_RT_LOW);
}
/*!
* \brief Terminate gNB TX and RX threads.
*/
void kill_gNB_proc(int inst) {
int *status;
PHY_VARS_gNB *gNB;
gNB_L1_proc_t *proc;
gNB_L1_rxtx_proc_t *L1_proc, *L1_proc_tx;
gNB=RC.gNB[inst];
proc = &gNB->proc;
L1_proc = &proc->L1_proc;
L1_proc_tx = &proc->L1_proc_tx;
LOG_I(PHY, "Killing TX inst %d\n",inst );
if (get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT || get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) {
pthread_mutex_lock(&L1_proc->mutex);
L1_proc->instance_cnt = 0;
pthread_cond_signal(&L1_proc->cond);
pthread_mutex_unlock(&L1_proc->mutex);
pthread_mutex_lock(&L1_proc_tx->mutex);
L1_proc_tx->instance_cnt = 0;
pthread_cond_signal(&L1_proc_tx->cond);
pthread_mutex_unlock(&L1_proc_tx->mutex);
}
proc->instance_cnt_prach = 0;
pthread_cond_signal( &proc->cond_prach );
pthread_cond_signal( &proc->cond_asynch_rxtx );
pthread_cond_broadcast(&sync_phy_proc.cond_phy_proc_tx);
// LOG_D(PHY, "joining pthread_prach\n");
// pthread_join( proc->pthread_prach, (void**)&status );
LOG_I(PHY, "Destroying prach mutex/cond\n");
pthread_mutex_destroy( &proc->mutex_prach );
pthread_cond_destroy( &proc->cond_prach );
LOG_I(PHY, "Destroying UL_INFO mutex\n");
pthread_mutex_destroy(&gNB->UL_INFO_mutex);
if (get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT || get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) {
LOG_I(PHY, "Joining L1_proc mutex/cond\n");
pthread_join( L1_proc->pthread, (void **)&status );
LOG_I(PHY, "Joining L1_proc_tx mutex/cond\n");
pthread_join( L1_proc_tx->pthread, (void **)&status );
}
LOG_I(PHY, "Destroying L1_proc mutex/cond\n");
pthread_mutex_destroy( &L1_proc->mutex );
pthread_cond_destroy( &L1_proc->cond );
LOG_I(PHY, "Destroying L1_proc_tx mutex/cond\n");
pthread_mutex_destroy( &L1_proc_tx->mutex );
pthread_cond_destroy( &L1_proc_tx->cond );
pthread_mutex_destroy( &proc->mutex_RU );
pthread_mutex_destroy( &proc->mutex_RU_tx );
}
void reset_opp_meas(void) {
int sfn;
reset_meas(&softmodem_stats_mt);
......@@ -1015,15 +456,9 @@ printf("after %p\n", gNB->common_vars.rxdataF[aa]);
* (not tested in other modes).
*/
//init_precoding_weights(RC.gNB[inst]);
init_gNB_proc(inst);
init_gNB_Tpool(inst);
}
for (ru_id=0; ru_id<RC.nb_RU; ru_id++) {
AssertFatal(RC.ru[ru_id]!=NULL,"ru_id %d is null\n",ru_id);
RC.ru[ru_id]->nr_wakeup_rxtx = wakeup_rxtx;
// RC.ru[ru_id]->wakeup_prach_eNB = wakeup_prach_gNB;
RC.ru[ru_id]->gNB_top = gNB_top;
}
}
void init_gNB(int single_thread_flag,int wait_for_sync) {
......
......@@ -788,259 +788,6 @@ void tx_rf(RU_t *ru,int frame,int slot, uint64_t timestamp) {
}
}
/*!
* \brief The Asynchronous RX/TX FH thread of RAU/RCC/gNB/RRU.
* This handles the RX FH for an asynchronous RRU/UE
* \param param is a \ref gNB_L1_proc_t structure which contains the information on what to process.
* \returns a pointer to an int. The storage is not on the heap and must not be freed.
*/
void *ru_thread_asynch_rxtx( void *param ) {
static int ru_thread_asynch_rxtx_status;
RU_t *ru = (RU_t *)param;
RU_proc_t *proc = &ru->proc;
nfapi_nr_config_request_scf_t *cfg = &ru->config;
int slot=0, frame=0;
// wait for top-level synchronization and do one acquisition to get timestamp for setting frame/subframe
wait_sync("ru_thread_asynch_rxtx");
// wait for top-level synchronization and do one acquisition to get timestamp for setting frame/subframe
printf( "waiting for devices (ru_thread_asynch_rx)\n");
wait_on_condition(&proc->mutex_asynch_rxtx,&proc->cond_asynch_rxtx,&proc->instance_cnt_asynch_rxtx,"thread_asynch");
printf( "devices ok (ru_thread_asynch_rx)\n");
while (!oai_exit) {
if (slot==ru->nr_frame_parms->slots_per_frame) {
slot=0;
frame++;
frame&=1023;
} else {
slot++;
}
LOG_D(PHY,"ru_thread_asynch_rxtx: Waiting on incoming fronthaul\n");
// asynchronous receive from north (RRU IF4/IF5)
if (ru->fh_north_asynch_in) {
if ((nr_slot_select(cfg,frame,slot) & NR_DOWNLINK_SLOT)>0)
ru->fh_north_asynch_in(ru,&frame,&slot);
} else AssertFatal(1==0,"Unknown function in ru_thread_asynch_rxtx\n");
}
ru_thread_asynch_rxtx_status=0;
return(&ru_thread_asynch_rxtx_status);
}
/*!
* \brief The prach receive thread of RU.
* \param param is a \ref RU_proc_t structure which contains the information on what to process.
* \returns a pointer to an int. The storage is not on the heap and must not be freed.
*/
void *ru_thread_prach( void *param ) {
static int ru_thread_prach_status;
RU_t *ru = (RU_t *)param;
RU_proc_t *proc = (RU_proc_t *)&ru->proc;
// set default return value
ru_thread_prach_status = 0;
while (RC.ru_mask>0) {
usleep(1e6);
LOG_I(PHY,"%s() RACH waiting for RU to be configured\n", __FUNCTION__);
}
LOG_I(PHY,"%s() RU configured - RACH processing thread running\n", __FUNCTION__);
while (!oai_exit) {
if (wait_on_condition(&proc->mutex_prach,&proc->cond_prach,&proc->instance_cnt_prach,"ru_prach_thread") < 0) break;
/*VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_RU_PRACH_RX, 1 );
if (ru->gNB_list[0]){
prach_procedures(
ru->gNB_list[0],0
);
}
else {
rx_prach(NULL,
ru,
NULL,
NULL,
NULL,
proc->frame_prach,
0,0
);
}
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_RU_PRACH_RX, 0 );*/
if (release_thread(&proc->mutex_prach,&proc->instance_cnt_prach,"ru_prach_thread") < 0) break;
}
LOG_I(PHY, "Exiting RU thread PRACH\n");
ru_thread_prach_status = 0;
return &ru_thread_prach_status;
}
int wakeup_synch(RU_t *ru) {
struct timespec wait;
wait.tv_sec=0;
wait.tv_nsec=5000000L;
// wake up synch thread
// lock the synch mutex and make sure the thread is ready
if (pthread_mutex_timedlock(&ru->proc.mutex_synch,&wait) != 0) {
LOG_E( PHY, "[RU] ERROR pthread_mutex_lock for RU synch thread (IC %d)\n", ru->proc.instance_cnt_synch );
exit_fun( "error locking mutex_synch" );
return(-1);
}
++ru->proc.instance_cnt_synch;
// the thread can now be woken up
if (pthread_cond_signal(&ru->proc.cond_synch) != 0) {
LOG_E( PHY, "[RU] ERROR pthread_cond_signal for RU synch thread\n");
exit_fun( "ERROR pthread_cond_signal" );
return(-1);
}
pthread_mutex_unlock( &ru->proc.mutex_synch );
return(0);
}
void do_ru_synch(RU_t *ru) {
NR_DL_FRAME_PARMS *fp = ru->nr_frame_parms;
RU_proc_t *proc = &ru->proc;
int i;
void *rxp[2],*rxp2[2];
int32_t dummy_rx[ru->nb_rx][fp->samples_per_subframe] __attribute__((aligned(32)));
int rxs;
int ic;
// initialize the synchronization buffer to the common_vars.rxdata
for (int i=0; i<ru->nb_rx; i++)
rxp[i] = &ru->common.rxdata[i][0];
double temp_freq1 = ru->rfdevice.openair0_cfg->rx_freq[0];
double temp_freq2 = ru->rfdevice.openair0_cfg->tx_freq[0];
for (i=0; i<4; i++) {
ru->rfdevice.openair0_cfg->rx_freq[i] = ru->rfdevice.openair0_cfg->tx_freq[i];
ru->rfdevice.openair0_cfg->tx_freq[i] = temp_freq1;
}
ru->rfdevice.trx_set_freq_func(&ru->rfdevice,ru->rfdevice.openair0_cfg,0);
while ((ru->in_synch ==0)&&(!oai_exit)) {
// read in frame
rxs = ru->rfdevice.trx_read_func(&ru->rfdevice,
&(proc->timestamp_rx),
rxp,
fp->samples_per_subframe*10,
ru->nb_rx);
if (rxs != fp->samples_per_subframe*10) LOG_E(PHY,"requested %d samples, got %d\n",fp->samples_per_subframe*10,rxs);
// wakeup synchronization processing thread
wakeup_synch(ru);
ic=0;
while ((ic>=0)&&(!oai_exit)) {
// continuously read in frames, 1ms at a time,
// until we are done with the synchronization procedure
for (i=0; i<ru->nb_rx; i++)
rxp2[i] = (void *)&dummy_rx[i][0];
for (i=0; i<10; i++)
rxs = ru->rfdevice.trx_read_func(&ru->rfdevice,
&(proc->timestamp_rx),
rxp2,
fp->samples_per_subframe,
ru->nb_rx);
pthread_mutex_lock(&ru->proc.mutex_synch);
ic = ru->proc.instance_cnt_synch;
pthread_mutex_unlock(&ru->proc.mutex_synch);
} // ic>=0
} // in_synch==0
// read in rx_offset samples
LOG_I(PHY,"Resynchronizing by %d samples\n",ru->rx_offset);
rxs = ru->rfdevice.trx_read_func(&ru->rfdevice,
&(proc->timestamp_rx),
rxp,
ru->rx_offset,
ru->nb_rx);
for (i=0; i<4; i++) {
ru->rfdevice.openair0_cfg->rx_freq[i] = temp_freq1;
ru->rfdevice.openair0_cfg->tx_freq[i] = temp_freq2;
}
ru->rfdevice.trx_set_freq_func(&ru->rfdevice,ru->rfdevice.openair0_cfg,0);
}
void wakeup_gNB_L1s(RU_t *ru) {
int i;
PHY_VARS_gNB **gNB_list = ru->gNB_list;
LOG_D(PHY,"wakeup_gNB_L1s (num %d) for RU %d ru->gNB_top:%p\n",ru->num_gNB,ru->idx, ru->gNB_top);
if (ru->num_gNB==1 && ru->gNB_top!=0 && get_thread_parallel_conf() == PARALLEL_SINGLE_THREAD) {
// call gNB function directly
char string[20];
sprintf(string,"Incoming RU %u",ru->idx);
LOG_D(PHY,"RU %d Call gNB_top\n",ru->idx);
ru->gNB_top(gNB_list[0],ru->proc.frame_rx,ru->proc.tti_rx,string,ru);
} else {
LOG_D(PHY,"ru->num_gNB:%d\n", ru->num_gNB);
for (i=0; i<ru->num_gNB; i++) {
LOG_D(PHY,"ru->wakeup_rxtx:%p\n", ru->nr_wakeup_rxtx);
if (ru->nr_wakeup_rxtx!=0 && ru->nr_wakeup_rxtx(gNB_list[i],ru) < 0) {
LOG_E(PHY,"could not wakeup gNB rxtx process for subframe %d\n", ru->proc.tti_rx);
}
}
}
}
int wakeup_prach_ru(RU_t *ru) {
struct timespec wait;
wait.tv_sec=0;
wait.tv_nsec=5000000L;
if (pthread_mutex_timedlock(&ru->proc.mutex_prach,&wait) !=0) {
LOG_E( PHY, "[RU] ERROR pthread_mutex_lock for RU prach thread (IC %d)\n", ru->proc.instance_cnt_prach);
exit_fun( "error locking mutex_rxtx" );
return(-1);
}
if (ru->proc.instance_cnt_prach==-1) {
++ru->proc.instance_cnt_prach;
ru->proc.frame_prach = ru->proc.frame_rx;
ru->proc.subframe_prach = ru->proc.tti_rx;
// DJP - think prach_procedures() is looking at gNB frame_prach
if (ru->gNB_list[0]) {
ru->gNB_list[0]->proc.frame_prach = ru->proc.frame_rx;
ru->gNB_list[0]->proc.slot_prach = ru->proc.tti_rx;
}
LOG_I(PHY,"RU %d: waking up PRACH thread\n",ru->idx);
// the thread can now be woken up
AssertFatal(pthread_cond_signal(&ru->proc.cond_prach) == 0, "ERROR pthread_cond_signal for RU prach thread\n");
} else LOG_W(PHY,"RU prach thread busy, skipping\n");
pthread_mutex_unlock( &ru->proc.mutex_prach );
return(0);
}
// this is for RU with local RF unit
void fill_rf_config(RU_t *ru, char *rf_config_file) {
int i;
......@@ -1299,136 +1046,61 @@ void *ru_stats_thread(void *param) {
return(NULL);
}
void *ru_thread_tx( void *param ) {
RU_t *ru = (RU_t *)param;
RU_proc_t *proc = &ru->proc;
NR_DL_FRAME_PARMS *fp = ru->nr_frame_parms;
PHY_VARS_gNB *gNB;
gNB_L1_proc_t *gNB_proc;
gNB_L1_rxtx_proc_t *L1_proc;
char filename[40];
int print_frame = 8;
int i = 0;
int ret;
wait_on_condition(&proc->mutex_FH1,&proc->cond_FH1,&proc->instance_cnt_FH1,"ru_thread_tx");
printf( "ru_thread_tx ready\n");
while (!oai_exit) {
LOG_D(PHY,"ru_thread_tx: Waiting for TX processing\n");
// wait until the gNBs have finished subframe RX n and TX n+4
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_RU_TX_WAIT, 1 );
wait_on_condition(&proc->mutex_gNBs,&proc->cond_gNBs,&proc->instance_cnt_gNBs,"ru_thread_tx");
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_RU_TX_WAIT, 0 );
ret = pthread_mutex_lock(&proc->mutex_gNBs);
AssertFatal(ret == 0,"mutex_lock return %d\n",ret);
int frame_tx=proc->frame_tx;
int tti_tx =proc->tti_tx;
uint64_t timestamp_tx = proc->timestamp_tx;
void ru_tx_func(void *param) {
ret = pthread_mutex_unlock(&proc->mutex_gNBs);
AssertFatal(ret == 0,"mutex_lock returns %d\n",ret);
if (oai_exit) break;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_RU, frame_tx );
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_TTI_NUMBER_TX0_RU, tti_tx );
processingData_RU_t *info = (processingData_RU_t *) param;
RU_t *ru = info->ru;
NR_DL_FRAME_PARMS *fp = ru->nr_frame_parms;
int frame_tx = info->frame_tx;
int slot_tx = info->slot_tx;
int print_frame = 8;
char filename[40];
// do TX front-end processing if needed (precoding and/or IDFTs)
if (ru->feptx_prec) ru->feptx_prec(ru,frame_tx,tti_tx);
// do TX front-end processing if needed (precoding and/or IDFTs)
if (ru->feptx_prec) ru->feptx_prec(ru,frame_tx,slot_tx);
// do OFDM with/without TX front-end processing if needed
if ((ru->fh_north_asynch_in == NULL) && (ru->feptx_ofdm)) ru->feptx_ofdm(ru,frame_tx,tti_tx);
// do OFDM with/without TX front-end processing if needed
if ((ru->fh_north_asynch_in == NULL) && (ru->feptx_ofdm)) ru->feptx_ofdm(ru,frame_tx,slot_tx);
if(!emulate_rf) {
// do outgoing fronthaul (south) if needed
if ((ru->fh_north_asynch_in == NULL) && (ru->fh_south_out)) ru->fh_south_out(ru,frame_tx,tti_tx,timestamp_tx);
if(!emulate_rf) {
// do outgoing fronthaul (south) if needed
if ((ru->fh_north_asynch_in == NULL) && (ru->fh_south_out)) ru->fh_south_out(ru,frame_tx,slot_tx,info->timestamp_tx);
if (ru->fh_north_out) ru->fh_north_out(ru);
} else {
if(proc->frame_tx == print_frame) {
for (i=0; i<ru->nb_tx; i++) {
if (ru->fh_north_out) ru->fh_north_out(ru);
} else {
if(frame_tx == print_frame) {
for (int i=0; i<ru->nb_tx; i++) {
if(proc->tti_tx == 0) {
sprintf(filename,"gNBdataF_frame%d_sl%d.m", print_frame, proc->tti_tx);
LOG_M(filename,"txdataF_frame",&ru->gNB_list[0]->common_vars.txdataF[i][0],fp->samples_per_frame_wCP, 1, 1);
if(slot_tx == 0) {
sprintf(filename,"gNBdataF_frame%d_sl%d.m", print_frame, slot_tx);
LOG_M(filename,"txdataF_frame",&ru->gNB_list[0]->common_vars.txdataF[i][0],fp->samples_per_frame_wCP, 1, 1);
sprintf(filename,"tx%ddataF_frame%d_sl%d.m", i, print_frame, proc->tti_tx);
LOG_M(filename,"txdataF_frame",&ru->common.txdataF[i][0],fp->samples_per_frame_wCP, 1, 1);
sprintf(filename,"tx%ddataF_frame%d_sl%d.m", i, print_frame, slot_tx);
LOG_M(filename,"txdataF_frame",&ru->common.txdataF[i][0],fp->samples_per_frame_wCP, 1, 1);
sprintf(filename,"tx%ddataF_BF_frame%d_sl%d.m", i, print_frame, proc->tti_tx);
LOG_M(filename,"txdataF_BF_frame",&ru->common.txdataF_BF[i][0],fp->samples_per_subframe_wCP, 1, 1);
}
if(proc->tti_tx == 9) {
sprintf(filename,"tx%ddata_frame%d.m", i, print_frame);
LOG_M(filename,"txdata_frame",&ru->common.txdata[i][0],fp->samples_per_frame, 1, 1);
sprintf(filename,"tx%ddata_frame%d.dat", i, print_frame);
FILE *output_fd = fopen(filename,"w");
if (output_fd) {
fwrite(&ru->common.txdata[i][0],
sizeof(int32_t),
fp->samples_per_frame,
output_fd);
fclose(output_fd);
} else {
LOG_E(PHY,"Cannot write to file %s\n",filename);
}
}//if(proc->tti_tx == 9)
}//for (i=0; i<ru->nb_tx; i++)
}//if(proc->frame_tx == print_frame)
}//else emulate_rf
release_thread(&proc->mutex_gNBs,&proc->instance_cnt_gNBs,"ru_thread_tx");
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX1_UE, proc->instance_cnt_gNBs);
for(i = 0; i<ru->num_gNB; i++) {
gNB = ru->gNB_list[i];
gNB_proc = &gNB->proc;
L1_proc = (get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT)? &gNB_proc->L1_proc_tx : &gNB_proc->L1_proc;
ret = pthread_mutex_lock(&gNB_proc->mutex_RU_tx);
AssertFatal(ret == 0,"mutex_lock returns %d\n",ret);
for (int j=0; j<gNB->num_RU; j++) {
if (ru == gNB->RU_list[j]) {
if ((gNB_proc->RU_mask_tx&(1<<j)) > 0)
LOG_E(PHY,"gNB %d frame %d, subframe %d : previous information from RU tx %d (num_RU %d,mask %x) has not been served yet!\n",
gNB->Mod_id,gNB_proc->frame_rx,gNB_proc->slot_rx,ru->idx,gNB->num_RU,gNB_proc->RU_mask_tx);
gNB_proc->RU_mask_tx |= (1<<j);
sprintf(filename,"tx%ddataF_BF_frame%d_sl%d.m", i, print_frame, slot_tx);
LOG_M(filename,"txdataF_BF_frame",&ru->common.txdataF_BF[i][0],fp->samples_per_subframe_wCP, 1, 1);
}
}
if (gNB_proc->RU_mask_tx != (1<<gNB->num_RU)-1) { // not all RUs have provided their information so return
ret = pthread_mutex_unlock(&gNB_proc->mutex_RU_tx);
AssertFatal(ret == 0,"mutex_unlock returns %d\n",ret);
} else { // all RUs TX are finished so send the ready signal to gNB processing
gNB_proc->RU_mask_tx = 0;
ret = pthread_mutex_unlock(&gNB_proc->mutex_RU_tx);
AssertFatal(ret == 0,"mutex_unlock returns %d\n",ret);
ret = pthread_mutex_lock(&L1_proc->mutex_RUs_tx);
AssertFatal(ret == 0,"mutex_lock returns %d\n",ret);
// the thread can now be woken up
if (L1_proc->instance_cnt_RUs == -1) {
L1_proc->instance_cnt_RUs = 0;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_UE,L1_proc->instance_cnt_RUs);
AssertFatal(pthread_cond_signal(&L1_proc->cond_RUs) == 0,
"ERROR pthread_cond_signal for gNB_L1_thread\n");
} //else AssertFatal(1==0,"gNB TX thread is not ready\n");
ret = pthread_mutex_unlock(&L1_proc->mutex_RUs_tx);
AssertFatal(ret == 0,"mutex_unlock returns %d\n",ret);
}
}
}
release_thread(&proc->mutex_FH1,&proc->instance_cnt_FH1,"ru_thread_tx");
return 0;
if(slot_tx == 9) {
sprintf(filename,"tx%ddata_frame%d.m", i, print_frame);
LOG_M(filename,"txdata_frame",&ru->common.txdata[i][0],fp->samples_per_frame, 1, 1);
sprintf(filename,"tx%ddata_frame%d.dat", i, print_frame);
FILE *output_fd = fopen(filename,"w");
if (output_fd) {
fwrite(&ru->common.txdata[i][0],
sizeof(int32_t),
fp->samples_per_frame,
output_fd);
fclose(output_fd);
} else {
LOG_E(PHY,"Cannot write to file %s\n",filename);
}
}//if(slot_tx == 9)
}//for (i=0; i<ru->nb_tx; i++)
}//if(frame_tx == print_frame)
}//else emulate_rf
}
void *ru_thread( void *param ) {
......@@ -1436,12 +1108,11 @@ void *ru_thread( void *param ) {
RU_t *ru = (RU_t *)param;
RU_proc_t *proc = &ru->proc;
NR_DL_FRAME_PARMS *fp = ru->nr_frame_parms;
PHY_VARS_gNB *gNB = RC.gNB[0];
int ret;
int slot = fp->slots_per_frame-1;
int frame = 1023;
char filename[40], threadname[40];
int print_frame = 8;
int i = 0;
char threadname[40];
int aa;
nfapi_nr_config_request_scf_t *cfg = &ru->config;
......@@ -1501,6 +1172,15 @@ void *ru_thread( void *param ) {
pthread_mutex_unlock(&RC.ru_mutex);
wait_sync("ru_thread");
notifiedFIFO_elt_t *msg = newNotifiedFIFO_elt(sizeof(processingData_L1_t),0,gNB->resp_L1,rx_func);
notifiedFIFO_elt_t *msgL1Tx = newNotifiedFIFO_elt(sizeof(processingData_L1_t),0,gNB->resp_L1_tx,tx_func);
notifiedFIFO_elt_t *msgRUTx = newNotifiedFIFO_elt(sizeof(processingData_L1_t),0,gNB->resp_RU_tx,ru_tx_func);
processingData_L1_t *syncMsg;
notifiedFIFO_elt_t *res;
pushNotifiedFIFO(gNB->resp_L1,msg); // to unblock the process in the beginning
pushNotifiedFIFO(gNB->resp_L1_tx,msgL1Tx); // to unblock the process in the beginning
pushNotifiedFIFO(gNB->resp_RU_tx,msgRUTx); // to unblock the process in the beginning
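// Each response FIFO is primed with one pre-allocated element so that the first pullTpool()
// on it returns immediately; after a worker has run the element's callback it comes back to
// the same FIFO and is reused for the next slot (editorial note on the hand-off scheme).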
if(!emulate_rf) {
// Start RF device if any
if (ru->start_rf) {
......@@ -1509,19 +1189,6 @@ void *ru_thread( void *param ) {
else LOG_I(PHY,"RU %d rf device ready\n",ru->idx);
} else LOG_I(PHY,"RU %d no rf device\n",ru->idx);
// if an asnych_rxtx thread exists
// wakeup the thread because the devices are ready at this point
if ((ru->fh_south_asynch_in)||(ru->fh_north_asynch_in)) {
pthread_mutex_lock(&proc->mutex_asynch_rxtx);
proc->instance_cnt_asynch_rxtx=0;
pthread_mutex_unlock(&proc->mutex_asynch_rxtx);
pthread_cond_signal(&proc->cond_asynch_rxtx);
} else LOG_I(PHY,"RU %d no asynch_south interface\n",ru->idx);
// if this is a slave RRU, try to synchronize on the DL frequency
if ((ru->is_slave) && (ru->if_south == LOCAL_RF)) do_ru_synch(ru);
// start trx write thread
if(usrp_tx_thread == 1){
if (ru->start_write_thread){
......@@ -1535,11 +1202,6 @@ void *ru_thread( void *param ) {
}
}
pthread_mutex_lock(&proc->mutex_FH1);
proc->instance_cnt_FH1 = 0;
pthread_mutex_unlock(&proc->mutex_FH1);
pthread_cond_signal(&proc->cond_FH1);
// This is a forever while loop, it loops over subframes which are scheduled by incoming samples from HW devices
while (!oai_exit) {
// these are local subframe/frame counters to check that we are in synch with the fronthaul timing.
......@@ -1557,31 +1219,23 @@ void *ru_thread( void *param ) {
if (ru->fh_south_in) ru->fh_south_in(ru,&frame,&slot);
else AssertFatal(1==0, "No fronthaul interface at south port");
proc->timestamp_tx = proc->timestamp_rx + (sf_ahead*fp->samples_per_subframe);
proc->frame_tx = (proc->tti_rx > (fp->slots_per_frame-1-(fp->slots_per_subframe*sf_ahead))) ? (proc->frame_rx+1)&1023 : proc->frame_rx;
proc->tti_tx = (proc->tti_rx + (fp->slots_per_subframe*sf_ahead))%fp->slots_per_frame;
LOG_D(PHY,"AFTER fh_south_in - SFN/SL:%d%d RU->proc[RX:%d.%d TX:%d.%d] RC.gNB[0]:[RX:%d%d TX(SFN):%d]\n",
frame,slot,
proc->frame_rx,proc->tti_rx,
proc->frame_tx,proc->tti_tx,
RC.gNB[0]->proc.frame_rx,RC.gNB[0]->proc.slot_rx,
RC.gNB[0]->proc.frame_tx);
/*
LOG_D(PHY,"RU thread (do_prach %d, is_prach_subframe %d), received frame %d, subframe %d\n",
ru->do_prach,
is_prach_subframe(fp, proc->frame_rx, proc->tti_rx),
proc->frame_rx,proc->tti_rx);
if ((ru->do_prach>0) && (is_prach_subframe(fp, proc->frame_rx, proc->tti_rx)==1)) {
wakeup_prach_ru(ru);
}*/
// adjust for timing offset between RU
//printf("~~~~~~~~~~~~~~~~~~~~~~~~~~%d.%d in ru_thread is in process\n", proc->frame_rx, proc->tti_rx);
if (ru->idx!=0) proc->frame_tx = (proc->frame_tx+proc->frame_offset)&1023;
// do RX front-end processing (frequency-shift, dft) if needed
int slot_type = nr_slot_select(cfg,proc->frame_rx,proc->tti_rx);
if (slot_type == NR_UPLINK_SLOT || slot_type == NR_MIXED_SLOT) {
//if (proc->tti_rx==8) {
if (ru->feprx) {
ru->feprx(ru,proc->tti_rx);
......@@ -1631,50 +1285,17 @@ void *ru_thread( void *param ) {
}
// At this point, all information for subframe has been received on FH interface
res = pullTpool(gNB->resp_L1, gNB->threadPool);
syncMsg = (processingData_L1_t *)NotifiedFifoData(res);
syncMsg->gNB = gNB;
syncMsg->frame_rx = proc->frame_rx;
syncMsg->slot_rx = proc->tti_rx;
syncMsg->frame_tx = proc->frame_tx;
syncMsg->slot_tx = proc->tti_tx;
syncMsg->timestamp_tx = proc->timestamp_tx;
res->key = proc->tti_rx;
pushTpool(gNB->threadPool, res);
// wakeup all gNB processes waiting for this RU
if (ru->num_gNB>0) wakeup_gNB_L1s(ru);
if(get_thread_parallel_conf() == PARALLEL_SINGLE_THREAD || ru->num_gNB==0) {
// do TX front-end processing if needed (precoding and/or IDFTs)
if (ru->feptx_prec) ru->feptx_prec(ru,proc->frame_tx,proc->tti_tx);
// do OFDM with/without TX front-end processing if needed
if ((ru->fh_north_asynch_in == NULL) && (ru->feptx_ofdm)) ru->feptx_ofdm(ru,proc->frame_tx,proc->tti_tx);
if(!emulate_rf) {
// do outgoing fronthaul (south) if needed
if ((ru->fh_north_asynch_in == NULL) && (ru->fh_south_out)) ru->fh_south_out(ru,proc->frame_tx,proc->tti_tx,proc->timestamp_tx);
if (ru->fh_north_out) ru->fh_north_out(ru);
} else {
if(proc->frame_tx == print_frame) {
for (i=0; i<ru->nb_tx; i++) {
sprintf(filename,"tx%ddataF_frame%d_sl%d.m", i, print_frame, proc->tti_tx);
LOG_M(filename,"txdataF_frame",&ru->common.txdataF_BF[i][0],fp->samples_per_slot_wCP, 1, 1);
if(proc->tti_tx == 9) {
sprintf(filename,"tx%ddata_frame%d.m", i, print_frame);
LOG_M(filename,"txdata_frame",&ru->common.txdata[i][0],fp->samples_per_frame, 1, 1);
sprintf(filename,"tx%ddata_frame%d.dat", i, print_frame);
FILE *output_fd = fopen(filename,"w");
if (output_fd) {
fwrite(&ru->common.txdata[i][0],
sizeof(int32_t),
fp->samples_per_frame,
output_fd);
fclose(output_fd);
} else {
LOG_E(PHY,"Cannot write to file %s\n",filename);
}
}//if(proc->tti_tx == 9)
}//for (i=0; i<ru->nb_tx; i++)
}//if(proc->frame_tx == print_frame)
}//else emulate_rf
proc->emulate_rf_busy = 0;
}//if(get_thread_parallel_conf() == PARALLEL_SINGLE_THREAD)
}
printf( "Exiting ru_thread \n");
......@@ -1685,85 +1306,13 @@ void *ru_thread( void *param ) {
else LOG_I(PHY,"RU %d rf device stopped\n",ru->idx);
}
delNotifiedFIFO_elt(msg);
delNotifiedFIFO_elt(msgL1Tx);
delNotifiedFIFO_elt(msgRUTx);
ru_thread_status = 0;
return &ru_thread_status;
}
/*
// This thread run the initial synchronization like a UE
void *ru_thread_synch(void *arg) {
RU_t *ru = (RU_t*)arg;
NR_DL_FRAME_PARMS *fp=ru->nr_frame_parms;
int32_t sync_pos,sync_pos2;
uint32_t peak_val;
uint32_t sync_corr[307200] __attribute__((aligned(32)));
static int ru_thread_synch_status;
wait_sync("ru_thread_synch");
// initialize variables for PSS detection
lte_sync_time_init(ru->nr_frame_parms);
while (!oai_exit) {
// wait to be woken up
if (wait_on_condition(&ru->proc.mutex_synch,&ru->proc.cond_synch,&ru->proc.instance_cnt_synch,"ru_thread_synch")<0) break;
// if we're not in synch, then run initial synch
if (ru->in_synch == 0) {
// run initial synch like UE
LOG_I(PHY,"Running initial synchronization\n");
sync_pos = lte_sync_time_gNB(ru->common.rxdata,
fp,
fp->samples_per_subframe*5,
&peak_val,
sync_corr);
LOG_I(PHY,"RU synch: %d, val %d\n",sync_pos,peak_val);
if (sync_pos >= 0) {
if (sync_pos >= fp->nb_prefix_samples)
sync_pos2 = sync_pos - fp->nb_prefix_samples;
else
sync_pos2 = sync_pos + (fp->samples_per_subframe*10) - fp->nb_prefix_samples;
if (fp->frame_type == FDD) {
// PSS is hypothesized in last symbol of first slot in Frame
int sync_pos_slot = (fp->samples_per_subframe>>1) - fp->ofdm_symbol_size - fp->nb_prefix_samples;
if (sync_pos2 >= sync_pos_slot)
ru->rx_offset = sync_pos2 - sync_pos_slot;
else
ru->rx_offset = (fp->samples_per_subframe*10) + sync_pos2 - sync_pos_slot;
}
else {
}
LOG_I(PHY,"Estimated sync_pos %d, peak_val %d => timing offset %d\n",sync_pos,peak_val,ru->rx_offset);
if ((peak_val > 300000) && (sync_pos > 0)) {
// if (sync_pos++ > 3) {
write_output("ru_sync.m","sync",(void*)&sync_corr[0],fp->samples_per_subframe*5,1,2);
write_output("ru_rx.m","rxs",(void*)ru->ru_time.rxdata[0][0],fp->samples_per_subframe*10,1,1);
exit(-1);
}
ru->in_synch=1;
}
}
if (release_thread(&ru->proc.mutex_synch,&ru->proc.instance_cnt_synch,"ru_synch_thread") < 0) break;
} // oai_exit
ru_thread_synch_status = 0;
return &ru_thread_synch_status;
}
*/
int nr_start_if(struct RU_t_s *ru, struct PHY_VARS_gNB_s *gNB) {
return(ru->ifdevice.trx_start_func(&ru->ifdevice));
......@@ -1785,17 +1334,10 @@ int start_write_thread(RU_t *ru) {
void init_RU_proc(RU_t *ru) {
int i=0;
RU_proc_t *proc;
char name[100];
LOG_I(PHY,"Initializing RU proc %d (%s,%s),\n",ru->idx,NB_functions[ru->function],NB_timing[ru->if_timing]);
proc = &ru->proc;
memset((void *)proc,0,sizeof(RU_proc_t));
proc->ru = ru;
proc->instance_cnt_prach = -1;
proc->instance_cnt_synch = -1;
proc->instance_cnt_FH = -1;
proc->instance_cnt_FH1 = -1;
proc->instance_cnt_gNBs = -1;
proc->instance_cnt_asynch_rxtx = -1;
proc->instance_cnt_emulateRF = -1;
proc->first_rx = 1;
proc->first_tx = 1;
......@@ -1806,44 +1348,13 @@ void init_RU_proc(RU_t *ru) {
for (i=0; i<10; i++) proc->symbol_mask[i]=0;
pthread_mutex_init( &proc->mutex_prach, NULL);
pthread_mutex_init( &proc->mutex_asynch_rxtx, NULL);
pthread_mutex_init( &proc->mutex_synch,NULL);
pthread_mutex_init( &proc->mutex_FH,NULL);
pthread_mutex_init( &proc->mutex_FH1,NULL);
pthread_mutex_init( &proc->mutex_emulateRF,NULL);
pthread_mutex_init( &proc->mutex_gNBs, NULL);
pthread_cond_init( &proc->cond_prach, NULL);
pthread_cond_init( &proc->cond_FH, NULL);
pthread_cond_init( &proc->cond_FH1, NULL);
pthread_cond_init( &proc->cond_emulateRF, NULL);
pthread_cond_init( &proc->cond_asynch_rxtx, NULL);
pthread_cond_init( &proc->cond_synch,NULL);
pthread_cond_init( &proc->cond_gNBs, NULL);
threadCreate( &proc->pthread_FH, ru_thread, (void *)ru, "thread_FH", -1, OAI_PRIORITY_RT_MAX );
if (get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT || get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT)
threadCreate( &proc->pthread_FH1, ru_thread_tx, (void *)ru, "thread_FH1", -1, OAI_PRIORITY_RT );
if(emulate_rf)
threadCreate( &proc->pthread_emulateRF, emulatedRF_thread, (void *)proc, "emulateRF", -1, OAI_PRIORITY_RT );
if (ru->function == NGFI_RRU_IF4p5) {
threadCreate( &proc->pthread_prach, ru_thread_prach, (void *)ru, "RACH", -1, OAI_PRIORITY_RT );
/// temporary deactivation of the synch thread
// if (ru->is_slave == 1) pthread_create( &proc->pthread_synch, attr_synch, ru_thread_synch, (void*)ru);
if ((ru->if_timing == synch_to_other) ||
(ru->function == NGFI_RRU_IF5) ||
(ru->function == NGFI_RRU_IF4p5)) threadCreate( &proc->pthread_asynch_rxtx, ru_thread_asynch_rxtx, (void *)ru, "asynch_rxtx", -1, OAI_PRIORITY_RT );
snprintf( name, sizeof(name), "ru_thread_FH %d", ru->idx );
pthread_setname_np( proc->pthread_FH, name );
} else if (ru->function == gNodeB_3GPP && ru->if_south == LOCAL_RF) { // DJP - need something else to distinguish between monolithic and PNF
LOG_I(PHY,"%s() DJP - added creation of pthread_prach\n", __FUNCTION__);
threadCreate( &proc->pthread_prach, ru_thread_prach, (void *)ru,"RACH", -1, OAI_PRIORITY_RT );
}
if (get_thread_worker_conf() == WORKER_ENABLE) {
if (ru->feprx) nr_init_feprx_thread(ru);
......@@ -1856,46 +1367,9 @@ void init_RU_proc(RU_t *ru) {
void kill_NR_RU_proc(int inst) {
RU_t *ru = RC.ru[inst];
RU_proc_t *proc = &ru->proc;
pthread_mutex_lock(&proc->mutex_FH);
proc->instance_cnt_FH = 0;
pthread_mutex_unlock(&proc->mutex_FH);
pthread_cond_signal(&proc->cond_FH);
pthread_mutex_lock(&proc->mutex_prach);
proc->instance_cnt_prach = 0;
pthread_mutex_unlock(&proc->mutex_prach);
pthread_cond_signal(&proc->cond_prach);
pthread_mutex_lock(&proc->mutex_synch);
proc->instance_cnt_synch = 0;
pthread_mutex_unlock(&proc->mutex_synch);
pthread_cond_signal(&proc->cond_synch);
pthread_mutex_lock(&proc->mutex_gNBs);
proc->instance_cnt_gNBs = 0;
pthread_mutex_unlock(&proc->mutex_gNBs);
pthread_cond_signal(&proc->cond_gNBs);
pthread_mutex_lock(&proc->mutex_asynch_rxtx);
proc->instance_cnt_asynch_rxtx = 0;
pthread_mutex_unlock(&proc->mutex_asynch_rxtx);
pthread_cond_signal(&proc->cond_asynch_rxtx);
LOG_D(PHY, "Joining pthread_FH\n");
pthread_join(proc->pthread_FH, NULL);
if (ru->function == NGFI_RRU_IF4p5) {
LOG_D(PHY, "Joining pthread_prach\n");
pthread_join(proc->pthread_prach, NULL);
if (ru->is_slave) {
LOG_D(PHY, "Joining pthread_\n");
pthread_join(proc->pthread_synch, NULL);
}
if ((ru->if_timing == synch_to_other) ||
(ru->function == NGFI_RRU_IF5) ||
(ru->function == NGFI_RRU_IF4p5)) {
LOG_D(PHY, "Joining pthread_asynch_rxtx\n");
pthread_join(proc->pthread_asynch_rxtx, NULL);
}
}
if (get_nprocs() >= 2) {
if (ru->feprx) {
pthread_mutex_lock(&proc->mutex_fep);
......@@ -1924,17 +1398,6 @@ void kill_NR_RU_proc(int inst) {
LOG_D(PHY, "Joining ru_stats_thread\n");
pthread_join(ru->ru_stats_thread, NULL);
}
pthread_mutex_destroy(&proc->mutex_prach);
pthread_mutex_destroy(&proc->mutex_asynch_rxtx);
pthread_mutex_destroy(&proc->mutex_synch);
pthread_mutex_destroy(&proc->mutex_FH);
pthread_mutex_destroy(&proc->mutex_gNBs);
pthread_cond_destroy(&proc->cond_prach);
pthread_cond_destroy(&proc->cond_FH);
pthread_cond_destroy(&proc->cond_asynch_rxtx);
pthread_cond_destroy(&proc->cond_synch);
pthread_cond_destroy(&proc->cond_gNBs);
}
int check_capabilities(RU_t *ru,RRU_capabilities_t *cap)
......
......@@ -244,6 +244,13 @@ extern char *get_softmodem_function(uint64_t *sofmodemfunc_mask_ptr);
extern void set_softmodem_sighandler(void);
extern uint64_t downlink_frequency[MAX_NUM_CCs][4];
extern int32_t uplink_frequency_offset[MAX_NUM_CCs][4];
extern uint16_t sl_ahead;
extern uint16_t sf_ahead;
extern volatile int oai_exit;
void tx_func(void *param);
void rx_func(void *param);
void ru_tx_func(void *param);
extern uint8_t nfapi_mode;
#ifdef __cplusplus
}
......
......@@ -105,6 +105,8 @@ typedef struct {
uint8_t O_ACK;
/// Index of current HARQ round for this ULSCH
uint8_t round;
/// Last NDI received for this HARQ process
uint8_t ndi;
/// pointer to pdu from MAC interface (TS 36.212 V15.4.0, Sec 5.1 p. 8)
unsigned char *a;
/// Pointer to the payload + CRC
......
......@@ -202,7 +202,6 @@ NR_UE_ULSCH_t *new_nr_ue_ulsch(uint16_t N_RB_UL,
for (i=0; i<number_of_harq_pids; i++) {
ulsch->harq_processes[i]->round=0;
}
return(ulsch);
}
}
......@@ -257,7 +256,6 @@ int nr_ulsch_encoding(NR_UE_ULSCH_t *ulsch,
Ilbrm = 0;
Tbslbrm = 950984; //max tbs
Coderate = 0.0;
harq_process->round = nr_rv_round_map_ue[harq_process->pusch_pdu.pusch_data.rv_index];
///////////
/////////////////////////////////////////////////////////////////////////////////////////
......@@ -267,8 +265,7 @@ int nr_ulsch_encoding(NR_UE_ULSCH_t *ulsch,
LOG_D(PHY,"ulsch coding nb_rb %d, Nl = %d\n", nb_rb, harq_process->pusch_pdu.nrOfLayers);
LOG_D(PHY,"ulsch coding A %d G %d mod_order %d\n", A,G, mod_order);
// if (harq_process->Ndi == 1) { // this is a new packet
if (harq_process->round == 0) { // this is a new packet
if (harq_process->ndi != harq_process->pusch_pdu.pusch_data.new_data_indicator) { // this is a new packet
#ifdef DEBUG_ULSCH_CODING
printf("encoding thinks this is a new packet \n");
#endif
......@@ -415,7 +412,7 @@ int nr_ulsch_encoding(NR_UE_ULSCH_t *ulsch,
///////////
///////////////////////////////////////////////////////////////////////////////
harq_process->ndi = harq_process->pusch_pdu.pusch_data.new_data_indicator;
}
F = harq_process->F;
Kr = harq_process->K;
......
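Editorial note on the two hunks above: the "new packet" test now keys on the new data indicator instead of the redundancy-version round. NDI is a one-bit toggle kept per HARQ process, so a received value that differs from the stored one marks a fresh transport block, while an identical value marks a retransmission; the stored value is latched only after the new block has been encoded. In compressed form (local variable name illustrative):
uint8_t ndi_rx = harq_process->pusch_pdu.pusch_data.new_data_indicator;
if (harq_process->ndi != ndi_rx) {
  /* new transport block: attach CRC, segment, LDPC-encode, fill the
   * circular buffer, then latch the toggle for the next comparison   */
  harq_process->ndi = ndi_rx;
} else {
  /* retransmission: reuse the encoded buffer, only rate matching and
   * the redundancy version change                                    */
}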
......@@ -282,30 +282,6 @@ static void genericWaterFall (OAIgraph_t *graph, scopeSample_t *values, const in
graph->iteration++;
}
static void genericLogPowerPerAntena(OAIgraph_t *graph, const int nb_ant, const scopeSample_t **data, const int len) {
float *values, *time;
oai_xygraph_getbuff(graph, &time, &values, len, 0);
for (int ant=0; ant<nb_ant; ant++) {
if (data[ant] != NULL) {
float *values, *time;
oai_xygraph_getbuff(graph, &time, &values, len, ant);
for (int i=0; i<len; i+=8) {
float *vals=values+i;
const scopeSample_t *in=&(data[ant][i]);
// TRY AUTOMATIC simd BY GCC
for (int k=0; k<8; k++ ) {
vals[k] = 10*log10(1.0+SquaredNorm(in[k]));
}
}
oai_xygraph(graph,time,values, len, ant, 10);
}
}
}
static void genericPowerPerAntena(OAIgraph_t *graph, const int nb_ant, const scopeSample_t **data, const int len) {
float *values, *time;
oai_xygraph_getbuff(graph, &time, &values, len, 0);
......
......@@ -741,4 +741,10 @@ typedef struct RRU_config_s {
MBSFN_config_t MBSFN_config[8];
} RRU_config_t;
typedef struct processingData_RU {
int frame_tx;
int slot_tx;
openair0_timestamp timestamp_tx;
RU_t *ru;
} processingData_RU_t;
#endif //__PHY_DEFS_RU__H__
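Editorial note: processingData_RU_t defined above is the payload handed to the RU transmit path. The sketch below shows only the shape such a consumer takes, not the ru_tx_func actually implemented by this branch (the function name here is hypothetical; the field accesses match the struct above):
/* Editorial sketch only -- NOT the branch's ru_tx_func. */
static void ru_tx_consumer_sketch(void *param) {
  processingData_RU_t *info = (processingData_RU_t *)param;
  RU_t *ru = info->ru;
  (void)ru; /* the real consumer drives feptx/precoding and the RF write from here */
  LOG_D(PHY, "RU TX for %d.%d, timestamp %llu\n",
        info->frame_tx, info->slot_tx,
        (unsigned long long)info->timestamp_tx);
}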
......@@ -830,6 +830,9 @@ typedef struct PHY_VARS_gNB_s {
time_stats_t ulsch_freq_offset_estimation_stats;
*/
notifiedFIFO_t *respDecode;
notifiedFIFO_t *resp_L1;
notifiedFIFO_t *resp_L1_tx;
notifiedFIFO_t *resp_RU_tx;
tpool_t *threadPool;
int nbDecode;
uint8_t pusch_proc_threads;
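Editorial note: resp_L1, resp_L1_tx and resp_RU_tx added above are plain pointers, so they must be allocated and initialised before any element is pushed; where exactly that happens is outside this excerpt. A hedged sketch, assuming OAI's initNotifiedFIFO() helper:
/* Editorial sketch only -- placement and error handling omitted. */
gNB->resp_L1    = (notifiedFIFO_t *)malloc(sizeof(notifiedFIFO_t));
gNB->resp_L1_tx = (notifiedFIFO_t *)malloc(sizeof(notifiedFIFO_t));
gNB->resp_RU_tx = (notifiedFIFO_t *)malloc(sizeof(notifiedFIFO_t));
initNotifiedFIFO(gNB->resp_L1);
initNotifiedFIFO(gNB->resp_L1_tx);
initNotifiedFIFO(gNB->resp_RU_tx);
/* the shared worker pool (gNB->threadPool) is created separately,
 * e.g. sized from pusch_proc_threads, before the RU/L1 threads start */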
......@@ -871,4 +874,13 @@ union ldpcReqUnion {
uint64_t p;
};
typedef struct processingData_L1 {
int frame_rx;
int frame_tx;
int slot_rx;
int slot_tx;
openair0_timestamp timestamp_tx;
PHY_VARS_gNB *gNB;
} processingData_L1_t;
#endif
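Editorial note: processingData_L1_t carries the frame/slot context exchanged between rx_func, tx_func and the RU. A hedged sketch of posting one slot, reusing the msgL1Tx element sketched earlier; the frame/slot/timestamp variables come from the RX slot handler, which is not shown in this excerpt:
/* Editorial sketch only. */
processingData_L1_t *msg = (processingData_L1_t *)NotifiedFifoData(msgL1Tx);
msg->gNB          = gNB;
msg->frame_rx     = frame_rx;
msg->slot_rx      = slot_rx;
msg->frame_tx     = frame_tx;       /* slot to be generated sl_ahead later */
msg->slot_tx      = slot_tx;
msg->timestamp_tx = timestamp_tx;   /* when the RU has to transmit it      */
pushNotifiedFIFO(gNB->resp_L1_tx, msgL1Tx); /* a consumer pulls this element and runs tx_func on it */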
......@@ -79,8 +79,8 @@ int32_t uplink_frequency_offset[MAX_NUM_CCs][4];
double cpuf;
int sf_ahead=4 ;
int sl_ahead=0;
uint16_t sf_ahead=4 ;
uint16_t sl_ahead=0;
//uint8_t nfapi_mode = 0;
uint64_t downlink_frequency[MAX_NUM_CCs][4];
......@@ -248,7 +248,10 @@ int main(int argc, char **argv)
int i,aa;//,l;
double sigma2, sigma2_dB=10, SNR, snr0=-2.0, snr1=2.0;
uint8_t snr1set=0;
float roundStats[50];
double roundStats[500] = {0};
double blerStats[500] = {0};
double berStats[500] = {0};
double snrStats[500] = {0};
float effRate;
//float psnr;
float eff_tp_check = 0.7;
......@@ -1114,7 +1117,9 @@ int main(int argc, char **argv)
if (UE_harq_process->harq_ack.ack==1) effRate += ((float)TBS)/round;
} // noise trials
blerStats[snrRun] = (float) n_errors / (float) n_trials;
roundStats[snrRun]/=((float)n_trials);
berStats[snrRun] = (double)errors_scrambling/available_bits/n_trials;
effRate /= n_trials;
printf("*****************************************\n");
printf("SNR %f, (false positive %f)\n", SNR,
......@@ -1122,7 +1127,7 @@ int main(int argc, char **argv)
printf("*****************************************\n");
printf("\n");
dump_pdsch_stats(gNB);
printf("SNR %f : n_errors (negative CRC) = %d/%d, Avg round %.2f, Channel BER %e, Eff Rate %.4f bits/slot, Eff Throughput %.2f, TBS %u bits/slot\n", SNR, n_errors, n_trials,roundStats[snrRun],(double)errors_scrambling/available_bits/n_trials,effRate,effRate/TBS*100,TBS);
printf("SNR %f : n_errors (negative CRC) = %d/%d, Avg round %.2f, Channel BER %e, BLER %.2f, Eff Rate %.4f bits/slot, Eff Throughput %.2f, TBS %u bits/slot\n", SNR, n_errors, n_trials,roundStats[snrRun],berStats[snrRun],blerStats[snrRun],effRate,effRate/TBS*100,TBS);
printf("\n");
if (print_perf==1) {
......@@ -1187,15 +1192,19 @@ int main(int argc, char **argv)
break;
}
//if ((float)n_errors/(float)n_trials <= target_error_rate) {
if (effRate > (eff_tp_check*TBS)) {
printf("PDSCH test OK\n");
break;
}
snrStats[snrRun] = SNR;
snrRun++;
} // SNR loop
LOG_M("dlsimStats.m","SNR",snrStats,snrRun,1,7);
LOG_MM("dlsimStats.m","BLER",blerStats,snrRun,1,7);
LOG_MM("dlsimStats.m","BER",berStats,snrRun,1,7);
LOG_MM("dlsimStats.m","rounds",roundStats,snrRun,1,7);
/*if (n_trials>1) {
printf("HARQ stats:\nSNR\tRounds\n");
psnr = snr0;
......
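Editorial note: the three LOG_MM calls above rely on the multiVec append mode that this merge adds to write_file_matlab(), but the macro definition itself lies outside this excerpt. An assumed sketch of how it could be wired (the name LOG_MM is taken from the calls above; everything else is hypothetical):
/* assumed: same shape as LOG_M, but with multiVec = 1 so repeated calls
 * append further vectors to the already-created .m file               */
#define LOG_MM(file, vector, data, len, dec, format) \
  do { write_file_matlab(file, vector, data, len, dec, format, 1); } while (0)
With that wiring, the initial LOG_M call creates dlsimStats.m with the SNR vector, and the subsequent LOG_MM calls append the BLER, BER and average-round vectors to the same file, which is the use case the multiVec parameter is documented for.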
......@@ -64,7 +64,7 @@ double cpuf;
extern uint16_t prach_root_sequence_map0_3[838];
openair0_config_t openair0_cfg[MAX_CARDS];
//uint8_t nfapi_mode=0;
int sl_ahead = 0;
uint16_t sl_ahead = 0;
//void dump_nr_prach_config(NR_DL_FRAME_PARMS *frame_parms,uint8_t subframe);
......
......@@ -75,9 +75,9 @@ PHY_VARS_NR_UE *UE;
RAN_CONTEXT_t RC;
int32_t uplink_frequency_offset[MAX_NUM_CCs][4];
int sf_ahead=4 ;
uint16_t sf_ahead=4 ;
int slot_ahead=6 ;
int sl_ahead=0;
uint16_t sl_ahead=0;
double cpuf;
//uint8_t nfapi_mode = 0;
uint64_t downlink_frequency[MAX_NUM_CCs][4];
......