Commit c1152f30 authored by Raymond Knopp

Testing merge with develop-nr. Removal of pthread_mutex_timedlock: we were using the wrong time argument (it needs to be an absolute deadline, not a relative timeout).
parent b447f0b9
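For context on the headline fix: POSIX pthread_mutex_timedlock() takes an absolute deadline measured against CLOCK_REALTIME, not a relative timeout. The old code filled a timespec with {0 s, 10 ms} and passed it directly, i.e. a deadline 10 ms after the 1970 epoch, which is always already in the past. A minimal sketch of the correct pattern (illustrative names, not code from this commit):

    #include <errno.h>
    #include <pthread.h>
    #include <time.h>

    /* Build an ABSOLUTE deadline from "now" plus a relative timeout,
     * then hand it to pthread_mutex_timedlock(). */
    static int lock_with_timeout(pthread_mutex_t *m, long timeout_ns)
    {
      struct timespec deadline;
      clock_gettime(CLOCK_REALTIME, &deadline);     /* "now", absolute */
      deadline.tv_nsec += timeout_ns;               /* add relative part */
      if (deadline.tv_nsec >= 1000000000L) {        /* normalize nanoseconds */
        deadline.tv_sec  += 1;
        deadline.tv_nsec -= 1000000000L;
      }
      return pthread_mutex_timedlock(m, &deadline); /* 0, ETIMEDOUT, or error */
    }

Rather than fixing the deadline computation, the commit drops the timed locks entirely in favor of plain pthread_mutex_lock() wrapped in AssertFatal, as the hunks below show.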
@@ -293,13 +293,13 @@ static void *gNB_L1_thread_tx(void *param) {
       pthread_mutex_lock( &L1_proc_tx->mutex );
       L1_proc_tx->instance_cnt = -1;
-      pthread_mutex_unlock(&L1_proc_tx->mutex);
       // the thread can now be woken up
       if (pthread_cond_signal(&L1_proc_tx->cond) != 0) {
         LOG_E( PHY, "[gNB] ERROR pthread_cond_signal for gNB TXnp4 thread\n");
         exit_fun( "ERROR pthread_cond_signal" );
       }
+      pthread_mutex_unlock( &L1_proc_tx->mutex );
       wakeup_txfh(gNB,L1_proc_tx,frame_tx,slot_tx,timestamp_tx);
       VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX1, 0 );
     }
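The reordering in this first hunk (the unlock moved after pthread_cond_signal) is the textbook condition-variable handshake used throughout these files: the predicate (instance_cnt) is updated and the signal sent while the mutex is still held, so the wakeup cannot race with a waiter that sits between testing the predicate and blocking in pthread_cond_wait(). A hedged sketch of both sides, with illustrative names rather than the OAI structs:

    /* waiter */
    pthread_mutex_lock(&mutex);
    while (instance_cnt < 0)             /* predicate re-checked on wakeup */
      pthread_cond_wait(&cond, &mutex);  /* atomically releases + re-locks */
    /* ... handle the slot ... */
    pthread_mutex_unlock(&mutex);

    /* signaler (the pattern of the hunk above) */
    pthread_mutex_lock(&mutex);
    instance_cnt = 0;                    /* publish work under the lock */
    pthread_cond_signal(&cond);          /* signal while still holding it */
    pthread_mutex_unlock(&mutex);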
@@ -414,46 +414,51 @@ int wakeup_txfh(PHY_VARS_gNB *gNB,gNB_L1_rxtx_proc_t *proc,int frame_tx,int slot
   RU_t *ru;
   RU_proc_t *ru_proc;
-  int waitret;
+  int waitret,ret;
   struct timespec wait;
   wait.tv_sec=0;
   wait.tv_nsec=10000000L;
+  VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_UE,proc->instance_cnt_RUs);
   // note this should depend on the numerology used by the TX L1 thread, set here for 500us slot time
+  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_GAIN_CONTROL,1);
   waitret=timedwait_on_condition(&proc->mutex_RUs_tx,&proc->cond_RUs,&proc->instance_cnt_RUs,"wakeup_txfh",500000);
+  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_GAIN_CONTROL,0);
+  AssertFatal(release_thread(&proc->mutex_RUs_tx,&proc->instance_cnt_RUs,"wakeup_txfh")==0, "error releaseing gNB lock on RUs\n");
-  VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_UE,proc->instance_cnt_RUs);
   if (waitret == ETIMEDOUT) {
     LOG_W(PHY,"Dropping TX slot (%d.%d) because FH is blocked more than 2 slot times (1000us)\n",frame_tx,slot_tx);
-    pthread_mutex_lock(&gNB->proc.mutex_RU_tx);
+    AssertFatal((ret=pthread_mutex_lock(&gNB->proc.mutex_RU_tx))==0,"mutex_lock returns %d\n",ret);
     gNB->proc.RU_mask_tx = 0;
-    pthread_mutex_unlock(&gNB->proc.mutex_RU_tx);
+    AssertFatal((ret=pthread_mutex_unlock(&gNB->proc.mutex_RU_tx))==0,"mutex_unlock returns %d\n",ret);
     VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_UE,1);
     VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_UE,0);
     return(-1);
   }
-  else if (release_thread(&proc->mutex_RUs_tx,&proc->instance_cnt_RUs,"wakeup_txfh")<0) return(-1);
   for(int i=0; i<gNB->num_RU; i++)
   {
     ru      = gNB->RU_list[i];
     ru_proc = &ru->proc;
+    AssertFatal((ret = pthread_mutex_lock(&ru_proc->mutex_gNBs))==0,"ERROR pthread_mutex_lock failed on mutex_gNBs L1_thread_tx with ret=%d\n",ret);
     if (ru_proc->instance_cnt_gNBs == 0) {
+      VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_TRX_TST_UE, 1);
       LOG_E(PHY,"Frame %d, subframe %d: TX FH thread busy, dropping Frame %d, subframe %d\n", ru_proc->frame_tx, ru_proc->tti_tx, proc->frame_rx, proc->slot_rx);
-      pthread_mutex_lock(&gNB->proc.mutex_RU_tx);
+      AssertFatal((ret=pthread_mutex_lock(&gNB->proc.mutex_RU_tx))==0,"mutex_lock returns %d\n",ret);
       gNB->proc.RU_mask_tx = 0;
-      pthread_mutex_unlock(&gNB->proc.mutex_RU_tx);
-      return(-1);
-    }
-    if ((waitret = pthread_mutex_timedlock(&ru_proc->mutex_gNBs,&wait)) == ETIMEDOUT) {
-      LOG_W( PHY, "[eNB] ERROR pthread_mutex_lock timed out on mutex_gNBs L1_thread_tx (timeout)\n");
+      AssertFatal((ret=pthread_mutex_unlock(&gNB->proc.mutex_RU_tx))==0,"mutex_unlock returns %d\n",ret);
+      AssertFatal((ret=pthread_mutex_unlock( &ru_proc->mutex_gNBs ))==0,"mutex_unlock return %d\n",ret);
+      VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_TRX_TST_UE, 0);
       return(-1);
     }
-    else AssertFatal(waitret==0,"pthread_mutex_timedlock returned %d\n",waitret);
     ru_proc->instance_cnt_gNBs = 0;
     ru_proc->timestamp_tx = timestamp_tx;
@@ -461,15 +466,13 @@ int wakeup_txfh(PHY_VARS_gNB *gNB,gNB_L1_rxtx_proc_t *proc,int frame_tx,int slot
     ru_proc->frame_tx = frame_tx;
     VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX1_UE, ru_proc->instance_cnt_gNBs);
-    pthread_mutex_unlock( &ru_proc->mutex_gNBs );
     LOG_D(PHY,"Signaling tx_thread_fh for %d.%d\n",frame_tx,slot_tx);
     // the thread can now be woken up
-    if (pthread_cond_signal(&ru_proc->cond_gNBs) != 0) {
-      LOG_E( PHY, "[gNB] ERROR pthread_cond_signal for gNB TXnp4 thread\n");
-      exit_fun( "ERROR pthread_cond_signal" );
-      return(-1);
-    }
+    AssertFatal(pthread_cond_signal(&ru_proc->cond_gNBs) == 0,
+                "[gNB] ERROR pthread_cond_signal for gNB TXnp4 thread\n");
+    AssertFatal((ret=pthread_mutex_unlock(&ru_proc->mutex_gNBs))==0,"mutex_unlock returned %d\n",ret);
   }
   return(0);
 }
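Most of the mechanical changes in this commit wrap the pthread calls in AssertFatal, which aborts with a message when the return code is non-zero. Roughly (the real macro lives in OAI's common/utils/assertions.h and carries more context; this is only an approximation):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Approximate behavior of OAI's AssertFatal, not the verbatim macro. */
    #define AssertFatal(cond, ...)      \
      do {                              \
        if (!(cond)) {                  \
          fprintf(stderr, __VA_ARGS__); \
          abort();                      \
        }                               \
      } while (0)

    /* usage as in the hunks above: */
    pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    int ret;
    AssertFatal((ret = pthread_mutex_lock(&m)) == 0, "mutex_lock returns %d\n", ret);

The effect is to turn lock failures that were previously logged and sometimes ignored into immediate, attributable crashes.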
@@ -479,21 +482,15 @@ int wakeup_tx(PHY_VARS_gNB *gNB,int frame_rx,int slot_rx,int frame_tx,int slot_t
   gNB_L1_rxtx_proc_t *L1_proc_tx = &gNB->proc.L1_proc_tx;
+  int ret;
   struct timespec wait;
   wait.tv_sec=0;
   wait.tv_nsec=5000000L;
-  if (pthread_mutex_timedlock(&L1_proc_tx->mutex,&wait) != 0) {
-    LOG_E(PHY, "[SCHED][eNB] ERROR locking mutex for eNB L1_thread_tx\n");
-    exit_fun("ERROR pthread_lock");
-    return(-1);
-  }
-  if (L1_proc_tx->instance_cnt == -2) { // L1_thread_tx isn't ready yet so return
-    pthread_mutex_unlock( &L1_proc_tx->mutex);
-    return(0);
-  }
+  AssertFatal((ret = pthread_mutex_lock(&L1_proc_tx->mutex))==0,"mutex_lock returns %d\n",ret);
   while(L1_proc_tx->instance_cnt == 0){
     pthread_cond_wait(&L1_proc_tx->cond,&L1_proc_tx->mutex);
@@ -508,18 +505,14 @@ int wakeup_tx(PHY_VARS_gNB *gNB,int frame_rx,int slot_rx,int frame_tx,int slot_t
   L1_proc_tx->frame_tx = frame_tx;
   L1_proc_tx->timestamp_tx = timestamp_tx;
-  pthread_mutex_unlock( &L1_proc_tx->mutex);
   VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX1_UE,1);
   VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX1_UE,0);
   // the thread can now be woken up
-  if (pthread_cond_signal(&L1_proc_tx->cond) != 0) {
-    LOG_E( PHY, "[eNB] ERROR pthread_cond_signal for eNB TXnp4 thread\n");
-    exit_fun( "ERROR pthread_cond_signal" );
-    return(-1);
-  }
+  AssertFatal(pthread_cond_signal(&L1_proc_tx->cond) == 0, "ERROR pthread_cond_signal for gNB L1 thread\n");
+  AssertFatal((ret=pthread_mutex_unlock(&L1_proc_tx->mutex))==0,"mutex_unlock returns %d\n",ret);
   return(0);
 }
@@ -529,11 +522,11 @@ int wakeup_rxtx(PHY_VARS_gNB *gNB,RU_t *ru) {
   gNB_L1_rxtx_proc_t *L1_proc=&proc->L1_proc;
   NR_DL_FRAME_PARMS *fp = &gNB->frame_parms;
   RU_proc_t *ru_proc=&ru->proc;
+  int ret;
   int i;
   struct timespec wait;
-  pthread_mutex_lock(&proc->mutex_RU);
+  AssertFatal((ret=pthread_mutex_lock(&proc->mutex_RU))==0,"mutex_lock returns %d\n",ret);
   for (i=0;i<gNB->num_RU;i++) {
     if (ru == gNB->RU_list[i]) {
       if ((proc->RU_mask&(1<<i)) > 0)
@@ -544,12 +537,12 @@ int wakeup_rxtx(PHY_VARS_gNB *gNB,RU_t *ru) {
   }
   if (proc->RU_mask != (1<<gNB->num_RU)-1) {  // not all RUs have provided their information so return
     LOG_E(PHY,"Not all RUs have provided their info\n");
-    pthread_mutex_unlock(&proc->mutex_RU);
+    AssertFatal((ret=pthread_mutex_unlock(&proc->mutex_RU))==0,"mutex_unlock returns %d\n",ret);
     return(0);
   }
   else { // all RUs have provided their information so continue on and wakeup gNB processing
     proc->RU_mask = 0;
-    pthread_mutex_unlock(&proc->mutex_RU);
+    AssertFatal((ret=pthread_mutex_unlock(&proc->mutex_RU))==0,"muex_unlock returns %d\n",ret);
   }
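The RU_mask bookkeeping above is a plain bitmask barrier: each RU sets its bit on arrival, and the gNB proceeds only when the mask equals (1<<num_RU)-1, i.e. all bits set. A small worked example with illustrative values:

    int num_RU  = 3;
    int RU_mask = 0;
    RU_mask |= (1 << 0);                  /* RU 0 reported */
    RU_mask |= (1 << 2);                  /* RU 2 reported */
    int all_in = (1 << num_RU) - 1;       /* 0b111 == 7 */
    if (RU_mask != all_in) {
      /* RU_mask == 0b101 == 5: RU 1 still missing, return early */
    } else {
      RU_mask = 0;                        /* reset for the next slot */
    }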
@@ -558,19 +551,10 @@ int wakeup_rxtx(PHY_VARS_gNB *gNB,RU_t *ru) {
   // wake up TX for subframe n+sl_ahead
   // lock the TX mutex and make sure the thread is ready
-  if (pthread_mutex_timedlock(&L1_proc->mutex,&wait) != 0) {
-    LOG_E( PHY, "[gNB] ERROR pthread_mutex_lock for gNB L1 thread %d (IC %d)\n", L1_proc->slot_rx&1,L1_proc->instance_cnt );
-    exit_fun( "error locking mutex" );
-    return(-1);
-  }
-  if (L1_proc->instance_cnt==-2) { // L1_thread isn't ready yet so return
-    pthread_mutex_unlock( &L1_proc->mutex );
-    return(0);
-  }
+  AssertFatal((ret=pthread_mutex_lock(&L1_proc->mutex)) == 0,"mutex_lock returns %d\n", ret);
   if (L1_proc->instance_cnt == 0) { // L1_thread is busy so abort the subframe
-    pthread_mutex_unlock( &L1_proc->mutex );
+    AssertFatal((ret=pthread_mutex_unlock( &L1_proc->mutex))==0,"muex_unlock return %d\n",ret);
     LOG_W(PHY,"L1_thread isn't ready in %d.%d, aborting RX processing\n",ru_proc->frame_rx,ru_proc->tti_rx);
   }
...
@@ -657,7 +657,7 @@ void rx_rf(RU_t *ru,int *frame,int *slot) {
     proc->timestamp_rx = 0;
   } else {
     if (proc->timestamp_rx - old_ts != fp->samples_per_slot) {
-      LOG_I(PHY,"rx_rf: rfdevice timing drift of %"PRId64" samples (ts_off %"PRId64")\n",proc->timestamp_rx - old_ts - fp->samples_per_slot,ru->ts_offset);
+      LOG_D(PHY,"rx_rf: rfdevice timing drift of %"PRId64" samples (ts_off %"PRId64")\n",proc->timestamp_rx - old_ts - fp->samples_per_slot,ru->ts_offset);
       ru->ts_offset += (proc->timestamp_rx - old_ts - fp->samples_per_slot);
       proc->timestamp_rx = ts-ru->ts_offset;
     }
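The drift handling above accumulates the deviation of consecutive RX timestamps from the nominal samples_per_slot into ts_offset and re-bases the current timestamp, so downstream slot arithmetic stays aligned; the commit only demotes the per-occurrence log from LOG_I to LOG_D. Worked with illustrative numbers:

    /* nominal slot length 15360 samples; one read arrives 32 samples late */
    int64_t samples_per_slot = 15360;
    int64_t old_ts = 100000, ts = 115392;             /* expected 115360 */
    int64_t ts_offset = 0;
    ts_offset += (ts - old_ts) - samples_per_slot;    /* drift: +32 */
    int64_t timestamp_rx = ts - ts_offset;            /* 115360, re-aligned */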
@@ -669,7 +669,7 @@ void rx_rf(RU_t *ru,int *frame,int *slot) {
   proc->timestamp_tx = proc->timestamp_rx+(sl_ahead*fp->samples_per_slot);
   proc->tti_tx   = (proc->tti_rx+sl_ahead)%fp->slots_per_frame;
   proc->frame_tx = (proc->tti_rx>(fp->slots_per_frame-1-sl_ahead)) ? (proc->frame_rx+1)&1023 : proc->frame_rx;
-  LOG_I(PHY,"RU %d/%d TS %llu (off %d), frame %d, slot %d.%d / %d\n",
+  LOG_D(PHY,"RU %d/%d TS %llu (off %d), frame %d, slot %d.%d / %d\n",
         ru->idx,
         0,
         (unsigned long long int)proc->timestamp_rx,
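The TX-side pointers are derived from the RX slot by the sl_ahead offset, with the slot index wrapping modulo slots_per_frame and the frame number wrapping at 1024, exactly as in the hunk above. With illustrative numerology values:

    int slots_per_frame = 20, sl_ahead = 6;
    int tti_rx = 18, frame_rx = 1023;
    int tti_tx   = (tti_rx + sl_ahead) % slots_per_frame;   /* 24 % 20 = 4 */
    int frame_tx = (tti_rx > slots_per_frame - 1 - sl_ahead)
                 ? (frame_rx + 1) & 1023                    /* 18 > 13: wrap */
                 : frame_rx;                                /* frame_tx = 0  */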
@@ -1206,27 +1206,25 @@ static void *ru_thread_tx( void *param ) {
   char filename[40];
   int print_frame = 8;
   int i = 0;
-  cpu_set_t cpuset;
-  CPU_ZERO(&cpuset);
-  //CPU_SET(5, &cpuset);
-  //pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
-  //wait_sync("ru_thread_tx");
+  int ret;
   wait_on_condition(&proc->mutex_FH1,&proc->cond_FH1,&proc->instance_cnt_FH1,"ru_thread_tx");
   printf( "ru_thread_tx ready\n");
   while (!oai_exit) {
     if (oai_exit) break;
-    LOG_I(PHY,"ru_thread_tx: Waiting for TX processing\n");
+    LOG_D(PHY,"ru_thread_tx: Waiting for TX processing\n");
     // wait until eNBs are finished subframe RX n and TX n+4
     wait_on_condition(&proc->mutex_gNBs,&proc->cond_gNBs,&proc->instance_cnt_gNBs,"ru_thread_tx");
-    pthread_mutex_lock(&proc->mutex_gNBs);
+    AssertFatal((ret=pthread_mutex_lock(&proc->mutex_gNBs))==0,"mutex_lock return %d\n",ret);
     int frame_tx=proc->frame_tx;
     int tti_tx  =proc->tti_tx;
     uint64_t timestamp_tx = proc->timestamp_tx;
-    pthread_mutex_unlock(&proc->mutex_gNBs);
+    AssertFatal((ret=pthread_mutex_unlock(&proc->mutex_gNBs))==0,"mutex_lock returns %d\n",ret);
     if (oai_exit) break;
@@ -1277,7 +1275,7 @@ static void *ru_thread_tx( void *param ) {
       gNB = ru->gNB_list[i];
       gNB_proc = &gNB->proc;
       L1_proc = (get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT)? &gNB_proc->L1_proc_tx : &gNB_proc->L1_proc;
-      pthread_mutex_lock(&gNB_proc->mutex_RU_tx);
+      AssertFatal((ret=pthread_mutex_lock(&gNB_proc->mutex_RU_tx))==0,"mutex_lock returns %d\n",ret);
       for (int j=0; j<gNB->num_RU; j++) {
         if (ru == gNB->RU_list[j]) {
@@ -1290,21 +1288,21 @@ static void *ru_thread_tx( void *param ) {
       }
       if (gNB_proc->RU_mask_tx != (1<<gNB->num_RU)-1) {  // not all RUs have provided their information so return
-        pthread_mutex_unlock(&gNB_proc->mutex_RU_tx);
-      } else { // all RUs TX are finished so send the ready signal to eNB processing
+        AssertFatal((ret=pthread_mutex_unlock(&gNB_proc->mutex_RU_tx))==0,"mutex_unlock returns %d\n",ret);
+      } else { // all RUs TX are finished so send the ready signal to gNB processing
         gNB_proc->RU_mask_tx = 0;
-        pthread_mutex_unlock(&gNB_proc->mutex_RU_tx);
-        pthread_mutex_lock( &L1_proc->mutex_RUs_tx);
+        AssertFatal((ret=pthread_mutex_unlock(&gNB_proc->mutex_RU_tx))==0,"mutex_unlock returns %d\n",ret);
+        AssertFatal((ret=pthread_mutex_lock(&L1_proc->mutex_RUs_tx))==0,"mutex_lock returns %d\n",ret);
+        // the thread can now be woken up
+        if (L1_proc->instance_cnt_RUs==-1) {
+          AssertFatal(pthread_cond_signal(&L1_proc->cond_RUs) == 0,
+                      "ERROR pthread_cond_signal for gNB_L1_thread\n");
+        } else AssertFatal(1==0,"gNB TX thread is not ready\n");
        L1_proc->instance_cnt_RUs = 0;
        VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_UE,L1_proc->instance_cnt_RUs);
-        pthread_mutex_unlock( &L1_proc->mutex_RUs_tx );
-        // the thread can now be woken up
-        if (pthread_cond_signal(&L1_proc->cond_RUs) != 0) {
-          LOG_E( PHY, "[eNB] ERROR pthread_cond_signal for eNB TXnp4 thread\n");
-          exit_fun( "ERROR pthread_cond_signal" );
-        }
+        AssertFatal((ret=pthread_mutex_unlock(&L1_proc->mutex_RUs_tx))==0,"mutex_unlock returns %d\n",ret);
       }
     }
   }
...
@@ -213,6 +213,7 @@ uint8_t nr_generate_pdsch(NR_gNB_DLSCH_t dlsch,
   uint16_t encoded_length = nb_symbols*Qm;
   /// CRC, coding, interleaving and rate matching
+  AssertFatal(harq->pdu!=NULL,"harq->pdu is null\n");
   nr_dlsch_encoding(harq->pdu, frame,slot, &dlsch, &frame_parms);
 #ifdef DEBUG_DLSCH
   printf("PDSCH encoding:\nPayload:\n");
...
@@ -284,6 +284,7 @@ void nr_fill_dlsch(PHY_VARS_gNB *gNB,
   NR_DL_gNB_HARQ_t **harq = dlsch->harq_processes;
   /// DLSCH struct
   memcpy((void*)&harq[dlsch->harq_ids[frame%2][slot]]->dlsch_pdu, (void*)dlsch_pdu, sizeof(nfapi_nr_dl_config_dlsch_pdu));
+  AssertFatal(sdu!=NULL,"sdu is null\n");
   harq[dlsch->harq_ids[frame%2][slot]]->pdu = sdu;
...
@@ -150,7 +150,7 @@ void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO){
   uint16_t invalid_pdu = pdu_index == -1;
   uint8_t *sdu = invalid_pdu ? NULL : pdu_index >= tx_pdus ? NULL : TX_req->tx_request_body.tx_pdu_list[pdu_index].segments[0].segment_data;
+  AssertFatal(sdu!=NULL,"sdu is null, pdu_index %d, tx_pdus %d\n");
   handle_nr_nfapi_dlsch_pdu(gNB,frame,slot,&dl_config_pdu->dlsch_pdu, sdu);
   do_oai=1;
 }
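One caveat in the hunk above: as committed, the new AssertFatal passes two %d conversion specifiers but no matching arguments, which is undefined behavior if the assertion ever fires. The intended call is presumably:

    AssertFatal(sdu != NULL, "sdu is null, pdu_index %d, tx_pdus %d\n",
                pdu_index, tx_pdus);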
...
@@ -163,22 +163,12 @@ void nr_schedule_css_dlsch_phytest(module_id_t module_idP,
     TX_req->pdu_index = nr_mac->pdu_index[CC_id]++;
     TX_req->num_segments = 1;
     TX_req->segments[0].segment_length = 8;
-    TX_req->segments[0].segment_data = cc[CC_id].RAR_pdu.payload;
+    TX_req->segments[0].segment_data = cc[CC_id].RAR_pdu.payload[0];
     nr_mac->TX_req[CC_id].tx_request_body.number_of_pdus++;
     nr_mac->TX_req[CC_id].sfn_sf = sfn_sf;
     nr_mac->TX_req[CC_id].tx_request_body.tl.tag = NFAPI_TX_REQUEST_BODY_TAG;
     nr_mac->TX_req[CC_id].header.message_id = NFAPI_TX_REQUEST;
-    TX_req = &nr_mac->TX_req[CC_id].tx_request_body.tx_pdu_list[nr_mac->TX_req[CC_id].tx_request_body.number_of_pdus+1];
-    TX_req->pdu_length = dlsch_pdu_rel15->transport_block_size;
-    TX_req->pdu_index = nr_mac->pdu_index[CC_id]++;
-    TX_req->num_segments = 1;
-    TX_req->segments[0].segment_length = TX_req->pdu_length;
-    TX_req->segments[0].segment_data = nr_mac->UE_list.DLSCH_pdu[CC_id][0][0].payload;
-    nr_mac->TX_req[CC_id].tx_request_body.number_of_pdus++;
-    nr_mac->TX_req[CC_id].sfn_sf = sfn_sf;
-    nr_mac->TX_req[CC_id].tx_request_body.tl.tag = NFAPI_TX_REQUEST_BODY_TAG;
-    nr_mac->TX_req[CC_id].header.message_id = NFAPI_TX_REQUEST;
   }
 }
@@ -303,20 +293,12 @@ void nr_schedule_uss_dlsch_phytest(module_id_t module_idP,
     dl_req->number_pdu+=2;
     TX_req = &nr_mac->TX_req[CC_id].tx_request_body.tx_pdu_list[nr_mac->TX_req[CC_id].tx_request_body.number_of_pdus];
-    TX_req->pdu_length = 6;
-    TX_req->pdu_index = nr_mac->pdu_index[CC_id]++;
-    TX_req->num_segments = 1;
-    TX_req->segments[0].segment_length = 8;
-    nr_mac->TX_req[CC_id].tx_request_body.number_of_pdus++;
-    nr_mac->TX_req[CC_id].sfn_sf = sfn_sf;
-    nr_mac->TX_req[CC_id].tx_request_body.tl.tag = NFAPI_TX_REQUEST_BODY_TAG;
-    nr_mac->TX_req[CC_id].header.message_id = NFAPI_TX_REQUEST;
-    TX_req = &nr_mac->TX_req[CC_id].tx_request_body.tx_pdu_list[nr_mac->TX_req[CC_id].tx_request_body.number_of_pdus+1];
     TX_req->pdu_length = dlsch_pdu_rel15->transport_block_size;
     TX_req->pdu_index = nr_mac->pdu_index[CC_id]++;
     TX_req->num_segments = 1;
-    TX_req->segments[0].segment_length = 8;
+    TX_req->segments[0].segment_data = nr_mac->UE_list.DLSCH_pdu[CC_id][0][0].payload;
+    TX_req->segments[0].segment_length = dlsch_pdu_rel15->transport_block_size+2;
     nr_mac->TX_req[CC_id].tx_request_body.number_of_pdus++;
     nr_mac->TX_req[CC_id].sfn_sf = sfn_sf;
     nr_mac->TX_req[CC_id].tx_request_body.tl.tag = NFAPI_TX_REQUEST_BODY_TAG;
...
@@ -58,7 +58,7 @@ int8_t mac_rrc_nr_data_req(const module_id_t Mod_idP,
   uint8_t sfn_msb = (uint8_t)((frameP>>4)&0x3f);
 #ifdef DEBUG_RRC
-  LOG_I(RRC,"[eNB %d] mac_rrc_data_req to SRB ID=%d\n",Mod_idP,Srb_id);
+  LOG_D(RRC,"[eNB %d] mac_rrc_data_req to SRB ID=%d\n",Mod_idP,Srb_id);
 #endif
   gNB_RRC_INST *rrc;
...
 [*]
-[*] GTKWave Analyzer v3.3.86 (w)1999-2017 BSI
-[*] Wed May 8 09:33:31 2019
+[*] GTKWave Analyzer v3.3.61 (w)1999-2014 BSI
+[*] Sat May 18 17:25:11 2019
 [*]
 [dumpfile] "/tmp/openair_dump_gNB40.vcd"
-[dumpfile_mtime] "Wed May 8 09:32:07 2019"
-[dumpfile_size] 1965634
-[savefile] "/home/espagne/raymond/openairinterface5g/targets/RT/USER/gNB_usrp.gtkw"
-[timestart] 8147990000
-[size] 1920 859
+[dumpfile_mtime] "Sat May 18 17:11:31 2019"
+[dumpfile_size] 53148516
+[savefile] "/home/caracal/raymond/openairinterface5g/targets/RT/USER/gNB_usrp.gtkw"
+[timestart] 11552775390
+[size] 1840 795
 [pos] -1 -1
-*-21.848083 8158741030 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1
+*-13.848083 11552814436 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1
 [sst_width] 386
 [signals_width] 344
 [sst_expanded] 1
@@ -19,16 +19,16 @@ functions.trx_read
 functions.trx_write
 @420
 variables.frame_number_TX0_UE[63:0]
+@421
 variables.frame_number_TX1_UE[63:0]
+@28
+functions.ue_gain_control
 @420
 variables.frame_number_RX1_UE[63:0]
-@25
-variables.trx_ts_ue[63:0]
 @24
 variables.trx_ts[63:0]
 variables.trx_tst[63:0]
-@28
-functions.eNB_thread_rxtx0
-@24
 variables.frame_number_RX0_RU[63:0]
 variables.tti_number_RX0_RU[63:0]
 variables.frame_number_TX0_RU[63:0]
...