Commit 869b9a5c authored by Wang Tsu-Han

Fix for phy-test UL and add wakeup_txfh

parent 2194bc85
......@@ -2478,7 +2478,7 @@ void fill_ulsch(PHY_VARS_eNB *eNB,nfapi_ul_config_ulsch_pdu *ulsch_pdu,int frame
else if(ulsch->harq_processes[harq_pid]->n_DMRS == 7)
ulsch->harq_processes[harq_pid]->n_DMRS2 = 9;
LOG_I(PHY,"[eNB %d][PUSCH %d] Programming PUSCH with n_DMRS2 %d (cshift %d) for Frame %d, Subframe %d\n",
LOG_D(PHY,"[eNB %d][PUSCH %d] Programming PUSCH with n_DMRS2 %d (cshift %d) for Frame %d, Subframe %d\n",
eNB->Mod_id,harq_pid,ulsch->harq_processes[harq_pid]->n_DMRS2,ulsch->harq_processes[harq_pid]->n_DMRS,
frame,subframe);
......@@ -2507,7 +2507,7 @@ void fill_ulsch(PHY_VARS_eNB *eNB,nfapi_ul_config_ulsch_pdu *ulsch_pdu,int frame
else ulsch->harq_processes[harq_pid]->round++;
ulsch->rnti = ulsch_pdu->ulsch_pdu_rel8.rnti;
LOG_I(PHY,"Filling ULSCH %x (UE_id %d) (new_ulsch %d) for Frame %d, Subframe %d : harq_pid %d, first_rb %d, nb_rb %d, rvidx %d, Qm %d, TBS %d, round %d \n",
LOG_D(PHY,"Filling ULSCH %x (UE_id %d) (new_ulsch %d) for Frame %d, Subframe %d : harq_pid %d, first_rb %d, nb_rb %d, rvidx %d, Qm %d, TBS %d, round %d \n",
ulsch->rnti,
UE_id,
new_ulsch,
......
......@@ -42,6 +42,7 @@
#include "UTIL/LOG/vcd_signal_dumper.h"
#include "UTIL/LOG/log.h"
#include <syscall.h>
#include "targets/RT/USER/rt_wrapper.h"
//#define DEBUG_DLSCH_CODING
//#define DEBUG_DLSCH_FREE 1
......@@ -386,7 +387,7 @@ void *te_thread(void *param) {
te_params *tep = (te_params *)param;
wait_sync("te_thread");
//wait_sync("te_thread");
while (!oai_exit) {
......
......@@ -2511,7 +2511,12 @@ int dlsch_modulation(PHY_VARS_eNB* phy_vars_eNB,
switch (mod_order1) {
case 2:
qam_table_s1 = NULL;
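/* choose the QPSK amplitude table for this OFDM symbol: the *_b1 table presumably carries the
   pilot-symbol (rho_B) power scaling, the *_a1 table the data-only (rho_A) scaling */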
if (pilots) {
qam_table_s1 = qpsk_table_b1;
}
else {
qam_table_s1 = qpsk_table_a1;
}
break;
case 4:
if (pilots) {
......
......@@ -1501,6 +1501,7 @@ uint8_t generate_dci_top_emul(PHY_VARS_eNB *phy_vars_eNB,
void generate_64qam_table(void);
void generate_16qam_table(void);
void generate_qpsk_table(void);
uint16_t extract_crc(uint8_t *dci,uint8_t DCI_LENGTH);
......
......@@ -51,6 +51,7 @@
#include "UTIL/LOG/vcd_signal_dumper.h"
//#define DEBUG_ULSCH_DECODING
#include "targets/RT/USER/rt_wrapper.h"
extern int codingw;
......@@ -422,7 +423,7 @@ void *td_thread(void *param) {
thread_top_init("td_thread",1,200000,250000,500000);
pthread_setname_np( pthread_self(),"td processing");
LOG_I(PHY,"thread td created id=%ld\n", syscall(__NR_gettid));
wait_sync("td_thread");
//wait_sync("td_thread");
while (!oai_exit) {
......@@ -819,7 +820,7 @@ int ulsch_decoding_data_all(PHY_VARS_eNB *eNB,int UE_id,int harq_pid,int llr8_fl
int ret = 0;
LTE_eNB_ULSCH_t *ulsch = eNB->ulsch[UE_id];
LTE_UL_eNB_HARQ_t *ulsch_harq = ulsch->harq_processes[harq_pid];
if(ulsch_harq->C>3 && get_nprocs()>=2 && codingw)
if(codingw)
{
ret = ulsch_decoding_data_2thread(eNB,UE_id,harq_pid,llr8_flag);
}
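Note: the fallback branch of this dispatch is outside the hunk; it presumably calls the single-threaded decoder with the same arguments. A minimal sketch of the presumed dispatch, assuming the fallback is ulsch_decoding_data():
if (codingw) {
  /* offload turbo decoding of the code segments to the second decoding thread */
  ret = ulsch_decoding_data_2thread(eNB, UE_id, harq_pid, llr8_flag);
} else {
  /* assumed fallback: decode all segments in the calling thread */
  ret = ulsch_decoding_data(eNB, UE_id, harq_pid, llr8_flag);
}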
......
......@@ -493,7 +493,7 @@ void handle_nfapi_ul_pdu(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc,
if (ul_config_pdu->pdu_type == NFAPI_UL_CONFIG_ULSCH_PDU_TYPE) {
AssertFatal((UE_id = find_ulsch(ul_config_pdu->ulsch_pdu.ulsch_pdu_rel8.rnti,eNB,SEARCH_EXIST_OR_FREE))>=0,
"No existing UE ULSCH for rnti %x\n",rel8->rnti);
LOG_I(PHY,"Applying UL config for UE %d, rnti %x for frame %d, subframe %d\n",
LOG_D(PHY,"Applying UL config for UE %d, rnti %x for frame %d, subframe %d\n",
UE_id,rel8->rnti,frame,subframe);
fill_ulsch(eNB,&ul_config_pdu->ulsch_pdu,frame,subframe);
......@@ -603,7 +603,7 @@ void schedule_response(Sched_Rsp_t *Sched_INFO)
eNB->pdcch_vars[subframe&1].num_dci = 0;
eNB->phich_vars[subframe&1].num_hi = 0;
LOG_I(PHY,"NFAPI: Frame %d, Subframe %d: received %d dl_pdu, %d tx_req, %d hi_dci0_config_req, %d UL_config \n",
LOG_D(PHY,"NFAPI: Frame %d, Subframe %d: received %d dl_pdu, %d tx_req, %d hi_dci0_config_req, %d UL_config \n",
frame,subframe,number_dl_pdu,TX_req->tx_request_body.number_of_pdus,number_hi_dci0_pdu,number_ul_pdu);
......@@ -708,7 +708,7 @@ void schedule_response(Sched_Rsp_t *Sched_INFO)
for (i=0;i<number_ul_pdu;i++) {
ul_config_pdu = &UL_req->ul_config_request_body.ul_config_pdu_list[i];
LOG_I(PHY,"NFAPI: ul_pdu %d : type %d\n",i,ul_config_pdu->pdu_type);
LOG_D(PHY,"NFAPI: ul_pdu %d : type %d\n",i,ul_config_pdu->pdu_type);
AssertFatal(ul_config_pdu->pdu_type == NFAPI_UL_CONFIG_ULSCH_PDU_TYPE ||
ul_config_pdu->pdu_type == NFAPI_UL_CONFIG_ULSCH_HARQ_PDU_TYPE ||
ul_config_pdu->pdu_type == NFAPI_UL_CONFIG_ULSCH_CQI_RI_PDU_TYPE ||
......
......@@ -1294,14 +1294,11 @@ void pusch_procedures(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc)
if (fp->frame_type == FDD) harq_pid = ((10*frame) + subframe)&7;
else harq_pid = subframe%10;
LOG_I(PHY,"Frame %d, subframe %d: PUSCH procedures, harq_pid %d\n",
frame,subframe,harq_pid);
LOG_I(PHY,"rnti = %x\n",eNB->ulsch[0]->rnti);
for (i=0; i<NUMBER_OF_UE_MAX; i++) {
ulsch = eNB->ulsch[i];
ulsch_harq = ulsch->harq_processes[harq_pid];
if (ulsch->rnti>0) LOG_I(PHY,"Frame %d, subframe %d: PUSCH procedures, harq_pid %d, UE %d/%x, status %d, harq_frame %d, harq_subframe %d ulsch_handled %d\n",
if (ulsch->rnti>0) LOG_D(PHY,"Frame %d, subframe %d: PUSCH procedures, harq_pid %d, UE %d/%x, status %d, harq_frame %d, harq_subframe %d ulsch_handled %d\n",
frame,subframe,harq_pid,i,ulsch->rnti,ulsch_harq->status,ulsch_harq->frame,ulsch_harq->subframe, ulsch_harq->handled);
if ((ulsch) &&
......@@ -1331,7 +1328,7 @@ void pusch_procedures(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc)
fp->pusch_config_common.ul_ReferenceSignalsPUSCH.cyclicShift +
nPRS)%12;
LOG_I(PHY,
LOG_D(PHY,
"[eNB %d][PUSCH %d] Frame %d Subframe %d Demodulating PUSCH: dci_alloc %d, rar_alloc %d, round %d, first_rb %d, nb_rb %d, Qm %d, TBS %d, rv %d, cyclic_shift %d (n_DMRS2 %d, cyclicShift_common %d, nprs %d), O_ACK %d, beta_cqi %d \n",
eNB->Mod_id,harq_pid,frame,subframe,
ulsch_harq->dci_alloc,
......
......@@ -148,7 +148,7 @@ static void *feptx_thread(void *param) {
LOG_I(PHY,"thread feptx created id=%ld\n", syscall(__NR_gettid));
//CPU_SET(6, &cpuset);
//pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
wait_sync("feptx_thread");
//wait_sync("feptx_thread");
......@@ -445,7 +445,7 @@ static void *fep_thread(void *param) {
CPU_ZERO(&cpuset);
//CPU_SET(2, &cpuset);
//pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
wait_sync("fep_thread");
//wait_sync("fep_thread");
while (!oai_exit) {
......
......@@ -201,9 +201,8 @@ void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t s
uint32_t cqi_req = 0,cshift,ndi,tpc = 1;
int32_t normalized_rx_power;
int32_t target_rx_power= 178;
int n;
int CC_id = 0;
int N_RB_UL;
int nb_rb = 20;
eNB_MAC_INST *eNB = RC.mac[module_idP];
COMMON_channels_t *cc = eNB->common_channels;
UE_list_t *UE_list=&eNB->UE_list;
......@@ -227,8 +226,6 @@ void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t s
for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
//rnti = UE_RNTI(module_idP,UE_id);
N_RB_UL = to_prb(cc[CC_id].ul_Bandwidth);
printf("////////////////////////////////////*************************N_RB_UL = %d\n",N_RB_UL);
//leave out first RB for PUCCH
first_rb[CC_id] = 1;
// loop over all active UEs
......@@ -242,7 +239,7 @@ void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t s
LOG_I(MAC,"Scheduling for frame %d, subframe %d => harq_pid %d\n",sched_frame,sched_subframe,harq_pid);
RC.eNB[module_idP][CC_id]->pusch_stats_BO[UE_id][(frameP*10)+subframeP] = UE_template->ul_total_buffer;
printf("////////////////////////////////////*************************ul_total_buffer = %d\n",UE_template->ul_total_buffer);
//printf("////////////////////////////////////*************************ul_total_buffer = %d\n",UE_template->ul_total_buffer);
......@@ -251,7 +248,7 @@ void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t s
// this is the normalized RX power and this should be constant (regardless of mcs
normalized_rx_power = UE_sched_ctrl->pusch_snr[CC_id];
printf("////////////////////////////////////*************************normalized_rx_power = %d\n",normalized_rx_power);
//printf("////////////////////////////////////*************************normalized_rx_power = %d\n",normalized_rx_power);
// new transmission
......@@ -266,16 +263,16 @@ void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t s
UE_template->TBS_UL[harq_pid] = get_TBS_UL(UE_template->mcs_UL[harq_pid],N_RB_UL-1);
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx+=N_RB_UL-1;
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_TBS = get_TBS_UL(mcs,N_RB_UL-1);
UE_template->TBS_UL[harq_pid] = get_TBS_UL(mcs,nb_rb);
UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx += nb_rb;
UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_TBS = get_TBS_UL(mcs,nb_rb);
// buffer_occupancy -= TBS;
// bad indices : 20 (40 PRB), 21 (45 PRB), 22 (48 PRB)
//store for possible retransmission
UE_template->nb_rb_ul[harq_pid] = N_RB_UL-1;
UE_template->nb_rb_ul[harq_pid] = nb_rb;
UE_template->first_rb_ul[harq_pid] = first_rb[CC_id];
UE_sched_ctrl->ul_scheduled |= (1<<harq_pid);
......@@ -296,7 +293,7 @@ void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t s
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.rnti = rnti;
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.transmission_power = 6000;
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.resource_block_start = first_rb[CC_id];
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.number_of_resource_block = 20;//N_RB_UL-1;
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.number_of_resource_block = nb_rb;
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.mcs_1 = mcs;
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.cyclic_shift_2_for_drms = cshift;
hi_dci0_pdu->dci_pdu.dci_pdu_rel8.frequency_hopping_enabled_flag = 0;
......@@ -318,7 +315,7 @@ void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t s
eNB->ul_handle,
rnti,
first_rb[CC_id], // resource_block_start
20,//N_RB_UL-1, // number_of_resource_blocks
nb_rb, // number_of_resource_blocks
mcs,
cshift, // cyclic_shift_2_for_drms
0, // frequency_hopping_enabled_flag
......@@ -329,7 +326,7 @@ void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t s
0, // ul_tx_mode
0, // current_tx_nb
0, // n_srs
get_TBS_UL(mcs,N_RB_UL-1)
get_TBS_UL(mcs,nb_rb)
);
#ifdef Rel14
if (UE_template->rach_resource_type>0) { // This is a BL/CE UE allocation
......@@ -352,7 +349,7 @@ void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t s
S_UL_SCHEDULED);
// increment first rb for next UE allocation
first_rb[CC_id]+= N_RB_UL -1;
first_rb[CC_id]+= nb_rb;
} // loop of CC_id
......
......@@ -146,6 +146,7 @@ void init_eNB(int,int);
void stop_eNB(int nb_inst);
int wakeup_tx(PHY_VARS_eNB *eNB,RU_proc_t *ru_proc);
int wakeup_txfh(eNB_rxtx_proc_t *proc,RU_proc_t *ru_proc);
void wakeup_prach_eNB(PHY_VARS_eNB *eNB,RU_t *ru,int frame,int subframe);
#ifdef Rel14
void wakeup_prach_eNB_br(PHY_VARS_eNB *eNB,RU_t *ru,int frame,int subframe);
......@@ -183,7 +184,7 @@ static inline int rxtx(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc, char *thread_nam
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_ENB_DLSCH_ULSCH_SCHEDULER , 0 );
if(get_nprocs() > 8)
if(get_nprocs() >= 8)
{
wakeup_tx(eNB,eNB->proc.ru_proc);
}
......@@ -192,13 +193,7 @@ static inline int rxtx(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc, char *thread_nam
if(oai_exit) return(-1);
phy_procedures_eNB_TX(eNB, proc, no_relay, NULL, 1);
pthread_mutex_lock(&eNB->proc.ru_proc->mutex_eNBs);
++eNB->proc.ru_proc->instance_cnt_eNBs;
eNB->proc.ru_proc->timestamp_tx = proc->timestamp_tx;
eNB->proc.ru_proc->subframe_tx = proc->subframe_tx;
eNB->proc.ru_proc->frame_tx = proc->frame_tx;
pthread_cond_signal(&eNB->proc.ru_proc->cond_eNBs);
pthread_mutex_unlock(&eNB->proc.ru_proc->mutex_eNBs);
wakeup_txfh(proc,eNB->proc.ru_proc);
}
else
{
......@@ -222,7 +217,7 @@ static void* tx_thread(void* param) {
sprintf(thread_name,"TXnp4_%d\n",&eNB->proc.proc_rxtx[0] == proc ? 0 : 1);
thread_top_init(thread_name,1,470000,500000,500000);
wait_sync("tx_thread");
//wait_sync("tx_thread");
while (!oai_exit) {
......@@ -241,13 +236,7 @@ static void* tx_thread(void* param) {
phy_procedures_eNB_TX(eNB, proc, no_relay, NULL, 1);
if (release_thread(&proc->mutex_rxtx,&proc->instance_cnt_rxtx,thread_name)<0) break;
pthread_mutex_lock(&eNB_proc->ru_proc->mutex_eNBs);
++eNB_proc->ru_proc->instance_cnt_eNBs;
eNB_proc->ru_proc->timestamp_tx = proc->timestamp_tx;
eNB_proc->ru_proc->subframe_tx = proc->subframe_tx;
eNB_proc->ru_proc->frame_tx = proc->frame_tx;
pthread_cond_signal(&eNB_proc->ru_proc->cond_eNBs);
pthread_mutex_unlock(&eNB_proc->ru_proc->mutex_eNBs);
wakeup_txfh(proc,eNB_proc->ru_proc);
}
return 0;
......@@ -284,7 +273,7 @@ static void* eNB_thread_rxtx( void* param ) {
//CPU_SET(3, &cpuset);
//pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
wait_sync("eNB_thread_rxtx");
//wait_sync("eNB_thread_rxtx");
while (!oai_exit) {
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_eNB_PROC_RXTX0+(proc->subframe_rx&1), 0 );
......@@ -344,7 +333,6 @@ static void wait_system_ready (char *message, volatile int *start_flag) {
void eNB_top(PHY_VARS_eNB *eNB, int frame_rx, int subframe_rx, char *string,RU_t *ru)
{
RU_proc_t *ru_proc = &ru->proc;
eNB_proc_t *proc = &eNB->proc;
eNB_rxtx_proc_t *proc_rxtx = &proc->proc_rxtx[0];
......@@ -370,6 +358,36 @@ void eNB_top(PHY_VARS_eNB *eNB, int frame_rx, int subframe_rx, char *string,RU_t
}
}
int wakeup_txfh(eNB_rxtx_proc_t *proc,RU_proc_t *ru_proc) {
struct timespec wait;
wait.tv_sec=0;
wait.tv_nsec=5000000L;
if (pthread_mutex_timedlock(&ru_proc->mutex_eNBs,&wait) != 0) {
LOG_E( PHY, "[eNB] ERROR pthread_mutex_lock for eNB TX1 thread %d (IC %d)\n", ru_proc->subframe_rx&1,ru_proc->instance_cnt_eNBs );
exit_fun( "error locking mutex_eNB" );
return(-1);
}
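/* hand the TX timestamp, frame and subframe of this subframe over to the RU TX (fronthaul) thread */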
++ru_proc->instance_cnt_eNBs;
ru_proc->timestamp_tx = proc->timestamp_tx;
ru_proc->subframe_tx = proc->subframe_tx;
ru_proc->frame_tx = proc->frame_tx;
// the thread can now be woken up
if (pthread_cond_signal(&ru_proc->cond_eNBs) != 0) {
LOG_E( PHY, "[eNB] ERROR pthread_cond_signal for eNB TXnp4 thread\n");
exit_fun( "ERROR pthread_cond_signal" );
return(-1);
}
pthread_mutex_unlock( &ru_proc->mutex_eNBs );
return(0);
}
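For reference, the RU side consumes this wakeup through the usual wait_on_condition()/release_thread() pair on mutex_eNBs/cond_eNBs/instance_cnt_eNBs (see the ru_thread_tx hunk below). A minimal sketch of that counter protocol, assuming the common OAI convention that instance_cnt_eNBs starts at -1, is incremented by the producer and decremented on release:
/* consumer side (e.g. ru_thread_tx), sketched under the assumed instance_cnt convention */
pthread_mutex_lock(&ru_proc->mutex_eNBs);
while (ru_proc->instance_cnt_eNBs < 0)            /* sleep until wakeup_txfh() raises the counter */
  pthread_cond_wait(&ru_proc->cond_eNBs, &ru_proc->mutex_eNBs);
pthread_mutex_unlock(&ru_proc->mutex_eNBs);
/* ... TX front-end processing for ru_proc->frame_tx / ru_proc->subframe_tx ... */
pthread_mutex_lock(&ru_proc->mutex_eNBs);         /* release: mark the slot free again */
ru_proc->instance_cnt_eNBs--;
pthread_mutex_unlock(&ru_proc->mutex_eNBs);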
int wakeup_tx(PHY_VARS_eNB *eNB,RU_proc_t *ru_proc) {
eNB_proc_t *proc=&eNB->proc;
......@@ -453,7 +471,10 @@ int wakeup_rxtx(PHY_VARS_eNB *eNB,RU_t *ru) {
wait.tv_sec=0;
wait.tv_nsec=5000000L;
/* accept some delay in processing - up to 5ms */
/* accept some delay in processing - up to 50us */
if (proc_rxtx->instance_cnt_rxtx == 0) {
usleep(50);
}
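/* still busy after the 50us grace period: the subframe is dropped below */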
if (proc_rxtx->instance_cnt_rxtx == 0) {
LOG_E(PHY,"Frame %d, subframe %d: RXTX0 thread busy, dropping\n",proc_rxtx->frame_rx,proc_rxtx->subframe_rx);
return(-1);
......@@ -467,10 +488,6 @@ int wakeup_rxtx(PHY_VARS_eNB *eNB,RU_t *ru) {
return(-1);
}
/*if (proc_rxtx->instance_cnt_rxtx == 0) {
LOG_E(PHY,"Frame %d, subframe %d: RXTX0 thread busy, dropping\n",proc_rxtx->frame_rx,proc_rxtx->subframe_rx);
return(-1);
}*/
++proc_rxtx->instance_cnt_rxtx;
......@@ -644,7 +661,7 @@ static void* eNB_thread_prach( void* param ) {
thread_top_init("eNB_thread_prach",1,500000,1000000,20000000);
wait_sync("eNB_thread_prach");
//wait_sync("eNB_thread_prach");
while (!oai_exit) {
......
......@@ -986,7 +986,7 @@ static void* ru_thread_prach( void* param ) {
ru_thread_prach_status = 0;
thread_top_init("ru_thread_prach",1,500000,1000000,20000000);
wait_sync("ru_thread_prach");
//wait_sync("ru_thread_prach");
while (!oai_exit) {
......@@ -1026,7 +1026,7 @@ static void* ru_thread_prach_br( void* param ) {
ru_thread_prach_status = 0;
thread_top_init("ru_thread_prach_br",1,500000,1000000,20000000);
wait_sync("ru_thread_prach_br");
//wait_sync("ru_thread_prach_br");
while (!oai_exit) {
......@@ -1406,7 +1406,6 @@ static void* ru_stats_thread(void* param) {
static void* ru_thread_tx( void* param ) {
RU_t *ru = (RU_t*)param;
RU_proc_t *proc = &ru->proc;
int subframe=0, frame=0;
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
......@@ -1415,7 +1414,7 @@ static void* ru_thread_tx( void* param ) {
//CPU_SET(5, &cpuset);
//pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
wait_sync("ru_thread_tx");
//wait_sync("ru_thread_tx");
wait_on_condition(&proc->mutex_FH1,&proc->cond_FH1,&proc->instance_cnt_FH1,"ru_thread_tx");
......@@ -1430,13 +1429,13 @@ static void* ru_thread_tx( void* param ) {
// wait until eNBs are finished subframe RX n and TX n+4
wait_on_condition(&proc->mutex_eNBs,&proc->cond_eNBs,&proc->instance_cnt_eNBs,"ru_thread_tx");
#ifdef EMULATE_RF
#else
// do TX front-end processing if needed (precoding and/or IDFTs)
if (ru->feptx_prec) ru->feptx_prec(ru);
// do OFDM if needed
if ((ru->fh_north_asynch_in == NULL) && (ru->feptx_ofdm)) ru->feptx_ofdm(ru);
#ifdef EMULATE_RF
#else
// do outgoing fronthaul (south) if needed
if ((ru->fh_north_asynch_in == NULL) && (ru->fh_south_out)) ru->fh_south_out(ru);
......@@ -1445,7 +1444,7 @@ static void* ru_thread_tx( void* param ) {
release_thread(&proc->mutex_eNBs,&proc->instance_cnt_eNBs,"ru_thread_tx");
}
release_thread(&proc->mutex_FH1,&proc->instance_cnt_FH1,"ru_thread_tx");
return 0;
}
......@@ -1543,7 +1542,7 @@ static void* ru_thread( void* param ) {
if ((ru->is_slave) && (ru->if_south == LOCAL_RF)) do_ru_synch(ru);
pthread_mutex_lock(&proc->mutex_FH1);
proc->instance_cnt_FH1=0;
proc->instance_cnt_FH1 = 0;
pthread_mutex_unlock(&proc->mutex_FH1);
pthread_cond_signal(&proc->cond_FH1);
#endif
......