Commit 26d5eb90 authored by Raymond Knopp

intermediate commit to synch machine

parent 2da6fb8e
......@@ -186,12 +186,12 @@ uint8_t nr_generate_dci_top(NR_gNB_PDCCH pdcch_vars,
cset_start_symb = pdcch_params.first_symbol;
cset_nsymb = pdcch_params.n_symb;
dci_idx = 0;
LOG_I(PHY, "Coreset starting subcarrier %d on symbol %d (%d symbols)\n", cset_start_sc, cset_start_symb, cset_nsymb);
LOG_D(PHY, "Coreset starting subcarrier %d on symbol %d (%d symbols)\n", cset_start_sc, cset_start_symb, cset_nsymb);
// DMRS length is per OFDM symbol
uint16_t dmrs_length = (pdcch_params.precoder_granularity == NFAPI_NR_CSET_ALL_CONTIGUOUS_RBS)?
(pdcch_params.n_rb*6) : (dci_alloc.L*36/cset_nsymb); //2(QPSK)*3(per RB)*6(REG per CCE)
uint16_t encoded_length = dci_alloc.L*108; //2(QPSK)*9(per RB)*6(REG per CCE)
LOG_I(PHY, "DMRS length per symbol %d\t DCI encoded length %d\n", dmrs_length, encoded_length);
LOG_D(PHY, "DMRS length per symbol %d\t DCI encoded length %d\n", dmrs_length, encoded_length);
/// DMRS QPSK modulation
/*There is a need to shift from which index the pregenerated DMRS sequence is used
......@@ -201,7 +201,7 @@ uint8_t nr_generate_dci_top(NR_gNB_PDCCH pdcch_vars,
gold_pdcch_dmrs[symb] += (pdcch_params.rb_offset*3)>>5;
dmrs_offset = (pdcch_params.rb_offset*3)&0x1f;
LOG_I(PHY, "PDCCH DMRS offset %d\n", dmrs_offset);
LOG_D(PHY, "PDCCH DMRS offset %d\n", dmrs_offset);
}
for (int symb=cset_start_symb; symb<cset_start_symb + pdcch_params.n_symb; symb++) {
......
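The two length computations in nr_generate_dci_top() above boil down to PDCCH resource arithmetic: one CCE is 6 REGs, one REG is one RB in one OFDM symbol carrying 9 data REs and 3 DMRS REs, and QPSK gives 2 bits per RE. A minimal standalone sketch of the same arithmetic (helper name and sample values are illustrative, not the OAI API):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative helper mirroring the arithmetic in nr_generate_dci_top():
 * - "all contiguous RBs" precoder granularity: DMRS over the whole CORESET
 *   bandwidth, i.e. n_rb * 3 DMRS REs * 2 bits per symbol;
 * - otherwise: DMRS only under the L CCEs of the DCI, i.e.
 *   L * 6 REGs * 3 DMRS REs * 2 bits, spread over cset_nsymb symbols.
 * The encoded DCI always occupies L * 6 REGs * 9 data REs * 2 bits = L * 108. */
static void pdcch_lengths(int all_contiguous_rbs, uint16_t n_rb, uint8_t L,
                          uint8_t cset_nsymb,
                          uint16_t *dmrs_bits_per_symb, uint16_t *encoded_bits)
{
  *dmrs_bits_per_symb = all_contiguous_rbs ? (uint16_t)(n_rb * 6)
                                           : (uint16_t)(L * 36 / cset_nsymb);
  *encoded_bits = (uint16_t)(L * 108);
}

int main(void)
{
  uint16_t dmrs, enc;
  pdcch_lengths(0, 48 /* CORESET RBs */, 8 /* aggregation level */, 2, &dmrs, &enc);
  printf("DMRS bits per symbol %d, encoded DCI bits %d\n", dmrs, enc); /* 144, 864 */
  return 0;
}
```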
......@@ -71,7 +71,7 @@ void nr_fill_cce_list(NR_gNB_DCI_ALLOC_t* dci_alloc, uint16_t n_shift, uint8_t m
tmp = L * (( Y + (m*N_cce)/(L*M_s_max) + n_CI ) % CEILIDIV(N_cce,L));
LOG_I(PHY, "CCE list generation for candidate %d: bundle size %d ilv size %d tmp %d\n", m, bsize, R, tmp);
LOG_D(PHY, "CCE list generation for candidate %d: bundle size %d ilv size %d tmp %d\n", m, bsize, R, tmp);
for (uint8_t cce_idx=0; cce_idx<L; cce_idx++) {
cce = &dci_alloc->cce_list[cce_idx];
cce->cce_idx = tmp + cce_idx;
......@@ -495,8 +495,8 @@ void nr_fill_dci_and_dlsch(PHY_VARS_gNB *gNB,
break;
}
LOG_I(PHY, "DCI PDU: [0]->0x%lx \t [1]->0x%lx \n",dci_pdu[0], dci_pdu[1]);
LOG_I(PHY, "DCI type %d payload (size %d) generated on candidate %d\n", dci_alloc->pdcch_params.dci_format, dci_alloc->size, cand_idx);
LOG_D(PHY, "DCI PDU: [0]->0x%lx \t [1]->0x%lx \n",dci_pdu[0], dci_pdu[1]);
LOG_D(PHY, "DCI type %d payload (size %d) generated on candidate %d\n", dci_alloc->pdcch_params.dci_format, dci_alloc->size, cand_idx);
/// DLSCH struct
memcpy((void*)&harq[dci_alloc->harq_pid]->dlsch_pdu, (void*)dlsch_pdu, sizeof(nfapi_nr_dl_config_dlsch_pdu));
......
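In nr_fill_cce_list() above, the starting CCE of candidate m follows the TS 38.213-style search-space hashing: each candidate occupies L consecutive CCEs beginning at L times a hash of Y, m and the CORESET size. A small sketch mirroring that expression (function and parameter names are illustrative; Y is 0 for a common search space):

```c
#include <stdint.h>
#include <stdio.h>

#define CEILIDIV(a,b) (((a) + (b) - 1) / (b))

/* First CCE index of PDCCH candidate m, mirroring the expression in
 * nr_fill_cce_list():  L * ((Y + m*N_cce/(L*M_s_max) + n_CI) % ceil(N_cce/L)).
 * The candidate then occupies CCEs first_cce .. first_cce + L - 1. */
static uint16_t candidate_first_cce(uint16_t Y, uint8_t m, uint16_t N_cce,
                                    uint8_t L, uint8_t M_s_max, uint8_t n_CI)
{
  return (uint16_t)(L * ((Y + (m * N_cce) / (L * M_s_max) + n_CI)
                         % CEILIDIV(N_cce, L)));
}

int main(void)
{
  /* Example: 48-RB, 2-symbol CORESET -> 96 REGs -> 16 CCEs; aggregation
   * level 8, 2 candidates, common search space (Y = 0). */
  for (uint8_t m = 0; m < 2; m++) {
    uint16_t first = candidate_first_cce(0, m, 16, 8, 2, 0);
    printf("candidate %d: CCEs %d..%d\n", m, first, first + 7);
  }
  return 0;
}
```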
......@@ -107,7 +107,7 @@ void nr_get_tbs(nfapi_nr_dl_config_dlsch_pdu *dlsch_pdu,
nfapi_nr_dl_config_dci_dl_pdu dci_pdu,
nfapi_nr_config_request_t config) {
LOG_I(MAC, "TBS calculation\n");
LOG_D(MAC, "TBS calculation\n");
nfapi_nr_dl_config_pdcch_parameters_rel15_t params_rel15 = dci_pdu.pdcch_params_rel15;
nfapi_nr_dl_config_dlsch_pdu_rel15_t *dlsch_rel15 = &dlsch_pdu->dlsch_pdu_rel15;
......@@ -121,7 +121,7 @@ void nr_get_tbs(nfapi_nr_dl_config_dlsch_pdu *dlsch_pdu,
uint8_t N_sh_symb = dlsch_rel15->nb_symbols;
uint8_t Imcs = dlsch_rel15->mcs_idx;
uint16_t N_RE_prime = NR_NB_SC_PER_RB*N_sh_symb - N_PRB_DMRS - N_PRB_oh;
LOG_I(MAC, "N_RE_prime %d for %d symbols %d DMRS per PRB and %d overhead\n", N_RE_prime, N_sh_symb, N_PRB_DMRS, N_PRB_oh);
LOG_D(MAC, "N_RE_prime %d for %d symbols %d DMRS per PRB and %d overhead\n", N_RE_prime, N_sh_symb, N_PRB_DMRS, N_PRB_oh);
uint16_t N_RE, Ninfo, Ninfo_prime, C, TBS=0, R;
uint8_t table_idx, Qm, n, scale;
......@@ -167,7 +167,7 @@ void nr_get_tbs(nfapi_nr_dl_config_dlsch_pdu *dlsch_pdu,
dlsch_rel15->nb_re_dmrs = N_PRB_DMRS;
dlsch_rel15->nb_mod_symbols = N_RE_prime*dlsch_rel15->n_prb*dlsch_rel15->nb_codewords;
LOG_I(MAC, "TBS %d : N_RE %d N_PRB_DMRS %d N_sh_symb %d N_PRB_oh %d Ninfo %d Ninfo_prime %d R %d Qm %d table %d scale %d nb_symbols %d\n",
LOG_D(MAC, "TBS %d : N_RE %d N_PRB_DMRS %d N_sh_symb %d N_PRB_oh %d Ninfo %d Ninfo_prime %d R %d Qm %d table %d scale %d nb_symbols %d\n",
TBS, N_RE, N_PRB_DMRS, N_sh_symb, N_PRB_oh, Ninfo, Ninfo_prime, R, Qm, table_idx, scale, dlsch_rel15->nb_mod_symbols);
}
......
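nr_get_tbs() above starts from the per-PRB resource count of TS 38.214: N_RE' = 12·N_sh_symb − N_PRB_DMRS − N_PRB_oh, which is then capped at 156 per PRB and scaled by the allocation size, code rate, modulation order and layer count to give Ninfo. A worked sketch of those first steps with illustrative values (the quantisation and LDPC segmentation that turn Ninfo into the final TBS are omitted):

```c
#include <stdint.h>
#include <stdio.h>

#define NR_NB_SC_PER_RB 12

/* Intermediate information-bit estimate of the TBS procedure:
 *   N_RE' = 12*N_sh_symb - N_PRB_DMRS - N_PRB_oh        (per PRB)
 *   N_RE  = min(156, N_RE') * n_prb
 *   Ninfo = N_RE * R * Qm * nb_layers
 * The quantisation and segmentation steps that follow are not shown. */
static uint32_t nr_ninfo(uint8_t N_sh_symb, uint16_t N_PRB_DMRS, uint16_t N_PRB_oh,
                         uint16_t n_prb, double R, uint8_t Qm, uint8_t nb_layers)
{
  uint16_t N_RE_prime = NR_NB_SC_PER_RB * N_sh_symb - N_PRB_DMRS - N_PRB_oh;
  uint16_t per_prb = (N_RE_prime > 156) ? 156 : N_RE_prime;
  uint32_t N_RE = (uint32_t)per_prb * n_prb;
  return (uint32_t)(N_RE * R * Qm * nb_layers);
}

int main(void)
{
  /* 12 data symbols, 12 DMRS REs per PRB, no extra overhead, 50 PRBs,
   * QPSK (Qm = 2), code rate 120/1024, one layer. */
  printf("Ninfo = %u\n", nr_ninfo(12, 12, 0, 50, 120.0 / 1024, 2, 1)); /* 1546 */
  return 0;
}
```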
......@@ -45,7 +45,7 @@ void handle_nr_nfapi_bch_pdu(PHY_VARS_gNB *gNB,
AssertFatal(dl_config_pdu->bch_pdu_rel15.length == 3, "BCH PDU has length %d != 3\n",
dl_config_pdu->bch_pdu_rel15.length);
LOG_I(PHY,"pbch_pdu[0]: %x,pbch_pdu[1]: %x,gNB->pbch_pdu[2]: %x\n",sdu[0],sdu[1],sdu[2]);
LOG_D(PHY,"pbch_pdu[0]: %x,pbch_pdu[1]: %x,gNB->pbch_pdu[2]: %x\n",sdu[0],sdu[1],sdu[2]);
gNB->pbch_pdu[0] = sdu[2];
gNB->pbch_pdu[1] = sdu[1];
gNB->pbch_pdu[2] = sdu[0];
......@@ -63,12 +63,12 @@ void handle_nfapi_nr_dci_dl_pdu(PHY_VARS_gNB *gNB,
int idx = slot&1;
NR_gNB_PDCCH *pdcch_vars = &gNB->pdcch_vars;
LOG_I(PHY,"Frame %d, Slot %d: DCI processing - populating pdcch_vars->dci_alloc[%d] proc:slot_tx:%d idx:%d pdcch_vars->num_dci:%d\n",frame,slot, pdcch_vars->num_dci, proc->slot_tx, idx, pdcch_vars->num_dci);
LOG_D(PHY,"Frame %d, Slot %d: DCI processing - populating pdcch_vars->dci_alloc[%d] proc:slot_tx:%d idx:%d pdcch_vars->num_dci:%d\n",frame,slot, pdcch_vars->num_dci, proc->slot_tx, idx, pdcch_vars->num_dci);
// copy dci configuration into gNB structure
nr_fill_dci_and_dlsch(gNB,frame,slot,proc,&pdcch_vars->dci_alloc[pdcch_vars->num_dci],&dl_config_pdu->dci_dl_pdu,&dl_config_dlsch_pdu->dlsch_pdu);
LOG_I(PHY,"Frame %d, Slot %d: DCI processing - populated pdcch_vars->dci_alloc[%d] proc:slot_tx:%d idx:%d pdcch_vars->num_dci:%d\n",proc->frame_tx,proc->slot_tx, pdcch_vars->num_dci, proc->slot_tx, idx, pdcch_vars->num_dci);
LOG_D(PHY,"Frame %d, Slot %d: DCI processing - populated pdcch_vars->dci_alloc[%d] proc:slot_tx:%d idx:%d pdcch_vars->num_dci:%d\n",proc->frame_tx,proc->slot_tx, pdcch_vars->num_dci, proc->slot_tx, idx, pdcch_vars->num_dci);
}
......@@ -112,7 +112,6 @@ void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO){
for (i=0;i<number_dl_pdu;i++) {
dl_config_pdu = &DL_req->dl_config_request_body.dl_config_pdu_list[i];
LOG_D(PHY,"NFAPI: dl_pdu %d : type %d\n",i,dl_config_pdu->pdu_type);
printf("NFAPI: dl_pdu %d : type %d\n",i,dl_config_pdu->pdu_type);
switch (dl_config_pdu->pdu_type) {
case NFAPI_NR_DL_CONFIG_BCH_PDU_TYPE:
AssertFatal(dl_config_pdu->bch_pdu_rel15.pdu_index < TX_req->tx_request_body.number_of_pdus,
......
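handle_nr_nfapi_bch_pdu() above copies the 3-byte encoded MIB out of the transport SDU with the byte order reversed before the PHY uses it. A self-contained illustration of that copy (the sample bytes are arbitrary):

```c
#include <stdint.h>
#include <stdio.h>

/* The NFAPI BCH PDU carries the 3-byte encoded MIB; the PHY stores it with
 * the byte order reversed (sdu[2] first), as in handle_nr_nfapi_bch_pdu(). */
static void store_pbch_pdu(uint8_t pbch_pdu[3], const uint8_t sdu[3])
{
  pbch_pdu[0] = sdu[2];
  pbch_pdu[1] = sdu[1];
  pbch_pdu[2] = sdu[0];
}

int main(void)
{
  const uint8_t sdu[3] = {0x41, 0x06, 0x08};
  uint8_t pbch_pdu[3];
  store_pbch_pdu(pbch_pdu, sdu);
  printf("%02x %02x %02x\n", pbch_pdu[0], pbch_pdu[1], pbch_pdu[2]); /* 08 06 41 */
  return 0;
}
```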
......@@ -161,7 +161,7 @@ void phy_procedures_gNB_TX(PHY_VARS_gNB *gNB,
num_pdsch_rnti = gNB->pdcch_vars.num_pdsch_rnti;
if (num_dci) {
LOG_I(PHY, "[gNB %d] Frame %d slot %d \
LOG_D(PHY, "[gNB %d] Frame %d slot %d \
Calling nr_generate_dci_top (number of DCI %d)\n", gNB->Mod_id, frame, slot, num_dci);
if (nfapi_mode == 0 || nfapi_mode == 1) {
......@@ -171,7 +171,7 @@ void phy_procedures_gNB_TX(PHY_VARS_gNB *gNB,
AMP, *fp, *cfg);
if (num_pdsch_rnti) {
LOG_I(PHY, "PDSCH generation started (%d)\n", num_pdsch_rnti);
LOG_D(PHY, "PDSCH generation started (%d)\n", num_pdsch_rnti);
nr_generate_pdsch(*gNB->dlsch[0][0],
gNB->pdcch_vars.dci_alloc[0],
gNB->nr_gold_pdsch_dmrs[slot],
......
......@@ -60,7 +60,6 @@
#include "RRC_config_tools.h"
#include "enb_paramdef.h"
extern uint16_t sf_ahead;
extern void set_parallel_conf(char *parallel_conf);
extern void set_worker_conf(char *worker_conf);
extern PARALLEL_CONF_t get_thread_parallel_conf(void);
......@@ -263,7 +262,6 @@ void RCconfig_L1(void) {
}
if (strcmp(*(L1_ParamList.paramarray[j][L1_TRANSPORT_N_PREFERENCE_IDX].strptr), "local_mac") == 0) {
sf_ahead = 4; // Need 4 subframe gap between RX and TX
} else if (strcmp(*(L1_ParamList.paramarray[j][L1_TRANSPORT_N_PREFERENCE_IDX].strptr), "nfapi") == 0) {
RC.eNB[j][0]->eth_params_n.local_if_name = strdup(*(L1_ParamList.paramarray[j][L1_LOCAL_N_IF_NAME_IDX].strptr));
RC.eNB[j][0]->eth_params_n.my_addr = strdup(*(L1_ParamList.paramarray[j][L1_LOCAL_N_ADDRESS_IDX].strptr));
......@@ -273,7 +271,6 @@ void RCconfig_L1(void) {
RC.eNB[j][0]->eth_params_n.my_portd = *(L1_ParamList.paramarray[j][L1_LOCAL_N_PORTD_IDX].iptr);
RC.eNB[j][0]->eth_params_n.remote_portd = *(L1_ParamList.paramarray[j][L1_REMOTE_N_PORTD_IDX].iptr);
RC.eNB[j][0]->eth_params_n.transp_preference = ETH_UDP_MODE;
sf_ahead = 2; // Cannot cope with 4 subframes betweem RX and TX - set it to 2
RC.nb_macrlc_inst = 1; // This is used by mac_top_init_eNB()
// This is used by init_eNB_afterRU()
RC.nb_CC = (int *)malloc((1+RC.nb_inst)*sizeof(int));
......@@ -359,7 +356,6 @@ void RCconfig_macrlc() {
RC.mac[j]->eth_params_s.my_portd = *(MacRLC_ParamList.paramarray[j][MACRLC_LOCAL_S_PORTD_IDX].iptr);
RC.mac[j]->eth_params_s.remote_portd = *(MacRLC_ParamList.paramarray[j][MACRLC_REMOTE_S_PORTD_IDX].iptr);
RC.mac[j]->eth_params_s.transp_preference = ETH_UDP_MODE;
sf_ahead = 2; // Cannot cope with 4 subframes betweem RX and TX - set it to 2
printf("**************** vnf_port:%d\n", RC.mac[j]->eth_params_s.my_portc);
configure_nfapi_vnf(RC.mac[j]->eth_params_s.my_addr, RC.mac[j]->eth_params_s.my_portc);
printf("**************** RETURNED FROM configure_nfapi_vnf() vnf_port:%d\n", RC.mac[j]->eth_params_s.my_portc);
......
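The sf_ahead lines dropped from RCconfig_L1()/RCconfig_macrlc() above encode the RX-to-TX processing gap in subframes: 4 when the MAC is local, 2 for the nFAPI split. The gap shifts the TX timing relative to RX with the usual modulo arithmetic, the same pattern the NR code applies to sl_ahead later in this commit; a hedged sketch (helper name and values are illustrative):

```c
#include <stdio.h>

/* Derive the TX timing from the RX timing and a processing gap, mirroring the
 * sf_ahead / sl_ahead pattern used in the L1 threads (illustrative only). */
static void tx_timing(int frame_rx, int sf_rx, int sf_ahead, int sf_per_frame,
                      int *frame_tx, int *sf_tx)
{
  *sf_tx    = (sf_rx + sf_ahead) % sf_per_frame;
  *frame_tx = (sf_rx > (sf_per_frame - 1 - sf_ahead)) ? (frame_rx + 1) & 1023
                                                      : frame_rx;
}

int main(void)
{
  int f, s;
  tx_timing(100, 8, 4, 10, &f, &s);       /* local MAC: 4-subframe gap   */
  printf("RX 100.8 -> TX %d.%d\n", f, s); /* 101.2                        */
  tx_timing(100, 8, 2, 10, &f, &s);       /* nFAPI split: 2-subframe gap */
  printf("RX 100.8 -> TX %d.%d\n", f, s); /* 101.0                        */
  return 0;
}
```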
......@@ -61,7 +61,7 @@ void nr_schedule_css_dlsch_phytest(module_id_t module_idP,
int scs = get_dlscs(cfg);
int slots_per_frame = get_spf(cfg);
for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
LOG_I(MAC, "Scheduling common search space DCI type 1 for CC_id %d\n",CC_id);
LOG_D(MAC, "Scheduling common search space DCI type 1 for CC_id %d\n",CC_id);
dl_req = &nr_mac->DL_req[CC_id].dl_config_request_body;
......@@ -115,7 +115,7 @@ void nr_schedule_css_dlsch_phytest(module_id_t module_idP,
pdu_rel15->pucch_resource_indicator = 7;
pdu_rel15->pdsch_to_harq_feedback_timing_indicator = 7;
LOG_I(MAC, "[gNB scheduler phytest] DCI type 1 payload: freq_alloc %d, time_alloc %d, vrb to prb %d, mcs %d tb_scaling %d ndi %d rv %d\n",
LOG_D(MAC, "[gNB scheduler phytest] DCI type 1 payload: freq_alloc %d, time_alloc %d, vrb to prb %d, mcs %d tb_scaling %d ndi %d rv %d\n",
pdu_rel15->frequency_domain_assignment,
pdu_rel15->time_domain_assignment,
pdu_rel15->vrb_to_prb_mapping,
......@@ -145,7 +145,7 @@ void nr_schedule_css_dlsch_phytest(module_id_t module_idP,
params_rel15->sfn_mod2,
params_rel15->first_slot);
nr_get_tbs(&dl_config_dlsch_pdu->dlsch_pdu, dl_config_dci_pdu->dci_dl_pdu, *cfg);
LOG_I(MAC, "DLSCH PDU: start PRB %d n_PRB %d start symbol %d nb_symbols %d nb_layers %d nb_codewords %d mcs %d\n",
LOG_D(MAC, "DLSCH PDU: start PRB %d n_PRB %d start symbol %d nb_symbols %d nb_layers %d nb_codewords %d mcs %d\n",
dlsch_pdu_rel15->start_prb,
dlsch_pdu_rel15->n_prb,
dlsch_pdu_rel15->start_symbol,
......@@ -206,7 +206,7 @@ void nr_schedule_uss_dlsch_phytest(module_id_t module_idP,
int scs = get_dlscs(cfg);
int slots_per_frame = get_spf(cfg);
for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
LOG_I(MAC, "Scheduling UE specific search space DCI type 1 for CC_id %d\n",CC_id);
LOG_D(MAC, "Scheduling UE specific search space DCI type 1 for CC_id %d\n",CC_id);
nfapi_nr_coreset_t* coreset = &nr_mac->coreset[CC_id][1];
nfapi_nr_search_space_t* search_space = &nr_mac->search_space[CC_id][1];
......@@ -259,7 +259,7 @@ void nr_schedule_uss_dlsch_phytest(module_id_t module_idP,
pdu_rel15->pucch_resource_indicator = 7;
pdu_rel15->pdsch_to_harq_feedback_timing_indicator = 7;
LOG_I(MAC, "[gNB scheduler phytest] DCI type 1 payload: freq_alloc %d, time_alloc %d, vrb to prb %d, mcs %d tb_scaling %d ndi %d rv %d\n",
LOG_D(MAC, "[gNB scheduler phytest] DCI type 1 payload: freq_alloc %d, time_alloc %d, vrb to prb %d, mcs %d tb_scaling %d ndi %d rv %d\n",
pdu_rel15->frequency_domain_assignment,
pdu_rel15->time_domain_assignment,
pdu_rel15->vrb_to_prb_mapping,
......@@ -273,7 +273,7 @@ void nr_schedule_uss_dlsch_phytest(module_id_t module_idP,
params_rel15->dci_format = NFAPI_NR_DL_DCI_FORMAT_1_0;
//params_rel15->aggregation_level = 1;
LOG_I(MAC, "DCI params: rnti %d, rnti_type %d, dci_format %d, config type %d\n \
LOG_D(MAC, "DCI params: rnti %d, rnti_type %d, dci_format %d, config type %d\n \
coreset params: mux_pattern %d, n_rb %d, n_symb %d, rb_offset %d \n \
ss params : first symb %d, ss type %d\n",
params_rel15->rnti,
......@@ -287,7 +287,7 @@ void nr_schedule_uss_dlsch_phytest(module_id_t module_idP,
params_rel15->first_symbol,
params_rel15->search_space_type);
nr_get_tbs(&dl_config_dlsch_pdu->dlsch_pdu, dl_config_dci_pdu->dci_dl_pdu, *cfg);
LOG_I(MAC, "DLSCH PDU: start PRB %d n_PRB %d start symbol %d nb_symbols %d nb_layers %d nb_codewords %d mcs %d\n",
LOG_D(MAC, "DLSCH PDU: start PRB %d n_PRB %d start symbol %d nb_symbols %d nb_layers %d nb_codewords %d mcs %d\n",
dlsch_pdu_rel15->start_prb,
dlsch_pdu_rel15->n_prb,
dlsch_pdu_rel15->start_symbol,
......
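The frequency_domain_assignment logged by the phytest schedulers above normally carries the resource indication value (RIV) of downlink resource allocation type 1. A small sketch of the standard RIV encoding from TS 38.214, independent of whichever OAI helper fills the field at this point (values are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* RIV encoding for DL resource allocation type 1 (TS 38.214, clause 5.1.2.2.2):
 * a contiguous allocation of L_RBs PRBs starting at RB_start inside a BWP of
 * N_BWP PRBs is packed into a single integer. */
static uint32_t riv_type1(uint32_t N_BWP, uint32_t RB_start, uint32_t L_RBs)
{
  if ((L_RBs - 1) <= N_BWP / 2)
    return N_BWP * (L_RBs - 1) + RB_start;
  else
    return N_BWP * (N_BWP - L_RBs + 1) + (N_BWP - 1 - RB_start);
}

int main(void)
{
  /* 106-PRB BWP, allocation of 50 PRBs starting at PRB 0 */
  printf("RIV = %u\n", riv_type1(106, 0, 50)); /* 5194 */
  return 0;
}
```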
......@@ -59,7 +59,7 @@ int8_t mac_rrc_nr_data_req(const module_id_t Mod_idP,
#ifdef DEBUG_RRC
int i;
LOG_I(RRC,"[eNB %d] mac_rrc_data_req to SRB ID=%d\n",Mod_idP,Srb_id);
LOG_D(RRC,"[eNB %d] mac_rrc_data_req to SRB ID=%d\n",Mod_idP,Srb_id);
#endif
gNB_RRC_INST *rrc;
......@@ -77,11 +77,11 @@ int8_t mac_rrc_nr_data_req(const module_id_t Mod_idP,
(void *)mib,
carrier->MIB,
24);
LOG_I(NR_RRC,"Encoded MIB for frame %d sfn_msb %d (%p), bits %lu\n",frameP,sfn_msb,carrier->MIB,enc_rval.encoded);
LOG_D(NR_RRC,"Encoded MIB for frame %d sfn_msb %d (%p), bits %lu\n",frameP,sfn_msb,carrier->MIB,enc_rval.encoded);
buffer_pP[0]=carrier->MIB[0];
buffer_pP[1]=carrier->MIB[1];
buffer_pP[2]=carrier->MIB[2];
LOG_I(NR_RRC,"MIB PDU buffer_pP[0]=%x , buffer_pP[1]=%x, buffer_pP[2]=%x\n",buffer_pP[0],buffer_pP[1],buffer_pP[2]);
LOG_D(NR_RRC,"MIB PDU buffer_pP[0]=%x , buffer_pP[1]=%x, buffer_pP[2]=%x\n",buffer_pP[0],buffer_pP[1],buffer_pP[2]);
AssertFatal (enc_rval.encoded > 0, "ASN1 message encoding failed (%s, %lu)!\n",
enc_rval.failed_type->name, enc_rval.encoded);
return(3);
......
......@@ -1095,6 +1095,7 @@ extern "C" {
device->type=USRP_X300_DEV; //treat it as X300 for now
usrp_master_clock = 122.88e6;
args += boost::str(boost::format(",master_clock_rate=%f") % usrp_master_clock);
//args += ", send_buff_size=33554432";
}
if (device_adds[0].get("type") == "x300") {
......
[*]
[*] GTKWave Analyzer v3.3.58 (w)1999-2014 BSI
[*] Tue Nov 6 14:54:14 2018
[*] GTKWave Analyzer v3.3.61 (w)1999-2014 BSI
[*] Wed May 1 00:14:54 2019
[*]
[dumpfile] "/tmp/openair_dump_eNB.vcd"
[dumpfile_mtime] "Tue Nov 6 14:50:45 2018"
[dumpfile_size] 1894074
[savefile] "/homes/wangts/openairinterface5g/targets/RT/USER/gNB_usrp.gtkw"
[timestart] 1238454000
[size] 1920 1018
[pos] -159 -155
*-20.848083 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1
[dumpfile_mtime] "Wed May 1 00:10:57 2019"
[dumpfile_size] 22152850
[savefile] "/home/caracal/raymond/openairinterface5g/targets/RT/USER/gNB_usrp.gtkw"
[timestart] 11537525000
[size] 1859 841
[pos] -1 -1
*-19.848083 11540069031 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1
[sst_width] 386
[signals_width] 344
[sst_expanded] 1
......@@ -47,7 +47,6 @@ variables.slot_number_TX1_gNB[63:0]
@28
functions.gNB_thread_rxtx1
functions.phy_procedures_ru_feprx0
functions.phy_procedures_ru_feprx0
functions.phy_procedures_ru_feprx1
functions.phy_procedures_ru_feptx_ofdm0
functions.phy_procedures_ru_feptx_ofdm1
......
......@@ -143,7 +143,7 @@ extern double cpuf;
void init_gNB(int,int);
void stop_gNB(int nb_inst);
int wakeup_txfh(gNB_L1_rxtx_proc_t *proc,PHY_VARS_gNB *gNB);
int wakeup_txfh(gNB_L1_rxtx_proc_t *proc,int frame_tx,int slot_tx,uint64_t timestamp_tx,PHY_VARS_gNB *gNB);
int wakeup_tx(PHY_VARS_gNB *gNB);
extern PARALLEL_CONF_t get_thread_parallel_conf(void);
extern WORKER_CONF_t get_thread_worker_conf(void);
......@@ -301,7 +301,6 @@ static void* gNB_L1_thread_tx(void* param) {
while (!oai_exit) {
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX1, 0 );
if (wait_on_condition(&proc->mutex,&proc->cond,&proc->instance_cnt,thread_name)<0) break;
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX1, 1 );
if (oai_exit) break;
......@@ -319,6 +318,9 @@ static void* gNB_L1_thread_tx(void* param) {
phy_procedures_gNB_TX(gNB, proc, 1);
pthread_mutex_lock( &proc->mutex );
int slot_tx = proc->slot_tx;
int frame_tx = proc->frame_tx;
uint64_t timestamp_tx = proc->timestamp_tx;
proc->instance_cnt = -1;
// the thread can now be woken up
if (pthread_cond_signal(&proc->cond) != 0) {
......@@ -326,7 +328,8 @@ static void* gNB_L1_thread_tx(void* param) {
exit_fun( "ERROR pthread_cond_signal" );
}
pthread_mutex_unlock( &proc->mutex );
wakeup_txfh(proc,gNB);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_gNB_PROC_RXTX1, 0 );
wakeup_txfh(proc,frame_tx,slot_tx,timestamp_tx,gNB);
}
return 0;
......@@ -379,13 +382,10 @@ static void* gNB_L1_thread( void* param ) {
if(get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT){
phy_procedures_gNB_TX(gNB, proc, 1);
}
if(get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) wakeup_tx(gNB);
else if(get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT) wakeup_txfh(proc,proc->frame_tx,proc->slot_tx,proc->timestamp_tx,gNB);
if (release_thread(&proc->mutex,&proc->instance_cnt,thread_name)<0) break;
if(get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT){
wakeup_tx(gNB);
}
else if(get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT){
wakeup_txfh(proc,gNB);
}
} // while !oai_exit
......@@ -446,7 +446,7 @@ void gNB_top(PHY_VARS_gNB *gNB, int frame_rx, int slot_rx, char *string, struct
}
}
int wakeup_txfh(gNB_L1_rxtx_proc_t *proc,PHY_VARS_gNB *gNB) {
int wakeup_txfh(gNB_L1_rxtx_proc_t *proc,int frame_tx,int slot_tx,uint64_t timestamp_tx,PHY_VARS_gNB *gNB) {
RU_t *ru;
RU_proc_t *ru_proc;
......@@ -481,11 +481,11 @@ int wakeup_txfh(gNB_L1_rxtx_proc_t *proc,PHY_VARS_gNB *gNB) {
}
ru_proc->instance_cnt_gNBs = 0;
ru_proc->timestamp_tx = proc->timestamp_tx;
ru_proc->tti_tx = proc->slot_tx;
ru_proc->frame_tx = proc->frame_tx;
ru_proc->timestamp_tx = timestamp_tx;
ru_proc->tti_tx = slot_tx;
ru_proc->frame_tx = frame_tx;
LOG_I(PHY,"Signaling tx_thread_fh for %d.%d\n",ru_proc->frame_tx,ru_proc->tti_tx);
LOG_D(PHY,"Signaling tx_thread_fh for %d.%d\n",ru_proc->frame_tx,ru_proc->tti_tx);
// the thread can now be woken up
if (pthread_cond_signal(&ru_proc->cond_gNBs) != 0) {
LOG_E( PHY, "[gNB] ERROR pthread_cond_signal for gNB TXnp4 thread\n");
......@@ -607,7 +607,7 @@ int wakeup_rxtx(PHY_VARS_gNB *gNB,RU_t *ru) {
L1_proc->frame_tx = (L1_proc->slot_rx > (fp->slots_per_frame-1-sl_ahead)) ? (L1_proc->frame_rx+1)&1023 : L1_proc->frame_rx;
L1_proc->slot_tx = (L1_proc->slot_rx + sl_ahead)%fp->slots_per_frame;
LOG_I(PHY,"wakeupL1: passing parameter IC = %d, RX: %d.%d, TX: %d.%d to L1 sl_ahead = %d\n", L1_proc->instance_cnt, L1_proc->frame_rx, L1_proc->slot_rx, L1_proc->frame_tx, L1_proc->slot_tx, sl_ahead);
LOG_D(PHY,"wakeupL1: passing parameter IC = %d, RX: %d.%d, TX: %d.%d to L1 sl_ahead = %d\n", L1_proc->instance_cnt, L1_proc->frame_rx, L1_proc->slot_rx, L1_proc->frame_tx, L1_proc->slot_tx, sl_ahead);
// the thread can now be woken up
if (pthread_cond_signal(&L1_proc->cond) != 0) {
......
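With the new wakeup_txfh() signature above, gNB_L1_thread_tx() snapshots frame_tx, slot_tx and timestamp_tx while still holding proc->mutex, releases the instance, and only then signals the RU with the local copies, so the fronthaul TX thread gets a consistent triple even if proc is refilled for the next slot in the meantime. A condensed, simplified sketch of that pattern (types and names reduced to the essentials, not the actual OAI structures):

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t mutex;
  pthread_cond_t  cond;
  int             instance_cnt;
  int             frame_tx;
  int             slot_tx;
  uint64_t        timestamp_tx;
} l1_tx_proc_t;

/* Take a consistent snapshot of the TX timing while holding the mutex,
 * release the instance, and only use the local copies afterwards
 * (mirroring gNB_L1_thread_tx() handing frame/slot/timestamp to wakeup_txfh()). */
static void tx_iteration(l1_tx_proc_t *proc,
                         void (*wakeup_txfh)(int, int, uint64_t))
{
  pthread_mutex_lock(&proc->mutex);
  int frame_tx          = proc->frame_tx;
  int slot_tx           = proc->slot_tx;
  uint64_t timestamp_tx = proc->timestamp_tx;
  proc->instance_cnt    = -1;            /* the producer may refill proc now */
  pthread_cond_signal(&proc->cond);
  pthread_mutex_unlock(&proc->mutex);

  wakeup_txfh(frame_tx, slot_tx, timestamp_tx); /* safe: uses the snapshot */
}

static void print_wakeup(int frame, int slot, uint64_t ts)
{
  printf("TX %d.%d at timestamp %llu\n", frame, slot, (unsigned long long)ts);
}

int main(void)
{
  l1_tx_proc_t proc = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
                        0, 101, 7, 123456789ULL };
  tx_iteration(&proc, print_wakeup);
  return 0;
}
```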
......@@ -661,7 +661,7 @@ void rx_rf(RU_t *ru,int *frame,int *slot) {
old_ts = proc->timestamp_rx;
LOG_I(PHY,"Reading %d samples for slot %d (%p)\n",fp->samples_per_slot,*slot,rxp[0]);
LOG_D(PHY,"Reading %d samples for slot %d (%p)\n",fp->samples_per_slot,*slot,rxp[0]);
if(emulate_rf){
wait_on_condition(&proc->mutex_emulateRF,&proc->cond_emulateRF,&proc->instance_cnt_emulateRF,"emulatedRF_thread");
......@@ -701,15 +701,6 @@ void rx_rf(RU_t *ru,int *frame,int *slot) {
proc->tti_rx = (proc->timestamp_rx / fp->samples_per_slot)%fp->slots_per_frame;
// synchronize first reception to frame 0 subframe 0
proc->timestamp_tx = proc->timestamp_rx+(sl_ahead*fp->samples_per_slot);
proc->tti_tx = (proc->tti_rx+sl_ahead)%fp->slots_per_frame;
proc->frame_tx = (proc->tti_rx>(fp->slots_per_frame-1-sl_ahead)) ? (proc->frame_rx+1)&1023 : proc->frame_rx;
LOG_I(PHY,"RU %d/%d TS %llu (off %d), frame %d, slot %d.%d / %d\n",
ru->idx,
0,
(unsigned long long int)proc->timestamp_rx,
(int)ru->ts_offset,proc->frame_rx,proc->tti_rx,proc->tti_tx,fp->slots_per_frame);
// dump VCD output for first RU in list
if (ru == RC.ru[0]) {
......@@ -807,7 +798,7 @@ void tx_rf(RU_t *ru) {
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_TRX_WRITE, 0 );
AssertFatal(txs == siglen+sf_extension,"TX : Timeout (sent %d/%d)\n",txs, siglen);
AssertFatal(txs == (siglen+sf_extension),"TX : Timeout (sent %d/%d)\n",txs, siglen);
}
}
......@@ -1084,7 +1075,7 @@ static inline int wakeup_prach_ru(RU_t *ru) {
ru->gNB_list[0]->proc.frame_prach = ru->proc.frame_rx;
ru->gNB_list[0]->proc.slot_prach = ru->proc.tti_rx;
}
LOG_I(PHY,"RU %d: waking up PRACH thread\n",ru->idx);
LOG_D(PHY,"RU %d: waking up PRACH thread\n",ru->idx);
// the thread can now be woken up
AssertFatal(pthread_cond_signal(&ru->proc.cond_prach) == 0, "ERROR pthread_cond_signal for RU prach thread\n");
}
......@@ -1308,7 +1299,7 @@ static void* ru_thread_tx( void* param ) {
if (oai_exit) break;
LOG_I(PHY,"ru_thread_tx: Waiting for TX processing\n");
LOG_D(PHY,"ru_thread_tx: Waiting for TX processing\n");
// wait until eNBs are finished subframe RX n and TX n+4
wait_on_condition(&proc->mutex_gNBs,&proc->cond_gNBs,&proc->instance_cnt_gNBs,"ru_thread_tx");
if (oai_exit) break;
......@@ -1414,7 +1405,7 @@ static void* ru_thread( void* param ) {
sprintf(threadname,"ru_thread %d",ru->idx);
thread_top_init(threadname,0,870000,1000000,1000000);
LOG_I(PHY,"Starting RU %d (%s,%s),\n",ru->idx,NB_functions[ru->function],NB_timing[ru->if_timing]);
LOG_D(PHY,"Starting RU %d (%s,%s),\n",ru->idx,NB_functions[ru->function],NB_timing[ru->if_timing]);
if(emulate_rf){
fill_rf_config(ru,ru->rf_config_file);
......@@ -1505,7 +1496,7 @@ static void* ru_thread( void* param ) {
if (ru->fh_south_in) ru->fh_south_in(ru,&frame,&slot);
else AssertFatal(1==0, "No fronthaul interface at south port");
LOG_I(PHY,"AFTER fh_south_in - SFN/SL:%d%d RU->proc[RX:%d.%d TX:%d.%d] RC.gNB[0][0]:[RX:%d%d TX(SFN):%d]\n",
LOG_D(PHY,"AFTER fh_south_in - SFN/SL:%d%d RU->proc[RX:%d.%d TX:%d.%d] RC.gNB[0][0]:[RX:%d%d TX(SFN):%d]\n",
frame,slot,
proc->frame_rx,proc->tti_rx,
proc->frame_tx,proc->tti_tx,
......@@ -1534,7 +1525,7 @@ static void* ru_thread( void* param ) {
// wakeup all gNB processes waiting for this RU
if (ru->num_gNB>0) wakeup_gNB_L1s(ru);
if(get_thread_parallel_conf() == PARALLEL_SINGLE_THREAD && ru->num_eNB==0)
if(get_thread_parallel_conf() == PARALLEL_SINGLE_THREAD && ru->num_gNB==0)
{
// do TX front-end processing if needed (precoding and/or IDFTs)
if (ru->feptx_prec) ru->feptx_prec(ru);
......
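The rx_rf() hunk above shortens the block that derives the RX slot from the running sample timestamp and shifts it by sl_ahead slots for the TX timing. A hedged sketch of that arithmetic, assuming a fixed number of samples per slot (the sample-rate figures in the example are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* Derive RX/TX slot and frame numbers from a running sample timestamp,
 * mirroring the expressions in rx_rf():
 *   tti_rx   = (timestamp_rx / samples_per_slot) % slots_per_frame
 *   tti_tx   = (tti_rx + sl_ahead) % slots_per_frame
 *   frame_tx = frame_rx + 1 (mod 1024) when the TX slot wraps into the next frame */
static void timing_from_timestamp(uint64_t timestamp_rx, uint32_t samples_per_slot,
                                  uint32_t slots_per_frame, uint32_t sl_ahead,
                                  int frame_rx, int *tti_rx, int *tti_tx,
                                  int *frame_tx, uint64_t *timestamp_tx)
{
  *tti_rx       = (int)((timestamp_rx / samples_per_slot) % slots_per_frame);
  *timestamp_tx = timestamp_rx + (uint64_t)sl_ahead * samples_per_slot;
  *tti_tx       = (*tti_rx + sl_ahead) % slots_per_frame;
  *frame_tx     = (*tti_rx > (int)(slots_per_frame - 1 - sl_ahead))
                    ? (frame_rx + 1) & 1023 : frame_rx;
}

int main(void)
{
  int tti_rx, tti_tx, frame_tx;
  uint64_t ts_tx;
  /* 30 kHz SCS, 61.44 Msps: 30720 samples per slot, 20 slots per frame */
  timing_from_timestamp(30720ULL * 19, 30720, 20, 6, 0,
                        &tti_rx, &tti_tx, &frame_tx, &ts_tx);
  printf("RX slot %d -> TX %d.%d\n", tti_rx, frame_tx, tti_tx); /* 19 -> 1.5 */
  return 0;
}
```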