Commit b904ce79 authored by Cedric Roux

rewrite synchronisation in oaisim/channel simulation

The idea is as follows.

oaisim waits for all tasks to be done for a TTI.
To do so, it waits until last_rx_timestamp == current_rx_timestamp
for all UEs and eNBs.

When that condition holds, it increments current_rx_timestamp by
samples_per_tti to inform the UEs and eNBs that a new TTI is ready.
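
In code, the oaisim side of the handshake looks roughly like this
(a minimal sketch, collapsed to a single eNB/UE and a single CC;
the real loop in l2l1_task iterates over all instances, and
samples_per_tti stands for frame_parms.samples_per_tti):

    /* sketch: one TTI step of the oaisim master loop */
    while (last_eNB_rx_timestamp[0][0] != current_eNB_rx_timestamp[0][0] ||
           last_UE_rx_timestamp[0][0]  != current_UE_rx_timestamp[0][0])
      usleep(500);  /* some eNB or UE has not finished this TTI yet */

    /* everyone is done: publish the next TTI to all eNBs and UEs */
    current_eNB_rx_timestamp[0][0] += samples_per_tti;
    current_UE_rx_timestamp[0][0]  += samples_per_tti;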

In the functions [UE|eNB]_trx_read, we wait for a new TTI to be ready,
that is, we wait until current_rx_timestamp != last_rx_timestamp.
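
The device side is the mirror image (again a sketch, written with the
generic last_rx_timestamp/current_rx_timestamp names used above; the
real functions below also handle reads that span partial TTIs):

    /* sketch: inside [UE|eNB]_trx_read */
    while (current_rx_timestamp == last_rx_timestamp)
      usleep(500);  /* no new TTI published yet */

    /* a new TTI is available: run the channel simulation for it, then
     * advance last_rx_timestamp to report completion back to oaisim */
    last_rx_timestamp = current_rx_timestamp;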

The UE also has to wait for its processing threads to be done.
In the eNB this is not necessary, because all the processing is
done in one thread: read a subframe, process it,
emit subframe+4, and then go back to reading the next subframe.
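
As a hypothetical outline (read_subframe, process_subframe and
emit_subframe are illustrative names, not functions from the code):

    /* the eNB main loop: getting back to the read is itself the
     * proof that the previous TTI has been fully processed */
    int sf = 0;
    for (;;) {
      read_subframe(sf);      /* blocks until oaisim publishes TTI sf */
      process_subframe(sf);   /* all UL/DL processing for TTI sf */
      emit_subframe(sf + 4);  /* TX runs 4 subframes ahead of RX */
      sf++;
    }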

The code is not clean. We should use semaphores instead of usleep.
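
For example, each reader could block on a POSIX semaphore that oaisim
posts once per TTI (a sketch only; sem_tti does not exist in the code,
and there would have to be one semaphore per eNB/UE instance):

    #include <semaphore.h>

    sem_t sem_tti;  /* hypothetical; sem_init(&sem_tti, 0, 0) at startup */

    /* oaisim, right after incrementing current_rx_timestamp: */
    sem_post(&sem_tti);  /* wake the reader instead of letting it poll */

    /* [UE|eNB]_trx_read, replacing the usleep() loop: */
    sem_wait(&sem_tti);  /* sleep until a new TTI is published */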

Also, do_[DL|UL]_sig should not read from rxdata and write to txdata;
it should use internal buffers instead.

The functions [UE|eNB]_trx_read should call do_[DL|UL]_sig and then
copy from the internal buffers of the channel simulator into rxdata.

The functions [UE|eNB]_trx_write should write the data they are given
into the internal buffers of the channel simulator.
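
Put together, the reworked path could look like this (a sketch with
hypothetical buffer names chansim_dl_buffer/chansim_ul_buffer; only
the signatures are taken from the existing trx functions):

    #include <string.h>  /* memcpy */
    #include <stdint.h>

    extern int32_t chansim_dl_buffer[];  /* filled by do_DL_sig() */
    extern int32_t chansim_ul_buffer[];  /* filled by UE_trx_write(), read by do_UL_sig() */

    int UE_trx_read(openair0_device *device, openair0_timestamp *ptimestamp,
                    void **buff, int nsamps, int cc)
    {
      /* do_DL_sig(...) would render this TTI into chansim_dl_buffer
       * instead of writing directly to rxdata */
      memcpy(buff[0], chansim_dl_buffer, nsamps * sizeof(int32_t));
      return nsamps;
    }

    int UE_trx_write(openair0_device *device, openair0_timestamp timestamp,
                     void **buff, int nsamps, int cc, int flags)
    {
      /* keep the transmitted samples inside the channel simulator */
      memcpy(chansim_ul_buffer, buff[0], nsamps * sizeof(int32_t));
      return nsamps;
    }
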
parent 14aa9678
@@ -457,7 +457,8 @@ l2l1_task_state_t l2l1_state = L2L1_WAITTING;
 extern openair0_timestamp current_eNB_rx_timestamp[NUMBER_OF_eNB_MAX][MAX_NUM_CCs];
 extern openair0_timestamp current_UE_rx_timestamp[NUMBER_OF_UE_MAX][MAX_NUM_CCs];
+extern openair0_timestamp last_eNB_rx_timestamp[NUMBER_OF_eNB_MAX][MAX_NUM_CCs];
+extern openair0_timestamp last_UE_rx_timestamp[NUMBER_OF_UE_MAX][MAX_NUM_CCs];

 /*------------------------------------------------------------------------------*/
 void *
@@ -731,37 +732,40 @@ l2l1_task (void *args_p)
         clear_eNB_transport_info (oai_emulation.info.nb_enb_local);
-        CC_id=0;
         int all_done=0;
         while (all_done==0) {
-          pthread_mutex_lock(&subframe_mutex);
-          int subframe_eNB_mask_local = subframe_eNB_mask;
-          int subframe_UE_mask_local = subframe_UE_mask;
-          pthread_mutex_unlock(&subframe_mutex);
-          LOG_D(EMU,"Frame %d, Subframe %d: Checking masks %x,%x\n",frame,sf,subframe_eNB_mask,subframe_UE_mask);
-          if ((subframe_eNB_mask_local == ((1<<NB_eNB_INST)-1)) &&
-              (subframe_UE_mask_local == ((1<<NB_UE_INST)-1)))
-            all_done=1;
-          else
+          int i;
+          all_done = 1;
+          for (i = oai_emulation.info.first_enb_local;
+               i < oai_emulation.info.first_enb_local + oai_emulation.info.nb_enb_local;
+               i++)
+            for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++)
+              if (last_eNB_rx_timestamp[i][CC_id] != current_eNB_rx_timestamp[i][CC_id]) {
+                all_done = 0;
+                break;
+              }
+          if (all_done == 1)
+            for (i = 0; i < NB_UE_INST; i++)
+              for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++)
+                if (last_UE_rx_timestamp[i][CC_id] != current_UE_rx_timestamp[i][CC_id]) {
+                  all_done = 0;
+                  break;
+                }
+          if (all_done == 0)
             usleep(500);
         }
-        //clear subframe masks for next round
-        pthread_mutex_lock(&subframe_mutex);
-        subframe_eNB_mask=0;
-        subframe_UE_mask=0;
-        pthread_mutex_unlock(&subframe_mutex);
         // increment timestamps
         for (eNB_inst = oai_emulation.info.first_enb_local;
              (eNB_inst
               < (oai_emulation.info.first_enb_local
                  + oai_emulation.info.nb_enb_local));
              eNB_inst++) {
+          for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++)
           current_eNB_rx_timestamp[eNB_inst][CC_id] += PHY_vars_eNB_g[eNB_inst][CC_id]->frame_parms.samples_per_tti;
         }
         for (UE_inst = 0; UE_inst<NB_UE_INST;UE_inst++) {
+          for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++)
           current_UE_rx_timestamp[UE_inst][CC_id] += PHY_vars_UE_g[UE_inst][CC_id]->frame_parms.samples_per_tti;
         }
@@ -998,13 +998,15 @@ int UE_trx_set_gains(openair0_device *device, openair0_config_t *openair0_cfg) {
 extern pthread_mutex_t subframe_mutex;
 extern int subframe_eNB_mask,subframe_UE_mask;
-int eNB_trx_read(openair0_device *device, openair0_timestamp *ptimestamp, void **buff, int nsamps, int cc) {
+int eNB_trx_read(openair0_device *device, openair0_timestamp *ptimestamp, void **buff, int nsamps, int cc)
+{
+  int ret = nsamps;
   int eNB_id = device->Mod_id;
   int CC_id = device->CC_id;
   int subframe;
-  int sample_count=0;
+  int read_samples, max_samples;
+  openair0_timestamp last = last_eNB_rx_timestamp[eNB_id][CC_id];

   *ptimestamp = last_eNB_rx_timestamp[eNB_id][CC_id];
@@ -1014,19 +1016,24 @@ int eNB_trx_read(openair0_device *device, openair0_timestamp *ptimestamp, void *
         (*ptimestamp/PHY_vars_eNB_g[eNB_id][CC_id]->frame_parms.samples_per_tti)%10);
   // if we're at a subframe boundary generate UL signals for this eNB
-  while (sample_count<nsamps) {
-    while (current_eNB_rx_timestamp[eNB_id][CC_id]<
-           (nsamps+last_eNB_rx_timestamp[eNB_id][CC_id])) {
+  while (nsamps) {
+    while (current_eNB_rx_timestamp[eNB_id][CC_id] == last) {
       LOG_D(EMU,"eNB: current TS %llu, last TS %llu, sleeping\n",current_eNB_rx_timestamp[eNB_id][CC_id],last_eNB_rx_timestamp[eNB_id][CC_id]);
       usleep(500);
     }
-    // tell top-level we are busy
-    pthread_mutex_lock(&subframe_mutex);
-    subframe_eNB_mask|=(1<<eNB_id);
-    pthread_mutex_unlock(&subframe_mutex);
-    subframe = (last_eNB_rx_timestamp[eNB_id][CC_id]/PHY_vars_eNB_g[eNB_id][CC_id]->frame_parms.samples_per_tti)%10;
+    read_samples = nsamps;
+    max_samples = current_eNB_rx_timestamp[eNB_id][CC_id]-last;
+    if (read_samples > max_samples)
+      read_samples = max_samples;
+    last += read_samples;
+    nsamps -= read_samples;
+    if (current_eNB_rx_timestamp[eNB_id][CC_id] == last) {
+      subframe = (last/PHY_vars_eNB_g[eNB_id][CC_id]->frame_parms.samples_per_tti)%10;
+      //subframe = (subframe+9) % 10;
       LOG_D(PHY,"eNB_trx_read generating UL subframe %d (Ts %llu, current TS %llu)\n",
             subframe,(unsigned long long)*ptimestamp,
             (unsigned long long)current_eNB_rx_timestamp[eNB_id][CC_id]);
@@ -1041,22 +1048,23 @@ int eNB_trx_read(openair0_device *device, openair0_timestamp *ptimestamp, void *
               eNB_id,
               CC_id);
-    last_eNB_rx_timestamp[eNB_id][CC_id] += PHY_vars_eNB_g[eNB_id][CC_id]->frame_parms.samples_per_tti;
-    sample_count += PHY_vars_eNB_g[eNB_id][CC_id]->frame_parms.samples_per_tti;
+      last_eNB_rx_timestamp[eNB_id][CC_id] = last;
+    }
   }
-  return(nsamps);
+  last_eNB_rx_timestamp[eNB_id][CC_id] = last;
+  return ret;
 }

-int UE_trx_read(openair0_device *device, openair0_timestamp *ptimestamp, void **buff, int nsamps, int cc) {
+int UE_trx_read(openair0_device *device, openair0_timestamp *ptimestamp, void **buff, int nsamps, int cc)
+{
+  int ret = nsamps;
   int UE_id = device->Mod_id;
   int CC_id = device->CC_id;
   int subframe;
-  int sample_count=0;
-  int read_size;
+  int read_samples, max_samples;
+  openair0_timestamp last = last_UE_rx_timestamp[UE_id][CC_id];

   *ptimestamp = last_UE_rx_timestamp[UE_id][CC_id];
@@ -1065,14 +1073,19 @@ int UE_trx_read(openair0_device *device, openair0_timestamp *ptimestamp, void **
         (unsigned long long)last_UE_rx_timestamp[UE_id][CC_id],
         cc);
-  if (nsamps < PHY_vars_UE_g[UE_id][CC_id]->frame_parms.samples_per_tti)
-    read_size = nsamps;
-  else
-    read_size = PHY_vars_UE_g[UE_id][CC_id]->frame_parms.samples_per_tti;
-  while (sample_count<nsamps) {
-    while (current_UE_rx_timestamp[UE_id][CC_id] <
-           (last_UE_rx_timestamp[UE_id][CC_id]+read_size)) {
+  while (nsamps) {
+    /* wait for all processing to be finished */
+    while (1) {
+      PHY_VARS_UE *UE = PHY_vars_UE_g[UE_id][0];
+      int ready = 1;
+      int i;
+      for (i = 0; i < 2; i++)
+        if (UE->proc.proc_rxtx[i].instance_cnt_rxtx >= 0) ready = 0;
+      if (UE->proc.instance_cnt_synch >= 0) ready = 0;
+      if (ready) break;
+      usleep(500);
+    }
+    while (current_UE_rx_timestamp[UE_id][CC_id] == last) {
       LOG_D(EMU,"UE_trx_read : current TS %d, last TS %d, sleeping\n",current_UE_rx_timestamp[UE_id][CC_id],last_UE_rx_timestamp[UE_id][CC_id]);
       usleep(500);
@@ -1080,26 +1093,23 @@ int UE_trx_read(openair0_device *device, openair0_timestamp *ptimestamp, void **
     }
     LOG_D(EMU,"UE_trx_read : current TS %d, last TS %d, sleeping\n",current_UE_rx_timestamp[UE_id][CC_id],last_UE_rx_timestamp[UE_id][CC_id]);
-    // tell top-level we are busy
-    pthread_mutex_lock(&subframe_mutex);
-    subframe_UE_mask|=(1<<UE_id);
-    pthread_mutex_unlock(&subframe_mutex);
-    // otherwise we have one subframe here so generate the received signal
-    subframe = (last_UE_rx_timestamp[UE_id][CC_id]/PHY_vars_UE_g[UE_id][CC_id]->frame_parms.samples_per_tti)%10;
-    if ((last_UE_rx_timestamp[UE_id][CC_id]%PHY_vars_UE_g[UE_id][CC_id]->frame_parms.samples_per_tti) > 0)
-      subframe++;
-    last_UE_rx_timestamp[UE_id][CC_id] += read_size;
-    sample_count += read_size;
-    if (subframe > 9)
-      return(nsamps);
+    read_samples = nsamps;
+    max_samples = current_UE_rx_timestamp[UE_id][CC_id]-last;
+    if (read_samples > max_samples)
+      read_samples = max_samples;
+    last += read_samples;
+    nsamps -= read_samples;
+    if (current_UE_rx_timestamp[UE_id][CC_id] == last) {
+      // we have one subframe here so generate the received signal
+      subframe = (last/PHY_vars_UE_g[UE_id][CC_id]->frame_parms.samples_per_tti)%10;
+      //subframe = (subframe+9) % 10;
       LOG_D(PHY,"UE_trx_read generating DL subframe %d (Ts %llu, current TS %llu)\n",
             subframe,(unsigned long long)*ptimestamp,
             (unsigned long long)current_UE_rx_timestamp[UE_id][CC_id]);
       do_DL_sig(eNB2UE,
                 enb_data,
                 ue_data,
@@ -1109,11 +1119,13 @@ int UE_trx_read(openair0_device *device, openair0_timestamp *ptimestamp, void **
                 UE_id,
                 CC_id);
+      last_UE_rx_timestamp[UE_id][CC_id] = last;
+    }
   }
-  return(nsamps);
+  last_UE_rx_timestamp[UE_id][CC_id] = last;
+  return ret;
 }

 int eNB_trx_write(openair0_device *device,openair0_timestamp timestamp, void **buff, int nsamps, int cc, int flags) {