OpenXG-RAN / Commits

Commit 2e6d9d7d authored Apr 11, 2019 by laurent

Remove table of thread_id in the main UE thread

parent 7b3615de

Showing 2 changed files with 94 additions and 77 deletions (+94 -77):

  common/utils/threadPool/thread-pool.h   +29  -21
  executables/nr-ue.c                     +65  -56
common/utils/threadPool/thread-pool.h

@@ -77,16 +77,18 @@ static inline void delNotifiedFIFO_elt(notifiedFIFO_elt_t *elt) {
   //LOG_W(UTIL,"delNotifiedFIFO on something not allocated by newNotifiedFIFO\n");
 }
 
+static inline void initNotifiedFIFO_nothreadSafe(notifiedFIFO_t *nf) {
+  nf->inF=NULL;
+  nf->outF=NULL;
+}
+
 static inline void initNotifiedFIFO(notifiedFIFO_t *nf) {
   mutexinit(nf->lockF);
   condinit(nf->notifF);
-  nf->inF=NULL;
-  nf->outF=NULL;
+  initNotifiedFIFO_nothreadSafe(nf);
   // No delete function: the creator has only to free the memory
 }
 
-static inline void pushNotifiedFIFO(notifiedFIFO_t *nf, notifiedFIFO_elt_t *msg) {
-  mutexlock(nf->lockF);
+static inline void pushNotifiedFIFO_nothreadSafe(notifiedFIFO_t *nf, notifiedFIFO_elt_t *msg) {
   msg->next=NULL;
 
   if (nf->outF == NULL)
@@ -96,22 +98,39 @@ static inline void pushNotifiedFIFO(notifiedFIFO_t *nf, notifiedFIFO_elt_t *msg)
     nf->inF->next=msg;
 
   nf->inF=msg;
-  condbroadcast(nf->notifF);
-  mutexunlock(nf->lockF);
 }
 
-static inline notifiedFIFO_elt_t *pullNotifiedFIFO(notifiedFIFO_t *nf) {
+static inline void pushNotifiedFIFO(notifiedFIFO_t *nf, notifiedFIFO_elt_t *msg) {
   mutexlock(nf->lockF);
+  pushNotifiedFIFO_nothreadSafe(nf,msg);
+  condbroadcast(nf->notifF);
+  mutexunlock(nf->lockF);
+}
 
-  while(nf->outF == NULL)
-    condwait(nf->notifF, nf->lockF);
+static inline notifiedFIFO_elt_t *pullNotifiedFIFO_nothreadSafe(notifiedFIFO_t *nf) {
+  if (nf->outF == NULL)
+    return NULL;
 
   notifiedFIFO_elt_t *ret=nf->outF;
 
   if (nf->outF == nf->outF->next)
     LOG_E(TMR,"Circular list in thread pool: push several times the same buffer is forbidden\n");
 
   nf->outF=nf->outF->next;
 
   if (nf->outF==NULL)
     nf->inF=NULL;
 
+  return ret;
+}
+
+static inline notifiedFIFO_elt_t *pullNotifiedFIFO(notifiedFIFO_t *nf) {
+  mutexlock(nf->lockF);
+  notifiedFIFO_elt_t *ret;
+
+  while((ret=pullNotifiedFIFO_nothreadSafe(nf)) == NULL)
+    condwait(nf->notifF, nf->lockF);
+
   mutexunlock(nf->lockF);
   return ret;
 }
@@ -122,18 +141,7 @@ static inline notifiedFIFO_elt_t *pollNotifiedFIFO(notifiedFIFO_t *nf) {
   if (tmp != 0)
     return NULL;
 
-  notifiedFIFO_elt_t *ret=nf->outF;
-
-  if (ret != NULL) {
-    if (nf->outF == nf->outF->next)
-      LOG_E(TMR,"Circular list in thread pool: push several times the same buffer is forbidden\n");
-
-    nf->outF=nf->outF->next;
-  }
-
-  if (nf->outF==NULL)
-    nf->inF=NULL;
-
+  notifiedFIFO_elt_t *ret=pullNotifiedFIFO_nothreadSafe(nf);
   mutexunlock(nf->lockF);
   return ret;
 }
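To illustrate how the locked functions and their new _nothreadSafe variants are meant to be combined, here is a minimal producer/consumer sketch. It is not part of the commit: workItem_t, the thread bodies and the use of the second argument as a key are made-up illustration, and it assumes the response-FIFO and processing-callback arguments of newNotifiedFIFO_elt may be left NULL when the element is consumed directly. Only initNotifiedFIFO, newNotifiedFIFO_elt, NotifiedFifoData, pushNotifiedFIFO, pullNotifiedFIFO and delNotifiedFIFO_elt come from the header above.

// Minimal usage sketch (illustrative only, not part of this commit).
#include <pthread.h>
#include <stdio.h>
#include "common/utils/threadPool/thread-pool.h"

typedef struct {
  int value;                              // hypothetical payload carried by each FIFO element
} workItem_t;

static notifiedFIFO_t workFifo;

static void *producer(void *arg) {
  for (int i = 0; i < 10; i++) {
    // allocate an element whose data area holds a workItem_t;
    // response FIFO and processing callback are assumed optional here
    notifiedFIFO_elt_t *msg = newNotifiedFIFO_elt(sizeof(workItem_t), i, NULL, NULL);
    workItem_t *w = (workItem_t *)NotifiedFifoData(msg);
    w->value = i;
    pushNotifiedFIFO(&workFifo, msg);     // locks, links the element, broadcasts the condition
  }
  return NULL;
}

static void *consumer(void *arg) {
  for (int i = 0; i < 10; i++) {
    notifiedFIFO_elt_t *msg = pullNotifiedFIFO(&workFifo);  // blocks until an element is queued
    workItem_t *w = (workItem_t *)NotifiedFifoData(msg);
    printf("got %d\n", w->value);
    delNotifiedFIFO_elt(msg);             // the puller owns the element and frees it
  }
  return NULL;
}

int main(void) {
  initNotifiedFIFO(&workFifo);            // thread-safe FIFO: mutex and condition initialised
  pthread_t p, c;
  pthread_create(&p, NULL, producer, NULL);
  pthread_create(&c, NULL, consumer, NULL);
  pthread_join(p, NULL);
  pthread_join(c, NULL);
  return 0;
}

The _nothreadSafe variants would only be used on a FIFO that a single thread owns, as UE_thread() does below with its free-block list.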
executables/nr-ue.c

@@ -156,7 +156,7 @@ PHY_VARS_NR_UE *init_nr_ue_vars(NR_DL_FRAME_PARMS *frame_parms,
  */
 typedef struct syncData_s {
-  UE_nr_rxtx_proc_t *proc;
+  UE_nr_rxtx_proc_t proc;
   PHY_VARS_NR_UE *UE;
 } syncData_t;
@@ -239,7 +239,7 @@ static void UE_synch(void *arg) {
     case pbch:
       LOG_I(PHY, "[UE thread Synch] Running Initial Synch (mode %d)\n", UE->mode);
 
-      if (nr_initial_sync(syncD->proc, UE, UE->mode) == 0) {
+      if (nr_initial_sync(&syncD->proc, UE, UE->mode) == 0) {
         freq_offset = UE->common_vars.freq_offset; // frequency offset computed with pss in initial sync
         hw_slot_offset = (UE->rx_offset<<1) / UE->frame_parms.samples_per_slot;
         LOG_I(PHY, "Got synch: hw_slot_offset %d, carrier off %d Hz, rxgain %d (DL %u, UL %u), UE_scan_carrier %d\n",
@@ -548,40 +548,45 @@ void *UE_thread(void *arg) {
   initNotifiedFIFO(&nf);
   int nbSlotProcessing=0;
   int thread_idx=0;
-  notifiedFIFO_elt_t *processingMsg[RX_NB_TH];
+  notifiedFIFO_t freeBlocks;
+  initNotifiedFIFO_nothreadSafe(&freeBlocks);
 
-  for (int i=0; i<RX_NB_TH; i++) {
-    processingMsg[i]=newNotifiedFIFO_elt(sizeof(processingData_t), 0, &nf, UE_processing);
-    processingData_t *tmp=(processingData_t *)NotifiedFifoData(processingMsg[i]);
-    tmp->UE=UE;
-  }
+  for (int i=0; i<RX_NB_TH+1; i++)  // RX_NB_TH working + 1 we are making to be pushed
+    pushNotifiedFIFO_nothreadSafe(&freeBlocks,
+                                  newNotifiedFIFO_elt(sizeof(processingData_t), 0, &nf, UE_processing));
 
   bool syncRunning=false;
-  notifiedFIFO_elt_t *syncMsg=newNotifiedFIFO_elt(sizeof(syncData_t), 0, &nf, UE_synch);
-  syncData_t *syncD=(syncData_t *)NotifiedFifoData(syncMsg);
-  syncD->UE=UE;
-  syncD->proc=&((processingData_t *)NotifiedFifoData(processingMsg[0]))->proc;
   const int nb_slot_frame = 10*UE->frame_parms.slots_per_subframe;
-  int absolute_slot, decoded_frame_rx=INT_MAX, trashed_frames=0;
+  int absolute_slot=0, decoded_frame_rx=INT_MAX, trashed_frames=0;
 
   while (!oai_exit) {
-    if (!syncD->UE->is_synchronized) {
     if (syncRunning) {
       notifiedFIFO_elt_t *res=tryPullTpool(&nf, Tpool);
 
       if (res) {
         syncRunning=false;
+        syncData_t *tmp=(syncData_t *)NotifiedFifoData(res);
+        // shift the frame index with all the frames we trashed meanwhile we perform the synch search
+        decoded_frame_rx=(tmp->proc.decoded_frame_rx + trashed_frames) % MAX_FRAME_NUMBER;
+        delNotifiedFIFO_elt(res);
       }
       else {
         trashFrame(UE, &timestamp);
         trashed_frames++;
+        continue;
       }
-      }
-      else {
+    }
+
+    AssertFatal(!syncRunning, "At this point synchronisation can't be running\n");
+
+    if (!UE->is_synchronized) {
       readFrame(UE, &timestamp);
-      pushTpool(Tpool, syncMsg);
+      notifiedFIFO_elt_t *Msg=newNotifiedFIFO_elt(sizeof(syncData_t), 0, &nf, UE_synch);
+      syncData_t *syncMsg=(syncData_t *)NotifiedFifoData(Msg);
+      syncMsg->UE=UE;
+      memset(&syncMsg->proc, 0, sizeof(syncMsg->proc));
+      pushTpool(Tpool, Msg);
       trashed_frames=0;
       syncRunning=true;
-      }
       continue;
     }
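The hunk above replaces the fixed processingMsg[RX_NB_TH] array with a small pool of reusable message blocks kept in a non-thread-safe FIFO that only UE_thread() touches. The sketch below condenses that recycling pattern; it is not part of the commit. The job type, processJob body, loop bounds, NB_WORKERS and the tpool_t type name are assumptions for illustration, and the worker callback is assumed to receive the element's data area; the FIFO and pool calls themselves are the ones used in the diff.

// Free-block recycling sketch (illustrative only).
#include <unistd.h>
#include "common/utils/threadPool/thread-pool.h"

#define NB_WORKERS 2

typedef struct {
  int slot;                              // hypothetical per-job payload
} job_t;

static void processJob(void *arg) {
  job_t *job = (job_t *)arg;             // assumed: callback receives the element's data area
  (void)job;                             // real work would go here
}

static void run(tpool_t *Tpool) {        // pool handle; type name assumed from thread-pool.h
  notifiedFIFO_t answers;                // thread-safe FIFO where workers post finished jobs
  initNotifiedFIFO(&answers);
  notifiedFIFO_t freeBlocks;             // touched only by this thread: no lock needed
  initNotifiedFIFO_nothreadSafe(&freeBlocks);
  int inFlight = 0;

  // NB_WORKERS blocks in flight + 1 being prepared, as in UE_thread()
  for (int i = 0; i < NB_WORKERS + 1; i++)
    pushNotifiedFIFO_nothreadSafe(&freeBlocks,
        newNotifiedFIFO_elt(sizeof(job_t), 0, &answers, processJob));

  for (int slot = 0; slot < 100; slot++) {
    // never NULL: at most NB_WORKERS blocks are in flight and NB_WORKERS+1 were allocated
    notifiedFIFO_elt_t *msg = pullNotifiedFIFO_nothreadSafe(&freeBlocks);
    job_t *job = (job_t *)NotifiedFifoData(msg);
    job->slot = slot;

    // throttle: wait until fewer than NB_WORKERS jobs are pending, recycling answers
    notifiedFIFO_elt_t *res;
    while (inFlight >= NB_WORKERS) {
      if ((res = tryPullTpool(&answers, Tpool)) != NULL) {
        inFlight--;
        pushNotifiedFIFO_nothreadSafe(&freeBlocks, res);   // block becomes free again
      } else
        usleep(200);
    }

    inFlight++;
    msg->key = slot;                     // key field as used by UE_thread() when pushing
    pushTpool(Tpool, msg);
  }
}

The point of the extra "+1" block is that the main thread can prepare the next message while NB_WORKERS messages are still being processed, without ever blocking on allocation.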
@@ -599,33 +604,32 @@ void *UE_thread(void *arg) {
                  UE->frame_parms.nb_antennas_rx), "");
       // we have the decoded frame index in the return of the synch process
       // and we shifted above to the first slot of next frame
-      // the synch thread proc context is hard linked to regular processing thread context, thread id = 0
-      UE_nr_rxtx_proc_t *proc=&(((processingData_t *)NotifiedFifoData(processingMsg[0]))->proc);
-      // shift the frame index with all the frames we trashed meanwhile we perform the synch search
-      proc->decoded_frame_rx=(proc->decoded_frame_rx + trashed_frames) % MAX_FRAME_NUMBER;
-      decoded_frame_rx=proc->decoded_frame_rx;
-      // we do ++ first in the regular processing, so it will be 0;
+      decoded_frame_rx++;
+      // we do ++ first in the regular processing, so it will be beging of frame;
       absolute_slot=decoded_frame_rx*nb_slot_frame + nb_slot_frame - 1;
       while (tryPullTpool(&nf, Tpool) != NULL) { };
       continue;
     }
 
     absolute_slot++;
     // whatever means thread_idx
     // Fix me: will be wrong when slot 1 is slow, as slot 2 finishes
     // Slot 3 will overlap if RX_NB_TH is 2
     // this is general failure in UE !!!
     thread_idx = absolute_slot % RX_NB_TH;
     int slot_nr = absolute_slot % nb_slot_frame;
-    UE_nr_rxtx_proc_t *proc=&(((processingData_t *)NotifiedFifoData(processingMsg[thread_idx]))->proc);
+    notifiedFIFO_elt_t *msgToPush;
+    AssertFatal((msgToPush=pullNotifiedFIFO_nothreadSafe(&freeBlocks)) != NULL, "chained list failure");
+    processingData_t *curMsg=(processingData_t *)NotifiedFifoData(msgToPush);
+    curMsg->UE=UE;
     // update thread index for received subframe
-    proc->nr_tti_rx=slot_nr;
-    UE->current_thread_id[slot_nr] = thread_idx;
-    proc->subframe_rx=table_sf_slot[slot_nr];
-    proc->nr_tti_tx=(absolute_slot + DURATION_RX_TO_TX) % nb_slot_frame;
-    proc->subframe_tx=proc->nr_tti_rx;
-    proc->frame_rx=(absolute_slot/nb_slot_frame) % MAX_FRAME_NUMBER;
-    proc->frame_tx=((absolute_slot + DURATION_RX_TO_TX)/nb_slot_frame) % MAX_FRAME_NUMBER;
-    proc->decoded_frame_rx=-1;
+    curMsg->proc.nr_tti_rx=slot_nr;
+    curMsg->UE->current_thread_id[slot_nr] = thread_idx;
+    curMsg->proc.subframe_rx=table_sf_slot[slot_nr];
+    curMsg->proc.nr_tti_tx=(absolute_slot + DURATION_RX_TO_TX) % nb_slot_frame;
+    curMsg->proc.subframe_tx=curMsg->proc.nr_tti_rx;
+    curMsg->proc.frame_rx=(absolute_slot/nb_slot_frame) % MAX_FRAME_NUMBER;
+    curMsg->proc.frame_tx=((absolute_slot + DURATION_RX_TO_TX)/nb_slot_frame) % MAX_FRAME_NUMBER;
+    curMsg->proc.decoded_frame_rx=-1;
     LOG_D(PHY, "Process slot %d thread Idx %d \n", slot_nr, thread_idx);
 
     for (int i=0; i<UE->frame_parms.nb_antennas_rx; i++)
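As a quick sanity check of the slot and frame arithmetic filled into curMsg->proc above, here is a small self-contained sketch. It is illustrative only: nb_slot_frame, DURATION_RX_TO_TX and MAX_FRAME_NUMBER are given assumed example values (20 slots per frame, a 4-slot RX-to-TX delay, a 1024-frame wrap) rather than the configuration-dependent ones the UE computes from frame_parms.

#include <stdio.h>

#define NB_SLOT_FRAME      20            // assumed: 10 * slots_per_subframe with 2 slots per subframe
#define DURATION_RX_TO_TX   4            // assumed example processing delay in slots
#define MAX_FRAME_NUMBER 1024            // assumed frame-number wrap

int main(void) {
  int absolute_slot = 205;               // example running slot counter
  int nr_tti_rx = absolute_slot % NB_SLOT_FRAME;                                       // 205 % 20  = 5
  int frame_rx  = (absolute_slot / NB_SLOT_FRAME) % MAX_FRAME_NUMBER;                  // 205 / 20  = 10
  int nr_tti_tx = (absolute_slot + DURATION_RX_TO_TX) % NB_SLOT_FRAME;                 // 209 % 20  = 9
  int frame_tx  = ((absolute_slot + DURATION_RX_TO_TX) / NB_SLOT_FRAME) % MAX_FRAME_NUMBER;  // 209 / 20 = 10
  printf("rx: frame %d slot %d, tx: frame %d slot %d\n", frame_rx, nr_tti_rx, frame_tx, nr_tti_tx);
  return 0;
}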
@@ -634,7 +638,7 @@ void *UE_thread(void *arg) {
                                            slot_nr*UE->frame_parms.samples_per_slot];
 
     for (int i=0; i<UE->frame_parms.nb_antennas_tx; i++)
-      txp[i] = (void *)&UE->common_vars.txdata[i][proc->nr_tti_tx*UE->frame_parms.samples_per_slot];
+      txp[i] = (void *)&UE->common_vars.txdata[i][curMsg->proc.nr_tti_tx*UE->frame_parms.samples_per_slot];
 
     int readBlockSize, writeBlockSize;
@@ -683,31 +687,33 @@ void *UE_thread(void *arg) {
         LOG_E(PHY, "can't compensate: diff =%d\n", first_symbols);
     }
 
-    proc->timestamp_tx = timestamp+
+    curMsg->proc.timestamp_tx = timestamp+
                          (DURATION_RX_TO_TX*UE->frame_parms.samples_per_slot) -
                          UE->frame_parms.ofdm_symbol_size - UE->frame_parms.nb_prefix_samples0;
     notifiedFIFO_elt_t *res;
 
     while (nbSlotProcessing >= RX_NB_TH) {
       if ((res=tryPullTpool(&nf, Tpool)) != NULL) {
         nbSlotProcessing--;
+        processingData_t *tmp=(processingData_t *)res->msgData;
+
+        if (tmp->proc.decoded_frame_rx != -1)
+          decoded_frame_rx=tmp->proc.decoded_frame_rx;
+
+        pushNotifiedFIFO_nothreadSafe(&freeBlocks, res);
       }
 
       usleep(200);
     }
 
-    if (decoded_frame_rx != proc->frame_rx && ((decoded_frame_rx+1) % MAX_FRAME_NUMBER) != proc->frame_rx)
+    if (decoded_frame_rx != curMsg->proc.frame_rx && ((decoded_frame_rx+1) % MAX_FRAME_NUMBER) != curMsg->proc.frame_rx)
       LOG_D(PHY, "Decoded frame index (%d) is not compatible with current context (%d), UE should go back to synch mode\n",
-            decoded_frame_rx, proc->frame_rx);
+            decoded_frame_rx, curMsg->proc.frame_rx);
 
     nbSlotProcessing++;
-    processingMsg[thread_idx]->key=slot_nr;
-    pushTpool(Tpool, processingMsg[thread_idx]);
+    msgToPush->key=slot_nr;
+    pushTpool(Tpool, msgToPush);
 
     if (getenv("RFSIMULATOR")) {
       // FixMe: Wait previous thread is done, because race conditions seems too bad
@@ -716,8 +722,11 @@ void *UE_thread(void *arg) {
       res=pullTpool(&nf, Tpool);
       nbSlotProcessing--;
+      processingData_t *tmp=(processingData_t *)res->msgData;
+
+      if (tmp->proc.decoded_frame_rx != -1)
+        decoded_frame_rx=tmp->proc.decoded_frame_rx;
+
+      pushNotifiedFIFO_nothreadSafe(&freeBlocks, res);
     }
   } // while !oai_exit