OpenXG-RAN

Commit 7a6b8579 authored Jan 24, 2020 by Raphael Defosseux

Merge remote-tracking branch 'origin/mutex_fix' into develop_integration_2020_w04

Parents: 01470da9 b7a027f9
Showing 3 changed files with 13 additions and 11 deletions (+13 / -11)
executables/nr-gnb.c: +9 / -6
executables/nr-ru.c: +2 / -2
targets/RT/USER/lte-enb.c: +2 / -3
executables/nr-gnb.c

@@ -417,10 +417,14 @@ int wakeup_txfh(PHY_VARS_gNB *gNB,gNB_L1_rxtx_proc_t *proc,int frame_tx,int slot
  // note this should depend on the numerology used by the TX L1 thread, set here for 500us slot time
  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_GAIN_CONTROL, 1);
  waitret = wait_on_condition(&proc->mutex_RUs_tx, &proc->cond_RUs, &proc->instance_cnt_RUs, "wakeup_txfh");
  AssertFatal(release_thread(&proc->mutex_RUs_tx, &proc->instance_cnt_RUs, "wakeup_txfh") == 0, "error releaseing gNB lock on RUs\n");
  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_GAIN_CONTROL, 0);
  AssertFatal((ret = pthread_mutex_lock(&proc->mutex_RUs_tx)) == 0, "mutex_lock returns %d\n", ret);
  while (proc->instance_cnt_RUs < 0) {
    pthread_cond_wait(&proc->cond_RUs, &proc->mutex_RUs_tx); // this unlocks mutex_rxtx while waiting and then locks it again
  }
  proc->instance_cnt_RUs = -1;
  VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_UE, proc->instance_cnt_RUs);
  AssertFatal((ret = pthread_mutex_unlock(&proc->mutex_RUs_tx)) == 0, "mutex_unlock returns %d\n", ret);
  VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_UE, proc->instance_cnt_RUs);
@@ -446,7 +450,6 @@ int wakeup_txfh(PHY_VARS_gNB *gNB,gNB_L1_rxtx_proc_t *proc,int frame_tx,int slot
    ru = gNB->RU_list[i];
    ru_proc = &ru->proc;
    //AssertFatal((ret = pthread_mutex_lock(&ru_proc->mutex_gNBs))==0,"ERROR pthread_mutex_lock failed on mutex_gNBs L1_thread_tx with ret=%d\n",ret);
    if (ru_proc->instance_cnt_gNBs == 0) {
      VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_TRX_TST_UE, 1);
@@ -454,11 +457,11 @@ int wakeup_txfh(PHY_VARS_gNB *gNB,gNB_L1_rxtx_proc_t *proc,int frame_tx,int slot
      AssertFatal((ret = pthread_mutex_lock(&gNB->proc.mutex_RU_tx)) == 0, "mutex_lock returns %d\n", ret);
      gNB->proc.RU_mask_tx = 0;
      AssertFatal((ret = pthread_mutex_unlock(&gNB->proc.mutex_RU_tx)) == 0, "mutex_unlock returns %d\n", ret);
      //AssertFatal((ret=pthread_mutex_unlock( &ru_proc->mutex_gNBs ))==0,"mutex_unlock return %d\n",ret);
      VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_TRX_TST_UE, 0);
      return(-1);
    }
    AssertFatal((ret = pthread_mutex_lock(&ru_proc->mutex_gNBs)) == 0, "ERROR pthread_mutex_lock failed on mutex_gNBs L1_thread_tx with ret=%d\n", ret);
    ru_proc->instance_cnt_gNBs = 0;
    ru_proc->timestamp_tx = timestamp_tx;
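For context on the change above: the new nr-gnb.c code replaces the wait_on_condition()/release_thread() helpers with an explicit pthread mutex and condition-variable wait on instance_cnt_RUs. Below is a minimal standalone sketch of that handshake between the gNB L1 TX thread (waiter) and the RU TX thread (signaler). The names instance_cnt, waiter and signaler are illustrative only, not OAI identifiers, and the sketch omits the VCD tracing and AssertFatal error handling.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Shared state standing in for proc->mutex_RUs_tx / cond_RUs / instance_cnt_RUs. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int instance_cnt = -1;   /* -1: no wake-up pending, >= 0: wake-up pending */

/* Waiter side, mirroring the new wakeup_txfh(): block until a wake-up is
 * pending, then mark it consumed by setting the counter back to -1. */
static void *waiter(void *arg) {
  (void)arg;
  pthread_mutex_lock(&lock);
  while (instance_cnt < 0)
    pthread_cond_wait(&cond, &lock);   /* releases the mutex while blocked */
  instance_cnt = -1;                   /* consume the wake-up */
  pthread_mutex_unlock(&lock);
  printf("waiter: woken up\n");
  return NULL;
}

/* Signaler side, mirroring ru_thread_tx(): post a wake-up only if the
 * previous one has already been consumed (counter is -1). */
static void *signaler(void *arg) {
  (void)arg;
  usleep(1000);
  pthread_mutex_lock(&lock);
  if (instance_cnt == -1) {
    instance_cnt = 0;
    pthread_cond_signal(&cond);
  }
  pthread_mutex_unlock(&lock);
  return NULL;
}

int main(void) {
  pthread_t w, s;
  pthread_create(&w, NULL, waiter, NULL);
  pthread_create(&s, NULL, signaler, NULL);
  pthread_join(w, NULL);
  pthread_join(s, NULL);
  return 0;   /* build with: gcc sketch.c -o sketch -lpthread */
}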
executables/nr-ru.c

@@ -1343,11 +1343,11 @@ static void *ru_thread_tx( void *param ) {
    ret = pthread_mutex_lock(&L1_proc->mutex_RUs_tx);
    AssertFatal(ret == 0, "mutex_lock returns %d\n", ret);
    // the thread can now be woken up
    //if (L1_proc->instance_cnt_RUs == -1) {
    if (L1_proc->instance_cnt_RUs == -1) {
      L1_proc->instance_cnt_RUs = 0;
      AssertFatal(pthread_cond_signal(&L1_proc->cond_RUs) == 0, "ERROR pthread_cond_signal for gNB_L1_thread\n");
    //} //else AssertFatal(1==0,"gNB TX thread is not ready\n");
    } //else AssertFatal(1==0,"gNB TX thread is not ready\n");
    ret = pthread_mutex_unlock(&L1_proc->mutex_RUs_tx);
    AssertFatal(ret == 0, "mutex_unlock returns %d\n", ret);
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_UE, L1_proc->instance_cnt_RUs);
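Taken together with the nr-gnb.c change, this nr-ru.c hunk shows the other half of the handshake: the pthread_cond_signal() is guarded by instance_cnt_RUs == -1 (the previously commented-out guard appears to be active now), so the RU TX thread only posts a wake-up once the gNB L1 TX thread has consumed the previous one, while the disabled else AssertFatal(1==0,...) branch would instead have aborted when the L1 thread was still busy. This corresponds to the signaler() half of the sketch above.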
targets/RT/USER/lte-enb.c

@@ -512,17 +512,16 @@ int wakeup_txfh(PHY_VARS_eNB *eNB,
      continue;  //hacking only works when all RU_tx works on the same subframe #TODO: adding mask stuff
    }
    AssertFatal((ret = pthread_mutex_lock(&ru_proc->mutex_eNBs)) == 0, "ERROR pthread_mutex_lock failed on mutex_eNBs L1_thread_tx with ret=%d\n", ret);
    if (ru_proc->instance_cnt_eNBs == 0) {
      LOG_E(PHY, "Frame %d, subframe %d: TX FH thread busy, dropping Frame %d, subframe %d\n", ru_proc->frame_tx, ru_proc->tti_tx, proc->frame_rx, proc->subframe_rx);
      AssertFatal((ret = pthread_mutex_lock(&eNB->proc.mutex_RU_tx)) == 0, "mutex_lock returns %d\n", ret);
      eNB->proc.RU_mask_tx = 0;
      AssertFatal((ret = pthread_mutex_unlock(&eNB->proc.mutex_RU_tx)) == 0, "mutex_unlock returns %d\n", ret);
      AssertFatal((ret = pthread_mutex_unlock(&ru_proc->mutex_eNBs)) == 0, "mutex_unlock return %d\n", ret);
      return(-1);
    }
    AssertFatal((ret = pthread_mutex_lock(&ru_proc->mutex_eNBs)) == 0, "ERROR pthread_mutex_lock failed on mutex_eNBs L1_thread_tx with ret=%d\n", ret);
    ru_proc->instance_cnt_eNBs = 0;
    ru_proc->timestamp_tx = timestamp_tx;
    ru_proc->tti_tx = subframe_tx;
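The lte-enb.c hunk follows the same pattern on the eNB side, with a non-blocking "busy" path: if the RU TX thread has not yet consumed the previous wake-up (instance_cnt_eNBs == 0), the frame/subframe is logged as dropped, RU_mask_tx is reset under its own mutex, the RU mutex is released, and the function returns -1 instead of blocking. A hedged sketch of that try-wakeup-or-drop shape, again with illustrative names (proc_t, try_wakeup) rather than the OAI types:

#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t mutex;
  pthread_cond_t  cond;
  int instance_cnt;        /* -1: consumer idle, 0: wake-up pending (like instance_cnt_eNBs) */
} proc_t;

/* Non-blocking wake-up: hand over a new work item if the consumer is idle,
 * otherwise report "busy" so the caller can drop the current subframe. */
static int try_wakeup(proc_t *p) {
  pthread_mutex_lock(&p->mutex);
  if (p->instance_cnt == 0) {            /* previous wake-up not consumed yet */
    pthread_mutex_unlock(&p->mutex);
    fprintf(stderr, "TX FH thread busy, dropping this subframe\n");
    return -1;
  }
  p->instance_cnt = 0;                   /* post the new wake-up */
  pthread_cond_signal(&p->cond);
  pthread_mutex_unlock(&p->mutex);
  return 0;
}

int main(void) {
  proc_t p;
  pthread_mutex_init(&p.mutex, NULL);
  pthread_cond_init(&p.cond, NULL);
  p.instance_cnt = -1;
  printf("first wakeup:  %d\n", try_wakeup(&p));   /* 0: accepted */
  printf("second wakeup: %d\n", try_wakeup(&p));   /* -1: still busy (no consumer in this demo) */
  return 0;
}

Note that the real code takes two separate locks (ru_proc->mutex_eNBs and eNB->proc.mutex_RU_tx); the sketch collapses them into one mutex for brevity.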