Libraries / dma_ip_drivers

Commit 781c12f2 (Unverified)
Authored Feb 27, 2020 by Karen Xie, committed by GitHub on Feb 27, 2020
Merge pull request #50 from jascondley/REF41_SCHED_ATOMIC
Fixes a design error revealed by running on aarch64
Parents: 54c1de28, d1f334b1
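Below is a minimal sketch of the locking issue this merge addresses, using hypothetical names (desc_spinlock, desc_mutex, submit_with_*) rather than the driver's own code. The branch name REF41_SCHED_ATOMIC and the spin_lock to mutex_lock switch in the diff suggest that the desc_lock critical section in xdma_xfer_submit can sleep (it builds, queues, and services transfers while the lock is held); sleeping while holding a spinlock is illegal in the kernel and triggers a "BUG: scheduling while atomic" splat, which per the commit description was revealed by running on aarch64. A mutex, by contrast, may be held across sleeping calls as long as the caller runs in process context.

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>

static DEFINE_SPINLOCK(desc_spinlock);	/* old approach: spinlock */
static DEFINE_MUTEX(desc_mutex);	/* new approach: mutex */

static void submit_with_spinlock(void)
{
	spin_lock(&desc_spinlock);
	/* Any call that may sleep here (GFP_KERNEL allocation, waiting for
	 * a DMA transfer to complete, msleep, ...) is a bug while the
	 * spinlock is held: "BUG: scheduling while atomic". */
	msleep(1);
	spin_unlock(&desc_spinlock);
}

static void submit_with_mutex(void)
{
	mutex_lock(&desc_mutex);
	/* A mutex may be held across sleeping operations, provided the
	 * caller itself runs in process context. */
	msleep(1);
	mutex_unlock(&desc_mutex);
}

With a mutex the whole descriptor-submission path can stay serialized without disabling preemption, which is presumably why the lock type, rather than the locking scope, was changed.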
Showing 2 changed files with 9 additions and 9 deletions

XDMA/linux-kernel/libxdma/libxdma.c	+8 -8
XDMA/linux-kernel/libxdma/libxdma.h	+1 -1
XDMA/linux-kernel/libxdma/libxdma.c @ 781c12f2
@@ -3514,7 +3514,7 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 	sg = sgt->sgl;
 	nents = req->sw_desc_cnt;
-	spin_lock(&engine->desc_lock);
+	mutex_lock(&engine->desc_lock);
 	while (nents) {
 		unsigned long flags;
@@ -3523,7 +3523,7 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 		/* build transfer */
 		rv = transfer_init(engine, req, &req->tfer[0]);
 		if (rv < 0) {
-			spin_unlock(&engine->desc_lock);
+			mutex_unlock(&engine->desc_lock);
 			goto unmap_sgl;
 		}
 		xfer = &req->tfer[0];
@@ -3547,7 +3547,7 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 		rv = transfer_queue(engine, xfer);
 		if (rv < 0) {
-			spin_unlock(&engine->desc_lock);
+			mutex_unlock(&engine->desc_lock);
 			pr_info("unable to submit %s, %d.\n", engine->name, rv);
 			goto unmap_sgl;
 		}
@@ -3566,7 +3566,7 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 				desc_count);
 			rv = engine_service_poll(engine, desc_count);
 			if (rv < 0) {
-				spin_unlock(&engine->desc_lock);
+				mutex_unlock(&engine->desc_lock);
 				pr_err("Failed to service polling\n");
 				goto unmap_sgl;
 			}
@@ -3649,11 +3649,11 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 		tfer_idx++;
 		if (rv < 0) {
-			spin_unlock(&engine->desc_lock);
+			mutex_unlock(&engine->desc_lock);
 			goto unmap_sgl;
 		}
 	} /* while (sg) */
-	spin_unlock(&engine->desc_lock);
+	mutex_unlock(&engine->desc_lock);

 unmap_sgl:
 	if (!dma_mapped && sgt->nents) {
@@ -4131,7 +4131,7 @@ static struct xdma_dev *alloc_dev_instance(struct pci_dev *pdev)
 	engine = xdev->engine_h2c;
 	for (i = 0; i < XDMA_CHANNEL_NUM_MAX; i++, engine++) {
 		spin_lock_init(&engine->lock);
-		spin_lock_init(&engine->desc_lock);
+		mutex_init(&engine->desc_lock);
 		INIT_LIST_HEAD(&engine->transfer_list);
 #if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 		init_swait_queue_head(&engine->shutdown_wq);
@@ -4145,7 +4145,7 @@ static struct xdma_dev *alloc_dev_instance(struct pci_dev *pdev)
 	engine = xdev->engine_c2h;
 	for (i = 0; i < XDMA_CHANNEL_NUM_MAX; i++, engine++) {
 		spin_lock_init(&engine->lock);
-		spin_lock_init(&engine->desc_lock);
+		mutex_init(&engine->desc_lock);
 		INIT_LIST_HEAD(&engine->transfer_list);
 #if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 		init_swait_queue_head(&engine->shutdown_wq);
XDMA/linux-kernel/libxdma/libxdma.h @ 781c12f2
@@ -511,7 +511,7 @@ struct xdma_engine {
 	u32 irq_bitmask;		/* IRQ bit mask for this engine */
 	struct work_struct work;	/* Work queue for interrupt handling */
-	spinlock_t desc_lock;		/* protects concurrent access */
+	struct mutex desc_lock;		/* protects concurrent access */
 	dma_addr_t desc_bus;
 	struct xdma_desc *desc;
 	int desc_idx;			/* current descriptor index */
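For reference, a minimal sketch (hypothetical struct and helper names, not the driver's) of the embedded-mutex pattern this header change switches to: the member is declared as struct mutex and must be initialized with mutex_init() before first use, which is what the two alloc_dev_instance hunks above now do for desc_lock.

#include <linux/mutex.h>
#include <linux/slab.h>

/* Hypothetical struct mirroring the desc_lock member of struct xdma_engine. */
struct example_engine {
	struct mutex desc_lock;		/* protects concurrent access */
};

static struct example_engine *example_engine_alloc(void)
{
	struct example_engine *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return NULL;
	/* mutex_init() must run before the first mutex_lock() on desc_lock. */
	mutex_init(&e->desc_lock);
	return e;
}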