Libraries / dma_ip_drivers / Commits / 5c7b8b10

Commit 5c7b8b10, authored Aug 17, 2020 by Karen Xie
Parent: 017b4bd9

XDMA: fix code format/style warnings

Showing 8 changed files, with 206 additions and 194 deletions:
XDMA/linux-kernel/libxdma/libxdma.c    +143  -132
XDMA/linux-kernel/libxdma/libxdma.h    +12   -9
XDMA/linux-kernel/xdma/cdev_ctrl.c     +2    -2
XDMA/linux-kernel/xdma/cdev_sgdma.c    +36   -36
XDMA/linux-kernel/xdma/cdev_xvc.c      +1    -1
XDMA/linux-kernel/xdma/xdma_cdev.c     +11   -12
XDMA/linux-kernel/xdma/xdma_mod.c      +0    -1
XDMA/linux-kernel/xdma/xdma_mod.h      +1    -1
XDMA/linux-kernel/libxdma/libxdma.c  (+143 -132)
@@ -38,7 +38,6 @@
 #include "version.h"

 #define DRV_MODULE_NAME         "libxdma"
 #define DRV_MODULE_DESC         "Xilinx XDMA Base Driver"
-#define DRV_MODULE_RELDATE      "Dec. 2019"

 static char version[] =
         DRV_MODULE_DESC " " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
@@ -75,18 +74,21 @@ MODULE_PARM_DESC(desc_blen_max,
 /* Kernel version adaptative code */
 #if KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE
 /* since 4.18, using simple wait queues is not recommended
  * except for realtime constraint (see swait.h comments)
  * and will likely be removed in future kernel versions
  */
 #define xlx_wake_up     swake_up_one
-#define xlx_wait_event_interruptible_timeout swait_event_interruptible_timeout_exclusive
+#define xlx_wait_event_interruptible_timeout \
+        swait_event_interruptible_timeout_exclusive
 #elif KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 #define xlx_wake_up     swake_up
-#define xlx_wait_event_interruptible_timeout swait_event_interruptible_timeout
+#define xlx_wait_event_interruptible_timeout \
+        swait_event_interruptible_timeout
 #else
 #define xlx_wake_up     wake_up_interruptible
-#define xlx_wait_event_interruptible_timeout wait_event_interruptible_timeout
+#define xlx_wait_event_interruptible_timeout \
+        wait_event_interruptible_timeout
 #endif
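The three-way #if above keeps a single wait/wake API across kernel generations: swait with exclusive wakeup on >= 4.19, plain swait on >= 4.6, classic wait queues before that. A minimal sketch of how such wrappers are consumed (a hypothetical call site; the condition, timeout handling, and variable names are illustrative, not copied from the driver):

        /* sleep until the transfer leaves the submitted state, or time out;
         * the same line compiles against swait or classic wait queues */
        rv = xlx_wait_event_interruptible_timeout(xfer->wq,
                        xfer->state != TRANSFER_STATE_SUBMITTED,
                        msecs_to_jiffies(timeout_ms));

        /* completion side: wake a waiter on the same queue */
        xlx_wake_up(&xfer->wq);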
@@ -676,10 +678,9 @@ static struct xdma_transfer *engine_start(struct xdma_engine *engine)
 	/* Add credits for Streaming mode C2H */
 	if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
-		if (enable_credit_mp) {
-			//write_register(RX_BUF_PAGES,&engine->sgdma_regs->credits);
+		if (enable_credit_mp)
 			write_register(engine->desc_used,
 					&engine->sgdma_regs->credits, 0);
-		}
 	}

 	/* initialize number of descriptors of dequeued transfers */
@@ -929,7 +930,7 @@ engine_service_final_transfer(struct xdma_engine *engine,
 		 */
 		WARN_ON(*pdesc_completed > transfer->desc_num);
 	}
-	/* mark transfer as succesfully completed */
+	/* mark transfer as successfully completed */
 	transfer->state = TRANSFER_STATE_COMPLETED;
 }
@@ -2869,9 +2870,9 @@ struct xdma_transfer *engine_cyclic_stop(struct xdma_engine *engine)
 			engine->name);
 		/* free up the buffer allocated for perf run */
 		if (engine->perf_buf_virt)
-			dma_free_coherent(&engine->xdev->pdev->dev, size,
-					engine->perf_buf_virt,
+			dma_free_coherent(&engine->xdev->pdev->dev,
+					size, engine->perf_buf_virt,
 					engine->perf_buf_bus);
 		engine->perf_buf_virt = NULL;
 		list_del(&transfer->entry);
 	} else {
@@ -3117,7 +3118,8 @@ static void transfer_destroy(struct xdma_dev *xdev, struct xdma_transfer *xfer)
 }

 static int transfer_build(struct xdma_engine *engine,
-			struct xdma_request_cb *req, struct xdma_transfer *xfer,
-			unsigned int desc_max)
+			struct xdma_request_cb *req,
+			struct xdma_transfer *xfer,
+			unsigned int desc_max)
 {
 	struct sw_desc *sdesc = &(req->sdesc[req->sw_desc_idx]);
 	int i = 0;
@@ -3139,10 +3141,13 @@ static int transfer_build(struct xdma_engine *engine,
 		req->ep_addr += sdesc->len;
 		if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
-			memset(xfer->res_virt + j, 0, sizeof(struct xdma_result));
-			xfer->desc_virt[j].src_addr_lo = cpu_to_le32(PCI_DMA_L(bus));
-			xfer->desc_virt[j].src_addr_hi = cpu_to_le32(PCI_DMA_H(bus));
+			memset(xfer->res_virt + j, 0,
+				sizeof(struct xdma_result));
+			xfer->desc_virt[j].src_addr_lo =
+				cpu_to_le32(PCI_DMA_L(bus));
+			xfer->desc_virt[j].src_addr_hi =
+				cpu_to_le32(PCI_DMA_H(bus));
 			bus += sizeof(struct xdma_result);
 		}
 	}
@@ -3151,7 +3156,8 @@ static int transfer_build(struct xdma_engine *engine,
 }

-static int transfer_init(struct xdma_engine *engine, struct xdma_request_cb *req,
-			struct xdma_transfer *xfer)
+static int transfer_init(struct xdma_engine *engine,
+			struct xdma_request_cb *req,
+			struct xdma_transfer *xfer)
 {
 	unsigned int desc_max = min_t(unsigned int,
 				req->sw_desc_cnt - req->sw_desc_idx,
@@ -3167,7 +3173,7 @@ static int transfer_init(struct xdma_engine *engine, struct xdma_request_cb *req
 	/* lock the engine state */
 	spin_lock_irqsave(&engine->lock, flags);
 	/* initialize wait queue */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
 	init_swait_queue_head(&xfer->wq);
 #else
 	init_waitqueue_head(&xfer->wq);
@@ -3177,24 +3183,28 @@ static int transfer_init(struct xdma_engine *engine, struct xdma_request_cb *req
 	xfer->dir = engine->dir;
 	xfer->desc_virt = engine->desc + engine->desc_idx;
 	xfer->res_virt = engine->cyclic_result + engine->desc_idx;
-	xfer->desc_bus = engine->desc_bus + (sizeof(struct xdma_desc) * engine->desc_idx);
-	xfer->res_bus = engine->cyclic_result_bus + (sizeof(struct xdma_result) * engine->desc_idx);
+	xfer->desc_bus = engine->desc_bus +
+		(sizeof(struct xdma_desc) * engine->desc_idx);
+	xfer->res_bus = engine->cyclic_result_bus +
+		(sizeof(struct xdma_result) * engine->desc_idx);
 	xfer->desc_index = engine->desc_idx;

-	/* TODO: Need to handle desc_used >= XDMA_TRANSFER_MAX_DESC */
+	/* TODO: Need to handle desc_used >= XDMA_TRANSFER_MAX_DESC for aio calls */

 	if ((engine->desc_idx + desc_max) >= XDMA_TRANSFER_MAX_DESC)
 		desc_max = XDMA_TRANSFER_MAX_DESC - engine->desc_idx;

 	transfer_desc_init(xfer, desc_max);

-	dbg_sg("xfer= %p transfer->desc_bus = 0x%llx.\n", xfer, (u64)xfer->desc_bus);
+	dbg_sg("xfer= %p transfer->desc_bus = 0x%llx.\n",
+		xfer, (u64)xfer->desc_bus);
 	transfer_build(engine, req, xfer, desc_max);

-	/* Contiguous descriptors cannot cross PAGE boundry
-	 * The 1st descriptor may start in the middle of the page,
-	 * calculate the 1st block of adj desc accordingly */
+	/*
+	 * Contiguous descriptors cannot cross PAGE boundary
+	 * The 1st descriptor may start in the middle of the page,
+	 * calculate the 1st block of adj desc accordingly
+	 */
 	desc_align = 128 - (engine->desc_idx % 128) - 1;
 	if (desc_align > (desc_max - 1))
 		desc_align = desc_max - 1;
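The desc_align arithmetic above sizes the first adjacent-descriptor block so a contiguous run never crosses a 128-descriptor boundary: starting at desc_idx, at most 128 - (desc_idx % 128) - 1 further descriptors fit before the next boundary, and never more than desc_max - 1. A self-contained worked example of the same computation (the values are illustrative, not from the driver):

    #include <stdio.h>

    /* mirror of the driver's first-adjacent-block math */
    int main(void)
    {
        unsigned int desc_idx = 130, desc_max = 200;
        unsigned int desc_align = 128 - (desc_idx % 128) - 1;   /* = 125 */

        if (desc_align > (desc_max - 1))
            desc_align = desc_max - 1;

        printf("desc_idx=%u: first block may chain %u extra descriptors\n",
               desc_idx, desc_align);
        return 0;
    }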
@@ -3210,8 +3220,9 @@ static int transfer_init(struct xdma_engine *engine, struct xdma_request_cb *req
 	xdma_desc_control_set(xfer->desc_virt + last, control);

 	xfer->desc_num = desc_max;
-	engine->desc_idx = (engine->desc_idx + desc_max) % XDMA_TRANSFER_MAX_DESC;
+	engine->desc_idx = (engine->desc_idx + desc_max) %
+				XDMA_TRANSFER_MAX_DESC;
 	engine->desc_used += desc_max;

 	/* fill in adjacent numbers */
 	for (i = 0; i < xfer->desc_num && desc_align; i++, desc_align--)
@@ -3226,7 +3237,8 @@ static int transfer_init(struct xdma_engine *engine, struct xdma_request_cb *req
 static int transfer_init_cyclic(struct xdma_engine *engine,
 				struct xdma_request_cb *req,
 				struct xdma_transfer *xfer)
 {
-	unsigned int desc_max = min_t(unsigned int,
-				req->sw_desc_cnt - req->sw_desc_idx,
+	unsigned int desc_max =
+		min_t(unsigned int,
+		      req->sw_desc_cnt - req->sw_desc_idx,
@@ -3558,12 +3570,11 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 				done);

 			/* For C2H streaming use writeback results */
-			if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
-				for (i = 0; i < xfer->desc_num; i++) {
+			if (engine->streaming &&
+			    engine->dir == DMA_FROM_DEVICE) {
+				for (i = 0; i < xfer->desc_num; i++)
 					done += result[i].length;
-				}
 			} else
 				done += xfer->len;

 			rv = 0;
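Background for this hunk: in AXI-ST C2H mode the engine writes one result record per completed descriptor, and the byte count actually received lives in those records rather than in the requested length, so the driver sums result[i].length instead of trusting xfer->len. A hedged sketch of that accounting (field names follow libxdma.h; the loop restates the code above with the reasoning spelled out):

        /* writeback records sit at xfer->res_virt, one per descriptor */
        struct xdma_result *result = xfer->res_virt;
        ssize_t done = 0;
        int i;

        for (i = 0; i < xfer->desc_num; i++)
                done += result[i].length;  /* bytes HW actually wrote; may
                                            * be less than the descriptor
                                            * size when EOP ends a packet */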
@@ -3610,8 +3621,8 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 			engine->desc_used -= xfer->desc_num;
 		transfer_destroy(xdev, xfer);

-		/* use multiple transfers per request if we could not fit all data within
-		 * single descriptor chain.
+		/* use multiple transfers per request if we could not fit
+		 * all data within single descriptor chain.
 		 */
 		tfer_idx++;
@@ -3638,8 +3649,9 @@ unmap_sgl:
...
@@ -3638,8 +3649,9 @@ unmap_sgl:
}
}
EXPORT_SYMBOL_GPL
(
xdma_xfer_submit
);
EXPORT_SYMBOL_GPL
(
xdma_xfer_submit
);
ssize_t
xdma_xfer_completion
(
void
*
cb_hndl
,
void
*
dev_hndl
,
int
channel
,
bool
write
,
u64
ep_addr
,
ssize_t
xdma_xfer_completion
(
void
*
cb_hndl
,
void
*
dev_hndl
,
int
channel
,
struct
sg_table
*
sgt
,
bool
dma_mapped
,
int
timeout_ms
)
bool
write
,
u64
ep_addr
,
struct
sg_table
*
sgt
,
bool
dma_mapped
,
int
timeout_ms
)
{
{
struct
xdma_dev
*
xdev
=
(
struct
xdma_dev
*
)
dev_hndl
;
struct
xdma_dev
*
xdev
=
(
struct
xdma_dev
*
)
dev_hndl
;
...
@@ -3655,23 +3667,23 @@ ssize_t xdma_xfer_completion(void *cb_hndl, void *dev_hndl, int channel, bool wr
...
@@ -3655,23 +3667,23 @@ ssize_t xdma_xfer_completion(void *cb_hndl, void *dev_hndl, int channel, bool wr
struct
xdma_result
*
result
;
struct
xdma_result
*
result
;
if
(
write
==
1
)
{
if
(
write
==
1
)
{
if
(
channel
>=
xdev
->
h2c_channel_max
)
{
if
(
channel
>=
xdev
->
h2c_channel_max
)
{
pr_warn
(
"H2C channel %d >= %d.
\n
"
,
pr_warn
(
"H2C channel %d >= %d.
\n
"
,
channel
,
xdev
->
h2c_channel_max
);
channel
,
xdev
->
h2c_channel_max
);
return
-
EINVAL
;
return
-
EINVAL
;
}
}
engine
=
&
xdev
->
engine_h2c
[
channel
];
engine
=
&
xdev
->
engine_h2c
[
channel
];
}
else
if
(
write
==
0
)
{
}
else
if
(
write
==
0
)
{
if
(
channel
>=
xdev
->
c2h_channel_max
)
{
if
(
channel
>=
xdev
->
c2h_channel_max
)
{
pr_warn
(
"C2H channel %d >= %d.
\n
"
,
pr_warn
(
"C2H channel %d >= %d.
\n
"
,
channel
,
xdev
->
c2h_channel_max
);
channel
,
xdev
->
c2h_channel_max
);
return
-
EINVAL
;
}
engine
=
&
xdev
->
engine_c2h
[
channel
];
}
else
{
pr_warn
(
"write %d, exp. 0|1.
\n
"
,
write
);
return
-
EINVAL
;
return
-
EINVAL
;
}
}
engine
=
&
xdev
->
engine_c2h
[
channel
];
}
else
{
pr_warn
(
"write %d, exp. 0|1.
\n
"
,
write
);
return
-
EINVAL
;
}
if
(
!
engine
)
{
if
(
!
engine
)
{
pr_err
(
"dma engine NULL
\n
"
);
pr_err
(
"dma engine NULL
\n
"
);
...
@@ -3691,53 +3703,50 @@ ssize_t xdma_xfer_completion(void *cb_hndl, void *dev_hndl, int channel, bool wr
 	while (nents) {
 		xfer = &req->tfer[tfer_idx];
 		nents -= xfer->desc_num;
 		switch (xfer->state) {
 		case TRANSFER_STATE_COMPLETED:
 			dbg_tfr("transfer %p, %u, ep 0x%llx compl, +%lu.\n",
 				xfer, xfer->len, req->ep_addr - xfer->len,
 				done);

 			result = xfer->res_virt;
-			dbg_tfr("transfer %p, %u, ep 0x%llx compl, +%lu.\n",
-				xfer, xfer->len, req->ep_addr - xfer->len, done);
 			/* For C2H streaming use writeback results */
-			if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
-				for (i = 0; i < xfer->desc_num; i++) {
+			if (engine->streaming &&
+			    engine->dir == DMA_FROM_DEVICE) {
+				for (i = 0; i < xfer->desc_num; i++)
 					done += result[i].length;
-				}
 			} else
 				done += xfer->len;

 			rv = 0;
 			break;
 		case TRANSFER_STATE_FAILED:
 			pr_info("xfer 0x%p,%u, failed, ep 0x%llx.\n",
 				xfer, xfer->len, req->ep_addr - xfer->len);

 #ifdef __LIBXDMA_DEBUG__
 			transfer_dump(xfer);
 			sgt_dump(sgt);
 #endif
 			rv = -EIO;
 			break;
 		default:
 			/* transfer can still be in-flight */
 			pr_info("xfer 0x%p,%u, s 0x%x timed out, ep 0x%llx.\n",
 				xfer, xfer->len, xfer->state, req->ep_addr);
 			engine_status_read(engine, 0, 1);
 			engine_status_dump(engine);
 			transfer_abort(engine, xfer);
 			xdma_engine_stop(engine);

 #ifdef __LIBXDMA_DEBUG__
 			transfer_dump(xfer);
 			sgt_dump(sgt);
 #endif
 			rv = -ERESTARTSYS;
 			break;
 		}
 		transfer_destroy(xdev, xfer);
 		engine->desc_used -= xfer->desc_num;
@@ -3763,13 +3772,14 @@ unmap_sgl:
 }
 EXPORT_SYMBOL_GPL(xdma_xfer_completion);

-ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel, bool write, u64 ep_addr,
-				struct sg_table *sgt, bool dma_mapped, int timeout_ms)
+ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel,
+				bool write, u64 ep_addr,
+				struct sg_table *sgt, bool dma_mapped,
+				int timeout_ms)
 {
 	struct xdma_dev *xdev = (struct xdma_dev *)dev_hndl;
 	struct xdma_engine *engine;
 	struct xdma_io_cb *cb = (struct xdma_io_cb *)cb_hndl;
 	int rv = 0, tfer_idx = 0;
 	struct scatterlist *sg = sgt->sgl;
 	int nents;
 	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
@@ -3800,16 +3810,16 @@ ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel, bool
 		return -EINVAL;
 	}

 	if (!engine) {
 		pr_err("dma engine NULL\n");
 		return -EINVAL;
 	}

 	if (engine->magic != MAGIC_ENGINE) {
-		pr_err("%s has invalid magic number %lx\n", engine->name,
-			engine->magic);
+		pr_err("%s has invalid magic number %lx\n",
+			engine->name, engine->magic);
 		return -EINVAL;
 	}

 	xdev = engine->xdev;
 	if (xdma_device_flag_check(xdev, XDEV_FLAG_OFFLINE)) {
@@ -3865,12 +3875,13 @@ ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel, bool
...
@@ -3865,12 +3875,13 @@ ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel, bool
pr_info
(
"transfer_init failed
\n
"
);
pr_info
(
"transfer_init failed
\n
"
);
if
(
!
dma_mapped
&&
sgt
->
nents
)
{
if
(
!
dma_mapped
&&
sgt
->
nents
)
{
pci_unmap_sg
(
xdev
->
pdev
,
sgt
->
sgl
,
sgt
->
orig_nents
,
dir
);
pci_unmap_sg
(
xdev
->
pdev
,
sgt
->
sgl
,
sgt
->
nents
=
0
;
sgt
->
orig_nents
,
dir
);
}
sgt
->
nents
=
0
;
}
/* Transfer failed return BUSY */
/* Transfer failed return BUSY */
if
(
cb
->
io_done
)
if
(
cb
->
io_done
)
cb
->
io_done
((
unsigned
long
)
cb
,
-
EBUSY
);
cb
->
io_done
((
unsigned
long
)
cb
,
-
EBUSY
);
goto
rel_req
;
goto
rel_req
;
...
@@ -3888,8 +3899,8 @@ ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel, bool
...
@@ -3888,8 +3899,8 @@ ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel, bool
xfer
->
sgt
=
sgt
;
xfer
->
sgt
=
sgt
;
}
}
dbg_tfr
(
"xfer %p, len %u, ep 0x%llx, sg %u/%u. nents = %d
\n
"
,
xfer
,
dbg_tfr
(
"xfer %p, len %u, ep 0x%llx, sg %u/%u. nents = %d
\n
"
,
xfer
->
len
,
req
->
ep_addr
,
req
->
sw_desc_idx
,
xfer
,
xfer
->
len
,
req
->
ep_addr
,
req
->
sw_desc_idx
,
req
->
sw_desc_cnt
,
nents
);
req
->
sw_desc_cnt
,
nents
);
#ifdef __LIBXDMA_DEBUG__
#ifdef __LIBXDMA_DEBUG__
@@ -3902,13 +3913,13 @@ ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel, bool
 			goto unmap_sgl;
 		}

-		/* use multiple transfers per request if we could not fit all data within
-		 * single descriptor chain.
+		/* use multiple transfers per request if we could not fit all
+		 * data within single descriptor chain.
 		 */
 		tfer_idx++;
 	}

 	return -EIOCBQUEUED;

 unmap_sgl:
 	if (!dma_mapped && sgt->nents) {
@@ -3927,7 +3938,7 @@ EXPORT_SYMBOL_GPL(xdma_xfer_submit_nowait);
 int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine)
 {
-	u32 max_consistent_size = XDMA_PERF_NUM_DESC * 32 * 1024; /* 1024 pages, 4MB */
+	u32 max_consistent_size = XDMA_PERF_NUM_DESC * 32 * 1024; /* 4MB */
 	struct xdma_transfer *transfer;
 	u64 ep_addr = 0;
 	int num_desc_in_a_loop = XDMA_PERF_NUM_DESC;
@@ -3948,9 +3959,10 @@ int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine)
 		num_desc_in_a_loop = size / size_in_desc;
 	}

-	engine->perf_buf_virt = dma_alloc_coherent(&xdev->pdev->dev, size_in_desc,
-					&engine->perf_buf_bus, GFP_KERNEL);
+	engine->perf_buf_virt = dma_alloc_coherent(&xdev->pdev->dev,
+					size_in_desc,
+					&engine->perf_buf_bus, GFP_KERNEL);
 	if (!engine->perf_buf_virt) {
 		pr_err("dev %s, %s DMA allocation OOM.\n",
 			dev_name(&xdev->pdev->dev), engine->name);
@@ -3998,8 +4010,7 @@ int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine)
 	for (i = 0; i < transfer->desc_num; i++) {
 		struct xdma_desc *desc = transfer->desc_virt + i;
-		dma_addr_t rc_bus_addr = engine->perf_buf_bus /* +
-			(size_in_desc * i)*/;
+		dma_addr_t rc_bus_addr = engine->perf_buf_bus;

 		/* fill in descriptor entry with transfer details */
 		xdma_desc_set(desc, rc_bus_addr, ep_addr, size_in_desc,
@@ -4040,8 +4051,8 @@ int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine)
(indentation-only changes in this hunk; the block as of this commit:)
err_dma_desc:
	if (free_desc && engine->desc)
		dma_free_coherent(&xdev->pdev->dev,
				num_desc_in_a_loop * sizeof(struct xdma_desc),
				engine->desc, engine->desc_bus);
	engine->desc = NULL;
err_engine_desc:
	if (transfer)
@@ -4050,8 +4061,8 @@ err_engine_desc:
 	transfer = NULL;
 err_engine_transfer:
 	if (engine->perf_buf_virt)
-		dma_free_coherent(&xdev->pdev->dev, size_in_desc, engine->perf_buf_virt,
-				engine->perf_buf_bus);
+		dma_free_coherent(&xdev->pdev->dev, size_in_desc,
+				engine->perf_buf_virt,
+				engine->perf_buf_bus);
 	engine->perf_buf_virt = NULL;
 	return rv;
 }
@@ -4772,7 +4783,7 @@ static int transfer_monitor_cyclic(struct xdma_engine *engine,
(indentation-only changes in this hunk; the block as of this commit:)
			transfer->wq,
			(engine->rx_head != engine->rx_tail ||
			 engine->rx_overrun),
			msecs_to_jiffies(timeout_ms));

	dbg_tfr("%s: wait returns %d, rx %d/%d, overrun %d.\n",
		engine->name, rc, engine->rx_head,
XDMA/linux-kernel/libxdma/libxdma.h  (+12 -9)
@@ -57,9 +57,9 @@
  * interrupts per engine, rad2_vul.sv:237
  * .REG_IRQ_OUT (reg_irq_from_ch[(channel*2) +: 2]),
  */
 #define XDMA_ENG_IRQ_NUM	(1)
-#define MAX_EXTRA_ADJ		0x3F
+#define MAX_EXTRA_ADJ		(0x3F)
 #define RX_STATUS_EOP		(1)

 /* Target internal components on XDMA control BAR */
 #define XDMA_OFS_INT_CTRL	(0x2000UL)
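Wrapping macro constants in parentheses is a checkpatch rule that only pays off once a macro holds an expression; a small self-contained illustration of the hazard it guards against (the macro names here are made up for the demo, not from the driver):

    #include <stdio.h>

    #define SPAN    4 - 1       /* unparenthesized expression macro */
    #define SPAN_OK (4 - 1)     /* parenthesized, expands safely */

    int main(void)
    {
        /* 2 * 4 - 1 = 7, but 2 * (4 - 1) = 6 */
        printf("2*SPAN = %d, 2*SPAN_OK = %d\n", 2 * SPAN, 2 * SPAN_OK);
        return 0;
    }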
@@ -410,12 +410,12 @@ struct sw_desc {
 struct xdma_transfer {
 	struct list_head entry;		/* queue of non-completed transfers */
 	struct xdma_desc *desc_virt;	/* virt addr of the 1st descriptor */
-	struct xdma_result *res_virt;	/* virt addr of result for c2h streaming */
+	struct xdma_result *res_virt;	/* virt addr of result, c2h streaming */
 	dma_addr_t res_bus;		/* bus addr for result descriptors */
 	dma_addr_t desc_bus;		/* bus addr of the first descriptor */
 	int desc_adjacent;		/* adjacent descriptors at desc_bus */
 	int desc_num;			/* number of descriptors in transfer */
-	int desc_index;			/* index for first descriptor in transfer */
+	int desc_index;			/* index for 1st desc. in transfer */
 	enum dma_data_direction dir;
 #if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 	struct swait_queue_head wq;
@@ -438,7 +438,9 @@ struct xdma_request_cb {
 	unsigned int total_len;
 	u64 ep_addr;

+	/* Use two transfers in case single request needs to be split */
 	struct xdma_transfer tfer[2];
+
 	struct xdma_io_cb *cb;
 	unsigned int sw_desc_idx;
@@ -484,8 +486,9 @@ struct xdma_engine {
(whitespace/alignment changes only in this hunk; the block as of this commit:)
	dma_addr_t cyclic_result_bus;	/* bus addr for transfer */
	struct xdma_request_cb *cyclic_req;
	struct sg_table cyclic_sgt;
	u8 *perf_buf_virt;
	dma_addr_t perf_buf_bus;	/* bus address */

	u8 eop_found;	/* used only for cyclic(rx:c2h) */
	int eop_count;
	int rx_tail;	/* follows the HW */
XDMA/linux-kernel/xdma/cdev_ctrl.c  (+2 -2)
@@ -25,9 +25,9 @@
 #include "cdev_ctrl.h"

 #if KERNEL_VERSION(5, 0, 0) <= LINUX_VERSION_CODE
-#define xlx_access_ok(X,Y,Z) access_ok(Y,Z)
+#define xlx_access_ok(X, Y, Z) access_ok(Y, Z)
 #else
-#define xlx_access_ok(X,Y,Z) access_ok(X,Y,Z)
+#define xlx_access_ok(X, Y, Z) access_ok(X, Y, Z)
 #endif

 /*
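This wrapper exists because kernel 5.0 dropped the first (type) argument from access_ok(); xlx_access_ok lets one call site compile on both sides of that change, discarding its first argument on >= 5.0. A hedged usage sketch (the pointer and size follow the ioctl usage in cdev_ctrl.c, but treat the exact arguments as illustrative):

        /* validate a user pointer before copying; VERIFY_WRITE is only
         * meaningful on pre-5.0 kernels and is dropped by the macro after */
        if (!xlx_access_ok(VERIFY_WRITE, (void __user *)arg,
                           sizeof(struct xdma_ioc_base)))
                return -EFAULT;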
XDMA/linux-kernel/xdma/cdev_sgdma.c  (+36 -36)
@@ -57,16 +57,15 @@ static void async_io_handler(unsigned long cb_hndl, int err)
 	int lock_stat;
 	int rv;

-	if (NULL == caio) {
+	if (caio == NULL) {
 		pr_err("Invalid work struct\n");
 		return;
 	}

 	xcdev = (struct xdma_cdev *)caio->iocb->ki_filp->private_data;
-
 	rv = xcdev_check(__func__, xcdev, 1);
 	if (rv < 0)
 		return;

 	/* Safeguarding for cancel requests */
 	lock_stat = spin_trylock(&caio->lock);
@@ -80,13 +79,13 @@ static void async_io_handler(unsigned long cb_hndl, int err)
 		goto skip_tran;
 	}

 	engine = xcdev->engine;
 	xdev = xcdev->xdev;
-
 	if (!err)
-		numbytes = xdma_xfer_completion((void *)cb, xdev, engine->channel, cb->write, cb->ep_addr, &cb->sgt,
-				0, sgdma_timeout * 1000);
+		numbytes = xdma_xfer_completion((void *)cb, xdev,
+				engine->channel, cb->write, cb->ep_addr,
+				&cb->sgt, 0, sgdma_timeout * 1000);

 	char_sgdma_unmap_user_buf(cb, cb->write);
@@ -97,9 +96,7 @@ static void async_io_handler(unsigned long cb_hndl, int err)
 	caio->cmpl_cnt++;
 	caio->res += numbytes;
-
 	if (caio->cmpl_cnt == caio->req_cnt) {
 		res = caio->res;
 		res2 = caio->res2;
-
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
@@ -112,13 +109,10 @@ skip_tran:
 		kmem_cache_free(cdev_cache, caio);
 		kfree(cb);
 		return;
 	}
-	else
-	{
-		spin_unlock(&caio->lock);
-		return;
-	}
+	spin_unlock(&caio->lock);
+	return;

 skip_dev_lock:
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
 	caio->iocb->ki_complete(caio->iocb, numbytes, -EBUSY);
@@ -408,15 +402,16 @@ static ssize_t char_sgdma_write(struct file *file, const char __user *buf,
(line-wrapping and indentation changes only; the block as of this commit:)
}

static ssize_t char_sgdma_read(struct file *file, char __user *buf,
			size_t count, loff_t *pos)
{
	return char_sgdma_read_write(file, buf, count, pos, 0);
}

static ssize_t cdev_aio_write(struct kiocb *iocb, const struct iovec *io,
			unsigned long count, loff_t pos)
{
	struct xdma_cdev *xcdev = (struct xdma_cdev *)
				iocb->ki_filp->private_data;
	struct cdev_async_io *caio;
	struct xdma_engine *engine;
	struct xdma_dev *xdev;
@@ -425,11 +420,11 @@ static ssize_t cdev_aio_write(struct kiocb *iocb, const struct iovec *io,
(indentation changes only; the block as of this commit:)
	if (!xcdev) {
		pr_info("file 0x%p, xcdev NULL, %llu, pos %llu, W %d.\n",
			iocb->ki_filp, (u64)count, (u64)pos, 1);
		return -EINVAL;
	}

	engine = xcdev->engine;
	xdev = xcdev->xdev;

	if (engine->dir != DMA_TO_DEVICE) {
@@ -460,21 +455,23 @@ static ssize_t cdev_aio_write(struct kiocb *iocb, const struct iovec *io,
 		caio->cb[i].write = true;
 		caio->cb[i].private = caio;
 		caio->cb[i].io_done = async_io_handler;
-		rv = check_transfer_align(engine, caio->cb[i].buf, caio->cb[i].len, pos, 1);
+		rv = check_transfer_align(engine, caio->cb[i].buf,
+					caio->cb[i].len, pos, 1);
 		if (rv) {
 			pr_info("Invalid transfer alignment detected\n");
 			kmem_cache_free(cdev_cache, caio);
 			return rv;
 		}

 		rv = char_sgdma_map_user_buf_to_sgl(&caio->cb[i], true);
-		if (rv < 0) {
+		if (rv < 0)
 			return rv;
-		}

-		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev, engine->channel, caio->cb[i].write, caio->cb[i].ep_addr, &caio->cb[i].sgt,
-				0, sgdma_timeout * 1000);
+		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev,
+					engine->channel, caio->cb[i].write,
+					caio->cb[i].ep_addr, &caio->cb[i].sgt,
+					0, sgdma_timeout * 1000);
 	}

 	if (engine->cmplthp)
 		xdma_kthread_wakeup(engine->cmplthp);
@@ -484,10 +481,11 @@ static ssize_t cdev_aio_write(struct kiocb *iocb, const struct iovec *io,
(line-wrapping changes only; the block as of this commit:)
static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
			unsigned long count, loff_t pos)
{
	struct xdma_cdev *xcdev = (struct xdma_cdev *)
				iocb->ki_filp->private_data;
	struct cdev_async_io *caio;
	struct xdma_engine *engine;
	struct xdma_dev *xdev;
@@ -496,7 +494,7 @@ static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
(indentation changes only; the block as of this commit:)
	if (!xcdev) {
		pr_info("file 0x%p, xcdev NULL, %llu, pos %llu, W %d.\n",
			iocb->ki_filp, (u64)count, (u64)pos, 1);
		return -EINVAL;
	}
@@ -532,7 +530,8 @@ static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
 		caio->cb[i].private = caio;
 		caio->cb[i].io_done = async_io_handler;

-		rv = check_transfer_align(engine, caio->cb[i].buf, caio->cb[i].len, pos, 1);
+		rv = check_transfer_align(engine, caio->cb[i].buf,
+					caio->cb[i].len, pos, 1);
 		if (rv) {
 			pr_info("Invalid transfer alignment detected\n");
 			kmem_cache_free(cdev_cache, caio);
@@ -540,12 +539,13 @@ static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
 		}

 		rv = char_sgdma_map_user_buf_to_sgl(&caio->cb[i], true);
-		if (rv < 0) {
+		if (rv < 0)
 			return rv;
-		}

-		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev, engine->channel, caio->cb[i].write, caio->cb[i].ep_addr, &caio->cb[i].sgt,
-				0, sgdma_timeout * 1000);
+		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev,
+					engine->channel, caio->cb[i].write,
+					caio->cb[i].ep_addr, &caio->cb[i].sgt,
+					0, sgdma_timeout * 1000);
 	}

 	if (engine->cmplthp)
XDMA/linux-kernel/xdma/cdev_xvc.c  (+1 -1)
@@ -136,7 +136,7 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	total_bits = xvc_obj.length;
 	total_bytes = (total_bits + 7) >> 3;

-	buffer = (unsigned char *)kmalloc(total_bytes * 3, GFP_KERNEL);
+	buffer = kmalloc(total_bytes * 3, GFP_KERNEL);
 	if (!buffer) {
 		pr_info("OOM %u, op 0x%x, len %u bits, %u bytes.\n",
 			3 * total_bytes, opcode, total_bits, total_bytes);
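Dropping the cast is the standard checkpatch cleanup: kmalloc() returns void *, which converts implicitly to any object pointer type in C, so the cast adds nothing. The pattern in general form (buf and len are illustrative names, not from this file):

        /* before: the cast is redundant noise */
        buf = (unsigned char *)kmalloc(len, GFP_KERNEL);

        /* after: void * converts implicitly; checkpatch stays quiet */
        buf = kmalloc(len, GFP_KERNEL);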
XDMA/linux-kernel/xdma/xdma_cdev.c  (+11 -12)
@@ -607,21 +607,20 @@ int xdma_cdev_init(void)
 	g_xdma_class = class_create(THIS_MODULE, XDMA_NODE_NAME);
 	if (IS_ERR(g_xdma_class)) {
 		dbg_init(XDMA_NODE_NAME ": failed to create class");
-		return -1;
+		return -EINVAL;
 	}

 	/* using kmem_cache_create to enable sequential cleanup */
 	cdev_cache = kmem_cache_create("cdev_cache",
 				sizeof(struct cdev_async_io),
 				0, SLAB_HWCACHE_ALIGN, NULL);
 	if (!cdev_cache) {
 		pr_info("memory allocation for cdev_cache failed. OOM\n");
 		return -ENOMEM;
 	}

-	xdma_threads_create(8);
+	xdma_threads_create(num_online_cpus());

 	return 0;
 }
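Two of the changes here are more than style: the class_create() error path now returns a proper errno value (-EINVAL rather than a bare -1), and the completion-thread pool scales with the machine instead of being fixed at eight. A short sketch of the sizing idea (xdma_threads_create is the driver's own helper from xdma_thread.c; the local variable is mine for illustration):

        /* one completion kthread per online CPU, e.g. 16 on a 16-core
         * host; num_online_cpus() is the kernel's live CPU count */
        unsigned int nthreads = num_online_cpus();

        xdma_threads_create(nthreads);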
XDMA/linux-kernel/xdma/xdma_mod.c  (+0 -1)
@@ -32,7 +32,6 @@
 #define DRV_MODULE_NAME         "xdma"
 #define DRV_MODULE_DESC         "Xilinx XDMA Reference Driver"
-#define DRV_MODULE_RELDATE      "Feb. 2018"

 static char version[] =
         DRV_MODULE_DESC " " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
XDMA/linux-kernel/xdma/xdma_mod.h  (+1 -1)
@@ -101,7 +101,7 @@ struct xdma_pci_dev {
(whitespace/alignment change only; the block as of this commit:)
struct cdev_async_io {
	struct kiocb *iocb;
	struct xdma_io_cb *cb;
	bool write;
	bool cancel;
	int cmpl_cnt;