Commit 5c7b8b10, authored Aug 17, 2020 by Karen Xie

XDMA: fix code format/style warnings

parent 017b4bd9
Showing 8 changed files with 206 additions and 194 deletions (+206 / -194)
XDMA/linux-kernel/libxdma/libxdma.c    +143  -132
XDMA/linux-kernel/libxdma/libxdma.h     +12    -9
XDMA/linux-kernel/xdma/cdev_ctrl.c       +2    -2
XDMA/linux-kernel/xdma/cdev_sgdma.c     +36   -36
XDMA/linux-kernel/xdma/cdev_xvc.c        +1    -1
XDMA/linux-kernel/xdma/xdma_cdev.c      +11   -12
XDMA/linux-kernel/xdma/xdma_mod.c        +0    -1
XDMA/linux-kernel/xdma/xdma_mod.h        +1    -1
XDMA/linux-kernel/libxdma/libxdma.c
@@ -38,7 +38,6 @@
 #include "version.h"

 #define DRV_MODULE_NAME "libxdma"
 #define DRV_MODULE_DESC "Xilinx XDMA Base Driver"
-#define DRV_MODULE_RELDATE "Dec. 2019"

 static char version[] =
 	DRV_MODULE_DESC " " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
@@ -80,13 +79,16 @@ MODULE_PARM_DESC(desc_blen_max,
  * and will likely be removed in future kernel versions
  */
 #define xlx_wake_up	swake_up_one
-#define xlx_wait_event_interruptible_timeout swait_event_interruptible_timeout_exclusive
+#define xlx_wait_event_interruptible_timeout \
+	swait_event_interruptible_timeout_exclusive
 #elif KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 #define xlx_wake_up	swake_up
-#define xlx_wait_event_interruptible_timeout swait_event_interruptible_timeout
+#define xlx_wait_event_interruptible_timeout \
+	swait_event_interruptible_timeout
 #else
 #define xlx_wake_up	wake_up_interruptible
-#define xlx_wait_event_interruptible_timeout wait_event_interruptible_timeout
+#define xlx_wait_event_interruptible_timeout \
+	wait_event_interruptible_timeout
 #endif
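The three branches above keep the rest of libxdma agnostic to the swait API churn: the exclusive swait variant on newer kernels, the plain swait variant from 4.6 on, and classic wait queues before that. A minimal sketch of how such a wrapped wait/wake pair is typically consumed (illustrative only, not the driver's code; the wait-queue field is from struct xdma_transfer in this commit, while TRANSFER_STATE_SUBMITTED is assumed from the driver's transfer-state enum):

/* Illustrative sketch: block until a transfer leaves the submitted state,
 * using the version-agnostic wrappers defined above. */
static int wait_for_transfer(struct xdma_transfer *xfer, int timeout_ms)
{
	/* expands to swait_event_interruptible_timeout_exclusive(),
	 * swait_event_interruptible_timeout() or
	 * wait_event_interruptible_timeout() depending on the kernel */
	return xlx_wait_event_interruptible_timeout(xfer->wq,
			xfer->state != TRANSFER_STATE_SUBMITTED,
			msecs_to_jiffies(timeout_ms));
}

/* the completing side pairs this with: xlx_wake_up(&xfer->wq); */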
@@ -676,10 +678,9 @@ static struct xdma_transfer *engine_start(struct xdma_engine *engine)
 	/* Add credits for Streaming mode C2H */
 	if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
-		if (enable_credit_mp) {
-			//write_register(RX_BUF_PAGES,&engine->sgdma_regs->credits);
-			write_register(engine->desc_used, &engine->sgdma_regs->credits, 0);
-		}
+		if (enable_credit_mp)
+			write_register(engine->desc_used,
+				       &engine->sgdma_regs->credits, 0);
 	}

 	/* initialize number of descriptors of dequeued transfers */
@@ -929,7 +930,7 @@ engine_service_final_transfer(struct xdma_engine *engine,
 		 */
 		WARN_ON(*pdesc_completed > transfer->desc_num);
 	}

-	/* mark transfer as succesfully completed */
+	/* mark transfer as successfully completed */
 	transfer->state = TRANSFER_STATE_COMPLETED;
 }
@@ -2869,8 +2870,8 @@ struct xdma_transfer *engine_cyclic_stop(struct xdma_engine *engine)
 			engine->name);

 	/* free up the buffer allocated for perf run */
 	if (engine->perf_buf_virt)
-		dma_free_coherent(&engine->xdev->pdev->dev, size, engine->perf_buf_virt,
+		dma_free_coherent(&engine->xdev->pdev->dev, size,
+				  engine->perf_buf_virt,
 				  engine->perf_buf_bus);
 	engine->perf_buf_virt = NULL;
 	list_del(&transfer->entry);
@@ -3117,7 +3118,8 @@ static void transfer_destroy(struct xdma_dev *xdev, struct xdma_transfer *xfer)
 }

 static int transfer_build(struct xdma_engine *engine,
-	struct xdma_request_cb *req, struct xdma_transfer *xfer, unsigned int desc_max)
+			  struct xdma_request_cb *req, struct xdma_transfer *xfer,
+			  unsigned int desc_max)
 {
 	struct sw_desc *sdesc = &(req->sdesc[req->sw_desc_idx]);
 	int i = 0;
@@ -3139,9 +3141,12 @@ static int transfer_build(struct xdma_engine *engine,
 		req->ep_addr += sdesc->len;

 		if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
-			memset(xfer->res_virt + j, 0, sizeof(struct xdma_result));
-			xfer->desc_virt[j].src_addr_lo = cpu_to_le32(PCI_DMA_L(bus));
-			xfer->desc_virt[j].src_addr_hi = cpu_to_le32(PCI_DMA_H(bus));
+			memset(xfer->res_virt + j, 0,
+			       sizeof(struct xdma_result));
+			xfer->desc_virt[j].src_addr_lo =
+				cpu_to_le32(PCI_DMA_L(bus));
+			xfer->desc_virt[j].src_addr_hi =
+				cpu_to_le32(PCI_DMA_H(bus));
 			bus += sizeof(struct xdma_result);
 		}
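In the streaming C2H branch above, each descriptor's source address is pointed at a per-descriptor writeback slot, with the 64-bit bus address split into two 32-bit descriptor words by the driver's PCI_DMA_L()/PCI_DMA_H() helpers. A self-contained illustration of that split (ADDR_LO/ADDR_HI below are stand-ins of my own, not the driver's macros):

/* Stand-in macros showing the lo/hi split done by PCI_DMA_L/PCI_DMA_H:
 * the hardware descriptor stores a 64-bit bus address as two 32-bit words. */
#define ADDR_LO(addr)	((u32)((u64)(addr) & 0xffffffffULL))
#define ADDR_HI(addr)	((u32)(((u64)(addr) >> 32) & 0xffffffffULL))
/* usage (sketch): desc->src_addr_lo = cpu_to_le32(ADDR_LO(bus)); */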
@@ -3151,7 +3156,8 @@ static int transfer_build(struct xdma_engine *engine,
 }

-static int transfer_init(struct xdma_engine *engine, struct xdma_request_cb *req,
-			 struct xdma_transfer *xfer)
+static int transfer_init(struct xdma_engine *engine,
+			 struct xdma_request_cb *req,
+			 struct xdma_transfer *xfer)
 {
 	unsigned int desc_max = min_t(unsigned int,
 				      req->sw_desc_cnt - req->sw_desc_idx,
@@ -3167,7 +3173,7 @@ static int transfer_init(struct xdma_engine *engine, struct xdma_request_cb *req
 	/* lock the engine state */
 	spin_lock_irqsave(&engine->lock, flags);

 	/* initialize wait queue */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
 	init_swait_queue_head(&xfer->wq);
 #else
 	init_waitqueue_head(&xfer->wq);
@@ -3177,24 +3183,28 @@ static int transfer_init(struct xdma_engine *engine, struct xdma_request_cb *req
 	xfer->dir = engine->dir;
 	xfer->desc_virt = engine->desc + engine->desc_idx;
 	xfer->res_virt = engine->cyclic_result + engine->desc_idx;
-	xfer->desc_bus = engine->desc_bus + (sizeof(struct xdma_desc) * engine->desc_idx);
-	xfer->res_bus = engine->cyclic_result_bus + (sizeof(struct xdma_result) * engine->desc_idx);
+	xfer->desc_bus = engine->desc_bus +
+			(sizeof(struct xdma_desc) * engine->desc_idx);
+	xfer->res_bus = engine->cyclic_result_bus +
+			(sizeof(struct xdma_result) * engine->desc_idx);
 	xfer->desc_index = engine->desc_idx;

-	/* TODO: Need to handle desc_used >= XDMA_TRANSFER_MAX_DESC */
-	if ((engine->desc_idx + desc_max) >= XDMA_TRANSFER_MAX_DESC)
+	/* TODO: Need to handle desc_used >= XDMA_TRANSFER_MAX_DESC for aio calls */
+	if ((engine->desc_idx + desc_max) >= XDMA_TRANSFER_MAX_DESC)
 		desc_max = XDMA_TRANSFER_MAX_DESC - engine->desc_idx;

 	transfer_desc_init(xfer, desc_max);

-	dbg_sg("xfer= %p transfer->desc_bus = 0x%llx.\n", xfer, (u64)xfer->desc_bus);
-	transfer_build(engine, req, xfer, desc_max);
+	dbg_sg("xfer= %p transfer->desc_bus = 0x%llx.\n",
+	       xfer, (u64)xfer->desc_bus);
+	transfer_build(engine, req, xfer, desc_max);

-	/* Contiguous descriptors cannot cross PAGE boundry
+	/*
+	 * Contiguous descriptors cannot cross PAGE boundary
 	 * The 1st descriptor may start in the middle of the page,
-	 * calculate the 1st block of adj desc accordingly */
+	 * calculate the 1st block of adj desc accordingly
+	 */
 	desc_align = 128 - (engine->desc_idx % 128) - 1;
 	if (desc_align > (desc_max - 1))
 		desc_align = desc_max - 1;
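The adjacency math above caps the first block of contiguous descriptors at whatever remains in the current 128-descriptor block, because the first descriptor may start mid-page. A tiny stand-alone worked example of the same arithmetic (the input values are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int desc_idx = 100;	/* hypothetical current position in the ring */
	unsigned int desc_max = 60;	/* hypothetical descriptors in this transfer */
	unsigned int desc_align;

	desc_align = 128 - (desc_idx % 128) - 1;	/* 27 slots left in this block */
	if (desc_align > (desc_max - 1))
		desc_align = desc_max - 1;

	printf("adjacent descriptors in first block: %u\n", desc_align);	/* prints 27 */
	return 0;
}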
@@ -3210,8 +3220,9 @@ static int transfer_init(struct xdma_engine *engine, struct xdma_request_cb *req
 	xdma_desc_control_set(xfer->desc_virt + last, control);

 	xfer->desc_num = desc_max;
-	engine->desc_idx = (engine->desc_idx + desc_max) % XDMA_TRANSFER_MAX_DESC;
-	engine->desc_used += desc_max;
+	engine->desc_idx = (engine->desc_idx + desc_max) %
+					XDMA_TRANSFER_MAX_DESC;
+	engine->desc_used += desc_max;

 	/* fill in adjacent numbers */
 	for (i = 0; i < xfer->desc_num && desc_align; i++, desc_align--)
@@ -3226,7 +3237,8 @@ static int transfer_init(struct xdma_engine *engine, struct xdma_request_cb *req
 static int transfer_init_cyclic(struct xdma_engine *engine,
-				struct xdma_request_cb *req, struct xdma_transfer *xfer)
+				struct xdma_request_cb *req,
+				struct xdma_transfer *xfer)
 {
 	unsigned int desc_max = min_t(unsigned int,
 				      req->sw_desc_cnt - req->sw_desc_idx,
@@ -3558,12 +3570,11 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 				done);

 			/* For C2H streaming use writeback results */
-			if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
-				for (i = 0; i < xfer->desc_num; i++) {
+			if (engine->streaming &&
+			    engine->dir == DMA_FROM_DEVICE) {
+				for (i = 0; i < xfer->desc_num; i++)
 					done += result[i].length;
-				}
-			}
-			else
+			} else
 				done += xfer->len;

 			rv = 0;
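The accounting above is what the submit path ultimately returns: for AXI-ST C2H the per-descriptor lengths written back by the hardware are summed, otherwise the full requested transfer length is used. Condensed into a stand-alone helper for clarity (a sketch, not a function that exists in the driver):

/* Sketch only: mirrors the completion accounting in xdma_xfer_submit() above. */
static size_t bytes_completed(struct xdma_engine *engine,
			      struct xdma_transfer *xfer,
			      struct xdma_result *result)
{
	size_t done = 0;
	int i;

	if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
		/* AXI-ST C2H: trust the per-descriptor writeback lengths */
		for (i = 0; i < xfer->desc_num; i++)
			done += result[i].length;
	} else {
		/* MM or H2C: the whole requested length was moved */
		done += xfer->len;
	}
	return done;
}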
@@ -3610,8 +3621,8 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 		engine->desc_used -= xfer->desc_num;
 		transfer_destroy(xdev, xfer);

-		/* use multiple transfers per request if we could not fit all data within
-		 * single descriptor chain.
+		/* use multiple transfers per request if we could not fit
+		 * all data within single descriptor chain.
 		 */
 		tfer_idx++;
@@ -3638,8 +3649,9 @@ unmap_sgl:
 }
 EXPORT_SYMBOL_GPL(xdma_xfer_submit);

-ssize_t xdma_xfer_completion(void *cb_hndl, void *dev_hndl, int channel, bool write,
-			u64 ep_addr, struct sg_table *sgt, bool dma_mapped, int timeout_ms)
+ssize_t xdma_xfer_completion(void *cb_hndl, void *dev_hndl, int channel,
+			     bool write, u64 ep_addr, struct sg_table *sgt,
+			     bool dma_mapped, int timeout_ms)
 {
 	struct xdma_dev *xdev = (struct xdma_dev *)dev_hndl;
@@ -3691,22 +3703,19 @@ ssize_t xdma_xfer_completion(void *cb_hndl, void *dev_hndl, int channel, bool wr
 	while (nents) {
 		xfer = &req->tfer[tfer_idx];
 		nents -= xfer->desc_num;
 		switch (xfer->state) {
 		case TRANSFER_STATE_COMPLETED:
 			dbg_tfr("transfer %p, %u, ep 0x%llx compl, +%lu.\n",
-				xfer, xfer->len, req->ep_addr - xfer->len, done);
+				xfer, xfer->len, req->ep_addr - xfer->len,
+				done);

 			result = xfer->res_virt;
 			dbg_tfr("transfer %p, %u, ep 0x%llx compl, +%lu.\n",
-				xfer, xfer->len, req->ep_addr - xfer->len, done);
+				xfer, xfer->len, req->ep_addr - xfer->len,
+				done);

 			/* For C2H streaming use writeback results */
-			if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
-				for (i = 0; i < xfer->desc_num; i++) {
+			if (engine->streaming &&
+			    engine->dir == DMA_FROM_DEVICE) {
+				for (i = 0; i < xfer->desc_num; i++)
 					done += result[i].length;
-				}
-			}
-			else
+			} else
 				done += xfer->len;

 			rv = 0;
@@ -3763,13 +3772,14 @@ unmap_sgl:
 EXPORT_SYMBOL_GPL(xdma_xfer_completion);

-ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel, bool write,
-			u64 ep_addr, struct sg_table *sgt, bool dma_mapped, int timeout_ms)
+ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel,
+				bool write, u64 ep_addr, struct sg_table *sgt,
+				bool dma_mapped, int timeout_ms)
 {
 	struct xdma_dev *xdev = (struct xdma_dev *)dev_hndl;
 	struct xdma_engine *engine;
 	struct xdma_io_cb *cb = (struct xdma_io_cb *)cb_hndl;
-	int rv = 0, tfer_idx = 0;
+	int rv = 0, tfer_idx = 0;
 	struct scatterlist *sg = sgt->sgl;
 	int nents;
 	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
@@ -3806,8 +3816,8 @@ ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel, bool
 	}

 	if (engine->magic != MAGIC_ENGINE) {
-		pr_err("%s has invalid magic number %lx\n", engine->name,
-			engine->magic);
+		pr_err("%s has invalid magic number %lx\n",
+		       engine->name, engine->magic);
 		return -EINVAL;
 	}
@@ -3865,12 +3875,13 @@ ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel, bool
 			pr_info("transfer_init failed\n");

 			if (!dma_mapped && sgt->nents) {
-				pci_unmap_sg(xdev->pdev, sgt->sgl, sgt->orig_nents, dir);
+				pci_unmap_sg(xdev->pdev, sgt->sgl,
+					     sgt->orig_nents, dir);
 				sgt->nents = 0;
 			}

 			/* Transfer failed return BUSY */
-			if(cb->io_done)
+			if (cb->io_done)
 				cb->io_done((unsigned long)cb, -EBUSY);

 			goto rel_req;
@@ -3888,8 +3899,8 @@ ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel, bool
 			xfer->sgt = sgt;
 		}

-		dbg_tfr("xfer %p, len %u, ep 0x%llx, sg %u/%u. nents = %d\n", xfer,
-			xfer->len, req->ep_addr, req->sw_desc_idx,
+		dbg_tfr("xfer %p, len %u, ep 0x%llx, sg %u/%u. nents = %d\n",
+			xfer, xfer->len, req->ep_addr, req->sw_desc_idx,
 			req->sw_desc_cnt, nents);

 #ifdef __LIBXDMA_DEBUG__
@@ -3902,8 +3913,8 @@ ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel, bool
 			goto unmap_sgl;
 		}

-		/* use multiple transfers per request if we could not fit all data within
-		 * single descriptor chain.
+		/* use multiple transfers per request if we could not fit all
+		 * data within single descriptor chain.
 		 */
 		tfer_idx++;
 	}
@@ -3927,7 +3938,7 @@ EXPORT_SYMBOL_GPL(xdma_xfer_submit_nowait);
 int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine)
 {
-	u32 max_consistent_size = XDMA_PERF_NUM_DESC * 32 * 1024; /* 1024 pages, 4MB */
+	u32 max_consistent_size = XDMA_PERF_NUM_DESC * 32 * 1024; /* 4MB */
 	struct xdma_transfer *transfer;
 	u64 ep_addr = 0;
 	int num_desc_in_a_loop = XDMA_PERF_NUM_DESC;
@@ -3948,7 +3959,8 @@ int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine)
 		num_desc_in_a_loop = size / size_in_desc;
 	}

-	engine->perf_buf_virt = dma_alloc_coherent(&xdev->pdev->dev, size_in_desc,
+	engine->perf_buf_virt = dma_alloc_coherent(&xdev->pdev->dev,
+						   size_in_desc,
 						   &engine->perf_buf_bus,
 						   GFP_KERNEL);
 	if (!engine->perf_buf_virt) {
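dma_alloc_coherent() hands back both a CPU pointer and the bus address the DMA engine will target, and the matching dma_free_coherent() in the error paths of this file must be called with the same device, size and bus address. A condensed sketch of that pairing (illustrative only, not the driver's function):

/* Sketch: allocate a coherent buffer for a measurement run and release it
 * with the exact same device/size/bus-address triple. */
static int perf_buf_example(struct device *dev, size_t size)
{
	dma_addr_t bus;
	void *virt = dma_alloc_coherent(dev, size, &bus, GFP_KERNEL);

	if (!virt)
		return -ENOMEM;

	/* ... point descriptors at 'bus' and run the test ... */

	dma_free_coherent(dev, size, virt, bus);
	return 0;
}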
@@ -3998,8 +4010,7 @@ int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine)
 	for (i = 0; i < transfer->desc_num; i++) {
 		struct xdma_desc *desc = transfer->desc_virt + i;
-		dma_addr_t rc_bus_addr = engine->perf_buf_bus/* +
-			(size_in_desc * i)*/;
+		dma_addr_t rc_bus_addr = engine->perf_buf_bus;

 		/* fill in descriptor entry with transfer details */
 		xdma_desc_set(desc, rc_bus_addr, ep_addr, size_in_desc,
@@ -4050,8 +4061,8 @@ err_engine_desc:
 	transfer = NULL;

 err_engine_transfer:
 	if (engine->perf_buf_virt)
-		dma_free_coherent(&xdev->pdev->dev, size_in_desc, engine->perf_buf_virt,
-			engine->perf_buf_bus);
+		dma_free_coherent(&xdev->pdev->dev, size_in_desc,
+				  engine->perf_buf_virt, engine->perf_buf_bus);
 	engine->perf_buf_virt = NULL;
 	return rv;
 }
XDMA/linux-kernel/libxdma/libxdma.h
@@ -58,7 +58,7 @@
  * .REG_IRQ_OUT (reg_irq_from_ch[(channel*2) +: 2]),
  */
 #define XDMA_ENG_IRQ_NUM (1)
-#define MAX_EXTRA_ADJ 0x3F
+#define MAX_EXTRA_ADJ (0x3F)
 #define RX_STATUS_EOP (1)

 /* Target internal components on XDMA control BAR */
@@ -410,12 +410,12 @@ struct sw_desc {
 struct xdma_transfer {
 	struct list_head entry;		/* queue of non-completed transfers */
 	struct xdma_desc *desc_virt;	/* virt addr of the 1st descriptor */
-	struct xdma_result *res_virt;	/* virt addr of result for c2h streaming */
+	struct xdma_result *res_virt;	/* virt addr of result, c2h streaming */
 	dma_addr_t res_bus;		/* bus addr for result descriptors */
 	dma_addr_t desc_bus;		/* bus addr of the first descriptor */
 	int desc_adjacent;		/* adjacent descriptors at desc_bus */
 	int desc_num;			/* number of descriptors in transfer */
-	int desc_index;			/* index for first descriptor in transfer */
+	int desc_index;			/* index for 1st desc. in transfer */
 	enum dma_data_direction dir;
 #if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 	struct swait_queue_head wq;
@@ -438,7 +438,9 @@ struct xdma_request_cb {
 	unsigned int total_len;
 	u64 ep_addr;
-	struct xdma_transfer tfer[2]; /* Use two transfers in case single request needs to be split */
+	/* Use two transfers in case single request needs to be split */
+	struct xdma_transfer tfer[2];
 	struct xdma_io_cb *cb;
 	unsigned int sw_desc_idx;
@@ -486,6 +488,7 @@ struct xdma_engine {
 	struct sg_table cyclic_sgt;
 	u8 *perf_buf_virt;
 	dma_addr_t perf_buf_bus; /* bus address */
 	u8 eop_found; /* used only for cyclic(rx:c2h) */
 	int eop_count;
 	int rx_tail;	/* follows the HW */
XDMA/linux-kernel/xdma/cdev_ctrl.c
@@ -25,9 +25,9 @@
 #include "cdev_ctrl.h"

 #if KERNEL_VERSION(5, 0, 0) <= LINUX_VERSION_CODE
-#define xlx_access_ok(X, Y,Z) access_ok(Y, Z)
+#define xlx_access_ok(X, Y, Z) access_ok(Y, Z)
 #else
-#define xlx_access_ok(X, Y,Z) access_ok(X,Y, Z)
+#define xlx_access_ok(X, Y, Z) access_ok(X, Y, Z)
 #endif

 /*
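This wrapper exists because access_ok() dropped its first (type) argument in kernel 5.0; call sites keep passing a type token and the macro simply discards it on newer kernels. An illustrative call site (a sketch only; the driver's real checks live in this file's ioctl path, and the helper name below is hypothetical):

/* Sketch: the same call compiles before and after kernel 5.0 because the
 * wrapper drops the first argument when access_ok() no longer takes it. */
static int check_ioctl_arg(void __user *arg, size_t len)
{
	if (!xlx_access_ok(VERIFY_WRITE, arg, len))
		return -EFAULT;
	return 0;
}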
XDMA/linux-kernel/xdma/cdev_sgdma.c
@@ -57,13 +57,12 @@ static void async_io_handler(unsigned long cb_hndl, int err)
 	int lock_stat;
 	int rv;

-	if (NULL == caio) {
+	if (caio == NULL) {
 		pr_err("Invalid work struct\n");
 		return;
 	}

 	xcdev = (struct xdma_cdev *)caio->iocb->ki_filp->private_data;
 	rv = xcdev_check(__func__, xcdev, 1);
 	if (rv < 0)
 		return;
@@ -80,13 +79,13 @@ static void async_io_handler(unsigned long cb_hndl, int err)
 		goto skip_tran;
 	}

 	engine = xcdev->engine;
 	xdev = xcdev->xdev;

 	if (!err)
-		numbytes = xdma_xfer_completion((void *)cb, xdev, engine->channel,
-				cb->write, cb->ep_addr, &cb->sgt, 0, sgdma_timeout * 1000);
+		numbytes = xdma_xfer_completion((void *)cb, xdev,
+						engine->channel, cb->write,
+						cb->ep_addr, &cb->sgt, 0,
+						sgdma_timeout * 1000);

 	char_sgdma_unmap_user_buf(cb, cb->write);
@@ -97,9 +96,7 @@ static void async_io_handler(unsigned long cb_hndl, int err)
 	caio->cmpl_cnt++;
 	caio->res += numbytes;

-	if (caio->cmpl_cnt == caio->req_cnt)
-	{
+	if (caio->cmpl_cnt == caio->req_cnt) {
 		res = caio->res;
 		res2 = caio->res2;
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
@@ -113,12 +110,9 @@ skip_tran:
 		kfree(cb);
 		return;
 	}
-	else {
-		spin_unlock(&caio->lock);
-		return;
-	}
+	spin_unlock(&caio->lock);
+	return;

 skip_dev_lock:
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
 	caio->iocb->ki_complete(caio->iocb, numbytes, -EBUSY);
@@ -416,7 +410,8 @@ static ssize_t char_sgdma_read(struct file *file, char __user *buf,
 static ssize_t cdev_aio_write(struct kiocb *iocb, const struct iovec *io,
 			      unsigned long count, loff_t pos)
 {
-	struct xdma_cdev *xcdev = (struct xdma_cdev *)iocb->ki_filp->private_data;
+	struct xdma_cdev *xcdev = (struct xdma_cdev *)
+					iocb->ki_filp->private_data;
 	struct cdev_async_io *caio;
 	struct xdma_engine *engine;
 	struct xdma_dev *xdev;
@@ -460,7 +455,8 @@ static ssize_t cdev_aio_write(struct kiocb *iocb, const struct iovec *io,
 		caio->cb[i].write = true;
 		caio->cb[i].private = caio;
 		caio->cb[i].io_done = async_io_handler;
-		rv = check_transfer_align(engine, caio->cb[i].buf, caio->cb[i].len, pos, 1);
+		rv = check_transfer_align(engine, caio->cb[i].buf,
+					  caio->cb[i].len, pos, 1);
 		if (rv) {
 			pr_info("Invalid transfer alignment detected\n");
 			kmem_cache_free(cdev_cache, caio);
@@ -468,11 +464,12 @@ static ssize_t cdev_aio_write(struct kiocb *iocb, const struct iovec *io,
 		}

 		rv = char_sgdma_map_user_buf_to_sgl(&caio->cb[i], true);
-		if (rv < 0) {
+		if (rv < 0)
 			return rv;
-		}

-		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev, engine->channel,
-					caio->cb[i].write, caio->cb[i].ep_addr, &caio->cb[i].sgt,
+		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev,
+					engine->channel, caio->cb[i].write,
+					caio->cb[i].ep_addr, &caio->cb[i].sgt,
 					0, sgdma_timeout * 1000);
 	}
@@ -487,7 +484,8 @@ static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
 			     unsigned long count, loff_t pos)
 {
-	struct xdma_cdev *xcdev = (struct xdma_cdev *)iocb->ki_filp->private_data;
+	struct xdma_cdev *xcdev = (struct xdma_cdev *)
+					iocb->ki_filp->private_data;
 	struct cdev_async_io *caio;
 	struct xdma_engine *engine;
 	struct xdma_dev *xdev;
@@ -532,7 +530,8 @@ static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
 		caio->cb[i].private = caio;
 		caio->cb[i].io_done = async_io_handler;
-		rv = check_transfer_align(engine, caio->cb[i].buf, caio->cb[i].len, pos, 1);
+		rv = check_transfer_align(engine, caio->cb[i].buf,
+					  caio->cb[i].len, pos, 1);
 		if (rv) {
 			pr_info("Invalid transfer alignment detected\n");
 			kmem_cache_free(cdev_cache, caio);
@@ -540,11 +539,12 @@ static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
 		}

 		rv = char_sgdma_map_user_buf_to_sgl(&caio->cb[i], true);
-		if (rv < 0) {
+		if (rv < 0)
 			return rv;
-		}

-		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev, engine->channel,
-					caio->cb[i].write, caio->cb[i].ep_addr, &caio->cb[i].sgt,
+		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev,
+					engine->channel, caio->cb[i].write,
+					caio->cb[i].ep_addr, &caio->cb[i].sgt,
 					0, sgdma_timeout * 1000);
 	}
XDMA/linux-kernel/xdma/cdev_xvc.c
@@ -136,7 +136,7 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	total_bits = xvc_obj.length;
 	total_bytes = (total_bits + 7) >> 3;

-	buffer = (unsigned char *)kmalloc(total_bytes * 3, GFP_KERNEL);
+	buffer = kmalloc(total_bytes * 3, GFP_KERNEL);
 	if (!buffer) {
 		pr_info("OOM %u, op 0x%x, len %u bits, %u bytes.\n",
 			3 * total_bytes, opcode, total_bits, total_bytes);
XDMA/linux-kernel/xdma/xdma_cdev.c
@@ -607,21 +607,20 @@ int xdma_cdev_init(void)
 	g_xdma_class = class_create(THIS_MODULE, XDMA_NODE_NAME);
 	if (IS_ERR(g_xdma_class)) {
 		dbg_init(XDMA_NODE_NAME ": failed to create class");
-		return -1;
+		return -EINVAL;
 	}

 	/* using kmem_cache_create to enable sequential cleanup */
 	cdev_cache = kmem_cache_create("cdev_cache",
-				       sizeof(struct cdev_async_io),
-				       0, SLAB_HWCACHE_ALIGN, NULL);
+				       sizeof(struct cdev_async_io), 0,
+				       SLAB_HWCACHE_ALIGN, NULL);
 	if (!cdev_cache) {
 		pr_info("memory allocation for cdev_cache failed. OOM\n");
 		return -ENOMEM;
 	}

-	xdma_threads_create(8);
+	xdma_threads_create(num_online_cpus());

 	return 0;
 }
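The last change above also stops hard-coding eight worker threads and sizes the pool from the machine instead, using the standard num_online_cpus() helper. A sketch of the same pattern (the xdma_threads_create() signature is assumed from the call above, and the wrapper function below is hypothetical):

/* Sketch: size the driver's worker pool from the online CPU count rather
 * than a fixed constant. */
static void start_xdma_workers(void)
{
	unsigned int nthreads = num_online_cpus();

	xdma_threads_create(nthreads);
}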
XDMA/linux-kernel/xdma/xdma_mod.c
@@ -32,7 +32,6 @@
 #define DRV_MODULE_NAME "xdma"
 #define DRV_MODULE_DESC "Xilinx XDMA Reference Driver"
-#define DRV_MODULE_RELDATE "Feb. 2018"

 static char version[] =
 	DRV_MODULE_DESC " " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
XDMA/linux-kernel/xdma/xdma_mod.h
@@ -101,7 +101,7 @@ struct xdma_pci_dev {
 struct cdev_async_io {
 	struct kiocb *iocb;
-	struct xdma_io_cb* cb;
+	struct xdma_io_cb *cb;
 	bool write;
 	bool cancel;
 	int cmpl_cnt;