dma_ip_drivers

Commit 7642657b
authored Oct 30, 2020 by Karen Xie

XDMA: remove dead code
parent 279ce21a

Showing 3 changed files with 10 additions and 992 deletions.
XDMA/linux-kernel/xdma/cdev_sgdma.c   +1  -4
XDMA/linux-kernel/xdma/libxdma.c      +9  -973
XDMA/linux-kernel/xdma/libxdma.h      +0  -15
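A note for reading the diffs below: the dead code being removed is the AXI-ST C2H cyclic-buffer receive path. Pieced together from the prototypes dropped from libxdma.h and the teardown call dropped from char_sgdma_close(), a caller used that path roughly as in the hypothetical sketch below. The sketch is illustrative only; the wrapper name, the timeout value and the abbreviated error handling are assumptions, not code from this repository, and it assumes the pre-commit libxdma.h declarations.

/* hypothetical usage sketch of the removed cyclic read API */
static ssize_t cyclic_read_example(struct xdma_engine *engine,
				   char __user *buf, size_t count)
{
	ssize_t res;
	int rv;

	/* allocate the RX page ring, make the descriptor list cyclic,
	 * and start the C2H engine */
	rv = xdma_cyclic_transfer_setup(engine);
	if (rv < 0 && rv != -EBUSY)
		return rv;

	/* block until at least one packet (EOP) lands in the ring,
	 * then copy it out to the user buffer */
	res = xdma_engine_read_cyclic(engine, buf, count, 1000 /* ms */);

	/* stop the engine and free the ring (formerly also done on close) */
	rv = xdma_cyclic_transfer_teardown(engine);

	return res < 0 ? res : (rv < 0 ? rv : res);
}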
XDMA/linux-kernel/xdma/cdev_sgdma.c

@@ -807,11 +807,8 @@ static int char_sgdma_close(struct inode *inode, struct file *file)
 	engine = xcdev->engine;
 
-	if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
+	if (engine->streaming && engine->dir == DMA_FROM_DEVICE)
 		engine->device_open = 0;
-		if (engine->cyclic_req)
-			return xdma_cyclic_transfer_teardown(engine);
-	}
 
 	return 0;
 }
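Since the inline +/- markers did not survive extraction, the net effect of the hunk above may be easier to see as the resulting code: after this commit the streaming C2H close path reduces to the lines below (reconstructed from the added side of the diff; surrounding lines are assumed unchanged).

	engine = xcdev->engine;

	if (engine->streaming && engine->dir == DMA_FROM_DEVICE)
		engine->device_open = 0;

	return 0;
}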
XDMA/linux-kernel/xdma/libxdma.c

@@ -692,11 +692,10 @@ static struct xdma_transfer *engine_start(struct xdma_engine *engine)
 	dbg_tfr("%s(%s): transfer=0x%p.\n", __func__, engine->name, transfer);
 
 	/* Add credits for Streaming mode C2H */
-	if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
-		if (enable_credit_mp)
-			write_register(engine->desc_used,
+	if (enable_credit_mp && engine->streaming &&
+	    engine->dir == DMA_FROM_DEVICE)
+		write_register(engine->desc_used,
 			&engine->sgdma_regs->credits, 0);
-	}
 
 	/* initialize number of descriptors of dequeued transfers */
 	engine->desc_dequeued = 0;
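As above, the credit setup in engine_start() after this hunk reads as follows (reconstructed from the added lines; the exact wrapping of the condition is an assumption).

	/* Add credits for Streaming mode C2H */
	if (enable_credit_mp && engine->streaming &&
	    engine->dir == DMA_FROM_DEVICE)
		write_register(engine->desc_used,
			&engine->sgdma_regs->credits, 0);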
@@ -992,239 +991,6 @@ static int engine_service_perf(struct xdma_engine *engine, u32 desc_completed)
 	return 0;
 }
 
-static int engine_transfer_dequeue(struct xdma_engine *engine)
-{
-	struct xdma_transfer *transfer;
-
-	if (!engine) {
-		pr_err("dma engine NULL\n");
-		return -EINVAL;
-	}
-
-	/* pick first transfer on the queue (was submitted to the engine) */
-	transfer = list_entry(engine->transfer_list.next, struct xdma_transfer,
-			      entry);
-	if (!transfer || transfer != &engine->cyclic_req->tfer[1]) {
-		pr_err("%s, xfer 0x%p != 0x%p.\n", engine->name, transfer,
-		       &engine->cyclic_req->tfer[1]);
-		return -EINVAL;
-	}
-	dbg_tfr("%s engine completed cyclic transfer 0x%p (%d desc).\n",
-		engine->name, transfer, transfer->desc_num);
-	/* remove completed transfer from list */
-	list_del(engine->transfer_list.next);
-	return 0;
-}
-
-static int engine_ring_process(struct xdma_engine *engine)
-{
-	struct xdma_result *result;
-	int start;
-	int eop_count = 0;
-
-	if (!engine) {
-		pr_err("dma engine NULL\n");
-		return -EINVAL;
-	}
-
-	result = engine->cyclic_result;
-	if (!result) {
-		pr_err("%s Cyclic transfer resources not available.\n",
-		       engine->name);
-		return -EINVAL;
-	}
-
-	/* where we start receiving in the ring buffer */
-	start = engine->rx_tail;
-
-	/* iterate through all newly received RX result descriptors */
-	dbg_tfr("%s, result %d, 0x%x, len 0x%x.\n", engine->name,
-		engine->rx_tail, result[engine->rx_tail].status,
-		result[engine->rx_tail].length);
-	while (result[engine->rx_tail].status && !engine->rx_overrun) {
-		/* EOP bit set in result? */
-		if (result[engine->rx_tail].status & RX_STATUS_EOP)
-			eop_count++;
-
-		/* increment tail pointer */
-		engine->rx_tail = (engine->rx_tail + 1) % CYCLIC_RX_PAGES_MAX;
-
-		dbg_tfr("%s, head %d, tail %d, 0x%x, len 0x%x.\n",
-			engine->name, engine->rx_head, engine->rx_tail,
-			result[engine->rx_tail].status,
-			result[engine->rx_tail].length);
-
-		/* overrun? */
-		if (engine->rx_tail == engine->rx_head) {
-			dbg_tfr("%s: overrun\n", engine->name);
-			/* flag to user space that overrun has occurred */
-			engine->rx_overrun = 1;
-		}
-	}
-
-	return eop_count;
-}
-
-static int engine_service_cyclic_polled(struct xdma_engine *engine)
-{
-	int eop_count;
-	int rc = 0;
-	struct xdma_poll_wb *writeback_data;
-	u32 sched_limit = 0;
-
-	if (!engine) {
-		pr_err("dma engine NULL\n");
-		return -EINVAL;
-	}
-
-	if (engine->magic != MAGIC_ENGINE) {
-		pr_err("%s has invalid magic number %lx\n", engine->name,
-		       engine->magic);
-		return -EINVAL;
-	}
-
-	eop_count = engine->eop_count;
-	writeback_data = (struct xdma_poll_wb *)engine->poll_mode_addr_virt;
-
-	while (eop_count == 0) {
-		if (sched_limit != 0) {
-			if ((sched_limit % NUM_POLLS_PER_SCHED) == 0)
-				schedule();
-		}
-		sched_limit++;
-
-		/* Monitor descriptor writeback address for errors */
-		if ((writeback_data->completed_desc_count) & WB_ERR_MASK) {
-			rc = -1;
-			break;
-		}
-
-		eop_count = engine_ring_process(engine);
-		if (eop_count < 0) {
-			pr_err("Failed to process engine ring\n");
-			return eop_count;
-		}
-	}
-
-	if (eop_count == 0) {
-		rc = engine_status_read(engine, 1, 0);
-		if (rc < 0) {
-			pr_err("Failed to read engine status\n");
-			return rc;
-		}
-		if ((engine->running) && !(engine->status & XDMA_STAT_BUSY)) {
-			/* transfers on queue? */
-			if (!list_empty(&engine->transfer_list)) {
-				rc = engine_transfer_dequeue(engine);
-				if (rc < 0) {
-					pr_err("Failed to dequeue transfer\n");
-					return rc;
-				}
-			}
-
-			rc = engine_service_shutdown(engine);
-			if (rc < 0) {
-				pr_err("Failed to shutdown engine\n");
-				return rc;
-			}
-		}
-	}
-	eop_count--;
-	engine->eop_count = eop_count;
-	return rc;
-}
-
-static int engine_service_cyclic_interrupt(struct xdma_engine *engine)
-{
-	int eop_count = 0;
-	struct xdma_transfer *xfer;
-	int rv;
-
-	if (!engine) {
-		pr_err("dma engine NULL\n");
-		return -EINVAL;
-	}
-
-	if (engine->magic != MAGIC_ENGINE) {
-		pr_err("%s has invalid magic number %lx\n", engine->name,
-		       engine->magic);
-		return -EINVAL;
-	}
-
-	rv = engine_status_read(engine, 1, 0);
-	if (rv < 0) {
-		pr_err("Failed to read engine status\n");
-		return rv;
-	}
-	eop_count = engine_ring_process(engine);
-	if (eop_count < 0) {
-		pr_err("Failed to process engine ring\n");
-		return eop_count;
-	}
-	/*
-	 * wake any reader on EOP, as one or more packets are now in
-	 * the RX buffer
-	 */
-	xfer = &engine->cyclic_req->tfer[0];
-	if (enable_credit_mp) {
-		xlx_wake_up(&xfer->wq);
-	} else {
-		if (eop_count > 0) {
-			/* awake task on transfer's wait queue */
-			dbg_tfr("wake_up_interruptible() due to %d EOP's\n",
-				eop_count);
-			engine->eop_found = 1;
-			xlx_wake_up(&xfer->wq);
-		}
-	}
-
-	/* engine was running but is no longer busy? */
-	if ((engine->running) && !(engine->status & XDMA_STAT_BUSY)) {
-		/* transfers on queue? */
-		if (!list_empty(&engine->transfer_list)) {
-			rv = engine_transfer_dequeue(engine);
-			if (rv < 0) {
-				pr_err("Failed to dequeue transfer\n");
-				return rv;
-			}
-		}
-
-		rv = engine_service_shutdown(engine);
-		if (rv < 0) {
-			pr_err("Failed to shutdown engine\n");
-			return rv;
-		}
-	}
-
-	return 0;
-}
-
-/* must be called with engine->lock already acquired */
-static int engine_service_cyclic(struct xdma_engine *engine)
-{
-	int rc = 0;
-
-	dbg_tfr("%s()\n", __func__);
-
-	if (!engine) {
-		pr_err("dma engine NULL\n");
-		return -EINVAL;
-	}
-
-	if (engine->magic != MAGIC_ENGINE) {
-		pr_err("%s has invalid magic number %lx\n", engine->name,
-		       engine->magic);
-		return -EINVAL;
-	}
-
-	if (poll_mode)
-		rc = engine_service_cyclic_polled(engine);
-	else
-		rc = engine_service_cyclic_interrupt(engine);
-
-	return rc;
-}
-
 static int engine_service_resume(struct xdma_engine *engine)
 {
 	struct xdma_transfer *transfer_started;
@@ -1401,19 +1167,12 @@ static void engine_service_work(struct work_struct *work)
 	spin_lock_irqsave(&engine->lock, flags);
 	dbg_tfr("engine_service() for %s engine %p\n", engine->name, engine);
-	if (engine->cyclic_req) {
-		rv = engine_service_cyclic(engine);
-		if (rv < 0) {
-			pr_err("Failed to service cyclic engine\n");
-			goto unlock;
-		}
-	} else {
-		rv = engine_service(engine, 0);
-		if (rv < 0) {
-			pr_err("Failed to service engine\n");
-			goto unlock;
-		}
+	rv = engine_service(engine, 0);
+	if (rv < 0) {
+		pr_err("Failed to service engine\n");
+		goto unlock;
 	}
 
 	/* re-enable interrupts for this engine */
 	if (engine->xdev->msix_enabled) {
 		write_register(
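After this hunk, engine_service_work() no longer branches on engine->cyclic_req; the serviced path is simply (reconstructed from the added lines):

	rv = engine_service(engine, 0);
	if (rv < 0) {
		pr_err("Failed to service engine\n");
		goto unlock;
	}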
@@ -1523,10 +1282,7 @@ int engine_service_poll(struct xdma_engine *engine,
 	spin_lock_irqsave(&engine->lock, flags);
 
 	dbg_tfr("%s service.\n", engine->name);
-	if (engine->cyclic_req)
-		rv = engine_service_cyclic(engine);
-	else
-		rv = engine_service(engine, desc_wb);
+	rv = engine_service(engine, desc_wb);
 
 	spin_unlock_irqrestore(&engine->lock, flags);
 
 	return rv;
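Similarly, engine_service_poll() now calls engine_service() unconditionally (reconstructed from the added line):

	rv = engine_service(engine, desc_wb);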
@@ -2551,24 +2307,6 @@ static int xdma_desc_control_set(struct xdma_desc *first, u32 control_field)
 	return 0;
 }
 
-/* xdma_desc_clear -- Clear bits in control field of a descriptor. */
-static int xdma_desc_control_clear(struct xdma_desc *first, u32 clear_mask)
-{
-	/* remember magic and adjacent number */
-	u32 control = le32_to_cpu(first->control);
-
-	if (clear_mask & ~(LS_BYTE_MASK)) {
-		pr_err("Invalid clear mask\n");
-		return -EINVAL;
-	}
-
-	/* merge adjacent and control field */
-	control &= (~clear_mask);
-	/* write control and next_adjacent */
-	first->control = cpu_to_le32(control);
-	return 0;
-}
-
 /* xdma_desc_done - recycle cache-coherent linked list of descriptors.
  *
  * @dev Pointer to pci_dev
@@ -3237,68 +2975,6 @@ static int transfer_init(struct xdma_engine *engine,
 	return 0;
 }
 
-static int transfer_init_cyclic(struct xdma_engine *engine,
-				struct xdma_request_cb *req,
-				struct xdma_transfer *xfer)
-{
-	unsigned int desc_max =
-		min_t(unsigned int, req->sw_desc_cnt - req->sw_desc_idx,
-		      XDMA_TRANSFER_MAX_DESC);
-	int i = 0;
-	u32 control;
-	int rv;
-
-	memset(xfer, 0, sizeof(*xfer));
-
-	/* initialize wait queue */
-#if HAS_SWAKE_UP
-	init_swait_queue_head(&xfer->wq);
-#else
-	init_waitqueue_head(&xfer->wq);
-#endif
-
-	/* remember direction of transfer */
-	xfer->dir = engine->dir;
-
-	xfer->desc_virt = engine->desc;
-	xfer->desc_bus = engine->desc_bus;
-
-	rv = transfer_desc_init(xfer, desc_max);
-	if (rv < 0) {
-		pr_err("Failed to initialize descriptors\n");
-		return rv;
-	}
-
-	dbg_sg("transfer->desc_bus = 0x%llx.\n", (u64)xfer->desc_bus);
-
-	transfer_build(engine, req, xfer, desc_max);
-
-	/* stop engine, EOP for AXI ST, req IRQ on last descriptor */
-	control = XDMA_DESC_STOPPED;
-	control |= XDMA_DESC_EOP;
-	control |= XDMA_DESC_COMPLETED;
-
-	rv = xdma_desc_control_set(xfer->desc_virt + desc_max - 1, control);
-	if (rv < 0) {
-		pr_err("Failed to set desc control\n");
-		return rv;
-	}
-
-	xfer->desc_num = desc_max;
-	xfer->desc_adjacent = 1;
-
-	dbg_sg("transfer 0x%p has %d descriptors\n", xfer, xfer->desc_num);
-	/* fill in adjacent numbers */
-	for (i = 0; i < xfer->desc_num; i++) {
-		u32 next_adj = xdma_get_next_adj(xfer->desc_num - i - 1,
-						 (xfer->desc_virt + i)->next_lo);
-
-		dbg_desc("set next adj at index %d to %u\n", i, next_adj);
-		xdma_desc_adjacent(xfer->desc_virt + i, next_adj);
-	}
-
-	return 0;
-}
-
 #ifdef __LIBXDMA_DEBUG__
 static void sgt_dump(struct sg_table *sgt)
 {
@@ -4697,646 +4373,6 @@ int xdma_user_isr_disable(void *dev_hndl, unsigned int mask)
 	return 0;
 }
 
-/* makes an existing transfer cyclic */
-static void xdma_transfer_cyclic(struct xdma_transfer *transfer)
-{
-	/* link last descriptor to first descriptor */
-	xdma_desc_link(transfer->desc_virt + transfer->desc_num - 1,
-		       transfer->desc_virt, transfer->desc_bus);
-	/* remember transfer is cyclic */
-	transfer->cyclic = 1;
-}
-
-static int transfer_monitor_cyclic(struct xdma_engine *engine,
-				   struct xdma_transfer *transfer,
-				   int timeout_ms)
-{
-	struct xdma_result *result;
-	int rc = 0;
-
-	if (!engine) {
-		pr_err("dma engine NULL\n");
-		return -EINVAL;
-	}
-
-	if (!transfer) {
-		pr_err("%s: xfer empty.\n", engine->name);
-		return -EINVAL;
-	}
-
-	result = engine->cyclic_result;
-	if (!result) {
-		pr_err("%s Cyclic transfer resources not available.\n",
-		       engine->name);
-		return -EINVAL;
-	}
-
-	if (poll_mode) {
-		int i;
-
-		for (i = 0; i < 5; i++) {
-			rc = engine_service_poll(engine, 0);
-			if (rc) {
-				pr_info("%s service_poll failed %d.\n",
-					engine->name, rc);
-				rc = -ERESTARTSYS;
-			}
-			if (result[engine->rx_head].status) {
-				rc = 0;
-				break;
-			}
-		}
-	} else {
-		if (enable_credit_mp) {
-			dbg_tfr("%s: rx_head=%d,rx_tail=%d, wait ...\n",
-				engine->name, engine->rx_head,
-				engine->rx_tail);
-
-			if (timeout_ms > 0)
-				rc = xlx_wait_event_interruptible_timeout(
-					transfer->wq,
-					(engine->rx_head != engine->rx_tail ||
-					 engine->rx_overrun),
-					msecs_to_jiffies(timeout_ms));
-			else
-				rc = xlx_wait_event_interruptible(
-					transfer->wq,
-					(engine->rx_head != engine->rx_tail ||
-					 engine->rx_overrun));
-
-			dbg_tfr("%s: wait returns %d, rx %d/%d, overrun %d.\n",
-				engine->name, rc, engine->rx_head,
-				engine->rx_tail, engine->rx_overrun);
-		} else {
-			if (timeout_ms > 0)
-				rc = xlx_wait_event_interruptible_timeout(
-					transfer->wq, engine->eop_found,
-					msecs_to_jiffies(timeout_ms));
-			else
-				rc = xlx_wait_event_interruptible(
-					transfer->wq, engine->eop_found);
-
-			dbg_tfr("%s: wait returns %d, eop_found %d.\n",
-				engine->name, rc, engine->eop_found);
-		}
-		/* condition evaluated to false after the timeout elapsed */
-		if (rc == 0)
-			rc = -ETIME;
-		/* condition evaluated to true */
-		else if (rc > 0)
-			rc = 0;
-	}
-
-	return rc;
-}
-
-static struct scatterlist *sglist_index(struct sg_table *sgt, unsigned int idx)
-{
-	struct scatterlist *sg = sgt->sgl;
-	int i;
-
-	if (idx >= sgt->orig_nents)
-		return NULL;
-
-	if (!idx)
-		return sg;
-
-	for (i = 0; i < idx; i++, sg = sg_next(sg))
-		;
-
-	return sg;
-}
-
-static int copy_cyclic_to_user(struct xdma_engine *engine, int pkt_length,
-			       int head, char __user *buf, size_t count)
-{
-	struct scatterlist *sg;
-	int more = pkt_length;
-
-	if (!engine) {
-		pr_err("dma engine NULL\n");
-		return -EINVAL;
-	}
-
-	if (!buf) {
-		pr_err("%s invalid user buffer pointer\n", engine->name);
-		return -EINVAL;
-	}
-
-	dbg_tfr("%s, pkt_len %d, head %d, user buf idx %u.\n", engine->name,
-		pkt_length, head, engine->user_buffer_index);
-
-	sg = sglist_index(&engine->cyclic_sgt, head);
-	if (!sg) {
-		pr_info("%s, head %d OOR, sgl %u.\n", engine->name, head,
-			engine->cyclic_sgt.orig_nents);
-		return -EIO;
-	}
-
-	/* EOP found? Transfer anything from head to EOP */
-	while (more) {
-		unsigned int copy = more > PAGE_SIZE ? PAGE_SIZE : more;
-		unsigned int blen = count - engine->user_buffer_index;
-		int rv;
-
-		if (copy > blen)
-			copy = blen;
-
-		dbg_tfr("%s sg %d, 0x%p, copy %u to user %u.\n", engine->name,
-			head, sg, copy, engine->user_buffer_index);
-
-		rv = copy_to_user(&buf[engine->user_buffer_index],
-				  page_address(sg_page(sg)), copy);
-		if (rv) {
-			pr_info("%s copy_to_user %u failed %d\n",
-				engine->name, copy, rv);
-			return -EIO;
-		}
-
-		more -= copy;
-		engine->user_buffer_index += copy;
-
-		if (engine->user_buffer_index == count) {
-			/* user buffer used up */
-			break;
-		}
-
-		head++;
-		if (head >= CYCLIC_RX_PAGES_MAX) {
-			head = 0;
-			sg = engine->cyclic_sgt.sgl;
-		} else
-			sg = sg_next(sg);
-	}
-
-	return pkt_length;
-}
-
-static int complete_cyclic(struct xdma_engine *engine, char __user *buf,
-			   size_t count)
-{
-	struct xdma_result *result;
-	int pkt_length = 0;
-	int fault = 0;
-	int eop = 0;
-	int head;
-	int rc = 0;
-	int num_credit = 0;
-	unsigned long flags;
-
-	if (!engine) {
-		pr_err("dma engine NULL\n");
-		return -EINVAL;
-	}
-	result = engine->cyclic_result;
-
-	if (!result) {
-		pr_err("%s Cyclic transfer resources not available.\n",
-		       engine->name);
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&engine->lock, flags);
-
-	/* where the host currently is in the ring buffer */
-	head = engine->rx_head;
-
-	/* iterate over newly received results */
-	while ((engine->rx_head != engine->rx_tail || engine->rx_overrun) &&
-	       pkt_length < (int)count) {
-		WARN_ON(result[engine->rx_head].status == 0);
-
-		dbg_tfr("%s, result[%d].status = 0x%x length = 0x%x.\n",
-			engine->name, engine->rx_head,
-			result[engine->rx_head].status,
-			result[engine->rx_head].length);
-
-		if ((result[engine->rx_head].status >> 16) != C2H_WB) {
-			pr_info("%s, result[%d].status 0x%x, no magic.\n",
-				engine->name, engine->rx_head,
-				result[engine->rx_head].status);
-			fault = 1;
-		} else if (result[engine->rx_head].length > PAGE_SIZE) {
-			pr_info("%s, result[%d].len 0x%x, > PAGE_SIZE 0x%lx.\n",
-				engine->name, engine->rx_head,
-				result[engine->rx_head].length, PAGE_SIZE);
-			fault = 1;
-		} else if (result[engine->rx_head].length == 0) {
-			pr_info("%s, result[%d].length 0x%x.\n",
-				engine->name, engine->rx_head,
-				result[engine->rx_head].length);
-			fault = 1;
-			/* valid result */
-		} else {
-			pkt_length += result[engine->rx_head].length;
-			num_credit++;
-			/* seen eop? */
-			//if (result[engine->rx_head].status & RX_STATUS_EOP)
-			if (result[engine->rx_head].status & RX_STATUS_EOP) {
-				eop = 1;
-				engine->eop_found = 1;
-			}
-
-			dbg_tfr("%s, pkt_length=%d (%s)\n", engine->name,
-				pkt_length, eop ? "with EOP" : "no EOP yet");
-		}
-
-		/* clear result */
-		result[engine->rx_head].status = 0;
-		result[engine->rx_head].length = 0;
-		/* proceed head pointer so we make progress, even when fault */
-		engine->rx_head = (engine->rx_head + 1) % CYCLIC_RX_PAGES_MAX;
-
-		/* stop processing if a fault/eop was detected */
-		if (fault || eop)
-			break;
-	}
-
-	spin_unlock_irqrestore(&engine->lock, flags);
-
-	if (fault)
-		return -EIO;
-
-	rc = copy_cyclic_to_user(engine, pkt_length, head, buf, count);
-	engine->rx_overrun = 0;
-	/* if copy is successful, release credits */
-	if (rc > 0)
-		write_register(num_credit, &engine->sgdma_regs->credits, 0);
-
-	return rc;
-}
-
-ssize_t xdma_engine_read_cyclic(struct xdma_engine *engine, char __user *buf,
-				size_t count, int timeout_ms)
-{
-	int i = 0;
-	int rc = 0;
-	int rc_len = 0;
-	struct xdma_transfer *transfer;
-
-	if (!engine) {
-		pr_err("dma engine NULL\n");
-		return -EINVAL;
-	}
-
-	if (engine->magic != MAGIC_ENGINE) {
-		pr_err("%s has invalid magic number %lx\n", engine->name,
-		       engine->magic);
-		return -EINVAL;
-	}
-
-	transfer = &engine->cyclic_req->tfer[0];
-	if (!transfer) {
-		pr_err("Invalid DMA transfer\n");
-		return -EINVAL;
-	}
-
-	engine->user_buffer_index = 0;
-
-	do {
-		rc = transfer_monitor_cyclic(engine, transfer, timeout_ms);
-		if (rc < 0)
-			return rc;
-		rc = complete_cyclic(engine, buf, count);
-		if (rc < 0)
-			return rc;
-		rc_len += rc;
-		if (rc_len >= (int)count)
-			return rc_len;
-
-		i++;
-		if (i > 10)
-			break;
-	} while (!engine->eop_found);
-
-	//Always reset EOP found indication regardless of credit mechanims
-	engine->eop_found = 0;
-
-	return rc_len;
-}
-
-static void sgt_free_with_pages(struct sg_table *sgt, int dir,
-				struct pci_dev *pdev)
-{
-	struct scatterlist *sg = sgt->sgl;
-	int npages = sgt->orig_nents;
-	int i;
-
-	for (i = 0; i < npages; i++, sg = sg_next(sg)) {
-		struct page *pg = sg_page(sg);
-		dma_addr_t bus = sg_dma_address(sg);
-
-		if (pg) {
-			if (pdev)
-				pci_unmap_page(pdev, bus, PAGE_SIZE, dir);
-			__free_page(pg);
-		} else
-			break;
-	}
-	sg_free_table(sgt);
-	memset(sgt, 0, sizeof(struct sg_table));
-}
-
-static int sgt_alloc_with_pages(struct sg_table *sgt, unsigned int npages,
-				int dir, struct pci_dev *pdev)
-{
-	struct scatterlist *sg;
-	int i;
-
-	if (sg_alloc_table(sgt, npages, GFP_KERNEL)) {
-		pr_info("sgt OOM.\n");
-		return -ENOMEM;
-	}
-
-	sg = sgt->sgl;
-	for (i = 0; i < npages; i++, sg = sg_next(sg)) {
-		struct page *pg = alloc_page(GFP_KERNEL);
-
-		if (!pg) {
-			pr_info("%d/%u, page OOM.\n", i, npages);
-			goto err_out;
-		}
-
-		if (pdev) {
-			dma_addr_t bus =
-				pci_map_page(pdev, pg, 0, PAGE_SIZE, dir);
-			if (unlikely(pci_dma_mapping_error(pdev, bus))) {
-				pr_info("%d/%u, page 0x%p map err.\n",
-					i, npages, pg);
-				__free_page(pg);
-				goto err_out;
-			}
-			sg_dma_address(sg) = bus;
-			sg_dma_len(sg) = PAGE_SIZE;
-		}
-
-		sg_set_page(sg, pg, PAGE_SIZE, 0);
-	}
-
-	sgt->orig_nents = sgt->nents = npages;
-
-	return 0;
-
-err_out:
-	sgt_free_with_pages(sgt, dir, pdev);
-	return -ENOMEM;
-}
-
-int xdma_cyclic_transfer_setup(struct xdma_engine *engine)
-{
-	struct xdma_dev *xdev;
-	struct xdma_transfer *xfer;
-	dma_addr_t bus;
-	unsigned long flags;
-	int i;
-	int rc;
-
-	if (!engine) {
-		pr_err("dma engine NULL\n");
-		return -EINVAL;
-	}
-
-	xdev = engine->xdev;
-	if (!xdev) {
-		pr_err("Invalid DMA devie\n");
-		return -EINVAL;
-	}
-
-	if (engine->cyclic_req) {
-		dbg_tfr("%s: exclusive access already taken.\n",
-			engine->name);
-		return -EBUSY;
-	}
-
-	spin_lock_irqsave(&engine->lock, flags);
-
-	engine->rx_tail = 0;
-	engine->rx_head = 0;
-	engine->rx_overrun = 0;
-	engine->eop_found = 0;
-
-	rc = sgt_alloc_with_pages(&engine->cyclic_sgt, CYCLIC_RX_PAGES_MAX,
-				  engine->dir, xdev->pdev);
-	if (rc < 0) {
-		pr_info("%s cyclic pages %u OOM.\n", engine->name,
-			CYCLIC_RX_PAGES_MAX);
-		goto err_out;
-	}
-
-	engine->cyclic_req = xdma_init_request(&engine->cyclic_sgt, 0);
-	if (!engine->cyclic_req) {
-		pr_info("%s cyclic request OOM.\n", engine->name);
-		rc = -ENOMEM;
-		goto err_out;
-	}
-
-#ifdef __LIBXDMA_DEBUG__
-	xdma_request_cb_dump(engine->cyclic_req);
-#endif
-
-	xfer = &engine->cyclic_req->tfer[0];
-	rc = transfer_init_cyclic(engine, engine->cyclic_req, xfer);
-	if (rc < 0)
-		goto err_out;
-
-	/* replace source addresses with result write-back addresses */
-	memset(engine->cyclic_result, 0,
-	       CYCLIC_RX_PAGES_MAX * sizeof(struct xdma_result));
-	bus = engine->cyclic_result_bus;
-	for (i = 0; i < xfer->desc_num; i++) {
-		xfer->desc_virt[i].src_addr_lo = cpu_to_le32(PCI_DMA_L(bus));
-		xfer->desc_virt[i].src_addr_hi = cpu_to_le32(PCI_DMA_H(bus));
-		bus += sizeof(struct xdma_result);
-	}
-	/* set control of all descriptors */
-	for (i = 0; i < xfer->desc_num; i++) {
-		rc = xdma_desc_control_clear(xfer->desc_virt + i,
-					     LS_BYTE_MASK);
-		if (rc < 0) {
-			pr_err("Failed to clear desc control\n");
-			goto err_out;
-		}
-		rc = xdma_desc_control_set(xfer->desc_virt + i,
-					   XDMA_DESC_EOP |
-					   XDMA_DESC_COMPLETED);
-		if (rc < 0) {
-			pr_err("Failed to set desc control\n");
-			goto err_out;
-		}
-	}
-	/* make this a cyclic transfer */
-	xdma_transfer_cyclic(xfer);
-
-#ifdef __LIBXDMA_DEBUG__
-	transfer_dump(xfer);
-#endif
-
-	if (enable_credit_mp) {
-		//write_register(RX_BUF_PAGES,&engine->sgdma_regs->credits);
-		write_register(128, &engine->sgdma_regs->credits, 0);
-	}
-
-	/* start cyclic transfer */
-	rc = transfer_queue(engine, xfer);
-	if (rc < 0) {
-		pr_err("Failed to queue transfer\n");
-		goto err_out;
-	}
-
-	xfer->last_in_request = 1;
-
-	spin_unlock_irqrestore(&engine->lock, flags);
-
-	return 0;
-
-	/* unwind on errors */
-err_out:
-	if (engine->cyclic_req) {
-		xdma_request_free(engine->cyclic_req);
-		engine->cyclic_req = NULL;
-	}
-
-	if (engine->cyclic_sgt.orig_nents) {
-		sgt_free_with_pages(&engine->cyclic_sgt, engine->dir,
-				    xdev->pdev);
-		engine->cyclic_sgt.orig_nents = 0;
-		engine->cyclic_sgt.nents = 0;
-		engine->cyclic_sgt.sgl = NULL;
-	}
-
-	spin_unlock_irqrestore(&engine->lock, flags);
-
-	return rc;
-}
-
-static int cyclic_shutdown_polled(struct xdma_engine *engine)
-{
-	int rv;
-
-	if (!engine) {
-		pr_err("dma engine NULL\n");
-		return -EINVAL;
-	}
-
-	spin_lock(&engine->lock);
-
-	dbg_tfr("Polling for shutdown completion\n");
-	do {
-		rv = engine_status_read(engine, 1, 0);
-		if (rv < 0) {
-			pr_err("Failed to read engine status\n");
-			goto failure;
-		}
-		schedule();
-	} while (engine->status & XDMA_STAT_BUSY);
-
-	if ((engine->running) && !(engine->status & XDMA_STAT_BUSY)) {
-		dbg_tfr("Engine has stopped\n");
-
-		if (!list_empty(&engine->transfer_list)) {
-			rv = engine_transfer_dequeue(engine);
-			if (rv < 0) {
-				pr_err("Failed to dequeue transfer\n");
-				goto failure;
-			}
-		}
-
-		rv = engine_service_shutdown(engine);
-		if (rv < 0) {
-			pr_err("Failed to shutdown engine\n");
-			goto failure;
-		}
-	}
-failure:
-	dbg_tfr("Shutdown completion polling done\n");
-	spin_unlock(&engine->lock);
-
-	return rv;
-}
-
-static int cyclic_shutdown_interrupt(struct xdma_engine *engine)
-{
-	int rc;
-
-	if (!engine) {
-		pr_err("dma engine NULL\n");
-		return -EINVAL;
-	}
-
-	rc = xlx_wait_event_interruptible_timeout(engine->shutdown_wq,
-						  !engine->running,
-						  msecs_to_jiffies(10000));
-
-#if 0
-	if (rc) {
-		dbg_tfr("wait_event_interruptible=%d\n", rc);
-		return rc;
-	}
-#endif
-
-	if (engine->running) {
-		pr_info("%s still running?!, %d\n", engine->name, rc);
-		return -EINVAL;
-	}
-
-	return rc;
-}
-
-int xdma_cyclic_transfer_teardown(struct xdma_engine *engine)
-{
-	int rc;
-	struct xdma_dev *xdev = engine->xdev;
-	struct xdma_transfer *transfer;
-	unsigned long flags;
-
-	transfer = engine_cyclic_stop(engine);
-	if (transfer == NULL) {
-		pr_err("Failed to stop cyclic engine\n");
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&engine->lock, flags);
-	if (transfer) {
-		dbg_tfr("%s: stop transfer 0x%p.\n", engine->name, transfer);
-		if (transfer != &engine->cyclic_req->tfer[0]) {
-			pr_info("%s unexpected transfer 0x%p/0x%p\n",
-				engine->name, transfer,
-				&engine->cyclic_req->tfer[0]);
-		}
-	}
-	/* allow engine to be serviced after stop request */
-	spin_unlock_irqrestore(&engine->lock, flags);
-
-	/* wait for engine to be no longer running */
-	if (poll_mode)
-		rc = cyclic_shutdown_polled(engine);
-	else
-		rc = cyclic_shutdown_interrupt(engine);
-	if (rc < 0) {
-		pr_err("Failed to shutdown cyclic transfers\n");
-		return rc;
-	}
-
-	/* obtain spin lock to atomically remove resources */
-	spin_lock_irqsave(&engine->lock, flags);
-
-	if (engine->cyclic_req) {
-		xdma_request_free(engine->cyclic_req);
-		engine->cyclic_req = NULL;
-	}
-
-	if (engine->cyclic_sgt.orig_nents) {
-		sgt_free_with_pages(&engine->cyclic_sgt, engine->dir,
-				    xdev->pdev);
-		engine->cyclic_sgt.orig_nents = 0;
-		engine->cyclic_sgt.nents = 0;
-		engine->cyclic_sgt.sgl = NULL;
-	}
-
-	spin_unlock_irqrestore(&engine->lock, flags);
-
-	return 0;
-}
-
 int engine_addrmode_set(struct xdma_engine *engine, unsigned long arg)
 {
 	int rv;
XDMA/linux-kernel/xdma/libxdma.h

@@ -508,20 +508,9 @@ struct xdma_engine {
 	/* Members applicable to AXI-ST C2H (cyclic) transfers */
 	struct xdma_result *cyclic_result;
 	dma_addr_t cyclic_result_bus;	/* bus addr for transfer */
-	struct xdma_request_cb *cyclic_req;
-	struct sg_table cyclic_sgt;
-
 	u8 *perf_buf_virt;
 	dma_addr_t perf_buf_bus;	/* bus address */
 
-	u8 eop_found;	/* used only for cyclic(rx:c2h) */
-	int eop_count;
-	int rx_tail;	/* follows the HW */
-	int rx_head;	/* where the SW reads from */
-	int rx_overrun;	/* flag if overrun occured */
-	/* for copy from cyclic buffer to user buffer */
-	unsigned int user_buffer_index;
-
 	/* Members associated with polled mode support */
 	u8 *poll_mode_addr_virt;	/* virt addr for descriptor writeback */
 	dma_addr_t poll_mode_bus;	/* bus addr for descriptor writeback */
@@ -681,10 +670,6 @@ struct xdma_transfer *engine_cyclic_stop(struct xdma_engine *engine);
 void enable_perf(struct xdma_engine *engine);
 void get_perf_stats(struct xdma_engine *engine);
-int xdma_cyclic_transfer_setup(struct xdma_engine *engine);
-int xdma_cyclic_transfer_teardown(struct xdma_engine *engine);
-ssize_t xdma_engine_read_cyclic(struct xdma_engine *engine, char __user *buf,
-				size_t count, int timeout_ms);
 int engine_addrmode_set(struct xdma_engine *engine, unsigned long arg);
 int engine_service_poll(struct xdma_engine *engine, u32 expected_desc_count);
 
 #endif /* XDMA_LIB_H */