Commit f1e834be authored by Julien

libxdma: fix next adjacent descriptors

Fix the setting of the next adjacent fields in descriptors.

Following commit 5faf23ec, the next_adj field of every descriptor is set
according to the index of the descriptor rather than its address, which
causes issues when dma_alloc_coherent doesn't return a page-aligned
address (which happens).
Moreover, for a transfer whose descriptors span more than a full page,
the next_adj field is set to the maximum (63) for all descriptors until
the last page of descriptors, where it starts decreasing.
Last, even before that commit, the next_adj field inside a block of
adjacent descriptors did not decrease until coming near the page end,
which does not comply with the documentation:

"Every descriptor in the descriptor list must accurately describe the descriptor
or block of descriptors that follows. In a block of adjacent descriptors, the
Nxt_adj value decrements from the first descriptor to the second to last
descriptor which has a value of zero. Likewise, each descriptor in the block
points to the next descriptor in the block, except for the last descriptor
which might point to a new block or might terminate the list."

This commit aligns the blocks of adjacent descriptors to
XDMA_MAX_ADJ_BLOCK_SIZE and makes the next_adj field decrease inside
each block until the second-to-last descriptor of the block or of the
full transfer. The size of a page is a multiple of the size of a block
(4096 = sizeof(xdma_desc) * 128 = sizeof(xdma_desc) * 2 *
XDMA_MAX_ADJ_BLOCK_SIZE), so block boundaries can be derived from the
page offset of a descriptor's address.
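
As an illustration of the resulting pattern, here is a minimal standalone
sketch (not driver code; it mirrors the xdma_get_next_adj() computation
from the diff below, using the constants the diff introduces, with a
made-up non-page-aligned base address and descriptor count):

  #include <stdio.h>
  #include <stdint.h>

  #define XDMA_PAGE_SIZE          0x1000
  #define XDMA_MAX_ADJ_BLOCK_SIZE 0x40
  #define DESC_SIZE               32      /* sizeof(struct xdma_desc) */
  #define MIN(a, b)               ((a) < (b) ? (a) : (b))

  /* same logic as xdma_get_next_adj() in the diff below */
  static unsigned int get_next_adj(unsigned int remaining, uint32_t next_lo)
  {
          unsigned int next_index;

          if (remaining <= 1)
                  return 0;
          /* index of the next descriptor inside its block of 64 */
          next_index = ((next_lo & (XDMA_PAGE_SIZE - 1)) >> 5) %
                  XDMA_MAX_ADJ_BLOCK_SIZE;
          return MIN(XDMA_MAX_ADJ_BLOCK_SIZE - next_index - 1, remaining - 1);
  }

  int main(void)
  {
          uint32_t base = 0x12340e20;     /* hypothetical, not page aligned */
          unsigned int n = 100, i;

          /* prints 13, 12, ..., 1, 0, then 63, 62, ... per 64-desc block */
          for (i = 0; i < n; i++)
                  printf("desc %3u: next_adj %2u\n", i,
                         get_next_adj(n - i - 1, base + (i + 1) * DESC_SIZE));
          return 0;
  }

Near the end of the transfer the remaining - 1 bound takes over, so the
count also reaches zero on the last descriptor, as the documentation
quoted above requires.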
parent e5dfada9
@@ -601,6 +601,35 @@ static int engine_start_mode_config(struct xdma_engine *engine)
         return 0;
 }
 
+/**
+ * xdma_get_next_adj()
+ *
+ * Get the number of adjacent descriptors to set in a descriptor, based on the
+ * remaining number of descriptors and the lower bits of the address of the
+ * next descriptor.
+ * Since the number of descriptors in a page (XDMA_PAGE_SIZE) is 128 and the
+ * maximum size of a block of adjacent descriptors is 64 (63 max adjacent
+ * descriptors for any descriptor), align the blocks of adjacent descriptors
+ * to the block size.
+ */
+static u32 xdma_get_next_adj(unsigned int remaining, u32 next_lo)
+{
+        unsigned int next_index;
+
+        dbg_desc("%s: remaining_desc %u, next_lo 0x%x\n", __func__, remaining,
+                 next_lo);
+        if (remaining <= 1)
+                return 0;
+
+        /* shift right 5 times corresponds to a division by
+         * sizeof(xdma_desc) = 32
+         */
+        next_index = ((next_lo & (XDMA_PAGE_SIZE - 1)) >> 5) %
+                XDMA_MAX_ADJ_BLOCK_SIZE;
+
+        return min(XDMA_MAX_ADJ_BLOCK_SIZE - next_index - 1, remaining - 1);
+}
+
 /**
  * engine_start() - start an idle engine with its first transfer on queue
  *
@@ -620,8 +649,7 @@ static int engine_start_mode_config(struct xdma_engine *engine)
 static struct xdma_transfer *engine_start(struct xdma_engine *engine)
 {
         struct xdma_transfer *transfer;
-        u32 w;
-        int extra_adj = 0;
+        u32 w, next_adj;
         int rv;
 
         if (!engine) {
@@ -681,15 +709,14 @@ static struct xdma_transfer *engine_start(struct xdma_engine *engine)
                 (unsigned long)(&engine->sgdma_regs->first_desc_hi) -
                 (unsigned long)(&engine->sgdma_regs));
 
-        if (transfer->desc_adjacent > 0) {
-                extra_adj = transfer->desc_adjacent - 1;
-                if (extra_adj > MAX_EXTRA_ADJ)
-                        extra_adj = MAX_EXTRA_ADJ;
-        }
-        dbg_tfr("iowrite32(0x%08x to 0x%p) (first_desc_adjacent)\n", extra_adj,
+        next_adj = xdma_get_next_adj(transfer->desc_adjacent,
+                                     cpu_to_le32(PCI_DMA_L(transfer->desc_bus)));
+
+        dbg_tfr("iowrite32(0x%08x to 0x%p) (first_desc_adjacent)\n", next_adj,
                 (void *)&engine->sgdma_regs->first_desc_adjacent);
         write_register(
-                extra_adj, &engine->sgdma_regs->first_desc_adjacent,
+                next_adj, &engine->sgdma_regs->first_desc_adjacent,
                 (unsigned long)(&engine->sgdma_regs->first_desc_adjacent) -
                 (unsigned long)(&engine->sgdma_regs));
@@ -2434,6 +2461,7 @@ static int transfer_desc_init(struct xdma_transfer *transfer, int count)
                 desc_virt[i].next_lo = cpu_to_le32(PCI_DMA_L(desc_bus));
                 desc_virt[i].next_hi = cpu_to_le32(PCI_DMA_H(desc_bus));
                 desc_virt[i].bytes = cpu_to_le32(0);
+
                 desc_virt[i].control = cpu_to_le32(DESC_MAGIC);
         }
         /* { i = number - 1 } */
@@ -2486,16 +2514,12 @@ static void xdma_desc_link(struct xdma_desc *first, struct xdma_desc *second,
 }
 
 /* xdma_desc_adjacent -- Set how many descriptors are adjacent to this one */
-static void xdma_desc_adjacent(struct xdma_desc *desc, int next_adjacent)
+static void xdma_desc_adjacent(struct xdma_desc *desc, u32 next_adjacent)
 {
         /* remember reserved and control bits */
-        u32 control = le32_to_cpu(desc->control) & 0xffffc0ffUL;
-        /* merge adjacent and control field */
-        if (next_adjacent)
-                next_adjacent = next_adjacent - 1;
-        if (next_adjacent > MAX_EXTRA_ADJ)
-                next_adjacent = MAX_EXTRA_ADJ;
-        control |= (next_adjacent << 8);
+        u32 control = le32_to_cpu(desc->control) & 0x0000f0ffUL;
+
+        control |= 0xAD4B0000UL | (next_adjacent << 8);
+
         /* write control and next_adjacent */
         desc->control = cpu_to_le32(control);
 }
@@ -3141,7 +3165,6 @@ static int transfer_init(struct xdma_engine *engine,
         unsigned int desc_max = min_t(unsigned int,
                                 req->sw_desc_cnt - req->sw_desc_idx,
                                 XDMA_TRANSFER_MAX_DESC);
-        unsigned int desc_align = 0;
         int i = 0;
         int last = 0;
         u32 control;
@@ -3179,16 +3202,7 @@ static int transfer_init(struct xdma_engine *engine,
                 xfer, (u64)xfer->desc_bus);
         transfer_build(engine, req, xfer, desc_max);
 
-        /*
-         * Contiguous descriptors cannot cross PAGE boundary
-         * The 1st descriptor may start in the middle of the page,
-         * calculate the 1st block of adj desc accordingly
-         */
-        desc_align = 128 - (engine->desc_idx % 128) - 1;
-        if (desc_align > (desc_max - 1))
-                desc_align = desc_max - 1;
-
-        xfer->desc_adjacent = desc_align;
+        xfer->desc_adjacent = desc_max;
 
         /* terminate last descriptor */
         last = desc_max - 1;
@@ -3204,11 +3218,13 @@ static int transfer_init(struct xdma_engine *engine,
         engine->desc_used += desc_max;
 
         /* fill in adjacent numbers */
-        for (i = 0; i < xfer->desc_num && desc_align; i++, desc_align--)
-                xdma_desc_adjacent(xfer->desc_virt + i, desc_align);
-
-        for (; i < xfer->desc_num; i++)
-                xdma_desc_adjacent(xfer->desc_virt + i, xfer->desc_num - i - 1);
+        for (i = 0; i < xfer->desc_num; i++) {
+                u32 next_adj = xdma_get_next_adj(xfer->desc_num - i - 1,
+                                                 (xfer->desc_virt + i)->next_lo);
+
+                dbg_desc("set next adj at index %d to %u\n", i, next_adj);
+                xdma_desc_adjacent(xfer->desc_virt + i, next_adj);
+        }
 
         spin_unlock_irqrestore(&engine->lock, flags);
         return 0;
@@ -3266,8 +3282,12 @@ static int transfer_init_cyclic(struct xdma_engine *engine,
         dbg_sg("transfer 0x%p has %d descriptors\n", xfer, xfer->desc_num);
 
         /* fill in adjacent numbers */
-        for (i = 0; i < xfer->desc_num; i++)
-                xdma_desc_adjacent(xfer->desc_virt + i, xfer->desc_num - i - 1);
+        for (i = 0; i < xfer->desc_num; i++) {
+                u32 next_adj = xdma_get_next_adj(xfer->desc_num - i - 1,
+                                                 (xfer->desc_virt + i)->next_lo);
+                dbg_desc("set next adj at index %d to %u\n", i, next_adj);
+                xdma_desc_adjacent(xfer->desc_virt + i, next_adj);
+        }
 
         return 0;
 }
...
@@ -58,7 +58,8 @@
  * .REG_IRQ_OUT (reg_irq_from_ch[(channel*2) +: 2]),
  */
 #define XDMA_ENG_IRQ_NUM        (1)
-#define MAX_EXTRA_ADJ           (0x3F)
+#define XDMA_MAX_ADJ_BLOCK_SIZE 0x40
+#define XDMA_PAGE_SIZE          0x1000
 #define RX_STATUS_EOP           (1)
 
 /* Target internal components on XDMA control BAR */
...
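
A note on the constants in the xdma_desc_adjacent() change above: the new
mask 0x0000f0ff keeps bits 15:12 and 7:0 of the control word, and the OR
re-stamps the 0xAD4B descriptor magic in bits 31:16 alongside next_adj in
the Nxt_adj field (bits 13:8). A minimal host-byte-order sketch (sample
values are hypothetical; the driver's le32 conversions are omitted):

  #include <stdio.h>
  #include <stdint.h>

  #define DESC_MAGIC 0xAD4B0000UL   /* same value the driver ORs in */

  /* mirrors the body of the new xdma_desc_adjacent() */
  static uint32_t pack_control(uint32_t control, uint32_t next_adj)
  {
          control &= 0x0000f0ffUL;  /* keep bits 15:12 and flags 7:0 */
          control |= DESC_MAGIC | (next_adj << 8);  /* magic + Nxt_adj */
          return control;
  }

  int main(void)
  {
          /* hypothetical control word with a low flag bit set */
          printf("0x%08lx\n", (unsigned long)pack_control(0xAD4B0001, 63));
          /* -> 0xad4b3f01: magic in 31:16, Nxt_adj 0x3f in 13:8, flag kept */
          return 0;
  }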