Commit 1abe739a authored by Sujatha Banoth

Merge branch 'master' of https://github.com/Xilinx/dma_ip_drivers

parents 0eb756ff a1d510f4
@@ -40,7 +40,7 @@ MODULE_PARM_DESC(poll_mode, "Set 1 for hw polling, default is 0 (interrupts)");
 static unsigned int interrupt_mode;
 module_param(interrupt_mode, uint, 0644);
-MODULE_PARM_DESC(interrupt_mode, "0 - MSI-x , 1 - MSI, 2 - Legacy");
+MODULE_PARM_DESC(interrupt_mode, "0 - Auto , 1 - MSI, 2 - Legacy, 3 - MSI-x");
 static unsigned int enable_credit_mp = 1;
 module_param(enable_credit_mp, uint, 0644);
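Reviewer note: the interrupt_mode parameter is repurposed here — 0 changes from "MSI-X" to "auto-select", and MSI-X moves to 3. The sketch below restates the selection order that the enable_msi_msix() hunks further down implement, as far as this diff shows it; pick_irq_capability() is a hypothetical name, while msi_msix_capable() and the mode values come from the diff itself.

/* Hypothetical helper mirroring the new selection order: mode 0 tries
 * MSI-X first and falls back to MSI; 1/2/3 force a single mechanism.
 */
static int pick_irq_capability(struct pci_dev *pdev, unsigned int mode)
{
        if ((mode == 3 || mode == 0) && msi_msix_capable(pdev, PCI_CAP_ID_MSIX))
                return PCI_CAP_ID_MSIX; /* one vector per channel/user IRQ */
        if ((mode == 1 || mode == 0) && msi_msix_capable(pdev, PCI_CAP_ID_MSI))
                return PCI_CAP_ID_MSI;  /* single message-signalled vector */
        if (mode == 2)
                return 0;               /* legacy INTx */
        return -EINVAL;
}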
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(desc_blen_max,
 #define XDMA_PERF_NUM_DESC 128
 
 /* Kernel version adaptative code */
-#if KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP_ONE
 /* since 4.18, using simple wait queues is not recommended
  * except for realtime constraint (see swait.h comments)
  * and will likely be removed in future kernel versions
@@ -64,7 +64,7 @@ MODULE_PARM_DESC(desc_blen_max,
 #define xlx_wake_up swake_up_one
 #define xlx_wait_event_interruptible_timeout \
         swait_event_interruptible_timeout_exclusive
-#elif KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#elif HAS_SWAKE_UP
 #define xlx_wake_up swake_up
 #define xlx_wait_event_interruptible_timeout \
         swait_event_interruptible_timeout
@@ -540,6 +540,7 @@ static int xdma_engine_stop(struct xdma_engine *engine)
                 (unsigned long)(&engine->regs));
         /* dummy read of status register to flush all previous writes */
         dbg_tfr("%s(%s) done\n", __func__, engine->name);
+        engine->running = 0;
         return 0;
 }
@@ -601,6 +602,35 @@ static int engine_start_mode_config(struct xdma_engine *engine)
         return 0;
 }
 
+/**
+ * xdma_get_next_adj()
+ *
+ * Get the number of adjacent descriptors to set in a descriptor, based on the
+ * remaining number of descriptors and the lower bits of the address of the
+ * next descriptor.
+ * Since the number of descriptors in a page (XDMA_PAGE_SIZE) is 128 and the
+ * maximum size of a block of adjacent descriptors is 64 (63 max adjacent
+ * descriptors for any descriptor), align the blocks of adjacent descriptors
+ * to the block size.
+ */
+static u32 xdma_get_next_adj(unsigned int remaining, u32 next_lo)
+{
+        unsigned int next_index;
+
+        dbg_desc("%s: remaining_desc %u, next_lo 0x%x\n", __func__, remaining,
+                next_lo);
+
+        if (remaining <= 1)
+                return 0;
+
+        /* shift right 5 times corresponds to a division by
+         * sizeof(xdma_desc) = 32
+         */
+        next_index = ((next_lo & (XDMA_PAGE_SIZE - 1)) >> 5) %
+                XDMA_MAX_ADJ_BLOCK_SIZE;
+
+        return min(XDMA_MAX_ADJ_BLOCK_SIZE - next_index - 1, remaining - 1);
+}
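Reviewer note: a standalone userspace re-implementation of the math above is handy for sanity-checking corner cases; this sketch assumes only the two constants introduced in the header hunk further down (descriptors are 32 bytes, so a 4 KiB page holds 128 of them and an adjacency block holds 64).

#include <stdio.h>
#include <stdint.h>

#define XDMA_PAGE_SIZE          0x1000
#define XDMA_MAX_ADJ_BLOCK_SIZE 0x40

/* userspace copy of xdma_get_next_adj() for quick experiments */
static uint32_t get_next_adj(unsigned int remaining, uint32_t next_lo)
{
        unsigned int next_index;

        if (remaining <= 1)
                return 0;
        /* index of the next descriptor within its 64-descriptor block */
        next_index = ((next_lo & (XDMA_PAGE_SIZE - 1)) >> 5) %
                        XDMA_MAX_ADJ_BLOCK_SIZE;
        if (XDMA_MAX_ADJ_BLOCK_SIZE - next_index - 1 < remaining - 1)
                return XDMA_MAX_ADJ_BLOCK_SIZE - next_index - 1;
        return remaining - 1;
}

int main(void)
{
        printf("%u\n", get_next_adj(128, 0x000)); /* block start -> 63 */
        printf("%u\n", get_next_adj(128, 0x7c0)); /* block end   -> 1  */
        printf("%u\n", get_next_adj(3, 0x000));   /* few left    -> 2  */
        return 0;
}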
 /**
  * engine_start() - start an idle engine with its first transfer on queue
  *
@@ -620,8 +650,7 @@ static int engine_start_mode_config(struct xdma_engine *engine)
 static struct xdma_transfer *engine_start(struct xdma_engine *engine)
 {
         struct xdma_transfer *transfer;
-        u32 w;
-        int extra_adj = 0;
+        u32 w, next_adj;
         int rv;
 
         if (!engine) {
@@ -681,22 +710,21 @@ static struct xdma_transfer *engine_start(struct xdma_engine *engine)
                 (unsigned long)(&engine->sgdma_regs->first_desc_hi) -
                 (unsigned long)(&engine->sgdma_regs));
 
-        if (transfer->desc_adjacent > 0) {
-                extra_adj = transfer->desc_adjacent - 1;
-                if (extra_adj > MAX_EXTRA_ADJ)
-                        extra_adj = MAX_EXTRA_ADJ;
-        }
-        dbg_tfr("iowrite32(0x%08x to 0x%p) (first_desc_adjacent)\n", extra_adj,
+        next_adj = xdma_get_next_adj(transfer->desc_adjacent,
+                        cpu_to_le32(PCI_DMA_L(transfer->desc_bus)));
+
+        dbg_tfr("iowrite32(0x%08x to 0x%p) (first_desc_adjacent)\n", next_adj,
                 (void *)&engine->sgdma_regs->first_desc_adjacent);
         write_register(
-                extra_adj, &engine->sgdma_regs->first_desc_adjacent,
+                next_adj, &engine->sgdma_regs->first_desc_adjacent,
                 (unsigned long)(&engine->sgdma_regs->first_desc_adjacent) -
                 (unsigned long)(&engine->sgdma_regs));
 
         dbg_tfr("ioread32(0x%p) (dummy read flushes writes).\n",
                 &engine->regs->status);
-#if KERNEL_VERSION(5, 1, 0) >= LINUX_VERSION_CODE
+#if HAS_MMIOWB
         mmiowb();
 #endif
@@ -735,7 +763,6 @@ static int engine_service_shutdown(struct xdma_engine *engine)
                 pr_err("Failed to stop engine\n");
                 return rv;
         }
-        engine->running = 0;
 
         /* awake task on engine's shutdown wait queue */
         xlx_wake_up(&engine->shutdown_wq);
@@ -1427,7 +1454,7 @@ static u32 engine_service_wb_monitor(struct xdma_engine *engine,
                 else if (desc_wb >= expected_wb)
                         break;
 
-                /* RTO - prevent system from hanging in polled mode */
+                /* prevent system from hanging in polled mode */
                 if (time_after(jiffies, timeout)) {
                         dbg_tfr("Polling timeout occurred");
                         dbg_tfr("desc_wb = 0x%08x, expected 0x%08x\n", desc_wb,
@@ -1681,7 +1708,7 @@ static irqreturn_t xdma_channel_irq(int irq, void *dev_id)
         schedule_work(&engine->work);
 
         /*
-         * RTO - need to protect access here if multiple MSI-X are used for
+         * need to protect access here if multiple MSI-X are used for
          * user interrupts
          */
         xdev->irq_count++;
@@ -1933,7 +1960,7 @@ fail:
  */
 
 /*
- * RTO - code to detect if MSI/MSI-X capability exists is derived
+ * code to detect if MSI/MSI-X capability exists is derived
  * from linux/pci/msi.c - pci_msi_check_device
  */
@@ -1992,7 +2019,7 @@ static int enable_msi_msix(struct xdma_dev *xdev, struct pci_dev *pdev)
                 return -EINVAL;
         }
 
-        if (!interrupt_mode && msi_msix_capable(pdev, PCI_CAP_ID_MSIX)) {
+        if ((interrupt_mode == 3 || !interrupt_mode) && msi_msix_capable(pdev, PCI_CAP_ID_MSIX)) {
                 int req_nvec = xdev->c2h_channel_max + xdev->h2c_channel_max +
                                xdev->user_max;
@@ -2014,7 +2041,7 @@ static int enable_msi_msix(struct xdma_dev *xdev, struct pci_dev *pdev)
                 xdev->msix_enabled = 1;
-        } else if (interrupt_mode == 1 &&
+        } else if ((interrupt_mode == 1 || !interrupt_mode) &&
                    msi_msix_capable(pdev, PCI_CAP_ID_MSI)) {
                 /* enable message signalled interrupts */
                 dbg_init("pci_enable_msi()\n");
@@ -2296,11 +2323,16 @@ static int irq_legacy_setup(struct xdma_dev *xdev, struct pci_dev *pdev)
         int rv;
 
         pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &val);
+        if (val == 0) {
+                dbg_init("Legacy interrupt not supported\n");
+                return -EINVAL;
+        }
+
         dbg_init("Legacy Interrupt register value = %d\n", val);
         if (val > 1) {
                 val--;
                 w = (val << 24) | (val << 16) | (val << 8) | val;
-                /* Program IRQ Block Channel vactor and IRQ Block User vector
+                /* Program IRQ Block Channel vector and IRQ Block User vector
                  * with Legacy interrupt value
                  */
                 reg = xdev->bar[xdev->config_bar_idx] + 0x2080;  // IRQ user
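Reviewer note: the new early return keys off the read-only PCI_INTERRUPT_PIN config register, which is 0 when a function implements no INTx pin and 1..4 for INTA..INTD. A minimal sketch of the same probe (the helper name is illustrative; the register and accessor are standard Linux PCI API):

/* Illustrative probe mirroring the val == 0 guard added above */
static bool has_intx_pin(struct pci_dev *pdev)
{
        u8 pin = 0;

        pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
        return pin != 0;        /* 0: no legacy pin; 1..4: INTA..INTD */
}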
@@ -2434,6 +2466,7 @@ static int transfer_desc_init(struct xdma_transfer *transfer, int count)
                 desc_virt[i].next_lo = cpu_to_le32(PCI_DMA_L(desc_bus));
                 desc_virt[i].next_hi = cpu_to_le32(PCI_DMA_H(desc_bus));
                 desc_virt[i].bytes = cpu_to_le32(0);
+
                 desc_virt[i].control = cpu_to_le32(DESC_MAGIC);
         }
         /* { i = number - 1 } */
@@ -2461,8 +2494,7 @@ static void xdma_desc_link(struct xdma_desc *first, struct xdma_desc *second,
          * remember reserved control in first descriptor, but zero
          * extra_adjacent!
          */
-        /* RTO - what's this about? Shouldn't it be 0x0000c0ffUL? */
-        u32 control = le32_to_cpu(first->control) & 0x0000f0ffUL;
+        u32 control = le32_to_cpu(first->control) & 0x00FFC0ffUL;
 
         /* second descriptor given? */
         if (second) {
                 /*
@@ -2486,16 +2518,12 @@ static void xdma_desc_link(struct xdma_desc *first, struct xdma_desc *second,
 }
 
 /* xdma_desc_adjacent -- Set how many descriptors are adjacent to this one */
-static void xdma_desc_adjacent(struct xdma_desc *desc, int next_adjacent)
+static void xdma_desc_adjacent(struct xdma_desc *desc, u32 next_adjacent)
 {
         /* remember reserved and control bits */
-        u32 control = le32_to_cpu(desc->control) & 0xffffc0ffUL;
-        /* merge adjacent and control field */
-        if (next_adjacent)
-                next_adjacent = next_adjacent - 1;
-        if (next_adjacent > MAX_EXTRA_ADJ)
-                next_adjacent = MAX_EXTRA_ADJ;
-        control |= (next_adjacent << 8);
+        u32 control = le32_to_cpu(desc->control) & 0x0000f0ffUL;
+        /* merge adjacent and control field */
+        control |= 0xAD4B0000UL | (next_adjacent << 8);
         /* write control and next_adjacent */
         desc->control = cpu_to_le32(control);
 }
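Reviewer note: the new constant makes the control dword self-describing — 0xAD4B0000UL is the driver's DESC_MAGIC, so the adjacent count no longer needs a separate clamp against MAX_EXTRA_ADJ. A sketch of the resulting layout, assuming the PG195 descriptor format (the helper name is illustrative):

/* Descriptor dword 0 after this change:
 *   [31:16] magic 0xAD4B       [13:8] Nxt_adj (0..63)
 *   [7:0]   control flags (Stop, Completed, EOP, ...)
 */
static inline u32 pack_desc_control(u32 flags, u32 next_adjacent)
{
        return 0xAD4B0000UL | ((next_adjacent & 0x3f) << 8) | (flags & 0xff);
}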
@@ -2717,7 +2745,6 @@ static void engine_alignments(struct xdma_engine *engine)
         dbg_init("engine %p name %s alignments=0x%08x\n", engine, engine->name,
                  (int)w);
 
-        /* RTO - add some macros to extract these fields */
         align_bytes = (w & 0x00ff0000U) >> 16;
         granularity_bytes = (w & 0x0000ff00U) >> 8;
         address_bits = (w & 0x000000ffU);
@@ -2839,7 +2866,6 @@ struct xdma_transfer *engine_cyclic_stop(struct xdma_engine *engine)
                 pr_err("Failed to stop engine\n");
                 return NULL;
         }
-        engine->running = 0;
 
         if (transfer->cyclic) {
                 if (engine->xdma_perf)
@@ -2883,9 +2909,8 @@ static int engine_writeback_setup(struct xdma_engine *engine)
         }
 
         /*
-         * RTO - doing the allocation per engine is wasteful since a full page
-         * is allocated each time - better to allocate one page for the whole
-         * device during probe() and set per-engine offsets here
+         * better to allocate one page for the whole device during probe()
+         * and set per-engine offsets here
          */
         writeback = (struct xdma_poll_wb *)engine->poll_mode_addr_virt;
         writeback->completed_desc_count = 0;
@@ -3141,7 +3166,6 @@ static int transfer_init(struct xdma_engine *engine,
         unsigned int desc_max = min_t(unsigned int,
                                 req->sw_desc_cnt - req->sw_desc_idx,
                                 XDMA_TRANSFER_MAX_DESC);
-        unsigned int desc_align = 0;
         int i = 0;
         int last = 0;
         u32 control;
@@ -3152,7 +3176,7 @@ static int transfer_init(struct xdma_engine *engine,
         /* lock the engine state */
         spin_lock_irqsave(&engine->lock, flags);
 
         /* initialize wait queue */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+#if HAS_SWAKE_UP
         init_swait_queue_head(&xfer->wq);
 #else
         init_waitqueue_head(&xfer->wq);
@@ -3168,7 +3192,7 @@ static int transfer_init(struct xdma_engine *engine,
                 (sizeof(struct xdma_result) * engine->desc_idx);
         xfer->desc_index = engine->desc_idx;
 
-        /* TODO: Need to handle desc_used >= XDMA_TRANSFER_MAX_DESC */
+        /* Need to handle desc_used >= XDMA_TRANSFER_MAX_DESC */
         if ((engine->desc_idx + desc_max) >= XDMA_TRANSFER_MAX_DESC)
                 desc_max = XDMA_TRANSFER_MAX_DESC - engine->desc_idx;
@@ -3179,16 +3203,7 @@ static int transfer_init(struct xdma_engine *engine,
                 xfer, (u64)xfer->desc_bus);
         transfer_build(engine, req, xfer, desc_max);
 
-        /*
-         * Contiguous descriptors cannot cross PAGE boundary
-         * The 1st descriptor may start in the middle of the page,
-         * calculate the 1st block of adj desc accordingly
-         */
-        desc_align = 128 - (engine->desc_idx % 128) - 1;
-        if (desc_align > (desc_max - 1))
-                desc_align = desc_max - 1;
-        xfer->desc_adjacent = desc_align;
+        xfer->desc_adjacent = desc_max;
 
         /* terminate last descriptor */
         last = desc_max - 1;
@@ -3204,11 +3219,13 @@ static int transfer_init(struct xdma_engine *engine,
         engine->desc_used += desc_max;
 
         /* fill in adjacent numbers */
-        for (i = 0; i < xfer->desc_num && desc_align; i++, desc_align--)
-                xdma_desc_adjacent(xfer->desc_virt + i, desc_align);
-
-        for (; i < xfer->desc_num; i++)
-                xdma_desc_adjacent(xfer->desc_virt + i, xfer->desc_num - i - 1);
+        for (i = 0; i < xfer->desc_num; i++) {
+                u32 next_adj = xdma_get_next_adj(xfer->desc_num - i - 1,
+                                        (xfer->desc_virt + i)->next_lo);
+
+                dbg_desc("set next adj at index %d to %u\n", i, next_adj);
+                xdma_desc_adjacent(xfer->desc_virt + i, next_adj);
+        }
 
         spin_unlock_irqrestore(&engine->lock, flags);
         return 0;
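Reviewer note: the rewritten loop recomputes the run-length at every descriptor, so adjacency now restarts at each 64-descriptor block boundary instead of only honoring the first partial block. An illustrative trace, computed with the userspace sketch shown earlier and assuming the descriptor ring starts at page offset 0:

/* 70 descriptors, 32 bytes each, 64-descriptor adjacency blocks:
 *
 *     i :  0   1  ...  61  62 | 63  64  ...  68  69
 *   adj : 62  61  ...   1   0 |  5   4  ...   0   0
 *
 * Descriptor 63's next pointer lands on a fresh block, so the
 * run-length restarts, capped by the 6 descriptors remaining.
 */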
@@ -3229,7 +3246,7 @@ static int transfer_init_cyclic(struct xdma_engine *engine,
         memset(xfer, 0, sizeof(*xfer));
 
         /* initialize wait queue */
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
         init_swait_queue_head(&xfer->wq);
 #else
         init_waitqueue_head(&xfer->wq);
@@ -3266,8 +3283,12 @@ static int transfer_init_cyclic(struct xdma_engine *engine,
         dbg_sg("transfer 0x%p has %d descriptors\n", xfer, xfer->desc_num);
 
         /* fill in adjacent numbers */
-        for (i = 0; i < xfer->desc_num; i++)
-                xdma_desc_adjacent(xfer->desc_virt + i, xfer->desc_num - i - 1);
+        for (i = 0; i < xfer->desc_num; i++) {
+                u32 next_adj = xdma_get_next_adj(xfer->desc_num - i - 1,
+                                        (xfer->desc_virt + i)->next_lo);
+
+                dbg_desc("set next adj at index %d to %u\n", i, next_adj);
+                xdma_desc_adjacent(xfer->desc_virt + i, next_adj);
+        }
 
         return 0;
 }
@@ -4004,7 +4025,7 @@ int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine)
         transfer->cyclic = 1;
 
         /* initialize wait queue */
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
         init_swait_queue_head(&transfer->wq);
 #else
         init_waitqueue_head(&transfer->wq);
@@ -4084,7 +4105,7 @@ static struct xdma_dev *alloc_dev_instance(struct pci_dev *pdev)
                 spin_lock_init(&engine->lock);
                 mutex_init(&engine->desc_lock);
                 INIT_LIST_HEAD(&engine->transfer_list);
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
                 init_swait_queue_head(&engine->shutdown_wq);
                 init_swait_queue_head(&engine->xdma_perf_wq);
 #else
@@ -4098,7 +4119,7 @@ static struct xdma_dev *alloc_dev_instance(struct pci_dev *pdev)
                 spin_lock_init(&engine->lock);
                 mutex_init(&engine->desc_lock);
                 INIT_LIST_HEAD(&engine->transfer_list);
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
                 init_swait_queue_head(&engine->shutdown_wq);
                 init_swait_queue_head(&engine->xdma_perf_wq);
 #else
@@ -4404,15 +4425,15 @@ void *xdma_device_open(const char *mname, struct pci_dev *pdev, int *user_max,
         rv = probe_engines(xdev);
         if (rv)
-                goto err_engines;
+                goto err_mask;
 
         rv = enable_msi_msix(xdev, pdev);
         if (rv < 0)
-                goto err_enable_msix;
+                goto err_engines;
 
         rv = irq_setup(xdev, pdev);
         if (rv < 0)
-                goto err_interrupts;
+                goto err_msix;
 
         if (!poll_mode)
                 channel_interrupts_enable(xdev, ~0);
@@ -4427,9 +4448,7 @@ void *xdma_device_open(const char *mname, struct pci_dev *pdev, int *user_max,
         xdma_device_flag_clear(xdev, XDEV_FLAG_OFFLINE);
         return (void *)xdev;
 
-err_interrupts:
-        irq_teardown(xdev);
-err_enable_msix:
+err_msix:
         disable_msi_msix(xdev, pdev);
 err_engines:
         remove_engines(xdev);
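Reviewer note: the relabeled error path drops the separate irq_teardown() step and keeps the usual kernel unwind idiom — each label undoes everything initialized before the failing step, in reverse order. A generic, self-contained sketch of the pattern (all function names are placeholders, not the driver's):

static int setup_engines(void) { return 0; }    /* placeholder step 1 */
static int setup_msi(void)     { return 0; }    /* placeholder step 2 */
static int setup_irq(void)     { return 0; }    /* placeholder step 3 */
static void undo_msi(void)     { }
static void undo_engines(void) { }

static int open_device(void)
{
        int rv;

        rv = setup_engines();
        if (rv)
                goto err_mask;
        rv = setup_msi();
        if (rv)
                goto err_engines;
        rv = setup_irq();
        if (rv)
                goto err_msi;
        return 0;

err_msi:                /* step 3 failed: undo step 2, then step 1 */
        undo_msi();
err_engines:            /* step 2 failed: undo step 1 */
        undo_engines();
err_mask:
        return rv;
}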
@@ -4520,8 +4539,6 @@ void xdma_device_offline(struct pci_dev *pdev, void *dev_hndl)
                         rv = xdma_engine_stop(engine);
                         if (rv < 0)
                                 pr_err("Failed to stop engine\n");
-                        else
-                                engine->running = 0;
                         spin_unlock_irqrestore(&engine->lock, flags);
                 }
         }
@@ -4537,8 +4554,6 @@ void xdma_device_offline(struct pci_dev *pdev, void *dev_hndl)
                         rv = xdma_engine_stop(engine);
                         if (rv < 0)
                                 pr_err("Failed to stop engine\n");
-                        else
-                                engine->running = 0;
                         spin_unlock_irqrestore(&engine->lock, flags);
                 }
         }
...
@@ -31,9 +31,32 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/workqueue.h>
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+
+/* Add compatibility checking for RHEL versions */
+#if defined(RHEL_RELEASE_CODE)
+# define ACCESS_OK_2_ARGS (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0))
+#else
+# define ACCESS_OK_2_ARGS (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
+#endif
+
+#if defined(RHEL_RELEASE_CODE)
+# define HAS_MMIOWB (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(8, 0))
+#else
+# define HAS_MMIOWB (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 0))
+#endif
+
+#if defined(RHEL_RELEASE_CODE)
+# define HAS_SWAKE_UP_ONE (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0))
+# define HAS_SWAKE_UP (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0))
+#else
+# define HAS_SWAKE_UP_ONE (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
+# define HAS_SWAKE_UP (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+#endif
+
+#if HAS_SWAKE_UP
 #include <linux/swait.h>
 #endif
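Reviewer note: centralizing the version checks into ACCESS_OK_2_ARGS / HAS_MMIOWB / HAS_SWAKE_UP* reduces every call site to a single feature test and adds RHEL 8, which backports features ahead of its 4.18 base, to the matrix. A condensed sketch of how the switches are consumed elsewhere in this diff (xlx_wake_up is the driver's wrapper; the mmiowb helper name is illustrative):

#if HAS_SWAKE_UP_ONE
#define xlx_wake_up     swake_up_one            /* >= 4.19 or RHEL 8 */
#elif HAS_SWAKE_UP
#define xlx_wake_up     swake_up                /* >= 4.6 */
#else
#define xlx_wake_up     wake_up_interruptible   /* classic wait queues */
#endif

static inline void flush_posted_writes(void __iomem *status_reg)
{
        (void)ioread32(status_reg);     /* dummy read flushes posted writes */
#if HAS_MMIOWB
        mmiowb();       /* barrier removed from mainline in 5.2 */
#endif
}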
 /*
  * if the config bar is fixed, the driver does not neeed to search through
  * all of the bars
@@ -58,7 +81,8 @@
  * .REG_IRQ_OUT (reg_irq_from_ch[(channel*2) +: 2]),
  */
 #define XDMA_ENG_IRQ_NUM (1)
-#define MAX_EXTRA_ADJ (0x3F)
+#define XDMA_MAX_ADJ_BLOCK_SIZE 0x40
+#define XDMA_PAGE_SIZE 0x1000
 #define RX_STATUS_EOP (1)
 
 /* Target internal components on XDMA control BAR */
@@ -417,7 +441,7 @@ struct xdma_transfer {
         int desc_num;           /* number of descriptors in transfer */
         int desc_index;         /* index for 1st desc. in transfer */
         enum dma_data_direction dir;
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
         struct swait_queue_head wq;
 #else
         wait_queue_head_t wq;   /* wait queue for transfer completion */
@@ -503,7 +527,7 @@ struct xdma_engine {
         dma_addr_t poll_mode_bus;       /* bus addr for descriptor writeback */
 
         /* Members associated with interrupt mode support */
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
         struct swait_queue_head shutdown_wq;
 #else
         wait_queue_head_t shutdown_wq;  /* wait queue for shutdown sync */
@@ -522,7 +546,7 @@ struct xdma_engine {
         /* for performance test support */
         struct xdma_performance_ioctl *xdma_perf;       /* perf test control */
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
         struct swait_queue_head xdma_perf_wq;
 #else
         wait_queue_head_t xdma_perf_wq; /* Perf test sync */
...
@@ -28,6 +28,7 @@ all :
 clean:
         $(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) clean
+        @/bin/rm -f *.o.ur-safe
 
 install: all
         $(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) modules_install
...
@@ -16,6 +16,7 @@
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  */
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
 
 #include "libxdma_api.h"
 #include "xdma_cdev.h"
...
@@ -24,7 +24,7 @@
 #include "xdma_cdev.h"
 #include "cdev_ctrl.h"
 
-#if KERNEL_VERSION(5, 0, 0) <= LINUX_VERSION_CODE
+#if ACCESS_OK_2_ARGS
 #define xlx_access_ok(X, Y, Z) access_ok(Y, Z)
 #else
 #define xlx_access_ok(X, Y, Z) access_ok(X, Y, Z)
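Reviewer note: kernel 5.0 removed the type argument from access_ok(), which is exactly what ACCESS_OK_2_ARGS selects for; because the macro simply drops its first argument on newer kernels, call sites can keep passing the old VERIFY_* token. A hedged sketch of a call site (the ioctl body is illustrative):

static long ioctl_check_user_buf(unsigned long arg)
{
        /* same source line preprocesses cleanly on both sides of 5.0 */
        if (!xlx_access_ok(VERIFY_WRITE, (void __user *)arg,
                           sizeof(struct xdma_performance_ioctl)))
                return -EFAULT;
        return 0;
}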
...
@@ -611,7 +611,7 @@ static int ioctl_do_perf_start(struct xdma_engine *engine, unsigned long arg)
         enable_perf(engine);
         dbg_perf("transfer_size = %d\n", engine->xdma_perf->transfer_size);
         /* initialize wait queue */
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
         init_swait_queue_head(&engine->xdma_perf_wq);
 #else
         init_waitqueue_head(&engine->xdma_perf_wq);
...
@@ -213,7 +213,7 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                 pr_info("copy back tdo_buf failed: %d/%u.\n", rv, total_bytes);
 
 unlock:
-#if KERNEL_VERSION(5, 1, 0) >= LINUX_VERSION_CODE
+#if HAS_MMIOWB
         mmiowb();
 #endif
         spin_unlock(&xcdev->lock);
...
@@ -23,8 +23,7 @@
 #include <linux/ioctl.h>
 
 /*
- * !!! TODO !!!
- * need a better way set the bar offset dynamicly
+ * the bar offset can be changed at compile time
  */
 #define XVC_BAR_OFFSET_DFLT     0x40000 /* DSA 4.0 */
...
@@ -522,7 +522,6 @@ int xpdev_create_interfaces(struct xdma_pci_dev *xpdev)
         }
         xpdev_flag_set(xpdev, XDF_CDEV_SG);
 
-        /* ??? Bypass */
         /* Initialize Bypass Character Device */
         if (xdev->bypass_bar_idx > 0) {
                 for (i = 0; i < xpdev->h2c_channel_max; i++) {
...
@@ -108,6 +108,10 @@ static const struct pci_device_id pci_ids[] = {
 #ifdef INTERNAL_TESTING
         { PCI_DEVICE(0x1d0f, 0x1042), 0},
 #endif
+        /* aws */
+        { PCI_DEVICE(0x1d0f, 0xf000), },
+        { PCI_DEVICE(0x1d0f, 0xf001), },
+
         {0,}
 };
 MODULE_DEVICE_TABLE(pci, pci_ids);
@@ -289,7 +293,11 @@ static void xdma_error_resume(struct pci_dev *pdev)
         struct xdma_pci_dev *xpdev = dev_get_drvdata(&pdev->dev);
 
         pr_info("dev 0x%p,0x%p.\n", pdev, xpdev);
+#if KERNEL_VERSION(5, 7, 0) <= LINUX_VERSION_CODE
+        pci_aer_clear_nonfatal_status(pdev);
+#else
         pci_cleanup_aer_uncorrect_error_status(pdev);
+#endif
 }
 
 #if KERNEL_VERSION(4, 13, 0) <= LINUX_VERSION_CODE
...