Unverified commit 781c12f2, authored by Karen Xie, committed by GitHub

Merge pull request #50 from jascondley/REF41_SCHED_ATOMIC

Fixes a design error revealed by running on aarch64: engine->desc_lock was taken as a spinlock around code paths that can sleep, triggering "scheduling while atomic"; the lock is converted to a mutex.
parents 54c1de28 d1f334b1
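For context, a minimal illustrative sketch, not taken from the XDMA driver, of the problem class the branch name (SCHED_ATOMIC) refers to: spin_lock() puts the caller in atomic context, so any call that may sleep while the lock is held, such as waiting for descriptors to complete, produces a "BUG: scheduling while atomic" splat, whereas a mutex may be held across sleeping calls. The struct demo_engine and demo_submit() names below are hypothetical.

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

/* Illustrative types only -- not the driver's real structures. */
struct demo_engine {
	spinlock_t lock;	/* fine for short, non-sleeping critical sections */
	struct mutex desc_lock;	/* may be held across sleeping calls */
};

static void demo_submit(struct demo_engine *e)
{
	/*
	 * Broken pattern: msleep() (a stand-in for blocking on the
	 * hardware) sleeps, but spin_lock() puts us in atomic context,
	 * so the kernel would log "BUG: scheduling while atomic":
	 *
	 *	spin_lock(&e->lock);
	 *	msleep(10);
	 *	spin_unlock(&e->lock);
	 */

	/* Working pattern: a mutex holder is allowed to sleep. */
	mutex_lock(&e->desc_lock);	/* assumes mutex_init() ran at setup */
	msleep(10);
	mutex_unlock(&e->desc_lock);
}

This is the substitution the hunks below make for engine->desc_lock, including the matching mutex_init() in alloc_dev_instance() and the field type change in struct xdma_engine.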
@@ -3514,7 +3514,7 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 	sg = sgt->sgl;
 	nents = req->sw_desc_cnt;
-	spin_lock(&engine->desc_lock);
+	mutex_lock(&engine->desc_lock);
 	while (nents) {
 		unsigned long flags;
@@ -3523,7 +3523,7 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 		/* build transfer */
 		rv = transfer_init(engine, req, &req->tfer[0]);
 		if (rv < 0) {
-			spin_unlock(&engine->desc_lock);
+			mutex_unlock(&engine->desc_lock);
 			goto unmap_sgl;
 		}
 		xfer = &req->tfer[0];
@@ -3547,7 +3547,7 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 		rv = transfer_queue(engine, xfer);
 		if (rv < 0) {
-			spin_unlock(&engine->desc_lock);
+			mutex_unlock(&engine->desc_lock);
 			pr_info("unable to submit %s, %d.\n", engine->name, rv);
 			goto unmap_sgl;
 		}
@@ -3566,7 +3566,7 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 					desc_count);
 			rv = engine_service_poll(engine, desc_count);
 			if (rv < 0) {
-				spin_unlock(&engine->desc_lock);
+				mutex_unlock(&engine->desc_lock);
 				pr_err("Failed to service polling\n");
 				goto unmap_sgl;
 			}
@@ -3649,11 +3649,11 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 		tfer_idx++;
 		if (rv < 0) {
-			spin_unlock(&engine->desc_lock);
+			mutex_unlock(&engine->desc_lock);
 			goto unmap_sgl;
 		}
 	} /* while (sg) */
-	spin_unlock(&engine->desc_lock);
+	mutex_unlock(&engine->desc_lock);
 unmap_sgl:
 	if (!dma_mapped && sgt->nents) {
@@ -4131,7 +4131,7 @@ static struct xdma_dev *alloc_dev_instance(struct pci_dev *pdev)
 	engine = xdev->engine_h2c;
 	for (i = 0; i < XDMA_CHANNEL_NUM_MAX; i++, engine++) {
 		spin_lock_init(&engine->lock);
-		spin_lock_init(&engine->desc_lock);
+		mutex_init(&engine->desc_lock);
 		INIT_LIST_HEAD(&engine->transfer_list);
 #if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 		init_swait_queue_head(&engine->shutdown_wq);
@@ -4145,7 +4145,7 @@ static struct xdma_dev *alloc_dev_instance(struct pci_dev *pdev)
 	engine = xdev->engine_c2h;
 	for (i = 0; i < XDMA_CHANNEL_NUM_MAX; i++, engine++) {
 		spin_lock_init(&engine->lock);
-		spin_lock_init(&engine->desc_lock);
+		mutex_init(&engine->desc_lock);
 		INIT_LIST_HEAD(&engine->transfer_list);
 #if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 		init_swait_queue_head(&engine->shutdown_wq);
@@ -511,7 +511,7 @@ struct xdma_engine {
 	u32 irq_bitmask;		/* IRQ bit mask for this engine */
 	struct work_struct work;	/* Work queue for interrupt handling */
-	spinlock_t desc_lock;		/* protects concurrent access */
+	struct mutex desc_lock;		/* protects concurrent access */
 	dma_addr_t desc_bus;
 	struct xdma_desc *desc;
 	int desc_idx;			/* current descriptor index */