Commit a0df2803 authored by sujathabanoth-xlnx

Merge branch 'master' of https://github.com/Xilinx/dma_ip_drivers

parents c0fcc39e e316ccf3
@@ -8,9 +8,7 @@ Both the linux kernel driver and the DPDK driver can be run on a PCI Express roo
 ### Getting Started
-* [QDMA DPDK Reference Driver User Guide](http://htmlpreview.github.io/?https://github.com/Xilinx/dma_ip_drivers/blob/master/QDMA/DPDK/docs/git_doc/html/index.html)
-* [QDMA Linux Kernel Reference Driver User Guide](http://htmlpreview.github.io/?https://github.com/Xilinx/dma_ip_drivers/blob/master/QDMA/linux-kernel/docs/git_doc/html/index.html)
+* [QDMA Reference Drivers Comprehensive documentation](https://xilinx.github.io/dma_ip_drivers/)
 ## Xilinx-VSEC (XVSEC)
...
Release: 2019.2
===============
Change list:
- Updated data rate reporting for performance runs; the data rate is now printed based on the transfer size.
- Removed BUG_ON calls; proper error codes are returned instead.
- Streaming mode: enabled the credit mechanism by default.
- Streaming mode: no longer reads more than the user-supplied buffer size on C2H.
- Streaming mode: added Async-IO support for both streaming and MM transfers.
- Fixed a performance application crash.
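The Async-IO path added in this release is driven from user space through the kernel AIO interface (io_submit() and friends), which lands in the driver's aio_read/aio_write or read_iter/write_iter hooks introduced below. As a rough sketch of how an application might exercise it, assuming the driver's usual /dev/xdma0_c2h_0 device node and libaio (link with -laio); the node name and sizes here are illustrative, not taken from this commit:

```c
/* Hedged sketch: one asynchronous C2H read via libaio. */
#include <fcntl.h>
#include <libaio.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	void *buf;
	int fd = open("/dev/xdma0_c2h_0", O_RDONLY);	/* node name assumed */

	if (fd < 0 || io_setup(1, &ctx))
		return 1;
	if (posix_memalign(&buf, 4096, 4096))		/* aligned buffer */
		return 1;

	io_prep_pread(&cb, fd, buf, 4096, 0);	/* offset = card address (MM) */
	if (io_submit(ctx, 1, cbs) != 1)	/* queues and returns at once */
		return 1;
	if (io_getevents(ctx, 1, 1, &ev, NULL) == 1)	/* reap completion */
		printf("read %ld bytes\n", (long)ev.res);

	io_destroy(ctx);
	return 0;
}
```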
@@ -132,6 +132,14 @@ int xdma_user_isr_disable(void *dev_hndl, unsigned int mask);
  */
 ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr,
 		struct sg_table *sgt, bool dma_mapped, int timeout_ms);
+ssize_t xdma_xfer_submit_nowait(void *cb_hndl, void *dev_hndl, int channel,
+		bool write, u64 ep_addr, struct sg_table *sgt,
+		bool dma_mapped, int timeout_ms);
+ssize_t xdma_xfer_completion(void *cb_hndl, void *dev_hndl, int channel,
+		bool write, u64 ep_addr, struct sg_table *sgt,
+		bool dma_mapped, int timeout_ms);
 /////////////////////missing API////////////////////
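The two new exports split the old blocking xdma_xfer_submit() into a non-blocking submit and a separate result collection, with the xdma_io_cb callback (added further down in libxdma.h) tying the two together. A minimal kernel-side sketch of the pairing; my_ctx, my_io_done and my_submit are illustrative names, not part of the driver:

```c
/* Hedged sketch: queue a transfer without blocking; collect the result
 * from the io_done callback via xdma_xfer_completion(). */
struct my_ctx {
	void *dev_hndl;			/* handle from xdma_device_open() */
	struct xdma_io_cb cb;
};

static void my_io_done(unsigned long cb_hndl, int err)
{
	struct xdma_io_cb *cb = (struct xdma_io_cb *)cb_hndl;
	struct my_ctx *ctx = container_of(cb, struct my_ctx, cb);

	if (!err)	/* reap the finished transfer (channel 0, H2C here) */
		xdma_xfer_completion(cb, ctx->dev_hndl, 0, cb->write,
				cb->ep_addr, &cb->sgt, false, 10 * 1000);
}

static ssize_t my_submit(struct my_ctx *ctx, struct sg_table *sgt, u64 ep_addr)
{
	ctx->cb.ep_addr = ep_addr;
	ctx->cb.write = 1;
	ctx->cb.sgt = *sgt;
	ctx->cb.io_done = my_io_done;	/* invoked when the engine finishes */

	return xdma_xfer_submit_nowait(&ctx->cb, ctx->dev_hndl, 0 /* channel */,
			true, ep_addr, &ctx->cb.sgt, false, 10 * 1000);
}
```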
...
@@ -13,7 +13,7 @@ ifneq ($(KERNELRELEASE),)
 else
 BUILDSYSTEM_DIR:=/lib/modules/$(shell uname -r)/build
 PWD:=$(shell pwd)
 all :
 	$(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) modules
 clean:
...
@@ -31,11 +31,11 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/workqueue.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
+#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 #include <linux/swait.h>
 #endif
 /*
  * if the config bar is fixed, the driver does not need to search through
  * all of the bars
  */
 //#define XDMA_CONFIG_BAR_NUM 1
@@ -69,7 +69,7 @@
 #define XDMA_TRANSFER_MAX_DESC (2048)
 /* maximum size of a single DMA transfer descriptor */
 #define XDMA_DESC_BLEN_BITS 28
 #define XDMA_DESC_BLEN_MAX ((1 << (XDMA_DESC_BLEN_BITS)) - 1)
 /* bits of the SG DMA control register */
@@ -83,6 +83,7 @@
 #define XDMA_CTRL_IE_DESC_ERROR (0x1FUL << 19)
 #define XDMA_CTRL_NON_INCR_ADDR (1UL << 25)
 #define XDMA_CTRL_POLL_MODE_WB (1UL << 26)
+#define XDMA_CTRL_STM_MODE_WB (1UL << 27)
 /* bits of the SG DMA status register */
 #define XDMA_STAT_BUSY (1UL << 0)
@@ -138,7 +139,7 @@
 /* all combined */
 #define XDMA_STAT_H2C_ERR_MASK \
 	(XDMA_STAT_COMMON_ERR_MASK | XDMA_STAT_DESC_ERR_MASK | \
 	XDMA_STAT_H2C_R_ERR_MASK | XDMA_STAT_H2C_W_ERR_MASK)
 #define XDMA_STAT_C2H_ERR_MASK \
 	(XDMA_STAT_COMMON_ERR_MASK | XDMA_STAT_DESC_ERR_MASK | \
@@ -161,7 +162,7 @@
 #define XDMA_ID_C2H 0x1fc1U
 /* for C2H AXI-ST mode */
 #define CYCLIC_RX_PAGES_MAX 256
 #define LS_BYTE_MASK 0x000000FFUL
@@ -247,6 +248,23 @@ enum dev_capabilities {
 /* SECTION: Structure definitions */
+struct xdma_io_cb {
+	void __user *buf;
+	size_t len;
+	void *private;
+	unsigned int pages_nr;
+	struct sg_table sgt;
+	struct page **pages;
+	/** total data size */
+	unsigned int count;
+	/** MM only, DDR/BRAM memory addr */
+	u64 ep_addr;
+	/** write: if write to the device */
+	struct xdma_request_cb *req;
+	u8 write:1;
+	void (*io_done)(unsigned long cb_hndl, int err);
+};
 struct config_regs {
 	u32 identifier;
 	u32 reserved_1[4];
@@ -392,11 +410,14 @@ struct sw_desc {
 struct xdma_transfer {
 	struct list_head entry; /* queue of non-completed transfers */
 	struct xdma_desc *desc_virt; /* virt addr of the 1st descriptor */
+	struct xdma_result *res_virt; /* virt addr of result for c2h streaming */
+	dma_addr_t res_bus; /* bus addr for result descriptors */
 	dma_addr_t desc_bus; /* bus addr of the first descriptor */
 	int desc_adjacent; /* adjacent descriptors at desc_bus */
 	int desc_num; /* number of descriptors in transfer */
+	int desc_index; /* index for first descriptor in transfer */
 	enum dma_data_direction dir;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
+#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 	struct swait_queue_head wq;
 #else
 	wait_queue_head_t wq; /* wait queue for transfer completion */
@@ -409,6 +430,7 @@ struct xdma_transfer {
 	int last_in_request; /* flag if last within request */
 	unsigned int len;
 	struct sg_table *sgt;
+	struct xdma_io_cb *cb;
 };
 struct xdma_request_cb {
@@ -416,7 +438,8 @@ struct xdma_request_cb {
 	unsigned int total_len;
 	u64 ep_addr;
-	struct xdma_transfer xfer;
+	struct xdma_transfer tfer[2]; /* Use two transfers in case single request needs to be split */
+	struct xdma_io_cb *cb;
 	unsigned int sw_desc_idx;
 	unsigned int sw_desc_cnt;
@@ -450,7 +473,8 @@ struct xdma_engine {
 	int max_extra_adj; /* descriptor prefetch capability */
 	int desc_dequeued; /* num descriptors of completed transfers */
 	u32 status; /* last known status of device */
-	u32 interrupt_enable_mask_value;/* only used for MSIX mode to store per-engine interrupt mask value */
+	/* only used for MSIX mode to store per-engine interrupt mask value */
+	u32 interrupt_enable_mask_value;
 	/* Transfer list management */
 	struct list_head transfer_list; /* queue of transfers */
@@ -458,8 +482,10 @@ struct xdma_engine {
 	/* Members applicable to AXI-ST C2H (cyclic) transfers */
 	struct xdma_result *cyclic_result;
 	dma_addr_t cyclic_result_bus; /* bus addr for transfer */
 	struct xdma_request_cb *cyclic_req;
 	struct sg_table cyclic_sgt;
+	u8 *perf_buf_virt;
+	dma_addr_t perf_buf_bus; /* bus address */
 	u8 eop_found; /* used only for cyclic(rx:c2h) */
 	int eop_count;
 	int rx_tail; /* follows the HW */
@@ -474,7 +500,7 @@ struct xdma_engine {
 	dma_addr_t poll_mode_bus; /* bus addr for descriptor writeback */
 	/* Members associated with interrupt mode support */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
+#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 	struct swait_queue_head shutdown_wq;
 #else
 	wait_queue_head_t shutdown_wq; /* wait queue for shutdown sync */
@@ -488,14 +514,23 @@ struct xdma_engine {
 	spinlock_t desc_lock; /* protects concurrent access */
 	dma_addr_t desc_bus;
 	struct xdma_desc *desc;
+	int desc_idx; /* current descriptor index */
+	int desc_used; /* total descriptors used */
 	/* for performance test support */
 	struct xdma_performance_ioctl *xdma_perf; /* perf test control */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
+#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 	struct swait_queue_head xdma_perf_wq;
 #else
 	wait_queue_head_t xdma_perf_wq; /* Perf test sync */
 #endif
+	struct xdma_kthread *cmplthp;
+	/* completion status thread list for the queue */
+	struct list_head cmplthp_list;
+	/* pending work thread list */
+	/* cpu attached to intr_work */
+	unsigned int intr_work_cpu;
 };
 struct xdma_user_irq {
@@ -506,14 +541,14 @@ struct xdma_user_irq {
 	wait_queue_head_t events_wq; /* wait queue to sync waiting threads */
 	irq_handler_t handler;
 	void *dev;
 };
 /* XDMA PCIe device specific book-keeping */
 #define XDEV_FLAG_OFFLINE 0x1
 struct xdma_dev {
 	struct list_head list_head;
 	struct list_head rcu_node;
 	unsigned long magic; /* structure ID for sanity checks */
 	struct pci_dev *pdev; /* pci device struct from probe() */
@@ -525,7 +560,7 @@ struct xdma_dev {
 	unsigned int flags;
 	/* PCIe BAR management */
-	void *__iomem bar[XDMA_BAR_NUM]; /* addresses for mapped BARs */
+	void __iomem *bar[XDMA_BAR_NUM]; /* addresses for mapped BARs */
 	int user_bar_idx; /* BAR index of user logic */
 	int config_bar_idx; /* BAR index of XDMA config logic */
 	int bypass_bar_idx; /* BAR index of XDMA bypass logic */
@@ -541,7 +576,7 @@ struct xdma_dev {
 	int irq_line; /* flag if irq allocated successfully */
 	int msi_enabled; /* flag if msi was enabled for the device */
 	int msix_enabled; /* flag if msi-x was enabled for the device */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
+#if KERNEL_VERSION(4, 12, 0) > LINUX_VERSION_CODE
 	struct msix_entry entry[32]; /* msi-x vector/entry table */
 #endif
 	struct xdma_user_irq user_irq[16]; /* user IRQ management */
@@ -621,8 +656,8 @@ void get_perf_stats(struct xdma_engine *engine);
 int xdma_cyclic_transfer_setup(struct xdma_engine *engine);
 int xdma_cyclic_transfer_teardown(struct xdma_engine *engine);
-ssize_t xdma_engine_read_cyclic(struct xdma_engine *, char __user *, size_t,
-		int);
+ssize_t xdma_engine_read_cyclic(struct xdma_engine *engine,
+		char __user *buf, size_t count, int timeout_ms);
 int engine_addrmode_set(struct xdma_engine *engine, unsigned long arg);
+int engine_service_poll(struct xdma_engine *engine, u32 expected_desc_count);
 #endif /* XDMA_LIB_H */
@@ -20,9 +20,9 @@
 #ifndef __XDMA_VERSION_H__
 #define __XDMA_VERSION_H__
-#define DRV_MOD_MAJOR 2018
-#define DRV_MOD_MINOR 3
-#define DRV_MOD_PATCHLEVEL 41
+#define DRV_MOD_MAJOR 2019
+#define DRV_MOD_MINOR 2
+#define DRV_MOD_PATCHLEVEL 42
 #define DRV_MODULE_VERSION \
 	__stringify(DRV_MOD_MAJOR) "." \
...
@@ -18,12 +18,12 @@ EXTRA_CFLAGS := -I$(topdir)/include $(XVC_FLAGS)
 #EXTRA_CFLAGS += -DINTERNAL_TESTING
 ifneq ($(KERNELRELEASE),)
-	$(TARGET_MODULE)-objs := libxdma.o xdma_cdev.o cdev_ctrl.o cdev_events.o cdev_sgdma.o cdev_xvc.o cdev_bypass.o xdma_mod.o
+	$(TARGET_MODULE)-objs := libxdma.o xdma_cdev.o cdev_ctrl.o cdev_events.o cdev_sgdma.o cdev_xvc.o cdev_bypass.o xdma_mod.o xdma_thread.o
 	obj-m := $(TARGET_MODULE).o
 else
 BUILDSYSTEM_DIR:=/lib/modules/$(shell uname -r)/build
 PWD:=$(shell pwd)
 all :
 	$(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) modules
 clean:
...
@@ -20,7 +20,7 @@
 #include "libxdma_api.h"
 #include "xdma_cdev.h"
-#define write_register(v,mem,off) iowrite32(v, mem)
+#define write_register(v, mem, off) iowrite32(v, mem)
 static int copy_desc_data(struct xdma_transfer *transfer, char __user *buf,
 		size_t *buf_offset, size_t buf_size)
@@ -29,8 +29,15 @@ static int copy_desc_data(struct xdma_transfer *transfer, char __user *buf,
 	int copy_err;
 	int rc = 0;
-	BUG_ON(!buf);
-	BUG_ON(!buf_offset);
+	if (!buf) {
+		pr_err("Invalid user buffer\n");
+		return -EINVAL;
+	}
+
+	if (!buf_offset) {
+		pr_err("Invalid user buffer offset\n");
+		return -EINVAL;
+	}
 	/* Fill user buffer with descriptor data */
 	for (i = 0; i < transfer->desc_num; i++) {
@@ -71,7 +78,7 @@ static ssize_t char_bypass_read(struct file *file, char __user *buf,
 	xdev = xcdev->xdev;
 	engine = xcdev->engine;
-	dbg_sg("In char_bypass_read()\n");
+	dbg_sg("In %s()\n", __func__);
 	if (count & 3) {
 		dbg_sg("Buffer size must be a multiple of 4 bytes\n");
@@ -114,7 +121,7 @@ static ssize_t char_bypass_write(struct file *file, const char __user *buf,
 	struct xdma_cdev *xcdev = (struct xdma_cdev *)file->private_data;
 	u32 desc_data;
-	u32 *bypass_addr;
+	void __iomem *bypass_addr;
 	size_t buf_offset = 0;
 	int rc = 0;
 	int copy_err;
@@ -140,18 +147,21 @@ static ssize_t char_bypass_write(struct file *file, const char __user *buf,
 		return -ENODEV;
 	}
-	dbg_sg("In char_bypass_write()\n");
+	dbg_sg("In %s()\n", __func__);
 	spin_lock(&engine->lock);
 	/* Write descriptor data to the bypass BAR */
-	bypass_addr = (u32 *)xdev->bar[xdev->bypass_bar_idx];
-	bypass_addr += engine->bypass_offset;
+	bypass_addr = xdev->bar[xdev->bypass_bar_idx];
+	bypass_addr = (void __iomem *)(
+			(u32 __iomem *)bypass_addr + engine->bypass_offset
+			);
 	while (buf_offset < count) {
 		copy_err = copy_from_user(&desc_data, &buf[buf_offset],
 					sizeof(u32));
 		if (!copy_err) {
-			write_register(desc_data, bypass_addr, bypass_addr - engine->bypass_offset);
+			write_register(desc_data, bypass_addr,
+					bypass_addr - engine->bypass_offset);
 			buf_offset += sizeof(u32);
 			rc = buf_offset;
 		} else {
@@ -183,5 +193,5 @@ static const struct file_operations bypass_fops = {
 void cdev_bypass_init(struct xdma_cdev *xcdev)
 {
 	cdev_init(&xcdev->cdev, &bypass_fops);
 }
@@ -32,13 +32,13 @@ static ssize_t char_ctrl_read(struct file *fp, char __user *buf, size_t count,
 {
 	struct xdma_cdev *xcdev = (struct xdma_cdev *)fp->private_data;
 	struct xdma_dev *xdev;
-	void *reg;
+	void __iomem *reg;
 	u32 w;
 	int rv;
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
 		return rv;
 	xdev = xcdev->xdev;
 	/* only 32-bit aligned and 32-bit multiples */
@@ -48,8 +48,8 @@ static ssize_t char_ctrl_read(struct file *fp, char __user *buf, size_t count,
 	reg = xdev->bar[xcdev->bar] + *pos;
 	//w = read_register(reg);
 	w = ioread32(reg);
-	dbg_sg("char_ctrl_read(@%p, count=%ld, pos=%d) value = 0x%08x\n", reg,
-		(long)count, (int)*pos, w);
+	dbg_sg("%s(@%p, count=%ld, pos=%d) value = 0x%08x\n",
+		__func__, reg, (long)count, (int)*pos, w);
 	rv = copy_to_user(buf, &w, 4);
 	if (rv)
 		dbg_sg("Copy to userspace failed but continuing\n");
@@ -63,13 +63,13 @@ static ssize_t char_ctrl_write(struct file *file, const char __user *buf,
 {
 	struct xdma_cdev *xcdev = (struct xdma_cdev *)file->private_data;
 	struct xdma_dev *xdev;
-	void *reg;
+	void __iomem *reg;
 	u32 w;
 	int rv;
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
 		return rv;
 	xdev = xcdev->xdev;
 	/* only 32-bit aligned and 32-bit multiples */
@@ -79,12 +79,11 @@ static ssize_t char_ctrl_write(struct file *file, const char __user *buf,
 	/* first address is BAR base plus file position offset */
 	reg = xdev->bar[xcdev->bar] + *pos;
 	rv = copy_from_user(&w, buf, 4);
-	if (rv) {
+	if (rv)
 		pr_info("copy from user failed %d/4, but continuing.\n", rv);
-	}
-	dbg_sg("char_ctrl_write(0x%08x @%p, count=%ld, pos=%d)\n", w, reg,
-		(long)count, (int)*pos);
+	dbg_sg("%s(0x%08x @%p, count=%ld, pos=%d)\n",
+		__func__, w, reg, (long)count, (int)*pos);
 	//write_register(w, reg);
 	iowrite32(w, reg);
 	*pos += 4;
@@ -129,7 +128,7 @@ long char_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
 		return rv;
 	xdev = xcdev->xdev;
 	if (!xdev) {
@@ -158,7 +157,7 @@ long char_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	switch (cmd) {
 	case XDMA_IOCINFO:
-		if (copy_from_user((void *)&ioctl_obj, (void *) arg,
+		if (copy_from_user((void *)&ioctl_obj, (void __user *) arg,
 				sizeof(struct xdma_ioc_base))) {
 			pr_err("copy_from_user failed.\n");
 			return -EFAULT;
@@ -196,7 +195,7 @@ int bridge_mmap(struct file *file, struct vm_area_struct *vma)
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
 		return rv;
 	xdev = xcdev->xdev;
 	off = vma->vm_pgoff << PAGE_SHIFT;
...
@@ -60,14 +60,14 @@ struct xdma_ioc_base {
 };
 struct xdma_ioc_info {
 	struct xdma_ioc_base base;
 	unsigned short vendor;
 	unsigned short device;
 	unsigned short subsystem_vendor;
 	unsigned short subsystem_device;
 	unsigned int dma_engine_version;
 	unsigned int driver_version;
 	unsigned long long feature_id;
 	unsigned short domain;
 	unsigned char bus;
 	unsigned char dev;
...
@@ -35,7 +35,7 @@ static ssize_t char_events_read(struct file *file, char __user *buf,
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
 		return rv;
 	user_irq = xcdev->user_irq;
 	if (!user_irq) {
 		pr_info("xcdev 0x%p, user_irq NULL.\n", xcdev);
@@ -84,7 +84,7 @@ static unsigned int char_events_poll(struct file *file, poll_table *wait)
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
 		return rv;
 	user_irq = xcdev->user_irq;
 	if (!user_irq) {
 		pr_info("xcdev 0x%p, user_irq NULL.\n", xcdev);
...
@@ -19,16 +19,116 @@
 #define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__
+#include <linux/types.h>
 #include <asm/cacheflush.h>
+#include <linux/slab.h>
+#include <linux/aio.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+#include <linux/uio.h>
+#endif
 #include "libxdma_api.h"
 #include "xdma_cdev.h"
 #include "cdev_sgdma.h"
+#include "xdma_thread.h"
 /* Module Parameters */
 unsigned int sgdma_timeout = 10;
 module_param(sgdma_timeout, uint, 0644);
 MODULE_PARM_DESC(sgdma_timeout, "timeout in seconds for sgdma, default is 10 sec.");
+extern struct kmem_cache *cdev_cache;
+static void char_sgdma_unmap_user_buf(struct xdma_io_cb *cb, bool write);
+
+static void async_io_handler(unsigned long cb_hndl, int err)
+{
+	struct xdma_cdev *xcdev;
+	struct xdma_engine *engine;
+	struct xdma_dev *xdev;
+	struct xdma_io_cb *cb = (struct xdma_io_cb *)cb_hndl;
+	struct cdev_async_io *caio = (struct cdev_async_io *)cb->private;
+	ssize_t numbytes = 0;
+	ssize_t res, res2;
+	int lock_stat;
+	int rv;
+
+	if (NULL == caio) {
+		pr_err("Invalid work struct\n");
+		return;
+	}
+
+	xcdev = (struct xdma_cdev *)caio->iocb->ki_filp->private_data;
+	rv = xcdev_check(__func__, xcdev, 1);
+	if (rv < 0)
+		return;
+
+	/* Safeguarding for cancel requests */
+	lock_stat = spin_trylock(&caio->lock);
+	if (!lock_stat) {
+		pr_err("caio lock not acquired\n");
+		goto skip_dev_lock;
+	}
+
+	if (false != caio->cancel) {
+		pr_err("skipping aio\n");
+		goto skip_tran;
+	}
+
+	engine = xcdev->engine;
+	xdev = xcdev->xdev;
+
+	if (!err)
+		numbytes = xdma_xfer_completion((void *)cb, xdev,
+				engine->channel, cb->write, cb->ep_addr,
+				&cb->sgt, 0, sgdma_timeout * 1000);
+
+	char_sgdma_unmap_user_buf(cb, cb->write);
+
+	caio->res2 |= (err < 0) ? err : 0;
+	if (caio->res2)
+		caio->err_cnt++;
+
+	caio->cmpl_cnt++;
+	caio->res += numbytes;
+
+	if (caio->cmpl_cnt == caio->req_cnt) {
+		res = caio->res;
+		res2 = caio->res2;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+		caio->iocb->ki_complete(caio->iocb, res, res2);
+#else
+		aio_complete(caio->iocb, res, res2);
+#endif
+skip_tran:
+		spin_unlock(&caio->lock);
+		kmem_cache_free(cdev_cache, caio);
+		kfree(cb);
+		return;
+	}
+	spin_unlock(&caio->lock);
+	return;
+
+skip_dev_lock:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+	caio->iocb->ki_complete(caio->iocb, numbytes, -EBUSY);
+#else
+	aio_complete(caio->iocb, numbytes, -EBUSY);
+#endif
+	kmem_cache_free(cdev_cache, caio);
+}
 /*
  * character device file operations for SG DMA engine
  */
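An aside on the sgdma_timeout parameter above (not part of the diff): because it is registered with mode 0644, it can be set at load time, e.g. with something like `modprobe xdma sgdma_timeout=30` (assuming the module is built as xdma.ko), or adjusted at runtime through /sys/module/xdma/parameters/sgdma_timeout.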
@@ -52,10 +152,10 @@ static loff_t char_sgdma_llseek(struct file *file, loff_t off, int whence)
 	if (newpos < 0)
 		return -EINVAL;
 	file->f_pos = newpos;
-	dbg_fops("char_sgdma_llseek: pos=%lld\n", (signed long long)newpos);
+	dbg_fops("%s: pos=%lld\n", __func__, (signed long long)newpos);
 #if 0
-	pr_err("0x%p, off 0x%lld, whence %d -> pos %lld.\n",
+	pr_err("0x%p, off %lld, whence %d -> pos %lld.\n",
 		file, (signed long long)off, whence, (signed long long)off);
 #endif
@@ -80,7 +180,10 @@ static loff_t char_sgdma_llseek(struct file *file, loff_t off, int whence)
 static int check_transfer_align(struct xdma_engine *engine,
 	const char __user *buf, size_t count, loff_t pos, int sync)
 {
-	BUG_ON(!engine);
+	if (!engine) {
+		pr_err("Invalid DMA engine\n");
+		return -EINVAL;
+	}
 	/* AXI ST or AXI MM non-incremental addressing mode? */
 	if (engine->non_incr_addr) {
@@ -171,17 +274,16 @@ static int char_sgdma_map_user_buf_to_sgl(struct xdma_io_cb *cb, bool write)
 {
 	struct sg_table *sgt = &cb->sgt;
 	unsigned long len = cb->len;
-	char *buf = cb->buf;
+	void __user *buf = cb->buf;
 	struct scatterlist *sg;
-	unsigned int pages_nr = (((unsigned long)buf + len + PAGE_SIZE -1) -
+	unsigned int pages_nr = (((unsigned long)buf + len + PAGE_SIZE - 1) -
 				((unsigned long)buf & PAGE_MASK))
 				>> PAGE_SHIFT;
 	int i;
 	int rv;
-	if (pages_nr == 0) {
+	if (pages_nr == 0)
 		return -EINVAL;
-	}
 	if (sg_alloc_table(sgt, pages_nr, GFP_KERNEL)) {
 		pr_err("sgl OOM.\n");
@@ -225,7 +327,8 @@ static int char_sgdma_map_user_buf_to_sgl(struct xdma_io_cb *cb, bool write)
 	sg = sgt->sgl;
 	for (i = 0; i < pages_nr; i++, sg = sg_next(sg)) {
 		unsigned int offset = offset_in_page(buf);
-		unsigned int nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
+		unsigned int nbytes =
+			min_t(unsigned int, PAGE_SIZE - offset, len);
 		flush_dcache_page(cb->pages[i]);
 		sg_set_page(sg, cb->pages[i], nbytes, offset);
@@ -234,7 +337,10 @@ static int char_sgdma_map_user_buf_to_sgl(struct xdma_io_cb *cb, bool write)
 		len -= nbytes;
 	}
-	BUG_ON(len);
+	if (len) {
+		pr_err("Invalid user buffer length. Cannot map to sgl\n");
+		return -EINVAL;
+	}
 	cb->pages_nr = pages_nr;
 	return 0;
@@ -244,7 +350,7 @@ err_out:
 	return rv;
 }
-static ssize_t char_sgdma_read_write(struct file *file, char __user *buf,
+static ssize_t char_sgdma_read_write(struct file *file, const char __user *buf,
 		size_t count, loff_t *pos, bool write)
 {
 	int rv;
@@ -278,14 +384,16 @@ static ssize_t char_sgdma_read_write(struct file *file, char __user *buf,
 	}
 	memset(&cb, 0, sizeof(struct xdma_io_cb));
-	cb.buf = buf;
+	cb.buf = (char __user *)buf;
 	cb.len = count;
+	cb.ep_addr = (u64)*pos;
+	cb.write = write;
 	rv = char_sgdma_map_user_buf_to_sgl(&cb, write);
 	if (rv < 0)
 		return rv;
 	res = xdma_xfer_submit(xdev, engine->channel, write, *pos, &cb.sgt,
 				0, sgdma_timeout * 1000);
 	char_sgdma_unmap_user_buf(&cb, write);
@@ -294,158 +402,311 @@ static ssize_t char_sgdma_read_write(struct file *file, char __user *buf,
 static ssize_t char_sgdma_write(struct file *file, const char __user *buf,
 		size_t count, loff_t *pos)
 {
-	return char_sgdma_read_write(file, (char *)buf, count, pos, 1);
+	return char_sgdma_read_write(file, buf, count, pos, 1);
 }
 
 static ssize_t char_sgdma_read(struct file *file, char __user *buf,
 		size_t count, loff_t *pos)
 {
-	struct xdma_cdev *xcdev = (struct xdma_cdev *)file->private_data;
-	struct xdma_engine *engine;
-	int rv;
-
-	rv = xcdev_check(__func__, xcdev, 1);
-	if (rv < 0)
-		return rv;
-	engine = xcdev->engine;
-
-	if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
-		rv = xdma_cyclic_transfer_setup(engine);
-		if (rv < 0 && rv != -EBUSY)
-			return rv;
-		/* 600 sec. timeout */
-		return xdma_engine_read_cyclic(engine, buf, count, 600000);
-	}
-	return char_sgdma_read_write(file, (char *)buf, count, pos, 0);
+	return char_sgdma_read_write(file, buf, count, pos, 0);
+}
+
+static ssize_t cdev_aio_write(struct kiocb *iocb, const struct iovec *io,
+		unsigned long count, loff_t pos)
+{
+	struct xdma_cdev *xcdev = (struct xdma_cdev *)iocb->ki_filp->private_data;
+	struct cdev_async_io *caio;
+	struct xdma_engine *engine;
+	struct xdma_dev *xdev;
+	int rv;
+	unsigned long i;
+
+	if (!xcdev) {
+		pr_info("file 0x%p, xcdev NULL, %llu, pos %llu, W %d.\n",
+			iocb->ki_filp, (u64)count, (u64)pos, 1);
+		return -EINVAL;
+	}
+
+	engine = xcdev->engine;
+	xdev = xcdev->xdev;
+
+	if (engine->dir != DMA_TO_DEVICE) {
+		pr_err("r/w mismatch. WRITE, dir %d.\n", engine->dir);
+		return -EINVAL;
+	}
+
+	caio = kmem_cache_alloc(cdev_cache, GFP_KERNEL);
+	memset(caio, 0, sizeof(struct cdev_async_io));
+	caio->cb = kzalloc(count * (sizeof(struct xdma_io_cb)), GFP_KERNEL);
+	spin_lock_init(&caio->lock);
+	iocb->private = caio;
+	caio->iocb = iocb;
+	caio->write = true;
+	caio->cancel = false;
+	caio->req_cnt = count;
+
+	for (i = 0; i < count; i++) {
+		memset(&(caio->cb[i]), 0, sizeof(struct xdma_io_cb));
+
+		caio->cb[i].buf = io[i].iov_base;
+		caio->cb[i].len = io[i].iov_len;
+		caio->cb[i].ep_addr = (u64)pos;
+		caio->cb[i].write = true;
+		caio->cb[i].private = caio;
+		caio->cb[i].io_done = async_io_handler;
+
+		rv = check_transfer_align(engine, caio->cb[i].buf,
+				caio->cb[i].len, pos, 1);
+		if (rv) {
+			pr_info("Invalid transfer alignment detected\n");
+			kmem_cache_free(cdev_cache, caio);
+			return rv;
+		}
+
+		rv = char_sgdma_map_user_buf_to_sgl(&caio->cb[i], true);
+		if (rv < 0)
+			return rv;
+
+		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev,
+				engine->channel, caio->cb[i].write,
+				caio->cb[i].ep_addr, &caio->cb[i].sgt,
+				0, sgdma_timeout * 1000);
+	}
+
+	if (engine->cmplthp)
+		xdma_kthread_wakeup(engine->cmplthp);
+
+	return -EIOCBQUEUED;
+}
+
+static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
+		unsigned long count, loff_t pos)
+{
+	struct xdma_cdev *xcdev = (struct xdma_cdev *)iocb->ki_filp->private_data;
+	struct cdev_async_io *caio;
+	struct xdma_engine *engine;
+	struct xdma_dev *xdev;
+	int rv;
+	unsigned long i;
+
+	if (!xcdev) {
+		pr_info("file 0x%p, xcdev NULL, %llu, pos %llu, W %d.\n",
+			iocb->ki_filp, (u64)count, (u64)pos, 1);
+		return -EINVAL;
+	}
+
+	engine = xcdev->engine;
+	xdev = xcdev->xdev;
+
+	if (engine->dir != DMA_FROM_DEVICE) {
+		pr_err("r/w mismatch. READ, dir %d.\n", engine->dir);
+		return -EINVAL;
+	}
+
+	caio = kmem_cache_alloc(cdev_cache, GFP_KERNEL);
+	memset(caio, 0, sizeof(struct cdev_async_io));
+	caio->cb = kzalloc(count * (sizeof(struct xdma_io_cb)), GFP_KERNEL);
+	spin_lock_init(&caio->lock);
+	iocb->private = caio;
+	caio->iocb = iocb;
+	caio->write = false;
+	caio->cancel = false;
+	caio->req_cnt = count;
+
+	for (i = 0; i < count; i++) {
+		memset(&(caio->cb[i]), 0, sizeof(struct xdma_io_cb));
+
+		caio->cb[i].buf = io[i].iov_base;
+		caio->cb[i].len = io[i].iov_len;
+		caio->cb[i].ep_addr = (u64)pos;
+		caio->cb[i].write = false;
+		caio->cb[i].private = caio;
+		caio->cb[i].io_done = async_io_handler;
+
+		rv = check_transfer_align(engine, caio->cb[i].buf,
+				caio->cb[i].len, pos, 1);
+		if (rv) {
+			pr_info("Invalid transfer alignment detected\n");
+			kmem_cache_free(cdev_cache, caio);
+			return rv;
+		}
+
+		rv = char_sgdma_map_user_buf_to_sgl(&caio->cb[i], true);
+		if (rv < 0)
+			return rv;
+
+		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev,
+				engine->channel, caio->cb[i].write,
+				caio->cb[i].ep_addr, &caio->cb[i].sgt,
+				0, sgdma_timeout * 1000);
+	}
+
+	if (engine->cmplthp)
+		xdma_kthread_wakeup(engine->cmplthp);
+
+	return -EIOCBQUEUED;
 }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+static ssize_t cdev_write_iter(struct kiocb *iocb, struct iov_iter *io)
+{
+	return cdev_aio_write(iocb, io->iov, io->nr_segs, io->iov_offset);
+}
+
+static ssize_t cdev_read_iter(struct kiocb *iocb, struct iov_iter *io)
+{
+	return cdev_aio_read(iocb, io->iov, io->nr_segs, io->iov_offset);
+}
+#endif
 static int ioctl_do_perf_start(struct xdma_engine *engine, unsigned long arg)
 {
 	int rv;
 	struct xdma_dev *xdev;
-	BUG_ON(!engine);
-	xdev = engine->xdev;
-	BUG_ON(!xdev);
+	if (!engine) {
+		pr_err("Invalid DMA engine\n");
+		return -EINVAL;
+	}
+
+	xdev = engine->xdev;
+	if (!xdev) {
+		pr_err("Invalid xdev\n");
+		return -EINVAL;
+	}
 	/* performance measurement already running on this engine? */
 	if (engine->xdma_perf) {
 		dbg_perf("IOCTL_XDMA_PERF_START failed!\n");
 		dbg_perf("Perf measurement already seems to be running!\n");
 		return -EBUSY;
 	}
 	engine->xdma_perf = kzalloc(sizeof(struct xdma_performance_ioctl),
 			GFP_KERNEL);
 
 	if (!engine->xdma_perf)
 		return -ENOMEM;
 
 	rv = copy_from_user(engine->xdma_perf,
-			(struct xdma_performance_ioctl *)arg,
+			(struct xdma_performance_ioctl __user *)arg,
 			sizeof(struct xdma_performance_ioctl));
 
 	if (rv < 0) {
 		dbg_perf("Failed to copy from user space 0x%lx\n", arg);
 		return -EINVAL;
 	}
 	if (engine->xdma_perf->version != IOCTL_XDMA_PERF_V1) {
 		dbg_perf("Unsupported IOCTL version %d\n",
 			engine->xdma_perf->version);
 		return -EINVAL;
 	}
 
 	enable_perf(engine);
 	dbg_perf("transfer_size = %d\n", engine->xdma_perf->transfer_size);
 	/* initialize wait queue */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
+#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 	init_swait_queue_head(&engine->xdma_perf_wq);
 #else
 	init_waitqueue_head(&engine->xdma_perf_wq);
 #endif
-	xdma_performance_submit(xdev, engine);
-
-	return 0;
+	rv = xdma_performance_submit(xdev, engine);
+	if (rv < 0)
+		pr_err("Failed to submit dma performance\n");
+	return rv;
 }
 static int ioctl_do_perf_stop(struct xdma_engine *engine, unsigned long arg)
 {
 	struct xdma_transfer *transfer = NULL;
 	int rv;
 
+	if (!engine) {
+		pr_err("Invalid DMA engine\n");
+		return -EINVAL;
+	}
+
 	dbg_perf("IOCTL_XDMA_PERF_STOP\n");
 
 	/* no performance measurement running on this engine? */
 	if (!engine->xdma_perf) {
 		dbg_perf("No measurement in progress\n");
 		return -EINVAL;
 	}
 
 	/* stop measurement */
 	transfer = engine_cyclic_stop(engine);
+	if (!transfer) {
+		pr_err("Failed to stop cyclic transfer\n");
+		return -EINVAL;
+	}
 	dbg_perf("Waiting for measurement to stop\n");
 
-	if (engine->xdma_perf) {
-		get_perf_stats(engine);
-
-		rv = copy_to_user((void __user *)arg, engine->xdma_perf,
-			sizeof(struct xdma_performance_ioctl));
-		if (rv) {
-			dbg_perf("Error copying result to user\n");
-			return -EINVAL;
-		}
-
-		if (transfer)
-			kfree(transfer);
-	} else {
-		dbg_perf("engine->xdma_perf == NULL?\n");
-	}
+	get_perf_stats(engine);
+
+	rv = copy_to_user((void __user *)arg, engine->xdma_perf,
+			sizeof(struct xdma_performance_ioctl));
+	if (rv) {
+		dbg_perf("Error copying result to user\n");
+		return rv;
+	}
+
+	kfree(transfer);
 
 	kfree(engine->xdma_perf);
 	engine->xdma_perf = NULL;
 
 	return 0;
 }
 static int ioctl_do_perf_get(struct xdma_engine *engine, unsigned long arg)
 {
 	int rc;
 
-	BUG_ON(!engine);
+	if (!engine) {
+		pr_err("Invalid DMA engine\n");
+		return -EINVAL;
+	}
 
 	dbg_perf("IOCTL_XDMA_PERF_GET\n");
 
 	if (engine->xdma_perf) {
 		get_perf_stats(engine);
 
 		rc = copy_to_user((void __user *)arg, engine->xdma_perf,
 			sizeof(struct xdma_performance_ioctl));
 		if (rc) {
 			dbg_perf("Error copying result to user\n");
-			return -EINVAL;
+			return rc;
 		}
 	} else {
 		dbg_perf("engine->xdma_perf == NULL?\n");
 		return -EPROTO;
 	}
 
 	return 0;
 }
 
 static int ioctl_do_addrmode_set(struct xdma_engine *engine, unsigned long arg)
 {
 	return engine_addrmode_set(engine, arg);
 }
 
 static int ioctl_do_addrmode_get(struct xdma_engine *engine, unsigned long arg)
 {
 	int rv;
 	unsigned long src;
 
-	BUG_ON(!engine);
+	if (!engine) {
+		pr_err("Invalid DMA engine\n");
+		return -EINVAL;
+	}
 	src = !!engine->non_incr_addr;
 
 	dbg_perf("IOCTL_XDMA_ADDRMODE_GET\n");
@@ -454,22 +715,25 @@ static int ioctl_do_addrmode_get(struct xdma_engine *engine, unsigned long arg)
 	return rv;
 }
 
 static int ioctl_do_align_get(struct xdma_engine *engine, unsigned long arg)
 {
-	BUG_ON(!engine);
+	if (!engine) {
+		pr_err("Invalid DMA engine\n");
+		return -EINVAL;
+	}
 	dbg_perf("IOCTL_XDMA_ALIGN_GET\n");
 	return put_user(engine->addr_align, (int __user *)arg);
 }
 static long char_sgdma_ioctl(struct file *file, unsigned int cmd,
 		unsigned long arg)
 {
 	struct xdma_cdev *xcdev = (struct xdma_cdev *)file->private_data;
 	struct xdma_dev *xdev;
 	struct xdma_engine *engine;
 	int rv = 0;
 	rv = xcdev_check(__func__, xcdev, 1);
 	if (rv < 0)
@@ -478,16 +742,16 @@ static long char_sgdma_ioctl(struct file *file, unsigned int cmd,
 	xdev = xcdev->xdev;
 	engine = xcdev->engine;
 	switch (cmd) {
 	case IOCTL_XDMA_PERF_START:
 		rv = ioctl_do_perf_start(engine, arg);
 		break;
 	case IOCTL_XDMA_PERF_STOP:
 		rv = ioctl_do_perf_stop(engine, arg);
 		break;
 	case IOCTL_XDMA_PERF_GET:
 		rv = ioctl_do_perf_get(engine, arg);
 		break;
 	case IOCTL_XDMA_ADDRMODE_SET:
 		rv = ioctl_do_addrmode_set(engine, arg);
 		break;
@@ -497,13 +761,13 @@ static long char_sgdma_ioctl(struct file *file, unsigned int cmd,
 	case IOCTL_XDMA_ALIGN_GET:
 		rv = ioctl_do_align_get(engine, arg);
 		break;
 	default:
 		dbg_perf("Unsupported operation\n");
 		rv = -EINVAL;
 		break;
 	}
 	return rv;
 }
 static int char_sgdma_open(struct inode *inode, struct file *file)
@@ -519,8 +783,7 @@ static int char_sgdma_open(struct inode *inode, struct file *file)
 	if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
 		if (engine->device_open == 1)
 			return -EBUSY;
-		else
-			engine->device_open = 1;
+		engine->device_open = 1;
 	}
 	return 0;
@@ -551,7 +814,17 @@ static const struct file_operations sgdma_fops = {
 	.open = char_sgdma_open,
 	.release = char_sgdma_close,
 	.write = char_sgdma_write,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+	.write_iter = cdev_write_iter,
+#else
+	.aio_write = cdev_aio_write,
+#endif
 	.read = char_sgdma_read,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+	.read_iter = cdev_read_iter,
+#else
+	.aio_read = cdev_aio_read,
+#endif
 	.unlocked_ioctl = char_sgdma_ioctl,
 	.llseek = char_sgdma_llseek,
 };
...
@@ -46,17 +46,16 @@
  * _IOC_SIZE(nr) returns size
  */
-struct xdma_performance_ioctl
-{
+struct xdma_performance_ioctl {
 	/* IOCTL_XDMA_IOCTL_Vx */
 	uint32_t version;
 	uint32_t transfer_size;
 	/* measurement */
 	uint32_t stopped;
 	uint32_t iterations;
 	uint64_t clock_cycle_count;
 	uint64_t data_cycle_count;
 	uint64_t pending_count;
 };
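This structure is the payload for the perf ioctls handled in cdev_sgdma.c above (IOCTL_XDMA_PERF_START/STOP/GET). A hedged userspace sketch of a measurement run; the device node name and transfer size are illustrative assumptions:

```c
/* Hedged sketch: start a perf run on an H2C engine, then read the counters. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "cdev_sgdma.h"	/* IOCTL_XDMA_PERF_*, struct xdma_performance_ioctl */

int main(void)
{
	struct xdma_performance_ioctl perf = {
		.version = IOCTL_XDMA_PERF_V1,
		.transfer_size = 32 * 1024,
	};
	int fd = open("/dev/xdma0_h2c_0", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, IOCTL_XDMA_PERF_START, &perf) < 0)
		return 1;
	sleep(2);	/* let the engine loop transfers for a while */
	if (ioctl(fd, IOCTL_XDMA_PERF_STOP, &perf) == 0)
		printf("clock cycles %llu, data cycles %llu\n",
		       (unsigned long long)perf.clock_cycle_count,
		       (unsigned long long)perf.data_cycle_count);
	close(fd);
	return 0;
}
```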
...
@@ -32,30 +32,30 @@
 #ifdef __REG_DEBUG__
 /* SECTION: Function definitions */
-inline void __write_register(const char *fn, u32 value, void *base,
+inline void __write_register(const char *fn, u32 value, void __iomem *base,
 	unsigned int off)
 {
 	pr_info("%s: 0x%p, W reg 0x%lx, 0x%x.\n", fn, base, off, value);
 	iowrite32(value, base + off);
 }
-inline u32 __read_register(const char *fn, void *base, unsigned int off)
+inline u32 __read_register(const char *fn, void __iomem *base, unsigned int off)
 {
 	u32 v = ioread32(base + off);
 
 	pr_info("%s: 0x%p, R reg 0x%lx, 0x%x.\n", fn, base, off, v);
 	return v;
 }
-#define write_register(v,base,off) __write_register(__func__, v, base, off)
-#define read_register(base,off) __read_register(__func__, base, off)
+#define write_register(v, base, off) __write_register(__func__, v, base, off)
+#define read_register(base, off) __read_register(__func__, base, off)
 #else
-#define write_register(v,base,off) iowrite32(v, (base) + (off))
-#define read_register(base,off) ioread32((base) + (off))
+#define write_register(v, base, off) iowrite32(v, (base) + (off))
+#define read_register(base, off) ioread32((base) + (off))
 #endif /* #ifdef __REG_DEBUG__ */
-static int xvc_shift_bits(void *base, u32 tms_bits, u32 tdi_bits,
+static int xvc_shift_bits(void __iomem *base, u32 tms_bits, u32 tdi_bits,
 	u32 *tdo_bits)
 {
 	u32 control;
@@ -92,7 +92,7 @@ static int xvc_shift_bits(void *base, u32 tms_bits, u32 tdi_bits,
 static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct xdma_cdev *xcdev = (struct xdma_cdev *)filp->private_data;
 	struct xdma_dev *xdev;
 	struct xvc_ioc xvc_obj;
 	unsigned int opcode;
@@ -109,6 +109,7 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
 		return rv;
+
 	xdev = xcdev->xdev;
 	if (cmd != XDMA_IOCXVC) {
@@ -135,7 +136,7 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	total_bits = xvc_obj.length;
 	total_bytes = (total_bits + 7) >> 3;
-	buffer = (char *)kmalloc(total_bytes * 3, GFP_KERNEL);
+	buffer = (unsigned char *)kmalloc(total_bytes * 3, GFP_KERNEL);
 	if (!buffer) {
 		pr_info("OOM %u, op 0x%x, len %u bits, %u bytes.\n",
 			3 * total_bytes, opcode, total_bits, total_bytes);
@@ -146,12 +147,16 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	tdi_buf = tms_buf + total_bytes;
 	tdo_buf = tdi_buf + total_bytes;
-	rv = copy_from_user((void *)tms_buf, xvc_obj.tms_buf, total_bytes);
+	rv = copy_from_user((void *)tms_buf,
+			(const char __user *)xvc_obj.tms_buf,
+			total_bytes);
 	if (rv) {
 		pr_info("copy tms_buf failed: %d/%u.\n", rv, total_bytes);
 		goto cleanup;
 	}
-	rv = copy_from_user((void *)tdi_buf, xvc_obj.tdi_buf, total_bytes);
+	rv = copy_from_user((void *)tdi_buf,
+			(const char __user *)xvc_obj.tdi_buf,
+			total_bytes);
 	if (rv) {
 		pr_info("copy tdi_buf failed: %d/%u.\n", rv, total_bytes);
 		goto cleanup;
@@ -162,7 +167,8 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	iobase = xdev->bar[xcdev->bar] + xcdev->base;
 	/* set length register to 32 initially if more than one
-	 * word-transaction is to be done */
+	 * word-transaction is to be done
+	 */
 	if (total_bits >= 32)
 		write_register(0x20, iobase, XVC_BAR_LENGTH_REG);
@@ -173,7 +179,7 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		u32 tms_store = 0;
 		u32 tdi_store = 0;
 		u32 tdo_store = 0;
 		if (bits_left < 32) {
 			/* set number of bits to shift out */
 			write_register(bits_left, iobase, XVC_BAR_LENGTH_REG);
@@ -186,33 +192,33 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		/* Shift data out and copy to output buffer */
 		rv = xvc_shift_bits(iobase, tms_store, tdi_store, &tdo_store);
 		if (rv < 0)
-			goto cleanup;
+			break;
 		memcpy(tdo_buf + bytes, &tdo_store, shift_bytes);
 	}
+
+	if (rv < 0)
+		goto unlock;
 	/* if testing bar access swap tdi and tdo buffers to "loopback" */
 	if (opcode == 0x2) {
-		char *tmp = tdo_buf;
+		unsigned char *tmp = tdo_buf;
 		tdo_buf = tdi_buf;
 		tdi_buf = tmp;
 	}
-	rv = copy_to_user((void *)xvc_obj.tdo_buf, tdo_buf, total_bytes);
-	if (rv) {
+	rv = copy_to_user(xvc_obj.tdo_buf, (const void *)tdo_buf, total_bytes);
+	if (rv)
 		pr_info("copy back tdo_buf failed: %d/%u.\n", rv, total_bytes);
-		rv = -EFAULT;
-		goto cleanup;
-	}
-
-cleanup:
-	if (buffer)
-		kfree(buffer);
+
+unlock:
 	mmiowb();
 	spin_unlock(&xcdev->lock);
+
+cleanup:
+	kfree(buffer);
 	return rv;
 }
@@ -220,10 +226,10 @@ cleanup:
 * character device file operations for the XVC
 */
 static const struct file_operations xvc_fops = {
 	.owner = THIS_MODULE,
 	.open = char_open,
 	.release = char_close,
 	.unlocked_ioctl = xvc_ioctl,
 };
 void cdev_xvc_init(struct xdma_cdev *xcdev)
...
@@ -33,9 +33,9 @@
 struct xvc_ioc {
 	unsigned int opcode;
 	unsigned int length;
-	unsigned char *tms_buf;
-	unsigned char *tdi_buf;
-	unsigned char *tdo_buf;
+	const char __user *tms_buf;
+	const char __user *tdi_buf;
+	void __user *tdo_buf;
 };
 #define XDMA_IOCXVC _IOWR(XVC_MAGIC, 1, struct xvc_ioc)
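For reference, a hedged sketch of how a user-space XVC client would invoke this ioctl; the buffer sizing follows the (length + 7) >> 3 byte packing used in xvc_ioctl above, while the opcode value and device node are assumptions (only opcode 0x2, the loopback test, is visible in this diff):

```c
/* Hedged sketch: shift 64 TMS/TDI bits through the JTAG chain via XVC. */
#include <string.h>
#include <sys/ioctl.h>
#include "cdev_xvc.h"	/* struct xvc_ioc, XDMA_IOCXVC */

int shift64(int fd, const char *tms, const char *tdi, char *tdo)
{
	struct xvc_ioc ioc;

	memset(&ioc, 0, sizeof(ioc));
	ioc.opcode = 0x1;	/* assumed shift opcode; 0x2 loops tdi back to tdo */
	ioc.length = 64;	/* bits; driver copies (64 + 7) >> 3 = 8 bytes */
	ioc.tms_buf = tms;
	ioc.tdi_buf = tdi;
	ioc.tdo_buf = tdo;

	return ioctl(fd, XDMA_IOCXVC, &ioc);	/* fd: open("/dev/xdma0_xvc") */
}
```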
...
@@ -20,9 +20,9 @@
 #ifndef __XDMA_VERSION_H__
 #define __XDMA_VERSION_H__
-#define DRV_MOD_MAJOR 2018
-#define DRV_MOD_MINOR 3
-#define DRV_MOD_PATCHLEVEL 50
+#define DRV_MOD_MAJOR 2019
+#define DRV_MOD_MINOR 2
+#define DRV_MOD_PATCHLEVEL 51
 #define DRV_MODULE_VERSION \
 	__stringify(DRV_MOD_MAJOR) "." \
...
@@ -21,7 +21,9 @@
 #include "xdma_cdev.h"
-struct class *g_xdma_class;
+static struct class *g_xdma_class;
+
+struct kmem_cache *cdev_cache;
 enum cdev_type {
 	CHAR_USER,
@@ -48,12 +50,12 @@ static const char * const devnode_names[] = {
 };
 enum xpdev_flags_bits {
 	XDF_CDEV_USER,
 	XDF_CDEV_CTRL,
 	XDF_CDEV_XVC,
 	XDF_CDEV_EVENT,
 	XDF_CDEV_SG,
 	XDF_CDEV_BYPASS,
 };
static inline void xpdev_flag_set(struct xdma_pci_dev *xpdev, static inline void xpdev_flag_set(struct xdma_pci_dev *xpdev,
...@@ -75,16 +77,18 @@ static inline int xpdev_flag_test(struct xdma_pci_dev *xpdev, ...@@ -75,16 +77,18 @@ static inline int xpdev_flag_test(struct xdma_pci_dev *xpdev,
} }
#ifdef __XDMA_SYSFS__ #ifdef __XDMA_SYSFS__
ssize_t show_device_numbers(struct device *dev, struct device_attribute *attr, ssize_t xdma_dev_instance_show(struct device *dev,
char *buf) struct device_attribute *attr,
char *buf)
{ {
struct xdma_pci_dev *xpdev = (struct xdma_pci_dev *)dev_get_drvdata(dev); struct xdma_pci_dev *xpdev =
(struct xdma_pci_dev *)dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%d\t%d\n", return snprintf(buf, PAGE_SIZE, "%d\t%d\n",
xpdev->major, xpdev->xdev->idx); xpdev->major, xpdev->xdev->idx);
} }
static DEVICE_ATTR(xdma_dev_instance, S_IRUGO, show_device_numbers, NULL); static DEVICE_ATTR_RO(xdma_dev_instance);
#endif #endif
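The rename of the show routine above is forced by DEVICE_ATTR_RO(), which wires the attribute to a function named <attr>_show. The new declaration expands to approximately:

/* static DEVICE_ATTR_RO(xdma_dev_instance) is shorthand for roughly: */
static struct device_attribute dev_attr_xdma_dev_instance =
	__ATTR(xdma_dev_instance, 0444, xdma_dev_instance_show, NULL);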
static int config_kobject(struct xdma_cdev *xcdev, enum cdev_type type) static int config_kobject(struct xdma_cdev *xcdev, enum cdev_type type)
...@@ -98,7 +102,10 @@ static int config_kobject(struct xdma_cdev *xcdev, enum cdev_type type) ...@@ -98,7 +102,10 @@ static int config_kobject(struct xdma_cdev *xcdev, enum cdev_type type)
case CHAR_XDMA_C2H: case CHAR_XDMA_C2H:
case CHAR_BYPASS_H2C: case CHAR_BYPASS_H2C:
case CHAR_BYPASS_C2H: case CHAR_BYPASS_C2H:
BUG_ON(!engine); if (!engine) {
pr_err("Invalid DMA engine\n");
return rv;
}
rv = kobject_set_name(&xcdev->cdev.kobj, devnode_names[type], rv = kobject_set_name(&xcdev->cdev.kobj, devnode_names[type],
xdev->idx, engine->channel); xdev->idx, engine->channel);
break; break;
...@@ -129,22 +136,23 @@ int xcdev_check(const char *fname, struct xdma_cdev *xcdev, bool check_engine) ...@@ -129,22 +136,23 @@ int xcdev_check(const char *fname, struct xdma_cdev *xcdev, bool check_engine)
if (!xcdev || xcdev->magic != MAGIC_CHAR) { if (!xcdev || xcdev->magic != MAGIC_CHAR) {
pr_info("%s, xcdev 0x%p, magic 0x%lx.\n", pr_info("%s, xcdev 0x%p, magic 0x%lx.\n",
fname, xcdev, xcdev ? xcdev->magic : 0xFFFFFFFF); fname, xcdev, xcdev ? xcdev->magic : 0xFFFFFFFF);
return -EINVAL; return -EINVAL;
} }
xdev = xcdev->xdev; xdev = xcdev->xdev;
if (!xdev || xdev->magic != MAGIC_DEVICE) { if (!xdev || xdev->magic != MAGIC_DEVICE) {
pr_info("%s, xdev 0x%p, magic 0x%lx.\n", pr_info("%s, xdev 0x%p, magic 0x%lx.\n",
fname, xdev, xdev ? xdev->magic : 0xFFFFFFFF); fname, xdev, xdev ? xdev->magic : 0xFFFFFFFF);
return -EINVAL; return -EINVAL;
} }
if (check_engine) { if (check_engine) {
struct xdma_engine *engine = xcdev->engine; struct xdma_engine *engine = xcdev->engine;
if (!engine || engine->magic != MAGIC_ENGINE) { if (!engine || engine->magic != MAGIC_ENGINE) {
pr_info("%s, engine 0x%p, magic 0x%lx.\n", fname, pr_info("%s, engine 0x%p, magic 0x%lx.\n", fname,
engine, engine ? engine->magic : 0xFFFFFFFF); engine, engine ? engine->magic : 0xFFFFFFFF);
return -EINVAL; return -EINVAL;
} }
} }
...@@ -158,7 +166,11 @@ int char_open(struct inode *inode, struct file *file) ...@@ -158,7 +166,11 @@ int char_open(struct inode *inode, struct file *file)
/* pointer to containing structure of the character device inode */ /* pointer to containing structure of the character device inode */
xcdev = container_of(inode->i_cdev, struct xdma_cdev, cdev); xcdev = container_of(inode->i_cdev, struct xdma_cdev, cdev);
BUG_ON(xcdev->magic != MAGIC_CHAR); if (xcdev->magic != MAGIC_CHAR) {
pr_err("xcdev 0x%p inode 0x%lx magic mismatch 0x%lx\n",
xcdev, inode->i_ino, xcdev->magic);
return -EINVAL;
}
/* create a reference to our char device in the opened file */ /* create a reference to our char device in the opened file */
file->private_data = xcdev; file->private_data = xcdev;
...@@ -173,13 +185,30 @@ int char_close(struct inode *inode, struct file *file) ...@@ -173,13 +185,30 @@ int char_close(struct inode *inode, struct file *file)
struct xdma_dev *xdev; struct xdma_dev *xdev;
struct xdma_cdev *xcdev = (struct xdma_cdev *)file->private_data; struct xdma_cdev *xcdev = (struct xdma_cdev *)file->private_data;
BUG_ON(!xcdev); if (!xcdev) {
BUG_ON(xcdev->magic != MAGIC_CHAR); pr_err("char device with inode 0x%lx xcdev NULL\n",
inode->i_ino);
return -EINVAL;
}
if (xcdev->magic != MAGIC_CHAR) {
pr_err("xcdev 0x%p magic mismatch 0x%lx\n",
xcdev, xcdev->magic);
return -EINVAL;
}
/* fetch device specific data stored earlier during open */ /* fetch device specific data stored earlier during open */
xdev = xcdev->xdev; xdev = xcdev->xdev;
BUG_ON(!xdev); if (!xdev) {
BUG_ON(xdev->magic != MAGIC_DEVICE); pr_err("char device with inode 0x%lx xdev NULL\n",
inode->i_ino);
return -EINVAL;
}
if (xdev->magic != MAGIC_DEVICE) {
pr_err("xdev 0x%p magic mismatch 0x%lx\n", xdev, xdev->magic);
return -EINVAL;
}
return 0; return 0;
} }
...@@ -193,40 +222,52 @@ int char_close(struct inode *inode, struct file *file) ...@@ -193,40 +222,52 @@ int char_close(struct inode *inode, struct file *file)
static int create_sys_device(struct xdma_cdev *xcdev, enum cdev_type type) static int create_sys_device(struct xdma_cdev *xcdev, enum cdev_type type)
{ {
struct xdma_dev *xdev = xcdev->xdev; struct xdma_dev *xdev = xcdev->xdev;
struct xdma_engine *engine = xcdev->engine; struct xdma_engine *engine = xcdev->engine;
int last_param; int last_param;
if (type == CHAR_EVENTS) if (type == CHAR_EVENTS)
last_param = xcdev->bar; last_param = xcdev->bar;
else else
last_param = engine ? engine->channel : 0; last_param = engine ? engine->channel : 0;
xcdev->sys_device = device_create(g_xdma_class, &xdev->pdev->dev, xcdev->sys_device = device_create(g_xdma_class, &xdev->pdev->dev,
xcdev->cdevno, NULL, devnode_names[type], xdev->idx, xcdev->cdevno, NULL, devnode_names[type], xdev->idx,
last_param); last_param);
if (!xcdev->sys_device) { if (!xcdev->sys_device) {
pr_err("device_create(%s) failed\n", devnode_names[type]); pr_err("device_create(%s) failed\n", devnode_names[type]);
return -1; return -1;
} }
return 0; return 0;
} }
static int destroy_xcdev(struct xdma_cdev *cdev) static int destroy_xcdev(struct xdma_cdev *cdev)
{ {
if (!cdev) { if (!cdev) {
pr_warn("cdev NULL.\n"); pr_warn("cdev NULL.\n");
return 0; return -EINVAL;
} }
if (cdev->magic != MAGIC_CHAR) { if (cdev->magic != MAGIC_CHAR) {
pr_warn("cdev 0x%p magic mismatch 0x%lx\n", cdev, cdev->magic); pr_warn("cdev 0x%p magic mismatch 0x%lx\n", cdev, cdev->magic);
return 0; return -EINVAL;
}
if (!cdev->xdev) {
pr_err("xdev NULL\n");
return -EINVAL;
}
if (!g_xdma_class) {
pr_err("g_xdma_class NULL\n");
return -EINVAL;
}
if (!cdev->sys_device) {
pr_err("cdev sys_device NULL\n");
return -EINVAL;
} }
BUG_ON(!cdev->xdev);
BUG_ON(!g_xdma_class);
BUG_ON(!cdev->sys_device);
if (cdev->sys_device) if (cdev->sys_device)
device_destroy(g_xdma_class, cdev->cdevno); device_destroy(g_xdma_class, cdev->cdevno);
...@@ -343,52 +384,85 @@ unregister_region: ...@@ -343,52 +384,85 @@ unregister_region:
void xpdev_destroy_interfaces(struct xdma_pci_dev *xpdev) void xpdev_destroy_interfaces(struct xdma_pci_dev *xpdev)
{ {
int i; int i = 0;
int rv;
#ifdef __XDMA_SYSFS__ #ifdef __XDMA_SYSFS__
device_remove_file(&xpdev->pdev->dev, &dev_attr_xdma_dev_instance); device_remove_file(&xpdev->pdev->dev, &dev_attr_xdma_dev_instance);
#endif #endif
if (xpdev_flag_test(xpdev, XDF_CDEV_SG)) { if (xpdev_flag_test(xpdev, XDF_CDEV_SG)) {
/* iterate over channels */ /* iterate over channels */
for (i = 0; i < xpdev->h2c_channel_max; i++) for (i = 0; i < xpdev->h2c_channel_max; i++) {
/* remove SG DMA character device */ /* remove SG DMA character device */
destroy_xcdev(&xpdev->sgdma_h2c_cdev[i]); rv = destroy_xcdev(&xpdev->sgdma_h2c_cdev[i]);
for (i = 0; i < xpdev->c2h_channel_max; i++) if (rv < 0)
destroy_xcdev(&xpdev->sgdma_c2h_cdev[i]); pr_err("Failed to destroy h2c xcdev %d, error 0x%x\n",
i, rv);
}
for (i = 0; i < xpdev->c2h_channel_max; i++) {
rv = destroy_xcdev(&xpdev->sgdma_c2h_cdev[i]);
if (rv < 0)
pr_err("Failed to destroy c2h xcdev %d error 0x%x\n",
i, rv);
}
} }
if (xpdev_flag_test(xpdev, XDF_CDEV_EVENT)) { if (xpdev_flag_test(xpdev, XDF_CDEV_EVENT)) {
for (i = 0; i < xpdev->user_max; i++) for (i = 0; i < xpdev->user_max; i++) {
destroy_xcdev(&xpdev->events_cdev[i]); rv = destroy_xcdev(&xpdev->events_cdev[i]);
if (rv < 0)
pr_err("Failed to destroy cdev event %d error 0x%x\n",
i, rv);
}
} }
/* remove control character device */ /* remove control character device */
if (xpdev_flag_test(xpdev, XDF_CDEV_CTRL)) { if (xpdev_flag_test(xpdev, XDF_CDEV_CTRL)) {
destroy_xcdev(&xpdev->ctrl_cdev); rv = destroy_xcdev(&xpdev->ctrl_cdev);
if (rv < 0)
pr_err("Failed to destroy cdev ctrl event %d error 0x%x\n",
i, rv);
} }
/* remove user character device */ /* remove user character device */
if (xpdev_flag_test(xpdev, XDF_CDEV_USER)) { if (xpdev_flag_test(xpdev, XDF_CDEV_USER)) {
destroy_xcdev(&xpdev->user_cdev); rv = destroy_xcdev(&xpdev->user_cdev);
if (rv < 0)
pr_err("Failed to destroy user cdev %d error 0x%x\n",
i, rv);
} }
if (xpdev_flag_test(xpdev, XDF_CDEV_XVC)) { if (xpdev_flag_test(xpdev, XDF_CDEV_XVC)) {
destroy_xcdev(&xpdev->xvc_cdev); rv = destroy_xcdev(&xpdev->xvc_cdev);
if (rv < 0)
pr_err("Failed to destroy xvc cdev %d error 0x%x\n",
i, rv);
} }
if (xpdev_flag_test(xpdev, XDF_CDEV_BYPASS)) { if (xpdev_flag_test(xpdev, XDF_CDEV_BYPASS)) {
/* iterate over channels */ /* iterate over channels */
for (i = 0; i < xpdev->h2c_channel_max; i++) for (i = 0; i < xpdev->h2c_channel_max; i++) {
/* remove DMA Bypass character device */ /* remove DMA Bypass character device */
destroy_xcdev(&xpdev->bypass_h2c_cdev[i]); rv = destroy_xcdev(&xpdev->bypass_h2c_cdev[i]);
for (i = 0; i < xpdev->c2h_channel_max; i++) if (rv < 0)
destroy_xcdev(&xpdev->bypass_c2h_cdev[i]); pr_err("Failed to destroy bypass h2c cdev %d error 0x%x\n",
destroy_xcdev(&xpdev->bypass_cdev_base); i, rv);
}
for (i = 0; i < xpdev->c2h_channel_max; i++) {
rv = destroy_xcdev(&xpdev->bypass_c2h_cdev[i]);
if (rv < 0)
pr_err("Failed to destroy bypass c2h %d error 0x%x\n",
i, rv);
}
rv = destroy_xcdev(&xpdev->bypass_cdev_base);
if (rv < 0)
pr_err("Failed to destroy base cdev\n");
} }
if (xpdev->major) if (xpdev->major)
unregister_chrdev_region(MKDEV(xpdev->major, XDMA_MINOR_BASE), XDMA_MINOR_COUNT); unregister_chrdev_region(
MKDEV(xpdev->major, XDMA_MINOR_BASE),
XDMA_MINOR_COUNT);
} }
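The unregister_chrdev_region() call above must mirror the reservation made when the interfaces were created. A sketch of the pairing, assuming alloc_chrdev_region() is the reservation side (the create path sits outside this hunk):

/* sketch: reserve/release must use the same minor base and count */
dev_t dev;

if (alloc_chrdev_region(&dev, XDMA_MINOR_BASE, XDMA_MINOR_COUNT,
			XDMA_NODE_NAME) == 0)
	xpdev->major = MAJOR(dev);
/* ... teardown mirrors it, as above: */
unregister_chrdev_region(MKDEV(xpdev->major, XDMA_MINOR_BASE),
			 XDMA_MINOR_COUNT);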
int xpdev_create_interfaces(struct xdma_pci_dev *xpdev) int xpdev_create_interfaces(struct xdma_pci_dev *xpdev)
...@@ -450,7 +524,7 @@ int xpdev_create_interfaces(struct xdma_pci_dev *xpdev) ...@@ -450,7 +524,7 @@ int xpdev_create_interfaces(struct xdma_pci_dev *xpdev)
/* ??? Bypass */ /* ??? Bypass */
/* Initialize Bypass Character Device */ /* Initialize Bypass Character Device */
if (xdev->bypass_bar_idx > 0){ if (xdev->bypass_bar_idx > 0) {
for (i = 0; i < xpdev->h2c_channel_max; i++) { for (i = 0; i < xpdev->h2c_channel_max; i++) {
engine = &xdev->engine_h2c[i]; engine = &xdev->engine_h2c[i];
...@@ -515,7 +589,7 @@ int xpdev_create_interfaces(struct xdma_pci_dev *xpdev) ...@@ -515,7 +589,7 @@ int xpdev_create_interfaces(struct xdma_pci_dev *xpdev)
rv = device_create_file(&xpdev->pdev->dev, rv = device_create_file(&xpdev->pdev->dev,
&dev_attr_xdma_dev_instance); &dev_attr_xdma_dev_instance);
if (rv) { if (rv) {
pr_err("Failed to create device file \n"); pr_err("Failed to create device file\n");
goto fail; goto fail;
} }
#endif #endif
...@@ -531,16 +605,34 @@ fail: ...@@ -531,16 +605,34 @@ fail:
int xdma_cdev_init(void) int xdma_cdev_init(void)
{ {
g_xdma_class = class_create(THIS_MODULE, XDMA_NODE_NAME); g_xdma_class = class_create(THIS_MODULE, XDMA_NODE_NAME);
if (IS_ERR(g_xdma_class)) { if (IS_ERR(g_xdma_class)) {
dbg_init(XDMA_NODE_NAME ": failed to create class"); dbg_init(XDMA_NODE_NAME ": failed to create class");
return -1; return -1;
} }
/* using kmem_cache_create to enable sequential cleanup */
cdev_cache = kmem_cache_create("cdev_cache",
sizeof(struct cdev_async_io),
0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!cdev_cache) {
pr_err("memory allocation for cdev_cache failed, OOM\n");
class_destroy(g_xdma_class);
return -ENOMEM;
}
xdma_threads_create(8);
return 0; return 0;
} }
void xdma_cdev_cleanup(void) void xdma_cdev_cleanup(void)
{ {
if (cdev_cache)
kmem_cache_destroy(cdev_cache);
if (g_xdma_class) if (g_xdma_class)
class_destroy(g_xdma_class); class_destroy(g_xdma_class);
xdma_threads_destroy();
} }
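cdev_cache is sized for struct cdev_async_io, so each async request can be carved out of the slab and returned on completion. A sketch of the expected alloc/free pairing (the actual call sites are outside this hunk):

/* sketch: per-request object from the cdev_cache slab */
struct cdev_async_io *caio;

caio = kmem_cache_zalloc(cdev_cache, GFP_KERNEL);
if (!caio)
	return -ENOMEM;
/* ... submit the transfers, complete the kiocb ... */
kmem_cache_free(cdev_cache, caio);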
...@@ -35,13 +35,13 @@ int xdma_cdev_init(void); ...@@ -35,13 +35,13 @@ int xdma_cdev_init(void);
int char_open(struct inode *inode, struct file *file); int char_open(struct inode *inode, struct file *file);
int char_close(struct inode *inode, struct file *file); int char_close(struct inode *inode, struct file *file);
int xcdev_check(const char *, struct xdma_cdev *, bool); int xcdev_check(const char *fname, struct xdma_cdev *xcdev, bool check_engine);
void cdev_ctrl_init(struct xdma_cdev *xcdev); void cdev_ctrl_init(struct xdma_cdev *xcdev);
void cdev_xvc_init(struct xdma_cdev *xcdev); void cdev_xvc_init(struct xdma_cdev *xcdev);
void cdev_event_init(struct xdma_cdev *xcdev); void cdev_event_init(struct xdma_cdev *xcdev);
void cdev_sgdma_init(struct xdma_cdev *xcdev); void cdev_sgdma_init(struct xdma_cdev *xcdev);
void cdev_bypass_init(struct xdma_cdev *xcdev); void cdev_bypass_init(struct xdma_cdev *xcdev);
long char_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
void xpdev_destroy_interfaces(struct xdma_pci_dev *xpdev); void xpdev_destroy_interfaces(struct xdma_pci_dev *xpdev);
int xpdev_create_interfaces(struct xdma_pci_dev *xpdev); int xpdev_create_interfaces(struct xdma_pci_dev *xpdev);
......
...@@ -40,51 +40,51 @@ static char version[] = ...@@ -40,51 +40,51 @@ static char version[] =
MODULE_AUTHOR("Xilinx, Inc."); MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC); MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION); MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL"); MODULE_LICENSE("Dual BSD/GPL");
/* SECTION: Module global variables */ /* SECTION: Module global variables */
static int xpdev_cnt = 0; static int xpdev_cnt;
static const struct pci_device_id pci_ids[] = { static const struct pci_device_id pci_ids[] = {
{ PCI_DEVICE(0x10ee, 0x903f), }, { PCI_DEVICE(0x10ee, 0x903f), },
{ PCI_DEVICE(0x10ee, 0x9038), }, { PCI_DEVICE(0x10ee, 0x9038), },
{ PCI_DEVICE(0x10ee, 0x9028), }, { PCI_DEVICE(0x10ee, 0x9028), },
{ PCI_DEVICE(0x10ee, 0x9018), }, { PCI_DEVICE(0x10ee, 0x9018), },
{ PCI_DEVICE(0x10ee, 0x9034), }, { PCI_DEVICE(0x10ee, 0x9034), },
{ PCI_DEVICE(0x10ee, 0x9024), }, { PCI_DEVICE(0x10ee, 0x9024), },
{ PCI_DEVICE(0x10ee, 0x9014), }, { PCI_DEVICE(0x10ee, 0x9014), },
{ PCI_DEVICE(0x10ee, 0x9032), }, { PCI_DEVICE(0x10ee, 0x9032), },
{ PCI_DEVICE(0x10ee, 0x9022), }, { PCI_DEVICE(0x10ee, 0x9022), },
{ PCI_DEVICE(0x10ee, 0x9012), }, { PCI_DEVICE(0x10ee, 0x9012), },
{ PCI_DEVICE(0x10ee, 0x9031), }, { PCI_DEVICE(0x10ee, 0x9031), },
{ PCI_DEVICE(0x10ee, 0x9021), }, { PCI_DEVICE(0x10ee, 0x9021), },
{ PCI_DEVICE(0x10ee, 0x9011), }, { PCI_DEVICE(0x10ee, 0x9011), },
{ PCI_DEVICE(0x10ee, 0x8011), }, { PCI_DEVICE(0x10ee, 0x8011), },
{ PCI_DEVICE(0x10ee, 0x8012), }, { PCI_DEVICE(0x10ee, 0x8012), },
{ PCI_DEVICE(0x10ee, 0x8014), }, { PCI_DEVICE(0x10ee, 0x8014), },
{ PCI_DEVICE(0x10ee, 0x8018), }, { PCI_DEVICE(0x10ee, 0x8018), },
{ PCI_DEVICE(0x10ee, 0x8021), }, { PCI_DEVICE(0x10ee, 0x8021), },
{ PCI_DEVICE(0x10ee, 0x8022), }, { PCI_DEVICE(0x10ee, 0x8022), },
{ PCI_DEVICE(0x10ee, 0x8024), }, { PCI_DEVICE(0x10ee, 0x8024), },
{ PCI_DEVICE(0x10ee, 0x8028), }, { PCI_DEVICE(0x10ee, 0x8028), },
{ PCI_DEVICE(0x10ee, 0x8031), }, { PCI_DEVICE(0x10ee, 0x8031), },
{ PCI_DEVICE(0x10ee, 0x8032), }, { PCI_DEVICE(0x10ee, 0x8032), },
{ PCI_DEVICE(0x10ee, 0x8034), }, { PCI_DEVICE(0x10ee, 0x8034), },
{ PCI_DEVICE(0x10ee, 0x8038), }, { PCI_DEVICE(0x10ee, 0x8038), },
{ PCI_DEVICE(0x10ee, 0x7011), }, { PCI_DEVICE(0x10ee, 0x7011), },
{ PCI_DEVICE(0x10ee, 0x7012), }, { PCI_DEVICE(0x10ee, 0x7012), },
{ PCI_DEVICE(0x10ee, 0x7014), }, { PCI_DEVICE(0x10ee, 0x7014), },
{ PCI_DEVICE(0x10ee, 0x7018), }, { PCI_DEVICE(0x10ee, 0x7018), },
{ PCI_DEVICE(0x10ee, 0x7021), }, { PCI_DEVICE(0x10ee, 0x7021), },
{ PCI_DEVICE(0x10ee, 0x7022), }, { PCI_DEVICE(0x10ee, 0x7022), },
{ PCI_DEVICE(0x10ee, 0x7024), }, { PCI_DEVICE(0x10ee, 0x7024), },
{ PCI_DEVICE(0x10ee, 0x7028), }, { PCI_DEVICE(0x10ee, 0x7028), },
{ PCI_DEVICE(0x10ee, 0x7031), }, { PCI_DEVICE(0x10ee, 0x7031), },
{ PCI_DEVICE(0x10ee, 0x7032), }, { PCI_DEVICE(0x10ee, 0x7032), },
{ PCI_DEVICE(0x10ee, 0x7034), }, { PCI_DEVICE(0x10ee, 0x7034), },
{ PCI_DEVICE(0x10ee, 0x7038), }, { PCI_DEVICE(0x10ee, 0x7038), },
{ PCI_DEVICE(0x10ee, 0x6828), }, { PCI_DEVICE(0x10ee, 0x6828), },
{ PCI_DEVICE(0x10ee, 0x6830), }, { PCI_DEVICE(0x10ee, 0x6830), },
...@@ -125,7 +125,7 @@ static void xpdev_free(struct xdma_pci_dev *xpdev) ...@@ -125,7 +125,7 @@ static void xpdev_free(struct xdma_pci_dev *xpdev)
static struct xdma_pci_dev *xpdev_alloc(struct pci_dev *pdev) static struct xdma_pci_dev *xpdev_alloc(struct pci_dev *pdev)
{ {
struct xdma_pci_dev *xpdev = kmalloc(sizeof(*xpdev), GFP_KERNEL); struct xdma_pci_dev *xpdev = kmalloc(sizeof(*xpdev), GFP_KERNEL);
if (!xpdev) if (!xpdev)
return NULL; return NULL;
...@@ -154,14 +154,28 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -154,14 +154,28 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
hndl = xdma_device_open(DRV_MODULE_NAME, pdev, &xpdev->user_max, hndl = xdma_device_open(DRV_MODULE_NAME, pdev, &xpdev->user_max,
&xpdev->h2c_channel_max, &xpdev->c2h_channel_max); &xpdev->h2c_channel_max, &xpdev->c2h_channel_max);
if (!hndl){ if (!hndl) {
rv = -EINVAL; rv = -EINVAL;
goto err_out; goto err_out;
} }
BUG_ON(xpdev->user_max > MAX_USER_IRQ); if (xpdev->user_max > MAX_USER_IRQ) {
BUG_ON(xpdev->h2c_channel_max > XDMA_CHANNEL_NUM_MAX); pr_err("Maximum users limit reached\n");
BUG_ON(xpdev->c2h_channel_max > XDMA_CHANNEL_NUM_MAX); rv = -EINVAL;
goto err_out;
}
if (xpdev->h2c_channel_max > XDMA_CHANNEL_NUM_MAX) {
pr_err("Maximun H2C channel limit reached\n");
rv = -EINVAL;
goto err_out;
}
if (xpdev->c2h_channel_max > XDMA_CHANNEL_NUM_MAX) {
pr_err("Maximun C2H channel limit reached\n");
rv = -EINVAL;
goto err_out;
}
if (!xpdev->h2c_channel_max && !xpdev->c2h_channel_max) if (!xpdev->h2c_channel_max && !xpdev->c2h_channel_max)
pr_warn("NO engine found!\n"); pr_warn("NO engine found!\n");
...@@ -181,7 +195,12 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -181,7 +195,12 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
rv = -EINVAL; rv = -EINVAL;
goto err_out; goto err_out;
} }
BUG_ON(hndl != xdev );
if (hndl != xdev) {
pr_err("xdev handle mismatch\n");
rv = -EINVAL;
goto err_out;
}
pr_info("%s xdma%d, pdev 0x%p, xdev 0x%p, 0x%p, usr %d, ch %d,%d.\n", pr_info("%s xdma%d, pdev 0x%p, xdev 0x%p, 0x%p, usr %d, ch %d,%d.\n",
dev_name(&pdev->dev), xdev->idx, pdev, xpdev, xdev, dev_name(&pdev->dev), xdev->idx, pdev, xpdev, xdev,
...@@ -198,7 +217,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -198,7 +217,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0; return 0;
err_out: err_out:
pr_err("pdev 0x%p, err %d.\n", pdev, rv); pr_err("pdev 0x%p, err %d.\n", pdev, rv);
xpdev_free(xpdev); xpdev_free(xpdev);
return rv; return rv;
...@@ -219,7 +238,7 @@ static void remove_one(struct pci_dev *pdev) ...@@ -219,7 +238,7 @@ static void remove_one(struct pci_dev *pdev)
pdev, xpdev, xpdev->xdev); pdev, xpdev, xpdev->xdev);
xpdev_free(xpdev); xpdev_free(xpdev);
dev_set_drvdata(&pdev->dev, NULL); dev_set_drvdata(&pdev->dev, NULL);
} }
static pci_ers_result_t xdma_error_detected(struct pci_dev *pdev, static pci_ers_result_t xdma_error_detected(struct pci_dev *pdev,
...@@ -270,7 +289,7 @@ static void xdma_error_resume(struct pci_dev *pdev) ...@@ -270,7 +289,7 @@ static void xdma_error_resume(struct pci_dev *pdev)
pci_cleanup_aer_uncorrect_error_status(pdev); pci_cleanup_aer_uncorrect_error_status(pdev);
} }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0) #if KERNEL_VERSION(4, 13, 0) <= LINUX_VERSION_CODE
static void xdma_reset_prepare(struct pci_dev *pdev) static void xdma_reset_prepare(struct pci_dev *pdev)
{ {
struct xdma_pci_dev *xpdev = dev_get_drvdata(&pdev->dev); struct xdma_pci_dev *xpdev = dev_get_drvdata(&pdev->dev);
...@@ -287,7 +306,7 @@ static void xdma_reset_done(struct pci_dev *pdev) ...@@ -287,7 +306,7 @@ static void xdma_reset_done(struct pci_dev *pdev)
xdma_device_online(pdev, xpdev->xdev); xdma_device_online(pdev, xpdev->xdev);
} }
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0) #elif KERNEL_VERSION(3, 16, 0) <= LINUX_VERSION_CODE
static void xdma_reset_notify(struct pci_dev *pdev, bool prepare) static void xdma_reset_notify(struct pci_dev *pdev, bool prepare)
{ {
struct xdma_pci_dev *xpdev = dev_get_drvdata(&pdev->dev); struct xdma_pci_dev *xpdev = dev_get_drvdata(&pdev->dev);
...@@ -305,10 +324,10 @@ static const struct pci_error_handlers xdma_err_handler = { ...@@ -305,10 +324,10 @@ static const struct pci_error_handlers xdma_err_handler = {
.error_detected = xdma_error_detected, .error_detected = xdma_error_detected,
.slot_reset = xdma_slot_reset, .slot_reset = xdma_slot_reset,
.resume = xdma_error_resume, .resume = xdma_error_resume,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0) #if KERNEL_VERSION(4, 13, 0) <= LINUX_VERSION_CODE
.reset_prepare = xdma_reset_prepare, .reset_prepare = xdma_reset_prepare,
.reset_done = xdma_reset_done, .reset_done = xdma_reset_done,
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0) #elif KERNEL_VERSION(3, 16, 0) <= LINUX_VERSION_CODE
.reset_notify = xdma_reset_notify, .reset_notify = xdma_reset_notify,
#endif #endif
}; };
...@@ -324,8 +343,6 @@ static struct pci_driver pci_driver = { ...@@ -324,8 +343,6 @@ static struct pci_driver pci_driver = {
static int __init xdma_mod_init(void) static int __init xdma_mod_init(void)
{ {
int rv; int rv;
extern unsigned int desc_blen_max;
extern unsigned int sgdma_timeout;
pr_info("%s", version); pr_info("%s", version);
......
...@@ -44,14 +44,19 @@ ...@@ -44,14 +44,19 @@
#include <linux/splice.h> #include <linux/splice.h>
#include <linux/version.h> #include <linux/version.h>
#include <linux/uio.h> #include <linux/uio.h>
#include <linux/spinlock_types.h>
#include "libxdma.h" #include "libxdma.h"
#include "xdma_thread.h"
#define MAGIC_ENGINE 0xEEEEEEEEUL #define MAGIC_ENGINE 0xEEEEEEEEUL
#define MAGIC_DEVICE 0xDDDDDDDDUL #define MAGIC_DEVICE 0xDDDDDDDDUL
#define MAGIC_CHAR 0xCCCCCCCCUL #define MAGIC_CHAR 0xCCCCCCCCUL
#define MAGIC_BITSTREAM 0xBBBBBBBBUL #define MAGIC_BITSTREAM 0xBBBBBBBBUL
extern unsigned int desc_blen_max;
extern unsigned int sgdma_timeout;
struct xdma_cdev { struct xdma_cdev {
unsigned long magic; /* structure ID for sanity checks */ unsigned long magic; /* structure ID for sanity checks */
struct xdma_pci_dev *xpdev; struct xdma_pci_dev *xpdev;
...@@ -94,12 +99,19 @@ struct xdma_pci_dev { ...@@ -94,12 +99,19 @@ struct xdma_pci_dev {
void *data; void *data;
}; };
struct xdma_io_cb { struct cdev_async_io {
void __user *buf; struct kiocb *iocb;
size_t len; struct xdma_io_cb *cb;
unsigned int pages_nr; bool write;
struct sg_table sgt; bool cancel;
struct page **pages; int cmpl_cnt;
int req_cnt;
spinlock_t lock;
struct work_struct wrk_itm;
struct cdev_async_io *next;
ssize_t res;
ssize_t res2;
int err_cnt;
}; };
#endif /* ifndef __XDMA_MODULE_H__ */ #endif /* ifndef __XDMA_MODULE_H__ */
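The req_cnt/cmpl_cnt pair in struct cdev_async_io implies the usual fan-out bookkeeping: one aio request splits into req_cnt transfers and completes only when the last one reports in. A sketch under that assumption (the helper name is hypothetical):

/* sketch: completion accounting implied by req_cnt/cmpl_cnt */
static bool caio_transfer_done(struct cdev_async_io *caio)
{
	bool done;

	spin_lock(&caio->lock);
	done = (++caio->cmpl_cnt == caio->req_cnt);
	spin_unlock(&caio->lock);
	return done;	/* caller completes caio->iocb when true */
}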
/*
* This file is part of the Xilinx DMA IP Core driver for Linux
*
* Copyright (c) 2017-present, Xilinx, Inc.
* All rights reserved.
*
* This source code is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include "xdma_thread.h"
#include <linux/kernel.h>
#include <linux/slab.h>
/* ********************* global variables *********************************** */
static struct xdma_kthread *cs_threads;
static unsigned int thread_cnt;
/* ********************* static function definitions ************************ */
static int xdma_thread_cmpl_status_pend(struct list_head *work_item)
{
struct xdma_engine *engine = list_entry(work_item, struct xdma_engine,
cmplthp_list);
int pend = 0;
unsigned long flags;
spin_lock_irqsave(&engine->lock, flags);
pend = !list_empty(&engine->transfer_list);
spin_unlock_irqrestore(&engine->lock, flags);
return pend;
}
static int xdma_thread_cmpl_status_proc(struct list_head *work_item)
{
struct xdma_engine *engine;
struct xdma_transfer *transfer;
engine = list_entry(work_item, struct xdma_engine, cmplthp_list);
transfer = list_entry(engine->transfer_list.next, struct xdma_transfer,
entry);
engine_service_poll(engine, transfer->desc_num);
return 0;
}
static inline int xthread_work_pending(struct xdma_kthread *thp)
{
struct list_head *work_item, *next;
/* any work items assigned to this thread? */
if (list_empty(&thp->work_list))
return 0;
/* any work item has pending work to do? */
list_for_each_safe(work_item, next, &thp->work_list) {
if (thp->fpending && thp->fpending(work_item))
return 1;
}
return 0;
}
static inline void xthread_reschedule(struct xdma_kthread *thp)
{
if (thp->timeout) {
pr_debug_thread("%s rescheduling for %u seconds",
thp->name, thp->timeout);
wait_event_interruptible_timeout(thp->waitq, thp->schedule,
msecs_to_jiffies(thp->timeout));
} else {
pr_debug_thread("%s rescheduling", thp->name);
wait_event_interruptible(thp->waitq, thp->schedule);
}
}
static int xthread_main(void *data)
{
struct xdma_kthread *thp = (struct xdma_kthread *)data;
pr_debug_thread("%s UP.\n", thp->name);
disallow_signal(SIGPIPE);
if (thp->finit)
thp->finit(thp);
while (!kthread_should_stop()) {
struct list_head *work_item, *next;
pr_debug_thread("%s interruptible\n", thp->name);
/* any work to do? */
lock_thread(thp);
if (!xthread_work_pending(thp)) {
unlock_thread(thp);
xthread_reschedule(thp);
lock_thread(thp);
}
thp->schedule = 0;
if (thp->work_cnt) {
pr_debug_thread("%s processing %u work items\n",
thp->name, thp->work_cnt);
/* do work */
list_for_each_safe(work_item, next, &thp->work_list) {
thp->fproc(work_item);
}
}
unlock_thread(thp);
schedule();
}
pr_debug_thread("%s, work done.\n", thp->name);
if (thp->fdone)
thp->fdone(thp);
pr_debug_thread("%s, exit.\n", thp->name);
return 0;
}
int xdma_kthread_start(struct xdma_kthread *thp, char *name, int id)
{
int len;
if (thp->task) {
pr_warn("kthread %s task already running?\n", thp->name);
return -EINVAL;
}
len = snprintf(thp->name, sizeof(thp->name), "%s%d", name, id);
if (len < 0)
return -EINVAL;
thp->id = id;
spin_lock_init(&thp->lock);
INIT_LIST_HEAD(&thp->work_list);
init_waitqueue_head(&thp->waitq);
thp->task = kthread_create_on_node(xthread_main, (void *)thp,
cpu_to_node(thp->cpu), "%s", thp->name);
if (IS_ERR(thp->task)) {
pr_err("kthread %s, create task failed: 0x%lx\n",
thp->name, (unsigned long)IS_ERR(thp->task));
thp->task = NULL;
return -EFAULT;
}
kthread_bind(thp->task, thp->cpu);
pr_debug_thread("kthread 0x%p, %s, cpu %u, task 0x%p.\n",
thp, thp->name, thp->cpu, thp->task);
wake_up_process(thp->task);
return 0;
}
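The create/bind/wake split above exists because kthread_bind() must run before the task is first scheduled; the usual one-step helper would forfeit the CPU affinity:

/* contrast: kthread_run() creates AND wakes in one step, leaving no
 * window in which to call kthread_bind() */
thp->task = kthread_run(xthread_main, thp, "%s", thp->name);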
int xdma_kthread_stop(struct xdma_kthread *thp)
{
int rv;
if (!thp->task) {
pr_debug_thread("kthread %s, already stopped.\n", thp->name);
return 0;
}
thp->schedule = 1;
rv = kthread_stop(thp->task);
if (rv < 0) {
pr_warn("kthread %s, stop err %d.\n", thp->name, rv);
return rv;
}
pr_debug_thread("kthread %s, 0x%p, stopped.\n", thp->name, thp->task);
thp->task = NULL;
return 0;
}
void xdma_thread_remove_work(struct xdma_engine *engine)
{
struct xdma_kthread *cmpl_thread;
unsigned long flags;
spin_lock_irqsave(&engine->lock, flags);
cmpl_thread = engine->cmplthp;
engine->cmplthp = NULL;
// pr_debug("%s removing from thread %s, %u.\n",
// descq->conf.name, cmpl_thread ? cmpl_thread->name : "?",
// cpu_idx);
spin_unlock_irqrestore(&engine->lock, flags);
#if 0
if (cpu_idx < cpu_count) {
spin_lock(&qcnt_lock);
per_cpu_qcnt[cpu_idx]--;
spin_unlock(&qcnt_lock);
}
#endif
if (cmpl_thread) {
lock_thread(cmpl_thread);
list_del(&engine->cmplthp_list);
cmpl_thread->work_cnt--;
unlock_thread(cmpl_thread);
}
}
void xdma_thread_add_work(struct xdma_engine *engine)
{
struct xdma_kthread *thp = cs_threads;
unsigned int v = 0;
int i, idx = thread_cnt;
unsigned long flags;
/* Polled mode only */
for (i = 0; i < thread_cnt; i++, thp++) {
lock_thread(thp);
if (idx == thread_cnt) {
v = thp->work_cnt;
idx = i;
} else if (!thp->work_cnt) {
idx = i;
unlock_thread(thp);
break;
} else if (thp->work_cnt < v) {
/* lighter than the best so far: remember the new minimum */
v = thp->work_cnt;
idx = i;
}
unlock_thread(thp);
}
thp = cs_threads + idx;
lock_thread(thp);
list_add_tail(&engine->cmplthp_list, &thp->work_list);
engine->intr_work_cpu = idx;
thp->work_cnt++;
unlock_thread(thp);
pr_info("%s 0x%p assigned to cmpl status thread %s,%u.\n",
engine->name, engine, thp->name, thp->work_cnt);
spin_lock_irqsave(&engine->lock, flags);
engine->cmplthp = thp;
spin_unlock_irqrestore(&engine->lock, flags);
}
int xdma_threads_create(unsigned int num_threads)
{
struct xdma_kthread *thp;
int i;
int rv;
if (thread_cnt) {
pr_warn("threads already created!");
return 0;
}
pr_info("xdma_threads_create\n");
thread_cnt = num_threads;
cs_threads = kcalloc(thread_cnt, sizeof(struct xdma_kthread),
GFP_KERNEL);
if (!cs_threads)
return -ENOMEM;
/* N dma writeback monitoring threads */
thp = cs_threads;
for (i = 0; i < thread_cnt; i++, thp++) {
thp->cpu = i;
thp->timeout = 0;
thp->fproc = xdma_thread_cmpl_status_proc;
thp->fpending = xdma_thread_cmpl_status_pend;
rv = xdma_kthread_start(thp, "cmpl_status_th", i);
if (rv < 0)
goto cleanup_threads;
}
return 0;
cleanup_threads:
kfree(cs_threads);
cs_threads = NULL;
thread_cnt = 0;
return rv;
}
void xdma_threads_destroy(void)
{
int i;
struct xdma_kthread *thp;
if (!thread_cnt)
return;
/* N dma writeback monitoring threads */
thp = cs_threads;
for (i = 0; i < thread_cnt; i++, thp++)
if (thp->fproc)
xdma_kthread_stop(thp);
kfree(cs_threads);
cs_threads = NULL;
thread_cnt = 0;
}
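Usage follows xdma_cdev_init()/xdma_cdev_cleanup() earlier in this change: the pool is created once at module init and torn down at exit.

/* as wired up in xdma_cdev_init() / xdma_cdev_cleanup() above */
xdma_threads_create(8);	/* 8 "cmpl_status_th" pollers, bound to cpus 0..7 */
/* ... module lifetime ... */
xdma_threads_destroy();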
/*
* This file is part of the Xilinx DMA IP Core driver for Linux
*
* Copyright (c) 2017-present, Xilinx, Inc.
* All rights reserved.
*
* This source code is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*/
#ifndef __XDMA_KTHREAD_H__
#define __XDMA_KTHREAD_H__
/**
* @file
* @brief This file contains the declarations for xdma kernel threads
*
*/
#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/cpuset.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include "libxdma.h"
#ifdef DEBUG_THREADS
#define lock_thread(thp) \
do { \
pr_debug("locking thp %s ...\n", (thp)->name); \
spin_lock(&(thp)->lock); \
} while (0)
#define unlock_thread(thp) \
do { \
pr_debug("unlock thp %s ...\n", (thp)->name); \
spin_unlock(&(thp)->lock); \
} while (0)
#define xdma_kthread_wakeup(thp) \
do { \
pr_info("signaling thp %s ...\n", (thp)->name); \
wake_up_process((thp)->task); \
} while (0)
#define pr_debug_thread(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
#else
/** lock thread macro */
#define lock_thread(thp) spin_lock(&(thp)->lock)
/** unlock thread macro */
#define unlock_thread(thp) spin_unlock(&(thp)->lock)
#define xdma_kthread_wakeup(thp) \
do { \
thp->schedule = 1; \
wake_up_interruptible(&thp->waitq); \
} while (0)
/** pr_debug_thread */
#define pr_debug_thread(fmt, ...)
#endif
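A sketch of the producer side these macros anticipate: queue a work item under the thread lock, then nudge the poller (xdma_thread_add_work() itself omits the wakeup, since poll-mode threads also recheck on their own):

/* sketch: hand an engine to a poller thread, then wake it */
lock_thread(thp);
list_add_tail(&engine->cmplthp_list, &thp->work_list);
thp->work_cnt++;
unlock_thread(thp);
xdma_kthread_wakeup(thp);	/* sets thp->schedule and wakes thp->waitq */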
/**
* @struct - xdma_kthread
* @brief xdma thread book keeping parameters
*/
struct xdma_kthread {
/** thread lock*/
spinlock_t lock;
/** name of the thread */
char name[16];
/** cpu to which the thread is bound */
unsigned short cpu;
/** thread id */
unsigned short id;
/** thread sleep timeout, in milliseconds (0 = block until woken) */
unsigned int timeout;
/** flags for thread */
unsigned long flag;
/** thread wait queue */
wait_queue_head_t waitq;
/** flag to indicate scheduling of thread */
unsigned int schedule;
/** kernel task structure associated with thread*/
struct task_struct *task;
/** thread work list count */
unsigned int work_cnt;
/** list of work items assigned to the thread */
struct list_head work_list;
/** thread initialization handler */
int (*finit)(struct xdma_kthread *);
/** thread pending handler */
int (*fpending)(struct list_head *);
/** thread processing handler */
int (*fproc)(struct list_head *);
/** thread done handler */
int (*fdone)(struct xdma_kthread *);
};
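The four handler slots are what give the generic loop in xthread_main() its behavior; xdma_threads_create() wires only the two it needs:

/* as in xdma_threads_create(): only the pending/proc handlers are set */
thp->fpending = xdma_thread_cmpl_status_pend;	/* "any work queued?" */
thp->fproc = xdma_thread_cmpl_status_proc;	/* service one engine */
thp->timeout = 0;				/* block until woken */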
/*****************************************************************************/
/**
* xdma_threads_create() - create the xdma completion-status threads
*
* @param[in]	num_threads:	number of threads to create
*
* @return	0 on success, negative errno on failure
*****************************************************************************/
int xdma_threads_create(unsigned int num_threads);
/*****************************************************************************/
/**
* xdma_threads_destroy() - destroy all the xdma threads created
* during system initialization
*
* @return none
*****************************************************************************/
void xdma_threads_destroy(void);
/*****************************************************************************/
/**
* xdma_thread_remove_work() - handler to remove the attached work thread
*
* @param[in] engine: pointer to xdma_engine
*
* @return none
*****************************************************************************/
void xdma_thread_remove_work(struct xdma_engine *engine);
/*****************************************************************************/
/**
* xdma_thread_add_work() - handler to add a work thread
*
* @param[in] engine: pointer to xdma_engine
*
* @return none
*****************************************************************************/
void xdma_thread_add_work(struct xdma_engine *engine);
#endif /* #ifndef __XDMA_KTHREAD_H__ */