Commit 5c7b8b10 authored by Karen Xie

XDMA: fix code format/style warnings

parent 017b4bd9
@@ -57,9 +57,9 @@
  * interrupts per engine, rad2_vul.sv:237
  * .REG_IRQ_OUT (reg_irq_from_ch[(channel*2) +: 2]),
  */
-#define XDMA_ENG_IRQ_NUM (1)
-#define MAX_EXTRA_ADJ 0x3F
-#define RX_STATUS_EOP (1)
+#define XDMA_ENG_IRQ_NUM (1)
+#define MAX_EXTRA_ADJ (0x3F)
+#define RX_STATUS_EOP (1)
 
 /* Target internal components on XDMA control BAR */
 #define XDMA_OFS_INT_CTRL (0x2000UL)
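
An aside on the MAX_EXTRA_ADJ change above: wrapping a macro's replacement text in parentheses is standard checkpatch hygiene, because an unparenthesized body binds to neighboring operators the moment the constant becomes an expression. A minimal user-space illustration (macro names here are hypothetical, chosen only for the example):

```c
#include <stdio.h>

/* hypothetical evolution of a constant into an expression */
#define ADJ_BARE   0x3E + 1	/* no parentheses */
#define ADJ_SAFE  (0x3E + 1)	/* parenthesized, as in the fix */

int main(void)
{
	/* 2 * 0x3E + 1 == 125, but 2 * (0x3E + 1) == 126 */
	printf("bare: %d, safe: %d\n", 2 * ADJ_BARE, 2 * ADJ_SAFE);
	return 0;
}
```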
@@ -410,12 +410,12 @@ struct sw_desc {
 struct xdma_transfer {
 	struct list_head entry;		/* queue of non-completed transfers */
 	struct xdma_desc *desc_virt;	/* virt addr of the 1st descriptor */
-	struct xdma_result *res_virt;	/* virt addr of result for c2h streaming */
-	dma_addr_t res_bus;		/* bus addr for result descriptors */
+	struct xdma_result *res_virt;	/* virt addr of result, c2h streaming */
+	dma_addr_t res_bus;		/* bus addr for result descriptors */
 	dma_addr_t desc_bus;		/* bus addr of the first descriptor */
 	int desc_adjacent;		/* adjacent descriptors at desc_bus */
 	int desc_num;			/* number of descriptors in transfer */
-	int desc_index;			/* index for first descriptor in transfer */
+	int desc_index;			/* index for 1st desc. in transfer */
 	enum dma_data_direction dir;
 #if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
 	struct swait_queue_head wq;
@@ -438,7 +438,9 @@ struct xdma_request_cb {
 	unsigned int total_len;
 	u64 ep_addr;
-	struct xdma_transfer tfer[2]; /* Use two transfers in case single request needs to be split */
+	/* Use two transfers in case single request needs to be split */
+	struct xdma_transfer tfer[2];
 	struct xdma_io_cb *cb;
 	unsigned int sw_desc_idx;
@@ -484,8 +486,9 @@ struct xdma_engine {
 	dma_addr_t cyclic_result_bus;	/* bus addr for transfer */
 	struct xdma_request_cb *cyclic_req;
 	struct sg_table cyclic_sgt;
-	u8 *perf_buf_virt;
-	dma_addr_t perf_buf_bus; /* bus address */
+	u8 *perf_buf_virt;
+	dma_addr_t perf_buf_bus;	/* bus address */
 	u8 eop_found;	/* used only for cyclic(rx:c2h) */
 	int eop_count;
 	int rx_tail;	/* follows the HW */
......
@@ -25,9 +25,9 @@
 #include "cdev_ctrl.h"
 
 #if KERNEL_VERSION(5, 0, 0) <= LINUX_VERSION_CODE
-#define xlx_access_ok(X,Y,Z) access_ok(Y,Z)
+#define xlx_access_ok(X, Y, Z) access_ok(Y, Z)
 #else
-#define xlx_access_ok(X,Y,Z) access_ok(X,Y,Z)
+#define xlx_access_ok(X, Y, Z) access_ok(X, Y, Z)
 #endif
 
 /*
......
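
For context, the xlx_access_ok() wrapper above exists because Linux 5.0 removed the first (type) argument of access_ok(); this hunk only fixes the comma spacing. A sketch of how such a compat shim is typically called (check_user_buf is a hypothetical caller, not from this driver):

```c
#include <linux/version.h>
#include <linux/uaccess.h>

#if KERNEL_VERSION(5, 0, 0) <= LINUX_VERSION_CODE
#define xlx_access_ok(X, Y, Z) access_ok(Y, Z)
#else
#define xlx_access_ok(X, Y, Z) access_ok(X, Y, Z)
#endif

static int check_user_buf(const char __user *buf, size_t count)
{
	/* On >= 5.0 kernels the VERIFY_WRITE token is discarded by the
	 * preprocessor (X is unused), so this compiles either way. */
	if (!xlx_access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	return 0;
}
```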
@@ -57,16 +57,15 @@ static void async_io_handler(unsigned long cb_hndl, int err)
 	int lock_stat;
 	int rv;
 
-	if (NULL == caio) {
+	if (caio == NULL) {
 		pr_err("Invalid work struct\n");
 		return;
 	}
 
 	xcdev = (struct xdma_cdev *)caio->iocb->ki_filp->private_data;
 	rv = xcdev_check(__func__, xcdev, 1);
-	if (rv < 0)
-		return;
-
+	if (rv < 0)
+		return;
 
 	/* Safeguarding for cancel requests */
 	lock_stat = spin_trylock(&caio->lock);
@@ -80,13 +79,13 @@ static void async_io_handler(unsigned long cb_hndl, int err)
 		goto skip_tran;
 	}
 
 	engine = xcdev->engine;
 	xdev = xcdev->xdev;
 
 	if (!err)
-		numbytes = xdma_xfer_completion((void *)cb, xdev, engine->channel, cb->write, cb->ep_addr, &cb->sgt,
-						0, sgdma_timeout * 1000);
+		numbytes = xdma_xfer_completion((void *)cb, xdev,
+				engine->channel, cb->write, cb->ep_addr,
+				&cb->sgt, 0, sgdma_timeout * 1000);
 
 	char_sgdma_unmap_user_buf(cb, cb->write);
@@ -97,9 +96,7 @@ static void async_io_handler(unsigned long cb_hndl, int err)
 		caio->cmpl_cnt++;
 		caio->res += numbytes;
-		if (caio->cmpl_cnt == caio->req_cnt)
-		{
+		if (caio->cmpl_cnt == caio->req_cnt) {
 			res = caio->res;
 			res2 = caio->res2;
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
@@ -112,13 +109,10 @@ skip_tran:
 			kmem_cache_free(cdev_cache, caio);
 			kfree(cb);
 			return;
-		}
-		else
-		{
-			spin_unlock(&caio->lock);
-			return;
-		}
+		}
+		spin_unlock(&caio->lock);
+		return;
 	}
 
 skip_dev_lock:
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
 	caio->iocb->ki_complete(caio->iocb, numbytes, -EBUSY);
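
The restructuring above applies the common kernel-style rule that an else branch is redundant when the if branch ends in a return; hoisting the tail out removes one nesting level without changing behavior. A minimal before/after sketch (struct ctx and the helpers are hypothetical stand-ins):

```c
struct ctx { int done; };
static void free_ctx(struct ctx *c)   { (void)c; }
static void unlock_ctx(struct ctx *c) { (void)c; }

/* before: the else only adds indentation */
static int release_old(struct ctx *c)
{
	if (c->done) {
		free_ctx(c);
		return 0;
	} else {
		unlock_ctx(c);
		return 0;
	}
}

/* after: same behavior, flatter control flow */
static int release_new(struct ctx *c)
{
	if (c->done) {
		free_ctx(c);
		return 0;
	}
	unlock_ctx(c);
	return 0;
}
```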
@@ -408,15 +402,16 @@ static ssize_t char_sgdma_write(struct file *file, const char __user *buf,
 }
 
 static ssize_t char_sgdma_read(struct file *file, char __user *buf,
-		size_t count, loff_t *pos)
+				size_t count, loff_t *pos)
 {
 	return char_sgdma_read_write(file, buf, count, pos, 0);
 }
 
 static ssize_t cdev_aio_write(struct kiocb *iocb, const struct iovec *io,
-			unsigned long count, loff_t pos)
+				unsigned long count, loff_t pos)
 {
-	struct xdma_cdev *xcdev = (struct xdma_cdev *)iocb->ki_filp->private_data;
+	struct xdma_cdev *xcdev = (struct xdma_cdev *)
+					iocb->ki_filp->private_data;
 	struct cdev_async_io *caio;
 	struct xdma_engine *engine;
 	struct xdma_dev *xdev;
@@ -425,11 +420,11 @@ static ssize_t cdev_aio_write(struct kiocb *iocb, const struct iovec *io,
 	if (!xcdev) {
 		pr_info("file 0x%p, xcdev NULL, %llu, pos %llu, W %d.\n",
-			iocb->ki_filp, (u64)count, (u64)pos, 1);
+				iocb->ki_filp, (u64)count, (u64)pos, 1);
 		return -EINVAL;
 	}
 
-	engine = xcdev->engine;
+	engine = xcdev->engine;
 	xdev = xcdev->xdev;
 
 	if (engine->dir != DMA_TO_DEVICE) {
@@ -460,21 +455,23 @@ static ssize_t cdev_aio_write(struct kiocb *iocb, const struct iovec *io,
 		caio->cb[i].write = true;
 		caio->cb[i].private = caio;
 		caio->cb[i].io_done = async_io_handler;
-		rv = check_transfer_align(engine, caio->cb[i].buf, caio->cb[i].len, pos, 1);
+		rv = check_transfer_align(engine, caio->cb[i].buf,
+					caio->cb[i].len, pos, 1);
 		if (rv) {
 			pr_info("Invalid transfer alignment detected\n");
 			kmem_cache_free(cdev_cache, caio);
-			return rv;
+			return rv;
 		}
 
 		rv = char_sgdma_map_user_buf_to_sgl(&caio->cb[i], true);
-		if (rv < 0) {
+		if (rv < 0)
 			return rv;
-		}
-		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev, engine->channel, caio->cb[i].write, caio->cb[i].ep_addr, &caio->cb[i].sgt,
-				0, sgdma_timeout * 1000);
-	}
+
+		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev,
+					engine->channel, caio->cb[i].write,
+					caio->cb[i].ep_addr, &caio->cb[i].sgt,
+					0, sgdma_timeout * 1000);
+	}
 
 	if (engine->cmplthp)
 		xdma_kthread_wakeup(engine->cmplthp);
@@ -484,10 +481,11 @@ static ssize_t cdev_aio_write(struct kiocb *iocb, const struct iovec *io,
 
 static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
-			unsigned long count, loff_t pos)
+				unsigned long count, loff_t pos)
 {
-	struct xdma_cdev *xcdev = (struct xdma_cdev *)iocb->ki_filp->private_data;
+	struct xdma_cdev *xcdev = (struct xdma_cdev *)
+					iocb->ki_filp->private_data;
 	struct cdev_async_io *caio;
 	struct xdma_engine *engine;
 	struct xdma_dev *xdev;
@@ -496,7 +494,7 @@ static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
 	if (!xcdev) {
 		pr_info("file 0x%p, xcdev NULL, %llu, pos %llu, W %d.\n",
-			iocb->ki_filp, (u64)count, (u64)pos, 1);
+				iocb->ki_filp, (u64)count, (u64)pos, 1);
 		return -EINVAL;
 	}
@@ -532,7 +530,8 @@ static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
 		caio->cb[i].private = caio;
 		caio->cb[i].io_done = async_io_handler;
 
-		rv = check_transfer_align(engine, caio->cb[i].buf, caio->cb[i].len, pos, 1);
+		rv = check_transfer_align(engine, caio->cb[i].buf,
+					caio->cb[i].len, pos, 1);
 		if (rv) {
 			pr_info("Invalid transfer alignment detected\n");
 			kmem_cache_free(cdev_cache, caio);
@@ -540,12 +539,13 @@ static ssize_t cdev_aio_read(struct kiocb *iocb, const struct iovec *io,
 		}
 
 		rv = char_sgdma_map_user_buf_to_sgl(&caio->cb[i], true);
-		if (rv < 0) {
+		if (rv < 0)
 			return rv;
-		}
-		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev, engine->channel, caio->cb[i].write, caio->cb[i].ep_addr, &caio->cb[i].sgt,
-				0, sgdma_timeout * 1000);
+
+		rv = xdma_xfer_submit_nowait((void *)&caio->cb[i], xdev,
+					engine->channel, caio->cb[i].write,
+					caio->cb[i].ep_addr, &caio->cb[i].sgt,
+					0, sgdma_timeout * 1000);
 	}
 
 	if (engine->cmplthp)
......
@@ -136,7 +136,7 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	total_bits = xvc_obj.length;
 	total_bytes = (total_bits + 7) >> 3;
 
-	buffer = (unsigned char *)kmalloc(total_bytes * 3, GFP_KERNEL);
+	buffer = kmalloc(total_bytes * 3, GFP_KERNEL);
 	if (!buffer) {
 		pr_info("OOM %u, op 0x%x, len %u bits, %u bytes.\n",
 			3 * total_bytes, opcode, total_bits, total_bytes);
......
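
Dropping the cast in the kmalloc() hunk above reflects that kmalloc() returns void *, which C converts implicitly to any object pointer type; the cast is noise and can hide a missing prototype. A small self-contained sketch of the pattern (alloc_xfer_buf is a hypothetical helper, not driver code):

```c
#include <linux/slab.h>

static unsigned char *alloc_xfer_buf(unsigned int total_bytes)
{
	unsigned char *buffer;

	/* no cast needed: kmalloc() returns void * */
	buffer = kmalloc(total_bytes * 3, GFP_KERNEL);
	if (!buffer)
		return NULL;
	return buffer;
}
```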
@@ -607,21 +607,20 @@ int xdma_cdev_init(void)
 	g_xdma_class = class_create(THIS_MODULE, XDMA_NODE_NAME);
 	if (IS_ERR(g_xdma_class)) {
 		dbg_init(XDMA_NODE_NAME ": failed to create class");
-		return -1;
+		return -EINVAL;
 	}
 
-	/* using kmem_cache_create to enable sequential cleanup */
-	cdev_cache = kmem_cache_create("cdev_cache",
-			sizeof(struct cdev_async_io),
-			0,
-			SLAB_HWCACHE_ALIGN,
-			NULL);
-	if (!cdev_cache) {
-		pr_info("memory allocation for cdev_cache failed. OOM\n");
-		return -ENOMEM;
-	}
+	/* using kmem_cache_create to enable sequential cleanup */
+	cdev_cache = kmem_cache_create("cdev_cache",
+					sizeof(struct cdev_async_io), 0,
+					SLAB_HWCACHE_ALIGN, NULL);
+	if (!cdev_cache) {
+		pr_info("memory allocation for cdev_cache failed. OOM\n");
+		return -ENOMEM;
+	}
 
-	xdma_threads_create(8);
+	xdma_threads_create(num_online_cpus());
 
 	return 0;
 }
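
The last change above sizes the completion-thread pool by num_online_cpus() rather than a hard-coded 8, so the thread count tracks the machine. A self-contained sketch of that pattern, assuming a simple kthread pool (the pool structure and names are hypothetical, not the driver's xdma_kthread code):

```c
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>

static struct task_struct **workers;
static unsigned int nworkers;

/* stand-in for a completion-polling loop */
static int worker_fn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int pool_create(void)
{
	unsigned int i;

	nworkers = num_online_cpus();	/* was a fixed 8 */
	workers = kcalloc(nworkers, sizeof(*workers), GFP_KERNEL);
	if (!workers)
		return -ENOMEM;

	for (i = 0; i < nworkers; i++) {
		workers[i] = kthread_run(worker_fn, NULL, "xdma_wrk%u", i);
		if (IS_ERR(workers[i])) {
			int rv = PTR_ERR(workers[i]);

			while (i--)
				kthread_stop(workers[i]);
			kfree(workers);
			return rv;
		}
	}
	return 0;
}
```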
......
@@ -32,7 +32,6 @@
 #define DRV_MODULE_NAME		"xdma"
 #define DRV_MODULE_DESC		"Xilinx XDMA Reference Driver"
-#define DRV_MODULE_RELDATE	"Feb. 2018"
 
 static char version[] =
 	DRV_MODULE_DESC " " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
......
@@ -101,7 +101,7 @@ struct xdma_pci_dev {
 struct cdev_async_io {
 	struct kiocb *iocb;
-	struct xdma_io_cb* cb;
+	struct xdma_io_cb *cb;
 	bool write;
 	bool cancel;
 	int cmpl_cnt;
......