Commit 514986c5 authored by Julien

XDMA: fix build for CentOS and RHEL

Change the kernel version checks to also take RHEL release versions
into account when RHEL_RELEASE_CODE is defined, since RHEL/CentOS
kernels backport newer interfaces without bumping LINUX_VERSION_CODE.
Add HAS_MMIOWB, ACCESS_OK_2_ARGS, HAS_SWAKE_UP, and HAS_SWAKE_UP_ONE
and use them in place of the raw version checks.
parent 3b8295f6
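Condensed sketch of the pattern the hunks below apply, not itself part of the commit; HAS_MMIOWB is used as the example, and the other feature macros only differ in their version cut-offs:

/*
 * Sketch of the feature-test pattern: prefer RHEL_RELEASE_CODE when it
 * is defined, because RHEL/CentOS kernels backport APIs without bumping
 * LINUX_VERSION_CODE, and fall back to the upstream version otherwise.
 */
#include <linux/version.h>

#if defined(RHEL_RELEASE_CODE)
# define HAS_MMIOWB (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(8, 0))
#else
# define HAS_MMIOWB (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 0))
#endif

/* consumers then test the feature macro instead of a kernel version */
#if HAS_MMIOWB
	mmiowb();	/* guarded because newer kernels no longer provide mmiowb() */
#endif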
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(desc_blen_max,
#define XDMA_PERF_NUM_DESC 128
/* Kernel version adaptative code */
-#if KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP_ONE
/* since 4.18, using simple wait queues is not recommended
* except for realtime constraint (see swait.h comments)
* and will likely be removed in future kernel versions
@@ -64,7 +64,7 @@ MODULE_PARM_DESC(desc_blen_max,
#define xlx_wake_up swake_up_one
#define xlx_wait_event_interruptible_timeout \
swait_event_interruptible_timeout_exclusive
-#elif KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#elif HAS_SWAKE_UP
#define xlx_wake_up swake_up
#define xlx_wait_event_interruptible_timeout \
swait_event_interruptible_timeout
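A hypothetical usage sketch, not taken from this diff, showing what the aliases buy: callers sleep on and wake a transfer the same way regardless of which wait-queue API the kernel provides. The state field and TRANSFER_STATE_* names are illustrative assumptions, not identifiers confirmed by this extract:

/* hypothetical helper; xfer->wq is the queue initialized by
 * init_swait_queue_head()/init_waitqueue_head() in the hunks below */
static int wait_for_transfer_done(struct xdma_transfer *xfer,
				  unsigned int timeout_ms)
{
	long rv;

	rv = xlx_wait_event_interruptible_timeout(xfer->wq,
			xfer->state != TRANSFER_STATE_SUBMITTED,
			msecs_to_jiffies(timeout_ms));
	if (rv == 0)
		return -ETIMEDOUT;	/* timed out, condition still false */
	if (rv < 0)
		return (int)rv;		/* interrupted by a signal */
	return 0;
}

/* hypothetical completion side, e.g. in the interrupt handler:
 *	xfer->state = TRANSFER_STATE_COMPLETED;
 *	xlx_wake_up(&xfer->wq);
 */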
@@ -696,7 +696,7 @@ static struct xdma_transfer *engine_start(struct xdma_engine *engine)
dbg_tfr("ioread32(0x%p) (dummy read flushes writes).\n",
&engine->regs->status);
-#if KERNEL_VERSION(5, 1, 0) >= LINUX_VERSION_CODE
+#if HAS_MMIOWB
mmiowb();
#endif
@@ -3152,7 +3152,7 @@ static int transfer_init(struct xdma_engine *engine,
/* lock the engine state */
spin_lock_irqsave(&engine->lock, flags);
/* initialize wait queue */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+#if HAS_SWAKE_UP
init_swait_queue_head(&xfer->wq);
#else
init_waitqueue_head(&xfer->wq);
@@ -3229,7 +3229,7 @@ static int transfer_init_cyclic(struct xdma_engine *engine,
memset(xfer, 0, sizeof(*xfer));
/* initialize wait queue */
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
init_swait_queue_head(&xfer->wq);
#else
init_waitqueue_head(&xfer->wq);
@@ -4004,7 +4004,7 @@ int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine)
transfer->cyclic = 1;
/* initialize wait queue */
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
init_swait_queue_head(&transfer->wq);
#else
init_waitqueue_head(&transfer->wq);
@@ -4084,7 +4084,7 @@ static struct xdma_dev *alloc_dev_instance(struct pci_dev *pdev)
spin_lock_init(&engine->lock);
mutex_init(&engine->desc_lock);
INIT_LIST_HEAD(&engine->transfer_list);
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
init_swait_queue_head(&engine->shutdown_wq);
init_swait_queue_head(&engine->xdma_perf_wq);
#else
@@ -4098,7 +4098,7 @@ static struct xdma_dev *alloc_dev_instance(struct pci_dev *pdev)
spin_lock_init(&engine->lock);
mutex_init(&engine->desc_lock);
INIT_LIST_HEAD(&engine->transfer_list);
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
init_swait_queue_head(&engine->shutdown_wq);
init_swait_queue_head(&engine->xdma_perf_wq);
#else
......
@@ -31,9 +31,32 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+/* Add compatibility checking for RHEL versions */
+#if defined(RHEL_RELEASE_CODE)
+# define ACCESS_OK_2_ARGS (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0))
+#else
+# define ACCESS_OK_2_ARGS (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
+#endif
+#if defined(RHEL_RELEASE_CODE)
+# define HAS_MMIOWB (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(8, 0))
+#else
+# define HAS_MMIOWB (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 0))
+#endif
+#if defined(RHEL_RELEASE_CODE)
+# define HAS_SWAKE_UP_ONE (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0))
+# define HAS_SWAKE_UP (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0))
+#else
+# define HAS_SWAKE_UP_ONE (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
+# define HAS_SWAKE_UP (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+#endif
+#if HAS_SWAKE_UP
#include <linux/swait.h>
#endif
/*
* if the config bar is fixed, the driver does not neeed to search through
* all of the bars
@@ -417,7 +440,7 @@ struct xdma_transfer {
int desc_num; /* number of descriptors in transfer */
int desc_index; /* index for 1st desc. in transfer */
enum dma_data_direction dir;
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
struct swait_queue_head wq;
#else
wait_queue_head_t wq; /* wait queue for transfer completion */
@@ -503,7 +526,7 @@ struct xdma_engine {
dma_addr_t poll_mode_bus; /* bus addr for descriptor writeback */
/* Members associated with interrupt mode support */
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
struct swait_queue_head shutdown_wq;
#else
wait_queue_head_t shutdown_wq; /* wait queue for shutdown sync */
@@ -522,7 +545,7 @@ struct xdma_engine {
/* for performance test support */
struct xdma_performance_ioctl *xdma_perf; /* perf test control */
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
struct swait_queue_head xdma_perf_wq;
#else
wait_queue_head_t xdma_perf_wq; /* Perf test sync */
......
@@ -24,7 +24,7 @@
#include "xdma_cdev.h"
#include "cdev_ctrl.h"
-#if KERNEL_VERSION(5, 0, 0) <= LINUX_VERSION_CODE
+#if ACCESS_OK_2_ARGS
#define xlx_access_ok(X, Y, Z) access_ok(Y, Z)
#else
#define xlx_access_ok(X, Y, Z) access_ok(X, Y, Z)
......
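A hypothetical call site, not part of this commit, illustrating the wrapper: callers keep the old three-argument form, and on kernels whose access_ok() takes only two arguments the first argument is dropped by the macro during preprocessing, so VERIFY_WRITE does not even need to be defined there. buf and count stand in for whatever the real caller validates:

/* hypothetical caller sketch */
if (!xlx_access_ok(VERIFY_WRITE, buf, count))
	return -EFAULT;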
@@ -611,7 +611,7 @@ static int ioctl_do_perf_start(struct xdma_engine *engine, unsigned long arg)
enable_perf(engine);
dbg_perf("transfer_size = %d\n", engine->xdma_perf->transfer_size);
/* initialize wait queue */
-#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+#if HAS_SWAKE_UP
init_swait_queue_head(&engine->xdma_perf_wq);
#else
init_waitqueue_head(&engine->xdma_perf_wq);
......
@@ -213,7 +213,7 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
pr_info("copy back tdo_buf failed: %d/%u.\n", rv, total_bytes);
unlock:
-#if KERNEL_VERSION(5, 1, 0) >= LINUX_VERSION_CODE
+#if HAS_MMIOWB
mmiowb();
#endif
spin_unlock(&xcdev->lock);
......