Commit 00b6d04a authored by Sujatha Banoth

2020.2 QDMA DPDK Driver Release

parent 5f12009e
RELEASE: 2020.2
===============

This release is based on DPDK v19.11 and contains QDMA poll mode driver and
QDMA test application.

This release is validated for VCU1525 and U200 devices on QDMA4.0 2020.2 based
example design and QDMA3.1 2020.2 based example design.
This release includes a patch file for dpdk-pktgen v19.12.0 that extends the
dpdk-pktgen application to handle packets with packet sizes more than 1518 bytes
and disables the packet size classification logic in dpdk-pktgen to remove
application overhead in performance measurement.
This patch is used for performance testing with the dpdk-pktgen application.

The driver is validated against dpdk-pktgen and testpmd applications for API compliance.
SUPPORTED FEATURES:
@@ -75,6 +77,14 @@ SUPPORTED FEATURES:
- Verified QDMA DPDK software with IOVA=VA mode by enabling hugepage allocation matching support in DPDK
  (i.e. by specifying the --match-allocations command-line switch to the EAL).
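  For reference, a typical EAL command line exercising this mode may look like the
  following (illustrative only; the application name and PCI address are placeholders
  for the actual test setup):
      ./testpmd -l 0-3 -n 4 --iova-mode=va --match-allocations -w 01:00.0 -- -i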
2020.2 Updates
--------------
- Added support for detailed register dump
- Added support for post processing HW error messages
- Added support for Debug mode and Internal only mode
- Resolved the issue related to mbuf packet length
- Fixed the VF FMAP qmax programming issue by setting qmax to the actual user-configured total number of queues
KNOWN ISSUES:
=============
- Function Level Reset (FLR) of a PF device when VFs are attached to this PF results in mailbox communication failure
......
@@ -83,7 +83,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_xdebug.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_user.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/eqdma_soft_access/eqdma_soft_access.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/eqdma_soft_access/eqdma_soft_reg_dump.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/qdma_s80_hard_access/qdma_s80_hard_access.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/qdma_s80_hard_access/qdma_s80_hard_reg_dump.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/qdma_soft_access/qdma_soft_access.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/qdma_list.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/qdma_resource_mgmt.c
......
@@ -44,9 +44,6 @@
#include <rte_memzone.h>
#include <linux/pci.h>
#include "qdma_user.h"
#include "qdma_soft_reg.h"
#include "eqdma_soft_reg.h"
#include "qdma_s80_hard_reg.h"
#include "qdma_resource_mgmt.h"
#include "qdma_mbox.h"
#include "rte_pmd_qdma.h"
@@ -84,6 +81,8 @@
#define RESET_TIMEOUT (60000)
#define SHUTDOWN_TIMEOUT (60000)
#define QDMA_MAX_BUFLEN (2048 * 10)
#ifdef spin_lock_init
#undef spin_lock_init
#endif
......
This source diff could not be displayed because it is too large.
(In this file, the GPL v2 license header is replaced by the BSD license header
below, the include guard is renamed from EQDMA_ACCESS_H_ to __EQDMA_SOFT_ACCESS_H_,
and the qdma_access_common.h include is replaced by qdma_platform.h.)

/*
 * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of the copyright holder nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __EQDMA_SOFT_ACCESS_H_
#define __EQDMA_SOFT_ACCESS_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "qdma_platform.h"

/**
 * enum qdma_error_idx - qdma errors
 */
@@ -191,8 +207,18 @@ struct eqdma_hw_err_info {
	uint32_t stat_reg_addr;
	uint32_t leaf_err_mask;
	uint32_t global_err_mask;
	void (*eqdma_hw_err_process)(void *dev_hndl);
};
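/*
 * Illustrative sketch only (not code from this commit): one plausible way an
 * eqdma_hw_err_info table entry with the new eqdma_hw_err_process callback
 * could be consumed when post-processing a hardware error. qdma_reg_read()
 * is assumed here to be the access-layer register read helper and dev_hndl
 * a valid device handle.
 */
static inline void example_hw_err_post_process(void *dev_hndl,
		const struct eqdma_hw_err_info *info)
{
	uint32_t stat = qdma_reg_read(dev_hndl, info->stat_reg_addr);

	/* Run the per-error post-processing hook only if this leaf error
	 * is actually flagged in its status register. */
	if ((stat & info->leaf_err_mask) && info->eqdma_hw_err_process)
		info->eqdma_hw_err_process(dev_hndl);
}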
#define EQDMA_OFFSET_VF_VERSION 0x5014
#define EQDMA_OFFSET_VF_USER_BAR 0x5018
#define EQDMA_OFFSET_MBOX_BASE_PF 0x22400
#define EQDMA_OFFSET_MBOX_BASE_VF 0x5000
#define EQDMA_COMPL_CTXT_BADDR_HIGH_H_MASK GENMASK_ULL(63, 38)
#define EQDMA_COMPL_CTXT_BADDR_HIGH_L_MASK GENMASK_ULL(37, 6)
#define EQDMA_COMPL_CTXT_BADDR_LOW_MASK GENMASK_ULL(5, 2)
int eqdma_init_ctxt_memory(void *dev_hndl);
@@ -259,19 +285,38 @@ int eqdma_get_user_bar(void *dev_hndl, uint8_t is_vf,
		uint8_t func_id, uint8_t *user_bar);
int eqdma_dump_config_reg_list(void *dev_hndl,
		uint32_t total_regs,
		struct qdma_reg_data *reg_list,
		char *buf, uint32_t buflen);
int eqdma_read_reg_list(void *dev_hndl, uint8_t is_vf,
		uint16_t reg_rd_group,
		uint16_t *total_regs,
		struct qdma_reg_data *reg_list);
int eqdma_set_default_global_csr(void *dev_hndl);
int eqdma_global_csr_conf(void *dev_hndl, uint8_t index, uint8_t count,
uint32_t *csr_val,
enum qdma_global_csr_type csr_type,
enum qdma_hw_access_type access_type);
int eqdma_global_writeback_interval_conf(void *dev_hndl,
enum qdma_wrb_interval *wb_int,
enum qdma_hw_access_type access_type);
int eqdma_mm_channel_conf(void *dev_hndl, uint8_t channel, uint8_t is_c2h,
uint8_t enable);
int eqdma_dump_reg_info(void *dev_hndl, uint32_t reg_addr,
uint32_t num_regs, char *buf, uint32_t buflen);
uint32_t eqdma_get_config_num_regs(void);
struct xreg_info *eqdma_get_config_regs(void);
#ifdef __cplusplus
}
#endif

#endif /* __EQDMA_SOFT_ACCESS_H_ */
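/*
 * Illustrative caller-side sketch (not part of this commit): how the new
 * detailed register dump entry points declared above might be used.
 * dev_hndl is assumed to be a valid QDMA device handle owned by the PMD,
 * and a non-negative return value is assumed to indicate success.
 */
static void example_dump_one_config_reg(void *dev_hndl)
{
	char buf[2048];

	/* Format a detailed dump of a single register (offset 0x0,
	 * the config block identifier) into buf. */
	if (eqdma_dump_reg_info(dev_hndl, 0x0, 1, buf, sizeof(buf)) >= 0) {
		/* buf now holds the human-readable dump text */
	}
}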
(In this file, too, the GPL v2 license header is replaced by the same BSD license
header as above, the header guard is renamed from EQDMA_SOFT_REG_H_ to
__EQDMA_SOFT_REG_H, and the qdma_soft_reg.h include is replaced by
qdma_platform.h.)

#ifndef __EQDMA_SOFT_REG_H
#define __EQDMA_SOFT_REG_H

#ifdef __cplusplus
extern "C" {
#endif

#include "qdma_platform.h"

/* H2C Throttle settings */
#define EQDMA_H2C_THROT_DATA_THRESH 0x5000
#define EQDMA_THROT_EN_DATA 1
#define EQDMA_THROT_EN_REQ 0
#define EQDMA_H2C_THROT_REQ_THRESH 0xC0
/** Software Context */
#define EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_H_MASK GENMASK_ULL(63, 53)
#define EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_M_MASK GENMASK_ULL(52, 21)
#define EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_L_MASK GENMASK_ULL(20, 0)
#define EQDMA_SW_CTXT_PASID_GET_H_MASK GENMASK(21, 12)
#define EQDMA_SW_CTXT_PASID_GET_L_MASK GENMASK(11, 0)
#define EQDMA_SW_CTXT_W7_VIRTIO_DSC_BASE_H_MASK GENMASK(10, 0)
#define EQDMA_SW_CTXT_W6_VIRTIO_DSC_BASE_M_MASK GENMASK(31, 0)
#define EQDMA_SW_CTXT_W5_VIRTIO_DSC_BASE_L_MASK GENMASK(31, 11)
#define EQDMA_SW_CTXT_W5_PASID_EN_MASK BIT(10)
#define EQDMA_SW_CTXT_W5_PASID_H_MASK GENMASK(9, 0)
#define EQDMA_SW_CTXT_W4_PASID_L_MASK GENMASK(31, 20)
#define EQDMA_SW_CTXT_W4_HOST_ID_MASK GENMASK(19, 16)
#define EQDMA_SW_CTXT_W4_IRQ_BYP_MASK BIT(15)
#define EQDMA_SW_CTXT_W4_PACK_BYP_OUT_MASK BIT(14)
#define EQDMA_SW_CTXT_W4_VIRTIO_EN_MASK BIT(13)
#define EQDMA_SW_CTXT_W4_DIS_INTR_VF_MASK BIT(12)
/** Completion Context */
#define EQDMA_CMPL_CTXT_PASID_GET_H_MASK GENMASK(21, 9)
#define EQDMA_CMPL_CTXT_PASID_GET_L_MASK GENMASK(8, 0)
#define EQDMA_COMPL_CTXT_W5_SH_CMPT_MASK BIT(19)
#define EQDMA_COMPL_CTXT_W5_VIO_EOP_MASK BIT(18)
#define EQDMA_COMPL_CTXT_W5_BADDR4_LOW_MASK GENMASK(17, 14)
#define EQDMA_COMPL_CTXT_W5_PASID_EN_MASK BIT(13)
#define EQDMA_COMPL_CTXT_W5_PASID_H_MASK GENMASK(12, 0)
#define EQDMA_COMPL_CTXT_W4_PASID_L_MASK GENMASK(31, 23)
#define EQDMA_COMPL_CTXT_W4_HOST_ID_MASK GENMASK(22, 19)
#define EQDMA_COMPL_CTXT_W4_DIR_C2H_MASK BIT(18)
#define EQDMA_COMPL_CTXT_W4_VIO_MASK BIT(17)
#define EQDMA_COMPL_CTXT_W4_DIS_INTR_VF_MASK BIT(16)
/** Interrupt Context */
#define EQDMA_INTR_CTXT_PASID_GET_H_MASK GENMASK(21, 9)
#define EQDMA_INTR_CTXT_PASID_GET_L_MASK GENMASK(8, 0)
#define EQDMA_INTR_CTXT_W3_FUNC_ID_MASK GENMASK(29, 18)
#define EQDMA_INTR_CTXT_W3_PASID_EN_MASK BIT(13)
#define EQDMA_INTR_CTXT_W3_PASID_H_MASK GENMASK(12, 0)
#define EQDMA_INTR_CTXT_W2_PASID_L_MASK GENMASK(31, 23)
#define EQDMA_INTR_CTXT_W2_HOST_ID_MASK GENMASK(22, 19)
/** Prefetch Context */
#define EQDMA_PFTCH_CTXT_W0_NUM_PFTCH_MASK GENMASK(18, 9)
#define EQDMA_PFTCH_CTXT_W0_VAR_DESC_MASK BIT(8)
/* ------------------------- Hardware Errors ------------------------------ */
#define EQDMA_TOTAL_LEAF_ERROR_AGGREGATORS 9
#ifdef CHAR_BIT
#undef CHAR_BIT
#endif
#define CHAR_BIT 8

#define EQDMA_OFFSET_GLBL_ERR_INT 0XB04
#define EQDMA_GLBL_ERR_FUNC_MASK GENMASK(11, 0)
#define EQDMA_GLBL_ERR_VEC_MASK GENMASK(22, 12)
#define EQDMA_GLBL_ERR_ARM_MASK BIT(24)
#define EQDMA_GLBL_ERR_COAL_MASK BIT(23)
#define EQDMA_GLBL_ERR_DIS_INTR_ON_VF_MASK BIT(25)
#define EQDMA_GLBL_ERR_HOST_ID_MASK BIT(25)
#define EQDMA_OFFSET_GLBL_ERR_STAT 0X248
#define EQDMA_OFFSET_GLBL_ERR_MASK 0X24C
#define EQDMA_GLBL_ERR_RAM_SBE_MASK BIT(0)
#define EQDMA_GLBL_ERR_RAM_DBE_MASK BIT(1)
#define EQDMA_GLBL_ERR_DSC_MASK BIT(2)
#define EQDMA_GLBL_ERR_TRQ_MASK BIT(3)
#define EQDMA_GLBL_ERR_H2C_MM_0_MASK BIT(4)
#define EQDMA_GLBL_ERR_H2C_MM_1_MASK BIT(5)
#define EQDMA_GLBL_ERR_C2H_MM_0_MASK BIT(6)
#define EQDMA_GLBL_ERR_C2H_MM_1_MASK BIT(7)
#define EQDMA_GLBL_ERR_ST_C2H_MASK BIT(8)
#define EQDMA_GLBL_ERR_BDG_MASK BIT(15)
#define EQDMA_GLBL_ERR_IND_CTXT_CMD_MASK GENMASK(14, 9)
#define EQDMA_GLBL_ERR_ST_H2C_MASK BIT(16)
#define EQDMA_OFFSET_C2H_ERR_STAT 0XAF0
#define EQDMA_OFFSET_C2H_ERR_MASK 0XAF4
#define EQDMA_C2H_ERR_MTY_MISMATCH_MASK BIT(0)
#define EQDMA_C2H_ERR_LEN_MISMATCH_MASK BIT(1)
#define EQDMA_C2H_ERR_SH_CMPT_DSC_MASK BIT(2)
#define EQDMA_C2H_ERR_QID_MISMATCH_MASK BIT(3)
#define EQDMA_C2H_ERR_DESC_RSP_ERR_MASK BIT(4)
#define EQDMA_C2H_ERR_ENG_WPL_DATA_PAR_ERR_MASK BIT(6)
#define EQDMA_C2H_ERR_MSI_INT_FAIL_MASK BIT(7)
#define EQDMA_C2H_ERR_ERR_DESC_CNT_MASK BIT(9)
#define EQDMA_C2H_ERR_PORTID_CTXT_MISMATCH_MASK BIT(10)
#define EQDMA_C2H_ERR_CMPT_INV_Q_ERR_MASK BIT(12)
#define EQDMA_C2H_ERR_CMPT_QFULL_ERR_MASK BIT(13)
#define EQDMA_C2H_ERR_CMPT_CIDX_ERR_MASK BIT(14)
#define EQDMA_C2H_ERR_CMPT_PRTY_ERR_MASK BIT(15)
#define EQDMA_C2H_ERR_AVL_RING_DSC_MASK BIT(16)
#define EQDMA_C2H_ERR_HDR_ECC_UNC_MASK BIT(17)
#define EQDMA_C2H_ERR_HDR_ECC_COR_MASK BIT(18)
#define EQDMA_C2H_ERR_ALL_MASK 0X3F6DF
#ifdef BIT
#undef BIT
#endif
#define BIT(n) (1u << (n))

#define EQDMA_OFFSET_C2H_FATAL_ERR_STAT 0XAF8
#define EQDMA_OFFSET_C2H_FATAL_ERR_MASK 0XAFC
#define EQDMA_C2H_FATAL_ERR_MTY_MISMATCH_MASK BIT(0)
#define EQDMA_C2H_FATAL_ERR_LEN_MISMATCH_MASK BIT(1)
#define EQDMA_C2H_FATAL_ERR_QID_MISMATCH_MASK BIT(3)
#define EQDMA_C2H_FATAL_ERR_TIMER_FIFO_RAM_RDBE_MASK BIT(4)
#define EQDMA_C2H_FATAL_ERR_PFCH_II_RAM_RDBE_MASK BIT(8)
#define EQDMA_C2H_FATAL_ERR_CMPT_CTXT_RAM_RDBE_MASK BIT(9)
#define EQDMA_C2H_FATAL_ERR_PFCH_CTXT_RAM_RDBE_MASK BIT(10)
#define EQDMA_C2H_FATAL_ERR_DESC_REQ_FIFO_RAM_RDBE_MASK BIT(11)
#define EQDMA_C2H_FATAL_ERR_INT_CTXT_RAM_RDBE_MASK BIT(12)
#define EQDMA_C2H_FATAL_ERR_CMPT_COAL_DATA_RAM_RDBE_MASK BIT(14)
#define EQDMA_C2H_FATAL_ERR_CMPT_FIFO_RAM_RDBE_MASK BIT(15)
#define EQDMA_C2H_FATAL_ERR_QID_FIFO_RAM_RDBE_MASK BIT(16)
#define EQDMA_C2H_FATAL_ERR_PAYLOAD_FIFO_RAM_RDBE_MASK BIT(17)
#define EQDMA_C2H_FATAL_ERR_WPL_DATA_PAR_MASK BIT(18)
#define EQDMA_C2H_FATAL_ERR_AVL_RING_FIFO_RAM_RDBE_MASK BIT(19)
#define EQDMA_C2H_FATAL_ERR_HDR_ECC_UNC_MASK BIT(20)
#define EQDMA_C2H_FATAL_ERR_ALL_MASK 0X1FDF1B
#ifdef BITS_PER_BYTE
#undef BITS_PER_BYTE
#endif
#define BITS_PER_BYTE CHAR_BIT

#define EQDMA_OFFSET_H2C_ERR_STAT 0XE00
#define EQDMA_OFFSET_H2C_ERR_MASK 0XE04
#define EQDMA_H2C_ERR_ZERO_LEN_DESC_MASK BIT(0)
#define EQDMA_H2C_ERR_SDI_MRKR_REQ_MOP_MASK BIT(1)
#define EQDMA_H2C_ERR_NO_DMA_DSC_MASK BIT(2)
#define EQDMA_H2C_ERR_SBE_MASK BIT(3)
#define EQDMA_H2C_ERR_DBE_MASK BIT(4)
#define EQDMA_H2C_ERR_PAR_ERR_MASK BIT(5)
#define EQDMA_H2C_ERR_ALL_MASK 0X3F
#ifdef BITS_PER_LONG
#undef BITS_PER_LONG
#endif
#define BITS_PER_LONG (sizeof(uint32_t) * BITS_PER_BYTE)

#define EQDMA_OFFSET_GLBL_DSC_ERR_STAT 0X254
#define EQDMA_OFFSET_GLBL_DSC_ERR_MASK 0X258
#define EQDMA_GLBL_DSC_ERR_POISON_MASK BIT(1)
#define EQDMA_GLBL_DSC_ERR_UR_CA_MASK BIT(2)
#define EQDMA_GLBL_DSC_ERR_BCNT_MASK BIT(3)
#define EQDMA_GLBL_DSC_ERR_PARAM_MASK BIT(4)
#define EQDMA_GLBL_DSC_ERR_ADDR_MASK BIT(5)
#define EQDMA_GLBL_DSC_ERR_TAG_MASK BIT(6)
#define EQDMA_GLBL_DSC_ERR_FLR_MASK BIT(8)
#define EQDMA_GLBL_DSC_ERR_TIMEOUT_MASK BIT(9)
#define EQDMA_GLBL_DSC_ERR_DAT_POISON_MASK BIT(16)
#define EQDMA_GLBL_DSC_ERR_FLR_CANCEL_MASK BIT(19)
#define EQDMA_GLBL_DSC_ERR_DMA_MASK BIT(20)
#define EQDMA_GLBL_DSC_ERR_DSC_MASK BIT(21)
#define EQDMA_GLBL_DSC_ERR_RQ_CANCEL_MASK BIT(22)
#define EQDMA_GLBL_DSC_ERR_DBE_MASK BIT(23)
#define EQDMA_GLBL_DSC_ERR_SBE_MASK BIT(24)
#define EQDMA_GLBL_DSC_ERR_ALL_MASK 0X1F9037E
#ifdef BITS_PER_LONG_LONG
#undef BITS_PER_LONG_LONG
#endif
#define BITS_PER_LONG_LONG (sizeof(uint64_t) * BITS_PER_BYTE)

#define EQDMA_OFFSET_GLBL_TRQ_ERR_STAT 0X264
#define EQDMA_OFFSET_GLBL_TRQ_ERR_MASK 0X268
#define EQDMA_GLBL_TRQ_ERR_CSR_UNMAPPED_MASK BIT(0)
#define EQDMA_GLBL_TRQ_ERR_VF_ACCESS_MASK BIT(1)
#define EQDMA_GLBL_TRQ_ERR_TCP_CSR_MASK BIT(3)
#define EQDMA_GLBL_TRQ_ERR_QSPC_UNMAPPED_MASK BIT(4)
#define EQDMA_GLBL_TRQ_ERR_QID_RANGE_MASK BIT(5)
#define EQDMA_GLBL_TRQ_ERR_TCP_QSPC_TIMEOUT_MASK BIT(7)
#define EQDMA_GLBL_TRQ_ERR_ALL_MASK 0XB3
#ifdef GENMASK
#undef GENMASK
#endif
#define GENMASK(h, l) \
	((0xFFFFFFFF << (l)) & (0xFFFFFFFF >> (BITS_PER_LONG - 1 - (h))))

#define EQDMA_OFFSET_RAM_SBE_1_STAT 0XE4
#define EQDMA_OFFSET_RAM_SBE_1_MASK 0XE0
#define EQDMA_SBE_1_ERR_RC_RRQ_EVEN_RAM_MASK BIT(0)
#define EQDMA_SBE_1_ERR_TAG_ODD_RAM_MASK BIT(1)
#define EQDMA_SBE_1_ERR_TAG_EVEN_RAM_MASK BIT(2)
#define EQDMA_SBE_1_ERR_PFCH_CTXT_CAM_RAM_0_MASK BIT(3)
#define EQDMA_SBE_1_ERR_PFCH_CTXT_CAM_RAM_1_MASK BIT(4)
#define EQDMA_SBE_1_ERR_ALL_MASK 0X1F
#ifdef GENMASK_ULL
#undef GENMASK_ULL
#endif
#define GENMASK_ULL(h, l) \
	((0xFFFFFFFFFFFFFFFF << (l)) & \
	(0xFFFFFFFFFFFFFFFF >> (BITS_PER_LONG_LONG - 1 - (h))))

#define EQDMA_OFFSET_RAM_DBE_1_STAT 0XEC
#define EQDMA_OFFSET_RAM_DBE_1_MASK 0XE8
#define EQDMA_DBE_1_ERR_RC_RRQ_EVEN_RAM_MASK BIT(0)
#define EQDMA_DBE_1_ERR_TAG_ODD_RAM_MASK BIT(1)
#define EQDMA_DBE_1_ERR_TAG_EVEN_RAM_MASK BIT(2)
#define EQDMA_DBE_1_ERR_PFCH_CTXT_CAM_RAM_0_MASK BIT(3)
#define EQDMA_DBE_1_ERR_PFCH_CTXT_CAM_RAM_1_MASK BIT(4)
#define EQDMA_DBE_1_ERR_ALL_MASK 0X1F
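/*
 * Added note for readability (not part of the original header): GENMASK(h, l)
 * and GENMASK_ULL(h, l) defined above build a contiguous mask with bits l
 * through h (inclusive) set. For example, GENMASK(15, 0) evaluates to
 * 0x0000FFFF, GENMASK(22, 12) to 0x007FF000, and GENMASK_ULL(63, 38), as used
 * by EQDMA_COMPL_CTXT_BADDR_HIGH_H_MASK, to 0xFFFFFFC000000000.
 */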
#define DEBGFS_LINE_SZ (81)

#define EQDMA_OFFSET_RAM_SBE_STAT 0XF4
#define EQDMA_OFFSET_RAM_SBE_MASK 0XF0
#define EQDMA_SBE_ERR_MI_H2C0_DAT_MASK BIT(0)
#define EQDMA_SBE_ERR_MI_H2C1_DAT_MASK BIT(1)
#define EQDMA_SBE_ERR_MI_H2C2_DAT_MASK BIT(2)
#define EQDMA_SBE_ERR_MI_H2C3_DAT_MASK BIT(3)
#define EQDMA_SBE_ERR_MI_C2H0_DAT_MASK BIT(4)
#define EQDMA_SBE_ERR_MI_C2H1_DAT_MASK BIT(5)
#define EQDMA_SBE_ERR_MI_C2H2_DAT_MASK BIT(6)
#define EQDMA_SBE_ERR_MI_C2H3_DAT_MASK BIT(7)
#define EQDMA_SBE_ERR_H2C_RD_BRG_DAT_MASK BIT(8)
#define EQDMA_SBE_ERR_H2C_WR_BRG_DAT_MASK BIT(9)
#define EQDMA_SBE_ERR_C2H_RD_BRG_DAT_MASK BIT(10)
#define EQDMA_SBE_ERR_C2H_WR_BRG_DAT_MASK BIT(11)
#define EQDMA_SBE_ERR_FUNC_MAP_MASK BIT(12)
#define EQDMA_SBE_ERR_DSC_HW_CTXT_MASK BIT(13)
#define EQDMA_SBE_ERR_DSC_CRD_RCV_MASK BIT(14)
#define EQDMA_SBE_ERR_DSC_SW_CTXT_MASK BIT(15)
#define EQDMA_SBE_ERR_DSC_CPLI_MASK BIT(16)
#define EQDMA_SBE_ERR_DSC_CPLD_MASK BIT(17)
#define EQDMA_SBE_ERR_MI_TL_SLV_FIFO_RAM_MASK BIT(18)
#define EQDMA_SBE_ERR_TIMER_FIFO_RAM_MASK GENMASK(22, 19)
#define EQDMA_SBE_ERR_QID_FIFO_RAM_MASK BIT(23)
#define EQDMA_SBE_ERR_WRB_COAL_DATA_RAM_MASK BIT(24)
#define EQDMA_SBE_ERR_INT_CTXT_RAM_MASK BIT(25)
#define EQDMA_SBE_ERR_DESC_REQ_FIFO_RAM_MASK BIT(26)
#define EQDMA_SBE_ERR_PFCH_CTXT_RAM_MASK BIT(27)
#define EQDMA_SBE_ERR_WRB_CTXT_RAM_MASK BIT(28)
#define EQDMA_SBE_ERR_PFCH_LL_RAM_MASK BIT(29)
#define EQDMA_SBE_ERR_PEND_FIFO_RAM_MASK BIT(30)
#define EQDMA_SBE_ERR_RC_RRQ_ODD_RAM_MASK BIT(31)
#define EQDMA_SBE_ERR_ALL_MASK 0XFFFFFFFF
#ifdef ARRAY_SIZE
#undef ARRAY_SIZE
#endif
#define ARRAY_SIZE(arr) (sizeof(arr) / \
	sizeof(arr[0]))

#define EQDMA_OFFSET_RAM_DBE_STAT 0XFC
#define EQDMA_OFFSET_RAM_DBE_MASK 0XF8
#define EQDMA_DBE_ERR_MI_H2C0_DAT_MASK BIT(0)
#define EQDMA_DBE_ERR_MI_H2C1_DAT_MASK BIT(1)
#define EQDMA_DBE_ERR_MI_H2C2_DAT_MASK BIT(2)
#define EQDMA_DBE_ERR_MI_H2C3_DAT_MASK BIT(3)
#define EQDMA_DBE_ERR_MI_C2H0_DAT_MASK BIT(4)
#define EQDMA_DBE_ERR_MI_C2H1_DAT_MASK BIT(5)
#define EQDMA_DBE_ERR_MI_C2H2_DAT_MASK BIT(6)
#define EQDMA_DBE_ERR_MI_C2H3_DAT_MASK BIT(7)
#define EQDMA_DBE_ERR_H2C_RD_BRG_DAT_MASK BIT(8)
#define EQDMA_DBE_ERR_H2C_WR_BRG_DAT_MASK BIT(9)
#define EQDMA_DBE_ERR_C2H_RD_BRG_DAT_MASK BIT(10)
#define EQDMA_DBE_ERR_C2H_WR_BRG_DAT_MASK BIT(11)
#define EQDMA_DBE_ERR_FUNC_MAP_MASK BIT(12)
#define EQDMA_DBE_ERR_DSC_HW_CTXT_MASK BIT(13)
#define EQDMA_DBE_ERR_DSC_CRD_RCV_MASK BIT(14)
#define EQDMA_DBE_ERR_DSC_SW_CTXT_MASK BIT(15)
#define EQDMA_DBE_ERR_DSC_CPLI_MASK BIT(16)
#define EQDMA_DBE_ERR_DSC_CPLD_MASK BIT(17)
#define EQDMA_DBE_ERR_MI_TL_SLV_FIFO_RAM_MASK BIT(18)
#define EQDMA_DBE_ERR_TIMER_FIFO_RAM_MASK GENMASK(22, 19)
#define EQDMA_DBE_ERR_QID_FIFO_RAM_MASK BIT(23)
#define EQDMA_DBE_ERR_WRB_COAL_DATA_RAM_MASK BIT(24)
#define EQDMA_DBE_ERR_INT_CTXT_RAM_MASK BIT(25)
#define EQDMA_DBE_ERR_DESC_REQ_FIFO_RAM_MASK BIT(26)
#define EQDMA_DBE_ERR_PFCH_CTXT_RAM_MASK BIT(27)
#define EQDMA_DBE_ERR_WRB_CTXT_RAM_MASK BIT(28)
#define EQDMA_DBE_ERR_PFCH_LL_RAM_MASK BIT(29)
#define EQDMA_DBE_ERR_PEND_FIFO_RAM_MASK BIT(30)
#define EQDMA_DBE_ERR_RC_RRQ_ODD_RAM_MASK BIT(31)
#define EQDMA_DBE_ERR_ALL_MASK 0XFFFFFFFF
#define EQDMA_OFFSET_VF_VERSION 0x5014
#define EQDMA_OFFSET_VF_USER_BAR 0x5018
#define EQDMA_OFFSET_MBOX_BASE_VF 0x5000
#define EQDMA_OFFSET_MBOX_BASE_PF 0x22400

uint32_t eqdma_config_num_regs_get(void);
struct xreg_info *eqdma_config_regs_get(void);
#define EQDMA_CFG_BLK_IDENTIFIER_ADDR 0x00
#define CFG_BLK_IDENTIFIER_MASK GENMASK(31, 20)
#define CFG_BLK_IDENTIFIER_1_MASK GENMASK(19, 16)
#define CFG_BLK_IDENTIFIER_RSVD_1_MASK GENMASK(15, 8)
#define CFG_BLK_IDENTIFIER_VERSION_MASK GENMASK(7, 0)
#define EQDMA_CFG_BLK_PCIE_MAX_PLD_SIZE_ADDR 0x08
#define CFG_BLK_PCIE_MAX_PLD_SIZE_RSVD_1_MASK GENMASK(31, 7)
#define CFG_BLK_PCIE_MAX_PLD_SIZE_PROG_MASK GENMASK(6, 4)
#define CFG_BLK_PCIE_MAX_PLD_SIZE_RSVD_2_MASK BIT(3)
#define CFG_BLK_PCIE_MAX_PLD_SIZE_ISSUED_MASK GENMASK(2, 0)
#define EQDMA_CFG_BLK_PCIE_MAX_READ_REQ_SIZE_ADDR 0x0C
#define CFG_BLK_PCIE_MAX_READ_REQ_SIZE_RSVD_1_MASK GENMASK(31, 7)
#define CFG_BLK_PCIE_MAX_READ_REQ_SIZE_PROG_MASK GENMASK(6, 4)
#define CFG_BLK_PCIE_MAX_READ_REQ_SIZE_RSVD_2_MASK BIT(3)
#define CFG_BLK_PCIE_MAX_READ_REQ_SIZE_ISSUED_MASK GENMASK(2, 0)
#define EQDMA_CFG_BLK_SYSTEM_ID_ADDR 0x10
#define CFG_BLK_SYSTEM_ID_RSVD_1_MASK GENMASK(31, 17)
#define CFG_BLK_SYSTEM_ID_INST_TYPE_MASK BIT(16)
#define CFG_BLK_SYSTEM_ID_MASK GENMASK(15, 0)
#define EQDMA_CFG_BLK_MSIX_ENABLE_ADDR 0x014
#define CFG_BLK_MSIX_ENABLE_MASK GENMASK(31, 0)
#define EQDMA_CFG_PCIE_DATA_WIDTH_ADDR 0x18
#define CFG_PCIE_DATA_WIDTH_RSVD_1_MASK GENMASK(31, 3)
#define CFG_PCIE_DATA_WIDTH_DATAPATH_MASK GENMASK(2, 0)
#define EQDMA_CFG_PCIE_CTL_ADDR 0x1C
#define CFG_PCIE_CTL_RSVD_1_MASK GENMASK(31, 18)
#define CFG_PCIE_CTL_MGMT_AXIL_CTRL_MASK GENMASK(17, 16)
#define CFG_PCIE_CTL_RSVD_2_MASK GENMASK(15, 2)
#define CFG_PCIE_CTL_RRQ_DISABLE_MASK BIT(1)
#define CFG_PCIE_CTL_RELAXED_ORDERING_MASK BIT(0)
#define EQDMA_CFG_BLK_MSI_ENABLE_ADDR 0x20
#define CFG_BLK_MSI_ENABLE_MASK GENMASK(31, 0)
#define EQDMA_CFG_AXI_USER_MAX_PLD_SIZE_ADDR 0x40
#define CFG_AXI_USER_MAX_PLD_SIZE_RSVD_1_MASK GENMASK(31, 7)
#define CFG_AXI_USER_MAX_PLD_SIZE_ISSUED_MASK GENMASK(6, 4)
#define CFG_AXI_USER_MAX_PLD_SIZE_RSVD_2_MASK BIT(3)
#define CFG_AXI_USER_MAX_PLD_SIZE_PROG_MASK GENMASK(2, 0)
#define EQDMA_CFG_AXI_USER_MAX_READ_REQ_SIZE_ADDR 0x44
#define CFG_AXI_USER_MAX_READ_REQ_SIZE_RSVD_1_MASK GENMASK(31, 7)
#define CFG_AXI_USER_MAX_READ_REQ_SIZE_USISSUED_MASK GENMASK(6, 4)
#define CFG_AXI_USER_MAX_READ_REQ_SIZE_RSVD_2_MASK BIT(3)
#define CFG_AXI_USER_MAX_READ_REQ_SIZE_USPROG_MASK GENMASK(2, 0)
#define EQDMA_CFG_BLK_MISC_CTL_ADDR 0x4C
#define CFG_BLK_MISC_CTL_RSVD_1_MASK GENMASK(31, 24)
#define CFG_BLK_MISC_CTL_10B_TAG_EN_MASK BIT(23)
#define CFG_BLK_MISC_CTL_RSVD_2_MASK BIT(22)
#define CFG_BLK_MISC_CTL_AXI_WBK_MASK BIT(21)
#define CFG_BLK_MISC_CTL_AXI_DSC_MASK BIT(20)
#define CFG_BLK_MISC_CTL_NUM_TAG_MASK GENMASK(19, 8)
#define CFG_BLK_MISC_CTL_RSVD_3_MASK GENMASK(7, 5)
#define CFG_BLK_MISC_CTL_RQ_METERING_MULTIPLIER_MASK GENMASK(4, 0)
#define EQDMA_CFG_PL_CRED_CTL_ADDR 0x68
#define CFG_PL_CRED_CTL_RSVD_1_MASK GENMASK(31, 5)
#define CFG_PL_CRED_CTL_SLAVE_CRD_RLS_MASK BIT(4)
#define CFG_PL_CRED_CTL_RSVD_2_MASK GENMASK(3, 1)
#define CFG_PL_CRED_CTL_MASTER_CRD_RST_MASK BIT(0)
#define EQDMA_CFG_BLK_SCRATCH_ADDR 0x80
#define CFG_BLK_SCRATCH_MASK GENMASK(31, 0)
#define EQDMA_CFG_GIC_ADDR 0xA0
#define CFG_GIC_RSVD_1_MASK GENMASK(31, 1)
#define CFG_GIC_GIC_IRQ_MASK BIT(0)
#define EQDMA_RAM_SBE_MSK_1_A_ADDR 0xE0
#define RAM_SBE_MSK_1_A_MASK GENMASK(31, 0)
#define EQDMA_RAM_SBE_STS_1_A_ADDR 0xE4
#define RAM_SBE_STS_1_A_RSVD_MASK GENMASK(31, 5)
#define RAM_SBE_STS_1_A_PFCH_CTXT_CAM_RAM_1_MASK BIT(4)
#define RAM_SBE_STS_1_A_PFCH_CTXT_CAM_RAM_0_MASK BIT(3)
#define RAM_SBE_STS_1_A_TAG_EVEN_RAM_MASK BIT(2)
#define RAM_SBE_STS_1_A_TAG_ODD_RAM_MASK BIT(1)
#define RAM_SBE_STS_1_A_RC_RRQ_EVEN_RAM_MASK BIT(0)
#define EQDMA_RAM_DBE_MSK_1_A_ADDR 0xE8
#define RAM_DBE_MSK_1_A_MASK GENMASK(31, 0)
#define EQDMA_RAM_DBE_STS_1_A_ADDR 0xEC
#define RAM_DBE_STS_1_A_RSVD_MASK GENMASK(31, 5)
#define RAM_DBE_STS_1_A_PFCH_CTXT_CAM_RAM_1_MASK BIT(4)
#define RAM_DBE_STS_1_A_PFCH_CTXT_CAM_RAM_0_MASK BIT(3)
#define RAM_DBE_STS_1_A_TAG_EVEN_RAM_MASK BIT(2)
#define RAM_DBE_STS_1_A_TAG_ODD_RAM_MASK BIT(1)
#define RAM_DBE_STS_1_A_RC_RRQ_EVEN_RAM_MASK BIT(0)
#define EQDMA_RAM_SBE_MSK_A_ADDR 0xF0
#define RAM_SBE_MSK_A_MASK GENMASK(31, 0)
#define EQDMA_RAM_SBE_STS_A_ADDR 0xF4
#define RAM_SBE_STS_A_RC_RRQ_ODD_RAM_MASK BIT(31)
#define RAM_SBE_STS_A_PEND_FIFO_RAM_MASK BIT(30)
#define RAM_SBE_STS_A_PFCH_LL_RAM_MASK BIT(29)
#define RAM_SBE_STS_A_WRB_CTXT_RAM_MASK BIT(28)
#define RAM_SBE_STS_A_PFCH_CTXT_RAM_MASK BIT(27)
#define RAM_SBE_STS_A_DESC_REQ_FIFO_RAM_MASK BIT(26)
#define RAM_SBE_STS_A_INT_CTXT_RAM_MASK BIT(25)
#define RAM_SBE_STS_A_WRB_COAL_DATA_RAM_MASK BIT(24)
#define RAM_SBE_STS_A_QID_FIFO_RAM_MASK BIT(23)
#define RAM_SBE_STS_A_TIMER_FIFO_RAM_MASK GENMASK(22, 19)
#define RAM_SBE_STS_A_MI_TL_SLV_FIFO_RAM_MASK BIT(18)
#define RAM_SBE_STS_A_DSC_CPLD_MASK BIT(17)
#define RAM_SBE_STS_A_DSC_CPLI_MASK BIT(16)
#define RAM_SBE_STS_A_DSC_SW_CTXT_MASK BIT(15)
#define RAM_SBE_STS_A_DSC_CRD_RCV_MASK BIT(14)
#define RAM_SBE_STS_A_DSC_HW_CTXT_MASK BIT(13)
#define RAM_SBE_STS_A_FUNC_MAP_MASK BIT(12)
#define RAM_SBE_STS_A_C2H_WR_BRG_DAT_MASK BIT(11)
#define RAM_SBE_STS_A_C2H_RD_BRG_DAT_MASK BIT(10)
#define RAM_SBE_STS_A_H2C_WR_BRG_DAT_MASK BIT(9)
#define RAM_SBE_STS_A_H2C_RD_BRG_DAT_MASK BIT(8)
#define RAM_SBE_STS_A_MI_C2H3_DAT_MASK BIT(7)
#define RAM_SBE_STS_A_MI_C2H2_DAT_MASK BIT(6)
#define RAM_SBE_STS_A_MI_C2H1_DAT_MASK BIT(5)
#define RAM_SBE_STS_A_MI_C2H0_DAT_MASK BIT(4)
#define RAM_SBE_STS_A_MI_H2C3_DAT_MASK BIT(3)
#define RAM_SBE_STS_A_MI_H2C2_DAT_MASK BIT(2)
#define RAM_SBE_STS_A_MI_H2C1_DAT_MASK BIT(1)
#define RAM_SBE_STS_A_MI_H2C0_DAT_MASK BIT(0)
#define EQDMA_RAM_DBE_MSK_A_ADDR 0xF8
#define RAM_DBE_MSK_A_MASK GENMASK(31, 0)
#define EQDMA_RAM_DBE_STS_A_ADDR 0xFC
#define RAM_DBE_STS_A_RC_RRQ_ODD_RAM_MASK BIT(31)
#define RAM_DBE_STS_A_PEND_FIFO_RAM_MASK BIT(30)
#define RAM_DBE_STS_A_PFCH_LL_RAM_MASK BIT(29)
#define RAM_DBE_STS_A_WRB_CTXT_RAM_MASK BIT(28)
#define RAM_DBE_STS_A_PFCH_CTXT_RAM_MASK BIT(27)
#define RAM_DBE_STS_A_DESC_REQ_FIFO_RAM_MASK BIT(26)
#define RAM_DBE_STS_A_INT_CTXT_RAM_MASK BIT(25)
#define RAM_DBE_STS_A_WRB_COAL_DATA_RAM_MASK BIT(24)
#define RAM_DBE_STS_A_QID_FIFO_RAM_MASK BIT(23)
#define RAM_DBE_STS_A_TIMER_FIFO_RAM_MASK GENMASK(22, 19)
#define RAM_DBE_STS_A_MI_TL_SLV_FIFO_RAM_MASK BIT(18)
#define RAM_DBE_STS_A_DSC_CPLD_MASK BIT(17)
#define RAM_DBE_STS_A_DSC_CPLI_MASK BIT(16)
#define RAM_DBE_STS_A_DSC_SW_CTXT_MASK BIT(15)
#define RAM_DBE_STS_A_DSC_CRD_RCV_MASK BIT(14)
#define RAM_DBE_STS_A_DSC_HW_CTXT_MASK BIT(13)
#define RAM_DBE_STS_A_FUNC_MAP_MASK BIT(12)
#define RAM_DBE_STS_A_C2H_WR_BRG_DAT_MASK BIT(11)
#define RAM_DBE_STS_A_C2H_RD_BRG_DAT_MASK BIT(10)
#define RAM_DBE_STS_A_H2C_WR_BRG_DAT_MASK BIT(9)
#define RAM_DBE_STS_A_H2C_RD_BRG_DAT_MASK BIT(8)
#define RAM_DBE_STS_A_MI_C2H3_DAT_MASK BIT(7)
#define RAM_DBE_STS_A_MI_C2H2_DAT_MASK BIT(6)
#define RAM_DBE_STS_A_MI_C2H1_DAT_MASK BIT(5)
#define RAM_DBE_STS_A_MI_C2H0_DAT_MASK BIT(4)
#define RAM_DBE_STS_A_MI_H2C3_DAT_MASK BIT(3)
#define RAM_DBE_STS_A_MI_H2C2_DAT_MASK BIT(2)
#define RAM_DBE_STS_A_MI_H2C1_DAT_MASK BIT(1)
#define RAM_DBE_STS_A_MI_H2C0_DAT_MASK BIT(0)
#define EQDMA_GLBL2_IDENTIFIER_ADDR 0x100
#define GLBL2_IDENTIFIER_MASK GENMASK(31, 8)
#define GLBL2_IDENTIFIER_VERSION_MASK GENMASK(7, 0)
#define EQDMA_GLBL2_CHANNEL_INST_ADDR 0x114
#define GLBL2_CHANNEL_INST_RSVD_1_MASK GENMASK(31, 18)
#define GLBL2_CHANNEL_INST_C2H_ST_MASK BIT(17)
#define GLBL2_CHANNEL_INST_H2C_ST_MASK BIT(16)
#define GLBL2_CHANNEL_INST_RSVD_2_MASK GENMASK(15, 12)
#define GLBL2_CHANNEL_INST_C2H_ENG_MASK GENMASK(11, 8)
#define GLBL2_CHANNEL_INST_RSVD_3_MASK GENMASK(7, 4)
#define GLBL2_CHANNEL_INST_H2C_ENG_MASK GENMASK(3, 0)
#define EQDMA_GLBL2_CHANNEL_MDMA_ADDR 0x118
#define GLBL2_CHANNEL_MDMA_RSVD_1_MASK GENMASK(31, 18)
#define GLBL2_CHANNEL_MDMA_C2H_ST_MASK BIT(17)
#define GLBL2_CHANNEL_MDMA_H2C_ST_MASK BIT(16)
#define GLBL2_CHANNEL_MDMA_RSVD_2_MASK GENMASK(15, 12)
#define GLBL2_CHANNEL_MDMA_C2H_ENG_MASK GENMASK(11, 8)
#define GLBL2_CHANNEL_MDMA_RSVD_3_MASK GENMASK(7, 4)
#define GLBL2_CHANNEL_MDMA_H2C_ENG_MASK GENMASK(3, 0)
#define EQDMA_GLBL2_CHANNEL_STRM_ADDR 0x11C
#define GLBL2_CHANNEL_STRM_RSVD_1_MASK GENMASK(31, 18)
#define GLBL2_CHANNEL_STRM_C2H_ST_MASK BIT(17)
#define GLBL2_CHANNEL_STRM_H2C_ST_MASK BIT(16)
#define GLBL2_CHANNEL_STRM_RSVD_2_MASK GENMASK(15, 12)
#define GLBL2_CHANNEL_STRM_C2H_ENG_MASK GENMASK(11, 8)
#define GLBL2_CHANNEL_STRM_RSVD_3_MASK GENMASK(7, 4)
#define GLBL2_CHANNEL_STRM_H2C_ENG_MASK GENMASK(3, 0)
#define EQDMA_GLBL2_CHANNEL_CAP_ADDR 0x120
#define GLBL2_CHANNEL_CAP_RSVD_1_MASK GENMASK(31, 12)
#define GLBL2_CHANNEL_CAP_MULTIQ_MAX_MASK GENMASK(11, 0)
#define EQDMA_GLBL2_CHANNEL_PASID_CAP_ADDR 0x128
#define GLBL2_CHANNEL_PASID_CAP_RSVD_1_MASK GENMASK(31, 2)
#define GLBL2_CHANNEL_PASID_CAP_BRIDGEEN_MASK BIT(1)
#define GLBL2_CHANNEL_PASID_CAP_DMAEN_MASK BIT(0)
#define EQDMA_GLBL2_SYSTEM_ID_ADDR 0x130
#define GLBL2_SYSTEM_ID_RSVD_1_MASK GENMASK(31, 16)
#define GLBL2_SYSTEM_ID_MASK GENMASK(15, 0)
#define EQDMA_GLBL2_MISC_CAP_ADDR 0x134
#define GLBL2_MISC_CAP_MASK GENMASK(31, 0)
#define EQDMA_GLBL2_DBG_PCIE_RQ0_ADDR 0x1B8
#define GLBL2_PCIE_RQ0_NPH_AVL_MASK GENMASK(31, 20)
#define GLBL2_PCIE_RQ0_RCB_AVL_MASK GENMASK(19, 9)
#define GLBL2_PCIE_RQ0_SLV_RD_CREDS_MASK GENMASK(8, 2)
#define GLBL2_PCIE_RQ0_TAG_EP_MASK GENMASK(1, 0)
#define EQDMA_GLBL2_DBG_PCIE_RQ1_ADDR 0x1BC
#define GLBL2_PCIE_RQ1_RSVD_1_MASK GENMASK(31, 21)
#define GLBL2_PCIE_RQ1_TAG_FL_MASK GENMASK(20, 19)
#define GLBL2_PCIE_RQ1_WTLP_HEADER_FIFO_FL_MASK BIT(18)
#define GLBL2_PCIE_RQ1_WTLP_HEADER_FIFO_EP_MASK BIT(17)
#define GLBL2_PCIE_RQ1_RQ_FIFO_EP_MASK BIT(16)
#define GLBL2_PCIE_RQ1_RQ_FIFO_FL_MASK BIT(15)
#define GLBL2_PCIE_RQ1_TLPSM_MASK GENMASK(14, 12)
#define GLBL2_PCIE_RQ1_TLPSM512_MASK GENMASK(11, 9)
#define GLBL2_PCIE_RQ1_RREQ_RCB_OK_MASK BIT(8)
#define GLBL2_PCIE_RQ1_RREQ0_SLV_MASK BIT(7)
#define GLBL2_PCIE_RQ1_RREQ0_VLD_MASK BIT(6)
#define GLBL2_PCIE_RQ1_RREQ0_RDY_MASK BIT(5)
#define GLBL2_PCIE_RQ1_RREQ1_SLV_MASK BIT(4)
#define GLBL2_PCIE_RQ1_RREQ1_VLD_MASK BIT(3)
#define GLBL2_PCIE_RQ1_RREQ1_RDY_MASK BIT(2)
#define GLBL2_PCIE_RQ1_WTLP_REQ_MASK BIT(1)
#define GLBL2_PCIE_RQ1_WTLP_STRADDLE_MASK BIT(0)
#define EQDMA_GLBL2_DBG_AXIMM_WR0_ADDR 0x1C0
#define GLBL2_AXIMM_WR0_RSVD_1_MASK GENMASK(31, 27)
#define GLBL2_AXIMM_WR0_WR_REQ_MASK BIT(26)
#define GLBL2_AXIMM_WR0_WR_CHN_MASK GENMASK(25, 23)
#define GLBL2_AXIMM_WR0_WTLP_DATA_FIFO_EP_MASK BIT(22)
#define GLBL2_AXIMM_WR0_WPL_FIFO_EP_MASK BIT(21)
#define GLBL2_AXIMM_WR0_BRSP_CLAIM_CHN_MASK GENMASK(20, 18)
#define GLBL2_AXIMM_WR0_WRREQ_CNT_MASK GENMASK(17, 12)
#define GLBL2_AXIMM_WR0_BID_MASK GENMASK(11, 9)
#define GLBL2_AXIMM_WR0_BVALID_MASK BIT(8)
#define GLBL2_AXIMM_WR0_BREADY_MASK BIT(7)
#define GLBL2_AXIMM_WR0_WVALID_MASK BIT(6)
#define GLBL2_AXIMM_WR0_WREADY_MASK BIT(5)
#define GLBL2_AXIMM_WR0_AWID_MASK GENMASK(4, 2)
#define GLBL2_AXIMM_WR0_AWVALID_MASK BIT(1)
#define GLBL2_AXIMM_WR0_AWREADY_MASK BIT(0)
#define EQDMA_GLBL2_DBG_AXIMM_WR1_ADDR 0x1C4
#define GLBL2_AXIMM_WR1_RSVD_1_MASK GENMASK(31, 30)
#define GLBL2_AXIMM_WR1_BRSP_CNT4_MASK GENMASK(29, 24)
#define GLBL2_AXIMM_WR1_BRSP_CNT3_MASK GENMASK(23, 18)
#define GLBL2_AXIMM_WR1_BRSP_CNT2_MASK GENMASK(17, 12)
#define GLBL2_AXIMM_WR1_BRSP_CNT1_MASK GENMASK(11, 6)
#define GLBL2_AXIMM_WR1_BRSP_CNT0_MASK GENMASK(5, 0)
#define EQDMA_GLBL2_DBG_AXIMM_RD0_ADDR 0x1C8
#define GLBL2_AXIMM_RD0_RSVD_1_MASK GENMASK(31, 23)
#define GLBL2_AXIMM_RD0_PND_CNT_MASK GENMASK(22, 17)
#define GLBL2_AXIMM_RD0_RD_REQ_MASK BIT(16)
#define GLBL2_AXIMM_RD0_RD_CHNL_MASK GENMASK(15, 13)
#define GLBL2_AXIMM_RD0_RRSP_CLAIM_CHNL_MASK GENMASK(12, 10)
#define GLBL2_AXIMM_RD0_RID_MASK GENMASK(9, 7)
#define GLBL2_AXIMM_RD0_RVALID_MASK BIT(6)
#define GLBL2_AXIMM_RD0_RREADY_MASK BIT(5)
#define GLBL2_AXIMM_RD0_ARID_MASK GENMASK(4, 2)
#define GLBL2_AXIMM_RD0_ARVALID_MASK BIT(1)
#define GLBL2_AXIMM_RD0_ARREADY_MASK BIT(0)
#define EQDMA_GLBL2_DBG_AXIMM_RD1_ADDR 0x1CC
#define GLBL2_AXIMM_RD1_RSVD_1_MASK GENMASK(31, 30)
#define GLBL2_AXIMM_RD1_RRSP_CNT4_MASK GENMASK(29, 24)
#define GLBL2_AXIMM_RD1_RRSP_CNT3_MASK GENMASK(23, 18)
#define GLBL2_AXIMM_RD1_RRSP_CNT2_MASK GENMASK(17, 12)
#define GLBL2_AXIMM_RD1_RRSP_CNT1_MASK GENMASK(11, 6)
#define GLBL2_AXIMM_RD1_RRSP_CNT0_MASK GENMASK(5, 0)
#define EQDMA_GLBL2_DBG_FAB0_ADDR 0x1D0
#define GLBL2_FAB0_H2C_INB_CONV_IN_VLD_MASK BIT(31)
#define GLBL2_FAB0_H2C_INB_CONV_IN_RDY_MASK BIT(30)
#define GLBL2_FAB0_H2C_SEG_IN_VLD_MASK BIT(29)
#define GLBL2_FAB0_H2C_SEG_IN_RDY_MASK BIT(28)
#define GLBL2_FAB0_H2C_SEG_OUT_VLD_MASK GENMASK(27, 24)
#define GLBL2_FAB0_H2C_SEG_OUT_RDY_MASK BIT(23)
#define GLBL2_FAB0_H2C_MST_CRDT_STAT_MASK GENMASK(22, 16)
#define GLBL2_FAB0_C2H_SLV_AFIFO_FULL_MASK BIT(15)
#define GLBL2_FAB0_C2H_SLV_AFIFO_EMPTY_MASK BIT(14)
#define GLBL2_FAB0_C2H_DESEG_SEG_VLD_MASK GENMASK(13, 10)
#define GLBL2_FAB0_C2H_DESEG_SEG_RDY_MASK BIT(9)
#define GLBL2_FAB0_C2H_DESEG_OUT_VLD_MASK BIT(8)
#define GLBL2_FAB0_C2H_DESEG_OUT_RDY_MASK BIT(7)
#define GLBL2_FAB0_C2H_INB_DECONV_OUT_VLD_MASK BIT(6)
#define GLBL2_FAB0_C2H_INB_DECONV_OUT_RDY_MASK BIT(5)
#define GLBL2_FAB0_C2H_DSC_CRDT_AFIFO_FULL_MASK BIT(4)
#define GLBL2_FAB0_C2H_DSC_CRDT_AFIFO_EMPTY_MASK BIT(3)
#define GLBL2_FAB0_IRQ_IN_AFIFO_FULL_MASK BIT(2)
#define GLBL2_FAB0_IRQ_IN_AFIFO_EMPTY_MASK BIT(1)
#define GLBL2_FAB0_IMM_CRD_AFIFO_EMPTY_MASK BIT(0)
#define EQDMA_GLBL2_DBG_FAB1_ADDR 0x1D4
#define GLBL2_FAB1_BYP_OUT_CRDT_STAT_MASK GENMASK(31, 25)
#define GLBL2_FAB1_TM_DSC_STS_CRDT_STAT_MASK GENMASK(24, 18)
#define GLBL2_FAB1_C2H_CMN_AFIFO_FULL_MASK BIT(17)
#define GLBL2_FAB1_C2H_CMN_AFIFO_EMPTY_MASK BIT(16)
#define GLBL2_FAB1_RSVD_1_MASK GENMASK(15, 13)
#define GLBL2_FAB1_C2H_BYP_IN_AFIFO_FULL_MASK BIT(12)
#define GLBL2_FAB1_RSVD_2_MASK GENMASK(11, 9)
#define GLBL2_FAB1_C2H_BYP_IN_AFIFO_EMPTY_MASK BIT(8)
#define GLBL2_FAB1_RSVD_3_MASK GENMASK(7, 5)
#define GLBL2_FAB1_H2C_BYP_IN_AFIFO_FULL_MASK BIT(4)
#define GLBL2_FAB1_RSVD_4_MASK GENMASK(3, 1)
#define GLBL2_FAB1_H2C_BYP_IN_AFIFO_EMPTY_MASK BIT(0)
#define EQDMA_GLBL2_DBG_MATCH_SEL_ADDR 0x1F4
#define GLBL2_MATCH_SEL_RSV_MASK GENMASK(31, 18)
#define GLBL2_MATCH_SEL_CSR_SEL_MASK GENMASK(17, 13)
#define GLBL2_MATCH_SEL_CSR_EN_MASK BIT(12)
#define GLBL2_MATCH_SEL_ROTATE1_MASK GENMASK(11, 10)
#define GLBL2_MATCH_SEL_ROTATE0_MASK GENMASK(9, 8)
#define GLBL2_MATCH_SEL_SEL_MASK GENMASK(7, 0)
#define EQDMA_GLBL2_DBG_MATCH_MSK_ADDR 0x1F8
#define GLBL2_MATCH_MSK_MASK GENMASK(31, 0)
#define EQDMA_GLBL2_DBG_MATCH_PAT_ADDR 0x1FC
#define GLBL2_MATCH_PAT_PATTERN_MASK GENMASK(31, 0)
#define EQDMA_GLBL_RNG_SZ_1_ADDR 0x204
#define GLBL_RNG_SZ_1_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_1_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_2_ADDR 0x208
#define GLBL_RNG_SZ_2_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_2_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_3_ADDR 0x20C
#define GLBL_RNG_SZ_3_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_3_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_4_ADDR 0x210
#define GLBL_RNG_SZ_4_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_4_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_5_ADDR 0x214
#define GLBL_RNG_SZ_5_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_5_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_6_ADDR 0x218
#define GLBL_RNG_SZ_6_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_6_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_7_ADDR 0x21C
#define GLBL_RNG_SZ_7_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_7_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_8_ADDR 0x220
#define GLBL_RNG_SZ_8_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_8_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_9_ADDR 0x224
#define GLBL_RNG_SZ_9_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_9_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_A_ADDR 0x228
#define GLBL_RNG_SZ_A_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_A_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_B_ADDR 0x22C
#define GLBL_RNG_SZ_B_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_B_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_C_ADDR 0x230
#define GLBL_RNG_SZ_C_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_C_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_D_ADDR 0x234
#define GLBL_RNG_SZ_D_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_D_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_E_ADDR 0x238
#define GLBL_RNG_SZ_E_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_E_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_F_ADDR 0x23C
#define GLBL_RNG_SZ_F_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_F_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_10_ADDR 0x240
#define GLBL_RNG_SZ_10_RSVD_1_MASK GENMASK(31, 16)
#define GLBL_RNG_SZ_10_RING_SIZE_MASK GENMASK(15, 0)
#define EQDMA_GLBL_ERR_STAT_ADDR 0x248
#define GLBL_ERR_STAT_RSVD_1_MASK GENMASK(31, 18)
#define GLBL_ERR_STAT_ERR_FAB_MASK BIT(17)
#define GLBL_ERR_STAT_ERR_H2C_ST_MASK BIT(16)
#define GLBL_ERR_STAT_ERR_BDG_MASK BIT(15)
#define GLBL_ERR_STAT_IND_CTXT_CMD_ERR_MASK GENMASK(14, 9)
#define GLBL_ERR_STAT_ERR_C2H_ST_MASK BIT(8)
#define GLBL_ERR_STAT_ERR_C2H_MM_1_MASK BIT(7)
#define GLBL_ERR_STAT_ERR_C2H_MM_0_MASK BIT(6)
#define GLBL_ERR_STAT_ERR_H2C_MM_1_MASK BIT(5)
#define GLBL_ERR_STAT_ERR_H2C_MM_0_MASK BIT(4)
#define GLBL_ERR_STAT_ERR_TRQ_MASK BIT(3)
#define GLBL_ERR_STAT_ERR_DSC_MASK BIT(2)
#define GLBL_ERR_STAT_ERR_RAM_DBE_MASK BIT(1)
#define GLBL_ERR_STAT_ERR_RAM_SBE_MASK BIT(0)
#define EQDMA_GLBL_ERR_MASK_ADDR 0x24C
#define GLBL_ERR_MASK GENMASK(31, 0)
#define EQDMA_GLBL_DSC_CFG_ADDR 0x250
#define GLBL_DSC_CFG_RSVD_1_MASK GENMASK(31, 10)
#define GLBL_DSC_CFG_UNC_OVR_COR_MASK BIT(9)
#define GLBL_DSC_CFG_CTXT_FER_DIS_MASK BIT(8)
#define GLBL_DSC_CFG_RSVD_2_MASK GENMASK(7, 6)
#define GLBL_DSC_CFG_MAXFETCH_MASK GENMASK(5, 3)
#define GLBL_DSC_CFG_WB_ACC_INT_MASK GENMASK(2, 0)
#define EQDMA_GLBL_DSC_ERR_STS_ADDR 0x254
#define GLBL_DSC_ERR_STS_RSVD_1_MASK GENMASK(31, 26)
#define GLBL_DSC_ERR_STS_PORT_ID_MASK BIT(25)
#define GLBL_DSC_ERR_STS_SBE_MASK BIT(24)
#define GLBL_DSC_ERR_STS_DBE_MASK BIT(23)
#define GLBL_DSC_ERR_STS_RQ_CANCEL_MASK BIT(22)
#define GLBL_DSC_ERR_STS_DSC_MASK BIT(21)
#define GLBL_DSC_ERR_STS_DMA_MASK BIT(20)
#define GLBL_DSC_ERR_STS_FLR_CANCEL_MASK BIT(19)
#define GLBL_DSC_ERR_STS_RSVD_2_MASK GENMASK(18, 17)
#define GLBL_DSC_ERR_STS_DAT_POISON_MASK BIT(16)
#define GLBL_DSC_ERR_STS_TIMEOUT_MASK BIT(9)
#define GLBL_DSC_ERR_STS_FLR_MASK BIT(8)
#define GLBL_DSC_ERR_STS_TAG_MASK BIT(6)
#define GLBL_DSC_ERR_STS_ADDR_MASK BIT(5)
#define GLBL_DSC_ERR_STS_PARAM_MASK BIT(4)
#define GLBL_DSC_ERR_STS_BCNT_MASK BIT(3)
#define GLBL_DSC_ERR_STS_UR_CA_MASK BIT(2)
#define GLBL_DSC_ERR_STS_POISON_MASK BIT(1)
#define EQDMA_GLBL_DSC_ERR_MSK_ADDR 0x258
#define GLBL_DSC_ERR_MSK_MASK GENMASK(31, 0)
#define EQDMA_GLBL_DSC_ERR_LOG0_ADDR 0x25C
#define GLBL_DSC_ERR_LOG0_VALID_MASK BIT(31)
#define GLBL_DSC_ERR_LOG0_SEL_MASK BIT(30)
#define GLBL_DSC_ERR_LOG0_RSVD_1_MASK GENMASK(29, 13)
#define GLBL_DSC_ERR_LOG0_QID_MASK GENMASK(12, 0)
#define EQDMA_GLBL_DSC_ERR_LOG1_ADDR 0x260
#define GLBL_DSC_ERR_LOG1_RSVD_1_MASK GENMASK(31, 28)
#define GLBL_DSC_ERR_LOG1_CIDX_MASK GENMASK(27, 12)
#define GLBL_DSC_ERR_LOG1_RSVD_2_MASK GENMASK(11, 9)
#define GLBL_DSC_ERR_LOG1_SUB_TYPE_MASK GENMASK(8, 5)
#define GLBL_DSC_ERR_LOG1_ERR_TYPE_MASK GENMASK(4, 0)
#define EQDMA_GLBL_TRQ_ERR_STS_ADDR 0x264
#define GLBL_TRQ_ERR_STS_RSVD_1_MASK GENMASK(31, 8)
#define GLBL_TRQ_ERR_STS_TCP_QSPC_TIMEOUT_MASK BIT(7)
#define GLBL_TRQ_ERR_STS_RSVD_2_MASK BIT(6)
#define GLBL_TRQ_ERR_STS_QID_RANGE_MASK BIT(5)
#define GLBL_TRQ_ERR_STS_QSPC_UNMAPPED_MASK BIT(4)
#define GLBL_TRQ_ERR_STS_TCP_CSR_TIMEOUT_MASK BIT(3)
#define GLBL_TRQ_ERR_STS_RSVD_3_MASK BIT(2)
#define GLBL_TRQ_ERR_STS_VF_ACCESS_ERR_MASK BIT(1)
#define GLBL_TRQ_ERR_STS_CSR_UNMAPPED_MASK BIT(0)
#define EQDMA_GLBL_TRQ_ERR_MSK_ADDR 0x268
#define GLBL_TRQ_ERR_MSK_MASK GENMASK(31, 0)
#define EQDMA_GLBL_TRQ_ERR_LOG_ADDR 0x26C
#define GLBL_TRQ_ERR_LOG_SRC_MASK BIT(31)
#define GLBL_TRQ_ERR_LOG_TARGET_MASK GENMASK(30, 27)
#define GLBL_TRQ_ERR_LOG_FUNC_MASK GENMASK(26, 17)
#define GLBL_TRQ_ERR_LOG_ADDRESS_MASK GENMASK(16, 0)
#define EQDMA_GLBL_DSC_DBG_DAT0_ADDR 0x270
#define GLBL_DSC_DAT0_RSVD_1_MASK GENMASK(31, 30)
#define GLBL_DSC_DAT0_CTXT_ARB_DIR_MASK BIT(29)
#define GLBL_DSC_DAT0_CTXT_ARB_QID_MASK GENMASK(28, 17)
#define GLBL_DSC_DAT0_CTXT_ARB_REQ_MASK GENMASK(16, 12)
#define GLBL_DSC_DAT0_IRQ_FIFO_FL_MASK BIT(11)
#define GLBL_DSC_DAT0_TMSTALL_MASK BIT(10)
#define GLBL_DSC_DAT0_RRQ_STALL_MASK GENMASK(9, 8)
#define GLBL_DSC_DAT0_RCP_FIFO_SPC_STALL_MASK GENMASK(7, 6)
#define GLBL_DSC_DAT0_RRQ_FIFO_SPC_STALL_MASK GENMASK(5, 4)
#define GLBL_DSC_DAT0_FAB_MRKR_RSP_STALL_MASK GENMASK(3, 2)
#define GLBL_DSC_DAT0_DSC_OUT_STALL_MASK GENMASK(1, 0)
#define EQDMA_GLBL_DSC_DBG_DAT1_ADDR 0x274
#define GLBL_DSC_DAT1_RSVD_1_MASK GENMASK(31, 28)
#define GLBL_DSC_DAT1_EVT_SPC_C2H_MASK GENMASK(27, 22)
#define GLBL_DSC_DAT1_EVT_SP_H2C_MASK GENMASK(21, 16)
#define GLBL_DSC_DAT1_DSC_SPC_C2H_MASK GENMASK(15, 8)
#define GLBL_DSC_DAT1_DSC_SPC_H2C_MASK GENMASK(7, 0)
#define EQDMA_GLBL_DSC_DBG_CTL_ADDR 0x278
#define GLBL_DSC_CTL_RSVD_1_MASK GENMASK(31, 3)
#define GLBL_DSC_CTL_SELECT_MASK GENMASK(2, 0)
#define EQDMA_GLBL_DSC_ERR_LOG2_ADDR 0x27c
#define GLBL_DSC_ERR_LOG2_OLD_PIDX_MASK GENMASK(31, 16)
#define GLBL_DSC_ERR_LOG2_NEW_PIDX_MASK GENMASK(15, 0)
#define EQDMA_GLBL_GLBL_INTERRUPT_CFG_ADDR 0x2c4
#define GLBL_GLBL_INTERRUPT_CFG_RSVD_1_MASK GENMASK(31, 2)
#define GLBL_GLBL_INTERRUPT_CFG_LGCY_INTR_PENDING_MASK BIT(1)
#define GLBL_GLBL_INTERRUPT_CFG_EN_LGCY_INTR_MASK BIT(0)
#define EQDMA_GLBL_VCH_HOST_PROFILE_ADDR 0x2c8
#define GLBL_VCH_HOST_PROFILE_RSVD_1_MASK GENMASK(31, 28)
#define GLBL_VCH_HOST_PROFILE_2C_MM_MASK GENMASK(27, 24)
#define GLBL_VCH_HOST_PROFILE_2C_ST_MASK GENMASK(23, 20)
#define GLBL_VCH_HOST_PROFILE_VCH_DSC_MASK GENMASK(19, 16)
#define GLBL_VCH_HOST_PROFILE_VCH_INT_MSG_MASK GENMASK(15, 12)
#define GLBL_VCH_HOST_PROFILE_VCH_INT_AGGR_MASK GENMASK(11, 8)
#define GLBL_VCH_HOST_PROFILE_VCH_CMPT_MASK GENMASK(7, 4)
#define GLBL_VCH_HOST_PROFILE_VCH_C2H_PLD_MASK GENMASK(3, 0)
#define EQDMA_GLBL_BRIDGE_HOST_PROFILE_ADDR 0x308
#define GLBL_BRIDGE_HOST_PROFILE_RSVD_1_MASK GENMASK(31, 4)
#define GLBL_BRIDGE_HOST_PROFILE_BDGID_MASK GENMASK(3, 0)
#define EQDMA_AXIMM_IRQ_DEST_ADDR_ADDR 0x30c
#define AXIMM_IRQ_DEST_ADDR_ADDR_MASK GENMASK(31, 0)
#define EQDMA_FAB_ERR_LOG_ADDR 0x314
#define FAB_ERR_LOG_RSVD_1_MASK GENMASK(31, 7)
#define FAB_ERR_LOG_SRC_MASK GENMASK(6, 0)
#define EQDMA_GLBL_REQ_ERR_STS_ADDR 0x318
#define GLBL_REQ_ERR_STS_RSVD_1_MASK GENMASK(31, 11)
#define GLBL_REQ_ERR_STS_RC_DISCONTINUE_MASK BIT(10)
#define GLBL_REQ_ERR_STS_RC_PRTY_MASK BIT(9)
#define GLBL_REQ_ERR_STS_RC_FLR_MASK BIT(8)
#define GLBL_REQ_ERR_STS_RC_TIMEOUT_MASK BIT(7)
#define GLBL_REQ_ERR_STS_RC_INV_BCNT_MASK BIT(6)
#define GLBL_REQ_ERR_STS_RC_INV_TAG_MASK BIT(5)
#define GLBL_REQ_ERR_STS_RC_START_ADDR_MISMCH_MASK BIT(4)
#define GLBL_REQ_ERR_STS_RC_RID_TC_ATTR_MISMCH_MASK BIT(3)
#define GLBL_REQ_ERR_STS_RC_NO_DATA_MASK BIT(2)
#define GLBL_REQ_ERR_STS_RC_UR_CA_CRS_MASK BIT(1)
#define GLBL_REQ_ERR_STS_RC_POISONED_MASK BIT(0)
#define EQDMA_GLBL_REQ_ERR_MSK_ADDR 0x31C
#define GLBL_REQ_ERR_MSK_MASK GENMASK(31, 0)
#define EQDMA_IND_CTXT_DATA_ADDR 0x804
#define IND_CTXT_DATA_DATA_MASK GENMASK(31, 0)
#define EQDMA_IND_CTXT_MASK_ADDR 0x824
#define IND_CTXT_MASK GENMASK(31, 0)
#define EQDMA_IND_CTXT_CMD_ADDR 0x844
#define IND_CTXT_CMD_RSVD_1_MASK GENMASK(31, 20)
#define IND_CTXT_CMD_QID_MASK GENMASK(19, 7)
#define IND_CTXT_CMD_OP_MASK GENMASK(6, 5)
#define IND_CTXT_CMD_SEL_MASK GENMASK(4, 1)
#define IND_CTXT_CMD_BUSY_MASK BIT(0)
#define EQDMA_C2H_TIMER_CNT_ADDR 0xA00
#define C2H_TIMER_CNT_RSVD_1_MASK GENMASK(31, 16)
#define C2H_TIMER_CNT_MASK GENMASK(15, 0)
#define EQDMA_C2H_CNT_TH_ADDR 0xA40
#define C2H_CNT_TH_RSVD_1_MASK GENMASK(31, 16)
#define C2H_CNT_TH_THESHOLD_CNT_MASK GENMASK(15, 0)
#define EQDMA_C2H_STAT_S_AXIS_C2H_ACCEPTED_ADDR 0xA88
#define C2H_STAT_S_AXIS_C2H_ACCEPTED_MASK GENMASK(31, 0)
#define EQDMA_C2H_STAT_S_AXIS_WRB_ACCEPTED_ADDR 0xA8C
#define C2H_STAT_S_AXIS_WRB_ACCEPTED_MASK GENMASK(31, 0)
#define EQDMA_C2H_STAT_DESC_RSP_PKT_ACCEPTED_ADDR 0xA90
#define C2H_STAT_DESC_RSP_PKT_ACCEPTED_D_MASK GENMASK(31, 0)
#define EQDMA_C2H_STAT_AXIS_PKG_CMP_ADDR 0xA94
#define C2H_STAT_AXIS_PKG_CMP_MASK GENMASK(31, 0)
#define EQDMA_C2H_STAT_DESC_RSP_ACCEPTED_ADDR 0xA98
#define C2H_STAT_DESC_RSP_ACCEPTED_D_MASK GENMASK(31, 0)
#define EQDMA_C2H_STAT_DESC_RSP_CMP_ADDR 0xA9C
#define C2H_STAT_DESC_RSP_CMP_D_MASK GENMASK(31, 0)
#define EQDMA_C2H_STAT_WRQ_OUT_ADDR 0xAA0
#define C2H_STAT_WRQ_OUT_MASK GENMASK(31, 0)
#define EQDMA_C2H_STAT_WPL_REN_ACCEPTED_ADDR 0xAA4
#define C2H_STAT_WPL_REN_ACCEPTED_MASK GENMASK(31, 0)
#define EQDMA_C2H_STAT_TOTAL_WRQ_LEN_ADDR 0xAA8
#define C2H_STAT_TOTAL_WRQ_LEN_MASK GENMASK(31, 0)
#define EQDMA_C2H_STAT_TOTAL_WPL_LEN_ADDR 0xAAC
#define C2H_STAT_TOTAL_WPL_LEN_MASK GENMASK(31, 0)
#define EQDMA_C2H_BUF_SZ_ADDR 0xAB0
#define C2H_BUF_SZ_IZE_MASK GENMASK(31, 0)
#define EQDMA_C2H_ERR_STAT_ADDR 0xAF0
#define C2H_ERR_STAT_RSVD_1_MASK GENMASK(31, 21)
#define C2H_ERR_STAT_WRB_PORT_ID_ERR_MASK BIT(20)
#define C2H_ERR_STAT_HDR_PAR_ERR_MASK BIT(19)
#define C2H_ERR_STAT_HDR_ECC_COR_ERR_MASK BIT(18)
#define C2H_ERR_STAT_HDR_ECC_UNC_ERR_MASK BIT(17)
#define C2H_ERR_STAT_AVL_RING_DSC_ERR_MASK BIT(16)
#define C2H_ERR_STAT_WRB_PRTY_ERR_MASK BIT(15)
#define C2H_ERR_STAT_WRB_CIDX_ERR_MASK BIT(14)
#define C2H_ERR_STAT_WRB_QFULL_ERR_MASK BIT(13)
#define C2H_ERR_STAT_WRB_INV_Q_ERR_MASK BIT(12)
#define C2H_ERR_STAT_RSVD_2_MASK BIT(11)
#define C2H_ERR_STAT_PORT_ID_CTXT_MISMATCH_MASK BIT(10)
#define C2H_ERR_STAT_ERR_DESC_CNT_MASK BIT(9)
#define C2H_ERR_STAT_RSVD_3_MASK BIT(8)
#define C2H_ERR_STAT_MSI_INT_FAIL_MASK BIT(7)
#define C2H_ERR_STAT_ENG_WPL_DATA_PAR_ERR_MASK BIT(6)
#define C2H_ERR_STAT_RSVD_4_MASK BIT(5)
#define C2H_ERR_STAT_DESC_RSP_ERR_MASK BIT(4)
#define C2H_ERR_STAT_QID_MISMATCH_MASK BIT(3)
#define C2H_ERR_STAT_SH_CMPT_DSC_ERR_MASK BIT(2)
#define C2H_ERR_STAT_LEN_MISMATCH_MASK BIT(1)
#define C2H_ERR_STAT_MTY_MISMATCH_MASK BIT(0)
#define EQDMA_C2H_ERR_MASK_ADDR 0xAF4
#define C2H_ERR_EN_MASK GENMASK(31, 0)
#define EQDMA_C2H_FATAL_ERR_STAT_ADDR 0xAF8
#define C2H_FATAL_ERR_STAT_RSVD_1_MASK GENMASK(31, 21)
#define C2H_FATAL_ERR_STAT_HDR_ECC_UNC_ERR_MASK BIT(20)
#define C2H_FATAL_ERR_STAT_AVL_RING_FIFO_RAM_RDBE_MASK BIT(19)
#define C2H_FATAL_ERR_STAT_WPL_DATA_PAR_ERR_MASK BIT(18)
#define C2H_FATAL_ERR_STAT_PLD_FIFO_RAM_RDBE_MASK BIT(17)
#define C2H_FATAL_ERR_STAT_QID_FIFO_RAM_RDBE_MASK BIT(16)
#define C2H_FATAL_ERR_STAT_CMPT_FIFO_RAM_RDBE_MASK BIT(15)
#define C2H_FATAL_ERR_STAT_WRB_COAL_DATA_RAM_RDBE_MASK BIT(14)
#define C2H_FATAL_ERR_STAT_RESERVED2_MASK BIT(13)
#define C2H_FATAL_ERR_STAT_INT_CTXT_RAM_RDBE_MASK BIT(12)
#define C2H_FATAL_ERR_STAT_DESC_REQ_FIFO_RAM_RDBE_MASK BIT(11)
#define C2H_FATAL_ERR_STAT_PFCH_CTXT_RAM_RDBE_MASK BIT(10)
#define C2H_FATAL_ERR_STAT_WRB_CTXT_RAM_RDBE_MASK BIT(9)
#define C2H_FATAL_ERR_STAT_PFCH_LL_RAM_RDBE_MASK BIT(8)
#define C2H_FATAL_ERR_STAT_TIMER_FIFO_RAM_RDBE_MASK GENMASK(7, 4)
#define C2H_FATAL_ERR_STAT_QID_MISMATCH_MASK BIT(3)
#define C2H_FATAL_ERR_STAT_RESERVED1_MASK BIT(2)
#define C2H_FATAL_ERR_STAT_LEN_MISMATCH_MASK BIT(1)
#define C2H_FATAL_ERR_STAT_MTY_MISMATCH_MASK BIT(0)
#define EQDMA_C2H_FATAL_ERR_MASK_ADDR 0xAFC
#define C2H_FATAL_ERR_C2HEN_MASK GENMASK(31, 0)
#define EQDMA_C2H_FATAL_ERR_ENABLE_ADDR 0xB00
#define C2H_FATAL_ERR_ENABLE_RSVD_1_MASK GENMASK(31, 2)
#define C2H_FATAL_ERR_ENABLE_WPL_PAR_INV_MASK BIT(1)
#define C2H_FATAL_ERR_ENABLE_WRQ_DIS_MASK BIT(0)
#define EQDMA_GLBL_ERR_INT_ADDR 0xB04
#define GLBL_ERR_INT_RSVD_1_MASK GENMASK(31, 30)
#define GLBL_ERR_INT_HOST_ID_MASK GENMASK(29, 26)
#define GLBL_ERR_INT_DIS_INTR_ON_VF_MASK BIT(25)
#define GLBL_ERR_INT_ARM_MASK BIT(24)
#define GLBL_ERR_INT_EN_COAL_MASK BIT(23)
#define GLBL_ERR_INT_VEC_MASK GENMASK(22, 12)
#define GLBL_ERR_INT_FUNC_MASK GENMASK(11, 0)
#define EQDMA_C2H_PFCH_CFG_ADDR 0xB08
#define C2H_PFCH_CFG_EVTFL_TH_MASK GENMASK(31, 16)
#define C2H_PFCH_CFG_FL_TH_MASK GENMASK(15, 0)
#define EQDMA_C2H_PFCH_CFG_1_ADDR 0xA80
#define C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK GENMASK(31, 16)
#define C2H_PFCH_CFG_1_QCNT_MASK GENMASK(15, 0)
#define EQDMA_C2H_PFCH_CFG_2_ADDR 0xA84
#define C2H_PFCH_CFG_2_FENCE_MASK BIT(31)
#define C2H_PFCH_CFG_2_RSVD_MASK GENMASK(30, 29)
#define C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_MASK BIT(28)
#define C2H_PFCH_CFG_2_LL_SZ_TH_MASK GENMASK(27, 12)
#define C2H_PFCH_CFG_2_VAR_DESC_NUM_MASK GENMASK(11, 6)
#define C2H_PFCH_CFG_2_NUM_MASK GENMASK(5, 0)
#define EQDMA_C2H_INT_TIMER_TICK_ADDR 0xB0C
#define C2H_INT_TIMER_TICK_MASK GENMASK(31, 0)
#define EQDMA_C2H_STAT_DESC_RSP_DROP_ACCEPTED_ADDR 0xB10
#define C2H_STAT_DESC_RSP_DROP_ACCEPTED_D_MASK GENMASK(31, 0)
#define EQDMA_C2H_STAT_DESC_RSP_ERR_ACCEPTED_ADDR 0xB14
#define C2H_STAT_DESC_RSP_ERR_ACCEPTED_D_MASK GENMASK(31, 0)
#define EQDMA_C2H_STAT_DESC_REQ_ADDR 0xB18
#define C2H_STAT_DESC_REQ_MASK GENMASK(31, 0)
#define EQDMA_C2H_STAT_DBG_DMA_ENG_0_ADDR 0xB1C
#define C2H_STAT_DMA_ENG_0_S_AXIS_C2H_TVALID_MASK BIT(31)
#define C2H_STAT_DMA_ENG_0_S_AXIS_C2H_TREADY_MASK BIT(30)
#define C2H_STAT_DMA_ENG_0_S_AXIS_WRB_TVALID_MASK GENMASK(29, 27)
#define C2H_STAT_DMA_ENG_0_S_AXIS_WRB_TREADY_MASK GENMASK(26, 24)
#define C2H_STAT_DMA_ENG_0_PLD_FIFO_IN_RDY_MASK BIT(23)
#define C2H_STAT_DMA_ENG_0_QID_FIFO_IN_RDY_MASK BIT(22)
#define C2H_STAT_DMA_ENG_0_ARB_FIFO_OUT_VLD_MASK BIT(21)
#define C2H_STAT_DMA_ENG_0_ARB_FIFO_OUT_QID_MASK GENMASK(20, 9)
#define C2H_STAT_DMA_ENG_0_WRB_FIFO_IN_RDY_MASK BIT(8)
#define C2H_STAT_DMA_ENG_0_WRB_FIFO_OUT_CNT_MASK GENMASK(7, 5)
#define C2H_STAT_DMA_ENG_0_WRB_SM_CS_MASK BIT(4)
#define C2H_STAT_DMA_ENG_0_MAIN_SM_CS_MASK GENMASK(3, 0)
#define EQDMA_C2H_STAT_DBG_DMA_ENG_1_ADDR 0xB20
#define C2H_STAT_DMA_ENG_1_RSVD_1_MASK GENMASK(31, 29)
#define C2H_STAT_DMA_ENG_1_QID_FIFO_OUT_CNT_MASK GENMASK(28, 18)
#define C2H_STAT_DMA_ENG_1_PLD_FIFO_OUT_CNT_MASK GENMASK(17, 7)
#define C2H_STAT_DMA_ENG_1_PLD_ST_FIFO_CNT_MASK GENMASK(6, 0)
#define EQDMA_C2H_STAT_DBG_DMA_ENG_2_ADDR 0xB24
#define C2H_STAT_DMA_ENG_2_RSVD_1_MASK GENMASK(31, 29)
#define C2H_STAT_DMA_ENG_2_QID_FIFO_OUT_CNT_MASK GENMASK(28, 18)
#define C2H_STAT_DMA_ENG_2_PLD_FIFO_OUT_CNT_MASK GENMASK(17, 7)
#define C2H_STAT_DMA_ENG_2_PLD_ST_FIFO_CNT_MASK GENMASK(6, 0)
#define EQDMA_C2H_STAT_DBG_DMA_ENG_3_ADDR 0xB28
#define C2H_STAT_DMA_ENG_3_RSVD_1_MASK GENMASK(31, 24)
#define C2H_STAT_DMA_ENG_3_WRQ_FIFO_OUT_CNT_MASK GENMASK(23, 19)
#define C2H_STAT_DMA_ENG_3_QID_FIFO_OUT_VLD_MASK BIT(18)
#define C2H_STAT_DMA_ENG_3_PLD_FIFO_OUT_VLD_MASK BIT(17)
#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_VLD_MASK BIT(16)
#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_EOP_MASK BIT(15)
#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_AVL_IDX_ENABLE_MASK BIT(14)
#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_DROP_MASK BIT(13)
#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_ERR_MASK BIT(12)
#define C2H_STAT_DMA_ENG_3_DESC_CNT_FIFO_IN_RDY_MASK BIT(11)
#define C2H_STAT_DMA_ENG_3_DESC_RSP_FIFO_IN_RDY_MASK BIT(10)
#define C2H_STAT_DMA_ENG_3_PLD_PKT_ID_LARGER_0_MASK BIT(9)
#define C2H_STAT_DMA_ENG_3_WRQ_VLD_MASK BIT(8)
#define C2H_STAT_DMA_ENG_3_WRQ_RDY_MASK BIT(7)
#define C2H_STAT_DMA_ENG_3_WRQ_FIFO_OUT_RDY_MASK BIT(6)
#define C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_DROP_MASK BIT(5)
#define C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_ERR_MASK BIT(4)
#define C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_MARKER_MASK BIT(3)
#define C2H_STAT_DMA_ENG_3_WRQ_PACKET_PRE_EOR_MASK BIT(2)
#define C2H_STAT_DMA_ENG_3_WCP_FIFO_IN_RDY_MASK BIT(1)
#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_IN_RDY_MASK BIT(0)
#define EQDMA_C2H_DBG_PFCH_ERR_CTXT_ADDR 0xB2C
#define C2H_PFCH_ERR_CTXT_RSVD_1_MASK GENMASK(31, 14)
#define C2H_PFCH_ERR_CTXT_ERR_STAT_MASK BIT(13)
#define C2H_PFCH_ERR_CTXT_CMD_WR_MASK BIT(12)
#define C2H_PFCH_ERR_CTXT_QID_MASK GENMASK(11, 1)
#define C2H_PFCH_ERR_CTXT_DONE_MASK BIT(0)
#define EQDMA_C2H_FIRST_ERR_QID_ADDR 0xB30
#define C2H_FIRST_ERR_QID_RSVD_1_MASK GENMASK(31, 21)
#define C2H_FIRST_ERR_QID_ERR_TYPE_MASK GENMASK(20, 16)
#define C2H_FIRST_ERR_QID_RSVD_MASK GENMASK(15, 13)
#define C2H_FIRST_ERR_QID_QID_MASK GENMASK(12, 0)
#define EQDMA_STAT_NUM_WRB_IN_ADDR 0xB34
#define STAT_NUM_WRB_IN_RSVD_1_MASK GENMASK(31, 16)
#define STAT_NUM_WRB_IN_WRB_CNT_MASK GENMASK(15, 0)
#define EQDMA_STAT_NUM_WRB_OUT_ADDR 0xB38
#define STAT_NUM_WRB_OUT_RSVD_1_MASK GENMASK(31, 16)
#define STAT_NUM_WRB_OUT_WRB_CNT_MASK GENMASK(15, 0)
#define EQDMA_STAT_NUM_WRB_DRP_ADDR 0xB3C
#define STAT_NUM_WRB_DRP_RSVD_1_MASK GENMASK(31, 16)
#define STAT_NUM_WRB_DRP_WRB_CNT_MASK GENMASK(15, 0)
#define EQDMA_STAT_NUM_STAT_DESC_OUT_ADDR 0xB40
#define STAT_NUM_STAT_DESC_OUT_RSVD_1_MASK GENMASK(31, 16)
#define STAT_NUM_STAT_DESC_OUT_CNT_MASK GENMASK(15, 0)
#define EQDMA_STAT_NUM_DSC_CRDT_SENT_ADDR 0xB44
#define STAT_NUM_DSC_CRDT_SENT_RSVD_1_MASK GENMASK(31, 16)
#define STAT_NUM_DSC_CRDT_SENT_CNT_MASK GENMASK(15, 0)
#define EQDMA_STAT_NUM_FCH_DSC_RCVD_ADDR 0xB48
#define STAT_NUM_FCH_DSC_RCVD_RSVD_1_MASK GENMASK(31, 16)
#define STAT_NUM_FCH_DSC_RCVD_DSC_CNT_MASK GENMASK(15, 0)
#define EQDMA_STAT_NUM_BYP_DSC_RCVD_ADDR 0xB4C
#define STAT_NUM_BYP_DSC_RCVD_RSVD_1_MASK GENMASK(31, 11)
#define STAT_NUM_BYP_DSC_RCVD_DSC_CNT_MASK GENMASK(10, 0)
#define EQDMA_C2H_WRB_COAL_CFG_ADDR 0xB50
#define C2H_WRB_COAL_CFG_MAX_BUF_SZ_MASK GENMASK(31, 26)
#define C2H_WRB_COAL_CFG_TICK_VAL_MASK GENMASK(25, 14)
#define C2H_WRB_COAL_CFG_TICK_CNT_MASK GENMASK(13, 2)
#define C2H_WRB_COAL_CFG_SET_GLB_FLUSH_MASK BIT(1)
#define C2H_WRB_COAL_CFG_DONE_GLB_FLUSH_MASK BIT(0)
#define EQDMA_C2H_INTR_H2C_REQ_ADDR 0xB54
#define C2H_INTR_H2C_REQ_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_H2C_REQ_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_INTR_C2H_MM_REQ_ADDR 0xB58
#define C2H_INTR_C2H_MM_REQ_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_C2H_MM_REQ_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_INTR_ERR_INT_REQ_ADDR 0xB5C
#define C2H_INTR_ERR_INT_REQ_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_ERR_INT_REQ_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_INTR_C2H_ST_REQ_ADDR 0xB60
#define C2H_INTR_C2H_ST_REQ_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_C2H_ST_REQ_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK_ADDR 0xB64
#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL_ADDR 0xB68
#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX_ADDR 0xB6C
#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL_ADDR 0xB70
#define C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_INTR_C2H_ST_MSIX_ACK_ADDR 0xB74
#define C2H_INTR_C2H_ST_MSIX_ACK_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_C2H_ST_MSIX_ACK_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_INTR_C2H_ST_MSIX_FAIL_ADDR 0xB78
#define C2H_INTR_C2H_ST_MSIX_FAIL_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_C2H_ST_MSIX_FAIL_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_INTR_C2H_ST_NO_MSIX_ADDR 0xB7C
#define C2H_INTR_C2H_ST_NO_MSIX_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_C2H_ST_NO_MSIX_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_INTR_C2H_ST_CTXT_INVAL_ADDR 0xB80
#define C2H_INTR_C2H_ST_CTXT_INVAL_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_C2H_ST_CTXT_INVAL_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_STAT_WR_CMP_ADDR 0xB84
#define C2H_STAT_WR_CMP_RSVD_1_MASK GENMASK(31, 18)
#define C2H_STAT_WR_CMP_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_STAT_DBG_DMA_ENG_4_ADDR 0xB88
#define C2H_STAT_DMA_ENG_4_RSVD_1_MASK GENMASK(31, 24)
#define C2H_STAT_DMA_ENG_4_WRQ_FIFO_OUT_CNT_MASK GENMASK(23, 19)
#define C2H_STAT_DMA_ENG_4_QID_FIFO_OUT_VLD_MASK BIT(18)
#define C2H_STAT_DMA_ENG_4_PLD_FIFO_OUT_VLD_MASK BIT(17)
#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_VLD_MASK BIT(16)
#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_EOP_MASK BIT(15)
#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_AVL_IDX_ENABLE_MASK BIT(14)
#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_DROP_MASK BIT(13)
#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_ERR_MASK BIT(12)
#define C2H_STAT_DMA_ENG_4_DESC_CNT_FIFO_IN_RDY_MASK BIT(11)
#define C2H_STAT_DMA_ENG_4_DESC_RSP_FIFO_IN_RDY_MASK BIT(10)
#define C2H_STAT_DMA_ENG_4_PLD_PKT_ID_LARGER_0_MASK BIT(9)
#define C2H_STAT_DMA_ENG_4_WRQ_VLD_MASK BIT(8)
#define C2H_STAT_DMA_ENG_4_WRQ_RDY_MASK BIT(7)
#define C2H_STAT_DMA_ENG_4_WRQ_FIFO_OUT_RDY_MASK BIT(6)
#define C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_DROP_MASK BIT(5)
#define C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_ERR_MASK BIT(4)
#define C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_MARKER_MASK BIT(3)
#define C2H_STAT_DMA_ENG_4_WRQ_PACKET_PRE_EOR_MASK BIT(2)
#define C2H_STAT_DMA_ENG_4_WCP_FIFO_IN_RDY_MASK BIT(1)
#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_IN_RDY_MASK BIT(0)
#define EQDMA_C2H_STAT_DBG_DMA_ENG_5_ADDR 0xB8C
#define C2H_STAT_DMA_ENG_5_RSVD_1_MASK GENMASK(31, 30)
#define C2H_STAT_DMA_ENG_5_WRB_SM_VIRT_CH_MASK BIT(29)
#define C2H_STAT_DMA_ENG_5_WRB_FIFO_IN_REQ_MASK GENMASK(28, 24)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_CNT_MASK GENMASK(23, 22)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_LEN_MASK GENMASK(21, 6)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_VIRT_CH_MASK BIT(5)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_VAR_DESC_MASK BIT(4)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_DROP_REQ_MASK BIT(3)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_NUM_BUF_OV_MASK BIT(2)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_MARKER_MASK BIT(1)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_HAS_CMPT_MASK BIT(0)
#define EQDMA_C2H_DBG_PFCH_QID_ADDR 0xB90
#define C2H_PFCH_QID_RSVD_1_MASK GENMASK(31, 16)
#define C2H_PFCH_QID_ERR_CTXT_MASK BIT(15)
#define C2H_PFCH_QID_TARGET_MASK GENMASK(14, 12)
#define C2H_PFCH_QID_QID_OR_TAG_MASK GENMASK(11, 0)
#define EQDMA_C2H_DBG_PFCH_ADDR 0xB94
#define C2H_PFCH_DATA_MASK GENMASK(31, 0)
#define EQDMA_C2H_INT_DBG_ADDR 0xB98
#define C2H_INT_RSVD_1_MASK GENMASK(31, 8)
#define C2H_INT_INT_COAL_SM_MASK GENMASK(7, 4)
#define C2H_INT_INT_SM_MASK GENMASK(3, 0)
#define EQDMA_C2H_STAT_IMM_ACCEPTED_ADDR 0xB9C
#define C2H_STAT_IMM_ACCEPTED_RSVD_1_MASK GENMASK(31, 18)
#define C2H_STAT_IMM_ACCEPTED_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_STAT_MARKER_ACCEPTED_ADDR 0xBA0
#define C2H_STAT_MARKER_ACCEPTED_RSVD_1_MASK GENMASK(31, 18)
#define C2H_STAT_MARKER_ACCEPTED_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_STAT_DISABLE_CMP_ACCEPTED_ADDR 0xBA4
#define C2H_STAT_DISABLE_CMP_ACCEPTED_RSVD_1_MASK GENMASK(31, 18)
#define C2H_STAT_DISABLE_CMP_ACCEPTED_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_PLD_FIFO_CRDT_CNT_ADDR 0xBA8
#define C2H_PLD_FIFO_CRDT_CNT_RSVD_1_MASK GENMASK(31, 18)
#define C2H_PLD_FIFO_CRDT_CNT_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_INTR_DYN_REQ_ADDR 0xBAC
#define C2H_INTR_DYN_REQ_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_DYN_REQ_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_INTR_DYN_MISC_ADDR 0xBB0
#define C2H_INTR_DYN_MISC_RSVD_1_MASK GENMASK(31, 18)
#define C2H_INTR_DYN_MISC_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_DROP_LEN_MISMATCH_ADDR 0xBB4
#define C2H_DROP_LEN_MISMATCH_RSVD_1_MASK GENMASK(31, 18)
#define C2H_DROP_LEN_MISMATCH_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_DROP_DESC_RSP_LEN_ADDR 0xBB8
#define C2H_DROP_DESC_RSP_LEN_RSVD_1_MASK GENMASK(31, 18)
#define C2H_DROP_DESC_RSP_LEN_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_DROP_QID_FIFO_LEN_ADDR 0xBBC
#define C2H_DROP_QID_FIFO_LEN_RSVD_1_MASK GENMASK(31, 18)
#define C2H_DROP_QID_FIFO_LEN_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_DROP_PLD_CNT_ADDR 0xBC0
#define C2H_DROP_PLD_CNT_RSVD_1_MASK GENMASK(31, 18)
#define C2H_DROP_PLD_CNT_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_CMPT_FORMAT_0_ADDR 0xBC4
#define C2H_CMPT_FORMAT_0_DESC_ERR_LOC_MASK GENMASK(31, 16)
#define C2H_CMPT_FORMAT_0_COLOR_LOC_MASK GENMASK(15, 0)
#define EQDMA_C2H_CMPT_FORMAT_1_ADDR 0xBC8
#define C2H_CMPT_FORMAT_1_DESC_ERR_LOC_MASK GENMASK(31, 16)
#define C2H_CMPT_FORMAT_1_COLOR_LOC_MASK GENMASK(15, 0)
#define EQDMA_C2H_CMPT_FORMAT_2_ADDR 0xBCC
#define C2H_CMPT_FORMAT_2_DESC_ERR_LOC_MASK GENMASK(31, 16)
#define C2H_CMPT_FORMAT_2_COLOR_LOC_MASK GENMASK(15, 0)
#define EQDMA_C2H_CMPT_FORMAT_3_ADDR 0xBD0
#define C2H_CMPT_FORMAT_3_DESC_ERR_LOC_MASK GENMASK(31, 16)
#define C2H_CMPT_FORMAT_3_COLOR_LOC_MASK GENMASK(15, 0)
#define EQDMA_C2H_CMPT_FORMAT_4_ADDR 0xBD4
#define C2H_CMPT_FORMAT_4_DESC_ERR_LOC_MASK GENMASK(31, 16)
#define C2H_CMPT_FORMAT_4_COLOR_LOC_MASK GENMASK(15, 0)
#define EQDMA_C2H_CMPT_FORMAT_5_ADDR 0xBD8
#define C2H_CMPT_FORMAT_5_DESC_ERR_LOC_MASK GENMASK(31, 16)
#define C2H_CMPT_FORMAT_5_COLOR_LOC_MASK GENMASK(15, 0)
#define EQDMA_C2H_CMPT_FORMAT_6_ADDR 0xBDC
#define C2H_CMPT_FORMAT_6_DESC_ERR_LOC_MASK GENMASK(31, 16)
#define C2H_CMPT_FORMAT_6_COLOR_LOC_MASK GENMASK(15, 0)
#define EQDMA_C2H_PFCH_CACHE_DEPTH_ADDR 0xBE0
#define C2H_PFCH_CACHE_DEPTH_MAX_STBUF_MASK GENMASK(23, 16)
#define C2H_PFCH_CACHE_DEPTH_MASK GENMASK(7, 0)
#define EQDMA_C2H_WRB_COAL_BUF_DEPTH_ADDR 0xBE4
#define C2H_WRB_COAL_BUF_DEPTH_RSVD_1_MASK GENMASK(31, 8)
#define C2H_WRB_COAL_BUF_DEPTH_BUFFER_MASK GENMASK(7, 0)
#define EQDMA_C2H_PFCH_CRDT_ADDR 0xBE8
#define C2H_PFCH_CRDT_RSVD_1_MASK GENMASK(31, 1)
#define C2H_PFCH_CRDT_RSVD_2_MASK BIT(0)
#define EQDMA_C2H_STAT_HAS_CMPT_ACCEPTED_ADDR 0xBEC
#define C2H_STAT_HAS_CMPT_ACCEPTED_RSVD_1_MASK GENMASK(31, 18)
#define C2H_STAT_HAS_CMPT_ACCEPTED_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_STAT_HAS_PLD_ACCEPTED_ADDR 0xBF0
#define C2H_STAT_HAS_PLD_ACCEPTED_RSVD_1_MASK GENMASK(31, 18)
#define C2H_STAT_HAS_PLD_ACCEPTED_CNT_MASK GENMASK(17, 0)
#define EQDMA_C2H_PLD_PKT_ID_ADDR 0xBF4
#define C2H_PLD_PKT_ID_CMPT_WAIT_MASK GENMASK(31, 16)
#define C2H_PLD_PKT_ID_DATA_MASK GENMASK(15, 0)
#define EQDMA_C2H_PLD_PKT_ID_1_ADDR 0xBF8
#define C2H_PLD_PKT_ID_1_CMPT_WAIT_MASK GENMASK(31, 16)
#define C2H_PLD_PKT_ID_1_DATA_MASK GENMASK(15, 0)
#define EQDMA_C2H_DROP_PLD_CNT_1_ADDR 0xBFC
#define C2H_DROP_PLD_CNT_1_RSVD_1_MASK GENMASK(31, 18)
#define C2H_DROP_PLD_CNT_1_CNT_MASK GENMASK(17, 0)
#define EQDMA_H2C_ERR_STAT_ADDR 0xE00
#define H2C_ERR_STAT_RSVD_1_MASK GENMASK(31, 6)
#define H2C_ERR_STAT_PAR_ERR_MASK BIT(5)
#define H2C_ERR_STAT_SBE_MASK BIT(4)
#define H2C_ERR_STAT_DBE_MASK BIT(3)
#define H2C_ERR_STAT_NO_DMA_DS_MASK BIT(2)
#define H2C_ERR_STAT_SDI_MRKR_REQ_MOP_ERR_MASK BIT(1)
#define H2C_ERR_STAT_ZERO_LEN_DS_MASK BIT(0)
#define EQDMA_H2C_ERR_MASK_ADDR 0xE04
#define H2C_ERR_EN_MASK GENMASK(31, 0)
#define EQDMA_H2C_FIRST_ERR_QID_ADDR 0xE08
#define H2C_FIRST_ERR_QID_RSVD_1_MASK GENMASK(31, 20)
#define H2C_FIRST_ERR_QID_ERR_TYPE_MASK GENMASK(19, 16)
#define H2C_FIRST_ERR_QID_RSVD_2_MASK GENMASK(15, 13)
#define H2C_FIRST_ERR_QID_QID_MASK GENMASK(12, 0)
#define EQDMA_H2C_DBG_REG0_ADDR 0xE0C
#define H2C_REG0_NUM_DSC_RCVD_MASK GENMASK(31, 16)
#define H2C_REG0_NUM_WRB_SENT_MASK GENMASK(15, 0)
#define EQDMA_H2C_DBG_REG1_ADDR 0xE10
#define H2C_REG1_NUM_REQ_SENT_MASK GENMASK(31, 16)
#define H2C_REG1_NUM_CMP_SENT_MASK GENMASK(15, 0)
#define EQDMA_H2C_DBG_REG2_ADDR 0xE14
#define H2C_REG2_RSVD_1_MASK GENMASK(31, 16)
#define H2C_REG2_NUM_ERR_DSC_RCVD_MASK GENMASK(15, 0)
#define EQDMA_H2C_DBG_REG3_ADDR 0xE18
#define H2C_REG3_RSVD_1_MASK BIT(31)
#define H2C_REG3_DSCO_FIFO_EMPTY_MASK BIT(30)
#define H2C_REG3_DSCO_FIFO_FULL_MASK BIT(29)
#define H2C_REG3_CUR_RC_STATE_MASK GENMASK(28, 26)
#define H2C_REG3_RDREQ_LINES_MASK GENMASK(25, 16)
#define H2C_REG3_RDATA_LINES_AVAIL_MASK GENMASK(15, 6)
#define H2C_REG3_PEND_FIFO_EMPTY_MASK BIT(5)
#define H2C_REG3_PEND_FIFO_FULL_MASK BIT(4)
#define H2C_REG3_CUR_RQ_STATE_MASK GENMASK(3, 2)
#define H2C_REG3_DSCI_FIFO_FULL_MASK BIT(1)
#define H2C_REG3_DSCI_FIFO_EMPTY_MASK BIT(0)
#define EQDMA_H2C_DBG_REG4_ADDR 0xE1C
#define H2C_REG4_RDREQ_ADDR_MASK GENMASK(31, 0)
#define EQDMA_H2C_FATAL_ERR_EN_ADDR 0xE20
#define H2C_FATAL_ERR_EN_RSVD_1_MASK GENMASK(31, 1)
#define H2C_FATAL_ERR_EN_H2C_MASK BIT(0)
#define EQDMA_H2C_REQ_THROT_PCIE_ADDR 0xE24
#define H2C_REQ_THROT_PCIE_EN_REQ_MASK BIT(31)
#define H2C_REQ_THROT_PCIE_MASK GENMASK(30, 19)
#define H2C_REQ_THROT_PCIE_EN_DATA_MASK BIT(18)
#define H2C_REQ_THROT_PCIE_DATA_THRESH_MASK GENMASK(17, 0)
#define EQDMA_H2C_ALN_DBG_REG0_ADDR 0xE28
#define H2C_ALN_REG0_NUM_PKT_SENT_MASK GENMASK(15, 0)
#define EQDMA_H2C_REQ_THROT_AXIMM_ADDR 0xE2C
#define H2C_REQ_THROT_AXIMM_EN_REQ_MASK BIT(31)
#define H2C_REQ_THROT_AXIMM_MASK GENMASK(30, 19)
#define H2C_REQ_THROT_AXIMM_EN_DATA_MASK BIT(18)
#define H2C_REQ_THROT_AXIMM_DATA_THRESH_MASK GENMASK(17, 0)
#define EQDMA_C2H_MM_CTL_ADDR 0x1004
#define C2H_MM_CTL_RESERVED1_MASK GENMASK(31, 9)
#define C2H_MM_CTL_ERRC_EN_MASK BIT(8)
#define C2H_MM_CTL_RESERVED0_MASK GENMASK(7, 1)
#define C2H_MM_CTL_RUN_MASK BIT(0)
#define EQDMA_C2H_MM_STATUS_ADDR 0x1040
#define C2H_MM_STATUS_RSVD_1_MASK GENMASK(31, 1)
#define C2H_MM_STATUS_RUN_MASK BIT(0)
#define EQDMA_C2H_MM_CMPL_DESC_CNT_ADDR 0x1048
#define C2H_MM_CMPL_DESC_CNT_C2H_CO_MASK GENMASK(31, 0)
#define EQDMA_C2H_MM_ERR_CODE_ENABLE_MASK_ADDR 0x1054
#define C2H_MM_ERR_CODE_ENABLE_RESERVED1_MASK BIT(31)
#define C2H_MM_ERR_CODE_ENABLE_WR_UC_RAM_MASK BIT(30)
#define C2H_MM_ERR_CODE_ENABLE_WR_UR_MASK BIT(29)
#define C2H_MM_ERR_CODE_ENABLE_WR_FLR_MASK BIT(28)
#define C2H_MM_ERR_CODE_ENABLE_RESERVED0_MASK GENMASK(27, 2)
#define C2H_MM_ERR_CODE_ENABLE_RD_SLV_ERR_MASK BIT(1)
#define C2H_MM_ERR_CODE_ENABLE_WR_SLV_ERR_MASK BIT(0)
#define EQDMA_C2H_MM_ERR_CODE_ADDR 0x1058
#define C2H_MM_ERR_CODE_RESERVED1_MASK GENMASK(31, 28)
#define C2H_MM_ERR_CODE_CIDX_MASK GENMASK(27, 12)
#define C2H_MM_ERR_CODE_RESERVED0_MASK GENMASK(11, 10)
#define C2H_MM_ERR_CODE_SUB_TYPE_MASK GENMASK(9, 5)
#define C2H_MM_ERR_CODE_MASK GENMASK(4, 0)
#define EQDMA_C2H_MM_ERR_INFO_ADDR 0x105C
#define C2H_MM_ERR_INFO_VALID_MASK BIT(31)
#define C2H_MM_ERR_INFO_SEL_MASK BIT(30)
#define C2H_MM_ERR_INFO_RSVD_1_MASK GENMASK(29, 24)
#define C2H_MM_ERR_INFO_QID_MASK GENMASK(23, 0)
#define EQDMA_C2H_MM_PERF_MON_CTL_ADDR 0x10C0
#define C2H_MM_PERF_MON_CTL_RSVD_1_MASK GENMASK(31, 4)
#define C2H_MM_PERF_MON_CTL_IMM_START_MASK BIT(3)
#define C2H_MM_PERF_MON_CTL_RUN_START_MASK BIT(2)
#define C2H_MM_PERF_MON_CTL_IMM_CLEAR_MASK BIT(1)
#define C2H_MM_PERF_MON_CTL_RUN_CLEAR_MASK BIT(0)
#define EQDMA_C2H_MM_PERF_MON_CYCLE_CNT0_ADDR 0x10C4
#define C2H_MM_PERF_MON_CYCLE_CNT0_CYC_CNT_MASK GENMASK(31, 0)
#define EQDMA_C2H_MM_PERF_MON_CYCLE_CNT1_ADDR 0x10C8
#define C2H_MM_PERF_MON_CYCLE_CNT1_RSVD_1_MASK GENMASK(31, 10)
#define C2H_MM_PERF_MON_CYCLE_CNT1_CYC_CNT_MASK GENMASK(9, 0)
#define EQDMA_C2H_MM_PERF_MON_DATA_CNT0_ADDR 0x10CC
#define C2H_MM_PERF_MON_DATA_CNT0_DCNT_MASK GENMASK(31, 0)
#define EQDMA_C2H_MM_PERF_MON_DATA_CNT1_ADDR 0x10D0
#define C2H_MM_PERF_MON_DATA_CNT1_RSVD_1_MASK GENMASK(31, 10)
#define C2H_MM_PERF_MON_DATA_CNT1_DCNT_MASK GENMASK(9, 0)
#define EQDMA_C2H_MM_DBG_ADDR 0x10E8
#define C2H_MM_RSVD_1_MASK GENMASK(31, 24)
#define C2H_MM_RRQ_ENTRIES_MASK GENMASK(23, 17)
#define C2H_MM_DAT_FIFO_SPC_MASK GENMASK(16, 7)
#define C2H_MM_RD_STALL_MASK BIT(6)
#define C2H_MM_RRQ_FIFO_FI_MASK BIT(5)
#define C2H_MM_WR_STALL_MASK BIT(4)
#define C2H_MM_WRQ_FIFO_FI_MASK BIT(3)
#define C2H_MM_WBK_STALL_MASK BIT(2)
#define C2H_MM_DSC_FIFO_EP_MASK BIT(1)
#define C2H_MM_DSC_FIFO_FL_MASK BIT(0)
#define EQDMA_H2C_MM_CTL_ADDR 0x1204
#define H2C_MM_CTL_RESERVED1_MASK GENMASK(31, 9)
#define H2C_MM_CTL_ERRC_EN_MASK BIT(8)
#define H2C_MM_CTL_RESERVED0_MASK GENMASK(7, 1)
#define H2C_MM_CTL_RUN_MASK BIT(0)
#define EQDMA_H2C_MM_STATUS_ADDR 0x1240
#define H2C_MM_STATUS_RSVD_1_MASK GENMASK(31, 1)
#define H2C_MM_STATUS_RUN_MASK BIT(0)
#define EQDMA_H2C_MM_CMPL_DESC_CNT_ADDR 0x1248
#define H2C_MM_CMPL_DESC_CNT_H2C_CO_MASK GENMASK(31, 0)
#define EQDMA_H2C_MM_ERR_CODE_ENABLE_MASK_ADDR 0x1254
#define H2C_MM_ERR_CODE_ENABLE_RESERVED5_MASK GENMASK(31, 30)
#define H2C_MM_ERR_CODE_ENABLE_WR_SLV_ERR_MASK BIT(29)
#define H2C_MM_ERR_CODE_ENABLE_WR_DEC_ERR_MASK BIT(28)
#define H2C_MM_ERR_CODE_ENABLE_RESERVED4_MASK GENMASK(27, 23)
#define H2C_MM_ERR_CODE_ENABLE_RD_RQ_DIS_ERR_MASK BIT(22)
#define H2C_MM_ERR_CODE_ENABLE_RESERVED3_MASK GENMASK(21, 17)
#define H2C_MM_ERR_CODE_ENABLE_RD_DAT_POISON_ERR_MASK BIT(16)
#define H2C_MM_ERR_CODE_ENABLE_RESERVED2_MASK GENMASK(15, 9)
#define H2C_MM_ERR_CODE_ENABLE_RD_FLR_ERR_MASK BIT(8)
#define H2C_MM_ERR_CODE_ENABLE_RESERVED1_MASK GENMASK(7, 6)
#define H2C_MM_ERR_CODE_ENABLE_RD_HDR_ADR_ERR_MASK BIT(5)
#define H2C_MM_ERR_CODE_ENABLE_RD_HDR_PARA_MASK BIT(4)
#define H2C_MM_ERR_CODE_ENABLE_RD_HDR_BYTE_ERR_MASK BIT(3)
#define H2C_MM_ERR_CODE_ENABLE_RD_UR_CA_MASK BIT(2)
#define H2C_MM_ERR_CODE_ENABLE_RD_HRD_POISON_ERR_MASK BIT(1)
#define H2C_MM_ERR_CODE_ENABLE_RESERVED0_MASK BIT(0)
#define EQDMA_H2C_MM_ERR_CODE_ADDR 0x1258
#define H2C_MM_ERR_CODE_RSVD_1_MASK GENMASK(31, 28)
#define H2C_MM_ERR_CODE_CIDX_MASK GENMASK(27, 12)
#define H2C_MM_ERR_CODE_RESERVED0_MASK GENMASK(11, 10)
#define H2C_MM_ERR_CODE_SUB_TYPE_MASK GENMASK(9, 5)
#define H2C_MM_ERR_CODE_MASK GENMASK(4, 0)
#define EQDMA_H2C_MM_ERR_INFO_ADDR 0x125C
#define H2C_MM_ERR_INFO_VALID_MASK BIT(31)
#define H2C_MM_ERR_INFO_SEL_MASK BIT(30)
#define H2C_MM_ERR_INFO_RSVD_1_MASK GENMASK(29, 24)
#define H2C_MM_ERR_INFO_QID_MASK GENMASK(23, 0)
#define EQDMA_H2C_MM_PERF_MON_CTL_ADDR 0x12C0
#define H2C_MM_PERF_MON_CTL_RSVD_1_MASK GENMASK(31, 4)
#define H2C_MM_PERF_MON_CTL_IMM_START_MASK BIT(3)
#define H2C_MM_PERF_MON_CTL_RUN_START_MASK BIT(2)
#define H2C_MM_PERF_MON_CTL_IMM_CLEAR_MASK BIT(1)
#define H2C_MM_PERF_MON_CTL_RUN_CLEAR_MASK BIT(0)
#define EQDMA_H2C_MM_PERF_MON_CYCLE_CNT0_ADDR 0x12C4
#define H2C_MM_PERF_MON_CYCLE_CNT0_CYC_CNT_MASK GENMASK(31, 0)
#define EQDMA_H2C_MM_PERF_MON_CYCLE_CNT1_ADDR 0x12C8
#define H2C_MM_PERF_MON_CYCLE_CNT1_RSVD_1_MASK GENMASK(31, 10)
#define H2C_MM_PERF_MON_CYCLE_CNT1_CYC_CNT_MASK GENMASK(9, 0)
#define EQDMA_H2C_MM_PERF_MON_DATA_CNT0_ADDR 0x12CC
#define H2C_MM_PERF_MON_DATA_CNT0_DCNT_MASK GENMASK(31, 0)
#define EQDMA_H2C_MM_PERF_MON_DATA_CNT1_ADDR 0x12D0
#define H2C_MM_PERF_MON_DATA_CNT1_RSVD_1_MASK GENMASK(31, 10)
#define H2C_MM_PERF_MON_DATA_CNT1_DCNT_MASK GENMASK(9, 0)
#define EQDMA_H2C_MM_DBG_ADDR 0x12E8
#define H2C_MM_RSVD_1_MASK GENMASK(31, 24)
#define H2C_MM_RRQ_ENTRIES_MASK GENMASK(23, 17)
#define H2C_MM_DAT_FIFO_SPC_MASK GENMASK(16, 7)
#define H2C_MM_RD_STALL_MASK BIT(6)
#define H2C_MM_RRQ_FIFO_FI_MASK BIT(5)
#define H2C_MM_WR_STALL_MASK BIT(4)
#define H2C_MM_WRQ_FIFO_FI_MASK BIT(3)
#define H2C_MM_WBK_STALL_MASK BIT(2)
#define H2C_MM_DSC_FIFO_EP_MASK BIT(1)
#define H2C_MM_DSC_FIFO_FL_MASK BIT(0)
#define EQDMA_C2H_CRDT_COAL_CFG_1_ADDR 0x1400
#define C2H_CRDT_COAL_CFG_1_RSVD_1_MASK GENMASK(31, 18)
#define C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_MASK GENMASK(17, 10)
#define C2H_CRDT_COAL_CFG_1_TIMER_TH_MASK GENMASK(9, 0)
#define EQDMA_C2H_CRDT_COAL_CFG_2_ADDR 0x1404
#define C2H_CRDT_COAL_CFG_2_RSVD_1_MASK GENMASK(31, 24)
#define C2H_CRDT_COAL_CFG_2_FIFO_TH_MASK GENMASK(23, 16)
#define C2H_CRDT_COAL_CFG_2_RESERVED1_MASK GENMASK(15, 11)
#define C2H_CRDT_COAL_CFG_2_NT_TH_MASK GENMASK(10, 0)
#define EQDMA_C2H_PFCH_BYP_QID_ADDR 0x1408
#define C2H_PFCH_BYP_QID_RSVD_1_MASK GENMASK(31, 12)
#define C2H_PFCH_BYP_QID_MASK GENMASK(11, 0)
#define EQDMA_C2H_PFCH_BYP_TAG_ADDR 0x140C
#define C2H_PFCH_BYP_TAG_RSVD_1_MASK GENMASK(31, 20)
#define C2H_PFCH_BYP_TAG_BYP_QID_MASK GENMASK(19, 8)
#define C2H_PFCH_BYP_TAG_RSVD_2_MASK BIT(7)
#define C2H_PFCH_BYP_TAG_MASK GENMASK(6, 0)
#define EQDMA_C2H_WATER_MARK_ADDR 0x1500
#define C2H_WATER_MARK_HIGH_WM_MASK GENMASK(31, 16)
#define C2H_WATER_MARK_LOW_WM_MASK GENMASK(15, 0)
#define SW_IND_CTXT_DATA_W7_VIRTIO_DSC_BASE_H_MASK GENMASK(10, 0)
#define SW_IND_CTXT_DATA_W6_VIRTIO_DSC_BASE_M_MASK GENMASK(31, 0)
#define SW_IND_CTXT_DATA_W5_VIRTIO_DSC_BASE_L_MASK GENMASK(31, 11)
#define SW_IND_CTXT_DATA_W5_PASID_EN_MASK BIT(10)
#define SW_IND_CTXT_DATA_W5_PASID_H_MASK GENMASK(9, 0)
#define SW_IND_CTXT_DATA_W4_PASID_L_MASK GENMASK(31, 20)
#define SW_IND_CTXT_DATA_W4_HOST_ID_MASK GENMASK(19, 16)
#define SW_IND_CTXT_DATA_W4_IRQ_BYP_MASK BIT(15)
#define SW_IND_CTXT_DATA_W4_PACK_BYP_OUT_MASK BIT(14)
#define SW_IND_CTXT_DATA_W4_VIRTIO_EN_MASK BIT(13)
#define SW_IND_CTXT_DATA_W4_DIS_INTR_ON_VF_MASK BIT(12)
#define SW_IND_CTXT_DATA_W4_INT_AGGR_MASK BIT(11)
#define SW_IND_CTXT_DATA_W4_VEC_MASK GENMASK(10, 0)
#define SW_IND_CTXT_DATA_W3_DSC_BASE_H_MASK GENMASK(31, 0)
#define SW_IND_CTXT_DATA_W2_DSC_BASE_L_MASK GENMASK(31, 0)
#define SW_IND_CTXT_DATA_W1_IS_MM_MASK BIT(31)
#define SW_IND_CTXT_DATA_W1_MRKR_DIS_MASK BIT(30)
#define SW_IND_CTXT_DATA_W1_IRQ_REQ_MASK BIT(29)
#define SW_IND_CTXT_DATA_W1_ERR_WB_SENT_MASK BIT(28)
#define SW_IND_CTXT_DATA_W1_ERR_MASK GENMASK(27, 26)
#define SW_IND_CTXT_DATA_W1_IRQ_NO_LAST_MASK BIT(25)
#define SW_IND_CTXT_DATA_W1_PORT_ID_MASK GENMASK(24, 22)
#define SW_IND_CTXT_DATA_W1_IRQ_EN_MASK BIT(21)
#define SW_IND_CTXT_DATA_W1_WBK_EN_MASK BIT(20)
#define SW_IND_CTXT_DATA_W1_MM_CHN_MASK BIT(19)
#define SW_IND_CTXT_DATA_W1_BYPASS_MASK BIT(18)
#define SW_IND_CTXT_DATA_W1_DSC_SZ_MASK GENMASK(17, 16)
#define SW_IND_CTXT_DATA_W1_RNG_SZ_MASK GENMASK(15, 12)
#define SW_IND_CTXT_DATA_W1_RSVD_1_MASK GENMASK(11, 9)
#define SW_IND_CTXT_DATA_W1_FETCH_MAX_MASK GENMASK(8, 5)
#define SW_IND_CTXT_DATA_W1_AT_MASK BIT(4)
#define SW_IND_CTXT_DATA_W1_WBI_INTVL_EN_MASK BIT(3)
#define SW_IND_CTXT_DATA_W1_WBI_CHK_MASK BIT(2)
#define SW_IND_CTXT_DATA_W1_FCRD_EN_MASK BIT(1)
#define SW_IND_CTXT_DATA_W1_QEN_MASK BIT(0)
#define SW_IND_CTXT_DATA_W0_RSV_MASK GENMASK(31, 29)
#define SW_IND_CTXT_DATA_W0_FNC_MASK GENMASK(28, 17)
#define SW_IND_CTXT_DATA_W0_IRQ_ARM_MASK BIT(16)
#define SW_IND_CTXT_DATA_W0_PIDX_MASK GENMASK(15, 0)
#define HW_IND_CTXT_DATA_W1_RSVD_1_MASK BIT(15)
#define HW_IND_CTXT_DATA_W1_FETCH_PND_MASK GENMASK(14, 11)
#define HW_IND_CTXT_DATA_W1_EVT_PND_MASK BIT(10)
#define HW_IND_CTXT_DATA_W1_IDL_STP_B_MASK BIT(9)
#define HW_IND_CTXT_DATA_W1_DSC_PND_MASK BIT(8)
#define HW_IND_CTXT_DATA_W1_RSVD_2_MASK GENMASK(7, 0)
#define HW_IND_CTXT_DATA_W0_CRD_USE_MASK GENMASK(31, 16)
#define HW_IND_CTXT_DATA_W0_CIDX_MASK GENMASK(15, 0)
#define CRED_CTXT_DATA_W0_RSVD_1_MASK GENMASK(31, 16)
#define CRED_CTXT_DATA_W0_CREDT_MASK GENMASK(15, 0)
#define PREFETCH_CTXT_DATA_W1_VALID_MASK BIT(13)
#define PREFETCH_CTXT_DATA_W1_SW_CRDT_H_MASK GENMASK(12, 0)
#define PREFETCH_CTXT_DATA_W0_SW_CRDT_L_MASK GENMASK(31, 29)
#define PREFETCH_CTXT_DATA_W0_PFCH_MASK BIT(28)
#define PREFETCH_CTXT_DATA_W0_PFCH_EN_MASK BIT(27)
#define PREFETCH_CTXT_DATA_W0_ERR_MASK BIT(26)
#define PREFETCH_CTXT_DATA_W0_RSVD_MASK GENMASK(25, 22)
#define PREFETCH_CTXT_DATA_W0_PFCH_NEED_MASK GENMASK(21, 16)
#define PREFETCH_CTXT_DATA_W0_NUM_PFCH_MASK GENMASK(15, 10)
#define PREFETCH_CTXT_DATA_W0_VIRTIO_MASK BIT(9)
#define PREFETCH_CTXT_DATA_W0_VAR_DESC_MASK BIT(8)
#define PREFETCH_CTXT_DATA_W0_PORT_ID_MASK GENMASK(7, 5)
#define PREFETCH_CTXT_DATA_W0_BUF_SZ_IDX_MASK GENMASK(4, 1)
#define PREFETCH_CTXT_DATA_W0_BYPASS_MASK BIT(0)
#define CMPL_CTXT_DATA_W6_RSVD_1_H_MASK GENMASK(7, 0)
#define CMPL_CTXT_DATA_W5_RSVD_1_L_MASK GENMASK(31, 23)
#define CMPL_CTXT_DATA_W5_PORT_ID_MASK GENMASK(22, 20)
#define CMPL_CTXT_DATA_W5_SH_CMPT_MASK BIT(19)
#define CMPL_CTXT_DATA_W5_VIO_EOP_MASK BIT(18)
#define CMPL_CTXT_DATA_W5_BADDR4_LOW_MASK GENMASK(17, 14)
#define CMPL_CTXT_DATA_W5_PASID_EN_MASK BIT(13)
#define CMPL_CTXT_DATA_W5_PASID_H_MASK GENMASK(12, 0)
#define CMPL_CTXT_DATA_W4_PASID_L_MASK GENMASK(31, 23)
#define CMPL_CTXT_DATA_W4_HOST_ID_MASK GENMASK(22, 19)
#define CMPL_CTXT_DATA_W4_DIR_C2H_MASK BIT(18)
#define CMPL_CTXT_DATA_W4_VIO_MASK BIT(17)
#define CMPL_CTXT_DATA_W4_DIS_INTR_ON_VF_MASK BIT(16)
#define CMPL_CTXT_DATA_W4_INT_AGGR_MASK BIT(15)
#define CMPL_CTXT_DATA_W4_VEC_MASK GENMASK(14, 4)
#define CMPL_CTXT_DATA_W4_AT_MASK BIT(3)
#define CMPL_CTXT_DATA_W4_OVF_CHK_DIS_MASK BIT(2)
#define CMPL_CTXT_DATA_W4_FULL_UPD_MASK BIT(1)
#define CMPL_CTXT_DATA_W4_TIMER_RUNNING_MASK BIT(0)
#define CMPL_CTXT_DATA_W3_USER_TRIG_PEND_MASK BIT(31)
#define CMPL_CTXT_DATA_W3_ERR_MASK GENMASK(30, 29)
#define CMPL_CTXT_DATA_W3_VALID_MASK BIT(28)
#define CMPL_CTXT_DATA_W3_CIDX_MASK GENMASK(27, 12)
#define CMPL_CTXT_DATA_W3_PIDX_H_MASK GENMASK(11, 0)
#define CMPL_CTXT_DATA_W2_PIDX_L_MASK GENMASK(31, 28)
#define CMPL_CTXT_DATA_W2_DESC_SIZE_MASK GENMASK(27, 26)
#define CMPL_CTXT_DATA_W2_BADDR4_HIGH_H_MASK GENMASK(25, 0)
#define CMPL_CTXT_DATA_W1_BADDR4_HIGH_L_MASK GENMASK(31, 0)
#define CMPL_CTXT_DATA_W0_QSIZE_IX_MASK GENMASK(31, 28)
#define CMPL_CTXT_DATA_W0_COLOR_MASK BIT(27)
#define CMPL_CTXT_DATA_W0_INT_ST_MASK GENMASK(26, 25)
#define CMPL_CTXT_DATA_W0_TIMER_IX_MASK GENMASK(24, 21)
#define CMPL_CTXT_DATA_W0_CNTER_IX_MASK GENMASK(20, 17)
#define CMPL_CTXT_DATA_W0_FNC_ID_MASK GENMASK(16, 5)
#define CMPL_CTXT_DATA_W0_TRIG_MODE_MASK GENMASK(4, 2)
#define CMPL_CTXT_DATA_W0_EN_INT_MASK BIT(1)
#define CMPL_CTXT_DATA_W0_EN_STAT_DESC_MASK BIT(0)
#define INTR_CTXT_DATA_W3_FUNC_MASK GENMASK(29, 18)
#define INTR_CTXT_DATA_W3_RSVD_MASK GENMASK(17, 14)
#define INTR_CTXT_DATA_W3_PASID_EN_MASK BIT(13)
#define INTR_CTXT_DATA_W3_PASID_H_MASK GENMASK(12, 0)
#define INTR_CTXT_DATA_W2_PASID_L_MASK GENMASK(31, 23)
#define INTR_CTXT_DATA_W2_HOST_ID_MASK GENMASK(22, 19)
#define INTR_CTXT_DATA_W2_AT_MASK BIT(18)
#define INTR_CTXT_DATA_W2_PIDX_MASK GENMASK(17, 6)
#define INTR_CTXT_DATA_W2_PAGE_SIZE_MASK GENMASK(5, 3)
#define INTR_CTXT_DATA_W2_BADDR_4K_H_MASK GENMASK(2, 0)
#define INTR_CTXT_DATA_W1_BADDR_4K_M_MASK GENMASK(31, 0)
#define INTR_CTXT_DATA_W0_BADDR_4K_L_MASK GENMASK(31, 15)
#define INTR_CTXT_DATA_W0_COLOR_MASK BIT(14)
#define INTR_CTXT_DATA_W0_INT_ST_MASK BIT(13)
#define INTR_CTXT_DATA_W0_RSVD1_MASK BIT(12)
#define INTR_CTXT_DATA_W0_VEC_MASK GENMASK(11, 1)
#define INTR_CTXT_DATA_W0_VALID_MASK BIT(0)
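/*
 * Illustrative sketch (not part of the original header): the masks above
 * are meant to be consumed with the FIELD_GET()/FIELD_SET() helpers used
 * elsewhere in this driver.  The helper below is hypothetical and assumes
 * FIELD_GET() is in scope; it pulls the 13-bit QID field out of a raw
 * read of EQDMA_C2H_FIRST_ERR_QID_ADDR.
 */
static inline uint32_t eqdma_example_first_err_qid(uint32_t reg_val)
{
	return (uint32_t)FIELD_GET(C2H_FIRST_ERR_QID_QID_MASK, reg_val);
}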
#ifdef __cplusplus
}
#endif

#endif /* EQDMA_SOFT_REG_H_ */
/*
 * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of the copyright holder nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "qdma_access_common.h" #include "qdma_access_common.h"
#include "qdma_platform.h" #include "qdma_platform.h"
#include "qdma_soft_reg.h" #include "qdma_soft_reg.h"
#include "eqdma_soft_reg.h"
#include "qdma_soft_access.h" #include "qdma_soft_access.h"
#include "qdma_s80_hard_access.h" #include "qdma_s80_hard_access.h"
#include "eqdma_soft_access.h" #include "eqdma_soft_access.h"
@@ -169,6 +184,8 @@ static const char *qdma_get_vivado_release_id(
		return "vivado 2019.2";
	case QDMA_VIVADO_2020_1:
		return "vivado 2020.1";
	case QDMA_VIVADO_2020_2:
		return "vivado 2020.2";
	default:
		qdma_log_error("%s: invalid vivado_release_id(%d), err:%d\n",
				__func__,
@@ -319,6 +336,9 @@ void qdma_fetch_version_details(uint8_t is_vf, uint32_t version_reg_val,
	case 0:
		version_info->vivado_release = QDMA_VIVADO_2020_1;
		break;
	case 1:
		version_info->vivado_release = QDMA_VIVADO_2020_2;
		break;
	default:
		version_info->vivado_release = QDMA_VIVADO_NONE;
		break;
@@ -378,641 +398,6 @@ void qdma_memset(void *to, uint8_t val, uint32_t size)
		_to[i] = val;
}
/*****************************************************************************/
/**
* qdma_write_global_ring_sizes() - function to set the global ring size array
*
* @dev_hndl: device handle
* @index: Index from where the values needs to written
* @count: number of entries to be written
* @glbl_rng_sz: pointer to the array having the values to write
*
* (index + count) shall not be more than 16
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_write_global_ring_sizes(void *dev_hndl, uint8_t index,
uint8_t count, const uint32_t *glbl_rng_sz)
{
if (!dev_hndl || !glbl_rng_sz || !count) {
qdma_log_error("%s: dev_hndl=%p glbl_rng_sz=%p, err:%d\n",
__func__, dev_hndl, glbl_rng_sz,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if ((index + count) > QDMA_NUM_RING_SIZES) {
qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
__func__, index, count,
QDMA_NUM_RING_SIZES,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
qdma_write_csr_values(dev_hndl, QDMA_OFFSET_GLBL_RNG_SZ, index, count,
glbl_rng_sz);
return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_read_global_ring_sizes() - function to get the global rng_sz array
*
* @dev_hndl: device handle
* @index: Index from where the values needs to read
* @count: number of entries to be read
* @glbl_rng_sz: pointer to array to hold the values read
*
* (index + count) shall not be more than 16
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_read_global_ring_sizes(void *dev_hndl, uint8_t index,
uint8_t count, uint32_t *glbl_rng_sz)
{
if (!dev_hndl || !glbl_rng_sz || !count) {
qdma_log_error("%s: dev_hndl=%p glbl_rng_sz=%p, err:%d\n",
__func__, dev_hndl, glbl_rng_sz,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if ((index + count) > QDMA_NUM_RING_SIZES) {
qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
__func__, index, count,
				QDMA_NUM_RING_SIZES,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
qdma_read_csr_values(dev_hndl, QDMA_OFFSET_GLBL_RNG_SZ, index, count,
glbl_rng_sz);
return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_write_global_timer_count() - function to set the timer values
*
* @dev_hndl: device handle
* @glbl_tmr_cnt: pointer to the array having the values to write
* @index: Index from where the values needs to written
* @count: number of entries to be written
*
* (index + count) shall not be more than 16
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_write_global_timer_count(void *dev_hndl, uint8_t index,
uint8_t count, const uint32_t *glbl_tmr_cnt)
{
struct qdma_dev_attributes *dev_cap;
if (!dev_hndl || !glbl_tmr_cnt || !count) {
qdma_log_error("%s: dev_hndl=%p glbl_tmr_cnt=%p, err:%d\n",
__func__, dev_hndl, glbl_tmr_cnt,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if ((index + count) > QDMA_NUM_C2H_TIMERS) {
qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
__func__, index, count,
QDMA_NUM_C2H_TIMERS,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
qdma_get_device_attr(dev_hndl, &dev_cap);
if (dev_cap->st_en || dev_cap->mm_cmpt_en)
qdma_write_csr_values(dev_hndl, QDMA_OFFSET_C2H_TIMER_CNT,
index, count, glbl_tmr_cnt);
else {
qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
__func__,
-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
}
return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_read_global_timer_count() - function to get the timer values
*
* @dev_hndl: device handle
* @index: Index from where the values needs to read
* @count: number of entries to be read
* @glbl_tmr_cnt: pointer to array to hold the values read
*
* (index + count) shall not be more than 16
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_read_global_timer_count(void *dev_hndl, uint8_t index,
uint8_t count, uint32_t *glbl_tmr_cnt)
{
struct qdma_dev_attributes *dev_cap;
if (!dev_hndl || !glbl_tmr_cnt || !count) {
qdma_log_error("%s: dev_hndl=%p glbl_tmr_cnt=%p, err:%d\n",
__func__, dev_hndl, glbl_tmr_cnt,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if ((index + count) > QDMA_NUM_C2H_TIMERS) {
qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
__func__, index, count,
QDMA_NUM_C2H_TIMERS,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
qdma_get_device_attr(dev_hndl, &dev_cap);
if (dev_cap->st_en || dev_cap->mm_cmpt_en)
qdma_read_csr_values(dev_hndl,
QDMA_OFFSET_C2H_TIMER_CNT, index,
count, glbl_tmr_cnt);
else {
qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
__func__,
-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
}
return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_write_global_counter_threshold() - function to set the counter
* threshold values
*
* @dev_hndl: device handle
* @index: Index from where the values needs to written
* @count: number of entries to be written
* @glbl_cnt_th: pointer to the array having the values to write
*
* (index + count) shall not be more than 16
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_write_global_counter_threshold(void *dev_hndl, uint8_t index,
uint8_t count, const uint32_t *glbl_cnt_th)
{
struct qdma_dev_attributes *dev_cap;
if (!dev_hndl || !glbl_cnt_th || !count) {
qdma_log_error("%s: dev_hndl=%p glbl_cnt_th=%p, err:%d\n",
__func__, dev_hndl, glbl_cnt_th,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if ((index + count) > QDMA_NUM_C2H_COUNTERS) {
qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
__func__, index, count,
				QDMA_NUM_C2H_COUNTERS,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
qdma_get_device_attr(dev_hndl, &dev_cap);
if (dev_cap->st_en || dev_cap->mm_cmpt_en)
qdma_write_csr_values(dev_hndl, QDMA_OFFSET_C2H_CNT_TH, index,
count, glbl_cnt_th);
else {
qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
__func__,
-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
}
return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_read_global_counter_threshold() - function to get the counter threshold
* values
*
* @dev_hndl: device handle
* @index: Index from where the values needs to read
* @count: number of entries to be read
* @glbl_cnt_th: pointer to array to hold the values read
*
* (index + count) shall not be more than 16
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_read_global_counter_threshold(void *dev_hndl, uint8_t index,
uint8_t count, uint32_t *glbl_cnt_th)
{
struct qdma_dev_attributes *dev_cap;
if (!dev_hndl || !glbl_cnt_th || !count) {
qdma_log_error("%s: dev_hndl=%p glbl_cnt_th=%p, err:%d\n",
__func__, dev_hndl, glbl_cnt_th,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if ((index + count) > QDMA_NUM_C2H_COUNTERS) {
qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
__func__, index, count,
QDMA_NUM_C2H_COUNTERS,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
qdma_get_device_attr(dev_hndl, &dev_cap);
if (dev_cap->st_en || dev_cap->mm_cmpt_en)
qdma_read_csr_values(dev_hndl, QDMA_OFFSET_C2H_CNT_TH, index,
count, glbl_cnt_th);
else {
qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
__func__, -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
}
return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_write_global_buffer_sizes() - function to set the buffer sizes
*
* @dev_hndl: device handle
* @index: Index from where the values needs to written
* @count: number of entries to be written
* @glbl_buf_sz: pointer to the array having the values to write
*
* (index + count) shall not be more than 16
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_write_global_buffer_sizes(void *dev_hndl, uint8_t index,
uint8_t count, const uint32_t *glbl_buf_sz)
{
struct qdma_dev_attributes *dev_cap = NULL;
if (!dev_hndl || !glbl_buf_sz || !count) {
qdma_log_error("%s: dev_hndl=%p glbl_buf_sz=%p, err:%d\n",
__func__, dev_hndl, glbl_buf_sz,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if ((index + count) > QDMA_NUM_C2H_BUFFER_SIZES) {
qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
__func__, index, count,
QDMA_NUM_C2H_BUFFER_SIZES,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
qdma_get_device_attr(dev_hndl, &dev_cap);
if (dev_cap->st_en)
qdma_write_csr_values(dev_hndl, QDMA_OFFSET_C2H_BUF_SZ, index,
count, glbl_buf_sz);
else {
qdma_log_error("%s: ST not supported, err:%d\n",
__func__,
-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
}
return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_read_global_buffer_sizes() - function to get the buffer sizes
*
* @dev_hndl: device handle
* @index: Index from where the values needs to read
* @count: number of entries to be read
* @glbl_buf_sz: pointer to array to hold the values read
*
* (index + count) shall not be more than 16
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_read_global_buffer_sizes(void *dev_hndl, uint8_t index,
uint8_t count, uint32_t *glbl_buf_sz)
{
struct qdma_dev_attributes *dev_cap;
if (!dev_hndl || !glbl_buf_sz || !count) {
qdma_log_error("%s: dev_hndl=%p glbl_buf_sz=%p, err:%d\n",
__func__, dev_hndl, glbl_buf_sz,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if ((index + count) > QDMA_NUM_C2H_BUFFER_SIZES) {
qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
__func__, index, count,
QDMA_NUM_C2H_BUFFER_SIZES,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
qdma_get_device_attr(dev_hndl, &dev_cap);
if (dev_cap->st_en)
qdma_read_csr_values(dev_hndl, QDMA_OFFSET_C2H_BUF_SZ, index,
count, glbl_buf_sz);
else {
qdma_log_error("%s: ST is not supported, err:%d\n",
__func__,
-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
}
return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_global_csr_conf() - function to configure global csr
*
* @dev_hndl: device handle
* @index: Index from where the values needs to read
* @count: number of entries to be read
* @csr_val: uint32_t pointer to csr value
* @csr_type: Type of the CSR (qdma_global_csr_type enum) to configure
 * @access_type: HW access type (qdma_hw_access_type enum) value
* QDMA_HW_ACCESS_CLEAR - Not supported
* QDMA_HW_ACCESS_INVALIDATE - Not supported
*
* (index + count) shall not be more than 16
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_global_csr_conf(void *dev_hndl, uint8_t index, uint8_t count,
uint32_t *csr_val,
enum qdma_global_csr_type csr_type,
enum qdma_hw_access_type access_type)
{
int rv = QDMA_SUCCESS;
switch (csr_type) {
case QDMA_CSR_RING_SZ:
switch (access_type) {
case QDMA_HW_ACCESS_READ:
rv = qdma_read_global_ring_sizes(
dev_hndl,
index,
count,
csr_val);
break;
case QDMA_HW_ACCESS_WRITE:
rv = qdma_write_global_ring_sizes(
dev_hndl,
index,
count,
csr_val);
break;
default:
qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
__func__,
access_type,
-QDMA_ERR_INV_PARAM);
rv = -QDMA_ERR_INV_PARAM;
break;
}
break;
case QDMA_CSR_TIMER_CNT:
switch (access_type) {
case QDMA_HW_ACCESS_READ:
rv = qdma_read_global_timer_count(
dev_hndl,
index,
count,
csr_val);
break;
case QDMA_HW_ACCESS_WRITE:
rv = qdma_write_global_timer_count(
dev_hndl,
index,
count,
csr_val);
break;
default:
qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
__func__,
access_type,
-QDMA_ERR_INV_PARAM);
rv = -QDMA_ERR_INV_PARAM;
break;
}
break;
case QDMA_CSR_CNT_TH:
switch (access_type) {
case QDMA_HW_ACCESS_READ:
rv =
qdma_read_global_counter_threshold(
dev_hndl,
index,
count,
csr_val);
break;
case QDMA_HW_ACCESS_WRITE:
rv =
qdma_write_global_counter_threshold(
dev_hndl,
index,
count,
csr_val);
break;
default:
qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
__func__,
access_type,
-QDMA_ERR_INV_PARAM);
rv = -QDMA_ERR_INV_PARAM;
break;
}
break;
case QDMA_CSR_BUF_SZ:
switch (access_type) {
case QDMA_HW_ACCESS_READ:
rv =
qdma_read_global_buffer_sizes(dev_hndl,
index,
count,
csr_val);
break;
case QDMA_HW_ACCESS_WRITE:
rv =
qdma_write_global_buffer_sizes(dev_hndl,
index,
count,
csr_val);
break;
default:
qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
__func__,
access_type,
-QDMA_ERR_INV_PARAM);
rv = -QDMA_ERR_INV_PARAM;
break;
}
break;
default:
qdma_log_error("%s: csr_type(%d) invalid, err:%d\n",
__func__,
csr_type,
-QDMA_ERR_INV_PARAM);
rv = -QDMA_ERR_INV_PARAM;
break;
}
return rv;
}
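/*
 * Illustrative usage sketch (not part of the original source): read the
 * first four global ring size CSRs through the wrapper above.  The device
 * handle is assumed to be a valid handle obtained by the caller.
 */
static void example_read_ring_sizes(void *dev_hndl)
{
	uint32_t ring_sz[4] = {0};
	int rv;

	/* QDMA_HW_ACCESS_READ routes to qdma_read_global_ring_sizes() */
	rv = qdma_global_csr_conf(dev_hndl, 0, 4, ring_sz,
			QDMA_CSR_RING_SZ, QDMA_HW_ACCESS_READ);
	if (rv != QDMA_SUCCESS)
		qdma_log_error("%s: ring size read failed, err:%d\n",
				__func__, rv);
}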
/*****************************************************************************/
/**
* qdma_global_writeback_interval_write() - function to set the writeback
* interval
*
 * @dev_hndl: device handle
* @wb_int: Writeback Interval
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_global_writeback_interval_write(void *dev_hndl,
enum qdma_wrb_interval wb_int)
{
uint32_t reg_val;
struct qdma_dev_attributes *dev_cap;
if (!dev_hndl) {
qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if (wb_int >= QDMA_NUM_WRB_INTERVALS) {
qdma_log_error("%s: wb_int=%d is invalid, err:%d\n",
__func__, wb_int,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
qdma_get_device_attr(dev_hndl, &dev_cap);
if (dev_cap->st_en || dev_cap->mm_cmpt_en) {
reg_val = qdma_reg_read(dev_hndl, QDMA_OFFSET_GLBL_DSC_CFG);
		/* Clear the field first so a smaller interval can be programmed */
		reg_val &= ~QDMA_GLBL_DSC_CFG_WB_ACC_INT_MASK;
		reg_val |= FIELD_SET(QDMA_GLBL_DSC_CFG_WB_ACC_INT_MASK, wb_int);
qdma_reg_write(dev_hndl, QDMA_OFFSET_GLBL_DSC_CFG, reg_val);
} else {
qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
__func__, -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
}
return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_global_writeback_interval_read() - function to get the writeback
* interval
*
* @dev_hndl: device handle
* @wb_int: pointer to the data to hold Writeback Interval
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_global_writeback_interval_read(void *dev_hndl,
enum qdma_wrb_interval *wb_int)
{
uint32_t reg_val;
struct qdma_dev_attributes *dev_cap;
if (!dev_hndl) {
qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if (!wb_int) {
qdma_log_error("%s: wb_int is NULL, err:%d\n", __func__,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
qdma_get_device_attr(dev_hndl, &dev_cap);
if (dev_cap->st_en || dev_cap->mm_cmpt_en) {
reg_val = qdma_reg_read(dev_hndl, QDMA_OFFSET_GLBL_DSC_CFG);
*wb_int = (enum qdma_wrb_interval)FIELD_GET(
QDMA_GLBL_DSC_CFG_WB_ACC_INT_MASK, reg_val);
} else {
qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
__func__, -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
}
return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_global_writeback_interval_conf() - function to configure
* the writeback interval
*
* @dev_hndl: device handle
* @wb_int: pointer to the data to hold Writeback Interval
 * @access_type: HW access type (qdma_hw_access_type enum) value
* QDMA_HW_ACCESS_CLEAR - Not supported
* QDMA_HW_ACCESS_INVALIDATE - Not supported
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_global_writeback_interval_conf(void *dev_hndl,
enum qdma_wrb_interval *wb_int,
enum qdma_hw_access_type access_type)
{
int rv = QDMA_SUCCESS;
switch (access_type) {
case QDMA_HW_ACCESS_READ:
rv = qdma_global_writeback_interval_read(dev_hndl, wb_int);
break;
case QDMA_HW_ACCESS_WRITE:
rv = qdma_global_writeback_interval_write(dev_hndl, *wb_int);
break;
case QDMA_HW_ACCESS_CLEAR:
case QDMA_HW_ACCESS_INVALIDATE:
default:
qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
__func__,
access_type,
-QDMA_ERR_INV_PARAM);
rv = -QDMA_ERR_INV_PARAM;
break;
}
return rv;
}
/*****************************************************************************/
/**
 * qdma_queue_cmpt_cidx_read() - function to read the CMPT CIDX register
@@ -1066,45 +451,6 @@ static int qdma_queue_cmpt_cidx_read(void *dev_hndl, uint8_t is_vf,
}
/*****************************************************************************/
/**
* qdma_mm_channel_conf() - Function to enable/disable the MM channel
*
* @dev_hndl: device handle
* @channel: MM channel number
* @is_c2h: Queue direction. Set 1 for C2H and 0 for H2C
* @enable: Enable or disable MM channel
*
* Presently, we have only 1 MM channel
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_mm_channel_conf(void *dev_hndl, uint8_t channel, uint8_t is_c2h,
uint8_t enable)
{
uint32_t reg_addr = (is_c2h) ? QDMA_OFFSET_C2H_MM_CONTROL :
QDMA_OFFSET_H2C_MM_CONTROL;
struct qdma_dev_attributes *dev_cap;
if (!dev_hndl) {
qdma_log_error("%s: dev_handle is NULL, err:%d\n",
__func__, -QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
qdma_get_device_attr(dev_hndl, &dev_cap);
if (dev_cap->mm_en) {
qdma_reg_write(dev_hndl,
reg_addr + (channel * QDMA_MM_CONTROL_STEP),
enable);
}
return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
 * qdma_initiate_flr() - function to initiate Function Level Reset
@@ -1259,6 +605,58 @@ int qdma_acc_reg_dump_buf_len(void *dev_hndl,
	return rv;
}
int qdma_acc_reg_info_len(void *dev_hndl,
enum qdma_ip_type ip_type, int *buflen, int *num_regs)
{
uint32_t len = 0;
int rv = 0;
if (!dev_hndl) {
qdma_log_error("%s: dev_handle is NULL, err:%d\n",
__func__, -QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if (!buflen) {
qdma_log_error("%s: buflen is NULL, err:%d\n",
__func__, -QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if (!num_regs) {
qdma_log_error("%s: num_regs is NULL, err:%d\n",
__func__, -QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
*buflen = 0;
switch (ip_type) {
case QDMA_SOFT_IP:
len = 0;
*num_regs = 0;
break;
case QDMA_VERSAL_HARD_IP:
len = qdma_s80_hard_reg_dump_buf_len();
*num_regs = (int)((len / REG_DUMP_SIZE_PER_LINE) - 1);
break;
case EQDMA_SOFT_IP:
len = eqdma_reg_dump_buf_len();
*num_regs = (int)((len / REG_DUMP_SIZE_PER_LINE) - 1);
break;
default:
qdma_log_error("%s: Invalid version number, err = %d",
__func__, -QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
*buflen = (int)len;
return rv;
}
int qdma_acc_context_buf_len(void *dev_hndl,
		enum qdma_ip_type ip_type, uint8_t st,
		enum qdma_dev_q_type q_type, uint32_t *buflen)
@@ -1292,6 +690,108 @@ int qdma_acc_context_buf_len(void *dev_hndl,
	return rv;
}
int qdma_acc_get_num_config_regs(void *dev_hndl,
enum qdma_ip_type ip_type, uint32_t *num_regs)
{
int rv = 0;
*num_regs = 0;
if (!dev_hndl) {
qdma_log_error("%s: dev_handle is NULL, err:%d\n",
__func__, -QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
switch (ip_type) {
case QDMA_SOFT_IP:
rv = qdma_get_config_num_regs();
break;
case QDMA_VERSAL_HARD_IP:
rv = qdma_s80_hard_get_config_num_regs();
break;
case EQDMA_SOFT_IP:
rv = eqdma_get_config_num_regs();
break;
default:
qdma_log_error("%s: Invalid version number, err = %d",
__func__, -QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
*num_regs = rv;
return 0;
}
/*****************************************************************************/
/**
* qdma_acc_get_config_regs() - Function to get qdma config registers.
*
* @dev_hndl: device handle
* @is_vf: Whether PF or VF
* @ip_type: QDMA IP Type
* @reg_data: pointer to register data to be filled
*
 * Return: 0 - success and < 0 - failure
*****************************************************************************/
int qdma_acc_get_config_regs(void *dev_hndl, uint8_t is_vf,
enum qdma_ip_type ip_type,
uint32_t *reg_data)
{
struct xreg_info *reg_info;
uint32_t count = 0;
uint32_t num_regs;
int rv = 0;
if (!dev_hndl) {
qdma_log_error("%s: dev_handle is NULL, err:%d\n",
__func__, -QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if (is_vf) {
qdma_log_error("%s: Get Config regs not valid for VF, err:%d\n",
__func__,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if (reg_data == NULL) {
qdma_log_error("%s: reg_data is NULL, err:%d\n",
__func__,
-QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
switch (ip_type) {
case QDMA_SOFT_IP:
num_regs = qdma_get_config_num_regs();
reg_info = qdma_get_config_regs();
break;
case QDMA_VERSAL_HARD_IP:
num_regs = qdma_s80_hard_get_config_num_regs();
reg_info = qdma_s80_hard_get_config_regs();
break;
case EQDMA_SOFT_IP:
num_regs = eqdma_get_config_num_regs();
reg_info = eqdma_get_config_regs();
break;
default:
qdma_log_error("%s: Invalid version number, err = %d",
__func__, -QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
for (count = 0; count < num_regs - 1; count++) {
reg_data[count] = qdma_reg_read(dev_hndl,
reg_info[count].addr);
}
return rv;
}
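/*
 * Illustrative usage sketch (not part of the original source): size and
 * read the whole config register space of an EQDMA PF using the two
 * helpers above.  The fixed MAX_CFG_REGS bound is a placeholder chosen
 * for this example only.
 */
static int example_read_eqdma_config_regs(void *dev_hndl)
{
	enum { MAX_CFG_REGS = 256 };
	uint32_t regs[MAX_CFG_REGS];
	uint32_t num_regs = 0;
	int rv;

	rv = qdma_acc_get_num_config_regs(dev_hndl, EQDMA_SOFT_IP, &num_regs);
	if (rv < 0 || num_regs > MAX_CFG_REGS)
		return -QDMA_ERR_INV_PARAM;

	/* is_vf must be 0: reading config registers is valid only for a PF */
	return qdma_acc_get_config_regs(dev_hndl, 0, EQDMA_SOFT_IP, regs);
}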
/*****************************************************************************/
/**
 * qdma_acc_dump_config_regs() - Function to get qdma config register dump in a
@@ -1333,6 +833,58 @@ int qdma_acc_dump_config_regs(void *dev_hndl, uint8_t is_vf,
	return rv;
}
/*****************************************************************************/
/**
 * qdma_acc_dump_reg_info() - Function to dump fields of
 * a specified register.
 *
 * @dev_hndl: device handle
 * @ip_type: QDMA IP Type
 * @reg_addr: register address to dump
 * @num_regs: number of registers to dump starting from reg_addr
 * @buf : pointer to buffer to be filled
 * @buflen : Length of the buffer
*
* Return: Length up-till the buffer is filled -success and < 0 - failure
*****************************************************************************/
int qdma_acc_dump_reg_info(void *dev_hndl,
enum qdma_ip_type ip_type, uint32_t reg_addr,
uint32_t num_regs, char *buf, uint32_t buflen)
{
int rv = 0;
if (!dev_hndl) {
qdma_log_error("%s: dev_handle is NULL, err:%d\n",
__func__, -QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
if (!buf || !buflen) {
qdma_log_error("%s: Invalid input buffer, err = %d",
__func__, -QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
switch (ip_type) {
case QDMA_SOFT_IP:
QDMA_SNPRINTF_S(buf, buflen, DEBGFS_LINE_SZ,
"QDMA reg field info not supported for QDMA_SOFT_IP\n");
break;
case QDMA_VERSAL_HARD_IP:
rv = qdma_s80_hard_dump_reg_info(dev_hndl, reg_addr,
num_regs, buf, buflen);
break;
case EQDMA_SOFT_IP:
rv = eqdma_dump_reg_info(dev_hndl, reg_addr,
num_regs, buf, buflen);
break;
default:
qdma_log_error("%s: Invalid version number, err = %d",
__func__, -QDMA_ERR_INV_PARAM);
return -QDMA_ERR_INV_PARAM;
}
return rv;
}
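/*
 * Illustrative usage sketch (not part of the original source): decode the
 * fields of one EQDMA register (the C2H first-error QID register defined
 * in eqdma_soft_reg.h) into a caller-provided text buffer.
 */
static int example_dump_first_err_qid_info(void *dev_hndl,
		char *buf, uint32_t buflen)
{
	return qdma_acc_dump_reg_info(dev_hndl, EQDMA_SOFT_IP,
			EQDMA_C2H_FIRST_ERR_QID_ADDR, 1, buf, buflen);
}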
/*****************************************************************************/
/**
 * qdma_acc_dump_queue_context() - Function to get qdma queue context dump in a
@@ -1596,6 +1148,8 @@ int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
	return rv;
}
qdma_memset(hw_access, 0, sizeof(struct qdma_hw_access));
	if (ip == EQDMA_IP)
		hw_access->qdma_get_version = &eqdma_get_version;
	else
@@ -1639,8 +1193,10 @@ int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
	hw_access->qdma_read_reg_list = &qdma_read_reg_list;
	hw_access->qdma_dump_config_reg_list =
			&qdma_soft_dump_config_reg_list;
	hw_access->qdma_dump_reg_info = &qdma_dump_reg_info;
	hw_access->mbox_base_pf = QDMA_OFFSET_MBOX_BASE_PF;
	hw_access->mbox_base_vf = QDMA_OFFSET_MBOX_BASE_VF;
	hw_access->qdma_max_errors = QDMA_ERRS_ALL;

	rv = hw_access->qdma_get_version(dev_hndl, is_vf, &version_info);
	if (rv != QDMA_SUCCESS)
@@ -1684,6 +1240,12 @@ int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
				&qdma_s80_hard_dump_config_regs;
		hw_access->qdma_dump_intr_context =
				&qdma_s80_hard_dump_intr_context;
		hw_access->qdma_hw_error_enable =
				&qdma_s80_hard_hw_error_enable;
		hw_access->qdma_hw_error_process =
				&qdma_s80_hard_hw_error_process;
		hw_access->qdma_hw_get_error_name =
				&qdma_s80_hard_hw_get_error_name;
		hw_access->qdma_legacy_intr_conf = NULL;
		hw_access->qdma_read_reg_list = &qdma_s80_hard_read_reg_list;
		hw_access->qdma_dump_config_reg_list =
@@ -1692,6 +1254,8 @@ int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
				&qdma_s80_hard_dump_queue_context;
		hw_access->qdma_read_dump_queue_context =
				&qdma_s80_hard_read_dump_queue_context;
		hw_access->qdma_dump_reg_info = &qdma_s80_hard_dump_reg_info;
		hw_access->qdma_max_errors = QDMA_S80_HARD_ERRS_ALL;
	}

	if (version_info.ip_type == EQDMA_SOFT_IP) {
@@ -1720,7 +1284,7 @@ int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
				&eqdma_dump_queue_context;
		hw_access->qdma_read_dump_queue_context =
				&eqdma_read_dump_queue_context;
		hw_access->qdma_dump_reg_info = &eqdma_dump_reg_info;
		/* All CSR and Queue space register belongs to Window 0.
		 * Mailbox and MSIX register belongs to Window 1
		 * Therefore, Mailbox offsets are different for EQDMA
@@ -1729,6 +1293,7 @@ int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
		 */
		hw_access->mbox_base_pf = EQDMA_OFFSET_MBOX_BASE_PF;
		hw_access->mbox_base_vf = EQDMA_OFFSET_MBOX_BASE_VF;
		hw_access->qdma_max_errors = EQDMA_ERRS_ALL;
	}

	return QDMA_SUCCESS;
......
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, but WITHOUT * Redistribution and use in source and binary forms, with or without
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * modification, are permitted provided that the following conditions
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * are met:
* more details.
* *
* The full GNU General Public License is included in this distribution in * * Redistributions of source code must retain the above copyright
* the file called "COPYING". * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#ifndef QDMA_ACCESS_COMMON_H_ #ifndef __QDMA_ACCESS_COMMON_H_
#define QDMA_ACCESS_COMMON_H_ #define __QDMA_ACCESS_COMMON_H_
#include "qdma_access_export.h"
#include "qdma_access_errors.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
#include "qdma_access_export.h"
#include "qdma_access_errors.h"
/* QDMA HW version string array length */ /* QDMA HW version string array length */
#define QDMA_HW_VERSION_STRING_LEN 32 #define QDMA_HW_VERSION_STRING_LEN 32
#define ENABLE_INIT_CTXT_MEMORY 1 #define ENABLE_INIT_CTXT_MEMORY 1
#ifdef GCC_COMPILER
static inline uint32_t get_trailing_zeros(uint64_t x)
{
uint32_t rv =
__builtin_ffsll(x) - 1;
return rv;
}
#else
static inline uint32_t get_trailing_zeros(uint64_t value)
{
uint32_t pos = 0;
if ((value & 0xffffffff) == 0) {
pos += 32;
value >>= 32;
}
if ((value & 0xffff) == 0) {
pos += 16;
value >>= 16;
}
if ((value & 0xff) == 0) {
pos += 8;
value >>= 8;
}
if ((value & 0xf) == 0) {
pos += 4;
value >>= 4;
}
if ((value & 0x3) == 0) {
pos += 2;
value >>= 2;
}
if ((value & 0x1) == 0)
pos += 1;
return pos;
}
#endif
#define FIELD_SHIFT(mask) get_trailing_zeros(mask)
#define FIELD_SET(mask, val) ((val << FIELD_SHIFT(mask)) & mask)
#define FIELD_GET(mask, reg) ((reg & mask) >> FIELD_SHIFT(mask))
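The helpers above turn a register mask into its shift amount and pack/unpack a field value through that mask. Below is a minimal stand-alone sketch of how FIELD_SET/FIELD_GET compose; the macros and a simplified get_trailing_zeros (equivalent to the portable fallback above) are repeated so the snippet builds on its own, and EXAMPLE_MASK is an illustrative mask, not a real QDMA register field.

#include <stdint.h>
#include <stdio.h>

static inline uint32_t get_trailing_zeros(uint64_t value)
{
	uint32_t pos = 0;

	while ((value & 0x1) == 0) {	/* mask is assumed non-zero */
		pos++;
		value >>= 1;
	}
	return pos;
}

#define FIELD_SHIFT(mask)	get_trailing_zeros(mask)
#define FIELD_SET(mask, val)	(((val) << FIELD_SHIFT(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> FIELD_SHIFT(mask))

#define EXAMPLE_MASK	0x0000ff00U	/* hypothetical field in bits [15:8] */

int main(void)
{
	uint32_t reg = 0;

	reg |= FIELD_SET(EXAMPLE_MASK, 0x2aU);	/* pack 0x2a into bits [15:8] */
	printf("reg=0x%08x field=0x%02x\n",
	       (unsigned int)reg,
	       (unsigned int)FIELD_GET(EXAMPLE_MASK, reg));	/* field=0x2a */
	return 0;
}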
/* CSR Default values */ /* CSR Default values */
#define DEFAULT_MAX_DSC_FETCH 6 #define DEFAULT_MAX_DSC_FETCH 6
#define DEFAULT_WRB_INT QDMA_WRB_INTERVAL_128 #define DEFAULT_WRB_INT QDMA_WRB_INTERVAL_128
...@@ -50,6 +110,66 @@ extern "C" { ...@@ -50,6 +110,66 @@ extern "C" {
*/ */
#define QDMA_NUM_DATA_VEC_FOR_INTR_CXT 1 #define QDMA_NUM_DATA_VEC_FOR_INTR_CXT 1
enum ind_ctxt_cmd_op {
QDMA_CTXT_CMD_CLR,
QDMA_CTXT_CMD_WR,
QDMA_CTXT_CMD_RD,
QDMA_CTXT_CMD_INV
};
enum ind_ctxt_cmd_sel {
QDMA_CTXT_SEL_SW_C2H,
QDMA_CTXT_SEL_SW_H2C,
QDMA_CTXT_SEL_HW_C2H,
QDMA_CTXT_SEL_HW_H2C,
QDMA_CTXT_SEL_CR_C2H,
QDMA_CTXT_SEL_CR_H2C,
QDMA_CTXT_SEL_CMPT,
QDMA_CTXT_SEL_PFTCH,
QDMA_CTXT_SEL_INT_COAL,
QDMA_CTXT_SEL_PASID_RAM_LOW,
QDMA_CTXT_SEL_PASID_RAM_HIGH,
QDMA_CTXT_SEL_TIMER,
QDMA_CTXT_SEL_FMAP,
};
/* polling a register */
#define QDMA_REG_POLL_DFLT_INTERVAL_US 10 /* 10us per poll */
#define QDMA_REG_POLL_DFLT_TIMEOUT_US (500*1000) /* 500ms */
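These two defaults set the cadence used when waiting on hardware: re-read a status register every QDMA_REG_POLL_DFLT_INTERVAL_US until a mask is set, and give up after QDMA_REG_POLL_DFLT_TIMEOUT_US. The sketch below illustrates that pattern only; read_reg() and the usleep() stand-in for the driver's qdma_udelay() are placeholders, not the library's actual polling helper.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define QDMA_REG_POLL_DFLT_INTERVAL_US	10		/* 10us per poll */
#define QDMA_REG_POLL_DFLT_TIMEOUT_US	(500 * 1000)	/* 500ms budget */

/* Poll @off until (value & mask) != 0 or the timeout budget is spent. */
static int poll_reg_mask(uint32_t (*read_reg)(uint32_t off),
			 uint32_t off, uint32_t mask)
{
	uint32_t elapsed_us = 0;

	while (elapsed_us < QDMA_REG_POLL_DFLT_TIMEOUT_US) {
		if (read_reg(off) & mask)
			return 0;				/* condition met */
		usleep(QDMA_REG_POLL_DFLT_INTERVAL_US);		/* qdma_udelay() in the driver */
		elapsed_us += QDMA_REG_POLL_DFLT_INTERVAL_US;
	}
	return -1;						/* timed out */
}

/* Fake register that reports "done" immediately, just to exercise the loop. */
static uint32_t fake_read_reg(uint32_t off)
{
	(void)off;
	return 0x1;
}

int main(void)
{
	printf("poll result: %d\n", poll_reg_mask(fake_read_reg, 0x0, 0x1));
	return 0;
}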
/** Constants */
#define QDMA_NUM_RING_SIZES 16
#define QDMA_NUM_C2H_TIMERS 16
#define QDMA_NUM_C2H_BUFFER_SIZES 16
#define QDMA_NUM_C2H_COUNTERS 16
#define QDMA_MM_CONTROL_RUN 0x1
#define QDMA_MM_CONTROL_STEP 0x100
#define QDMA_MAGIC_NUMBER 0x1fd3
#define QDMA_PIDX_STEP 0x10
#define QDMA_CMPT_CIDX_STEP 0x10
#define QDMA_INT_CIDX_STEP 0x10
/** QDMA_IND_REG_SEL_PFTCH */
#define QDMA_PFTCH_CTXT_SW_CRDT_GET_H_MASK GENMASK(15, 3)
#define QDMA_PFTCH_CTXT_SW_CRDT_GET_L_MASK GENMASK(2, 0)
/** QDMA_IND_REG_SEL_CMPT */
#define QDMA_COMPL_CTXT_BADDR_GET_H_MASK GENMASK_ULL(63, 38)
#define QDMA_COMPL_CTXT_BADDR_GET_L_MASK GENMASK_ULL(37, 12)
#define QDMA_COMPL_CTXT_PIDX_GET_H_MASK GENMASK(15, 4)
#define QDMA_COMPL_CTXT_PIDX_GET_L_MASK GENMASK(3, 0)
#define QDMA_INTR_CTXT_BADDR_GET_H_MASK GENMASK_ULL(63, 61)
#define QDMA_INTR_CTXT_BADDR_GET_M_MASK GENMASK_ULL(60, 29)
#define QDMA_INTR_CTXT_BADDR_GET_L_MASK GENMASK_ULL(28, 12)
#define QDMA_GLBL2_MM_CMPT_EN_MASK BIT(2)
#define QDMA_GLBL2_FLR_PRESENT_MASK BIT(1)
#define QDMA_GLBL2_MAILBOX_EN_MASK BIT(0)
#define QDMA_REG_IND_CTXT_REG_COUNT 8
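Several of the context fields above are split across register words, which is why they come as _GET_H/_GET_L mask pairs (and a _GET_M slice for the interrupt base address). Recombining them is mechanical: extract each slice and shift it up by the width of the lower slices. A sketch of that pattern follows; w0/w1 are placeholder context words (which word holds each half is not asserted here), and the ctz-based macro stands in for FIELD_GET above.

#include <stdint.h>
#include <stdio.h>

/* GCC-builtin stand-in for the FIELD_GET helper defined earlier. */
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define QDMA_PFTCH_CTXT_SW_CRDT_GET_H_MASK	0xfff8U	/* GENMASK(15, 3) */
#define QDMA_PFTCH_CTXT_SW_CRDT_GET_L_MASK	0x0007U	/* GENMASK(2, 0) */

int main(void)
{
	/* Placeholder raw context words, not a real HW layout. */
	uint32_t w0 = 0x00000005;	/* low 3 bits of the credit */
	uint32_t w1 = 0x00000150;	/* upper 13 bits of the credit */
	uint16_t crdt;

	crdt = (uint16_t)((FIELD_GET(QDMA_PFTCH_CTXT_SW_CRDT_GET_H_MASK, w1) << 3) |
			  FIELD_GET(QDMA_PFTCH_CTXT_SW_CRDT_GET_L_MASK, w0));
	printf("sw credit = 0x%x\n", crdt);	/* (0x2a << 3) | 0x5 = 0x155 */
	return 0;
}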
/* ------------------------ indirect register context fields -----------*/ /* ------------------------ indirect register context fields -----------*/
union qdma_ind_ctxt_cmd { union qdma_ind_ctxt_cmd {
uint32_t word; uint32_t word;
...@@ -294,8 +414,6 @@ struct qdma_descq_cmpt_ctxt { ...@@ -294,8 +414,6 @@ struct qdma_descq_cmpt_ctxt {
uint32_t pasid; uint32_t pasid;
/** @pasid_en - PASID Enable */ /** @pasid_en - PASID Enable */
uint8_t pasid_en; uint8_t pasid_en;
/** @virtio_dsc_base - Virtio Desc Base Address */
uint8_t base_addr;
/** @vio_eop - Virtio End-of-packet */ /** @vio_eop - Virtio End-of-packet */
uint8_t vio_eop; uint8_t vio_eop;
/** @sh_cmpt - Shared Completion Queue */ /** @sh_cmpt - Shared Completion Queue */
...@@ -518,11 +636,16 @@ void qdma_memset(void *to, uint8_t val, uint32_t size); ...@@ -518,11 +636,16 @@ void qdma_memset(void *to, uint8_t val, uint32_t size);
int qdma_acc_reg_dump_buf_len(void *dev_hndl, int qdma_acc_reg_dump_buf_len(void *dev_hndl,
enum qdma_ip_type ip_type, int *buflen); enum qdma_ip_type ip_type, int *buflen);
int qdma_acc_reg_info_len(void *dev_hndl,
enum qdma_ip_type ip_type, int *buflen, int *num_regs);
int qdma_acc_context_buf_len(void *dev_hndl, int qdma_acc_context_buf_len(void *dev_hndl,
enum qdma_ip_type ip_type, uint8_t st, enum qdma_ip_type ip_type, uint8_t st,
enum qdma_dev_q_type q_type, uint32_t *buflen); enum qdma_dev_q_type q_type, uint32_t *buflen);
int qdma_acc_get_num_config_regs(void *dev_hndl,
enum qdma_ip_type ip_type, uint32_t *num_regs);
/* /*
* struct qdma_hw_access - Structure to hold HW access function pointers * struct qdma_hw_access - Structure to hold HW access function pointers
*/ */
...@@ -592,6 +715,10 @@ struct qdma_hw_access { ...@@ -592,6 +715,10 @@ struct qdma_hw_access {
int (*qdma_hw_error_process)(void *dev_hndl); int (*qdma_hw_error_process)(void *dev_hndl);
int (*qdma_dump_config_regs)(void *dev_hndl, uint8_t is_vf, char *buf, int (*qdma_dump_config_regs)(void *dev_hndl, uint8_t is_vf, char *buf,
uint32_t buflen); uint32_t buflen);
int (*qdma_dump_reg_info)(void *dev_hndl, uint32_t reg_addr,
uint32_t num_regs,
char *buf,
uint32_t buflen);
int (*qdma_dump_queue_context)(void *dev_hndl, int (*qdma_dump_queue_context)(void *dev_hndl,
uint8_t st, uint8_t st,
enum qdma_dev_q_type q_type, enum qdma_dev_q_type q_type,
...@@ -622,6 +749,7 @@ struct qdma_hw_access { ...@@ -622,6 +749,7 @@ struct qdma_hw_access {
char *buf, uint32_t buflen); char *buf, uint32_t buflen);
uint32_t mbox_base_pf; uint32_t mbox_base_pf;
uint32_t mbox_base_vf; uint32_t mbox_base_vf;
uint32_t qdma_max_errors;
}; };
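struct qdma_hw_access is a dispatch table: qdma_hw_access_init() fills it with the IP-specific implementations (soft, EQDMA, or S80 hard) plus per-IP data such as the mailbox base offsets and qdma_max_errors, and callers then go through the pointers rather than the per-IP functions directly. A hedged usage sketch follows, assuming dev_hndl and is_vf come from the platform layer and using only entry points declared in this header; it is not the driver's actual call sequence.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include "qdma_access_common.h"

/* Sketch: initialise the dispatch table and dump the config registers. */
static int dump_cfg_regs_example(void *dev_hndl, uint8_t is_vf,
				 enum qdma_ip_type ip_type)
{
	struct qdma_hw_access hw = { 0 };
	char *buf;
	int buflen = 0;
	int rv;

	rv = qdma_hw_access_init(dev_hndl, is_vf, &hw);
	if (rv != QDMA_SUCCESS)
		return rv;

	/* Size the text buffer for this IP, then dispatch through the table. */
	rv = qdma_acc_reg_dump_buf_len(dev_hndl, ip_type, &buflen);
	if (rv < 0)
		return rv;

	buf = calloc(1, (size_t)buflen);	/* qdma_calloc() inside the library */
	if (!buf)
		return -1;

	rv = hw.qdma_dump_config_regs(dev_hndl, is_vf, buf, (uint32_t)buflen);
	if (rv >= 0)
		printf("%s", buf);

	free(buf);
	return rv;
}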
/*****************************************************************************/ /*****************************************************************************/
...@@ -643,6 +771,21 @@ struct qdma_hw_access { ...@@ -643,6 +771,21 @@ struct qdma_hw_access {
int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf, int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
struct qdma_hw_access *hw_access); struct qdma_hw_access *hw_access);
/*****************************************************************************/
/**
 * qdma_acc_get_config_regs() - Function to get qdma config register values
*
* @dev_hndl: device handle
* @is_vf: Whether PF or VF
* @ip_type: QDMA IP Type
* @reg_data: pointer to register data to be filled
*
 * Return: Length up to which the buffer is filled on success and < 0 on failure
*****************************************************************************/
int qdma_acc_get_config_regs(void *dev_hndl, uint8_t is_vf,
enum qdma_ip_type ip_type,
uint32_t *reg_data);
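qdma_acc_get_config_regs() returns the raw register values rather than formatted text, so the caller sizes the array with qdma_acc_get_num_config_regs() first. A minimal sketch, assuming a valid dev_hndl from the platform layer:

#include <stdint.h>
#include <stdlib.h>
#include "qdma_access_common.h"

/* Sketch: fetch the raw config register values for post-processing. */
static int read_raw_cfg_regs(void *dev_hndl, uint8_t is_vf,
			     enum qdma_ip_type ip_type)
{
	uint32_t num_regs = 0;
	uint32_t *reg_data;
	int rv;

	rv = qdma_acc_get_num_config_regs(dev_hndl, ip_type, &num_regs);
	if (rv < 0)
		return rv;

	reg_data = calloc(num_regs, sizeof(uint32_t));
	if (!reg_data)
		return -1;

	rv = qdma_acc_get_config_regs(dev_hndl, is_vf, ip_type, reg_data);
	/* ... consume reg_data[0..num_regs-1] on success ... */

	free(reg_data);
	return rv;
}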
/*****************************************************************************/ /*****************************************************************************/
/** /**
* qdma_acc_dump_config_regs() - Function to get qdma config register dump in a * qdma_acc_dump_config_regs() - Function to get qdma config register dump in a
...@@ -660,6 +803,23 @@ int qdma_acc_dump_config_regs(void *dev_hndl, uint8_t is_vf, ...@@ -660,6 +803,23 @@ int qdma_acc_dump_config_regs(void *dev_hndl, uint8_t is_vf,
enum qdma_ip_type ip_type, enum qdma_ip_type ip_type,
char *buf, uint32_t buflen); char *buf, uint32_t buflen);
/*****************************************************************************/
/**
* qdma_acc_dump_reg_info() - Function to get qdma reg info in a buffer
*
* @dev_hndl: device handle
* @ip_type: QDMA IP Type
* @reg_addr: Register Address
* @num_regs: Number of Registers
 * @buf: pointer to buffer to be filled
 * @buflen: Length of the buffer
 *
 * Return: Length up to which the buffer is filled on success and < 0 on failure
*****************************************************************************/
int qdma_acc_dump_reg_info(void *dev_hndl,
enum qdma_ip_type ip_type, uint32_t reg_addr,
uint32_t num_regs, char *buf, uint32_t buflen);
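qdma_acc_dump_reg_info() formats a field-level view of a block of registers into a text buffer, with qdma_acc_reg_info_len() supplying the buffer length and register count to pass in. A hedged sketch of that pairing follows; REG_ADDR is a placeholder offset, not a specific QDMA register.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include "qdma_access_common.h"

#define REG_ADDR	0x0	/* placeholder register offset */

/* Sketch: dump the per-field breakdown of a block of registers. */
static int dump_reg_info_example(void *dev_hndl, enum qdma_ip_type ip_type)
{
	int buflen = 0, num_regs = 0;
	char *buf;
	int rv;

	rv = qdma_acc_reg_info_len(dev_hndl, ip_type, &buflen, &num_regs);
	if (rv < 0)
		return rv;

	buf = calloc(1, (size_t)buflen);
	if (!buf)
		return -1;

	rv = qdma_acc_dump_reg_info(dev_hndl, ip_type, REG_ADDR,
				    (uint32_t)num_regs, buf, (uint32_t)buflen);
	if (rv >= 0)
		printf("%s", buf);

	free(buf);
	return rv;
}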
/*****************************************************************************/ /*****************************************************************************/
/** /**
* qdma_acc_dump_queue_context() - Function to dump qdma queue context data in a * qdma_acc_dump_queue_context() - Function to dump qdma queue context data in a
......
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, but WITHOUT * Redistribution and use in source and binary forms, with or without
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * modification, are permitted provided that the following conditions
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * are met:
* more details.
* *
* The full GNU General Public License is included in this distribution in * * Redistributions of source code must retain the above copyright
* the file called "COPYING". * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#ifndef QDMA_ACCESS_ERRORS_H_ #ifndef __QDMA_ACCESS_ERRORS_H_
#define QDMA_ACCESS_ERRORS_H_ #define __QDMA_ACCESS_ERRORS_H_
#ifdef __cplusplus
extern "C" {
#endif
/** /**
* DOC: QDMA common library error codes definitions * DOC: QDMA common library error codes definitions
...@@ -62,4 +81,8 @@ enum qdma_access_error_codes { ...@@ -62,4 +81,8 @@ enum qdma_access_error_codes {
QDMA_ERR_MBOX_ALL_ZERO_MSG, /* 25 */ QDMA_ERR_MBOX_ALL_ZERO_MSG, /* 25 */
}; };
#endif /* QDMA_ACCESS_H_ */ #ifdef __cplusplus
}
#endif
#endif /* __QDMA_ACCESS_ERRORS_H_ */
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, but WITHOUT * Redistribution and use in source and binary forms, with or without
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * modification, are permitted provided that the following conditions
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * are met:
* more details.
* *
* The full GNU General Public License is included in this distribution in * * Redistributions of source code must retain the above copyright
* the file called "COPYING". * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#ifndef QDMA_ACCESS_EXPORT_H_ #ifndef __QDMA_ACCESS_EXPORT_H_
#define QDMA_ACCESS_EXPORT_H_ #define __QDMA_ACCESS_EXPORT_H_
#include "qdma_platform_env.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
#include "qdma_platform_env.h"
/** QDMA Global CSR array size */ /** QDMA Global CSR array size */
#define QDMA_GLOBAL_CSR_ARRAY_SZ 16 #define QDMA_GLOBAL_CSR_ARRAY_SZ 16
...@@ -44,6 +60,12 @@ struct qdma_dev_attributes { ...@@ -44,6 +60,12 @@ struct qdma_dev_attributes {
uint8_t mm_cmpt_en:1; uint8_t mm_cmpt_en:1;
/** @mailbox_en - Mailbox supported or not? */ /** @mailbox_en - Mailbox supported or not? */
uint8_t mailbox_en:1; uint8_t mailbox_en:1;
/** @debug_mode - Debug mode is enabled/disabled for IP */
uint8_t debug_mode:1;
/** @desc_eng_mode - Descriptor Engine mode:
* Internal only/Bypass only/Internal & Bypass
*/
uint8_t desc_eng_mode:2;
/** @mm_channel_max - Num of MM channels */ /** @mm_channel_max - Num of MM channels */
uint8_t mm_channel_max; uint8_t mm_channel_max;
...@@ -202,6 +224,8 @@ enum qdma_vivado_release_id { ...@@ -202,6 +224,8 @@ enum qdma_vivado_release_id {
QDMA_VIVADO_2019_2, QDMA_VIVADO_2019_2,
/** @QDMA_VIVADO_2020_1 - Vivado version 2020.1 */ /** @QDMA_VIVADO_2020_1 - Vivado version 2020.1 */
QDMA_VIVADO_2020_1, QDMA_VIVADO_2020_1,
/** @QDMA_VIVADO_2020_2 - Vivado version 2020.2 */
QDMA_VIVADO_2020_2,
/** @QDMA_VIVADO_NONE - Not a valid Vivado version*/ /** @QDMA_VIVADO_NONE - Not a valid Vivado version*/
QDMA_VIVADO_NONE QDMA_VIVADO_NONE
}; };
...@@ -229,137 +253,19 @@ enum qdma_device_type { ...@@ -229,137 +253,19 @@ enum qdma_device_type {
QDMA_DEVICE_NONE QDMA_DEVICE_NONE
}; };
/** enum qdma_desc_eng_mode {
* enum qdma_error_idx - qdma errors /** @QDMA_DESC_ENG_INTERNAL_BYPASS - Internal and Bypass mode */
*/ QDMA_DESC_ENG_INTERNAL_BYPASS,
enum qdma_error_idx { /** @QDMA_DESC_ENG_BYPASS_ONLY - Only Bypass mode */
/* Descriptor errors */ QDMA_DESC_ENG_BYPASS_ONLY,
QDMA_DSC_ERR_POISON, /** @QDMA_DESC_ENG_INTERNAL_ONLY - Only Internal mode */
QDMA_DSC_ERR_UR_CA, QDMA_DESC_ENG_INTERNAL_ONLY,
QDMA_DSC_ERR_PARAM, /** @QDMA_DESC_ENG_MODE_MAX - Max of desc engine modes */
QDMA_DSC_ERR_ADDR, QDMA_DESC_ENG_MODE_MAX
QDMA_DSC_ERR_TAG,
QDMA_DSC_ERR_FLR,
QDMA_DSC_ERR_TIMEOUT,
QDMA_DSC_ERR_DAT_POISON,
QDMA_DSC_ERR_FLR_CANCEL,
QDMA_DSC_ERR_DMA,
QDMA_DSC_ERR_DSC,
QDMA_DSC_ERR_RQ_CANCEL,
QDMA_DSC_ERR_DBE,
QDMA_DSC_ERR_SBE,
QDMA_DSC_ERR_ALL,
/* TRQ Errors */
QDMA_TRQ_ERR_UNMAPPED,
QDMA_TRQ_ERR_QID_RANGE,
QDMA_TRQ_ERR_VF_ACCESS,
QDMA_TRQ_ERR_TCP_TIMEOUT,
QDMA_TRQ_ERR_ALL,
/* C2H Errors */
QDMA_ST_C2H_ERR_MTY_MISMATCH,
QDMA_ST_C2H_ERR_LEN_MISMATCH,
QDMA_ST_C2H_ERR_QID_MISMATCH,
QDMA_ST_C2H_ERR_DESC_RSP_ERR,
QDMA_ST_C2H_ERR_ENG_WPL_DATA_PAR_ERR,
QDMA_ST_C2H_ERR_MSI_INT_FAIL,
QDMA_ST_C2H_ERR_ERR_DESC_CNT,
QDMA_ST_C2H_ERR_PORTID_CTXT_MISMATCH,
QDMA_ST_C2H_ERR_PORTID_BYP_IN_MISMATCH,
QDMA_ST_C2H_ERR_CMPT_INV_Q_ERR,
QDMA_ST_C2H_ERR_CMPT_QFULL_ERR,
QDMA_ST_C2H_ERR_CMPT_CIDX_ERR,
QDMA_ST_C2H_ERR_CMPT_PRTY_ERR,
QDMA_ST_C2H_ERR_ALL,
/* Fatal Errors */
QDMA_ST_FATAL_ERR_MTY_MISMATCH,
QDMA_ST_FATAL_ERR_LEN_MISMATCH,
QDMA_ST_FATAL_ERR_QID_MISMATCH,
QDMA_ST_FATAL_ERR_TIMER_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_PFCH_II_RAM_RDBE,
QDMA_ST_FATAL_ERR_CMPT_CTXT_RAM_RDBE,
QDMA_ST_FATAL_ERR_PFCH_CTXT_RAM_RDBE,
QDMA_ST_FATAL_ERR_DESC_REQ_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_INT_CTXT_RAM_RDBE,
QDMA_ST_FATAL_ERR_CMPT_COAL_DATA_RAM_RDBE,
QDMA_ST_FATAL_ERR_TUSER_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_QID_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_PAYLOAD_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_WPL_DATA_PAR,
QDMA_ST_FATAL_ERR_ALL,
/* H2C Errors */
QDMA_ST_H2C_ERR_ZERO_LEN_DESC,
QDMA_ST_H2C_ERR_CSI_MOP,
QDMA_ST_H2C_ERR_NO_DMA_DSC,
QDMA_ST_H2C_ERR_SBE,
QDMA_ST_H2C_ERR_DBE,
QDMA_ST_H2C_ERR_ALL,
/* Single bit errors */
QDMA_SBE_ERR_MI_H2C0_DAT,
QDMA_SBE_ERR_MI_C2H0_DAT,
QDMA_SBE_ERR_H2C_RD_BRG_DAT,
QDMA_SBE_ERR_H2C_WR_BRG_DAT,
QDMA_SBE_ERR_C2H_RD_BRG_DAT,
QDMA_SBE_ERR_C2H_WR_BRG_DAT,
QDMA_SBE_ERR_FUNC_MAP,
QDMA_SBE_ERR_DSC_HW_CTXT,
QDMA_SBE_ERR_DSC_CRD_RCV,
QDMA_SBE_ERR_DSC_SW_CTXT,
QDMA_SBE_ERR_DSC_CPLI,
QDMA_SBE_ERR_DSC_CPLD,
QDMA_SBE_ERR_PASID_CTXT_RAM,
QDMA_SBE_ERR_TIMER_FIFO_RAM,
QDMA_SBE_ERR_PAYLOAD_FIFO_RAM,
QDMA_SBE_ERR_QID_FIFO_RAM,
QDMA_SBE_ERR_TUSER_FIFO_RAM,
QDMA_SBE_ERR_WRB_COAL_DATA_RAM,
QDMA_SBE_ERR_INT_QID2VEC_RAM,
QDMA_SBE_ERR_INT_CTXT_RAM,
QDMA_SBE_ERR_DESC_REQ_FIFO_RAM,
QDMA_SBE_ERR_PFCH_CTXT_RAM,
QDMA_SBE_ERR_WRB_CTXT_RAM,
QDMA_SBE_ERR_PFCH_LL_RAM,
QDMA_SBE_ERR_H2C_PEND_FIFO,
QDMA_SBE_ERR_ALL,
/* Double bit Errors */
QDMA_DBE_ERR_MI_H2C0_DAT,
QDMA_DBE_ERR_MI_C2H0_DAT,
QDMA_DBE_ERR_H2C_RD_BRG_DAT,
QDMA_DBE_ERR_H2C_WR_BRG_DAT,
QDMA_DBE_ERR_C2H_RD_BRG_DAT,
QDMA_DBE_ERR_C2H_WR_BRG_DAT,
QDMA_DBE_ERR_FUNC_MAP,
QDMA_DBE_ERR_DSC_HW_CTXT,
QDMA_DBE_ERR_DSC_CRD_RCV,
QDMA_DBE_ERR_DSC_SW_CTXT,
QDMA_DBE_ERR_DSC_CPLI,
QDMA_DBE_ERR_DSC_CPLD,
QDMA_DBE_ERR_PASID_CTXT_RAM,
QDMA_DBE_ERR_TIMER_FIFO_RAM,
QDMA_DBE_ERR_PAYLOAD_FIFO_RAM,
QDMA_DBE_ERR_QID_FIFO_RAM,
QDMA_DBE_ERR_TUSER_FIFO_RAM,
QDMA_DBE_ERR_WRB_COAL_DATA_RAM,
QDMA_DBE_ERR_INT_QID2VEC_RAM,
QDMA_DBE_ERR_INT_CTXT_RAM,
QDMA_DBE_ERR_DESC_REQ_FIFO_RAM,
QDMA_DBE_ERR_PFCH_CTXT_RAM,
QDMA_DBE_ERR_WRB_CTXT_RAM,
QDMA_DBE_ERR_PFCH_LL_RAM,
QDMA_DBE_ERR_H2C_PEND_FIFO,
QDMA_DBE_ERR_ALL,
QDMA_ERRS_ALL
}; };
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
#endif /* QDMA_ACCESS_EXPORT_H_ */ #endif /* __QDMA_ACCESS_EXPORT_H_ */
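The new debug_mode and desc_eng_mode attribute bits report whether Debug mode is enabled for the IP and which descriptor-engine mode it supports, with the 2-bit mode field decoded against enum qdma_desc_eng_mode. A small stand-alone sketch of that decode follows; the struct here is a trimmed copy of qdma_dev_attributes carrying only the new bits, not the full definition.

#include <stdint.h>
#include <stdio.h>

enum qdma_desc_eng_mode {
	QDMA_DESC_ENG_INTERNAL_BYPASS,	/* internal and bypass */
	QDMA_DESC_ENG_BYPASS_ONLY,	/* bypass only */
	QDMA_DESC_ENG_INTERNAL_ONLY,	/* internal only */
	QDMA_DESC_ENG_MODE_MAX
};

/* Trimmed stand-in for struct qdma_dev_attributes: only the new bits. */
struct dev_attr_subset {
	uint8_t debug_mode:1;
	uint8_t desc_eng_mode:2;
};

static const char *desc_eng_mode_str(uint8_t mode)
{
	switch (mode) {
	case QDMA_DESC_ENG_INTERNAL_BYPASS: return "internal+bypass";
	case QDMA_DESC_ENG_BYPASS_ONLY:     return "bypass only";
	case QDMA_DESC_ENG_INTERNAL_ONLY:   return "internal only";
	default:                            return "unknown";
	}
}

int main(void)
{
	struct dev_attr_subset attr = {
		.debug_mode = 1,
		.desc_eng_mode = QDMA_DESC_ENG_INTERNAL_ONLY,
	};

	printf("debug mode: %s, descriptor engine: %s\n",
	       attr.debug_mode ? "on" : "off",
	       desc_eng_mode_str(attr.desc_eng_mode));
	return 0;
}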
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, but WITHOUT * Redistribution and use in source and binary forms, with or without
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * modification, are permitted provided that the following conditions
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * are met:
* more details.
* *
* The full GNU General Public License is included in this distribution in * * Redistributions of source code must retain the above copyright
* the file called "COPYING". * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#ifndef QDMA_VERSION_H_ #ifndef __QDMA_ACCESS_VERSION_H_
#define QDMA_VERSION_H_ #define __QDMA_ACCESS_VERSION_H_
#define QDMA_VERSION_MAJOR 2020 #define QDMA_VERSION_MAJOR 2020
#define QDMA_VERSION_MINOR 1 #define QDMA_VERSION_MINOR 2
#define QDMA_VERSION_PATCH 1 #define QDMA_VERSION_PATCH 0
#define QDMA_VERSION_STR \ #define QDMA_VERSION_STR \
__stringify(QDMA_VERSION_MAJOR) "." \ __stringify(QDMA_VERSION_MAJOR) "." \
...@@ -33,4 +49,4 @@ ...@@ -33,4 +49,4 @@
QDMA_VERSION_PATCH) QDMA_VERSION_PATCH)
#endif /* COMMON_QDMA_VERSION_H_ */ #endif /* __QDMA_ACCESS_VERSION_H_ */
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, but WITHOUT * Redistribution and use in source and binary forms, with or without
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * modification, are permitted provided that the following conditions
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * are met:
* more details.
* *
* The full GNU General Public License is included in this distribution in * * Redistributions of source code must retain the above copyright
* the file called "COPYING". * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#include "qdma_list.h" #include "qdma_list.h"
......
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License, *
* version 2, as published by the Free Software Foundation. * Redistribution and use in source and binary forms, with or without
* * modification, are permitted provided that the following conditions
* This program is distributed in the hope that it will be useful, but WITHOUT * are met:
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * * Redistributions of source code must retain the above copyright
* more details. * notice, this list of conditions and the following disclaimer.
* * * Redistributions in binary form must reproduce the above copyright
* The full GNU General Public License is included in this distribution in * notice, this list of conditions and the following disclaimer in
* the file called "COPYING". * the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#ifndef QDMA_LIST_H_ #ifndef __QDMA_LIST_H_
#define QDMA_LIST_H_ #define __QDMA_LIST_H_
#ifdef __cplusplus
extern "C" {
#endif
/** /**
* DOC: QDMA common library provided list implementation definitions * DOC: QDMA common library provided list implementation definitions
...@@ -114,4 +134,8 @@ void qdma_list_insert_after(struct qdma_list_head *new_node, ...@@ -114,4 +134,8 @@ void qdma_list_insert_after(struct qdma_list_head *new_node,
*****************************************************************************/ *****************************************************************************/
void qdma_list_del(struct qdma_list_head *node); void qdma_list_del(struct qdma_list_head *node);
#endif /* QDMA_LIST_H_ */ #ifdef __cplusplus
}
#endif
#endif /* __QDMA_LIST_H_ */
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, but WITHOUT * Redistribution and use in source and binary forms, with or without
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * modification, are permitted provided that the following conditions
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * are met:
* more details.
* *
* The full GNU General Public License is included in this distribution in * * Redistributions of source code must retain the above copyright
* the file called "COPYING". * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#include "qdma_mbox_protocol.h" #include "qdma_mbox_protocol.h"
#include "qdma_platform.h"
#include "qdma_resource_mgmt.h"
/** mailbox function status */ /** mailbox function status */
#define MBOX_FN_STATUS 0x0 #define MBOX_FN_STATUS 0x0
...@@ -381,9 +395,9 @@ static inline void mbox_pf_hw_clear_func_ack(void *dev_hndl, uint16_t func_id) ...@@ -381,9 +395,9 @@ static inline void mbox_pf_hw_clear_func_ack(void *dev_hndl, uint16_t func_id)
(1 << bit)); (1 << bit));
} }
static void qdma_mbox_memcpy(void *to, void *from, uint32_t size) static void qdma_mbox_memcpy(void *to, void *from, uint8_t size)
{ {
uint32_t i; uint8_t i;
uint8_t *_to = (uint8_t *)to; uint8_t *_to = (uint8_t *)to;
uint8_t *_from = (uint8_t *)from; uint8_t *_from = (uint8_t *)from;
...@@ -391,9 +405,9 @@ static void qdma_mbox_memcpy(void *to, void *from, uint32_t size) ...@@ -391,9 +405,9 @@ static void qdma_mbox_memcpy(void *to, void *from, uint32_t size)
_to[i] = _from[i]; _to[i] = _from[i];
} }
static void qdma_mbox_memset(void *to, uint8_t val, uint32_t size) static void qdma_mbox_memset(void *to, uint8_t val, uint8_t size)
{ {
uint32_t i; uint8_t i;
uint8_t *_to = (uint8_t *)to; uint8_t *_to = (uint8_t *)to;
for (i = 0; i < size; i++) for (i = 0; i < size; i++)
...@@ -1079,7 +1093,7 @@ int qdma_mbox_pf_rcv_msg_handler(void *dev_hndl, uint8_t dma_device_index, ...@@ -1079,7 +1093,7 @@ int qdma_mbox_pf_rcv_msg_handler(void *dev_hndl, uint8_t dma_device_index,
case MBOX_OP_CSR: case MBOX_OP_CSR:
{ {
struct mbox_msg_csr *rsp_csr = &resp->csr; struct mbox_msg_csr *rsp_csr = &resp->csr;
struct qdma_dev_attributes *dev_cap; struct qdma_dev_attributes dev_cap;
uint32_t ringsz[QDMA_GLOBAL_CSR_ARRAY_SZ] = {0}; uint32_t ringsz[QDMA_GLOBAL_CSR_ARRAY_SZ] = {0};
uint32_t bufsz[QDMA_GLOBAL_CSR_ARRAY_SZ] = {0}; uint32_t bufsz[QDMA_GLOBAL_CSR_ARRAY_SZ] = {0};
...@@ -1093,9 +1107,9 @@ int qdma_mbox_pf_rcv_msg_handler(void *dev_hndl, uint8_t dma_device_index, ...@@ -1093,9 +1107,9 @@ int qdma_mbox_pf_rcv_msg_handler(void *dev_hndl, uint8_t dma_device_index,
if (rv < 0) if (rv < 0)
goto exit_func; goto exit_func;
qdma_get_device_attr(dev_hndl, &dev_cap); hw->qdma_get_device_attributes(dev_hndl, &dev_cap);
if (dev_cap->st_en) { if (dev_cap.st_en) {
rv = hw->qdma_global_csr_conf(dev_hndl, 0, rv = hw->qdma_global_csr_conf(dev_hndl, 0,
QDMA_GLOBAL_CSR_ARRAY_SZ, bufsz, QDMA_GLOBAL_CSR_ARRAY_SZ, bufsz,
QDMA_CSR_BUF_SZ, QDMA_HW_ACCESS_READ); QDMA_CSR_BUF_SZ, QDMA_HW_ACCESS_READ);
...@@ -1104,7 +1118,7 @@ int qdma_mbox_pf_rcv_msg_handler(void *dev_hndl, uint8_t dma_device_index, ...@@ -1104,7 +1118,7 @@ int qdma_mbox_pf_rcv_msg_handler(void *dev_hndl, uint8_t dma_device_index,
goto exit_func; goto exit_func;
} }
if (dev_cap->st_en || dev_cap->mm_cmpt_en) { if (dev_cap.st_en || dev_cap.mm_cmpt_en) {
rv = hw->qdma_global_csr_conf(dev_hndl, 0, rv = hw->qdma_global_csr_conf(dev_hndl, 0,
QDMA_GLOBAL_CSR_ARRAY_SZ, tmr_th, QDMA_GLOBAL_CSR_ARRAY_SZ, tmr_th,
QDMA_CSR_TIMER_CNT, QDMA_HW_ACCESS_READ); QDMA_CSR_TIMER_CNT, QDMA_HW_ACCESS_READ);
......
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License, *
* version 2, as published by the Free Software Foundation. * Redistribution and use in source and binary forms, with or without
* * modification, are permitted provided that the following conditions
* This program is distributed in the hope that it will be useful, but WITHOUT * are met:
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * * Redistributions of source code must retain the above copyright
* more details. * notice, this list of conditions and the following disclaimer.
* * * Redistributions in binary form must reproduce the above copyright
* The full GNU General Public License is included in this distribution in * notice, this list of conditions and the following disclaimer in
* the file called "COPYING". * the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#ifndef QDMA_MBOX_PROTOCOL_H_ #ifndef __QDMA_MBOX_PROTOCOL_H_
#define QDMA_MBOX_PROTOCOL_H_ #define __QDMA_MBOX_PROTOCOL_H_
#ifdef __cplusplus
extern "C" {
#endif
/** /**
* DOC: QDMA message box handling interface definitions * DOC: QDMA message box handling interface definitions
...@@ -24,10 +44,10 @@ ...@@ -24,10 +44,10 @@
* signatures exported for QDMA Mbox message handling. * signatures exported for QDMA Mbox message handling.
*/ */
#include "qdma_platform_env.h" #include "qdma_platform.h"
#include "qdma_access_common.h"
#include "qdma_resource_mgmt.h" #include "qdma_resource_mgmt.h"
#define QDMA_MBOX_VF_ONLINE (1) #define QDMA_MBOX_VF_ONLINE (1)
#define QDMA_MBOX_VF_OFFLINE (-1) #define QDMA_MBOX_VF_OFFLINE (-1)
#define QDMA_MBOX_VF_RESET (2) #define QDMA_MBOX_VF_RESET (2)
...@@ -154,7 +174,7 @@ void qdma_mbox_hw_init(void *dev_hndl, uint8_t is_vf); ...@@ -154,7 +174,7 @@ void qdma_mbox_hw_init(void *dev_hndl, uint8_t is_vf);
/** /**
* qdma_mbox_pf_rcv_msg_handler(): handles the raw message received in pf * qdma_mbox_pf_rcv_msg_handler(): handles the raw message received in pf
* *
* @pci_bus_num: pci bus number * @dma_device_index: pci bus number
* @dev_hndl: device handle * @dev_hndl: device handle
* @func_id: own function id * @func_id: own function id
* @rcv_msg: received raw message * @rcv_msg: received raw message
...@@ -162,7 +182,7 @@ void qdma_mbox_hw_init(void *dev_hndl, uint8_t is_vf); ...@@ -162,7 +182,7 @@ void qdma_mbox_hw_init(void *dev_hndl, uint8_t is_vf);
* *
* Return: 0 : success and < 0: failure * Return: 0 : success and < 0: failure
*****************************************************************************/ *****************************************************************************/
int qdma_mbox_pf_rcv_msg_handler(void *dev_hndl, uint8_t pci_bus_num, int qdma_mbox_pf_rcv_msg_handler(void *dev_hndl, uint8_t dma_device_index,
uint16_t func_id, uint32_t *rcv_msg, uint16_t func_id, uint32_t *rcv_msg,
uint32_t *resp_msg); uint32_t *resp_msg);
...@@ -683,4 +703,8 @@ int qdma_mbox_vf_rcv_msg_handler(uint32_t *rcv_msg, uint32_t *resp_msg); ...@@ -683,4 +703,8 @@ int qdma_mbox_vf_rcv_msg_handler(uint32_t *rcv_msg, uint32_t *resp_msg);
*****************************************************************************/ *****************************************************************************/
uint8_t qdma_mbox_out_status(void *dev_hndl, uint8_t is_vf); uint8_t qdma_mbox_out_status(void *dev_hndl, uint8_t is_vf);
#endif /* QDMA_MBOX_PROTOCOL_H_ */ #ifdef __cplusplus
}
#endif
#endif /* __QDMA_MBOX_PROTOCOL_H_ */
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License, *
* version 2, as published by the Free Software Foundation. * Redistribution and use in source and binary forms, with or without
* * modification, are permitted provided that the following conditions
* This program is distributed in the hope that it will be useful, but WITHOUT * are met:
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * * Redistributions of source code must retain the above copyright
* more details. * notice, this list of conditions and the following disclaimer.
* * * Redistributions in binary form must reproduce the above copyright
* The full GNU General Public License is included in this distribution in * notice, this list of conditions and the following disclaimer in
* the file called "COPYING". * the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#ifndef LIBQDMA_QDMA_PLATFORM_H_ #ifndef __QDMA_PLATFORM_H_
#define LIBQDMA_QDMA_PLATFORM_H_ #define __QDMA_PLATFORM_H_
#include "qdma_access_common.h"
#include "qdma_platform_env.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
...@@ -31,6 +44,8 @@ extern "C" { ...@@ -31,6 +44,8 @@ extern "C" {
* required to be implemented by platform specific drivers. * required to be implemented by platform specific drivers.
*/ */
#include "qdma_access_common.h"
/*****************************************************************************/ /*****************************************************************************/
/** /**
* qdma_calloc(): allocate memory and initialize with 0 * qdma_calloc(): allocate memory and initialize with 0
...@@ -130,28 +145,6 @@ int qdma_reg_access_release(void *dev_hndl); ...@@ -130,28 +145,6 @@ int qdma_reg_access_release(void *dev_hndl);
*****************************************************************************/ *****************************************************************************/
void qdma_udelay(uint32_t delay_usec); void qdma_udelay(uint32_t delay_usec);
/*****************************************************************************/
/**
* qdma_hw_error_handler() - function to handle the hardware errors
*
* @dev_hndl: device handle
* @err_idx: error index
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
void qdma_hw_error_handler(void *dev_hndl, enum qdma_error_idx err_idx);
/*****************************************************************************/
/**
* qdma_get_device_attr() - function to get the device attributes
*
* @dev_hndl: device handle
* @dev_cap: pointer to hold the device capabilities
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
void qdma_get_device_attr(void *dev_hndl, struct qdma_dev_attributes **dev_cap);
/*****************************************************************************/ /*****************************************************************************/
/** /**
* qdma_get_hw_access() - function to get the qdma_hw_access * qdma_get_hw_access() - function to get the qdma_hw_access
...@@ -188,4 +181,4 @@ int qdma_get_err_code(int acc_err_code); ...@@ -188,4 +181,4 @@ int qdma_get_err_code(int acc_err_code);
} }
#endif #endif
#endif /* LIBQDMA_QDMA_PLATFORM_H_ */ #endif /* __QDMA_PLATFORM_H_ */
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, but WITHOUT * Redistribution and use in source and binary forms, with or without
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * modification, are permitted provided that the following conditions
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * are met:
* more details.
* *
* The full GNU General Public License is included in this distribution in * * Redistributions of source code must retain the above copyright
* the file called "COPYING". * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#ifndef __QDMA_REG_DUMP_H__ #ifndef __QDMA_REG_DUMP_H__
#define __QDMA_REG_DUMP_H__ #define __QDMA_REG_DUMP_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "qdma_platform_env.h" #include "qdma_platform_env.h"
#include "qdma_access_common.h" #include "qdma_access_common.h"
#define DEBUGFS_DEV_INFO_SZ (300) #define DEBUGFS_DEV_INFO_SZ (300)
#define QDMA_REG_NAME_LENGTH 64
#define DEBUGFS_INTR_CNTX_SZ (2048 * 2) #define DEBUGFS_INTR_CNTX_SZ (2048 * 2)
#define DBGFS_ERR_BUFLEN (64) #define DBGFS_ERR_BUFLEN (64)
#define DEBGFS_LINE_SZ (81) #define DEBGFS_LINE_SZ (81)
...@@ -50,19 +71,28 @@ ...@@ -50,19 +71,28 @@
(st_en << QDMA_ST_EN_SHIFT) | \ (st_en << QDMA_ST_EN_SHIFT) | \
(mailbox_en << QDMA_MAILBOX_EN_SHIFT)) (mailbox_en << QDMA_MAILBOX_EN_SHIFT))
struct regfield_info {
const char *field_name;
uint32_t field_mask;
};
struct xreg_info { struct xreg_info {
char name[32]; const char *name;
uint32_t addr; uint32_t addr;
uint32_t repeat; uint32_t repeat;
uint32_t step; uint32_t step;
uint8_t shift; uint8_t shift;
uint8_t len; uint8_t len;
uint8_t is_debug_reg;
uint8_t mode; uint8_t mode;
uint8_t read_type; uint8_t read_type;
uint8_t num_bitfields;
struct regfield_info *bitfields;
}; };
extern struct xreg_info qdma_config_regs[MAX_QDMA_CFG_REGS]; #ifdef __cplusplus
extern struct xreg_info qdma_cpm_config_regs[MAX_QDMA_CFG_REGS]; }
#endif
#endif #endif
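The extended xreg_info entries now carry a per-register list of regfield_info bitfields, which a field-level register dump can walk to print each field by name. Below is a hedged sketch of what one such table entry and a tiny field printer could look like; the register name, masks, and values are hypothetical, not entries from the real QDMA register map.

#include <stdint.h>
#include <stdio.h>

struct regfield_info {
	const char *field_name;
	uint32_t field_mask;
};

/* Hypothetical register with two fields, mirroring the table layout above. */
static struct regfield_info example_fields[] = {
	{"example_enable", 0x00000001},	/* bit 0 */
	{"example_count",  0x0000ff00},	/* bits [15:8] */
};

static void print_fields(uint32_t reg_val,
			 const struct regfield_info *fields, uint8_t num)
{
	uint8_t i;

	for (i = 0; i < num; i++) {
		uint32_t mask = fields[i].field_mask;
		uint32_t shift = (uint32_t)__builtin_ctz(mask);

		printf("  %-16s = 0x%x\n", fields[i].field_name,
		       (reg_val & mask) >> shift);
	}
}

int main(void)
{
	print_fields(0x00002a01, example_fields, 2);	/* enable=1, count=0x2a */
	return 0;
}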
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, but WITHOUT * Redistribution and use in source and binary forms, with or without
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * modification, are permitted provided that the following conditions
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * are met:
* more details.
* *
* The full GNU General Public License is included in this distribution in * * Redistributions of source code must retain the above copyright
* the file called "COPYING". * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#include "qdma_resource_mgmt.h" #include "qdma_resource_mgmt.h"
......
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License, *
* version 2, as published by the Free Software Foundation. * Redistribution and use in source and binary forms, with or without
* * modification, are permitted provided that the following conditions
* This program is distributed in the hope that it will be useful, but WITHOUT * are met:
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * * Redistributions of source code must retain the above copyright
* more details. * notice, this list of conditions and the following disclaimer.
* * * Redistributions in binary form must reproduce the above copyright
* The full GNU General Public License is included in this distribution in * notice, this list of conditions and the following disclaimer in
* the file called "COPYING". * the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#ifndef QDMA_RESOURCE_MGMT_H_ #ifndef __QDMA_RESOURCE_MGMT_H_
#define QDMA_RESOURCE_MGMT_H_ #define __QDMA_RESOURCE_MGMT_H_
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
...@@ -27,6 +43,7 @@ extern "C" { ...@@ -27,6 +43,7 @@ extern "C" {
* Header file *qdma_resource_mgmt.h* defines data structures and function * Header file *qdma_resource_mgmt.h* defines data structures and function
* signatures exported for QDMA queue management. * signatures exported for QDMA queue management.
*/ */
#include "qdma_platform_env.h" #include "qdma_platform_env.h"
#include "qdma_access_export.h" #include "qdma_access_export.h"
...@@ -209,4 +226,4 @@ int qdma_get_device_active_queue_count(uint32_t dma_device_index, ...@@ -209,4 +226,4 @@ int qdma_get_device_active_queue_count(uint32_t dma_device_index,
} }
#endif #endif
#endif /* LIBQDMA_QDMA_RESOURCE_MGMT_H_ */ #endif /* __QDMA_RESOURCE_MGMT_H_ */
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, but WITHOUT * Redistribution and use in source and binary forms, with or without
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * modification, are permitted provided that the following conditions
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * are met:
* more details.
* *
* The full GNU General Public License is included in this distribution in * * Redistributions of source code must retain the above copyright
* the file called "COPYING". * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#ifndef QDMA_S80_HARD_ACCESS_H_ #ifndef __QDMA_S80_HARD_ACCESS_H_
#define QDMA_S80_HARD_ACCESS_H_ #define __QDMA_S80_HARD_ACCESS_H_
#include "qdma_access_common.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
#include "qdma_platform.h"
/**
* enum qdma_error_idx - qdma errors
*/
enum qdma_s80_hard_error_idx {
/* Descriptor errors */
QDMA_S80_HARD_DSC_ERR_POISON,
QDMA_S80_HARD_DSC_ERR_UR_CA,
QDMA_S80_HARD_DSC_ERR_PARAM,
QDMA_S80_HARD_DSC_ERR_ADDR,
QDMA_S80_HARD_DSC_ERR_TAG,
QDMA_S80_HARD_DSC_ERR_FLR,
QDMA_S80_HARD_DSC_ERR_TIMEOUT,
QDMA_S80_HARD_DSC_ERR_DAT_POISON,
QDMA_S80_HARD_DSC_ERR_FLR_CANCEL,
QDMA_S80_HARD_DSC_ERR_DMA,
QDMA_S80_HARD_DSC_ERR_DSC,
QDMA_S80_HARD_DSC_ERR_RQ_CANCEL,
QDMA_S80_HARD_DSC_ERR_DBE,
QDMA_S80_HARD_DSC_ERR_SBE,
QDMA_S80_HARD_DSC_ERR_ALL,
/* TRQ Errors */
QDMA_S80_HARD_TRQ_ERR_UNMAPPED,
QDMA_S80_HARD_TRQ_ERR_QID_RANGE,
QDMA_S80_HARD_TRQ_ERR_VF_ACCESS_ERR,
QDMA_S80_HARD_TRQ_ERR_TCP_TIMEOUT,
QDMA_S80_HARD_TRQ_ERR_ALL,
/* C2H Errors */
QDMA_S80_HARD_ST_C2H_ERR_MTY_MISMATCH,
QDMA_S80_HARD_ST_C2H_ERR_LEN_MISMATCH,
QDMA_S80_HARD_ST_C2H_ERR_QID_MISMATCH,
QDMA_S80_HARD_ST_C2H_ERR_DESC_RSP_ERR,
QDMA_S80_HARD_ST_C2H_ERR_ENG_WPL_DATA_PAR_ERR,
QDMA_S80_HARD_ST_C2H_ERR_MSI_INT_FAIL,
QDMA_S80_HARD_ST_C2H_ERR_ERR_DESC_CNT,
QDMA_S80_HARD_ST_C2H_ERR_PORTID_CTXT_MISMATCH,
QDMA_S80_HARD_ST_C2H_ERR_PORTID_BYP_IN_MISMATCH,
QDMA_S80_HARD_ST_C2H_ERR_WRB_INV_Q_ERR,
QDMA_S80_HARD_ST_C2H_ERR_WRB_QFULL_ERR,
QDMA_S80_HARD_ST_C2H_ERR_WRB_CIDX_ERR,
QDMA_S80_HARD_ST_C2H_ERR_WRB_PRTY_ERR,
QDMA_S80_HARD_ST_C2H_ERR_ALL,
/* Fatal Errors */
QDMA_S80_HARD_ST_FATAL_ERR_MTY_MISMATCH,
QDMA_S80_HARD_ST_FATAL_ERR_LEN_MISMATCH,
QDMA_S80_HARD_ST_FATAL_ERR_QID_MISMATCH,
QDMA_S80_HARD_ST_FATAL_ERR_TIMER_FIFO_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_PFCH_II_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_WRB_CTXT_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_PFCH_CTXT_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_DESC_REQ_FIFO_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_INT_CTXT_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_INT_QID2VEC_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_WRB_COAL_DATA_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_TUSER_FIFO_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_QID_FIFO_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_PAYLOAD_FIFO_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_WPL_DATA_PAR_ERR,
QDMA_S80_HARD_ST_FATAL_ERR_ALL,
/* H2C Errors */
QDMA_S80_HARD_ST_H2C_ERR_ZERO_LEN_DESC_ERR,
QDMA_S80_HARD_ST_H2C_ERR_SDI_MRKR_REQ_MOP_ERR,
QDMA_S80_HARD_ST_H2C_ERR_NO_DMA_DSC,
QDMA_S80_HARD_ST_H2C_ERR_DBE,
QDMA_S80_HARD_ST_H2C_ERR_SBE,
QDMA_S80_HARD_ST_H2C_ERR_ALL,
/* Single bit errors */
QDMA_S80_HARD_SBE_ERR_MI_H2C0_DAT,
QDMA_S80_HARD_SBE_ERR_MI_C2H0_DAT,
QDMA_S80_HARD_SBE_ERR_H2C_RD_BRG_DAT,
QDMA_S80_HARD_SBE_ERR_H2C_WR_BRG_DAT,
QDMA_S80_HARD_SBE_ERR_C2H_RD_BRG_DAT,
QDMA_S80_HARD_SBE_ERR_C2H_WR_BRG_DAT,
QDMA_S80_HARD_SBE_ERR_FUNC_MAP,
QDMA_S80_HARD_SBE_ERR_DSC_HW_CTXT,
QDMA_S80_HARD_SBE_ERR_DSC_CRD_RCV,
QDMA_S80_HARD_SBE_ERR_DSC_SW_CTXT,
QDMA_S80_HARD_SBE_ERR_DSC_CPLI,
QDMA_S80_HARD_SBE_ERR_DSC_CPLD,
QDMA_S80_HARD_SBE_ERR_PASID_CTXT_RAM,
QDMA_S80_HARD_SBE_ERR_TIMER_FIFO_RAM,
QDMA_S80_HARD_SBE_ERR_PAYLOAD_FIFO_RAM,
QDMA_S80_HARD_SBE_ERR_QID_FIFO_RAM,
QDMA_S80_HARD_SBE_ERR_TUSER_FIFO_RAM,
QDMA_S80_HARD_SBE_ERR_WRB_COAL_DATA_RAM,
QDMA_S80_HARD_SBE_ERR_INT_QID2VEC_RAM,
QDMA_S80_HARD_SBE_ERR_INT_CTXT_RAM,
QDMA_S80_HARD_SBE_ERR_DESC_REQ_FIFO_RAM,
QDMA_S80_HARD_SBE_ERR_PFCH_CTXT_RAM,
QDMA_S80_HARD_SBE_ERR_WRB_CTXT_RAM,
QDMA_S80_HARD_SBE_ERR_PFCH_LL_RAM,
QDMA_S80_HARD_SBE_ERR_ALL,
/* Double bit Errors */
QDMA_S80_HARD_DBE_ERR_MI_H2C0_DAT,
QDMA_S80_HARD_DBE_ERR_MI_C2H0_DAT,
QDMA_S80_HARD_DBE_ERR_H2C_RD_BRG_DAT,
QDMA_S80_HARD_DBE_ERR_H2C_WR_BRG_DAT,
QDMA_S80_HARD_DBE_ERR_C2H_RD_BRG_DAT,
QDMA_S80_HARD_DBE_ERR_C2H_WR_BRG_DAT,
QDMA_S80_HARD_DBE_ERR_FUNC_MAP,
QDMA_S80_HARD_DBE_ERR_DSC_HW_CTXT,
QDMA_S80_HARD_DBE_ERR_DSC_CRD_RCV,
QDMA_S80_HARD_DBE_ERR_DSC_SW_CTXT,
QDMA_S80_HARD_DBE_ERR_DSC_CPLI,
QDMA_S80_HARD_DBE_ERR_DSC_CPLD,
QDMA_S80_HARD_DBE_ERR_PASID_CTXT_RAM,
QDMA_S80_HARD_DBE_ERR_TIMER_FIFO_RAM,
QDMA_S80_HARD_DBE_ERR_PAYLOAD_FIFO_RAM,
QDMA_S80_HARD_DBE_ERR_QID_FIFO_RAM,
QDMA_S80_HARD_DBE_ERR_WRB_COAL_DATA_RAM,
QDMA_S80_HARD_DBE_ERR_INT_QID2VEC_RAM,
QDMA_S80_HARD_DBE_ERR_INT_CTXT_RAM,
QDMA_S80_HARD_DBE_ERR_DESC_REQ_FIFO_RAM,
QDMA_S80_HARD_DBE_ERR_PFCH_CTXT_RAM,
QDMA_S80_HARD_DBE_ERR_WRB_CTXT_RAM,
QDMA_S80_HARD_DBE_ERR_PFCH_LL_RAM,
QDMA_S80_HARD_DBE_ERR_ALL,
QDMA_S80_HARD_ERRS_ALL
};
struct qdma_s80_hard_hw_err_info {
enum qdma_s80_hard_error_idx idx;
const char *err_name;
uint32_t mask_reg_addr;
uint32_t stat_reg_addr;
uint32_t leaf_err_mask;
uint32_t global_err_mask;
void (*qdma_s80_hard_hw_err_process)(void *dev_hndl);
};
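/*
 * Illustrative sketch only (not part of this commit): each hardware error is
 * described by one qdma_s80_hard_hw_err_info entry, and the per-error process
 * callback is expected to be run when error processing (see
 * qdma_s80_hard_hw_error_process() below) finds the matching status bit set.
 * The register offsets, bit masks and handler name here are assumptions
 * chosen purely for the example:
 *
 *	static void example_dsc_err_process(void *dev_hndl) { (void)dev_hndl; }
 *
 *	static const struct qdma_s80_hard_hw_err_info example_entry = {
 *		.idx             = QDMA_S80_HARD_DSC_ERR_TIMEOUT,
 *		.err_name        = "QDMA_S80_HARD_DSC_ERR_TIMEOUT",
 *		.mask_reg_addr   = 0x254,      // assumed leaf mask register
 *		.stat_reg_addr   = 0x258,      // assumed leaf status register
 *		.leaf_err_mask   = (1U << 9),  // assumed bit for this error
 *		.global_err_mask = (1U << 1),  // assumed global summary bit
 *		.qdma_s80_hard_hw_err_process = example_dsc_err_process,
 *	};
 */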
int qdma_s80_hard_init_ctxt_memory(void *dev_hndl); int qdma_s80_hard_init_ctxt_memory(void *dev_hndl);
int qdma_s80_hard_qid2vec_conf(void *dev_hndl, uint8_t c2h, uint16_t hw_qid, int qdma_s80_hard_qid2vec_conf(void *dev_hndl, uint8_t c2h, uint16_t hw_qid,
...@@ -77,11 +230,15 @@ int qdma_s80_hard_get_device_attributes(void *dev_hndl, ...@@ -77,11 +230,15 @@ int qdma_s80_hard_get_device_attributes(void *dev_hndl,
uint32_t qdma_s80_hard_reg_dump_buf_len(void); uint32_t qdma_s80_hard_reg_dump_buf_len(void);
int qdma_s80_hard_context_buf_len(uint8_t st, int qdma_s80_hard_context_buf_len(uint8_t st,
enum qdma_dev_q_type q_type, uint32_t *buflen); enum qdma_dev_q_type q_type, uint32_t *req_buflen);
int qdma_s80_hard_dump_config_regs(void *dev_hndl, uint8_t is_vf, int qdma_s80_hard_dump_config_regs(void *dev_hndl, uint8_t is_vf,
char *buf, uint32_t buflen); char *buf, uint32_t buflen);
int qdma_s80_hard_hw_error_process(void *dev_hndl);
const char *qdma_s80_hard_hw_get_error_name(uint32_t err_idx);
int qdma_s80_hard_hw_error_enable(void *dev_hndl, uint32_t err_idx);
int qdma_s80_hard_dump_queue_context(void *dev_hndl, int qdma_s80_hard_dump_queue_context(void *dev_hndl,
uint8_t st, uint8_t st,
enum qdma_dev_q_type q_type, enum qdma_dev_q_type q_type,
...@@ -100,17 +257,38 @@ int qdma_s80_hard_read_dump_queue_context(void *dev_hndl, ...@@ -100,17 +257,38 @@ int qdma_s80_hard_read_dump_queue_context(void *dev_hndl,
char *buf, uint32_t buflen); char *buf, uint32_t buflen);
int qdma_s80_hard_dump_config_reg_list(void *dev_hndl, int qdma_s80_hard_dump_config_reg_list(void *dev_hndl,
uint32_t num_regs, uint32_t total_regs,
struct qdma_reg_data *reg_list, struct qdma_reg_data *reg_list,
char *buf, uint32_t buflen); char *buf, uint32_t buflen);
int qdma_s80_hard_read_reg_list(void *dev_hndl, uint8_t is_vf, int qdma_s80_hard_read_reg_list(void *dev_hndl, uint8_t is_vf,
uint16_t reg_rd_group, uint16_t reg_rd_slot,
uint16_t *total_regs, uint16_t *total_regs,
struct qdma_reg_data *reg_list); struct qdma_reg_data *reg_list);
int qdma_s80_hard_global_csr_conf(void *dev_hndl, uint8_t index,
uint8_t count,
uint32_t *csr_val,
enum qdma_global_csr_type csr_type,
enum qdma_hw_access_type access_type);
int qdma_s80_hard_global_writeback_interval_conf(void *dev_hndl,
enum qdma_wrb_interval *wb_int,
enum qdma_hw_access_type access_type);
int qdma_s80_hard_mm_channel_conf(void *dev_hndl, uint8_t channel,
uint8_t is_c2h,
uint8_t enable);
int qdma_s80_hard_dump_reg_info(void *dev_hndl, uint32_t reg_addr,
uint32_t num_regs, char *buf, uint32_t buflen);
uint32_t qdma_s80_hard_get_config_num_regs(void);
struct xreg_info *qdma_s80_hard_get_config_regs(void);
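/*
 * Usage sketch (illustrative only): the new dump_reg_info entry point renders
 * a field-level decode of num_regs registers starting at reg_addr into a
 * caller-supplied buffer. dev_hndl validity and the offset 0x204 (taken from
 * QDMA_OFFSET_GLBL_RNG_SZ in qdma_soft_reg.h; S80 hard offsets may differ)
 * are assumptions for this sketch:
 *
 *	char buf[4096];
 *	int rv = qdma_s80_hard_dump_reg_info(dev_hndl, 0x204, 4,
 *					     buf, sizeof(buf));
 *	if (rv < 0)
 *		return rv;		// negative access-layer error code
 *	printf("%s", buf);		// requires <stdio.h>
 */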
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
#endif /* QDMA_S80_HARD_ACCESS_H_ */ #endif /* __QDMA_S80_HARD_ACCESS_H_ */
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, but WITHOUT * Redistribution and use in source and binary forms, with or without
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * modification, are permitted provided that the following conditions
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * are met:
* more details.
* *
* The full GNU General Public License is included in this distribution in * * Redistributions of source code must retain the above copyright
* the file called "COPYING". * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#ifndef QDMA_ACCESS_H_ #ifndef __QDMA_SOFT_ACCESS_H_
#define QDMA_ACCESS_H_ #define __QDMA_SOFT_ACCESS_H_
#include "qdma_access_export.h"
#include "qdma_platform_env.h"
#include "qdma_access_errors.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
...@@ -31,6 +44,136 @@ extern "C" { ...@@ -31,6 +44,136 @@ extern "C" {
* exported by QDMA common library. * exported by QDMA common library.
*/ */
#include "qdma_platform.h"
/**
* enum qdma_error_idx - qdma errors
*/
enum qdma_error_idx {
/* Descriptor errors */
QDMA_DSC_ERR_POISON,
QDMA_DSC_ERR_UR_CA,
QDMA_DSC_ERR_PARAM,
QDMA_DSC_ERR_ADDR,
QDMA_DSC_ERR_TAG,
QDMA_DSC_ERR_FLR,
QDMA_DSC_ERR_TIMEOUT,
QDMA_DSC_ERR_DAT_POISON,
QDMA_DSC_ERR_FLR_CANCEL,
QDMA_DSC_ERR_DMA,
QDMA_DSC_ERR_DSC,
QDMA_DSC_ERR_RQ_CANCEL,
QDMA_DSC_ERR_DBE,
QDMA_DSC_ERR_SBE,
QDMA_DSC_ERR_ALL,
/* TRQ Errors */
QDMA_TRQ_ERR_UNMAPPED,
QDMA_TRQ_ERR_QID_RANGE,
QDMA_TRQ_ERR_VF_ACCESS,
QDMA_TRQ_ERR_TCP_TIMEOUT,
QDMA_TRQ_ERR_ALL,
/* C2H Errors */
QDMA_ST_C2H_ERR_MTY_MISMATCH,
QDMA_ST_C2H_ERR_LEN_MISMATCH,
QDMA_ST_C2H_ERR_QID_MISMATCH,
QDMA_ST_C2H_ERR_DESC_RSP_ERR,
QDMA_ST_C2H_ERR_ENG_WPL_DATA_PAR_ERR,
QDMA_ST_C2H_ERR_MSI_INT_FAIL,
QDMA_ST_C2H_ERR_ERR_DESC_CNT,
QDMA_ST_C2H_ERR_PORTID_CTXT_MISMATCH,
QDMA_ST_C2H_ERR_PORTID_BYP_IN_MISMATCH,
QDMA_ST_C2H_ERR_CMPT_INV_Q_ERR,
QDMA_ST_C2H_ERR_CMPT_QFULL_ERR,
QDMA_ST_C2H_ERR_CMPT_CIDX_ERR,
QDMA_ST_C2H_ERR_CMPT_PRTY_ERR,
QDMA_ST_C2H_ERR_ALL,
/* Fatal Errors */
QDMA_ST_FATAL_ERR_MTY_MISMATCH,
QDMA_ST_FATAL_ERR_LEN_MISMATCH,
QDMA_ST_FATAL_ERR_QID_MISMATCH,
QDMA_ST_FATAL_ERR_TIMER_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_PFCH_II_RAM_RDBE,
QDMA_ST_FATAL_ERR_CMPT_CTXT_RAM_RDBE,
QDMA_ST_FATAL_ERR_PFCH_CTXT_RAM_RDBE,
QDMA_ST_FATAL_ERR_DESC_REQ_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_INT_CTXT_RAM_RDBE,
QDMA_ST_FATAL_ERR_CMPT_COAL_DATA_RAM_RDBE,
QDMA_ST_FATAL_ERR_TUSER_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_QID_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_PAYLOAD_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_WPL_DATA_PAR,
QDMA_ST_FATAL_ERR_ALL,
/* H2C Errors */
QDMA_ST_H2C_ERR_ZERO_LEN_DESC,
QDMA_ST_H2C_ERR_CSI_MOP,
QDMA_ST_H2C_ERR_NO_DMA_DSC,
QDMA_ST_H2C_ERR_SBE,
QDMA_ST_H2C_ERR_DBE,
QDMA_ST_H2C_ERR_ALL,
/* Single bit errors */
QDMA_SBE_ERR_MI_H2C0_DAT,
QDMA_SBE_ERR_MI_C2H0_DAT,
QDMA_SBE_ERR_H2C_RD_BRG_DAT,
QDMA_SBE_ERR_H2C_WR_BRG_DAT,
QDMA_SBE_ERR_C2H_RD_BRG_DAT,
QDMA_SBE_ERR_C2H_WR_BRG_DAT,
QDMA_SBE_ERR_FUNC_MAP,
QDMA_SBE_ERR_DSC_HW_CTXT,
QDMA_SBE_ERR_DSC_CRD_RCV,
QDMA_SBE_ERR_DSC_SW_CTXT,
QDMA_SBE_ERR_DSC_CPLI,
QDMA_SBE_ERR_DSC_CPLD,
QDMA_SBE_ERR_PASID_CTXT_RAM,
QDMA_SBE_ERR_TIMER_FIFO_RAM,
QDMA_SBE_ERR_PAYLOAD_FIFO_RAM,
QDMA_SBE_ERR_QID_FIFO_RAM,
QDMA_SBE_ERR_TUSER_FIFO_RAM,
QDMA_SBE_ERR_WRB_COAL_DATA_RAM,
QDMA_SBE_ERR_INT_QID2VEC_RAM,
QDMA_SBE_ERR_INT_CTXT_RAM,
QDMA_SBE_ERR_DESC_REQ_FIFO_RAM,
QDMA_SBE_ERR_PFCH_CTXT_RAM,
QDMA_SBE_ERR_WRB_CTXT_RAM,
QDMA_SBE_ERR_PFCH_LL_RAM,
QDMA_SBE_ERR_H2C_PEND_FIFO,
QDMA_SBE_ERR_ALL,
/* Double bit Errors */
QDMA_DBE_ERR_MI_H2C0_DAT,
QDMA_DBE_ERR_MI_C2H0_DAT,
QDMA_DBE_ERR_H2C_RD_BRG_DAT,
QDMA_DBE_ERR_H2C_WR_BRG_DAT,
QDMA_DBE_ERR_C2H_RD_BRG_DAT,
QDMA_DBE_ERR_C2H_WR_BRG_DAT,
QDMA_DBE_ERR_FUNC_MAP,
QDMA_DBE_ERR_DSC_HW_CTXT,
QDMA_DBE_ERR_DSC_CRD_RCV,
QDMA_DBE_ERR_DSC_SW_CTXT,
QDMA_DBE_ERR_DSC_CPLI,
QDMA_DBE_ERR_DSC_CPLD,
QDMA_DBE_ERR_PASID_CTXT_RAM,
QDMA_DBE_ERR_TIMER_FIFO_RAM,
QDMA_DBE_ERR_PAYLOAD_FIFO_RAM,
QDMA_DBE_ERR_QID_FIFO_RAM,
QDMA_DBE_ERR_TUSER_FIFO_RAM,
QDMA_DBE_ERR_WRB_COAL_DATA_RAM,
QDMA_DBE_ERR_INT_QID2VEC_RAM,
QDMA_DBE_ERR_INT_CTXT_RAM,
QDMA_DBE_ERR_DESC_REQ_FIFO_RAM,
QDMA_DBE_ERR_PFCH_CTXT_RAM,
QDMA_DBE_ERR_WRB_CTXT_RAM,
QDMA_DBE_ERR_PFCH_LL_RAM,
QDMA_DBE_ERR_H2C_PEND_FIFO,
QDMA_DBE_ERR_ALL,
QDMA_ERRS_ALL
};
struct qdma_hw_err_info { struct qdma_hw_err_info {
enum qdma_error_idx idx; enum qdma_error_idx idx;
const char *err_name; const char *err_name;
...@@ -38,6 +181,7 @@ struct qdma_hw_err_info { ...@@ -38,6 +181,7 @@ struct qdma_hw_err_info {
uint32_t stat_reg_addr; uint32_t stat_reg_addr;
uint32_t leaf_err_mask; uint32_t leaf_err_mask;
uint32_t global_err_mask; uint32_t global_err_mask;
void (*qdma_hw_err_process)(void *dev_hndl);
}; };
...@@ -98,6 +242,10 @@ int qdma_dump_intr_context(void *dev_hndl, ...@@ -98,6 +242,10 @@ int qdma_dump_intr_context(void *dev_hndl,
uint32_t qdma_soft_reg_dump_buf_len(void); uint32_t qdma_soft_reg_dump_buf_len(void);
uint32_t qdma_get_config_num_regs(void);
struct xreg_info *qdma_get_config_regs(void);
int qdma_soft_context_buf_len(uint8_t st, int qdma_soft_context_buf_len(uint8_t st,
enum qdma_dev_q_type q_type, uint32_t *buflen); enum qdma_dev_q_type q_type, uint32_t *buflen);
...@@ -129,7 +277,7 @@ int qdma_get_user_bar(void *dev_hndl, uint8_t is_vf, ...@@ -129,7 +277,7 @@ int qdma_get_user_bar(void *dev_hndl, uint8_t is_vf,
uint8_t func_id, uint8_t *user_bar); uint8_t func_id, uint8_t *user_bar);
int qdma_soft_dump_config_reg_list(void *dev_hndl, int qdma_soft_dump_config_reg_list(void *dev_hndl,
uint32_t num_regs, uint32_t total_regs,
struct qdma_reg_data *reg_list, struct qdma_reg_data *reg_list,
char *buf, uint32_t buflen); char *buf, uint32_t buflen);
...@@ -138,9 +286,23 @@ int qdma_read_reg_list(void *dev_hndl, uint8_t is_vf, ...@@ -138,9 +286,23 @@ int qdma_read_reg_list(void *dev_hndl, uint8_t is_vf,
uint16_t *total_regs, uint16_t *total_regs,
struct qdma_reg_data *reg_list); struct qdma_reg_data *reg_list);
int qdma_global_csr_conf(void *dev_hndl, uint8_t index, uint8_t count,
uint32_t *csr_val,
enum qdma_global_csr_type csr_type,
enum qdma_hw_access_type access_type);
int qdma_global_writeback_interval_conf(void *dev_hndl,
enum qdma_wrb_interval *wb_int,
enum qdma_hw_access_type access_type);
int qdma_mm_channel_conf(void *dev_hndl, uint8_t channel, uint8_t is_c2h,
uint8_t enable);
int qdma_dump_reg_info(void *dev_hndl, uint32_t reg_addr,
uint32_t num_regs, char *buf, uint32_t buflen);
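/*
 * Usage sketch (illustrative only): read the 16 global ring-size CSRs through
 * the new qdma_global_csr_conf() entry point. QDMA_CSR_RING_SZ and
 * QDMA_HW_ACCESS_READ are assumed to be the matching csr_type and access_type
 * enumerators exported by the common access layer:
 *
 *	uint32_t ring_sz[16];
 *	int rv = qdma_global_csr_conf(dev_hndl, 0, 16, ring_sz,
 *				      QDMA_CSR_RING_SZ, QDMA_HW_ACCESS_READ);
 *	if (rv < 0)
 *		return rv;		// negative access-layer error code
 */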
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
#endif /* QDMA_ACCESS_H_ */ #endif /* __QDMA_SOFT_ACCESS_H_ */
/* /*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved. * Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* *
* This source code is free software; you can redistribute it and/or modify it * BSD LICENSE
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, but WITHOUT * Redistribution and use in source and binary forms, with or without
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * modification, are permitted provided that the following conditions
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * are met:
* more details.
* *
* The full GNU General Public License is included in this distribution in * * Redistributions of source code must retain the above copyright
* the file called "COPYING". * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#ifndef QDMA_SOFT_REG_H__ #ifndef __QDMA_SOFT_REG_H__
#define QDMA_SOFT_REG_H__ #define __QDMA_SOFT_REG_H__
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
...@@ -65,56 +81,7 @@ extern "C" { ...@@ -65,56 +81,7 @@ extern "C" {
(0xFFFFFFFFFFFFFFFF >> (BITS_PER_LONG_LONG - 1 - (h)))) (0xFFFFFFFFFFFFFFFF >> (BITS_PER_LONG_LONG - 1 - (h))))
/* #define DEBGFS_LINE_SZ (81)
* Returns the number of trailing 0s in x, starting at LSB.
* Equivalent to gcc __builtin_ctzll, i.e. __builtin_ffsll(x) - 1
*/
#ifdef GCC_COMPILER
static inline uint32_t get_trailing_zeros(uint64_t x)
{
uint32_t rv =
__builtin_ffsll(x) - 1;
return rv;
}
#else
static inline uint32_t get_trailing_zeros(uint64_t value)
{
uint32_t pos = 0;
if ((value & 0xffffffff) == 0) {
pos += 32;
value >>= 32;
}
if ((value & 0xffff) == 0) {
pos += 16;
value >>= 16;
}
if ((value & 0xff) == 0) {
pos += 8;
value >>= 8;
}
if ((value & 0xf) == 0) {
pos += 4;
value >>= 4;
}
if ((value & 0x3) == 0) {
pos += 2;
value >>= 2;
}
if ((value & 0x1) == 0)
pos += 1;
return pos;
}
#endif
#define FIELD_SHIFT(mask) get_trailing_zeros(mask)
#define FIELD_SET(mask, val) ((val << FIELD_SHIFT(mask)) & mask)
#define FIELD_GET(mask, reg) ((reg & mask) >> FIELD_SHIFT(mask))
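/*
 * Worked example (illustrative values): with mask = GENMASK(15, 4) = 0xFFF0,
 * FIELD_SHIFT(0xFFF0) evaluates to 4, so
 *	FIELD_SET(0xFFF0, 0x2A)   == (0x2A << 4) & 0xFFF0   == 0x02A0
 *	FIELD_GET(0xFFF0, 0x12A5) == (0x12A5 & 0xFFF0) >> 4 == 0x012A
 * This is how context fields such as QDMA_SW_CTXT_W0_PIDX below are packed
 * into and extracted from the raw context words.
 */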
/* polling a register */
#define QDMA_REG_POLL_DFLT_INTERVAL_US 10 /* 10us per poll */
#define QDMA_REG_POLL_DFLT_TIMEOUT_US (500*1000) /* 500ms */
#define QDMA_H2C_THROT_DATA_THRESH 0x4000 #define QDMA_H2C_THROT_DATA_THRESH 0x4000
...@@ -125,28 +92,6 @@ static inline uint32_t get_trailing_zeros(uint64_t value) ...@@ -125,28 +92,6 @@ static inline uint32_t get_trailing_zeros(uint64_t value)
/* /*
* Q Context programming (indirect) * Q Context programming (indirect)
*/ */
enum ind_ctxt_cmd_op {
QDMA_CTXT_CMD_CLR,
QDMA_CTXT_CMD_WR,
QDMA_CTXT_CMD_RD,
QDMA_CTXT_CMD_INV
};
enum ind_ctxt_cmd_sel {
QDMA_CTXT_SEL_SW_C2H,
QDMA_CTXT_SEL_SW_H2C,
QDMA_CTXT_SEL_HW_C2H,
QDMA_CTXT_SEL_HW_H2C,
QDMA_CTXT_SEL_CR_C2H,
QDMA_CTXT_SEL_CR_H2C,
QDMA_CTXT_SEL_CMPT,
QDMA_CTXT_SEL_PFTCH,
QDMA_CTXT_SEL_INT_COAL,
QDMA_CTXT_SEL_PASID_RAM_LOW,
QDMA_CTXT_SEL_PASID_RAM_HIGH,
QDMA_CTXT_SEL_TIMER,
QDMA_CTXT_SEL_FMAP,
};
#define QDMA_REG_IND_CTXT_REG_COUNT 8 #define QDMA_REG_IND_CTXT_REG_COUNT 8
#define QDMA_REG_IND_CTXT_WCNT_1 1 #define QDMA_REG_IND_CTXT_WCNT_1 1
...@@ -197,9 +142,7 @@ enum ind_ctxt_cmd_sel { ...@@ -197,9 +142,7 @@ enum ind_ctxt_cmd_sel {
#define QDMA_SW_CTXT_W0_IRQ_ARM_MASK BIT(16) #define QDMA_SW_CTXT_W0_IRQ_ARM_MASK BIT(16)
#define QDMA_SW_CTXT_W0_PIDX GENMASK(15, 0) #define QDMA_SW_CTXT_W0_PIDX GENMASK(15, 0)
/** QDMA_IND_REG_SEL_PFTCH */
#define QDMA_PFTCH_CTXT_SW_CRDT_GET_H_MASK GENMASK(15, 3)
#define QDMA_PFTCH_CTXT_SW_CRDT_GET_L_MASK GENMASK(2, 0)
#define QDMA_PFTCH_CTXT_W1_VALID_MASK BIT(13) #define QDMA_PFTCH_CTXT_W1_VALID_MASK BIT(13)
#define QDMA_PFTCH_CTXT_W1_SW_CRDT_H_MASK GENMASK(12, 0) #define QDMA_PFTCH_CTXT_W1_SW_CRDT_H_MASK GENMASK(12, 0)
...@@ -211,11 +154,8 @@ enum ind_ctxt_cmd_sel { ...@@ -211,11 +154,8 @@ enum ind_ctxt_cmd_sel {
#define QDMA_PFTCH_CTXT_W0_BUF_SIZE_IDX_MASK GENMASK(4, 1) #define QDMA_PFTCH_CTXT_W0_BUF_SIZE_IDX_MASK GENMASK(4, 1)
#define QDMA_PFTCH_CTXT_W0_BYPASS_MASK BIT(0) #define QDMA_PFTCH_CTXT_W0_BYPASS_MASK BIT(0)
/** QDMA_IND_REG_SEL_CMPT */
#define QDMA_COMPL_CTXT_BADDR_GET_H_MASK GENMASK_ULL(63, 38)
#define QDMA_COMPL_CTXT_BADDR_GET_L_MASK GENMASK_ULL(37, 12)
#define QDMA_COMPL_CTXT_PIDX_GET_H_MASK GENMASK(15, 4)
#define QDMA_COMPL_CTXT_PIDX_GET_L_MASK GENMASK(3, 0)
#define QDMA_COMPL_CTXT_W4_INTR_AGGR_MASK BIT(15) #define QDMA_COMPL_CTXT_W4_INTR_AGGR_MASK BIT(15)
#define QDMA_COMPL_CTXT_W4_INTR_VEC_MASK GENMASK(14, 4) #define QDMA_COMPL_CTXT_W4_INTR_VEC_MASK GENMASK(14, 4)
...@@ -256,9 +196,7 @@ enum ind_ctxt_cmd_sel { ...@@ -256,9 +196,7 @@ enum ind_ctxt_cmd_sel {
#define QDMA_CR_CTXT_W0_CREDT_MASK GENMASK(15, 0) #define QDMA_CR_CTXT_W0_CREDT_MASK GENMASK(15, 0)
/** QDMA_IND_REG_SEL_INTR */ /** QDMA_IND_REG_SEL_INTR */
#define QDMA_INTR_CTXT_BADDR_GET_H_MASK GENMASK_ULL(63, 61)
#define QDMA_INTR_CTXT_BADDR_GET_M_MASK GENMASK_ULL(60, 29)
#define QDMA_INTR_CTXT_BADDR_GET_L_MASK GENMASK_ULL(28, 12)
#define QDMA_INTR_CTXT_W2_AT_MASK BIT(18) #define QDMA_INTR_CTXT_W2_AT_MASK BIT(18)
#define QDMA_INTR_CTXT_W2_PIDX_MASK GENMASK(17, 6) #define QDMA_INTR_CTXT_W2_PIDX_MASK GENMASK(17, 6)
...@@ -271,17 +209,9 @@ enum ind_ctxt_cmd_sel { ...@@ -271,17 +209,9 @@ enum ind_ctxt_cmd_sel {
#define QDMA_INTR_CTXT_W0_VEC_ID_MASK GENMASK(11, 1) #define QDMA_INTR_CTXT_W0_VEC_ID_MASK GENMASK(11, 1)
#define QDMA_INTR_CTXT_W0_VALID_MASK BIT(0) #define QDMA_INTR_CTXT_W0_VALID_MASK BIT(0)
/** Constants */
#define QDMA_NUM_RING_SIZES 16
#define QDMA_NUM_C2H_TIMERS 16
#define QDMA_NUM_C2H_BUFFER_SIZES 16
#define QDMA_NUM_C2H_COUNTERS 16
#define QDMA_MM_CONTROL_RUN 0x1
#define QDMA_MM_CONTROL_STEP 0x100
#define QDMA_MAGIC_NUMBER 0x1fd3
#define QDMA_PIDX_STEP 0x10
#define QDMA_CMPT_CIDX_STEP 0x10
#define QDMA_INT_CIDX_STEP 0x10
/* ------------------------ QDMA_TRQ_SEL_GLBL (0x00200)-------------------*/ /* ------------------------ QDMA_TRQ_SEL_GLBL (0x00200)-------------------*/
#define QDMA_OFFSET_GLBL_RNG_SZ 0x204 #define QDMA_OFFSET_GLBL_RNG_SZ 0x204
...@@ -459,9 +389,7 @@ enum ind_ctxt_cmd_sel { ...@@ -459,9 +389,7 @@ enum ind_ctxt_cmd_sel {
#define QDMA_OFFSET_GLBL2_CHANNEL_FUNC_RET 0x12C #define QDMA_OFFSET_GLBL2_CHANNEL_FUNC_RET 0x12C
#define QDMA_OFFSET_GLBL2_SYSTEM_ID 0x130 #define QDMA_OFFSET_GLBL2_SYSTEM_ID 0x130
#define QDMA_OFFSET_GLBL2_MISC_CAP 0x134 #define QDMA_OFFSET_GLBL2_MISC_CAP 0x134
#define QDMA_GLBL2_MM_CMPT_EN_MASK BIT(2)
#define QDMA_GLBL2_FLR_PRESENT_MASK BIT(1)
#define QDMA_GLBL2_MAILBOX_EN_MASK BIT(0)
#define QDMA_GLBL2_DEVICE_ID_MASK GENMASK(31, 28) #define QDMA_GLBL2_DEVICE_ID_MASK GENMASK(31, 28)
#define QDMA_GLBL2_VIVADO_RELEASE_MASK GENMASK(27, 24) #define QDMA_GLBL2_VIVADO_RELEASE_MASK GENMASK(27, 24)
#define QDMA_GLBL2_VERSAL_IP_MASK GENMASK(23, 20) #define QDMA_GLBL2_VERSAL_IP_MASK GENMASK(23, 20)
...@@ -667,4 +595,4 @@ enum ind_ctxt_cmd_sel { ...@@ -667,4 +595,4 @@ enum ind_ctxt_cmd_sel {
} }
#endif #endif
#endif /* ifndef QDMA_SOFT_REG_H__ */ #endif /* __QDMA_SOFT_REG_H__ */
...@@ -544,7 +544,7 @@ int qdma_identify_bars(struct rte_eth_dev *dev) ...@@ -544,7 +544,7 @@ int qdma_identify_bars(struct rte_eth_dev *dev)
return -1; return -1;
} }
/* Find user bar*/ /* Find AXI Master Lite(user bar) */
ret = dma_priv->hw_access->qdma_get_user_bar(dev, ret = dma_priv->hw_access->qdma_get_user_bar(dev,
dma_priv->is_vf, dma_priv->func_id, &usr_bar); dma_priv->is_vf, dma_priv->func_id, &usr_bar);
if ((ret != QDMA_SUCCESS) || if ((ret != QDMA_SUCCESS) ||
...@@ -556,12 +556,12 @@ int qdma_identify_bars(struct rte_eth_dev *dev) ...@@ -556,12 +556,12 @@ int qdma_identify_bars(struct rte_eth_dev *dev)
dma_priv->user_bar_idx = 1; dma_priv->user_bar_idx = 1;
} else { } else {
dma_priv->user_bar_idx = -1; dma_priv->user_bar_idx = -1;
PMD_DRV_LOG(INFO, "Cannot find User BAR"); PMD_DRV_LOG(INFO, "Cannot find AXI Master Lite BAR");
} }
} else } else
dma_priv->user_bar_idx = usr_bar; dma_priv->user_bar_idx = usr_bar;
/* Find bypass bar*/ /* Find AXI Bridge Master bar(bypass bar) */
for (i = 0; i < QDMA_NUM_BARS; i++) { for (i = 0; i < QDMA_NUM_BARS; i++) {
bar_len = pci_dev->mem_resource[i].len; bar_len = pci_dev->mem_resource[i].len;
if (!bar_len) /* Bar not enabled ? */ if (!bar_len) /* Bar not enabled ? */
...@@ -575,8 +575,9 @@ int qdma_identify_bars(struct rte_eth_dev *dev) ...@@ -575,8 +575,9 @@ int qdma_identify_bars(struct rte_eth_dev *dev)
PMD_DRV_LOG(INFO, "QDMA config bar idx :%d\n", PMD_DRV_LOG(INFO, "QDMA config bar idx :%d\n",
dma_priv->config_bar_idx); dma_priv->config_bar_idx);
PMD_DRV_LOG(INFO, "QDMA user bar idx :%d\n", dma_priv->user_bar_idx); PMD_DRV_LOG(INFO, "QDMA AXI Master Lite bar idx :%d\n",
PMD_DRV_LOG(INFO, "QDMA bypass bar idx :%d\n", dma_priv->user_bar_idx);
PMD_DRV_LOG(INFO, "QDMA AXI Bridge Master bar idx :%d\n",
dma_priv->bypass_bar_idx); dma_priv->bypass_bar_idx);
return 0; return 0;
...@@ -590,7 +591,7 @@ int qdma_get_hw_version(struct rte_eth_dev *dev) ...@@ -590,7 +591,7 @@ int qdma_get_hw_version(struct rte_eth_dev *dev)
dma_priv = (struct qdma_pci_dev *)dev->data->dev_private; dma_priv = (struct qdma_pci_dev *)dev->data->dev_private;
ret = dma_priv->hw_access->qdma_get_version(dev, ret = dma_priv->hw_access->qdma_get_version(dev,
dma_priv->is_vf, &version_info); dma_priv->is_vf, &version_info);
if (ret != QDMA_SUCCESS) if (ret < 0)
return dma_priv->hw_access->qdma_get_error_code(ret); return dma_priv->hw_access->qdma_get_error_code(ret);
dma_priv->rtl_version = version_info.rtl_version; dma_priv->rtl_version = version_info.rtl_version;
......
...@@ -169,7 +169,10 @@ int qdma_pf_csr_read(struct rte_eth_dev *dev) ...@@ -169,7 +169,10 @@ int qdma_pf_csr_read(struct rte_eth_dev *dev)
"returned %d", ret); "returned %d", ret);
} }
return qdma_dev->hw_access->qdma_get_error_code(ret); if (ret < 0)
return qdma_dev->hw_access->qdma_get_error_code(ret);
return ret;
} }
static int qdma_pf_fmap_prog(struct rte_eth_dev *dev) static int qdma_pf_fmap_prog(struct rte_eth_dev *dev)
...@@ -185,7 +188,7 @@ static int qdma_pf_fmap_prog(struct rte_eth_dev *dev) ...@@ -185,7 +188,7 @@ static int qdma_pf_fmap_prog(struct rte_eth_dev *dev)
fmap_cfg.qmax = qdma_dev->qsets_en; fmap_cfg.qmax = qdma_dev->qsets_en;
ret = qdma_dev->hw_access->qdma_fmap_conf(dev, ret = qdma_dev->hw_access->qdma_fmap_conf(dev,
qdma_dev->func_id, &fmap_cfg, QDMA_HW_ACCESS_WRITE); qdma_dev->func_id, &fmap_cfg, QDMA_HW_ACCESS_WRITE);
if (ret != QDMA_SUCCESS) if (ret < 0)
return qdma_dev->hw_access->qdma_get_error_code(ret); return qdma_dev->hw_access->qdma_get_error_code(ret);
return ret; return ret;
...@@ -421,6 +424,28 @@ int qdma_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, ...@@ -421,6 +424,28 @@ int qdma_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
RTE_PMD_QDMA_RX_BYPASS_SIMPLE) RTE_PMD_QDMA_RX_BYPASS_SIMPLE)
rxq->en_bypass_prefetch = 1; rxq->en_bypass_prefetch = 1;
if (qdma_dev->ip_type == EQDMA_SOFT_IP &&
qdma_dev->vivado_rel >= QDMA_VIVADO_2020_2) {
if (qdma_dev->dev_cap.desc_eng_mode ==
QDMA_DESC_ENG_BYPASS_ONLY) {
PMD_DRV_LOG(ERR,
"Bypass only mode design "
"is not supported\n");
return -ENOTSUP;
}
if (rxq->en_bypass &&
(qdma_dev->dev_cap.desc_eng_mode ==
QDMA_DESC_ENG_INTERNAL_ONLY)) {
PMD_DRV_LOG(ERR,
"Rx qid %d config in bypass "
"mode not supported on "
"internal only mode design\n",
rx_queue_id);
return -ENOTSUP;
}
}
if (rxq->en_bypass) { if (rxq->en_bypass) {
rxq->bypass_desc_sz = rxq->bypass_desc_sz =
qdma_dev->q_info[rx_queue_id].rx_bypass_desc_sz; qdma_dev->q_info[rx_queue_id].rx_bypass_desc_sz;
...@@ -753,6 +778,28 @@ int qdma_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, ...@@ -753,6 +778,28 @@ int qdma_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
goto tx_setup_err; goto tx_setup_err;
} }
if (qdma_dev->ip_type == EQDMA_SOFT_IP &&
qdma_dev->vivado_rel >= QDMA_VIVADO_2020_2) {
if (qdma_dev->dev_cap.desc_eng_mode ==
QDMA_DESC_ENG_BYPASS_ONLY) {
PMD_DRV_LOG(ERR,
"Bypass only mode design "
"is not supported\n");
return -ENOTSUP;
}
if (txq->en_bypass &&
(qdma_dev->dev_cap.desc_eng_mode ==
QDMA_DESC_ENG_INTERNAL_ONLY)) {
PMD_DRV_LOG(ERR,
"Tx qid %d config in bypass "
"mode not supported on "
"internal only mode design\n",
tx_queue_id);
return -ENOTSUP;
}
}
/* Allocate memory for TX descriptor ring */ /* Allocate memory for TX descriptor ring */
if (txq->st_mode) { if (txq->st_mode) {
if (!qdma_dev->dev_cap.st_en) { if (!qdma_dev->dev_cap.st_en) {
...@@ -1104,6 +1151,7 @@ void qdma_dev_close(struct rte_eth_dev *dev) ...@@ -1104,6 +1151,7 @@ void qdma_dev_close(struct rte_eth_dev *dev)
struct qdma_cmpt_queue *cmptq; struct qdma_cmpt_queue *cmptq;
uint32_t qid; uint32_t qid;
struct qdma_fmap_cfg fmap_cfg; struct qdma_fmap_cfg fmap_cfg;
int ret = 0;
PMD_DRV_LOG(INFO, "PF-%d(DEVFN) DEV Close\n", qdma_dev->func_id); PMD_DRV_LOG(INFO, "PF-%d(DEVFN) DEV Close\n", qdma_dev->func_id);
...@@ -1189,8 +1237,14 @@ void qdma_dev_close(struct rte_eth_dev *dev) ...@@ -1189,8 +1237,14 @@ void qdma_dev_close(struct rte_eth_dev *dev)
} }
} }
qdma_dev->qsets_en = 0; qdma_dev->qsets_en = 0;
qdma_dev_update(qdma_dev->dma_device_index, qdma_dev->func_id, ret = qdma_dev_update(qdma_dev->dma_device_index, qdma_dev->func_id,
qdma_dev->qsets_en, (int *)&qdma_dev->queue_base); qdma_dev->qsets_en, (int *)&qdma_dev->queue_base);
if (ret != QDMA_SUCCESS) {
PMD_DRV_LOG(ERR, "PF-%d(DEVFN) qmax update failed: %d\n",
qdma_dev->func_id, ret);
return;
}
qdma_dev->init_q_range = 0; qdma_dev->init_q_range = 0;
rte_free(qdma_dev->q_info); rte_free(qdma_dev->q_info);
qdma_dev->q_info = NULL; qdma_dev->q_info = NULL;
...@@ -1451,7 +1505,7 @@ int qdma_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qid) ...@@ -1451,7 +1505,7 @@ int qdma_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qid)
err = hw_access->qdma_sw_ctx_conf(dev, 0, err = hw_access->qdma_sw_ctx_conf(dev, 0,
(qid + queue_base), &q_sw_ctxt, (qid + queue_base), &q_sw_ctxt,
QDMA_HW_ACCESS_WRITE); QDMA_HW_ACCESS_WRITE);
if (err != QDMA_SUCCESS) if (err < 0)
return qdma_dev->hw_access->qdma_get_error_code(err); return qdma_dev->hw_access->qdma_get_error_code(err);
txq->q_pidx_info.pidx = 0; txq->q_pidx_info.pidx = 0;
...@@ -1557,20 +1611,20 @@ int qdma_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qid) ...@@ -1557,20 +1611,20 @@ int qdma_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qid)
/* Set SW Context */ /* Set SW Context */
err = hw_access->qdma_sw_ctx_conf(dev, 1, (qid + queue_base), err = hw_access->qdma_sw_ctx_conf(dev, 1, (qid + queue_base),
&q_sw_ctxt, QDMA_HW_ACCESS_WRITE); &q_sw_ctxt, QDMA_HW_ACCESS_WRITE);
if (err != QDMA_SUCCESS) if (err < 0)
return qdma_dev->hw_access->qdma_get_error_code(err); return qdma_dev->hw_access->qdma_get_error_code(err);
if (rxq->st_mode) { if (rxq->st_mode) {
/* Set Prefetch Context */ /* Set Prefetch Context */
err = hw_access->qdma_pfetch_ctx_conf(dev, (qid + queue_base), err = hw_access->qdma_pfetch_ctx_conf(dev, (qid + queue_base),
&q_prefetch_ctxt, QDMA_HW_ACCESS_WRITE); &q_prefetch_ctxt, QDMA_HW_ACCESS_WRITE);
if (err != QDMA_SUCCESS) if (err < 0)
return qdma_dev->hw_access->qdma_get_error_code(err); return qdma_dev->hw_access->qdma_get_error_code(err);
/* Set Completion Context */ /* Set Completion Context */
err = hw_access->qdma_cmpt_ctx_conf(dev, (qid + queue_base), err = hw_access->qdma_cmpt_ctx_conf(dev, (qid + queue_base),
&q_cmpt_ctxt, QDMA_HW_ACCESS_WRITE); &q_cmpt_ctxt, QDMA_HW_ACCESS_WRITE);
if (err != QDMA_SUCCESS) if (err < 0)
return qdma_dev->hw_access->qdma_get_error_code(err); return qdma_dev->hw_access->qdma_get_error_code(err);
rte_wmb(); rte_wmb();
...@@ -1707,26 +1761,37 @@ int ...@@ -1707,26 +1761,37 @@ int
qdma_dev_get_regs(struct rte_eth_dev *dev, qdma_dev_get_regs(struct rte_eth_dev *dev,
struct rte_dev_reg_info *regs) struct rte_dev_reg_info *regs)
{ {
struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
uint32_t *data = regs->data; uint32_t *data = regs->data;
uint32_t count = 0; uint32_t reg_length = 0;
uint32_t reg_length = (sizeof(qdma_config_regs) / int ret = 0;
sizeof(qdma_config_regs[0])) - 1;
ret = qdma_acc_get_num_config_regs(dev,
(enum qdma_ip_type)qdma_dev->ip_type,
&reg_length);
if (ret < 0 || reg_length == 0) {
PMD_DRV_LOG(ERR, "%s: Failed to get number of config registers\n",
__func__);
return ret;
}
if (data == NULL) { if (data == NULL) {
regs->length = reg_length; regs->length = reg_length - 1;
regs->width = sizeof(uint32_t); regs->width = sizeof(uint32_t);
return 0; return 0;
} }
/* Support only full register dump */ /* Support only full register dump */
if ((regs->length == 0) || if ((regs->length == 0) ||
(regs->length == reg_length)) { (regs->length == (reg_length - 1))) {
regs->version = 1; regs->version = 1;
for (count = 0; count < reg_length; count++) { ret = qdma_acc_get_config_regs(dev, qdma_dev->is_vf,
data[count] = qdma_reg_read(dev, (enum qdma_ip_type)qdma_dev->ip_type, data);
qdma_config_regs[count].addr); if (ret < 0) {
PMD_DRV_LOG(ERR, "%s: Failed to get config registers\n",
__func__);
} }
return 0; return ret;
} }
PMD_DRV_LOG(ERR, "%s: Unsupported length (0x%x) requested\n", PMD_DRV_LOG(ERR, "%s: Unsupported length (0x%x) requested\n",
......
...@@ -61,6 +61,8 @@ ...@@ -61,6 +61,8 @@
#define PCI_CONFIG_BRIDGE_DEVICE (6) #define PCI_CONFIG_BRIDGE_DEVICE (6)
#define PCI_CONFIG_CLASS_CODE_SHIFT (16) #define PCI_CONFIG_CLASS_CODE_SHIFT (16)
#define MAX_PCIE_CAPABILITY (48)
static void qdma_device_attributes_get(struct rte_eth_dev *dev); static void qdma_device_attributes_get(struct rte_eth_dev *dev);
/* Poll for any QDMA errors */ /* Poll for any QDMA errors */
...@@ -273,23 +275,25 @@ static inline uint8_t pcie_find_cap(const struct rte_pci_device *pci_dev, ...@@ -273,23 +275,25 @@ static inline uint8_t pcie_find_cap(const struct rte_pci_device *pci_dev,
{ {
uint8_t pcie_cap_pos = 0; uint8_t pcie_cap_pos = 0;
uint8_t pcie_cap_id = 0; uint8_t pcie_cap_id = 0;
int ttl = MAX_PCIE_CAPABILITY;
int ret;
if (rte_pci_read_config(pci_dev, &pcie_cap_pos, sizeof(uint8_t), ret = rte_pci_read_config(pci_dev, &pcie_cap_pos, sizeof(uint8_t),
PCI_CAPABILITY_LIST) < 0) { PCI_CAPABILITY_LIST);
if (ret < 0) {
PMD_DRV_LOG(ERR, "PCIe config space read failed..\n"); PMD_DRV_LOG(ERR, "PCIe config space read failed..\n");
return 0; return 0;
} }
if (pcie_cap_pos < 0x40) while (ttl-- && pcie_cap_pos >= PCI_STD_HEADER_SIZEOF) {
return 0;
while (pcie_cap_pos >= 0x40) {
pcie_cap_pos &= ~3; pcie_cap_pos &= ~3;
if (rte_pci_read_config(pci_dev, &pcie_cap_id, sizeof(uint8_t), ret = rte_pci_read_config(pci_dev,
pcie_cap_pos + PCI_CAP_LIST_ID) < 0) { &pcie_cap_id, sizeof(uint8_t),
(pcie_cap_pos + PCI_CAP_LIST_ID));
if (ret < 0) {
PMD_DRV_LOG(ERR, "PCIe config space read failed..\n"); PMD_DRV_LOG(ERR, "PCIe config space read failed..\n");
goto ret; return 0;
} }
if (pcie_cap_id == 0xff) if (pcie_cap_id == 0xff)
...@@ -298,14 +302,15 @@ static inline uint8_t pcie_find_cap(const struct rte_pci_device *pci_dev, ...@@ -298,14 +302,15 @@ static inline uint8_t pcie_find_cap(const struct rte_pci_device *pci_dev,
if (pcie_cap_id == cap) if (pcie_cap_id == cap)
return pcie_cap_pos; return pcie_cap_pos;
if (rte_pci_read_config(pci_dev, &pcie_cap_pos, sizeof(uint8_t), ret = rte_pci_read_config(pci_dev,
pcie_cap_pos + PCI_CAP_LIST_NEXT) < 0) { &pcie_cap_pos, sizeof(uint8_t),
(pcie_cap_pos + PCI_CAP_LIST_NEXT));
if (ret < 0) {
PMD_DRV_LOG(ERR, "PCIe config space read failed..\n"); PMD_DRV_LOG(ERR, "PCIe config space read failed..\n");
goto ret; return 0;
} }
} }
ret:
return 0; return 0;
} }
...@@ -402,16 +407,10 @@ static int parse_pci_addr_format(const char *buf, ...@@ -402,16 +407,10 @@ static int parse_pci_addr_format(const char *buf,
*splitaddr.function++ = '\0'; *splitaddr.function++ = '\0';
/* now convert to int values */ /* now convert to int values */
errno = 0;
addr->domain = strtoul(splitaddr.domain, NULL, 16); addr->domain = strtoul(splitaddr.domain, NULL, 16);
addr->bus = strtoul(splitaddr.bus, NULL, 16); addr->bus = strtoul(splitaddr.bus, NULL, 16);
addr->devid = strtoul(splitaddr.devid, NULL, 16); addr->devid = strtoul(splitaddr.devid, NULL, 16);
addr->function = strtoul(splitaddr.function, NULL, 10); addr->function = strtoul(splitaddr.function, NULL, 10);
if (errno != 0) {
PMD_DRV_LOG(ERR,
"Failed to convert pci address to int values\n");
goto error;
}
free(buf_copy); /* free the copy made with strdup */ free(buf_copy); /* free the copy made with strdup */
return 0; return 0;
...@@ -440,8 +439,8 @@ static int get_max_pci_bus_num(uint8_t start_bus, uint8_t *end_bus) ...@@ -440,8 +439,8 @@ static int get_max_pci_bus_num(uint8_t start_bus, uint8_t *end_bus)
/* Open pci devices directory */ /* Open pci devices directory */
dir = opendir(rte_pci_get_sysfs_path()); dir = opendir(rte_pci_get_sysfs_path());
if (dir == NULL) { if (dir == NULL) {
PMD_DRV_LOG(ERR, "%s(): opendir failed: %s\n", PMD_DRV_LOG(ERR, "%s(): opendir failed\n",
__func__, strerror(errno)); __func__);
return -1; return -1;
} }
...@@ -471,8 +470,8 @@ static int get_max_pci_bus_num(uint8_t start_bus, uint8_t *end_bus) ...@@ -471,8 +470,8 @@ static int get_max_pci_bus_num(uint8_t start_bus, uint8_t *end_bus)
"%s/config", dirname); "%s/config", dirname);
fd = open(cfgname, O_RDWR); fd = open(cfgname, O_RDWR);
if (fd < 0) { if (fd < 0) {
PMD_DRV_LOG(ERR, "Failed to open %s: %s\n", PMD_DRV_LOG(ERR, "Failed to open %s\n",
cfgname, strerror(errno)); cfgname);
goto error; goto error;
} }
...@@ -493,7 +492,6 @@ static int get_max_pci_bus_num(uint8_t start_bus, uint8_t *end_bus) ...@@ -493,7 +492,6 @@ static int get_max_pci_bus_num(uint8_t start_bus, uint8_t *end_bus)
close(fd); close(fd);
goto error; goto error;
} }
close(fd);
/* Get max bus number by checking if given bus number /* Get max bus number by checking if given bus number
* falls in between secondary and subordinate bus * falls in between secondary and subordinate bus
...@@ -502,9 +500,12 @@ static int get_max_pci_bus_num(uint8_t start_bus, uint8_t *end_bus) ...@@ -502,9 +500,12 @@ static int get_max_pci_bus_num(uint8_t start_bus, uint8_t *end_bus)
if ((start_bus >= sec_bus_num) && if ((start_bus >= sec_bus_num) &&
(start_bus <= sub_bus_num)) { (start_bus <= sub_bus_num)) {
*end_bus = sub_bus_num; *end_bus = sub_bus_num;
close(fd);
closedir(dir); closedir(dir);
return 0; return 0;
} }
close(fd);
} }
} }
...@@ -621,7 +622,7 @@ int qdma_eth_dev_init(struct rte_eth_dev *dev) ...@@ -621,7 +622,7 @@ int qdma_eth_dev_init(struct rte_eth_dev *dev)
return -EINVAL; return -EINVAL;
} }
/* Store BAR address and length of User BAR */ /* Store BAR address and length of AXI Master Lite BAR(user bar) */
if (dma_priv->user_bar_idx >= 0) { if (dma_priv->user_bar_idx >= 0) {
baseaddr = (uint8_t *) baseaddr = (uint8_t *)
pci_dev->mem_resource[dma_priv->user_bar_idx].addr; pci_dev->mem_resource[dma_priv->user_bar_idx].addr;
...@@ -699,8 +700,23 @@ int qdma_eth_dev_init(struct rte_eth_dev *dev) ...@@ -699,8 +700,23 @@ int qdma_eth_dev_init(struct rte_eth_dev *dev)
} }
} }
dma_priv->hw_access->qdma_init_ctxt_memory(dev); ret = dma_priv->hw_access->qdma_init_ctxt_memory(dev);
dma_priv->hw_access->qdma_hw_error_enable(dev, QDMA_ERRS_ALL); if (ret < 0) {
PMD_DRV_LOG(ERR,
"%s: Failed to initialize ctxt memory, err = %d\n",
__func__, ret);
return -EINVAL;
}
ret = dma_priv->hw_access->qdma_hw_error_enable(dev,
dma_priv->hw_access->qdma_max_errors);
if (ret < 0) {
PMD_DRV_LOG(ERR,
"%s: Failed to enable hw errors, err = %d\n",
__func__, ret);
return -EINVAL;
}
rte_eal_alarm_set(QDMA_ERROR_POLL_FRQ, qdma_check_errors, rte_eal_alarm_set(QDMA_ERROR_POLL_FRQ, qdma_check_errors,
(void *)dev); (void *)dev);
dma_priv->is_master = 1; dma_priv->is_master = 1;
......
...@@ -202,41 +202,6 @@ void qdma_udelay(uint32_t delay_usec) ...@@ -202,41 +202,6 @@ void qdma_udelay(uint32_t delay_usec)
rte_delay_us(delay_usec); rte_delay_us(delay_usec);
} }
/*****************************************************************************/
/**
* qdma_hw_error_handler() - function to handle the hardware errors
*
* @dev_hndl: device handle
* @err_idx: error index
*
* Return: None
*****************************************************************************/
void qdma_hw_error_handler(void *dev_hndl, enum qdma_error_idx err_idx)
{
struct qdma_pci_dev *qdma_dev;
qdma_dev = ((struct rte_eth_dev *)dev_hndl)->data->dev_private;
rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD,
"%s(): Detected %s\n", __func__,
qdma_dev->hw_access->qdma_hw_get_error_name(err_idx));
}
/*****************************************************************************/
/**
* qdma_get_device_attr() - function to retrieve device attributes
*
* @dev_hndl: device handle
* @dev_cap: pointer to the device attributes structure
*
* Return: None
*****************************************************************************/
void qdma_get_device_attr(void *dev_hndl, struct qdma_dev_attributes **dev_cap)
{
struct qdma_pci_dev *qdma_dev;
qdma_dev = ((struct rte_eth_dev *)dev_hndl)->data->dev_private;
*dev_cap = &qdma_dev->dev_cap;
}
/*****************************************************************************/ /*****************************************************************************/
/** /**
* qdma_get_hw_access() - function to get the qdma_hw_access * qdma_get_hw_access() - function to get the qdma_hw_access
...@@ -270,11 +235,16 @@ void qdma_strncpy(char *dest, const char *src, size_t n) ...@@ -270,11 +235,16 @@ void qdma_strncpy(char *dest, const char *src, size_t n)
/** /**
* qdma_get_err_code() - function to get the qdma access mapped error code * qdma_get_err_code() - function to get the qdma access mapped error code
* *
* @acc_err_code: qdma access error code * @acc_err_code: qdma access error code which is a negative input value
* *
* Return: returns the platform specific error code * Return: returns the platform specific error code
*****************************************************************************/ *****************************************************************************/
int qdma_get_err_code(int acc_err_code) int qdma_get_err_code(int acc_err_code)
{ {
/* Multiply acc_err_code by -1 to convert it to a positive number
* and use it as an array index for error codes.
*/
acc_err_code *= -1;
return -(error_code_map_list[acc_err_code].err_code); return -(error_code_map_list[acc_err_code].err_code);
} }
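/*
 * Illustration (hypothetical values): if the access layer returns
 * -QDMA_ERR_INV_PARAM and that enumerator happens to be 2, acc_err_code
 * becomes 2 after the sign flip, and the function returns
 * -(error_code_map_list[2].err_code), e.g. -EINVAL, to the DPDK caller.
 */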
...@@ -243,7 +243,8 @@ static int reclaim_tx_mbuf(struct qdma_tx_queue *txq, ...@@ -243,7 +243,8 @@ static int reclaim_tx_mbuf(struct qdma_tx_queue *txq,
fl_desc = free_cnt; fl_desc = free_cnt;
if ((id + fl_desc) < (txq->nb_tx_desc - 1)) { if ((id + fl_desc) < (txq->nb_tx_desc - 1)) {
for (count = 0; count < fl_desc; count++) { for (count = 0; count < ((uint16_t)fl_desc & 0xFFFF);
count++) {
rte_pktmbuf_free(txq->sw_ring[id]); rte_pktmbuf_free(txq->sw_ring[id]);
txq->sw_ring[id++] = NULL; txq->sw_ring[id++] = NULL;
} }
...@@ -255,7 +256,8 @@ static int reclaim_tx_mbuf(struct qdma_tx_queue *txq, ...@@ -255,7 +256,8 @@ static int reclaim_tx_mbuf(struct qdma_tx_queue *txq,
} }
id -= (txq->nb_tx_desc - 1); id -= (txq->nb_tx_desc - 1);
for (count = 0; count < fl_desc; count++) { for (count = 0; count < ((uint16_t)fl_desc & 0xFFFF);
count++) {
rte_pktmbuf_free(txq->sw_ring[id]); rte_pktmbuf_free(txq->sw_ring[id]);
txq->sw_ring[id++] = NULL; txq->sw_ring[id++] = NULL;
} }
...@@ -282,7 +284,7 @@ static uint16_t qdma_xmit_64B_desc_bypass(struct qdma_tx_queue *txq, ...@@ -282,7 +284,7 @@ static uint16_t qdma_xmit_64B_desc_bypass(struct qdma_tx_queue *txq,
memset(&tx_ring_st_bypass[id * (txq->bypass_desc_sz)], memset(&tx_ring_st_bypass[id * (txq->bypass_desc_sz)],
((id % 255) + 1), txq->bypass_desc_sz); ((id % 255) + 1), txq->bypass_desc_sz);
sprintf(fln, "q_%u_%s", txq->queue_id, snprintf(fln, sizeof(fln), "q_%u_%s", txq->queue_id,
"h2c_desc_data.txt"); "h2c_desc_data.txt");
ofd = open(fln, O_RDWR | O_CREAT | O_APPEND | O_SYNC, ofd = open(fln, O_RDWR | O_CREAT | O_APPEND | O_SYNC,
0666); 0666);
...@@ -756,11 +758,13 @@ static struct rte_mbuf *prepare_segmented_packet(struct qdma_rx_queue *rxq, ...@@ -756,11 +758,13 @@ static struct rte_mbuf *prepare_segmented_packet(struct qdma_rx_queue *rxq,
struct rte_mbuf *first_seg = NULL; struct rte_mbuf *first_seg = NULL;
struct rte_mbuf *last_seg = NULL; struct rte_mbuf *last_seg = NULL;
uint16_t id = *tail; uint16_t id = *tail;
uint16_t length;
uint16_t rx_buff_size = rxq->rx_buff_size; uint16_t rx_buff_size = rxq->rx_buff_size;
do { do {
mb = rxq->sw_ring[id]; mb = rxq->sw_ring[id];
rxq->sw_ring[id++] = NULL; rxq->sw_ring[id++] = NULL;
length = pkt_length;
if (unlikely(id >= (rxq->nb_rx_desc - 1))) if (unlikely(id >= (rxq->nb_rx_desc - 1)))
id -= (rxq->nb_rx_desc - 1); id -= (rxq->nb_rx_desc - 1);
...@@ -776,7 +780,7 @@ static struct rte_mbuf *prepare_segmented_packet(struct qdma_rx_queue *rxq, ...@@ -776,7 +780,7 @@ static struct rte_mbuf *prepare_segmented_packet(struct qdma_rx_queue *rxq,
if (first_seg == NULL) { if (first_seg == NULL) {
first_seg = mb; first_seg = mb;
first_seg->nb_segs = 1; first_seg->nb_segs = 1;
first_seg->pkt_len = pkt_length; first_seg->pkt_len = length;
first_seg->packet_type = 0; first_seg->packet_type = 0;
first_seg->ol_flags = 0; first_seg->ol_flags = 0;
first_seg->port = rxq->port_id; first_seg->port = rxq->port_id;
...@@ -952,7 +956,6 @@ static uint16_t prepare_packets_v(struct qdma_rx_queue *rxq, ...@@ -952,7 +956,6 @@ static uint16_t prepare_packets_v(struct qdma_rx_queue *rxq,
mb = prepare_single_packet(rxq, count); mb = prepare_single_packet(rxq, count);
if (mb) if (mb)
rx_pkts[count_pkts++] = mb; rx_pkts[count_pkts++] = mb;
count++;
} }
return count_pkts; return count_pkts;
...@@ -1027,7 +1030,7 @@ static int rearm_c2h_ring(struct qdma_rx_queue *rxq, uint16_t num_desc) ...@@ -1027,7 +1030,7 @@ static int rearm_c2h_ring(struct qdma_rx_queue *rxq, uint16_t num_desc)
__m128i head_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, __m128i head_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
RTE_PKTMBUF_HEADROOM); RTE_PKTMBUF_HEADROOM);
for (mbuf_index = 0; mbuf_index < rearm_cnt; for (mbuf_index = 0; mbuf_index < ((uint16_t)rearm_cnt & 0xFFFF);
mbuf_index += RTE_QDMA_DESCS_PER_LOOP, mbuf_index += RTE_QDMA_DESCS_PER_LOOP,
id += RTE_QDMA_DESCS_PER_LOOP) { id += RTE_QDMA_DESCS_PER_LOOP) {
__m128i vaddr0, vaddr1; __m128i vaddr0, vaddr1;
...@@ -1099,7 +1102,8 @@ static int rearm_c2h_ring(struct qdma_rx_queue *rxq, uint16_t num_desc) ...@@ -1099,7 +1102,8 @@ static int rearm_c2h_ring(struct qdma_rx_queue *rxq, uint16_t num_desc)
return -1; return -1;
} }
for (mbuf_index = 0; mbuf_index < rearm_descs; for (mbuf_index = 0;
mbuf_index < ((uint16_t)rearm_descs & 0xFFFF);
mbuf_index++, id++) { mbuf_index++, id++) {
mb = rxq->sw_ring[id]; mb = rxq->sw_ring[id];
mb->data_off = RTE_PKTMBUF_HEADROOM; mb->data_off = RTE_PKTMBUF_HEADROOM;
......
...@@ -111,7 +111,7 @@ int qdma_ul_process_immediate_data_st(void *qhndl, void *cmpt_entry, ...@@ -111,7 +111,7 @@ int qdma_ul_process_immediate_data_st(void *qhndl, void *cmpt_entry,
uint16_t queue_id = 0; uint16_t queue_id = 0;
queue_id = qdma_get_rx_queue_id(qhndl); queue_id = qdma_get_rx_queue_id(qhndl);
sprintf(fln, "q_%d_%s", queue_id, snprintf(fln, sizeof(fln), "q_%d_%s", queue_id,
"immmediate_data.txt"); "immmediate_data.txt");
ofd = open(fln, O_RDWR | O_CREAT | O_APPEND | ofd = open(fln, O_RDWR | O_CREAT | O_APPEND |
O_SYNC, 0666); O_SYNC, 0666);
......
...@@ -298,7 +298,7 @@ static int qdma_vf_set_qrange(struct rte_eth_dev *dev) ...@@ -298,7 +298,7 @@ static int qdma_vf_set_qrange(struct rte_eth_dev *dev)
return -ENOMEM; return -ENOMEM;
qdma_mbox_compose_vf_fmap_prog(qdma_dev->func_id, qdma_mbox_compose_vf_fmap_prog(qdma_dev->func_id,
qdma_dev->dev_cap.num_qs, (uint16_t)qdma_dev->qsets_en,
(int)qdma_dev->queue_base, (int)qdma_dev->queue_base,
m->raw_data); m->raw_data);
rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT); rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT);
...@@ -1105,7 +1105,7 @@ static int eth_qdma_vf_dev_init(struct rte_eth_dev *dev) ...@@ -1105,7 +1105,7 @@ static int eth_qdma_vf_dev_init(struct rte_eth_dev *dev)
return -EINVAL; return -EINVAL;
} }
/* Store BAR address and length of User BAR */ /* Store BAR address and length of AXI Master Lite BAR(user bar)*/
if (dma_priv->user_bar_idx >= 0) { if (dma_priv->user_bar_idx >= 0) {
baseaddr = (uint8_t *) baseaddr = (uint8_t *)
pci_dev->mem_resource[dma_priv->user_bar_idx].addr; pci_dev->mem_resource[dma_priv->user_bar_idx].addr;
......
...@@ -64,6 +64,12 @@ struct xdebug_desc_param { ...@@ -64,6 +64,12 @@ struct xdebug_desc_param {
enum rte_pmd_qdma_xdebug_desc_type type; enum rte_pmd_qdma_xdebug_desc_type type;
}; };
const char *qdma_desc_eng_mode_info[QDMA_DESC_ENG_MODE_MAX] = {
"Internal and Bypass mode",
"Bypass only mode",
"Internal only mode"
};
static void print_header(const char *str) static void print_header(const char *str)
{ {
xdebug_info("\n\n%s\n\n", str); xdebug_info("\n\n%s\n\n", str);
...@@ -375,9 +381,9 @@ static int qdma_device_dump(uint8_t port_id) ...@@ -375,9 +381,9 @@ static int qdma_device_dump(uint8_t port_id)
xdebug_info("\t\t config BAR index :%x\n", xdebug_info("\t\t config BAR index :%x\n",
qdma_dev->config_bar_idx); qdma_dev->config_bar_idx);
xdebug_info("\t\t user BAR index :%x\n", xdebug_info("\t\t AXI Master Lite BAR index :%x\n",
qdma_dev->user_bar_idx); qdma_dev->user_bar_idx);
xdebug_info("\t\t bypass BAR index :%x\n", xdebug_info("\t\t AXI Bridge Master BAR index :%x\n",
qdma_dev->bypass_bar_idx); qdma_dev->bypass_bar_idx);
xdebug_info("\t\t qsets enable :%x\n", xdebug_info("\t\t qsets enable :%x\n",
qdma_dev->qsets_en); qdma_dev->qsets_en);
...@@ -427,6 +433,10 @@ static int qdma_device_dump(uint8_t port_id) ...@@ -427,6 +433,10 @@ static int qdma_device_dump(uint8_t port_id)
qdma_dev->dev_cap.mailbox_en); qdma_dev->dev_cap.mailbox_en);
xdebug_info("\t\t Num of MM channels :%x\n", xdebug_info("\t\t Num of MM channels :%x\n",
qdma_dev->dev_cap.mm_channel_max); qdma_dev->dev_cap.mm_channel_max);
xdebug_info("\t\t Descriptor engine mode :%s\n",
qdma_desc_eng_mode_info[qdma_dev->dev_cap.desc_eng_mode]);
xdebug_info("\t\t Debug mode enable :%x\n",
qdma_dev->dev_cap.debug_mode);
return 0; return 0;
} }
...@@ -779,7 +789,8 @@ static int qdma_queue_desc_dump(uint8_t port_id, ...@@ -779,7 +789,8 @@ static int qdma_queue_desc_dump(uint8_t port_id,
for (x = param->start; x < param->end; x++) { for (x = param->start; x < param->end; x++) {
uint8_t *rx_bypass = uint8_t *rx_bypass =
&rx_ring_bypass[x]; &rx_ring_bypass[x];
sprintf(str, "\nDescriptor ID %d\t", x); snprintf(str, sizeof(str),
"\nDescriptor ID %d\t", x);
rte_hexdump(stdout, str, rte_hexdump(stdout, str,
(const void *)rx_bypass, (const void *)rx_bypass,
rxq->bypass_desc_sz); rxq->bypass_desc_sz);
...@@ -793,7 +804,8 @@ static int qdma_queue_desc_dump(uint8_t port_id, ...@@ -793,7 +804,8 @@ static int qdma_queue_desc_dump(uint8_t port_id,
for (x = param->start; x < param->end; x++) { for (x = param->start; x < param->end; x++) {
struct qdma_ul_st_c2h_desc *rx_st = struct qdma_ul_st_c2h_desc *rx_st =
&rx_ring_st[x]; &rx_ring_st[x];
sprintf(str, "\nDescriptor ID %d\t", x); snprintf(str, sizeof(str),
"\nDescriptor ID %d\t", x);
rte_hexdump(stdout, str, rte_hexdump(stdout, str,
(const void *)rx_st, (const void *)rx_st,
sizeof(struct qdma_ul_st_c2h_desc)); sizeof(struct qdma_ul_st_c2h_desc));
...@@ -803,7 +815,8 @@ static int qdma_queue_desc_dump(uint8_t port_id, ...@@ -803,7 +815,8 @@ static int qdma_queue_desc_dump(uint8_t port_id,
(struct qdma_ul_mm_desc *)rxq->rx_ring; (struct qdma_ul_mm_desc *)rxq->rx_ring;
xdebug_info("\n====== C2H ring descriptors======\n"); xdebug_info("\n====== C2H ring descriptors======\n");
for (x = param->start; x < param->end; x++) { for (x = param->start; x < param->end; x++) {
sprintf(str, "\nDescriptor ID %d\t", x); snprintf(str, sizeof(str),
"\nDescriptor ID %d\t", x);
rte_hexdump(stdout, str, rte_hexdump(stdout, str,
(const void *)&rx_ring_mm[x], (const void *)&rx_ring_mm[x],
sizeof(struct qdma_ul_mm_desc)); sizeof(struct qdma_ul_mm_desc));
...@@ -847,7 +860,8 @@ static int qdma_queue_desc_dump(uint8_t port_id, ...@@ -847,7 +860,8 @@ static int qdma_queue_desc_dump(uint8_t port_id,
uint32_t *cmpt_ring = (uint32_t *) uint32_t *cmpt_ring = (uint32_t *)
((uint64_t)(rxq->cmpt_ring) + ((uint64_t)(rxq->cmpt_ring) +
((uint64_t)x * rxq->cmpt_desc_len)); ((uint64_t)x * rxq->cmpt_desc_len));
sprintf(str, "\nDescriptor ID %d\t", x); snprintf(str, sizeof(str),
"\nDescriptor ID %d\t", x);
rte_hexdump(stdout, str, rte_hexdump(stdout, str,
(const void *)cmpt_ring, (const void *)cmpt_ring,
rxq->cmpt_desc_len); rxq->cmpt_desc_len);
...@@ -890,7 +904,8 @@ static int qdma_queue_desc_dump(uint8_t port_id, ...@@ -890,7 +904,8 @@ static int qdma_queue_desc_dump(uint8_t port_id,
for (x = param->start; x < param->end; x++) { for (x = param->start; x < param->end; x++) {
uint8_t *tx_bypass = uint8_t *tx_bypass =
&tx_ring_bypass[x]; &tx_ring_bypass[x];
sprintf(str, "\nDescriptor ID %d\t", x); snprintf(str, sizeof(str),
"\nDescriptor ID %d\t", x);
rte_hexdump(stdout, str, rte_hexdump(stdout, str,
(const void *)tx_bypass, (const void *)tx_bypass,
txq->bypass_desc_sz); txq->bypass_desc_sz);
...@@ -901,7 +916,8 @@ static int qdma_queue_desc_dump(uint8_t port_id, ...@@ -901,7 +916,8 @@ static int qdma_queue_desc_dump(uint8_t port_id,
(struct qdma_ul_st_h2c_desc *)txq->tx_ring; (struct qdma_ul_st_h2c_desc *)txq->tx_ring;
xdebug_info("\n====== H2C ring descriptors=====\n"); xdebug_info("\n====== H2C ring descriptors=====\n");
for (x = param->start; x < param->end; x++) { for (x = param->start; x < param->end; x++) {
sprintf(str, "\nDescriptor ID %d\t", x); snprintf(str, sizeof(str),
"\nDescriptor ID %d\t", x);
rte_hexdump(stdout, str, rte_hexdump(stdout, str,
(const void *)&qdma_h2c_ring[x], (const void *)&qdma_h2c_ring[x],
sizeof(struct qdma_ul_st_h2c_desc)); sizeof(struct qdma_ul_st_h2c_desc));
...@@ -911,7 +927,8 @@ static int qdma_queue_desc_dump(uint8_t port_id, ...@@ -911,7 +927,8 @@ static int qdma_queue_desc_dump(uint8_t port_id,
(struct qdma_ul_mm_desc *)txq->tx_ring; (struct qdma_ul_mm_desc *)txq->tx_ring;
xdebug_info("\n===== H2C ring descriptors=====\n"); xdebug_info("\n===== H2C ring descriptors=====\n");
for (x = param->start; x < param->end; x++) { for (x = param->start; x < param->end; x++) {
sprintf(str, "\nDescriptor ID %d\t", x); snprintf(str, sizeof(str),
"\nDescriptor ID %d\t", x);
rte_hexdump(stdout, str, rte_hexdump(stdout, str,
(const void *)&tx_ring_mm[x], (const void *)&tx_ring_mm[x],
sizeof(struct qdma_ul_mm_desc)); sizeof(struct qdma_ul_mm_desc));
...@@ -943,6 +960,47 @@ int rte_pmd_qdma_dbg_regdump(uint8_t port_id) ...@@ -943,6 +960,47 @@ int rte_pmd_qdma_dbg_regdump(uint8_t port_id)
return 0; return 0;
} }
int rte_pmd_qdma_dbg_reg_info_dump(uint8_t port_id,
uint32_t num_regs, uint32_t reg_addr)
{
struct rte_eth_dev *dev;
struct qdma_pci_dev *qdma_dev;
enum qdma_ip_type ip_type;
char *buf = NULL;
int buflen = QDMA_MAX_BUFLEN;
int ret;
if (port_id >= rte_eth_dev_count_avail()) {
xdebug_error("Wrong port id %d\n", port_id);
return -EINVAL;
}
dev = &rte_eth_devices[port_id];
qdma_dev = dev->data->dev_private;
ip_type = (enum qdma_ip_type)qdma_dev->ip_type;
/*allocate memory for register dump*/
buf = (char *)rte_zmalloc("QDMA_DUMP_BUF_REG_INFO", buflen,
RTE_CACHE_LINE_SIZE);
if (!buf) {
xdebug_error("Unable to allocate memory for reg info dump "
"size %d\n", buflen);
return -ENOMEM;
}
ret = qdma_acc_dump_reg_info(dev, ip_type,
reg_addr, num_regs, buf, buflen);
if (ret < 0) {
xdebug_error("Failed to dump reg field values\n");
rte_free(buf);
return qdma_get_error_code(ret);
}
xdebug_info("%s\n", buf);
rte_free(buf);
return 0;
}
int rte_pmd_qdma_dbg_qdevice(uint8_t port_id) int rte_pmd_qdma_dbg_qdevice(uint8_t port_id)
{ {
int err; int err;
......
...@@ -120,8 +120,8 @@ static int8_t qdma_get_trigger_mode(enum rte_pmd_qdma_tigger_mode_t mode) ...@@ -120,8 +120,8 @@ static int8_t qdma_get_trigger_mode(enum rte_pmd_qdma_tigger_mode_t mode)
* *
* @param port_id : Port ID * @param port_id : Port ID
* @param config_bar_idx : Config BAR index * @param config_bar_idx : Config BAR index
* @param user_bar_idx : User BAR index * @param user_bar_idx : AXI Master Lite BAR(user bar) index
* @param bypass_bar_idx : Bypass BAR index * @param bypass_bar_idx : AXI Bridge Master BAR(bypass bar) index
* *
* @return '0' on success and '< 0' on failure. * @return '0' on success and '< 0' on failure.
* *
...@@ -1147,6 +1147,8 @@ int rte_pmd_qdma_get_device_capabilities(int port_id, ...@@ -1147,6 +1147,8 @@ int rte_pmd_qdma_get_device_capabilities(int port_id,
dev_attr->mm_cmpt_en = qdma_dev->dev_cap.mm_cmpt_en; dev_attr->mm_cmpt_en = qdma_dev->dev_cap.mm_cmpt_en;
dev_attr->mailbox_en = qdma_dev->dev_cap.mailbox_en; dev_attr->mailbox_en = qdma_dev->dev_cap.mailbox_en;
dev_attr->mm_channel_max = qdma_dev->dev_cap.mm_channel_max; dev_attr->mm_channel_max = qdma_dev->dev_cap.mm_channel_max;
dev_attr->debug_mode = qdma_dev->dev_cap.debug_mode;
dev_attr->desc_eng_mode = qdma_dev->dev_cap.desc_eng_mode;
dev_attr->cmpt_ovf_chk_dis = qdma_dev->dev_cap.cmpt_ovf_chk_dis; dev_attr->cmpt_ovf_chk_dis = qdma_dev->dev_cap.cmpt_ovf_chk_dis;
dev_attr->sw_desc_64b = qdma_dev->dev_cap.sw_desc_64b; dev_attr->sw_desc_64b = qdma_dev->dev_cap.sw_desc_64b;
dev_attr->cmpt_desc_64b = qdma_dev->dev_cap.cmpt_desc_64b; dev_attr->cmpt_desc_64b = qdma_dev->dev_cap.cmpt_desc_64b;
...@@ -1512,7 +1514,7 @@ static int qdma_pf_cmptq_context_write(struct rte_eth_dev *dev, uint32_t qid) ...@@ -1512,7 +1514,7 @@ static int qdma_pf_cmptq_context_write(struct rte_eth_dev *dev, uint32_t qid)
/* Set Completion Context */ /* Set Completion Context */
err = qdma_dev->hw_access->qdma_cmpt_ctx_conf(dev, (qid + queue_base), err = qdma_dev->hw_access->qdma_cmpt_ctx_conf(dev, (qid + queue_base),
&q_cmpt_ctxt, QDMA_HW_ACCESS_WRITE); &q_cmpt_ctxt, QDMA_HW_ACCESS_WRITE);
if (err != QDMA_SUCCESS) if (err < 0)
return qdma_dev->hw_access->qdma_get_error_code(err); return qdma_dev->hw_access->qdma_get_error_code(err);
cmptq->cmpt_cidx_info.counter_idx = cmptq->threshidx; cmptq->cmpt_cidx_info.counter_idx = cmptq->threshidx;
......
...@@ -259,6 +259,12 @@ struct rte_pmd_qdma_dev_attributes { ...@@ -259,6 +259,12 @@ struct rte_pmd_qdma_dev_attributes {
uint8_t mm_cmpt_en:1; uint8_t mm_cmpt_en:1;
/** Indicates whether Mailbox supported or not */ /** Indicates whether Mailbox supported or not */
uint8_t mailbox_en:1; uint8_t mailbox_en:1;
/** Debug mode is enabled/disabled for IP */
uint8_t debug_mode:1;
/** Descriptor Engine mode:
* Internal only/Bypass only/Internal & Bypass
*/
uint8_t desc_eng_mode:2;
/** Number of MM channels */ /** Number of MM channels */
uint8_t mm_channel_max; uint8_t mm_channel_max;
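These new capability bits are surfaced through rte_pmd_qdma_get_device_capabilities(); a hedged sketch of reading them (port 0 is assumed and error handling is kept minimal for brevity):

    struct rte_pmd_qdma_dev_attributes attr;

    /* Sketch only: query the 2020.2 additions (debug mode and
     * descriptor engine mode) on port 0.
     */
    if (rte_pmd_qdma_get_device_capabilities(0 /* port_id */, &attr) == 0)
        printf("debug_mode=%d desc_eng_mode=%d\n",
                attr.debug_mode, attr.desc_eng_mode);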
...@@ -298,6 +304,21 @@ struct rte_pmd_qdma_dev_attributes { ...@@ -298,6 +304,21 @@ struct rte_pmd_qdma_dev_attributes {
******************************************************************************/ ******************************************************************************/
int rte_pmd_qdma_dbg_regdump(uint8_t port_id); int rte_pmd_qdma_dbg_regdump(uint8_t port_id);
/******************************************************************************/
/**
* Dumps the QDMA register field information for a given register offset
*
 * @param port_id	Port ID
 * @param num_regs	Number of registers whose field info is to be dumped
 * @param reg_addr	Register Address
 *
 * @return '0' on success and '< 0' on failure
*
* @note None
* @ingroup rte_pmd_qdma_func
******************************************************************************/
int rte_pmd_qdma_dbg_reg_info_dump(uint8_t port_id,
uint32_t num_regs, uint32_t reg_addr);
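A minimal usage sketch of this new debug API follows (the port id, register offset, and count are illustrative placeholders; the include path mirrors how the test application pulls in rte_pmd_qdma.h):

    #include <stdio.h>
    #include "rte_pmd_qdma.h"	/* actual path depends on the build setup */

    /* Illustrative only: dump the field breakdown of 4 registers starting
     * at config-BAR offset 0x0 on port 0.
     */
    static void example_reg_info_dump(void)
    {
        int rc = rte_pmd_qdma_dbg_reg_info_dump(0 /* port_id */,
                4 /* num_regs */, 0x0 /* reg_addr */);
        if (rc < 0)
            printf("reg field info dump failed: %d\n", rc);
    }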
/******************************************************************************/ /******************************************************************************/
/** /**
* Dumps the device specific SW structure for the given port * Dumps the device specific SW structure for the given port
...@@ -350,8 +371,8 @@ int rte_pmd_qdma_dbg_qdesc(uint8_t port_id, uint16_t queue, int start, ...@@ -350,8 +371,8 @@ int rte_pmd_qdma_dbg_qdesc(uint8_t port_id, uint16_t queue, int start,
* *
* @param port_id Port ID * @param port_id Port ID
* @param config_bar_idx Config BAR index * @param config_bar_idx Config BAR index
* @param user_bar_idx User BAR index * @param user_bar_idx AXI Master Lite BAR(user bar) index
* @param bypass_bar_idx Bypass BAR index * @param bypass_bar_idx AXI Bridge Master BAR(bypass bar) index
* *
* @return '0' on success and '< 0' on failure * @return '0' on success and '< 0' on failure
* *
......
...@@ -37,8 +37,8 @@ ...@@ -37,8 +37,8 @@
#define qdma_stringify(x...) qdma_stringify1(x) #define qdma_stringify(x...) qdma_stringify1(x)
#define QDMA_PMD_MAJOR 2020 #define QDMA_PMD_MAJOR 2020
#define QDMA_PMD_MINOR 1 #define QDMA_PMD_MINOR 2
#define QDMA_PMD_PATCHLEVEL 1 #define QDMA_PMD_PATCHLEVEL 0
#define QDMA_PMD_VERSION \ #define QDMA_PMD_VERSION \
qdma_stringify(QDMA_PMD_MAJOR) "." \ qdma_stringify(QDMA_PMD_MAJOR) "." \
......
# BSD LICENSE # BSD LICENSE
# #
# Copyright(c) 2017-2018 Xilinx, Inc. All rights reserved. # Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
# #
# Redistribution and use in source and binary forms, with or without # Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions # modification, are permitted provided that the following conditions
......
/*- /*-
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2017-2019 Xilinx, Inc. All rights reserved. * Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
...@@ -121,6 +121,9 @@ ...@@ -121,6 +121,9 @@
#include "testapp.h" #include "testapp.h"
#include "../../drivers/net/qdma/rte_pmd_qdma.h" #include "../../drivers/net/qdma/rte_pmd_qdma.h"
#define ALIGN_TO_WORD_BYTES (4)
#define NUMERICAL_BASE_HEXADECIMAL (16)
/* Command help */ /* Command help */
struct cmd_help_result { struct cmd_help_result {
cmdline_fixed_string_t help; cmdline_fixed_string_t help;
...@@ -162,6 +165,8 @@ static void cmd_help_parsed(__attribute__((unused)) void *parsed_result, ...@@ -162,6 +165,8 @@ static void cmd_help_parsed(__attribute__((unused)) void *parsed_result,
":To Receive\n" ":To Receive\n"
"\treg_dump <port-id> " "\treg_dump <port-id> "
":To dump all the valid registers\n" ":To dump all the valid registers\n"
"\treg_info_read <port-id> <reg-addr> <num-regs> "
":Reads the field info for the specified number of registers\n"
"\tqueue_dump <port-id> <queue-id> " "\tqueue_dump <port-id> <queue-id> "
":To dump the queue-context of a queue-number\n" ":To dump the queue-context of a queue-number\n"
"\tdesc_dump <port-id> <queue-id> " "\tdesc_dump <port-id> <queue-id> "
...@@ -475,9 +480,9 @@ static void cmd_obj_reg_read_parsed(void *parsed_result, ...@@ -475,9 +480,9 @@ static void cmd_obj_reg_read_parsed(void *parsed_result,
cmdline_printf(cl, "Read Port:%s, BAR-index:%s, Address:%s\n\n", cmdline_printf(cl, "Read Port:%s, BAR-index:%s, Address:%s\n\n",
res->port_id, res->bar_id, res->addr); res->port_id, res->bar_id, res->addr);
int addr = strtol(res->addr, NULL, 16); int addr = strtol(res->addr, NULL, NUMERICAL_BASE_HEXADECIMAL);
if (addr % 4) { if (addr % ALIGN_TO_WORD_BYTES) {
cmdline_printf(cl, "ERROR: Read address must aligned to " cmdline_printf(cl, "ERROR: Read address must aligned to "
"a 4-byte boundary.\n\n"); "a 4-byte boundary.\n\n");
} else { } else {
...@@ -540,17 +545,17 @@ static void cmd_obj_reg_write_parsed(void *parsed_result, ...@@ -540,17 +545,17 @@ static void cmd_obj_reg_write_parsed(void *parsed_result,
int bar_id = atoi(res->bar_id); int bar_id = atoi(res->bar_id);
int port_id = atoi(res->port_id); int port_id = atoi(res->port_id);
int addr = strtol(res->address, NULL, 16); int addr = strtol(res->address, NULL, NUMERICAL_BASE_HEXADECIMAL);
if (port_id >= num_ports) { if (port_id >= num_ports) {
cmdline_printf(cl, "Error: port-id:%d not supported\n " cmdline_printf(cl, "Error: port-id:%d not supported\n "
"Please enter valid port-id\n", port_id); "Please enter valid port-id\n", port_id);
return; return;
} }
if (addr % 4) { if (addr % ALIGN_TO_WORD_BYTES) {
cmdline_printf(cl, "ERROR: Write address must aligned to a " cmdline_printf(cl, "ERROR: Write address must aligned to a "
"4-byte boundary.\n\n"); "4-byte boundary.\n\n");
} else{ } else{
int value = strtol(res->value, NULL, 16); int value = strtol(res->value, NULL, NUMERICAL_BASE_HEXADECIMAL);
PciWrite(bar_id, addr, value, port_id); PciWrite(bar_id, addr, value, port_id);
int result = PciRead(bar_id, addr, port_id); int result = PciRead(bar_id, addr, port_id);
cmdline_printf(cl, "Read (%d:0x%08x) = 0x%08x\n", port_id, addr, cmdline_printf(cl, "Read (%d:0x%08x) = 0x%08x\n", port_id, addr,
...@@ -1090,6 +1095,67 @@ cmdline_parse_inst_t cmd_obj_reg_dump = { ...@@ -1090,6 +1095,67 @@ cmdline_parse_inst_t cmd_obj_reg_dump = {
}; };
/* Command Read Info addr */
struct cmd_obj_reg_info_read_result {
cmdline_fixed_string_t action;
cmdline_fixed_string_t port_id;
cmdline_fixed_string_t reg_addr;
cmdline_fixed_string_t num_regs;
};
static void cmd_obj_reg_info_read_parsed(void *parsed_result,
struct cmdline *cl,
__attribute__((unused)) void *data)
{
struct cmd_obj_reg_info_read_result *res = parsed_result;
cmdline_printf(cl, "Read Reg info Port:%s, Address:%s, Num Regs: %s\n\n",
res->port_id, res->reg_addr, res->num_regs);
int reg_addr = strtol(res->reg_addr, NULL, NUMERICAL_BASE_HEXADECIMAL);
if (reg_addr % ALIGN_TO_WORD_BYTES) {
cmdline_printf(cl, "ERROR: Read address must aligned to "
"a 4-byte boundary.\n\n");
} else {
int port_id = atoi(res->port_id);
int num_regs = atoi(res->num_regs);
if (port_id >= num_ports) {
cmdline_printf(cl, "Error: port-id:%d not supported\n "
"Please enter valid port-id\n",
port_id);
return;
}
		rte_pmd_qdma_dbg_reg_info_dump(port_id, num_regs, reg_addr);
}
}
cmdline_parse_token_string_t cmd_obj_action_reg_info_read =
TOKEN_STRING_INITIALIZER(struct cmd_obj_reg_info_read_result, action,
"reg_info_read");
cmdline_parse_token_string_t cmd_obj_reg_info_read_port_id =
TOKEN_STRING_INITIALIZER(struct cmd_obj_reg_info_read_result, port_id, NULL);
cmdline_parse_token_string_t cmd_obj_reg_info_read_reg_addr =
TOKEN_STRING_INITIALIZER(struct cmd_obj_reg_info_read_result, reg_addr, NULL);
cmdline_parse_token_string_t cmd_obj_reg_info_read_num_regs =
TOKEN_STRING_INITIALIZER(struct cmd_obj_reg_info_read_result, num_regs, NULL);
cmdline_parse_inst_t cmd_obj_reg_info_read = {
.f = cmd_obj_reg_info_read_parsed, /* function to call */
.data = NULL, /* 2nd arg of func */
.help_str = "reg_info_read port-id reg-addr",
.tokens = { /* token list, NULL terminated */
(void *)&cmd_obj_action_reg_info_read,
(void *)&cmd_obj_reg_info_read_port_id,
(void *)&cmd_obj_reg_info_read_reg_addr,
(void *)&cmd_obj_reg_info_read_num_regs,
NULL,
},
};
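From the test application prompt this maps to, for example, 'reg_info_read 0 0x0 4' (values illustrative): the port id, a hexadecimal register offset that must be 4-byte aligned, and the number of registers, starting at that offset, whose field information is printed.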
/*Command queue-context dump*/ /*Command queue-context dump*/
struct cmd_obj_queue_dump_result { struct cmd_obj_queue_dump_result {
...@@ -1297,6 +1363,7 @@ cmdline_parse_ctx_t main_ctx[] = { ...@@ -1297,6 +1363,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_obj_dma_to_device, (cmdline_parse_inst_t *)&cmd_obj_dma_to_device,
(cmdline_parse_inst_t *)&cmd_obj_dma_from_device, (cmdline_parse_inst_t *)&cmd_obj_dma_from_device,
(cmdline_parse_inst_t *)&cmd_obj_reg_dump, (cmdline_parse_inst_t *)&cmd_obj_reg_dump,
(cmdline_parse_inst_t *)&cmd_obj_reg_info_read,
(cmdline_parse_inst_t *)&cmd_obj_queue_dump, (cmdline_parse_inst_t *)&cmd_obj_queue_dump,
(cmdline_parse_inst_t *)&cmd_obj_desc_dump, (cmdline_parse_inst_t *)&cmd_obj_desc_dump,
(cmdline_parse_inst_t *)&cmd_obj_load_cmds, (cmdline_parse_inst_t *)&cmd_obj_load_cmds,
......
/*- /*-
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved. * Copyright(c) 2010-2020 Intel Corporation. All rights reserved.
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
/*- /*-
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2017-2018 Xilinx, Inc. All rights reserved. * Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
......
/*- /*-
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2017-2018 Xilinx, Inc. All rights reserved. * Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
......
/*- /*-
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2017-2018 Xilinx, Inc. All rights reserved. * Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
......
/*- /*-
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2017-2018 Xilinx, Inc. All rights reserved. * Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
......
/*- /*-
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2017-2019 Xilinx, Inc. All rights reserved. * Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
......
/*- /*-
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2017-2019 Xilinx, Inc. All rights reserved. * Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
...@@ -227,14 +227,14 @@ int do_recv_st(int port_id, int fd, int queueid, int input_size) ...@@ -227,14 +227,14 @@ int do_recv_st(int port_id, int fd, int queueid, int input_size)
* *
* As per this when testing sizes beyond 28KB, one needs to split it * As per this when testing sizes beyond 28KB, one needs to split it
* up in chunks of 28KB, example : to test 56KB data size, set 28KB * up in chunks of 28KB, example : to test 56KB data size, set 28KB
* as packet length in USER BAR 0x04 register and no of packets as 2 * as packet length in AXI Master Lite BAR(user bar) 0x04 register and no of packets as 2
* in user BAR 0x20 register this would give you completions or * in AXI Master Lite BAR(user bar) 0x20 register this would give you completions or
* packets, which needs to be combined as one in application. * packets, which needs to be combined as one in application.
*/ */
max_completion_size = pinfo[port_id].buff_size * 7; max_completion_size = pinfo[port_id].buff_size * 7;
/* Calculate number of packets to receive and programming user bar */ /* Calculate number of packets to receive and programming AXI Master Lite bar(user bar) */
if (input_size == 0) /* zerobyte support uses one descriptor */ if (input_size == 0) /* zerobyte support uses one descriptor */
num_pkts = 1; num_pkts = 1;
else if (input_size % max_completion_size != 0) { else if (input_size % max_completion_size != 0) {
...@@ -914,8 +914,8 @@ int port_init(int port_id, int num_queues, int st_queues, ...@@ -914,8 +914,8 @@ int port_init(int port_id, int num_queues, int st_queues,
rte_exit(EXIT_FAILURE, "rte_pmd_qdma_get_bar_details failed\n"); rte_exit(EXIT_FAILURE, "rte_pmd_qdma_get_bar_details failed\n");
printf("QDMA Config bar idx: %d\n", pinfo[port_id].config_bar_idx); printf("QDMA Config bar idx: %d\n", pinfo[port_id].config_bar_idx);
printf("QDMA User bar idx: %d\n", pinfo[port_id].user_bar_idx); printf("QDMA AXI Master Lite bar idx: %d\n", pinfo[port_id].user_bar_idx);
printf("QDMA Bypass bar idx: %d\n", pinfo[port_id].bypass_bar_idx); printf("QDMA AXI Bridge Master bar idx: %d\n", pinfo[port_id].bypass_bar_idx);
/* configure the device to use # queues */ /* configure the device to use # queues */
diag = rte_eth_dev_configure(port_id, num_queues, num_queues, diag = rte_eth_dev_configure(port_id, num_queues, num_queues,
......
/*- /*-
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2017-2019 Xilinx, Inc. All rights reserved. * Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
...@@ -48,7 +48,7 @@ ...@@ -48,7 +48,7 @@
#define MP_CACHE_SZ 512 #define MP_CACHE_SZ 512
#define MBUF_POOL_NAME_PORT "mbuf_pool_%d" #define MBUF_POOL_NAME_PORT "mbuf_pool_%d"
/* User bar registers */ /* AXI Master Lite bar(user bar) registers */
#define C2H_ST_QID_REG 0x0 #define C2H_ST_QID_REG 0x0
#define C2H_ST_LEN_REG 0x4 #define C2H_ST_LEN_REG 0x4
#define C2H_CONTROL_REG 0x8 #define C2H_CONTROL_REG 0x8
......
From a3d5d265c96834efca0b4b419385128127e6b821 Mon Sep 17 00:00:00 2001
From: Pankaj Darak <pankajd@xilinx.com>
Date: Mon, 22 Apr 2019 19:18:37 +0530
Subject: [PATCH] PKTGEN-3.6.1: Patch to add Jumbo packet support
This patch includes:
1. Jumbo frame support for Pktgen.
2. Increase default number of RX_DESC to 2K.
3. Disable RX classification.
4. Set the user-provided packet size as the DMA packet size, i.e. do not
   strip the CRC bytes
Signed-off-by: Kumar Sanghvi <kumars@xilinx.com>
Signed-off-by: Nikhil Agarwal <nagarwal@xilinx.com>
Signed-off-by: Pankaj Darak <pankajd@xilinx.com>
Signed-off-by: Thanneeru Srinivasulu <sthannee@xilinx.com>
---
app/pktgen-cmds.c | 15 ++++++++++-----
app/pktgen-constants.h | 4 ++--
app/pktgen-main.c | 9 +++++++--
app/pktgen-port-cfg.c | 12 ++++++++----
app/pktgen-range.c | 3 ++-
app/pktgen.c | 14 ++++++++++++--
app/pktgen.h | 5 +++--
7 files changed, 44 insertions(+), 18 deletions(-)
diff --git a/app/pktgen-cmds.c b/app/pktgen-cmds.c
index 95f6ef1..888e329 100644
--- a/app/pktgen-cmds.c
+++ b/app/pktgen-cmds.c
@@ -2508,6 +2508,8 @@ single_set_pkt_size(port_info_t *info, uint16_t size)
{
pkt_seq_t * pkt = &info->seq_pkt[SINGLE_PKT];
+ uint16_t pktsize = (pktgen.flags & MTU9K_SUPPORT_FLAG) ?
+ MAX_PKT_SIZE: (ETHER_MAX_LEN - ETHER_CRC_LEN);
if (size < ETHER_CRC_LEN)
size = ETHER_CRC_LEN;
@@ -2515,13 +2517,13 @@ single_set_pkt_size(port_info_t *info, uint16_t size)
if ( (size - ETHER_CRC_LEN) < MIN_PKT_SIZE)
size = (MIN_PKT_SIZE + ETHER_CRC_LEN);
}
- if ( (size - ETHER_CRC_LEN) > MAX_PKT_SIZE)
- size = MAX_PKT_SIZE + ETHER_CRC_LEN;
+ if ( (size - ETHER_CRC_LEN) > pktsize)
+ size = pktsize + ETHER_CRC_LEN;
if ((pkt->ethType == ETHER_TYPE_IPv6) && (size < (MIN_v6_PKT_SIZE + ETHER_CRC_LEN)))
size = MIN_v6_PKT_SIZE + ETHER_CRC_LEN;
- pkt->pktSize = (size - ETHER_CRC_LEN);
+ pkt->pktSize = size;
pktgen_packet_ctor(info, SINGLE_PKT, -1);
pktgen_packet_rate(info);
@@ -3066,6 +3068,9 @@ range_set_cos_id(port_info_t *info, char *what, uint8_t id)
void
range_set_pkt_size(port_info_t *info, char *what, uint16_t size)
{
+ uint32_t pktsize = (pktgen.flags & MTU9K_SUPPORT_FLAG) ?
+ MAX_9K_SIZE : ETHER_MAX_LEN;
+
if (!strcmp(what, "inc") || !strcmp(what, "increment")) {
if (size > ETHER_MAX_LEN)
size = ETHER_MAX_LEN;
@@ -3073,8 +3078,8 @@ range_set_pkt_size(port_info_t *info, char *what, uint16_t size)
} else {
if (size < ETHER_MIN_LEN)
size = MIN_PKT_SIZE;
- else if (size > ETHER_MAX_LEN)
- size = MAX_PKT_SIZE;
+ else if (size > pktsize)
+ size = pktsize;
else
size -= ETHER_CRC_LEN;
diff --git a/app/pktgen-constants.h b/app/pktgen-constants.h
index 417bf64..62a787d 100644
--- a/app/pktgen-constants.h
+++ b/app/pktgen-constants.h
@@ -17,7 +17,7 @@ extern "C" {
enum {
DEFAULT_PKT_BURST = 64, /* Increasing this number consumes memory very fast */
#ifdef RTE_LIBRTE_VMXNET3_PMD
- DEFAULT_RX_DESC = (DEFAULT_PKT_BURST * 8 * 2),
+ DEFAULT_RX_DESC = (DEFAULT_PKT_BURST * 8 * 2 * 2),
DEFAULT_TX_DESC = DEFAULT_RX_DESC * 2,
#else
DEFAULT_RX_DESC = (DEFAULT_PKT_BURST * 8),
@@ -30,7 +30,7 @@ enum {
DEFAULT_PRIV_SIZE = 0,
DEFAULT_MBUF_SIZE = RTE_MBUF_DEFAULT_BUF_SIZE + DEFAULT_PRIV_SIZE, /* See: http://dpdk.org/dev/patchwork/patch/4479/ */
-
+ MBUF_9K_SIZE = 9018 + RTE_PKTMBUF_HEADROOM + DEFAULT_PRIV_SIZE,
NUM_Q = 8, /**< Number of cores per port. */
};
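As a worked example of the sizing above: with DPDK's default RTE_PKTMBUF_HEADROOM of 128 bytes and DEFAULT_PRIV_SIZE of 0, MBUF_9K_SIZE works out to 9018 + 128 + 0 = 9146 bytes of buffer per mbuf, where 9018 covers a 9000-byte MTU plus the 14-byte Ethernet header and 4-byte CRC (the headroom value assumes the default build configuration).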
diff --git a/app/pktgen-main.c b/app/pktgen-main.c
index b90da0c..768a2e3 100644
--- a/app/pktgen-main.c
+++ b/app/pktgen-main.c
@@ -161,7 +161,7 @@ pktgen_parse_args(int argc, char **argv)
pktgen.argv[opt] = strdup(argv[opt]);
pktgen.verbose = 0;
- while ((opt = getopt_long(argc, argvopt, "p:m:f:l:s:g:hPNGTv",
+ while ((opt = getopt_long(argc, argvopt, "p:m:f:l:s:g:hPNGTv9",
lgopts, &option_index)) != EOF)
switch (opt) {
case 'p':
@@ -240,7 +240,12 @@ pktgen_parse_args(int argc, char **argv)
case 'h': /* print out the help message */
pktgen_usage(prgname);
- return -1;
+ return -1;
+
+ case '9': /* MTU 9K support */
+ pktgen_log_info("%s: case 9... \n", __func__);
+ pktgen.flags |= MTU9K_SUPPORT_FLAG;
+ break;
case 0: /* crc-strip for all ports */
pktgen_set_hw_strip_crc(1);
diff --git a/app/pktgen-port-cfg.c b/app/pktgen-port-cfg.c
index a1da13c..e18ac42 100644
--- a/app/pktgen-port-cfg.c
+++ b/app/pktgen-port-cfg.c
@@ -99,16 +99,19 @@ pktgen_mbuf_pool_create(const char *type, uint8_t pid, uint8_t queue_id,
struct rte_mempool *mp;
char name[RTE_MEMZONE_NAMESIZE];
uint64_t sz;
+ uint16_t mbuf_sz = (pktgen.flags & MTU9K_SUPPORT_FLAG) ?
+ MBUF_9K_SIZE :DEFAULT_MBUF_SIZE;
+
snprintf(name, sizeof(name), "%-12s%u:%u", type, pid, queue_id);
- sz = nb_mbufs * (DEFAULT_MBUF_SIZE + sizeof(struct rte_mbuf));
+ sz = nb_mbufs * (mbuf_sz + sizeof(struct rte_mbuf));
sz = RTE_ALIGN_CEIL(sz + sizeof(struct rte_mempool), 1024);
if (pktgen.verbose)
pktgen_log_info(
" Create: %-*s - Memory used (MBUFs %5u x (size %u + Hdr %lu)) + %lu = %6lu KB, headroom %d",
- 16, name, nb_mbufs, DEFAULT_MBUF_SIZE,
+ 16, name, nb_mbufs, mbuf_sz,
sizeof(struct rte_mbuf), sizeof(struct rte_mempool),
sz / 1024, RTE_PKTMBUF_HEADROOM);
@@ -117,7 +120,7 @@ pktgen_mbuf_pool_create(const char *type, uint8_t pid, uint8_t queue_id,
/* create the mbuf pool */
mp = rte_pktmbuf_pool_create(name, nb_mbufs, cache_size,
- DEFAULT_PRIV_SIZE, DEFAULT_MBUF_SIZE, socket_id);
+ DEFAULT_PRIV_SIZE, mbuf_sz, socket_id);
if (mp == NULL)
pktgen_log_panic(
"Cannot create mbuf pool (%s) port %d, queue %d, nb_mbufs %d, socket_id %d: %s",
@@ -174,7 +177,8 @@ pktgen_config_ports(void)
pktgen_log_info(
"Configuring %d ports, MBUF Size %d, MBUF Cache Size %d",
pktgen.nb_ports,
- DEFAULT_MBUF_SIZE,
+ (pktgen.flags & MTU9K_SUPPORT_FLAG) ? MBUF_9K_SIZE :
+ DEFAULT_MBUF_SIZE,
MBUF_CACHE_SIZE);
}
diff --git a/app/pktgen-range.c b/app/pktgen-range.c
index a8f62cd..b03fe84 100644
--- a/app/pktgen-range.c
+++ b/app/pktgen-range.c
@@ -561,7 +561,8 @@ pktgen_range_setup(port_info_t *info)
range->pkt_size = MIN_PKT_SIZE;
range->pkt_size_inc = 0;
range->pkt_size_min = MIN_PKT_SIZE;
- range->pkt_size_max = MAX_PKT_SIZE;
+ range->pkt_size_max = (pktgen.flags & MTU9K_SUPPORT_FLAG) ?
+ MAX_PKT_SIZE : (ETHER_MAX_LEN - ETHER_CRC_LEN);
range->vxlan_gid = info->seq_pkt[SINGLE_PKT].group_id;
range->vxlan_gid_inc = 0;
diff --git a/app/pktgen.c b/app/pktgen.c
index f7eee07..f0a8b58 100644
--- a/app/pktgen.c
+++ b/app/pktgen.c
@@ -62,6 +62,7 @@ pktgen_wire_size(port_info_t *info)
size = info->seq_pkt[SINGLE_PKT].pktSize +
PKT_PREAMBLE_SIZE + INTER_FRAME_GAP + ETHER_CRC_LEN;
}
+ size -= (PKT_PREAMBLE_SIZE + INTER_FRAME_GAP + ETHER_CRC_LEN);
return size;
}
@@ -955,6 +956,10 @@ pktgen_setup_cb(struct rte_mempool *mp,
pkt_seq_t *pkt;
uint16_t qid, idx;
+ uint32_t pktsize = (pktgen.flags & MTU9K_SUPPORT_FLAG) ?
+ MAX_PKT_SIZE:
+ (ETHER_MAX_LEN - ETHER_CRC_LEN);
+
info = data->info;
qid = data->qid;
@@ -982,7 +987,7 @@ pktgen_setup_cb(struct rte_mempool *mp,
pktgen_packet_ctor(info, idx, -1);
rte_memcpy((uint8_t *)m->buf_addr + m->data_off,
- (uint8_t *)&pkt->hdr, MAX_PKT_SIZE);
+ (uint8_t *)&pkt->hdr, pktsize);
m->pkt_len = pkt->pktSize;
m->data_len = pkt->pktSize;
@@ -1171,7 +1176,7 @@ pktgen_main_receive(port_info_t *info,
{
uint8_t pid;
uint16_t qid, nb_rx;
- capture_t *capture;
+ __rte_unused capture_t *capture;
pid = info->pid;
qid = get_rxque(pktgen.l2p, lid, pid);
@@ -1182,6 +1187,10 @@ pktgen_main_receive(port_info_t *info,
if ( (nb_rx = rte_eth_rx_burst(pid, qid, pkts_burst, info->tx_burst)) == 0)
return;
+ info->sizes._64 += nb_rx;
+ rte_pktmbuf_free_bulk(pkts_burst, nb_rx);
+#if 0
+
pktgen_recv_latency(info, pkts_burst, nb_rx);
/* packets are not freed in the next call. */
@@ -1198,6 +1207,7 @@ pktgen_main_receive(port_info_t *info,
}
rte_pktmbuf_free_bulk(pkts_burst, nb_rx);
+#endif
}
static void
diff --git a/app/pktgen.h b/app/pktgen.h
index d9da2eb..2e46247 100644
--- a/app/pktgen.h
+++ b/app/pktgen.h
@@ -233,9 +233,9 @@ enum {
INTER_FRAME_GAP = 12, /**< in bytes */
PKT_PREAMBLE_SIZE = 8, /**< in bytes */
-
+ MAX_9K_SIZE = 9018,
MIN_PKT_SIZE = (ETHER_MIN_LEN - ETHER_CRC_LEN),
- MAX_PKT_SIZE = (ETHER_MAX_LEN - ETHER_CRC_LEN),
+ MAX_PKT_SIZE = (MAX_9K_SIZE - ETHER_CRC_LEN),
MIN_v6_PKT_SIZE = (78 - ETHER_CRC_LEN),
MAX_RX_QUEUES = 16, /**< RX Queues per port */
@@ -331,6 +331,7 @@ enum { /* Pktgen flags bits */
BLINK_PORTS_FLAG = (1 << 10), /**< Blink the port leds */
ENABLE_THEME_FLAG = (1 << 11), /**< Enable theme or color support */
+ MTU9K_SUPPORT_FLAG = (1 << 15), /**< MTU 9K support */
CONFIG_PAGE_FLAG = (1 << 16), /**< Display the configure page */
SEQUENCE_PAGE_FLAG = (1 << 17), /**< Display the Packet sequence page */
RANGE_PAGE_FLAG = (1 << 18), /**< Display the range page */
--
2.7.4
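With this patch applied, the jumbo (9K MTU) path is opt-in at run time: passing the new '-9' switch among the pktgen application options (it is added to the getopt string above) sets MTU9K_SUPPORT_FLAG, which in turn selects the 9K mbuf pool size and raises the single and range packet-size caps; without the switch, behaviour stays at the previous ETHER_MAX_LEN based limits.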