Commit 5f12009e authored by Sujatha Banoth's avatar Sujatha Banoth

2020.2 QDMA Windows Driver Release

2020.2 QDMA Windows Driver Release
parent 1fe2bd02
Release: 2020.1
Release: 2020.2
================
This release is validated on QDMA4.0 2020.1 based example design,
This release is validated on QDMA4.0 2020.2 based example design and
QDMA3.1 2019.2 patch based example design.
QDMA Windows driver is supported in poll mode by default
......@@ -53,12 +53,19 @@ SUPPORTED FEATURES:
- Updated and validated the example design with marker changes for QDMA4.0 and without marker changes for QDMA3.1
- Support multiple bus numbers on single card
2020.2 Updates
--------------
- Support larger MM & ST packet transfer support
- Added support for detailed register dump
- Added support for post processing HW error messages
- Added support for Debug mode and Internal only mode
KNOWN ISSUES:
=============
- Driver installation gives warning due to test signature.
- In interrupt mode, sometimes completions are not received when C2H PIDX updates are held for 64 descriptors
- On QDMA4.0 2020.1 design, HW errors are observed in trace logs while validating the MM only design.
- On QDMA4.0 2020.1 design, User logic fails to generate C2H streaming packets when multiple threads try
- On QDMA4.0 2020.2 design, User logic fails to generate C2H streaming packets when multiple threads try
to generate packets on multiple active queues.
DRIVER LIMITATIONS:
......
......@@ -20,7 +20,7 @@
#define __QDMARW_VERSION_H
#define PROGNAME "dma-arw"
#define VERSION "2020.1.0"
#define VERSION "2020.2.0"
#define COPYRIGHT "Copyright (c) 2020 Xilinx Inc."
#endif /*__QDMARW_VERSION_H*/
......@@ -26,9 +26,11 @@
/* Limits used for validating user-supplied command-line arguments. */
#define MAX_VALID_GLBL_IDX 15
#define MAX_VALID_CMPT_SZ 3
#define MAX_VALID_INTR_RING_VEC 8
#define MAX_VALID_BAR_NUM 5
#define MAX_CMPT_DESC_SZ 64
#define MAX_INTR_RING_ENTRY_SZ 32
/* Parenthesized: an unparenthesized `64 * 1024` changes meaning when the
 * macro is used inside a larger expression (e.g. `x / MAX_DUMP_BUFF_SZ`
 * would expand to `x / 64 * 1024`). */
#define MAX_DUMP_BUFF_SZ (64 * 1024)
/* Per-register buffer quota for the `reg info` command output. */
#define MAX_REG_INFO_SZ 1024
#pragma comment(lib, "setupapi.lib")
......@@ -38,6 +40,13 @@ using std::runtime_error;
using std::cout;
using namespace std;
/* Descriptor engine mode names, indexed by the desc_eng_mode value reported
 * by the device (see handle_devinfo, which bounds-checks against this
 * array's element count).
 * BUG FIX: the original initializer was missing a comma after
 * "Bypass only mode", so two adjacent string literals were concatenated and
 * the array had only 2 elements — index 2 ("Internal only") was unreachable
 * and index 1 printed "Bypass only modeInernal only mode". Also fixes the
 * "Inernal" typo in the user-visible string. */
const char *desc_engine_mode[] = {
    "Internal and Bypass mode",
    "Bypass only mode",
    "Internal only mode",
};
static void help(void);
static bool open_device(const char *dev_name, device_file& device)
......@@ -231,6 +240,14 @@ static int handle_devinfo(const char *dev_name)
printf("%-35s : %s\n", "MM enabled", cmd.dev_info.out->mm_en ? "yes" : "no");
printf("%-35s : %s\n", "Mailbox enabled", cmd.dev_info.out->mailbox_en ? "yes" : "no");
printf("%-35s : %s\n", "MM completion enabled", cmd.dev_info.out->mm_cmpl_en ? "yes" : "no");
printf("%-35s : %s\n", "Debug Mode enabled", cmd.dev_info.out->debug_mode ? "yes" : "no");
if (cmd.dev_info.out->desc_eng_mode < sizeof(desc_engine_mode) / sizeof(desc_engine_mode[0])) {
printf("%-35s : %s\n", "Desc Engine Mode", desc_engine_mode[cmd.dev_info.out->desc_eng_mode]);
}
else {
printf("%-35s : %s\n", "Desc Engine Mode", "Invalid");
}
printf("\n");
delete cmd.dev_info.out;
......@@ -1107,27 +1124,120 @@ static int handle_reg_dump(const char *dev_name, const int argc, char* argv[])
unsigned int size = MAX_DUMP_BUFF_SZ;
unsigned int out_size = sizeof(struct regdump_info_out) + size;
cmd.reg_info.out = (struct regdump_info_out *)new char[out_size];
cmd.reg_info.out->ret_len = 0;
cmd.reg_dump_info.out = (struct regdump_info_out *)new char[out_size];
cmd.reg_dump_info.out->ret_len = 0;
ioctl_code = IOCTL_QDMA_REG_DUMP;
device.ioctl(ioctl_code, NULL, 0, cmd.reg_info.out, out_size);
device.ioctl(ioctl_code, NULL, 0, cmd.reg_dump_info.out, out_size);
if (!cmd.reg_info.out->ret_len) {
if (!cmd.reg_dump_info.out->ret_len) {
printf("Failed to dump the registers\n");
}
else {
char *addr = (char *)&cmd.reg_info.out->pbuffer[0];
for (auto i = 0; ((i < (int)cmd.reg_info.out->ret_len) && (addr[i] != '\0')); i++) {
char *addr = (char *)&cmd.reg_dump_info.out->pbuffer[0];
for (auto i = 0; ((i < (int)cmd.reg_dump_info.out->ret_len) && (addr[i] != '\0')); i++) {
printf("%c", addr[i]);
}
}
delete[] cmd.reg_dump_info.out;
}
catch (const std::exception& e) {
delete[] cmd.reg_dump_info.out;
cout << "Failed to execute reg dump command.\n" << e.what() << '\n';
}
return 0;
}
static int handle_reg_info(const char* dev_name, const int argc, char* argv[])
{
UNREFERENCED_PARAMETER(dev_name);
UNREFERENCED_PARAMETER(argc);
UNREFERENCED_PARAMETER(argv);
auto i = 0;
bool bar_valid = false;
ioctl_cmd cmd = {};
DWORD ioctl_code = 0x0;
if (i == argc) {
cout << "insufficient arguments\n";
return 1;
}
while (i < argc) {
if (strcmp(argv[i], "bar") == 0) {
if ((argv[i + 1] == NULL) || (argv[i + 2] == NULL)) {
cout << "Insufficient options provided\n";
cout << "bar option needs atleast two arguments : <bar_num> <address> [num_regs <M>]\n";
return 1;
}
++i;
cmd.reg_info.in.bar_no = strtoul(argv[i], NULL, 0);
if ((MAX_VALID_BAR_NUM < cmd.reg_info.in.bar_no) &&
((cmd.reg_info.in.bar_no % 2) != 0)) {
cout << "Invalid BAR number provided : " << argv[i] << endl;
break;
}
cmd.reg_info.in.bar_no = cmd.reg_info.in.bar_no / 2;
bar_valid = true;
++i;
cmd.reg_info.in.address = strtoul(argv[i], NULL, 0);
++i;
if ((argv[i] != NULL) && (strcmp(argv[i], "num_regs")) == 0) {
cmd.reg_info.in.reg_cnt = strtoul(argv[i + 1], NULL, 0);
++i;
}
else {
cmd.reg_info.in.reg_cnt = 1;
}
}
else {
cout << "Unknown command " << argv[i] << endl;
return 1;
}
++i;
}
if (!bar_valid) {
return 1;
}
try {
device_file device;
auto ret = open_device(dev_name, device);
if (ret == false)
return 0;
unsigned int size = (cmd.reg_info.in.reg_cnt * MAX_REG_INFO_SZ);
unsigned int out_size = sizeof(struct reg_info_out) + size;
cmd.reg_info.out = (struct reg_info_out *)new char[out_size];
cmd.reg_info.out->ret_len = 0;
ioctl_code = IOCTL_QDMA_REG_INFO;
device.ioctl(ioctl_code, &cmd.reg_info.in, sizeof(cmd.reg_info.in),
cmd.reg_info.out, out_size);
if (!cmd.reg_info.out->ret_len) {
printf("Failed to dump the registers\n");
}
else {
char* data = (char*)&cmd.reg_info.out->pbuffer[0];
for (i = 0; ((i < (int)cmd.reg_info.out->ret_len) &&
(data[i] != '\0')); i++) {
printf("%c", data[i]);
}
}
delete[] cmd.reg_info.out;
}
catch (const std::exception& e) {
delete[] cmd.reg_info.out;
cout << "Failed to execute reg dump command.\n" << e.what() << '\n';
cout << "Failed to execute reg info command.\n" << e.what() << '\n';
}
return 0;
......@@ -1205,6 +1315,10 @@ static int handle_reg_cmds(const char *dev_name, const int argc, char* argv[])
++i;
return handle_reg_dump(dev_name, argc - i, argv + i);
}
else if (strcmp(argv[i], "info") == 0) {
++i;
return handle_reg_info(dev_name, argc - i, argv + i);
}
else {
cout << "Unknown command " << argv[i] << endl;
return 1;
......@@ -1304,6 +1418,7 @@ static void help(void)
cout << " intring dump vector <N> <start_idx> <end_idx> - interrupt ring dump for vector number <N>\n";
cout << " for intrrupt entries :<start_idx> --- <end_idx>\n";
cout << " reg dump - register dump\n";
cout << " reg info bar <N> addr [num_regs <M>] - dump detailed fields information of a register\n";
}
int __cdecl main(const int argc, char* argv[])
......
......@@ -20,7 +20,7 @@
#define __QDMATOOL_VERSION_H
#define PROGNAME "dma-ctl"
#define VERSION "2020.1.0"
#define VERSION "2020.2.0"
#define COPYRIGHT "Copyright (c) 2020 Xilinx Inc."
#endif /* __QDMATOOL_VERSION_H */
......@@ -20,7 +20,7 @@
#define __QDMARW_VERSION_H
#define PROGNAME "dma-rw"
#define VERSION "2020.1.0"
#define VERSION "2020.2.0"
#define COPYRIGHT "Copyright (c) 2020 Xilinx Inc."
#endif /*__QDMARW_VERSION_H*/
......@@ -105,6 +105,7 @@ enum commands {
CMD_QUEUE_NO_COPY,
CMD_SET_QMAX,
CMD_GET_QSTATS,
CMD_REG_INFO,
CMD_OP_MAX
};
......@@ -180,6 +181,8 @@ struct device_info_out {
BOOL mm_en;
BOOL mm_cmpl_en;
BOOL mailbox_en;
BOOL debug_mode;
UINT8 desc_eng_mode;
UINT32 num_mm_channels;
};
......@@ -307,6 +310,25 @@ struct qstat_out {
UINT32 active_cmpt_queues;
};
/** Structure to be passed as input parameter for
* IOCTL Command :
* IOCTL_QDMA_REG_INFO
*/
struct reg_info_in {
UINT32 bar_no;   /**< PCIe BAR number to read the registers from */
UINT32 address;  /**< Register address (offset) within the BAR */
UINT32 reg_cnt;  /**< Number of consecutive registers to retrieve */
};
/** Structure to be passed as output parameter for
* IOCTL Command :
* IOCTL_QDMA_REG_INFO
*/
struct reg_info_out {
size_t ret_len;  /**< Number of valid bytes the driver wrote into pbuffer */
char pbuffer[1]; /**< Variable-length payload; caller over-allocates the struct */
};
struct csr_conf_data {
struct csr_conf_out *out;
};
......@@ -361,6 +383,11 @@ struct qstats_info {
struct qstat_out *out;
};
/** Consolidated input/output parameters for the IOCTL_QDMA_REG_INFO command */
struct reg_info {
struct reg_info_in in;    /**< input : BAR number / address / register count */
struct reg_info_out* out; /**< output: caller-allocated result buffer */
};
/** Union that consolidates parameters for all ioctl commands */
union ioctl_cmd {
struct csr_conf_data csr;
......@@ -372,9 +399,10 @@ union ioctl_cmd {
struct ctx_dump_info ctx_info;
struct cmpt_data_info cmpt_info;
struct intring_info int_ring_info;
struct regdump_info reg_info;
struct regdump_info reg_dump_info;
struct qmax_info qmax_info;
struct qstats_info qstats_info;
struct reg_info reg_info;
};
#define QDMA_IOCTL(index) CTL_CODE(FILE_DEVICE_UNKNOWN, index, METHOD_BUFFERED, FILE_ANY_ACCESS)
......@@ -395,6 +423,7 @@ union ioctl_cmd {
#define IOCTL_QDMA_QUEUE_NO_COPY QDMA_IOCTL(CMD_QUEUE_NO_COPY)
#define IOCTL_QDMA_SET_QMAX QDMA_IOCTL(CMD_SET_QMAX)
#define IOCTL_QDMA_GET_QSTATS QDMA_IOCTL(CMD_GET_QSTATS)
#define IOCTL_QDMA_REG_INFO QDMA_IOCTL(CMD_REG_INFO)
#include <poppack.h>
......
......@@ -23,7 +23,7 @@ Class = %ClassName%
ClassGuid = {a3a4c1ce-5a80-452c-9b51-a98edd3378d1}
Provider = %ManufacturerName%
CatalogFile = QDMA.cat
DriverVer = 08/01/2019, 2019.2.5.9 ;Format : year.quarter_no.drv_ver.libqdma_ver
DriverVer = 10/15/2020, 2020.2.0.0 ;Format : year.quarter_no.drv_ver.libqdma_ver
DriverPackageType = PlugAndPlay
[DestinationDirs]
......
......@@ -148,6 +148,39 @@ ErrExit:
return status;
}
//_Use_decl_annotations_
/** Stub user ISR callback registered with libqdma via qdma_drv_config
 *  (see qdma_evt_device_prepare_hardware). Currently only traces the
 *  invocation; event_id and user_data are unused. */
static void
qdma_user_isr_handler(
ULONG event_id,
void *user_data)
{
UNREFERENCED_PARAMETER(event_id);
UNREFERENCED_PARAMETER(user_data);
TraceInfo(TRACE_DEVICE, "In %s", __func__);
}
//_Use_decl_annotations_
/** Stub user-interrupt-enable callback registered with libqdma via
 *  qdma_drv_config. Currently only traces the invocation; event_id and
 *  user_data are unused. */
static void
qdma_user_interrupt_enable(
ULONG event_id,
void *user_data)
{
UNREFERENCED_PARAMETER(event_id);
UNREFERENCED_PARAMETER(user_data);
TraceInfo(TRACE_DEVICE, "In %s", __func__);
}
//_Use_decl_annotations_
/** Stub user-interrupt-disable callback registered with libqdma via
 *  qdma_drv_config. Currently only traces the invocation; event_id and
 *  user_data are unused. */
static void
qdma_user_interrupt_disable(
ULONG event_id,
void *user_data)
{
UNREFERENCED_PARAMETER(event_id);
UNREFERENCED_PARAMETER(user_data);
TraceInfo(TRACE_DEVICE, "In %s", __func__);
}
NTSTATUS qdma_evt_device_prepare_hardware(
const WDFDEVICE device,
const WDFCMRESLIST resources,
......@@ -155,26 +188,42 @@ NTSTATUS qdma_evt_device_prepare_hardware(
{
PAGED_CODE();
device_context* ctx = get_device_context(device);
qdma_drv_config drv_conf = {};
device_context *ctx = get_device_context(device);
ctx->qdma = qdma_interface::create_qdma_device();
if (ctx->qdma == nullptr) {
TraceError(TRACE_DEVICE, "qdma device memory allocation failed");
return STATUS_INSUFFICIENT_RESOURCES;
}
NTSTATUS status = read_reg_params(ctx->config.op_mode, ctx->config.config_bar);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_DEVICE, "failed to read registry params %!STATUS!", status);
return STATUS_INTERNAL_ERROR;
return status;
}
status = ctx->qdma->init(ctx->config.op_mode, ctx->config.config_bar, QDMA_MAX_QUEUES_PER_PF);
drv_conf.operation_mode = ctx->config.op_mode;
drv_conf.cfg_bar = ctx->config.config_bar;
drv_conf.qsets_max = QDMA_MAX_QUEUES_PER_PF;
drv_conf.user_msix_max = QDMA_MAX_USER_INTR;
drv_conf.data_msix_max = QDMA_MAX_DATA_INTR;
drv_conf.user_data = (void*)device;
drv_conf.user_isr_handler = qdma_user_isr_handler;
drv_conf.user_interrupt_enable_handler = qdma_user_interrupt_enable;
drv_conf.user_interrupt_disable_handler = qdma_user_interrupt_disable;
status = ctx->qdma->init(drv_conf);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_DEVICE, "qdma.init() failed! %!STATUS!", status);
return STATUS_INTERNAL_ERROR;
return status;
}
status = ctx->qdma->open(device, resources, resources_translated);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_DEVICE, "qdma.open() failed! %!STATUS!", status);
return STATUS_INTERNAL_ERROR;
return status;
}
return STATUS_SUCCESS;
......
......@@ -22,6 +22,8 @@
EXTERN_C_START
#define QDMA_MAX_QUEUES_PER_PF 512
#define QDMA_MAX_USER_INTR 1
#define QDMA_MAX_DATA_INTR 7
struct device_context {
struct {
......
......@@ -60,7 +60,7 @@ DriverEntry(
return status;
}
TraceInfo(TRACE_DRIVER, "%!FUNC! Exit");
TraceVerbose(TRACE_DRIVER, "%!FUNC! Exit");
return status;
}
......@@ -92,7 +92,7 @@ qdma_evt_driver_context_cleanup(
UNREFERENCED_PARAMETER(driver_object);
#endif
TraceInfo(TRACE_DRIVER, "%!FUNC! Entry");
TraceVerbose(TRACE_DRIVER, "%!FUNC! Entry");
/** Stop WPP Tracing */
WPP_CLEANUP(WdfDriverWdmGetDriverObject(static_cast<WDFDRIVER>(driver_object)));
}
......@@ -174,19 +174,16 @@ static BOOLEAN program_mm_dma_cb(
params.Parameters.Write.DeviceOffset :
params.Parameters.Read.DeviceOffset;
size_t xfered_len = 0;
auto status = dev_ctx->qdma->qdma_enqueue_mm_request(dma_ctx->qid, direction, sg_list, device_offset, drv_mm_cmp_cb, transaction, xfered_len);
auto status = dev_ctx->qdma->qdma_enqueue_mm_request(dma_ctx->qid, direction, sg_list, device_offset, drv_mm_cmp_cb, transaction);
if (!NT_SUCCESS(status)) {
dma_ctx->txn_len = 0;
/** Complete the DMA transaction */
drv_mm_cmp_cb(transaction, status);
TraceError(TRACE_IO, "enqueue_transfer_mm() failed! %!STATUS!", status);;
TraceError(TRACE_IO, "qdma_enqueue_mm_request() failed! %!STATUS!", status);;
return false;
}
TraceVerbose(TRACE_IO, "enqueue_transfer_mm(): txd len : %lld", xfered_len);
/** Update the real dmaed length into dma context */
dma_ctx->txn_len = xfered_len;
TraceVerbose(TRACE_IO, "qdma_enqueue_mm_request(): txd len : %lld", dma_ctx->txn_len);
return true;
}
......@@ -203,17 +200,14 @@ static BOOLEAN program_st_tx_dma_cb(
auto dev_ctx = get_device_context(device);
DMA_TXN_CONTEXT *dma_ctx = (DMA_TXN_CONTEXT *)context;
size_t xfered_len = 0;
auto status = dev_ctx->qdma->qdma_enqueue_st_tx_request(dma_ctx->qid, sg_list, drv_st_tx_cmp_cb, transaction, xfered_len);
auto status = dev_ctx->qdma->qdma_enqueue_st_tx_request(dma_ctx->qid, sg_list, drv_st_tx_cmp_cb, transaction);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_IO, "enqueue_transfer_st() failed! %!STATUS!", status);
TraceError(TRACE_IO, "qdma_enqueue_st_tx_request() failed! %!STATUS!", status);
drv_st_tx_cmp_cb(transaction, status);
return false;
}
TraceVerbose(TRACE_IO, "qdma_enqueue_st_tx(): txd len : %lld", xfered_len);
/** Update the real dmaed length into dma context */
dma_ctx->txn_len = xfered_len;
TraceVerbose(TRACE_IO, "qdma_enqueue_st_tx_request(): txd len : %lld", dma_ctx->txn_len);
return true;
}
......@@ -256,13 +250,13 @@ static void io_mm_dma(
goto ErrExit;
}
TraceVerbose(TRACE_IO, "queue transfer complete");
TraceVerbose(TRACE_IO, "DMA transfer triggered on queue %d, direction : %d", qid, direction);
return;
ErrExit:
WdfObjectDelete(dma_transaction);
WdfRequestComplete(request, status);
TraceError(TRACE_IO, "Error Request 0x%p: %!STATUS!", request, status);
TraceError(TRACE_IO, "DMA transfer initiation failed, Request 0x%p: %!STATUS!", request, status);
}
......@@ -282,11 +276,11 @@ static void io_st_read_dma(
status = qdma_dev->qdma_enqueue_st_rx_request(qid, length, drv_st_rx_cmp_cb, (void *)request);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_IO, "WdfDmaTransactionExecute failed: %!STATUS!", status);
WdfRequestComplete(request, status);
TraceError(TRACE_IO, "ST Request completed 0x%p: %!STATUS!", request, status);
TraceError(TRACE_IO, "DMA transfer initiation failed: %!STATUS!", status);
}
TraceVerbose(TRACE_IO, "DMA transfer triggered on queue %d", qid);
}
static void io_st_zero_write_dma(
......@@ -296,31 +290,47 @@ static void io_st_zero_write_dma(
const size_t length,
const WDF_DMA_DIRECTION direction)
{
PVOID req = request;
UNREFERENCED_PARAMETER(length);
UNREFERENCED_PARAMETER(direction);
ST_DMA_ZERO_TX_PRIV *priv;
/** construct one element sg_list */
constexpr size_t sg_list_len = sizeof(SCATTER_GATHER_LIST) + sizeof(SCATTER_GATHER_ELEMENT);
UINT8 sg_buffer[sg_list_len] = { };
PSCATTER_GATHER_LIST sg_list = (PSCATTER_GATHER_LIST)sg_buffer;
size_t xfered_len = 0;
PSCATTER_GATHER_LIST sg_list;
sg_list = (PSCATTER_GATHER_LIST)ExAllocatePoolWithTag(NonPagedPoolNx, sg_list_len, IO_QUEUE_TAG);
if (sg_list == NULL) {
TraceVerbose(TRACE_IO, "sg_list: Mem alloc failed\n");
return;
}
RtlZeroMemory(sg_list, sg_list_len);
sg_list->NumberOfElements = 1;
sg_list->Elements[0].Address.QuadPart = NULL;
sg_list->Elements[0].Length = 0x0;
/* For Zero byte transfer, pass the WDFREQUEST in WDFDMATRANSACTION parameter,
locally constructed single element sglist parameter for the function qdma_enqueue_st_request */
auto status = qdma_dev->qdma_enqueue_st_tx_request(qid, sg_list, drv_st_tx_zcmp_cb, static_cast<WDFDMATRANSACTION>(req), xfered_len);
priv = (ST_DMA_ZERO_TX_PRIV*)ExAllocatePoolWithTag(NonPagedPoolNx, sizeof(ST_DMA_ZERO_TX_PRIV), IO_QUEUE_TAG);
if (priv == NULL) {
ExFreePoolWithTag(sg_list, IO_QUEUE_TAG);
TraceVerbose(TRACE_IO, "priv: Mem alloc failed\n");
return;
}
/** Store the context info in priv data */
priv->request = request;
priv->sg_list = sg_list;
/* For Zero byte transfer, pass the ST_DMA_ZERO_TX_PRIV in WDFDMATRANSACTION parameter that contains,
locally constructed single element sglist parameter & WDFREQUEST for the function qdma_enqueue_st_request */
auto status = qdma_dev->qdma_enqueue_st_tx_request(qid, sg_list, drv_st_tx_zcmp_cb, static_cast<PVOID>(priv));
if (!NT_SUCCESS(status)) {
TraceError(TRACE_IO, "enqueue_transfer_st() failed! %!STATUS!", status);
drv_st_tx_zcmp_cb(request, status);
TraceError(TRACE_IO, "DMA transfer initiation failed! %!STATUS!", status);
drv_st_tx_zcmp_cb(priv, status);
return;
}
TraceVerbose(TRACE_IO, "queue transfer complete for zero length request");
TraceVerbose(TRACE_IO, "DMA transfer triggered on queue %d for zero length", qid);
return;
}
......@@ -366,13 +376,13 @@ static void io_st_write_dma(
goto ErrExit;
}
TraceVerbose(TRACE_IO, "queue transfer complete");
TraceVerbose(TRACE_IO, "DMA transfer triggered on queue %d", qid);
return;
ErrExit:
WdfObjectDelete(dma_transaction);
WdfRequestComplete(request, status);
TraceError(TRACE_IO, "Error Request 0x%p: %!STATUS!", request, status);
TraceError(TRACE_IO, "DMA transfer initiation failed! for Request : %p, %!STATUS!", request, status);
}
/* ----- CB Processing Functions ----- */
......@@ -403,7 +413,7 @@ void drv_st_rx_cmp_cb(const st_c2h_pkt_fragment *rx_pkts, size_t num_pkts, void
#ifdef DBG
if (packet.udd_data != nullptr) {
int len = 0;
UINT8 len = 0;
constexpr unsigned short MAX_UDD_STR_LEN = (QDMA_MAX_UDD_DATA_LEN * 3) + 1;
char imm_data_str[MAX_UDD_STR_LEN];
UINT32 udd_len;
......@@ -417,7 +427,7 @@ void drv_st_rx_cmp_cb(const st_c2h_pkt_fragment *rx_pkts, size_t num_pkts, void
RtlStringCchPrintfA((imm_data_str + len), (MAX_UDD_STR_LEN - len), "%02X ", udd_buffer[iter]);
len = len + 3; /* 3 characters are getting utilized for each byte */
}
TraceInfo(TRACE_IO, "Immediate data Len : %d, Data: %s", udd_len, imm_data_str);
TraceVerbose(TRACE_IO, "Immediate data Len : %d, Data: %s", udd_len, imm_data_str);
}
}
#endif
......@@ -456,11 +466,12 @@ static void dma_complete_transaction(WDFDMATRANSACTION dma_transaction, NTSTATUS
BOOLEAN transaction_complete = false;
auto dma_ctx = get_dma_txn_context(dma_transaction);
size_t length = dma_ctx->txn_len;
UINT16 qid = dma_ctx->qid;
request = WdfDmaTransactionGetRequest(dma_transaction);
if (!request)
/** Dont return from here, Need to delete the dma_transaction object */
TraceError(TRACE_IO, "Callback but No request pending");
TraceError(TRACE_IO, "Callback triggered, No request pending on queue %d", qid);
if ((NT_SUCCESS(status)))
transaction_complete = WdfDmaTransactionDmaCompleted(dma_transaction, &ret);
......@@ -472,12 +483,12 @@ static void dma_complete_transaction(WDFDMATRANSACTION dma_transaction, NTSTATUS
WdfObjectDelete(dma_transaction);
}
else {
TraceError(TRACE_IO, "Err: DMA transaction not completed, ret : %X", ret);
TraceError(TRACE_IO, "Err: DMA transaction not completed on queue %d, ret : %X", qid, ret);
}
if (request) {
WdfRequestCompleteWithInformation(request, status, length);
TraceVerbose(TRACE_IO, "Request completed, Len : %lld", length);
TraceVerbose(TRACE_IO, "DMA transfer completed on queue %d, Len : %lld", qid, length);
}
}
......@@ -508,8 +519,14 @@ void drv_st_tx_zcmp_cb(void *priv, NTSTATUS status)
return;
}
WdfRequestCompleteWithInformation(static_cast<WDFREQUEST>(priv), status, 0);
TraceInfo(TRACE_IO, "Zero Byte Transfer ended");
ST_DMA_ZERO_TX_PRIV *priv_ctx = (ST_DMA_ZERO_TX_PRIV *)priv;
WdfRequestCompleteWithInformation(static_cast<WDFREQUEST>(priv_ctx->request), status, 0);
ExFreePoolWithTag(priv_ctx->sg_list, IO_QUEUE_TAG);
ExFreePoolWithTag(priv_ctx, IO_QUEUE_TAG);
TraceInfo(TRACE_IO, "DMA Transfer completed for Zero length");
}
void drv_st_process_udd_only_pkts(UINT16 qid, void *udd_addr, void *priv)
......@@ -521,7 +538,7 @@ void drv_st_process_udd_only_pkts(UINT16 qid, void *udd_addr, void *priv)
if ((udd_addr == nullptr) || (priv == nullptr))
return;
int len = 0;
UINT8 len = 0;
constexpr unsigned short MAX_UDD_STR_LEN = (QDMA_MAX_UDD_DATA_LEN * 3) + 1;
char imm_data_str[MAX_UDD_STR_LEN];
UINT32 udd_len;
......@@ -546,7 +563,7 @@ NTSTATUS qdma_io_queue_initialize(
{
PAGED_CODE();
TraceInfo(TRACE_IO, "Initializing main entry IO queue");
TraceVerbose(TRACE_IO, "Initializing main entry IO queue");
/* Configure a default queue so that requests that are not configure-forwarded using
* WdfDeviceConfigureRequestDispatching to go to other queues get dispatched here.
......@@ -690,8 +707,9 @@ void qdma_evt_ioctl(
auto file_ctx = get_file_context(WdfRequestGetFileObject(request));
auto qdma_dev = dev_ctx->qdma;
union ioctl_cmd cmd;
enum queue_state qstate;
TraceInfo(TRACE_IO, "Queue 0x%p, Request 0x%p OutputBufferLength %llu InputBufferLength %llu IoControlCode 0x%X",
TraceVerbose(TRACE_IO, "Queue 0x%p, Request 0x%p OutputBufferLength %llu InputBufferLength %llu IoControlCode 0x%X",
queue, request, output_buffer_length, input_buffer_length, io_control_code);
if (file_target::MGMT != file_ctx->target && file_target::ST_QUEUE != file_ctx->target) {
......@@ -781,6 +799,8 @@ void qdma_evt_ioctl(
cmd.dev_info.out->mm_cmpl_en = dev_attr.mm_cmpl_en;
cmd.dev_info.out->mailbox_en = dev_attr.mailbox_en;
cmd.dev_info.out->num_mm_channels = dev_attr.num_mm_channels;
cmd.dev_info.out->debug_mode = dev_attr.debug_mode;
cmd.dev_info.out->desc_eng_mode = dev_attr.desc_eng_mode;
WdfRequestCompleteWithInformation(request, status, sizeof(device_info_out));
......@@ -902,7 +922,7 @@ void qdma_evt_ioctl(
}
TraceVerbose(TRACE_IO, "IOCTL_QDMA_QUEUE_DUMP_STATE : %d", cmd.q_state.in.qid);
status = qdma_dev->qdma_get_queues_state(cmd.q_state.in.qid,
status = qdma_dev->qdma_get_queues_state(cmd.q_state.in.qid, &qstate,
cmd.q_state.out->state, sizeof(cmd.q_state.out->state));
if (!NT_SUCCESS(status))
......@@ -1113,11 +1133,11 @@ void qdma_evt_ioctl(
case IOCTL_QDMA_REG_DUMP :
{
status = retrive_ioctl(request, nullptr, 0,
(PVOID *)&cmd.reg_info.out, output_buffer_length);
(PVOID *)&cmd.reg_dump_info.out, output_buffer_length);
if (!NT_SUCCESS(status))
goto Exit;
if (cmd.reg_info.out == nullptr) {
if (cmd.reg_dump_info.out == nullptr) {
TraceError(TRACE_IO, "NULL Buffer for CMD_REG_DUMP");
status = STATUS_INVALID_PARAMETER;
goto Exit;
......@@ -1128,7 +1148,7 @@ void qdma_evt_ioctl(
regdump_info.buffer_len = output_buffer_length - sizeof(struct regdump_info_out);
regdump_info.ret_len = 0;
regdump_info.pbuffer = &cmd.reg_info.out->pbuffer[0];
regdump_info.pbuffer = &cmd.reg_dump_info.out->pbuffer[0];
TraceVerbose(TRACE_IO, "IOCTL_QDMA_REG_DUMP");
status = qdma_dev->qdma_regdump(&regdump_info);
......@@ -1137,7 +1157,7 @@ void qdma_evt_ioctl(
goto Exit;
}
cmd.reg_info.out->ret_len = regdump_info.ret_len;
cmd.reg_dump_info.out->ret_len = regdump_info.ret_len;
WdfRequestCompleteWithInformation(request, status,
sizeof(regdump_info_out) + regdump_info.ret_len);
......@@ -1188,6 +1208,39 @@ void qdma_evt_ioctl(
WdfRequestCompleteWithInformation(request, status, sizeof(struct qstat_out));
break;
}
case IOCTL_QDMA_REG_INFO:
{
status = retrive_ioctl(request, &cmd.reg_info.in, sizeof(cmd.reg_info.in),
(PVOID*)&cmd.reg_info.out, output_buffer_length);
if (!NT_SUCCESS(status))
goto Exit;
if (cmd.reg_info.out == nullptr) {
TraceError(TRACE_IO, "nullptr Buffer for IOCTL_QDMA_REG_INFO");
status = STATUS_INVALID_PARAMETER;
goto Exit;
}
struct qdma_reg_info reg_info = { 0 };
reg_info.bar_no = cmd.reg_info.in.bar_no;
reg_info.address = cmd.reg_info.in.address;
reg_info.reg_cnt = cmd.reg_info.in.reg_cnt;
reg_info.buf_len = output_buffer_length - sizeof(struct reg_info_out);;
reg_info.ret_len = 0;
reg_info.pbuffer = cmd.reg_info.out->pbuffer;
status = qdma_dev->qdma_get_reg_info(&reg_info);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_IO, "qdma_dev->qdma_get_reg_info() failed : Err : %X", status);
goto Exit;
}
cmd.reg_info.out->ret_len = reg_info.ret_len;
WdfRequestCompleteWithInformation(request, status,
sizeof(reg_info_out) + reg_info.ret_len);
break;
}
default:
TraceInfo(TRACE_IO, "UNKNOWN IOCTL CALLED");
WdfRequestComplete(request, STATUS_UNSUCCESSFUL);
......@@ -1230,24 +1283,24 @@ void qdma_evt_io_read(
switch (file_ctx->target) {
case file_target::USER:
TraceInfo(TRACE_IO, "user BAR reading %llu bytes", length);
TraceVerbose(TRACE_IO, "AXI Master Lite BAR reading %llu bytes", length);
io_read_bar(dev_ctx->qdma, qdma_bar_type::USER_BAR, request, length);
break;
case file_target::CONTROL:
TraceInfo(TRACE_IO, "control BAR reading %llu bytes", length);
TraceVerbose(TRACE_IO, "control BAR reading %llu bytes", length);
io_read_bar(dev_ctx->qdma, qdma_bar_type::CONFIG_BAR, request, length);
break;
case file_target::BYPASS:
TraceInfo(TRACE_IO, "bypass BAR reading %llu bytes", length);
TraceVerbose(TRACE_IO, "AXI Bridge Master BAR reading %llu bytes", length);
io_read_bar(dev_ctx->qdma, qdma_bar_type::BYPASS_BAR, request, length);
break;
case file_target::DMA_QUEUE:
TraceInfo(TRACE_IO, "queue_%u reading %llu bytes", file_ctx->qid, length);
TraceVerbose(TRACE_IO, "queue_%u reading %llu bytes", file_ctx->qid, length);
io_mm_dma(dev_ctx->qdma, file_ctx->qid, request, length,
WdfDmaDirectionReadFromDevice);
break;
case file_target::ST_QUEUE:
TraceInfo(TRACE_IO, "queue_%u reading %llu bytes", file_ctx->qid, length);
TraceVerbose(TRACE_IO, "queue_%u reading %llu bytes", file_ctx->qid, length);
io_st_read_dma(dev_ctx->qdma, file_ctx->qid, request, length);
break;
default:
......@@ -1282,23 +1335,23 @@ void qdma_evt_io_write(
switch (file_ctx->target) {
case file_target::USER:
TraceInfo(TRACE_IO, "user BAR writing %llu bytes", length);
TraceVerbose(TRACE_IO, "AXI Master Lite BAR writing %llu bytes", length);
io_write_bar(dev_ctx->qdma, qdma_bar_type::USER_BAR, request, length);
break;
case file_target::CONTROL:
TraceInfo(TRACE_IO, "control BAR writing %llu bytes", length);
TraceVerbose(TRACE_IO, "control BAR writing %llu bytes", length);
io_write_bar(dev_ctx->qdma, qdma_bar_type::CONFIG_BAR, request, length);
break;
case file_target::BYPASS:
TraceInfo(TRACE_IO, "bypass BAR writing %llu bytes", length);
TraceVerbose(TRACE_IO, "AXI Bridge Master BAR writing %llu bytes", length);
io_write_bar(dev_ctx->qdma, qdma_bar_type::BYPASS_BAR, request, length);
break;
case file_target::DMA_QUEUE:
TraceInfo(TRACE_IO, "queue_%u writing %llu bytes", file_ctx->qid, length);
TraceVerbose(TRACE_IO, "queue_%u writing %llu bytes", file_ctx->qid, length);
io_mm_dma(dev_ctx->qdma, file_ctx->qid, request, length, WdfDmaDirectionWriteToDevice);
break;
case file_target::ST_QUEUE:
TraceInfo(TRACE_IO, "queue_%u writing %llu bytes", file_ctx->qid, length);
TraceVerbose(TRACE_IO, "queue_%u writing %llu bytes", file_ctx->qid, length);
if (length == 0) {
io_st_zero_write_dma(dev_ctx->qdma, file_ctx->qid, request, length, WdfDmaDirectionWriteToDevice);
}
......
......@@ -23,6 +23,8 @@ EXTERN_C_START
using namespace xlnx;
static constexpr ULONG IO_QUEUE_TAG = 'UQOI';
NTSTATUS qdma_io_queue_initialize(WDFDEVICE wdf_device);
struct DMA_TXN_CONTEXT {
......@@ -32,6 +34,12 @@ struct DMA_TXN_CONTEXT {
WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(DMA_TXN_CONTEXT, get_dma_txn_context)
/** Private context for a zero-length ST TX transfer: carries the WDFREQUEST
 *  and the locally allocated single-element sg_list so the completion
 *  callback (drv_st_tx_zcmp_cb) can complete the request and free both
 *  allocations (tagged IO_QUEUE_TAG). */
struct ST_DMA_ZERO_TX_PRIV {
WDFREQUEST request; /* request completed in drv_st_tx_zcmp_cb */
PVOID sg_list;      /* allocated in io_st_zero_write_dma; freed in the callback */
};
EVT_WDF_IO_QUEUE_IO_DEVICE_CONTROL qdma_evt_ioctl;
EVT_WDF_IO_QUEUE_IO_STOP qdma_evt_io_stop;
EVT_WDF_IO_QUEUE_IO_READ qdma_evt_io_read;
......
......@@ -44,6 +44,20 @@ enum qdma_q_type {
QDMA_Q_TYPE_MAX
};
/** queue_state - State of the QDMA queue */
enum queue_state {
/** Queue is available to configure */
QUEUE_AVAILABLE,
/** Queue is added with resources */
QUEUE_ADDED,
/** Queue is programmed and started */
QUEUE_STARTED,
/** Queue critical operation is in progress */
QUEUE_BUSY,
/** Invalid Queue State */
QUEUE_INVALID_STATE
};
/** Streaming card to host packet type */
enum class st_c2h_pkt_type {
/** C2H DATA Packet MACRO */
......@@ -116,11 +130,11 @@ enum class qdma_bar_type {
* (Contains all QDMA configuration Registers)
*/
CONFIG_BAR,
/** QDMA User BAR
/** QDMA AXI Master Lite BAR
* (Contains User Logic Registers)
*/
USER_BAR,
/** QDMA Bypass BAR
/** QDMA AXI Bridge Master BAR
* (Contains Bypass Registers to bypass QDMA)
*/
BYPASS_BAR
......@@ -178,6 +192,68 @@ using st_rx_completion_cb = void(*)(const st_c2h_pkt_fragment *rx_frags, size_t
*/
using proc_st_udd_only_cb = void(*)(UINT16 qid, void *udd_addr, void *priv);
/**
* fp_user_isr_handler() - User defined user ISR handler
*
* @param[in] event_id: Event identifier
* @param[in] user_data: Driver provided user data
*
* @return void
*/
using fp_user_isr_handler = void(*)(ULONG event_id, void *user_data);
/**
* fp_user_interrupt_enable_handler() - User defined user interrupt enable handler
*
* @param[in] event_id: Event identifier
* @param[in] user_data: Driver provided user data
*
* @return void
*/
using fp_user_interrupt_enable_handler = void(*)(ULONG event_id, void *user_data);
/**
* fp_user_interrupt_disable_handler() - User defined user interrupt disable handler
*
* @param[in] event_id: Event identifier
* @param[in] user_data: Driver provided user data
*
* @return void
*/
using fp_user_interrupt_disable_handler = void(*)(ULONG event_id, void *user_data);
/** qdma_drv_config - qdma device configuration
* needed to initialize the device
*/
struct qdma_drv_config {
/* Queue operation mode */
queue_op_mode operation_mode;
/* Config BAR index */
UINT8 cfg_bar;
/* Maximum queues for the device */
UINT32 qsets_max;
/* Maximum user MSIx vector to use */
UINT16 user_msix_max;
/* Maximum data MSIx vector to use */
UINT16 data_msix_max;
/* User data for user interrupt callback functions */
void *user_data;
/* User ISR handler */
fp_user_isr_handler user_isr_handler;
/* User interrupt enable handler */
fp_user_interrupt_enable_handler user_interrupt_enable_handler;
/* User interrupt disable handler */
fp_user_interrupt_disable_handler user_interrupt_disable_handler;
};
/** queue_config - qdma queue configuration
* needed to add a queue
*/
......@@ -248,6 +324,12 @@ struct qdma_device_attributes_info {
bool mm_cmpl_en;
/** Mailbox Feature enabled */
bool mailbox_en;
/** Debug mode is enabled/disabled for IP */
bool debug_mode;
/** Descriptor Engine mode:
* Internal only/Bypass only/Internal & Bypass
*/
UINT8 desc_eng_mode;
/** Number of MM channels supported */
UINT16 num_mm_channels;
};
......@@ -429,6 +511,24 @@ struct qdma_qstat_info {
UINT32 active_cmpt_queues;
};
/** qdma_reg_info - Structure contains required members to
 *  retrieve requested qdma registers information.
 *  bar_no/address/reg_cnt/buf_len are inputs; ret_len and the contents
 *  of pbuffer are filled in by the driver.
 */
struct qdma_reg_info {
    /** PCIe bar number on which the registers reside */
    UINT32 bar_no;
    /** Register address offset within the BAR */
    UINT32 address;
    /** number of registers to retrieve */
    UINT32 reg_cnt;
    /** Length of the buffer pointed by pbuffer (in bytes) */
    size_t buf_len;
    /** Length of the data present in pbuffer on return (in bytes) */
    size_t ret_len;
    /** output buffer to copy the register info */
    char *pbuffer;
};
/**
* qdma_interface - libqdma interface class
*
......@@ -445,14 +545,11 @@ public:
* init() - Initializes the qdma device with operation mode and
* config bar number to use
*
* @param[in] operation_mode: queue oper mode (poll, intr, aggr)
* @param[in] cfg_bar: config bar number for qdma device
* @param[in] qsets_max: Maximum number of queues requested for this
* device
* @param[in] conf: Device operation configuration
*
* @return STATUS_SUCCESS for success else error
*****************************************************************************/
virtual NTSTATUS init(queue_op_mode operation_mode, UINT8 cfg_bar, UINT16 qsets_max) = 0;
virtual NTSTATUS init(qdma_drv_config conf) = 0;
/*****************************************************************************/
/**
......@@ -517,17 +614,31 @@ public:
*****************************************************************************/
virtual NTSTATUS write_bar(qdma_bar_type bar_type, size_t offset, void* data, size_t size) = 0;
/*****************************************************************************/
/**
 * get_bar_info() - Retrieves the mapped base address and length of the
 * requested PCIe BAR
 *
 * @param[in] bar_type: BAR Type
 * @param[out] bar_base: BAR base mapped address
 * @param[out] bar_length: BAR length (in bytes)
*
* @return STATUS_SUCCESS for success else error
*****************************************************************************/
virtual NTSTATUS get_bar_info(qdma_bar_type bar_type, PVOID &bar_base, size_t &bar_length) = 0;
/*****************************************************************************/
/**
* qdma_get_queues_state() - Retrieves the state of the specified queue
*
* @param[in] qid: queue id relative to this QDMA device
* @param[out] qstate: state of the queue specified as enumeration
* @param[out] state: state of the queue specified as character string
* @param[in] sz: size of the state character array
*
* @return STATUS_SUCCESS for success else error
*****************************************************************************/
virtual NTSTATUS qdma_get_queues_state(UINT16 qid, char *state, size_t sz) = 0;
virtual NTSTATUS qdma_get_queues_state(UINT16 qid, enum queue_state *qstate, char *state, size_t sz) = 0;
/*****************************************************************************/
/**
......@@ -595,12 +706,10 @@ public:
* @param[in] compl_cb: completion call back function
* @param[in] priv: private data that gets passed to
* compl_cb function
* @param[out] xfered_len: If status is NT_SUCCESS, then number
* of bytes for a request successfully enqueued
*
* @return STATUS_SUCCESS for success else error
*****************************************************************************/
virtual NTSTATUS qdma_enqueue_mm_request(UINT16 qid, WDF_DMA_DIRECTION direction, PSCATTER_GATHER_LIST sg_list, LONGLONG device_offset, dma_completion_cb compl_cb, VOID *priv, size_t &xfered_len) = 0;
virtual NTSTATUS qdma_enqueue_mm_request(UINT16 qid, WDF_DMA_DIRECTION direction, PSCATTER_GATHER_LIST sg_list, LONGLONG device_offset, dma_completion_cb compl_cb, VOID *priv) = 0;
/*****************************************************************************/
/**
......@@ -612,12 +721,10 @@ public:
* to indicate write operation is completed
* @param[in,out] priv: private data that gets passed to
* compl_cb function
* @param[out] xfered_len: If status is NT_SUCCESS, then number
* of bytes for a request successfully enqueued.
*
* @return STATUS_SUCCESS for success else error
*****************************************************************************/
virtual NTSTATUS qdma_enqueue_st_tx_request(UINT16 qid, PSCATTER_GATHER_LIST sg_list, dma_completion_cb compl_cb, VOID *priv, size_t &xfered_len) = 0;
virtual NTSTATUS qdma_enqueue_st_tx_request(UINT16 qid, PSCATTER_GATHER_LIST sg_list, dma_completion_cb compl_cb, VOID *priv) = 0;
/*****************************************************************************/
/**
......@@ -768,6 +875,16 @@ public:
*****************************************************************************/
virtual NTSTATUS qdma_get_qstats_info(qdma_qstat_info &qstats) = 0;
/*****************************************************************************/
/**
* qdma_get_reg_info() - Retrieves the requested QDMA registers information
*
 * @param[in,out] reg_info: Register information (address, count, output buffer, etc.)
*
* @return STATUS_SUCCESS for success else error
*****************************************************************************/
virtual NTSTATUS qdma_get_reg_info(qdma_reg_info* reg_info) = 0;
/*****************************************************************************/
/**
* create_qdma_device() - Allocates an instance for qdma device
......
......@@ -21,7 +21,7 @@
#undef VER_PRODUCTMAJORVERSION
#define VER_PRODUCTMAJORVERSION (2020)
#undef VER_PRODUCTMINORVERSION
#define VER_PRODUCTMINORVERSION (1)
#define VER_PRODUCTMINORVERSION (2)
#undef VER_PRODUCTREVISION
#define VER_PRODUCTREVISION (0)
#undef VER_PRODUCTBUILD
......@@ -56,11 +56,11 @@
// Version number (in format needed for version resources)
#undef VER_PRODUCTVERSION
#define VER_PRODUCTVERSION 2020,1,0,0
#define VER_PRODUCTVERSION 2020,2,0,0
// Version number string
#undef VER_PRODUCTVERSION_STR
#define VER_PRODUCTVERSION_STR "2020.1.0.0"
#define VER_PRODUCTVERSION_STR "2020.2.0.0"
/** File Details ************************************************************/
......
......@@ -212,10 +212,12 @@
<ClCompile Include="source\interrupts.cpp" />
<ClCompile Include="source\qdma.cpp" />
<ClCompile Include="source\qdma_access\eqdma_soft_access\eqdma_soft_access.c" />
<ClCompile Include="source\qdma_access\eqdma_soft_access\eqdma_soft_reg_dump.c" />
<ClCompile Include="source\qdma_access\qdma_access_common.c" />
<ClCompile Include="source\qdma_access\qdma_list.c" />
<ClCompile Include="source\qdma_access\qdma_resource_mgmt.c" />
<ClCompile Include="source\qdma_access\qdma_s80_hard_access\qdma_s80_hard_access.c" />
<ClCompile Include="source\qdma_access\qdma_s80_hard_access\qdma_s80_hard_reg_dump.c" />
<ClCompile Include="source\qdma_access\qdma_soft_access\qdma_soft_access.c" />
<ClCompile Include="source\qdma_platform.cpp" />
<ClCompile Include="source\thread.cpp" />
......
......@@ -119,5 +119,11 @@
<ClCompile Include="source\xpcie.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="source\qdma_access\eqdma_soft_access\eqdma_soft_reg_dump.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="source\qdma_access\qdma_s80_hard_access\qdma_s80_hard_reg_dump.c">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
</Project>
\ No newline at end of file
......@@ -35,22 +35,23 @@ NTSTATUS intr_queue::create(
buffer_size = ((buffer_size / PAGE_SIZE) + 1) * PAGE_SIZE;
}
TraceVerbose(TRACE_INTR, "Buffer size : %llu, Ring size : %llu",
buffer_size, size);
TraceVerbose(TRACE_INTR, "%s: Intr Queue : %d, Buffer size : %llu, Ring size : %llu",
qdma->dev_conf.name, idx_abs, buffer_size, size);
capacity = buffer_size / sizeof(intr_entry);
npages = buffer_size / PAGE_SIZE;
color = 1;
TraceVerbose(TRACE_INTR, "Page size : %llu, Capacity : %llu",
npages, capacity);
TraceVerbose(TRACE_INTR, "Intr Queue : %d, Page size : %llu, Capacity : %llu",
idx_abs, npages, capacity);
auto status = WdfCommonBufferCreate(dma_enabler,
buffer_size,
WDF_NO_OBJECT_ATTRIBUTES,
&buffer);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_INTR, "Interrupt WdfCommonBufferCreate failed: %!STATUS!", status);
TraceError(TRACE_INTR, "%s: Interrupt WdfCommonBufferCreate failed for "
"intr queue : %d, : %!STATUS!", qdma->dev_conf.name, idx_abs, status);
return status;
}
......@@ -85,7 +86,7 @@ PFORCEINLINE void intr_queue::update_csr_cidx(
queue_pair *q,
UINT32 new_cidx)
{
TraceVerbose(TRACE_INTR, "Intr queue_%u updating c2h pidx to %u", idx, new_cidx);
TraceVerbose(TRACE_INTR, "%s: Intr queue_%u updating c2h pidx to %u", qdma->dev_conf.name, idx, new_cidx);
intr_cidx_info.rng_idx = (UINT8)idx_abs;
intr_cidx_info.sw_cidx = (UINT16)new_cidx;
......@@ -103,8 +104,8 @@ NTSTATUS intr_queue::intring_dump(qdma_intr_ring_info *intring_info)
if ((intring_info->start_idx >= size) ||
(intring_info->end_idx >= size)) {
TraceError(TRACE_INTR, "Intr Ring index Range is incorrect : start : %d, end : %d, RING SIZE : %d",
intring_info->start_idx, intring_info->end_idx, size);
TraceError(TRACE_INTR, "%s: Intr Ring index Range is incorrect : start : %d, end : %d, RING SIZE : %d",
qdma->dev_conf.name, intring_info->start_idx, intring_info->end_idx, size);
return STATUS_ACCESS_VIOLATION;
}
......@@ -183,9 +184,9 @@ VOID EvtErrorInterruptDpc(
UNREFERENCED_PARAMETER(device);
UNREFERENCED_PARAMETER(interrupt);
TraceVerbose(TRACE_INTR, "Error IRQ Fired on Master PF");
TraceError(TRACE_INTR, "Error IRQ Fired on Master PF");
auto irq_ctx = get_irq_context(interrupt);
auto irq_ctx = get_qdma_irq_context(interrupt);
if (nullptr == irq_ctx) {
TraceError(TRACE_INTR, "Err: null irq_ctx in EvtErrorInterruptDpc");
return;
......@@ -198,6 +199,36 @@ VOID EvtErrorInterruptDpc(
return;
}
/*****************************************************************************/
/**
 * EvtUserInterruptEnable() - WDF callback invoked when the framework
 * enables a user interrupt vector
 *
 * Forwards the event to the client registered user_interrupt_enable_handler
 * (if any), passing the vector id and the client supplied user_data.
 *
 * @param[in] interrupt: WDF interrupt object for the user vector
 * @param[in] device: WDF device object (unused)
 *
 * @return STATUS_SUCCESS on success, STATUS_UNSUCCESSFUL if the interrupt
 *         context is unavailable
 *****************************************************************************/
NTSTATUS EvtUserInterruptEnable(
    WDFINTERRUPT interrupt,
    WDFDEVICE device)
{
    UNREFERENCED_PARAMETER(device);

    auto irq_ctx = get_qdma_irq_context(interrupt);
    if (nullptr == irq_ctx) {
        /* Defensive check, consistent with EvtErrorInterruptDpc */
        TraceError(TRACE_INTR, "Err: null irq_ctx in EvtUserInterruptEnable");
        return STATUS_UNSUCCESSFUL;
    }

    /* Entry trace emitted before invoking the user callback */
    TraceVerbose(TRACE_INTR, "%s: --> %s", irq_ctx->qdma_dev->dev_conf.name, __func__);

    if (irq_ctx->qdma_dev->drv_conf.user_interrupt_enable_handler) {
        irq_ctx->qdma_dev->drv_conf.user_interrupt_enable_handler(irq_ctx->vector_id,
            irq_ctx->qdma_dev->drv_conf.user_data);
    }

    return STATUS_SUCCESS;
}
/*****************************************************************************/
/**
 * EvtUserInterruptDisable() - WDF callback invoked when the framework
 * disables a user interrupt vector
 *
 * Forwards the event to the client registered user_interrupt_disable_handler
 * (if any), passing the vector id and the client supplied user_data.
 *
 * @param[in] interrupt: WDF interrupt object for the user vector
 * @param[in] device: WDF device object (unused)
 *
 * @return STATUS_SUCCESS on success, STATUS_UNSUCCESSFUL if the interrupt
 *         context is unavailable
 *****************************************************************************/
NTSTATUS EvtUserInterruptDisable(
    WDFINTERRUPT interrupt,
    WDFDEVICE device)
{
    UNREFERENCED_PARAMETER(device);

    auto irq_ctx = get_qdma_irq_context(interrupt);
    if (nullptr == irq_ctx) {
        /* Defensive check, consistent with EvtErrorInterruptDpc */
        TraceError(TRACE_INTR, "Err: null irq_ctx in EvtUserInterruptDisable");
        return STATUS_UNSUCCESSFUL;
    }

    if (irq_ctx->qdma_dev->drv_conf.user_interrupt_disable_handler) {
        irq_ctx->qdma_dev->drv_conf.user_interrupt_disable_handler(irq_ctx->vector_id,
            irq_ctx->qdma_dev->drv_conf.user_data);
    }

    /* Exit trace after the user callback completes */
    TraceVerbose(TRACE_INTR, "%s: %s <--", irq_ctx->qdma_dev->dev_conf.name, __func__);

    return STATUS_SUCCESS;
}
BOOLEAN EvtUserInterruptIsr(
WDFINTERRUPT interrupt,
ULONG MessageID)
......@@ -215,9 +246,14 @@ VOID EvtUserInterruptDpc(
WDFOBJECT device)
{
UNREFERENCED_PARAMETER(device);
UNREFERENCED_PARAMETER(interrupt);
auto irq_ctx = get_qdma_irq_context(interrupt);
TraceVerbose(TRACE_INTR, "User Interrupt DPC SUCCESSFull");
TraceVerbose(TRACE_INTR, "%s: User Interrupt DPC for vector : %u", irq_ctx->qdma_dev->dev_conf.name, irq_ctx->vector_id);
if (irq_ctx->qdma_dev->drv_conf.user_isr_handler) {
irq_ctx->qdma_dev->drv_conf.user_isr_handler(irq_ctx->vector_id,
irq_ctx->qdma_dev->drv_conf.user_data);
}
return;
}
......@@ -227,7 +263,7 @@ BOOLEAN EvtDataInterruptIsr(
ULONG MessageID)
{
UNREFERENCED_PARAMETER(MessageID);
auto irq_ctx = get_irq_context(interrupt);
auto irq_ctx = get_qdma_irq_context(interrupt);
auto qdma_dev = irq_ctx->qdma_dev;
if ((irq_ctx->intr_type == interrupt_type::LEGACY) &&
......@@ -273,7 +309,7 @@ BOOLEAN schedule_dpc(queue_pair* q, UINT8 is_c2h, CCHAR active_processors)
}
void cpm_handle_indirect_interrupt(
PIRQ_CONTEXT irq_ctx)
PQDMA_IRQ_CONTEXT irq_ctx)
{
queue_pair *q = nullptr;
UINT8 is_c2h = 0;
......@@ -281,20 +317,26 @@ void cpm_handle_indirect_interrupt(
auto intr_queue = irq_ctx->intr_q;
if (nullptr == intr_queue) {
TraceError(TRACE_INTR, "Invalid vector %lu was called in coal mode", irq_ctx->vector_id);
TraceError(TRACE_INTR, "%s: Invalid vector %lu was called in coal mode",
irq_ctx->qdma_dev->dev_conf.name, irq_ctx->vector_id);
return;
}
const auto ring_va = static_cast<cpm_intr_entry *>(intr_queue->buffer_va);
TraceVerbose(TRACE_INTR, "CPM Coal queue SW Index : %u", intr_queue->sw_index);
TraceVerbose(TRACE_INTR, "CPM Intr PIDX : %u, Intr CIDX : %u", ring_va[intr_queue->sw_index].desc_pidx, ring_va[intr_queue->sw_index].desc_cidx);
TraceVerbose(TRACE_INTR, "%s: CPM Coal queue SW Index : %u",
irq_ctx->qdma_dev->dev_conf.name, intr_queue->sw_index);
TraceVerbose(TRACE_INTR, "%s: CPM Intr PIDX : %u, Intr CIDX : %u",
irq_ctx->qdma_dev->dev_conf.name, ring_va[intr_queue->sw_index].desc_pidx,
ring_va[intr_queue->sw_index].desc_cidx);
while (ring_va[intr_queue->sw_index].color == intr_queue->color) {
q = irq_ctx->qdma_dev->qdma_get_queue_pair_by_hwid(ring_va[intr_queue->sw_index].qid);
if (nullptr == q) {
TraceError(TRACE_INTR, "Queue not found hw qid : %u Intr qid : %u",
ring_va[intr_queue->sw_index].qid, intr_queue->idx);
TraceError(TRACE_INTR, "%s: Queue not found hw qid : %u Intr qid : %u",
irq_ctx->qdma_dev->dev_conf.name,
ring_va[intr_queue->sw_index].qid,
intr_queue->idx);
intr_queue->advance_sw_index();
continue;
}
......@@ -305,7 +347,9 @@ void cpm_handle_indirect_interrupt(
intr_queue->advance_sw_index();
TraceVerbose(TRACE_INTR, "CPM QUEUE ID : %u, is_c2h : %d", ring_va[intr_queue->sw_index].qid, is_c2h);
TraceVerbose(TRACE_INTR, "%s: CPM QUEUE ID : %u, is_c2h : %d",
irq_ctx->qdma_dev->dev_conf.name,
ring_va[intr_queue->sw_index].qid, is_c2h);
}
if (q) {
......@@ -314,7 +358,7 @@ void cpm_handle_indirect_interrupt(
}
void handle_indirect_interrupt(
PIRQ_CONTEXT irq_ctx)
PQDMA_IRQ_CONTEXT irq_ctx)
{
queue_pair *q = nullptr;
UINT8 is_c2h = 0;
......@@ -322,19 +366,25 @@ void handle_indirect_interrupt(
auto intr_queue = irq_ctx->intr_q;
if (nullptr == intr_queue) {
TraceError(TRACE_INTR, "Invalid vector %lu was called in coal mode", irq_ctx->vector_id);
TraceError(TRACE_INTR, "%s: Invalid vector %lu was called in coal mode",
irq_ctx->qdma_dev->dev_conf.name, irq_ctx->vector_id);
return;
}
const auto ring_va = static_cast<intr_entry *>(intr_queue->buffer_va);
TraceVerbose(TRACE_INTR, "Coal queue SW Index : %u", intr_queue->sw_index);
TraceVerbose(TRACE_INTR, "Intr PIDX : %u, Intr CIDX : %u", ring_va[intr_queue->sw_index].desc_pidx, ring_va[intr_queue->sw_index].desc_cidx);
TraceVerbose(TRACE_INTR, "%s: Coal queue SW Index : %u",
irq_ctx->qdma_dev->dev_conf.name, intr_queue->sw_index);
TraceVerbose(TRACE_INTR, "%s: Intr PIDX : %u, Intr CIDX : %u",
irq_ctx->qdma_dev->dev_conf.name,
ring_va[intr_queue->sw_index].desc_pidx,
ring_va[intr_queue->sw_index].desc_cidx);
while (ring_va[intr_queue->sw_index].color == intr_queue->color) {
q = irq_ctx->qdma_dev->qdma_get_queue_pair_by_hwid(ring_va[intr_queue->sw_index].qid);
if (nullptr == q) {
TraceError(TRACE_INTR, "Queue not found hw qid : %u Intr qid : %u",
TraceError(TRACE_INTR, "%s: Queue not found hw qid : %u Intr qid : %u",
irq_ctx->qdma_dev->dev_conf.name,
ring_va[intr_queue->sw_index].qid, intr_queue->idx);
intr_queue->advance_sw_index();
continue;
......@@ -346,7 +396,8 @@ void handle_indirect_interrupt(
intr_queue->advance_sw_index();
TraceVerbose(TRACE_INTR, "QUEUE ID : %u, is_c2h : %d", ring_va[intr_queue->sw_index].qid, is_c2h);
TraceVerbose(TRACE_INTR, "%s: QUEUE ID : %u, is_c2h : %d",
irq_ctx->qdma_dev->dev_conf.name, ring_va[intr_queue->sw_index].qid, is_c2h);
}
if (q) {
......@@ -355,7 +406,7 @@ void handle_indirect_interrupt(
}
void handle_direct_interrupt(
PIRQ_CONTEXT irq_ctx)
PQDMA_IRQ_CONTEXT irq_ctx)
{
CCHAR active_processors = (CCHAR)KeQueryActiveProcessorCount(NULL);
PLIST_ENTRY entry;
......@@ -364,7 +415,8 @@ void handle_direct_interrupt(
LIST_FOR_EACH_ENTRY_SAFE(&irq_ctx->queue_list_head, temp, entry) {
queue_pair *queue = CONTAINING_RECORD(entry, queue_pair, list_entry);
TraceVerbose(TRACE_INTR, "SERVICING QUEUE : %u IN DIRECT INTERRUPT", queue->idx);
TraceVerbose(TRACE_INTR, "%s: SERVICING QUEUE : %u IN DIRECT INTERRUPT",
irq_ctx->qdma_dev->dev_conf.name, queue->idx);
schedule_dpc(queue, 0 /* H2C */, active_processors);
schedule_dpc(queue, 1 /* C2H */, active_processors);
......@@ -379,22 +431,25 @@ int qdma_device::setup_legacy_vector(queue_pair& q)
WdfInterruptAcquireLock(irq_mgr.irq[legacy_vec]);
auto irq_ctx = get_irq_context(irq_mgr.irq[legacy_vec]);
auto irq_ctx = get_qdma_irq_context(irq_mgr.irq[legacy_vec]);
if (false == IS_LIST_EMPTY(&irq_ctx->queue_list_head)) {
TraceError(TRACE_INTR, "Only One queue is supported in legacy interrupt mode");
TraceError(TRACE_INTR, "%s: Only One queue is supported "
"in legacy interrupt mode", dev_conf.name);
status = -(STATUS_UNSUCCESSFUL);
goto ErrExit;
}
if (hw.qdma_legacy_intr_conf == nullptr) {
TraceError(TRACE_INTR, "legacy interrupt mode not supported");
TraceError(TRACE_INTR, "%s: legacy interrupt mode "
"not supported", dev_conf.name);
status = -(STATUS_UNSUCCESSFUL);
goto ErrExit;
}
ret = hw.qdma_legacy_intr_conf(this, DISABLE);
if (ret < 0) {
TraceError(TRACE_INTR, "qdma_disable_legacy_interrupt failed, ret : %d", ret);
TraceError(TRACE_INTR, "%s: qdma_disable_legacy_interrupt "
"failed, ret : %d", dev_conf.name, ret);
status = hw.qdma_get_error_code(ret);
goto ErrExit;
}
......@@ -403,12 +458,14 @@ int qdma_device::setup_legacy_vector(queue_pair& q)
ret = hw.qdma_legacy_intr_conf(this, ENABLE);
if (ret < 0) {
TraceError(TRACE_INTR, "qdma_enable_legacy_interrupt failed, ret : %d", ret);
TraceError(TRACE_INTR, "%s: qdma_enable_legacy_interrupt "
"failed, ret : %d", dev_conf.name, ret);
status = hw.qdma_get_error_code(ret);
goto ErrExit;
}
TraceVerbose(TRACE_INTR, "Vector Allocated [0] for legacy interrupt mode");
TraceVerbose(TRACE_INTR, "%s: Vector Allocated [0] for legacy interrupt mode",
dev_conf.name);
WdfInterruptReleaseLock(irq_mgr.irq[legacy_vec]);
......@@ -430,7 +487,7 @@ UINT32 qdma_device::alloc_msix_vector_position(queue_pair& q)
vector = irq_mgr.data_vector_id_start;
weight = irq_mgr.irq_weight[vector];
for (UINT32 i = irq_mgr.data_vector_id_start + 1; i < irq_mgr.data_vector_id_end; ++i) {
for (UINT32 i = irq_mgr.data_vector_id_start + 1; i <= irq_mgr.data_vector_id_end; ++i) {
if (irq_mgr.irq_weight[i] < weight) {
weight = irq_mgr.irq_weight[i];
vector = i;
......@@ -441,22 +498,22 @@ UINT32 qdma_device::alloc_msix_vector_position(queue_pair& q)
WdfSpinLockRelease(irq_mgr.lock);
if (op_mode == queue_op_mode::INTR_MODE) {
if (drv_conf.operation_mode == queue_op_mode::INTR_MODE) {
WdfInterruptAcquireLock(irq_mgr.irq[vector]);
auto irq_ctx = get_irq_context(irq_mgr.irq[vector]);
auto irq_ctx = get_qdma_irq_context(irq_mgr.irq[vector]);
LIST_ADD_TAIL(&irq_ctx->queue_list_head, &queue_pairs[q.idx].list_entry);
WdfInterruptReleaseLock(irq_mgr.irq[vector]);
}
else if (op_mode == queue_op_mode::INTR_COAL_MODE) {
else if (drv_conf.operation_mode == queue_op_mode::INTR_COAL_MODE) {
/* For indirect interrupt, return absolute interrupt queue index */
auto irq_ctx = get_irq_context(irq_mgr.irq[vector]);
auto irq_ctx = get_qdma_irq_context(irq_mgr.irq[vector]);
vector = irq_ctx->intr_q->idx_abs;
}
TraceVerbose(TRACE_INTR, "Vector Allocated [%u]. Weight : %u",
vector, irq_mgr.irq_weight[vector]);
TraceVerbose(TRACE_INTR, "%s: Vector Allocated [%u]. Weight : %u",
dev_conf.name, vector, irq_mgr.irq_weight[vector]);
return vector;
}
......@@ -467,11 +524,11 @@ void qdma_device::free_msix_vector_position(
UINT32 vector)
{
auto RELATIVE_INTR_QID = [](auto q) { return q % (UINT32)qdma_max_msix_vectors_per_pf; };
if (op_mode == queue_op_mode::INTR_COAL_MODE)
if (drv_conf.operation_mode == queue_op_mode::INTR_COAL_MODE)
vector = RELATIVE_INTR_QID(vector);
else if (op_mode == queue_op_mode::INTR_MODE) {
else if (drv_conf.operation_mode == queue_op_mode::INTR_MODE) {
WdfInterruptAcquireLock(irq_mgr.irq[vector]);
auto irq_ctx = get_irq_context(irq_mgr.irq[vector]);
auto irq_ctx = get_qdma_irq_context(irq_mgr.irq[vector]);
PLIST_ENTRY entry;
LIST_FOR_EACH_ENTRY(&irq_ctx->queue_list_head, entry) {
queue_pair *queue = CONTAINING_RECORD(entry, queue_pair, list_entry);
......@@ -487,7 +544,8 @@ void qdma_device::free_msix_vector_position(
--irq_mgr.irq_weight[vector];
TraceVerbose(TRACE_INTR, "Vector Released. New weight : %u", irq_mgr.irq_weight[vector]);
TraceVerbose(TRACE_INTR, "%s: Vector Released. New weight : %u",
dev_conf.name, irq_mgr.irq_weight[vector]);
WdfSpinLockRelease(irq_mgr.lock);
}
......@@ -520,7 +578,7 @@ void qdma_device::clear_legacy_vector(
WdfInterruptAcquireLock(irq_mgr.irq[vector]);
auto irq_ctx = get_irq_context(irq_mgr.irq[vector]);
auto irq_ctx = get_qdma_irq_context(irq_mgr.irq[vector]);
auto queue_item = irq_ctx->queue_list_head;
if (hw.qdma_legacy_intr_conf != nullptr) {
......@@ -532,15 +590,17 @@ void qdma_device::clear_legacy_vector(
}
NTSTATUS qdma_device::configure_irq(
PIRQ_CONTEXT irq_context,
PQDMA_IRQ_CONTEXT irq_context,
ULONG vec)
{
irq_context->vector_id = vec;
irq_context->qdma_dev = this;
if ((vec >= irq_mgr.data_vector_id_start) && (vec <= irq_mgr.data_vector_id_end)) {
/* Data interrupts */
irq_mgr.irq_weight[vec] = 0u;
if (op_mode == queue_op_mode::INTR_COAL_MODE) { /* Indirect interrupt */
if (drv_conf.operation_mode == queue_op_mode::INTR_COAL_MODE) { /* Indirect interrupt */
irq_context->intr_q = &irq_mgr.intr_q[vec];
irq_mgr.intr_q[vec].vector = vec;
irq_context->is_coal = true;
......@@ -558,6 +618,7 @@ NTSTATUS qdma_device::configure_irq(
irq_context->interrupt_handler = &handle_direct_interrupt;
}
irq_context->intr_type = irq_mgr.intr_type;
}
return STATUS_SUCCESS;
}
......@@ -565,31 +626,53 @@ NTSTATUS qdma_device::configure_irq(
NTSTATUS qdma_device::arrange_msix_vector_types(void)
{
ULONG vector = 0;
ULONG req_vec;
ULONG num_msix_vectors = pcie.get_num_msix_vectors();
if (num_msix_vectors == 0ul) {
TraceError(TRACE_INTR, "%s: Not enough MSIx vectors : [%u]",
dev_conf.name, num_msix_vectors);
return STATUS_UNSUCCESSFUL;
}
/** Reserve one vector for Error Interrupt which reports out
* the QDMA internal HW errors to the user
*
* Master PF will own this option and hence other PFs dont need
* to reserve vector for error interrupt
*/
if (dev_conf.is_master_pf) { /* Master PF */
TraceInfo(TRACE_INTR, "Setting Error Interrupt by Master PF\n");
TraceInfo(TRACE_INTR, "%s: Setting Error Interrupt by Master PF",
dev_conf.name);
irq_mgr.err_vector_id = vector;
++vector;
/** Error interrupt consumes 1 vector from data interrupt vectors */
drv_conf.data_msix_max = drv_conf.data_msix_max - 1;
}
irq_mgr.user_vector_id = vector;
++vector;
req_vec = vector + drv_conf.data_msix_max + drv_conf.user_msix_max;
if (pcie.get_num_msix_vectors() <= vector) {
if (num_msix_vectors < req_vec) {
TraceError(TRACE_INTR, "%s: Not enough MSIx vectors : [%u]. Requested : [%u]\n",
dev_conf.name, num_msix_vectors, req_vec);
return STATUS_UNSUCCESSFUL;
}
irq_mgr.user_vector_id_start = vector;
irq_mgr.user_vector_id_end = vector + drv_conf.user_msix_max - 1 ;
vector += drv_conf.user_msix_max;
irq_mgr.data_vector_id_start = vector;
if (op_mode == queue_op_mode::INTR_COAL_MODE)
irq_mgr.data_vector_id_end = irq_mgr.data_vector_id_start + IND_INTR_MAX_DATA_VECTORS;
if (drv_conf.operation_mode == queue_op_mode::INTR_COAL_MODE)
irq_mgr.data_vector_id_end = irq_mgr.data_vector_id_start + IND_INTR_MAX_DATA_VECTORS - 1;
else
irq_mgr.data_vector_id_end = pcie.get_num_msix_vectors();
irq_mgr.data_vector_id_end = vector + drv_conf.data_msix_max - 1;
TraceVerbose(TRACE_INTR, "Function: %0X, Err vec : %lu, User vec : %lu Data vec : %u, Tot : %lu",
dev_conf.dev_sbdf.sbdf.fun_no, irq_mgr.err_vector_id,
irq_mgr.user_vector_id, irq_mgr.data_vector_id_start,
irq_mgr.data_vector_id_end);
TraceVerbose(TRACE_INTR, "%s: Function: %0X, Err vec : %lu, User vec : [%u : %u] Data vec : [%u : %u]",
dev_conf.name, dev_conf.dev_sbdf.sbdf.fun_no, irq_mgr.err_vector_id,
irq_mgr.user_vector_id_start, irq_mgr.user_vector_id_end,
irq_mgr.data_vector_id_start, irq_mgr.data_vector_id_end);
return STATUS_SUCCESS;
}
......@@ -605,11 +688,12 @@ NTSTATUS qdma_device::setup_msix_interrupt(
PCM_PARTIAL_RESOURCE_DESCRIPTOR resource_translated;
ULONG numResources = WdfCmResourceListGetCount(resources_translated);
TraceInfo(TRACE_INTR, "Total number of resource : %lu", numResources);
TraceVerbose(TRACE_INTR, "%s: Total number of resource : %lu",
dev_conf.name, numResources);
status = arrange_msix_vector_types();
if (!NT_SUCCESS(status)) {
TraceError(TRACE_INTR, "Failed to arrange MSIx vectors");
TraceError(TRACE_INTR, "%s: Failed to arrange MSIx vectors", dev_conf.name);
return status;
}
......@@ -623,25 +707,38 @@ NTSTATUS qdma_device::setup_msix_interrupt(
WDF_INTERRUPT_CONFIG config;
if (irq_mgr.user_vector_id == vec) {
WDF_INTERRUPT_CONFIG_INIT(&config, EvtUserInterruptIsr, EvtUserInterruptDpc);
}
else if ((irq_mgr.err_vector_id == vec) && (dev_conf.is_master_pf)) {
if ((irq_mgr.err_vector_id == vec) && (dev_conf.is_master_pf)) {
WDF_INTERRUPT_CONFIG_INIT(&config, EvtErrorInterruptIsr, EvtErrorInterruptDpc);
config.EvtInterruptEnable = nullptr;
config.EvtInterruptDisable = nullptr;
TraceVerbose(TRACE_INTR, "%s: [%u] - Error interrupt configuration",
dev_conf.name, vec);
}
else if ((vec >= irq_mgr.user_vector_id_start) && (vec <= irq_mgr.user_vector_id_end)) {
WDF_INTERRUPT_CONFIG_INIT(&config, EvtUserInterruptIsr, EvtUserInterruptDpc);
config.EvtInterruptEnable = EvtUserInterruptEnable;
config.EvtInterruptDisable = EvtUserInterruptDisable;
TraceVerbose(TRACE_INTR, "%s: [%u] - User interrupt configuration",
dev_conf.name, vec);
}
else { /* Data interrupts */
else if ((vec >= irq_mgr.data_vector_id_start) && (vec <= irq_mgr.data_vector_id_end)) { /* Data interrupts */
WDF_INTERRUPT_CONFIG_INIT(&config, EvtDataInterruptIsr, nullptr);
config.EvtInterruptEnable = nullptr;
config.EvtInterruptDisable = nullptr;
TraceVerbose(TRACE_INTR, "%s: [%u] - Data interrupt configuration", dev_conf.name, vec);
}
else {
TraceVerbose(TRACE_INTR, "%s: [%u] - No configuration", dev_conf.name, vec);
continue;
}
config.InterruptRaw = resource;
config.InterruptTranslated = resource_translated;
config.EvtInterruptEnable = nullptr;
config.EvtInterruptDisable = nullptr;
config.AutomaticSerialization = TRUE;
WDF_OBJECT_ATTRIBUTES attribs;
WDF_OBJECT_ATTRIBUTES_INIT(&attribs);
WDF_OBJECT_ATTRIBUTES_INIT_CONTEXT_TYPE(&attribs, IRQ_CONTEXT);
WDF_OBJECT_ATTRIBUTES_INIT_CONTEXT_TYPE(&attribs, QDMA_IRQ_CONTEXT);
status = WdfInterruptCreate(wdf_dev, &config, &attribs, &irq_mgr.irq[vec]);
if (!NT_SUCCESS(status)) {
......@@ -654,37 +751,42 @@ NTSTATUS qdma_device::setup_msix_interrupt(
unmask_msi_entry(vec);
}
PIRQ_CONTEXT irq_context = get_irq_context(irq_mgr.irq[vec]);
auto irq_context = get_qdma_irq_context(irq_mgr.irq[vec]);
status = configure_irq(irq_context, vec);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_INTR, "WdfInterruptCreate failed: %!STATUS!", status);
TraceError(TRACE_INTR, "%s: WdfInterruptCreate failed: %!STATUS!",
dev_conf.name, status);
return status;
}
if ((irq_mgr.err_vector_id == vec) && (dev_conf.is_master_pf)) {
int ret = hw.qdma_hw_error_intr_setup((void *)this, (uint16_t)dev_conf.dev_sbdf.sbdf.fun_no, (uint8_t)irq_mgr.err_vector_id);
int ret = hw.qdma_hw_error_intr_setup((void *)this,
(uint16_t)dev_conf.dev_sbdf.sbdf.fun_no, (uint8_t)irq_mgr.err_vector_id);
if (ret < 0) {
TraceError(TRACE_INTR, "qdma_error_interrupt_setup() failed with error %d", ret);
TraceError(TRACE_INTR, "%s: qdma_error_interrupt_setup() failed with error %d",
dev_conf.name, ret);
return hw.qdma_get_error_code(ret);
}
ret = hw.qdma_hw_error_enable((void *)this, QDMA_ERRS_ALL);
ret = hw.qdma_hw_error_enable((void *)this, hw.qdma_max_errors);
if (ret < 0) {
TraceError(TRACE_INTR, "qdma_error_enable() failed with error %d", ret);
TraceError(TRACE_INTR, "%s: qdma_error_enable() failed with error %d",
dev_conf.name, ret);
return hw.qdma_get_error_code(ret);
}
ret = hw.qdma_hw_error_intr_rearm((void *)this);
if (ret < 0) {
TraceError(TRACE_INTR, "qdma_error_interrupt_rearm() failed with error %d", ret);
TraceError(TRACE_INTR, "%s: qdma_error_interrupt_rearm() failed with error %d",
dev_conf.name, ret);
return hw.qdma_get_error_code(ret);
}
}
++vec;
TraceInfo(TRACE_INTR, "INTERRUPT REGISTERED FOR VECTOR ID: : %d WEIGHT : %d",
irq_context->vector_id, irq_context->weight);
TraceInfo(TRACE_INTR, "%s: INTERRUPT REGISTERED FOR VECTOR ID: : %d WEIGHT : %d",
dev_conf.name, irq_context->vector_id, irq_context->weight);
}
return STATUS_SUCCESS;
......@@ -701,7 +803,8 @@ NTSTATUS qdma_device::setup_legacy_interrupt(
PCM_PARTIAL_RESOURCE_DESCRIPTOR resource_translated;
ULONG numResources = WdfCmResourceListGetCount(resources_translated);
TraceInfo(TRACE_INTR, "Total number of resource : %lu", numResources);
TraceVerbose(TRACE_INTR, "%s: Total number of resource : %lu",
dev_conf.name, numResources);
for (UINT i = 0, vec = 0; i < numResources; i++) {
......@@ -725,25 +828,27 @@ NTSTATUS qdma_device::setup_legacy_interrupt(
WDF_OBJECT_ATTRIBUTES attribs;
WDF_OBJECT_ATTRIBUTES_INIT(&attribs);
WDF_OBJECT_ATTRIBUTES_INIT_CONTEXT_TYPE(&attribs, IRQ_CONTEXT);
WDF_OBJECT_ATTRIBUTES_INIT_CONTEXT_TYPE(&attribs, QDMA_IRQ_CONTEXT);
status = WdfInterruptCreate(wdf_dev, &config, &attribs, &irq_mgr.irq[vec]);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_INTR, "WdfInterruptCreate failed: %!STATUS!", status);
TraceError(TRACE_INTR, "%s: WdfInterruptCreate failed: %!STATUS!",
dev_conf.name, status);
return status;
}
PIRQ_CONTEXT irq_context = get_irq_context(irq_mgr.irq[vec]);
auto irq_context = get_qdma_irq_context(irq_mgr.irq[vec]);
status = configure_irq(irq_context, vec);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_INTR, "WdfInterruptCreate failed: %!STATUS!", status);
TraceError(TRACE_INTR, "%s: WdfInterruptCreate failed: %!STATUS!",
dev_conf.name, status);
return status;
}
++vec;
TraceInfo(TRACE_INTR, "LEGACY INTERRUPT REGISTERED FOR VECTOR ID: : %d WEIGHT : %d",
irq_context->vector_id, irq_context->weight);
TraceInfo(TRACE_INTR, "%s: LEGACY INTERRUPT REGISTERED FOR VECTOR ID: : %d WEIGHT : %d",
dev_conf.name, irq_context->vector_id, irq_context->weight);
/* Only One Vector for Legacy interrupt */
break;
......@@ -768,7 +873,8 @@ NTSTATUS qdma_device::intr_setup(
status = WdfSpinLockCreate(&attr, &irq_mgr.lock);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_INTR, "WdfSpinLockCreate failed: %!STATUS!", status);
TraceError(TRACE_INTR, "%s: WdfSpinLockCreate failed: %!STATUS!",
dev_conf.name, status);
return status;
}
......@@ -805,17 +911,20 @@ NTSTATUS qdma_device::intr_setup(
if (irq_mgr.intr_type == interrupt_type::LEGACY) {
status = setup_legacy_interrupt(resources, resources_translated);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_INTR, " setup_legacy_interrupt() failed: %!STATUS!", status);
TraceError(TRACE_INTR, "%s: setup_legacy_interrupt() failed: %!STATUS!",
dev_conf.name, status);
}
}
else if (irq_mgr.intr_type == interrupt_type::MSIX) {
status = setup_msix_interrupt(resources, resources_translated);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_INTR, " setup_msix_interrupt() failed: %!STATUS!", status);
TraceError(TRACE_INTR, "%s: setup_msix_interrupt() failed: %!STATUS!",
dev_conf.name, status);
}
}
else {
TraceError(TRACE_INTR, "Invalid Interrupt Type : %d (valid are legacy and msix)", (int)irq_mgr.intr_type);
TraceError(TRACE_INTR, "%s: Invalid Interrupt Type : %d "
"(valid are legacy and msix)", dev_conf.name, (int)irq_mgr.intr_type);
return STATUS_UNSUCCESSFUL;
}
......
......@@ -63,12 +63,15 @@ struct intr_queue {
NTSTATUS intring_dump(qdma_intr_ring_info *intring_info);
};
typedef struct IRQ_CONTEXT {
typedef struct QDMA_IRQ_CONTEXT {
bool is_coal;
interrupt_type intr_type = interrupt_type::NONE;
ULONG vector_id;
UINT32 weight;
/* For user interrupt handling */
void *user_data;
/* For Error interrupt handling */
qdma_device *qdma_dev = nullptr;
......@@ -79,21 +82,21 @@ typedef struct IRQ_CONTEXT {
intr_queue *intr_q = nullptr;
/* Interrupt handler function */
void (*interrupt_handler)(IRQ_CONTEXT *);
}*PIRQ_CONTEXT;
void (*interrupt_handler)(QDMA_IRQ_CONTEXT *);
}*PQDMA_IRQ_CONTEXT;
WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(IRQ_CONTEXT, get_irq_context);
WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(QDMA_IRQ_CONTEXT, get_qdma_irq_context);
struct interrupt_manager {
interrupt_type intr_type = interrupt_type::NONE;
ULONG err_vector_id = 0;
ULONG user_vector_id = 0;
ULONG user_vector_id_start = 0;
ULONG user_vector_id_end = 0;
ULONG data_vector_id_end = 0;
ULONG data_vector_id_start = 0;
WDFSPINLOCK lock = nullptr;
UINT32 irq_weight[qdma_max_msix_vectors_per_pf];
WDFINTERRUPT irq[qdma_max_msix_vectors_per_pf];
intr_queue intr_q[qdma_max_msix_vectors_per_pf];
......
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -87,6 +87,18 @@ static constexpr unsigned int mm_c2h_completion_weight = 2048;
static constexpr unsigned int st_h2c_completion_weight = 2048;
static constexpr unsigned int st_c2h_completion_weight = 2048;
static constexpr unsigned int max_req_service_cnt = 10;
/**
* Structure to hold the driver name and mode
*/
struct drv_mode_name {
/** Mode of the function */
queue_op_mode mode;
/** Driver Name */
char name[20];
};
class qdma_device;
struct queue_pair;
......@@ -111,6 +123,9 @@ struct queue_pair;
#define LIST_FOR_EACH_ENTRY_SAFE(list_head, n, entry) \
for ((entry) = (list_head)->Flink, (n) = (entry)->Flink; (entry) != (list_head); (entry) = (n), (n) = (entry)->Flink)
#define LIST_GET_ENTRY(entry, type, member) \
CONTAINING_RECORD(entry, type, member)
/** queue_type - QDMA queue type */
enum class queue_type {
/** Memory mapped queue type */
......@@ -121,24 +136,14 @@ enum class queue_type {
NONE
};
/** queue_state - State of the QDMA queue */
enum queue_state {
/** Queue is available to configure */
QUEUE_AVAILABLE,
/** Queue is added with resources */
QUEUE_ADDED,
/** Queue is programmed and started */
QUEUE_STARTED,
/** Queue critical operation is in progress */
QUEUE_BUSY
};
/** device_state - State of the QDMA device */
enum device_state {
/** Device is online */
DEVICE_ONLINE,
/** Device is in Init State */
DEVICE_INIT,
/** Device is offline */
DEVICE_OFFLINE,
/** Device is online */
DEVICE_ONLINE,
};
/**
......@@ -146,6 +151,11 @@ enum device_state {
*/
#define QDMA_DEV_NAME_MAXLEN 32
/**
* QDMA_QUEUE_NAME_MAXLEN - Maximum queue name length
*/
#define QDMA_QUEUE_NAME_MAXLEN 32
/**
* qdma_dev_conf - defines the per-device qdma property.
*/
......@@ -200,7 +210,6 @@ struct ring_buffer {
struct {
UINT32 tot_desc_accepted;
UINT32 tot_desc_processed;
UINT32 tot_desc_dropped;
}stats;
PFORCEINLINE void advance_idx(volatile UINT32& idx);
......@@ -312,6 +321,31 @@ struct st_c2h_pkt_buffer {
PVOID get_va(void);
};
struct dma_request {
/** Linked list entry to form request queue */
LIST_ENTRY list_entry;
/** DMA Mode (ST/MM) */
bool is_st;
/** Direction of DMA */
WDF_DMA_DIRECTION direction;
/** SG list of request */
PSCATTER_GATHER_LIST sg_list;
/** Completion callback handler */
dma_completion_cb compl_cb;
/** Private data to pass during completion callback */
VOID *priv;
/** The device address to/from DMA
* (Only Valid for MM transfers) */
LONGLONG device_offset;
/** Holds the next index to resume
* request transfer for split request */
UINT32 sg_index;
/** Holds the next device offset to resume
* request transfer for split request
* (Only valid for MM transfers ) */
LONGLONG offset_idx;
};
struct h2c_queue {
queue_config user_conf;
libqdma_queue_config lib_config;
......@@ -319,6 +353,9 @@ struct h2c_queue {
ring_buffer desc_ring;
dma_req_tracker req_tracker;
/** This forms a chain of h2c requests */
LIST_ENTRY req_list_head;
/* This lock is to ensure enqueueing/adding
the requests to the descriptor ring properly
......@@ -326,6 +363,7 @@ struct h2c_queue {
request tracker design will make sure proper execution
*/
WDFSPINLOCK lock = nullptr;
poll_operation_entry *req_proc_entry;
poll_operation_entry *poll_entry;
qdma_q_pidx_reg_info csr_pidx_info;
......@@ -341,6 +379,9 @@ struct c2h_queue {
ring_buffer desc_ring;
dma_req_tracker req_tracker;
/** This forms a chain of c2h requests */
LIST_ENTRY req_list_head;
/* This lock is to ensure MM enqueueing/adding
the requests to the descriptor ring properly
......@@ -348,7 +389,8 @@ struct c2h_queue {
request tracker design takes care
*/
WDFSPINLOCK lock = nullptr;
poll_operation_entry * poll_entry;
poll_operation_entry *req_proc_entry;
poll_operation_entry *poll_entry;
qdma_q_pidx_reg_info csr_pidx_info;
qdma_q_cmpt_cidx_reg_info csr_cmpt_cidx_info;
bool is_cmpt_valid = false;
......@@ -380,6 +422,7 @@ struct queue_pair {
qdma_device *qdma = nullptr;
queue_type type;
char name[QDMA_QUEUE_NAME_MAXLEN];
volatile LONG state;
UINT16 idx = 0; /* queue index - relative to this PF */
......@@ -399,14 +442,18 @@ struct queue_pair {
PFORCEINLINE void update_sw_index_with_csr_c2h_pidx(UINT32 new_pidx);
/** Transfer initiate functions */
NTSTATUS enqueue_mm_request(const WDF_DMA_DIRECTION direction, PSCATTER_GATHER_LIST sg_list, LONGLONG device_offset, dma_completion_cb compl_cb, VOID *priv, size_t &xfered_len);
NTSTATUS enqueue_st_tx_request(PSCATTER_GATHER_LIST sg_list, dma_completion_cb compl_cb, VOID *priv, size_t &xfered_len);
NTSTATUS enqueue_st_rx_request(size_t length, st_rx_completion_cb compl_cb, void *priv);
NTSTATUS enqueue_dma_request(dma_request *request);
NTSTATUS enqueue_dma_request(size_t length, st_rx_completion_cb compl_cb, void *priv);
/** Transfer processing functions */
service_status process_mm_request(dma_request* request, size_t* xfer_len);
service_status process_st_h2c_request(dma_request* request, size_t* xfer_len);
NTSTATUS process_st_c2h_data_pkt(void* udd_ptr, const UINT32 length);
/** Transfer completion functions */
service_status service_mm_st_h2c_completions(ring_buffer *desc_ring, dma_req_tracker *tracker, UINT32 budget);
service_status service_mm_st_h2c_completions(ring_buffer *desc_ring, dma_req_tracker *tracker, UINT32 budget, UINT32& proc_desc_cnt);
service_status st_service_c2h_queue(UINT32 budget);
NTSTATUS process_st_c2h_data_pkt(void *udd_ptr, const UINT32 length);
PFORCEINLINE void update_c2h_pidx_in_batch(UINT32 processed_desc_cnt);
NTSTATUS check_cmpt_error(c2h_wb_header_8B *cmpt_data);
......@@ -443,9 +490,11 @@ public:
qdma_dev_conf dev_conf;
/** Structure that contains QDMA global CSR registers information */
qdma_glbl_csr_conf csr_conf;
/** Structure that contains QDMA driver configuration */
qdma_drv_config drv_conf;
/** DMA Initialization/Teardown APIs */
NTSTATUS init(queue_op_mode operation_mode, UINT8 cfg_bar, UINT16 qsets_max);
NTSTATUS init(qdma_drv_config conf);
NTSTATUS open(WDFDEVICE device, WDFCMRESLIST resources, WDFCMRESLIST resources_translated);
void close(void);
bool qdma_is_device_online(void);
......@@ -455,6 +504,7 @@ public:
NTSTATUS write_bar(qdma_bar_type bar_type, size_t offset, void* data, size_t size);
ULONG qdma_conf_reg_read(size_t offset);
void qdma_conf_reg_write(size_t offset, ULONG data);
NTSTATUS get_bar_info(qdma_bar_type bar_type, PVOID &bar_base, size_t &bar_length);
/** Queue Configuration APIs (Add, Start Stop, Delete, state) */
NTSTATUS qdma_add_queue(UINT16 qid, queue_config& conf);
......@@ -462,12 +512,12 @@ public:
NTSTATUS qdma_stop_queue(UINT16 qid);
NTSTATUS qdma_remove_queue(UINT16 qid);
NTSTATUS qdma_is_queue_in_range(UINT16 qid);
NTSTATUS qdma_get_queues_state(UINT16 qid, CHAR *str, size_t str_maxlen);
NTSTATUS qdma_get_queues_state(UINT16 qid, enum queue_state *qstate, CHAR *str, size_t str_maxlen);
NTSTATUS qdma_set_qmax(UINT32 queue_max);
/** DMA transfer APIs (From Device and To Device) */
NTSTATUS qdma_enqueue_mm_request(UINT16 qid, WDF_DMA_DIRECTION direction, PSCATTER_GATHER_LIST sg_list, LONGLONG device_offset, dma_completion_cb compl_cb, VOID *priv, size_t &xfered_len);
NTSTATUS qdma_enqueue_st_tx_request(UINT16 qid, PSCATTER_GATHER_LIST sg_list, dma_completion_cb compl_cb, VOID *priv, size_t &xfered_len);
NTSTATUS qdma_enqueue_mm_request(UINT16 qid, WDF_DMA_DIRECTION direction, PSCATTER_GATHER_LIST sg_list, LONGLONG device_offset, dma_completion_cb compl_cb, VOID *priv);
NTSTATUS qdma_enqueue_st_tx_request(UINT16 qid, PSCATTER_GATHER_LIST sg_list, dma_completion_cb compl_cb, VOID *priv);
NTSTATUS qdma_enqueue_st_rx_request(UINT16 qid, size_t length, st_rx_completion_cb compl_cb, VOID *priv);
/** DMA Completion ring APIs */
......@@ -486,6 +536,7 @@ public:
NTSTATUS qdma_intring_dump(qdma_intr_ring_info *intring_info);
NTSTATUS qdma_regdump(qdma_reg_dump_info *regdump_info);
NTSTATUS qdma_get_qstats_info(qdma_qstat_info &qstats);
NTSTATUS qdma_get_reg_info(qdma_reg_info *reg_info);
/** DMA Versioning APIs */
NTSTATUS qdma_device_version_info(qdma_version_info &version_info);
......@@ -497,8 +548,6 @@ private:
LIST_ENTRY list_entry;
/** Identifier returned by resource manager */
UINT32 dma_dev_index = 0;
/** Maximum number of queues for this device */
UINT32 qmax;
/** Start/base queue number for this device */
INT32 qbase = -1;
/** Device state */
......@@ -506,8 +555,6 @@ private:
xpcie_device pcie;
WDFDEVICE wdf_dev = nullptr;
queue_op_mode op_mode;
UINT8 config_bar = 0;
qdma_hw_version_info hw_version_info;
queue_pair *queue_pairs = nullptr;
interrupt_manager irq_mgr;
......@@ -544,7 +591,7 @@ private:
void destroy_func(void);
void destroy_resource_manager(void);
NTSTATUS configure_irq(PIRQ_CONTEXT irq_context, ULONG vec);
NTSTATUS configure_irq(PQDMA_IRQ_CONTEXT irq_context, ULONG vec);
NTSTATUS intr_setup(WDFCMRESLIST resources, const WDFCMRESLIST resources_translated);
void intr_teardown(void);
NTSTATUS setup_legacy_interrupt(WDFCMRESLIST resources, const WDFCMRESLIST resources_translated);
......
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -14,15 +14,15 @@
* under the License.
*/
#ifndef EQDMA_ACCESS_H_
#define EQDMA_ACCESS_H_
#include "qdma_access_common.h"
#ifndef __EQDMA_SOFT_ACCESS_H_
#define __EQDMA_SOFT_ACCESS_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "qdma_platform.h"
/**
* enum qdma_error_idx - qdma errors
*/
......@@ -191,8 +191,18 @@ struct eqdma_hw_err_info {
uint32_t stat_reg_addr;
uint32_t leaf_err_mask;
uint32_t global_err_mask;
void (*eqdma_hw_err_process)(void *dev_hndl);
};
#define EQDMA_OFFSET_VF_VERSION 0x5014
#define EQDMA_OFFSET_VF_USER_BAR 0x5018
#define EQDMA_OFFSET_MBOX_BASE_PF 0x22400
#define EQDMA_OFFSET_MBOX_BASE_VF 0x5000
#define EQDMA_COMPL_CTXT_BADDR_HIGH_H_MASK GENMASK_ULL(63, 38)
#define EQDMA_COMPL_CTXT_BADDR_HIGH_L_MASK GENMASK_ULL(37, 6)
#define EQDMA_COMPL_CTXT_BADDR_LOW_MASK GENMASK_ULL(5, 2)
int eqdma_init_ctxt_memory(void *dev_hndl);
......@@ -243,7 +253,8 @@ int eqdma_context_buf_len(uint8_t st,
enum qdma_dev_q_type q_type, uint32_t *buflen);
int eqdma_hw_error_process(void *dev_hndl);
const char *eqdma_hw_get_error_name(enum qdma_error_idx err_idx);
const char *eqdma_hw_get_error_name(uint32_t err_idx);
int eqdma_hw_error_enable(void *dev_hndl, uint32_t err_idx);
int eqdma_read_dump_queue_context(void *dev_hndl,
uint16_t qid_hw,
......@@ -258,19 +269,38 @@ int eqdma_get_user_bar(void *dev_hndl, uint8_t is_vf,
uint8_t func_id, uint8_t *user_bar);
int eqdma_dump_config_reg_list(void *dev_hndl,
uint32_t num_regs,
uint32_t total_regs,
struct qdma_reg_data *reg_list,
char *buf, uint32_t buflen);
int eqdma_read_reg_list(void *dev_hndl, uint8_t is_vf,
uint16_t reg_rd_slot,
uint16_t reg_rd_group,
uint16_t *total_regs,
struct qdma_reg_data *reg_list);
int eqdma_set_default_global_csr(void *dev_hndl);
int eqdma_global_csr_conf(void *dev_hndl, uint8_t index, uint8_t count,
uint32_t *csr_val,
enum qdma_global_csr_type csr_type,
enum qdma_hw_access_type access_type);
int eqdma_global_writeback_interval_conf(void *dev_hndl,
enum qdma_wrb_interval *wb_int,
enum qdma_hw_access_type access_type);
int eqdma_mm_channel_conf(void *dev_hndl, uint8_t channel, uint8_t is_c2h,
uint8_t enable);
int eqdma_dump_reg_info(void *dev_hndl, uint32_t reg_addr,
uint32_t num_regs, char *buf, uint32_t buflen);
uint32_t eqdma_get_config_num_regs(void);
struct xreg_info *eqdma_get_config_regs(void);
#ifdef __cplusplus
}
#endif
#endif /* EQDMA_ACCESS_H_ */
#endif /* __EQDMA_SOFT_ACCESS_H_ */
......@@ -14,270 +14,1213 @@
* under the License.
*/
#ifndef EQDMA_SOFT_REG_H_
#define EQDMA_SOFT_REG_H_
#ifndef __EQDMA_SOFT_REG_H
#define __EQDMA_SOFT_REG_H
#include "qdma_soft_reg.h"
#ifdef __cplusplus
extern "C" {
#endif
/* H2C Throttle settings */
#define EQDMA_H2C_THROT_DATA_THRESH 0x5000
#define EQDMA_THROT_EN_DATA 1
#define EQDMA_THROT_EN_REQ 0
#define EQDMA_H2C_THROT_REQ_THRESH 0xC0
/** Software Context */
#define EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_H_MASK GENMASK_ULL(63, 53)
#define EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_M_MASK GENMASK_ULL(52, 21)
#define EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_L_MASK GENMASK_ULL(20, 0)
#define EQDMA_SW_CTXT_PASID_GET_H_MASK GENMASK(21, 12)
#define EQDMA_SW_CTXT_PASID_GET_L_MASK GENMASK(11, 0)
#define EQDMA_SW_CTXT_W7_VIRTIO_DSC_BASE_H_MASK GENMASK(10, 0)
#define EQDMA_SW_CTXT_W6_VIRTIO_DSC_BASE_M_MASK GENMASK(31, 0)
#define EQDMA_SW_CTXT_W5_VIRTIO_DSC_BASE_L_MASK GENMASK(31, 11)
#define EQDMA_SW_CTXT_W5_PASID_EN_MASK BIT(10)
#define EQDMA_SW_CTXT_W5_PASID_H_MASK GENMASK(9, 0)
#define EQDMA_SW_CTXT_W4_PASID_L_MASK GENMASK(31, 20)
#define EQDMA_SW_CTXT_W4_HOST_ID_MASK GENMASK(19, 16)
#define EQDMA_SW_CTXT_W4_IRQ_BYP_MASK BIT(15)
#define EQDMA_SW_CTXT_W4_PACK_BYP_OUT_MASK BIT(14)
#define EQDMA_SW_CTXT_W4_VIRTIO_EN_MASK BIT(13)
#define EQDMA_SW_CTXT_W4_DIS_INTR_VF_MASK BIT(12)
/** Completion Context */
#define EQDMA_CMPL_CTXT_PASID_GET_H_MASK GENMASK(21, 9)
#define EQDMA_CMPL_CTXT_PASID_GET_L_MASK GENMASK(8, 0)
#define EQDMA_COMPL_CTXT_W5_SH_CMPT_MASK BIT(19)
#define EQDMA_COMPL_CTXT_W5_VIO_EOP_MASK BIT(18)
#define EQDMA_COMPL_CTXT_W5_BADDR4_LOW_MASK GENMASK(17, 14)
#define EQDMA_COMPL_CTXT_W5_PASID_EN_MASK BIT(13)
#define EQDMA_COMPL_CTXT_W5_PASID_H_MASK GENMASK(12, 0)
#define EQDMA_COMPL_CTXT_W4_PASID_L_MASK GENMASK(31, 23)
#define EQDMA_COMPL_CTXT_W4_HOST_ID_MASK GENMASK(22, 19)
#define EQDMA_COMPL_CTXT_W4_DIR_C2H_MASK BIT(18)
#define EQDMA_COMPL_CTXT_W4_VIO_MASK BIT(17)
#define EQDMA_COMPL_CTXT_W4_DIS_INTR_VF_MASK BIT(16)
/** Interrupt Context */
#define EQDMA_INTR_CTXT_PASID_GET_H_MASK GENMASK(21, 9)
#define EQDMA_INTR_CTXT_PASID_GET_L_MASK GENMASK(8, 0)
#define EQDMA_INTR_CTXT_W3_FUNC_ID_MASK GENMASK(29, 18)
#define EQDMA_INTR_CTXT_W3_PASID_EN_MASK BIT(13)
#define EQDMA_INTR_CTXT_W3_PASID_H_MASK GENMASK(12, 0)
#define EQDMA_INTR_CTXT_W2_PASID_L_MASK GENMASK(31, 23)
#define EQDMA_INTR_CTXT_W2_HOST_ID_MASK GENMASK(22, 19)
/** Prefetch Context */
#define EQDMA_PFTCH_CTXT_W0_NUM_PFTCH_MASK GENMASK(18, 9)
#define EQDMA_PFTCH_CTXT_W0_VAR_DESC_MASK BIT(8)
/* ------------------------- Hardware Errors ------------------------------ */
#define EQDMA_TOTAL_LEAF_ERROR_AGGREGATORS 9
#include "qdma_platform.h"
#define EQDMA_OFFSET_GLBL_ERR_INT 0XB04
#define EQDMA_GLBL_ERR_FUNC_MASK GENMASK(11, 0)
#define EQDMA_GLBL_ERR_VEC_MASK GENMASK(22, 12)
#define EQDMA_GLBL_ERR_ARM_MASK BIT(24)
#define EQDMA_GLBL_ERR_COAL_MASK BIT(23)
#define EQDMA_GLBL_ERR_DIS_INTR_ON_VF_MASK BIT(25)
#define EQDMA_GLBL_ERR_HOST_ID_MASK BIT(25)
#define EQDMA_OFFSET_GLBL_ERR_STAT 0X248
#define EQDMA_OFFSET_GLBL_ERR_MASK 0X24C
#define EQDMA_GLBL_ERR_RAM_SBE_MASK BIT(0)
#define EQDMA_GLBL_ERR_RAM_DBE_MASK BIT(1)
#define EQDMA_GLBL_ERR_DSC_MASK BIT(2)
#define EQDMA_GLBL_ERR_TRQ_MASK BIT(3)
#define EQDMA_GLBL_ERR_H2C_MM_0_MASK BIT(4)
#define EQDMA_GLBL_ERR_H2C_MM_1_MASK BIT(5)
#define EQDMA_GLBL_ERR_C2H_MM_0_MASK BIT(6)
#define EQDMA_GLBL_ERR_C2H_MM_1_MASK BIT(7)
#define EQDMA_GLBL_ERR_ST_C2H_MASK BIT(8)
#define EQDMA_GLBL_ERR_BDG_MASK BIT(15)
#define EQDMA_GLBL_ERR_IND_CTXT_CMD_MASK GENMASK(14, 9)
#define EQDMA_GLBL_ERR_ST_H2C_MASK BIT(16)
#define EQDMA_OFFSET_C2H_ERR_STAT 0XAF0
#define EQDMA_OFFSET_C2H_ERR_MASK 0XAF4
#define EQDMA_C2H_ERR_MTY_MISMATCH_MASK BIT(0)
#define EQDMA_C2H_ERR_LEN_MISMATCH_MASK BIT(1)
#define EQDMA_C2H_ERR_SH_CMPT_DSC_MASK BIT(2)
#define EQDMA_C2H_ERR_QID_MISMATCH_MASK BIT(3)
#define EQDMA_C2H_ERR_DESC_RSP_ERR_MASK BIT(4)
#define EQDMA_C2H_ERR_ENG_WPL_DATA_PAR_ERR_MASK BIT(6)
#define EQDMA_C2H_ERR_MSI_INT_FAIL_MASK BIT(7)
#define EQDMA_C2H_ERR_ERR_DESC_CNT_MASK BIT(9)
#define EQDMA_C2H_ERR_PORTID_CTXT_MISMATCH_MASK BIT(10)
#define EQDMA_C2H_ERR_CMPT_INV_Q_ERR_MASK BIT(12)
#define EQDMA_C2H_ERR_CMPT_QFULL_ERR_MASK BIT(13)
#define EQDMA_C2H_ERR_CMPT_CIDX_ERR_MASK BIT(14)
#define EQDMA_C2H_ERR_CMPT_PRTY_ERR_MASK BIT(15)
#define EQDMA_C2H_ERR_AVL_RING_DSC_MASK BIT(16)
#define EQDMA_C2H_ERR_HDR_ECC_UNC_MASK BIT(17)
#define EQDMA_C2H_ERR_HDR_ECC_COR_MASK BIT(18)
#define EQDMA_C2H_ERR_ALL_MASK 0X3F6DF
#ifdef CHAR_BIT
#undef CHAR_BIT
#endif
#define CHAR_BIT 8
#define EQDMA_OFFSET_C2H_FATAL_ERR_STAT 0XAF8
#define EQDMA_OFFSET_C2H_FATAL_ERR_MASK 0XAFC
#define EQDMA_C2H_FATAL_ERR_MTY_MISMATCH_MASK BIT(0)
#define EQDMA_C2H_FATAL_ERR_LEN_MISMATCH_MASK BIT(1)
#define EQDMA_C2H_FATAL_ERR_QID_MISMATCH_MASK BIT(3)
#define EQDMA_C2H_FATAL_ERR_TIMER_FIFO_RAM_RDBE_MASK BIT(4)
#define EQDMA_C2H_FATAL_ERR_PFCH_II_RAM_RDBE_MASK BIT(8)
#define EQDMA_C2H_FATAL_ERR_CMPT_CTXT_RAM_RDBE_MASK BIT(9)
#define EQDMA_C2H_FATAL_ERR_PFCH_CTXT_RAM_RDBE_MASK BIT(10)
#define EQDMA_C2H_FATAL_ERR_DESC_REQ_FIFO_RAM_RDBE_MASK BIT(11)
#define EQDMA_C2H_FATAL_ERR_INT_CTXT_RAM_RDBE_MASK BIT(12)
#define EQDMA_C2H_FATAL_ERR_CMPT_COAL_DATA_RAM_RDBE_MASK BIT(14)
#define EQDMA_C2H_FATAL_ERR_CMPT_FIFO_RAM_RDBE_MASK BIT(15)
#define EQDMA_C2H_FATAL_ERR_QID_FIFO_RAM_RDBE_MASK BIT(16)
#define EQDMA_C2H_FATAL_ERR_PAYLOAD_FIFO_RAM_RDBE_MASK BIT(17)
#define EQDMA_C2H_FATAL_ERR_WPL_DATA_PAR_MASK BIT(18)
#define EQDMA_C2H_FATAL_ERR_AVL_RING_FIFO_RAM_RDBE_MASK BIT(19)
#define EQDMA_C2H_FATAL_ERR_HDR_ECC_UNC_MASK BIT(20)
#define EQDMA_C2H_FATAL_ERR_ALL_MASK 0X1FDF1B
#ifdef BIT
#undef BIT
#endif
#define BIT(n) (1u << (n))
#define EQDMA_OFFSET_H2C_ERR_STAT 0XE00
#define EQDMA_OFFSET_H2C_ERR_MASK 0XE04
#define EQDMA_H2C_ERR_ZERO_LEN_DESC_MASK BIT(0)
#define EQDMA_H2C_ERR_SDI_MRKR_REQ_MOP_MASK BIT(1)
#define EQDMA_H2C_ERR_NO_DMA_DSC_MASK BIT(2)
#define EQDMA_H2C_ERR_SBE_MASK BIT(3)
#define EQDMA_H2C_ERR_DBE_MASK BIT(4)
#define EQDMA_H2C_ERR_PAR_ERR_MASK BIT(5)
#define EQDMA_H2C_ERR_ALL_MASK 0X3F
#ifdef BITS_PER_BYTE
#undef BITS_PER_BYTE
#endif
#define BITS_PER_BYTE CHAR_BIT
#define EQDMA_OFFSET_GLBL_DSC_ERR_STAT 0X254
#define EQDMA_OFFSET_GLBL_DSC_ERR_MASK 0X258
#define EQDMA_GLBL_DSC_ERR_POISON_MASK BIT(1)
#define EQDMA_GLBL_DSC_ERR_UR_CA_MASK BIT(2)
#define EQDMA_GLBL_DSC_ERR_BCNT_MASK BIT(3)
#define EQDMA_GLBL_DSC_ERR_PARAM_MASK BIT(4)
#define EQDMA_GLBL_DSC_ERR_ADDR_MASK BIT(5)
#define EQDMA_GLBL_DSC_ERR_TAG_MASK BIT(6)
#define EQDMA_GLBL_DSC_ERR_FLR_MASK BIT(8)
#define EQDMA_GLBL_DSC_ERR_TIMEOUT_MASK BIT(9)
#define EQDMA_GLBL_DSC_ERR_DAT_POISON_MASK BIT(16)
#define EQDMA_GLBL_DSC_ERR_FLR_CANCEL_MASK BIT(19)
#define EQDMA_GLBL_DSC_ERR_DMA_MASK BIT(20)
#define EQDMA_GLBL_DSC_ERR_DSC_MASK BIT(21)
#define EQDMA_GLBL_DSC_ERR_RQ_CANCEL_MASK BIT(22)
#define EQDMA_GLBL_DSC_ERR_DBE_MASK BIT(23)
#define EQDMA_GLBL_DSC_ERR_SBE_MASK BIT(24)
#define EQDMA_GLBL_DSC_ERR_ALL_MASK 0X1F9037E
#ifdef BITS_PER_LONG
#undef BITS_PER_LONG
#endif
#define BITS_PER_LONG (sizeof(uint32_t) * BITS_PER_BYTE)
#define EQDMA_OFFSET_GLBL_TRQ_ERR_STAT 0X264
#define EQDMA_OFFSET_GLBL_TRQ_ERR_MASK 0X268
#define EQDMA_GLBL_TRQ_ERR_CSR_UNMAPPED_MASK BIT(0)
#define EQDMA_GLBL_TRQ_ERR_VF_ACCESS_MASK BIT(1)
#define EQDMA_GLBL_TRQ_ERR_TCP_CSR_MASK BIT(3)
#define EQDMA_GLBL_TRQ_ERR_QSPC_UNMAPPED_MASK BIT(4)
#define EQDMA_GLBL_TRQ_ERR_QID_RANGE_MASK BIT(5)
#define EQDMA_GLBL_TRQ_ERR_TCP_QSPC_TIMEOUT_MASK BIT(7)
#define EQDMA_GLBL_TRQ_ERR_ALL_MASK 0XB3
#ifdef BITS_PER_LONG_LONG
#undef BITS_PER_LONG_LONG
#endif
#define BITS_PER_LONG_LONG (sizeof(uint64_t) * BITS_PER_BYTE)
#define EQDMA_OFFSET_RAM_SBE_1_STAT 0XE4
#define EQDMA_OFFSET_RAM_SBE_1_MASK 0XE0
#define EQDMA_SBE_1_ERR_RC_RRQ_EVEN_RAM_MASK BIT(0)
#define EQDMA_SBE_1_ERR_TAG_ODD_RAM_MASK BIT(1)
#define EQDMA_SBE_1_ERR_TAG_EVEN_RAM_MASK BIT(2)
#define EQDMA_SBE_1_ERR_PFCH_CTXT_CAM_RAM_0_MASK BIT(3)
#define EQDMA_SBE_1_ERR_PFCH_CTXT_CAM_RAM_1_MASK BIT(4)
#define EQDMA_SBE_1_ERR_ALL_MASK 0X1F
#ifdef GENMASK
#undef GENMASK
#endif
#define GENMASK(h, l) \
((0xFFFFFFFF << (l)) & (0xFFFFFFFF >> (BITS_PER_LONG - 1 - (h))))
#define EQDMA_OFFSET_RAM_DBE_1_STAT 0XEC
#define EQDMA_OFFSET_RAM_DBE_1_MASK 0XE8
#define EQDMA_DBE_1_ERR_RC_RRQ_EVEN_RAM_MASK BIT(0)
#define EQDMA_DBE_1_ERR_TAG_ODD_RAM_MASK BIT(1)
#define EQDMA_DBE_1_ERR_TAG_EVEN_RAM_MASK BIT(2)
#define EQDMA_DBE_1_ERR_PFCH_CTXT_CAM_RAM_0_MASK BIT(3)
#define EQDMA_DBE_1_ERR_PFCH_CTXT_CAM_RAM_1_MASK BIT(4)
#define EQDMA_DBE_1_ERR_ALL_MASK 0X1F
#ifdef GENMASK_ULL
#undef GENMASK_ULL
#endif
#define GENMASK_ULL(h, l) \
((0xFFFFFFFFFFFFFFFF << (l)) & \
(0xFFFFFFFFFFFFFFFF >> (BITS_PER_LONG_LONG - 1 - (h))))
#define EQDMA_OFFSET_RAM_SBE_STAT 0XF4
#define EQDMA_OFFSET_RAM_SBE_MASK 0XF0
#define EQDMA_SBE_ERR_MI_H2C0_DAT_MASK BIT(0)
#define EQDMA_SBE_ERR_MI_H2C1_DAT_MASK BIT(1)
#define EQDMA_SBE_ERR_MI_H2C2_DAT_MASK BIT(2)
#define EQDMA_SBE_ERR_MI_H2C3_DAT_MASK BIT(3)
#define EQDMA_SBE_ERR_MI_C2H0_DAT_MASK BIT(4)
#define EQDMA_SBE_ERR_MI_C2H1_DAT_MASK BIT(5)
#define EQDMA_SBE_ERR_MI_C2H2_DAT_MASK BIT(6)
#define EQDMA_SBE_ERR_MI_C2H3_DAT_MASK BIT(7)
#define EQDMA_SBE_ERR_H2C_RD_BRG_DAT_MASK BIT(8)
#define EQDMA_SBE_ERR_H2C_WR_BRG_DAT_MASK BIT(9)
#define EQDMA_SBE_ERR_C2H_RD_BRG_DAT_MASK BIT(10)
#define EQDMA_SBE_ERR_C2H_WR_BRG_DAT_MASK BIT(11)
#define EQDMA_SBE_ERR_FUNC_MAP_MASK BIT(12)
#define EQDMA_SBE_ERR_DSC_HW_CTXT_MASK BIT(13)
#define EQDMA_SBE_ERR_DSC_CRD_RCV_MASK BIT(14)
#define EQDMA_SBE_ERR_DSC_SW_CTXT_MASK BIT(15)
#define EQDMA_SBE_ERR_DSC_CPLI_MASK BIT(16)
#define EQDMA_SBE_ERR_DSC_CPLD_MASK BIT(17)
#define EQDMA_SBE_ERR_MI_TL_SLV_FIFO_RAM_MASK BIT(18)
#define EQDMA_SBE_ERR_TIMER_FIFO_RAM_MASK GENMASK(22, 19)
#define EQDMA_SBE_ERR_QID_FIFO_RAM_MASK BIT(23)
#define EQDMA_SBE_ERR_WRB_COAL_DATA_RAM_MASK BIT(24)
#define EQDMA_SBE_ERR_INT_CTXT_RAM_MASK BIT(25)
#define EQDMA_SBE_ERR_DESC_REQ_FIFO_RAM_MASK BIT(26)
#define EQDMA_SBE_ERR_PFCH_CTXT_RAM_MASK BIT(27)
#define EQDMA_SBE_ERR_WRB_CTXT_RAM_MASK BIT(28)
#define EQDMA_SBE_ERR_PFCH_LL_RAM_MASK BIT(29)
#define EQDMA_SBE_ERR_PEND_FIFO_RAM_MASK BIT(30)
#define EQDMA_SBE_ERR_RC_RRQ_ODD_RAM_MASK BIT(31)
#define EQDMA_SBE_ERR_ALL_MASK 0XFFFFFFFF
#define DEBGFS_LINE_SZ (81)
#define EQDMA_OFFSET_RAM_DBE_STAT 0XFC
#define EQDMA_OFFSET_RAM_DBE_MASK 0XF8
#define EQDMA_DBE_ERR_MI_H2C0_DAT_MASK BIT(0)
#define EQDMA_DBE_ERR_MI_H2C1_DAT_MASK BIT(1)
#define EQDMA_DBE_ERR_MI_H2C2_DAT_MASK BIT(2)
#define EQDMA_DBE_ERR_MI_H2C3_DAT_MASK BIT(3)
#define EQDMA_DBE_ERR_MI_C2H0_DAT_MASK BIT(4)
#define EQDMA_DBE_ERR_MI_C2H1_DAT_MASK BIT(5)
#define EQDMA_DBE_ERR_MI_C2H2_DAT_MASK BIT(6)
#define EQDMA_DBE_ERR_MI_C2H3_DAT_MASK BIT(7)
#define EQDMA_DBE_ERR_H2C_RD_BRG_DAT_MASK BIT(8)
#define EQDMA_DBE_ERR_H2C_WR_BRG_DAT_MASK BIT(9)
#define EQDMA_DBE_ERR_C2H_RD_BRG_DAT_MASK BIT(10)
#define EQDMA_DBE_ERR_C2H_WR_BRG_DAT_MASK BIT(11)
#define EQDMA_DBE_ERR_FUNC_MAP_MASK BIT(12)
#define EQDMA_DBE_ERR_DSC_HW_CTXT_MASK BIT(13)
#define EQDMA_DBE_ERR_DSC_CRD_RCV_MASK BIT(14)
#define EQDMA_DBE_ERR_DSC_SW_CTXT_MASK BIT(15)
#define EQDMA_DBE_ERR_DSC_CPLI_MASK BIT(16)
#define EQDMA_DBE_ERR_DSC_CPLD_MASK BIT(17)
#define EQDMA_DBE_ERR_MI_TL_SLV_FIFO_RAM_MASK BIT(18)
#define EQDMA_DBE_ERR_TIMER_FIFO_RAM_MASK GENMASK(22, 19)
#define EQDMA_DBE_ERR_QID_FIFO_RAM_MASK BIT(23)
#define EQDMA_DBE_ERR_WRB_COAL_DATA_RAM_MASK BIT(24)
#define EQDMA_DBE_ERR_INT_CTXT_RAM_MASK BIT(25)
#define EQDMA_DBE_ERR_DESC_REQ_FIFO_RAM_MASK BIT(26)
#define EQDMA_DBE_ERR_PFCH_CTXT_RAM_MASK BIT(27)
#define EQDMA_DBE_ERR_WRB_CTXT_RAM_MASK BIT(28)
#define EQDMA_DBE_ERR_PFCH_LL_RAM_MASK BIT(29)
#define EQDMA_DBE_ERR_PEND_FIFO_RAM_MASK BIT(30)
#define EQDMA_DBE_ERR_RC_RRQ_ODD_RAM_MASK BIT(31)
#define EQDMA_DBE_ERR_ALL_MASK 0XFFFFFFFF
#ifdef ARRAY_SIZE
#undef ARRAY_SIZE
#endif
#define ARRAY_SIZE(arr) (sizeof(arr) / \
sizeof(arr[0]))
#define EQDMA_OFFSET_VF_VERSION 0x5014
#define EQDMA_OFFSET_VF_USER_BAR 0x5018
#define EQDMA_OFFSET_MBOX_BASE_VF 0x5000
#define EQDMA_OFFSET_MBOX_BASE_PF 0x22400
uint32_t eqdma_config_num_regs_get(void);
struct xreg_info *eqdma_config_regs_get(void);
#define EQDMA_CFG_BLK_IDENTIFIER_ADDR 0x00
#define CFG_BLK_IDENTIFIER_MASK GENMASK(31, 20)
#define CFG_BLK_IDENTIFIER_1_MASK GENMASK(19, 16)
#define CFG_BLK_IDENTIFIER_RSVD_1_MASK GENMASK(15, 8)
#define CFG_BLK_IDENTIFIER_VERSION_MASK GENMASK(7, 0)
#define EQDMA_CFG_BLK_PCIE_MAX_PLD_SIZE_ADDR 0x08
#define CFG_BLK_PCIE_MAX_PLD_SIZE_RSVD_1_MASK GENMASK(31, 7)
#define CFG_BLK_PCIE_MAX_PLD_SIZE_PROG_MASK GENMASK(6, 4)
#define CFG_BLK_PCIE_MAX_PLD_SIZE_RSVD_2_MASK BIT(3)
#define CFG_BLK_PCIE_MAX_PLD_SIZE_ISSUED_MASK GENMASK(2, 0)
#define EQDMA_CFG_BLK_PCIE_MAX_READ_REQ_SIZE_ADDR 0x0C
#define CFG_BLK_PCIE_MAX_READ_REQ_SIZE_RSVD_1_MASK GENMASK(31, 7)
#define CFG_BLK_PCIE_MAX_READ_REQ_SIZE_PROG_MASK GENMASK(6, 4)
#define CFG_BLK_PCIE_MAX_READ_REQ_SIZE_RSVD_2_MASK BIT(3)
#define CFG_BLK_PCIE_MAX_READ_REQ_SIZE_ISSUED_MASK GENMASK(2, 0)
#define EQDMA_CFG_BLK_SYSTEM_ID_ADDR 0x10
#define CFG_BLK_SYSTEM_ID_RSVD_1_MASK GENMASK(31, 17)
#define CFG_BLK_SYSTEM_ID_INST_TYPE_MASK BIT(16)
#define CFG_BLK_SYSTEM_ID_MASK GENMASK(15, 0)
#define EQDMA_CFG_BLK_MSIX_ENABLE_ADDR 0x014
#define CFG_BLK_MSIX_ENABLE_MASK GENMASK(31, 0)
#define EQDMA_CFG_PCIE_DATA_WIDTH_ADDR 0x18
#define CFG_PCIE_DATA_WIDTH_RSVD_1_MASK GENMASK(31, 3)
#define CFG_PCIE_DATA_WIDTH_DATAPATH_MASK GENMASK(2, 0)
#define EQDMA_CFG_PCIE_CTL_ADDR 0x1C
#define CFG_PCIE_CTL_RSVD_1_MASK GENMASK(31, 18)
#define CFG_PCIE_CTL_MGMT_AXIL_CTRL_MASK GENMASK(17, 16)
#define CFG_PCIE_CTL_RSVD_2_MASK GENMASK(15, 2)
#define CFG_PCIE_CTL_RRQ_DISABLE_MASK BIT(1)
#define CFG_PCIE_CTL_RELAXED_ORDERING_MASK BIT(0)
#define EQDMA_CFG_BLK_MSI_ENABLE_ADDR 0x20
#define CFG_BLK_MSI_ENABLE_MASK GENMASK(31, 0)
#define EQDMA_CFG_AXI_USER_MAX_PLD_SIZE_ADDR 0x40
#define CFG_AXI_USER_MAX_PLD_SIZE_RSVD_1_MASK GENMASK(31, 7)
#define CFG_AXI_USER_MAX_PLD_SIZE_ISSUED_MASK GENMASK(6, 4)
#define CFG_AXI_USER_MAX_PLD_SIZE_RSVD_2_MASK BIT(3)
#define CFG_AXI_USER_MAX_PLD_SIZE_PROG_MASK GENMASK(2, 0)
#define EQDMA_CFG_AXI_USER_MAX_READ_REQ_SIZE_ADDR 0x44
#define CFG_AXI_USER_MAX_READ_REQ_SIZE_RSVD_1_MASK GENMASK(31, 7)
#define CFG_AXI_USER_MAX_READ_REQ_SIZE_USISSUED_MASK GENMASK(6, 4)
#define CFG_AXI_USER_MAX_READ_REQ_SIZE_RSVD_2_MASK BIT(3)
#define CFG_AXI_USER_MAX_READ_REQ_SIZE_USPROG_MASK GENMASK(2, 0)
#define EQDMA_CFG_BLK_MISC_CTL_ADDR 0x4C
#define CFG_BLK_MISC_CTL_RSVD_1_MASK GENMASK(31, 24)
#define CFG_BLK_MISC_CTL_10B_TAG_EN_MASK BIT(23)
#define CFG_BLK_MISC_CTL_RSVD_2_MASK BIT(22)
#define CFG_BLK_MISC_CTL_AXI_WBK_MASK BIT(21)
#define CFG_BLK_MISC_CTL_AXI_DSC_MASK BIT(20)
#define CFG_BLK_MISC_CTL_NUM_TAG_MASK GENMASK(19, 8)
#define CFG_BLK_MISC_CTL_RSVD_3_MASK GENMASK(7, 5)
#define CFG_BLK_MISC_CTL_RQ_METERING_MULTIPLIER_MASK GENMASK(4, 0)
#define EQDMA_CFG_PL_CRED_CTL_ADDR 0x68
#define CFG_PL_CRED_CTL_RSVD_1_MASK GENMASK(31, 5)
#define CFG_PL_CRED_CTL_SLAVE_CRD_RLS_MASK BIT(4)
#define CFG_PL_CRED_CTL_RSVD_2_MASK GENMASK(3, 1)
#define CFG_PL_CRED_CTL_MASTER_CRD_RST_MASK BIT(0)
#define EQDMA_CFG_BLK_SCRATCH_ADDR 0x80
#define CFG_BLK_SCRATCH_MASK GENMASK(31, 0)
#define EQDMA_CFG_GIC_ADDR 0xA0
#define CFG_GIC_RSVD_1_MASK GENMASK(31, 1)
#define CFG_GIC_GIC_IRQ_MASK BIT(0)
#define EQDMA_RAM_SBE_MSK_1_A_ADDR 0xE0
#define RAM_SBE_MSK_1_A_MASK GENMASK(31, 0)
#define EQDMA_RAM_SBE_STS_1_A_ADDR 0xE4
#define RAM_SBE_STS_1_A_RSVD_MASK GENMASK(31, 5)
#define RAM_SBE_STS_1_A_PFCH_CTXT_CAM_RAM_1_MASK BIT(4)
#define RAM_SBE_STS_1_A_PFCH_CTXT_CAM_RAM_0_MASK BIT(3)
#define RAM_SBE_STS_1_A_TAG_EVEN_RAM_MASK BIT(2)
#define RAM_SBE_STS_1_A_TAG_ODD_RAM_MASK BIT(1)
#define RAM_SBE_STS_1_A_RC_RRQ_EVEN_RAM_MASK BIT(0)
#define EQDMA_RAM_DBE_MSK_1_A_ADDR 0xE8
#define RAM_DBE_MSK_1_A_MASK GENMASK(31, 0)
#define EQDMA_RAM_DBE_STS_1_A_ADDR 0xEC
#define RAM_DBE_STS_1_A_RSVD_MASK GENMASK(31, 5)
#define RAM_DBE_STS_1_A_PFCH_CTXT_CAM_RAM_1_MASK BIT(4)
#define RAM_DBE_STS_1_A_PFCH_CTXT_CAM_RAM_0_MASK BIT(3)
#define RAM_DBE_STS_1_A_TAG_EVEN_RAM_MASK BIT(2)
#define RAM_DBE_STS_1_A_TAG_ODD_RAM_MASK BIT(1)
#define RAM_DBE_STS_1_A_RC_RRQ_EVEN_RAM_MASK BIT(0)
#define EQDMA_RAM_SBE_MSK_A_ADDR 0xF0
#define RAM_SBE_MSK_A_MASK GENMASK(31, 0)
#define EQDMA_RAM_SBE_STS_A_ADDR 0xF4
#define RAM_SBE_STS_A_RC_RRQ_ODD_RAM_MASK BIT(31)
#define RAM_SBE_STS_A_PEND_FIFO_RAM_MASK BIT(30)
#define RAM_SBE_STS_A_PFCH_LL_RAM_MASK BIT(29)
#define RAM_SBE_STS_A_WRB_CTXT_RAM_MASK BIT(28)
#define RAM_SBE_STS_A_PFCH_CTXT_RAM_MASK BIT(27)
#define RAM_SBE_STS_A_DESC_REQ_FIFO_RAM_MASK BIT(26)
#define RAM_SBE_STS_A_INT_CTXT_RAM_MASK BIT(25)
#define RAM_SBE_STS_A_WRB_COAL_DATA_RAM_MASK BIT(24)
#define RAM_SBE_STS_A_QID_FIFO_RAM_MASK BIT(23)
#define RAM_SBE_STS_A_TIMER_FIFO_RAM_MASK GENMASK(22, 19)
#define RAM_SBE_STS_A_MI_TL_SLV_FIFO_RAM_MASK BIT(18)
#define RAM_SBE_STS_A_DSC_CPLD_MASK BIT(17)
#define RAM_SBE_STS_A_DSC_CPLI_MASK BIT(16)
#define RAM_SBE_STS_A_DSC_SW_CTXT_MASK BIT(15)
#define RAM_SBE_STS_A_DSC_CRD_RCV_MASK BIT(14)
#define RAM_SBE_STS_A_DSC_HW_CTXT_MASK BIT(13)
#define RAM_SBE_STS_A_FUNC_MAP_MASK BIT(12)
#define RAM_SBE_STS_A_C2H_WR_BRG_DAT_MASK BIT(11)
#define RAM_SBE_STS_A_C2H_RD_BRG_DAT_MASK BIT(10)
#define RAM_SBE_STS_A_H2C_WR_BRG_DAT_MASK BIT(9)
#define RAM_SBE_STS_A_H2C_RD_BRG_DAT_MASK BIT(8)
#define RAM_SBE_STS_A_MI_C2H3_DAT_MASK BIT(7)
#define RAM_SBE_STS_A_MI_C2H2_DAT_MASK BIT(6)
#define RAM_SBE_STS_A_MI_C2H1_DAT_MASK BIT(5)
#define RAM_SBE_STS_A_MI_C2H0_DAT_MASK BIT(4)
#define RAM_SBE_STS_A_MI_H2C3_DAT_MASK BIT(3)
#define RAM_SBE_STS_A_MI_H2C2_DAT_MASK BIT(2)
#define RAM_SBE_STS_A_MI_H2C1_DAT_MASK BIT(1)
#define RAM_SBE_STS_A_MI_H2C0_DAT_MASK BIT(0)
#define EQDMA_RAM_DBE_MSK_A_ADDR 0xF8
#define RAM_DBE_MSK_A_MASK GENMASK(31, 0)
#define EQDMA_RAM_DBE_STS_A_ADDR 0xFC
#define RAM_DBE_STS_A_RC_RRQ_ODD_RAM_MASK BIT(31)
#define RAM_DBE_STS_A_PEND_FIFO_RAM_MASK BIT(30)
#define RAM_DBE_STS_A_PFCH_LL_RAM_MASK BIT(29)
#define RAM_DBE_STS_A_WRB_CTXT_RAM_MASK BIT(28)
#define RAM_DBE_STS_A_PFCH_CTXT_RAM_MASK BIT(27)
#define RAM_DBE_STS_A_DESC_REQ_FIFO_RAM_MASK BIT(26)
#define RAM_DBE_STS_A_INT_CTXT_RAM_MASK BIT(25)
#define RAM_DBE_STS_A_WRB_COAL_DATA_RAM_MASK BIT(24)
#define RAM_DBE_STS_A_QID_FIFO_RAM_MASK BIT(23)
#define RAM_DBE_STS_A_TIMER_FIFO_RAM_MASK GENMASK(22, 19)
#define RAM_DBE_STS_A_MI_TL_SLV_FIFO_RAM_MASK BIT(18)
#define RAM_DBE_STS_A_DSC_CPLD_MASK BIT(17)
#define RAM_DBE_STS_A_DSC_CPLI_MASK BIT(16)
#define RAM_DBE_STS_A_DSC_SW_CTXT_MASK BIT(15)
#define RAM_DBE_STS_A_DSC_CRD_RCV_MASK BIT(14)
#define RAM_DBE_STS_A_DSC_HW_CTXT_MASK BIT(13)
#define RAM_DBE_STS_A_FUNC_MAP_MASK BIT(12)
#define RAM_DBE_STS_A_C2H_WR_BRG_DAT_MASK BIT(11)
#define RAM_DBE_STS_A_C2H_RD_BRG_DAT_MASK BIT(10)
#define RAM_DBE_STS_A_H2C_WR_BRG_DAT_MASK BIT(9)
#define RAM_DBE_STS_A_H2C_RD_BRG_DAT_MASK BIT(8)
#define RAM_DBE_STS_A_MI_C2H3_DAT_MASK BIT(7)
#define RAM_DBE_STS_A_MI_C2H2_DAT_MASK BIT(6)
#define RAM_DBE_STS_A_MI_C2H1_DAT_MASK BIT(5)
#define RAM_DBE_STS_A_MI_C2H0_DAT_MASK BIT(4)
#define RAM_DBE_STS_A_MI_H2C3_DAT_MASK BIT(3)
#define RAM_DBE_STS_A_MI_H2C2_DAT_MASK BIT(2)
#define RAM_DBE_STS_A_MI_H2C1_DAT_MASK BIT(1)
#define RAM_DBE_STS_A_MI_H2C0_DAT_MASK BIT(0)
/*
 * GLBL2 register space: device identification, channel capability, and
 * hardware debug/observability registers.
 * Convention throughout this file: EQDMA_*_ADDR is the register byte offset
 * within the config BAR; the *_MASK macros that follow each ADDR describe
 * that register's bit fields (GENMASK(hi, lo) = contiguous field, BIT(n) =
 * single-bit flag). Fields named *RSVD* are reserved.
 */
/* Device identifier: magic in [31:8], version in [7:0] */
#define EQDMA_GLBL2_IDENTIFIER_ADDR                        0x100
#define GLBL2_IDENTIFIER_MASK                             GENMASK(31, 8)
#define GLBL2_IDENTIFIER_VERSION_MASK                     GENMASK(7, 0)
/* Channel capability registers: which ST/MM engines are instantiated/enabled */
#define EQDMA_GLBL2_CHANNEL_INST_ADDR                      0x114
#define GLBL2_CHANNEL_INST_RSVD_1_MASK                    GENMASK(31, 18)
#define GLBL2_CHANNEL_INST_C2H_ST_MASK                    BIT(17)
#define GLBL2_CHANNEL_INST_H2C_ST_MASK                    BIT(16)
#define GLBL2_CHANNEL_INST_RSVD_2_MASK                    GENMASK(15, 12)
#define GLBL2_CHANNEL_INST_C2H_ENG_MASK                   GENMASK(11, 8)
#define GLBL2_CHANNEL_INST_RSVD_3_MASK                    GENMASK(7, 4)
#define GLBL2_CHANNEL_INST_H2C_ENG_MASK                   GENMASK(3, 0)
#define EQDMA_GLBL2_CHANNEL_MDMA_ADDR                      0x118
#define GLBL2_CHANNEL_MDMA_RSVD_1_MASK                    GENMASK(31, 18)
#define GLBL2_CHANNEL_MDMA_C2H_ST_MASK                    BIT(17)
#define GLBL2_CHANNEL_MDMA_H2C_ST_MASK                    BIT(16)
#define GLBL2_CHANNEL_MDMA_RSVD_2_MASK                    GENMASK(15, 12)
#define GLBL2_CHANNEL_MDMA_C2H_ENG_MASK                   GENMASK(11, 8)
#define GLBL2_CHANNEL_MDMA_RSVD_3_MASK                    GENMASK(7, 4)
#define GLBL2_CHANNEL_MDMA_H2C_ENG_MASK                   GENMASK(3, 0)
#define EQDMA_GLBL2_CHANNEL_STRM_ADDR                      0x11C
#define GLBL2_CHANNEL_STRM_RSVD_1_MASK                    GENMASK(31, 18)
#define GLBL2_CHANNEL_STRM_C2H_ST_MASK                    BIT(17)
#define GLBL2_CHANNEL_STRM_H2C_ST_MASK                    BIT(16)
#define GLBL2_CHANNEL_STRM_RSVD_2_MASK                    GENMASK(15, 12)
#define GLBL2_CHANNEL_STRM_C2H_ENG_MASK                   GENMASK(11, 8)
#define GLBL2_CHANNEL_STRM_RSVD_3_MASK                    GENMASK(7, 4)
#define GLBL2_CHANNEL_STRM_H2C_ENG_MASK                   GENMASK(3, 0)
/* MULTIQ_MAX: maximum number of queues supported by this instance */
#define EQDMA_GLBL2_CHANNEL_CAP_ADDR                       0x120
#define GLBL2_CHANNEL_CAP_RSVD_1_MASK                     GENMASK(31, 12)
#define GLBL2_CHANNEL_CAP_MULTIQ_MAX_MASK                 GENMASK(11, 0)
#define EQDMA_GLBL2_CHANNEL_PASID_CAP_ADDR                 0x128
#define GLBL2_CHANNEL_PASID_CAP_RSVD_1_MASK               GENMASK(31, 2)
#define GLBL2_CHANNEL_PASID_CAP_BRIDGEEN_MASK             BIT(1)
#define GLBL2_CHANNEL_PASID_CAP_DMAEN_MASK                BIT(0)
#define EQDMA_GLBL2_SYSTEM_ID_ADDR                         0x130
#define GLBL2_SYSTEM_ID_RSVD_1_MASK                       GENMASK(31, 16)
#define GLBL2_SYSTEM_ID_MASK                              GENMASK(15, 0)
#define EQDMA_GLBL2_MISC_CAP_ADDR                          0x134
#define GLBL2_MISC_CAP_MASK                               GENMASK(31, 0)
/* PCIe request-path debug: credit availability and request FIFO state */
#define EQDMA_GLBL2_DBG_PCIE_RQ0_ADDR                      0x1B8
#define GLBL2_PCIE_RQ0_NPH_AVL_MASK                       GENMASK(31, 20)
#define GLBL2_PCIE_RQ0_RCB_AVL_MASK                       GENMASK(19, 9)
#define GLBL2_PCIE_RQ0_SLV_RD_CREDS_MASK                  GENMASK(8, 2)
#define GLBL2_PCIE_RQ0_TAG_EP_MASK                        GENMASK(1, 0)
#define EQDMA_GLBL2_DBG_PCIE_RQ1_ADDR                      0x1BC
#define GLBL2_PCIE_RQ1_RSVD_1_MASK                        GENMASK(31, 21)
#define GLBL2_PCIE_RQ1_TAG_FL_MASK                        GENMASK(20, 19)
#define GLBL2_PCIE_RQ1_WTLP_HEADER_FIFO_FL_MASK           BIT(18)
#define GLBL2_PCIE_RQ1_WTLP_HEADER_FIFO_EP_MASK           BIT(17)
#define GLBL2_PCIE_RQ1_RQ_FIFO_EP_MASK                    BIT(16)
#define GLBL2_PCIE_RQ1_RQ_FIFO_FL_MASK                    BIT(15)
#define GLBL2_PCIE_RQ1_TLPSM_MASK                         GENMASK(14, 12)
#define GLBL2_PCIE_RQ1_TLPSM512_MASK                      GENMASK(11, 9)
#define GLBL2_PCIE_RQ1_RREQ_RCB_OK_MASK                   BIT(8)
#define GLBL2_PCIE_RQ1_RREQ0_SLV_MASK                     BIT(7)
#define GLBL2_PCIE_RQ1_RREQ0_VLD_MASK                     BIT(6)
#define GLBL2_PCIE_RQ1_RREQ0_RDY_MASK                     BIT(5)
#define GLBL2_PCIE_RQ1_RREQ1_SLV_MASK                     BIT(4)
#define GLBL2_PCIE_RQ1_RREQ1_VLD_MASK                     BIT(3)
#define GLBL2_PCIE_RQ1_RREQ1_RDY_MASK                     BIT(2)
#define GLBL2_PCIE_RQ1_WTLP_REQ_MASK                      BIT(1)
#define GLBL2_PCIE_RQ1_WTLP_STRADDLE_MASK                 BIT(0)
/* AXI-MM write-channel debug: handshake signals and response counters */
#define EQDMA_GLBL2_DBG_AXIMM_WR0_ADDR                     0x1C0
#define GLBL2_AXIMM_WR0_RSVD_1_MASK                       GENMASK(31, 27)
#define GLBL2_AXIMM_WR0_WR_REQ_MASK                       BIT(26)
#define GLBL2_AXIMM_WR0_WR_CHN_MASK                       GENMASK(25, 23)
#define GLBL2_AXIMM_WR0_WTLP_DATA_FIFO_EP_MASK            BIT(22)
#define GLBL2_AXIMM_WR0_WPL_FIFO_EP_MASK                  BIT(21)
#define GLBL2_AXIMM_WR0_BRSP_CLAIM_CHN_MASK               GENMASK(20, 18)
#define GLBL2_AXIMM_WR0_WRREQ_CNT_MASK                    GENMASK(17, 12)
#define GLBL2_AXIMM_WR0_BID_MASK                          GENMASK(11, 9)
#define GLBL2_AXIMM_WR0_BVALID_MASK                       BIT(8)
#define GLBL2_AXIMM_WR0_BREADY_MASK                       BIT(7)
#define GLBL2_AXIMM_WR0_WVALID_MASK                       BIT(6)
#define GLBL2_AXIMM_WR0_WREADY_MASK                       BIT(5)
#define GLBL2_AXIMM_WR0_AWID_MASK                         GENMASK(4, 2)
#define GLBL2_AXIMM_WR0_AWVALID_MASK                      BIT(1)
#define GLBL2_AXIMM_WR0_AWREADY_MASK                      BIT(0)
#define EQDMA_GLBL2_DBG_AXIMM_WR1_ADDR                     0x1C4
#define GLBL2_AXIMM_WR1_RSVD_1_MASK                       GENMASK(31, 30)
#define GLBL2_AXIMM_WR1_BRSP_CNT4_MASK                    GENMASK(29, 24)
#define GLBL2_AXIMM_WR1_BRSP_CNT3_MASK                    GENMASK(23, 18)
#define GLBL2_AXIMM_WR1_BRSP_CNT2_MASK                    GENMASK(17, 12)
#define GLBL2_AXIMM_WR1_BRSP_CNT1_MASK                    GENMASK(11, 6)
#define GLBL2_AXIMM_WR1_BRSP_CNT0_MASK                    GENMASK(5, 0)
/* AXI-MM read-channel debug: handshake signals and response counters */
#define EQDMA_GLBL2_DBG_AXIMM_RD0_ADDR                     0x1C8
#define GLBL2_AXIMM_RD0_RSVD_1_MASK                       GENMASK(31, 23)
#define GLBL2_AXIMM_RD0_PND_CNT_MASK                      GENMASK(22, 17)
#define GLBL2_AXIMM_RD0_RD_REQ_MASK                       BIT(16)
#define GLBL2_AXIMM_RD0_RD_CHNL_MASK                      GENMASK(15, 13)
#define GLBL2_AXIMM_RD0_RRSP_CLAIM_CHNL_MASK              GENMASK(12, 10)
#define GLBL2_AXIMM_RD0_RID_MASK                          GENMASK(9, 7)
#define GLBL2_AXIMM_RD0_RVALID_MASK                       BIT(6)
#define GLBL2_AXIMM_RD0_RREADY_MASK                       BIT(5)
#define GLBL2_AXIMM_RD0_ARID_MASK                         GENMASK(4, 2)
#define GLBL2_AXIMM_RD0_ARVALID_MASK                      BIT(1)
#define GLBL2_AXIMM_RD0_ARREADY_MASK                      BIT(0)
#define EQDMA_GLBL2_DBG_AXIMM_RD1_ADDR                     0x1CC
#define GLBL2_AXIMM_RD1_RSVD_1_MASK                       GENMASK(31, 30)
#define GLBL2_AXIMM_RD1_RRSP_CNT4_MASK                    GENMASK(29, 24)
#define GLBL2_AXIMM_RD1_RRSP_CNT3_MASK                    GENMASK(23, 18)
#define GLBL2_AXIMM_RD1_RRSP_CNT2_MASK                    GENMASK(17, 12)
#define GLBL2_AXIMM_RD1_RRSP_CNT1_MASK                    GENMASK(11, 6)
#define GLBL2_AXIMM_RD1_RRSP_CNT0_MASK                    GENMASK(5, 0)
/* Fabric interface debug: segmentation/de-segmentation and FIFO status */
#define EQDMA_GLBL2_DBG_FAB0_ADDR                          0x1D0
#define GLBL2_FAB0_H2C_INB_CONV_IN_VLD_MASK               BIT(31)
#define GLBL2_FAB0_H2C_INB_CONV_IN_RDY_MASK               BIT(30)
#define GLBL2_FAB0_H2C_SEG_IN_VLD_MASK                    BIT(29)
#define GLBL2_FAB0_H2C_SEG_IN_RDY_MASK                    BIT(28)
#define GLBL2_FAB0_H2C_SEG_OUT_VLD_MASK                   GENMASK(27, 24)
#define GLBL2_FAB0_H2C_SEG_OUT_RDY_MASK                   BIT(23)
#define GLBL2_FAB0_H2C_MST_CRDT_STAT_MASK                 GENMASK(22, 16)
#define GLBL2_FAB0_C2H_SLV_AFIFO_FULL_MASK                BIT(15)
#define GLBL2_FAB0_C2H_SLV_AFIFO_EMPTY_MASK               BIT(14)
#define GLBL2_FAB0_C2H_DESEG_SEG_VLD_MASK                 GENMASK(13, 10)
#define GLBL2_FAB0_C2H_DESEG_SEG_RDY_MASK                 BIT(9)
#define GLBL2_FAB0_C2H_DESEG_OUT_VLD_MASK                 BIT(8)
#define GLBL2_FAB0_C2H_DESEG_OUT_RDY_MASK                 BIT(7)
#define GLBL2_FAB0_C2H_INB_DECONV_OUT_VLD_MASK            BIT(6)
#define GLBL2_FAB0_C2H_INB_DECONV_OUT_RDY_MASK            BIT(5)
#define GLBL2_FAB0_C2H_DSC_CRDT_AFIFO_FULL_MASK           BIT(4)
#define GLBL2_FAB0_C2H_DSC_CRDT_AFIFO_EMPTY_MASK          BIT(3)
#define GLBL2_FAB0_IRQ_IN_AFIFO_FULL_MASK                 BIT(2)
#define GLBL2_FAB0_IRQ_IN_AFIFO_EMPTY_MASK                BIT(1)
#define GLBL2_FAB0_IMM_CRD_AFIFO_EMPTY_MASK               BIT(0)
#define EQDMA_GLBL2_DBG_FAB1_ADDR                          0x1D4
#define GLBL2_FAB1_BYP_OUT_CRDT_STAT_MASK                 GENMASK(31, 25)
#define GLBL2_FAB1_TM_DSC_STS_CRDT_STAT_MASK              GENMASK(24, 18)
#define GLBL2_FAB1_C2H_CMN_AFIFO_FULL_MASK                BIT(17)
#define GLBL2_FAB1_C2H_CMN_AFIFO_EMPTY_MASK               BIT(16)
#define GLBL2_FAB1_RSVD_1_MASK                            GENMASK(15, 13)
#define GLBL2_FAB1_C2H_BYP_IN_AFIFO_FULL_MASK             BIT(12)
#define GLBL2_FAB1_RSVD_2_MASK                            GENMASK(11, 9)
#define GLBL2_FAB1_C2H_BYP_IN_AFIFO_EMPTY_MASK            BIT(8)
#define GLBL2_FAB1_RSVD_3_MASK                            GENMASK(7, 5)
#define GLBL2_FAB1_H2C_BYP_IN_AFIFO_FULL_MASK             BIT(4)
#define GLBL2_FAB1_RSVD_4_MASK                            GENMASK(3, 1)
#define GLBL2_FAB1_H2C_BYP_IN_AFIFO_EMPTY_MASK            BIT(0)
/* Debug match logic: pattern/mask registers for hardware event matching */
#define EQDMA_GLBL2_DBG_MATCH_SEL_ADDR                     0x1F4
#define GLBL2_MATCH_SEL_RSV_MASK                          GENMASK(31, 18)
#define GLBL2_MATCH_SEL_CSR_SEL_MASK                      GENMASK(17, 13)
#define GLBL2_MATCH_SEL_CSR_EN_MASK                       BIT(12)
#define GLBL2_MATCH_SEL_ROTATE1_MASK                      GENMASK(11, 10)
#define GLBL2_MATCH_SEL_ROTATE0_MASK                      GENMASK(9, 8)
#define GLBL2_MATCH_SEL_SEL_MASK                          GENMASK(7, 0)
#define EQDMA_GLBL2_DBG_MATCH_MSK_ADDR                     0x1F8
#define GLBL2_MATCH_MSK_MASK                              GENMASK(31, 0)
#define EQDMA_GLBL2_DBG_MATCH_PAT_ADDR                     0x1FC
#define GLBL2_MATCH_PAT_PATTERN_MASK                      GENMASK(31, 0)
/*
 * Global ring-size CSR table: 16 consecutive 32-bit registers (0x204..0x240,
 * indices 1..0x10) that hold the selectable descriptor-ring sizes. Queue
 * contexts reference entries of this table by index; only bits [15:0] of
 * each register carry the ring size.
 */
#define EQDMA_GLBL_RNG_SZ_1_ADDR                           0x204
#define GLBL_RNG_SZ_1_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_1_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_2_ADDR                           0x208
#define GLBL_RNG_SZ_2_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_2_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_3_ADDR                           0x20C
#define GLBL_RNG_SZ_3_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_3_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_4_ADDR                           0x210
#define GLBL_RNG_SZ_4_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_4_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_5_ADDR                           0x214
#define GLBL_RNG_SZ_5_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_5_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_6_ADDR                           0x218
#define GLBL_RNG_SZ_6_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_6_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_7_ADDR                           0x21C
#define GLBL_RNG_SZ_7_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_7_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_8_ADDR                           0x220
#define GLBL_RNG_SZ_8_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_8_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_9_ADDR                           0x224
#define GLBL_RNG_SZ_9_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_9_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_A_ADDR                           0x228
#define GLBL_RNG_SZ_A_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_A_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_B_ADDR                           0x22C
#define GLBL_RNG_SZ_B_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_B_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_C_ADDR                           0x230
#define GLBL_RNG_SZ_C_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_C_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_D_ADDR                           0x234
#define GLBL_RNG_SZ_D_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_D_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_E_ADDR                           0x238
#define GLBL_RNG_SZ_E_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_E_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_F_ADDR                           0x23C
#define GLBL_RNG_SZ_F_RSVD_1_MASK                         GENMASK(31, 16)
#define GLBL_RNG_SZ_F_RING_SIZE_MASK                      GENMASK(15, 0)
#define EQDMA_GLBL_RNG_SZ_10_ADDR                          0x240
#define GLBL_RNG_SZ_10_RSVD_1_MASK                        GENMASK(31, 16)
#define GLBL_RNG_SZ_10_RING_SIZE_MASK                     GENMASK(15, 0)
/*
 * Global error aggregation, descriptor-engine configuration/error/debug,
 * legacy-interrupt configuration, host profiles, and PCIe request-error
 * registers. Pattern used throughout: *_STAT = sticky status bits,
 * *_MASK/*_MSK ADDR = per-bit enable mask, *_LOG* = capture of the first
 * offending transaction.
 */
/* Top-level error status: one summary bit per error source block */
#define EQDMA_GLBL_ERR_STAT_ADDR                           0x248
#define GLBL_ERR_STAT_RSVD_1_MASK                         GENMASK(31, 18)
#define GLBL_ERR_STAT_ERR_FAB_MASK                        BIT(17)
#define GLBL_ERR_STAT_ERR_H2C_ST_MASK                     BIT(16)
#define GLBL_ERR_STAT_ERR_BDG_MASK                        BIT(15)
#define GLBL_ERR_STAT_IND_CTXT_CMD_ERR_MASK               GENMASK(14, 9)
#define GLBL_ERR_STAT_ERR_C2H_ST_MASK                     BIT(8)
#define GLBL_ERR_STAT_ERR_C2H_MM_1_MASK                   BIT(7)
#define GLBL_ERR_STAT_ERR_C2H_MM_0_MASK                   BIT(6)
#define GLBL_ERR_STAT_ERR_H2C_MM_1_MASK                   BIT(5)
#define GLBL_ERR_STAT_ERR_H2C_MM_0_MASK                   BIT(4)
#define GLBL_ERR_STAT_ERR_TRQ_MASK                        BIT(3)
#define GLBL_ERR_STAT_ERR_DSC_MASK                        BIT(2)
#define GLBL_ERR_STAT_ERR_RAM_DBE_MASK                    BIT(1)
#define GLBL_ERR_STAT_ERR_RAM_SBE_MASK                    BIT(0)
#define EQDMA_GLBL_ERR_MASK_ADDR                           0x24C
#define GLBL_ERR_MASK                                     GENMASK(31, 0)
/* Descriptor engine configuration (fetch size, writeback interval, etc.) */
#define EQDMA_GLBL_DSC_CFG_ADDR                            0x250
#define GLBL_DSC_CFG_RSVD_1_MASK                          GENMASK(31, 10)
#define GLBL_DSC_CFG_UNC_OVR_COR_MASK                     BIT(9)
#define GLBL_DSC_CFG_CTXT_FER_DIS_MASK                    BIT(8)
#define GLBL_DSC_CFG_RSVD_2_MASK                          GENMASK(7, 6)
#define GLBL_DSC_CFG_MAXFETCH_MASK                        GENMASK(5, 3)
#define GLBL_DSC_CFG_WB_ACC_INT_MASK                      GENMASK(2, 0)
/* Descriptor engine error status / mask / capture logs */
#define EQDMA_GLBL_DSC_ERR_STS_ADDR                        0x254
#define GLBL_DSC_ERR_STS_RSVD_1_MASK                      GENMASK(31, 26)
#define GLBL_DSC_ERR_STS_PORT_ID_MASK                     BIT(25)
#define GLBL_DSC_ERR_STS_SBE_MASK                         BIT(24)
#define GLBL_DSC_ERR_STS_DBE_MASK                         BIT(23)
#define GLBL_DSC_ERR_STS_RQ_CANCEL_MASK                   BIT(22)
#define GLBL_DSC_ERR_STS_DSC_MASK                         BIT(21)
#define GLBL_DSC_ERR_STS_DMA_MASK                         BIT(20)
#define GLBL_DSC_ERR_STS_FLR_CANCEL_MASK                  BIT(19)
#define GLBL_DSC_ERR_STS_RSVD_2_MASK                      GENMASK(18, 17)
#define GLBL_DSC_ERR_STS_DAT_POISON_MASK                  BIT(16)
#define GLBL_DSC_ERR_STS_TIMEOUT_MASK                     BIT(9)
#define GLBL_DSC_ERR_STS_FLR_MASK                         BIT(8)
#define GLBL_DSC_ERR_STS_TAG_MASK                         BIT(6)
#define GLBL_DSC_ERR_STS_ADDR_MASK                        BIT(5)
#define GLBL_DSC_ERR_STS_PARAM_MASK                       BIT(4)
#define GLBL_DSC_ERR_STS_BCNT_MASK                        BIT(3)
#define GLBL_DSC_ERR_STS_UR_CA_MASK                       BIT(2)
#define GLBL_DSC_ERR_STS_POISON_MASK                      BIT(1)
#define EQDMA_GLBL_DSC_ERR_MSK_ADDR                        0x258
#define GLBL_DSC_ERR_MSK_MASK                             GENMASK(31, 0)
#define EQDMA_GLBL_DSC_ERR_LOG0_ADDR                       0x25C
#define GLBL_DSC_ERR_LOG0_VALID_MASK                      BIT(31)
#define GLBL_DSC_ERR_LOG0_SEL_MASK                        BIT(30)
#define GLBL_DSC_ERR_LOG0_RSVD_1_MASK                     GENMASK(29, 13)
#define GLBL_DSC_ERR_LOG0_QID_MASK                        GENMASK(12, 0)
#define EQDMA_GLBL_DSC_ERR_LOG1_ADDR                       0x260
#define GLBL_DSC_ERR_LOG1_RSVD_1_MASK                     GENMASK(31, 28)
#define GLBL_DSC_ERR_LOG1_CIDX_MASK                       GENMASK(27, 12)
#define GLBL_DSC_ERR_LOG1_RSVD_2_MASK                     GENMASK(11, 9)
#define GLBL_DSC_ERR_LOG1_SUB_TYPE_MASK                   GENMASK(8, 5)
#define GLBL_DSC_ERR_LOG1_ERR_TYPE_MASK                   GENMASK(4, 0)
/* Target/CSR-access (TRQ) error status / mask / capture log */
#define EQDMA_GLBL_TRQ_ERR_STS_ADDR                        0x264
#define GLBL_TRQ_ERR_STS_RSVD_1_MASK                      GENMASK(31, 8)
#define GLBL_TRQ_ERR_STS_TCP_QSPC_TIMEOUT_MASK            BIT(7)
#define GLBL_TRQ_ERR_STS_RSVD_2_MASK                      BIT(6)
#define GLBL_TRQ_ERR_STS_QID_RANGE_MASK                   BIT(5)
#define GLBL_TRQ_ERR_STS_QSPC_UNMAPPED_MASK               BIT(4)
#define GLBL_TRQ_ERR_STS_TCP_CSR_TIMEOUT_MASK             BIT(3)
#define GLBL_TRQ_ERR_STS_RSVD_3_MASK                      BIT(2)
#define GLBL_TRQ_ERR_STS_VF_ACCESS_ERR_MASK               BIT(1)
#define GLBL_TRQ_ERR_STS_CSR_UNMAPPED_MASK                BIT(0)
#define EQDMA_GLBL_TRQ_ERR_MSK_ADDR                        0x268
#define GLBL_TRQ_ERR_MSK_MASK                             GENMASK(31, 0)
#define EQDMA_GLBL_TRQ_ERR_LOG_ADDR                        0x26C
#define GLBL_TRQ_ERR_LOG_SRC_MASK                         BIT(31)
#define GLBL_TRQ_ERR_LOG_TARGET_MASK                      GENMASK(30, 27)
#define GLBL_TRQ_ERR_LOG_FUNC_MASK                        GENMASK(26, 17)
#define GLBL_TRQ_ERR_LOG_ADDRESS_MASK                     GENMASK(16, 0)
/* Descriptor engine debug data/control */
#define EQDMA_GLBL_DSC_DBG_DAT0_ADDR                       0x270
#define GLBL_DSC_DAT0_RSVD_1_MASK                         GENMASK(31, 30)
#define GLBL_DSC_DAT0_CTXT_ARB_DIR_MASK                   BIT(29)
#define GLBL_DSC_DAT0_CTXT_ARB_QID_MASK                   GENMASK(28, 17)
#define GLBL_DSC_DAT0_CTXT_ARB_REQ_MASK                   GENMASK(16, 12)
#define GLBL_DSC_DAT0_IRQ_FIFO_FL_MASK                    BIT(11)
#define GLBL_DSC_DAT0_TMSTALL_MASK                        BIT(10)
#define GLBL_DSC_DAT0_RRQ_STALL_MASK                      GENMASK(9, 8)
#define GLBL_DSC_DAT0_RCP_FIFO_SPC_STALL_MASK             GENMASK(7, 6)
#define GLBL_DSC_DAT0_RRQ_FIFO_SPC_STALL_MASK             GENMASK(5, 4)
#define GLBL_DSC_DAT0_FAB_MRKR_RSP_STALL_MASK             GENMASK(3, 2)
#define GLBL_DSC_DAT0_DSC_OUT_STALL_MASK                  GENMASK(1, 0)
#define EQDMA_GLBL_DSC_DBG_DAT1_ADDR                       0x274
#define GLBL_DSC_DAT1_RSVD_1_MASK                         GENMASK(31, 28)
#define GLBL_DSC_DAT1_EVT_SPC_C2H_MASK                    GENMASK(27, 22)
#define GLBL_DSC_DAT1_EVT_SP_H2C_MASK                     GENMASK(21, 16)
#define GLBL_DSC_DAT1_DSC_SPC_C2H_MASK                    GENMASK(15, 8)
#define GLBL_DSC_DAT1_DSC_SPC_H2C_MASK                    GENMASK(7, 0)
#define EQDMA_GLBL_DSC_DBG_CTL_ADDR                        0x278
#define GLBL_DSC_CTL_RSVD_1_MASK                          GENMASK(31, 3)
#define GLBL_DSC_CTL_SELECT_MASK                          GENMASK(2, 0)
#define EQDMA_GLBL_DSC_ERR_LOG2_ADDR                       0x27c
#define GLBL_DSC_ERR_LOG2_OLD_PIDX_MASK                   GENMASK(31, 16)
#define GLBL_DSC_ERR_LOG2_NEW_PIDX_MASK                   GENMASK(15, 0)
/* Legacy (INTx) interrupt configuration */
#define EQDMA_GLBL_GLBL_INTERRUPT_CFG_ADDR                 0x2c4
#define GLBL_GLBL_INTERRUPT_CFG_RSVD_1_MASK               GENMASK(31, 2)
#define GLBL_GLBL_INTERRUPT_CFG_LGCY_INTR_PENDING_MASK    BIT(1)
#define GLBL_GLBL_INTERRUPT_CFG_EN_LGCY_INTR_MASK         BIT(0)
/* Virtual-channel / bridge host profile selection */
#define EQDMA_GLBL_VCH_HOST_PROFILE_ADDR                   0x2c8
#define GLBL_VCH_HOST_PROFILE_RSVD_1_MASK                 GENMASK(31, 28)
#define GLBL_VCH_HOST_PROFILE_2C_MM_MASK                  GENMASK(27, 24)
#define GLBL_VCH_HOST_PROFILE_2C_ST_MASK                  GENMASK(23, 20)
#define GLBL_VCH_HOST_PROFILE_VCH_DSC_MASK                GENMASK(19, 16)
#define GLBL_VCH_HOST_PROFILE_VCH_INT_MSG_MASK            GENMASK(15, 12)
#define GLBL_VCH_HOST_PROFILE_VCH_INT_AGGR_MASK           GENMASK(11, 8)
#define GLBL_VCH_HOST_PROFILE_VCH_CMPT_MASK               GENMASK(7, 4)
#define GLBL_VCH_HOST_PROFILE_VCH_C2H_PLD_MASK            GENMASK(3, 0)
#define EQDMA_GLBL_BRIDGE_HOST_PROFILE_ADDR                0x308
#define GLBL_BRIDGE_HOST_PROFILE_RSVD_1_MASK              GENMASK(31, 4)
#define GLBL_BRIDGE_HOST_PROFILE_BDGID_MASK               GENMASK(3, 0)
#define EQDMA_AXIMM_IRQ_DEST_ADDR_ADDR                     0x30c
#define AXIMM_IRQ_DEST_ADDR_ADDR_MASK                     GENMASK(31, 0)
#define EQDMA_FAB_ERR_LOG_ADDR                             0x314
#define FAB_ERR_LOG_RSVD_1_MASK                           GENMASK(31, 7)
#define FAB_ERR_LOG_SRC_MASK                              GENMASK(6, 0)
/* PCIe requester-completion (RC) error status / mask */
#define EQDMA_GLBL_REQ_ERR_STS_ADDR                        0x318
#define GLBL_REQ_ERR_STS_RSVD_1_MASK                      GENMASK(31, 11)
#define GLBL_REQ_ERR_STS_RC_DISCONTINUE_MASK              BIT(10)
#define GLBL_REQ_ERR_STS_RC_PRTY_MASK                     BIT(9)
#define GLBL_REQ_ERR_STS_RC_FLR_MASK                      BIT(8)
#define GLBL_REQ_ERR_STS_RC_TIMEOUT_MASK                  BIT(7)
#define GLBL_REQ_ERR_STS_RC_INV_BCNT_MASK                 BIT(6)
#define GLBL_REQ_ERR_STS_RC_INV_TAG_MASK                  BIT(5)
#define GLBL_REQ_ERR_STS_RC_START_ADDR_MISMCH_MASK        BIT(4)
#define GLBL_REQ_ERR_STS_RC_RID_TC_ATTR_MISMCH_MASK       BIT(3)
#define GLBL_REQ_ERR_STS_RC_NO_DATA_MASK                  BIT(2)
#define GLBL_REQ_ERR_STS_RC_UR_CA_CRS_MASK                BIT(1)
#define GLBL_REQ_ERR_STS_RC_POISONED_MASK                 BIT(0)
#define EQDMA_GLBL_REQ_ERR_MSK_ADDR                        0x31C
#define GLBL_REQ_ERR_MSK_MASK                             GENMASK(31, 0)
/*
 * Indirect context programming interface. Queue contexts are not memory
 * mapped directly: software loads DATA/MASK registers, then issues a command
 * through IND_CTXT_CMD (QID + operation + context selector) and polls the
 * BUSY bit until the hardware completes the access.
 */
#define EQDMA_IND_CTXT_DATA_ADDR                           0x804
#define IND_CTXT_DATA_DATA_MASK                           GENMASK(31, 0)
#define EQDMA_IND_CTXT_MASK_ADDR                           0x824
#define IND_CTXT_MASK                                     GENMASK(31, 0)
#define EQDMA_IND_CTXT_CMD_ADDR                            0x844
#define IND_CTXT_CMD_RSVD_1_MASK                          GENMASK(31, 20)
#define IND_CTXT_CMD_QID_MASK                             GENMASK(19, 7)
#define IND_CTXT_CMD_OP_MASK                              GENMASK(6, 5)
#define IND_CTXT_CMD_SEL_MASK                             GENMASK(4, 1)
/* Hardware sets BUSY while an indirect access is in flight; SW must poll */
#define IND_CTXT_CMD_BUSY_MASK                            BIT(0)
/*
 * C2H (card-to-host) streaming engine register space: global timer/counter
 * CSR tables, statistics counters, error status/mask registers, prefetch
 * and writeback-coalescing configuration, interrupt counters, and
 * completion-format registers.
 */
/* Timer and counter-threshold CSR tables (first entry of each array) */
#define EQDMA_C2H_TIMER_CNT_ADDR                           0xA00
#define C2H_TIMER_CNT_RSVD_1_MASK                         GENMASK(31, 16)
#define C2H_TIMER_CNT_MASK                                GENMASK(15, 0)
#define EQDMA_C2H_CNT_TH_ADDR                              0xA40
#define C2H_CNT_TH_RSVD_1_MASK                            GENMASK(31, 16)
/* NOTE(review): "THESHOLD" spelling is a latent typo, kept because renaming
 * the macro would break every existing reference */
#define C2H_CNT_TH_THESHOLD_CNT_MASK                      GENMASK(15, 0)
/* C2H datapath statistics counters (free-running, full 32-bit) */
#define EQDMA_C2H_STAT_S_AXIS_C2H_ACCEPTED_ADDR            0xA88
#define C2H_STAT_S_AXIS_C2H_ACCEPTED_MASK                 GENMASK(31, 0)
#define EQDMA_C2H_STAT_S_AXIS_WRB_ACCEPTED_ADDR            0xA8C
#define C2H_STAT_S_AXIS_WRB_ACCEPTED_MASK                 GENMASK(31, 0)
#define EQDMA_C2H_STAT_DESC_RSP_PKT_ACCEPTED_ADDR          0xA90
#define C2H_STAT_DESC_RSP_PKT_ACCEPTED_D_MASK             GENMASK(31, 0)
#define EQDMA_C2H_STAT_AXIS_PKG_CMP_ADDR                   0xA94
#define C2H_STAT_AXIS_PKG_CMP_MASK                        GENMASK(31, 0)
#define EQDMA_C2H_STAT_DESC_RSP_ACCEPTED_ADDR              0xA98
#define C2H_STAT_DESC_RSP_ACCEPTED_D_MASK                 GENMASK(31, 0)
#define EQDMA_C2H_STAT_DESC_RSP_CMP_ADDR                   0xA9C
#define C2H_STAT_DESC_RSP_CMP_D_MASK                      GENMASK(31, 0)
#define EQDMA_C2H_STAT_WRQ_OUT_ADDR                        0xAA0
#define C2H_STAT_WRQ_OUT_MASK                             GENMASK(31, 0)
#define EQDMA_C2H_STAT_WPL_REN_ACCEPTED_ADDR               0xAA4
#define C2H_STAT_WPL_REN_ACCEPTED_MASK                    GENMASK(31, 0)
#define EQDMA_C2H_STAT_TOTAL_WRQ_LEN_ADDR                  0xAA8
#define C2H_STAT_TOTAL_WRQ_LEN_MASK                       GENMASK(31, 0)
#define EQDMA_C2H_STAT_TOTAL_WPL_LEN_ADDR                  0xAAC
#define C2H_STAT_TOTAL_WPL_LEN_MASK                       GENMASK(31, 0)
/* Buffer-size CSR table (first entry).
 * NOTE(review): "_IZE_" is a latent typo for "_SIZE_", kept for ABI
 * compatibility with existing references */
#define EQDMA_C2H_BUF_SZ_ADDR                              0xAB0
#define C2H_BUF_SZ_IZE_MASK                               GENMASK(31, 0)
/* C2H error status (recoverable) and its per-bit enable mask */
#define EQDMA_C2H_ERR_STAT_ADDR                            0xAF0
#define C2H_ERR_STAT_RSVD_1_MASK                          GENMASK(31, 21)
#define C2H_ERR_STAT_WRB_PORT_ID_ERR_MASK                 BIT(20)
#define C2H_ERR_STAT_HDR_PAR_ERR_MASK                     BIT(19)
#define C2H_ERR_STAT_HDR_ECC_COR_ERR_MASK                 BIT(18)
#define C2H_ERR_STAT_HDR_ECC_UNC_ERR_MASK                 BIT(17)
#define C2H_ERR_STAT_AVL_RING_DSC_ERR_MASK                BIT(16)
#define C2H_ERR_STAT_WRB_PRTY_ERR_MASK                    BIT(15)
#define C2H_ERR_STAT_WRB_CIDX_ERR_MASK                    BIT(14)
#define C2H_ERR_STAT_WRB_QFULL_ERR_MASK                   BIT(13)
#define C2H_ERR_STAT_WRB_INV_Q_ERR_MASK                   BIT(12)
#define C2H_ERR_STAT_RSVD_2_MASK                          BIT(11)
#define C2H_ERR_STAT_PORT_ID_CTXT_MISMATCH_MASK           BIT(10)
#define C2H_ERR_STAT_ERR_DESC_CNT_MASK                    BIT(9)
#define C2H_ERR_STAT_RSVD_3_MASK                          BIT(8)
#define C2H_ERR_STAT_MSI_INT_FAIL_MASK                    BIT(7)
#define C2H_ERR_STAT_ENG_WPL_DATA_PAR_ERR_MASK            BIT(6)
#define C2H_ERR_STAT_RSVD_4_MASK                          BIT(5)
#define C2H_ERR_STAT_DESC_RSP_ERR_MASK                    BIT(4)
#define C2H_ERR_STAT_QID_MISMATCH_MASK                    BIT(3)
#define C2H_ERR_STAT_SH_CMPT_DSC_ERR_MASK                 BIT(2)
#define C2H_ERR_STAT_LEN_MISMATCH_MASK                    BIT(1)
#define C2H_ERR_STAT_MTY_MISMATCH_MASK                    BIT(0)
#define EQDMA_C2H_ERR_MASK_ADDR                            0xAF4
#define C2H_ERR_EN_MASK                                   GENMASK(31, 0)
/* C2H fatal error status (mostly uncorrectable RAM read DBEs) */
#define EQDMA_C2H_FATAL_ERR_STAT_ADDR                      0xAF8
#define C2H_FATAL_ERR_STAT_RSVD_1_MASK                    GENMASK(31, 21)
#define C2H_FATAL_ERR_STAT_HDR_ECC_UNC_ERR_MASK           BIT(20)
#define C2H_FATAL_ERR_STAT_AVL_RING_FIFO_RAM_RDBE_MASK    BIT(19)
#define C2H_FATAL_ERR_STAT_WPL_DATA_PAR_ERR_MASK          BIT(18)
#define C2H_FATAL_ERR_STAT_PLD_FIFO_RAM_RDBE_MASK         BIT(17)
#define C2H_FATAL_ERR_STAT_QID_FIFO_RAM_RDBE_MASK         BIT(16)
#define C2H_FATAL_ERR_STAT_CMPT_FIFO_RAM_RDBE_MASK        BIT(15)
#define C2H_FATAL_ERR_STAT_WRB_COAL_DATA_RAM_RDBE_MASK    BIT(14)
#define C2H_FATAL_ERR_STAT_RESERVED2_MASK                 BIT(13)
#define C2H_FATAL_ERR_STAT_INT_CTXT_RAM_RDBE_MASK         BIT(12)
#define C2H_FATAL_ERR_STAT_DESC_REQ_FIFO_RAM_RDBE_MASK    BIT(11)
#define C2H_FATAL_ERR_STAT_PFCH_CTXT_RAM_RDBE_MASK        BIT(10)
#define C2H_FATAL_ERR_STAT_WRB_CTXT_RAM_RDBE_MASK         BIT(9)
#define C2H_FATAL_ERR_STAT_PFCH_LL_RAM_RDBE_MASK          BIT(8)
#define C2H_FATAL_ERR_STAT_TIMER_FIFO_RAM_RDBE_MASK       GENMASK(7, 4)
#define C2H_FATAL_ERR_STAT_QID_MISMATCH_MASK              BIT(3)
#define C2H_FATAL_ERR_STAT_RESERVED1_MASK                 BIT(2)
#define C2H_FATAL_ERR_STAT_LEN_MISMATCH_MASK              BIT(1)
#define C2H_FATAL_ERR_STAT_MTY_MISMATCH_MASK              BIT(0)
#define EQDMA_C2H_FATAL_ERR_MASK_ADDR                      0xAFC
#define C2H_FATAL_ERR_C2HEN_MASK                          GENMASK(31, 0)
#define EQDMA_C2H_FATAL_ERR_ENABLE_ADDR                    0xB00
#define C2H_FATAL_ERR_ENABLE_RSVD_1_MASK                  GENMASK(31, 2)
#define C2H_FATAL_ERR_ENABLE_WPL_PAR_INV_MASK             BIT(1)
#define C2H_FATAL_ERR_ENABLE_WRQ_DIS_MASK                 BIT(0)
/* Global error-interrupt routing (vector/function/arm/coalesce) */
#define EQDMA_GLBL_ERR_INT_ADDR                            0xB04
#define GLBL_ERR_INT_RSVD_1_MASK                          GENMASK(31, 30)
#define GLBL_ERR_INT_HOST_ID_MASK                         GENMASK(29, 26)
#define GLBL_ERR_INT_DIS_INTR_ON_VF_MASK                  BIT(25)
#define GLBL_ERR_INT_ARM_MASK                             BIT(24)
#define GLBL_ERR_INT_EN_COAL_MASK                         BIT(23)
#define GLBL_ERR_INT_VEC_MASK                             GENMASK(22, 12)
#define GLBL_ERR_INT_FUNC_MASK                            GENMASK(11, 0)
/* C2H prefetch engine configuration (thresholds, credits) */
#define EQDMA_C2H_PFCH_CFG_ADDR                            0xB08
#define C2H_PFCH_CFG_EVTFL_TH_MASK                        GENMASK(31, 16)
#define C2H_PFCH_CFG_FL_TH_MASK                           GENMASK(15, 0)
#define EQDMA_C2H_PFCH_CFG_1_ADDR                          0xA80
#define C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK                   GENMASK(31, 16)
#define C2H_PFCH_CFG_1_QCNT_MASK                          GENMASK(15, 0)
#define EQDMA_C2H_PFCH_CFG_2_ADDR                          0xA84
#define C2H_PFCH_CFG_2_FENCE_MASK                         BIT(31)
#define C2H_PFCH_CFG_2_RSVD_MASK                          GENMASK(30, 29)
#define C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_MASK              BIT(28)
#define C2H_PFCH_CFG_2_LL_SZ_TH_MASK                      GENMASK(27, 12)
#define C2H_PFCH_CFG_2_VAR_DESC_NUM_MASK                  GENMASK(11, 6)
#define C2H_PFCH_CFG_2_NUM_MASK                           GENMASK(5, 0)
#define EQDMA_C2H_INT_TIMER_TICK_ADDR                      0xB0C
#define C2H_INT_TIMER_TICK_MASK                           GENMASK(31, 0)
#define EQDMA_C2H_STAT_DESC_RSP_DROP_ACCEPTED_ADDR         0xB10
#define C2H_STAT_DESC_RSP_DROP_ACCEPTED_D_MASK            GENMASK(31, 0)
#define EQDMA_C2H_STAT_DESC_RSP_ERR_ACCEPTED_ADDR          0xB14
#define C2H_STAT_DESC_RSP_ERR_ACCEPTED_D_MASK             GENMASK(31, 0)
#define EQDMA_C2H_STAT_DESC_REQ_ADDR                       0xB18
#define C2H_STAT_DESC_REQ_MASK                            GENMASK(31, 0)
/* DMA-engine internal debug snapshots (FIFO counts, state machines) */
#define EQDMA_C2H_STAT_DBG_DMA_ENG_0_ADDR                  0xB1C
#define C2H_STAT_DMA_ENG_0_S_AXIS_C2H_TVALID_MASK         BIT(31)
#define C2H_STAT_DMA_ENG_0_S_AXIS_C2H_TREADY_MASK         BIT(30)
#define C2H_STAT_DMA_ENG_0_S_AXIS_WRB_TVALID_MASK         GENMASK(29, 27)
#define C2H_STAT_DMA_ENG_0_S_AXIS_WRB_TREADY_MASK         GENMASK(26, 24)
#define C2H_STAT_DMA_ENG_0_PLD_FIFO_IN_RDY_MASK           BIT(23)
#define C2H_STAT_DMA_ENG_0_QID_FIFO_IN_RDY_MASK           BIT(22)
#define C2H_STAT_DMA_ENG_0_ARB_FIFO_OUT_VLD_MASK          BIT(21)
#define C2H_STAT_DMA_ENG_0_ARB_FIFO_OUT_QID_MASK          GENMASK(20, 9)
#define C2H_STAT_DMA_ENG_0_WRB_FIFO_IN_RDY_MASK           BIT(8)
#define C2H_STAT_DMA_ENG_0_WRB_FIFO_OUT_CNT_MASK          GENMASK(7, 5)
#define C2H_STAT_DMA_ENG_0_WRB_SM_CS_MASK                 BIT(4)
#define C2H_STAT_DMA_ENG_0_MAIN_SM_CS_MASK                GENMASK(3, 0)
#define EQDMA_C2H_STAT_DBG_DMA_ENG_1_ADDR                  0xB20
#define C2H_STAT_DMA_ENG_1_RSVD_1_MASK                    GENMASK(31, 29)
#define C2H_STAT_DMA_ENG_1_QID_FIFO_OUT_CNT_MASK          GENMASK(28, 18)
#define C2H_STAT_DMA_ENG_1_PLD_FIFO_OUT_CNT_MASK          GENMASK(17, 7)
#define C2H_STAT_DMA_ENG_1_PLD_ST_FIFO_CNT_MASK           GENMASK(6, 0)
#define EQDMA_C2H_STAT_DBG_DMA_ENG_2_ADDR                  0xB24
#define C2H_STAT_DMA_ENG_2_RSVD_1_MASK                    GENMASK(31, 29)
#define C2H_STAT_DMA_ENG_2_QID_FIFO_OUT_CNT_MASK          GENMASK(28, 18)
#define C2H_STAT_DMA_ENG_2_PLD_FIFO_OUT_CNT_MASK          GENMASK(17, 7)
#define C2H_STAT_DMA_ENG_2_PLD_ST_FIFO_CNT_MASK           GENMASK(6, 0)
#define EQDMA_C2H_STAT_DBG_DMA_ENG_3_ADDR                  0xB28
#define C2H_STAT_DMA_ENG_3_RSVD_1_MASK                    GENMASK(31, 24)
#define C2H_STAT_DMA_ENG_3_WRQ_FIFO_OUT_CNT_MASK          GENMASK(23, 19)
#define C2H_STAT_DMA_ENG_3_QID_FIFO_OUT_VLD_MASK          BIT(18)
#define C2H_STAT_DMA_ENG_3_PLD_FIFO_OUT_VLD_MASK          BIT(17)
#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_VLD_MASK       BIT(16)
#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_EOP_MASK  BIT(15)
#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_AVL_IDX_ENABLE_MASK BIT(14)
#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_DROP_MASK BIT(13)
#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_ERR_MASK  BIT(12)
#define C2H_STAT_DMA_ENG_3_DESC_CNT_FIFO_IN_RDY_MASK      BIT(11)
#define C2H_STAT_DMA_ENG_3_DESC_RSP_FIFO_IN_RDY_MASK      BIT(10)
#define C2H_STAT_DMA_ENG_3_PLD_PKT_ID_LARGER_0_MASK       BIT(9)
#define C2H_STAT_DMA_ENG_3_WRQ_VLD_MASK                   BIT(8)
#define C2H_STAT_DMA_ENG_3_WRQ_RDY_MASK                   BIT(7)
#define C2H_STAT_DMA_ENG_3_WRQ_FIFO_OUT_RDY_MASK          BIT(6)
#define C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_DROP_MASK  BIT(5)
#define C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_ERR_MASK   BIT(4)
#define C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_MARKER_MASK BIT(3)
#define C2H_STAT_DMA_ENG_3_WRQ_PACKET_PRE_EOR_MASK        BIT(2)
#define C2H_STAT_DMA_ENG_3_WCP_FIFO_IN_RDY_MASK           BIT(1)
#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_IN_RDY_MASK        BIT(0)
/* Prefetch error-context capture and first-error tracking */
#define EQDMA_C2H_DBG_PFCH_ERR_CTXT_ADDR                   0xB2C
#define C2H_PFCH_ERR_CTXT_RSVD_1_MASK                     GENMASK(31, 14)
#define C2H_PFCH_ERR_CTXT_ERR_STAT_MASK                   BIT(13)
#define C2H_PFCH_ERR_CTXT_CMD_WR_MASK                     BIT(12)
#define C2H_PFCH_ERR_CTXT_QID_MASK                        GENMASK(11, 1)
#define C2H_PFCH_ERR_CTXT_DONE_MASK                       BIT(0)
#define EQDMA_C2H_FIRST_ERR_QID_ADDR                       0xB30
#define C2H_FIRST_ERR_QID_RSVD_1_MASK                     GENMASK(31, 21)
#define C2H_FIRST_ERR_QID_ERR_TYPE_MASK                   GENMASK(20, 16)
#define C2H_FIRST_ERR_QID_RSVD_MASK                       GENMASK(15, 13)
#define C2H_FIRST_ERR_QID_QID_MASK                        GENMASK(12, 0)
/* Writeback (completion) statistics counters */
#define EQDMA_STAT_NUM_WRB_IN_ADDR                         0xB34
#define STAT_NUM_WRB_IN_RSVD_1_MASK                       GENMASK(31, 16)
#define STAT_NUM_WRB_IN_WRB_CNT_MASK                      GENMASK(15, 0)
#define EQDMA_STAT_NUM_WRB_OUT_ADDR                        0xB38
#define STAT_NUM_WRB_OUT_RSVD_1_MASK                      GENMASK(31, 16)
#define STAT_NUM_WRB_OUT_WRB_CNT_MASK                     GENMASK(15, 0)
#define EQDMA_STAT_NUM_WRB_DRP_ADDR                        0xB3C
#define STAT_NUM_WRB_DRP_RSVD_1_MASK                      GENMASK(31, 16)
#define STAT_NUM_WRB_DRP_WRB_CNT_MASK                     GENMASK(15, 0)
#define EQDMA_STAT_NUM_STAT_DESC_OUT_ADDR                  0xB40
#define STAT_NUM_STAT_DESC_OUT_RSVD_1_MASK                GENMASK(31, 16)
#define STAT_NUM_STAT_DESC_OUT_CNT_MASK                   GENMASK(15, 0)
#define EQDMA_STAT_NUM_DSC_CRDT_SENT_ADDR                  0xB44
#define STAT_NUM_DSC_CRDT_SENT_RSVD_1_MASK                GENMASK(31, 16)
#define STAT_NUM_DSC_CRDT_SENT_CNT_MASK                   GENMASK(15, 0)
#define EQDMA_STAT_NUM_FCH_DSC_RCVD_ADDR                   0xB48
#define STAT_NUM_FCH_DSC_RCVD_RSVD_1_MASK                 GENMASK(31, 16)
#define STAT_NUM_FCH_DSC_RCVD_DSC_CNT_MASK                GENMASK(15, 0)
#define EQDMA_STAT_NUM_BYP_DSC_RCVD_ADDR                   0xB4C
#define STAT_NUM_BYP_DSC_RCVD_RSVD_1_MASK                 GENMASK(31, 11)
#define STAT_NUM_BYP_DSC_RCVD_DSC_CNT_MASK                GENMASK(10, 0)
/* Writeback-coalescing configuration (buffer size, tick, global flush) */
#define EQDMA_C2H_WRB_COAL_CFG_ADDR                        0xB50
#define C2H_WRB_COAL_CFG_MAX_BUF_SZ_MASK                  GENMASK(31, 26)
#define C2H_WRB_COAL_CFG_TICK_VAL_MASK                    GENMASK(25, 14)
#define C2H_WRB_COAL_CFG_TICK_CNT_MASK                    GENMASK(13, 2)
#define C2H_WRB_COAL_CFG_SET_GLB_FLUSH_MASK               BIT(1)
#define C2H_WRB_COAL_CFG_DONE_GLB_FLUSH_MASK              BIT(0)
/* Interrupt request / MSI-X outcome counters per source */
#define EQDMA_C2H_INTR_H2C_REQ_ADDR                        0xB54
#define C2H_INTR_H2C_REQ_RSVD_1_MASK                      GENMASK(31, 18)
#define C2H_INTR_H2C_REQ_CNT_MASK                         GENMASK(17, 0)
#define EQDMA_C2H_INTR_C2H_MM_REQ_ADDR                     0xB58
#define C2H_INTR_C2H_MM_REQ_RSVD_1_MASK                   GENMASK(31, 18)
#define C2H_INTR_C2H_MM_REQ_CNT_MASK                      GENMASK(17, 0)
#define EQDMA_C2H_INTR_ERR_INT_REQ_ADDR                    0xB5C
#define C2H_INTR_ERR_INT_REQ_RSVD_1_MASK                  GENMASK(31, 18)
#define C2H_INTR_ERR_INT_REQ_CNT_MASK                     GENMASK(17, 0)
#define EQDMA_C2H_INTR_C2H_ST_REQ_ADDR                     0xB60
#define C2H_INTR_C2H_ST_REQ_RSVD_1_MASK                   GENMASK(31, 18)
#define C2H_INTR_C2H_ST_REQ_CNT_MASK                      GENMASK(17, 0)
#define EQDMA_C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK_ADDR        0xB64
#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK_RSVD_1_MASK      GENMASK(31, 18)
#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK_CNT_MASK         GENMASK(17, 0)
#define EQDMA_C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL_ADDR       0xB68
#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL_RSVD_1_MASK     GENMASK(31, 18)
#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL_CNT_MASK        GENMASK(17, 0)
#define EQDMA_C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX_ADDR    0xB6C
#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX_RSVD_1_MASK  GENMASK(31, 18)
#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX_CNT_MASK     GENMASK(17, 0)
#define EQDMA_C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL_ADDR      0xB70
#define C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL_RSVD_1_MASK    GENMASK(31, 18)
#define C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL_CNT_MASK       GENMASK(17, 0)
#define EQDMA_C2H_INTR_C2H_ST_MSIX_ACK_ADDR                0xB74
#define C2H_INTR_C2H_ST_MSIX_ACK_RSVD_1_MASK              GENMASK(31, 18)
#define C2H_INTR_C2H_ST_MSIX_ACK_CNT_MASK                 GENMASK(17, 0)
#define EQDMA_C2H_INTR_C2H_ST_MSIX_FAIL_ADDR               0xB78
#define C2H_INTR_C2H_ST_MSIX_FAIL_RSVD_1_MASK             GENMASK(31, 18)
#define C2H_INTR_C2H_ST_MSIX_FAIL_CNT_MASK                GENMASK(17, 0)
#define EQDMA_C2H_INTR_C2H_ST_NO_MSIX_ADDR                 0xB7C
#define C2H_INTR_C2H_ST_NO_MSIX_RSVD_1_MASK               GENMASK(31, 18)
#define C2H_INTR_C2H_ST_NO_MSIX_CNT_MASK                  GENMASK(17, 0)
#define EQDMA_C2H_INTR_C2H_ST_CTXT_INVAL_ADDR              0xB80
#define C2H_INTR_C2H_ST_CTXT_INVAL_RSVD_1_MASK            GENMASK(31, 18)
#define C2H_INTR_C2H_ST_CTXT_INVAL_CNT_MASK               GENMASK(17, 0)
#define EQDMA_C2H_STAT_WR_CMP_ADDR                         0xB84
#define C2H_STAT_WR_CMP_RSVD_1_MASK                       GENMASK(31, 18)
#define C2H_STAT_WR_CMP_CNT_MASK                          GENMASK(17, 0)
#define EQDMA_C2H_STAT_DBG_DMA_ENG_4_ADDR                  0xB88
#define C2H_STAT_DMA_ENG_4_RSVD_1_MASK                    GENMASK(31, 24)
#define C2H_STAT_DMA_ENG_4_WRQ_FIFO_OUT_CNT_MASK          GENMASK(23, 19)
#define C2H_STAT_DMA_ENG_4_QID_FIFO_OUT_VLD_MASK          BIT(18)
#define C2H_STAT_DMA_ENG_4_PLD_FIFO_OUT_VLD_MASK          BIT(17)
#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_VLD_MASK       BIT(16)
#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_EOP_MASK  BIT(15)
#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_AVL_IDX_ENABLE_MASK BIT(14)
#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_DROP_MASK BIT(13)
#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_ERR_MASK  BIT(12)
#define C2H_STAT_DMA_ENG_4_DESC_CNT_FIFO_IN_RDY_MASK      BIT(11)
#define C2H_STAT_DMA_ENG_4_DESC_RSP_FIFO_IN_RDY_MASK      BIT(10)
#define C2H_STAT_DMA_ENG_4_PLD_PKT_ID_LARGER_0_MASK       BIT(9)
#define C2H_STAT_DMA_ENG_4_WRQ_VLD_MASK                   BIT(8)
#define C2H_STAT_DMA_ENG_4_WRQ_RDY_MASK                   BIT(7)
#define C2H_STAT_DMA_ENG_4_WRQ_FIFO_OUT_RDY_MASK          BIT(6)
#define C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_DROP_MASK  BIT(5)
#define C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_ERR_MASK   BIT(4)
#define C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_MARKER_MASK BIT(3)
#define C2H_STAT_DMA_ENG_4_WRQ_PACKET_PRE_EOR_MASK        BIT(2)
#define C2H_STAT_DMA_ENG_4_WCP_FIFO_IN_RDY_MASK           BIT(1)
#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_IN_RDY_MASK        BIT(0)
#define EQDMA_C2H_STAT_DBG_DMA_ENG_5_ADDR                  0xB8C
#define C2H_STAT_DMA_ENG_5_RSVD_1_MASK                    GENMASK(31, 30)
#define C2H_STAT_DMA_ENG_5_WRB_SM_VIRT_CH_MASK            BIT(29)
#define C2H_STAT_DMA_ENG_5_WRB_FIFO_IN_REQ_MASK           GENMASK(28, 24)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_CNT_MASK          GENMASK(23, 22)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_LEN_MASK     GENMASK(21, 6)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_VIRT_CH_MASK BIT(5)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_VAR_DESC_MASK BIT(4)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_DROP_REQ_MASK BIT(3)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_NUM_BUF_OV_MASK BIT(2)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_MARKER_MASK  BIT(1)
#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_HAS_CMPT_MASK BIT(0)
/* Prefetch debug access: indirect QID/target select plus data window */
#define EQDMA_C2H_DBG_PFCH_QID_ADDR                        0xB90
#define C2H_PFCH_QID_RSVD_1_MASK                          GENMASK(31, 16)
#define C2H_PFCH_QID_ERR_CTXT_MASK                        BIT(15)
#define C2H_PFCH_QID_TARGET_MASK                          GENMASK(14, 12)
#define C2H_PFCH_QID_QID_OR_TAG_MASK                      GENMASK(11, 0)
#define EQDMA_C2H_DBG_PFCH_ADDR                            0xB94
#define C2H_PFCH_DATA_MASK                                GENMASK(31, 0)
#define EQDMA_C2H_INT_DBG_ADDR                             0xB98
#define C2H_INT_RSVD_1_MASK                               GENMASK(31, 8)
#define C2H_INT_INT_COAL_SM_MASK                          GENMASK(7, 4)
#define C2H_INT_INT_SM_MASK                               GENMASK(3, 0)
#define EQDMA_C2H_STAT_IMM_ACCEPTED_ADDR                   0xB9C
#define C2H_STAT_IMM_ACCEPTED_RSVD_1_MASK                 GENMASK(31, 18)
#define C2H_STAT_IMM_ACCEPTED_CNT_MASK                    GENMASK(17, 0)
#define EQDMA_C2H_STAT_MARKER_ACCEPTED_ADDR                0xBA0
#define C2H_STAT_MARKER_ACCEPTED_RSVD_1_MASK              GENMASK(31, 18)
#define C2H_STAT_MARKER_ACCEPTED_CNT_MASK                 GENMASK(17, 0)
#define EQDMA_C2H_STAT_DISABLE_CMP_ACCEPTED_ADDR           0xBA4
#define C2H_STAT_DISABLE_CMP_ACCEPTED_RSVD_1_MASK         GENMASK(31, 18)
#define C2H_STAT_DISABLE_CMP_ACCEPTED_CNT_MASK            GENMASK(17, 0)
#define EQDMA_C2H_PLD_FIFO_CRDT_CNT_ADDR                   0xBA8
#define C2H_PLD_FIFO_CRDT_CNT_RSVD_1_MASK                 GENMASK(31, 18)
#define C2H_PLD_FIFO_CRDT_CNT_CNT_MASK                    GENMASK(17, 0)
#define EQDMA_C2H_INTR_DYN_REQ_ADDR                        0xBAC
#define C2H_INTR_DYN_REQ_RSVD_1_MASK                      GENMASK(31, 18)
#define C2H_INTR_DYN_REQ_CNT_MASK                         GENMASK(17, 0)
#define EQDMA_C2H_INTR_DYN_MISC_ADDR                       0xBB0
#define C2H_INTR_DYN_MISC_RSVD_1_MASK                     GENMASK(31, 18)
#define C2H_INTR_DYN_MISC_CNT_MASK                        GENMASK(17, 0)
/* Drop counters by cause */
#define EQDMA_C2H_DROP_LEN_MISMATCH_ADDR                   0xBB4
#define C2H_DROP_LEN_MISMATCH_RSVD_1_MASK                 GENMASK(31, 18)
#define C2H_DROP_LEN_MISMATCH_CNT_MASK                    GENMASK(17, 0)
#define EQDMA_C2H_DROP_DESC_RSP_LEN_ADDR                   0xBB8
#define C2H_DROP_DESC_RSP_LEN_RSVD_1_MASK                 GENMASK(31, 18)
#define C2H_DROP_DESC_RSP_LEN_CNT_MASK                    GENMASK(17, 0)
#define EQDMA_C2H_DROP_QID_FIFO_LEN_ADDR                   0xBBC
#define C2H_DROP_QID_FIFO_LEN_RSVD_1_MASK                 GENMASK(31, 18)
#define C2H_DROP_QID_FIFO_LEN_CNT_MASK                    GENMASK(17, 0)
#define EQDMA_C2H_DROP_PLD_CNT_ADDR                        0xBC0
#define C2H_DROP_PLD_CNT_RSVD_1_MASK                      GENMASK(31, 18)
#define C2H_DROP_PLD_CNT_CNT_MASK                         GENMASK(17, 0)
/* Completion format registers 0..6: bit locations of the color and
 * desc-error fields within each user-defined completion entry format */
#define EQDMA_C2H_CMPT_FORMAT_0_ADDR                       0xBC4
#define C2H_CMPT_FORMAT_0_DESC_ERR_LOC_MASK               GENMASK(31, 16)
#define C2H_CMPT_FORMAT_0_COLOR_LOC_MASK                  GENMASK(15, 0)
#define EQDMA_C2H_CMPT_FORMAT_1_ADDR                       0xBC8
#define C2H_CMPT_FORMAT_1_DESC_ERR_LOC_MASK               GENMASK(31, 16)
#define C2H_CMPT_FORMAT_1_COLOR_LOC_MASK                  GENMASK(15, 0)
#define EQDMA_C2H_CMPT_FORMAT_2_ADDR                       0xBCC
#define C2H_CMPT_FORMAT_2_DESC_ERR_LOC_MASK               GENMASK(31, 16)
#define C2H_CMPT_FORMAT_2_COLOR_LOC_MASK                  GENMASK(15, 0)
#define EQDMA_C2H_CMPT_FORMAT_3_ADDR                       0xBD0
#define C2H_CMPT_FORMAT_3_DESC_ERR_LOC_MASK               GENMASK(31, 16)
#define C2H_CMPT_FORMAT_3_COLOR_LOC_MASK                  GENMASK(15, 0)
#define EQDMA_C2H_CMPT_FORMAT_4_ADDR                       0xBD4
#define C2H_CMPT_FORMAT_4_DESC_ERR_LOC_MASK               GENMASK(31, 16)
#define C2H_CMPT_FORMAT_4_COLOR_LOC_MASK                  GENMASK(15, 0)
#define EQDMA_C2H_CMPT_FORMAT_5_ADDR                       0xBD8
#define C2H_CMPT_FORMAT_5_DESC_ERR_LOC_MASK               GENMASK(31, 16)
#define C2H_CMPT_FORMAT_5_COLOR_LOC_MASK                  GENMASK(15, 0)
#define EQDMA_C2H_CMPT_FORMAT_6_ADDR                       0xBDC
#define C2H_CMPT_FORMAT_6_DESC_ERR_LOC_MASK               GENMASK(31, 16)
#define C2H_CMPT_FORMAT_6_COLOR_LOC_MASK                  GENMASK(15, 0)
#define EQDMA_C2H_PFCH_CACHE_DEPTH_ADDR                    0xBE0
#define C2H_PFCH_CACHE_DEPTH_MAX_STBUF_MASK               GENMASK(23, 16)
#define C2H_PFCH_CACHE_DEPTH_MASK                         GENMASK(7, 0)
#define EQDMA_C2H_WRB_COAL_BUF_DEPTH_ADDR                  0xBE4
#define C2H_WRB_COAL_BUF_DEPTH_RSVD_1_MASK                GENMASK(31, 8)
#define C2H_WRB_COAL_BUF_DEPTH_BUFFER_MASK                GENMASK(7, 0)
#define EQDMA_C2H_PFCH_CRDT_ADDR                           0xBE8
#define C2H_PFCH_CRDT_RSVD_1_MASK                         GENMASK(31, 1)
#define C2H_PFCH_CRDT_RSVD_2_MASK                         BIT(0)
#define EQDMA_C2H_STAT_HAS_CMPT_ACCEPTED_ADDR              0xBEC
#define C2H_STAT_HAS_CMPT_ACCEPTED_RSVD_1_MASK            GENMASK(31, 18)
#define C2H_STAT_HAS_CMPT_ACCEPTED_CNT_MASK               GENMASK(17, 0)
#define EQDMA_C2H_STAT_HAS_PLD_ACCEPTED_ADDR               0xBF0
#define C2H_STAT_HAS_PLD_ACCEPTED_RSVD_1_MASK             GENMASK(31, 18)
#define C2H_STAT_HAS_PLD_ACCEPTED_CNT_MASK                GENMASK(17, 0)
#define EQDMA_C2H_PLD_PKT_ID_ADDR                          0xBF4
#define C2H_PLD_PKT_ID_CMPT_WAIT_MASK                     GENMASK(31, 16)
#define C2H_PLD_PKT_ID_DATA_MASK                          GENMASK(15, 0)
#define EQDMA_C2H_PLD_PKT_ID_1_ADDR                        0xBF8
#define C2H_PLD_PKT_ID_1_CMPT_WAIT_MASK                   GENMASK(31, 16)
#define C2H_PLD_PKT_ID_1_DATA_MASK                        GENMASK(15, 0)
#define EQDMA_C2H_DROP_PLD_CNT_1_ADDR                      0xBFC
#define C2H_DROP_PLD_CNT_1_RSVD_1_MASK                    GENMASK(31, 18)
#define C2H_DROP_PLD_CNT_1_CNT_MASK                       GENMASK(17, 0)
#define EQDMA_H2C_ERR_STAT_ADDR 0xE00
#define H2C_ERR_STAT_RSVD_1_MASK GENMASK(31, 6)
#define H2C_ERR_STAT_PAR_ERR_MASK BIT(5)
#define H2C_ERR_STAT_SBE_MASK BIT(4)
#define H2C_ERR_STAT_DBE_MASK BIT(3)
#define H2C_ERR_STAT_NO_DMA_DS_MASK BIT(2)
#define H2C_ERR_STAT_SDI_MRKR_REQ_MOP_ERR_MASK BIT(1)
#define H2C_ERR_STAT_ZERO_LEN_DS_MASK BIT(0)
#define EQDMA_H2C_ERR_MASK_ADDR 0xE04
#define H2C_ERR_EN_MASK GENMASK(31, 0)
#define EQDMA_H2C_FIRST_ERR_QID_ADDR 0xE08
#define H2C_FIRST_ERR_QID_RSVD_1_MASK GENMASK(31, 20)
#define H2C_FIRST_ERR_QID_ERR_TYPE_MASK GENMASK(19, 16)
#define H2C_FIRST_ERR_QID_RSVD_2_MASK GENMASK(15, 13)
#define H2C_FIRST_ERR_QID_QID_MASK GENMASK(12, 0)
#define EQDMA_H2C_DBG_REG0_ADDR 0xE0C
#define H2C_REG0_NUM_DSC_RCVD_MASK GENMASK(31, 16)
#define H2C_REG0_NUM_WRB_SENT_MASK GENMASK(15, 0)
#define EQDMA_H2C_DBG_REG1_ADDR 0xE10
#define H2C_REG1_NUM_REQ_SENT_MASK GENMASK(31, 16)
#define H2C_REG1_NUM_CMP_SENT_MASK GENMASK(15, 0)
#define EQDMA_H2C_DBG_REG2_ADDR 0xE14
#define H2C_REG2_RSVD_1_MASK GENMASK(31, 16)
#define H2C_REG2_NUM_ERR_DSC_RCVD_MASK GENMASK(15, 0)
#define EQDMA_H2C_DBG_REG3_ADDR 0xE18
#define H2C_REG3_RSVD_1_MASK BIT(31)
#define H2C_REG3_DSCO_FIFO_EMPTY_MASK BIT(30)
#define H2C_REG3_DSCO_FIFO_FULL_MASK BIT(29)
#define H2C_REG3_CUR_RC_STATE_MASK GENMASK(28, 26)
#define H2C_REG3_RDREQ_LINES_MASK GENMASK(25, 16)
#define H2C_REG3_RDATA_LINES_AVAIL_MASK GENMASK(15, 6)
#define H2C_REG3_PEND_FIFO_EMPTY_MASK BIT(5)
#define H2C_REG3_PEND_FIFO_FULL_MASK BIT(4)
#define H2C_REG3_CUR_RQ_STATE_MASK GENMASK(3, 2)
#define H2C_REG3_DSCI_FIFO_FULL_MASK BIT(1)
#define H2C_REG3_DSCI_FIFO_EMPTY_MASK BIT(0)
#define EQDMA_H2C_DBG_REG4_ADDR 0xE1C
#define H2C_REG4_RDREQ_ADDR_MASK GENMASK(31, 0)
#define EQDMA_H2C_FATAL_ERR_EN_ADDR 0xE20
#define H2C_FATAL_ERR_EN_RSVD_1_MASK GENMASK(31, 1)
#define H2C_FATAL_ERR_EN_H2C_MASK BIT(0)
#define EQDMA_H2C_REQ_THROT_PCIE_ADDR 0xE24
#define H2C_REQ_THROT_PCIE_EN_REQ_MASK BIT(31)
#define H2C_REQ_THROT_PCIE_MASK GENMASK(30, 19)
#define H2C_REQ_THROT_PCIE_EN_DATA_MASK BIT(18)
#define H2C_REQ_THROT_PCIE_DATA_THRESH_MASK GENMASK(17, 0)
#define EQDMA_H2C_ALN_DBG_REG0_ADDR 0xE28
#define H2C_ALN_REG0_NUM_PKT_SENT_MASK GENMASK(15, 0)
#define EQDMA_H2C_REQ_THROT_AXIMM_ADDR 0xE2C
#define H2C_REQ_THROT_AXIMM_EN_REQ_MASK BIT(31)
#define H2C_REQ_THROT_AXIMM_MASK GENMASK(30, 19)
#define H2C_REQ_THROT_AXIMM_EN_DATA_MASK BIT(18)
#define H2C_REQ_THROT_AXIMM_DATA_THRESH_MASK GENMASK(17, 0)
#define EQDMA_C2H_MM_CTL_ADDR 0x1004
#define C2H_MM_CTL_RESERVED1_MASK GENMASK(31, 9)
#define C2H_MM_CTL_ERRC_EN_MASK BIT(8)
#define C2H_MM_CTL_RESERVED0_MASK GENMASK(7, 1)
#define C2H_MM_CTL_RUN_MASK BIT(0)
#define EQDMA_C2H_MM_STATUS_ADDR 0x1040
#define C2H_MM_STATUS_RSVD_1_MASK GENMASK(31, 1)
#define C2H_MM_STATUS_RUN_MASK BIT(0)
#define EQDMA_C2H_MM_CMPL_DESC_CNT_ADDR 0x1048
#define C2H_MM_CMPL_DESC_CNT_C2H_CO_MASK GENMASK(31, 0)
#define EQDMA_C2H_MM_ERR_CODE_ENABLE_MASK_ADDR 0x1054
#define C2H_MM_ERR_CODE_ENABLE_RESERVED1_MASK BIT(31)
#define C2H_MM_ERR_CODE_ENABLE_WR_UC_RAM_MASK BIT(30)
#define C2H_MM_ERR_CODE_ENABLE_WR_UR_MASK BIT(29)
#define C2H_MM_ERR_CODE_ENABLE_WR_FLR_MASK BIT(28)
#define C2H_MM_ERR_CODE_ENABLE_RESERVED0_MASK GENMASK(27, 2)
#define C2H_MM_ERR_CODE_ENABLE_RD_SLV_ERR_MASK BIT(1)
#define C2H_MM_ERR_CODE_ENABLE_WR_SLV_ERR_MASK BIT(0)
#define EQDMA_C2H_MM_ERR_CODE_ADDR 0x1058
#define C2H_MM_ERR_CODE_RESERVED1_MASK GENMASK(31, 28)
#define C2H_MM_ERR_CODE_CIDX_MASK GENMASK(27, 12)
#define C2H_MM_ERR_CODE_RESERVED0_MASK GENMASK(11, 10)
#define C2H_MM_ERR_CODE_SUB_TYPE_MASK GENMASK(9, 5)
#define C2H_MM_ERR_CODE_MASK GENMASK(4, 0)
#define EQDMA_C2H_MM_ERR_INFO_ADDR 0x105C
#define C2H_MM_ERR_INFO_VALID_MASK BIT(31)
#define C2H_MM_ERR_INFO_SEL_MASK BIT(30)
#define C2H_MM_ERR_INFO_RSVD_1_MASK GENMASK(29, 24)
#define C2H_MM_ERR_INFO_QID_MASK GENMASK(23, 0)
#define EQDMA_C2H_MM_PERF_MON_CTL_ADDR 0x10C0
#define C2H_MM_PERF_MON_CTL_RSVD_1_MASK GENMASK(31, 4)
#define C2H_MM_PERF_MON_CTL_IMM_START_MASK BIT(3)
#define C2H_MM_PERF_MON_CTL_RUN_START_MASK BIT(2)
#define C2H_MM_PERF_MON_CTL_IMM_CLEAR_MASK BIT(1)
#define C2H_MM_PERF_MON_CTL_RUN_CLEAR_MASK BIT(0)
#define EQDMA_C2H_MM_PERF_MON_CYCLE_CNT0_ADDR 0x10C4
#define C2H_MM_PERF_MON_CYCLE_CNT0_CYC_CNT_MASK GENMASK(31, 0)
#define EQDMA_C2H_MM_PERF_MON_CYCLE_CNT1_ADDR 0x10C8
#define C2H_MM_PERF_MON_CYCLE_CNT1_RSVD_1_MASK GENMASK(31, 10)
#define C2H_MM_PERF_MON_CYCLE_CNT1_CYC_CNT_MASK GENMASK(9, 0)
#define EQDMA_C2H_MM_PERF_MON_DATA_CNT0_ADDR 0x10CC
#define C2H_MM_PERF_MON_DATA_CNT0_DCNT_MASK GENMASK(31, 0)
#define EQDMA_C2H_MM_PERF_MON_DATA_CNT1_ADDR 0x10D0
#define C2H_MM_PERF_MON_DATA_CNT1_RSVD_1_MASK GENMASK(31, 10)
#define C2H_MM_PERF_MON_DATA_CNT1_DCNT_MASK GENMASK(9, 0)
#define EQDMA_C2H_MM_DBG_ADDR 0x10E8
#define C2H_MM_RSVD_1_MASK GENMASK(31, 24)
#define C2H_MM_RRQ_ENTRIES_MASK GENMASK(23, 17)
#define C2H_MM_DAT_FIFO_SPC_MASK GENMASK(16, 7)
#define C2H_MM_RD_STALL_MASK BIT(6)
#define C2H_MM_RRQ_FIFO_FI_MASK BIT(5)
#define C2H_MM_WR_STALL_MASK BIT(4)
#define C2H_MM_WRQ_FIFO_FI_MASK BIT(3)
#define C2H_MM_WBK_STALL_MASK BIT(2)
#define C2H_MM_DSC_FIFO_EP_MASK BIT(1)
#define C2H_MM_DSC_FIFO_FL_MASK BIT(0)
#define EQDMA_H2C_MM_CTL_ADDR 0x1204
#define H2C_MM_CTL_RESERVED1_MASK GENMASK(31, 9)
#define H2C_MM_CTL_ERRC_EN_MASK BIT(8)
#define H2C_MM_CTL_RESERVED0_MASK GENMASK(7, 1)
#define H2C_MM_CTL_RUN_MASK BIT(0)
#define EQDMA_H2C_MM_STATUS_ADDR 0x1240
#define H2C_MM_STATUS_RSVD_1_MASK GENMASK(31, 1)
#define H2C_MM_STATUS_RUN_MASK BIT(0)
#define EQDMA_H2C_MM_CMPL_DESC_CNT_ADDR 0x1248
#define H2C_MM_CMPL_DESC_CNT_H2C_CO_MASK GENMASK(31, 0)
#define EQDMA_H2C_MM_ERR_CODE_ENABLE_MASK_ADDR 0x1254
#define H2C_MM_ERR_CODE_ENABLE_RESERVED5_MASK GENMASK(31, 30)
#define H2C_MM_ERR_CODE_ENABLE_WR_SLV_ERR_MASK BIT(29)
#define H2C_MM_ERR_CODE_ENABLE_WR_DEC_ERR_MASK BIT(28)
#define H2C_MM_ERR_CODE_ENABLE_RESERVED4_MASK GENMASK(27, 23)
#define H2C_MM_ERR_CODE_ENABLE_RD_RQ_DIS_ERR_MASK BIT(22)
#define H2C_MM_ERR_CODE_ENABLE_RESERVED3_MASK GENMASK(21, 17)
#define H2C_MM_ERR_CODE_ENABLE_RD_DAT_POISON_ERR_MASK BIT(16)
#define H2C_MM_ERR_CODE_ENABLE_RESERVED2_MASK GENMASK(15, 9)
#define H2C_MM_ERR_CODE_ENABLE_RD_FLR_ERR_MASK BIT(8)
#define H2C_MM_ERR_CODE_ENABLE_RESERVED1_MASK GENMASK(7, 6)
#define H2C_MM_ERR_CODE_ENABLE_RD_HDR_ADR_ERR_MASK BIT(5)
#define H2C_MM_ERR_CODE_ENABLE_RD_HDR_PARA_MASK BIT(4)
#define H2C_MM_ERR_CODE_ENABLE_RD_HDR_BYTE_ERR_MASK BIT(3)
#define H2C_MM_ERR_CODE_ENABLE_RD_UR_CA_MASK BIT(2)
#define H2C_MM_ERR_CODE_ENABLE_RD_HRD_POISON_ERR_MASK BIT(1)
#define H2C_MM_ERR_CODE_ENABLE_RESERVED0_MASK BIT(0)
#define EQDMA_H2C_MM_ERR_CODE_ADDR 0x1258
#define H2C_MM_ERR_CODE_RSVD_1_MASK GENMASK(31, 28)
#define H2C_MM_ERR_CODE_CIDX_MASK GENMASK(27, 12)
#define H2C_MM_ERR_CODE_RESERVED0_MASK GENMASK(11, 10)
#define H2C_MM_ERR_CODE_SUB_TYPE_MASK GENMASK(9, 5)
#define H2C_MM_ERR_CODE_MASK GENMASK(4, 0)
#define EQDMA_H2C_MM_ERR_INFO_ADDR 0x125C
#define H2C_MM_ERR_INFO_VALID_MASK BIT(31)
#define H2C_MM_ERR_INFO_SEL_MASK BIT(30)
#define H2C_MM_ERR_INFO_RSVD_1_MASK GENMASK(29, 24)
#define H2C_MM_ERR_INFO_QID_MASK GENMASK(23, 0)
#define EQDMA_H2C_MM_PERF_MON_CTL_ADDR 0x12C0
#define H2C_MM_PERF_MON_CTL_RSVD_1_MASK GENMASK(31, 4)
#define H2C_MM_PERF_MON_CTL_IMM_START_MASK BIT(3)
#define H2C_MM_PERF_MON_CTL_RUN_START_MASK BIT(2)
#define H2C_MM_PERF_MON_CTL_IMM_CLEAR_MASK BIT(1)
#define H2C_MM_PERF_MON_CTL_RUN_CLEAR_MASK BIT(0)
#define EQDMA_H2C_MM_PERF_MON_CYCLE_CNT0_ADDR 0x12C4
#define H2C_MM_PERF_MON_CYCLE_CNT0_CYC_CNT_MASK GENMASK(31, 0)
#define EQDMA_H2C_MM_PERF_MON_CYCLE_CNT1_ADDR 0x12C8
#define H2C_MM_PERF_MON_CYCLE_CNT1_RSVD_1_MASK GENMASK(31, 10)
#define H2C_MM_PERF_MON_CYCLE_CNT1_CYC_CNT_MASK GENMASK(9, 0)
#define EQDMA_H2C_MM_PERF_MON_DATA_CNT0_ADDR 0x12CC
#define H2C_MM_PERF_MON_DATA_CNT0_DCNT_MASK GENMASK(31, 0)
#define EQDMA_H2C_MM_PERF_MON_DATA_CNT1_ADDR 0x12D0
#define H2C_MM_PERF_MON_DATA_CNT1_RSVD_1_MASK GENMASK(31, 10)
#define H2C_MM_PERF_MON_DATA_CNT1_DCNT_MASK GENMASK(9, 0)
#define EQDMA_H2C_MM_DBG_ADDR 0x12E8
#define H2C_MM_RSVD_1_MASK GENMASK(31, 24)
#define H2C_MM_RRQ_ENTRIES_MASK GENMASK(23, 17)
#define H2C_MM_DAT_FIFO_SPC_MASK GENMASK(16, 7)
#define H2C_MM_RD_STALL_MASK BIT(6)
#define H2C_MM_RRQ_FIFO_FI_MASK BIT(5)
#define H2C_MM_WR_STALL_MASK BIT(4)
#define H2C_MM_WRQ_FIFO_FI_MASK BIT(3)
#define H2C_MM_WBK_STALL_MASK BIT(2)
#define H2C_MM_DSC_FIFO_EP_MASK BIT(1)
#define H2C_MM_DSC_FIFO_FL_MASK BIT(0)
#define EQDMA_C2H_CRDT_COAL_CFG_1_ADDR 0x1400
#define C2H_CRDT_COAL_CFG_1_RSVD_1_MASK GENMASK(31, 18)
#define C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_MASK GENMASK(17, 10)
#define C2H_CRDT_COAL_CFG_1_TIMER_TH_MASK GENMASK(9, 0)
#define EQDMA_C2H_CRDT_COAL_CFG_2_ADDR 0x1404
#define C2H_CRDT_COAL_CFG_2_RSVD_1_MASK GENMASK(31, 24)
#define C2H_CRDT_COAL_CFG_2_FIFO_TH_MASK GENMASK(23, 16)
#define C2H_CRDT_COAL_CFG_2_RESERVED1_MASK GENMASK(15, 11)
#define C2H_CRDT_COAL_CFG_2_NT_TH_MASK GENMASK(10, 0)
#define EQDMA_C2H_PFCH_BYP_QID_ADDR 0x1408
#define C2H_PFCH_BYP_QID_RSVD_1_MASK GENMASK(31, 12)
#define C2H_PFCH_BYP_QID_MASK GENMASK(11, 0)
#define EQDMA_C2H_PFCH_BYP_TAG_ADDR 0x140C
#define C2H_PFCH_BYP_TAG_RSVD_1_MASK GENMASK(31, 20)
#define C2H_PFCH_BYP_TAG_BYP_QID_MASK GENMASK(19, 8)
#define C2H_PFCH_BYP_TAG_RSVD_2_MASK BIT(7)
#define C2H_PFCH_BYP_TAG_MASK GENMASK(6, 0)
#define EQDMA_C2H_WATER_MARK_ADDR 0x1500
#define C2H_WATER_MARK_HIGH_WM_MASK GENMASK(31, 16)
#define C2H_WATER_MARK_LOW_WM_MASK GENMASK(15, 0)
#define SW_IND_CTXT_DATA_W7_VIRTIO_DSC_BASE_H_MASK GENMASK(10, 0)
#define SW_IND_CTXT_DATA_W6_VIRTIO_DSC_BASE_M_MASK GENMASK(31, 0)
#define SW_IND_CTXT_DATA_W5_VIRTIO_DSC_BASE_L_MASK GENMASK(31, 11)
#define SW_IND_CTXT_DATA_W5_PASID_EN_MASK BIT(10)
#define SW_IND_CTXT_DATA_W5_PASID_H_MASK GENMASK(9, 0)
#define SW_IND_CTXT_DATA_W4_PASID_L_MASK GENMASK(31, 20)
#define SW_IND_CTXT_DATA_W4_HOST_ID_MASK GENMASK(19, 16)
#define SW_IND_CTXT_DATA_W4_IRQ_BYP_MASK BIT(15)
#define SW_IND_CTXT_DATA_W4_PACK_BYP_OUT_MASK BIT(14)
#define SW_IND_CTXT_DATA_W4_VIRTIO_EN_MASK BIT(13)
#define SW_IND_CTXT_DATA_W4_DIS_INTR_ON_VF_MASK BIT(12)
#define SW_IND_CTXT_DATA_W4_INT_AGGR_MASK BIT(11)
#define SW_IND_CTXT_DATA_W4_VEC_MASK GENMASK(10, 0)
#define SW_IND_CTXT_DATA_W3_DSC_BASE_H_MASK GENMASK(31, 0)
#define SW_IND_CTXT_DATA_W2_DSC_BASE_L_MASK GENMASK(31, 0)
#define SW_IND_CTXT_DATA_W1_IS_MM_MASK BIT(31)
#define SW_IND_CTXT_DATA_W1_MRKR_DIS_MASK BIT(30)
#define SW_IND_CTXT_DATA_W1_IRQ_REQ_MASK BIT(29)
#define SW_IND_CTXT_DATA_W1_ERR_WB_SENT_MASK BIT(28)
#define SW_IND_CTXT_DATA_W1_ERR_MASK GENMASK(27, 26)
#define SW_IND_CTXT_DATA_W1_IRQ_NO_LAST_MASK BIT(25)
#define SW_IND_CTXT_DATA_W1_PORT_ID_MASK GENMASK(24, 22)
#define SW_IND_CTXT_DATA_W1_IRQ_EN_MASK BIT(21)
#define SW_IND_CTXT_DATA_W1_WBK_EN_MASK BIT(20)
#define SW_IND_CTXT_DATA_W1_MM_CHN_MASK BIT(19)
#define SW_IND_CTXT_DATA_W1_BYPASS_MASK BIT(18)
#define SW_IND_CTXT_DATA_W1_DSC_SZ_MASK GENMASK(17, 16)
#define SW_IND_CTXT_DATA_W1_RNG_SZ_MASK GENMASK(15, 12)
#define SW_IND_CTXT_DATA_W1_RSVD_1_MASK GENMASK(11, 9)
#define SW_IND_CTXT_DATA_W1_FETCH_MAX_MASK GENMASK(8, 5)
#define SW_IND_CTXT_DATA_W1_AT_MASK BIT(4)
#define SW_IND_CTXT_DATA_W1_WBI_INTVL_EN_MASK BIT(3)
#define SW_IND_CTXT_DATA_W1_WBI_CHK_MASK BIT(2)
#define SW_IND_CTXT_DATA_W1_FCRD_EN_MASK BIT(1)
#define SW_IND_CTXT_DATA_W1_QEN_MASK BIT(0)
#define SW_IND_CTXT_DATA_W0_RSV_MASK GENMASK(31, 29)
#define SW_IND_CTXT_DATA_W0_FNC_MASK GENMASK(28, 17)
#define SW_IND_CTXT_DATA_W0_IRQ_ARM_MASK BIT(16)
#define SW_IND_CTXT_DATA_W0_PIDX_MASK GENMASK(15, 0)
#define HW_IND_CTXT_DATA_W1_RSVD_1_MASK BIT(15)
#define HW_IND_CTXT_DATA_W1_FETCH_PND_MASK GENMASK(14, 11)
#define HW_IND_CTXT_DATA_W1_EVT_PND_MASK BIT(10)
#define HW_IND_CTXT_DATA_W1_IDL_STP_B_MASK BIT(9)
#define HW_IND_CTXT_DATA_W1_DSC_PND_MASK BIT(8)
#define HW_IND_CTXT_DATA_W1_RSVD_2_MASK GENMASK(7, 0)
#define HW_IND_CTXT_DATA_W0_CRD_USE_MASK GENMASK(31, 16)
#define HW_IND_CTXT_DATA_W0_CIDX_MASK GENMASK(15, 0)
#define CRED_CTXT_DATA_W0_RSVD_1_MASK GENMASK(31, 16)
#define CRED_CTXT_DATA_W0_CREDT_MASK GENMASK(15, 0)
#define PREFETCH_CTXT_DATA_W1_VALID_MASK BIT(13)
#define PREFETCH_CTXT_DATA_W1_SW_CRDT_H_MASK GENMASK(12, 0)
#define PREFETCH_CTXT_DATA_W0_SW_CRDT_L_MASK GENMASK(31, 29)
#define PREFETCH_CTXT_DATA_W0_PFCH_MASK BIT(28)
#define PREFETCH_CTXT_DATA_W0_PFCH_EN_MASK BIT(27)
#define PREFETCH_CTXT_DATA_W0_ERR_MASK BIT(26)
#define PREFETCH_CTXT_DATA_W0_RSVD_MASK GENMASK(25, 22)
#define PREFETCH_CTXT_DATA_W0_PFCH_NEED_MASK GENMASK(21, 16)
#define PREFETCH_CTXT_DATA_W0_NUM_PFCH_MASK GENMASK(15, 10)
#define PREFETCH_CTXT_DATA_W0_VIRTIO_MASK BIT(9)
#define PREFETCH_CTXT_DATA_W0_VAR_DESC_MASK BIT(8)
#define PREFETCH_CTXT_DATA_W0_PORT_ID_MASK GENMASK(7, 5)
#define PREFETCH_CTXT_DATA_W0_BUF_SZ_IDX_MASK GENMASK(4, 1)
#define PREFETCH_CTXT_DATA_W0_BYPASS_MASK BIT(0)
#define CMPL_CTXT_DATA_W6_RSVD_1_H_MASK GENMASK(7, 0)
#define CMPL_CTXT_DATA_W5_RSVD_1_L_MASK GENMASK(31, 23)
#define CMPL_CTXT_DATA_W5_PORT_ID_MASK GENMASK(22, 20)
#define CMPL_CTXT_DATA_W5_SH_CMPT_MASK BIT(19)
#define CMPL_CTXT_DATA_W5_VIO_EOP_MASK BIT(18)
#define CMPL_CTXT_DATA_W5_BADDR4_LOW_MASK GENMASK(17, 14)
#define CMPL_CTXT_DATA_W5_PASID_EN_MASK BIT(13)
#define CMPL_CTXT_DATA_W5_PASID_H_MASK GENMASK(12, 0)
#define CMPL_CTXT_DATA_W4_PASID_L_MASK GENMASK(31, 23)
#define CMPL_CTXT_DATA_W4_HOST_ID_MASK GENMASK(22, 19)
#define CMPL_CTXT_DATA_W4_DIR_C2H_MASK BIT(18)
#define CMPL_CTXT_DATA_W4_VIO_MASK BIT(17)
#define CMPL_CTXT_DATA_W4_DIS_INTR_ON_VF_MASK BIT(16)
#define CMPL_CTXT_DATA_W4_INT_AGGR_MASK BIT(15)
#define CMPL_CTXT_DATA_W4_VEC_MASK GENMASK(14, 4)
#define CMPL_CTXT_DATA_W4_AT_MASK BIT(3)
#define CMPL_CTXT_DATA_W4_OVF_CHK_DIS_MASK BIT(2)
#define CMPL_CTXT_DATA_W4_FULL_UPD_MASK BIT(1)
#define CMPL_CTXT_DATA_W4_TIMER_RUNNING_MASK BIT(0)
#define CMPL_CTXT_DATA_W3_USER_TRIG_PEND_MASK BIT(31)
#define CMPL_CTXT_DATA_W3_ERR_MASK GENMASK(30, 29)
#define CMPL_CTXT_DATA_W3_VALID_MASK BIT(28)
#define CMPL_CTXT_DATA_W3_CIDX_MASK GENMASK(27, 12)
#define CMPL_CTXT_DATA_W3_PIDX_H_MASK GENMASK(11, 0)
#define CMPL_CTXT_DATA_W2_PIDX_L_MASK GENMASK(31, 28)
#define CMPL_CTXT_DATA_W2_DESC_SIZE_MASK GENMASK(27, 26)
#define CMPL_CTXT_DATA_W2_BADDR4_HIGH_H_MASK GENMASK(25, 0)
#define CMPL_CTXT_DATA_W1_BADDR4_HIGH_L_MASK GENMASK(31, 0)
#define CMPL_CTXT_DATA_W0_QSIZE_IX_MASK GENMASK(31, 28)
#define CMPL_CTXT_DATA_W0_COLOR_MASK BIT(27)
#define CMPL_CTXT_DATA_W0_INT_ST_MASK GENMASK(26, 25)
#define CMPL_CTXT_DATA_W0_TIMER_IX_MASK GENMASK(24, 21)
#define CMPL_CTXT_DATA_W0_CNTER_IX_MASK GENMASK(20, 17)
#define CMPL_CTXT_DATA_W0_FNC_ID_MASK GENMASK(16, 5)
#define CMPL_CTXT_DATA_W0_TRIG_MODE_MASK GENMASK(4, 2)
#define CMPL_CTXT_DATA_W0_EN_INT_MASK BIT(1)
#define CMPL_CTXT_DATA_W0_EN_STAT_DESC_MASK BIT(0)
#define INTR_CTXT_DATA_W3_FUNC_MASK GENMASK(29, 18)
#define INTR_CTXT_DATA_W3_RSVD_MASK GENMASK(17, 14)
#define INTR_CTXT_DATA_W3_PASID_EN_MASK BIT(13)
#define INTR_CTXT_DATA_W3_PASID_H_MASK GENMASK(12, 0)
#define INTR_CTXT_DATA_W2_PASID_L_MASK GENMASK(31, 23)
#define INTR_CTXT_DATA_W2_HOST_ID_MASK GENMASK(22, 19)
#define INTR_CTXT_DATA_W2_AT_MASK BIT(18)
#define INTR_CTXT_DATA_W2_PIDX_MASK GENMASK(17, 6)
#define INTR_CTXT_DATA_W2_PAGE_SIZE_MASK GENMASK(5, 3)
#define INTR_CTXT_DATA_W2_BADDR_4K_H_MASK GENMASK(2, 0)
#define INTR_CTXT_DATA_W1_BADDR_4K_M_MASK GENMASK(31, 0)
#define INTR_CTXT_DATA_W0_BADDR_4K_L_MASK GENMASK(31, 15)
#define INTR_CTXT_DATA_W0_COLOR_MASK BIT(14)
#define INTR_CTXT_DATA_W0_INT_ST_MASK BIT(13)
#define INTR_CTXT_DATA_W0_RSVD1_MASK BIT(12)
#define INTR_CTXT_DATA_W0_VEC_MASK GENMASK(11, 1)
#define INTR_CTXT_DATA_W0_VALID_MASK BIT(0)
#ifdef __cplusplus
}
#endif
#endif /* EQDMA_SOFT_REG_H_ */
#endif
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -17,7 +17,6 @@
#include "qdma_access_common.h"
#include "qdma_platform.h"
#include "qdma_soft_reg.h"
#include "eqdma_soft_reg.h"
#include "qdma_soft_access.h"
#include "qdma_s80_hard_access.h"
#include "eqdma_soft_access.h"
......@@ -169,6 +168,8 @@ static const char *qdma_get_vivado_release_id(
return "vivado 2019.2";
case QDMA_VIVADO_2020_1:
return "vivado 2020.1";
case QDMA_VIVADO_2020_2:
return "vivado 2020.2";
default:
qdma_log_error("%s: invalid vivado_release_id(%d), err:%d\n",
__func__,
......@@ -319,6 +320,9 @@ void qdma_fetch_version_details(uint8_t is_vf, uint32_t version_reg_val,
case 0:
version_info->vivado_release = QDMA_VIVADO_2020_1;
break;
case 1:
version_info->vivado_release = QDMA_VIVADO_2020_2;
break;
default:
version_info->vivado_release = QDMA_VIVADO_NONE;
break;
......@@ -378,641 +382,6 @@ void qdma_memset(void *to, uint8_t val, uint32_t size)
_to[i] = val;
}
/*****************************************************************************/
/**
 * qdma_write_global_ring_sizes() - program the global ring size CSR array
 *
 * @dev_hndl:	device handle
 * @index:	Index from where the values needs to written
 * @count:	number of entries to be written
 * @glbl_rng_sz: pointer to the array having the values to write
 *
 * (index + count) shall not be more than 16
 *
 * Return: 0 - success and < 0 - failure
 *****************************************************************************/
static int qdma_write_global_ring_sizes(void *dev_hndl, uint8_t index,
				uint8_t count, const uint32_t *glbl_rng_sz)
{
	int rv = QDMA_SUCCESS;

	/* Reject NULL handle/buffer and zero-length requests up front */
	if (!dev_hndl || !glbl_rng_sz || !count) {
		qdma_log_error("%s: dev_hndl=%p glbl_rng_sz=%p, err:%d\n",
				__func__, dev_hndl, glbl_rng_sz,
				-QDMA_ERR_INV_PARAM);
		rv = -QDMA_ERR_INV_PARAM;
	} else if ((index + count) > QDMA_NUM_RING_SIZES) {
		/* The CSR array holds only QDMA_NUM_RING_SIZES entries */
		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
				__func__, index, count,
				QDMA_NUM_RING_SIZES,
				-QDMA_ERR_INV_PARAM);
		rv = -QDMA_ERR_INV_PARAM;
	} else {
		qdma_write_csr_values(dev_hndl, QDMA_OFFSET_GLBL_RNG_SZ,
				index, count, glbl_rng_sz);
	}

	return rv;
}
/*****************************************************************************/
/**
 * qdma_read_global_ring_sizes() - function to get the global rng_sz array
 *
 * @dev_hndl:	device handle
 * @index:	Index from where the values needs to read
 * @count:	number of entries to be read
 * @glbl_rng_sz: pointer to array to hold the values read
 *
 * (index + count) shall not be more than 16
 *
 * Return: 0 - success and < 0 - failure
 *****************************************************************************/
static int qdma_read_global_ring_sizes(void *dev_hndl, uint8_t index,
				uint8_t count, uint32_t *glbl_rng_sz)
{
	if (!dev_hndl || !glbl_rng_sz || !count) {
		qdma_log_error("%s: dev_hndl=%p glbl_rng_sz=%p, err:%d\n",
				   __func__, dev_hndl, glbl_rng_sz,
				   -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if ((index + count) > QDMA_NUM_RING_SIZES) {
		/* Bug fix: the log previously reported
		 * QDMA_NUM_C2H_BUFFER_SIZES although the bound checked
		 * here is QDMA_NUM_RING_SIZES (copy-paste error). */
		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
				   __func__, index, count,
				   QDMA_NUM_RING_SIZES,
				   -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	qdma_read_csr_values(dev_hndl, QDMA_OFFSET_GLBL_RNG_SZ, index, count,
			glbl_rng_sz);

	return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
 * qdma_write_global_timer_count() - function to set the timer values
 *
 * @dev_hndl:	device handle
 * @index:	Index from where the values needs to written
 * @count:	number of entries to be written
 * @glbl_tmr_cnt: pointer to the array having the values to write
 *
 * (index + count) shall not be more than 16
 *
 * Return: 0 - success and < 0 - failure
 *****************************************************************************/
static int qdma_write_global_timer_count(void *dev_hndl, uint8_t index,
				uint8_t count, const uint32_t *glbl_tmr_cnt)
{
	struct qdma_dev_attributes *dev_cap = NULL;

	if (!dev_hndl || !glbl_tmr_cnt || !count) {
		qdma_log_error("%s: dev_hndl=%p glbl_tmr_cnt=%p, err:%d\n",
				__func__, dev_hndl, glbl_tmr_cnt,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if ((index + count) > QDMA_NUM_C2H_TIMERS) {
		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
				__func__, index, count,
				QDMA_NUM_C2H_TIMERS,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	qdma_get_device_attr(dev_hndl, &dev_cap);

	/* Timer CSRs exist only when ST or MM completion is enabled */
	if (!dev_cap->st_en && !dev_cap->mm_cmpt_en) {
		qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
				__func__,
				-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
	}

	qdma_write_csr_values(dev_hndl, QDMA_OFFSET_C2H_TIMER_CNT,
			index, count, glbl_tmr_cnt);

	return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
 * qdma_read_global_timer_count() - function to get the timer values
 *
 * @dev_hndl:	device handle
 * @index:	Index from where the values needs to read
 * @count:	number of entries to be read
 * @glbl_tmr_cnt: pointer to array to hold the values read
 *
 * (index + count) shall not be more than 16
 *
 * Return: 0 - success and < 0 - failure
 *****************************************************************************/
static int qdma_read_global_timer_count(void *dev_hndl, uint8_t index,
				uint8_t count, uint32_t *glbl_tmr_cnt)
{
	struct qdma_dev_attributes *dev_cap = NULL;

	if (!dev_hndl || !glbl_tmr_cnt || !count) {
		qdma_log_error("%s: dev_hndl=%p glbl_tmr_cnt=%p, err:%d\n",
				__func__, dev_hndl, glbl_tmr_cnt,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if ((index + count) > QDMA_NUM_C2H_TIMERS) {
		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
				__func__, index, count,
				QDMA_NUM_C2H_TIMERS,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	qdma_get_device_attr(dev_hndl, &dev_cap);

	/* Timer CSRs exist only when ST or MM completion is enabled */
	if (!dev_cap->st_en && !dev_cap->mm_cmpt_en) {
		qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
				__func__,
				-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
	}

	qdma_read_csr_values(dev_hndl,
			QDMA_OFFSET_C2H_TIMER_CNT, index,
			count, glbl_tmr_cnt);

	return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
 * qdma_write_global_counter_threshold() - function to set the counter
 * threshold values
 *
 * @dev_hndl:	device handle
 * @index:	Index from where the values needs to written
 * @count:	number of entries to be written
 * @glbl_cnt_th: pointer to the array having the values to write
 *
 * (index + count) shall not be more than 16
 *
 * Return: 0 - success and < 0 - failure
 *****************************************************************************/
static int qdma_write_global_counter_threshold(void *dev_hndl, uint8_t index,
		uint8_t count, const uint32_t *glbl_cnt_th)
{
	struct qdma_dev_attributes *dev_cap;

	if (!dev_hndl || !glbl_cnt_th || !count) {
		qdma_log_error("%s: dev_hndl=%p glbl_cnt_th=%p, err:%d\n",
				   __func__, dev_hndl, glbl_cnt_th,
				   -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if ((index + count) > QDMA_NUM_C2H_COUNTERS) {
		/* Bug fix: the log previously reported
		 * QDMA_NUM_C2H_BUFFER_SIZES although the bound checked
		 * here is QDMA_NUM_C2H_COUNTERS (copy-paste error). */
		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
				   __func__, index, count,
				   QDMA_NUM_C2H_COUNTERS,
				   -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	qdma_get_device_attr(dev_hndl, &dev_cap);

	/* Counter-threshold CSRs exist only with ST or MM completion */
	if (dev_cap->st_en || dev_cap->mm_cmpt_en)
		qdma_write_csr_values(dev_hndl, QDMA_OFFSET_C2H_CNT_TH, index,
				count, glbl_cnt_th);
	else {
		qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
				__func__,
				-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
	}

	return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
 * qdma_read_global_counter_threshold() - function to get the counter threshold
 * values
 *
 * @dev_hndl:	device handle
 * @index:	Index from where the values needs to read
 * @count:	number of entries to be read
 * @glbl_cnt_th: pointer to array to hold the values read
 *
 * (index + count) shall not be more than 16
 *
 * Return: 0 - success and < 0 - failure
 *****************************************************************************/
static int qdma_read_global_counter_threshold(void *dev_hndl, uint8_t index,
		uint8_t count, uint32_t *glbl_cnt_th)
{
	struct qdma_dev_attributes *dev_cap = NULL;

	if (!dev_hndl || !glbl_cnt_th || !count) {
		qdma_log_error("%s: dev_hndl=%p glbl_cnt_th=%p, err:%d\n",
				__func__, dev_hndl, glbl_cnt_th,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if ((index + count) > QDMA_NUM_C2H_COUNTERS) {
		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
				__func__, index, count,
				QDMA_NUM_C2H_COUNTERS,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	qdma_get_device_attr(dev_hndl, &dev_cap);

	/* Counter-threshold CSRs exist only with ST or MM completion */
	if (!dev_cap->st_en && !dev_cap->mm_cmpt_en) {
		qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
				__func__, -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
	}

	qdma_read_csr_values(dev_hndl, QDMA_OFFSET_C2H_CNT_TH, index,
			count, glbl_cnt_th);

	return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
 * qdma_write_global_buffer_sizes() - function to set the buffer sizes
 *
 * @dev_hndl:	device handle
 * @index:	Index from where the values needs to written
 * @count:	number of entries to be written
 * @glbl_buf_sz: pointer to the array having the values to write
 *
 * (index + count) shall not be more than 16
 *
 * Return: 0 - success and < 0 - failure
 *****************************************************************************/
static int qdma_write_global_buffer_sizes(void *dev_hndl, uint8_t index,
		uint8_t count, const uint32_t *glbl_buf_sz)
{
	struct qdma_dev_attributes *dev_cap = NULL;

	if (!dev_hndl || !glbl_buf_sz || !count) {
		qdma_log_error("%s: dev_hndl=%p glbl_buf_sz=%p, err:%d\n",
				__func__, dev_hndl, glbl_buf_sz,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if ((index + count) > QDMA_NUM_C2H_BUFFER_SIZES) {
		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
				__func__, index, count,
				QDMA_NUM_C2H_BUFFER_SIZES,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	qdma_get_device_attr(dev_hndl, &dev_cap);

	/* Buffer-size CSRs apply to streaming (ST) mode only */
	if (!dev_cap->st_en) {
		qdma_log_error("%s: ST not supported, err:%d\n",
				__func__,
				-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
	}

	qdma_write_csr_values(dev_hndl, QDMA_OFFSET_C2H_BUF_SZ, index,
			count, glbl_buf_sz);

	return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
 * qdma_read_global_buffer_sizes() - function to get the buffer sizes
 *
 * @dev_hndl:	device handle
 * @index:	Index from where the values needs to read
 * @count:	number of entries to be read
 * @glbl_buf_sz: pointer to array to hold the values read
 *
 * (index + count) shall not be more than 16
 *
 * Return: 0 - success and < 0 - failure
 *****************************************************************************/
static int qdma_read_global_buffer_sizes(void *dev_hndl, uint8_t index,
		uint8_t count, uint32_t *glbl_buf_sz)
{
	struct qdma_dev_attributes *dev_cap = NULL;

	if (!dev_hndl || !glbl_buf_sz || !count) {
		qdma_log_error("%s: dev_hndl=%p glbl_buf_sz=%p, err:%d\n",
				__func__, dev_hndl, glbl_buf_sz,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if ((index + count) > QDMA_NUM_C2H_BUFFER_SIZES) {
		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
				__func__, index, count,
				QDMA_NUM_C2H_BUFFER_SIZES,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	qdma_get_device_attr(dev_hndl, &dev_cap);

	/* Buffer-size CSRs apply to streaming (ST) mode only */
	if (!dev_cap->st_en) {
		qdma_log_error("%s: ST is not supported, err:%d\n",
				__func__,
				-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
	}

	qdma_read_csr_values(dev_hndl, QDMA_OFFSET_C2H_BUF_SZ, index,
			count, glbl_buf_sz);

	return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_global_csr_conf() - function to configure global csr
*
* @dev_hndl: device handle
* @index: Index from where the values needs to read
* @count: number of entries to be read
* @csr_val: uint32_t pointer to csr value
* @csr_type: Type of the CSR (qdma_global_csr_type enum) to configure
* @access_type HW access type (qdma_hw_access_type enum) value
* QDMA_HW_ACCESS_CLEAR - Not supported
* QDMA_HW_ACCESS_INVALIDATE - Not supported
*
* (index + count) shall not be more than 16
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_global_csr_conf(void *dev_hndl, uint8_t index, uint8_t count,
uint32_t *csr_val,
enum qdma_global_csr_type csr_type,
enum qdma_hw_access_type access_type)
{
int rv = QDMA_SUCCESS;
switch (csr_type) {
case QDMA_CSR_RING_SZ:
switch (access_type) {
case QDMA_HW_ACCESS_READ:
rv = qdma_read_global_ring_sizes(
dev_hndl,
index,
count,
csr_val);
break;
case QDMA_HW_ACCESS_WRITE:
rv = qdma_write_global_ring_sizes(
dev_hndl,
index,
count,
csr_val);
break;
default:
qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
__func__,
access_type,
-QDMA_ERR_INV_PARAM);
rv = -QDMA_ERR_INV_PARAM;
break;
}
break;
case QDMA_CSR_TIMER_CNT:
switch (access_type) {
case QDMA_HW_ACCESS_READ:
rv = qdma_read_global_timer_count(
dev_hndl,
index,
count,
csr_val);
break;
case QDMA_HW_ACCESS_WRITE:
rv = qdma_write_global_timer_count(
dev_hndl,
index,
count,
csr_val);
break;
default:
qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
__func__,
access_type,
-QDMA_ERR_INV_PARAM);
rv = -QDMA_ERR_INV_PARAM;
break;
}
break;
case QDMA_CSR_CNT_TH:
switch (access_type) {
case QDMA_HW_ACCESS_READ:
rv =
qdma_read_global_counter_threshold(
dev_hndl,
index,
count,
csr_val);
break;
case QDMA_HW_ACCESS_WRITE:
rv =
qdma_write_global_counter_threshold(
dev_hndl,
index,
count,
csr_val);
break;
default:
qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
__func__,
access_type,
-QDMA_ERR_INV_PARAM);
rv = -QDMA_ERR_INV_PARAM;
break;
}
break;
case QDMA_CSR_BUF_SZ:
switch (access_type) {
case QDMA_HW_ACCESS_READ:
rv =
qdma_read_global_buffer_sizes(dev_hndl,
index,
count,
csr_val);
break;
case QDMA_HW_ACCESS_WRITE:
rv =
qdma_write_global_buffer_sizes(dev_hndl,
index,
count,
csr_val);
break;
default:
qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
__func__,
access_type,
-QDMA_ERR_INV_PARAM);
rv = -QDMA_ERR_INV_PARAM;
break;
}
break;
default:
qdma_log_error("%s: csr_type(%d) invalid, err:%d\n",
__func__,
csr_type,
-QDMA_ERR_INV_PARAM);
rv = -QDMA_ERR_INV_PARAM;
break;
}
return rv;
}
/*****************************************************************************/
/**
 * qdma_global_writeback_interval_write() - function to set the writeback
 * interval
 *
 * @dev_hndl	device handle
 * @wb_int:	Writeback Interval
 *
 * Return: 0 - success and < 0 - failure
 *****************************************************************************/
static int qdma_global_writeback_interval_write(void *dev_hndl,
		enum qdma_wrb_interval wb_int)
{
	uint32_t reg_val;
	struct qdma_dev_attributes *dev_cap;

	if (!dev_hndl) {
		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
					   -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if (wb_int >= QDMA_NUM_WRB_INTERVALS) {
		qdma_log_error("%s: wb_int=%d is invalid, err:%d\n",
					   __func__, wb_int,
					   -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	qdma_get_device_attr(dev_hndl, &dev_cap);

	if (dev_cap->st_en || dev_cap->mm_cmpt_en) {
		reg_val = qdma_reg_read(dev_hndl, QDMA_OFFSET_GLBL_DSC_CFG);
		/* Bug fix: clear the interval field before OR-ing in the new
		 * value. A plain "|=" leaves stale bits set whenever the new
		 * interval has fewer bits than the previously programmed one
		 * (e.g. writing 1 over 3 would still read back as 3). */
		reg_val &= ~QDMA_GLBL_DSC_CFG_WB_ACC_INT_MASK;
		reg_val |= FIELD_SET(QDMA_GLBL_DSC_CFG_WB_ACC_INT_MASK, wb_int);
		qdma_reg_write(dev_hndl, QDMA_OFFSET_GLBL_DSC_CFG, reg_val);
	} else {
		qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
			__func__, -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
	}

	return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_global_writeback_interval_read() - function to get the writeback
* interval
*
* @dev_hndl: device handle
* @wb_int: pointer to the data to hold Writeback Interval
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_global_writeback_interval_read(void *dev_hndl,
		enum qdma_wrb_interval *wb_int)
{
	struct qdma_dev_attributes *dev_cap;
	uint32_t dsc_cfg;

	/* Parameter validation */
	if (!dev_hndl) {
		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if (!wb_int) {
		qdma_log_error("%s: wb_int is NULL, err:%d\n", __func__,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	qdma_get_device_attr(dev_hndl, &dev_cap);

	/* The writeback interval exists only on devices with ST or MM
	 * completion support; bail out early otherwise. */
	if (!dev_cap->st_en && !dev_cap->mm_cmpt_en) {
		qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
				__func__, -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
	}

	/* Extract the interval field from the global descriptor config */
	dsc_cfg = qdma_reg_read(dev_hndl, QDMA_OFFSET_GLBL_DSC_CFG);
	*wb_int = (enum qdma_wrb_interval)FIELD_GET(
			QDMA_GLBL_DSC_CFG_WB_ACC_INT_MASK, dsc_cfg);

	return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_global_writeback_interval_conf() - function to configure
* the writeback interval
*
* @dev_hndl: device handle
* @wb_int: pointer to the data to hold Writeback Interval
* @access_type HW access type (qdma_hw_access_type enum) value
* QDMA_HW_ACCESS_CLEAR - Not supported
* QDMA_HW_ACCESS_INVALIDATE - Not supported
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_global_writeback_interval_conf(void *dev_hndl,
		enum qdma_wrb_interval *wb_int,
		enum qdma_hw_access_type access_type)
{
	int rv;

	/* Dispatch to the read/write helper; CLEAR and INVALIDATE are not
	 * meaningful for the writeback interval and are rejected. */
	if (access_type == QDMA_HW_ACCESS_READ) {
		rv = qdma_global_writeback_interval_read(dev_hndl, wb_int);
	} else if (access_type == QDMA_HW_ACCESS_WRITE) {
		rv = qdma_global_writeback_interval_write(dev_hndl, *wb_int);
	} else {
		qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
				__func__,
				access_type,
				-QDMA_ERR_INV_PARAM);
		rv = -QDMA_ERR_INV_PARAM;
	}

	return rv;
}
/*****************************************************************************/
/**
* qdma_queue_cmpt_cidx_read() - function to read the CMPT CIDX register
......@@ -1066,45 +435,6 @@ static int qdma_queue_cmpt_cidx_read(void *dev_hndl, uint8_t is_vf,
}
/*****************************************************************************/
/**
* qdma_mm_channel_conf() - Function to enable/disable the MM channel
*
* @dev_hndl: device handle
* @channel: MM channel number
* @is_c2h: Queue direction. Set 1 for C2H and 0 for H2C
* @enable: Enable or disable MM channel
*
* Presently, we have only 1 MM channel
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
static int qdma_mm_channel_conf(void *dev_hndl, uint8_t channel, uint8_t is_c2h,
		uint8_t enable)
{
	struct qdma_dev_attributes *dev_cap;
	uint32_t ctrl_reg;

	if (!dev_hndl) {
		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
				__func__, -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	qdma_get_device_attr(dev_hndl, &dev_cap);

	/* No MM engine on this device: the request is silently ignored and
	 * reported as success, matching the historical contract. */
	if (!dev_cap->mm_en)
		return QDMA_SUCCESS;

	/* Select the H2C or C2H MM control register for this channel */
	ctrl_reg = (is_c2h ? QDMA_OFFSET_C2H_MM_CONTROL :
			QDMA_OFFSET_H2C_MM_CONTROL) +
			(channel * QDMA_MM_CONTROL_STEP);
	qdma_reg_write(dev_hndl, ctrl_reg, enable);

	return QDMA_SUCCESS;
}
/*****************************************************************************/
/**
* qdma_initiate_flr() - function to initiate Function Level Reset
......@@ -1259,6 +589,58 @@ int qdma_acc_reg_dump_buf_len(void *dev_hndl,
return rv;
}
/*
 * qdma_acc_reg_info_len() - report the buffer length and register count
 * needed for a detailed register-info dump of the given IP type.
 *
 * Returns 0 on success, negative error code on invalid arguments or an
 * unrecognized IP type.
 */
int qdma_acc_reg_info_len(void *dev_hndl,
		enum qdma_ip_type ip_type, int *buflen, int *num_regs)
{
	uint32_t dump_len;

	/* Each out-parameter gets its own validation with a specific log */
	if (!dev_hndl) {
		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
				__func__, -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if (!buflen) {
		qdma_log_error("%s: buflen is NULL, err:%d\n",
				__func__, -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if (!num_regs) {
		qdma_log_error("%s: num_regs is NULL, err:%d\n",
				__func__, -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	*buflen = 0;

	switch (ip_type) {
	case QDMA_SOFT_IP:
		/* Register-info dump is not provided for the soft IP */
		dump_len = 0;
		*num_regs = 0;
		break;
	case QDMA_VERSAL_HARD_IP:
		dump_len = qdma_s80_hard_reg_dump_buf_len();
		*num_regs = (int)((dump_len / REG_DUMP_SIZE_PER_LINE) - 1);
		break;
	case EQDMA_SOFT_IP:
		dump_len = eqdma_reg_dump_buf_len();
		*num_regs = (int)((dump_len / REG_DUMP_SIZE_PER_LINE) - 1);
		break;
	default:
		qdma_log_error("%s: Invalid version number, err = %d",
				__func__, -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	*buflen = (int)dump_len;
	return 0;
}
int qdma_acc_context_buf_len(void *dev_hndl,
enum qdma_ip_type ip_type, uint8_t st,
enum qdma_dev_q_type q_type, uint32_t *buflen)
......@@ -1292,6 +674,108 @@ int qdma_acc_context_buf_len(void *dev_hndl,
return rv;
}
/*
 * qdma_acc_get_num_config_regs() - get the number of config registers for
 * the given QDMA IP type.
 *
 * @dev_hndl:  device handle (validated, not dereferenced here)
 * @ip_type:   QDMA IP flavour selecting the register table
 * @num_regs:  out-parameter receiving the register count
 *
 * Return: 0 on success, -QDMA_ERR_INV_PARAM on bad arguments/IP type.
 */
int qdma_acc_get_num_config_regs(void *dev_hndl,
		enum qdma_ip_type ip_type, uint32_t *num_regs)
{
	int rv = 0;

	/* Validate BEFORE writing through num_regs: the previous code did
	 * "*num_regs = 0" first, crashing on a NULL out-pointer and also
	 * dereferencing before dev_hndl was checked. */
	if (!dev_hndl) {
		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
				__func__, -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if (!num_regs) {
		qdma_log_error("%s: num_regs is NULL, err:%d\n",
				__func__, -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	*num_regs = 0;

	switch (ip_type) {
	case QDMA_SOFT_IP:
		rv = qdma_get_config_num_regs();
		break;
	case QDMA_VERSAL_HARD_IP:
		rv = qdma_s80_hard_get_config_num_regs();
		break;
	case EQDMA_SOFT_IP:
		rv = eqdma_get_config_num_regs();
		break;
	default:
		qdma_log_error("%s: Invalid version number, err = %d",
				__func__, -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	*num_regs = rv;

	return 0;
}
/*****************************************************************************/
/**
* qdma_acc_get_config_regs() - Function to get qdma config registers.
*
* @dev_hndl: device handle
* @is_vf: Whether PF or VF
* @ip_type: QDMA IP Type
* @reg_data: pointer to register data to be filled
*
* Return: Length up-till the buffer is filled -success and < 0 - failure
*****************************************************************************/
int qdma_acc_get_config_regs(void *dev_hndl, uint8_t is_vf,
		enum qdma_ip_type ip_type,
		uint32_t *reg_data)
{
	struct xreg_info *reg_info;
	uint32_t count = 0;
	uint32_t num_regs;
	int rv = 0;

	if (!dev_hndl) {
		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
				__func__, -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	/* Config register read is supported only on the PF */
	if (is_vf) {
		qdma_log_error("%s: Get Config regs not valid for VF, err:%d\n",
				__func__,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if (reg_data == NULL) {
		qdma_log_error("%s: reg_data is NULL, err:%d\n",
				__func__,
				-QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	/* Select the register table matching the IP flavour */
	switch (ip_type) {
	case QDMA_SOFT_IP:
		num_regs = qdma_get_config_num_regs();
		reg_info = qdma_get_config_regs();
		break;
	case QDMA_VERSAL_HARD_IP:
		num_regs = qdma_s80_hard_get_config_num_regs();
		reg_info = qdma_s80_hard_get_config_regs();
		break;
	case EQDMA_SOFT_IP:
		num_regs = eqdma_get_config_num_regs();
		reg_info = eqdma_get_config_regs();
		break;
	default:
		qdma_log_error("%s: Invalid version number, err = %d",
				__func__, -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	/* Reads num_regs - 1 entries — presumably the table's last entry is
	 * a terminator/sentinel that must not be read.
	 * NOTE(review): confirm against the register table definitions; also
	 * note num_regs is unsigned, so a zero count would wrap here. */
	for (count = 0; count < num_regs - 1; count++) {
		reg_data[count] = qdma_reg_read(dev_hndl,
				reg_info[count].addr);
	}

	return rv;
}
/*****************************************************************************/
/**
* qdma_acc_dump_config_regs() - Function to get qdma config register dump in a
......@@ -1333,6 +817,58 @@ int qdma_acc_dump_config_regs(void *dev_hndl, uint8_t is_vf,
return rv;
}
/*****************************************************************************/
/**
 * qdma_acc_dump_reg_info() - Function to dump the fields of
 * a specified register.
 *
 * @dev_hndl: device handle
 * @ip_type: QDMA IP Type
 * @reg_addr: Register Address
 * @num_regs: Number of Registers
 * @buf : pointer to buffer to be filled
 * @buflen : Length of the buffer
 *
 * Return: Length up-till the buffer is filled -success and < 0 - failure
 *****************************************************************************/
int qdma_acc_dump_reg_info(void *dev_hndl,
		enum qdma_ip_type ip_type, uint32_t reg_addr,
		uint32_t num_regs, char *buf, uint32_t buflen)
{
	int ret = 0;

	/* Argument validation */
	if (!dev_hndl) {
		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
				__func__, -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	if (!buf || !buflen) {
		qdma_log_error("%s: Invalid input buffer, err = %d",
				__func__, -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	/* Route to the per-IP implementation; soft IP has no field-level
	 * register info, so the buffer just gets an explanatory message. */
	if (ip_type == QDMA_SOFT_IP) {
		QDMA_SNPRINTF_S(buf, buflen, DEBGFS_LINE_SZ,
			"QDMA reg field info not supported for QDMA_SOFT_IP\n");
	} else if (ip_type == QDMA_VERSAL_HARD_IP) {
		ret = qdma_s80_hard_dump_reg_info(dev_hndl, reg_addr,
				num_regs, buf, buflen);
	} else if (ip_type == EQDMA_SOFT_IP) {
		ret = eqdma_dump_reg_info(dev_hndl, reg_addr,
				num_regs, buf, buflen);
	} else {
		qdma_log_error("%s: Invalid version number, err = %d",
				__func__, -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	return ret;
}
/*****************************************************************************/
/**
* qdma_acc_dump_queue_context() - Function to get qdma queue context dump in a
......@@ -1596,6 +1132,8 @@ int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
return rv;
}
qdma_memset(hw_access, 0, sizeof(struct qdma_hw_access));
if (ip == EQDMA_IP)
hw_access->qdma_get_version = &eqdma_get_version;
else
......@@ -1639,8 +1177,10 @@ int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
hw_access->qdma_read_reg_list = &qdma_read_reg_list;
hw_access->qdma_dump_config_reg_list =
&qdma_soft_dump_config_reg_list;
hw_access->qdma_dump_reg_info = &qdma_dump_reg_info;
hw_access->mbox_base_pf = QDMA_OFFSET_MBOX_BASE_PF;
hw_access->mbox_base_vf = QDMA_OFFSET_MBOX_BASE_VF;
hw_access->qdma_max_errors = QDMA_ERRS_ALL;
rv = hw_access->qdma_get_version(dev_hndl, is_vf, &version_info);
if (rv != QDMA_SUCCESS)
......@@ -1684,6 +1224,12 @@ int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
&qdma_s80_hard_dump_config_regs;
hw_access->qdma_dump_intr_context =
&qdma_s80_hard_dump_intr_context;
hw_access->qdma_hw_error_enable =
&qdma_s80_hard_hw_error_enable;
hw_access->qdma_hw_error_process =
&qdma_s80_hard_hw_error_process;
hw_access->qdma_hw_get_error_name =
&qdma_s80_hard_hw_get_error_name;
hw_access->qdma_legacy_intr_conf = NULL;
hw_access->qdma_read_reg_list = &qdma_s80_hard_read_reg_list;
hw_access->qdma_dump_config_reg_list =
......@@ -1692,6 +1238,8 @@ int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
&qdma_s80_hard_dump_queue_context;
hw_access->qdma_read_dump_queue_context =
&qdma_s80_hard_read_dump_queue_context;
hw_access->qdma_dump_reg_info = &qdma_s80_hard_dump_reg_info;
hw_access->qdma_max_errors = QDMA_S80_HARD_ERRS_ALL;
}
if (version_info.ip_type == EQDMA_SOFT_IP) {
......@@ -1703,6 +1251,7 @@ int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
&eqdma_indirect_intr_ctx_conf;
hw_access->qdma_dump_config_regs = &eqdma_dump_config_regs;
hw_access->qdma_dump_intr_context = &eqdma_dump_intr_context;
hw_access->qdma_hw_error_enable = &eqdma_hw_error_enable;
hw_access->qdma_hw_error_process = &eqdma_hw_error_process;
hw_access->qdma_hw_get_error_name = &eqdma_hw_get_error_name;
hw_access->qdma_hw_ctx_conf = &eqdma_hw_ctx_conf;
......@@ -1719,7 +1268,7 @@ int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
&eqdma_dump_queue_context;
hw_access->qdma_read_dump_queue_context =
&eqdma_read_dump_queue_context;
hw_access->qdma_dump_reg_info = &eqdma_dump_reg_info;
/* All CSR and Queue space register belongs to Window 0.
* Mailbox and MSIX register belongs to Window 1
* Therefore, Mailbox offsets are different for EQDMA
......@@ -1728,6 +1277,7 @@ int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
*/
hw_access->mbox_base_pf = EQDMA_OFFSET_MBOX_BASE_PF;
hw_access->mbox_base_vf = EQDMA_OFFSET_MBOX_BASE_VF;
hw_access->qdma_max_errors = EQDMA_ERRS_ALL;
}
return QDMA_SUCCESS;
......
......@@ -14,21 +14,65 @@
* under the License.
*/
#ifndef QDMA_ACCESS_COMMON_H_
#define QDMA_ACCESS_COMMON_H_
#include "qdma_access_export.h"
#include "qdma_access_errors.h"
#ifndef __QDMA_ACCESS_COMMON_H_
#define __QDMA_ACCESS_COMMON_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "qdma_access_export.h"
#include "qdma_access_errors.h"
/* QDMA HW version string array length */
#define QDMA_HW_VERSION_STRING_LEN 32
#define ENABLE_INIT_CTXT_MEMORY 1
#ifdef GCC_COMPILER
/*
 * get_trailing_zeros() - index of the least-significant set bit of x.
 * Returns 63 for x == 0 so that both build paths agree (the builtin path
 * previously produced 0xFFFFFFFF for 0, since __builtin_ffsll(0) == 0,
 * while the portable path below returns 63).
 */
static inline uint32_t get_trailing_zeros(uint64_t x)
{
	/* __builtin_ffsll() is 1-based and returns 0 when no bit is set */
	return x ? (uint32_t)(__builtin_ffsll(x) - 1) : 63;
}
#else
/*
 * get_trailing_zeros() - portable binary-search fallback; same contract
 * as the builtin path (returns 63 for value == 0).
 */
static inline uint32_t get_trailing_zeros(uint64_t value)
{
	uint32_t pos = 0;

	if ((value & 0xffffffff) == 0) {
		pos += 32;
		value >>= 32;
	}
	if ((value & 0xffff) == 0) {
		pos += 16;
		value >>= 16;
	}
	if ((value & 0xff) == 0) {
		pos += 8;
		value >>= 8;
	}
	if ((value & 0xf) == 0) {
		pos += 4;
		value >>= 4;
	}
	if ((value & 0x3) == 0) {
		pos += 2;
		value >>= 2;
	}
	if ((value & 0x1) == 0)
		pos += 1;

	return pos;
}
#endif

/* Field helpers. Arguments are fully parenthesized: without parentheses an
 * expression argument such as (a | b) binds incorrectly against << and &. */
#define FIELD_SHIFT(mask)       get_trailing_zeros(mask)
#define FIELD_SET(mask, val)    (((val) << FIELD_SHIFT(mask)) & (mask))
#define FIELD_GET(mask, reg)    (((reg) & (mask)) >> FIELD_SHIFT(mask))
/* CSR Default values */
#define DEFAULT_MAX_DSC_FETCH 6
#define DEFAULT_WRB_INT QDMA_WRB_INTERVAL_128
......@@ -50,6 +94,66 @@ extern "C" {
*/
#define QDMA_NUM_DATA_VEC_FOR_INTR_CXT 1
enum ind_ctxt_cmd_op {
QDMA_CTXT_CMD_CLR,
QDMA_CTXT_CMD_WR,
QDMA_CTXT_CMD_RD,
QDMA_CTXT_CMD_INV
};
enum ind_ctxt_cmd_sel {
QDMA_CTXT_SEL_SW_C2H,
QDMA_CTXT_SEL_SW_H2C,
QDMA_CTXT_SEL_HW_C2H,
QDMA_CTXT_SEL_HW_H2C,
QDMA_CTXT_SEL_CR_C2H,
QDMA_CTXT_SEL_CR_H2C,
QDMA_CTXT_SEL_CMPT,
QDMA_CTXT_SEL_PFTCH,
QDMA_CTXT_SEL_INT_COAL,
QDMA_CTXT_SEL_PASID_RAM_LOW,
QDMA_CTXT_SEL_PASID_RAM_HIGH,
QDMA_CTXT_SEL_TIMER,
QDMA_CTXT_SEL_FMAP,
};
/* polling a register */
#define QDMA_REG_POLL_DFLT_INTERVAL_US 10 /* 10us per poll */
#define QDMA_REG_POLL_DFLT_TIMEOUT_US (500*1000) /* 500ms */
/** Constants */
#define QDMA_NUM_RING_SIZES 16
#define QDMA_NUM_C2H_TIMERS 16
#define QDMA_NUM_C2H_BUFFER_SIZES 16
#define QDMA_NUM_C2H_COUNTERS 16
#define QDMA_MM_CONTROL_RUN 0x1
#define QDMA_MM_CONTROL_STEP 0x100
#define QDMA_MAGIC_NUMBER 0x1fd3
#define QDMA_PIDX_STEP 0x10
#define QDMA_CMPT_CIDX_STEP 0x10
#define QDMA_INT_CIDX_STEP 0x10
/** QDMA_IND_REG_SEL_PFTCH */
#define QDMA_PFTCH_CTXT_SW_CRDT_GET_H_MASK GENMASK(15, 3)
#define QDMA_PFTCH_CTXT_SW_CRDT_GET_L_MASK GENMASK(2, 0)
/** QDMA_IND_REG_SEL_CMPT */
#define QDMA_COMPL_CTXT_BADDR_GET_H_MASK GENMASK_ULL(63, 38)
#define QDMA_COMPL_CTXT_BADDR_GET_L_MASK GENMASK_ULL(37, 12)
#define QDMA_COMPL_CTXT_PIDX_GET_H_MASK GENMASK(15, 4)
#define QDMA_COMPL_CTXT_PIDX_GET_L_MASK GENMASK(3, 0)
#define QDMA_INTR_CTXT_BADDR_GET_H_MASK GENMASK_ULL(63, 61)
#define QDMA_INTR_CTXT_BADDR_GET_M_MASK GENMASK_ULL(60, 29)
#define QDMA_INTR_CTXT_BADDR_GET_L_MASK GENMASK_ULL(28, 12)
#define QDMA_GLBL2_MM_CMPT_EN_MASK BIT(2)
#define QDMA_GLBL2_FLR_PRESENT_MASK BIT(1)
#define QDMA_GLBL2_MAILBOX_EN_MASK BIT(0)
#define QDMA_REG_IND_CTXT_REG_COUNT 8
/* ------------------------ indirect register context fields -----------*/
union qdma_ind_ctxt_cmd {
uint32_t word;
......@@ -294,8 +398,6 @@ struct qdma_descq_cmpt_ctxt {
uint32_t pasid;
/** @pasid_en - PASID Enable */
uint8_t pasid_en;
/** @virtio_dsc_base - Virtio Desc Base Address */
uint8_t base_addr;
/** @vio_eop - Virtio End-of-packet */
uint8_t vio_eop;
/** @sh_cmpt - Shared Completion Queue */
......@@ -518,11 +620,16 @@ void qdma_memset(void *to, uint8_t val, uint32_t size);
int qdma_acc_reg_dump_buf_len(void *dev_hndl,
enum qdma_ip_type ip_type, int *buflen);
int qdma_acc_reg_info_len(void *dev_hndl,
enum qdma_ip_type ip_type, int *buflen, int *num_regs);
int qdma_acc_context_buf_len(void *dev_hndl,
enum qdma_ip_type ip_type, uint8_t st,
enum qdma_dev_q_type q_type, uint32_t *buflen);
int qdma_acc_get_num_config_regs(void *dev_hndl,
enum qdma_ip_type ip_type, uint32_t *num_regs);
/*
* struct qdma_hw_access - Structure to hold HW access function pointers
*/
......@@ -587,11 +694,15 @@ struct qdma_hw_access {
uint8_t err_intr_index);
int (*qdma_hw_error_intr_rearm)(void *dev_hndl);
int (*qdma_hw_error_enable)(void *dev_hndl,
enum qdma_error_idx err_idx);
const char *(*qdma_hw_get_error_name)(enum qdma_error_idx err_idx);
uint32_t err_idx);
const char *(*qdma_hw_get_error_name)(uint32_t err_idx);
int (*qdma_hw_error_process)(void *dev_hndl);
int (*qdma_dump_config_regs)(void *dev_hndl, uint8_t is_vf, char *buf,
uint32_t buflen);
int (*qdma_dump_reg_info)(void *dev_hndl, uint32_t reg_addr,
uint32_t num_regs,
char *buf,
uint32_t buflen);
int (*qdma_dump_queue_context)(void *dev_hndl,
uint8_t st,
enum qdma_dev_q_type q_type,
......@@ -622,6 +733,7 @@ struct qdma_hw_access {
char *buf, uint32_t buflen);
uint32_t mbox_base_pf;
uint32_t mbox_base_vf;
uint32_t qdma_max_errors;
};
/*****************************************************************************/
......@@ -643,6 +755,21 @@ struct qdma_hw_access {
int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
struct qdma_hw_access *hw_access);
/*****************************************************************************/
/**
* qdma_acc_dump_config_regs() - Function to get qdma config registers
*
* @dev_hndl: device handle
* @is_vf: Whether PF or VF
* @ip_type: QDMA IP Type
* @reg_data: pointer to register data to be filled
*
* Return: Length up-till the buffer is filled -success and < 0 - failure
*****************************************************************************/
int qdma_acc_get_config_regs(void *dev_hndl, uint8_t is_vf,
enum qdma_ip_type ip_type,
uint32_t *reg_data);
/*****************************************************************************/
/**
* qdma_acc_dump_config_regs() - Function to get qdma config register dump in a
......@@ -660,6 +787,23 @@ int qdma_acc_dump_config_regs(void *dev_hndl, uint8_t is_vf,
enum qdma_ip_type ip_type,
char *buf, uint32_t buflen);
/*****************************************************************************/
/**
* qdma_acc_dump_reg_info() - Function to get qdma reg info in a buffer
*
* @dev_hndl: device handle
* @ip_type: QDMA IP Type
* @reg_addr: Register Address
* @num_regs: Number of Registers
* @buf : pointer to buffer to be filled
* @buflen : Length of the buffer
*
* Return: Length up-till the buffer is filled -success and < 0 - failure
*****************************************************************************/
int qdma_acc_dump_reg_info(void *dev_hndl,
enum qdma_ip_type ip_type, uint32_t reg_addr,
uint32_t num_regs, char *buf, uint32_t buflen);
/*****************************************************************************/
/**
* qdma_acc_dump_queue_context() - Function to dump qdma queue context data in a
......
......@@ -14,9 +14,12 @@
* under the License.
*/
#ifndef QDMA_ACCESS_ERRORS_H_
#define QDMA_ACCESS_ERRORS_H_
#ifndef __QDMA_ACCESS_ERRORS_H_
#define __QDMA_ACCESS_ERRORS_H_
#ifdef __cplusplus
extern "C" {
#endif
/**
* DOC: QDMA common library error codes definitions
......@@ -62,4 +65,8 @@ enum qdma_access_error_codes {
QDMA_ERR_MBOX_ALL_ZERO_MSG, /* 25 */
};
#endif /* QDMA_ACCESS_H_ */
#ifdef __cplusplus
}
#endif
#endif /* __QDMA_ACCESS_ERRORS_H_ */
......@@ -14,15 +14,15 @@
* under the License.
*/
#ifndef QDMA_ACCESS_EXPORT_H_
#define QDMA_ACCESS_EXPORT_H_
#include "qdma_platform_env.h"
#ifndef __QDMA_ACCESS_EXPORT_H_
#define __QDMA_ACCESS_EXPORT_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "qdma_platform_env.h"
/** QDMA Global CSR array size */
#define QDMA_GLOBAL_CSR_ARRAY_SZ 16
......@@ -44,6 +44,12 @@ struct qdma_dev_attributes {
uint8_t mm_cmpt_en:1;
/** @mailbox_en - Mailbox supported or not? */
uint8_t mailbox_en:1;
/** @debug_mode - Debug mode is enabled/disabled for IP */
uint8_t debug_mode:1;
/** @desc_eng_mode - Descriptor Engine mode:
* Internal only/Bypass only/Internal & Bypass
*/
uint8_t desc_eng_mode:2;
/** @mm_channel_max - Num of MM channels */
uint8_t mm_channel_max;
......@@ -202,6 +208,8 @@ enum qdma_vivado_release_id {
QDMA_VIVADO_2019_2,
/** @QDMA_VIVADO_2020_1 - Vivado version 2020.1 */
QDMA_VIVADO_2020_1,
/** @QDMA_VIVADO_2020_2 - Vivado version 2020.2 */
QDMA_VIVADO_2020_2,
/** @QDMA_VIVADO_NONE - Not a valid Vivado version*/
QDMA_VIVADO_NONE
};
......@@ -229,137 +237,19 @@ enum qdma_device_type {
QDMA_DEVICE_NONE
};
/**
* enum qdma_error_idx - qdma errors
*/
enum qdma_error_idx {
/* Descriptor errors */
QDMA_DSC_ERR_POISON,
QDMA_DSC_ERR_UR_CA,
QDMA_DSC_ERR_PARAM,
QDMA_DSC_ERR_ADDR,
QDMA_DSC_ERR_TAG,
QDMA_DSC_ERR_FLR,
QDMA_DSC_ERR_TIMEOUT,
QDMA_DSC_ERR_DAT_POISON,
QDMA_DSC_ERR_FLR_CANCEL,
QDMA_DSC_ERR_DMA,
QDMA_DSC_ERR_DSC,
QDMA_DSC_ERR_RQ_CANCEL,
QDMA_DSC_ERR_DBE,
QDMA_DSC_ERR_SBE,
QDMA_DSC_ERR_ALL,
/* TRQ Errors */
QDMA_TRQ_ERR_UNMAPPED,
QDMA_TRQ_ERR_QID_RANGE,
QDMA_TRQ_ERR_VF_ACCESS,
QDMA_TRQ_ERR_TCP_TIMEOUT,
QDMA_TRQ_ERR_ALL,
/* C2H Errors */
QDMA_ST_C2H_ERR_MTY_MISMATCH,
QDMA_ST_C2H_ERR_LEN_MISMATCH,
QDMA_ST_C2H_ERR_QID_MISMATCH,
QDMA_ST_C2H_ERR_DESC_RSP_ERR,
QDMA_ST_C2H_ERR_ENG_WPL_DATA_PAR_ERR,
QDMA_ST_C2H_ERR_MSI_INT_FAIL,
QDMA_ST_C2H_ERR_ERR_DESC_CNT,
QDMA_ST_C2H_ERR_PORTID_CTXT_MISMATCH,
QDMA_ST_C2H_ERR_PORTID_BYP_IN_MISMATCH,
QDMA_ST_C2H_ERR_CMPT_INV_Q_ERR,
QDMA_ST_C2H_ERR_CMPT_QFULL_ERR,
QDMA_ST_C2H_ERR_CMPT_CIDX_ERR,
QDMA_ST_C2H_ERR_CMPT_PRTY_ERR,
QDMA_ST_C2H_ERR_ALL,
/* Fatal Errors */
QDMA_ST_FATAL_ERR_MTY_MISMATCH,
QDMA_ST_FATAL_ERR_LEN_MISMATCH,
QDMA_ST_FATAL_ERR_QID_MISMATCH,
QDMA_ST_FATAL_ERR_TIMER_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_PFCH_II_RAM_RDBE,
QDMA_ST_FATAL_ERR_CMPT_CTXT_RAM_RDBE,
QDMA_ST_FATAL_ERR_PFCH_CTXT_RAM_RDBE,
QDMA_ST_FATAL_ERR_DESC_REQ_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_INT_CTXT_RAM_RDBE,
QDMA_ST_FATAL_ERR_CMPT_COAL_DATA_RAM_RDBE,
QDMA_ST_FATAL_ERR_TUSER_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_QID_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_PAYLOAD_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_WPL_DATA_PAR,
QDMA_ST_FATAL_ERR_ALL,
/* H2C Errors */
QDMA_ST_H2C_ERR_ZERO_LEN_DESC,
QDMA_ST_H2C_ERR_CSI_MOP,
QDMA_ST_H2C_ERR_NO_DMA_DSC,
QDMA_ST_H2C_ERR_SBE,
QDMA_ST_H2C_ERR_DBE,
QDMA_ST_H2C_ERR_ALL,
/* Single bit errors */
QDMA_SBE_ERR_MI_H2C0_DAT,
QDMA_SBE_ERR_MI_C2H0_DAT,
QDMA_SBE_ERR_H2C_RD_BRG_DAT,
QDMA_SBE_ERR_H2C_WR_BRG_DAT,
QDMA_SBE_ERR_C2H_RD_BRG_DAT,
QDMA_SBE_ERR_C2H_WR_BRG_DAT,
QDMA_SBE_ERR_FUNC_MAP,
QDMA_SBE_ERR_DSC_HW_CTXT,
QDMA_SBE_ERR_DSC_CRD_RCV,
QDMA_SBE_ERR_DSC_SW_CTXT,
QDMA_SBE_ERR_DSC_CPLI,
QDMA_SBE_ERR_DSC_CPLD,
QDMA_SBE_ERR_PASID_CTXT_RAM,
QDMA_SBE_ERR_TIMER_FIFO_RAM,
QDMA_SBE_ERR_PAYLOAD_FIFO_RAM,
QDMA_SBE_ERR_QID_FIFO_RAM,
QDMA_SBE_ERR_TUSER_FIFO_RAM,
QDMA_SBE_ERR_WRB_COAL_DATA_RAM,
QDMA_SBE_ERR_INT_QID2VEC_RAM,
QDMA_SBE_ERR_INT_CTXT_RAM,
QDMA_SBE_ERR_DESC_REQ_FIFO_RAM,
QDMA_SBE_ERR_PFCH_CTXT_RAM,
QDMA_SBE_ERR_WRB_CTXT_RAM,
QDMA_SBE_ERR_PFCH_LL_RAM,
QDMA_SBE_ERR_H2C_PEND_FIFO,
QDMA_SBE_ERR_ALL,
/* Double bit Errors */
QDMA_DBE_ERR_MI_H2C0_DAT,
QDMA_DBE_ERR_MI_C2H0_DAT,
QDMA_DBE_ERR_H2C_RD_BRG_DAT,
QDMA_DBE_ERR_H2C_WR_BRG_DAT,
QDMA_DBE_ERR_C2H_RD_BRG_DAT,
QDMA_DBE_ERR_C2H_WR_BRG_DAT,
QDMA_DBE_ERR_FUNC_MAP,
QDMA_DBE_ERR_DSC_HW_CTXT,
QDMA_DBE_ERR_DSC_CRD_RCV,
QDMA_DBE_ERR_DSC_SW_CTXT,
QDMA_DBE_ERR_DSC_CPLI,
QDMA_DBE_ERR_DSC_CPLD,
QDMA_DBE_ERR_PASID_CTXT_RAM,
QDMA_DBE_ERR_TIMER_FIFO_RAM,
QDMA_DBE_ERR_PAYLOAD_FIFO_RAM,
QDMA_DBE_ERR_QID_FIFO_RAM,
QDMA_DBE_ERR_TUSER_FIFO_RAM,
QDMA_DBE_ERR_WRB_COAL_DATA_RAM,
QDMA_DBE_ERR_INT_QID2VEC_RAM,
QDMA_DBE_ERR_INT_CTXT_RAM,
QDMA_DBE_ERR_DESC_REQ_FIFO_RAM,
QDMA_DBE_ERR_PFCH_CTXT_RAM,
QDMA_DBE_ERR_WRB_CTXT_RAM,
QDMA_DBE_ERR_PFCH_LL_RAM,
QDMA_DBE_ERR_H2C_PEND_FIFO,
QDMA_DBE_ERR_ALL,
QDMA_ERRS_ALL
enum qdma_desc_eng_mode {
/** @QDMA_DESC_ENG_INTERNAL_BYPASS - Internal and Bypass mode */
QDMA_DESC_ENG_INTERNAL_BYPASS,
/** @QDMA_DESC_ENG_BYPASS_ONLY - Only Bypass mode */
QDMA_DESC_ENG_BYPASS_ONLY,
/** @QDMA_DESC_ENG_INTERNAL_ONLY - Only Internal mode */
QDMA_DESC_ENG_INTERNAL_ONLY,
/** @QDMA_DESC_ENG_MODE_MAX - Max of desc engine modes */
QDMA_DESC_ENG_MODE_MAX
};
#ifdef __cplusplus
}
#endif
#endif /* QDMA_ACCESS_EXPORT_H_ */
#endif /* __QDMA_ACCESS_EXPORT_H_ */
......@@ -14,12 +14,12 @@
* under the License.
*/
#ifndef QDMA_VERSION_H_
#define QDMA_VERSION_H_
#ifndef __QDMA_ACCESS_VERSION_H_
#define __QDMA_ACCESS_VERSION_H_
#define QDMA_VERSION_MAJOR 2020
#define QDMA_VERSION_MINOR 1
#define QDMA_VERSION_MINOR 2
#define QDMA_VERSION_PATCH 0
#define QDMA_VERSION_STR \
......@@ -33,4 +33,4 @@
QDMA_VERSION_PATCH)
#endif /* COMMON_QDMA_VERSION_H_ */
#endif /* __QDMA_ACCESS_VERSION_H_ */
......@@ -14,8 +14,12 @@
* under the License.
*/
#ifndef QDMA_LIST_H_
#define QDMA_LIST_H_
#ifndef __QDMA_LIST_H_
#define __QDMA_LIST_H_
#ifdef __cplusplus
extern "C" {
#endif
/**
* DOC: QDMA common library provided list implementation definitions
......@@ -114,4 +118,8 @@ void qdma_list_insert_after(struct qdma_list_head *new_node,
*****************************************************************************/
void qdma_list_del(struct qdma_list_head *node);
#endif /* QDMA_LIST_H_ */
#ifdef __cplusplus
}
#endif
#endif /* __QDMA_LIST_H_ */
......@@ -14,11 +14,8 @@
* under the License.
*/
#ifndef LIBQDMA_QDMA_PLATFORM_H_
#define LIBQDMA_QDMA_PLATFORM_H_
#include "qdma_access_common.h"
#include "qdma_platform_env.h"
#ifndef __QDMA_PLATFORM_H_
#define __QDMA_PLATFORM_H_
#ifdef __cplusplus
extern "C" {
......@@ -31,6 +28,8 @@ extern "C" {
* required to be implemented by platform specific drivers.
*/
#include "qdma_access_common.h"
/*****************************************************************************/
/**
* qdma_calloc(): allocate memory and initialize with 0
......@@ -130,28 +129,6 @@ int qdma_reg_access_release(void *dev_hndl);
*****************************************************************************/
void qdma_udelay(uint32_t delay_usec);
/*****************************************************************************/
/**
* qdma_hw_error_handler() - function to handle the hardware errors
*
* @dev_hndl: device handle
* @err_idx: error index
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
void qdma_hw_error_handler(void *dev_hndl, enum qdma_error_idx err_idx);
/*****************************************************************************/
/**
* qdma_get_device_attr() - function to get the device attributes
*
* @dev_hndl: device handle
* @dev_cap: pointer to hold the device capabilities
*
* Return: 0 - success and < 0 - failure
*****************************************************************************/
void qdma_get_device_attr(void *dev_hndl, struct qdma_dev_attributes **dev_cap);
/*****************************************************************************/
/**
* qdma_get_hw_access() - function to get the qdma_hw_access
......@@ -188,4 +165,4 @@ int qdma_get_err_code(int acc_err_code);
}
#endif
#endif /* LIBQDMA_QDMA_PLATFORM_H_ */
#endif /* __QDMA_PLATFORM_H_ */
......@@ -17,11 +17,16 @@
#ifndef __QDMA_REG_DUMP_H__
#define __QDMA_REG_DUMP_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "qdma_platform_env.h"
#include "qdma_access_common.h"
#define DEBUGFS_DEV_INFO_SZ (300)
#define QDMA_REG_NAME_LENGTH 64
#define DEBUGFS_INTR_CNTX_SZ (2048 * 2)
#define DBGFS_ERR_BUFLEN (64)
#define DEBGFS_LINE_SZ (81)
......@@ -50,19 +55,28 @@
(st_en << QDMA_ST_EN_SHIFT) | \
(mailbox_en << QDMA_MAILBOX_EN_SHIFT))
struct regfield_info {
const char *field_name;
uint32_t field_mask;
};
struct xreg_info {
char name[32];
const char *name;
uint32_t addr;
uint32_t repeat;
uint32_t step;
uint8_t shift;
uint8_t len;
uint8_t is_debug_reg;
uint8_t mode;
uint8_t read_type;
uint8_t num_bitfields;
struct regfield_info *bitfields;
};
extern struct xreg_info qdma_config_regs[MAX_QDMA_CFG_REGS];
extern struct xreg_info qdma_cpm_config_regs[MAX_QDMA_CFG_REGS];
#ifdef __cplusplus
}
#endif
#endif
......@@ -14,8 +14,8 @@
* under the License.
*/
#ifndef QDMA_RESOURCE_MGMT_H_
#define QDMA_RESOURCE_MGMT_H_
#ifndef __QDMA_RESOURCE_MGMT_H_
#define __QDMA_RESOURCE_MGMT_H_
#ifdef __cplusplus
extern "C" {
......@@ -27,6 +27,7 @@ extern "C" {
* Header file *qdma_resource_mgmt.h* defines data structures and function
* signatures exported for QDMA queue management.
*/
#include "qdma_platform_env.h"
#include "qdma_access_export.h"
......@@ -209,4 +210,4 @@ int qdma_get_device_active_queue_count(uint32_t dma_device_index,
}
#endif
#endif /* LIBQDMA_QDMA_RESOURCE_MGMT_H_ */
#endif /* __QDMA_RESOURCE_MGMT_H_ */
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -14,15 +14,152 @@
* under the License.
*/
#ifndef QDMA_S80_HARD_ACCESS_H_
#define QDMA_S80_HARD_ACCESS_H_
#include "qdma_access_common.h"
#ifndef __QDMA_S80_HARD_ACCESS_H_
#define __QDMA_S80_HARD_ACCESS_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "qdma_platform.h"
/**
 * enum qdma_s80_hard_error_idx - QDMA S80 hard IP errors
 */
enum qdma_s80_hard_error_idx {
/* Descriptor errors */
QDMA_S80_HARD_DSC_ERR_POISON,
QDMA_S80_HARD_DSC_ERR_UR_CA,
QDMA_S80_HARD_DSC_ERR_PARAM,
QDMA_S80_HARD_DSC_ERR_ADDR,
QDMA_S80_HARD_DSC_ERR_TAG,
QDMA_S80_HARD_DSC_ERR_FLR,
QDMA_S80_HARD_DSC_ERR_TIMEOUT,
QDMA_S80_HARD_DSC_ERR_DAT_POISON,
QDMA_S80_HARD_DSC_ERR_FLR_CANCEL,
QDMA_S80_HARD_DSC_ERR_DMA,
QDMA_S80_HARD_DSC_ERR_DSC,
QDMA_S80_HARD_DSC_ERR_RQ_CANCEL,
QDMA_S80_HARD_DSC_ERR_DBE,
QDMA_S80_HARD_DSC_ERR_SBE,
QDMA_S80_HARD_DSC_ERR_ALL,
/* TRQ Errors */
QDMA_S80_HARD_TRQ_ERR_UNMAPPED,
QDMA_S80_HARD_TRQ_ERR_QID_RANGE,
QDMA_S80_HARD_TRQ_ERR_VF_ACCESS_ERR,
QDMA_S80_HARD_TRQ_ERR_TCP_TIMEOUT,
QDMA_S80_HARD_TRQ_ERR_ALL,
/* C2H Errors */
QDMA_S80_HARD_ST_C2H_ERR_MTY_MISMATCH,
QDMA_S80_HARD_ST_C2H_ERR_LEN_MISMATCH,
QDMA_S80_HARD_ST_C2H_ERR_QID_MISMATCH,
QDMA_S80_HARD_ST_C2H_ERR_DESC_RSP_ERR,
QDMA_S80_HARD_ST_C2H_ERR_ENG_WPL_DATA_PAR_ERR,
QDMA_S80_HARD_ST_C2H_ERR_MSI_INT_FAIL,
QDMA_S80_HARD_ST_C2H_ERR_ERR_DESC_CNT,
QDMA_S80_HARD_ST_C2H_ERR_PORTID_CTXT_MISMATCH,
QDMA_S80_HARD_ST_C2H_ERR_PORTID_BYP_IN_MISMATCH,
QDMA_S80_HARD_ST_C2H_ERR_WRB_INV_Q_ERR,
QDMA_S80_HARD_ST_C2H_ERR_WRB_QFULL_ERR,
QDMA_S80_HARD_ST_C2H_ERR_WRB_CIDX_ERR,
QDMA_S80_HARD_ST_C2H_ERR_WRB_PRTY_ERR,
QDMA_S80_HARD_ST_C2H_ERR_ALL,
/* Fatal Errors */
QDMA_S80_HARD_ST_FATAL_ERR_MTY_MISMATCH,
QDMA_S80_HARD_ST_FATAL_ERR_LEN_MISMATCH,
QDMA_S80_HARD_ST_FATAL_ERR_QID_MISMATCH,
QDMA_S80_HARD_ST_FATAL_ERR_TIMER_FIFO_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_PFCH_II_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_WRB_CTXT_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_PFCH_CTXT_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_DESC_REQ_FIFO_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_INT_CTXT_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_INT_QID2VEC_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_WRB_COAL_DATA_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_TUSER_FIFO_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_QID_FIFO_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_PAYLOAD_FIFO_RAM_RDBE,
QDMA_S80_HARD_ST_FATAL_ERR_WPL_DATA_PAR_ERR,
QDMA_S80_HARD_ST_FATAL_ERR_ALL,
/* H2C Errors */
QDMA_S80_HARD_ST_H2C_ERR_ZERO_LEN_DESC_ERR,
QDMA_S80_HARD_ST_H2C_ERR_SDI_MRKR_REQ_MOP_ERR,
QDMA_S80_HARD_ST_H2C_ERR_NO_DMA_DSC,
QDMA_S80_HARD_ST_H2C_ERR_DBE,
QDMA_S80_HARD_ST_H2C_ERR_SBE,
QDMA_S80_HARD_ST_H2C_ERR_ALL,
/* Single bit errors */
QDMA_S80_HARD_SBE_ERR_MI_H2C0_DAT,
QDMA_S80_HARD_SBE_ERR_MI_C2H0_DAT,
QDMA_S80_HARD_SBE_ERR_H2C_RD_BRG_DAT,
QDMA_S80_HARD_SBE_ERR_H2C_WR_BRG_DAT,
QDMA_S80_HARD_SBE_ERR_C2H_RD_BRG_DAT,
QDMA_S80_HARD_SBE_ERR_C2H_WR_BRG_DAT,
QDMA_S80_HARD_SBE_ERR_FUNC_MAP,
QDMA_S80_HARD_SBE_ERR_DSC_HW_CTXT,
QDMA_S80_HARD_SBE_ERR_DSC_CRD_RCV,
QDMA_S80_HARD_SBE_ERR_DSC_SW_CTXT,
QDMA_S80_HARD_SBE_ERR_DSC_CPLI,
QDMA_S80_HARD_SBE_ERR_DSC_CPLD,
QDMA_S80_HARD_SBE_ERR_PASID_CTXT_RAM,
QDMA_S80_HARD_SBE_ERR_TIMER_FIFO_RAM,
QDMA_S80_HARD_SBE_ERR_PAYLOAD_FIFO_RAM,
QDMA_S80_HARD_SBE_ERR_QID_FIFO_RAM,
QDMA_S80_HARD_SBE_ERR_TUSER_FIFO_RAM,
QDMA_S80_HARD_SBE_ERR_WRB_COAL_DATA_RAM,
QDMA_S80_HARD_SBE_ERR_INT_QID2VEC_RAM,
QDMA_S80_HARD_SBE_ERR_INT_CTXT_RAM,
QDMA_S80_HARD_SBE_ERR_DESC_REQ_FIFO_RAM,
QDMA_S80_HARD_SBE_ERR_PFCH_CTXT_RAM,
QDMA_S80_HARD_SBE_ERR_WRB_CTXT_RAM,
QDMA_S80_HARD_SBE_ERR_PFCH_LL_RAM,
QDMA_S80_HARD_SBE_ERR_ALL,
/* Double bit Errors */
QDMA_S80_HARD_DBE_ERR_MI_H2C0_DAT,
QDMA_S80_HARD_DBE_ERR_MI_C2H0_DAT,
QDMA_S80_HARD_DBE_ERR_H2C_RD_BRG_DAT,
QDMA_S80_HARD_DBE_ERR_H2C_WR_BRG_DAT,
QDMA_S80_HARD_DBE_ERR_C2H_RD_BRG_DAT,
QDMA_S80_HARD_DBE_ERR_C2H_WR_BRG_DAT,
QDMA_S80_HARD_DBE_ERR_FUNC_MAP,
QDMA_S80_HARD_DBE_ERR_DSC_HW_CTXT,
QDMA_S80_HARD_DBE_ERR_DSC_CRD_RCV,
QDMA_S80_HARD_DBE_ERR_DSC_SW_CTXT,
QDMA_S80_HARD_DBE_ERR_DSC_CPLI,
QDMA_S80_HARD_DBE_ERR_DSC_CPLD,
QDMA_S80_HARD_DBE_ERR_PASID_CTXT_RAM,
QDMA_S80_HARD_DBE_ERR_TIMER_FIFO_RAM,
QDMA_S80_HARD_DBE_ERR_PAYLOAD_FIFO_RAM,
QDMA_S80_HARD_DBE_ERR_QID_FIFO_RAM,
QDMA_S80_HARD_DBE_ERR_WRB_COAL_DATA_RAM,
QDMA_S80_HARD_DBE_ERR_INT_QID2VEC_RAM,
QDMA_S80_HARD_DBE_ERR_INT_CTXT_RAM,
QDMA_S80_HARD_DBE_ERR_DESC_REQ_FIFO_RAM,
QDMA_S80_HARD_DBE_ERR_PFCH_CTXT_RAM,
QDMA_S80_HARD_DBE_ERR_WRB_CTXT_RAM,
QDMA_S80_HARD_DBE_ERR_PFCH_LL_RAM,
QDMA_S80_HARD_DBE_ERR_ALL,
QDMA_S80_HARD_ERRS_ALL
};
/**
 * struct qdma_s80_hard_hw_err_info - lookup entry describing one S80-hard
 * hardware error: its identifying index, printable name, the registers
 * used to mask/report it, and an optional processing callback.
 */
struct qdma_s80_hard_hw_err_info {
enum qdma_s80_hard_error_idx idx; /* error identifier (index into the error table) */
const char *err_name; /* human-readable error name for logging */
uint32_t mask_reg_addr; /* address of the error mask/enable register */
uint32_t stat_reg_addr; /* address of the error status register */
uint32_t leaf_err_mask; /* bit(s) for this error in the leaf status register */
uint32_t global_err_mask; /* bit(s) for this error group in the global status register */
void (*qdma_s80_hard_hw_err_process)(void *dev_hndl); /* optional per-error handler; takes the opaque device handle */
};
int qdma_s80_hard_init_ctxt_memory(void *dev_hndl);
int qdma_s80_hard_qid2vec_conf(void *dev_hndl, uint8_t c2h, uint16_t hw_qid,
......@@ -77,11 +214,15 @@ int qdma_s80_hard_get_device_attributes(void *dev_hndl,
uint32_t qdma_s80_hard_reg_dump_buf_len(void);
int qdma_s80_hard_context_buf_len(uint8_t st,
enum qdma_dev_q_type q_type, uint32_t *buflen);
enum qdma_dev_q_type q_type, uint32_t *req_buflen);
int qdma_s80_hard_dump_config_regs(void *dev_hndl, uint8_t is_vf,
char *buf, uint32_t buflen);
int qdma_s80_hard_hw_error_process(void *dev_hndl);
const char *qdma_s80_hard_hw_get_error_name(uint32_t err_idx);
int qdma_s80_hard_hw_error_enable(void *dev_hndl, uint32_t err_idx);
int qdma_s80_hard_dump_queue_context(void *dev_hndl,
uint8_t st,
enum qdma_dev_q_type q_type,
......@@ -100,17 +241,38 @@ int qdma_s80_hard_read_dump_queue_context(void *dev_hndl,
char *buf, uint32_t buflen);
int qdma_s80_hard_dump_config_reg_list(void *dev_hndl,
uint32_t num_regs,
uint32_t total_regs,
struct qdma_reg_data *reg_list,
char *buf, uint32_t buflen);
int qdma_s80_hard_read_reg_list(void *dev_hndl, uint8_t is_vf,
uint16_t reg_rd_group,
uint16_t reg_rd_slot,
uint16_t *total_regs,
struct qdma_reg_data *reg_list);
int qdma_s80_hard_global_csr_conf(void *dev_hndl, uint8_t index,
uint8_t count,
uint32_t *csr_val,
enum qdma_global_csr_type csr_type,
enum qdma_hw_access_type access_type);
int qdma_s80_hard_global_writeback_interval_conf(void *dev_hndl,
enum qdma_wrb_interval *wb_int,
enum qdma_hw_access_type access_type);
int qdma_s80_hard_mm_channel_conf(void *dev_hndl, uint8_t channel,
uint8_t is_c2h,
uint8_t enable);
int qdma_s80_hard_dump_reg_info(void *dev_hndl, uint32_t reg_addr,
uint32_t num_regs, char *buf, uint32_t buflen);
uint32_t qdma_s80_hard_get_config_num_regs(void);
struct xreg_info *qdma_s80_hard_get_config_regs(void);
#ifdef __cplusplus
}
#endif
#endif /* QDMA_S80_HARD_ACCESS_H_ */
#endif /* __QDMA_S80_HARD_ACCESS_H_ */
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -14,12 +14,9 @@
* under the License.
*/
#ifndef QDMA_ACCESS_H_
#define QDMA_ACCESS_H_
#ifndef __QDMA_SOFT_ACCESS_H_
#define __QDMA_SOFT_ACCESS_H_
#include "qdma_access_export.h"
#include "qdma_platform_env.h"
#include "qdma_access_errors.h"
#ifdef __cplusplus
extern "C" {
#endif
......@@ -31,6 +28,136 @@ extern "C" {
* exported by QDMA common library.
*/
#include "qdma_platform.h"
/**
* enum qdma_error_idx - qdma errors
*/
enum qdma_error_idx {
/* Descriptor errors */
QDMA_DSC_ERR_POISON,
QDMA_DSC_ERR_UR_CA,
QDMA_DSC_ERR_PARAM,
QDMA_DSC_ERR_ADDR,
QDMA_DSC_ERR_TAG,
QDMA_DSC_ERR_FLR,
QDMA_DSC_ERR_TIMEOUT,
QDMA_DSC_ERR_DAT_POISON,
QDMA_DSC_ERR_FLR_CANCEL,
QDMA_DSC_ERR_DMA,
QDMA_DSC_ERR_DSC,
QDMA_DSC_ERR_RQ_CANCEL,
QDMA_DSC_ERR_DBE,
QDMA_DSC_ERR_SBE,
QDMA_DSC_ERR_ALL,
/* TRQ Errors */
QDMA_TRQ_ERR_UNMAPPED,
QDMA_TRQ_ERR_QID_RANGE,
QDMA_TRQ_ERR_VF_ACCESS,
QDMA_TRQ_ERR_TCP_TIMEOUT,
QDMA_TRQ_ERR_ALL,
/* C2H Errors */
QDMA_ST_C2H_ERR_MTY_MISMATCH,
QDMA_ST_C2H_ERR_LEN_MISMATCH,
QDMA_ST_C2H_ERR_QID_MISMATCH,
QDMA_ST_C2H_ERR_DESC_RSP_ERR,
QDMA_ST_C2H_ERR_ENG_WPL_DATA_PAR_ERR,
QDMA_ST_C2H_ERR_MSI_INT_FAIL,
QDMA_ST_C2H_ERR_ERR_DESC_CNT,
QDMA_ST_C2H_ERR_PORTID_CTXT_MISMATCH,
QDMA_ST_C2H_ERR_PORTID_BYP_IN_MISMATCH,
QDMA_ST_C2H_ERR_CMPT_INV_Q_ERR,
QDMA_ST_C2H_ERR_CMPT_QFULL_ERR,
QDMA_ST_C2H_ERR_CMPT_CIDX_ERR,
QDMA_ST_C2H_ERR_CMPT_PRTY_ERR,
QDMA_ST_C2H_ERR_ALL,
/* Fatal Errors */
QDMA_ST_FATAL_ERR_MTY_MISMATCH,
QDMA_ST_FATAL_ERR_LEN_MISMATCH,
QDMA_ST_FATAL_ERR_QID_MISMATCH,
QDMA_ST_FATAL_ERR_TIMER_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_PFCH_II_RAM_RDBE,
QDMA_ST_FATAL_ERR_CMPT_CTXT_RAM_RDBE,
QDMA_ST_FATAL_ERR_PFCH_CTXT_RAM_RDBE,
QDMA_ST_FATAL_ERR_DESC_REQ_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_INT_CTXT_RAM_RDBE,
QDMA_ST_FATAL_ERR_CMPT_COAL_DATA_RAM_RDBE,
QDMA_ST_FATAL_ERR_TUSER_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_QID_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_PAYLOAD_FIFO_RAM_RDBE,
QDMA_ST_FATAL_ERR_WPL_DATA_PAR,
QDMA_ST_FATAL_ERR_ALL,
/* H2C Errors */
QDMA_ST_H2C_ERR_ZERO_LEN_DESC,
QDMA_ST_H2C_ERR_CSI_MOP,
QDMA_ST_H2C_ERR_NO_DMA_DSC,
QDMA_ST_H2C_ERR_SBE,
QDMA_ST_H2C_ERR_DBE,
QDMA_ST_H2C_ERR_ALL,
/* Single bit errors */
QDMA_SBE_ERR_MI_H2C0_DAT,
QDMA_SBE_ERR_MI_C2H0_DAT,
QDMA_SBE_ERR_H2C_RD_BRG_DAT,
QDMA_SBE_ERR_H2C_WR_BRG_DAT,
QDMA_SBE_ERR_C2H_RD_BRG_DAT,
QDMA_SBE_ERR_C2H_WR_BRG_DAT,
QDMA_SBE_ERR_FUNC_MAP,
QDMA_SBE_ERR_DSC_HW_CTXT,
QDMA_SBE_ERR_DSC_CRD_RCV,
QDMA_SBE_ERR_DSC_SW_CTXT,
QDMA_SBE_ERR_DSC_CPLI,
QDMA_SBE_ERR_DSC_CPLD,
QDMA_SBE_ERR_PASID_CTXT_RAM,
QDMA_SBE_ERR_TIMER_FIFO_RAM,
QDMA_SBE_ERR_PAYLOAD_FIFO_RAM,
QDMA_SBE_ERR_QID_FIFO_RAM,
QDMA_SBE_ERR_TUSER_FIFO_RAM,
QDMA_SBE_ERR_WRB_COAL_DATA_RAM,
QDMA_SBE_ERR_INT_QID2VEC_RAM,
QDMA_SBE_ERR_INT_CTXT_RAM,
QDMA_SBE_ERR_DESC_REQ_FIFO_RAM,
QDMA_SBE_ERR_PFCH_CTXT_RAM,
QDMA_SBE_ERR_WRB_CTXT_RAM,
QDMA_SBE_ERR_PFCH_LL_RAM,
QDMA_SBE_ERR_H2C_PEND_FIFO,
QDMA_SBE_ERR_ALL,
/* Double bit Errors */
QDMA_DBE_ERR_MI_H2C0_DAT,
QDMA_DBE_ERR_MI_C2H0_DAT,
QDMA_DBE_ERR_H2C_RD_BRG_DAT,
QDMA_DBE_ERR_H2C_WR_BRG_DAT,
QDMA_DBE_ERR_C2H_RD_BRG_DAT,
QDMA_DBE_ERR_C2H_WR_BRG_DAT,
QDMA_DBE_ERR_FUNC_MAP,
QDMA_DBE_ERR_DSC_HW_CTXT,
QDMA_DBE_ERR_DSC_CRD_RCV,
QDMA_DBE_ERR_DSC_SW_CTXT,
QDMA_DBE_ERR_DSC_CPLI,
QDMA_DBE_ERR_DSC_CPLD,
QDMA_DBE_ERR_PASID_CTXT_RAM,
QDMA_DBE_ERR_TIMER_FIFO_RAM,
QDMA_DBE_ERR_PAYLOAD_FIFO_RAM,
QDMA_DBE_ERR_QID_FIFO_RAM,
QDMA_DBE_ERR_TUSER_FIFO_RAM,
QDMA_DBE_ERR_WRB_COAL_DATA_RAM,
QDMA_DBE_ERR_INT_QID2VEC_RAM,
QDMA_DBE_ERR_INT_CTXT_RAM,
QDMA_DBE_ERR_DESC_REQ_FIFO_RAM,
QDMA_DBE_ERR_PFCH_CTXT_RAM,
QDMA_DBE_ERR_WRB_CTXT_RAM,
QDMA_DBE_ERR_PFCH_LL_RAM,
QDMA_DBE_ERR_H2C_PEND_FIFO,
QDMA_DBE_ERR_ALL,
QDMA_ERRS_ALL
};
struct qdma_hw_err_info {
enum qdma_error_idx idx;
const char *err_name;
......@@ -38,6 +165,7 @@ struct qdma_hw_err_info {
uint32_t stat_reg_addr;
uint32_t leaf_err_mask;
uint32_t global_err_mask;
void (*qdma_hw_err_process)(void *dev_hndl);
};
......@@ -98,6 +226,10 @@ int qdma_dump_intr_context(void *dev_hndl,
uint32_t qdma_soft_reg_dump_buf_len(void);
uint32_t qdma_get_config_num_regs(void);
struct xreg_info *qdma_get_config_regs(void);
int qdma_soft_context_buf_len(uint8_t st,
enum qdma_dev_q_type q_type, uint32_t *buflen);
......@@ -118,9 +250,9 @@ int qdma_soft_read_dump_queue_context(void *dev_hndl,
int qdma_hw_error_process(void *dev_hndl);
const char *qdma_hw_get_error_name(enum qdma_error_idx err_idx);
const char *qdma_hw_get_error_name(uint32_t err_idx);
int qdma_hw_error_enable(void *dev_hndl, enum qdma_error_idx err_idx);
int qdma_hw_error_enable(void *dev_hndl, uint32_t err_idx);
int qdma_get_device_attributes(void *dev_hndl,
struct qdma_dev_attributes *dev_info);
......@@ -129,7 +261,7 @@ int qdma_get_user_bar(void *dev_hndl, uint8_t is_vf,
uint8_t func_id, uint8_t *user_bar);
int qdma_soft_dump_config_reg_list(void *dev_hndl,
uint32_t num_regs,
uint32_t total_regs,
struct qdma_reg_data *reg_list,
char *buf, uint32_t buflen);
......@@ -138,9 +270,23 @@ int qdma_read_reg_list(void *dev_hndl, uint8_t is_vf,
uint16_t *total_regs,
struct qdma_reg_data *reg_list);
int qdma_global_csr_conf(void *dev_hndl, uint8_t index, uint8_t count,
uint32_t *csr_val,
enum qdma_global_csr_type csr_type,
enum qdma_hw_access_type access_type);
int qdma_global_writeback_interval_conf(void *dev_hndl,
enum qdma_wrb_interval *wb_int,
enum qdma_hw_access_type access_type);
int qdma_mm_channel_conf(void *dev_hndl, uint8_t channel, uint8_t is_c2h,
uint8_t enable);
int qdma_dump_reg_info(void *dev_hndl, uint32_t reg_addr,
uint32_t num_regs, char *buf, uint32_t buflen);
#ifdef __cplusplus
}
#endif
#endif /* QDMA_ACCESS_H_ */
#endif /* __QDMA_SOFT_ACCESS_H_ */
......@@ -14,8 +14,8 @@
* under the License.
*/
#ifndef QDMA_SOFT_REG_H__
#define QDMA_SOFT_REG_H__
#ifndef __QDMA_SOFT_REG_H__
#define __QDMA_SOFT_REG_H__
#ifdef __cplusplus
extern "C" {
......@@ -65,56 +65,7 @@ extern "C" {
(0xFFFFFFFFFFFFFFFF >> (BITS_PER_LONG_LONG - 1 - (h))))
/*
* Returns the number of trailing 0s in x, starting at LSB.
* Same as gcc __builtin_ffsll function
*/
#ifdef GCC_COMPILER
/*
 * Count trailing zero bits of @x starting at the LSB, via the GCC
 * builtin.  __builtin_ffsll() yields 1 + index of the lowest set bit
 * (0 when x == 0), so subtracting one gives the trailing-zero count
 * (and wraps to 0xFFFFFFFF for x == 0, matching ffsll semantics).
 */
static inline uint32_t get_trailing_zeros(uint64_t x)
{
	return static_cast<uint32_t>(__builtin_ffsll(x) - 1);
}
#else
/*
 * Portable trailing-zero count for compilers without __builtin_ffsll.
 *
 * Binary-search descent: for each power-of-two window size (32, 16, 8,
 * 4, 2, 1), if the low window bits are all zero, add the window size to
 * the count and shift them out.  NOTE: for value == 0 this returns 63
 * (unlike the builtin variant); callers only pass non-zero masks.
 */
static inline uint32_t get_trailing_zeros(uint64_t value)
{
	uint32_t count = 0;

	for (uint32_t width = 32; width != 0; width >>= 1) {
		const uint64_t low_mask = (1ULL << width) - 1;

		if ((value & low_mask) == 0) {
			count += width;
			value >>= width;
		}
	}
	return count;
}
#endif
#define FIELD_SHIFT(mask) get_trailing_zeros(mask)
#define FIELD_SET(mask, val) ((val << FIELD_SHIFT(mask)) & mask)
#define FIELD_GET(mask, reg) ((reg & mask) >> FIELD_SHIFT(mask))
/* polling a register */
#define QDMA_REG_POLL_DFLT_INTERVAL_US 10 /* 10us per poll */
#define QDMA_REG_POLL_DFLT_TIMEOUT_US (500*1000) /* 500ms */
#define DEBGFS_LINE_SZ (81)
#define QDMA_H2C_THROT_DATA_THRESH 0x4000
......@@ -125,28 +76,6 @@ static inline uint32_t get_trailing_zeros(uint64_t value)
/*
* Q Context programming (indirect)
*/
/* Opcodes for the indirect queue-context programming command
 * (see "Q Context programming (indirect)" above). */
enum ind_ctxt_cmd_op {
QDMA_CTXT_CMD_CLR, /* clear the selected context */
QDMA_CTXT_CMD_WR, /* write the selected context */
QDMA_CTXT_CMD_RD, /* read the selected context */
QDMA_CTXT_CMD_INV /* invalidate the selected context */
};
/* Context-structure selector for the indirect context command: chooses
 * which context RAM the CLR/WR/RD/INV operation targets. */
enum ind_ctxt_cmd_sel {
QDMA_CTXT_SEL_SW_C2H, /* software context, C2H direction */
QDMA_CTXT_SEL_SW_H2C, /* software context, H2C direction */
QDMA_CTXT_SEL_HW_C2H, /* hardware context, C2H direction */
QDMA_CTXT_SEL_HW_H2C, /* hardware context, H2C direction */
QDMA_CTXT_SEL_CR_C2H, /* credit context, C2H direction */
QDMA_CTXT_SEL_CR_H2C, /* credit context, H2C direction */
QDMA_CTXT_SEL_CMPT, /* completion (CMPT) context */
QDMA_CTXT_SEL_PFTCH, /* prefetch context */
QDMA_CTXT_SEL_INT_COAL, /* interrupt coalescing context */
QDMA_CTXT_SEL_PASID_RAM_LOW, /* PASID RAM, low half */
QDMA_CTXT_SEL_PASID_RAM_HIGH, /* PASID RAM, high half */
QDMA_CTXT_SEL_TIMER, /* timer context */
QDMA_CTXT_SEL_FMAP, /* function map (FMAP) context */
};
#define QDMA_REG_IND_CTXT_REG_COUNT 8
#define QDMA_REG_IND_CTXT_WCNT_1 1
......@@ -197,9 +126,7 @@ enum ind_ctxt_cmd_sel {
#define QDMA_SW_CTXT_W0_IRQ_ARM_MASK BIT(16)
#define QDMA_SW_CTXT_W0_PIDX GENMASK(15, 0)
/** QDMA_IND_REG_SEL_PFTCH */
#define QDMA_PFTCH_CTXT_SW_CRDT_GET_H_MASK GENMASK(15, 3)
#define QDMA_PFTCH_CTXT_SW_CRDT_GET_L_MASK GENMASK(2, 0)
#define QDMA_PFTCH_CTXT_W1_VALID_MASK BIT(13)
#define QDMA_PFTCH_CTXT_W1_SW_CRDT_H_MASK GENMASK(12, 0)
......@@ -211,11 +138,8 @@ enum ind_ctxt_cmd_sel {
#define QDMA_PFTCH_CTXT_W0_BUF_SIZE_IDX_MASK GENMASK(4, 1)
#define QDMA_PFTCH_CTXT_W0_BYPASS_MASK BIT(0)
/** QDMA_IND_REG_SEL_CMPT */
#define QDMA_COMPL_CTXT_BADDR_GET_H_MASK GENMASK_ULL(63, 38)
#define QDMA_COMPL_CTXT_BADDR_GET_L_MASK GENMASK_ULL(37, 12)
#define QDMA_COMPL_CTXT_PIDX_GET_H_MASK GENMASK(15, 4)
#define QDMA_COMPL_CTXT_PIDX_GET_L_MASK GENMASK(3, 0)
#define QDMA_COMPL_CTXT_W4_INTR_AGGR_MASK BIT(15)
#define QDMA_COMPL_CTXT_W4_INTR_VEC_MASK GENMASK(14, 4)
......@@ -256,9 +180,7 @@ enum ind_ctxt_cmd_sel {
#define QDMA_CR_CTXT_W0_CREDT_MASK GENMASK(15, 0)
/** QDMA_IND_REG_SEL_INTR */
#define QDMA_INTR_CTXT_BADDR_GET_H_MASK GENMASK_ULL(63, 61)
#define QDMA_INTR_CTXT_BADDR_GET_M_MASK GENMASK_ULL(60, 29)
#define QDMA_INTR_CTXT_BADDR_GET_L_MASK GENMASK_ULL(28, 12)
#define QDMA_INTR_CTXT_W2_AT_MASK BIT(18)
#define QDMA_INTR_CTXT_W2_PIDX_MASK GENMASK(17, 6)
......@@ -271,17 +193,9 @@ enum ind_ctxt_cmd_sel {
#define QDMA_INTR_CTXT_W0_VEC_ID_MASK GENMASK(11, 1)
#define QDMA_INTR_CTXT_W0_VALID_MASK BIT(0)
/** Constants */
#define QDMA_NUM_RING_SIZES 16
#define QDMA_NUM_C2H_TIMERS 16
#define QDMA_NUM_C2H_BUFFER_SIZES 16
#define QDMA_NUM_C2H_COUNTERS 16
#define QDMA_MM_CONTROL_RUN 0x1
#define QDMA_MM_CONTROL_STEP 0x100
#define QDMA_MAGIC_NUMBER 0x1fd3
#define QDMA_PIDX_STEP 0x10
#define QDMA_CMPT_CIDX_STEP 0x10
#define QDMA_INT_CIDX_STEP 0x10
/* ------------------------ QDMA_TRQ_SEL_GLBL (0x00200)-------------------*/
#define QDMA_OFFSET_GLBL_RNG_SZ 0x204
......@@ -459,9 +373,7 @@ enum ind_ctxt_cmd_sel {
#define QDMA_OFFSET_GLBL2_CHANNEL_FUNC_RET 0x12C
#define QDMA_OFFSET_GLBL2_SYSTEM_ID 0x130
#define QDMA_OFFSET_GLBL2_MISC_CAP 0x134
#define QDMA_GLBL2_MM_CMPT_EN_MASK BIT(2)
#define QDMA_GLBL2_FLR_PRESENT_MASK BIT(1)
#define QDMA_GLBL2_MAILBOX_EN_MASK BIT(0)
#define QDMA_GLBL2_DEVICE_ID_MASK GENMASK(31, 28)
#define QDMA_GLBL2_VIVADO_RELEASE_MASK GENMASK(27, 24)
#define QDMA_GLBL2_VERSAL_IP_MASK GENMASK(23, 20)
......@@ -667,4 +579,4 @@ enum ind_ctxt_cmd_sel {
}
#endif
#endif /* ifndef QDMA_SOFT_REG_H__ */
#endif /* __QDMA_SOFT_REG_H__ */
......@@ -46,22 +46,6 @@ struct err_code_map error_code_map_list[] = {
{QDMA_ERR_RM_QMAX_CONF_REJECTED, STATUS_UNSUCCESSFUL}
};
/**
 * qdma_get_device_attr() - Expose the device attribute structure that
 * lives inside the driver's qdma_device object.
 *
 * @dev_hndl: opaque device handle; actually a qdma_device pointer
 * @dev_cap:  out-parameter receiving a pointer to the device's
 *            qdma_dev_attributes (no copy is made; the pointee is
 *            owned by the qdma_device)
 */
void qdma_get_device_attr(
    void *dev_hndl,
    struct qdma_dev_attributes **dev_cap)
{
    auto *device = static_cast<qdma_device *>(dev_hndl);

    *dev_cap = &(device->dev_conf.dev_info);
}
/**
 * qdma_hw_error_handler() - Log a detected hardware error by name.
 *
 * Resolves the error index to its printable name via the device's HW
 * access function table and emits it on the error trace channel.
 *
 * @dev_hndl: opaque device handle; actually a qdma_device pointer
 * @err_idx:  identifier of the detected hardware error
 */
void qdma_hw_error_handler(
    void *dev_hndl,
    enum qdma_error_idx err_idx)
{
    auto *device = static_cast<qdma_device *>(dev_hndl);
    const char *err_name = device->hw.qdma_hw_get_error_name(err_idx);

    TraceError(TRACE_QDMA, "%s detected", err_name);
}
void qdma_udelay(
UINT32 delay_us)
......
......@@ -32,7 +32,7 @@ VOID qdma_poll_thread(
KAFFINITY affinity = (KAFFINITY)1 << th->id;
KeSetSystemAffinityThread(affinity);
TraceVerbose(TRACE_THREAD, "Active thread ID : %lu", th->id);
TraceVerbose(TRACE_THREAD, "Thread active on CPU core : %lu", th->id);
while (1) {
......@@ -45,7 +45,7 @@ VOID qdma_poll_thread(
InterlockedDecrement(&th->sem_count);
if (th->terminate) {
TraceInfo(TRACE_THREAD, "Terminating thread : %lu", th->id);
TraceVerbose(TRACE_THREAD, "Terminating thread on CPU core: %lu", th->id);
PsTerminateSystemThread(STATUS_SUCCESS);
}
......@@ -96,7 +96,7 @@ NTSTATUS thread_manager::create_sys_threads(queue_op_mode mode)
threads = static_cast<qdma_thread *>(qdma_calloc(active_processors, sizeof(qdma_thread)));
if (nullptr == threads) {
return STATUS_MEMORY_NOT_ALLOCATED;
return STATUS_INSUFFICIENT_RESOURCES;
}
for (i = 0; i < active_processors; ++i) {
......@@ -116,7 +116,7 @@ NTSTATUS thread_manager::create_sys_threads(queue_op_mode mode)
qdma_poll_thread,
&threads[i]);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_THREAD, "Failed to create thread %d - %!STATUS!", i, status);
TraceError(TRACE_THREAD, "Failed to create thread on cpu core %d - %!STATUS!", i, status);
break;
}
......@@ -131,7 +131,7 @@ NTSTATUS thread_manager::create_sys_threads(queue_op_mode mode)
status = WdfSpinLockCreate(WDF_NO_OBJECT_ATTRIBUTES, &threads[i].lock);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_THREAD, "Failed to create thread %d - %!STATUS!", i, status);
TraceError(TRACE_THREAD, "Failed to create thread spinlock for CPU core %d - %!STATUS!", i, status);
active_threads = i + 1;
goto ErrExit;
}
......@@ -206,7 +206,7 @@ void err_poll_thread(PVOID context)
ctx->device->hw.qdma_hw_error_process(ctx->device);
if (ctx->terminate) {
TraceInfo(TRACE_THREAD, "Terminating Err thread");
TraceVerbose(TRACE_THREAD, "ctx->terminate");
PsTerminateSystemThread(STATUS_SUCCESS);
}
......@@ -218,7 +218,7 @@ NTSTATUS thread_manager::create_err_poll_thread(qdma_device *device)
{
err_th_para = static_cast<err_thread *>(qdma_calloc(1, sizeof(err_thread)));
if (nullptr == err_th_para) {
return STATUS_MEMORY_NOT_ALLOCATED;
return STATUS_INSUFFICIENT_RESOURCES;
}
/* THREAD DATA INITIALIZATION */
......@@ -233,7 +233,7 @@ NTSTATUS thread_manager::create_err_poll_thread(qdma_device *device)
err_poll_thread,
err_th_para);
if (!NT_SUCCESS(status)) {
TraceError(TRACE_THREAD, "Failed to create Err thread - %!STATUS!", status);
TraceError(TRACE_THREAD, "Failed to create Error handling thread - %!STATUS!", status);
qdma_memfree(err_th_para);
err_th_para = nullptr;
return status;
......@@ -248,14 +248,14 @@ NTSTATUS thread_manager::create_err_poll_thread(qdma_device *device)
ZwClose(err_th_para->th_handle);
TraceVerbose(TRACE_THREAD, "Err Thread Init Successfull");
TraceVerbose(TRACE_THREAD, "Error handling Thread Active");
return STATUS_SUCCESS;
}
void thread_manager::terminate_err_poll_thread(void)
{
TraceVerbose(TRACE_THREAD, "terminate_err_poll_thread called");
TraceVerbose(TRACE_THREAD, "terminating error handling thread");
if (err_th_para == nullptr)
return;
......
......@@ -55,7 +55,7 @@ NTSTATUS xpcie_device::map(
TraceError(TRACE_PCIE, "MmMapIoSpace returned NULL! for BAR%u", num_bars);
return STATUS_DEVICE_CONFIGURATION_ERROR;
}
TraceInfo(TRACE_PCIE, "MM BAR %d (addr:0x%lld, length:%llu) mapped at 0x%08p",
TraceVerbose(TRACE_PCIE, "MM BAR %d (addr:0x%lld, length:%llu) mapped at 0x%08p",
num_bars, resource->u.Memory.Start.QuadPart, bars[num_bars].length, bars[num_bars].base);
num_bars++;
}
......@@ -69,7 +69,7 @@ void xpcie_device::unmap(void)
/* Unmap any I/O ports. Disconnecting from the interrupt will be done automatically by the framework. */
for (unsigned int i = 0; i < num_bars; i++) {
if (bars[i].base != nullptr) {
TraceInfo(TRACE_PCIE, "Unmapping BAR%d, VA:(%p) Length %llu", i, bars[i].base, bars[i].length);
TraceVerbose(TRACE_PCIE, "Unmapping BAR%d, VA:(%p) Length %llu", i, bars[i].base, bars[i].length);
MmUnmapIoSpace(bars[i].base, bars[i].length);
bars[i].base = nullptr;
}
......@@ -191,7 +191,7 @@ NTSTATUS xpcie_device::assign_bar_types(const UINT8 user_bar_idx)
if (num_bars > 1) {
user_bar = &bars[user_bar_idx];
TraceVerbose(TRACE_PCIE, "User assigned at %u", user_bar_idx);
TraceInfo(TRACE_PCIE, "AXI Master Lite BAR %u", (user_bar_idx * 2));
if (num_bars > 2) {
for (auto i = 0u; i < num_bars; ++i) {
......@@ -199,7 +199,7 @@ NTSTATUS xpcie_device::assign_bar_types(const UINT8 user_bar_idx)
continue;
bypass_bar = &bars[i];
TraceVerbose(TRACE_PCIE, "Found bypass bar at %d", i);
TraceInfo(TRACE_PCIE, "AXI Bridge Master BAR %d", (i * 2));
break;
}
}
......@@ -268,6 +268,33 @@ NTSTATUS xpcie_device::write_bar(
return bar->write(offset, data, size);
}
/**
 * xpcie_device::get_bar_info() - Return the mapped virtual base address
 * and length of the requested BAR.
 *
 * @bar_type:   which BAR to query (CONFIG_BAR, USER_BAR or BYPASS_BAR)
 * @bar_base:   out - virtual base address of the BAR mapping
 * @bar_length: out - length of the BAR mapping in bytes
 *
 * Return: STATUS_SUCCESS on success; STATUS_INVALID_PARAMETER for an
 *         unknown BAR type (out-parameters are cleared in that case).
 *
 * NOTE(review): user_bar/bypass_bar are only assigned when the device
 * exposes enough BARs (see assign_bar_types()); presumably callers only
 * request BAR types that exist on this device -- confirm, otherwise a
 * null member pointer would be dereferenced here.
 */
NTSTATUS xpcie_device::get_bar_info(
    qdma_bar_type bar_type,
    PVOID &bar_base,
    size_t &bar_length) const
{
    switch (bar_type) {
    case qdma_bar_type::CONFIG_BAR:
        bar_base = (PVOID)config_bar->base;
        bar_length = config_bar->length;
        break;
    case qdma_bar_type::USER_BAR:
        bar_base = (PVOID)user_bar->base;
        bar_length = user_bar->length;
        break;
    case qdma_bar_type::BYPASS_BAR:
        bar_base = (PVOID)bypass_bar->base;
        bar_length = bypass_bar->length;
        break;
    default:
        /* nullptr (not NULL) for consistency with the rest of the driver */
        bar_base = nullptr;
        bar_length = 0;
        return STATUS_INVALID_PARAMETER;
    }

    return STATUS_SUCCESS;
}
ULONG xpcie_device::conf_reg_read(size_t offset) const
{
NT_ASSERTMSG("Error: BAR not assigned!", config_bar->base != nullptr);
......
......@@ -69,6 +69,7 @@ public:
NTSTATUS write_bar(qdma_bar_type bar_type, size_t offset, void* data, size_t size) const;
ULONG conf_reg_read(size_t offset) const;
void conf_reg_write(size_t offset, ULONG data) const;
NTSTATUS get_bar_info(qdma_bar_type bar_type, PVOID &bar_base, size_t &bar_length) const;
};
} /* namespace xlnx */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment