Commit 5c214bb4 authored by Sujatha Banoth

QDMA DPDK 2020.2.1 Driver Release to support DPDK v20.11 Framework Changes

parent ffaa125e
RELEASE: 2020.2
===============
RELEASE: 2020.2.1
=================
This release is based on DPDK v19.11 and contains QDMA poll mode driver and
This release is based on DPDK v20.11 and contains QDMA poll mode driver and
QDMA test application.
This release is validated on VCU1525 and U200 devices with the QDMA4.0 2020.2 based
example design and the QDMA3.1 2020.2 based example design.
This release includes a patch file for dpdk-pktgen v19.12.0 that extends
This release includes a patch file for dpdk-pktgen v20.12.0 that extends
the dpdk-pktgen application to handle packets larger than 1518 bytes and
disables the packet-size classification logic in dpdk-pktgen to remove
application overhead in performance measurement.
......@@ -85,6 +85,10 @@ SUPPORTED FEATURES:
- Resolved the issue related to mbuf packet length
- Fixed the VF FMAP qmax programming issue by setting qmax to the actual user-configured total queue count
2020.2.1 Updates
----------------
- Migrated the QDMA DPDK driver to the DPDK v20.11 framework (see the sketch below)
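The framework change most visible in this diff is that DPDK v20.11 makes several eth_dev_ops callbacks (stop, close) return an int status instead of void, and moves the rx_queue_count/descriptor-status hooks to per-device fast-path pointers. A minimal sketch of the adapted stop callback, assuming the DPDK v20.11 ethdev API (the function name and body here are illustrative only, not the driver's actual implementation):

#include <rte_common.h>
#include <rte_ethdev.h>

/* DPDK v20.11: dev_stop/dev_close callbacks return int (0 on success) */
static int sketch_dev_stop(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
	/* stop Rx/Tx queues and cancel pending alarms, then report status */
	return 0;
}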
KNOWN ISSUES:
=============
- Function Level Reset (FLR) of a PF device while VFs are attached to it results in mailbox communication failure
......
# BSD LICENSE
#
# Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
# Copyright(c) 2021 Xilinx, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
......@@ -28,78 +28,37 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
cflags += ['-DRTE_LIBRTE_QDMA_PMD']
cflags += ['-DDMA_BRAM_SIZE=524288']
include $(RTE_SDK)/mk/rte.vars.mk
includes += include_directories('.')
includes += include_directories('qdma_access')
includes += include_directories('qdma_access/qdma_soft_access')
includes += include_directories('qdma_access/eqdma_soft_access')
includes += include_directories('qdma_access/qdma_s80_hard_access')
## If this BRAM_SIZE is modified, the same should also be modified in the testapp Makefile
## Default is set to 512KB
BRAM_SIZE ?= 524288
#
# library name
#
LIB = librte_pmd_qdma.a
# library version
LIBABIVER := 1
# versioning export map
EXPORT_MAP := rte_pmd_qdma_version.map
CFLAGS += -O3 -DDMA_BRAM_SIZE=$(BRAM_SIZE)
#CFLAGS += -g
ifeq ($(CONFIG_RTE_LIBRTE_QDMA_GCOV),y)
CFLAGS += -ftest-coverage -fprofile-arcs
endif
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -I$(RTE_SDK)/drivers/net/qdma/qdma_access/
CFLAGS += -I$(RTE_SDK)/drivers/net/qdma/qdma_access/qdma_soft_access/
CFLAGS += -I$(RTE_SDK)/drivers/net/qdma/qdma_access/eqdma_soft_access/
CFLAGS += -I$(RTE_SDK)/drivers/net/qdma/qdma_access/qdma_s80_hard_access/
CFLAGS += -I$(RTE_SDK)/drivers/net/qdma/
ifeq ($(TEST_64B_DESC_BYPASS),1)
CFLAGS += -DTEST_64B_DESC_BYPASS
endif
ifeq ($(CONFIG_RTE_ARCH_X86_64),y)
CFLAGS += -DQDMA_RX_VEC_X86_64
CFLAGS += -DQDMA_TX_VEC_X86_64
endif
CFLAGS += -DQDMA_LATENCY_OPTIMIZED
# this lib depends upon:
LDLIBS += -lpthread
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
#
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_vf_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_devops.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_common.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_xdebug.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_user.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/eqdma_soft_access/eqdma_soft_access.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/eqdma_soft_access/eqdma_soft_reg_dump.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/qdma_s80_hard_access/qdma_s80_hard_access.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/qdma_s80_hard_access/qdma_s80_hard_reg_dump.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/qdma_soft_access/qdma_soft_access.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/qdma_list.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/qdma_resource_mgmt.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/qdma_mbox_protocol.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_access/qdma_access_common.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_mbox.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += qdma_platform.c
SRCS-$(CONFIG_RTE_LIBRTE_QDMA_PMD) += rte_pmd_qdma.c
#
## Export include files
#
SYMLINK-y-include += rte_pmd_qdma.h
headers = files('rte_pmd_qdma.h')
deps += ['mempool_ring']
include $(RTE_SDK)/mk/rte.lib.mk
sources = files(
'qdma_ethdev.c',
'qdma_vf_ethdev.c',
'qdma_devops.c',
'qdma_common.c',
'qdma_rxtx.c',
'qdma_xdebug.c',
'qdma_user.c',
'qdma_access/eqdma_soft_access/eqdma_soft_access.c',
'qdma_access/eqdma_soft_access/eqdma_soft_reg_dump.c',
'qdma_access/qdma_s80_hard_access/qdma_s80_hard_access.c',
'qdma_access/qdma_s80_hard_access/qdma_s80_hard_reg_dump.c',
'qdma_access/qdma_soft_access/qdma_soft_access.c',
'qdma_access/qdma_list.c',
'qdma_access/qdma_resource_mgmt.c',
'qdma_access/qdma_mbox_protocol.c',
'qdma_access/qdma_access_common.c',
'qdma_mbox.c',
'qdma_platform.c',
'rte_pmd_qdma.c'
)
/*-
* BSD LICENSE
*
* Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* Copyright(c) 2017-2021 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -403,4 +403,6 @@ struct rte_memzone *qdma_zone_reserve(struct rte_eth_dev *dev,
bool is_qdma_supported(struct rte_eth_dev *dev);
bool is_vf_device_supported(struct rte_eth_dev *dev);
bool is_pf_device_supported(struct rte_eth_dev *dev);
void qdma_check_errors(void *arg);
#endif /* ifndef __QDMA_H__ */
......@@ -378,8 +378,8 @@ extern "C" {
#define QDMA_OFFSET_GLBL2_PF_VF_BARLITE_EXT 0x110
#define QDMA_OFFSET_GLBL2_CHANNEL_INST 0x114
#define QDMA_OFFSET_GLBL2_CHANNEL_MDMA 0x118
#define QDMA_GLBL2_ST_C2H_MASK BIT(16)
#define QDMA_GLBL2_ST_H2C_MASK BIT(17)
#define QDMA_GLBL2_ST_C2H_MASK BIT(17)
#define QDMA_GLBL2_ST_H2C_MASK BIT(16)
#define QDMA_GLBL2_MM_C2H_MASK BIT(8)
#define QDMA_GLBL2_MM_H2C_MASK BIT(0)
#define QDMA_OFFSET_GLBL2_CHANNEL_STRM 0x11C
......
......@@ -220,7 +220,7 @@ int qdma_init_rx_queue(struct qdma_rx_queue *rxq)
goto fail;
}
phys_addr = (uint64_t)mb->buf_physaddr +
phys_addr = (uint64_t)mb->buf_iova +
RTE_PKTMBUF_HEADROOM;
mb->data_off = RTE_PKTMBUF_HEADROOM;
......
/*-
* BSD LICENSE
*
* Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* Copyright(c) 2017-2021 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -876,7 +876,7 @@ int qdma_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
}
PMD_DRV_LOG(INFO, "Tx ring phys addr: 0x%lX, Tx Ring virt addr: 0x%lX",
(uint64_t)txq->tx_mz->phys_addr, (uint64_t)txq->tx_ring);
(uint64_t)txq->tx_mz->iova, (uint64_t)txq->tx_ring);
/* Allocate memory for TX software ring */
sz = txq->nb_tx_desc * sizeof(struct rte_mbuf *);
......@@ -1113,7 +1113,7 @@ int qdma_dev_infos_get(struct rte_eth_dev *dev,
* @param dev
* Pointer to Ethernet device structure.
*/
void qdma_dev_stop(struct rte_eth_dev *dev)
int qdma_dev_stop(struct rte_eth_dev *dev)
{
#ifdef RTE_LIBRTE_QDMA_DEBUG_DRIVER
struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
......@@ -1133,6 +1133,7 @@ void qdma_dev_stop(struct rte_eth_dev *dev)
rte_eal_alarm_cancel(qdma_txq_pidx_update, (void *)dev);
#endif
return 0;
}
/**
......@@ -1143,7 +1144,7 @@ void qdma_dev_stop(struct rte_eth_dev *dev)
* @param dev
* Pointer to Ethernet device structure.
*/
void qdma_dev_close(struct rte_eth_dev *dev)
int qdma_dev_close(struct rte_eth_dev *dev)
{
struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
struct qdma_tx_queue *txq;
......@@ -1242,13 +1243,19 @@ void qdma_dev_close(struct rte_eth_dev *dev)
if (ret != QDMA_SUCCESS) {
PMD_DRV_LOG(ERR, "PF-%d(DEVFN) qmax update failed: %d\n",
qdma_dev->func_id, ret);
return;
return 0;
}
qdma_dev->init_q_range = 0;
rte_free(qdma_dev->q_info);
qdma_dev->q_info = NULL;
qdma_dev->dev_configured = 0;
/* cancel pending polls */
if (qdma_dev->is_master)
rte_eal_alarm_cancel(qdma_check_errors, (void *)dev);
return 0;
}
/**
......@@ -1495,7 +1502,7 @@ int qdma_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qid)
q_sw_ctxt.rngsz_idx = txq->ringszidx;
q_sw_ctxt.bypass = txq->en_bypass;
q_sw_ctxt.wbk_en = 1;
q_sw_ctxt.ring_bs_addr = (uint64_t)txq->tx_mz->phys_addr;
q_sw_ctxt.ring_bs_addr = (uint64_t)txq->tx_mz->iova;
if (txq->en_bypass &&
(txq->bypass_desc_sz != 0))
......@@ -1581,7 +1588,7 @@ int qdma_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qid)
q_cmpt_ctxt.timer_idx = rxq->timeridx;
q_cmpt_ctxt.color = CMPT_DEFAULT_COLOR_BIT;
q_cmpt_ctxt.ringsz_idx = rxq->cmpt_ringszidx;
q_cmpt_ctxt.bs_addr = (uint64_t)rxq->rx_cmpt_mz->phys_addr;
q_cmpt_ctxt.bs_addr = (uint64_t)rxq->rx_cmpt_mz->iova;
q_cmpt_ctxt.desc_sz = cmpt_desc_fmt;
q_cmpt_ctxt.valid = 1;
if (qdma_dev->dev_cap.cmpt_ovf_chk_dis)
......@@ -1602,7 +1609,7 @@ int qdma_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qid)
q_sw_ctxt.rngsz_idx = rxq->ringszidx;
q_sw_ctxt.bypass = rxq->en_bypass;
q_sw_ctxt.wbk_en = 1;
q_sw_ctxt.ring_bs_addr = (uint64_t)rxq->rx_mz->phys_addr;
q_sw_ctxt.ring_bs_addr = (uint64_t)rxq->rx_mz->iova;
if (rxq->en_bypass &&
(rxq->bypass_desc_sz != 0))
......@@ -2008,9 +2015,6 @@ static struct eth_dev_ops qdma_eth_dev_ops = {
.rx_queue_stop = qdma_dev_rx_queue_stop,
.tx_queue_start = qdma_dev_tx_queue_start,
.tx_queue_stop = qdma_dev_tx_queue_stop,
.rx_queue_count = qdma_dev_rx_queue_count,
.rx_descriptor_status = qdma_dev_rx_descriptor_status,
.tx_descriptor_status = qdma_dev_tx_descriptor_status,
.tx_done_cleanup = qdma_dev_tx_done_cleanup,
.queue_stats_mapping_set = qdma_dev_queue_stats_mapping,
.get_reg = qdma_dev_get_regs,
......@@ -2026,5 +2030,8 @@ void qdma_dev_ops_init(struct rte_eth_dev *dev)
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
dev->rx_pkt_burst = &qdma_recv_pkts;
dev->tx_pkt_burst = &qdma_xmit_pkts;
dev->rx_queue_count = &qdma_dev_rx_queue_count;
dev->rx_descriptor_status = &qdma_dev_rx_descriptor_status;
dev->tx_descriptor_status = &qdma_dev_tx_descriptor_status;
}
}
/*-
* BSD LICENSE
*
* Copyright(c) 2020 Xilinx, Inc. All rights reserved.
* Copyright(c) 2020-2021 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -462,7 +462,7 @@ int qdma_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qid);
* @ingroup dpdk_devops_func
*
*/
void qdma_dev_stop(struct rte_eth_dev *dev);
int qdma_dev_stop(struct rte_eth_dev *dev);
/**
* DPDK callback to release a Rx queue.
......@@ -498,7 +498,19 @@ void qdma_dev_tx_queue_release(void *tqueue);
*
* @ingroup dpdk_devops_func
*/
void qdma_dev_close(struct rte_eth_dev *dev);
int qdma_dev_close(struct rte_eth_dev *dev);
/**
* DPDK callback to close the VF device.
*
* This API frees the descriptor rings and objects belonging to all the queues
* of the given port. It also clears the FMAP.
*
* @param dev Pointer to Ethernet device structure
*
* @ingroup dpdk_devops_func
*/
int qdma_vf_dev_close(struct rte_eth_dev *dev);
/**
* DPDK callback to reset the device.
......
/*-
* BSD LICENSE
*
* Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* Copyright(c) 2017-2021 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -66,7 +66,7 @@
static void qdma_device_attributes_get(struct rte_eth_dev *dev);
/* Poll for any QDMA errors */
static void qdma_check_errors(void *arg)
void qdma_check_errors(void *arg)
{
struct qdma_pci_dev *qdma_dev;
qdma_dev = ((struct rte_eth_dev *)arg)->data->dev_private;
......
/*-
* BSD LICENSE
*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* Copyright(c) 2019-2021 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -125,7 +125,7 @@ static void *qdma_reset_task(void *arg)
if (!qdma_dev)
return NULL;
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
NULL);
return NULL;
......@@ -139,7 +139,7 @@ static void *qdma_remove_task(void *arg)
if (!qdma_dev)
return NULL;
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV,
rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV,
NULL);
return NULL;
......
/*-
* BSD LICENSE
*
* Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* Copyright(c) 2017-2021 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -1061,7 +1061,7 @@ static int rearm_c2h_ring(struct qdma_rx_queue *rxq, uint16_t num_desc)
/* rearm descriptor */
rx_ring_st[id].dst_addr =
(uint64_t)mb->buf_physaddr +
(uint64_t)mb->buf_iova +
RTE_PKTMBUF_HEADROOM;
id++;
}
......@@ -1073,7 +1073,7 @@ static int rearm_c2h_ring(struct qdma_rx_queue *rxq, uint16_t num_desc)
/* rearm descriptor */
rx_ring_st[id].dst_addr =
(uint64_t)mb->buf_physaddr +
(uint64_t)mb->buf_iova +
RTE_PKTMBUF_HEADROOM;
}
#endif //QDMA_RX_VEC_X86_64
......@@ -1110,7 +1110,7 @@ static int rearm_c2h_ring(struct qdma_rx_queue *rxq, uint16_t num_desc)
/* rearm descriptor */
rx_ring_st[id].dst_addr =
(uint64_t)mb->buf_physaddr +
(uint64_t)mb->buf_iova +
RTE_PKTMBUF_HEADROOM;
}
}
......
/*-
* BSD LICENSE
*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* Copyright(c) 2019-2021 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -178,7 +178,7 @@ int qdma_ul_update_st_h2c_desc(void *qhndl, uint64_t q_offloads,
desc_info = get_st_h2c_desc(qhndl);
desc_info->len = rte_pktmbuf_data_len(mb);
desc_info->pld_len = desc_info->len;
desc_info->src_addr = mb->buf_physaddr + mb->data_off;
desc_info->src_addr = mb->buf_iova + mb->data_off;
desc_info->flags = (S_H2C_DESC_F_SOP | S_H2C_DESC_F_EOP);
desc_info->cdh_flags = 0;
} else {
......@@ -187,7 +187,7 @@ int qdma_ul_update_st_h2c_desc(void *qhndl, uint64_t q_offloads,
desc_info->len = rte_pktmbuf_data_len(mb);
desc_info->pld_len = desc_info->len;
desc_info->src_addr = mb->buf_physaddr + mb->data_off;
desc_info->src_addr = mb->buf_iova + mb->data_off;
desc_info->flags = 0;
if (nsegs == pkt_segs)
desc_info->flags |= S_H2C_DESC_F_SOP;
......@@ -223,7 +223,7 @@ int qdma_ul_update_mm_c2h_desc(void *qhndl, struct rte_mbuf *mb, void *desc)
/* make it so the data pointer starts there too... */
mb->data_off = RTE_PKTMBUF_HEADROOM;
/* low 32-bits of phys addr must be 4KB aligned... */
desc_info->dst_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
desc_info->dst_addr = (uint64_t)mb->buf_iova + RTE_PKTMBUF_HEADROOM;
desc_info->dv = 1;
desc_info->eop = 1;
desc_info->sop = 1;
......@@ -248,7 +248,7 @@ int qdma_ul_update_mm_h2c_desc(void *qhndl, struct rte_mbuf *mb)
struct qdma_ul_mm_desc *desc_info;
desc_info = (struct qdma_ul_mm_desc *)get_mm_h2c_desc(qhndl);
desc_info->src_addr = mb->buf_physaddr + mb->data_off;
desc_info->src_addr = mb->buf_iova + mb->data_off;
desc_info->dst_addr = get_mm_h2c_ep_addr(qhndl);
desc_info->dv = 1;
desc_info->eop = 1;
......
/*-
* BSD LICENSE
*
* Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* Copyright(c) 2017-2021 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -380,7 +380,7 @@ static int qdma_rxq_context_setup(struct rte_eth_dev *dev, uint16_t qid)
cmpt_desc_fmt = CMPT_CNTXT_DESC_SIZE_8B;
break;
}
descq_conf.ring_bs_addr = rxq->rx_mz->phys_addr;
descq_conf.ring_bs_addr = rxq->rx_mz->iova;
descq_conf.en_bypass = rxq->en_bypass;
descq_conf.irq_arm = 0;
descq_conf.at = 0;
......@@ -396,7 +396,7 @@ static int qdma_rxq_context_setup(struct rte_eth_dev *dev, uint16_t qid)
} else {/* st c2h*/
descq_conf.desc_sz = SW_DESC_CNTXT_C2H_STREAM_DMA;
descq_conf.forced_en = 1;
descq_conf.cmpt_ring_bs_addr = rxq->rx_cmpt_mz->phys_addr;
descq_conf.cmpt_ring_bs_addr = rxq->rx_cmpt_mz->iova;
descq_conf.cmpt_desc_sz = cmpt_desc_fmt;
descq_conf.triggermode = rxq->triggermode;
......@@ -458,7 +458,7 @@ static int qdma_txq_context_setup(struct rte_eth_dev *dev, uint16_t qid)
memset(&descq_conf, 0, sizeof(struct mbox_descq_conf));
txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid];
qid_hw = qdma_dev->queue_base + txq->queue_id;
descq_conf.ring_bs_addr = txq->tx_mz->phys_addr;
descq_conf.ring_bs_addr = txq->tx_mz->iova;
descq_conf.en_bypass = txq->en_bypass;
descq_conf.wbi_intvl_en = 1;
descq_conf.wbi_chk = 1;
......@@ -588,7 +588,7 @@ static int qdma_vf_dev_infos_get(__rte_unused struct rte_eth_dev *dev,
return 0;
}
static void qdma_vf_dev_stop(struct rte_eth_dev *dev)
static int qdma_vf_dev_stop(struct rte_eth_dev *dev)
{
#ifdef RTE_LIBRTE_QDMA_DEBUG_DRIVER
struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
......@@ -602,9 +602,11 @@ static void qdma_vf_dev_stop(struct rte_eth_dev *dev)
qdma_vf_dev_tx_queue_stop(dev, qid);
for (qid = 0; qid < dev->data->nb_rx_queues; qid++)
qdma_vf_dev_rx_queue_stop(dev, qid);
return 0;
}
static void qdma_vf_dev_close(struct rte_eth_dev *dev)
int qdma_vf_dev_close(struct rte_eth_dev *dev)
{
struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
struct qdma_tx_queue *txq;
......@@ -700,6 +702,8 @@ static void qdma_vf_dev_close(struct rte_eth_dev *dev)
rte_free(qdma_dev->q_info);
qdma_dev->q_info = NULL;
qdma_dev->dev_configured = 0;
return 0;
}
static int qdma_vf_dev_reset(struct rte_eth_dev *dev)
......
/*-
* BSD LICENSE
*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* Copyright(c) 2019-2021 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -50,6 +50,7 @@
#include "qdma.h"
#include "qdma_access_common.h"
#include "rte_pmd_qdma.h"
#include "qdma_devops.h"
static int validate_qdma_dev_info(int port_id, uint16_t qid)
......@@ -1420,7 +1421,7 @@ static int qdma_vf_cmptq_context_write(struct rte_eth_dev *dev, uint16_t qid)
descq_conf.irq_en = 0;
descq_conf.desc_sz = SW_DESC_CNTXT_MEMORY_MAP_DMA;
descq_conf.forced_en = 1;
descq_conf.cmpt_ring_bs_addr = cmptq->cmpt_mz->phys_addr;
descq_conf.cmpt_ring_bs_addr = cmptq->cmpt_mz->iova;
descq_conf.cmpt_desc_sz = cmpt_desc_fmt;
descq_conf.triggermode = cmptq->triggermode;
......@@ -1504,7 +1505,7 @@ static int qdma_pf_cmptq_context_write(struct rte_eth_dev *dev, uint32_t qid)
q_cmpt_ctxt.timer_idx = cmptq->timeridx;
q_cmpt_ctxt.color = CMPT_DEFAULT_COLOR_BIT;
q_cmpt_ctxt.ringsz_idx = cmptq->ringszidx;
q_cmpt_ctxt.bs_addr = (uint64_t)cmptq->cmpt_mz->phys_addr;
q_cmpt_ctxt.bs_addr = (uint64_t)cmptq->cmpt_mz->iova;
q_cmpt_ctxt.desc_sz = cmpt_desc_fmt;
q_cmpt_ctxt.valid = 1;
......@@ -1774,3 +1775,42 @@ uint16_t rte_pmd_qdma_mm_cmpt_process(int port_id, uint32_t qid,
&cmptq->cmpt_cidx_info);
return count;
}
/*****************************************************************************/
/**
* Function Name: rte_pmd_qdma_dev_close
* Description: DPDK PMD function to close the device.
*
* @param port_id Port ID
*
* @return '0' on success and '< 0' on failure
*
******************************************************************************/
int rte_pmd_qdma_dev_close(uint16_t port_id)
{
struct rte_eth_dev *dev;
struct qdma_pci_dev *qdma_dev;
if (port_id >= rte_eth_dev_count_avail()) {
PMD_DRV_LOG(ERR, "Wrong port id %d\n", port_id);
return -ENOTSUP;
}
dev = &rte_eth_devices[port_id];
qdma_dev = dev->data->dev_private;
dev->data->dev_started = 0;
if (qdma_dev->is_vf)
qdma_vf_dev_close(dev);
else
qdma_dev_close(dev);
dev->data->nb_rx_queues = 0;
rte_free(dev->data->rx_queues);
dev->data->rx_queues = NULL;
dev->data->nb_tx_queues = 0;
rte_free(dev->data->tx_queues);
dev->data->tx_queues = NULL;
return 0;
}
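A minimal usage sketch for this new API, mirroring the qdma_testapp port_close() flow shown later in this change (the helper name is illustrative; it assumes a started QDMA port):

#include <rte_ethdev.h>
#include "rte_pmd_qdma.h"

/* Tear down one QDMA port: stop it, close the ethdev, then let the PMD
 * release its queue arrays via rte_pmd_qdma_dev_close(). */
static int teardown_qdma_port(uint16_t port_id)
{
	rte_eth_dev_stop(port_id);
	rte_eth_dev_close(port_id);
	return rte_pmd_qdma_dev_close(port_id);
}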
/*-
* BSD LICENSE
*
* Copyright(c) 2019-2020 Xilinx, Inc. All rights reserved.
* Copyright(c) 2019-2021 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -720,6 +720,18 @@ int rte_pmd_qdma_dev_cmptq_stop(int port_id, uint32_t qid);
******************************************************************************/
uint16_t rte_pmd_qdma_mm_cmpt_process(int port_id, uint32_t qid,
void *cmpt_buff, uint16_t nb_entries);
/*****************************************************************************/
/**
* DPDK PMD function to close the device.
*
* @param port_id Port ID
*
* @return '0' on success and '< 0' on failure
*
******************************************************************************/
int rte_pmd_qdma_dev_close(uint16_t port_id);
#ifdef __cplusplus
}
#endif
......
/*-
* BSD LICENSE
*
* Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* Copyright(c) 2017-2021 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -38,7 +38,7 @@
#define QDMA_PMD_MAJOR 2020
#define QDMA_PMD_MINOR 2
#define QDMA_PMD_PATCHLEVEL 0
#define QDMA_PMD_PATCHLEVEL 1
#define QDMA_PMD_VERSION \
qdma_stringify(QDMA_PMD_MAJOR) "." \
......
DPDK_21 {
global:
rte_pmd_qdma_set_immediate_data_state;
rte_pmd_qdma_get_bar_details;
rte_pmd_qdma_get_queue_base;
rte_pmd_qdma_set_queue_mode;
rte_pmd_qdma_get_device_capabilities;
qdma_pci_read_reg;
qdma_pci_write_reg;
rte_pmd_qdma_set_mm_endpoint_addr;
rte_pmd_qdma_dbg_qdesc;
rte_pmd_qdma_dbg_regdump;
rte_pmd_qdma_dbg_reg_info_dump;
rte_pmd_qdma_dbg_qinfo;
rte_pmd_qdma_get_pci_func_type;
rte_pmd_qdma_configure_tx_bypass;
rte_pmd_qdma_configure_rx_bypass;
rte_pmd_qdma_set_cmpt_descriptor_size;
rte_pmd_qdma_set_c2h_descriptor_prefetch;
rte_pmd_qdma_set_cmpt_overflow_check;
rte_pmd_qdma_set_cmpt_trigger_mode;
rte_pmd_qdma_set_cmpt_timer;
rte_pmd_qdma_get_immediate_data_state;
rte_pmd_qdma_dev_cmptq_setup;
rte_pmd_qdma_dev_cmptq_start;
rte_pmd_qdma_mm_cmpt_process;
rte_pmd_qdma_dev_cmptq_stop;
rte_pmd_qdma_dbg_qdevice;
rte_pmd_qdma_dev_close;
local: *;
};
# BSD LICENSE
#
# Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
# Copyright(c) 2017-2021 Xilinx, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
......@@ -33,12 +33,11 @@ $(error "Please define RTE_SDK environment variable")
endif
# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
RTE_TARGET ?= build
# Default BRAM size is set to 512K
# If the BRAM_SIZE is modified, the same value needs to be set in the driver Makefile
BRAM_SIZE ?= 524288
include $(RTE_SDK)/mk/rte.vars.mk
# binary name
APP = qdma_testapp
......@@ -46,15 +45,52 @@ APP = qdma_testapp
# all source are stored in SRCS-y
SRCS-y := testapp.c pcierw.c commands.c
CFLAGS += -O3 -DDPDK=1 -DBRAM_SIZE=$(BRAM_SIZE)
CFLAGS += $(WERROR_FLAGS)
ifeq ($(CONFIG_RTE_LIBRTE_QDMA_GCOV),y)
CFLAGS += -g -ftest-coverage -fprofile-arcs
LDFLAGS += -lgcov
endif
# PERF benchmarking may be enabled by uncommenting the following
#CFLAGS += -DPERF_BENCHMARK
# Build using pkg-config variables if possible
ifneq ($(shell pkg-config --exists libdpdk && echo 0),0)
$(error "no installation of DPDK found")
endif
all: shared
.PHONY: shared static
shared: build/$(APP)-shared
ln -sf $(APP)-shared build/$(APP)
static: build/$(APP)-static
ln -sf $(APP)-static build/$(APP)
PKGCONF ?= pkg-config
CFLAGS += -DBRAM_SIZE=$(BRAM_SIZE)
CFLAGS += -DDPDK=1
# Add flag to allow experimental API as qdma_testapp uses rte_ethdev_set_ptype API
CFLAGS += -DALLOW_EXPERIMENTAL_API
PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null)
CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk)
LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk)
LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk)
LDFLAGS += $(shell $(PKGCONF) --libs libdpdk)
LDFLAGS += -lrte_net_qdma
# for shared library builds, we need to explicitly link these PMDs
LDFLAGS_SHARED += -lrte_net_qdma
build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build
$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC)
build:
@mkdir -p $@
include $(RTE_SDK)/mk/rte.extapp.mk
.PHONY: clean
clean:
rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared
test -d build && rmdir -p build || true
/*-
* BSD LICENSE
*
* Copyright(c) 2017-2020 Xilinx, Inc. All rights reserved.
* Copyright(c) 2017-2021 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
......@@ -721,7 +721,7 @@ void port_close(int port_id)
rte_eth_dev_stop(port_id);
rte_eth_dev_close(port_id);
rte_pmd_qdma_dev_close(port_id);
pinfo[port_id].num_queues = 0;
......
From 9f9314dfc41e56068916e38c19688f0791e85ed1 Mon Sep 17 00:00:00 2001
From cb0e7303150dcbb49c3aad88ac664b691612f1bc Mon Sep 17 00:00:00 2001
From: Suryanarayana Raju Sangani <ssangani@xilinx.com>
Date: Thu, 27 Feb 2020 05:13:38 -0700
Subject: [PATCH] PKTGEN-19.12.0: Patch to add Jumbo packet support
Subject: [PATCH] PKTGEN-20.12.0: Patch to add Jumbo packet support
This patch includes:
1. Jumbo frame support for Pktgen.
......@@ -27,10 +27,10 @@ Signed-off-by: Suryanarayana Raju Sangani <ssangani@xilinx.com>
7 files changed, 55 insertions(+), 17 deletions(-)
diff --git a/app/pktgen-cmds.c b/app/pktgen-cmds.c
index e8ba94c..78fc5b4 100644
index 4da9bab..065fbe8 100644
--- a/app/pktgen-cmds.c
+++ b/app/pktgen-cmds.c
@@ -2934,18 +2934,22 @@ single_set_pkt_size(port_info_t *info, uint16_t size)
@@ -3125,18 +3125,22 @@ single_set_pkt_size(port_info_t *info, uint16_t size)
{
pkt_seq_t * pkt = &info->seq_pkt[SINGLE_PKT];
......@@ -55,7 +55,7 @@ index e8ba94c..78fc5b4 100644
pktgen_packet_ctor(info, SINGLE_PKT, -1);
pktgen_packet_rate(info);
@@ -3735,6 +3739,9 @@ range_set_cos_id(port_info_t *info, char *what, uint8_t id)
@@ -3924,6 +3928,9 @@ range_set_cos_id(port_info_t *info, char *what, uint8_t id)
void
range_set_pkt_size(port_info_t *info, char *what, uint16_t size)
{
......@@ -65,7 +65,7 @@ index e8ba94c..78fc5b4 100644
if (!strcmp(what, "inc") || !strcmp(what, "increment")) {
if (size > pktgen.eth_max_pkt)
size = pktgen.eth_max_pkt;
@@ -3742,8 +3749,8 @@ range_set_pkt_size(port_info_t *info, char *what, uint16_t size)
@@ -3931,8 +3938,8 @@ range_set_pkt_size(port_info_t *info, char *what, uint16_t size)
} else {
if (size < pktgen.eth_min_pkt)
size = MIN_PKT_SIZE;
......@@ -77,40 +77,40 @@ index e8ba94c..78fc5b4 100644
size -= PG_ETHER_CRC_LEN;
diff --git a/app/pktgen-constants.h b/app/pktgen-constants.h
index 7df8023..a36c278 100644
index ede4e65..8694e18 100644
--- a/app/pktgen-constants.h
+++ b/app/pktgen-constants.h
@@ -17,7 +17,7 @@ extern "C" {
enum {
#ifndef RTE_LIBRTE_VMXNET3_PMD
DEFAULT_PKT_BURST = 64, /* Increasing this number consumes memory very fast */
#ifdef RTE_LIBRTE_VMXNET3_PMD
- DEFAULT_RX_DESC = (DEFAULT_PKT_BURST * 8 * 2),
+ DEFAULT_RX_DESC = (DEFAULT_PKT_BURST * 8 * 2 * 2),
DEFAULT_TX_DESC = DEFAULT_RX_DESC * 2,
#else
DEFAULT_RX_DESC = (DEFAULT_PKT_BURST * 8),
@@ -30,6 +30,7 @@ enum {
DEFAULT_PKT_BURST = 32, /* Increasing this number consumes memory very fast */
@@ -31,6 +31,7 @@ enum {
DEFAULT_PRIV_SIZE = 0,
+ MBUF_9K_SIZE = 9018 + RTE_PKTMBUF_HEADROOM + DEFAULT_PRIV_SIZE,
NUM_Q = 16, /**< Number of cores per port. */
};
#define DEFAULT_MBUF_SIZE (PG_ETHER_MAX_JUMBO_FRAME_LEN + DEFAULT_PRIV_SIZE) /* See: http://dpdk.org/dev/patchwork/patch/4479/ */
#define DEFAULT_MBUF_SIZE (PG_ETHER_MAX_JUMBO_FRAME_LEN + RTE_PKTMBUF_HEADROOM) /* See: http://dpdk.org/dev/patchwork/patch/4479/ */
diff --git a/app/pktgen-main.c b/app/pktgen-main.c
index 9d52459..676bf15 100644
index 96d1c0c..9c22278 100644
--- a/app/pktgen-main.c
+++ b/app/pktgen-main.c
@@ -174,7 +174,7 @@ pktgen_parse_args(int argc, char **argv)
@@ -188,7 +188,7 @@ pktgen_parse_args(int argc, char **argv)
pktgen.mbuf_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
pktgen.verbose = 0;
- while ((opt = getopt_long(argc, argvopt, "p:m:f:l:s:g:hPNGTvj",
+ while ((opt = getopt_long(argc, argvopt, "p:m:f:l:s:g:hPNGTvj9",
- while ((opt = getopt_long(argc, argvopt, "p:m:f:l:s:g:hPNGTvjtr",
+ while ((opt = getopt_long(argc, argvopt, "p:m:f:l:s:g:hPNGTvjtr9",
lgopts, &option_index)) != EOF)
switch (opt) {
case 'j':
@@ -276,7 +276,12 @@ pktgen_parse_args(int argc, char **argv)
case 't':
@@ -300,7 +300,12 @@ pktgen_parse_args(int argc, char **argv)
case 'h': /* print out the help message */
pktgen_usage(prgname);
......@@ -124,7 +124,7 @@ index 9d52459..676bf15 100644
case 0: /* crc-strip for all ports */
printf(">>> Strip CRC in hardware is the default\n");
@@ -381,8 +386,10 @@ RTE_FINI(pktgen_fini)
@@ -407,8 +412,10 @@ RTE_FINI(pktgen_fini)
int
main(int argc, char **argv)
{
......@@ -136,7 +136,7 @@ index 9d52459..676bf15 100644
signal(SIGSEGV, sig_handler);
signal(SIGHUP, sig_handler);
@@ -532,10 +539,16 @@ main(int argc, char **argv)
@@ -563,10 +570,16 @@ main(int argc, char **argv)
/* Wait for all of the cores to stop running and exit. */
rte_eal_mp_wait_lcore();
......@@ -155,7 +155,7 @@ index 9d52459..676bf15 100644
cli_destroy();
diff --git a/app/pktgen-port-cfg.c b/app/pktgen-port-cfg.c
index 1c8d36a..100a011 100644
index a982d0d..7a27602 100644
--- a/app/pktgen-port-cfg.c
+++ b/app/pktgen-port-cfg.c
@@ -101,16 +101,19 @@ pktgen_mbuf_pool_create(const char *type, uint8_t pid, uint8_t queue_id,
......@@ -200,7 +200,7 @@ index 1c8d36a..100a011 100644
}
diff --git a/app/pktgen-range.c b/app/pktgen-range.c
index 5f04238..88cd4b5 100644
index f88258d..bbaaa6f 100644
--- a/app/pktgen-range.c
+++ b/app/pktgen-range.c
@@ -595,7 +595,8 @@ pktgen_range_setup(port_info_t *info)
......@@ -214,10 +214,10 @@ index 5f04238..88cd4b5 100644
range->vxlan_gid = info->seq_pkt[SINGLE_PKT].group_id;
range->vxlan_gid_inc = 0;
diff --git a/app/pktgen.c b/app/pktgen.c
index 1528cb9..b17e257 100644
index 26cc80d..43790e0 100644
--- a/app/pktgen.c
+++ b/app/pktgen.c
@@ -67,6 +67,7 @@ pktgen_wire_size(port_info_t *info)
@@ -74,6 +74,7 @@ pktgen_wire_size(port_info_t *info)
} else
size = info->seq_pkt[SINGLE_PKT].pktSize + PKT_OVERHEAD_SIZE;
}
......@@ -225,7 +225,7 @@ index 1528cb9..b17e257 100644
return size;
}
@@ -870,6 +871,10 @@ pktgen_setup_cb(struct rte_mempool *mp,
@@ -912,6 +913,10 @@ pktgen_setup_cb(struct rte_mempool *mp,
pkt_seq_t *pkt;
uint16_t qid, idx;
......@@ -236,7 +236,7 @@ index 1528cb9..b17e257 100644
info = data->info;
qid = data->qid;
@@ -899,7 +904,7 @@ pktgen_setup_cb(struct rte_mempool *mp,
@@ -941,7 +946,7 @@ pktgen_setup_cb(struct rte_mempool *mp,
pktgen_packet_ctor(info, idx, -1);
rte_memcpy((uint8_t *)m->buf_addr + m->data_off,
......@@ -245,7 +245,7 @@ index 1528cb9..b17e257 100644
m->pkt_len = pkt->pktSize;
m->data_len = pkt->pktSize;
@@ -1108,7 +1113,7 @@ pktgen_main_receive(port_info_t *info, uint8_t lid,
@@ -1150,7 +1155,7 @@ pktgen_main_receive(port_info_t *info, uint8_t lid,
{
uint8_t pid;
uint16_t qid, nb_rx;
......@@ -254,7 +254,7 @@ index 1528cb9..b17e257 100644
struct qstats_s *qstats;
int i;
@@ -1127,6 +1132,10 @@ pktgen_main_receive(port_info_t *info, uint8_t lid,
@@ -1169,6 +1174,10 @@ pktgen_main_receive(port_info_t *info, uint8_t lid,
for(i = 0; i < nb_rx; i++)
qstats->rxbytes += rte_pktmbuf_data_len(pkts_burst[i]);
......@@ -265,7 +265,7 @@ index 1528cb9..b17e257 100644
pktgen_recv_tstamp(info, pkts_burst, nb_rx);
/* packets are not freed in the next call. */
@@ -1143,6 +1152,7 @@ pktgen_main_receive(port_info_t *info, uint8_t lid,
@@ -1185,6 +1194,7 @@ pktgen_main_receive(port_info_t *info, uint8_t lid,
}
rte_pktmbuf_free_bulk(pkts_burst, nb_rx);
......@@ -274,10 +274,10 @@ index 1528cb9..b17e257 100644
static void
diff --git a/app/pktgen.h b/app/pktgen.h
index e871133..5271ee1 100644
index 66fa12e..20fb943 100644
--- a/app/pktgen.h
+++ b/app/pktgen.h
@@ -244,8 +244,9 @@ enum {
@@ -253,8 +253,9 @@ enum {
SOCKET0 = 0 /**< Socket ID value for allocation */
};
......@@ -288,7 +288,7 @@ index e871133..5271ee1 100644
typedef struct rte_mbuf rte_mbuf_t;
@@ -340,6 +341,7 @@ enum { /* Pktgen flags bits */
@@ -351,6 +352,7 @@ enum { /* Pktgen flags bits */
BLINK_PORTS_FLAG = (1 << 10), /**< Blink the port leds */
ENABLE_THEME_FLAG = (1 << 11), /**< Enable theme or color support */
......
0001-PKTGEN-19.12.0-Patch-to-add-Jumbo-packet-support.patch is the patch file
over dpdk-pktgen v19.12.0 that extends the dpdk-pktgen application to handle packets
0001-PKTGEN-20.12.0-Patch-to-add-Jumbo-packet-support.patch is the patch file
over dpdk-pktgen v20.12.0 that extends the dpdk-pktgen application to handle packets
larger than 1518 bytes and disables the packet-size classification logic in
dpdk-pktgen to remove application overhead in performance measurement.
......