Commit 654a6538 authored by Pankaj Darak's avatar Pankaj Darak

Merge branch 'master' of https://github.com/Xilinx/dma_ip_drivers

parents bf2c777a a0df2803
......@@ -164,7 +164,7 @@ else # bundled install
endif
$(shell rm -f $(srcdir)/drv/libqdma; ln -fs $(srcdir)/libqdma $(srcdir)/drv;)
$(shell rm -f $(srcdir)/include/qdma_reg_dump.h; ln -fs $(srcdir)/libqdma/qdma_access/qdma_reg_dump.h $(srcdir)/include/qdma_reg_dump.h;)
$(shell rm -f $(srcdir)/include/qdma_reg_dump.h; ln -fs $(srcdir)/../qdma_access/qdma_reg_dump.h $(srcdir)/include/qdma_reg_dump.h;)
.PHONY: eval.mak
......@@ -186,6 +186,13 @@ install: install-user install-etc install-dev
.PHONY: uninstall
uninstall: uninstall-user uninstall-dev
.PHONY: qdmautils
qdmautils:
@echo "#######################";
@echo "#### qdmautils ####";
@echo "#######################";
$(MAKE) -C qdmautils
.PHONY: user
user:
@echo "#######################";
......@@ -226,6 +233,10 @@ post:
.PHONY: clean
clean:
@echo "#######################";
@echo "#### qdmautils ####";
@echo "#######################";
$(MAKE) -C qdmautils clean;
@echo "#######################";
@echo "#### user ####";
@echo "#######################";
......@@ -255,6 +266,7 @@ install-user:
@echo "installing user tools to $(user_install_path) ..."
@mkdir -p -m 755 $(user_install_path)
@install -v -m 755 build/dmactl* $(user_install_path)
@install -v -m 755 build/qdma_xfer_app* $(user_install_path)
@install -v -m 755 tools/dma_from_device $(user_install_path)
@install -v -m 755 tools/dma_to_device $(user_install_path)
@install -v -m 755 tools/dmaperf $(user_install_path)
......@@ -286,6 +298,7 @@ uninstall-mods:
uninstall-user:
@echo "Un-installing user tools under $(user_install_path) ..."
@/bin/rm -f $(user_install_path)/dmactl
@/bin/rm -f $(user_install_path)/qdma_xfer_app
@/bin/rm -f $(user_install_path)/dma_from_device
@/bin/rm -f $(user_install_path)/dma_to_device
@/bin/rm -f $(user_install_path)/dmaperf
......
Release: 2019.1 Patch
=====================
RELEASE: 2019.2
===============
NOTE:
This release is based on the 2019.1 QDMA IP
This release is validated on QDMA 2019.2 Patch based example design and QDMA 2019.1 based example design
SUPPORTED FEATURES:
===================
......@@ -58,21 +57,21 @@ SUPPORTED FEATURES:
- Interoperability between Linux driver (as PF/VF) and DPDK driver (as PF/VF)
- Driver restructuring to partition QDMA access code such that it can be used across different drivers
2019.1 Patch Updates
--------------------
- Addressed the issues observed in static analysis of the code base
- Added the User Logic pluggability
- Supported HW Error detection in poll mode
2019.2 Features
---------------
- Support for PF device removal when its VF devices are active
- Support for Interrupt moderation and adaptive counter threshold
- Support for configurable number of User and Data interrupts
- Added user logic pluggable interfaces for processing the descriptors and completions
- Added new interfaces for updating the Consumer Index and Producer Index
KNOWN ISSUES:
=============
- Sometimes completions are not received when C2H PIDX updates are held for 64 descriptors
- FLR Reset on a PF while any VFs are attached to PF is not working
- Function Level Reset(FLR) of PF device when VFs are attached to this PF results in mailbox communication failure
DRIVER LIMITATIONS:
===================
- Driver compilation on Fedora 28 with gcc8.1 results compilation warnings
- Driver compilation on Fedora 28 with gcc8.1 results in compilation warnings
- Big endian systems are not supported
- All VFs created by a PF should be shut down before terminating the parent PF. This is needed as all the QDMA VF configuration is through this parent PF.
- For optimal QDMA streaming performance, packet buffers of the descriptor ring should be aligned to at least 256 bytes.
\ No newline at end of file
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS   =
SPHINXBUILD  = sphinx-build
SOURCEDIR    = source
BUILDDIR     = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

# down/genrst/html/clean are commands, not files: declare them phony so a
# stray file with the same name can never mask them, and so `html` always
# re-runs its recipe.
.PHONY: help Makefile down genrst html clean

# Fetch the pinned kernel-doc script used to extract API docs from headers.
down:
	wget -O kernel-doc "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/plain/scripts/kernel-doc?h=v4.14.52" && chmod +x kernel-doc

# Generate the exported-API rst from the libqdma public header.
# Use $(BUILDDIR) consistently (the original hard-coded `build/` here,
# which broke `make BUILDDIR=...` overrides).
genrst:
	mkdir -p $(BUILDDIR)
	./kernel-doc -rst ../../libqdma/libqdma_export.h > $(BUILDDIR)/libqdma_apis.rst

html: down genrst
	$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

# Remove everything the makefile created: the generated docs and the
# downloaded kernel-doc script.
clean:
	rm -rf $(BUILDDIR)
	rm -rf kernel-doc
###############################################################################
Xilinx QDMA Linux Driver Documentation Generation
###############################################################################
1. Installation:
Xilinx QDMA Linux driver documentation is designed based on Sphinx.
In order to generate the documentation, make sure to install the
Sphinx software. Details of required packages are available at
http://www.sphinx-doc.org/en/master/usage/installation.html
After installing the required packages, follow the steps below to generate the documentation.
Go to linux-kernel/docs/git_doc and run 'make html'
[xilinx@]# make html
'build' directory is created. Open 'build/html/index.html' for QDMA Linux driver documentation
To remove the generated documentation, run 'make clean'
[xilinx@]# make clean
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: b9d06f4250f21da7e4f96694075b60d2
tags: 645f666f9bcd5a90fca523b33c5a78b7
******************************
QDMA Debug File System Support
******************************
debugfs is a file system interface, which the kernel drivers use to expose debug information to user space. Unlike sysfs, there is no structure or rules on what to publish through this interface. debugfs files are created in the /sys/kernel/debug/ folder.
debugfs supports:
- Files with assigned file operations. These file interfaces can be either read-only or read/write.
- Files to export single integer that can be read-only or read/write
- Read-only data blob files.
QDMA Kernel driver uses this facility and exposes qdma information similar to ``dmactl`` user space application. The advantage of debugfs is that users need not depend on user space application but if they integrate the ``libqdma`` in kernel space driver, it comes along with this debug facility.
During PF/VF module loading, QDMA driver creates PF/VF directories in ``/sys/kernel/debug`` by the names ``qdma_pf`` and ``qdma_vf``. qdma_pf directory will list all the detected primary functions, similarly qdma_vf will list all virtual functions.
::
/sys/kernel/debug/qdma_pf
/sys/kernel/debug/qdma_vf
In driver module initialization, during PCIe probe, when a device that is to be handled by QDMA driver is detected, a directory with the name b:d.f format will be created in qdma_<df> directory. Ex: when qdma device 01000 is detected, a directory is created in qdma_pf ``/sys/kernel/debug/qdma_pf/01:00.0/``
Below files and directories are created during the device initialization.
- **info** : This file will give all the information specific to the device. For example, function number, is master PF, Q base and Qmax configured for the device etc.
- **regs**: This file will dump all the registers corresponding to the device. The registers are the configuration space and user space of the device. These also include global registers, which are common across all the devices.
- **queues**: This is the directory and is created to hold queues information attached to the device. The qdma_dev_conf structure will also hold pointer to this directory in dbgfs_queues_root, which would help to create queue information when a queue is added to the device.
::
/sys/kernel/debug/qdma_pf/01:00.0/info
/sys/kernel/debug/qdma_pf/01:00.0/regs
/sys/kernel/debug/qdma_pf/01:00.0/queues
When a queue is added to a QDMA device, say through dmactl, a directory with the qid is created in the corresponding ``queues`` directory of the device. This will be used to create further directories under the ‘qid’ directory.
For example, if a queue with qid 2 is added to device qdma01000, the directory with name ‘2’ is created in queues directory
::
/sys/kernel/debug/qdma_pf/01:00.0/queues/2
When a queue is added to a QDMA device, a direction directory with the name corresponding to the direction, c2h, h2c and cmpt, is created under qid directory.
- If the queue is added in H2C direction, directory with name ``h2c`` is created in the corresponding qid directory.
- If C2H queue is added, then directory with name ``c2h`` is created in the corresponding queue directory. If C2H queue is added in streaming mode, then a ``cmpt`` directory is also added to the qid directory.
- If queue is added in bidirectional mode, then both ``c2h`` and ``h2c`` directories are created in the corresponding qid directory
- Lastly, if a bidirectional queues are added in streaming mode, then a ‘cmpt’ directory is also created in the respective qid directory.
For example, if a queue with qid 2 is created in bidirectional ST mode, then all of the c2h, h2c and cmpt directories are created
::
/sys/kernel/debug/qdma_pf/01:00.0/queues/2/h2c
/sys/kernel/debug/qdma_pf/01:00.0/queues/2/c2h
/sys/kernel/debug/qdma_pf/01:00.0/queues/2/cmpt
- **info**: This file holds the information specific to the queue, for example whether the queue is online, etc.
- **cntxt**: This file will dump the contexts corresponding to the queue. ``cntxt`` in the ``cmpt`` directory will only dump the C2H ST completion context.
- **desc**: This file will dump the descriptor contents of the ring.
::
/sys/kernel/debug/qdma_pf/01:00.0/queues/2/h2c/info
/sys/kernel/debug/qdma_pf/01:00.0/queues/2/h2c/cntxt
/sys/kernel/debug/qdma_pf/01:00.0/queues/2/h2c/desc
use ``cat`` command for displaying the contents of these leaf node debug files. Example is provided below.
::
[xilinx@]# cat cntxt
SOFTWARE CTXT:
Interrupt Aggregation 0x1 1
Ring Index 0x0 0
Descriptor Ring Base Addr (High) 0x0 0
Descriptor Ring Base Addr (Low) 0x34048000 872710144
Is Memory Mapped 0x0 0
Marker Disable 0x0 0
IRQ Request 0x0 0
Writeback Error Sent 0x0 0
Error 0x0 0
Interrupt No Last 0x0 0
Port Id 0x0 0
Interrupt Enable 0x1 1
Writeback Enable 0x1 1
MM Channel 0x0 0
Bypass Enable 0x0 0
Descriptor Size 0x1 1
Ring Size 0x9 9
Fetch Max 0x0 0
Address Translation 0x0 0
Write back/Intr Interval 0x1 1
Write back/Intr Check 0x1 1
Fetch Credit Enable 0x0 0
Queue Enable 0x1 1
Function Id 0x0 0
IRQ Arm 0x1 1
PIDX 0x40a 1034
HARDWARE CTXT:
Fetch Pending 0x0 0
Eviction Pending 0x0 0
Queue Invalid No Desc Pending 0x1 1
Descriptors Pending 0x1 1
Credits Consumed 0x0 0
CIDX 0x40a 1034
****************
Developers Guide
****************
.. toctree::
:maxdepth: 1
qdma_linux_export.rst
qdma_design.rst
qdma_usecases.rst
***************
dma_from_device
***************
dma_from_device is a user application tool provided along with QDMA Linux driver to perform the Card to Host data transfers.
usage: dma_from_device [OPTIONS]
Read via SGDMA, optionally save output to a file
::
-d (--device) device (defaults to /dev/qdma01000-MM-0)
-a (--address) the start address on the AXI bus
-s (--size) size of a single transfer in bytes, default 32.
-o (--offset) page offset of transfer
-c (--count) number of transfers, default is 1.
-f (--file) file to write the data of the transfers
-h (--help) print usage help and exit
-v (--verbose) verbose output
::
[xilinx@]# dma_from_device -d /dev/qdma01000-ST-1 -s 64
** Average BW = 64, 0.880221
\ No newline at end of file
*************
dma_to_device
*************
dma_to_device is a user application tool provided along with QDMA Linux driver to perform the Host to Card data transfers.
usage: dma_to_device [OPTIONS]
Write via SGDMA, optionally read input from a file.
**Parameters**
::
-d (--device) device (defaults to /dev/qdma01000-MM-0)
-a (--address) the start address on the AXI bus
-s (--size) size of a single transfer in bytes, default 32,
-o (--offset) page offset of transfer
-c (--count) number of transfers, default 1
-f (--data infile) filename to read the data from.
-w (--data outfile) filename to write the data of the transfers
-h (--help) print usage help and exit
-v (--verbose) verbose output
::
[xilinx@]# dma_to_device -d /dev/qdma01000-ST-1 -s 64
** Average BW = 64, 0.880221
*********************
DMA Performance Tool
*********************
The Xilinx-developed custom tool ``dmaperf`` is used to collect the performance metrics for unidirectional and bidirectional traffic.
The QDMA Linux kernel reference driver is a PCIe device driver, it manages the QDMA queues in the HW. The driver creates a character device for each queue pair configured.
Standard IO tools such as ``fio`` can be used for performing IO operations using the char device interface.
However, most of the tools are limited to sending / receiving 1 packet at a time and wait for the processing of the packet to complete, so they are not able to keep the driver/ HW busy enough for performance measurement. Although fio also supports asynchronous interfaces, it does not continuously submit IO requests while polling for the completion in parallel.
To overcome this limitation, Xilinx developed dmaperf tool. It leverages the asynchronous functionality provided by libaio library. Using libaio, an application can submit IO request to the driver and the driver returns the control to the caller immediately (i.e., non-blocking). The completion notification is sent separately, so the application can then poll for the completion and free the buffer upon receiving the completion.
::
usage: dmaperf [OPTIONS]
-c (--config) config file that has configuration for IO
dmaperf tool takes a configuration file as input. The configuration file format is as below.
::
name=mm_1_1
mode=mm #mode
dir=bi #dir
pf_range=0:0 #no spaces
q_range=0:0 #no spaces
flags= #no spaces
wb_acc=5
tmr_idx=9
cntr_idx=0
trig_mode=cntr_tmr
rngidx=9
ram_width=15 #31 bits - 2^31 = 2GB
runtime=30 #secs
num_threads=8
bidir_en=1
num_pkt=64
pkt_sz=64
**Parameters**
- name : name of the configuration
- mode : mode of the queue, streaming\(st\) or memory mapped\(mm\). Mode defaults to mm.
- dir : Direction of the queue, host-to-card\(h2c\), card-to-host \(c2h\) or both \(bi\).
- pf_range : Range of the PFs from 0-3 on which the performance metrics to be collected.
- q_range : Range of the Queues from 0-2047 on which the performance metrics to be collected.
- flags : queue flags
- wb_acc : write back accumulation index from CSR register
- tmr_idx : timer index from CSR register
- cntr_idx : Counter index from CSR register
- trig_mode : trigger mode
- rngidx : Ring index from CSR register
- runtime : Duration of the performance runs, time in seconds.
- num_threads : number of threads to be used in dmaperf application to pump the traffic to queues
- bidir_en : Enable the bi-direction or not
- num_pkt : number of packets
- pkt_sz : Packet size
\ No newline at end of file
QDMA Features
#############
QDMA Linux Driver supports the following list of features
QDMA Hardware Features
**********************
* SRIOV with 4 Physical Functions(PF) and 252 Virtual Functions(VF)
* Memory Mapped(MM) and Stream(ST) interfaces per queue
* 2048 queue sets
* 2048 H2C (Host-to-Card) descriptor rings
* 2048 C2H (Card-to-Host) descriptor rings
* 2048 completion rings
* Supports Legacy and MSI-X Interrupts
* 2048 MSI-X vectors.
* Up to 8 MSI-X per function.
* Interrupt Aggregation
* User Interrupts
* Error Interrupts
* Legacy Interrupts : Supported only for PF0 with single queue
* Mailbox communication between PF and VF driver
* Interrupt support for Mailbox events
* Flexible interrupt allocation between PF/VF
* HW Error reporting
* Zero byte transfers
* Immediate data transfers
* Descriptor bypass(8, 16, 32, 64 descriptor sizes) support
* Descriptor Prefetching
* Streaming C2H completion entry coalescing
* Disabling overflow check in completion ring
* Streaming H2C to C2H and C2H to H2C loopback support
* Dynamic queue configuration
* Completion ring descriptors of 8, 16, 32, 64 bytes sizes
* Flexible BAR mapping for QDMA configuration register space
* ECC support
* Completions in memory mapped mode
For details on Hardware Features refer to QDMA_Product_Guide_.
.. _QDMA_Product_Guide: https://www.xilinx.com/support/documentation/ip_documentation/qdma/v3_0/pg302-qdma.pdf
QDMA Software Features
**********************
* Polling and Interrupt Modes
QDMA software provides 2 different drivers: a PF driver for Physical Functions and a VF driver for Virtual Functions.
PF and VF drivers can be inserted in different modes.
- Polling Mode
In Poll Mode, Software polls for the write back completions(Status Descriptor Write Back)
- Direct Interrupt Mode
In Direct Interrupt mode, Each queue is assigned to one of the available interrupt vectors in a round robin fashion to service the requests.
Interrupt is raised by the HW upon receiving the completions and software reads the completion status.
- Indirect Interrupt Mode
In Indirect Interrupt mode or Interrupt Aggregation mode, each vector has an associated Interrupt Aggregation Ring.
The QID and status of queues requiring service are written into the Interrupt Aggregation Ring.
When a PCIe MSI-X interrupt is received by the Host, the software reads the Interrupt Aggregation Ring to determine which queue needs service.
Mapping of queues to vectors is programmable
- Auto Mode
Auto mode is mix of Poll and Interrupt Aggregation mode. Driver polls for the write back status updates.
Interrupt aggregation is used for processing the completion ring.
- Allows only Privileged Physical Functions to program the contexts and registers
- Dynamic queue configuration
- Dynamic driver configuration
- Driver configuration through sysfs
- Asynchronous and Synchronous IO support
- Display the Version details for SW and HW
\ No newline at end of file
========================
Xilinx QDMA Linux Driver
========================
Xilinx QDMA Subsystem for PCIe example design is implemented on a Xilinx FPGA,
which is connected to an X86 host system through PCI Express.
Xilinx QDMA Linux Driver is implemented as a combination of user space and
kernel driver components to control and configure the QDMA subsystem.
QDMA Linux Driver consists of the following four major components:
- **QDMA HW Access**: QDMA HW Access module handles all the QDMA IP register access functionality and exposes a set of APIs for register read/writes.
- **Libqdma**: Libqdma module exposes a set of APIs for QDMA IP configuration and management. It uses the QDMA HW Access layer for interacting with the QDMA HW and facilitates the upper layers to achieve the following QDMA functionalities
- Physical Function(PF) Management
- Virtual Function(VF) Management
- Communication Management between PF and VF
- QDMA Queue Configuration and Control
- Descriptor Rings Creation and Control
- Transfer Management
- DebugFS Support
- **Driver Interface**: This layer creates a simple Linux pci_driver interface and a character driver interface to demonstrate the QDMA IP functionalities using the Linux QDMA IP driver. It creates a NL interface to facilitate the user applications to interact with the ``Libqdma`` module. It also creates a ``sysfs`` interface to enable users to configure and control various QDMA IP parameters.
- **Applications**: QDMA IP Driver provides the following sample applications.
- Dmactl : This application provides set of commands to configure and control the queues in the system
- dma_to_device : This application enables the users to perform Host to Card(H2C) transfer
- dma_from_device : This application enables the users to perform Card to Host(C2H) transfer
- Dmaperf : This application provides interfaces to extract performance numbers for QDMA IP in MM and ST modes
.. image:: /images/qdma_linux_driver_architecture.PNG
:align: center
----------------------------------------------------------------------------
.. toctree::
:maxdepth: 1
:caption: Table of Contents
features.rst
system-requirements.rst
build.rst
userguide.rst
user-app.rst
devguide.rst
performance.rst
\ No newline at end of file
QDMA Performance
----------------
Refer to QDMA_Performance_Answer_Record_ for more details.
.. _QDMA_Performance_Answer_Record: https://www.xilinx.com/support/answers/71453.html
\ No newline at end of file
*******************************
QDMA Linux Driver Exported APIs
*******************************
.. include:: ../build/libqdma_apis.rst
*************
QDMA Mailbox
*************
The QDMA Subsystem for PCIe provides an optional feature to support the Single Root I/O
Virtualization (SR-IOV). SR-IOV classifies the functions as:
- Physical Functions (PF) :
Full featured PCIe functions which include SR-IOV capabilities among others.
- Virtual Functions (VF):
PCIe functions featuring configuration space with Base Address
Registers (BARs) but lacking the full configuration resources and are controlled by the PF
configuration. The main role of the VF is data transfer.
VF can communicate to a PF through mailbox. Each function implements one 128B inbox and 128B outbox message buffer.
These mailboxes are accessible to the driver via PCIe BAR of its own function.
HW also provides ability to interrupt the driver for an incoming mailbox message to a PCIe function.
For further details on the mailbox internals and mailbox registers, refer to QDMA_Product_Guide_.
.. _QDMA_Product_Guide: https://www.xilinx.com/support/documentation/ip_documentation/qdma/v3_0/pg302-qdma.pdf
The Physical Function (PF) is privileged with full access to QDMA registers and resources, but VFs update only data handling registers and interrupts.
VF drivers must communicate with the driver attached to the PF through the mailbox for configuration and resource allocation.
The PF and VF drivers define the message formatting to be exchanged between the driver.
The driver enables mailbox interrupt and registers a Rx handler for notification of incoming mailbox message.
It also creates a Tx thread to send a message and poll for its response from the peer driver.
Following are the list of messages supported between PF and VF driver
::
enum mbox_msg_op {
/** @MBOX_OP_BYE: vf off line */
MBOX_OP_BYE,
/** @MBOX_OP_HELLO: vf on line */
MBOX_OP_HELLO,
/** @: FMAP programming request */
MBOX_OP_FMAP,
/** @MBOX_OP_CSR: global CSR registers request */
MBOX_OP_CSR,
/** @MBOX_OP_QREQ: request queues */
MBOX_OP_QREQ,
/** @MBOX_OP_QNOTIFY_ADD: notify of queue addition */
MBOX_OP_QNOTIFY_ADD,
/** @MBOX_OP_QNOTIFY_DEL: notify of queue deletion */
MBOX_OP_QNOTIFY_DEL,
/** @MBOX_OP_QCTXT_WRT: queue context write */
MBOX_OP_QCTXT_WRT,
/** @MBOX_OP_QCTXT_RD: queue context read */
MBOX_OP_QCTXT_RD,
/** @MBOX_OP_QCTXT_CLR: queue context clear */
MBOX_OP_QCTXT_CLR,
/** @MBOX_OP_QCTXT_INV: queue context invalidate */
MBOX_OP_QCTXT_INV,
/** @MBOX_OP_INTR_CTXT_WRT: interrupt context write */
MBOX_OP_INTR_CTXT_WRT,
/** @MBOX_OP_INTR_CTXT_RD: interrupt context read */
MBOX_OP_INTR_CTXT_RD,
/** @MBOX_OP_INTR_CTXT_CLR: interrupt context clear */
MBOX_OP_INTR_CTXT_CLR,
/** @MBOX_OP_INTR_CTXT_INV: interrupt context invalidate */
MBOX_OP_INTR_CTXT_INV,
/** @MBOX_OP_HELLO_RESP: response to @MBOX_OP_HELLO */
MBOX_OP_HELLO_RESP = 0x81,
/** @MBOX_OP_FMAP_RESP: response to @MBOX_OP_FMAP */
MBOX_OP_FMAP_RESP,
/** @MBOX_OP_CSR_RESP: response to @MBOX_OP_CSR */
MBOX_OP_CSR_RESP,
/** @MBOX_OP_QREQ_RESP: response to @MBOX_OP_QREQ */
MBOX_OP_QREQ_RESP,
/** @MBOX_OP_QADD: notify of queue addition */
MBOX_OP_QNOTIFY_ADD_RESP,
/** @MBOX_OP_QNOTIFY_DEL: notify of queue deletion */
MBOX_OP_QNOTIFY_DEL_RESP,
/** @MBOX_OP_QCTXT_WRT_RESP: response to @MBOX_OP_QCTXT_WRT */
MBOX_OP_QCTXT_WRT_RESP,
/** @MBOX_OP_QCTXT_RD_RESP: response to @MBOX_OP_QCTXT_RD */
MBOX_OP_QCTXT_RD_RESP,
/** @MBOX_OP_QCTXT_CLR_RESP: response to @MBOX_OP_QCTXT_CLR */
MBOX_OP_QCTXT_CLR_RESP,
/** @MBOX_OP_QCTXT_INV_RESP: response to @MBOX_OP_QCTXT_INV */
MBOX_OP_QCTXT_INV_RESP,
/** @MBOX_OP_INTR_CTXT_WRT_RESP: response to @MBOX_OP_INTR_CTXT_WRT */
MBOX_OP_INTR_CTXT_WRT_RESP,
/** @MBOX_OP_INTR_CTXT_RD_RESP: response to @MBOX_OP_INTR_CTXT_RD */
MBOX_OP_INTR_CTXT_RD_RESP,
/** @MBOX_OP_INTR_CTXT_CLR_RESP: response to @MBOX_OP_INTR_CTXT_CLR */
MBOX_OP_INTR_CTXT_CLR_RESP,
/** @MBOX_OP_INTR_CTXT_INV_RESP: response to @MBOX_OP_INTR_CTXT_INV */
MBOX_OP_INTR_CTXT_INV_RESP,
/** @MBOX_OP_MAX: total mbox opcodes*/
MBOX_OP_MAX
};
The following sequence diagrams shows the mailbox communication between PF and VF for VF configuration.
.. image:: /images/mailbox_communication.png
:align: center
\ No newline at end of file
******************************
QDMA Queue Resource Management
******************************
QDMA IP supports 2K queues. QDMA Resource Manager defines the strategy to allocate the queues across the available PFs and VFs.
Resource Manager maintains a global resource linked list in the driver. It creates a linked list of nodes for each PCIe device (PCIe bus) it manages.
Each device (bus) node in the Resource Manager list is initialized with queue base and number of queues to manage on that device.
Given the request for a number of queues for a PCIe function on a PCIe device, Resource Manager finds the best fit available
queue base with contiguous requested number of queues on that PCIe device.
Resource Manager manages queue base allocation for all the PCIe devices and PCIe functions registered with it by the driver.
Each device node in the global resource linked list maintains a "free list" with all the queues initially assigned to it, and a "device list" initialized to NULL.
When a PCIe function is initialized and requests the required queues, Resource Manager finds the best fit available queue base
with contiguous requested number of queues from the "free list" of the requested device node. If found, it creates a PCIe function node in the "device list"
and assigns the queue base and requested queues to this PCIe function node and removes these queues from the "free list".
Driver can request a specific queue base for a PCIe function and if the requested queue base is available,
Resource Manager honors it, else, allocates the queues at a base determined by the best fit algorithm.
Host system can have drivers in user space or kernel space to manage the devices connected.
E.g. Linux QDMA Driver is a kernel mode driver and DPDK PMD is a User Space driver.
Each driver maintains its own resource manager with queue base and total queues to manage specified for each device it manages.
Below, we demonstrate few examples on how Resource Manager is used in different combinations of drivers managing QDMA devices
Single device managed by Single driver
======================================
.. image:: /images/resmgmt_sdev_sdri.PNG
:align: center
In this scenario, one PCIe QDMA device is connected to the Host System and at any given time either the Linux kernel driver or the user space DPDK driver is
managing the device. There is a single Resource Manager managing all the queues of the device and assigning the queues to functions based
on the request from each function.
Single device managed by multiple drivers
==========================================
.. image:: /images/resmgmt_sdev_mdri.PNG
:align: center
In this scenario, one device is connected to the Host System. Linux kernel driver and user space DPDK driver are loaded to manage different PCIe functions of the device.
Each driver will create its own Resource Manager using a queue base and total queues to manage the functions binded to them.
Each Resource Manager has its own pool of queues and the PCIe functions get their queues resourced from this pool.
Multiple devices on single Host managed by Single driver
========================================================
.. image:: /images/resmgmt_mdev_sdri.PNG
:align: center
In this scenario, more than one devices are connected to the Host System.
Either user space DPDK driver or Linux kernel driver is loaded to manage these devices.
The driver will instantiate the Resource Manager and create one node per PCIe device connected,
resourcing each PCIe device (node) with the queue base and total number of queues to manage.
PCIe functions corresponding to each PCIe device are allocated queues from the respective device nodes.
.. _sys_req:
System Requirements
===================
Xilinx Accelerator Card
-----------------------
1. VCU1525
2. TULVU9P
Host Platform
-------------
x86_64 host system with at least one Gen 3x16 PCIe slot and minimum 32GB RAM
on same CPU node for 2K queues.
For VM testing, host system must support virtualization and it must be enabled in the BIOS.
Host System Configuration
-------------------------
Linux QDMA Driver latest release is verified on following Host system configuration for PF and VF functionality
+--------------------------+-------------------------------------------------------------+
| Host System | Configuration Details |
+==========================+=============================================================+
| Operating System | Ubuntu 18.04 LTS |
+--------------------------+-------------------------------------------------------------+
| Linux Kernel | 4.15.0-23-generic |
+--------------------------+-------------------------------------------------------------+
| RAM | 64GB on local NUMA node |
+--------------------------+-------------------------------------------------------------+
| Hypervisor | KVM |
+--------------------------+-------------------------------------------------------------+
| Qemu Version | QEMU emulator version 2.5.0 (Debian 1:2.5+dfsg-5ubuntu10.15)|
+--------------------------+-------------------------------------------------------------+
Note: QEMU is a hosted virtual machine monitor which emulates the machine's processor through dynamic binary translation and provides a set of different hardware and device models for the machine, enabling it to run a variety of guest operating systems. This is used to emulate the virtual machines and enables the users to attach virtual functions to virtual machines.
Guest System Configuration
--------------------------
Linux QDMA VF Driver latest release is verified on following Host system configuration for VF functionality
========================= ==================================
Guest System(VM) Configuration Details
========================= ==================================
Operating System Ubuntu 18.04 LTS
Linux Kernel 4.15.1-20-generic
RAM 4GB
Cores 4
========================= ==================================
Supported OS List
------------------
Linux QDMA Driver also supported on following OS and kernel versions
+-------------------------+-------------+----------------+
| Operating System | OS Version | Kernel Version |
+=========================+=============+================+
| CentOS |7.4-1708 |3.10.0-693 |
| +-------------+----------------+
| |7.5-1804 |3.10.0-862 |
| +-------------+----------------+
| |7.6-1810 |3.10.0-957 |
+-------------------------+-------------+----------------+
|Fedora |28 |4.16 |
| +-------------+----------------+
| |29 |4.18 |
| +-------------+----------------+
| |30 |5.0 |
+-------------------------+-------------+----------------+
|Ubuntu |14.04.06 |4.4.0-93 |
| +-------------+----------------+
| |16.04 |4.10 |
| +-------------+----------------+
| |18.04 |4.15.0-23 |
| +-------------+----------------+
| |18.04.2 |3.10.0 |
| +-------------+----------------+
| |18.04.2 |3.10.0 |
+-------------------------+-------------+----------------+
Supported Kernel.org Version List
---------------------------------
Linux QDMA Driver verified on following kernel.org versions
+-------------------------+-----------------+
|Kernel.org | Kernel Version |
+=========================+=================+
| | 3.16.66 |
| +-----------------+
| | 4.4.179 |
| +-----------------+
| | 4.19.41 |
| +-----------------+
| | 4.14.117 |
| +-----------------+
| | 4.9.174 |
| +-----------------+
| | 5.0.14 |
+-------------------------+-----------------+
The following kernel functions shall be included in the OS kernel being used. Make sure that these functions are included in the kernel.
- Timer Functions
- PCIe Functions
- Kernel Memory functions
- Kernel threads
- Memory and GFP Functions
*****************
User Applications
*****************
.. toctree::
:maxdepth: 1
dmactl.rst
dma_to_device.rst
dma_from_device.rst
dmaperf.rst
\ No newline at end of file
User Guide
==========
This section describes the details on controlling and configuring the QDMA IP
System Level Configurations
---------------------------
QDMA driver provides the sysfs interface to enable the user to perform system level configurations. QDMA ``PF`` and ``VF`` drivers expose several ``sysfs`` nodes under the ``pci`` device root node.
Once the qdma module is inserted, and until any queue is added into the system and FMAP programming is done, sysfs provides an interface to configure parameters for the module configuration.
::
[xilinx@]# lspci | grep -i Xilinx
01:00.0 Memory controller: Xilinx Corporation Device 903f
01:00.1 Memory controller: Xilinx Corporation Device 913f
01:00.2 Memory controller: Xilinx Corporation Device 923f
01:00.3 Memory controller: Xilinx Corporation Device 933f
Based on the above lspci output, traverse to ``/sys/bus/pci/devices/<device node>/qdma`` to find the list of configurable parameters specific to PF or VF driver.
1. **Instantiates the Virtual Functions**
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
QDMA IP supports 252 Virtual Functions(VFs). ``/sys/bus/pci/devices/<device node>`` provides two configurable entries
- ``sriov_totalvfs`` : Indicates the maximum number of VFs supported for PF. This is a read only entry which can be configured during bit stream generation.
- ``sriov_numvfs`` : Enables the user to specify the number of VFs required for a PF
Display the currently supported max VFs:
::
[xilinx@]# cat /sys/bus/pci/devices/0000:01:00.0/sriov_totalvfs
Instantiate the required number of VFs for a PF:
::
[xilinx@]# echo 3 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
Once the VFs are instantiated, the required number of queues can be allocated to a VF using the ``qmax`` sysfs entry available in the VF at
/sys/bus/pci/devices/<VF function number>/qdma/qmax
2. **Allocate the Queues to a function**
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
QDMA IP supports a maximum of 2048 queues. By default there are no queues assigned to the functions.
``qmax`` configuration parameter enables the user to update the number of queues for a PF. This configuration parameter indicates "Maximum number of queues associated for the current pf".
If the queue allocation needs to be different for any PF, access the qmax sysfs entry and set the required number.
Once the number of queues for any PF is changed from the default value, the remaining set of queues among the 2048 queues are evenly distributed for the remaining PFs.
Display the current value:
::
[xilinx@]# cat /sys/bus/pci/devices/0000:01:00.0/qdma/qmax
0
Set a new value:
::
[xilinx@]# echo 1024 > /sys/bus/pci/devices/0000:01:00.0/qdma/qmax
[xilinx@]# dmactl dev list
qdma01000 0000:01:00.0 max QP: 1024, 0~1023
qdma01001 0000:01:00.1 max QP: 0, -~-
qdma01002 0000:01:00.2 max QP: 0, -~-
qdma01003 0000:01:00.3 max QP: 0, -~-
[xilinx@]# echo 1770 > /sys/bus/pci/devices/0000\:01\:00.0/qdma/qmax
[xilinx@]# echo 8 > /sys/bus/pci/devices/0000\:01\:00.1/qdma/qmax
[xilinx@]# echo 8 > /sys/bus/pci/devices/0000\:01\:00.2/qdma/qmax
[xilinx@]# echo 8 > /sys/bus/pci/devices/0000\:01\:00.3/qdma/qmax
[xilinx@]# dmactl dev list
qdma01000 0000:01:00.0 max QP: 1770, 0~1769
qdma01001 0000:01:00.1 max QP: 8, 1770~1777
qdma01002 0000:01:00.2 max QP: 8, 1778~1785
qdma01003 0000:01:00.3 max QP: 8, 1786~1793
``qmax`` configuration parameter is available for virtual functions as well. Once the ``qmax_vfs`` is configured, qmax for each VF can be updated from pool of queues assigned for the VFs.
3. **Reserve the Queues to VFs**
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
QDMA IP supports 2048 queues and from the set of 2048 queues, use the ``qmax`` sysfs entry to allocate queues to VFs similar to PFs.
Display the current value:
::
[xilinx@] #cat /sys/bus/pci/devices/0000:81:00.4/qdma/qmax
0
Set a new value:
::
[xilinx@] #echo 1024 > /sys/bus/pci/devices/0000:81:00.4/qdma/qmax
4. **Set Interrupt Ring Size**
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Interrupt ring size is associated with indirect interrupt mode.
When the module is inserted in indirect interrupt mode, by default the interrupt aggregation ring size is set to 0, i.e. 512 entries.
User can configure the interrupt ring entries in multiples of 512, hence set the ``intr_rngsz`` with the desired multiplication factor.
| 0 - INTR_RING_SZ_4KB, Accommodates 512 entries
| 1 - INTR_RING_SZ_8KB, Accommodates 1024 entries
| 2 - INTR_RING_SZ_12KB, Accommodates 1536 entries
| 3 - INTR_RING_SZ_16KB, Accommodates 2048 entries
| 4 - INTR_RING_SZ_20KB, Accommodates 2560 entries
| 5 - INTR_RING_SZ_24KB, Accommodates 3072 entries
| 6 - INTR_RING_SZ_28KB, Accommodates 3584 entries
| 7 - INTR_RING_SZ_32KB, Accommodates 4096 entries
Display the current value:
::
[xilinx@]# cat /sys/bus/pci/devices/0000:81:00.0/qdma/intr_rngsz
0
Set a new value:
::
[xilinx@]# echo 2 > /sys/bus/pci/devices/0000:81:00.0/qdma/intr_rngsz
5. **Set Completion Interval**
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``cmpt_intrvl`` indicates the interval at which completions are generated for an MM or H2C Stream queue running in non-bypass mode.
User can set any of the following list of values for this configuration parameter.
| 3'h0: 4
| 3'h1: 8
| 3'h2: 16
| 3'h3: 32
| 3'h4: 64
| 3'h5: 128
| 3'h6: 256
| 3'h7: 512
Completion accumulation value is calculated as 2^(register bit [2:0]). Maximum accumulation is 512.
Accumulation can be disabled via queue context.
Display the current value:
::
[xilinx@]# cat /sys/bus/pci/devices/0000:81:00.0/qdma/cmpt_intrvl
0
Set a new value:
::
[xilinx@]# echo 2 > /sys/bus/pci/devices/0000:81:00.0/qdma/cmpt_intrvl
Queue Management
----------------
QDMA driver comes with a command-line configuration utility called ``dmactl`` to manage the queues in the system.
This diff is collapsed.
.fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-weight:normal;font-style:normal;src:url("../font/fontawesome_webfont.eot");src:url("../font/fontawesome_webfont.eot?#iefix") format("embedded-opentype"),url("../font/fontawesome_webfont.woff") format("woff"),url("../font/fontawesome_webfont.ttf") format("truetype"),url("../font/fontawesome_webfont.svg#FontAwesome") format("svg")}.fa:before{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa{display:inline-block;text-decoration:inherit}li .fa{display:inline-block}li .fa-large:before,li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.fas li .fa{width:0.8em}ul.fas li .fa-large:before,ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before{content:""}.icon-book:before{content:""}.fa-caret-down:before{content:""}.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.icon-caret-up:before{content:""}.fa-caret-left:before{content:""}.icon-caret-left:before{content:""}.fa-caret-right:before{content:""}.icon-caret-right:before{content:""}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;border-top:solid 10px #343131;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version 
.fa-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}img{width:100%;height:auto}}
/*# sourceMappingURL=badge_only.css.map */
This source diff could not be displayed because it is too large. You can view the blob instead.
/*
* doctools.js
* ~~~~~~~~~~~
*
* Sphinx JavaScript utilities for all documentation.
*
* :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
/**
 * select a different prefix for underscore
 */
// Give underscore.js the global alias `$u`, releasing `_` so this file can
// reuse it as the gettext shorthand (assigned at the bottom of the file).
$u = _.noConflict();
/**
* make the code below compatible with browsers without
* an installed firebug like debugger
if (!window.console || !console.firebug) {
var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
"dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
"profile", "profileEnd"];
window.console = {};
for (var i = 0; i < names.length; ++i)
window.console[names[i]] = function() {};
}
*/
/**
* small helper function to urldecode strings
*/
jQuery.urldecode = function(x) {
  // Decode a URL-encoded component; '+' is treated as an encoded space.
  var decoded = decodeURIComponent(x);
  return decoded.replace(/\+/g, ' ');
};
/**
 * small helper function to urlencode strings
 */
// Direct alias: encodeURIComponent already provides the needed behaviour
// (spaces become %20, not '+').
jQuery.urlencode = encodeURIComponent;
/**
* This function returns the parsed url parameters of the
* current request. Multiple values per key are supported,
* it will always return arrays of strings for the value parts.
*/
jQuery.getQueryParameters = function(s) {
  // Parse a query string (defaults to the current page's) into an object
  // mapping each decoded key to an ARRAY of decoded values, so repeated
  // keys are preserved.
  if (typeof s === 'undefined')
    s = document.location.search;
  var query = s.substr(s.indexOf('?') + 1);
  var result = {};
  query.split('&').forEach(function(pair) {
    var tmp = pair.split('=', 2);
    var key = jQuery.urldecode(tmp[0]);
    var value = jQuery.urldecode(tmp[1]);
    if (key in result)
      result[key].push(value);
    else
      result[key] = [value];
  });
  return result;
};
/**
* highlight a given string on a jquery object by wrapping it in
* span elements with the given class name.
*/
jQuery.fn.highlightText = function(text, className) {
  // Recursively walk `node`, wrapping the first case-insensitive occurrence
  // of `text` in each text node. SVG rect insertions are deferred via
  // `addItems` so the DOM is not mutated while getBBox() geometry is read.
  function highlight(node, addItems) {
    if (node.nodeType === 3) {
      // Text node: find a match, unless the parent is already a highlight
      // or opted out via the "nohighlight" class.
      var val = node.nodeValue;
      var pos = val.toLowerCase().indexOf(text);
      if (pos >= 0 &&
          !jQuery(node.parentNode).hasClass(className) &&
          !jQuery(node.parentNode).hasClass("nohighlight")) {
        var span;
        var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
        if (isInSVG) {
          // <span> is not valid inside SVG; use <tspan> plus a backing <rect>.
          span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
        } else {
          span = document.createElement("span");
          span.className = className;
        }
        // Split the text node into [before][span(match)][after].
        span.appendChild(document.createTextNode(val.substr(pos, text.length)));
        node.parentNode.insertBefore(span, node.parentNode.insertBefore(
          document.createTextNode(val.substr(pos + text.length)),
          node.nextSibling));
        node.nodeValue = val.substr(0, pos);
        if (isInSVG) {
          // Build a rect matching the tspan's rendered bounds; the actual
          // insertion happens after the traversal (see loop below).
          var bbox = span.getBBox();
          var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
          rect.x.baseVal.value = bbox.x;
          rect.y.baseVal.value = bbox.y;
          rect.width.baseVal.value = bbox.width;
          rect.height.baseVal.value = bbox.height;
          rect.setAttribute('class', className);
          // NOTE(review): `parentOfText` is assigned but never used — appears
          // to be an upstream leftover; confirm against Sphinx doctools.js.
          var parentOfText = node.parentNode.parentNode;
          addItems.push({
            "parent": node.parentNode,
            "target": rect});
        }
      }
    }
    else if (!jQuery(node).is("button, select, textarea")) {
      // Element node: recurse into children, skipping form controls.
      jQuery.each(node.childNodes, function() {
        highlight(this, addItems);
      });
    }
  }
  var addItems = [];
  var result = this.each(function() {
    highlight(this, addItems);
  });
  // Insert the deferred SVG highlight rects once traversal is complete.
  for (var i = 0; i < addItems.length; ++i) {
    jQuery(addItems[i].parent).before(addItems[i].target);
  }
  return result;
};
/*
* backward compatibility for jQuery.browser
* This will be supported until firefox bug is fixed.
*/
// jQuery.browser was removed in jQuery 1.9; reconstruct it from the
// user-agent string so consumers such as fixFirefoxAnchorBug (which reads
// $.browser.mozilla) keep working.
if (!jQuery.browser) {
  // Parse a user-agent string into { browser, version }.
  jQuery.uaMatch = function(ua) {
    ua = ua.toLowerCase();
    // First matching engine wins; falls back to an empty match.
    var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
      /(webkit)[ \/]([\w.]+)/.exec(ua) ||
      /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
      /(msie) ([\w.]+)/.exec(ua) ||
      ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
      [];
    return {
      browser: match[ 1 ] || "",
      version: match[ 2 ] || "0"
    };
  };
  jQuery.browser = {};
  // e.g. jQuery.browser.chrome === true for a Chrome UA.
  jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
}
/**
* Small JavaScript module for the documentation.
*/
var Documentation = {

  // Entry point: wire up all page behaviours (called from the ready
  // handler at the bottom of this file).
  init : function() {
    this.fixFirefoxAnchorBug();
    this.highlightSearchWords();
    this.initIndexTable();
    if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) {
      this.initOnKeyListeners();
    }
  },

  /**
   * i18n support
   */
  TRANSLATIONS : {},
  // Maps a count n to an index into a translation-forms array; this default
  // (English-style: singular vs. everything else) is replaced when a
  // catalog is loaded via addTranslations().
  PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; },
  LOCALE : 'unknown',

  // gettext and ngettext don't access this so that the functions
  // can safely bound to a different name (_ = Documentation.gettext)
  gettext : function(string) {
    var translated = Documentation.TRANSLATIONS[string];
    if (typeof translated === 'undefined')
      return string;
    return (typeof translated === 'string') ? translated : translated[0];
  },

  ngettext : function(singular, plural, n) {
    var translated = Documentation.TRANSLATIONS[singular];
    if (typeof translated === 'undefined')
      return (n == 1) ? singular : plural;
    // BUGFIX: was `Documentation.PLURALEXPR(n)` — no such property exists
    // (it is declared as PLURAL_EXPR above and reassigned by
    // addTranslations), so any plural lookup on a loaded catalog threw a
    // TypeError instead of returning the translated form.
    return translated[Documentation.PLURAL_EXPR(n)];
  },

  // Merge a Sphinx-emitted message catalog ({messages, plural_expr,
  // locale}) into this module.
  addTranslations : function(catalog) {
    for (var key in catalog.messages)
      this.TRANSLATIONS[key] = catalog.messages[key];
    // plural_expr is a JS expression in `n`; coerce its result to a number.
    this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
    this.LOCALE = catalog.locale;
  },

  /**
   * add context elements like header anchor links
   */
  // NOTE(review): not invoked from init() in this file — presumably called
  // from templates/themes; verify before removing.
  addContextElements : function() {
    $('div[id] > :header:first').each(function() {
      $('<a class="headerlink">\u00B6</a>').
      attr('href', '#' + this.id).
      attr('title', _('Permalink to this headline')).
      appendTo(this);
    });
    $('dt[id]').each(function() {
      $('<a class="headerlink">\u00B6</a>').
      attr('href', '#' + this.id).
      attr('title', _('Permalink to this definition')).
      appendTo(this);
    });
  },

  /**
   * workaround a firefox stupidity
   * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
   */
  fixFirefoxAnchorBug : function() {
    // Reassigning the identical href forces Firefox to re-scroll to the
    // anchor after the page has rendered.
    if (document.location.hash && $.browser.mozilla)
      window.setTimeout(function() {
        document.location.href += '';
      }, 10);
  },

  /**
   * highlight the search words provided in the url in the text
   */
  highlightSearchWords : function() {
    var params = $.getQueryParameters();
    var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
    if (terms.length) {
      var body = $('div.body');
      if (!body.length) {
        body = $('body');
      }
      // Defer so highlighting runs after initial layout.
      window.setTimeout(function() {
        $.each(terms, function() {
          body.highlightText(this.toLowerCase(), 'highlighted');
        });
      }, 10);
      $('<p class="highlight-link"><a href="javascript:Documentation.' +
        'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
        .appendTo($('#searchbox'));
    }
  },

  /**
   * init the domain index toggle buttons
   */
  initIndexTable : function() {
    var togglers = $('img.toggler').click(function() {
      // Toggler ids look like "toggle-N"; strip the 7-char prefix to get N.
      var src = $(this).attr('src');
      var idnum = $(this).attr('id').substr(7);
      $('tr.cg-' + idnum).toggle();
      // Swap the +/- icon by rewriting the image filename suffix.
      if (src.substr(-9) === 'minus.png')
        $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
      else
        $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
    }).css('display', '');
    if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
      togglers.click();
    }
  },

  /**
   * helper function to hide the search marks again
   */
  hideSearchWords : function() {
    $('#searchbox .highlight-link').fadeOut(300);
    $('span.highlighted').removeClass('highlighted');
  },

  /**
   * make the url absolute
   */
  makeURL : function(relativeURL) {
    return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
  },

  /**
   * get the current relative url
   */
  getCurrentURL : function() {
    // Pop one path component per ".." in URL_ROOT to find the doc root,
    // then return the page path relative to it.
    var path = document.location.pathname;
    var parts = path.split(/\//);
    $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
      if (this === '..')
        parts.pop();
    });
    var url = parts.join('/');
    return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
  },

  // Arrow-key navigation between prev/next pages.
  initOnKeyListeners: function() {
    $(document).keyup(function(event) {
      var activeElementType = document.activeElement.tagName;
      // don't navigate when in search box or textarea
      if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') {
        switch (event.keyCode) {
          case 37: // left
            var prevHref = $('link[rel="prev"]').prop('href');
            if (prevHref) {
              window.location.href = prevHref;
              return false;
            }
            // NOTE(review): no `break` here — when there is no prev link,
            // a left-arrow falls through and follows the *next* link.
            // Preserved as-is; confirm against upstream Sphinx doctools.js
            // before changing.
          case 39: // right
            var nextHref = $('link[rel="next"]').prop('href');
            if (nextHref) {
              window.location.href = nextHref;
              return false;
            }
        }
      }
    });
  }
};
// quick alias for translations
_ = Documentation.gettext;
$(document).ready(function() {
Documentation.init();
});
This diff is collapsed.
// Browserify bootstrap (generated, minified): defines a minimal require()
// shim and registers the single "sphinx-rtd-theme" module defined below.
require=(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({"sphinx-rtd-theme":[function(require,module,exports){
// Use the page-level jQuery in browsers; fall back to require() under node.
var jQuery = (typeof(window) != 'undefined') ? window.jQuery : require('jquery');

// Sphinx theme nav state
function ThemeNav () {

    // Mutable navigation state shared by the handlers below.
    var nav = {
        navBar: null,
        win: null,
        winScroll: false,
        winResize: false,
        linkScroll: false,
        winPosition: 0,
        winHeight: null,
        docHeight: null,
        isRunning: null
    };

    // Install all handlers; the body runs once the DOM is ready.
    nav.enable = function () {
        var self = this;

        jQuery(function ($) {
            self.init($);
            self.reset();
            self.win.on('hashchange', self.reset);

            // Set scroll monitor
            // (events only set a flag; actual work runs in a 25ms poll so
            // the scroll/resize handlers themselves stay cheap)
            self.win.on('scroll', function () {
                if (!self.linkScroll) {
                    self.winScroll = true;
                }
            });
            setInterval(function () { if (self.winScroll) self.onScroll(); }, 25);

            // Set resize monitor
            self.win.on('resize', function () {
                self.winResize = true;
            });
            setInterval(function () { if (self.winResize) self.onResize(); }, 25);
            self.onResize();
        });
    };

    // Cache DOM handles and wire up all click behaviours.
    nav.init = function ($) {
        var doc = $(document),
            self = this;

        this.navBar = $('div.wy-side-scroll:first');
        this.win = $(window);

        // Set up javascript UX bits
        $(document)
            // Shift nav in mobile when clicking the menu.
            .on('click', "[data-toggle='wy-nav-top']", function() {
                $("[data-toggle='wy-nav-shift']").toggleClass("shift");
                $("[data-toggle='rst-versions']").toggleClass("shift");
            })

            // Nav menu link click operations
            .on('click', ".wy-menu-vertical .current ul li a", function() {
                var target = $(this);
                // Close menu when you click a link.
                $("[data-toggle='wy-nav-shift']").removeClass("shift");
                $("[data-toggle='rst-versions']").toggleClass("shift");
                // Handle dynamic display of l3 and l4 nav lists
                self.toggleCurrent(target);
                self.hashChange();
            })
            .on('click', "[data-toggle='rst-current-version']", function() {
                $("[data-toggle='rst-versions']").toggleClass("shift-up");
            })

        // Make tables responsive
        $("table.docutils:not(.field-list)")
            .wrap("<div class='wy-table-responsive'></div>");

        // Add expand links to all parents of nested ul
        $('.wy-menu-vertical ul').not('.simple').siblings('a').each(function () {
            var link = $(this);
            // NOTE(review): `expand` is assigned without var/let — an
            // implicit global; matches upstream sphinx-rtd-theme, verify
            // before changing.
            expand = $('<span class="toctree-expand"></span>');
            expand.on('click', function (ev) {
                self.toggleCurrent(link);
                ev.stopPropagation();
                return false;
            });
            link.prepend(expand);
        });
    };

    nav.reset = function () {
        // Get anchor from URL and open up nested nav
        var anchor = encodeURI(window.location.hash);
        if (anchor) {
            try {
                var link = $('.wy-menu-vertical')
                    .find('[href="' + anchor + '"]');
                $('.wy-menu-vertical li.toctree-l1 li.current')
                    .removeClass('current');
                link.closest('li.toctree-l2').addClass('current');
                link.closest('li.toctree-l3').addClass('current');
                link.closest('li.toctree-l4').addClass('current');
            }
            catch (err) {
                console.log("Error expanding nav for anchor", err);
            }
        }
    };

    // Mirror window scrolling onto the sidebar, clamped to the document.
    nav.onScroll = function () {
        this.winScroll = false;
        var newWinPosition = this.win.scrollTop(),
            winBottom = newWinPosition + this.winHeight,
            navPosition = this.navBar.scrollTop(),
            newNavPosition = navPosition + (newWinPosition - this.winPosition);
        if (newWinPosition < 0 || winBottom > this.docHeight) {
            return;
        }
        this.navBar.scrollTop(newNavPosition);
        this.winPosition = newWinPosition;
    };

    // Re-measure cached window/document heights after a resize.
    nav.onResize = function () {
        this.winResize = false;
        this.winHeight = this.win.height();
        this.docHeight = $(document).height();
    };

    nav.hashChange = function () {
        this.linkScroll = true;
        this.win.one('hashchange', function () {
            // NOTE(review): inside this handler `this` is the event target,
            // not `nav`, so nav.linkScroll is never reset here — matches
            // upstream sphinx-rtd-theme; confirm before "fixing".
            this.linkScroll = false;
        });
    };

    // Mark the clicked nav <li> as 'current' and collapse its siblings.
    nav.toggleCurrent = function (elem) {
        var parent_li = elem.closest('li');
        parent_li.siblings('li.current').removeClass('current');
        parent_li.siblings().find('li.current').removeClass('current');
        parent_li.find('> ul li.current').removeClass('current');
        parent_li.toggleClass('current');
    }

    return nav;
};

module.exports.ThemeNav = ThemeNav();

if (typeof(window) != 'undefined') {
    window.SphinxRtdTheme = { StickyNav: module.exports.ThemeNav };
}

},{"jquery":"jquery"}]},{},["sphinx-rtd-theme"]);
.highlight .hll { background-color: #ffffcc }
.highlight { background: #ffffff; }
\ No newline at end of file
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment