Commit 07fb5a3d authored by wangyongshou

delete useless file

parent 98b5d0db
unix {
  nodaemon
  log /var/log/upf/vpp/vpp.log
  full-coredump
  ## Run the CLI on the controlling terminal
  interactive
  gid vpp
  cli-listen /run/vpp/cli.sock
  ## CLI script executed once VPP is initialized (interface/route setup)
  exec /work/wys/mul_thread/test_58_mul_thread_v1/135_1014/Buptvppe/config_system.sh
  logsize 100
  #cli-prompt bupt>>
}
api-trace {
  ## Enable binary API tracing
  on
  ## Save the API message table to a file (written under /tmp/<filename>)
  save-api-table api-trace.log
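  ## The number of trace records in the circular buffer can be bounded
  ## with nitems (option from the upstream template; value illustrative):
  # nitems 65536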
}
api-segment {
  gid vpp
}
logging {
  default-log-level info
  default-syslog-log-level info
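  ## Per-class overrides are also possible; the stanza below follows the
  ## upstream startup.conf template (unverified on this build, values illustrative):
  # class dpdk/cryptodev { rate-limit 100 level debug syslog-level error }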
}
cpu {
  ## In VPP there is one main thread and optionally the user can create worker(s)
  ## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically
  ## Manual pinning of thread(s) to CPU core(s)
  ## Set logical CPU core where main thread runs; if main core is not set,
  ## VPP will use core 1 if available
  main-core 13
  ## Set logical CPU core(s) where worker threads are running
  corelist-workers 14-15
  #corelist-io 5,6,7
  ## Automatic pinning of thread(s) to CPU core(s)
  ## Sets number of CPU core(s) to be skipped (1 ... N-1)
  ## Skipped CPU core(s) are not used for pinning main thread and worker thread(s).
  ## The main thread is automatically pinned to the first available CPU core and worker(s)
  ## are pinned to next free CPU core(s) after the core assigned to the main thread
  # skip-cores 4
  ## Specify a number of workers to be created
  ## Workers are pinned to N consecutive CPU cores while skipping "skip-cores" CPU core(s)
  ## and the main thread's CPU core
  # workers 2
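  ## Worked example of the automatic-pinning rules above: with "skip-cores 4"
  ## and "workers 2", cores 0-3 are skipped, the main thread is pinned to
  ## core 4 and the two workers to cores 5 and 6 (assuming those cores exist).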
  ## Set scheduling policy and priority of main and worker threads
  ## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH)
  ## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR)
  scheduler-policy fifo
  ## Scheduling priority is used only for "real-time" policies (fifo and rr)
  ## and has to be in the range of priorities supported for a particular
  ## policy (1-99 for fifo and rr on Linux)
  scheduler-priority 50
}
#buffers {
  ## Increase number of buffers allocated, needed only in scenarios with
  ## large number of interfaces and worker threads. Value is per NUMA node.
  ## Default is 16384 (8192 if running unprivileged)
  # buffers-per-numa 1280000
  # buffers-per-numa 1048576
  # buffers-per-numa 2
  # memory-size-in-mb 2049
  ## Size of buffer data area
  ## Default is 2048
  # default data-size 1024
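  ## Rough sizing (illustrative): the default 16384 buffers with a
  ## 2048-byte data area come to about 32MB per NUMA node, plus
  ## per-buffer metadata.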
#}
dpdk {
  ## Change default settings for all interfaces
  dev default {
    ## Number of receive queues, enables RSS
    ## Default is 1
    ## Number of transmit queues, Default is equal
    ## to number of worker threads or 1 if no worker threads
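    ## Commented examples (values illustrative; option names match the
    ## per-device blocks below):
    # num-rx-queues 2
    # num-tx-queues 2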
    ## Number of descriptors in transmit and receive rings
    ## increasing or reducing number can impact performance
    ## Default is 1024 for both rx and tx
    # num-rx-desc 512
    # num-tx-desc 512
    ## VLAN strip offload mode for interface
    ## Default is off
    # vlan-strip-offload on
  }
  ## Whitelist specific interface by specifying PCI address
  ## Intel 82576 1G NIC, two ports
  dev 0000:0b:00.0 {
    num-rx-queues 1
    num-tx-queues 1
    num-rx-desc 2048
    num-tx-desc 2048
  }
  dev 0000:0b:00.1 {
    num-rx-queues 1
    num-tx-queues 1
    num-rx-desc 2048
    num-tx-desc 2048
  }
  ## Whitelist specific interface by specifying PCI address and in
  ## addition specify custom parameters for this interface
  # dev 0000:02:00.1 {
  #   num-rx-queues 2
  # }
  ## Change UIO driver used by VPP. Options are: igb_uio, vfio-pci,
  ## uio_pci_generic or auto (default)
  # uio-driver vfio-pci
  # uio-driver uio_pci_generic
  # uio-driver igb_uio
  ## Disable multi-segment buffers, improves performance but
  ## disables Jumbo MTU support
  # no-multi-seg
  ## Change hugepages allocation per-socket, needed only if there is need for
  ## larger number of mbufs. Default is 256M on each detected CPU socket
  # socket-mem 2048,2048
  ## e.g. 2G allocated on NUMA node 0 (and 2G on node 1)
  # socket-mem 4,4
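  ## Note (assumption, per DPDK socket-mem semantics): the value is a
  ## comma-separated list of MB per NUMA node in node order, so
  ## "socket-mem 2048,0" would reserve hugepages on node 0 only.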
  ## Disables UDP / TCP TX checksum offload. Typically needed to use
  ## faster vector PMDs (together with no-multi-seg)
  # no-tx-checksum-offload
}
vlib {
  ## Enable memory allocation tracing
  memory-trace
  ## Event log buffer size (number of events)
  elog-events 1
  ## Dump the event log on abnormal exit
  elog-post-mortem-dump
}
## Main heap size (top-level option)
#heapsize 2G
plugins {
  ## Plugin search path
  path /work/wys/mul_thread/test_58_mul_thread_v1/135_1014/Buptvppe/build-root/install-vpp_debug-native/vpp/lib/vpp_plugins
  ## Enable all plugins found on the path by default, then explicitly
  ## enable the ones this build relies on
  plugin default { enable }
  plugin dpdk_plugin.so { enable }
  plugin gtpu_plugin.so { enable }
  plugin upf_plugin.so { enable }
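  ## Individual plugins can also be disabled while keeping the default
  ## enabled (illustrative example; plugin name hypothetical for this build):
  # plugin ikev2_plugin.so { disable }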
}