Commit bb54ab96 authored by 吴洲洋

update v0.4

parent 4714bb55
2021/03/09 13:25:29:536: ***** Start: PID 67394 *****
2021/03/09 13:25:34:018: ***** Startup Config *****
set interface state GigabitEthernet2/1/0 up
set interface ip address GigabitEthernet2/1/0 192.168.10.20/24
create gtpu tunnel src 192.168.10.20 dst 192.168.10.100 teid 1 encap-vrf-id 0 decap-next node ip4-lookup
ip route add 192.169.0.0/24 via gtpu_tunnel0
set interface state GigabitEthernet2/2/0 up
set interface ip address GigabitEthernet2/2/0 192.168.40.10/24
ip route add 0.0.0.0/0 via GigabitEthernet2/2/0
2021/03/09 13:25:34:018: ***** End Startup Config *****
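The startup config above brings up the N3-side interface, creates a GTP-U tunnel toward the gNB at 192.168.10.100 with TEID 1, steers the UE pool 192.169.0.0/24 into that tunnel, and sends everything else out the default route on GigabitEthernet2/2/0. As a minimal sketch of what the tunnel adds on the wire, here is the mandatory 8-byte GTP-U header from 3GPP TS 29.281 (carried over UDP port 2152), built in Python:

import struct

GTPU_PORT = 2152  # registered UDP destination port for GTP-U

def gtpu_header(teid: int, payload_len: int) -> bytes:
    # version=1, PT=1 (GTP), E/S/PN bits clear -> first octet 0x30;
    # message type 0xFF is G-PDU, i.e. an encapsulated user packet;
    # 'length' counts only the bytes after this mandatory header.
    return struct.pack("!BBHI", 0x30, 0xFF, payload_len, teid)

inner = bytes(40)  # stand-in for a UE IP packet
print(gtpu_header(teid=1, payload_len=len(inner)).hex())
# -> 30ff002800000001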
2021/03/09 13:30:45:205: vl_api_upf_sse_req_t_handler:415: CREATE SESSION BEGIN----------
2021/03/09 13:30:45:205: vl_api_upf_sse_req_t_handler:455: sessions:0x7f053cc6e5c0, sx:0x7f053cc6e5c0, cp_seid:0x1, sess_index:0
2021/03/09 13:30:45:205: bupt_handle_create_pdr:186: CREATE PDR BEGIN----------
2021/03/09 13:30:45:205: bupt_handle_create_pdr:195: pdr alloc:0x7f053cf5e440
2021/03/09 13:30:45:205: bupt_handle_create_pdr:210: pdr_id:0x1,far_id:0x1
2021/03/09 13:30:45:205: bupt_handle_create_pdr:218: pdr.pdi.fields:0x5
2021/03/09 13:30:45:205: bupt_handle_create_pdr:221: F_PDI_LOCAL_F_TEID
2021/03/09 13:30:45:205: bupt_handle_create_pdr:229: F_TEID_V4
2021/03/09 13:30:45:205: bupt_handle_create_pdr:243: pdr:0x7f053cf5e440,teid:0x1,ip4:192.168.10.20
2021/03/09 13:30:45:205: bupt_rules_add_v4_teid:69: not exist teid:1, rule_index:0
2021/03/09 13:30:45:205: bupt_pfcp_add_del_v4_teid:93: sess:0x7f053cc6e5c0,sess_index:0x0,rule_index:0x0,kv.key:0x140aa8c0,kv.value:0x0
2021/03/09 13:30:45:205: bupt_handle_create_pdr:263: F_PDI_UE_IP_ADDR
2021/03/09 13:30:45:205: bupt_handle_create_pdr:273: IE_UE_IP_ADDRESS_V4
2021/03/09 13:30:45:205: bupt_handle_create_pdr:280: ip4:192.169.0.3
2021/03/09 13:30:45:205: bupt_handle_create_pdr:336: CREATE PDR END----------
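One detail worth pulling out of the PDR log: the hash key printed by bupt_pfcp_add_del_v4_teid, kv.key:0x140aa8c0, is exactly the local F-TEID address 192.168.10.20 in network byte order, read back as a little-endian 32-bit word. The Python check below only demonstrates that correspondence; how the plugin folds the TEID itself into the key is not visible in this log and is left as an assumption.

import ipaddress
import struct

addr = ipaddress.IPv4Address("192.168.10.20")
(key_lo,) = struct.unpack("<I", addr.packed)  # network-order bytes, LE load
print(hex(key_lo))  # 0x140aa8c0, matching kv.key in the log above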
2021/03/09 13:30:45:205: bupt_handle_create_far:344: CREATE FAR BEGIN----------
2021/03/09 13:30:45:205: bupt_handle_create_far:351: create_far:0x7f053cf80940
2021/03/09 13:30:45:205: bupt_handle_create_far:365: create session far FAR_FORWARD
2021/03/09 13:30:45:205: bupt_handle_create_far:371: create session far FAR_F_REDIRECT_INFORMATION
2021/03/09 13:30:45:205: bupt_handle_create_far:408: CREATE FAR END----------
2021/03/09 13:30:45:205: vl_api_upf_sse_req_t_handler:462: CREATE SESSION END----------
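Taken together, the CREATE SESSION block installs one uplink PDR, matching on the local F-TEID (teid 0x1 at 192.168.10.20) and the UE address 192.169.0.3, which points at FAR 0x1 carrying a forward action. Below is a hypothetical mirror of that state in Python; the names are illustrative, not the plugin's actual structs:

from dataclasses import dataclass, field

@dataclass
class Pdr:
    pdr_id: int
    teid: int
    ue_ip: str
    far_id: int  # the FAR applied to packets this PDR matches

@dataclass
class Far:
    far_id: int
    action: str  # FAR_FORWARD in the log above

@dataclass
class Session:
    cp_seid: int
    pdrs: list = field(default_factory=list)
    fars: list = field(default_factory=list)

sess = Session(cp_seid=0x1)
sess.pdrs.append(Pdr(pdr_id=0x1, teid=0x1, ue_ip="192.169.0.3", far_id=0x1))
sess.fars.append(Far(far_id=0x1, action="FORWARD"))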
2021/03/09 13:30:45:254: vl_api_upf_ssm_create_req_t_handler:1330: SSM MODIFY SESSION CREATE BEGIN----------
2021/03/09 13:30:45:254: bupt_handle_ssm_modify_create_pdr:1199: SSM MODIFY CREATE PDR BEGIN----------
2021/03/09 13:30:45:254: bupt_handle_ssm_modify_create_pdr:1203: cp_seid:0x1, pdr_id:0x2,teid:0x1000000,ueip:0xc0a90003,far_id:0x2
2021/03/09 13:30:45:254: bupt_handle_ssm_modify_create_pdr:1237: F_TEID_V4
2021/03/09 13:30:45:254: bupt_handle_ssm_modify_create_pdr:1270: F_PDI_UE_IP_ADDR
2021/03/09 13:30:45:254: bupt_handle_ssm_modify_create_pdr:1277: IE_UE_IP_ADDRESS_V4
2021/03/09 13:30:45:254: bupt_handle_ssm_modify_create_pdr:1283: ip4:192.169.0.3
2021/03/09 13:30:45:254: bupt_handle_ssm_modify_create_pdr:1293: gtpu_dl,ip4:192.169.0.3, dl_teid:0x1000000
2021/03/09 13:30:45:254: bupt_handle_ssm_modify_create_pdr:1323: SSM MODIFY CREATE PDR END----------
2021/03/09 13:30:45:254: vl_api_upf_ssm_create_req_t_handler:1347: SSM MODIFY SESSION CREATE END----------
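The SSM modify step then adds the downlink side. Note the mixed byte orders in its log line: dl_teid 0x1000000 is TEID 1 still in network byte order, while ueip 0xc0a90003 is 192.169.0.3 printed as a host-order integer. Decoding both on a little-endian host:

import ipaddress
import socket

print(socket.ntohl(0x01000000))           # 1  (the downlink TEID)
print(ipaddress.IPv4Address(0xc0a90003))  # 192.169.0.3 (the UE address)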
set interface state GigabitEthernet0/9/0 up
set interface ip address GigabitEthernet0/9/0 192.168.122.78/24
create gtpu tunnel src 192.168.122.78 dst 192.168.122.1 teid 1 encap-vrf-id 0 decap-next node ip4-lookup
set interface state GigabitEthernet2/1/0 up
set interface ip address GigabitEthernet2/1/0 192.168.10.20/24
create gtpu tunnel src 192.168.10.20 dst 192.168.10.100 teid 1 encap-vrf-id 0 decap-next node ip4-lookup
ip route add 192.169.0.0/24 via gtpu_tunnel0
set interface state GigabitEthernet0/a/0 up
set interface ip address GigabitEthernet0/a/0 192.168.30.10/24
ip route add 0.0.0.0/0 via GigabitEthernet0/a/0
set interface state GigabitEthernet2/2/0 up
set interface ip address GigabitEthernet2/2/0 192.168.40.10/24
ip route add 0.0.0.0/0 via GigabitEthernet2/2/0
......
gnb: 192.168.122.78
smf : 192.168.122.197
upf : 192.168.122.88
gnb: 192.168.10.20
smf : 192.168.2.35
upf : 192.168.2.239
logSize : 100
@@ -22,106 +22,21 @@ logging
default-syslog-log-level info
}
cpu {
## In VPP there is one main thread, and optionally the user can create worker thread(s)
## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically
## Manual pinning of thread(s) to CPU core(s)
## Set logical CPU core where main thread runs, if main core is not set
## VPP will use core 1 if available
main-core 3
## Set logical CPU core(s) where worker threads are running
#corelist-workers 2-3
#corelist-io 3,4
## Automatic pinning of thread(s) to CPU core(s)
## Sets number of CPU core(s) to be skipped (1 ... N-1)
## Skipped CPU core(s) are not used for pinning main thread and worker thread(s).
## The main thread is automatically pinned to the first available CPU core and worker(s)
## are pinned to next free CPU core(s) after core assigned to main thread
# skip-cores 4
## Specify a number of workers to be created
## Workers are pinned to N consecutive CPU cores while skipping "skip-cores" CPU core(s)
## and main thread's CPU core
#workers 2
## Set scheduling policy and priority of main and worker threads
## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH),
## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR)
main-core 7
scheduler-policy fifo
## Scheduling priority is used only for "real-time" policies (fifo and rr),
## and has to be in the range of priorities supported for a particular policy
scheduler-priority 50
}
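The active settings in this cpu stanza pin the main thread to a core and request SCHED_FIFO at priority 50. Roughly the same request made directly against the kernel's scheduler API, as a Python sketch (assuming core 7 is the value that survives this diff; real-time scheduling needs CAP_SYS_NICE, hence the guard):

import os

try:
    os.sched_setaffinity(0, {7})  # main-core 7
    os.sched_setscheduler(0, os.SCHED_FIFO, os.sched_param(50))  # fifo, prio 50
except OSError as e:
    print("needs CAP_SYS_NICE and an existing core 7:", e)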
# buffers {
## Increase number of buffers allocated, needed only in scenarios with
## large number of interfaces and worker threads. Value is per numa node.
## Default is 16384 (8192 if running unprivileged)
# buffers-per-numa 128000
## Size of buffer data area
## Default is 2048
# default data-size 2048
# }
dpdk {
## Change default settings for all interfaces
dev default {
## Number of receive queues, enables RSS
## Default is 1
## Number of transmit queues. Default is equal
## to the number of worker threads, or 1 if there are no worker threads
## Number of descriptors in transmit and receive rings;
## increasing or reducing the number can impact performance
## Default is 1024 for both rx and tx
# num-rx-desc 512
# num-tx-desc 512
## VLAN strip offload mode for interface
## Default is off
# vlan-strip-offload on
}
dev 0000:02:01.0
dev 0000:02:02.0
## Whitelist specific interface by specifying PCI address
#82576 1G
dev 0000:00:09.0
dev 0000:00:0a.0
## Whitelist specific interface by specifying PCI address and in
## addition specify custom parameters for this interface
# dev 0000:02:00.1 {
# num-rx-queues 2
# }
## Change UIO driver used by VPP. Options are: igb_uio, vfio-pci,
## uio_pci_generic or auto (default)
uio-driver vfio-pci
#uio-driver igb_uio
## Disable multi-segment buffers, improves performance but
## disables Jumbo MTU support
#no-multi-seg
## Change hugepages allocation per-socket, needed only if there is a need
## for a larger number of mbufs. Default is 256M on each detected CPU socket
socket-mem 2048,2048
## Disables UDP / TCP TX checksum offload. Typically needed to use
## faster vector PMDs (together with no-multi-seg)
#no-tx-checksum-offload
}
plugins {
......
@@ -22,106 +22,21 @@ logging
default-syslog-log-level info
}
cpu {
## In VPP there is one main thread, and optionally the user can create worker thread(s)
## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically
## Manual pinning of thread(s) to CPU core(s)
## Set logical CPU core where main thread runs, if main core is not set
## VPP will use core 1 if available
main-core @CORENUMBER@
## Set logical CPU core(s) where worker threads are running
#corelist-workers 2-3
#corelist-io 3,4
## Automatic pinning of thread(s) to CPU core(s)
## Sets number of CPU core(s) to be skipped (1 ... N-1)
## Skipped CPU core(s) are not used for pinning main thread and worker thread(s).
## The main thread is automatically pinned to the first available CPU core and worker(s)
## are pinned to next free CPU core(s) after core assigned to main thread
# skip-cores 4
## Specify a number of workers to be created
## Workers are pinned to N consecutive CPU cores while skipping "skip-cores" CPU core(s)
## and main thread's CPU core
#workers 2
## Set scheduling policy and priority of main and worker threads
## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH),
## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR)
scheduler-policy fifo
## Scheduling priority is used only for "real-time" policies (fifo and rr),
## and has to be in the range of priorities supported for a particular policy
scheduler-priority 50
}
# buffers {
## Increase number of buffers allocated, needed only in scenarios with
## large number of interfaces and worker threads. Value is per numa node.
## Default is 16384 (8192 if running unprivileged)
# buffers-per-numa 128000
## Size of buffer data area
## Default is 2048
# default data-size 2048
# }
dpdk {
## Change default settings for all interfaces
dev default {
## Number of receive queues, enables RSS
## Default is 1
## Number of transmit queues. Default is equal
## to the number of worker threads, or 1 if there are no worker threads
## Number of descriptors in transmit and receive rings;
## increasing or reducing the number can impact performance
## Default is 1024 for both rx and tx
# num-rx-desc 512
# num-tx-desc 512
## VLAN strip offload mode for interface
## Default is off
# vlan-strip-offload on
}
## Whitelist specific interface by specifying PCI address
#82576 1G
dev @PCI_ADDR_ID_ENTRY@
dev @PCI_ADDR_ID_EXPORT@
## Whitelist specific interface by specifying PCI address and in
## addition specify custom parameters for this interface
# dev 0000:02:00.1 {
# num-rx-queues 2
# }
## Change UIO driver used by VPP. Options are: igb_uio, vfio-pci,
## uio_pci_generic or auto (default)
uio-driver vfio-pci
#uio-driver igb_uio
## Disable multi-segment buffers, improves performance but
## disables Jumbo MTU support
#no-multi-seg
## Change hugepages allocation per-socket, needed only if there is a need
## for a larger number of mbufs. Default is 256M on each detected CPU socket
socket-mem 2048,2048
## Disables UDP / TCP TX checksum offload. Typically needed to use
## faster vector PMDs (together with no-multi-seg)
#no-tx-checksum-offload
}
plugins {
......
vpp:
system:
gtpu_entry: GigabitEthernet0/9/0
gtpu_export: GigabitEthernet0/a/0
gtpu_entry_ip: 192.168.122.78
gtpu_export_ip: 192.168.30.10
gnb_gtpu_ip: 192.168.122.1
gtpu_entry: GigabitEthernet2/1/0
gtpu_export: GigabitEthernet2/2/0
gtpu_entry_ip: 192.168.10.20
gtpu_export_ip: 192.168.40.10
gnb_gtpu_ip: 192.168.10.100
ip_link:
ueip: 192.169.0.0
ueIPPrefix: 24
snatInter: ens3
snatIP: 192.168.122.88
vppHost: ens11
gtpu_entry: ens9
gtpu_export: ens10
snatInter: ens32
snatIP: 192.168.2.239
vppHost: ens35
gtpu_entry: ens33
gtpu_export: ens34
startup:
pci_addr_id_entry: 00:09.0
pci_addr_id_export: 00:0a.0
pci_addr_id_entry: 02:01.0
pci_addr_id_export: 02:02.0
vpp_plugins_path: /usr/lib/x86_64-linux-gnu/vpp_plugins
drive: e1000
n4:
smf: 192.168.122.197
smf: 192.168.2.35
logSize: 100
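The startup: values here feed the @CORENUMBER@, @PCI_ADDR_ID_ENTRY@ and @PCI_ADDR_ID_EXPORT@ placeholders in the startup.conf template earlier in this diff. The deploy script itself is not part of the diff, so the Python rendering sketch below is hypothetical: the file names, the mapping, and the 0000: PCI domain prefix (the template's dev lines use full addresses) are all assumptions.

# Hypothetical rendering step; file names and key mapping are assumptions.
replacements = {
    "@CORENUMBER@": "7",                    # assumed from the deployed config
    "@PCI_ADDR_ID_ENTRY@": "0000:02:01.0",  # pci_addr_id_entry + domain prefix
    "@PCI_ADDR_ID_EXPORT@": "0000:02:02.0", # pci_addr_id_export + domain prefix
}

with open("startup.conf.template") as f:
    text = f.read()
for placeholder, value in replacements.items():
    text = text.replace(placeholder, value)
with open("startup.conf", "w") as f:
    f.write(text)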