Commit fd99e0f5 authored by gauthier

SPGW-U: thread scheduler policy, priority and CPU affinity configurable in the config file (Boost.Asio to be considered later)

parent eb544724
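This commit replaces the single CPU_ID_THREAD_LOOP_READ knob with per-thread SCHED_PARAMS blocks (CPU pinning, scheduling policy, real-time priority) that the ITTI tasks and the S1-U/SGi/Sx read loops apply to themselves through pthread_setaffinity_np() and pthread_setschedparam(). For orientation only, a minimal standalone sketch of that pattern (not code from this commit; note the CPU_ZERO() before CPU_SET(), which callers of this pattern should not omit):

// Illustrative only: how a thread applies configured scheduling parameters to itself.
#include <cstring>
#include <pthread.h>
#include <sched.h>

static void apply_sched_params(int cpu_id, int policy, int priority)
{
  if (cpu_id >= 0) {
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);                // start from an empty CPU set
    CPU_SET(cpu_id, &cpuset);         // pin the calling thread to one core
    pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
  }
  struct sched_param sparam;
  memset(&sparam, 0, sizeof(sparam));
  sparam.sched_priority = priority;   // 1..99 for SCHED_FIFO/SCHED_RR, 0 for the other policies
  pthread_setschedparam(pthread_self(), policy, &sparam);
}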
@@ -18,12 +18,34 @@
# For more information about the OpenAirInterface (OAI) Software Alliance:
# contact@openairinterface.org
################################################################################
SPGW-U =
{
    INSTANCE      = @INSTANCE@;        # 0 is the default
    PID_DIRECTORY = "@PID_DIRECTORY@"; # /var/run is the default
-    INTERFACES :
+    ITTI :
+    {
+        SCHED_PARAMS :
+        {
+            CPU_ID         = 1;
+            SCHED_POLICY   = "SCHED_FIFO"; # Values in { SCHED_OTHER, SCHED_IDLE, SCHED_BATCH, SCHED_FIFO, SCHED_RR }
+            SCHED_PRIORITY = 85;
+        };
+        S1U_SCHED_PARAMS :
+        {
+            CPU_ID         = 1;
+            SCHED_POLICY   = "SCHED_FIFO"; # Values in { SCHED_OTHER, SCHED_IDLE, SCHED_BATCH, SCHED_FIFO, SCHED_RR }
+            SCHED_PRIORITY = 84;
+        };
+        SX_SCHED_PARAMS :
+        {
+            CPU_ID         = 1;
+            SCHED_POLICY   = "SCHED_FIFO"; # Values in { SCHED_OTHER, SCHED_IDLE, SCHED_BATCH, SCHED_FIFO, SCHED_RR }
+            SCHED_PRIORITY = 84;
+        };
+    };
+    INTERFACES :
    {
        S1U_S12_S4_UP :
        {
@@ -31,7 +53,12 @@ SPGW-U =
            INTERFACE_NAME = "@SGW_INTERFACE_NAME_FOR_S1U_S12_S4_UP@"; # STRING, interface name, YOUR NETWORK CONFIG HERE
            IPV4_ADDRESS   = "read";                                   # STRING, CIDR or read to let app read interface configured IP address
            PORT           = @SGW_UDP_PORT_FOR_S1U_S12_S4_UP@;         # Default is 2152
-            CPU_ID_THREAD_LOOP_READ = 1;
+            SCHED_PARAMS :
+            {
+                CPU_ID         = 2;
+                SCHED_POLICY   = "SCHED_FIFO"; # Values in { SCHED_OTHER, SCHED_IDLE, SCHED_BATCH, SCHED_FIFO, SCHED_RR }
+                SCHED_PRIORITY = 98;
+            };
        };
        SX :
        {
@@ -39,25 +66,36 @@ SPGW-U =
            INTERFACE_NAME = "@SGW_INTERFACE_NAME_FOR_SX@"; # STRING, interface name
            IPV4_ADDRESS   = "read";                        # STRING, CIDR or read to let app read interface configured IP address
            PORT           = 8805;                          # Default is 8805
+            SCHED_PARAMS :
+            {
+                CPU_ID         = 1;
+                SCHED_POLICY   = "SCHED_FIFO"; # Values in { SCHED_OTHER, SCHED_IDLE, SCHED_BATCH, SCHED_FIFO, SCHED_RR }
+                SCHED_PRIORITY = 95;
+            };
        };
        SGI :
        {
            # No config to set, the software will set the SGi interface to the interface used for the default route.
            INTERFACE_NAME = "@SGW_INTERFACE_NAME_FOR_SGI@"; # STRING, interface name or "default_gateway"
            IPV4_ADDRESS   = "read";                         # STRING, CIDR or read to let app read interface configured IP address
-            CPU_ID_THREAD_LOOP_READ = 3;
+            SCHED_PARAMS :
+            {
+                CPU_ID         = 3;
+                SCHED_POLICY   = "SCHED_FIFO"; # Values in { SCHED_OTHER, SCHED_IDLE, SCHED_BATCH, SCHED_FIFO, SCHED_RR }
+                SCHED_PRIORITY = 98;
+            };
        };
    };
    PDN_NETWORK_LIST = (
        {NETWORK_IPV4 = "12.1.1.0/24";    NETWORK_IPV6 = "2001:1:2::0/64"; SNAT = "yes";},
        {NETWORK_IPV4 = "12.1.2.0/24";    SNAT = "no";},
        {NETWORK_IPV4 = "192.169.0.0/24"; SNAT = "no";},
        {NETWORK_IPV4 = "192.170.0.0/24"; SNAT = "no";},
        {NETWORK_IPV4 = "192.171.0.0/24"; SNAT = "no";}
    );
    SPGW-C_LIST = (
        {IPV4_ADDRESS = "192.168.160.100";}
    );
};
...
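As a reminder (not stated in the diff itself): the SCHED_PRIORITY values above only take effect with the real-time policies, the loader introduced later in this commit rejects anything outside 1..99, and running with SCHED_FIFO/SCHED_RR requires root or CAP_SYS_NICE. A hedged sketch of validating a configured priority against the range the kernel actually reports for a policy:

// Illustrative helper, not part of the commit: check a configured priority
// against the range Linux reports for the chosen policy (1..99 for
// SCHED_FIFO/SCHED_RR, 0..0 for SCHED_OTHER/SCHED_IDLE/SCHED_BATCH).
#include <sched.h>

static bool sched_priority_valid(int policy, int priority)
{
  return priority >= sched_get_priority_min(policy) &&
         priority <= sched_get_priority_max(policy);
}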
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
/*! \file thread_sched.h
\brief
\company Eurecom
\email: lionel.gauthier@eurecom.fr
*/
#ifndef FILE_THREAD_SCHED_SEEN
#define FILE_THREAD_SCHED_SEEN
#include <sched.h>
typedef struct thread_sched_params_s {
int cpu_id;
int sched_policy;
int sched_priority;
} thread_sched_params_t;
#endif /* FILE_THREAD_SCHED_SEEN */
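The read loops and ITTI tasks below skip CPU pinning whenever cpu_id is negative, so a sensible default for this struct would be "no pinning, normal time-sharing scheduling". The values below are assumed for illustration only; the commit does not define defaults:

// Hypothetical defaults, assuming thread_sched.h is included.
#include <sched.h>
#include "thread_sched.h"

static const thread_sched_params_t default_thread_sched_params = {
  -1,          // cpu_id: negative means "leave the thread's affinity untouched"
  SCHED_OTHER, // sched_policy: standard time-sharing
  0            // sched_priority: SCHED_OTHER only accepts 0
};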
@@ -55,23 +55,23 @@ static std::string string_to_hex(const std::string& input)
  return output;
}
//------------------------------------------------------------------------------
-void udp_server::udp_read_loop(int cpu)
+void udp_server::udp_read_loop(const thread_sched_params_t& thread_sched_params)
{
  socklen_t               r_endpoint_addr_len = 0;
  struct sockaddr_storage r_endpoint = {};
  size_t                  bytes_received = 0;
-  if (cpu) {
+  if (thread_sched_params.cpu_id >= 0) {
    cpu_set_t cpuset;
-    CPU_SET(cpu,&cpuset);
+    CPU_SET(thread_sched_params.cpu_id, &cpuset);
    pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
  }
-  int policy;
  struct sched_param sparam;
  memset(&sparam, 0, sizeof(sparam));
-  sparam.sched_priority = sched_get_priority_max(SCHED_FIFO);
-  policy = SCHED_FIFO ;
-  pthread_setschedparam(pthread_self(), policy, &sparam);
+  sparam.sched_priority = thread_sched_params.sched_priority;
+  pthread_setschedparam(pthread_self(), thread_sched_params.sched_policy, &sparam);
  while (1) {
    r_endpoint_addr_len = sizeof(struct sockaddr_storage);
@@ -169,11 +169,11 @@ int udp_server::create_socket (char * address, const uint16_t port_num)
  }
}
//------------------------------------------------------------------------------
-void udp_server::start_receive(gtpu_l4_stack * gtp_stack, const int cpu)
+void udp_server::start_receive(gtpu_l4_stack * gtp_stack, const thread_sched_params_t& thread_sched_params)
{
  app_ = gtp_stack;
  Logger::udp().trace( "udp_server::start_receive");
-  thread_ = thread(&udp_server::udp_read_loop, this, cpu);
+  thread_ = thread(&udp_server::udp_read_loop, this, thread_sched_params);
  thread_.detach();
}
@@ -196,7 +196,7 @@ void udp_server::start_receive(gtpu_l4_stack * gtp_stack, const int cpu)
//}
//------------------------------------------------------------------------------
-gtpu_l4_stack::gtpu_l4_stack(const struct in_addr& address, const uint16_t port_num, const int cpu) :
+gtpu_l4_stack::gtpu_l4_stack(const struct in_addr& address, const uint16_t port_num, const thread_sched_params_t& thread_sched_params) :
  udp_s(udp_server(address, port_num))
{
  Logger::gtpv1_u().info( "gtpu_l4_stack created listening to %s:%d", oai::cn::core::toString(address).c_str(), port_num);
@@ -205,10 +205,10 @@ gtpu_l4_stack::gtpu_l4_stack(const struct in_addr& address, const uint16_t port_
  srand (time(NULL));
  seq_num = rand() & 0x7FFFFFFF;
  restart_counter = 0;
-  udp_s.start_receive(this, cpu);
+  udp_s.start_receive(this, thread_sched_params);
}
//------------------------------------------------------------------------------
-gtpu_l4_stack::gtpu_l4_stack(const struct in6_addr& address, const uint16_t port_num, const int cpu) :
+gtpu_l4_stack::gtpu_l4_stack(const struct in6_addr& address, const uint16_t port_num, const thread_sched_params_t& thread_sched_params) :
  udp_s(udp_server(address, port_num))
{
  Logger::gtpv1_u().info( "gtpu_l4_stack created listening to %s:%d", oai::cn::core::toString(address).c_str(), port_num);
@@ -217,10 +217,10 @@ gtpu_l4_stack::gtpu_l4_stack(const struct in6_addr& address, const uint16_t port
  srand (time(NULL));
  seq_num = rand() & 0x7FFFFFFF;
  restart_counter = 0;
-  udp_s.start_receive(this, cpu);
+  udp_s.start_receive(this, thread_sched_params);
}
//------------------------------------------------------------------------------
-gtpu_l4_stack::gtpu_l4_stack(char * address, const uint16_t port_num, const int cpu) :
+gtpu_l4_stack::gtpu_l4_stack(char * address, const uint16_t port_num, const thread_sched_params_t& thread_sched_params) :
  udp_s(udp_server(address, port_num))
{
  Logger::gtpv1_u().info( "gtpu_l4_stack created listening to %s:%d", address, port_num);
@@ -229,7 +229,7 @@ gtpu_l4_stack::gtpu_l4_stack(char * address, const uint16_t port_num, const int
  srand (time(NULL));
  seq_num = rand() & 0x7FFFFFFF;
  restart_counter = 0;
-  udp_s.start_receive(this, cpu);
+  udp_s.start_receive(this, thread_sched_params);
}
...
@@ -31,6 +31,7 @@
#include "3gpp_29.281.hpp"
#include "itti.hpp"
#include "msg_gtpv1u.hpp"
+#include "thread_sched.h"
#include <iostream>
#include <map>
@@ -93,7 +94,7 @@ public:
    close(socket_);
  }
-  void udp_read_loop(int cpu_id);
+  void udp_read_loop(const thread_sched_params_t& thread_sched_params);
  void async_send_to(const char* send_buffer, const ssize_t num_bytes, const struct sockaddr_in& peer_addr)
  {
@@ -113,7 +114,7 @@ public:
  }
-  void start_receive(gtpu_l4_stack * gtp_stack, const int cpu);
+  void start_receive(gtpu_l4_stack * gtp_stack, const thread_sched_params_t& thread_sched_params);
protected:
  int create_socket (const struct in_addr& address, const uint16_t port);
@@ -161,9 +162,9 @@ protected:
public:
  static const uint8_t version = 1;
-  gtpu_l4_stack(const struct in_addr& address, const uint16_t port_num, const int cpu);
-  gtpu_l4_stack(const struct in6_addr& address, const uint16_t port_num, const int cpu);
-  gtpu_l4_stack(char * ip_address, const uint16_t port_num, const int cpu);
+  gtpu_l4_stack(const struct in_addr& address, const uint16_t port_num, const thread_sched_params_t& thread_sched_params);
+  gtpu_l4_stack(const struct in6_addr& address, const uint16_t port_num, const thread_sched_params_t& thread_sched_params);
+  gtpu_l4_stack(char * ip_address, const uint16_t port_num, const thread_sched_params_t& thread_sched_params);
  virtual void handle_receive(char* recv_buffer, const std::size_t bytes_transferred, const struct sockaddr_storage& r_endpoint, const socklen_t& r_endpoint_addr_len);
  void handle_receive_message_cb(const gtpv1u_msg& msg, const struct sockaddr_storage& r_endpoint, const socklen_t& r_endpoint_addr_len, const core::itti::task_id_t& task_id, bool &error, uint64_t& gtpc_tx_id);
...
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
/*! \file pfcp_switch.cpp
\brief
\author Lionel Gauthier
\date 2019
\company Eurecom
\email: lionel.gauthier@eurecom.fr
*/
#include "common_defs.h"
#include "itti.hpp"
#include "logger.hpp"
#include "pfcp_switch.hpp"
#include "spgwu_config.hpp"
#include "spgwu_pfcp_association.hpp"
#include "spgwu_s1u.hpp"
#include <algorithm>
#include <fstream> // std::ifstream
#include <sched.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ip.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <stdexcept>
#include <net/ethernet.h>
using namespace oai::cn::core;
using namespace oai::cn::core::itti;
using namespace oai::cn::core::pfcp;
using namespace oai::cn::proto::gtpv1u;
using namespace oai::cn::nf::spgwu;
using namespace std;
extern itti_mw *itti_inst;
extern spgwu_config spgwu_cfg;
extern spgwu_s1u *spgwu_s1u_inst;
static std::string string_to_hex(const char* input, const size_t len)
{
static const char* const lut = "0123456789ABCDEF";
std::string output;
output.reserve(2 * len);
for (size_t i = 0; i < len; ++i)
{
const unsigned char c = input[i];
output.push_back(lut[c >> 4]);
output.push_back(lut[c & 15]);
}
return output;
}
//------------------------------------------------------------------------------
void pfcp_switch::stop_timer_min_commit_interval()
{
if (timer_min_commit_interval_id) {
itti_inst->timer_remove(timer_min_commit_interval_id);
}
timer_min_commit_interval_id = 0;
}
//------------------------------------------------------------------------------
void pfcp_switch::start_timer_min_commit_interval()
{
stop_timer_min_commit_interval();
timer_min_commit_interval_id = itti_inst->timer_setup (PFCP_SWITCH_MIN_COMMIT_INTERVAL_MILLISECONDS/1000, PFCP_SWITCH_MIN_COMMIT_INTERVAL_MILLISECONDS%1000, TASK_SPGWU_APP, TASK_SPGWU_PFCP_SWITCH_MIN_COMMIT_INTERVAL);
}
//------------------------------------------------------------------------------
void pfcp_switch::stop_timer_max_commit_interval()
{
if (timer_max_commit_interval_id) {
itti_inst->timer_remove(timer_max_commit_interval_id);
}
timer_max_commit_interval_id = 0;
}
//------------------------------------------------------------------------------
void pfcp_switch::start_timer_max_commit_interval()
{
stop_timer_max_commit_interval();
timer_max_commit_interval_id = itti_inst->timer_setup (PFCP_SWITCH_MAX_COMMIT_INTERVAL_MILLISECONDS/1000, PFCP_SWITCH_MAX_COMMIT_INTERVAL_MILLISECONDS%1000, TASK_SPGWU_APP, TASK_SPGWU_PFCP_SWITCH_MAX_COMMIT_INTERVAL);
}
//------------------------------------------------------------------------------
void pfcp_switch::time_out_min_commit_interval(const uint32_t timer_id)
{
if (timer_id == timer_min_commit_interval_id) {
stop_timer_max_commit_interval();
timer_min_commit_interval_id = 0;
commit_changes();
}
}
//------------------------------------------------------------------------------
void pfcp_switch::time_out_max_commit_interval(const uint32_t timer_id)
{
if (timer_id == timer_max_commit_interval_id) {
stop_timer_min_commit_interval();
timer_max_commit_interval_id = 0;
commit_changes();
}
}
//------------------------------------------------------------------------------
void pfcp_switch::commit_changes()
{
}
//------------------------------------------------------------------------------
void pfcp_switch::pdn_read_loop(const thread_sched_params_t& thread_sched_params)
{
int bytes_received = 0;
if (thread_sched_params.cpu_id >= 0) {
cpu_set_t cpuset;
CPU_SET(thread_sched_params.cpu_id,&cpuset);
if (int rc = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset)) {
Logger::pfcp_switch().warn( "Could not set affinity to thread polling SGi interface, err=%d", rc);
}
}
struct sched_param sparam;
memset(&sparam, 0, sizeof(sparam));
sparam.sched_priority = thread_sched_params.sched_priority;
if (int rc = pthread_setschedparam(pthread_self(), thread_sched_params.sched_policy, &sparam)) {
Logger::pfcp_switch().warn( "Could not set schedparam to thread polling SGi interface, err=%d", rc);
}
struct msghdr msg = {};
msg.msg_iov = &msg_iov_;
msg.msg_iovlen = 1;
while (1) {
if ((bytes_received = recvmsg(sock_r, &msg, 0)) > 0) {
pfcp_session_look_up_pack_in_core((const char*)msg_iov_.iov_base, bytes_received);
} else {
Logger::pfcp_switch().error( "recvmsg failed rc=%d:%s", bytes_received, strerror (errno));
}
}
}
//------------------------------------------------------------------------------
void pfcp_switch::send_to_core(char* const ip_packet, const ssize_t len)
{
ssize_t bytes_sent;
//Logger::pfcp_switch().info( "pfcp_switch::send_to_core %d bytes ", len);
  struct sockaddr_in dst; // deliberately not zeroed; family and destination address are set just below
dst.sin_addr.s_addr = ((struct iphdr*)ip_packet)->daddr;
dst.sin_family = AF_INET;
if((bytes_sent = sendto(sock_w, ip_packet, len, 0, (struct sockaddr *)&dst, sizeof(dst))) < 0) {
Logger::pfcp_switch().error( "sendto failed rc=%d:%s", bytes_sent, strerror (errno));
}
}
//------------------------------------------------------------------------------
int pfcp_switch::create_pdn_socket (const char * const ifname, const bool promisc, int& if_index)
{
struct sockaddr_in addr = {};
int sd = 0;
// const int len = strnlen (ifname, IFNAMSIZ);
// if (len == IFNAMSIZ) {
// Logger::pfcp_switch().error( "Interface name too long %s", ifname);
// return RETURNerror;
// }
/*
* Create socket
* The socket_type is either SOCK_RAW for raw packets including the link-level header or SOCK_DGRAM for cooked packets with the link-level header removed.
*/
if ((sd = socket (AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL))) < 0) {
/*
* Socket creation has failed...
*/
Logger::pfcp_switch().error( "Socket creation failed (%s)", strerror (errno));
return RETURNerror;
}
if (ifname) {
struct ifreq ifr = {};
strncpy ((char *) ifr.ifr_name, ifname, IFNAMSIZ);
if (ioctl (sd, SIOCGIFINDEX, &ifr) < 0) {
Logger::pfcp_switch().error( "Get interface index failed (%s) for %s", strerror (errno), ifname);
close (sd);
return RETURNerror;
}
if_index = ifr.ifr_ifindex;
struct sockaddr_ll sll = {};
sll.sll_family = AF_PACKET; /* Always AF_PACKET */
sll.sll_protocol = htons(ETH_P_ALL); /* Physical-layer protocol */
sll.sll_ifindex = ifr.ifr_ifindex; /* Interface number */
if (bind (sd, (struct sockaddr *)&sll, sizeof (sll)) < 0) {
/*
* Bind failed
*/
Logger::pfcp_switch().error("Socket bind to %s failed (%s)", ifname, strerror (errno));
close (sd);
return RETURNerror;
}
if (promisc) {
struct packet_mreq mreq = {};
mreq.mr_ifindex = if_index;
mreq.mr_type = PACKET_MR_PROMISC;
if (setsockopt (sd, SOL_PACKET,PACKET_ADD_MEMBERSHIP, &mreq, sizeof (mreq)) < 0) {
Logger::pfcp_switch().error("Set promiscuous mode failed (%s)", strerror (errno));
close (sd);
return RETURNerror;
}
}
}
return sd;
}
//------------------------------------------------------------------------------
int pfcp_switch::create_pdn_socket (const char * const ifname)
{
struct sockaddr_in addr = {};
int sd = RETURNerror;
if (ifname) {
/*
* Create socket
* The socket_type is either SOCK_RAW for raw packets including the link-level header or SOCK_DGRAM for cooked packets with the link-level header removed.
*/
if ((sd = socket (AF_INET, SOCK_RAW, IPPROTO_RAW)) < 0) {
/*
* Socket creation has failed...
*/
Logger::pfcp_switch().error( "Socket creation failed (%s)", strerror (errno));
return RETURNerror;
}
int option_on = 1;
const int *p_option_on = &option_on;
if(setsockopt(sd, IPPROTO_IP, IP_HDRINCL, p_option_on, sizeof(option_on)) < 0) {
Logger::pfcp_switch().error("Set header included failed (%s)", strerror (errno));
close (sd);
return RETURNerror;
}
struct ifreq ifr = {};
strncpy ((char *) ifr.ifr_name, ifname, IFNAMSIZ);
if ((setsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, (void *)&ifr, sizeof(ifr))) < 0) {
Logger::pfcp_switch().error("Socket bind to %s failed (%s)", ifname, strerror (errno));
close(sd);
return RETURNerror;
}
return sd;
}
return RETURNerror;
}
//------------------------------------------------------------------------------
void pfcp_switch::setup_pdn_interfaces()
{
std::string cmd = fmt::format("ip link set dev {0} down > /dev/null 2>&1; ip link del {0} > /dev/null 2>&1; sync; sleep 1; ip link add {0} type dummy; ip link set dev {0} up", PDN_INTERFACE_NAME);
int rc = system ((const char*)cmd.c_str());
for (auto it : spgwu_cfg.pdns) {
if (it.prefix_ipv4) {
struct in_addr address4 = {};
address4.s_addr = it.network_ipv4.s_addr + be32toh(1);
std::string cmd = fmt::format("ip -4 addr add {}/{} dev {}", oai::cn::core::toString(address4).c_str(), it.prefix_ipv4, PDN_INTERFACE_NAME);
rc = system ((const char*)cmd.c_str());
if (it.snat) {
cmd = fmt::format("iptables -t nat -A POSTROUTING -s {}/{} -j SNAT --to-source {}",
oai::cn::core::toString(address4).c_str(),
it.prefix_ipv4,
oai::cn::core::toString(spgwu_cfg.sgi.addr4).c_str());
rc = system ((const char*)cmd.c_str());
}
}
if (it.prefix_ipv6) {
std::string cmd = fmt::format("echo 0 > /proc/sys/net/ipv6/conf/{}/disable_ipv6", PDN_INTERFACE_NAME);
rc = system ((const char*)cmd.c_str());
struct in6_addr addr6 = it.network_ipv6;
addr6.s6_addr[15] = 1;
cmd = fmt::format("ip -6 addr add {}/{} dev {}", oai::cn::core::toString(addr6).c_str(), it.prefix_ipv6, PDN_INTERFACE_NAME);
rc = system ((const char*)cmd.c_str());
// if ((it.snat) && (/* SGI has IPv6 address*/)){
// cmd = fmt::format("ip6tables -t nat -A POSTROUTING -s {}/{} -o {} -j SNAT --to-source {}", oai::cn::core::toString(addr6).c_str(), it.prefix_ipv6, xxx);
// rc = system ((const char*)cmd.c_str());
// }
}
}
// Otherwise redirect incoming ingress UE IP to default gw
rc = system ("/sbin/sysctl -w net.ipv4.conf.all.forwarding=1");
rc = system ("/sbin/sysctl -w net.ipv4.conf.all.send_redirects=0");
rc = system ("/sbin/sysctl -w net.ipv4.conf.default.send_redirects=0");
rc = system ("/sbin/sysctl -w net.ipv4.conf.all.accept_redirects=0");
rc = system ("/sbin/sysctl -w net.ipv4.conf.default.accept_redirects=0");
cmd = fmt::format("/sbin/sysctl -w net.ipv4.conf.{}.send_redirects=0", PDN_INTERFACE_NAME);
rc = system ((const char*)cmd.c_str());
cmd = fmt::format("/sbin/sysctl -w net.ipv4.conf.{}.accept_redirects=0", PDN_INTERFACE_NAME);
rc = system ((const char*)cmd.c_str());
if ((sock_r = create_pdn_socket(PDN_INTERFACE_NAME, false, pdn_if_index)) <= 0) {
Logger::pfcp_switch().error("Could not set PDN dummy read socket");
sleep(2);
exit(EXIT_FAILURE);
}
if ((sock_w = create_pdn_socket(spgwu_cfg.sgi.if_name.c_str())) <= 0) {
Logger::pfcp_switch().error("Could not set PDN dummy write socket");
sleep(2);
exit(EXIT_FAILURE);
}
}
//------------------------------------------------------------------------------
oai::cn::core::pfcp::fteid_t pfcp_switch::generate_fteid_s1u()
{
oai::cn::core::pfcp::fteid_t fteid = {};
fteid.teid = generate_teid_s1u();
if (spgwu_cfg.s1_up.addr4.s_addr) {
fteid.v4 = 1;
fteid.ipv4_address.s_addr = spgwu_cfg.s1_up.addr4.s_addr;
} else {
fteid.v6 = 1;
fteid.ipv6_address = spgwu_cfg.s1_up.addr6;
}
return fteid;
}
//------------------------------------------------------------------------------
pfcp_switch::pfcp_switch() : seid_generator_(), teid_s1u_generator_(),
ue_ipv4_hbo2pfcp_pdr(PFCP_SWITCH_MAX_PDRS),
ul_s1u_teid2pfcp_pdr(PFCP_SWITCH_MAX_PDRS),
up_seid2pfcp_sessions(PFCP_SWITCH_MAX_SESSIONS)
{
timer_min_commit_interval_id = 0;
timer_max_commit_interval_id = 0;
  cp_fseid2pfcp_sessions = {};
msg_iov_.iov_base = &recv_buffer_[ROOM_FOR_GTPV1U_G_PDU]; // make room for GTPU G_PDU header
msg_iov_.iov_len = PFCP_SWITCH_RECV_BUFFER_SIZE - ROOM_FOR_GTPV1U_G_PDU;
sock_r = -1;
sock_w = -1;
pdn_if_index = -1;
setup_pdn_interfaces();
thread_sock_ = thread(&pfcp_switch::pdn_read_loop,this, spgwu_cfg.itti.sx_sched_params);
thread_sock_.detach();
}
//------------------------------------------------------------------------------
bool pfcp_switch::get_pfcp_session_by_cp_fseid(const core::pfcp::fseid_t& fseid, std::shared_ptr<core::pfcp::pfcp_session>& session) const
{
std::unordered_map<fseid_t, std::shared_ptr<core::pfcp::pfcp_session>>::const_iterator sit = cp_fseid2pfcp_sessions.find (fseid);
if (sit == cp_fseid2pfcp_sessions.end()) {
return false;
} else {
session = sit->second;
return true;
}
}
//------------------------------------------------------------------------------
bool pfcp_switch::get_pfcp_session_by_up_seid(const uint64_t cp_seid, std::shared_ptr<core::pfcp::pfcp_session>& session) const
{
folly::AtomicHashMap<uint64_t, std::shared_ptr<core::pfcp::pfcp_session>>::const_iterator sit = up_seid2pfcp_sessions.find (cp_seid);
if (sit == up_seid2pfcp_sessions.end()) {
return false;
} else {
session = sit->second;
return true;
}
}
//------------------------------------------------------------------------------
bool pfcp_switch::get_pfcp_ul_pdrs_by_up_teid(const teid_t teid, std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>& pdrs) const
{
folly::AtomicHashMap<teid_t, std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>>::const_iterator pit = ul_s1u_teid2pfcp_pdr.find (teid);
if ( pit == ul_s1u_teid2pfcp_pdr.end() )
return false;
else {
pdrs = pit->second;
return true;
}
}
//------------------------------------------------------------------------------
bool pfcp_switch::get_pfcp_dl_pdrs_by_ue_ip(const uint32_t ue_ip, std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>& pdrs) const
{
folly::AtomicHashMap<uint32_t, std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>>::const_iterator pit = ue_ipv4_hbo2pfcp_pdr.find (ue_ip);
if ( pit == ue_ipv4_hbo2pfcp_pdr.end() )
return false;
else {
pdrs = pit->second;
return true;
}
}
//------------------------------------------------------------------------------
void pfcp_switch::add_pfcp_session_by_cp_fseid(const core::pfcp::fseid_t& fseid, std::shared_ptr<core::pfcp::pfcp_session>& session)
{
std::pair<fseid_t, std::shared_ptr<core::pfcp::pfcp_session>> entry(fseid, session);
cp_fseid2pfcp_sessions.insert(entry);
}
//------------------------------------------------------------------------------
void pfcp_switch::add_pfcp_session_by_up_seid(const uint64_t seid, std::shared_ptr<core::pfcp::pfcp_session>& session)
{
std::pair<uint64_t, std::shared_ptr<core::pfcp::pfcp_session>> entry(seid, session);
up_seid2pfcp_sessions.insert(entry);
}
//------------------------------------------------------------------------------
void pfcp_switch::remove_pfcp_session(std::shared_ptr<core::pfcp::pfcp_session>& session)
{
session->cleanup();
cp_fseid2pfcp_sessions.erase(session->cp_fseid);
up_seid2pfcp_sessions.erase(session->seid);
}
//------------------------------------------------------------------------------
void pfcp_switch::remove_pfcp_session(const oai::cn::core::pfcp::fseid_t& cp_fseid)
{
std::shared_ptr<core::pfcp::pfcp_session> session = {};
if (get_pfcp_session_by_cp_fseid(cp_fseid, session)) {
remove_pfcp_session(session);
}
}
//------------------------------------------------------------------------------
void pfcp_switch::add_pfcp_ul_pdr_by_up_teid(const teid_t teid, std::shared_ptr<core::pfcp::pfcp_pdr>& pdr)
{
folly::AtomicHashMap<teid_t, std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>>::const_iterator pit = ul_s1u_teid2pfcp_pdr.find (teid);
if ( pit == ul_s1u_teid2pfcp_pdr.end() ) {
std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>> pdrs = std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>(new std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>());
pdrs->push_back(pdr);
std::pair<teid_t, std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>> entry(teid, pdrs);
//Logger::pfcp_switch().info( "add_pfcp_ul_pdr_by_up_teid tunnel " TEID_FMT " ", teid);
ul_s1u_teid2pfcp_pdr.insert(entry);
} else {
// sort by precedence
//const std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>& spdrs = pit->second;
std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>* pdrs = pit->second.get();
for (std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>::iterator it=pdrs->begin(); it < pdrs->end(); ++it) {
if (*(it->get()) < *(pdr.get())) {
pit->second->insert(it, pdr);
return;
}
}
}
}
//------------------------------------------------------------------------------
void pfcp_switch::add_pfcp_dl_pdr_by_ue_ip(const uint32_t ue_ip, std::shared_ptr<core::pfcp::pfcp_pdr>& pdr)
{
folly::AtomicHashMap<uint32_t, std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>>::const_iterator pit = ue_ipv4_hbo2pfcp_pdr.find (ue_ip);
if ( pit == ue_ipv4_hbo2pfcp_pdr.end() ) {
std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>> pdrs = std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>(new std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>());
pdrs->push_back(pdr);
std::pair<uint32_t, std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>> entry(ue_ip, pdrs);
ue_ipv4_hbo2pfcp_pdr.insert(entry);
//Logger::pfcp_switch().info( "add_pfcp_dl_pdr_by_ue_ip UE IP %8x", ue_ip);
} else {
// sort by precedence
//const std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>& spdrs = pit->second;
std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>* pdrs = pit->second.get();
for (std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>::iterator it=pdrs->begin(); it < pdrs->end(); ++it) {
if (*(it->get()) < *(pdr.get())) {
pit->second->insert(it, pdr);
return;
}
}
}
}
//------------------------------------------------------------------------------
std::string pfcp_switch::to_string() const
{
std::string s = {};
for (const auto& it : up_seid2pfcp_sessions) {
s.append(it.second->to_string());
}
return s;
}
//------------------------------------------------------------------------------
bool pfcp_switch::create_packet_in_access(std::shared_ptr<core::pfcp::pfcp_pdr>& pdr, const core::pfcp::fteid_t& in, uint8_t& cause)
{
cause = CAUSE_VALUE_REQUEST_ACCEPTED;
add_pfcp_ul_pdr_by_up_teid(in.teid, pdr);
return true;
}
//------------------------------------------------------------------------------
void pfcp_switch::handle_pfcp_session_establishment_request(std::shared_ptr<core::itti::itti_sxab_session_establishment_request> sreq, core::itti::itti_sxab_session_establishment_response* resp)
{
itti_sxab_session_establishment_request * req = sreq.get();
core::pfcp::fseid_t fseid = {};
core::pfcp::cause_t cause = {.cause_value = CAUSE_VALUE_REQUEST_ACCEPTED};
core::pfcp::offending_ie_t offending_ie = {};
if (req->pfcp_ies.get(fseid)) {
std::shared_ptr<core::pfcp::pfcp_session> s = {};
bool exist = get_pfcp_session_by_cp_fseid(fseid, s);
pfcp_session* session = nullptr;
if (not exist) {
session = new pfcp_session(fseid, generate_seid());
for (auto it : req->pfcp_ies.create_fars) {
create_far& cr_far = it;
if (not session->create(cr_far, cause, offending_ie.offending_ie)) {
session->cleanup();
delete session;
break;
}
}
if (cause.cause_value == CAUSE_VALUE_REQUEST_ACCEPTED) {
//--------------------------------
// Process PDR to be created
cause.cause_value = CAUSE_VALUE_REQUEST_ACCEPTED;
for (auto it : req->pfcp_ies.create_pdrs) {
create_pdr& cr_pdr = it;
core::pfcp::fteid_t allocated_fteid = {};
core::pfcp::far_id_t far_id = {};
if (not cr_pdr.get(far_id)) {
//should be caught in lower layer
cause.cause_value = CAUSE_VALUE_MANDATORY_IE_MISSING;
offending_ie.offending_ie = PFCP_IE_FAR_ID;
session->cleanup();
delete session;
break;
}
// create pdr after create far
core::pfcp::create_far cr_far = {};
if (not req->pfcp_ies.get(far_id, cr_far)) {
//should be caught in lower layer
cause.cause_value = CAUSE_VALUE_MANDATORY_IE_MISSING;
offending_ie.offending_ie = PFCP_IE_CREATE_FAR;
session->cleanup();
delete session;
break;
}
if (not session->create(cr_pdr, cause, offending_ie.offending_ie, allocated_fteid)) {
session->cleanup();
delete session;
if (cause.cause_value == CAUSE_VALUE_CONDITIONAL_IE_MISSING) {
resp->pfcp_ies.set(offending_ie);
}
resp->pfcp_ies.set(cause);
break;
}
core::pfcp::created_pdr created_pdr = {};
created_pdr.set(cr_pdr.pdr_id.second);
created_pdr.set(allocated_fteid);
resp->pfcp_ies.set(created_pdr);
}
}
if (cause.cause_value == CAUSE_VALUE_REQUEST_ACCEPTED) {
s = std::shared_ptr<pfcp_session>(session);
add_pfcp_session_by_cp_fseid(fseid, s);
add_pfcp_session_by_up_seid(session->seid, s);
//start_timer_min_commit_interval();
//start_timer_max_commit_interval();
core::pfcp::fseid_t up_fseid = {};
spgwu_cfg.get_pfcp_fseid(up_fseid);
up_fseid.seid = session->get_up_seid();
resp->pfcp_ies.set(up_fseid);
// Register session
oai::cn::core::pfcp::node_id_t node_id = {};
req->pfcp_ies.get(node_id);
pfcp_associations::get_instance().notify_add_session(node_id, fseid);
}
} else {
cause.cause_value = CAUSE_VALUE_REQUEST_REJECTED;
}
} else {
//should be caught in lower layer
cause.cause_value = CAUSE_VALUE_MANDATORY_IE_MISSING;
offending_ie.offending_ie = PFCP_IE_F_SEID;
}
resp->pfcp_ies.set(cause);
if ((cause.cause_value == CAUSE_VALUE_MANDATORY_IE_MISSING)
|| (cause.cause_value == CAUSE_VALUE_CONDITIONAL_IE_MISSING)){
resp->pfcp_ies.set(offending_ie);
}
// TODO warn may be huge, do a vector of string
Logger::pfcp_switch().info(to_string());
}
//------------------------------------------------------------------------------
void pfcp_switch::handle_pfcp_session_modification_request(std::shared_ptr<core::itti::itti_sxab_session_modification_request> sreq, itti_sxab_session_modification_response* resp)
{
itti_sxab_session_modification_request * req = sreq.get();
std::shared_ptr<core::pfcp::pfcp_session> s = {};
core::pfcp::fseid_t fseid = {};
core::pfcp::cause_t cause = {.cause_value = CAUSE_VALUE_REQUEST_ACCEPTED};
core::pfcp::offending_ie_t offending_ie = {};
failed_rule_id_t failed_rule = {};
if (not get_pfcp_session_by_up_seid(req->seid, s)) {
cause.cause_value = CAUSE_VALUE_SESSION_CONTEXT_NOT_FOUND;
} else {
core::pfcp::pfcp_session* session = s.get();
core::pfcp::fseid_t fseid = {};
if (req->pfcp_ies.get(fseid)) {
Logger::pfcp_switch().warn( "TODO check carrefully update fseid in PFCP_SESSION_MODIFICATION_REQUEST");
session->cp_fseid = fseid;
}
resp->seid = session->cp_fseid.seid;
for (auto it : req->pfcp_ies.remove_pdrs) {
remove_pdr& pdr = it;
if (not session->remove(pdr, cause, offending_ie.offending_ie)) {
if (cause.cause_value == CAUSE_VALUE_RULE_CREATION_MODIFICATION_FAILURE) {
failed_rule.rule_id_type = FAILED_RULE_ID_TYPE_PDR;
failed_rule.rule_id_value = pdr.pdr_id.second.rule_id;
resp->pfcp_ies.set(failed_rule);
break;
}
}
}
if (cause.cause_value == CAUSE_VALUE_REQUEST_ACCEPTED) {
for (auto it : req->pfcp_ies.remove_fars) {
remove_far& far = it;
if (not session->remove(far, cause, offending_ie.offending_ie)) {
if (cause.cause_value == CAUSE_VALUE_RULE_CREATION_MODIFICATION_FAILURE) {
failed_rule.rule_id_type = FAILED_RULE_ID_TYPE_FAR;
failed_rule.rule_id_value = far.far_id.second.far_id;
resp->pfcp_ies.set(failed_rule);
break;
}
}
}
}
if (cause.cause_value == CAUSE_VALUE_REQUEST_ACCEPTED) {
for (auto it : req->pfcp_ies.create_fars) {
create_far& cr_far = it;
if (not session->create(cr_far, cause, offending_ie.offending_ie)) {
break;
}
}
}
if (cause.cause_value == CAUSE_VALUE_REQUEST_ACCEPTED) {
for (auto it : req->pfcp_ies.create_pdrs) {
create_pdr& cr_pdr = it;
core::pfcp::far_id_t far_id = {};
if (not cr_pdr.get(far_id)) {
//should be caught in lower layer
cause.cause_value = CAUSE_VALUE_MANDATORY_IE_MISSING;
offending_ie.offending_ie = PFCP_IE_FAR_ID;
break;
}
// create pdr after create far
core::pfcp::create_far cr_far = {};
if (not req->pfcp_ies.get(far_id, cr_far)) {
//should be caught in lower layer
cause.cause_value = CAUSE_VALUE_MANDATORY_IE_MISSING;
offending_ie.offending_ie = PFCP_IE_CREATE_FAR;
break;
}
core::pfcp::fteid_t allocated_fteid = {};
if (not session->create(cr_pdr, cause, offending_ie.offending_ie, allocated_fteid)) {
if (cause.cause_value == CAUSE_VALUE_CONDITIONAL_IE_MISSING) {
resp->pfcp_ies.set(offending_ie);
}
resp->pfcp_ies.set(cause);
break;
}
core::pfcp::created_pdr created_pdr = {};
created_pdr.set(cr_pdr.pdr_id.second);
resp->pfcp_ies.set(created_pdr);
}
}
if (cause.cause_value == CAUSE_VALUE_REQUEST_ACCEPTED) {
for (auto it : req->pfcp_ies.update_pdrs) {
update_pdr& pdr = it;
uint8_t cause_value = CAUSE_VALUE_REQUEST_ACCEPTED;
if (not session->update(pdr, cause_value)) {
failed_rule_id_t failed_rule = {};
failed_rule.rule_id_type = FAILED_RULE_ID_TYPE_PDR;
failed_rule.rule_id_value = pdr.pdr_id.rule_id;
resp->pfcp_ies.set(failed_rule);
}
}
for (auto it : req->pfcp_ies.update_fars) {
update_far& far = it;
uint8_t cause_value = CAUSE_VALUE_REQUEST_ACCEPTED;
if (not session->update(far, cause_value)) {
cause.cause_value = cause_value;
failed_rule_id_t failed_rule = {};
failed_rule.rule_id_type = FAILED_RULE_ID_TYPE_FAR;
failed_rule.rule_id_value = far.far_id.far_id;
resp->pfcp_ies.set(failed_rule);
}
}
}
}
resp->pfcp_ies.set(cause);
if ((cause.cause_value == CAUSE_VALUE_MANDATORY_IE_MISSING)
|| (cause.cause_value == CAUSE_VALUE_CONDITIONAL_IE_MISSING)){
resp->pfcp_ies.set(offending_ie);
}
Logger::pfcp_switch().info(to_string());
}
//------------------------------------------------------------------------------
void pfcp_switch::handle_pfcp_session_deletion_request(std::shared_ptr<core::itti::itti_sxab_session_deletion_request> sreq, itti_sxab_session_deletion_response* resp)
{
itti_sxab_session_deletion_request * req = sreq.get();
std::shared_ptr<core::pfcp::pfcp_session> s = {};
core::pfcp::fseid_t fseid = {};
core::pfcp::cause_t cause = {.cause_value = CAUSE_VALUE_REQUEST_ACCEPTED};
core::pfcp::offending_ie_t offending_ie = {};
failed_rule_id_t failed_rule = {};
if (not get_pfcp_session_by_up_seid(req->seid, s)) {
cause.cause_value = CAUSE_VALUE_SESSION_CONTEXT_NOT_FOUND;
} else {
resp->seid = s->cp_fseid.seid;
remove_pfcp_session(s);
}
pfcp_associations::get_instance().notify_del_session(fseid);
resp->pfcp_ies.set(cause);
Logger::pfcp_switch().info(to_string());
}
//------------------------------------------------------------------------------
void pfcp_switch::pfcp_session_look_up_pack_in_access(struct iphdr* const iph, const std::size_t num_bytes, const struct sockaddr_storage& r_endpoint, const socklen_t& r_endpoint_addr_len, const uint32_t tunnel_id)
{
std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>> pdrs = {};
if (get_pfcp_ul_pdrs_by_up_teid(tunnel_id, pdrs)) {
bool nocp = false;
bool buff = false;
for (std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>::iterator it_pdr = pdrs->begin(); it_pdr < pdrs->end(); ++it_pdr) {
if ((*it_pdr)->look_up_pack_in_access(iph, num_bytes, r_endpoint, r_endpoint_addr_len, tunnel_id)) {
std::shared_ptr<core::pfcp::pfcp_session> ssession = {};
uint64_t lseid = 0;
if ((*it_pdr)->get(lseid)) {
if ( get_pfcp_session_by_up_seid(lseid, ssession)) {
core::pfcp::far_id_t far_id = {};
if ((*it_pdr)->get(far_id)) {
std::shared_ptr<core::pfcp::pfcp_far> sfar = {};
if (ssession->get(far_id.far_id, sfar)) {
sfar->apply_forwarding_rules(iph, num_bytes, nocp, buff);
}
}
}
}
return;
}
else {
Logger::pfcp_switch().info( "pfcp_session_look_up_pack_in_access failed PDR id %4x ", (*it_pdr)->pdr_id.rule_id);
}
}
}
else {
//Logger::pfcp_switch().info( "pfcp_session_look_up_pack_in_access tunnel " TEID_FMT " not found", tunnel_id);
spgwu_s1u_inst->report_error_indication(r_endpoint, r_endpoint_addr_len, tunnel_id);
}
}
//------------------------------------------------------------------------------
void pfcp_switch::pfcp_session_look_up_pack_in_access(struct ipv6hdr* const ip6h, const std::size_t num_bytes, const struct sockaddr_storage& r_endpoint, const socklen_t& r_endpoint_addr_len, const uint32_t tunnel_id)
{
//TODO
}
//------------------------------------------------------------------------------
void pfcp_switch::pfcp_session_look_up_pack_in_core(const char *buffer, const std::size_t num_bytes)
{
//Logger::pfcp_switch().info( "pfcp_session_look_up_pack_in_core %d bytes", num_bytes);
struct iphdr* iph = (struct iphdr*)buffer;
std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>> pdrs;
if (iph->version == 4) {
uint32_t ue_ip = be32toh(iph->daddr);
if (get_pfcp_dl_pdrs_by_ue_ip(ue_ip, pdrs)) {
bool nocp = false;
bool buff = false;
for (std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>::iterator it = pdrs->begin(); it < pdrs->end(); ++it) {
if ((*it)->look_up_pack_in_core(iph, num_bytes)) {
std::shared_ptr<core::pfcp::pfcp_session> ssession = {};
uint64_t lseid = 0;
if ((*it)->get(lseid)) {
if ( get_pfcp_session_by_up_seid(lseid, ssession)) {
core::pfcp::far_id_t far_id = {};
if ((*it)->get(far_id)) {
std::shared_ptr<core::pfcp::pfcp_far> sfar = {};
if (ssession->get(far_id.far_id, sfar)) {
sfar->apply_forwarding_rules(iph, num_bytes, nocp, buff);
if (buff) {
(*it)->buffering_requested(buffer, num_bytes);
}
if (nocp) {
(*it)->notify_cp_requested(ssession);
}
}
}
}
}
return;
}
else {
Logger::pfcp_switch().info( "look_up_pack_in_core failed PDR id %4x ", (*it)->pdr_id.rule_id);
}
}
}
else {
Logger::pfcp_switch().info( "pfcp_session_look_up_pack_in_core UE IP %8x not found", ue_ip);
}
} else if (iph->version == 6) {
// TODO;
} else {
Logger::pfcp_switch().info( "Unknown IP version %d packet", iph->version);
}
}
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
/*! \file pfcp_switch.hpp
\author Lionel GAUTHIER
\date 2019
\email: lionel.gauthier@eurecom.fr
*/
#ifndef FILE_PFCP_SWITCH_HPP_SEEN
#define FILE_PFCP_SWITCH_HPP_SEEN
//#include "concurrentqueue.h"
#include "itti.hpp"
#include "itti_msg_sxab.hpp"
#include "msg_pfcp.hpp"
#include "pfcp_session.hpp"
#include "uint_generator.hpp"
#include "thread_sched.h"
#include <folly/AtomicHashMap.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <unordered_map>
#include <memory>
#include <netinet/in.h>
#include <thread>
#include <vector>
namespace oai::cn::nf::spgwu {
// Have to be tuned for standard situations
#define PFCP_SWITCH_MAX_SESSIONS 512
#define PFCP_SWITCH_MAX_PDRS 512
#define PDN_INTERFACE_NAME "pdn"
//class switching_records{
//public:
// switching_records() {
// cp_fseid2pfcp_sessions = {};
// up_seid2pfcp_sessions = {};
// ul_s1u_teid2pfcp_pdr = {};
// ue_ipv4_hbo2pfcp_pdr = {};
// }
// std::unordered_map<core::pfcp::fseid_t, std::shared_ptr<core::pfcp::pfcp_session>> cp_fseid2pfcp_sessions;
// std::unordered_map<uint64_t, std::shared_ptr<core::pfcp::pfcp_session>> up_seid2pfcp_sessions;
//};
//
//class switching_data_per_cpu_socket {
//public:
// switching_data_per_cpu_socket() {
// msg_iov_= {};
// switching_up_traffic_index = 0;
// switching_control_index = 1;
// switching_records[0] = {};
// switching_records[1] = {};
// }
// switching_records switching_records[2];
// struct iovec msg_iov_; /* scatter/gather array */
// uint8_t switching_up_traffic_index;
// uint8_t switching_control_index;
//};
class pfcp_switch {
private:
// Very unoptimized
#define PFCP_SWITCH_RECV_BUFFER_SIZE 2048
#define ROOM_FOR_GTPV1U_G_PDU 64
char recv_buffer_[PFCP_SWITCH_RECV_BUFFER_SIZE];
int sock_r;
int sock_w;
std::thread thread_sock_;
//std::string gw_mac_address;
int pdn_if_index;
oai::cn::util::uint_generator<uint64_t> seid_generator_;
oai::cn::util::uint_generator<teid_t> teid_s1u_generator_;
#define TASK_SPGWU_PFCP_SWITCH_MAX_COMMIT_INTERVAL (0)
#define TASK_SPGWU_PFCP_SWITCH_MIN_COMMIT_INTERVAL (1)
#define PFCP_SWITCH_MAX_COMMIT_INTERVAL_MILLISECONDS 200
#define PFCP_SWITCH_MIN_COMMIT_INTERVAL_MILLISECONDS 50
//switching_data_per_cpu_socket switching_data[];
struct iovec msg_iov_; /* scatter/gather array */
std::unordered_map<core::pfcp::fseid_t, std::shared_ptr<core::pfcp::pfcp_session>> cp_fseid2pfcp_sessions;
folly::AtomicHashMap<uint64_t, std::shared_ptr<core::pfcp::pfcp_session>> up_seid2pfcp_sessions;
folly::AtomicHashMap<teid_t, std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>> ul_s1u_teid2pfcp_pdr;
folly::AtomicHashMap<uint32_t, std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>> ue_ipv4_hbo2pfcp_pdr;
//moodycamel::ConcurrentQueue<core::pfcp::pfcp_session*> create_session_q;
void pdn_read_loop(const thread_sched_params_t& thread_sched_params);
int create_pdn_socket (const char * const ifname, const bool promisc, int& if_index);
int create_pdn_socket (const char * const ifname);
void setup_pdn_interfaces();
oai::cn::core::itti::timer_id_t timer_max_commit_interval_id;
oai::cn::core::itti::timer_id_t timer_min_commit_interval_id;
void stop_timer_min_commit_interval();
void start_timer_min_commit_interval();
void stop_timer_max_commit_interval();
void start_timer_max_commit_interval();
void commit_changes();
bool get_pfcp_session_by_cp_fseid(const core::pfcp::fseid_t&, std::shared_ptr<core::pfcp::pfcp_session>&) const;
bool get_pfcp_session_by_up_seid(const uint64_t, std::shared_ptr<core::pfcp::pfcp_session>&) const;
bool get_pfcp_ul_pdrs_by_up_teid(const teid_t, std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>&) const;
bool get_pfcp_dl_pdrs_by_ue_ip(const uint32_t, std::shared_ptr<std::vector<std::shared_ptr<core::pfcp::pfcp_pdr>>>&) const;
void add_pfcp_session_by_cp_fseid(const core::pfcp::fseid_t&, std::shared_ptr<core::pfcp::pfcp_session>&);
void add_pfcp_session_by_up_seid(const uint64_t, std::shared_ptr<core::pfcp::pfcp_session>&);
void add_pfcp_ul_pdr_by_up_teid(const teid_t teid, std::shared_ptr<core::pfcp::pfcp_pdr>& );
void remove_pfcp_session(std::shared_ptr<core::pfcp::pfcp_session>&);
uint64_t generate_seid() {return seid_generator_.get_uid();};
teid_t generate_teid_s1u() {return teid_s1u_generator_.get_uid();};
public:
pfcp_switch();
pfcp_switch(pfcp_switch const&) = delete;
void operator=(pfcp_switch const&) = delete;
void add_pfcp_dl_pdr_by_ue_ip(const uint32_t ue_ip, std::shared_ptr<core::pfcp::pfcp_pdr>& );
core::pfcp::fteid_t generate_fteid_s1u();
bool create_packet_in_access(std::shared_ptr<core::pfcp::pfcp_pdr>& pdr, const core::pfcp::fteid_t& in, uint8_t& cause);
void pfcp_session_look_up_pack_in_access(struct iphdr* const iph, const std::size_t num_bytes, const struct sockaddr_storage& r_endpoint, const socklen_t& r_endpoint_addr_len, const uint32_t tunnel_id);
void pfcp_session_look_up_pack_in_access(struct ipv6hdr* const iph, const std::size_t num_bytes, const struct sockaddr_storage& r_endpoint, const socklen_t& r_endpoint_addr_len, const uint32_t tunnel_id);
void pfcp_session_look_up_pack_in_access(struct iphdr* const iph, const std::size_t num_bytes, const struct sockaddr_storage& r_endpoint, const socklen_t& r_endpoint_addr_len) {};
void pfcp_session_look_up_pack_in_access(struct ipv6hdr* const iph, const std::size_t num_bytes, const struct sockaddr_storage& r_endpoint, const socklen_t& r_endpoint_addr_len) {};
//void pfcp_session_look_up(struct ethhdr* const ethh, const std::size_t num_bytes);
void pfcp_session_look_up_pack_in_core(const char *buffer, const std::size_t num_bytes);
void send_to_core(char* const ip_packet, const ssize_t len);
void handle_pfcp_session_establishment_request(std::shared_ptr<core::itti::itti_sxab_session_establishment_request> sreq, core::itti::itti_sxab_session_establishment_response* );
void handle_pfcp_session_modification_request(std::shared_ptr<core::itti::itti_sxab_session_modification_request> sreq, core::itti::itti_sxab_session_modification_response* );
void handle_pfcp_session_deletion_request(std::shared_ptr<core::itti::itti_sxab_session_deletion_request> sreq, core::itti::itti_sxab_session_deletion_response* );
void time_out_min_commit_interval(const uint32_t timer_id);
void time_out_max_commit_interval(const uint32_t timer_id);
void remove_pfcp_session(const oai::cn::core::pfcp::fseid_t& cp_fseid);
void remove_pfcp_ul_pdrs_by_up_teid(const teid_t) {};
void remove_pfcp_dl_pdrs_by_ue_ip(const uint32_t) {};
std::string to_string() const;
};
}
#endif /* FILE_PFCP_SWITCH_HPP_SEEN */
@@ -55,6 +55,23 @@ void spgwu_s1u_task (void*);
void spgwu_s1u_task (void *args_p)
{
  const task_id_t task_id = TASK_SPGWU_S1U;
+  const thread_sched_params_t* const thread_sched_params = (const thread_sched_params_t* const)args_p;
+  if (thread_sched_params->cpu_id >= 0) {
+    cpu_set_t cpuset;
+    CPU_SET(thread_sched_params->cpu_id, &cpuset);
+    if (int rc = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset)) {
+      Logger::spgwu_s1u().warn( "Could not set affinity to ITTI task TASK_SPGWU_S1U, err=%d", rc);
+    }
+  }
+  struct sched_param sparam;
+  memset(&sparam, 0, sizeof(sparam));
+  sparam.sched_priority = thread_sched_params->sched_priority;
+  if (int rc = pthread_setschedparam(pthread_self(), thread_sched_params->sched_policy, &sparam)) {
+    Logger::spgwu_s1u().warn( "Could not set schedparam to ITTI task TASK_SPGWU_S1U, err=%d", rc);
+  }
  itti_inst->notify_task_ready(task_id);
  do {
@@ -88,10 +105,10 @@ void spgwu_s1u_task (void *args_p)
}
//------------------------------------------------------------------------------
-spgwu_s1u::spgwu_s1u () : gtpu_l4_stack(spgwu_cfg.s1_up.addr4, spgwu_cfg.s1_up.port, spgwu_cfg.s1_up.cpu_id_thread_loop_read)
+spgwu_s1u::spgwu_s1u () : gtpu_l4_stack(spgwu_cfg.s1_up.addr4, spgwu_cfg.s1_up.port, spgwu_cfg.s1_up.thread_rd_sched_params)
{
  Logger::spgwu_s1u().startup("Starting...");
-  if (itti_inst->create_task(TASK_SPGWU_S1U, spgwu_s1u_task, nullptr) ) {
+  if (itti_inst->create_task(TASK_SPGWU_S1U, spgwu_s1u_task, &spgwu_cfg.itti.s1u_sched_params) ) {
    Logger::spgwu_s1u().error( "Cannot create task TASK_SPGWU_S1U" );
    throw std::runtime_error( "Cannot create task TASK_SPGWU_S1U" );
  }
@@ -238,7 +255,7 @@ void spgwu_s1u::report_error_indication(const struct sockaddr_storage& r_endpoin
    error_ind->gtp_ies.set(peer_address);
  } else {
    // mandatory ie
-    free(error_ind);
+    delete error_ind;
    return;
  }
...
@@ -95,6 +95,48 @@ int spgwu_config::get_pfcp_fseid(oai::cn::core::pfcp::fseid_t& fseid)
  }
  return rc;
}
//------------------------------------------------------------------------------
int spgwu_config::load_thread_sched_params(const Setting& thread_sched_params_cfg, thread_sched_params_t& cfg)
{
thread_sched_params_cfg.lookupValue(SPGWU_CONFIG_STRING_THREAD_RD_CPU_ID, cfg.cpu_id);
std::string thread_rd_sched_policy;
thread_sched_params_cfg.lookupValue(SPGWU_CONFIG_STRING_THREAD_RD_SCHED_POLICY, thread_rd_sched_policy);
util::trim(thread_rd_sched_policy);
if (boost::iequals(thread_rd_sched_policy, "SCHED_OTHER")) {
cfg.sched_policy = SCHED_OTHER;
} else if (boost::iequals(thread_rd_sched_policy, "SCHED_IDLE")) {
cfg.sched_policy = SCHED_IDLE;
} else if (boost::iequals(thread_rd_sched_policy, "SCHED_BATCH")) {
cfg.sched_policy = SCHED_BATCH;
} else if (boost::iequals(thread_rd_sched_policy, "SCHED_FIFO")) {
cfg.sched_policy = SCHED_FIFO;
} else if (boost::iequals(thread_rd_sched_policy, "SCHED_RR")) {
cfg.sched_policy = SCHED_RR;
} else {
Logger::spgwu_app().error("thread_rd_sched_policy: %s, unknown in config file", thread_rd_sched_policy.c_str());
return RETURNerror;
}
thread_sched_params_cfg.lookupValue(SPGWU_CONFIG_STRING_THREAD_RD_SCHED_PRIORITY, cfg.sched_priority);
if ((cfg.sched_priority > 99) || (cfg.sched_priority < 1)) {
Logger::spgwu_app().error("thread_rd_sched_priority: %d, must be in interval [1..99] in config file", cfg.sched_priority);
return RETURNerror;
}
return RETURNok;
}
//------------------------------------------------------------------------------
int spgwu_config::load_itti(const Setting& itti_cfg, itti_cfg_t& cfg)
{
const Setting& sched_params_cfg = itti_cfg[SPGWU_CONFIG_STRING_SCHED_PARAMS];
load_thread_sched_params(sched_params_cfg, cfg.core_sched_params);
const Setting& s1u_sched_params_cfg = itti_cfg[SPGWU_CONFIG_STRING_S1U_SCHED_PARAMS];
load_thread_sched_params(s1u_sched_params_cfg, cfg.s1u_sched_params);
const Setting& sx_sched_params_cfg = itti_cfg[SPGWU_CONFIG_STRING_SX_SCHED_PARAMS];
load_thread_sched_params(sx_sched_params_cfg, cfg.sx_sched_params);
return RETURNok;
}
//------------------------------------------------------------------------------
int spgwu_config::load_interface(const Setting& if_cfg, interface_cfg_t& cfg)
@@ -127,7 +169,8 @@ int spgwu_config::load_interface(const Setting& if_cfg, interface_cfg_t& cfg)
      cfg.network4.s_addr = htons(ntohs(cfg.addr4.s_addr) & 0xFFFFFFFF << (32 - std::stoi (util::trim(words.at(1)))));
    }
    if_cfg.lookupValue(SPGWU_CONFIG_STRING_PORT, cfg.port);
-    if_cfg.lookupValue(SPGWU_CONFIG_STRING_CPU_ID_THREAD_LOOP_READ, cfg.cpu_id_thread_loop_read);
+    const Setting& sched_params_cfg = if_cfg[SPGWU_CONFIG_STRING_SCHED_PARAMS];
+    load_thread_sched_params(sched_params_cfg, cfg.thread_rd_sched_params);
  }
  return RETURNok;
}
@@ -165,6 +208,9 @@ int spgwu_config::load(const string& config_file)
    spgwu_cfg.lookupValue(SPGWU_CONFIG_STRING_PID_DIRECTORY, pid_dir);
    util::trim(pid_dir);
+    const Setting& itti_cfg = spgwu_cfg[SPGWU_CONFIG_STRING_ITTI];
+    load_itti(itti_cfg, itti);
    const Setting& nw_if_cfg = spgwu_cfg[SPGWU_CONFIG_STRING_INTERFACES];
    const Setting& s1_up_cfg = nw_if_cfg[SPGWU_CONFIG_STRING_INTERFACE_S1U_S12_S4_UP];
...
@@ -30,6 +30,7 @@
#define FILE_SPGWU_CONFIG_HPP_SEEN
#include "3gpp_29.244.h"
+#include "thread_sched.h"
#include <libconfig.h++>
#include <mutex>
#include <netinet/in.h>
@@ -47,7 +48,12 @@ namespace oai::cn::nf::spgwu {
#define SPGWU_CONFIG_STRING_INTERFACE_NAME "INTERFACE_NAME"
#define SPGWU_CONFIG_STRING_IPV4_ADDRESS "IPV4_ADDRESS"
#define SPGWU_CONFIG_STRING_PORT "PORT"
-#define SPGWU_CONFIG_STRING_CPU_ID_THREAD_LOOP_READ "CPU_ID_THREAD_LOOP_READ"
+#define SPGWU_CONFIG_STRING_SCHED_PARAMS "SCHED_PARAMS"
+#define SPGWU_CONFIG_STRING_S1U_SCHED_PARAMS "S1U_SCHED_PARAMS"
+#define SPGWU_CONFIG_STRING_SX_SCHED_PARAMS "SX_SCHED_PARAMS"
+#define SPGWU_CONFIG_STRING_THREAD_RD_CPU_ID "THREAD_RD_CPU_ID"
+#define SPGWU_CONFIG_STRING_THREAD_RD_SCHED_POLICY "THREAD_RD_SCHED_POLICY"
+#define SPGWU_CONFIG_STRING_THREAD_RD_SCHED_PRIORITY "THREAD_RD_SCHED_PRIORITY"
#define SPGWU_CONFIG_STRING_INTERFACE_SGI "SGI"
#define SPGWU_CONFIG_STRING_INTERFACE_SX "SX"
#define SPGWU_CONFIG_STRING_INTERFACE_S1U_S12_S4_UP "S1U_S12_S4_UP"
@@ -58,6 +64,7 @@ namespace oai::cn::nf::spgwu {
#define SPGWU_CONFIG_STRING_SNAT "SNAT"
#define SPGWU_CONFIG_STRING_MAX_PFCP_SESSIONS "MAX_PFCP_SESSIONS"
#define SPGWU_CONFIG_STRING_SPGWC_LIST "SPGW-C_LIST"
+#define SPGWU_CONFIG_STRING_ITTI "ITTI"
#define SPGW_ABORT_ON_ERROR true
#define SPGW_WARN_ON_ERROR false
@@ -69,7 +76,7 @@ typedef struct interface_cfg_s {
  struct in6_addr addr6;
  unsigned int mtu;
  unsigned int port;
-  int cpu_id_thread_loop_read;
+  thread_sched_params_t thread_rd_sched_params;
} interface_cfg_t;
typedef struct pdn_cfg_s {
@@ -80,11 +87,19 @@ typedef struct pdn_cfg_s {
  bool snat;
} pdn_cfg_t;
+typedef struct itti_cfg_s {
+  thread_sched_params_t core_sched_params;
+  thread_sched_params_t s1u_sched_params;
+  thread_sched_params_t sx_sched_params;
+} itti_cfg_t;
class spgwu_config {
private:
+  int load_itti(const libconfig::Setting& itti_cfg, itti_cfg_t& cfg);
  int load_interface(const libconfig::Setting& if_cfg, interface_cfg_t& cfg);
+  int load_thread_sched_params(const libconfig::Setting& thread_sched_params_cfg, thread_sched_params_t& cfg);
public:
@@ -95,6 +110,8 @@ public:
  interface_cfg_t s1_up;
  interface_cfg_t sgi;
  interface_cfg_t sx;
+  itti_cfg_t itti;
  std::string gateway;
  uint32_t max_pfcp_sessions;
@@ -103,7 +120,7 @@ public:
  std::vector<oai::cn::core::pfcp::node_id_t> spgwcs;
-  spgwu_config() : m_rw_lock(), pid_dir(), instance(0), s1_up(), sgi(), gateway(),sx(), pdns(), spgwcs(), max_pfcp_sessions(100) {};
+  spgwu_config() : m_rw_lock(), pid_dir(), instance(0), s1_up(), sgi(), gateway(), sx(), itti(), pdns(), spgwcs(), max_pfcp_sessions(100) {};
  void lock() {m_rw_lock.lock();};
  void unlock() {m_rw_lock.unlock();};
  int load(const std::string& config_file);
...