Commit e90d6685 authored by Lionel Gauthier's avatar Lionel Gauthier

Partial update for data plane on eNB for S1-U

git-svn-id: http://svn.eurecom.fr/openair4G/trunk@5194 818b1a75-f10b-46b9-bf7c-635c3b92a50f
parent 7a3c4490
......@@ -180,9 +180,9 @@ typedef struct nas_bearer_param_s {
pre_emp_capability_t pre_emp_capability;
/* S-GW TEID for user-plane */
uint32_t teid;
uint32_t sgw_s1u_teid;
/* S-GW IP address for User-Plane */
ip_address_t s_gw_address;
ip_address_t sgw_s1u_address;
} nas_bearer_param_t;
typedef struct nas_conn_rel_ind_s {
......
/*******************************************************************************
Eurecom OpenAirInterface
Copyright(c) 1999 - 2012 Eurecom
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information
Openair Admin: openair_admin@eurecom.fr
Openair Tech : openair_tech@eurecom.fr
Forums : http://forums.eurecom.fr/openairinterface
Address : EURECOM, Campus SophiaTech, 450 Route des Chappes
06410 Biot FRANCE
Eurecom OpenAirInterface core network
Copyright(c) 1999 - 2014 Eurecom
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information
Openair Admin: openair_admin@eurecom.fr
Openair Tech : openair_tech@eurecom.fr
Forums : http://forums.eurecom.fr/openairinterface
Address : EURECOM,
Campus SophiaTech,
450 Route des Chappes,
CS 50193
06904 Biot Sophia Antipolis cedex,
FRANCE
*******************************************************************************/
/*! \file gtpv1u.h
* \brief
* \author Sebastien ROUX, Lionel Gauthier
* \company Eurecom
* \email: lionel.gauthier@eurecom.fr
*/
#ifndef GTPV1_U_H_
#define GTPV1_U_H_
......
/*******************************************************************************
Eurecom OpenAirInterface Core Network
Copyright(c) 1999 - 2014 Eurecom
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information
Openair Admin: openair_admin@eurecom.fr
Openair Tech : openair_tech@eurecom.fr
Forums : http://forums.eurecom.fr/openairinterface
Address : EURECOM,
Campus SophiaTech,
450 Route des Chappes,
CS 50193
06904 Biot Sophia Antipolis cedex,
FRANCE
*******************************************************************************/
/*! \file gtpv1u_eNB.c
* \brief
* \author Sebastien ROUX, Lionel GAUTHIER
* \version 1.0
* \company Eurecom
* \email: lionel.gauthier@eurecom.fr
*/
#include <stdio.h>
#include <errno.h>
#include "mme_config.h"
#include "assertions.h"
#include "intertask_interface.h"
#include "timer.h"
#include "gtpv1u.h"
#include "NwGtpv1u.h"
#include "NwGtpv1uMsg.h"
#include "NwLog.h"
#include "log.h"
#include "gtpv1u_eNB_defs.h"
#include "gtpv1_u_messages_types.h"
#include "udp_eNB_task.h"
#include "UTIL/LOG/log.h"
#include "COMMON/platform_types.h"
#include "COMMON/platform_constants.h"
extern boolean_t pdcp_data_req(
module_id_t enb_idP,
module_id_t UE_id,
frame_t frame,
eNB_flag_t eNB_flag,
rb_id_t rb_id,
mui_t muiP,
confirm_t confirmP, \
sdu_size_t sdu_buffer_size,
unsigned char* sdu_buffer,
pdcp_transmission_mode_t mode);
static int gtpv1u_eNB_send_init_udp(
uint16_t port_number);
NwGtpv1uRcT gtpv1u_eNB_log_request(
NwGtpv1uLogMgrHandleT hLogMgr,
NwU32T logLevel,
NwCharT *file,
NwU32T line,
NwCharT *logStr);
NwGtpv1uRcT gtpv1u_eNB_send_udp_msg(
NwGtpv1uUdpHandleT udpHandle,
NwU8T *buffer,
NwU32T buffer_len,
NwU32T peerIpAddr,
NwU16T peerPort);
NwGtpv1uRcT gtpv1u_eNB_process_stack_req(
NwGtpv1uUlpHandleT hUlp,
NwGtpv1uUlpApiT *pUlpApi);
int data_recv_callback(
uint16_t portP,
uint32_t address,
uint8_t *buffer,
uint32_t length,
void *arg_p);
//int
//gtpv1u_create_tunnel_endpoint(
// gtpv1u_data_t *gtpv1u_data_pP,
// uint8_t ue_idP,
// uint8_t rab_idP,
// char *sgw_ip_addr_pP,
// uint16_t portP);
static NwGtpv1uRcT gtpv1u_start_timer_wrapper(
NwGtpv1uTimerMgrHandleT tmrMgrHandle,
NwU32T timeoutSec,
NwU32T timeoutUsec,
NwU32T tmrType,
void *timeoutArg,
NwGtpv1uTimerHandleT *hTmr);
static NwGtpv1uRcT gtpv1u_stop_timer_wrapper(
NwGtpv1uTimerMgrHandleT tmrMgrHandle,
NwGtpv1uTimerHandleT hTmr);
int
gtpv1u_initial_req(
gtpv1u_data_t *gtpv1u_data_pP,
teid_t teidP,
tcp_udp_port_t portP,
uint32_t address);
int
gtpv1u_new_data_req(
uint8_t enb_idP,
uint8_t ue_idP,
uint8_t rab_idP,
uint8_t *buffer_pP,
uint32_t buf_lenP);
static int
gtpv1u_create_s1u_tunnel(
gtpv1u_enb_create_tunnel_req_t *create_tunnel_req_pP);
static int gtpv1u_delete_s1u_tunnel(gtpv1u_enb_delete_tunnel_req_t *req_pP);
static int gtpv1u_eNB_init(void);
void *gtpv1u_eNB_task(void *args);
static gtpv1u_data_t gtpv1u_data_g;
static int gtpv1u_eNB_send_init_udp(uint16_t port_number)
{
// Create and alloc new message
MessageDef *message_p;
struct in_addr addr;
#ifdef GTPU_IN_KERNEL
#include <netinet/in.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/socket.h>
#endif
message_p = itti_alloc_new_message(TASK_GTPV1_U, UDP_INIT);
if (message_p == NULL) {
return -1;
}
extern unsigned char pdcp_data_req(u8 eNB_id, u8 UE_id, u32_t frame, u8_t eNB_flag, rb_id_t rb_id, u32 muiP, u32 confirmP, \
sdu_size_t sdu_buffer_size, unsigned char* sdu_buffer, u8 mode);
UDP_INIT(message_p).port = port_number;
//LG UDP_INIT(message_p).address = "0.0.0.0"; //ANY address
addr.s_addr = gtpv1u_data_g.enb_ip_address_for_S1u_S12_S4_up;
UDP_INIT(message_p).address = inet_ntoa(addr);
GTPU_DEBUG("Tx UDP_INIT IP addr %s\n", UDP_INIT(message_p).address);
inline NwGtpv1uRcT gtpv1u_eNB_log_request(NwGtpv1uLogMgrHandleT hLogMgr,
return itti_send_msg_to_task(TASK_UDP, INSTANCE_DEFAULT, message_p);
}
/*! \brief Logging callback registered with the nw-gtpv1u stack.
 *  Relays a log message produced by the stack to the eNB GTPU logger.
 *  \param hLogMgr  opaque log manager handle (unused)
 *  \param logLevel stack-side log level (unused: relayed at debug level)
 *  \param file     source file emitting the log (unused)
 *  \param line     source line emitting the log (unused)
 *  \param logStr   NUL-terminated message to relay
 *  \return NW_GTPV1U_OK always
 */
NwGtpv1uRcT gtpv1u_eNB_log_request(NwGtpv1uLogMgrHandleT hLogMgr,
                                   NwU32T logLevel,
                                   NwCharT *file,
                                   NwU32T line,
                                   NwCharT *logStr)
{
    /* Log exactly once: the stale logIt() call (leftover of the older
     * logging scheme, interleaved by the diff with its GTPU_DEBUG
     * replacement) would otherwise emit every message twice. */
    GTPU_DEBUG("%s\n", logStr);
    return NW_GTPV1U_OK;
}
/*! \brief UDP-send callback registered with the nw-gtpv1u stack.
 *  Packages an outgoing GTP-U PDU into an ITTI UDP_DATA_REQ message and
 *  forwards it to the UDP task for transmission.
 *  \param udpHandle  opaque UDP handle from the stack (unused here)
 *  \param buffer     pointer to the PDU bytes (ownership: assumed valid
 *                    until the UDP task has consumed it — TODO confirm)
 *  \param buffer_len length of the PDU in bytes
 *  \param peerIpAddr destination IPv4 address (host/network order as used
 *                    by the UDP task)
 *  \param peerPort   destination UDP port
 *  \return result of itti_send_msg_to_task(), or NW_GTPV1U_FAILURE when
 *          message allocation fails
 */
NwGtpv1uRcT gtpv1u_eNB_send_udp_msg(
    NwGtpv1uUdpHandleT udpHandle,
    NwU8T *buffer,
    NwU32T buffer_len,
    NwU32T peerIpAddr,
    NwU16T peerPort)
{
    MessageDef     *message_p      = NULL;
    udp_data_req_t *udp_data_req_p = NULL;

    message_p = itti_alloc_new_message(TASK_GTPV1_U, UDP_DATA_REQ);
    /* Guard against allocation failure before dereferencing; matches the
     * NULL check performed in gtpv1u_eNB_send_init_udp(). */
    if (message_p == NULL) {
        return NW_GTPV1U_FAILURE;
    }
    udp_data_req_p                = &message_p->ittiMsg.udp_data_req;
    udp_data_req_p->peer_address  = peerIpAddr;
    udp_data_req_p->peer_port     = peerPort;
    udp_data_req_p->buffer        = buffer;
    udp_data_req_p->buffer_length = buffer_len;
    return itti_send_msg_to_task(TASK_UDP, INSTANCE_DEFAULT, message_p);
}
/* Callback called when a gtpv1u message arrived on UDP interface */
NwGtpv1uRcT gtpv1u_process_stack_req(
NwGtpv1uRcT gtpv1u_eNB_process_stack_req(
NwGtpv1uUlpHandleT hUlp,
NwGtpv1uUlpApiT *pUlpApi)
{
int result;
int result = 0;
teid_t teid = 0;
hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS;
gtpv1u_teid_data_t *gtpv1u_teid_data_p = NULL;
switch(pUlpApi->apiType) {
/* Here there are two type of messages handled:
......@@ -51,28 +204,42 @@ NwGtpv1uRcT gtpv1u_process_stack_req(
case NW_GTPV1U_ULP_API_RECV_TPDU: {
uint8_t buffer[4096];
uint32_t buffer_len;
/* Nw-gptv1u stack has processed a PDU. we can schedule it to PDCP
* for transmission.
*/
teid = pUlpApi->apiInfo.recvMsgInfo.teid;
if (NW_GTPV1U_OK != nwGtpv1uMsgGetTpdu(pUlpApi->apiInfo.recvMsgInfo.hMsg,
buffer, &buffer_len)) {
GTPU_ERROR("Error while retrieving T-PDU");
}
GTPU_DEBUG("Received T-PDU from gtpv1u stack %u with size %d",
pUlpApi->apiInfo.recvMsgInfo.teid, buffer_len);
result = pdcp_data_req(0, // eNB_idx,
0, // UE idx
0, // frame
1, // enb flag
5, // rb id
//-----------------------
// GTPV1U->PDCP mapping
//-----------------------
hash_rc = hashtable_get(gtpv1u_data_g.teid_mapping, teid, (void**)&gtpv1u_teid_data_p);
if (hash_rc == HASH_TABLE_OK) {
GTPU_DEBUG("Received T-PDU from gtpv1u stack teid %u size %d -> enb module id %u ue module id %u rab id %u\n",
teid,
buffer_len,
gtpv1u_teid_data_p->enb_id,
gtpv1u_teid_data_p->ue_id,
gtpv1u_teid_data_p->eps_bearer_id);
result = pdcp_data_req(
gtpv1u_teid_data_p->enb_id,
gtpv1u_teid_data_p->ue_id,
0, // frame TO DO
ENB_FLAG_YES,
gtpv1u_teid_data_p->eps_bearer_id,
0, // mui
0, // confirm
buffer_len,
buffer,
1);
AssertFatal (result == TRUE, "PDCP data request failed!\n");
PDCP_TRANSMISSION_MODE_DATA);
AssertError (result == TRUE, return NW_GTPV1U_FAILURE ,"PDCP data request failed!\n");
} else {
GTPU_ERROR("Received T-PDU from gtpv1u stack teid %u unknown", teid, buffer_len);
}
}
break;
default: {
......@@ -83,31 +250,8 @@ NwGtpv1uRcT gtpv1u_process_stack_req(
return NW_GTPV1U_OK;
}
/*! \brief Send a raw GTP-U datagram through the UDP socket bound to the
 *  given handle.
 *  \param udpHandle  opaque handle; actually a udp_data_t* holding the
 *                    socket descriptor
 *  \param dataBuf    bytes to transmit
 *  \param dataSize   number of bytes in dataBuf
 *  \param peerIpAddr destination IPv4 address
 *  \param peerPort   destination UDP port
 *  \return NW_GTPV1U_FAILURE on a NULL handle or send error,
 *          NW_GTPV1U_OK otherwise
 */
NwGtpv1uRcT gtpv1u_eNB_udp_req(NwGtpv1uUdpHandleT udpHandle,
                               NwU8T *dataBuf,
                               NwU32T dataSize,
                               NwU32T peerIpAddr,
                               NwU16T peerPort)
{
    udp_data_t *udp_p = (udp_data_t *)udpHandle;

    /* A zero handle means no UDP endpoint is attached. */
    if (udp_p == NULL) {
        return NW_GTPV1U_FAILURE;
    }

    LOG_D(GTPU, "New udp req triggered with sd %d, data size %u\n",
          udp_p->sd, dataSize);

    if (udp_send_to(udp_p->sd, peerPort, peerIpAddr, dataBuf, dataSize) < 0) {
        return NW_GTPV1U_FAILURE;
    }

    return NW_GTPV1U_OK;
}
int data_recv_callback(uint16_t port,
int data_recv_callback(uint16_t portP,
uint32_t address,
uint8_t *buffer,
uint32_t length,
......@@ -124,154 +268,211 @@ int data_recv_callback(uint16_t port,
return nwGtpv1uProcessUdpReq(gtpv1u_data_p->gtpv1u_stack,
buffer,
length,
port,
portP,
address);
}
int gtpv1u_create_tunnel_endpoint(gtpv1u_data_t *gtpv1u_data_p, uint8_t ue_id,
uint8_t rab_id, char *sgw_ip_addr, uint16_t port)
{
uint32_t teid;
uint8_t max_attempt = 100;
NwGtpv1uRcT rc;
NwGtpv1uUlpApiT ulp_req;
struct gtpv1u_ue_data_s *new_ue_p;
struct gtpv1u_ue_data_s *temp;
struct gtpv1u_bearer_s *bearer;
hashtable_rc_t hash_rc;
if (rab_id > MAX_BEARERS_PER_UE) {
LOG_E(GTPU, "Could not use rab_id %d > max %d\n",
rab_id, MAX_BEARERS_PER_UE);
return -1;
}
//int
//gtpv1u_create_tunnel_endpoint(
// gtpv1u_data_t *gtpv1u_data_pP,
// uint8_t ue_idP,
// uint8_t rab_idP,
// char *sgw_ip_addr_pP,
// uint16_t portP)
//{
// uint32_t teid;
// uint8_t max_attempt = 100;
// NwGtpv1uRcT rc = NW_GTPV1U_FAILURE;
// NwGtpv1uUlpApiT ulp_req;
// struct gtpv1u_ue_data_s *new_ue_p = NULL;
// struct gtpv1u_bearer_s *bearer_p = NULL;
// hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS;;
//
// if (rab_idP > GTPV1U_MAX_BEARERS_PER_UE) {
// LOG_E(GTPU, "Could not use rab_id %d > max %d\n",
// rab_idP, GTPV1U_MAX_BEARERS_PER_UE);
// return -1;
// }
//
//
// if ((hash_rc = hashtable_get(gtpv1u_data_pP->ue_mapping, (uint64_t)ue_idP, (void**)&new_ue_p)) == HASH_TABLE_OK) {
// /* A context for this UE already exist in the tree, use it */
// /* We check that the tunnel is not already configured */
// if (new_ue_p->bearers[rab_idP].state != BEARER_DOWN) {
// LOG_E(GTPU, "Cannot create new end-point over already existing tunnel\n");
// return -1;
// }
// } else {
// /* Context doesn't exist, create it */
// if (rab_idP != 0) {
// /* UE should first establish Default bearer before trying to setup
// * additional bearers.
// */
// LOG_E(GTPU, "UE context is not known and rab_id != 0\n");
// return -1;
// }
// new_ue_p = calloc(1, sizeof(struct gtpv1u_ue_data_s));
// new_ue_p->ue_id = ue_idP;
//
// hash_rc = hashtable_insert(gtpv1u_data_pP->ue_mapping, (uint64_t)ue_idP, new_ue_p);
//
// if ((hash_rc != HASH_TABLE_OK) && (hash_rc != HASH_TABLE_INSERT_OVERWRITTEN_DATA)) {
// LOG_E(GTPU, "Failed to insert new UE context\n");
// free(new_ue_p);
// return -1;
// }
// }
//
// bearer_p = &new_ue_p->bearers[rab_idP];
//
// /* Configure the bearer */
// bearer_p->state = BEARER_IN_CONFIG;
// bearer_p->sgw_ip_addr = inet_addr(sgw_ip_addr_pP);
// bearer_p->port = portP;
//
// /* Create the new stack api request */
// memset(&ulp_req, 0, sizeof(NwGtpv1uUlpApiT));
// ulp_req.apiType = NW_GTPV1U_ULP_API_CREATE_TUNNEL_ENDPOINT;
//
// /* Try to create new tunnel-endpoint.
// * If teid generated is already present in the stack, just peek another random
// * teid. This could be ok for small number of tunnel but more errors could be
// * thrown if we reached high number of tunnels.
// * TODO: find a solution for teid
// */
// do {
// /* Request for a new random TEID */
// teid = gtpv1u_new_teid();
// ulp_req.apiInfo.createTunnelEndPointInfo.teid = teid;
//
// rc = nwGtpv1uProcessUlpReq(gtpv1u_data_pP->gtpv1u_stack, &ulp_req);
//
// if (rc == NW_GTPV1U_OK) {
//// LOG_D(GTPU, "Successfully created new tunnel endpoint for teid 0x%x\n",
//// teid);
// bearer_p->teid_eNB = teid;
//// gtpv1u_initial_req(gtpv1u_data_pP, teid, GTPV1U_UDP_PORT,
//// inet_addr("192.168.56.101"));
// LOG_I(GTPU, "Created eNB tunnel endpoint %u for ue id %u, rab id %u\n", teid, ue_idP, rab_idP);
// return 0;
// } else {
// LOG_W(GTPU, "Teid %u already in use... %s\n",
// teid, (max_attempt > 1) ? "Trying another one" : "Last chance");
// }
// } while(max_attempt-- && rc != NW_GTPV1U_OK);
//
// bearer_p->state = BEARER_DOWN;
// LOG_I(GTPU, "Failed to created eNB tunnel endpoint %u for ue id %u, rab id %u, bearer down\n", teid, ue_idP, rab_idP);
//
// return -1;
//}
if ((hash_rc = hashtable_get(gtpv1u_data_p->ue_mapping, (uint64_t)ue_id, (void**)&new_ue_p)) == HASH_TABLE_OK) {
/* A context for this UE already exist in the tree, use it */
/* We check that the tunnel is not already configured */
if (new_ue_p->bearers[rab_id].state != BEARER_DOWN) {
LOG_E(GTPU, "Cannot create new end-point over already existing tunnel\n");
return -1;
}
} else {
/* Context doesn't exist, create it */
if (rab_id != 0) {
/* UE should first establish Default bearer before trying to setup
* additional bearers.
*/
LOG_E(GTPU, "UE context is not known and rab_id != 0\n");
return -1;
}
new_ue_p = calloc(1, sizeof(struct gtpv1u_ue_data_s));
new_ue_p->ue_id = ue_id;
hash_rc = hashtable_insert(gtpv1u_data_p->ue_mapping, (uint64_t)ue_id, new_ue_p);
static NwGtpv1uRcT gtpv1u_start_timer_wrapper(
NwGtpv1uTimerMgrHandleT tmrMgrHandle,
NwU32T timeoutSec,
NwU32T timeoutUsec,
NwU32T tmrType,
void *timeoutArg,
NwGtpv1uTimerHandleT *hTmr)
{
if ((hash_rc != HASH_TABLE_OK) && (hash_rc != HASH_TABLE_INSERT_OVERWRITTEN_DATA)) {
LOG_E(GTPU, "Failed to insert new UE context\n");
free(new_ue_p);
return -1;
}
NwGtpv1uRcT rc = NW_GTPV1U_OK;
long timer_id;
if (tmrType == NW_GTPV1U_TMR_TYPE_ONE_SHOT) {
timer_setup(timeoutSec,
timeoutUsec,
TASK_GTPV1_U,
INSTANCE_DEFAULT,
TIMER_ONE_SHOT,
timeoutArg,
&timer_id);
} else {
timer_setup(timeoutSec,
timeoutUsec,
TASK_GTPV1_U,
INSTANCE_DEFAULT,
TIMER_PERIODIC,
timeoutArg,
&timer_id);
}
bearer = &new_ue_p->bearers[rab_id];
/* Configure the bearer */
bearer->state = BEARER_IN_CONFIG;
bearer->sgw_ip_addr = inet_addr(sgw_ip_addr);
bearer->port = port;
/* Create the new stack api request */
memset(&ulp_req, 0, sizeof(NwGtpv1uUlpApiT));
ulp_req.apiType = NW_GTPV1U_ULP_API_CREATE_TUNNEL_ENDPOINT;
/* Try to create new tunnel-endpoint.
* If teid generated is already present in the stack, just peek another random
* teid. This could be ok for small number of tunnel but more errors could be
* thrown if we reached high number of tunnels.
* TODO: find a solution for teid
*/
do {
/* Request for a new random TEID */
teid = gtpv1u_new_teid();
ulp_req.apiInfo.createTunnelEndPointInfo.teid = teid;
return rc;
}
rc = nwGtpv1uProcessUlpReq(gtpv1u_data_p->gtpv1u_stack, &ulp_req);
if (rc == NW_GTPV1U_OK) {
// LOG_D(GTPU, "Successfully created new tunnel endpoint for teid 0x%x\n",
// teid);
bearer->teid_eNB = teid;
// gtpv1u_initial_req(gtpv1u_data_p, teid, GTPV1U_UDP_PORT,
// inet_addr("192.168.56.101"));
LOG_I(GTPU, "Created eNB tunnel endpoint %u for ue id %u, rab id %u\n", teid, ue_id, rab_id);
return 0;
} else {
LOG_W(GTPU, "Teid %u already in use... %s\n",
teid, (max_attempt > 1) ? "Trying another one" : "Last chance");
}
} while(max_attempt-- && rc != NW_GTPV1U_OK);
static NwGtpv1uRcT gtpv1u_stop_timer_wrapper(
NwGtpv1uTimerMgrHandleT tmrMgrHandle,
NwGtpv1uTimerHandleT hTmr)
{
bearer->state = BEARER_DOWN;
LOG_I(GTPU, "Failed to created eNB tunnel endpoint %u for ue id %u, rab id %u, bearer down\n", teid, ue_id, rab_id);
NwGtpv1uRcT rc = NW_GTPV1U_OK;
return -1;
return rc;
}
int gtpv1u_initial_req(gtpv1u_data_t *gtpv1u_data_p, uint32_t teid,
uint16_t port, uint32_t address)
int
gtpv1u_initial_req(
gtpv1u_data_t *gtpv1u_data_pP,
teid_t teidP,
tcp_udp_port_t portP,
uint32_t address)
{
NwGtpv1uUlpApiT ulp_req;
NwGtpv1uRcT rc;
NwGtpv1uRcT rc = NW_GTPV1U_FAILURE;
memset(&ulp_req, 0, sizeof(NwGtpv1uUlpApiT));
ulp_req.apiType = NW_GTPV1U_ULP_API_INITIAL_REQ;
ulp_req.apiInfo.initialReqInfo.teid = teid;
ulp_req.apiInfo.initialReqInfo.peerPort = port;
ulp_req.apiInfo.initialReqInfo.teid = teidP;
ulp_req.apiInfo.initialReqInfo.peerPort = portP;
ulp_req.apiInfo.initialReqInfo.peerIp = address;
rc = nwGtpv1uProcessUlpReq(gtpv1u_data_p->gtpv1u_stack, &ulp_req);
rc = nwGtpv1uProcessUlpReq(gtpv1u_data_pP->gtpv1u_stack, &ulp_req);
if (rc == NW_GTPV1U_OK) {
LOG_D(GTPU, "Successfully sent initial req for teid %u\n", teid);
LOG_D(GTPU, "Successfully sent initial req for teid %u\n", teidP);
} else {
LOG_W(GTPU, "Could not send initial req for teid %u\n", teid);
LOG_W(GTPU, "Could not send initial req for teid %u\n", teidP);
}
return (rc == NW_GTPV1U_OK) ? 0 : -1;
}
int gtpv1u_new_data_req(gtpv1u_data_t *gtpv1u_data_p,
uint8_t ue_id, uint8_t rab_id,
uint8_t *buffer, uint32_t buf_len)
int
gtpv1u_new_data_req(
uint8_t enb_idP,
uint8_t ue_idP,
uint8_t rab_idP,
uint8_t *buffer_pP,
uint32_t buf_lenP)
{
#ifdef GTPU_IN_KERNEL
struct sockaddr_in dummy_dest_addr;
socklen_t socklen = sizeof(struct sockaddr_in);
#endif
NwGtpv1uUlpApiT stack_req;
NwGtpv1uRcT rc;
NwGtpv1uRcT rc = NW_GTPV1U_FAILURE;
struct gtpv1u_ue_data_s ue;
struct gtpv1u_ue_data_s *ue_inst_p;
struct gtpv1u_bearer_s *bearer_p;
hashtable_rc_t hash_rc;
struct gtpv1u_ue_data_s *ue_inst_p = NULL;
struct gtpv1u_bearer_s *bearer_p = NULL;
hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS;;
gtpv1u_data_t *gtpv1u_data_p = NULL;
memset(&ue, 0, sizeof(struct gtpv1u_ue_data_s));
ue.ue_id = ue_idP;
ue.ue_id = ue_id;
assert(gtpv1u_data_p != NULL);
assert(rab_id <= MAX_BEARERS_PER_UE);
AssertFatal(enb_idP >=0, "Bad parameter enb module id %u\n", enb_idP);
AssertFatal((rab_idP - GTPV1U_BEARER_OFFSET)< GTPV1U_MAX_BEARERS_ID, "Bad parameter rab id %u\n", rab_idP);
AssertFatal((rab_idP - GTPV1U_BEARER_OFFSET) >= 0 , "Bad parameter rab id %u\n", rab_idP);
gtpv1u_data_p = &gtpv1u_data_g;
/* Check that UE context is present in ue map. */
hash_rc = hashtable_get(gtpv1u_data_p->ue_mapping, (uint64_t)ue_id, (void**)&ue_inst_p);
hash_rc = hashtable_get(gtpv1u_data_p->ue_mapping, (uint64_t)ue_idP, (void**)&ue_inst_p);
if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS ) {
LOG_E(GTPU, "[UE %d] Trying to send data on non-existing UE context\n", ue_id);
LOG_E(GTPU, "[UE %d] Trying to send data on non-existing UE context\n", ue_idP);
return -1;
}
bearer_p = &ue_inst_p->bearers[rab_id];
bearer_p = &ue_inst_p->bearers[rab_idP - GTPV1U_BEARER_OFFSET];
/* Ensure the bearer in ready.
* TODO: handle the cases where the bearer is in HANDOVER state.
......@@ -284,32 +485,19 @@ int gtpv1u_new_data_req(gtpv1u_data_t *gtpv1u_data_p,
if (bearer_p->state != BEARER_IN_CONFIG)
return -1;
}
#ifdef GTPU_IN_KERNEL
dummy_dest_addr.sin_family = AF_INET;
dummy_dest_addr.sin_port = 5001;
dummy_dest_addr.sin_addr.s_addr = inet_addr("178.179.180.181");
if (sendto(gtpv1u_data_p->sock_desc[rab_id], (void *)buffer, buf_len, 0, (struct sockaddr *)&dummy_dest_addr, socklen) < 0) {
LOG_E(GTPU, "Error during send to socket %d : (%s:%d)\n", gtpv1u_data_p->sock_desc[rab_id], strerror(errno), errno);
return -1;
} else {
LOG_D(GTPU, "send to UDP socket %d, packet should be handled by iptables\n", gtpv1u_data_p->sock_desc[rab_id]);
}
#else
memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));
stack_req.apiType = NW_GTPV1U_ULP_API_SEND_TPDU;
// LG HACK stack_req.apiInfo.sendtoInfo.teid = bearer_p->teid_sgw;
stack_req.apiInfo.sendtoInfo.teid = 1;// LG HACK
stack_req.apiInfo.sendtoInfo.teid = bearer_p->teid_sgw;
stack_req.apiInfo.sendtoInfo.ipAddr = bearer_p->sgw_ip_addr;
rc = nwGtpv1uGpduMsgNew(gtpv1u_data_p->gtpv1u_stack,
//bearer_p->teid_sgw,
1, // LG FORCING 1 instead of bearer_p->teid_sgw
bearer_p->teid_sgw,
NW_FALSE,
gtpv1u_data_p->seq_num++,
buffer,
buf_len,
buffer_pP,
buf_lenP,
&(stack_req.apiInfo.sendtoInfo.hMsg));
if (rc != NW_GTPV1U_OK) {
......@@ -328,197 +516,381 @@ int gtpv1u_new_data_req(gtpv1u_data_t *gtpv1u_data_p,
LOG_E(GTPU, "nwGtpv1uMsgDelete failed: 0x%x\n", rc);
return -1;
}
#endif
LOG_E(GTPU, "%s() return code OK\n", __FUNCTION__);
return 0;
}
#ifdef GTPU_IN_KERNEL
#undef GTPV1U_PACKET_RX_RING
/// The number of frames in the ring
// This number is not set in stone. Nor are block_size, block_nr or frame_size
#define CONF_RING_FRAMES 128
/// Offset of data from start of frame
#define PKT_OFFSET (TPACKET_ALIGN(sizeof(struct tpacket_hdr)) + \
TPACKET_ALIGN(sizeof(struct sockaddr_ll)))
int gtpv1u_eNB_create_sockets(gtpv1u_data_t *gtpv1u_data_p)
static int gtpv1u_create_s1u_tunnel(gtpv1u_enb_create_tunnel_req_t *create_tunnel_req_pP)
{
int value, mark;
const int *val_p=&value;
struct ifreq ifr;
#ifdef GTPV1U_PACKET_RX_RING
struct tpacket_req tp;
#endif
/* Create a new nw-gtpv1-u stack req using API */
NwGtpv1uUlpApiT stack_req;
NwGtpv1uRcT rc = NW_GTPV1U_FAILURE;
/* Local tunnel end-point identifier */
teid_t s1u_teid = 0;
gtpv1u_teid_data_t *gtpv1u_teid_data_p = NULL;
gtpv1u_ue_data_t *gtpv1u_ue_data_p = NULL;
MessageDef *message_p = NULL;
hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS;
int i;
ebi_t eps_bearer_id = 0;
if (gtpv1u_data_p == NULL) {
return -1;
}
message_p = itti_alloc_new_message(TASK_GTPV1_U, GTPV1U_ENB_CREATE_TUNNEL_RESP);
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).ue_index = create_tunnel_req_pP->ue_index;
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).status = 0;
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).num_tunnels = 0;
GTPU_DEBUG("Creating socket for GTPV1U on %s if index %u\n", gtpv1u_data_p->interface_name, gtpv1u_data_p->interface_index);
for (i = 0; i < create_tunnel_req_pP->num_tunnels; i++) {
eps_bearer_id = create_tunnel_req_pP->eps_bearer_id[i];
GTPU_DEBUG("Rx GTPV1U_ENB_CREATE_TUNNEL_REQ ue_index %u eps bearer id %u\n",
create_tunnel_req_pP->ue_index, eps_bearer_id);
memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));
for (mark = 0; mark <= 15; mark++) {
stack_req.apiType = NW_GTPV1U_ULP_API_CREATE_TUNNEL_ENDPOINT;
gtpv1u_data_p->sock_desc[mark] = socket( PF_INET , SOCK_DGRAM, 0);
if (gtpv1u_data_p->sock_desc[mark] < 0) {
GTPU_ERROR("Error during socket creation (%s:%d)\n",strerror(errno), errno);
goto error;
do {
s1u_teid = gtpv1u_new_teid();
GTPU_DEBUG("gtpv1u_create_s1u_tunnel() 0x%x %u(dec)\n", s1u_teid, s1u_teid);
stack_req.apiInfo.createTunnelEndPointInfo.teid = s1u_teid;
stack_req.apiInfo.createTunnelEndPointInfo.hUlpSession = 0;
stack_req.apiInfo.createTunnelEndPointInfo.hStackSession = 0;
rc = nwGtpv1uProcessUlpReq(gtpv1u_data_g.gtpv1u_stack, &stack_req);
GTPU_DEBUG(".\n");
} while (rc != NW_GTPV1U_OK);
//-----------------------
// PDCP->GTPV1U mapping
//-----------------------
hash_rc = hashtable_get(gtpv1u_data_g.ue_mapping, create_tunnel_req_pP->ue_index, (void **)&gtpv1u_ue_data_p);
if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
gtpv1u_ue_data_p = calloc (1, sizeof(gtpv1u_ue_data_t));
gtpv1u_ue_data_p->ue_id = create_tunnel_req_pP->ue_index;
gtpv1u_ue_data_p->instance_id = 0; // TO DO
#warning "TO DO TO DO gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].sgw_ip_addr"
gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].state = BEARER_IN_CONFIG;
gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_eNB = s1u_teid;
gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_sgw = create_tunnel_req_pP->sgw_S1u_teid[i];
hash_rc = hashtable_insert(gtpv1u_data_g.ue_mapping, create_tunnel_req_pP->ue_index, gtpv1u_ue_data_p);
AssertFatal(hash_rc == HASH_TABLE_OK, "Error inserting ue_mapping in GTPV1U hashtable");
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).enb_S1u_teid[i] = s1u_teid;
} else if (hash_rc == HASH_TABLE_OK) {
gtpv1u_ue_data_p->ue_id = create_tunnel_req_pP->ue_index;
gtpv1u_ue_data_p->instance_id = 0; // TO DO
#warning "TO DO TO DO gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].sgw_ip_addr"
gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].state = BEARER_IN_CONFIG;
gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_eNB = s1u_teid;
gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_sgw = create_tunnel_req_pP->sgw_S1u_teid[i];
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).enb_S1u_teid[i] = s1u_teid;
} else {
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).enb_S1u_teid[i] = 0;
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).status = 0xFF;
}
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).eps_bearer_id[i] = eps_bearer_id;
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).num_tunnels += 1;
//-----------------------
// GTPV1U->PDCP mapping
//-----------------------
hash_rc = hashtable_get(gtpv1u_data_g.teid_mapping, s1u_teid, (void**)&gtpv1u_teid_data_p);
if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
gtpv1u_teid_data_p = calloc (1, sizeof(gtpv1u_teid_data_t));
gtpv1u_teid_data_p->enb_id = 0; // TO DO
gtpv1u_teid_data_p->ue_id = create_tunnel_req_pP->ue_index;
gtpv1u_teid_data_p->eps_bearer_id = eps_bearer_id;
hash_rc = hashtable_insert(gtpv1u_data_g.teid_mapping, s1u_teid, gtpv1u_teid_data_p);
AssertFatal(hash_rc == HASH_TABLE_OK, "Error inserting teid mapping in GTPV1U hashtable");
} else {
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).enb_S1u_teid[i] = 0;
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).status = 0xFF;
}
// // socket options, tell the kernel we provide the IP structure
// if(setsockopt(sgi_data_p->sd[rab_id], IPPROTO_IP, IP_HDRINCL, &on, sizeof(on)) < 0)
// {
// SGI_IF_ERROR("Error during socket setsockopt IP_HDRINCL (%s:%d)\n", strerror(errno), errno);
// goto error;
// }
//
//
// setting socket option to use MARK value
//value = rab_id + SGI_MIN_EPS_BEARER_ID;
value = mark;
if (setsockopt (gtpv1u_data_p->sock_desc[mark], SOL_SOCKET, SO_MARK, val_p, sizeof (value)) < 0)
{
GTPU_ERROR("error notifying kernel about MARK");
goto error;
}
GTPU_DEBUG("Created socket %d for rab_id %d (for any UE context)\n", gtpv1u_data_p->sock_desc[mark], value);
GTPU_DEBUG("Tx GTPV1U_ENB_CREATE_TUNNEL_RESP ue_index %u status %d\n",
create_tunnel_req_pP->ue_index,
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).status);
return itti_send_msg_to_task(TASK_RRC_ENB, INSTANCE_DEFAULT, message_p);
}
#ifdef SGI_PACKET_RX_RING
// tell kernel to export data through mmap()ped ring
tp.tp_block_size = CONF_RING_FRAMES * getpagesize();
tp.tp_block_nr = 1;
tp.tp_frame_size = getpagesize();
tp.tp_frame_nr = CONF_RING_FRAMES;
if (setsockopt(gtpv1u_data_p->sock_desc[mark], SOL_PACKET, PACKET_RX_RING, (void*) &tp, sizeof(tp))) {
GTPU_ERROR("setsockopt() ring\n");
goto error;
}
// open ring
gtpv1u_data_p->sock_mmap_ring[mark] = mmap(0, tp.tp_block_size * tp.tp_block_nr, PROT_READ | PROT_WRITE, MAP_SHARED, sgi_data_p->sd[mark], 0);
if (!gtpv1u_data_p->sock_mmap_ring[mark]) {
GTPU_ERROR("Failed to mmap socket (%s:%d)\n", strerror(errno), errno);
goto error;
}
/* Setup our ringbuffer */
gtpv1u_data_p->malloc_ring[mark] = malloc(tp.tp_frame_nr * sizeof(struct iovec));
for(i=0; i<tp.tp_frame_nr; i++) {
gtpv1u_data_p->malloc_ring[mark][i].iov_base=(void *)((long)gtpv1u_data_p->sock_mmap_ring[mark])+(i*tp.tp_frame_size);
gtpv1u_data_p->malloc_ring[mark][i].iov_len=tp.tp_frame_size;
}
static int gtpv1u_delete_s1u_tunnel(gtpv1u_enb_delete_tunnel_req_t *req_pP)
{
NwGtpv1uUlpApiT stack_req;
NwGtpv1uRcT rc = NW_GTPV1U_FAILURE;
MessageDef *message_p = NULL;
gtpv1u_ue_data_t *gtpv1u_ue_data_p = NULL;
hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS;
GTPU_DEBUG("Rx GTPV1U_ENB_DELETE_TUNNEL user index %u eNB S1U teid %u eps bearer id %u\n", req_pP->ue_index, req_pP->enb_S1u_teid, req_pP->eps_bearer_id);
message_p = itti_alloc_new_message(TASK_GTPV1_U, GTPV1U_ENB_DELETE_TUNNEL_RESP);
#endif
GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).enb_S1u_teid = req_pP->enb_S1u_teid;
GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).ue_index = req_pP->ue_index;
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).status = 0;
memset(&ifr, 0, sizeof(ifr));
snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), gtpv1u_data_p->interface_name);
if (setsockopt (gtpv1u_data_p->sock_desc[mark], SOL_SOCKET, SO_BINDTODEVICE,(void *)&ifr, sizeof(ifr)) < 0)
{
GTPU_ERROR("error notifying kernel about MARK");
goto error;
}
GTPU_DEBUG("Created socket %d for rab_id %d (for any UE context)\n", gtpv1u_data_p->sock_desc[mark], value);
}
return 0;
error:
GTPU_ERROR("ERROR (%s)\n", strerror(errno));
for (mark = 0; mark <= 15; mark++) {
if (gtpv1u_data_p->sock_desc[mark] > 0) {
close(gtpv1u_data_p->sock_desc[mark]);
}
gtpv1u_data_p->sock_desc[mark] = -1;
memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));
stack_req.apiType = NW_GTPV1U_ULP_API_DESTROY_TUNNEL_ENDPOINT;
GTPU_DEBUG("gtpv1u_delte_s1u_tunnel() %u\n", req_pP->enb_S1u_teid);
stack_req.apiInfo.createTunnelEndPointInfo.teid = req_pP->enb_S1u_teid;
stack_req.apiInfo.createTunnelEndPointInfo.hUlpSession = 0;
stack_req.apiInfo.createTunnelEndPointInfo.hStackSession = 0;
rc = nwGtpv1uProcessUlpReq(gtpv1u_data_g.gtpv1u_stack, &stack_req);
GTPU_DEBUG(".\n");
}
return -1;
if (rc != NW_GTPV1U_OK) {
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).status = 0xFF;
GTPU_ERROR("NW_GTPV1U_ULP_API_DESTROY_TUNNEL_ENDPOINT failed");
}
//-----------------------
// PDCP->GTPV1U mapping
//-----------------------
hash_rc = hashtable_get(gtpv1u_data_g.ue_mapping, req_pP->ue_index, (void**)&gtpv1u_ue_data_p);
if (hash_rc == HASH_TABLE_OK) {
gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id - GTPV1U_BEARER_OFFSET].state = BEARER_DOWN;
gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_eNB = 0;
gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_sgw = 0;
#warning "TO DO TO DO gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].sgw_ip_addr"
gtpv1u_ue_data_p->num_bearers -= 1;
if (gtpv1u_ue_data_p->num_bearers == 0) {
hash_rc = hashtable_remove(gtpv1u_data_g.ue_mapping, req_pP->ue_index);
GTPU_DEBUG("Removed user index %u,no more bearers configured\n", req_pP->ue_index);
}
}// else silently do nothing
//-----------------------
// GTPV1U->PDCP mapping
//-----------------------
hash_rc = hashtable_remove(gtpv1u_data_g.teid_mapping, req_pP->enb_S1u_teid);
if (hash_rc != HASH_TABLE_OK) {
GTPU_DEBUG("Removed user index %u , enb S1U teid %u not found\n", req_pP->ue_index, req_pP->enb_S1u_teid);
}
LOG_D(GTPU, "Tx GTPV1U_ENB_DELETE_TUNNEL_RESP user index %u eNB S1U teid %u status %u\n",
GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).ue_index,
GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).enb_S1u_teid,
GTPV1U_ENB_CREATE_TUNNEL_RESP(message_p).status);
return itti_send_msg_to_task(TASK_RRC_ENB, INSTANCE_DEFAULT, message_p);
}
#endif
int gtpv1u_eNB_init(gtpv1u_data_t *gtpv1u_data_p)
static int gtpv1u_eNB_init(void)
{
NwGtpv1uRcT rc;
NwGtpv1uLogMgrEntityT log;
NwGtpv1uUdpEntityT udp;
int ret;
NwGtpv1uRcT rc = NW_GTPV1U_FAILURE;
NwGtpv1uUlpEntityT ulp;
NwGtpv1uUdpEntityT udp;
NwGtpv1uLogMgrEntityT log;
NwGtpv1uTimerMgrEntityT tmr;
Enb_properties_t *enb_properties_p = NULL;
if (gtpv1u_data_p == NULL) {
return -1;
}
//(gtpv1u_data_p was allocated with calloc (zeroed))
// GTPU_INFO("Initializing GTPU stack for eNB %u\n",
// gtpv1u_data_p->eNB_id);
enb_properties_p = enb_config_get()->properties[0];
/* Initialize UE tree */
//RB_INIT(&gtpv1u_data_p->gtpv1u_ue_map_head);
#ifdef GTPU_IN_KERNEL
#warning hardcoded ENB GTPV1U interface name
gtpv1u_data_p->interface_name = "upenb0";
gtpv1u_data_p->interface_index = if_nametoindex(gtpv1u_data_p->interface_name);
memset(&gtpv1u_data_g, 0, sizeof(gtpv1u_data_g));
gtpv1u_eNB_create_sockets(gtpv1u_data_p);
#endif
GTPU_INFO("Initializing GTPU stack %p\n",&gtpv1u_data_g);
//gtpv1u_data_g.gtpv1u_stack;
/* Initialize UE hashtable */
gtpv1u_data_p->ue_mapping = hashtable_create (256, NULL, NULL);
if (gtpv1u_data_p->ue_mapping == NULL) {
perror("hashtable_create");
GTPU_ERROR("Initializing TASK_GTPV1_U task interface: ERROR\n");
gtpv1u_data_g.ue_mapping = hashtable_create (32, NULL, NULL);
AssertFatal(gtpv1u_data_g.ue_mapping != NULL, " ERROR Initializing TASK_GTPV1_U task interface: in hashtable_create returned %p\n", gtpv1u_data_g.ue_mapping);
gtpv1u_data_g.teid_mapping = hashtable_create (256, NULL, NULL);
AssertFatal(gtpv1u_data_g.teid_mapping != NULL, " ERROR Initializing TASK_GTPV1_U task interface: in hashtable_create\n");
gtpv1u_data_g.enb_ip_address_for_S1u_S12_S4_up = enb_properties_p->enb_ipv4_address_for_S1U;
gtpv1u_data_g.ip_addr = NULL;
gtpv1u_data_g.enb_port_for_S1u_S12_S4_up = GTPV1U_UDP_PORT;
//gtpv1u_data_g.udp_data;
gtpv1u_data_g.seq_num = 0;
gtpv1u_data_g.restart_counter = 0;
/* Initializing GTPv1-U stack */
if ((rc = nwGtpv1uInitialize(&gtpv1u_data_g.gtpv1u_stack)) != NW_GTPV1U_OK) {
LOG_E(GTPU, "Failed to setup nwGtpv1u stack %x\n", rc);
return -1;
}
if (udp_create_connection(gtpv1u_data_p->ip_addr, GTPV1U_UDP_PORT,
&gtpv1u_data_p->udp_data, data_recv_callback, (void *)gtpv1u_data_p) < 0) {
if ((rc = nwGtpv1uSetLogLevel(gtpv1u_data_g.gtpv1u_stack,
NW_LOG_LEVEL_DEBG)) != NW_GTPV1U_OK) {
LOG_E(GTPU, "Failed to setup loglevel for stack %x\n", rc);
return -1;
}
/* Initializing GTPv1-U stack */
if ((rc = nwGtpv1uInitialize(&gtpv1u_data_p->gtpv1u_stack)) != NW_GTPV1U_OK) {
GTPU_ERROR("Failed to setup nwGtpv1u stack %x\n", rc);
/* Set the ULP API callback. Called once message have been processed by the
* nw-gtpv1u stack.
*/
ulp.ulpReqCallback = gtpv1u_eNB_process_stack_req;
if ((rc = nwGtpv1uSetUlpEntity(gtpv1u_data_g.gtpv1u_stack, &ulp)) != NW_GTPV1U_OK) {
LOG_E(GTPU, "nwGtpv1uSetUlpEntity: %x", rc);
return -1;
}
/* Set up the log interface and register the log entity */
log.logReqCallback = gtpv1u_eNB_log_request;
/* nw-gtpv1u stack requires an udp callback to send data over UDP.
* We provide a wrapper to UDP task.
*/
udp.udpDataReqCallback = gtpv1u_eNB_send_udp_msg;
if ((rc = nwGtpv1uSetLogMgrEntity(gtpv1u_data_p->gtpv1u_stack,
&log)) != NW_GTPV1U_OK) {
GTPU_ERROR("nwGtpv1uSetLogMgrEntity: %x\n", rc);
if ((rc = nwGtpv1uSetUdpEntity(gtpv1u_data_g.gtpv1u_stack, &udp)) != NW_GTPV1U_OK) {
GTPU_ERROR("nwGtpv1uSetUdpEntity: %x", rc);
return -1;
}
udp.hUdp = (NwGtpv1uUdpHandleT) &gtpv1u_data_p->udp_data;
udp.udpDataReqCallback = gtpv1u_eNB_udp_req;
log.logReqCallback = gtpv1u_eNB_log_request;
if ((rc = nwGtpv1uSetUdpEntity(gtpv1u_data_p->gtpv1u_stack,
&udp)) != NW_GTPV1U_OK) {
GTPU_ERROR("nwGtpv1uSetUdpEntity: %x\n", rc);
if ((rc = nwGtpv1uSetLogMgrEntity(gtpv1u_data_g.gtpv1u_stack, &log)) != NW_GTPV1U_OK) {
LOG_E(GTPU, "nwGtpv1uSetLogMgrEntity: %x", rc);
return -1;
}
/* Set the ULP API callback. Called once message have been processed by the
* nw-gtpv1u stack.
/* Timer interface is more complicated as both wrappers doesn't send a message
* to the timer task but call the timer API functions start/stop timer.
*/
ulp.ulpReqCallback = gtpv1u_process_stack_req;
tmr.tmrMgrHandle = 0;
tmr.tmrStartCallback = gtpv1u_start_timer_wrapper;
tmr.tmrStopCallback = gtpv1u_stop_timer_wrapper;
if ((rc = nwGtpv1uSetUlpEntity(gtpv1u_data_p->gtpv1u_stack,
&ulp)) != NW_GTPV1U_OK) {
GTPU_ERROR("nwGtpv1uSetUlpEntity: %x\n", rc);
if ((rc = nwGtpv1uSetTimerMgrEntity(gtpv1u_data_g.gtpv1u_stack, &tmr)) != NW_GTPV1U_OK) {
LOG_E(GTPU, "nwGtpv1uSetTimerMgrEntity: %x", rc);
return -1;
}
if ((rc = nwGtpv1uSetLogLevel(gtpv1u_data_p->gtpv1u_stack,
NW_LOG_LEVEL_DEBG)) != NW_GTPV1U_OK) {
GTPU_ERROR("nwGtpv1uSetLogLevel: %x\n", rc);
/* if (itti_create_task(TASK_GTPV1_U, &gtpv1u_eNB_thread, NULL) < 0) {
LOG_E(GTPU, "gtpv1u phtread_create: %s", strerror(errno));
return -1;
}
*/
ret = gtpv1u_eNB_send_init_udp(gtpv1u_data_g.enb_port_for_S1u_S12_S4_up);
if (ret < 0) {
return ret;
}
//gtpv1u_create_tunnel_endpoint(gtpv1u_data_p, 0, 0, "192.168.1.1", 2152);
LOG_D(GTPU, "Initializing GTPV1U interface for eNB: DONE\n");
return 0;
}
// GTPU_INFO("Initializing GTPU stack for eNB %u: DONE\n",
// gtpv1u_data_p->eNB_id);
return 0;
void *gtpv1u_eNB_task(void *args)
{
int rc = 0;
rc = gtpv1u_eNB_init();
AssertFatal(rc == 0, "gtpv1u_eNB_init Failed");
itti_mark_task_ready(TASK_GTPV1_U);
while(1) {
/* Trying to fetch a message from the message queue.
* If the queue is empty, this function will block till a
* message is sent to the task.
*/
MessageDef *received_message_p = NULL;
itti_receive_msg(TASK_GTPV1_U, &received_message_p);
DevAssert(received_message_p != NULL);
switch (ITTI_MSG_ID(received_message_p))
{
case GTPV1U_ENB_CREATE_TUNNEL_REQ: {
gtpv1u_create_s1u_tunnel(&received_message_p->ittiMsg.Gtpv1uCreateTunnelReq);
}
break;
case GTPV1U_ENB_DELETE_TUNNEL_REQ: {
gtpv1u_delete_s1u_tunnel(&received_message_p->ittiMsg.Gtpv1uDeleteTunnelReq);
}
break;
// DATA COMING FROM UDP
case UDP_DATA_IND: {
udp_data_ind_t *udp_data_ind_p;
udp_data_ind_p = &received_message_p->ittiMsg.udp_data_ind;
nwGtpv1uProcessUdpReq(gtpv1u_data_g.gtpv1u_stack,
udp_data_ind_p->buffer,
udp_data_ind_p->buffer_length,
udp_data_ind_p->peer_port,
udp_data_ind_p->peer_address);
free(udp_data_ind_p->buffer);
}
break;
// DATA TO BE SENT TO UDP
case GTPV1U_ENB_TUNNEL_DATA_REQ: {
gtpv1u_enb_tunnel_data_req_t *data_req_p = NULL;
NwGtpv1uUlpApiT stack_req;
NwGtpv1uRcT rc = NW_GTPV1U_FAILURE;
hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS;
gtpv1u_ue_data_t *gtpv1u_ue_data_p = NULL;
teid_t enb_s1u_teid = 0;
teid_t sgw_s1u_teid = 0;
data_req_p = &GTPV1U_ENB_TUNNEL_DATA_REQ(received_message_p);
//ipv4_send_data(ipv4_data_p->sd, data_ind_p->buffer, data_ind_p->length);
memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));
hash_rc = hashtable_get(gtpv1u_data_g.ue_mapping, (uint64_t)data_req_p->ue_index, (void**)&gtpv1u_ue_data_p);
if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
GTPU_ERROR("nwGtpv1uProcessUlpReq failed: while getting ue_index %u in hashtable ue_mapping\n", data_req_p->ue_index);
} else {
if ((data_req_p->rab_id >= GTPV1U_BEARER_OFFSET) && (data_req_p->rab_id <= max_val_DRB_Identity)) {
enb_s1u_teid = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_eNB;
sgw_s1u_teid = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_sgw;
stack_req.apiType = NW_GTPV1U_ULP_API_SEND_TPDU;
stack_req.apiInfo.sendtoInfo.teid = sgw_s1u_teid;
stack_req.apiInfo.sendtoInfo.ipAddr = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].sgw_ip_addr;
rc = nwGtpv1uGpduMsgNew(
gtpv1u_data_g.gtpv1u_stack,
sgw_s1u_teid,
NW_FALSE,
gtpv1u_data_g.seq_num++,
data_req_p->buffer,
data_req_p->length,
&(stack_req.apiInfo.sendtoInfo.hMsg));
if (rc != NW_GTPV1U_OK) {
GTPU_ERROR("nwGtpv1uGpduMsgNew failed: 0x%x\n", rc);
} else {
rc = nwGtpv1uProcessUlpReq(gtpv1u_data_g.gtpv1u_stack, &stack_req);
if (rc != NW_GTPV1U_OK) {
GTPU_ERROR("nwGtpv1uProcessUlpReq failed: 0x%x\n", rc);
}
rc = nwGtpv1uMsgDelete(gtpv1u_data_g.gtpv1u_stack,
stack_req.apiInfo.sendtoInfo.hMsg);
if (rc != NW_GTPV1U_OK) {
GTPU_ERROR("nwGtpv1uMsgDelete failed: 0x%x\n", rc);
}
}
}
}
/* Buffer is no longer needed, free it */
free(data_req_p->buffer);
}
break;
case TERMINATE_MESSAGE: {
if (gtpv1u_data_g.ue_mapping != NULL) {
hashtable_destroy (gtpv1u_data_g.ue_mapping);
}
if (gtpv1u_data_g.teid_mapping != NULL) {
hashtable_destroy (gtpv1u_data_g.teid_mapping);
}
itti_exit_task();
} break;
case TIMER_HAS_EXPIRED:
nwGtpv1uProcessTimeout(&received_message_p->ittiMsg.timer_has_expired.arg);
break;
default: {
GTPU_ERROR("Unkwnon message ID %d:%s\n",
ITTI_MSG_ID(received_message_p),
ITTI_MSG_NAME(received_message_p));
}
break;
}
rc = itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), received_message_p);
AssertFatal(rc == EXIT_SUCCESS, "Failed to free memory (%d)!\n", rc);
received_message_p = NULL;
}
return NULL;
}
#include "NwGtpv1u.h"
#include "gtpv1u.h"
#include "udp_eNB_task.h"
#include "hashtable.h"
/*******************************************************************************
Eurecom OpenAirInterface Core Network
Copyright(c) 1999 - 2014 Eurecom
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information
Openair Admin: openair_admin@eurecom.fr
Openair Tech : openair_tech@eurecom.fr
Forums : http://forums.eurecom.fsr/openairinterface
Address : EURECOM,
Campus SophiaTech,
450 Route des Chappes,
CS 50193
06904 Biot Sophia Antipolis cedex,
FRANCE
*******************************************************************************/
/*! \file gtpv1u_eNB_defs.h
* \brief
* \author Sebastien ROUX, Lionel GAUTHIER
* \version 1.0
* \company Eurecom
* \email: lionel.gauthier@eurecom.fr
*/
//TEST LG #define GTPU_IN_KERNEL
#include "hashtable.h"
//#include "tree.h"
#ifndef GTPV1U_ENB_DEFS_H_
#define GTPV1U_ENB_DEFS_H_
#define GTPV1U_UDP_PORT (2152)
#define GTPV1U_BEARER_OFFSET 3
#define MAX_BEARERS_PER_UE (11)
#define GTPV1U_MAX_BEARERS_ID (max_val_DRB_Identity - GTPV1U_BEARER_OFFSET)
typedef enum {
BEARER_DOWN = 0,
......@@ -23,28 +57,37 @@ typedef enum {
BEARER_MAX,
} bearer_state_t;
typedef struct gtpv1u_teid_data_s {
/* UE identifier for oaisim stack */
module_id_t enb_id;
module_id_t ue_id;
ebi_t eps_bearer_id;
} gtpv1u_teid_data_t;
typedef struct gtpv1u_bearer_s {
/* TEID used in dl and ul */
uint32_t teid_eNB; ///< eNB TEID
uint32_t teid_sgw; ///< Remote TEID
teid_t teid_eNB; ///< eNB TEID
teid_t teid_sgw; ///< Remote TEID
uint32_t sgw_ip_addr;
uint16_t port;
NwGtpv1uStackSessionHandleT stack_session;
tcp_udp_port_t port;
//NwGtpv1uStackSessionHandleT stack_session;
bearer_state_t state;
} gtpv1u_bearer_t;
typedef struct gtpv1u_ue_data_s {
/* UE identifier for oaisim stack */
uint8_t ue_id;
module_id_t ue_id;
/* Unique identifier used between PDCP and GTP-U to distinguish UEs */
uint32_t instance_id;
int num_bearers;
/* Bearer related data.
* Note that the first LCID available for data is 3 and we fixed the maximum
* number of e-rab per UE to be 11. The real rb id will 3 + rab_id (0..10).
* number of e-rab per UE to be (32 [id range]), max RB is 11. The real rb id will 3 + rab_id (3..32).
*/
gtpv1u_bearer_t bearers[MAX_BEARERS_PER_UE];
gtpv1u_bearer_t bearers[GTPV1U_MAX_BEARERS_ID];
//RB_ENTRY(gtpv1u_ue_data_s) gtpv1u_ue_node;
} gtpv1u_ue_data_t;
......@@ -52,14 +95,18 @@ typedef struct gtpv1u_ue_data_s {
typedef struct gtpv1u_data_s{
/* nwgtpv1u stack internal data */
NwGtpv1uStackHandleT gtpv1u_stack;
/* RB tree of UEs */
hash_table_t *ue_mapping;
hash_table_t *ue_mapping; // PDCP->GTPV1U
hash_table_t *teid_mapping; // GTPV1U -> PDCP
//RB_HEAD(gtpv1u_ue_map, gtpv1u_ue_data_s) gtpv1u_ue_map_head;
/* Local IP address to use */
uint32_t enb_ip_address_for_S1u_S12_S4_up;
char *ip_addr;
tcp_udp_port_t enb_port_for_S1u_S12_S4_up;
/* UDP internal data */
udp_data_t udp_data;
//udp_data_t udp_data;
uint16_t seq_num;
uint8_t restart_counter;
......@@ -74,13 +121,19 @@ typedef struct gtpv1u_data_s{
#endif
} gtpv1u_data_t;
int gtpv1u_new_data_req(gtpv1u_data_t *gtpv1u_data_p,
uint8_t ue_id, uint8_t rab_id,
uint8_t *buffer, uint32_t buf_len);
int gtpv1u_initial_req(gtpv1u_data_t *gtpv1u_data_p, uint32_t teid,
uint16_t port, uint32_t address);
int gtpv1u_eNB_init(gtpv1u_data_t *gtpv1u_data_p);
int
gtpv1u_new_data_req(
uint8_t enb_id,
uint8_t ue_id,
uint8_t rab_id,
uint8_t *buffer,
uint32_t buf_len);
int
gtpv1u_initial_req(
gtpv1u_data_t *gtpv1u_data_p,
uint32_t teid,
uint16_t port,
uint32_t address);
#endif /* GTPV1U_ENB_DEFS_H_ */
......@@ -62,8 +62,6 @@ typedef struct {
/* Local IP address to use */
uint32_t sgw_ip_address_for_S1u_S12_S4_up;
char *ip_addr;
/* UDP internal data */
//udp_data_t udp_data;
uint16_t seq_num;
uint8_t restart_counter;
......
/*******************************************************************************
Eurecom OpenAirInterface
Copyright(c) 1999 - 2012 Eurecom
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information
Openair Admin: openair_admin@eurecom.fr
Openair Tech : openair_tech@eurecom.fr
Forums : http://forums.eurecom.fr/openairinterface
Address : EURECOM, Campus SophiaTech, 450 Route des Chappes
06410 Biot FRANCE
Eurecom OpenAirInterface core network
Copyright(c) 1999 - 2014 Eurecom
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information
Openair Admin: openair_admin@eurecom.fr
Openair Tech : openair_tech@eurecom.fr
Forums : http://forums.eurecom.fsr/openairinterface
Address : EURECOM,
Campus SophiaTech,
450 Route des Chappes,
CS 50193
06904 Biot Sophia Antipolis cedex,
FRANCE
*******************************************************************************/
/*! \file gtpv1u_task.c
* \brief
* \author Sebastien ROUX, Lionel Gauthier
* \company Eurecom
* \email: lionel.gauthier@eurecom.fr
*/
#include <stdio.h>
#include <string.h>
#include <unistd.h>
......@@ -189,9 +197,9 @@ static int gtpv1u_create_s1u_tunnel(Gtpv1uCreateTunnelReq *create_tunnel_reqP)
NwGtpv1uUlpApiT stack_req;
NwGtpv1uRcT rc;
/* Local tunnel end-point identifier */
uint32_t s1u_teid;
gtpv1u_teid2enb_info_t *gtpv1u_teid2enb_info;
MessageDef *message_p;
uint32_t s1u_teid = 0;
gtpv1u_teid2enb_info_t *gtpv1u_teid2enb_info = NULL;
MessageDef *message_p = NULL;
hashtable_rc_t hash_rc;
GTPU_DEBUG("Rx GTPV1U_CREATE_TUNNEL_REQ Context %d\n", create_tunnel_reqP->context_teid);
......@@ -387,11 +395,11 @@ static void *gtpv1u_thread(void *args)
// DATA TO BE SENT TO UDP
case GTPV1U_TUNNEL_DATA_REQ: {
Gtpv1uTunnelDataReq *data_req_p;
Gtpv1uTunnelDataReq *data_req_p = NULL;
NwGtpv1uUlpApiT stack_req;
NwGtpv1uRcT rc;
hashtable_rc_t hash_rc;
gtpv1u_teid2enb_info_t *gtpv1u_teid2enb_info;
gtpv1u_teid2enb_info_t *gtpv1u_teid2enb_info = NULL;
data_req_p = &received_message_p->ittiMsg.gtpv1uTunnelDataReq;
//ipv4_send_data(ipv4_data_p->sd, data_ind_p->buffer, data_ind_p->length);
......
/*******************************************************************************
Eurecom OpenAirInterface core network
Copyright(c) 1999 - 2014 Eurecom
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information
Openair Admin: openair_admin@eurecom.fr
Openair Tech : openair_tech@eurecom.fr
Forums : http://forums.eurecom.fsr/openairinterface
Address : EURECOM,
Campus SophiaTech,
450 Route des Chappes,
CS 50193
06904 Biot Sophia Antipolis cedex,
FRANCE
*******************************************************************************/
/*! \file gtpv1u_task.c
* \brief
* \author Lionel Gauthier
* \company Eurecom
* \email: lionel.gauthier@eurecom.fr
*/
#include <stdlib.h>
#include <stdint.h>
#include "gtpv1u.h"
#define GTPV1U_LINEAR_TEID_ALLOCATION 1
//#define GTPV1U_LINEAR_TEID_ALLOCATION 1
#ifdef GTPV1U_LINEAR_TEID_ALLOCATION
static uint32_t g_gtpv1u_teid = 0;
......
......@@ -334,8 +334,8 @@ int mme_app_handle_create_sess_resp(SgwCreateSessionResponse *create_sess_resp_p
NAS_BEARER_PARAM(message_p).pre_emp_vulnerability = current_bearer_p->pre_emp_vulnerability;
NAS_BEARER_PARAM(message_p).pre_emp_capability = current_bearer_p->pre_emp_capability;
NAS_BEARER_PARAM(message_p).teid = current_bearer_p->s_gw_teid;
memcpy(&NAS_BEARER_PARAM(message_p).s_gw_address,
NAS_BEARER_PARAM(message_p).sgw_s1u_teid = current_bearer_p->s_gw_teid;
memcpy(&NAS_BEARER_PARAM(message_p).sgw_s1u_address,
&current_bearer_p->s_gw_address, sizeof(ip_address_t));
memcpy(&NAS_BEARER_PARAM(message_p).ambr, &ue_context_p->subscribed_ambr,
......
......@@ -98,13 +98,21 @@ void *mme_app_thread(void *args)
mme_app_statistics_display();
}
} break;
case TERMINATE_MESSAGE: {
/* Termination message received TODO -> release any data allocated */
itti_exit_task();
} break;
case S1AP_UE_CAPABILITIES_IND: {
// TO DO;
} break;
default: {
MME_APP_DEBUG("Unkwnon message ID %d:%s\n",
ITTI_MSG_ID(received_message_p), ITTI_MSG_NAME(received_message_p));
AssertFatal(0, "Unkwnon message ID %d:%s\n",
ITTI_MSG_ID(received_message_p), ITTI_MSG_NAME(received_message_p));
} break;
}
itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), received_message_p);
......
......@@ -138,7 +138,7 @@ typedef struct ue_context_s {
network_access_mode_t access_mode;
/* TODO: add ue radio cap, ms classmarks, cupported codecs */
/* TODO: add ue radio cap, ms classmarks, supported codecs */
/* TODO: add ue network capability, ms network capability */
/* TODO: add selected NAS algorithm */
......
MESSAGE_DEF(GTPV1U_ENB_CREATE_TUNNEL_REQ, MESSAGE_PRIORITY_MAX, gtpv1u_enb_create_tunnel_req_t, Gtpv1uCreateTunnelReq)
MESSAGE_DEF(GTPV1U_ENB_CREATE_TUNNEL_RESP, MESSAGE_PRIORITY_MAX, gtpv1u_enb_create_tunnel_resp_t, Gtpv1uCreateTunnelResp)
MESSAGE_DEF(GTPV1U_ENB_UPDATE_TUNNEL_REQ, MESSAGE_PRIORITY_MED, gtpv1u_enb_update_tunnel_req_t, Gtpv1uUpdateTunnelReq)
MESSAGE_DEF(GTPV1U_ENB_UPDATE_TUNNEL_RESP, MESSAGE_PRIORITY_MED, gtpv1u_enb_update_tunnel_resp_t, Gtpv1uUpdateTunnelResp)
MESSAGE_DEF(GTPV1U_ENB_DELETE_TUNNEL_REQ, MESSAGE_PRIORITY_MED, gtpv1u_enb_delete_tunnel_req_t, Gtpv1uDeleteTunnelReq)
MESSAGE_DEF(GTPV1U_ENB_DELETE_TUNNEL_RESP, MESSAGE_PRIORITY_MED, gtpv1u_enb_delete_tunnel_resp_t, Gtpv1uDeleteTunnelResp)
MESSAGE_DEF(GTPV1U_ENB_TUNNEL_DATA_IND, MESSAGE_PRIORITY_MED, gtpv1u_enb_tunnel_data_ind_t, Gtpv1uTunnelDataInd)
MESSAGE_DEF(GTPV1U_ENB_TUNNEL_DATA_REQ, MESSAGE_PRIORITY_MED, gtpv1u_enb_tunnel_data_req_t, Gtpv1uTunnelDataReq)
#ifndef GTPV1_U_MESSAGES_TYPES_H_
#define GTPV1_U_MESSAGES_TYPES_H_
#define GTPV1U_MAX_BEARERS_PER_UE max_val_DRB_Identity
#define GTPV1U_ENB_CREATE_TUNNEL_REQ(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uCreateTunnelReq
#define GTPV1U_ENB_CREATE_TUNNEL_RESP(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uCreateTunnelResp
#define GTPV1U_ENB_UPDATE_TUNNEL_REQ(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uUpdateTunnelReq
#define GTPV1U_ENB_UPDATE_TUNNEL_RESP(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uUpdateTunnelResp
#define GTPV1U_ENB_DELETE_TUNNEL_REQ(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uDeleteTunnelReq
#define GTPV1U_ENB_DELETE_TUNNEL_RESP(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uDeleteTunnelResp
#define GTPV1U_ENB_TUNNEL_DATA_IND(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uTunnelDataInd
#define GTPV1U_ENB_TUNNEL_DATA_REQ(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uTunnelDataReq
typedef struct gtpv1u_enb_create_tunnel_req_s {
module_id_t ue_index;
int num_tunnels;
teid_t sgw_S1u_teid[GTPV1U_MAX_BEARERS_PER_UE]; ///< Tunnel Endpoint Identifier
ebi_t eps_bearer_id[GTPV1U_MAX_BEARERS_PER_UE];
transport_layer_addr_t sgw_addr[GTPV1U_MAX_BEARERS_PER_UE];
} gtpv1u_enb_create_tunnel_req_t;
typedef struct gtpv1u_enb_create_tunnel_resp_s {
uint8_t status; ///< Status of S1U endpoint creation (Failed = 0xFF or Success = 0x0)
uint8_t ue_index;
int num_tunnels;
teid_t enb_S1u_teid[GTPV1U_MAX_BEARERS_PER_UE]; ///< Tunnel Endpoint Identifier
ebi_t eps_bearer_id[GTPV1U_MAX_BEARERS_PER_UE];
transport_layer_addr_t enb_addr[GTPV1U_MAX_BEARERS_PER_UE];
} gtpv1u_enb_create_tunnel_resp_t;
typedef struct gtpv1u_enb_update_tunnel_req_s {
uint8_t ue_index;
teid_t enb_S1u_teid; ///< eNB S1U Tunnel Endpoint Identifier
teid_t sgw_S1u_teid; ///< SGW S1U local Tunnel Endpoint Identifier
transport_layer_addr_t sgw_addr;
ebi_t eps_bearer_id;
} gtpv1u_enb_update_tunnel_req_t;
typedef struct gtpv1u_enb_update_tunnel_resp_s {
uint8_t ue_index;
uint8_t status; ///< Status (Failed = 0xFF or Success = 0x0)
teid_t enb_S1u_teid; ///< eNB S1U Tunnel Endpoint Identifier
teid_t sgw_S1u_teid; ///< SGW S1U local Tunnel Endpoint Identifier
ebi_t eps_bearer_id;
} gtpv1u_enb_update_tunnel_resp_t;
typedef struct gtpv1u_enb_delete_tunnel_req_s {
module_id_t ue_index;
ebi_t eps_bearer_id;
teid_t enb_S1u_teid; ///< local SGW S11 Tunnel Endpoint Identifier
} gtpv1u_enb_delete_tunnel_req_t;
typedef struct gtpv1u_enb_delete_tunnel_resp_s {
uint8_t ue_index;
uint8_t status; ///< Status of S1U endpoint deleteion (Failed = 0xFF or Success = 0x0)
teid_t enb_S1u_teid; ///< local S1U Tunnel Endpoint Identifier to be deleted
} gtpv1u_enb_delete_tunnel_resp_t;
typedef struct gtpv1u_enb_tunnel_data_ind_s {
uint8_t ue_index;
uint8_t *buffer;
uint32_t length;
teid_t enb_S1u_teid; ///< Tunnel Endpoint Identifier
} gtpv1u_enb_tunnel_data_ind_t;
typedef struct gtpv1u_enb_tunnel_data_req_s {
uint8_t *buffer;
uint32_t length;
uint8_t ue_index;
rb_id_t rab_id;
} gtpv1u_enb_tunnel_data_req_t;
#endif /* GTPV1_U_MESSAGES_TYPES_H_ */
......@@ -14,4 +14,5 @@
#include "x2ap_messages_def.h"
#include "sctp_messages_def.h"
#include "udp_messages_def.h"
#include "gtpv1_u_messages_def.h"
......@@ -22,5 +22,6 @@
#include "x2ap_messages_types.h"
#include "sctp_messages_types.h"
#include "udp_messages_types.h"
#include "gtpv1_u_messages_types.h"
#endif /* MESSAGES_TYPES_H_ */
......@@ -101,6 +101,7 @@ typedef enum pdcp_transmission_mode_e {
//-----------------------------------------------------------------------------
// IP DRIVER / PDCP TYPES
//-----------------------------------------------------------------------------
typedef uint16_t tcp_udp_port_t;
typedef enum ip_traffic_type_e {
TRAFFIC_IPVX_TYPE_UNKNOWN = 0,
TRAFFIC_IPV6_TYPE_UNICAST = 1,
......@@ -127,4 +128,10 @@ typedef enum config_action_e {
CONFIG_ACTION_MBMS_ADD = 10,
CONFIG_ACTION_MBMS_MODIFY = 11
} config_action_t;
//-----------------------------------------------------------------------------
// GTPV1U TYPES
//-----------------------------------------------------------------------------
typedef uint32_t teid_t; // tunnel endpoint identifier
typedef uint8_t ebi_t; // eps bearer id
#endif
......@@ -26,7 +26,7 @@ TASK_DEF(TASK_RAL_ENB, TASK_PRIORITY_MED, 200)
// UDP TASK
TASK_DEF(TASK_UDP, TASK_PRIORITY_MED, 200)
// GTP_V1U task
TASK_DEF(TASK_GTPU, TASK_PRIORITY_MED, 200)
TASK_DEF(TASK_GTPV1_U, TASK_PRIORITY_MED, 200)
TASK_DEF(TASK_S1AP, TASK_PRIORITY_MED, 200)
/// X2ap task, acts as both source and target
TASK_DEF(TASK_X2AP, TASK_PRIORITY_MED, 200)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment