Commit e524cd94 authored by Xue Song

Add rrc_gNB_process_NGAP_PDUSESSION_SETUP_REQ

Add rrc_gNB_send_NGAP_PDUSESSION_SETUP_RESP
Fix some problems with PDU session setup
parent d1a055ff
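At a high level, the two new handlers tie an incoming NGAP_PDUSESSION_SETUP_REQ to NG-U tunnel creation and then report the resulting gNB TEIDs back towards the AMF. Below is a minimal, self-contained C sketch of that flow; the toy_* names are illustrative stand-ins only, not part of this commit (the real code routes ITTI messages through rrc_gNB_process_NGAP_PDUSESSION_SETUP_REQ, gtpv1u_create_ngu_tunnel and rrc_gNB_send_NGAP_PDUSESSION_SETUP_RESP).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SESSIONS 8

/* Simplified stand-ins for the NGAP setup request and the GTP-U tunnel response. */
typedef struct { int num; uint8_t psi[MAX_SESSIONS]; uint32_t upf_teid[MAX_SESSIONS]; } toy_setup_req_t;
typedef struct { int num; uint8_t psi[MAX_SESSIONS]; uint32_t gnb_teid[MAX_SESSIONS]; } toy_tunnel_resp_t;

/* Stand-in for gtpv1u_create_ngu_tunnel(): allocate one local TEID per requested PDU session. */
static int toy_create_ngu_tunnel(const toy_setup_req_t *req, toy_tunnel_resp_t *resp) {
  resp->num = req->num;
  for (int i = 0; i < req->num; i++) {
    resp->psi[i] = req->psi[i];
    resp->gnb_teid[i] = 0x1000u + (uint32_t)i;  /* the real code draws TEIDs from the GTP-U stack */
  }
  return 0;
}

/* Stand-in for the REQ handler followed by the RESP builder: create the tunnels,
 * then report the (PDU session id, gNB TEID) pairs that go back to the AMF. */
static int toy_handle_setup_req(const toy_setup_req_t *req) {
  toy_tunnel_resp_t resp;
  memset(&resp, 0, sizeof(resp));
  if (toy_create_ngu_tunnel(req, &resp) != 0)
    return -1;  /* the real code arms the NG release timers on failure */
  for (int i = 0; i < resp.num; i++)
    printf("PDU session %u: UPF TEID 0x%x <-> gNB TEID 0x%x\n",
           (unsigned)resp.psi[i], (unsigned)req->upf_teid[i], (unsigned)resp.gnb_teid[i]);
  return 0;
}

int main(void) {
  toy_setup_req_t req = { .num = 2, .psi = {1, 2}, .upf_teid = {0xabcd, 0xabce} };
  return toy_handle_setup_req(&req);
}

In the actual commit the per-session TEIDs come from the nw-gtpv1u stack and a tunnel-creation failure arms ue_release_timer_ng instead of returning an error to the caller.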
......@@ -47,6 +47,7 @@
#include "NwGtpv1uMsg.h"
#include "NwGtpv1uPrivate.h"
#include "gtpv1u_eNB_defs.h"
#include "gtpv1u_gNB_defs.h"
#include "PHY/defs_L1_NB_IoT.h"
#include "RRC/LTE/defs_NB_IoT.h"
......@@ -109,6 +110,8 @@ typedef struct {
struct gNB_MAC_INST_s **nrmac;
/// GTPu descriptor
gtpv1u_data_t *gtpv1u_data_g;
/// NR GTPu descriptor
nr_gtpv1u_data_t *nr_gtpv1u_data_g;
/// RU descriptors. These describe what each radio unit is supposed to do and contain the necessary functions for fronthaul interfaces
struct RU_t_s **ru;
/// Mask to indicate fronthaul setup status of RU (hard-limit to 64 RUs)
......
......@@ -54,6 +54,7 @@ typedef enum {
MSC_S1AP_ENB,
MSC_NGAP_GNB,
MSC_GTPU_ENB,
MSC_GTPU_GNB,
MSC_GTPU_SGW,
MSC_S1AP_MME,
MSC_NGAP_AMF,
......
......@@ -30,3 +30,4 @@ MESSAGE_DEF(GTPV1U_ENB_DATA_FORWARDING_IND, MESSAGE_PRIORITY_MED, gtpv1u_enb_dat
MESSAGE_DEF(GTPV1U_ENB_END_MARKER_REQ, MESSAGE_PRIORITY_MED, gtpv1u_enb_end_marker_req_t, Gtpv1uEndMarkerReq)
MESSAGE_DEF(GTPV1U_ENB_END_MARKER_IND, MESSAGE_PRIORITY_MED, gtpv1u_enb_end_marker_ind_t, Gtpv1uEndMarkerInd)
MESSAGE_DEF(GTPV1U_ENB_S1_REQ, MESSAGE_PRIORITY_MED, Gtpv1uS1Req, gtpv1uS1Req)
MESSAGE_DEF(GTPV1U_GNB_NG_REQ, MESSAGE_PRIORITY_MED, Gtpv1uNGReq, gtpv1uNGReq)
......@@ -26,6 +26,7 @@
#define GTPV1U_MAX_BEARERS_PER_UE max_val_LTE_DRB_Identity
#define NR_GTPV1U_MAX_BEARERS_PER_UE max_val_NR_DRB_Identity
#define GTPV1U_ENB_UPDATE_TUNNEL_REQ(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uUpdateTunnelReq
#define GTPV1U_ENB_UPDATE_TUNNEL_RESP(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uUpdateTunnelResp
......@@ -39,6 +40,7 @@
#define GTPV1U_ENB_END_MARKER_IND(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uEndMarkerInd
#define GTPV1U_ENB_S1_REQ(mSGpTR) (mSGpTR)->ittiMsg.gtpv1uS1Req
#define GTPV1U_GNB_NG_REQ(mSGpTR) (mSGpTR)->ittiMsg.gtpv1uNGReq
#define GTPV1U_ALL_TUNNELS_TEID (teid_t)0xFFFFFFFF
......@@ -170,4 +172,26 @@ typedef struct {
in_addr_t enb_ip_address_for_S1u_S12_S4_up;
tcp_udp_port_t enb_port_for_S1u_S12_S4_up;
} Gtpv1uS1Req;
typedef struct {
in_addr_t gnb_ip_address_for_NGu_up;
tcp_udp_port_t gnb_port_for_NGu_up;
} Gtpv1uNGReq;
typedef struct gtpv1u_gnb_create_tunnel_req_s {
rnti_t rnti;
int num_tunnels;
teid_t upf_NGu_teid[NR_GTPV1U_MAX_BEARERS_PER_UE]; ///< Tunnel Endpoint Identifier
pdusessionid_t pdusession_id[NR_GTPV1U_MAX_BEARERS_PER_UE];
transport_layer_addr_t upf_addr[NR_GTPV1U_MAX_BEARERS_PER_UE];
} gtpv1u_gnb_create_tunnel_req_t;
typedef struct gtpv1u_gnb_create_tunnel_resp_s {
uint8_t status; ///< Status of NG-U endpoint creation (Failed = 0xFF or Success = 0x0)
rnti_t rnti;
int num_tunnels;
teid_t gnb_NGu_teid[NR_GTPV1U_MAX_BEARERS_PER_UE]; ///< Tunnel Endpoint Identifier
pdusessionid_t pdusession_id[NR_GTPV1U_MAX_BEARERS_PER_UE];
transport_layer_addr_t gnb_addr;
} gtpv1u_gnb_create_tunnel_resp_t;
#endif /* GTPV1_U_MESSAGES_TYPES_H_ */
......@@ -263,8 +263,8 @@ typedef struct ngap_transport_layer_addr_s {
} while (0)
typedef struct pdusession_level_qos_parameter_s {
uint8_t qci;
uint8_t qfi;
uint64_t fiveQI;
ngap_allocation_retention_priority_t allocation_retention_priority;
} pdusession_level_qos_parameter_t;
......@@ -291,7 +291,7 @@ typedef enum pdusession_qosflow_mapping_ind_e{
}pdusession_qosflow_mapping_ind_t;
typedef struct pdusession_associate_qosflow_s{
uint8_t qci;
uint8_t qfi;
pdusession_qosflow_mapping_ind_t qos_flow_mapping_ind;
}pdusession_associate_qosflow_t;
......
......@@ -222,7 +222,7 @@ typedef enum config_action_e {
//-----------------------------------------------------------------------------
typedef uint32_t teid_t; // tunnel endpoint identifier
typedef uint8_t ebi_t; // eps bearer id
typedef uint8_t pdusessionid_t;
//-----------------------------------------------------------------------------
......
......@@ -714,19 +714,19 @@ int RCconfig_nr_gtpu(void ) {
if (address) {
MessageDef *message;
AssertFatal((message = itti_alloc_new_message(TASK_GNB_APP, GTPV1U_ENB_S1_REQ))!=NULL,"");
// IPV4_STR_ADDR_TO_INT_NWBO ( address, RC.gtpv1u_data_g->enb_ip_address_for_S1u_S12_S4_up, "BAD IP ADDRESS FORMAT FOR eNB S1_U !\n" );
// LOG_I(GTPU,"Configuring GTPu address : %s -> %x\n",address,RC.gtpv1u_data_g->enb_ip_address_for_S1u_S12_S4_up);
if (gnb_mode == 1) { // NSA
AssertFatal((message = itti_alloc_new_message(TASK_GNB_APP, GTPV1U_ENB_S1_REQ))!=NULL,"");
// IPV4_STR_ADDR_TO_INT_NWBO ( address, RC.gtpv1u_data_g->enb_ip_address_for_S1u_S12_S4_up, "BAD IP ADDRESS FORMAT FOR eNB S1_U !\n" );
// LOG_I(GTPU,"Configuring GTPu address : %s -> %x\n",address,RC.gtpv1u_data_g->enb_ip_address_for_S1u_S12_S4_up);
IPV4_STR_ADDR_TO_INT_NWBO (address, GTPV1U_ENB_S1_REQ(message).enb_ip_address_for_S1u_S12_S4_up, "BAD IP ADDRESS FORMAT FOR eNB S1_U !\n" );
LOG_I(GTPU,"Configuring GTPu address : %s -> %x\n",address,GTPV1U_ENB_S1_REQ(message).enb_ip_address_for_S1u_S12_S4_up);
GTPV1U_ENB_S1_REQ(message).enb_port_for_S1u_S12_S4_up = gnb_port_for_S1U;
} else {// TODO SA
IPV4_STR_ADDR_TO_INT_NWBO (address, GTPV1U_ENB_S1_REQ(message).enb_ip_address_for_S1u_S12_S4_up, "BAD IP ADDRESS FORMAT FOR gNB NG_U !\n" );
LOG_I(GTPU,"Configuring GTPu address : %s -> %x\n",address,GTPV1U_ENB_S1_REQ(message).enb_ip_address_for_S1u_S12_S4_up);
GTPV1U_ENB_S1_REQ(message).enb_port_for_S1u_S12_S4_up = gnb_port_for_NGU;
AssertFatal((message = itti_alloc_new_message(TASK_GNB_APP, GTPV1U_GNB_NG_REQ))!=NULL,"");
IPV4_STR_ADDR_TO_INT_NWBO (address, GTPV1U_GNB_NG_REQ(message).gnb_ip_address_for_NGu_up, "BAD IP ADDRESS FORMAT FOR gNB NG_U !\n" );
LOG_I(GTPU,"Configuring GTPu address : %s -> %x\n",address,GTPV1U_GNB_NG_REQ(message).gnb_ip_address_for_NGu_up);
GTPV1U_GNB_NG_REQ(message).gnb_port_for_NGu_up = gnb_port_for_NGU;
}
itti_send_msg_to_task (TASK_GTPV1_U, 0, message); // data model is wrong: gtpu doesn't have enb_id (or module_id)
......
......@@ -182,6 +182,13 @@ typedef enum nr_e_rab_satus_e {
NR_E_RAB_STATUS_FAILED,
} nr_e_rab_status_t;
typedef enum nr_pdu_session_status_e {
NR_PDU_SESSION_STATUS_NEW,
NR_PDU_SESSION_STATUS_DONE, // from the gNB perspective
NR_PDU_SESSION_STATUS_ESTABLISHED, // RRCReconfigurationComplete received from the UE
NR_PDU_SESSION_STATUS_FAILED,
} nr_pdu_session_status_t;
typedef struct nr_e_rab_param_s {
e_rab_t param;
uint8_t status;
......@@ -336,6 +343,8 @@ typedef struct gNB_RRC_UE_s {
/* Total number of e_rab already setup in the list */
uint8_t setup_e_rabs;
/* Total number of pdu session already setup in the list */
uint8_t setup_pdu_sessions;
/* Number of pdu session to be setup in the list */
uint8_t nb_of_pdusessions;
/* Number of e_rab to be modified in the list */
......@@ -351,12 +360,15 @@ typedef struct gNB_RRC_UE_s {
uint32_t gnb_gtp_teid[S1AP_MAX_E_RAB];
transport_layer_addr_t gnb_gtp_addrs[S1AP_MAX_E_RAB];
rb_id_t gnb_gtp_ebi[S1AP_MAX_E_RAB];
rb_id_t gnb_gtp_psi[S1AP_MAX_E_RAB];
uint32_t ul_failure_timer;
uint32_t ue_release_timer;
uint32_t ue_release_timer_thres;
uint32_t ue_release_timer_s1;
uint32_t ue_release_timer_thres_s1;
uint32_t ue_release_timer_ng;
uint32_t ue_release_timer_thres_ng;
uint32_t ue_release_timer_rrc;
uint32_t ue_release_timer_thres_rrc;
uint32_t ue_reestablishment_timer;
......
......@@ -1369,6 +1369,10 @@ void *rrc_gnb_task(void *args_p) {
rrc_gNB_process_NGAP_DOWNLINK_NAS(msg_p, msg_name_p, instance, &rrc_gNB_mui);
break;
case NGAP_PDUSESSION_SETUP_REQ:
rrc_gNB_process_NGAP_PDUSESSION_SETUP_REQ(msg_p, msg_name_p, instance);
break;
/*
#if defined(ENABLE_USE_MME)
......
......@@ -92,3 +92,52 @@ rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(
return -1;
}
}
int
nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(
const protocol_ctxt_t *const ctxt_pP,
const gtpv1u_gnb_create_tunnel_resp_t *const create_tunnel_resp_pP,
uint8_t *inde_list
) {
rnti_t rnti;
int i;
struct rrc_gNB_ue_context_s *ue_context_p = NULL;
if (create_tunnel_resp_pP) {
LOG_D(NR_RRC, PROTOCOL_NR_RRC_CTXT_UE_FMT" RX CREATE_TUNNEL_RESP num tunnels %u \n",
PROTOCOL_NR_RRC_CTXT_UE_ARGS(ctxt_pP),
create_tunnel_resp_pP->num_tunnels);
rnti = create_tunnel_resp_pP->rnti;
ue_context_p = rrc_gNB_get_ue_context(
RC.nrrrc[ctxt_pP->module_id],
ctxt_pP->rnti);
for (i = 0; i < create_tunnel_resp_pP->num_tunnels; i++) {
ue_context_p->ue_context.gnb_gtp_teid[inde_list[i]] = create_tunnel_resp_pP->gnb_NGu_teid[i];
ue_context_p->ue_context.gnb_gtp_addrs[inde_list[i]] = create_tunnel_resp_pP->gnb_addr;
ue_context_p->ue_context.gnb_gtp_psi[inde_list[i]] = create_tunnel_resp_pP->pdusession_id[i];
LOG_I(NR_RRC, PROTOCOL_NR_RRC_CTXT_UE_FMT" nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP tunnel (%u, %u) bearer UE context index %u, msg index %u, id %u, gtp addr len %d \n",
PROTOCOL_NR_RRC_CTXT_UE_ARGS(ctxt_pP),
create_tunnel_resp_pP->gnb_NGu_teid[i],
ue_context_p->ue_context.gnb_gtp_teid[inde_list[i]],
inde_list[i],
i,
create_tunnel_resp_pP->pdusession_id[i],
create_tunnel_resp_pP->gnb_addr.length);
}
MSC_LOG_RX_MESSAGE(
MSC_RRC_GNB,
MSC_GTPU_GNB,
NULL,0,
MSC_AS_TIME_FMT" CREATE_TUNNEL_RESP RNTI %"PRIx16" ntuns %u psid %u enb-s1u teid %u",
0,0,rnti,
create_tunnel_resp_pP->num_tunnels,
ue_context_p->ue_context.gnb_gtp_psi[0],
ue_context_p->ue_context.gnb_gtp_teid[0]);
(void)rnti; /* avoid gcc warning "set but not used" */
return 0;
} else {
return -1;
}
}
......@@ -37,3 +37,12 @@ rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(
const gtpv1u_enb_create_tunnel_resp_t *const create_tunnel_resp_pP,
uint8_t *inde_list
);
int
nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(
const protocol_ctxt_t *const ctxt_pP,
const gtpv1u_gnb_create_tunnel_resp_t *const create_tunnel_resp_pP,
uint8_t *inde_list
);
#endif
......@@ -43,7 +43,9 @@
#include "msc.h"
#include "gtpv1u_eNB_task.h"
#include "gtpv1u_gNB_task.h"
#include "RRC/LTE/rrc_eNB_GTPV1U.h"
#include "RRC/NR/rrc_gNB_GTPV1U.h"
#include "S1AP_NAS-PDU.h"
#include "executables/softmodem-common.h"
......@@ -762,3 +764,185 @@ rrc_gNB_send_NGAP_UPLINK_NAS(
LOG_I(NR_RRC,"Send RRC GNB UL Information Transfer \n");
}
}
//------------------------------------------------------------------------------
void
rrc_gNB_send_NGAP_PDUSESSION_SETUP_RESP(
const protocol_ctxt_t *const ctxt_pP,
rrc_gNB_ue_context_t *const ue_context_pP,
uint8_t xid
)
//------------------------------------------------------------------------------
{
MessageDef *msg_p;
int pdu_sessions_done = 0;
int pdu_sessions_failed = 0;
int pdusession;
int qos_flow_index;
msg_p = itti_alloc_new_message (TASK_RRC_GNB, NGAP_PDUSESSION_SETUP_RESP);
NGAP_PDUSESSION_SETUP_RESP(msg_p).gNB_ue_ngap_id = ue_context_pP->ue_context.gNB_ue_ngap_id;
for (pdusession = 0; pdusession < ue_context_pP->ue_context.setup_pdu_sessions; pdusession++) {
// if (xid == ue_context_pP->ue_context.pdusession[pdusession].xid) {
if (ue_context_pP->ue_context.pdusession[pdusession].status == NR_PDU_SESSION_STATUS_DONE) {
NGAP_PDUSESSION_SETUP_RESP(msg_p).pdusessions[pdusession].pdusession_id = ue_context_pP->ue_context.pdusession[pdusession].param.pdusession_id;
// NGAP_PDUSESSION_SETUP_RESP(msg_p).pdusessions[pdusession].pdusession_id = 1;
NGAP_PDUSESSION_SETUP_RESP(msg_p).pdusessions[pdusession].nb_of_qos_flow = ue_context_pP->ue_context.pdusession[pdusession].param.nb_qos;
NGAP_PDUSESSION_SETUP_RESP(msg_p).pdusessions[pdusession].gtp_teid = ue_context_pP->ue_context.gnb_gtp_teid[pdusession];
NGAP_PDUSESSION_SETUP_RESP(msg_p).pdusessions[pdusession].gNB_addr.pdu_session_type = PDUSessionType_ipv4;
NGAP_PDUSESSION_SETUP_RESP(msg_p).pdusessions[pdusession].gNB_addr.length = ue_context_pP->ue_context.gnb_gtp_addrs[pdusession].length;
memcpy(NGAP_PDUSESSION_SETUP_RESP(msg_p).pdusessions[pdusession].gNB_addr.buffer,
ue_context_pP->ue_context.gnb_gtp_addrs[pdusession].buffer, sizeof(uint8_t)*20);
for (qos_flow_index = 0; qos_flow_index < NGAP_PDUSESSION_SETUP_RESP(msg_p).pdusessions[pdusession].nb_of_qos_flow; qos_flow_index++) {
NGAP_PDUSESSION_SETUP_RESP(msg_p).pdusessions[pdusession].associated_qos_flows[qos_flow_index].qfi =
ue_context_pP->ue_context.pdusession[pdusession].param.qos[qos_flow_index].qfi;
NGAP_PDUSESSION_SETUP_RESP(msg_p).pdusessions[pdusession].associated_qos_flows[qos_flow_index].qos_flow_mapping_ind = QOSFLOW_MAPPING_INDICATION_DL;
}
ue_context_pP->ue_context.pdusession[pdusession].status = NR_PDU_SESSION_STATUS_ESTABLISHED;
LOG_I (NR_RRC,"gnb_gtp_addr (msg index %d, pdu_sessions index %d, status %d, xid %d): nb_of_pdusessions %d, pdusession_id %d, teid: %u, addr: %d.%d.%d.%d \n ",
pdu_sessions_done, pdusession, ue_context_pP->ue_context.pdusession[pdusession].status, xid,
ue_context_pP->ue_context.nb_of_pdusessions,
NGAP_PDUSESSION_SETUP_RESP (msg_p).pdusessions[pdu_sessions_done].pdusession_id,
NGAP_PDUSESSION_SETUP_RESP (msg_p).pdusessions[pdu_sessions_done].gtp_teid,
NGAP_PDUSESSION_SETUP_RESP (msg_p).pdusessions[pdu_sessions_done].gNB_addr.buffer[0],
NGAP_PDUSESSION_SETUP_RESP (msg_p).pdusessions[pdu_sessions_done].gNB_addr.buffer[1],
NGAP_PDUSESSION_SETUP_RESP (msg_p).pdusessions[pdu_sessions_done].gNB_addr.buffer[2],
NGAP_PDUSESSION_SETUP_RESP (msg_p).pdusessions[pdu_sessions_done].gNB_addr.buffer[3]);
pdu_sessions_done++;
} else if ((ue_context_pP->ue_context.pdusession[pdusession].status == NR_PDU_SESSION_STATUS_NEW) ||
(ue_context_pP->ue_context.pdusession[pdusession].status == NR_PDU_SESSION_STATUS_ESTABLISHED)) {
LOG_D (NR_RRC,"PDU-SESSION is NEW or already ESTABLISHED\n");
} else { /* to be improved */
ue_context_pP->ue_context.pdusession[pdusession].status = NR_PDU_SESSION_STATUS_FAILED;
NGAP_PDUSESSION_SETUP_RESP (msg_p).pdusessions_failed[pdu_sessions_failed].pdusession_id = ue_context_pP->ue_context.pdusession[pdusession].param.pdusession_id;
pdu_sessions_failed++;
// TODO add cause when it will be integrated
}
NGAP_PDUSESSION_SETUP_RESP(msg_p).nb_of_pdusessions = pdu_sessions_done;
NGAP_PDUSESSION_SETUP_RESP(msg_p).nb_of_pdusessions_failed = pdu_sessions_failed;
// } else {
// LOG_D(NR_RRC,"xid does not corresponds (context pdu_sessions index %d, status %d, xid %d/%d) \n ",
// pdusession, ue_context_pP->ue_context.pdusession[pdusession].status, xid, ue_context_pP->ue_context.pdusession[pdusession].xid);
// }
}
if ((pdu_sessions_done > 0) ) {
LOG_I(NR_RRC,"NGAP_PDUSESSION_SETUP_RESP: sending the message: nb_of_pdusessions %d, total pdu_sessions %d, index %d\n",
ue_context_pP->ue_context.nb_of_pdusessions, ue_context_pP->ue_context.setup_pdu_sessions, pdusession);
MSC_LOG_TX_MESSAGE(
MSC_RRC_GNB,
MSC_NGAP_GNB,
(const char *)&NGAP_PDUSESSION_SETUP_RESP (msg_p),
sizeof(ngap_pdusession_setup_resp_t),
MSC_AS_TIME_FMT" PDUSESSION_SETUP_RESP UE %X gNB_ue_ngap_id %u pdu_sessions:%u succ %u fail",
MSC_AS_TIME_ARGS(ctxt_pP),
ue_context_pP->ue_id_rnti,
NGAP_PDUSESSION_SETUP_RESP (msg_p).gNB_ue_ngap_id,
pdu_sessions_done, pdu_sessions_failed);
itti_send_msg_to_task (TASK_NGAP, ctxt_pP->instance, msg_p);
}
for(int i = 0; i < NB_RB_MAX; i++) {
ue_context_pP->ue_context.pdusession[i].xid = -1;
}
return;
}
//------------------------------------------------------------------------------
int
rrc_gNB_process_NGAP_PDUSESSION_SETUP_REQ(
MessageDef *msg_p,
const char *msg_name,
instance_t instance
)
//------------------------------------------------------------------------------
{
uint16_t ue_initial_id;
uint32_t gNB_ue_ngap_id;
rrc_gNB_ue_context_t *ue_context_p = NULL;
protocol_ctxt_t ctxt;
gtpv1u_gnb_create_tunnel_req_t create_tunnel_req;
gtpv1u_gnb_create_tunnel_resp_t create_tunnel_resp;
uint8_t pdu_sessions_done;
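/* inde_list[k] remembers which UE-context PDU session slot the k-th tunnel of the GTP-U request refers to */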
uint8_t inde_list[NR_NB_RB_MAX - 3]= {0};
int ret = 0;
ue_initial_id = NGAP_PDUSESSION_SETUP_REQ(msg_p).ue_initial_id;
gNB_ue_ngap_id = NGAP_PDUSESSION_SETUP_REQ(msg_p).gNB_ue_ngap_id;
ue_context_p = rrc_gNB_get_ue_context_from_ngap_ids(instance, ue_initial_id, gNB_ue_ngap_id);
LOG_I(NR_RRC, "[gNB %d] Received %s: ue_initial_id %d, gNB_ue_ngap_id %d \n",
instance, msg_name, ue_initial_id, gNB_ue_ngap_id);
if (ue_context_p == NULL) {
MessageDef *msg_fail_p = NULL;
LOG_W(NR_RRC, "[gNB %d] In NGAP_PDUSESSION_SETUP_REQ: unknown UE from NGAP ids (%d, %d)\n", instance, ue_initial_id, gNB_ue_ngap_id);
msg_fail_p = itti_alloc_new_message(TASK_RRC_GNB, NGAP_PDUSESSION_SETUP_REQUEST_FAIL);
NGAP_PDUSESSION_SETUP_REQ(msg_fail_p).gNB_ue_ngap_id = gNB_ue_ngap_id;
// TODO add failure cause when defined!
itti_send_msg_to_task (TASK_NGAP, instance, msg_fail_p);
return (-1);
} else {
memset(&create_tunnel_req, 0, sizeof(gtpv1u_gnb_create_tunnel_req_t));
uint8_t nb_pdusessions_tosetup = NGAP_PDUSESSION_SETUP_REQ(msg_p).nb_pdusessions_tosetup;
pdu_sessions_done = 0;
PROTOCOL_CTXT_SET_BY_INSTANCE(&ctxt, instance, GNB_FLAG_YES, ue_context_p->ue_context.rnti, 0, 0);
for (int i = 0; i < NR_NB_RB_MAX - 3; i++) {
if(ue_context_p->ue_context.pdusession[i].status >= NR_PDU_SESSION_STATUS_DONE)
continue;
ue_context_p->ue_context.pdusession[i].status = NR_PDU_SESSION_STATUS_NEW;
ue_context_p->ue_context.pdusession[i].param = NGAP_PDUSESSION_SETUP_REQ(msg_p).pdusession_setup_params[pdu_sessions_done];
create_tunnel_req.pdusession_id[pdu_sessions_done] = NGAP_PDUSESSION_SETUP_REQ(msg_p).pdusession_setup_params[pdu_sessions_done].pdusession_id;
create_tunnel_req.upf_NGu_teid[pdu_sessions_done] = NGAP_PDUSESSION_SETUP_REQ(msg_p).pdusession_setup_params[pdu_sessions_done].gtp_teid;
memcpy(create_tunnel_req.upf_addr[pdu_sessions_done].buffer,
NGAP_PDUSESSION_SETUP_REQ(msg_p).pdusession_setup_params[i].upf_addr.buffer,
sizeof(uint8_t)*20);
create_tunnel_req.upf_addr[pdu_sessions_done].length = NGAP_PDUSESSION_SETUP_REQ(msg_p).pdusession_setup_params[i].upf_addr.length;
LOG_I(NR_RRC,"NGAP PDUSESSION SETUP REQ: local index %d teid %u, pdusession id %d \n",
i,
create_tunnel_req.upf_NGu_teid[i],
create_tunnel_req.pdusession_id[i]);
inde_list[pdu_sessions_done] = i;
pdu_sessions_done++;
if(pdu_sessions_done >= nb_pdusessions_tosetup) {
break;
}
}
ue_context_p->ue_context.nb_of_pdusessions = NGAP_PDUSESSION_SETUP_REQ(msg_p).nb_pdusessions_tosetup;
ue_context_p->ue_context.gNB_ue_ngap_id = NGAP_PDUSESSION_SETUP_REQ(msg_p).gNB_ue_ngap_id;
ue_context_p->ue_context.amf_ue_ngap_id = NGAP_PDUSESSION_SETUP_REQ(msg_p).amf_ue_ngap_id;
create_tunnel_req.rnti = ue_context_p->ue_context.rnti;
create_tunnel_req.num_tunnels = pdu_sessions_done;
ret = gtpv1u_create_ngu_tunnel(
instance,
&create_tunnel_req,
&create_tunnel_resp);
if (ret != 0) {
LOG_E(NR_RRC,"rrc_gNB_process_NGAP_PDUSESSION_SETUP_REQ : gtpv1u_create_ngu_tunnel failed,start to release UE %x\n",ue_context_p->ue_context.rnti);
ue_context_p->ue_context.ue_release_timer_ng = 1;
ue_context_p->ue_context.ue_release_timer_thres_ng = 100;
ue_context_p->ue_context.ue_release_timer = 0;
ue_context_p->ue_context.ue_reestablishment_timer = 0;
ue_context_p->ue_context.ul_failure_timer = 20000; // set ul_failure to 20000 for triggering rrc_eNB_send_S1AP_UE_CONTEXT_RELEASE_REQ
// rrc_gNB_free_UE(ctxt.module_id,ue_context_p);
ue_context_p->ue_context.ul_failure_timer = 0;
return (0);
}
nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(
&ctxt,
&create_tunnel_resp,
&inde_list[0]);
ue_context_p->ue_context.setup_pdu_sessions += nb_pdusessions_tosetup;
// TEST
ue_context_p->ue_context.pdusession[0].status = NR_PDU_SESSION_STATUS_DONE;
rrc_gNB_send_NGAP_PDUSESSION_SETUP_RESP(&ctxt, ue_context_p, 0);
return(0);
}
}
\ No newline at end of file
......@@ -93,4 +93,18 @@ rrc_gNB_send_NGAP_UPLINK_NAS(
NR_UL_DCCH_Message_t *const ul_dcch_msg
);
void
rrc_gNB_send_NGAP_PDUSESSION_SETUP_RESP(
const protocol_ctxt_t *const ctxt_pP,
rrc_gNB_ue_context_t *const ue_context_pP,
uint8_t xid
);
int
rrc_gNB_process_NGAP_PDUSESSION_SETUP_REQ(
MessageDef *msg_p,
const char *msg_name,
instance_t instance
);
#endif
......@@ -279,7 +279,7 @@ int create_gNB_tasks(uint32_t gnb_nb) {
}
}
if (itti_create_task (TASK_GTPV1_U, &gtpv1u_gNB_task, NULL) < 0) {
if (itti_create_task (TASK_GTPV1_U, &nr_gtpv1u_gNB_task, NULL) < 0) {
LOG_E(GTPU, "Create task for GTPV1U failed\n");
return -1;
}
......
......@@ -38,6 +38,7 @@
#include "NwGtpv1uPrivate.h"
#include "NwLog.h"
#include "gtpv1u_eNB_defs.h"
#include "gtpv1u_gNB_defs.h"
#include "gtpv1_u_messages_types.h"
#include "udp_eNB_task.h"
#include "common/utils/LOG/log.h"
......@@ -45,7 +46,6 @@
#include "COMMON/platform_constants.h"
#include "common/utils/LOG/vcd_signal_dumper.h"
#include "common/ran_context.h"
#include "gtpv1u_eNB_defs.h"
#include "gtpv1u_eNB_task.h"
#include "gtpv1u_gNB_task.h"
#include "rrc_eNB_GTPV1U.h"
......@@ -309,3 +309,343 @@ void *gtpv1u_gNB_task(void *args) {
return NULL;
}
int nr_gtpv1u_gNB_init(void) {
NwGtpv1uRcT rc = NW_GTPV1U_FAILURE;
NwGtpv1uUlpEntityT ulp;
NwGtpv1uUdpEntityT udp;
NwGtpv1uLogMgrEntityT log;
NwGtpv1uTimerMgrEntityT tmr;
// enb_properties_p = enb_config_get()->properties[0];
RC.nr_gtpv1u_data_g = (nr_gtpv1u_data_t *)calloc(sizeof(nr_gtpv1u_data_t),1);
LOG_I(GTPU, "Initializing GTPU stack %p\n",&RC.nr_gtpv1u_data_g);
/* Initialize UE hashtable */
RC.nr_gtpv1u_data_g->ue_mapping = hashtable_create (32, NULL, NULL);
AssertFatal(RC.nr_gtpv1u_data_g->ue_mapping != NULL, " ERROR Initializing TASK_GTPV1_U task interface: in hashtable_create returned %p\n", RC.nr_gtpv1u_data_g->ue_mapping);
RC.nr_gtpv1u_data_g->teid_mapping = hashtable_create (256, NULL, NULL);
AssertFatal(RC.nr_gtpv1u_data_g->teid_mapping != NULL, " ERROR Initializing TASK_GTPV1_U task interface: in hashtable_create\n");
// RC.gtpv1u_data_g.enb_ip_address_for_S1u_S12_S4_up = enb_properties_p->enb_ipv4_address_for_S1U;
//gtpv1u_data_g.udp_data;
RC.nr_gtpv1u_data_g->seq_num = 0;
RC.nr_gtpv1u_data_g->restart_counter = 0;
/* Initializing GTPv1-U stack */
if ((rc = nwGtpv1uInitialize(&RC.nr_gtpv1u_data_g->gtpv1u_stack, GTPU_STACK_ENB)) != NW_GTPV1U_OK) {
LOG_E(GTPU, "Failed to setup nwGtpv1u stack %x\n", rc);
return -1;
}
if ((rc = nwGtpv1uSetLogLevel(RC.nr_gtpv1u_data_g->gtpv1u_stack,
NW_LOG_LEVEL_DEBG)) != NW_GTPV1U_OK) {
LOG_E(GTPU, "Failed to setup loglevel for stack %x\n", rc);
return -1;
}
/* Set the ULP API callback. Called once message have been processed by the
* nw-gtpv1u stack.
*/
ulp.ulpReqCallback = gtpv1u_gNB_process_stack_req;
memset((void *)&(ulp.hUlp), 0, sizeof(NwGtpv1uUlpHandleT));
if ((rc = nwGtpv1uSetUlpEntity(RC.nr_gtpv1u_data_g->gtpv1u_stack, &ulp)) != NW_GTPV1U_OK) {
LOG_E(GTPU, "nwGtpv1uSetUlpEntity: %x", rc);
return -1;
}
/* nw-gtpv1u stack requires an udp callback to send data over UDP.
* We provide a wrapper to UDP task.
*/
udp.udpDataReqCallback = gtpv1u_eNB_send_udp_msg;
memset((void *)&(udp.hUdp), 0, sizeof(NwGtpv1uUdpHandleT));
if ((rc = nwGtpv1uSetUdpEntity(RC.nr_gtpv1u_data_g->gtpv1u_stack, &udp)) != NW_GTPV1U_OK) {
LOG_E(GTPU, "nwGtpv1uSetUdpEntity: %x", rc);
return -1;
}
log.logReqCallback = gtpv1u_eNB_log_request;
memset((void *)&(log.logMgrHandle), 0, sizeof(NwGtpv1uLogMgrHandleT));
if ((rc = nwGtpv1uSetLogMgrEntity(RC.nr_gtpv1u_data_g->gtpv1u_stack, &log)) != NW_GTPV1U_OK) {
LOG_E(GTPU, "nwGtpv1uSetLogMgrEntity: %x", rc);
return -1;
}
/* Timer interface is more complicated as both wrappers doesn't send a message
* to the timer task but call the timer API functions start/stop timer.
*/
tmr.tmrMgrHandle = 0;
tmr.tmrStartCallback = gtpv1u_start_timer_wrapper;
tmr.tmrStopCallback = gtpv1u_stop_timer_wrapper;
if ((rc = nwGtpv1uSetTimerMgrEntity(RC.nr_gtpv1u_data_g->gtpv1u_stack, &tmr)) != NW_GTPV1U_OK) {
LOG_E(GTPU, "nwGtpv1uSetTimerMgrEntity: %x", rc);
return -1;
}
#if defined(GTP_DUMP_SOCKET) && GTP_DUMP_SOCKET > 0
if ((ret = gtpv1u_eNB_create_dump_socket()) < 0) {
return -1;
}
#endif
LOG_D(GTPU, "Initializing GTPV1U interface for eNB: DONE\n");
return 0;
}
//-----------------------------------------------------------------------------
int
gtpv1u_create_ngu_tunnel(
const instance_t instanceP,
const gtpv1u_gnb_create_tunnel_req_t *const create_tunnel_req_pP,
gtpv1u_gnb_create_tunnel_resp_t *const create_tunnel_resp_pP)
{
/* Create a new nw-gtpv1-u stack req using API */
NwGtpv1uUlpApiT stack_req;
NwGtpv1uRcT rc = NW_GTPV1U_FAILURE;
/* Local tunnel end-point identifier */
teid_t ngu_teid = 0;
nr_gtpv1u_teid_data_t *gtpv1u_teid_data_p = NULL;
nr_gtpv1u_ue_data_t *gtpv1u_ue_data_p = NULL;
//MessageDef *message_p = NULL;
hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS;
int i;
pdusessionid_t pdusession_id = 0;
// int ipv4_addr = 0;
int ip_offset = 0;
in_addr_t in_addr;
int addrs_length_in_bytes= 0;
int loop_counter = 0;
int ret = 0;
MSC_LOG_RX_MESSAGE(
MSC_GTPU_GNB,
MSC_RRC_GNB,
NULL,0,
MSC_AS_TIME_FMT" CREATE_TUNNEL_REQ RNTI %"PRIx16" inst %u ntuns %u psid %u upf-ngu teid %u",
0,0,create_tunnel_req_pP->rnti, instanceP,
create_tunnel_req_pP->num_tunnels, create_tunnel_req_pP->pdusession_id[0],
create_tunnel_req_pP->upf_NGu_teid[0]);
create_tunnel_resp_pP->rnti = create_tunnel_req_pP->rnti;
create_tunnel_resp_pP->status = 0;
create_tunnel_resp_pP->num_tunnels = 0;
for (i = 0; i < create_tunnel_req_pP->num_tunnels; i++) {
ip_offset = 0;
loop_counter = 0;
pdusession_id = create_tunnel_req_pP->pdusession_id[i];
LOG_D(GTPU, "Rx GTPV1U_GNB_CREATE_TUNNEL_REQ ue rnti %x pdu session id %u\n",
create_tunnel_req_pP->rnti, pdusession_id);
memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));
stack_req.apiType = NW_GTPV1U_ULP_API_CREATE_TUNNEL_ENDPOINT;
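/* Draw a fresh local TEID and ask the GTP-U stack to create the endpoint; retry up to 10 times if the stack refuses. */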
do {
ngu_teid = gtpv1u_new_teid();
LOG_D(GTPU, "gtpv1u_create_ngu_tunnel() 0x%x %u(dec)\n", ngu_teid, ngu_teid);
stack_req.apiInfo.createTunnelEndPointInfo.teid = ngu_teid;
stack_req.apiInfo.createTunnelEndPointInfo.hUlpSession = 0;
stack_req.apiInfo.createTunnelEndPointInfo.hStackSession = 0;
rc = nwGtpv1uProcessUlpReq(RC.nr_gtpv1u_data_g->gtpv1u_stack, &stack_req);
LOG_D(GTPU, ".\n");
loop_counter++;
} while (rc != NW_GTPV1U_OK && loop_counter < 10);
if ( rc != NW_GTPV1U_OK && loop_counter == 10 ) {
LOG_E(GTPU,"NwGtpv1uCreateTunnelEndPoint failed 10 times,start next loop\n");
ret = -1;
continue;
}
//-----------------------
// PDCP->GTPV1U mapping
//-----------------------
hash_rc = hashtable_get(RC.nr_gtpv1u_data_g->ue_mapping, create_tunnel_req_pP->rnti, (void **)&gtpv1u_ue_data_p);
if ((hash_rc == HASH_TABLE_KEY_NOT_EXISTS) || (hash_rc == HASH_TABLE_OK)) {
if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
gtpv1u_ue_data_p = calloc (1, sizeof(nr_gtpv1u_ue_data_t));
hash_rc = hashtable_insert(RC.nr_gtpv1u_data_g->ue_mapping, create_tunnel_req_pP->rnti, gtpv1u_ue_data_p);
AssertFatal(hash_rc == HASH_TABLE_OK, "Error inserting ue_mapping in GTPV1U hashtable");
}
gtpv1u_ue_data_p->ue_id = create_tunnel_req_pP->rnti;
gtpv1u_ue_data_p->instance_id = 0; // TO DO
memcpy(&create_tunnel_resp_pP->gnb_addr.buffer,
&RC.nr_gtpv1u_data_g->gnb_ip_address_for_NGu_up,
sizeof (in_addr_t));
LOG_I(GTPU,"Configured GTPu address : %x\n",RC.nr_gtpv1u_data_g->gnb_ip_address_for_NGu_up);
create_tunnel_resp_pP->gnb_addr.length = sizeof (in_addr_t);
addrs_length_in_bytes = create_tunnel_req_pP->upf_addr[i].length / 8;
AssertFatal((addrs_length_in_bytes == 4) ||
(addrs_length_in_bytes == 16) ||
(addrs_length_in_bytes == 20),
"Bad transport layer address length %d (bits) %d (bytes)",
create_tunnel_req_pP->upf_addr[i].length, addrs_length_in_bytes);
if ((addrs_length_in_bytes == 4) ||
(addrs_length_in_bytes == 20)) {
in_addr = *((in_addr_t *)create_tunnel_req_pP->upf_addr[i].buffer);
ip_offset = 4;
gtpv1u_ue_data_p->bearers[pdusession_id - GTPV1U_BEARER_OFFSET].upf_ip_addr = in_addr;
}
if ((addrs_length_in_bytes == 16) ||
(addrs_length_in_bytes == 20)) {
memcpy(gtpv1u_ue_data_p->bearers[pdusession_id - GTPV1U_BEARER_OFFSET].upf_ip6_addr.s6_addr,
&create_tunnel_req_pP->upf_addr[i].buffer[ip_offset],
16);
}
gtpv1u_ue_data_p->bearers[pdusession_id - GTPV1U_BEARER_OFFSET].state = BEARER_IN_CONFIG;
gtpv1u_ue_data_p->bearers[pdusession_id - GTPV1U_BEARER_OFFSET].teid_gNB = ngu_teid;
gtpv1u_ue_data_p->bearers[pdusession_id - GTPV1U_BEARER_OFFSET].teid_gNB_stack_session = stack_req.apiInfo.createTunnelEndPointInfo.hStackSession;
gtpv1u_ue_data_p->bearers[pdusession_id - GTPV1U_BEARER_OFFSET].teid_upf = create_tunnel_req_pP->upf_NGu_teid[i];
gtpv1u_ue_data_p->num_bearers++;
create_tunnel_resp_pP->gnb_NGu_teid[i] = ngu_teid;
LOG_I(GTPU,"Copied to create_tunnel_resp tunnel: index %d target gNB ip %d.%d.%d.%d length %d gtp teid %u\n",
i,
create_tunnel_resp_pP->gnb_addr.buffer[0],
create_tunnel_resp_pP->gnb_addr.buffer[1],
create_tunnel_resp_pP->gnb_addr.buffer[2],
create_tunnel_resp_pP->gnb_addr.buffer[3],
create_tunnel_resp_pP->gnb_addr.length,
create_tunnel_resp_pP->gnb_NGu_teid[i]);
} else {
create_tunnel_resp_pP->gnb_NGu_teid[i] = 0;
create_tunnel_resp_pP->status = 0xFF;
}
create_tunnel_resp_pP->pdusession_id[i] = pdusession_id;
create_tunnel_resp_pP->num_tunnels += 1;
//-----------------------
// GTPV1U->PDCP mapping
//-----------------------
hash_rc = hashtable_get(RC.nr_gtpv1u_data_g->teid_mapping, ngu_teid, (void **)&gtpv1u_teid_data_p);
if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
gtpv1u_teid_data_p = calloc (1, sizeof(nr_gtpv1u_teid_data_t));
gtpv1u_teid_data_p->gnb_id = 0; // TO DO
gtpv1u_teid_data_p->ue_id = create_tunnel_req_pP->rnti;
gtpv1u_teid_data_p->pdu_session_id = pdusession_id;
hash_rc = hashtable_insert(RC.nr_gtpv1u_data_g->teid_mapping, ngu_teid, gtpv1u_teid_data_p);
AssertFatal(hash_rc == HASH_TABLE_OK, "Error inserting teid mapping in GTPV1U hashtable");
} else {
create_tunnel_resp_pP->gnb_NGu_teid[i] = 0;
create_tunnel_resp_pP->status = 0xFF;
}
}
MSC_LOG_TX_MESSAGE(
MSC_GTPU_GNB,
MSC_RRC_GNB,
NULL,0,
"0 GTPV1U_GNB_CREATE_TUNNEL_RESP rnti %x teid %x",
create_tunnel_resp_pP->rnti,
ngu_teid);
LOG_D(GTPU, "Tx GTPV1U_GNB_CREATE_TUNNEL_RESP ue rnti %x status %d\n",
create_tunnel_req_pP->rnti,
create_tunnel_resp_pP->status);
//return 0;
return ret;
}
//-----------------------------------------------------------------------------
static int gtpv1u_gNB_send_init_udp(const Gtpv1uNGReq *req) {
// Create and alloc new message
MessageDef *message_p;
struct in_addr addr= {0};
message_p = itti_alloc_new_message(TASK_GTPV1_U, UDP_INIT);
if (message_p == NULL) {
return -1;
}
UDP_INIT(message_p).port = req->gnb_port_for_NGu_up;
addr.s_addr = req->gnb_ip_address_for_NGu_up;
UDP_INIT(message_p).address = inet_ntoa(addr);
LOG_I(GTPU, "Tx UDP_INIT IP addr %s (%x)\n", UDP_INIT(message_p).address,UDP_INIT(message_p).port);
MSC_LOG_EVENT(
MSC_GTPU_GNB,
"0 UDP bind %s:%u",
UDP_INIT(message_p).address,
UDP_INIT(message_p).port);
return itti_send_msg_to_task(TASK_UDP, INSTANCE_DEFAULT, message_p);
}
static int gtpv1u_ng_req(
const instance_t instanceP,
const Gtpv1uNGReq *const req) {
memcpy(&RC.nr_gtpv1u_data_g->gnb_ip_address_for_NGu_up,
&req->gnb_ip_address_for_NGu_up,
sizeof (req->gnb_ip_address_for_NGu_up));
gtpv1u_gNB_send_init_udp(req);
return 0;
}
//-----------------------------------------------------------------------------
void *gtpv1u_gNB_process_itti_msg(void *notUsed) {
/* Trying to fetch a message from the message queue.
* If the queue is empty, this function will block till a
* message is sent to the task.
*/
instance_t instance;
MessageDef *received_message_p = NULL;
int rc = 0;
itti_receive_msg(TASK_GTPV1_U, &received_message_p);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_ENB_TASK, VCD_FUNCTION_IN);
DevAssert(received_message_p != NULL);
instance = ITTI_MSG_INSTANCE(received_message_p);
switch (ITTI_MSG_ID(received_message_p)) {
case GTPV1U_GNB_NG_REQ:
gtpv1u_ng_req(instance, &received_message_p->ittiMsg.gtpv1uNGReq);
break;
case TERMINATE_MESSAGE: {
if (RC.nr_gtpv1u_data_g->ue_mapping != NULL) {
hashtable_destroy (&(RC.nr_gtpv1u_data_g->ue_mapping));
}
if (RC.nr_gtpv1u_data_g->teid_mapping != NULL) {
hashtable_destroy (&(RC.nr_gtpv1u_data_g->teid_mapping));
}
LOG_W(GTPU, " *** Exiting GTPU thread\n");
itti_exit_task();
}
break;
case TIMER_HAS_EXPIRED:
nwGtpv1uProcessTimeout(&received_message_p->ittiMsg.timer_has_expired.arg);
break;
default: {
LOG_E(GTPU, "Unkwnon message ID %d:%s\n",
ITTI_MSG_ID(received_message_p),
ITTI_MSG_NAME(received_message_p));
}
break;
}
rc = itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), received_message_p);
AssertFatal(rc == EXIT_SUCCESS, "Failed to free memory (%d)!\n", rc);
received_message_p = NULL;
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_ENB_TASK, VCD_FUNCTION_OUT);
return NULL;
}
void *nr_gtpv1u_gNB_task(void *args) {
int rc = 0;
rc = nr_gtpv1u_gNB_init();
AssertFatal(rc == 0, "nr_gtpv1u_gNB_init Failed");
itti_mark_task_ready(TASK_GTPV1_U);
MSC_START_USE();
while(1) {
(void) gtpv1u_gNB_process_itti_msg (NULL);
}
return NULL;
}
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
/*! \file gtpv1u_gNB_defs.h
 * \brief GTP-U definitions and data structures for the gNB
* \author Yoshio INOUE, Masayuki HARADA
* \date 2020
* \version 0.1
* \email: yoshio.inoue@fujitsu.com,masayuki.harada@fujitsu.com
* (yoshio.inoue%40fujitsu.com%2cmasayuki.harada%40fujitsu.com)
*/
#include "hashtable.h"
#include "NR_asn_constant.h"
#ifndef GTPV1U_GNB_DEFS_H_
#define GTPV1U_GNB_DEFS_H_
#include "NwGtpv1u.h"
#define GTPV1U_UDP_PORT (2152)
#define NR_GTPV1U_MAX_BEARERS_ID (max_val_NR_DRB_Identity - 3)
#define GTPV1U_SOURCE_GNB (0)
#define GTPV1U_TARGET_GNB (1)
#define GTPV1U_MSG_FROM_SOURCE_GNB (0)
#define GTPV1U_MSG_FROM_UPF (1)
typedef struct nr_gtpv1u_teid_data_s {
/* UE identifier for oaisim stack */
module_id_t gnb_id;
rnti_t ue_id;
pdusessionid_t pdu_session_id;
} nr_gtpv1u_teid_data_t;
typedef struct nr_gtpv1u_bearer_s {
/* TEID used in dl and ul */
teid_t teid_gNB; ///< gNB TEID
uintptr_t teid_gNB_stack_session; ///< gNB TEID
teid_t teid_upf; ///< Remote TEID
in_addr_t upf_ip_addr;
struct in6_addr upf_ip6_addr;
teid_t teid_tgNB;
in_addr_t tgnb_ip_addr; ///< target gNB ipv4
struct in6_addr tgnb_ip6_addr; ///< target gNB ipv6
tcp_udp_port_t port;
//NwGtpv1uStackSessionHandleT stack_session;
bearer_state_t state;
} nr_gtpv1u_bearer_t;
typedef struct nr_gtpv1u_ue_data_s {
/* UE identifier for oaisim stack */
rnti_t ue_id;
/* Unique identifier used between PDCP and GTP-U to distinguish UEs */
uint32_t instance_id;
int num_bearers;
/* Bearer related data.
* Note that the first LCID available for data is 3 and we fixed the maximum
* number of e-rab per UE to be (32 [id range]), max RB is 11. The real rb id will be 3 + rab_id (3..32).
*/
nr_gtpv1u_bearer_t bearers[NR_GTPV1U_MAX_BEARERS_ID];
//RB_ENTRY(gtpv1u_ue_data_s) gtpv1u_ue_node;
} nr_gtpv1u_ue_data_t;
typedef struct nr_gtpv1u_data_s {
/* nwgtpv1u stack internal data */
NwGtpv1uStackHandleT gtpv1u_stack;
/* RB tree of UEs */
hash_table_t *ue_mapping; // PDCP->GTPV1U
hash_table_t *teid_mapping; // GTPV1U -> PDCP
//RB_HEAD(gtpv1u_ue_map, gtpv1u_ue_data_s) gtpv1u_ue_map_head;
/* Local IP address to use */
in_addr_t gnb_ip_address_for_NGu_up;
/* UDP internal data */
//udp_data_t udp_data;
uint16_t seq_num;
uint8_t restart_counter;
#ifdef GTPU_IN_KERNEL
char *interface_name;
int interface_index;
struct iovec *malloc_ring;
void *sock_mmap_ring[16];
int sock_desc[16]; // indexed by marking
#endif
} nr_gtpv1u_data_t;
#endif /* GTPV1U_GNB_DEFS_H_ */
......@@ -32,6 +32,13 @@ int gtpv1u_gNB_init(void);
void *gtpv1u_gNB_task(void *args);
void *nr_gtpv1u_gNB_task(void *args);
int
gtpv1u_create_ngu_tunnel(
const instance_t instanceP,
const gtpv1u_gnb_create_tunnel_req_t * const create_tunnel_req_pP,
gtpv1u_gnb_create_tunnel_resp_t * const create_tunnel_resp_pP);
#endif /* GTPV1U_GNB_TASK_H_ */
......@@ -73,7 +73,7 @@ int ngap_gNB_encode_pdu(NGAP_NGAP_PDU_t *pdu, uint8_t **buffer, uint32_t *len) {
return -1;
}
ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_NGAP_NGAP_PDU, pdu);
//ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_NGAP_NGAP_PDU, pdu);
return ret;
}
......
......@@ -49,6 +49,7 @@
#include "assertions.h"
#include "conversions.h"
#include "msc.h"
#include "NGAP_NonDynamic5QIDescriptor.h"
static
int ngap_gNB_handle_ng_setup_response(uint32_t assoc_id,
......@@ -955,7 +956,7 @@ int ngap_gNB_handle_initial_context_request(uint32_t assoc_id,
}
dec_rval = uper_decode(NULL,
dec_rval = aper_decode(NULL,
&asn_DEF_NGAP_PDUSessionResourceSetupRequestTransfer,
(void **)&pdusessionTransfer_p,
item_p->pDUSessionResourceSetupRequestTransfer.buf,
......@@ -1028,7 +1029,7 @@ int ngap_gNB_handle_initial_context_request(uint32_t assoc_id,
qosFlowItem_p = pdusessionTransfer_ies->value.choice.QosFlowSetupRequestList.list.array[qosIdx];
/* Set the QOS informations */
NGAP_INITIAL_CONTEXT_SETUP_REQ(message_p).pdusession_param[i].qos[qosIdx].qci = (uint8_t)qosFlowItem_p->qosFlowIdentifier;
NGAP_INITIAL_CONTEXT_SETUP_REQ(message_p).pdusession_param[i].qos[qosIdx].qfi = (uint8_t)qosFlowItem_p->qosFlowIdentifier;
NGAP_INITIAL_CONTEXT_SETUP_REQ(message_p).pdusession_param[i].qos[qosIdx].allocation_retention_priority.priority_level =
qosFlowItem_p->qosFlowLevelQosParameters.allocationAndRetentionPriority.priorityLevelARP;
......@@ -1289,11 +1290,11 @@ int ngap_gNB_handle_pdusession_setup_request(uint32_t assoc_id,
}
/* Initial context request = UE-related procedure -> stream != 0 */
if (stream == 0) {
NGAP_ERROR("[SCTP %d] Received UE-related procedure on stream (%d)\n",
assoc_id, stream);
return -1;
}
// if (stream == 0) {
// NGAP_ERROR("[SCTP %d] Received UE-related procedure on stream (%d)\n",
// assoc_id, stream);
// return -1;
// }
ue_desc_p->rx_stream = stream;
......@@ -1338,7 +1339,7 @@ int ngap_gNB_handle_pdusession_setup_request(uint32_t assoc_id,
NGAP_WARN("NAS PDU is not provided, generate a PDUSESSION_SETUP Failure (TBD) back to AMF \n");
}
dec_rval = uper_decode(NULL,
dec_rval = aper_decode(NULL,
&asn_DEF_NGAP_PDUSessionResourceSetupRequestTransfer,
(void **)&pdusessionTransfer_p,
item_p->pDUSessionResourceSetupRequestTransfer.buf,
......@@ -1407,8 +1408,13 @@ int ngap_gNB_handle_pdusession_setup_request(uint32_t assoc_id,
qosFlowItem_p = pdusessionTransfer_ies->value.choice.QosFlowSetupRequestList.list.array[qosIdx];
/* Set the QOS informations */
NGAP_PDUSESSION_SETUP_REQ(message_p).pdusession_setup_params[i].qos[qosIdx].qci = (uint8_t)qosFlowItem_p->qosFlowIdentifier;
NGAP_PDUSESSION_SETUP_REQ(message_p).pdusession_setup_params[i].qos[qosIdx].qfi = (uint8_t)qosFlowItem_p->qosFlowIdentifier;
if(qosFlowItem_p->qosFlowLevelQosParameters.qosCharacteristics.present == NGAP_QosCharacteristics_PR_nonDynamic5QI){
if(qosFlowItem_p->qosFlowLevelQosParameters.qosCharacteristics.choice.nonDynamic5QI != NULL){
NGAP_PDUSESSION_SETUP_REQ(message_p).pdusession_setup_params[i].qos[qosIdx].fiveQI =
(uint64_t)qosFlowItem_p->qosFlowLevelQosParameters.qosCharacteristics.choice.nonDynamic5QI->fiveQI;
}
}
NGAP_PDUSESSION_SETUP_REQ(message_p).pdusession_setup_params[i].qos[qosIdx].allocation_retention_priority.priority_level =
qosFlowItem_p->qosFlowLevelQosParameters.allocationAndRetentionPriority.priorityLevelARP;
NGAP_PDUSESSION_SETUP_REQ(message_p).pdusession_setup_params[i].qos[qosIdx].allocation_retention_priority.pre_emp_capability =
......@@ -1668,7 +1674,7 @@ int ngap_gNB_handle_pdusession_modify_request(uint32_t assoc_id,
continue;
}
dec_rval = uper_decode(NULL,
dec_rval = aper_decode(NULL,
&asn_DEF_NGAP_PDUSessionResourceModifyRequestTransfer,
(void **)&pdusessionTransfer_p,
item_p->pDUSessionResourceModifyRequestTransfer.buf,
......@@ -1709,7 +1715,7 @@ int ngap_gNB_handle_pdusession_modify_request(uint32_t assoc_id,
qosFlowItem_p = pdusessionTransfer_ies->value.choice.QosFlowAddOrModifyRequestList.list.array[qosIdx];
/* Set the QOS informations */
NGAP_PDUSESSION_MODIFY_REQ(message_p).pdusession_modify_params[i].qos[qosIdx].qci = (uint8_t)qosFlowItem_p->qosFlowIdentifier;
NGAP_PDUSESSION_MODIFY_REQ(message_p).pdusession_modify_params[i].qos[qosIdx].qfi = (uint8_t)qosFlowItem_p->qosFlowIdentifier;
if(qosFlowItem_p->qosFlowLevelQosParameters) {
NGAP_PDUSESSION_MODIFY_REQ(message_p).pdusession_modify_params[i].qos[qosIdx].allocation_retention_priority.priority_level =
qosFlowItem_p->qosFlowLevelQosParameters->allocationAndRetentionPriority.priorityLevelARP;
......
......@@ -750,7 +750,7 @@ int ngap_gNB_initial_ctxt_resp(
ass_qos_item_p = (NGAP_AssociatedQosFlowItem_t *)calloc(1, sizeof(NGAP_AssociatedQosFlowItem_t));
/* qosFlowIdentifier */
ass_qos_item_p->qosFlowIdentifier = initial_ctxt_resp_p->pdusessions[i].associated_qos_flows[j].qci;
ass_qos_item_p->qosFlowIdentifier = initial_ctxt_resp_p->pdusessions[i].associated_qos_flows[j].qfi;
/* qosFlowMappingIndication */
if(initial_ctxt_resp_p->pdusessions[i].associated_qos_flows[j].qos_flow_mapping_ind != QOSFLOW_MAPPING_INDICATION_NON) {
......@@ -1039,6 +1039,8 @@ int ngap_gNB_pdusession_setup_resp(instance_t instance,
pdusessionTransfer_p = (NGAP_PDUSessionResourceSetupResponseTransfer_t *)calloc(1, sizeof(NGAP_PDUSessionResourceSetupResponseTransfer_t));
pdusessionTransfer_p->dLQosFlowPerTNLInformation.uPTransportLayerInformation.present = NGAP_UPTransportLayerInformation_PR_gTPTunnel;
pdusessionTransfer_p->dLQosFlowPerTNLInformation.uPTransportLayerInformation.choice.gTPTunnel =
calloc(1, sizeof(struct NGAP_GTPTunnel));
GTP_TEID_TO_ASN1(pdusession_setup_resp_p->pdusessions[i].gtp_teid, &pdusessionTransfer_p->dLQosFlowPerTNLInformation.uPTransportLayerInformation.choice.gTPTunnel->gTP_TEID);
......@@ -1064,7 +1066,7 @@ int ngap_gNB_pdusession_setup_resp(instance_t instance,
ass_qos_item_p = (NGAP_AssociatedQosFlowItem_t *)calloc(1, sizeof(NGAP_AssociatedQosFlowItem_t));
/* qosFlowIdentifier */
ass_qos_item_p->qosFlowIdentifier = pdusession_setup_resp_p->pdusessions[i].associated_qos_flows[j].qci;
ass_qos_item_p->qosFlowIdentifier = pdusession_setup_resp_p->pdusessions[i].associated_qos_flows[j].qfi;
/* qosFlowMappingIndication */
if(pdusession_setup_resp_p->pdusessions[i].associated_qos_flows[j].qos_flow_mapping_ind != QOSFLOW_MAPPING_INDICATION_NON) {
......@@ -1075,11 +1077,19 @@ int ngap_gNB_pdusession_setup_resp(instance_t instance,
}
memset(&res, 0, sizeof(res));
res = asn_encode_to_new_buffer(NULL, ATS_ALIGNED_CANONICAL_PER, &asn_DEF_NGAP_PDUSessionResourceSetupResponseTransfer, pdusessionTransfer_p);
item->pDUSessionResourceSetupResponseTransfer.buf = res.buffer;
item->pDUSessionResourceSetupResponseTransfer.size = res.result.encoded;
ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_NGAP_PDUSessionResourceSetupResponseTransfer, pdusessionTransfer_p);
// res = asn_encode_to_new_buffer(NULL, ATS_ALIGNED_CANONICAL_PER, &asn_DEF_NGAP_PDUSessionResourceSetupResponseTransfer, pdusessionTransfer_p);
// item->pDUSessionResourceSetupResponseTransfer.buf = res.buffer;
// item->pDUSessionResourceSetupResponseTransfer.size = res.result.encoded;
char buffer[100];
asn_enc_rval_t enc_rval = aper_encode_to_buffer(&asn_DEF_NGAP_PDUSessionResourceSetupResponseTransfer,
NULL,
pdusessionTransfer_p,
buffer,100);
AssertFatal (enc_rval.encoded > 0, "ASN1 message encoding failed (%s, %lu)!\n", enc_rval.failed_type->name, enc_rval.encoded);
item->pDUSessionResourceSetupResponseTransfer.buf = buffer;
item->pDUSessionResourceSetupResponseTransfer.size = enc_rval.encoded;
//ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_NGAP_PDUSessionResourceSetupResponseTransfer, pdusessionTransfer_p);
ASN_SEQUENCE_ADD(&ie->value.choice.PDUSessionResourceSetupListSURes.list, item);
}
......