Commit 85440e2d authored by Robert Schmidt

Merge remote-tracking branch 'origin/gtpv1u-data-no-itti' into integration_2024_w50 (!3158)

GTP: use direct call instead of ITTI

Using ITTI calls for the user plane implies (1) a memory allocation, (2)
mutexes, and (3) queueing messages for each user-plane packet, which is
heavy. Use a direct API call instead to reduce overhead.
parents 61849adf 27f1c0db
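In outline, the change replaces the per-packet ITTI round trip with one
synchronous call. A before/after sketch assembled from the hunks below
(`payload`, `len`, `ue_id`, and `bearer_id` are illustrative placeholders):

```c
/* Before: every user-plane packet paid for an ITTI allocation, a copy,
 * and queueing to TASK_GTPV1_U (calls as in the removed code below). */
MessageDef *m = itti_alloc_new_message_sized(TASK_PDCP_ENB, 0, GTPV1U_TUNNEL_DATA_REQ,
                                             sizeof(gtpv1u_tunnel_data_req_t)
                                             + len + GTPU_HEADER_OVERHEAD_MAX);
gtpv1u_tunnel_data_req_t *req = &GTPV1U_TUNNEL_DATA_REQ(m);
req->buffer = (uint8_t *)(req + 1);
memcpy(req->buffer + GTPU_HEADER_OVERHEAD_MAX, payload, len);
req->length = len;
req->offset = GTPU_HEADER_OVERHEAD_MAX;
req->ue_id = ue_id;
req->bearer_id = bearer_id;
itti_send_msg_to_task(TASK_GTPV1_U, INSTANCE_DEFAULT, m);

/* After: hand the payload to GTP directly; no allocation, no extra copy,
 * no queueing. */
gtpv1uSendDirect(INSTANCE_DEFAULT, ue_id, bearer_id, payload, len, false, false);
```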
......@@ -1474,6 +1474,7 @@ add_library(L2_UE
${MAC_SRC_UE}
)
target_link_libraries(L2_UE PRIVATE asn1_nr_rrc_hdrs asn1_lte_rrc_hdrs)
target_link_libraries(L2_UE PRIVATE GTPV1U)
add_library(L2_UE_LTE_NR
${L2_RRC_SRC_UE}
......
......@@ -378,15 +378,15 @@ You might also want to consult TS 38.401 regarding the message exchange.
### General
In the DU in UL, RLC checks in `deliver_sdu()` if we are operating in split
mode, and either (direct) calls `pdcp_data_ind` (DRB) or (f1ap) sends an
`GTPV1U_TUNNEL_DATA_REQ` ITTI message to the GTP task.
mode, and either (direct) calls `pdcp_data_ind` (DRB) or (f1ap) sends a GTP
message through the GTP API.
In the CU in UL, assuming the tunnel is in place, GTP decapsulates the packet
and calls the callback `cu_f1u_data_req()`, which calls `pdcp_data_ind()` in CU.
In the CU in DL, the PDCP function `deliver_pdu_drb_gnb()` either (direct) calls
into the RLC via `enqueue_rlc_data_req()`, or (f1ap) sends a
`GTPV1U_TUNNEL_DATA_REQ` ITTI message to the GTP task.
into the RLC via `enqueue_rlc_data_req()`, or (f1ap) sends a GTP message
through the GTP API.
In the DU in DL, assuming the GTP-U tunnel exists, GTP decapsulates the packet
and calls the reception call back `du_rlc_data_req()`, which calls
......
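Concretely, the (f1ap) leg described above now boils down to a single call
into the GTP API. A minimal sketch based on the DU uplink path in this diff
(the wrapper name is hypothetical; `DUuniqInstance` is the DU's GTP instance
as in the changed code):

```c
#include "openair3/ocp-gtpu/gtp_itf.h"

extern instance_t DUuniqInstance;

/* Hypothetical wrapper: forward an RLC SDU to the CU over F1-U via GTP. */
static void du_forward_ul_sdu(ue_id_t ue_id, int rb_id, uint8_t *buf, size_t size)
{
  gtpv1uSendDirect(DUuniqInstance, ue_id, rb_id, buf, size,
                   false /* no GTP sequence number */,
                   false /* no N-PDU number */);
}
```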
......@@ -338,7 +338,7 @@ On the Tx side (downlink in gNB), the entry functions `nr_pdcp_data_req_drb()` a
## PDCP Rx flow
At the Rx side, `pdcp_data_ind()` serves as the entry point for receiving data from RLC. Within `pdcp_data_ind()`, the PDCP manager mutex protects access to the PDU receiving function of PDCP (`recv_pdu()` callback corresponding to `nr_pdcp_entity_recv_pdu()` for DRBs). Following this, the `deliver_sdu_drb()` function dispatches the received data to the GTP thread via an ITTI message (`GTPV1U_TUNNEL_DATA_REQ`).
At the Rx side, `pdcp_data_ind()` serves as the entry point for receiving data from RLC. Within `pdcp_data_ind()`, the PDCP manager mutex protects access to the PDU receiving function of PDCP (`recv_pdu()` callback corresponding to `nr_pdcp_entity_recv_pdu()` for DRBs). Following this, the `deliver_sdu_drb()` function dispatches the received data to the SDAP sublayer.
## PDCP security
......
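Downstream of `deliver_sdu_drb()`, the gNB SDAP Rx entity in this diff ends
the chain with the same direct call. A sketch under the diff's assumptions
(variable names as in `nr_sdap_rx_entity()`; the wrapper is hypothetical and
`N3GTPUInst` is the N3 GTP instance set elsewhere in OAI):

```c
#include "openair3/ocp-gtpu/gtp_itf.h"

extern instance_t *N3GTPUInst;

/* Hypothetical wrapper: strip the SDAP header at `offset` and hand the
 * remaining SDU straight to the GTP-U layer. */
static void sdap_push_to_gtp(ue_id_t ue_id, int pdusession_id,
                             uint8_t *buf, size_t size, size_t offset)
{
  uint8_t *gtp_buf = buf + offset;
  size_t gtp_len = size - offset;
  gtpv1uSendDirect(*N3GTPUInst, ue_id, pdusession_id, gtp_buf, gtp_len, false, false);
}
```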
......@@ -20,7 +20,6 @@
*/
MESSAGE_DEF(GTPV1U_TUNNEL_DATA_REQ, MESSAGE_PRIORITY_MED, gtpv1u_tunnel_data_req_t, Gtpv1uTunnelDataReq)
MESSAGE_DEF(GTPV1U_ENB_DATA_FORWARDING_REQ, MESSAGE_PRIORITY_MED, gtpv1u_enb_data_forwarding_req_t,Gtpv1uDataForwardingReq)
MESSAGE_DEF(GTPV1U_ENB_DATA_FORWARDING_IND, MESSAGE_PRIORITY_MED, gtpv1u_enb_data_forwarding_ind_t,Gtpv1uDataForwardingInd)
MESSAGE_DEF(GTPV1U_ENB_END_MARKER_REQ, MESSAGE_PRIORITY_MED, gtpv1u_enb_end_marker_req_t, Gtpv1uEndMarkerReq)
......
......@@ -30,7 +30,6 @@
#define NR_GTPV1U_MAX_BEARERS_PER_UE max_val_NR_DRB_Identity
#define GTPV1U_ENB_TUNNEL_DATA_IND(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uTunnelDataInd
#define GTPV1U_TUNNEL_DATA_REQ(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uTunnelDataReq
#define GTPV1U_ENB_DATA_FORWARDING_REQ(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uDataForwardingReq
#define GTPV1U_ENB_DATA_FORWARDING_IND(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uDataForwardingInd
#define GTPV1U_ENB_END_MARKER_REQ(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uEndMarkerReq
......@@ -107,15 +106,6 @@ typedef struct gtpv1u_enb_delete_tunnel_resp_s {
teid_t enb_S1u_teid; ///< local S1U Tunnel Endpoint Identifier to be deleted
} gtpv1u_enb_delete_tunnel_resp_t;
typedef struct gtpv1u_tunnel_data_req_s {
uint8_t *buffer;
uint32_t length;
uint32_t offset; ///< start of message offset in buffer
ue_id_t ue_id;
rb_id_t bearer_id;
} gtpv1u_tunnel_data_req_t;
typedef struct gtpv1u_enb_data_forwarding_req_s {
uint8_t *buffer;
uint32_t length;
......
......@@ -597,7 +597,6 @@ bool pdcp_data_ind(const protocol_ctxt_t *const ctxt_pP,
uint8_t rb_offset= (srb_flagP == 0) ? DTCH -1 :0;
uint16_t pdcp_uid=0;
MessageDef *message_p = NULL;
uint32_t rx_hfn_for_count;
int pdcp_sn_for_count;
int security_ok;
......@@ -999,19 +998,11 @@ bool pdcp_data_ind(const protocol_ctxt_t *const ctxt_pP,
if (LINK_ENB_PDCP_TO_GTPV1U) {
if ((true == ctxt_pP->enb_flag) && (false == srb_flagP)) {
LOG_D(PDCP, "Sending packet to GTP, Calling GTPV1U_TUNNEL_DATA_REQ ue %lx rab %ld len %u\n", ctxt_pP->rntiMaybeUEid, rb_id + 4, sdu_buffer_sizeP - payload_offset);
message_p = itti_alloc_new_message_sized(TASK_PDCP_ENB, 0, GTPV1U_TUNNEL_DATA_REQ,
sizeof(gtpv1u_tunnel_data_req_t) +
sdu_buffer_sizeP - payload_offset + GTPU_HEADER_OVERHEAD_MAX );
AssertFatal(message_p != NULL, "OUT OF MEMORY");
gtpv1u_tunnel_data_req_t *req=&GTPV1U_TUNNEL_DATA_REQ(message_p);
req->buffer = (uint8_t*)(req+1);
memcpy(req->buffer + GTPU_HEADER_OVERHEAD_MAX, sdu_buffer_pP + payload_offset, sdu_buffer_sizeP - payload_offset);
req->length = sdu_buffer_sizeP - payload_offset;
req->offset = GTPU_HEADER_OVERHEAD_MAX;
req->ue_id = ctxt_pP->rntiMaybeUEid;
req->bearer_id = rb_id + 4;
itti_send_msg_to_task(TASK_GTPV1_U, INSTANCE_DEFAULT, message_p);
ue_id_t ue_id = ctxt_pP->rntiMaybeUEid;
uint8_t *gtp_buf = sdu_buffer_pP + payload_offset;
size_t gtp_len = sdu_buffer_sizeP - payload_offset;
LOG_D(PDCP, "Sending packet to GTP ue %lx rab %ld len %ld\n", ue_id, rb_id + 4, gtp_len);
gtpv1uSendDirect(INSTANCE_DEFAULT, ue_id, rb_id + 4, gtp_buf, gtp_len, false, false);
packet_forwarded = true;
}
} else {
......
......@@ -718,23 +718,9 @@ static void deliver_pdu_drb_gnb(void *deliver_pdu_data, ue_id_t ue_id, int rb_id
protocol_ctxt_t ctxt = { .enb_flag = 1, .rntiMaybeUEid = ue_data.secondary_ue };
if (NODE_IS_CU(node_type)) {
MessageDef *message_p = itti_alloc_new_message_sized(TASK_PDCP_ENB, 0,
GTPV1U_TUNNEL_DATA_REQ,
sizeof(gtpv1u_tunnel_data_req_t)
+ size
+ GTPU_HEADER_OVERHEAD_MAX);
AssertFatal(message_p != NULL, "OUT OF MEMORY");
gtpv1u_tunnel_data_req_t *req=&GTPV1U_TUNNEL_DATA_REQ(message_p);
uint8_t *gtpu_buffer_p = (uint8_t*)(req+1);
memcpy(gtpu_buffer_p + GTPU_HEADER_OVERHEAD_MAX, buf, size);
req->buffer = gtpu_buffer_p;
req->length = size;
req->offset = GTPU_HEADER_OVERHEAD_MAX;
req->ue_id = ue_id; // use CU UE ID as GTP will use that to look up TEID
req->bearer_id = rb_id;
LOG_D(PDCP, "%s() (drb %d) sending message to gtp size %d\n", __func__, rb_id, size);
extern instance_t CUuniqInstance;
itti_send_msg_to_task(TASK_GTPV1_U, CUuniqInstance, message_p);
gtpv1uSendDirect(CUuniqInstance, ue_id, rb_id, (uint8_t *)buf, size, false, false);
} else {
uint8_t *memblock = malloc16(size);
memcpy(memblock, buf, size);
......
......@@ -40,6 +40,7 @@
#include "openair2/F1AP/f1ap_du_rrc_message_transfer.h"
#include "openair2/F1AP/f1ap_ids.h"
#include "openair3/ocp-gtpu/gtp_itf.h"
extern RAN_CONTEXT_t RC;
......@@ -525,18 +526,9 @@ rb_found:
itti_send_msg_to_task(TASK_DU_F1, ENB_MODULE_ID_TO_INSTANCE(0 /*ctxt_pP->module_id*/), msg);
return;
} else {
MessageDef *msg = itti_alloc_new_message_sized(TASK_RLC_ENB, 0, GTPV1U_TUNNEL_DATA_REQ,
sizeof(gtpv1u_tunnel_data_req_t) + size);
gtpv1u_tunnel_data_req_t *req=&GTPV1U_TUNNEL_DATA_REQ(msg);
req->buffer=(uint8_t*)(req+1);
memcpy(req->buffer,buf,size);
req->length=size;
req->offset = 0;
req->ue_id = ue->ue_id;
req->bearer_id=rb_id;
LOG_D(RLC, "Received uplink user-plane traffic at RLC-DU to be sent to the CU, size %d \n", size);
extern instance_t DUuniqInstance;
itti_send_msg_to_task(TASK_GTPV1_U, DUuniqInstance, msg);
gtpv1uSendDirect(DUuniqInstance, ue->ue_id, rb_id, (uint8_t*) buf, size, false, false);
return;
}
}
......
......@@ -240,24 +240,14 @@ static void nr_sdap_rx_entity(nr_sdap_entity_t *entity,
}
}
uint8_t *gtp_buf = (uint8_t *)(buf + offset);
size_t gtp_len = size - offset;
// Pushing SDAP SDU to GTP-U Layer
MessageDef *message_p = itti_alloc_new_message_sized(TASK_PDCP_ENB,
0,
GTPV1U_TUNNEL_DATA_REQ,
sizeof(gtpv1u_tunnel_data_req_t)
+ size + GTPU_HEADER_OVERHEAD_MAX - offset);
AssertFatal(message_p != NULL, "OUT OF MEMORY");
gtpv1u_tunnel_data_req_t *req = &GTPV1U_TUNNEL_DATA_REQ(message_p);
uint8_t *gtpu_buffer_p = (uint8_t *) (req + 1);
memcpy(gtpu_buffer_p + GTPU_HEADER_OVERHEAD_MAX, buf + offset, size - offset);
req->buffer = gtpu_buffer_p;
req->length = size - offset;
req->offset = GTPU_HEADER_OVERHEAD_MAX;
req->ue_id = ue_id;
req->bearer_id = pdusession_id;
LOG_D(SDAP, "%s() sending message to gtp size %d\n", __func__, size-offset);
LOG_D(SDAP, "sending message to gtp size %ld\n", gtp_len);
// very very dirty hack gloabl var N3GTPUInst
itti_send_msg_to_task(TASK_GTPV1_U, *N3GTPUInst, message_p);
instance_t inst = *N3GTPUInst;
gtpv1uSendDirect(inst, ue_id, pdusession_id, gtp_buf, gtp_len, false, false);
} else { //nrUE
/*
* TS 37.324 5.2 Data transfer
......
......@@ -193,7 +193,7 @@ instance_t legacyInstanceMapping=0;
auto ptrUe=insT->ue2te_mapping.find(Ue); \
\
if ( ptrUe==insT->ue2te_mapping.end() ) { \
LOG_E(GTPU, "[%ld] gtpv1uSend failed: while getting ue id %ld in hashtable ue_mapping\n", instance, ue_id); \
LOG_E(GTPU, "[%ld] %s failed: while getting ue id %ld in hashtable ue_mapping\n", instance, __func__, ue_id); \
pthread_mutex_unlock(&globGtp.gtp_lock); \
return; \
}
......@@ -202,7 +202,7 @@ instance_t legacyInstanceMapping=0;
auto ptrUe=insT->ue2te_mapping.find(Ue); \
\
if ( ptrUe==insT->ue2te_mapping.end() ) { \
LOG_E(GTPU, "[%ld] gtpv1uSend failed: while getting ue id %ld in hashtable ue_mapping\n", instance, ue_id); \
LOG_E(GTPU, "[%ld] %s failed: while getting ue id %ld in hashtable ue_mapping\n", instance, __func__, ue_id); \
pthread_mutex_unlock(&globGtp.gtp_lock); \
return GTPNOK; \
}
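The switch from the hard-coded `gtpv1uSend` string to `__func__` works
because a macro body is expanded textually at the call site, so `__func__`
names whichever function invoked the macro (now either `gtpv1uSend` or
`gtpv1uSendDirect`). A standalone illustration of the mechanism, with
hypothetical names:

```c
#include <stdio.h>

/* __func__ is evaluated where the macro expands, not where it is defined. */
#define LOG_FAIL() printf("%s failed\n", __func__)

static void sender_a(void) { LOG_FAIL(); } /* prints "sender_a failed" */
static void sender_b(void) { LOG_FAIL(); } /* prints "sender_b failed" */

int main(void) { sender_a(); sender_b(); return 0; }
```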
......@@ -286,38 +286,48 @@ static int gtpv1uCreateAndSendMsg(int h,
return !GTPNOK;
}
static void gtpv1uSend(instance_t instance, gtpv1u_tunnel_data_req_t *req, bool seqNumFlag, bool npduNumFlag) {
uint8_t *buffer=req->buffer+req->offset;
size_t length=req->length;
ue_id_t ue_id=req->ue_id;
int bearer_id=req->bearer_id;
void gtpv1uSendDirect(instance_t instance,
ue_id_t ue_id,
int bearer_id,
uint8_t *buf,
size_t len,
bool seqNumFlag,
bool npduNumFlag)
{
pthread_mutex_lock(&globGtp.gtp_lock);
getInstRetVoid(compatInst(instance));
getUeRetVoid(inst, ue_id);
auto ptr2=ptrUe->second.bearers.find(bearer_id);
auto ptr2 = ptrUe->second.bearers.find(bearer_id);
if ( ptr2 == ptrUe->second.bearers.end() ) {
LOG_E(GTPU,"[%ld] GTP-U instance: sending a packet to a non existant UE:RAB: %lx/%x\n", instance, ue_id, bearer_id);
if (ptr2 == ptrUe->second.bearers.end()) {
LOG_E(GTPU, "[%ld] GTP-U instance: sending a packet to a non existant UE:RAB: %lx/%x\n", instance, ue_id, bearer_id);
pthread_mutex_unlock(&globGtp.gtp_lock);
return;
}
LOG_D(GTPU,"[%ld] sending a packet to UE:RAB:teid %lx/%x/%x, len %lu, oldseq %d, oldnum %d\n",
instance, ue_id, bearer_id,ptr2->second.teid_outgoing,length, ptr2->second.seqNum,ptr2->second.npduNum );
LOG_D(GTPU,
"[%ld] sending a packet to UE:RAB:teid %lx/%x/%x, len %lu, oldseq %d, oldnum %d\n",
instance,
ue_id,
bearer_id,
ptr2->second.teid_outgoing,
len,
ptr2->second.seqNum,
ptr2->second.npduNum);
if(seqNumFlag)
if (seqNumFlag)
ptr2->second.seqNum++;
if(npduNumFlag)
if (npduNumFlag)
ptr2->second.npduNum++;
// copy to release the mutex
gtpv1u_bearer_t tmp=ptr2->second;
gtpv1u_bearer_t tmp = ptr2->second;
pthread_mutex_unlock(&globGtp.gtp_lock);
if (tmp.outgoing_qfi != -1) {
Gtpv1uExtHeaderT ext = { 0 };
Gtpv1uExtHeaderT ext = {0};
ext.ExtHeaderLen = 1; // in quad bytes EXT_HDR_LNTH_OCTET_UNITS
ext.pdusession_cntr.spare = 0;
ext.pdusession_cntr.PDU_type = UL_PDU_SESSION_INFORMATION;
......@@ -331,8 +341,8 @@ static void gtpv1uSend(instance_t instance, gtpv1u_tunnel_data_req_t *req, bool
tmp.outgoing_port,
GTP_GPDU,
tmp.teid_outgoing,
buffer,
length,
buf,
len,
seqNumFlag,
npduNumFlag,
tmp.seqNum,
......@@ -341,8 +351,20 @@ static void gtpv1uSend(instance_t instance, gtpv1u_tunnel_data_req_t *req, bool
(uint8_t *)&ext,
sizeof(ext));
} else {
gtpv1uCreateAndSendMsg(
compatInst(instance), tmp.outgoing_ip_addr, tmp.outgoing_port, GTP_GPDU, tmp.teid_outgoing, buffer, length, seqNumFlag, npduNumFlag, tmp.seqNum, tmp.npduNum, NO_MORE_EXT_HDRS, NULL, 0);
gtpv1uCreateAndSendMsg(compatInst(instance),
tmp.outgoing_ip_addr,
tmp.outgoing_port,
GTP_GPDU,
tmp.teid_outgoing,
buf,
len,
seqNumFlag,
npduNumFlag,
tmp.seqNum,
tmp.npduNum,
NO_MORE_EXT_HDRS,
NULL,
0);
}
}
......@@ -403,9 +425,10 @@ static void gtpv1uSendDlDeliveryStatus(instance_t instance, gtpv1u_DU_buffer_rep
compatInst(instance), tmp.outgoing_ip_addr, tmp.outgoing_port, GTP_GPDU, tmp.teid_outgoing, NULL, 0, false, false, 0, 0, NR_RAN_CONTAINER, extensionHeader->buffer, extensionHeader->length);
}
static void gtpv1uEndTunnel(instance_t instance, gtpv1u_tunnel_data_req_t *req) {
ue_id_t ue_id=req->ue_id;
int bearer_id=req->bearer_id;
static void gtpv1uEndTunnel(instance_t instance, gtpv1u_enb_end_marker_req_t *req)
{
ue_id_t ue_id=req->rnti;
int bearer_id=req->rab_id;
pthread_mutex_lock(&globGtp.gtp_lock);
getInstRetVoid(compatInst(instance));
getUeRetVoid(inst, ue_id);
......@@ -1287,11 +1310,6 @@ void *gtpv1uTask(void *args) {
switch (msgType) {
// DATA TO BE SENT TO UDP
case GTPV1U_TUNNEL_DATA_REQ: {
gtpv1uSend(compatInst(myInstance), &GTPV1U_TUNNEL_DATA_REQ(message_p), false, false);
}
break;
case GTPV1U_DU_BUFFER_REPORT_REQ:{
gtpv1uSendDlDeliveryStatus(compatInst(myInstance), &GTPV1U_DU_BUFFER_REPORT_REQ(message_p));
}
......@@ -1305,8 +1323,8 @@ void *gtpv1uTask(void *args) {
break;
case GTPV1U_ENB_END_MARKER_REQ:
gtpv1uEndTunnel(compatInst(myInstance), &GTPV1U_TUNNEL_DATA_REQ(message_p));
itti_free(TASK_GTPV1_U, GTPV1U_TUNNEL_DATA_REQ(message_p).buffer);
gtpv1uEndTunnel(compatInst(myInstance), &GTPV1U_ENB_END_MARKER_REQ(message_p));
itti_free(TASK_GTPV1_U, GTPV1U_ENB_END_MARKER_REQ(message_p).buffer);
break;
case GTPV1U_ENB_DATA_FORWARDING_REQ:
......
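Note that the hunk above also fixes what looks like a latent type confusion:
the end-marker case previously read the `GTPV1U_ENB_END_MARKER_REQ` payload
through the `GTPV1U_TUNNEL_DATA_REQ` union accessor, and `gtpv1uEndTunnel()`
took a `gtpv1u_tunnel_data_req_t *`. Both now use the matching
`gtpv1u_enb_end_marker_req_t` accessor and its `rnti`/`rab_id` fields, and the
handler frees that message's `buffer`.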
......@@ -103,6 +103,9 @@ extern "C" {
int newGtpuDeleteOneTunnel(instance_t instance, ue_id_t ue_id, int rb_id);
int newGtpuDeleteAllTunnels(instance_t instance, ue_id_t ue_id);
int newGtpuDeleteTunnels(instance_t instance, ue_id_t ue_id, int nbTunnels, pdusessionid_t *pdusession_id);
void gtpv1uSendDirect(instance_t instance, ue_id_t ue_id, int bearer_id, uint8_t *buf, size_t len, bool seqNumFlag, bool npduNumFlag);
instance_t gtpv1Init(openAddr_t context);
void *gtpv1uTask(void *args);
......
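For callers, the declaration added above replaces the removed
`GTPV1U_TUNNEL_DATA_REQ` message. A minimal caller sketch (the wrapper name is
hypothetical; the tunnel for `ue_id`/`bearer_id` must already exist, otherwise
the function logs an error and returns):

```c
#include "openair3/ocp-gtpu/gtp_itf.h"

/* Hypothetical wrapper: send one user-plane PDU through an existing GTP-U
 * tunnel. All call sites in this merge pass false for both flags. */
static void forward_pdu(instance_t inst, ue_id_t ue_id, int bearer_id,
                        uint8_t *pdu, size_t len)
{
  gtpv1uSendDirect(inst, ue_id, bearer_id, pdu, len,
                   false /* seqNumFlag */, false /* npduNumFlag */);
}
```

As the diff shows, `gtpv1uSendDirect()` still takes the global GTP lock
briefly to look up the bearer, copies the bearer state, and releases the lock
before building the packet, so the caller's thread performs the send
synchronously.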