Commit 6882ea7a authored by Laurent THOMAS

functional new gtpu, to be tested

parent 85aab6e8
@@ -288,7 +288,7 @@ int nr_dlsch_encoding(PHY_VARS_gNB *gNB,
   }
   G = nr_get_G(nb_rb, nb_symb_sch, nb_re_dmrs, length_dmrs,mod_order,rel15->nrOfLayers);
-  LOG_D(PHY,"dlsch coding A %d G %d (nb_rb %d, nb_symb_sch %d, nb_re_dmrs %d, length_dmrs %d, mod_order %d mod_order %d)\n", A,G, nb_rb,nb_symb_sch,nb_re_dmrs,length_dmrs,mod_order);
+  LOG_D(PHY,"dlsch coding A %d G %d (nb_rb %d, nb_symb_sch %d, nb_re_dmrs %d, length_dmrs %d, mod_order %d mod_order %d)\n", A,G, nb_rb,nb_symb_sch,nb_re_dmrs,length_dmrs,(int)mod_order);
   if (A > 3824) {
     // Add 24-bit crc (polynomial A) to payload
...
@@ -151,7 +151,8 @@ void dump_pusch_stats(FILE *fd,PHY_VARS_gNB *gNB) {
                      aa,gNB->ulsch_stats[i].power[aa]/10,gNB->ulsch_stats[i].power[aa]%10,
                      aa,gNB->ulsch_stats[i].noise_power[aa]/10,gNB->ulsch_stats[i].noise_power[aa]%10);
     else stroff+=sprintf(output+stroff," ulsch_power[%d] %d.%d, ulsch_noise_power[%d] %d.%d\n",
-                         aa,gNB->ulsch_stats[i].noise_power[aa]/10,gNB->ulsch_stats[i].noise_power[aa]%10);
+                         aa,gNB->ulsch_stats[i].power[aa]/10,gNB->ulsch_stats[i].power[aa]%10,
+                         gNB->ulsch_stats[i].noise_power[aa]/10,gNB->ulsch_stats[i].noise_power[aa]%10);
   AssertFatal(stroff<(STATSTRLEN-1000),"Increase STATSTRLEN\n");
...
@@ -227,7 +227,8 @@ void nr_dlsim_preprocessor(module_id_t module_id,
   /* manually set free CCE to 0 */
   const int target_ss = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
-  sched_ctrl->search_space = get_searchspace(sched_ctrl->active_bwp, target_ss);
+  NR_ServingCellConfigCommon_t *scc = RC.nrmac[module_id]->common_channels[0].ServingCellConfigCommon;
+  sched_ctrl->search_space = get_searchspace(scc, sched_ctrl->active_bwp, target_ss);
   uint8_t nr_of_candidates;
   find_aggregation_candidates(&sched_ctrl->aggregation_level,
                               &nr_of_candidates,
...
@@ -43,6 +43,8 @@
 #include "NR_MIB.h"
 #include "NR_BCCH-BCH-Message.h"
 #include "rrc_gNB_UE_context.h"
+#include <openair2/RRC/NR/MESSAGES/asn1_msg.h>
 extern RAN_CONTEXT_t RC;
...
@@ -1034,7 +1034,7 @@ void fill_initial_SpCellConfig(rnti_t rnti,
   AssertFatal(scc->downlinkConfigCommon->initialDownlinkBWP->genericParameters.subcarrierSpacing==NR_SubcarrierSpacing_kHz30,
               "SCS != 30kHz\n");
   AssertFatal(scc->tdd_UL_DL_ConfigurationCommon->pattern1.dl_UL_TransmissionPeriodicity==NR_TDD_UL_DL_Pattern__dl_UL_TransmissionPeriodicity_ms5,
-              "TDD period != 5ms : %ld\n",scc->tdd_UL_DL_ConfigurationCommon->pattern1.dl_UL_TransmissionPeriodicity);
+              "TDD period != 5ms : %d\n",scc->tdd_UL_DL_ConfigurationCommon->pattern1.dl_UL_TransmissionPeriodicity);
   schedulingRequestResourceConfig->periodicityAndOffset->choice.sl40 = 10*((rnti>>1)&3) + (rnti&2);
   schedulingRequestResourceConfig->resource = calloc(1,sizeof(*schedulingRequestResourceConfig->resource));
...
@@ -1015,7 +1015,7 @@ rrc_gNB_generate_dedicatedRRCReconfiguration(
         break;
       default:
-        LOG_E(NR_RRC,"not supported 5qi %d\n", ue_context_pP->ue_context.pdusession[i].param.qos[qos_flow_index].fiveQI);
+        LOG_E(NR_RRC,"not supported 5qi %lu\n", ue_context_pP->ue_context.pdusession[i].param.qos[qos_flow_index].fiveQI);
         ue_context_pP->ue_context.pdusession[i].status = PDU_SESSION_STATUS_FAILED;
         ue_context_pP->ue_context.pdusession[i].xid = xid;
         pdu_sessions_done++;
...
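Several of the log fixes above are plain printf format/argument mismatches: mod_order is cast to int, fiveQI is printed with %lu, and the TDD periodicity specifier is adjusted. A minimal stand-alone illustration of the same idea, assuming fiveQI is a 64-bit unsigned field as the %lu change suggests:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t fiveQI = 9;        /* hypothetical 5QI value */
  uint8_t mod_order = 2;      /* small integer type, promoted when passed through "..." */
  /* %d with a 64-bit argument is undefined behaviour on LP64 ABIs;
     either cast to the type the specifier expects, or use PRIu64. */
  printf("not supported 5qi %" PRIu64 ", mod_order %d\n", fiveQI, (int)mod_order);
  return 0;
}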
@@ -85,7 +85,7 @@ class gtpEndPoints {
 gtpEndPoints globGtp;
 // note TEid 0 is reserved for specific usage: echo req/resp, error and supported extensions
 static uint32_t gtpv1uNewTeid(void) {
 #ifdef GTPV1U_LINEAR_TEID_ALLOCATION
   g_gtpv1u_teid = g_gtpv1u_teid + 1;
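The comment above is the invariant any allocator has to respect: TEID 0 is reserved by TS 29.281 for echo request/response, error indication and supported-extension-headers signalling, so it must never be handed to a bearer. The non-linear branch is not visible in this hunk; the sketch below only illustrates that constraint and is not the code behind the #else.

#include <stdint.h>
#include <stdlib.h>

// Illustrative random allocator: never return the reserved TEID 0.
// A real allocator would also reject TEIDs already in use on this endpoint.
static uint32_t exampleNewTeid(void) {
  uint32_t teid;
  do {
    teid = ((uint32_t)rand() << 16) ^ (uint32_t)rand();
  } while (teid == 0);
  return teid;
}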
@@ -121,8 +121,10 @@ static int gtpv1uCreateAndSendMsg(int h, uint32_t peerIp, uint16_t peerPort, te
   msgHdr->version=1;
   msgHdr->msgType=GTP_GPDU;
   msgHdr->msgLength=htons(msgLen);
+  if ( seqNumFlag || extHdrFlag || npduNumFlag)
+    msgHdr->msgLength+=4;
   msgHdr->teid=htonl(teid);
   if(seqNumFlag || extHdrFlag || npduNumFlag) {
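For reference, the += 4 above follows the GTPv1-U header layout of TS 29.281: the Length field counts everything after the mandatory 8-octet header, and setting any of the S, E or PN flags appends a 4-octet optional block (sequence number, N-PDU number, next-extension-header type). A small sketch of that computation, independent of the Gtpv1uMsgHeaderT definition used here:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

// Length field of a GTPv1-U G-PDU: payload size plus the 4 optional octets
// whenever any of the S, E or PN flags is set (TS 29.281).
static uint16_t gtpuLengthField(uint16_t payloadLen,
                                bool seqNumFlag, bool extHdrFlag, bool npduNumFlag) {
  uint16_t len = payloadLen;
  if (seqNumFlag || extHdrFlag || npduNumFlag)
    len += 4;
  return htons(len);  // carried in network byte order on the wire
}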
@@ -192,6 +194,47 @@ static void gtpv1uSend(instance_t instance, gtpv1u_enb_tunnel_data_req_t *req, b
                          buffer, length, seqNumFlag, npduNumFlag, false, tmp.seqNum, tmp.npduNum, 0) ;
 }
+static void gtpv1uSend2(instance_t instance, gtpv1u_gnb_tunnel_data_req_t *req, bool seqNumFlag, bool npduNumFlag) {
+  uint8_t *buffer=req->buffer+req->offset;
+  size_t length=req->length;
+  uint64_t rnti=req->rnti;
+  int rab_id=req->pdusession_id;
+  pthread_mutex_lock(&globGtp.gtp_lock);
+  auto inst=&globGtp.instances[compatInst(instance)];
+  auto ptrRnti=inst->ue2te_mapping.find(rnti);
+  if ( ptrRnti==inst->ue2te_mapping.end() ) {
+    LOG_E(GTPU, "gtpv1uSend failed: while getting ue rnti %lx in hashtable ue_mapping\n", rnti);
+    pthread_mutex_unlock(&globGtp.gtp_lock);
+    return;
+  }
+  auto ptr=ptrRnti->second.bearers;
+  if ( ptr.find(rab_id) == ptr.end() ) {
+    LOG_E(GTPU,"sending a packet to a non existant RNTI:RAB: %lx/%x\n", rnti, rab_id);
+    pthread_mutex_unlock(&globGtp.gtp_lock);
+    return;
+  } else
+    LOG_D(GTPU,"sending a packet to RNTI:RAB:teid %lx/%x/%x, len %lu, oldseq %d, oldnum %d\n",
+          rnti, rab_id,ptr[rab_id].teid_outgoing,length, ptr[rab_id].seqNum,ptr[rab_id].npduNum );
+  if(seqNumFlag)
+    ptr[rab_id].seqNum++;
+  if(npduNumFlag)
+    ptr[rab_id].npduNum++;
+  // We will release the lock, let's copy data before
+  ocp_gtpv1u_bearer_t tmp=ptr[rab_id];
+  pthread_mutex_unlock(&globGtp.gtp_lock);
+  gtpv1uCreateAndSendMsg(compatInst(instance),
+                         tmp.outgoing_ip_addr,
+                         tmp.outgoing_port,
+                         tmp.teid_outgoing,
+                         buffer, length, seqNumFlag, npduNumFlag, false, tmp.seqNum, tmp.npduNum, 0) ;
+}
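gtpv1uSend2 mirrors the eNB-side gtpv1uSend: it resolves rnti and pdusession_id to a bearer while holding globGtp.gtp_lock, copies the bearer into a local ocp_gtpv1u_bearer_t, releases the lock, and only then builds and sends the packet, so the socket I/O never runs under the mutex. A stripped-down sketch of that copy-then-unlock pattern (container and field names simplified, not the actual OAI structures):

#include <cstddef>
#include <cstdint>
#include <map>
#include <pthread.h>

struct ExampleBearer { uint32_t teid_outgoing; uint16_t seqNum; };

static std::map<uint64_t, ExampleBearer> exampleBearers;
static pthread_mutex_t exampleLock = PTHREAD_MUTEX_INITIALIZER;

static void exampleSend(uint64_t rnti, const uint8_t *buf, size_t len) {
  pthread_mutex_lock(&exampleLock);
  auto it = exampleBearers.find(rnti);
  if (it == exampleBearers.end()) {     // unknown UE: give up, but unlock first
    pthread_mutex_unlock(&exampleLock);
    return;
  }
  ExampleBearer tmp = it->second;       // copy the state we need ...
  pthread_mutex_unlock(&exampleLock);   // ... then release the lock
  // build the GTP-U header from tmp.teid_outgoing / tmp.seqNum and send buf/len here
  (void)buf; (void)len; (void)tmp;
}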
 static void gtpv1uEndTunnel(instance_t instance, gtpv1u_enb_tunnel_data_req_t *req) {
   uint64_t rnti=req->rnti;
   int rab_id=req->rab_id;
@@ -214,9 +257,9 @@ static void gtpv1uEndTunnel(instance_t instance, gtpv1u_enb_tunnel_data_req_t *r
   } else
     LOG_D(GTPU,"sending a end packet packet to RNTI:RAB:teid %lx/%x/%x\n",
           rnti, rab_id,ptr[rab_id].teid_outgoing);
   ocp_gtpv1u_bearer_t tmp=ptr[rab_id];
   pthread_mutex_unlock(&globGtp.gtp_lock);
   Gtpv1uMsgHeaderT msgHdr;
   // N should be 0 for us (it was used only in 2G and 3G)
   msgHdr.PN=0;
@@ -229,7 +272,6 @@ static void gtpv1uEndTunnel(instance_t instance, gtpv1u_enb_tunnel_data_req_t *r
   msgHdr.msgType=GTP_END_MARKER;
   msgHdr.msgLength=htons(0);
   msgHdr.teid=htonl(tmp.teid_outgoing);
   // Fix me: add IPv6 support, using flag ipVersion
   static struct sockaddr_in to= {0};
   to.sin_family = AF_INET;
@@ -344,7 +386,8 @@ instance_t ocp_gtpv1Init(openAddr_t context) {
   return id;
 }
-teid_t newGtpuCreateTunnel(instance_t instance, rnti_t rnti, int bearer_id, teid_t teid, transport_layer_addr_t remoteAddr, int port, gtpCallback callBack) {
+teid_t newGtpuCreateTunnel(instance_t instance, rnti_t rnti, int bearer_id, teid_t teid,
+                           transport_layer_addr_t remoteAddr, int port, gtpCallback callBack) {
   pthread_mutex_lock(&globGtp.gtp_lock);
   auto inst=&globGtp.instances[instance];
   auto it=inst->ue2te_mapping.find(rnti);
@@ -459,19 +502,39 @@ int ocp_gtpv1u_update_s1u_tunnel(
   return 0;
 }
-int gtpv1u_create_ngu_tunnel( const instance_t instanceP,
-                              const gtpv1u_gnb_create_tunnel_req_t * const create_tunnel_req_pP,
-                              gtpv1u_gnb_create_tunnel_resp_t * const create_tunnel_resp_pP) {
-  return !GTPNOK;
-}
+int gtpv1u_create_ngu_tunnel( const instance_t instance,
+                              const gtpv1u_gnb_create_tunnel_req_t *const create_tunnel_req,
+                              gtpv1u_gnb_create_tunnel_resp_t *const create_tunnel_resp) {
+  LOG_D(GTPU, "Start create tunnels for RNTI %x, num_tunnels %d, sgw_S1u_teid %d\n",
+        create_tunnel_req->rnti,
+        create_tunnel_req->num_tunnels,
+        create_tunnel_req->upf_NGu_teid[0]);
+  for (int i = 0; i < create_tunnel_req->num_tunnels; i++) {
+    teid_t teid=newGtpuCreateTunnel(compatInst(instance), create_tunnel_req->rnti,
+                                    create_tunnel_req->pdusession_id[i],
+                                    create_tunnel_req->upf_NGu_teid[i],
+                                    create_tunnel_req->upf_addr[i], 2152,
+                                    pdcp_data_req);
+    create_tunnel_resp->status=0;
+    create_tunnel_resp->rnti=create_tunnel_req->rnti;
+    create_tunnel_resp->num_tunnels=create_tunnel_req->num_tunnels;
+    create_tunnel_resp->gnb_NGu_teid[i]=teid;
+    memcpy(create_tunnel_resp->gnb_addr.buffer,globGtp.instances[compatInst(instance)].foundAddr,
+           globGtp.instances[compatInst(instance)].foundAddrLen);
+    create_tunnel_resp->gnb_addr.length= globGtp.instances[compatInst(instance)].foundAddrLen;
+  }
+  return !GTPNOK;
+}
 int gtpv1u_update_ngu_tunnel(
   const instance_t instanceP,
   const gtpv1u_gnb_create_tunnel_req_t *const create_tunnel_req_pP,
   const rnti_t prior_rnti
 ) {
   return GTPNOK;
 }
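A hedged sketch of how a caller on the NGAP/RRC side might drive the new gtpv1u_create_ngu_tunnel: the field names come from the request/response structures used in the diff, the concrete values are invented, and the OAI gtpv1u headers are assumed to be included.

// Illustrative values only; error handling omitted.
static void exampleCreateOneNguTunnel(instance_t instance) {
  gtpv1u_gnb_create_tunnel_req_t  req  = {};
  gtpv1u_gnb_create_tunnel_resp_t resp = {};
  req.rnti             = 0x1234;      // hypothetical UE
  req.num_tunnels      = 1;
  req.pdusession_id[0] = 1;
  req.upf_NGu_teid[0]  = 0x00c0ffee;  // TEID chosen by the UPF
  // req.upf_addr[0] would carry the UPF transport address taken from NGAP
  gtpv1u_create_ngu_tunnel(instance, &req, &resp);
  // resp.gnb_NGu_teid[0] is the locally allocated TEID to report back to the AMF,
  // resp.gnb_addr holds the local GTP-U address and its length.
}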
 int ocp_gtpv1u_create_x2u_tunnel(
   const instance_t instanceP,
@@ -510,8 +573,8 @@ int ocp_gtpv1u_delete_s1u_tunnel( const instance_t instance,
   return newGtpuDeleteTunnel(instance, req_pP->rnti);
 }
 int gtpv1u_delete_x2u_tunnel( const instance_t instanceP,
-                              const gtpv1u_enb_delete_tunnel_req_t * const req_pP,
+                              const gtpv1u_enb_delete_tunnel_req_t *const req_pP,
                               int enbflag) {
   return 0;
 }
@@ -546,15 +609,16 @@ static int Gtpv1uHandleSupportedExt(int h,
   return rc;
 }
 // When end marker arrives, we notify the client with buffer size = 0
 // The client will likely call "delete tunnel"
 // nevertheless we don't take the initiative
 static int Gtpv1uHandleEndMarker(int h,
                                  uint8_t *msgBuf,
                                  uint32_t msgBufLen,
                                  uint16_t peerPort,
                                  uint32_t peerIp) {
   Gtpv1uMsgHeaderT *msgHdr = (Gtpv1uMsgHeaderT *) msgBuf;
   if ( msgHdr->version != 1 || msgHdr->PT != 1 ) {
     LOG_E(GTPU, "Received a packet that is not GTP header\n");
     return GTPNOK;
@@ -570,6 +634,7 @@ static int Gtpv1uHandleEndMarker(int h,
     pthread_mutex_unlock(&globGtp.gtp_lock);
     return GTPNOK;
   }
   // This context is not good for gtp
   // frame, ... has no meaning
   // manyother attributes may come from create tunnel
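The comments above Gtpv1uHandleEndMarker spell out the contract with the client: an end marker is delivered to the registered callback as a zero-length buffer, and the decision to tear the tunnel down is left to the client. A hypothetical client-side callback illustrating that contract (the signature is invented; the callback actually registered in this commit is pdcp_data_req):

#include <cstddef>
#include <cstdint>

// Hypothetical callback: a zero-length delivery means "end marker received",
// anything else is user-plane data for the given session.
static void exampleGtpuClientCallback(uint64_t rnti, int pdusession_id,
                                      const uint8_t *sdu, size_t sdu_len) {
  if (sdu_len == 0) {
    // End marker: the client may now delete the tunnel itself;
    // the GTP layer deliberately does not take that initiative.
    return;
  }
  // otherwise forward sdu/sdu_len towards PDCP/SDAP for this PDU session
  (void)rnti; (void)pdusession_id; (void)sdu;
}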
@@ -750,6 +815,13 @@ void *ocp_gtpv1uTask(void *args) {
       }
       break;
+      case GTPV1U_GNB_TUNNEL_DATA_REQ: {
+        gtpv1uSend2(compatInst(ITTI_MSG_DESTINATION_INSTANCE(message_p)),
+                    &GTPV1U_GNB_TUNNEL_DATA_REQ(message_p), false, false);
+        itti_free(OCP_GTPV1_U, GTPV1U_GNB_TUNNEL_DATA_REQ(message_p).buffer);
+      }
+      break;
      case TERMINATE_MESSAGE:
        break;
...