Commit 122a4e8d authored by Robert Schmidt

Merge remote-tracking branch 'origin/fix-multipdu' into integration_2023_w08b

parents 5b3977ce 1a6dbad0
......@@ -103,7 +103,7 @@ rlc_op_status_t nr_rrc_rlc_config_asn1_req(const protocol_ctxt_t *const ctxt_pP,
return 0;
}
-int nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(const protocol_ctxt_t *const ctxt_pP, const gtpv1u_gnb_create_tunnel_resp_t *const create_tunnel_resp_pP)
+int nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(const protocol_ctxt_t *const ctxt_pP, const gtpv1u_gnb_create_tunnel_resp_t *const create_tunnel_resp_p, int offset)
{
abort();
return 0;
......
......@@ -1014,7 +1014,7 @@ void fill_mastercellGroupConfig(NR_CellGroupConfig_t *cellGroupConfig, NR_CellGr
// DRB Configuration
for (int i = bearer_id_start; i < bearer_id_start + nb_bearers_to_setup; i++ ){
const NR_RLC_Config_PR rlc_conf = use_rlc_um_for_drb ? NR_RLC_Config_PR_um_Bi_Directional : NR_RLC_Config_PR_am;
-NR_RLC_BearerConfig_t *rlc_BearerConfig = get_DRB_RLC_BearerConfig(3 + i, i, rlc_conf, priority[i-1]);
+NR_RLC_BearerConfig_t *rlc_BearerConfig = get_DRB_RLC_BearerConfig(3 + i, i, rlc_conf, priority[0]); // Fixme: priority hardcoded see caller function, all is wrong
asn1cSeqAdd(&cellGroupConfig->rlc_BearerToAddModList->list, rlc_BearerConfig);
asn1cSeqAdd(&ue_context_mastercellGroup->rlc_BearerToAddModList->list, rlc_BearerConfig);
}
......
......@@ -108,20 +108,17 @@ static int drb_config_gtpu_create(const protocol_ctxt_t *const ctxt_p,
gtpv1u_gnb_create_tunnel_req_t create_tunnel_req={0};
gtpv1u_gnb_create_tunnel_resp_t create_tunnel_resp={0};
-for (int i=0; i < ue_context_p->ue_context.nb_of_pdusessions; i++) {
+int i = ue_context_p->ue_context.nb_of_pdusessions - 1;
pdu_session_param_t *pdu = ue_context_p->ue_context.pduSession + i;
-create_tunnel_req.pdusession_id[i] = pdu->param.pdusession_id;
-create_tunnel_req.incoming_rb_id[i] = i + 1;
-create_tunnel_req.outgoing_qfi[i] = req->pduSession[i].DRBnGRanList[0].qosFlows[0].id;
-memcpy(&create_tunnel_req.dst_addr[i].buffer,
-&pdu->param.upf_addr.buffer,
-sizeof(uint8_t)*20);
-create_tunnel_req.dst_addr[i].length = pdu->param.upf_addr.length;
-create_tunnel_req.outgoing_teid[i] = pdu->param.gtp_teid;
-}
-create_tunnel_req.num_tunnels = ue_context_p->ue_context.nb_of_pdusessions;
+create_tunnel_req.pdusession_id[0] = pdu->param.pdusession_id;
+create_tunnel_req.incoming_rb_id[0] = i + 1;
+create_tunnel_req.outgoing_qfi[0] = req->pduSession[i].DRBnGRanList[0].qosFlows[0].id;
+memcpy(&create_tunnel_req.dst_addr[0].buffer, &pdu->param.upf_addr.buffer, sizeof(create_tunnel_req.dst_addr[0].buffer));
+create_tunnel_req.dst_addr[0].length = pdu->param.upf_addr.length;
+create_tunnel_req.outgoing_teid[0] = pdu->param.gtp_teid;
+create_tunnel_req.num_tunnels = 1;
create_tunnel_req.ue_id = ue_context_p->ue_context.rnti;
int ret = gtpv1u_create_ngu_tunnel(getCxtE1(instance)->gtpInstN3, &create_tunnel_req, &create_tunnel_resp);
if (ret != 0) {
......@@ -130,8 +127,7 @@ static int drb_config_gtpu_create(const protocol_ctxt_t *const ctxt_p,
return ret;
}
-nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(ctxt_p,
-&create_tunnel_resp);
+nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(ctxt_p, &create_tunnel_resp, i);
uint8_t *kRRCenc = NULL;
uint8_t *kRRCint = NULL;
......
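The rewritten drb_config_gtpu_create() no longer loops over all PDU sessions: it requests a single tunnel for the most recently added session and later passes that session's UE-context index (nb_of_pdusessions - 1) as the new offset argument of nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(), so the response lands in the matching slot. A minimal sketch of that bookkeeping, using simplified stand-in types rather than the real OAI structures:

```c
#include <stdint.h>

#define MAX_TUNNELS 8                 /* stand-in bound, not the OAI constant */

typedef struct {
  int nb_of_pdusessions;              /* PDU sessions managed for the UE */
  uint32_t gnb_gtp_teid[MAX_TUNNELS]; /* per-session NG-U TEID bookkeeping */
  uint8_t gnb_gtp_psi[MAX_TUNNELS];   /* per-session PDU session id */
} ue_ctx_sketch_t;

/* One tunnel per call: 'offset' is the UE-context index where the reported
 * tunnel belongs, i.e. nb_of_pdusessions - 1 when only the newest session was
 * requested (drb_config_gtpu_create), or 0 on the initial setup paths. */
static void store_tunnel_resp(ue_ctx_sketch_t *ue, int offset, uint32_t teid, uint8_t psi)
{
  ue->gnb_gtp_teid[offset] = teid;    /* mirrors gnb_gtp_teid[i + offset] */
  ue->gnb_gtp_psi[offset] = psi;      /* mirrors gnb_gtp_psi[i + offset]  */
}
```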
......@@ -345,9 +345,7 @@ typedef struct gNB_RRC_UE_s {
uint8_t nb_of_modify_e_rabs;
uint8_t nb_of_failed_e_rabs;
nr_e_rab_param_t modify_e_rab[NB_RB_MAX];//[S1AP_MAX_E_RAB];
-/* Total number of pdu session already setup in the list */
-uint8_t setup_pdu_sessions;
-/* Number of pdu session to be setup in the list */
+/* Number of pdu session managed for the ue */
uint8_t nb_of_pdusessions;
/* Number of e_rab to be modified in the list */
uint8_t nb_of_modify_pdusessions;
......
......@@ -693,8 +693,7 @@ void fill_DRB_configList(const protocol_ctxt_t *const ctxt_pP,
}
*DRB_configList2 = CALLOC(1, sizeof(**DRB_configList2));
memset(*DRB_configList2, 0, sizeof(**DRB_configList2));
-for (i = 0; i < ue_context_pP->ue_context.setup_pdu_sessions; i++) {
+for (i = 0; i < ue_context_pP->ue_context.nb_of_pdusessions; i++) {
if (pdu_sessions_done >= ue_context_pP->ue_context.nb_of_pdusessions) {
break;
}
......@@ -702,6 +701,7 @@ void fill_DRB_configList(const protocol_ctxt_t *const ctxt_pP,
if (ue_context_pP->ue_context.pduSession[i].status >= PDU_SESSION_STATUS_DONE) {
continue;
}
+ue_context_pP->ue_context.pduSession[i].xid = xid;
for(long drb_id_add = 1; drb_id_add <= nb_drb_to_setup; drb_id_add++){
uint8_t drb_id;
......@@ -710,19 +710,18 @@ void fill_DRB_configList(const protocol_ctxt_t *const ctxt_pP,
for (qos_flow_index = 0; qos_flow_index < ue_context_pP->ue_context.pduSession[i].param.nb_qos; qos_flow_index++) {
switch (ue_context_pP->ue_context.pduSession[i].param.qos[qos_flow_index].fiveQI) {
case 1 ... 4: /* GBR */
-drb_id = next_available_drb(ue_p, ue_context_pP->ue_context.pduSession[i].param.pdusession_id, GBR_FLOW);
+drb_id = next_available_drb(ue_p, &ue_context_pP->ue_context.pduSession[i], GBR_FLOW);
break;
case 5 ... 9: /* Non-GBR */
if(rrc->configuration.drbs > 1) /* Force the creation from gNB Conf file - Should be used only in noS1 mode and rfsim for testing purposes. */
-drb_id = next_available_drb(ue_p, ue_context_pP->ue_context.pduSession[i].param.pdusession_id, GBR_FLOW);
+drb_id = next_available_drb(ue_p, &ue_context_pP->ue_context.pduSession[i], GBR_FLOW);
else
-drb_id = next_available_drb(ue_p, ue_context_pP->ue_context.pduSession[i].param.pdusession_id, NONGBR_FLOW);
+drb_id = next_available_drb(ue_p, &ue_context_pP->ue_context.pduSession[i], NONGBR_FLOW);
break;
default:
LOG_E(NR_RRC,"not supported 5qi %lu\n", ue_context_pP->ue_context.pduSession[i].param.qos[qos_flow_index].fiveQI);
ue_context_pP->ue_context.pduSession[i].status = PDU_SESSION_STATUS_FAILED;
-ue_context_pP->ue_context.pduSession[i].xid = xid;
pdu_sessions_done++;
continue;
}
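The 5QI switch above decides whether a QoS flow should go onto a GBR or a non-GBR DRB before next_available_drb() is asked for an identity. A compact, self-contained sketch of that mapping (the configuration.drbs > 1 testing override shown above is ignored here):

```c
#include <stdbool.h>

#define GBR_FLOW (1)    /* values as defined in rrc_gNB_radio_bearers.h */
#define NONGBR_FLOW (0)

/* Standardised 5QI values 1-4 map to GBR, 5-9 to non-GBR; any other value
 * makes fill_DRB_configList() mark the PDU session as failed. */
static bool fiveqi_to_flow_type(long fiveQI, bool *is_gbr)
{
  if (fiveQI >= 1 && fiveQI <= 4) { *is_gbr = GBR_FLOW; return true; }
  if (fiveQI >= 5 && fiveQI <= 9) { *is_gbr = NONGBR_FLOW; return true; }
  return false; /* unsupported 5QI */
}
```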
......@@ -825,23 +824,23 @@ rrc_gNB_generate_dedicatedRRCReconfiguration(
for (int i=0; i < nb_drb_to_setup; i++) {
NR_DRB_ToAddMod_t *DRB_config = DRB_configList->list.array[i];
-if (drb_id_to_setup_start == 1) drb_id_to_setup_start = DRB_config->drb_Identity;
-if (ue_context_pP->ue_context.pduSession[i].param.nas_pdu.buffer != NULL) {
+if (drb_id_to_setup_start == 1)
+drb_id_to_setup_start = DRB_config->drb_Identity;
+int j = ue_context_pP->ue_context.nb_of_pdusessions - 1;
+AssertFatal(j >= 0, "");
+if (ue_context_pP->ue_context.pduSession[j].param.nas_pdu.buffer != NULL) {
dedicatedNAS_Message = CALLOC(1, sizeof(NR_DedicatedNAS_Message_t));
memset(dedicatedNAS_Message, 0, sizeof(OCTET_STRING_t));
-OCTET_STRING_fromBuf(dedicatedNAS_Message,
-(char *)ue_context_pP->ue_context.pduSession[i].param.nas_pdu.buffer,
-ue_context_pP->ue_context.pduSession[i].param.nas_pdu.length);
+OCTET_STRING_fromBuf(dedicatedNAS_Message, (char *)ue_context_pP->ue_context.pduSession[j].param.nas_pdu.buffer, ue_context_pP->ue_context.pduSession[j].param.nas_pdu.length);
asn1cSeqAdd(&dedicatedNAS_MessageList->list, dedicatedNAS_Message);
-LOG_I(NR_RRC,"add NAS info with size %d (pdusession id %d)\n",ue_context_pP->ue_context.pduSession[i].param.nas_pdu.length, i);
+LOG_I(NR_RRC, "add NAS info with size %d (pdusession idx %d)\n", ue_context_pP->ue_context.pduSession[j].param.nas_pdu.length, j);
} else {
// TODO
-LOG_E(NR_RRC,"no NAS info (pdusession id %d)\n", i);
+LOG_E(NR_RRC, "no NAS info (pdusession idx %d)\n", j);
}
-xid = ue_context_pP->ue_context.pduSession[i].xid;
+xid = ue_context_pP->ue_context.pduSession[j].xid;
}
/* If list is empty free the list and reset the address */
......@@ -1317,8 +1316,8 @@ rrc_gNB_process_RRCReconfigurationComplete(
(int)DRB_configList->list.array[i]->drb_Identity);
//(int)*DRB_configList->list.array[i]->pdcp_Config->moreThanOneRLC->primaryPath.logicalChannel);
-if (ue_context_pP->ue_context.DRB_active[drb_id] == 0) {
-ue_context_pP->ue_context.DRB_active[drb_id] = 1;
+if (ue_context_pP->ue_context.DRB_active[drb_id - 1] == 0) {
+ue_context_pP->ue_context.DRB_active[drb_id - 1] = DRB_ACTIVE;
LOG_D(NR_RRC, "[gNB %d] Frame %d: Establish RLC UM Bidirectional, DRB %d Active\n",
ctxt_pP->module_id, ctxt_pP->frame, (int)DRB_configList->list.array[i]->drb_Identity);
......@@ -3861,10 +3860,7 @@ void rrc_gNB_process_e1_bearer_context_setup_resp(e1ap_bearer_setup_resp_t *resp
create_tunnel_resp.gnb_addr.length = sizeof(in_addr_t); // IPv4 byte length
}
-nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(&ctxt,
-&create_tunnel_resp);
-ue_context_p->ue_context.setup_pdu_sessions += resp->numPDUSessions;
+nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(&ctxt, &create_tunnel_resp, 0);
// TODO: SV: combine e1ap_bearer_setup_req_t and e1ap_bearer_setup_resp_t and minimize assignments
prepare_and_send_ue_context_modification_f1(ue_context_p, resp);
......
......@@ -40,6 +40,7 @@
//#endif
# include "common/ran_context.h"
+#include "openair2/RRC/NR/rrc_gNB_GTPV1U.h"
extern RAN_CONTEXT_t RC;
......@@ -78,11 +79,8 @@ rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(
}
}
-int
-nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(
-const protocol_ctxt_t *const ctxt_pP,
-const gtpv1u_gnb_create_tunnel_resp_t *const create_tunnel_resp_pP
-) {
+int nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(const protocol_ctxt_t *const ctxt_pP, const gtpv1u_gnb_create_tunnel_resp_t *const create_tunnel_resp_pP, int offset)
+{
int i;
struct rrc_gNB_ue_context_s *ue_context_p = NULL;
......@@ -93,13 +91,14 @@ nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(
ue_context_p = rrc_gNB_get_ue_context(RC.nrrrc[ctxt_pP->module_id], ctxt_pP->rntiMaybeUEid);
for (i = 0; i < create_tunnel_resp_pP->num_tunnels; i++) {
-ue_context_p->ue_context.gnb_gtp_teid[i] = create_tunnel_resp_pP->gnb_NGu_teid[i];
-ue_context_p->ue_context.gnb_gtp_addrs[i] = create_tunnel_resp_pP->gnb_addr;
-ue_context_p->ue_context.gnb_gtp_psi[i] = create_tunnel_resp_pP->pdusession_id[i];
-LOG_I(NR_RRC, PROTOCOL_NR_RRC_CTXT_UE_FMT" nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP tunnel (%u, %u) bearer UE context index %u, id %u, gtp addr len %d \n",
+ue_context_p->ue_context.gnb_gtp_teid[i + offset] = create_tunnel_resp_pP->gnb_NGu_teid[i];
+ue_context_p->ue_context.gnb_gtp_addrs[i + offset] = create_tunnel_resp_pP->gnb_addr;
+ue_context_p->ue_context.gnb_gtp_psi[i + offset] = create_tunnel_resp_pP->pdusession_id[i];
+LOG_I(NR_RRC,
+PROTOCOL_NR_RRC_CTXT_UE_FMT " nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP tunnel (%u, %u) bearer UE context index %u, id %u, gtp addr len %d \n",
PROTOCOL_NR_RRC_CTXT_UE_ARGS(ctxt_pP),
create_tunnel_resp_pP->gnb_NGu_teid[i],
-ue_context_p->ue_context.gnb_gtp_teid[i],
+ue_context_p->ue_context.gnb_gtp_teid[i + offset],
i,
create_tunnel_resp_pP->pdusession_id[i],
create_tunnel_resp_pP->gnb_addr.length);
......
......@@ -38,10 +38,6 @@ rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(
uint8_t *inde_list
);
-int
-nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(
-const protocol_ctxt_t *const ctxt_pP,
-const gtpv1u_gnb_create_tunnel_resp_t *const create_tunnel_resp_pP
-);
+int nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(const protocol_ctxt_t *const ctxt_pP, const gtpv1u_gnb_create_tunnel_resp_t *const create_tunnel_resp_pP, int offset_in_rrc);
#endif
......@@ -548,10 +548,8 @@ rrc_gNB_process_NGAP_INITIAL_CONTEXT_SETUP_REQ(
return (0);
}
-nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(
-&ctxt,
-&create_tunnel_resp);
-ue_context_p->ue_context.setup_pdu_sessions += NGAP_INITIAL_CONTEXT_SETUP_REQ (msg_p).nb_of_pdusessions;
+nr_rrc_gNB_process_GTPV1U_CREATE_TUNNEL_RESP(&ctxt, &create_tunnel_resp, 0);
+ue_context_p->ue_context.nb_of_pdusessions += NGAP_INITIAL_CONTEXT_SETUP_REQ(msg_p).nb_of_pdusessions;
ue_context_p->ue_context.established_pdu_sessions_flag = 1;
}
......@@ -890,7 +888,7 @@ rrc_gNB_send_NGAP_PDUSESSION_SETUP_RESP(
msg_p = itti_alloc_new_message (TASK_RRC_GNB, 0, NGAP_PDUSESSION_SETUP_RESP);
NGAP_PDUSESSION_SETUP_RESP(msg_p).gNB_ue_ngap_id = ue_context_pP->ue_context.gNB_ue_ngap_id;
-for (pdusession = 0; pdusession < ue_context_pP->ue_context.setup_pdu_sessions; pdusession++) {
+for (pdusession = 0; pdusession < ue_context_pP->ue_context.nb_of_pdusessions; pdusession++) {
// if (xid == ue_context_pP->ue_context.pdusession[pdusession].xid) {
if (ue_context_pP->ue_context.pduSession[pdusession].status == PDU_SESSION_STATUS_DONE) {
pdusession_setup_t * tmp=&NGAP_PDUSESSION_SETUP_RESP(msg_p).pdusessions[pdu_sessions_done];
......@@ -933,8 +931,11 @@ rrc_gNB_send_NGAP_PDUSESSION_SETUP_RESP(
}
if ((pdu_sessions_done > 0) ) {
-LOG_I(NR_RRC,"NGAP_PDUSESSION_SETUP_RESP: sending the message: nb_of_pdusessions %d, total pdu_sessions %d, index %d\n",
-ue_context_pP->ue_context.nb_of_pdusessions, ue_context_pP->ue_context.setup_pdu_sessions, pdusession);
+LOG_I(NR_RRC,
+"NGAP_PDUSESSION_SETUP_RESP: sending the message: nb_of_pdusessions %d, total pdu_sessions %d, index %d\n",
+ue_context_pP->ue_context.nb_of_pdusessions,
+ue_context_pP->ue_context.nb_of_pdusessions,
+pdusession);
itti_send_msg_to_task (TASK_NGAP, ctxt_pP->instance, msg_p);
}
......@@ -979,7 +980,6 @@ rrc_gNB_process_NGAP_PDUSESSION_SETUP_REQ(
return ;
}
-ue_context_p->ue_context.nb_of_pdusessions = msg->nb_pdusessions_tosetup;
ue_context_p->ue_context.gNB_ue_ngap_id = msg->gNB_ue_ngap_id;
ue_context_p->ue_context.amf_ue_ngap_id = msg->amf_ue_ngap_id;
......@@ -1000,11 +1000,16 @@ rrc_gNB_process_NGAP_PDUSESSION_SETUP_REQ(
bearer_req.ueDlAggMaxBitRate = msg->ueAggMaxBitRateDownlink;
bearer_req.numPDUSessions = msg->nb_pdusessions_tosetup;
+int xid = rrc_gNB_get_next_transaction_identifier(ctxt.module_id);
for (int i=0; i < bearer_req.numPDUSessions; i++) {
-ue_context_p->ue_context.pduSession[i].param = msg->pdusession_setup_params[i];
+int idx = ue_context_p->ue_context.nb_of_pdusessions++;
pdu_session_to_setup_t *pdu = bearer_req.pduSession + i;
+pdu->numDRB2Setup = 1; // One DRB per PDU Session. TODO: Remove hardcoding
+ue_context_p->ue_context.pduSession[idx].param = msg->pdusession_setup_params[i];
+ue_context_p->ue_context.pduSession[idx].xid = xid;
+ue_context_p->ue_context.pduSession[idx].status = PDU_SESSION_STATUS_NEW;
pdu->sessionId = msg->pdusession_setup_params[i].pdusession_id;
pdu->sessionType = msg->pdusession_setup_params[i].upf_addr.pdu_session_type;
pdu->sst = msg->allowed_nssai[i].sST;
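Each requested session is now appended to the UE context at idx = nb_of_pdusessions++ and tagged with the transaction id and the NEW status, rather than overwriting the counter from the message. A self-contained sketch of that pattern with simplified stand-in types (the real pdu_session_param_t carries much more state):

```c
#include <stdint.h>

#define MAX_PDU_SESSIONS 8           /* stand-in bound, not the OAI constant */

typedef enum { PDU_SESSION_STATUS_NEW, PDU_SESSION_STATUS_DONE, PDU_SESSION_STATUS_FAILED } pdu_status_sketch_t;

typedef struct {
  int pdusession_id;
  int xid;                           /* RRC transaction setting the session up */
  pdu_status_sketch_t status;
} pdu_session_sketch_t;

typedef struct {
  int nb_of_pdusessions;             /* the single counter kept after this change */
  pdu_session_sketch_t pduSession[MAX_PDU_SESSIONS];
} ue_ctx_sessions_t;

/* Mirrors rrc_gNB_process_NGAP_PDUSESSION_SETUP_REQ(): append, don't overwrite. */
static int add_pdu_session(ue_ctx_sessions_t *ue, int pdusession_id, int xid)
{
  if (ue->nb_of_pdusessions >= MAX_PDU_SESSIONS)
    return -1;                       /* no room left */
  int idx = ue->nb_of_pdusessions++;
  ue->pduSession[idx].pdusession_id = pdusession_id;
  ue->pduSession[idx].xid = xid;
  ue->pduSession[idx].status = PDU_SESSION_STATUS_NEW;
  return idx;
}
```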
......@@ -1024,12 +1029,9 @@ rrc_gNB_process_NGAP_PDUSESSION_SETUP_REQ(
msg->pdusession_setup_params[i].upf_addr.buffer,
sizeof(uint8_t)*4);
-ue_context_p->ue_context.pduSession[i].param = msg->pdusession_setup_params[i];
-ue_context_p->ue_context.nb_of_pdusessions = msg->nb_pdusessions_tosetup;
+ue_context_p->ue_context.pduSession[idx].param = msg->pdusession_setup_params[i];
ue_context_p->ue_context.gNB_ue_ngap_id = msg->gNB_ue_ngap_id;
ue_context_p->ue_context.amf_ue_ngap_id = msg->amf_ue_ngap_id;
-pdu->numDRB2Setup = 1; // One DRB per PDU Session. TODO: Remove hardcoding
-ue_context_p->ue_context.setup_pdu_sessions += pdu->numDRB2Setup;
for (int j=0; j < pdu->numDRB2Setup; j++) {
DRB_nGRAN_to_setup_t *drb = pdu->DRBnGRanList + j;
......@@ -1059,9 +1061,9 @@ rrc_gNB_process_NGAP_PDUSESSION_SETUP_REQ(
for (int k=0; k < drb->numQosFlow2Setup; k++) {
qos_flow_to_setup_t *qos = drb->qosFlows + k;
-qos->id = msg->pdusession_setup_params[i].qos[k].qfi;
-qos->fiveQI = msg->pdusession_setup_params[i].qos[k].fiveQI;
-qos->fiveQI_type = msg->pdusession_setup_params[i].qos[k].fiveQI_type;
+qos->id = msg->pdusession_setup_params[j].qos[k].qfi;
+qos->fiveQI = msg->pdusession_setup_params[j].qos[k].fiveQI;
+qos->fiveQI_type = msg->pdusession_setup_params[j].qos[k].fiveQI_type;
qos->qoSPriorityLevel = msg->pdusession_setup_params[i].qos[k].allocation_retention_priority.priority_level;
qos->pre_emptionCapability = msg->pdusession_setup_params[i].qos[k].allocation_retention_priority.pre_emp_capability;
......@@ -1263,7 +1265,7 @@ rrc_gNB_send_NGAP_PDUSESSION_MODIFY_RESP(
for (i = 0; i < ue_context_pP->ue_context.nb_of_modify_pdusessions; i++) {
if (xid == ue_context_pP->ue_context.modify_pdusession[i].xid) {
if (ue_context_pP->ue_context.modify_pdusession[i].status == PDU_SESSION_STATUS_DONE) {
-for (j = 0; j < ue_context_pP->ue_context.setup_pdu_sessions; j++) {
+for (j = 0; j < ue_context_pP->ue_context.nb_of_pdusessions; j++) {
if (ue_context_pP->ue_context.modify_pdusession[i].param.pdusession_id ==
ue_context_pP->ue_context.pduSession[j].param.pdusession_id) {
LOG_I(NR_RRC, "update pdu session %d \n", ue_context_pP->ue_context.pduSession[j].param.pdusession_id);
......@@ -1278,7 +1280,7 @@ rrc_gNB_send_NGAP_PDUSESSION_MODIFY_RESP(
}
}
-if (j < ue_context_pP->ue_context.setup_pdu_sessions) {
+if (j < ue_context_pP->ue_context.nb_of_pdusessions) {
NGAP_PDUSESSION_MODIFY_RESP(msg_p).pdusessions[pdu_sessions_done].pdusession_id =
ue_context_pP->ue_context.modify_pdusession[i].param.pdusession_id;
for (qos_flow_index = 0; qos_flow_index < ue_context_pP->ue_context.modify_pdusession[i].param.nb_qos; qos_flow_index++) {
......@@ -1321,8 +1323,11 @@ rrc_gNB_send_NGAP_PDUSESSION_MODIFY_RESP(
NGAP_PDUSESSION_MODIFY_RESP(msg_p).nb_of_pdusessions_failed = pdu_sessions_failed;
if (pdu_sessions_done > 0 || pdu_sessions_failed > 0) {
-LOG_D(NR_RRC,"NGAP_PDUSESSION_MODIFY_RESP: sending the message: nb_of_pdusessions %d, total pdu session %d, index %d\n",
-ue_context_pP->ue_context.nb_of_modify_pdusessions, ue_context_pP->ue_context.setup_pdu_sessions, i);
+LOG_D(NR_RRC,
+"NGAP_PDUSESSION_MODIFY_RESP: sending the message: nb_of_pdusessions %d, total pdu session %d, index %d\n",
+ue_context_pP->ue_context.nb_of_modify_pdusessions,
+ue_context_pP->ue_context.nb_of_pdusessions,
+i);
itti_send_msg_to_task (TASK_NGAP, ctxt_pP->instance, msg_p);
} else {
itti_free (ITTI_MSG_ORIGIN_ID(msg_p), msg_p);
......@@ -1348,8 +1353,8 @@ rrc_gNB_send_NGAP_UE_CONTEXT_RELEASE_REQ(
NGAP_UE_CONTEXT_RELEASE_REQ(msg_context_release_req_p).gNB_ue_ngap_id = ue_context_pP->ue_context.gNB_ue_ngap_id;
NGAP_UE_CONTEXT_RELEASE_REQ(msg_context_release_req_p).cause = causeP;
NGAP_UE_CONTEXT_RELEASE_REQ(msg_context_release_req_p).cause_value = cause_valueP;
-NGAP_UE_CONTEXT_RELEASE_REQ(msg_context_release_req_p).nb_of_pdusessions = ue_context_pP->ue_context.setup_pdu_sessions;
-for (int pdusession = 0; pdusession < ue_context_pP->ue_context.setup_pdu_sessions; pdusession++) {
+NGAP_UE_CONTEXT_RELEASE_REQ(msg_context_release_req_p).nb_of_pdusessions = ue_context_pP->ue_context.nb_of_pdusessions;
+for (int pdusession = 0; pdusession < ue_context_pP->ue_context.nb_of_pdusessions; pdusession++) {
NGAP_UE_CONTEXT_RELEASE_REQ(msg_context_release_req_p).pdusessions[pdusession].pdusession_id = ue_context_pP->ue_context.pduSession[pdusession].param.pdusession_id;
}
itti_send_msg_to_task(TASK_NGAP, GNB_MODULE_ID_TO_INSTANCE(gnb_mod_idP), msg_context_release_req_p);
......@@ -1565,10 +1570,12 @@ rrc_gNB_send_NGAP_PDUSESSION_RELEASE_RESPONSE(
NGAP_PDUSESSION_RELEASE_RESPONSE (msg_p).nb_of_pdusessions_failed = ue_context_pP->ue_context.nb_release_of_pdusessions;
memcpy(&(NGAP_PDUSESSION_RELEASE_RESPONSE (msg_p).pdusessions_failed[0]), &ue_context_pP->ue_context.pdusessions_release_failed[0],
sizeof(pdusession_failed_t)*ue_context_pP->ue_context.nb_release_of_pdusessions);
-ue_context_pP->ue_context.setup_pdu_sessions -= pdu_sessions_released;
-LOG_I(NR_RRC,"NGAP PDUSESSION RELEASE RESPONSE: GNB_UE_NGAP_ID %u release_pdu_sessions %d setup_pdu_sessions %d \n",
-NGAP_PDUSESSION_RELEASE_RESPONSE (msg_p).gNB_ue_ngap_id,
-pdu_sessions_released, ue_context_pP->ue_context.setup_pdu_sessions);
+ue_context_pP->ue_context.nb_of_pdusessions -= pdu_sessions_released;
+LOG_I(NR_RRC,
+"NGAP PDUSESSION RELEASE RESPONSE: GNB_UE_NGAP_ID %u release_pdu_sessions %d setup_pdu_sessions %d \n",
+NGAP_PDUSESSION_RELEASE_RESPONSE(msg_p).gNB_ue_ngap_id,
+pdu_sessions_released,
+ue_context_pP->ue_context.nb_of_pdusessions);
itti_send_msg_to_task (TASK_NGAP, ctxt_pP->instance, msg_p);
//clear xid
......
......@@ -22,12 +22,8 @@
#include "rrc_gNB_radio_bearers.h"
#include "oai_asn1.h"
-NR_DRB_ToAddMod_t *generateDRB(gNB_RRC_UE_t *ue,
-uint8_t drb_id,
-const pdu_session_param_t *pduSession,
-bool enable_sdap,
-int do_drb_integrity,
-int do_drb_ciphering) {
+NR_DRB_ToAddMod_t *generateDRB(gNB_RRC_UE_t *ue, uint8_t drb_id, pdu_session_param_t *pduSession, bool enable_sdap, int do_drb_integrity, int do_drb_ciphering)
+{
NR_DRB_ToAddMod_t *DRB_config = NULL;
NR_SDAP_Config_t *SDAP_config = NULL;
......@@ -38,9 +34,7 @@ NR_DRB_ToAddMod_t *generateDRB(gNB_RRC_UE_t *ue,
/* SDAP Configuration */
SDAP_config = CALLOC(1, sizeof(NR_SDAP_Config_t));
memset(SDAP_config, 0, sizeof(NR_SDAP_Config_t));
-SDAP_config->mappedQoS_FlowsToAdd = calloc(1, sizeof(struct NR_SDAP_Config__mappedQoS_FlowsToAdd));
-memset(SDAP_config->mappedQoS_FlowsToAdd, 0, sizeof(struct NR_SDAP_Config__mappedQoS_FlowsToAdd));
SDAP_config->pdu_Session = pduSession->param.pdusession_id;
......@@ -61,9 +55,9 @@ NR_DRB_ToAddMod_t *generateDRB(gNB_RRC_UE_t *ue,
asn1cSeqAdd(&SDAP_config->mappedQoS_FlowsToAdd->list, qfi);
if(pduSession->param.qos[qos_flow_index].fiveQI > 5)
-ue->pduSession[pduSession->param.pdusession_id].param.used_drbs[drb_id-1] = DRB_ACTIVE_NONGBR;
+pduSession->param.used_drbs[drb_id - 1] = DRB_ACTIVE_NONGBR;
else
-ue->pduSession[pduSession->param.pdusession_id].param.used_drbs[drb_id-1] = DRB_ACTIVE;
+pduSession->param.used_drbs[drb_id - 1] = DRB_ACTIVE;
}
SDAP_config->mappedQoS_FlowsToRelease = NULL;
......@@ -110,12 +104,13 @@ NR_DRB_ToAddMod_t *generateDRB(gNB_RRC_UE_t *ue,
return DRB_config;
}
-uint8_t next_available_drb(gNB_RRC_UE_t *ue, uint8_t pdusession_id, bool is_gbr) {
+uint8_t next_available_drb(gNB_RRC_UE_t *ue, pdu_session_param_t *pdusession, bool is_gbr)
+{
uint8_t drb_id;
if(!is_gbr) { /* Find if Non-GBR DRB exists in the same PDU Session */
for (drb_id = 0; drb_id < NGAP_MAX_DRBS_PER_UE; drb_id++)
-if(ue->pduSession[pdusession_id].param.used_drbs[drb_id] == DRB_ACTIVE_NONGBR)
+if (pdusession->param.used_drbs[drb_id] == DRB_ACTIVE_NONGBR)
return drb_id+1;
}
/* GBR Flow or a Non-GBR DRB does not exist in the same PDU Session, find an available DRB */
......@@ -128,6 +123,7 @@ uint8_t next_available_drb(gNB_RRC_UE_t *ue, uint8_t pdusession_id, bool is_gbr)
}
bool drb_is_active(gNB_RRC_UE_t *ue, uint8_t drb_id) {
+DevAssert(drb_id > 0);
if(ue->DRB_active[drb_id-1] == DRB_ACTIVE)
return true;
return false;
......
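next_available_drb() now receives the pdu_session_param_t pointer directly and scans its per-session used_drbs table, instead of indexing ue->pduSession by the session id. A self-contained sketch of that lookup with simplified stand-in types; the UE-wide fallback search is an assumption based on the drb_is_active() check shown above, not a copy of the elided OAI code:

```c
#include <stdbool.h>
#include <stdint.h>

#define MAX_DRBS 32                  /* stand-in for NGAP_MAX_DRBS_PER_UE */
#define DRB_INACTIVE 0
#define DRB_ACTIVE 1
#define DRB_ACTIVE_NONGBR 2

typedef struct {
  uint8_t used_drbs[MAX_DRBS];       /* per-session usage, as in pdu->param.used_drbs */
} session_sketch_t;

typedef struct {
  uint8_t DRB_active[MAX_DRBS];      /* per-UE usage, as in ue->DRB_active */
} ue_sketch_t;

/* Non-GBR flows reuse an existing non-GBR DRB of the same session when one
 * exists; otherwise the first DRB that is free UE-wide is taken (assumed). */
static uint8_t pick_drb(ue_sketch_t *ue, session_sketch_t *sess, bool is_gbr)
{
  if (!is_gbr)
    for (uint8_t d = 0; d < MAX_DRBS; d++)
      if (sess->used_drbs[d] == DRB_ACTIVE_NONGBR)
        return d + 1;                /* DRB identities are 1-based */
  for (uint8_t d = 0; d < MAX_DRBS; d++)
    if (ue->DRB_active[d] == DRB_INACTIVE)
      return d + 1;
  return 0;                          /* no DRB available */
}
```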
......@@ -35,14 +35,10 @@
#define GBR_FLOW (1)
#define NONGBR_FLOW (0)
-NR_DRB_ToAddMod_t *generateDRB(gNB_RRC_UE_t *rrc_ue,
-uint8_t drb_id,
-const pdu_session_param_t *pduSession,
-bool enable_sdap,
-int do_drb_integrity,
-int do_drb_ciphering);
+NR_DRB_ToAddMod_t *generateDRB(gNB_RRC_UE_t *rrc_ue, uint8_t drb_id, pdu_session_param_t *pduSession, bool enable_sdap, int do_drb_integrity, int do_drb_ciphering);
+uint8_t next_available_drb(gNB_RRC_UE_t *ue, pdu_session_param_t *pdusession, bool is_gbr);
-uint8_t next_available_drb(gNB_RRC_UE_t *ue, uint8_t pdusession_id, bool is_gbr);
bool drb_is_active(gNB_RRC_UE_t *ue, uint8_t drb_id);
#endif