Commit 2a7b6acb authored by zhenghuangkun

Fix X2HO Segfault

parent 4a95fd89
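The diff below adds a per-UE mutex (handover_cond_lock) and takes it around every access to handover_info, so that the RRC free path and the GTP-U/ITTI readers can no longer race on a pointer that is freed mid-use. As a reading aid, here is a minimal, self-contained sketch of that pattern; the types and function names (handover_info_t, ue_ctx_t, use_handover_info, release_handover_info) are simplified stand-ins invented for illustration, not the actual OAI structures.

    /* Sketch of the commit's locking pattern, assuming simplified stand-in types. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {                      /* stand-in for HANDOVER_INFO */
      int state;
    } handover_info_t;

    typedef struct {                      /* stand-in for the per-UE RRC context */
      pthread_mutex_t handover_cond_lock;
      handover_info_t *handover_info;
    } ue_ctx_t;

    /* Reader path (cf. check_handovers / the GTP-U callback): cheap unlocked
     * check first, then re-check the pointer under the lock before using it. */
    static void use_handover_info(ue_ctx_t *ue)
    {
      if (ue->handover_info == NULL)
        return;
      pthread_mutex_lock(&ue->handover_cond_lock);
      if (ue->handover_info != NULL)
        printf("handover state %d\n", ue->handover_info->state);
      pthread_mutex_unlock(&ue->handover_cond_lock);
    }

    /* Writer path (cf. rrc_eNB_free_mem_UE_context): free and NULL the pointer
     * while holding the same lock, so readers never dereference freed memory. */
    static void release_handover_info(ue_ctx_t *ue)
    {
      pthread_mutex_lock(&ue->handover_cond_lock);
      free(ue->handover_info);
      ue->handover_info = NULL;
      pthread_mutex_unlock(&ue->handover_cond_lock);
    }

    int main(void)
    {
      ue_ctx_t ue;
      pthread_mutex_init(&ue.handover_cond_lock, NULL);  /* on context allocation */
      ue.handover_info = calloc(1, sizeof(*ue.handover_info));

      use_handover_info(&ue);
      release_handover_info(&ue);
      use_handover_info(&ue);                             /* safe: pointer is NULL */

      pthread_mutex_destroy(&ue.handover_cond_lock);      /* on context removal */
      return 0;
    }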
@@ -644,6 +644,7 @@ typedef struct eNB_RRC_UE_s {
 SRB_INFO_TABLE_ENTRY Srb1;
 SRB_INFO_TABLE_ENTRY Srb2;
 LTE_MeasConfig_t *measConfig;
+pthread_mutex_t handover_cond_lock;
 HANDOVER_INFO *handover_info;
 MEASUREMENT_INFO *measurement_info;
 LTE_MeasResults_t *measResults;
...
@@ -975,11 +975,13 @@ rrc_eNB_free_mem_UE_context(
 ASN_STRUCT_FREE(asn_DEF_LTE_MeasGapConfig, ue_context_pP->ue_context.measGapConfig);
 ue_context_pP->ue_context.measGapConfig = NULL;
 }*/
+pthread_mutex_lock(&ue_context_pP->ue_context.handover_cond_lock);
 if (ue_context_pP->ue_context.handover_info) {
 /* TODO: be sure free is enough here (check memory leaks) */
 free(ue_context_pP->ue_context.handover_info);
 ue_context_pP->ue_context.handover_info = NULL;
 }
+pthread_mutex_unlock(&ue_context_pP->ue_context.handover_cond_lock);
 if (ue_context_pP->ue_context.measurement_info) {
 /* TODO: be sure free is enough here (check memory leaks) */
@@ -5088,7 +5090,12 @@ check_handovers(
 RB_FOREACH(ue_context_p, rrc_ue_tree_s, &(RC.rrc[ctxt_pP->module_id]->rrc_ue_head)) {
 ctxt_pP->rnti = ue_context_p->ue_id_rnti;
-if (ue_context_p->ue_context.Status == RRC_HO_EXECUTION && ue_context_p->ue_context.handover_info != NULL) {
+if(ue_context_p->ue_context.handover_info != NULL) {
+pthread_mutex_lock(&ue_context_p->ue_context.handover_cond_lock);
+if (ue_context_p->ue_context.Status == RRC_HO_EXECUTION &&
+ue_context_p->ue_context.handover_info != NULL) {
 /* in the source, UE in HO_PREPARE mode */
 if (ue_context_p->ue_context.handover_info->state == HO_PREPARE) {
 LOG_D(RRC,
@@ -5242,17 +5249,17 @@ check_handovers(
 GTPV1U_ENB_END_MARKER_IND (msg_p).frame,
 0,
 GTPV1U_ENB_END_MARKER_IND (msg_p).eNB_index);
-LOG_I(RRC, PROTOCOL_CTXT_FMT"[check_handovers]Received %s from %s: instance %d, rb_id %d, muiP %d, confirmP %d, mode %d\n",
+LOG_D(RRC, PROTOCOL_CTXT_FMT"[check_handovers]Received %s from %s: instance %d, rb_id %d, muiP %d, confirmP %d, mode %d\n",
 PROTOCOL_CTXT_ARGS(&ctxt),
 ITTI_MSG_NAME (msg_p),
 ITTI_MSG_ORIGIN_NAME(msg_p),
 ITTI_MSG_INSTANCE (msg_p),
-GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).rb_id,
-GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).muip,
-GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).confirmp,
-GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).mode);
-LOG_D(RRC, "Before calling pdcp_data_req from check_handovers! GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).rb_id: %d \n", GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).rb_id);
+GTPV1U_ENB_END_MARKER_IND (msg_p).rb_id,
+GTPV1U_ENB_END_MARKER_IND (msg_p).muip,
+GTPV1U_ENB_END_MARKER_IND (msg_p).confirmp,
+GTPV1U_ENB_END_MARKER_IND (msg_p).mode);
+LOG_I(RRC, "Before calling pdcp_data_req from check_handovers! GTPV1U_ENB_END_MARKER_IND (msg_p).rb_id: %d \n", GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).rb_id);
 result = pdcp_data_req (&ctxt,
 SRB_FLAG_NO,
 GTPV1U_ENB_END_MARKER_IND (msg_p).rb_id,
@@ -5295,6 +5302,9 @@ check_handovers(
 #endif
 }
+pthread_mutex_unlock(&ue_context_p->ue_context.handover_cond_lock);
+}
 }
 }
@@ -9165,6 +9175,8 @@ void *rrc_enb_process_itti_msg(void *notUsed) {
 ue_context_p = rrc_eNB_get_ue_context(RC.rrc[instance], X2AP_HANDOVER_CANCEL(msg_p).rnti);
 if (ue_context_p != NULL &&
 ue_context_p->ue_context.handover_info != NULL) {
+pthread_mutex_lock(&ue_context_p->ue_context.handover_cond_lock);
+if(ue_context_p->ue_context.handover_info != NULL) {
 LOG_I(RRC, "[eNB %d] eNB receives X2 HANDOVER CANCEL for rnti %x, cause %s [%s]\n",
 instance,
 X2AP_HANDOVER_CANCEL(msg_p).rnti,
@@ -9205,6 +9217,8 @@ void *rrc_enb_process_itti_msg(void *notUsed) {
 ue_context_p->ue_context.handover_info->state = HO_RELEASE;
 }
 }
+}
+pthread_mutex_unlock(&ue_context_p->ue_context.handover_cond_lock);
 } else {
 char *failure_cause;
 if (ue_context_p == NULL)
...
@@ -2258,8 +2258,20 @@ int rrc_eNB_process_S1AP_PATH_SWITCH_REQ_ACK (MessageDef *msg_p, const char *msg
 S1AP_PATH_SWITCH_REQ_ACK (msg_p).next_security_key,
 SECURITY_KEY_LENGTH);
+pthread_mutex_lock(&ue_context_p->ue_context.handover_cond_lock);
+if(ue_context_p->ue_context.handover_info != NULL) {
 rrc_eNB_send_X2AP_UE_CONTEXT_RELEASE(&ctxt, ue_context_p);
+if(ue_context_p->ue_context.handover_info->state == HO_END_MARKER ||
+ue_context_p->ue_context.handover_info->state == HO_FORWARDING_COMPLETE) {
+LOG_I(RRC,"Handover finish,free handover_info\n");
+free(ue_context_p->ue_context.handover_info);
+ue_context_p->ue_context.handover_info = NULL;
+}
+}
+pthread_mutex_unlock(&ue_context_p->ue_context.handover_cond_lock);
 return (0);
 }
 }
...
@@ -136,7 +136,7 @@ rrc_eNB_allocate_new_UE_context(
 memset(new_p, 0, sizeof(struct rrc_eNB_ue_context_s));
 new_p->local_uid = uid_linear_allocator_new(rrc_instance_pP);
+pthread_mutex_init(&new_p->ue_context.handover_cond_lock, NULL);
 for(int i = 0; i < NB_RB_MAX; i++) {
 new_p->ue_context.e_rab[i].xid = -1;
 new_p->ue_context.modify_e_rab[i].xid = -1;
@@ -198,6 +198,7 @@ void rrc_eNB_remove_ue_context(
 "0 Removed UE %"PRIx16" ",
 ue_context_pP->ue_context.rnti);
 rrc_eNB_free_mem_UE_context(ctxt_pP, ue_context_pP);
+pthread_mutex_destroy(&ue_context_pP->ue_context.handover_cond_lock);
 uid_linear_allocator_free(rrc_instance_pP, ue_context_pP->local_uid);
 free(ue_context_pP);
 rrc_instance_pP->Nb_ue --;
...
@@ -279,12 +279,17 @@ NwGtpv1uRcT gtpv1u_eNB_process_stack_req(
 ue_context_p = rrc_eNB_get_ue_context(RC.rrc[ctxt.module_id], ctxt.rnti);
 if((ue_context_p != NULL) &&
-(ue_context_p->ue_context.handover_info != NULL) &&
-(HO_COMPLETE <= ue_context_p->ue_context.handover_info->state && ue_context_p->ue_context.handover_info->state < HO_FORWARDING_COMPLETE)) {
+(ue_context_p->ue_context.handover_info != NULL) ) {
+pthread_mutex_lock(&ue_context_p->ue_context.handover_cond_lock);
+if((ue_context_p->ue_context.handover_info != NULL) &&
+(HO_COMPLETE <= ue_context_p->ue_context.handover_info->state) &&
+(ue_context_p->ue_context.handover_info->state < HO_FORWARDING_COMPLETE)) {
 if(msgType == NW_GTP_END_MARKER){
 /* in the source enb, UE in RRC_HO_EXECUTION mode */
-if (ue_context_p->ue_context.Status == RRC_HO_EXECUTION && ue_context_p->ue_context.handover_info->state == HO_COMPLETE) {
+if (ue_context_p->ue_context.handover_info->state == HO_COMPLETE) {
 /* set handover state */
 //ue_context_p->ue_context.handover_info->state = HO_END_MARKER;
@@ -299,6 +304,7 @@ NwGtpv1uRcT gtpv1u_eNB_process_stack_req(
 GTPV1U_ENB_END_MARKER_REQ(msg).offset = GTPU_HEADER_OVERHEAD_MAX;
 LOG_I(GTPU, "Send End Marker to GTPV1-U at frame %d and subframe %d \n", ctxt.frame,ctxt.subframe);
 itti_send_msg_to_task(TASK_GTPV1_U, ENB_MODULE_ID_TO_INSTANCE(ctxt.module_id), msg);
+pthread_mutex_unlock(&ue_context_p->ue_context.handover_cond_lock);
 return NW_GTPV1U_OK;
 }
 }
@@ -323,6 +329,7 @@ NwGtpv1uRcT gtpv1u_eNB_process_stack_req(
 memset(&delete_tunnel_req, 0 , sizeof(delete_tunnel_req));
 delete_tunnel_req.rnti = ctxt.rnti;
 gtpv1u_delete_x2u_tunnel(ctxt.module_id, &delete_tunnel_req, GTPV1U_TARGET_ENB);
+pthread_mutex_unlock(&ue_context_p->ue_context.handover_cond_lock);
 return NW_GTPV1U_OK;
 }
@@ -354,9 +361,9 @@ NwGtpv1uRcT gtpv1u_eNB_process_stack_req(
 if ( result == FALSE ) {
 LOG_W(GTPU, "DATA FORWARDING message save failed\n");
-return NW_GTPV1U_FAILURE;
 }
 ue_context_p->ue_context.handover_info->forwarding_state = FORWARDING_NO_EMPTY;
+pthread_mutex_unlock(&ue_context_p->ue_context.handover_cond_lock);
 return NW_GTPV1U_OK;
 }
 /* from epc message */
@@ -386,11 +393,12 @@ NwGtpv1uRcT gtpv1u_eNB_process_stack_req(
 LOG_I(GTPU, "Send data forwarding to GTPV1-U at frame %d and subframe %d \n", ctxt.frame,ctxt.subframe);
 itti_send_msg_to_task(TASK_GTPV1_U, ENB_MODULE_ID_TO_INSTANCE(ctxt.module_id), msg);
+pthread_mutex_unlock(&ue_context_p->ue_context.handover_cond_lock);
 return NW_GTPV1U_OK;
 }
 /* target eNB. x2ho forwarding is processing. spgw message save to TASK_END_MARKER */
-if(ue_context_p->ue_context.handover_info->state != HO_COMPLETE &&
+if((ue_context_p->ue_context.handover_info->state != HO_COMPLETE) &&
 (ue_context_p->ue_context.handover_info->state != HO_END_MARKER ||
 ue_context_p->ue_context.handover_info->forwarding_state != FORWARDING_EMPTY ||
 ue_context_p->ue_context.handover_info->endmark_state != ENDMARK_EMPTY))
@@ -418,10 +426,10 @@ NwGtpv1uRcT gtpv1u_eNB_process_stack_req(
 if ( result == FALSE ) {
 LOG_W(GTPU, "DATA FORWARDING message save failed\n");
-return NW_GTPV1U_FAILURE;
 }
 ue_context_p->ue_context.handover_info->endmark_state = ENDMARK_NO_EMPTY;
+pthread_mutex_unlock(&ue_context_p->ue_context.handover_cond_lock);
 return NW_GTPV1U_OK;
 }
@@ -429,6 +437,8 @@ NwGtpv1uRcT gtpv1u_eNB_process_stack_req(
 }
+}
+pthread_mutex_unlock(&ue_context_p->ue_context.handover_cond_lock);
 }
 result = pdcp_data_req(
 &ctxt,
...
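A note on the GTP-U hunks: gtpv1u_eNB_process_stack_req returns from several branches while the new lock is held, so the diff places pthread_mutex_unlock before each return NW_GTPV1U_OK and drops the early return NW_GTPV1U_FAILURE branches, keeping every exit path from leaving handover_cond_lock locked. A common alternative, not what this commit does, is to funnel all exits through a single unlock site; the sketch below uses hypothetical names (process_tpdu, rc_t) purely to illustrate that design choice.

    /* Hypothetical single-exit variant: each branch sets rc and jumps to the
     * one place that releases the lock, instead of unlocking before every return. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { RC_OK, RC_FAILURE } rc_t;   /* stand-in for NwGtpv1uRcT values */

    static rc_t process_tpdu(pthread_mutex_t *lock, bool end_marker, bool save_ok)
    {
      rc_t rc = RC_OK;

      pthread_mutex_lock(lock);
      if (end_marker) {
        /* ... forward or queue the END MARKER ... */
        goto out;
      }
      if (!save_ok) {
        /* message could not be saved; report failure but still release the lock */
        rc = RC_FAILURE;
        goto out;
      }
      /* ... normal data-forwarding path ... */
    out:
      pthread_mutex_unlock(lock);
      return rc;
    }

    int main(void)
    {
      pthread_mutex_t lock;
      pthread_mutex_init(&lock, NULL);
      printf("rc = %d\n", process_tpdu(&lock, false, false)); /* failure path, lock released */
      pthread_mutex_destroy(&lock);
      return 0;
    }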