Commit 45655197 authored by wujing

merge ues_test branch into sp3_tdd_config1

parents 54cf2d9a 7f03a6e3
......@@ -650,6 +650,15 @@ void itti_mark_task_ready(task_id_t task_id)
/* Mark the thread as using LFDS queue */
lfds611_queue_use(itti_desc.tasks[task_id].message_queue);
#if defined(UE_EXPANSION) || defined(RTAI)
/* Assign low priority to created threads */
{
struct sched_param sched_param;
sched_param.sched_priority = sched_get_priority_min(SCHED_FIFO) + 1;
sched_setscheduler(0, SCHED_FIFO, &sched_param);
}
#endif
itti_desc.threads[thread_id].task_state = TASK_STATE_READY;
itti_desc.ready_tasks ++;
......
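The hunk above gives the ITTI task threads a near-minimum SCHED_FIFO priority when UE_EXPANSION or RTAI is defined. A minimal standalone sketch of the same call pattern follows; the helper name and error handling are illustrative, not part of the patch, and SCHED_FIFO normally requires CAP_SYS_NICE or root.

/* Sketch only: assign a low real-time priority to the calling thread. */
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

static int set_low_fifo_priority(void)
{
  struct sched_param sp;
  memset(&sp, 0, sizeof(sp));
  /* one step above the minimum priority for SCHED_FIFO */
  sp.sched_priority = sched_get_priority_min(SCHED_FIFO) + 1;
  /* pid 0 selects the caller; requires CAP_SYS_NICE (or root) */
  if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0) {
    fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
    return -1;
  }
  return 0;
}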
......@@ -541,13 +541,14 @@ check_ul_failure(module_id_t module_idP, int CC_id, int UE_id,
"UE %d rnti %x: sent PDCCH order for RAPROC waiting (failure timer %d) \n",
UE_id, rnti,
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer);
if ((UE_list->UE_sched_ctrl[UE_id].ul_failure_timer % 40) == 0)
UE_list->UE_sched_ctrl[UE_id].ra_pdcch_order_sent = 0; // resend every 4 frames
if ((UE_list->UE_sched_ctrl[UE_id].ul_failure_timer % 80) == 0)
UE_list->UE_sched_ctrl[UE_id].ra_pdcch_order_sent = 0; // resend every 8 frames
}
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer++;
// check threshold
if (UE_list->UE_sched_ctrl[UE_id].ul_failure_timer > 20000) {
if (UE_list->UE_sched_ctrl[UE_id].ul_failure_timer > 4000) {
// note: ul_failure_timer should probably be less than the UE radio link failure time (see T310/N310/N311)
// inform RRC of failure and clear timer
LOG_I(MAC,
"UE %d rnti %x: UL Failure after repeated PDCCH orders: Triggering RRC \n",
......
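The hunk above retunes two timers in check_ul_failure. Assuming ul_failure_timer is incremented once per subframe (1 ms), the new constants translate roughly as sketched below; the macro names are illustrative only, not part of the patch.

/* Illustrative only: timing implied by the new constants, assuming a 1 ms tick. */
#define SUBFRAMES_PER_FRAME       10
#define PDCCH_ORDER_RESEND_SF     80    /* 80 subframes = 8 frames = 80 ms  */
#define UL_FAILURE_THRESHOLD_SF   4000  /* 4000 subframes ~= 4 seconds      */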
......@@ -1672,6 +1672,8 @@ initiate_ra_proc(module_id_t module_idP,
struct PRACH_ConfigSIB_v1310 *ext4_prach = NULL;
PRACH_ParametersListCE_r13_t *prach_ParametersListCE_r13 = NULL;
static uint8_t failure_cnt = 0;
if (cc->radioResourceConfigCommon_BR
&& cc->radioResourceConfigCommon_BR->ext4) {
......@@ -1783,6 +1785,7 @@ initiate_ra_proc(module_id_t module_idP,
}
ra[i].RA_rnti = ra_rnti;
ra[i].preamble_index = preamble_index;
failure_cnt = 0;
LOG_D(MAC,
"[eNB %d][RAPROC] CC_id %d Frame %d Activating RAR generation in Frame %d, subframe %d for process %d, rnti %x, state %d\n",
module_idP, CC_id, frameP, ra[i].Msg2_frame,
......@@ -1795,6 +1798,13 @@ initiate_ra_proc(module_id_t module_idP,
LOG_E(MAC,
"[eNB %d][RAPROC] FAILURE: CC_id %d Frame %d Initiating RA procedure for preamble index %d\n",
module_idP, CC_id, frameP, preamble_index);
failure_cnt++;
if(failure_cnt > 20) {
LOG_E(MAC,"[eNB %d][RAPROC] CC_id %d Frame %d Clear Random access information\n", module_idP, CC_id, frameP);
clear_ra_proc(module_idP, CC_id, frameP);
}
}
void
......@@ -1817,6 +1827,22 @@ cancel_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP,
ra[i].RRC_timer = 20;
ra[i].rnti = 0;
ra[i].msg3_round = 0;
LOG_I(MAC,"[eNB %d][RAPROC] CC_id %d Frame %d Canceled RA procedure for UE rnti %x\n", module_idP, CC_id, frameP, rnti);
}
}
}
void clear_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP)
{
unsigned char i;
RA_t *ra = (RA_t *) & RC.mac[module_idP]->common_channels[CC_id].ra[0];
for (i = 0; i < NB_RA_PROC_MAX; i++) {
LOG_D(MAC,"[eNB %d][RAPROC] CC_id %d Frame %d Clear Random access information rnti %x\n", module_idP, CC_id, frameP, ra[i].rnti);
ra[i].state = IDLE;
ra[i].timing_offset = 0;
ra[i].RRC_timer = 20;
ra[i].rnti = 0;
ra[i].msg3_round = 0;
}
}
......@@ -2273,16 +2273,8 @@ int rrc_mac_remove_ue(module_id_t mod_idP, rnti_t rntiP)
eNB_dlsch_info[mod_idP][pCC_id][UE_id].serving_num = 0;
// check if this has an RA process active
RA_t *ra;
for (i = 0; i < NB_RA_PROC_MAX; i++) {
ra = (RA_t *) & RC.mac[mod_idP]->common_channels[pCC_id].ra[i];
if (ra->rnti == rntiP) {
ra->state = IDLE;
ra->timing_offset = 0;
ra->RRC_timer = 20;
ra->rnti = 0;
//break;
}
if(find_RA_id(mod_idP, pCC_id, rntiP) != -1) {
cancel_ra_proc(mod_idP, pCC_id, 0, rntiP);
}
return 0;
......
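The hunk above replaces the open-coded RA-process scan in rrc_mac_remove_ue with find_RA_id() plus cancel_ra_proc(). find_RA_id() itself is not shown in this diff; a hedged sketch of what such a lookup could look like, based on the RA_t fields used in the surrounding code, is:

/* Sketch only: the real find_RA_id() is not part of this diff.
 * Returns the RA process index serving the given RNTI, or -1 if none. */
static int find_RA_id_sketch(module_id_t mod_idP, int CC_id, rnti_t rnti)
{
  RA_t *ra = (RA_t *) &RC.mac[mod_idP]->common_channels[CC_id].ra[0];
  for (int i = 0; i < NB_RA_PROC_MAX; i++) {
    if (ra[i].rnti == rnti && ra[i].state != IDLE)  /* active-state check is an assumption */
      return i;
  }
  return -1;
}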
......@@ -165,18 +165,16 @@ rx_sdu(const module_id_t enb_mod_idP,
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid],
ul_cqi);
// AssertFatal(1==0,"ulsch in error\n");
if (UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid] ==
3) {
UE_list->UE_sched_ctrl[UE_id].ul_scheduled &=
(~(1 << harq_pid));
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid] =
0;
if (UE_list->UE_sched_ctrl[UE_id].
ul_consecutive_errors++ == 10)
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer = 1;
} else
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid]++;
// AssertFatal(1==0,"ulsch in error\n");
if (UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid] == 3) {
UE_list->UE_sched_ctrl[UE_id].ul_scheduled &= (~(1 << harq_pid));
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid] = 0;
if (UE_list->UE_sched_ctrl[UE_id].ul_consecutive_errors++ == 10)
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer = 1;
if(find_RA_id(enb_mod_idP, CC_idP, current_rnti) != -1)
cancel_ra_proc(enb_mod_idP, CC_idP, frameP, current_rnti);
} else
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid]++;
return;
}
......@@ -204,7 +202,7 @@ rx_sdu(const module_id_t enb_mod_idP,
(int) mac->common_channels[CC_idP].
radioResourceConfigCommon->rach_ConfigCommon.
maxHARQ_Msg3Tx);
if (ra[RA_id].msg3_round ==
if (ra[RA_id].msg3_round >=
mac->common_channels[CC_idP].radioResourceConfigCommon->
rach_ConfigCommon.maxHARQ_Msg3Tx - 1) {
cancel_ra_proc(enb_mod_idP, CC_idP, frameP, current_rnti);
......@@ -309,7 +307,7 @@ rx_sdu(const module_id_t enb_mod_idP,
// prepare transmission of Msg4(RRCConnectionReconfiguration)
ra->state = MSGCRNTI;
LOG_I(MAC,
"[eNB %d] Frame %d, Subframe %d CC_id %d : (rnti %x UE_id %d) RRCConnectionReconfiguration(Msg4)",
"[eNB %d] Frame %d, Subframe %d CC_id %d : (rnti %x UE_id %d) RRCConnectionReconfiguration(Msg4)\n",
enb_mod_idP, frameP, subframeP, CC_idP, old_rnti, old_UE_id);
//
UE_id = old_UE_id;
......@@ -2090,7 +2088,8 @@ void schedule_ulsch_rnti(module_id_t module_idP,
0, // ul_tx_mode
0, // current_tx_nb
0, // n_srs
get_TBS_UL(UE_template->mcs_UL[harq_pid], ulsch_ue_select[CC_id].list[ulsch_ue_num].nb_rb)
// get_TBS_UL(UE_template->mcs_UL[harq_pid], ulsch_ue_select[CC_id].list[ulsch_ue_num].nb_rb)
UE_template->TBS_UL[harq_pid]
);
#ifdef Rel14
if (UE_template->rach_resource_type>0) { // This is a BL/CE UE allocation
......
......@@ -2501,7 +2501,15 @@ void ulsch_scheduler_pre_ue_select(
ue_first_num[CC_id]++;
continue;
}
if ( (ulsch_ue_select[CC_id].ue_num+ul_inactivity_num[CC_id] ) < ulsch_ue_max_num[CC_id] ) {
UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
if ( ((UE_sched_ctl->ul_inactivity_timer>20)&&(UE_sched_ctl->ul_scheduled==0)) ||
((UE_sched_ctl->ul_inactivity_timer>10)&&(UE_sched_ctl->ul_scheduled==0)&&(mac_eNB_get_rrc_status(module_idP,UE_RNTI(module_idP,UE_id)) < RRC_CONNECTED))) {
first_ue_id[CC_id][ue_first_num[CC_id]]= UE_id;
first_ue_total[CC_id] [ue_first_num[CC_id]] = 0;
ue_first_num[CC_id]++;
continue;
}
/*if ( (ulsch_ue_select[CC_id].ue_num+ul_inactivity_num[CC_id] ) < ulsch_ue_max_num[CC_id] ) {
UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
uint8_t ul_period = 0;
if (cc->tdd_Config) {
......@@ -2515,7 +2523,7 @@ void ulsch_scheduler_pre_ue_select(
ul_inactivity_num[CC_id] ++;
continue;
}
}
}*/
}
}
......@@ -2590,7 +2598,10 @@ void ulsch_scheduler_pre_ue_select(
HI_DCI0_req = &eNB->HI_DCI0_req[CC_id][subframeP].hi_dci0_request_body;
//SR BSR
if ( (UE_list->UE_template[CC_id][UE_id].ul_total_buffer > 0) || (UE_list->UE_template[CC_id][UE_id].ul_SR > 0) ) {
UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
if ( (UE_list->UE_template[CC_id][UE_id].ul_total_buffer > 0) || (UE_list->UE_template[CC_id][UE_id].ul_SR > 0) ||
((UE_sched_ctl->ul_inactivity_timer>20)&&(UE_sched_ctl->ul_scheduled==0)) ||
((UE_sched_ctl->ul_inactivity_timer>10)&&(UE_sched_ctl->ul_scheduled==0)&&(mac_eNB_get_rrc_status(module_idP,UE_RNTI(module_idP,UE_id)) < RRC_CONNECTED)) ){
hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[HI_DCI0_req->number_of_dci+HI_DCI0_req->number_of_hi];
format_flag = 2;
if (CCE_allocation_infeasible(module_idP,CC_id,format_flag,subframeP,aggregation,rnti) == 1) {
......@@ -2613,7 +2624,7 @@ void ulsch_scheduler_pre_ue_select(
}
}
//inactivity UE
if ( (ulsch_ue_select[CC_id].ue_num+ul_inactivity_num[CC_id]) < ulsch_ue_max_num[CC_id] ) {
/* if ( (ulsch_ue_select[CC_id].ue_num+ul_inactivity_num[CC_id]) < ulsch_ue_max_num[CC_id] ) {
UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
uint8_t ul_period = 0;
if (cc->tdd_Config) {
......@@ -2627,7 +2638,7 @@ void ulsch_scheduler_pre_ue_select(
ul_inactivity_num[CC_id]++;
continue;
}
}
}*/
}
for ( CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++ ) {
......
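The same inactivity condition is added twice in ulsch_scheduler_pre_ue_select above, once for the first-pass UE list and once in the SR/BSR branch. As an illustration only, it could be factored into a single predicate; the helper name is hypothetical and the parameter type mirrors the UE_sched_ctl pointer used above.

/* Illustrative refactor of the duplicated condition; not part of the patch. */
static int ue_needs_keepalive_ul_grant(module_id_t module_idP, int UE_id,
                                       UE_sched_ctrl *sched_ctl)
{
  if (sched_ctl->ul_scheduled != 0)
    return 0;
  if (sched_ctl->ul_inactivity_timer > 20)
    return 1;
  /* UEs not yet RRC_CONNECTED are polled earlier */
  return (sched_ctl->ul_inactivity_timer > 10) &&
         (mac_eNB_get_rrc_status(module_idP, UE_RNTI(module_idP, UE_id)) < RRC_CONNECTED);
}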
......@@ -299,6 +299,9 @@ void cancel_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP,
@param Msg3_frame frame where scheduling takes place
@param Msg3_subframe subframe where scheduling takes place
*/
void clear_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP);
void set_msg3_subframe(module_id_t Mod_id,
int CC_id,
int frame,
......
......@@ -89,6 +89,7 @@ fill_rar(const module_id_t module_idP,
ra->msg3_TPC = 3;
ra->msg3_ULdelay = 0;
ra->msg3_cqireq = 0;
ra->msg3_round = 0;
rar[2] |= ((ra->msg3_mcs & 0x8) >> 3); // mcs 10
rar[3] =
(((ra->msg3_mcs & 0x7) << 5)) | ((ra->msg3_TPC & 7) << 2) |
......
......@@ -550,8 +550,10 @@ rlc_am_mac_status_indication (
}
rlc->last_absolute_subframe_status_indication = PROTOCOL_CTXT_TIME_MILLI_SECONDS(ctxt_pP);
if (tb_sizeP > 0)
rlc->nb_bytes_requested_by_mac = tb_sizeP;
if (tb_sizeP > 0) {
rlc->nb_bytes_requested_by_mac = tb_sizeP;
}
status_resp.buffer_occupancy_in_bytes = rlc_am_get_buffer_occupancy_in_bytes(ctxt_pP, rlc);
......
......@@ -396,14 +396,20 @@ rlc_am_receive_process_data_pdu (
}
if (pdu_info_p->sn == rlc_pP->vr_r) {
mem_block_t* cursor_p = rlc_pP->receiver_buffer.head;
rlc_am_rx_pdu_management_t * pdu_cursor_mgnt_p = (rlc_am_rx_pdu_management_t *) (cursor_p->data);
if( (((rlc_am_rx_pdu_management_t*)(tb_pP->data))->all_segments_received) == (pdu_cursor_mgnt_p->all_segments_received)){
if (((rlc_am_rx_pdu_management_t*)(tb_pP->data))->all_segments_received) {
rlc_am_rx_update_vr_r(ctxt_pP, rlc_pP, tb_pP);
rlc_pP->vr_mr = (rlc_pP->vr_r + RLC_AM_WINDOW_SIZE) & RLC_AM_SN_MASK;
}
reassemble = rlc_am_rx_check_vr_reassemble(ctxt_pP, rlc_pP);
//TODO : optimization : check whether a reassembly is needed by looking at LI, FI, SO, etc...
}else{
LOG_E(RLC, "BAD all_segments_received!!! discard buffer!!!\n");
/* Discard received block if out of window, duplicate or header error */
free_mem_block (tb_pP, __func__);
}
}
//FNA: fix check VrX out of receiving window
......
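The vr_r/vr_mr update above follows the RLC AM receiving-window rule VR(MR) = VR(R) + AM_Window_Size, taken modulo the SN space. A small sketch of the corresponding in-window test, reusing the same macros as the code above:

/* Sketch: SN x is inside the AM receiving window when VR(R) <= x < VR(MR),
 * evaluated modulo the SN space (RLC_AM_SN_MASK = SN space - 1). */
static int sn_in_rx_window(unsigned int sn, unsigned int vr_r)
{
  return ((sn - vr_r) & RLC_AM_SN_MASK) < RLC_AM_WINDOW_SIZE;
}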
......@@ -348,7 +348,7 @@ rrc_rx_tx(
}
if (ue_context_p->ue_context.ul_failure_timer>0) {
ue_context_p->ue_context.ul_failure_timer++;
if (ue_context_p->ue_context.ul_failure_timer >= 8) {
if (ue_context_p->ue_context.ul_failure_timer >= 20000) {
// remove the UE 20 seconds after MAC has indicated UL failure
LOG_I(RRC,"Removing UE %x instance\n",ue_context_p->ue_context.rnti);
ue_to_be_removed = ue_context_p;
......@@ -513,14 +513,14 @@ rrc_rx_tx(
}
}
if (ue_to_be_removed) {
if(ue_to_be_removed->ue_context.ul_failure_timer >= 8) {
if(ue_to_be_removed->ue_context.ul_failure_timer >= 20000) {
ue_to_be_removed->ue_context.ue_release_timer_s1 = 1;
ue_to_be_removed->ue_context.ue_release_timer_thres_s1 = 200;
ue_to_be_removed->ue_context.ue_release_timer = 0;
ue_to_be_removed->ue_context.ue_reestablishment_timer = 0;
}
rrc_eNB_free_UE(ctxt_pP->module_id,ue_to_be_removed);
if(ue_to_be_removed->ue_context.ul_failure_timer >= 8){
if(ue_to_be_removed->ue_context.ul_failure_timer >= 20000){
ue_to_be_removed->ue_context.ul_failure_timer = 0;
}
}
......
......@@ -825,7 +825,7 @@ rrc_eNB_free_UE(const module_id_t enb_mod_idP,const struct rrc_eNB_ue_context_s*
LOG_W(RRC, "[eNB %d] Removing UE RNTI %x\n", enb_mod_idP, rnti);
#if defined(ENABLE_USE_MME)
if( ue_context_pP->ue_context.ul_failure_timer >= 8 ) {
if( ue_context_pP->ue_context.ul_failure_timer >= 20000 ) {
LOG_I(RRC, "[eNB %d] S1AP_UE_CONTEXT_RELEASE_REQ RNTI %x\n", enb_mod_idP, rnti);
rrc_eNB_send_S1AP_UE_CONTEXT_RELEASE_REQ(enb_mod_idP, ue_context_pP, S1AP_CAUSE_RADIO_NETWORK, 21); // send cause 21: connection with ue lost
/* From 3GPP 36300v10 p129 : 19.2.2.2.2 S1 UE Context Release Request (eNB triggered)
......@@ -844,15 +844,24 @@ rrc_eNB_free_UE(const module_id_t enb_mod_idP,const struct rrc_eNB_ue_context_s*
ulsch = eNB_PHY->ulsch[i];
if((ulsch != NULL) && (ulsch->rnti == rnti)){
LOG_I(RRC, "clean_eNb_ulsch UE %x \n", rnti);
clean_eNb_ulsch(ulsch);
//clean_eNb_ulsch(ulsch);
ulsch->rnti = 0;
}
}
for (i=0; i<NUMBER_OF_UE_MAX; i++) {
dlsch = eNB_PHY->dlsch[i][0];
if((dlsch != NULL) && (dlsch->rnti == rnti)){
LOG_I(RRC, "clean_eNb_dlsch UE %x \n", rnti);
clean_eNb_dlsch(dlsch);
}
if(eNB_PHY->uci_vars[i].rnti == rnti){
LOG_I(MAC, "clean eNb uci_vars[%d] UE %x \n",i, rnti);
memset(&eNB_PHY->uci_vars[i],0,sizeof(LTE_eNB_UCI));
}
}
ulsch = eNB_PHY->ulsch[i];
if((ulsch != NULL) && (ulsch->rnti == rnti)){
LOG_I(RRC, "clean_eNb_ulsch UE %x \n", rnti);
//clean_eNb_ulsch(ulsch);
ulsch->rnti = 0;
}
for(j = 0; j < 10; j++){
......@@ -860,12 +869,16 @@ rrc_eNB_free_UE(const module_id_t enb_mod_idP,const struct rrc_eNB_ue_context_s*
if(ul_req_tmp){
pdu_number = ul_req_tmp->number_of_pdus;
for(int pdu_index = pdu_number-1; pdu_index >= 0; pdu_index--){
if(ul_req_tmp->ul_config_pdu_list[pdu_index].ulsch_pdu.ulsch_pdu_rel8.rnti == rnti){
LOG_I(RRC, "remove UE %x from ul_config_pdu_list %d/%d\n", rnti, pdu_index, pdu_number);
if(pdu_index < pdu_number -1){
memcpy(&ul_req_tmp->ul_config_pdu_list[pdu_index], &ul_req_tmp->ul_config_pdu_list[pdu_index+1], (pdu_number-1-pdu_index) * sizeof(nfapi_ul_config_request_pdu_t));
}
ul_req_tmp->number_of_pdus--;
if((ul_req_tmp->ul_config_pdu_list[pdu_index].ulsch_pdu.ulsch_pdu_rel8.rnti == rnti) ||
(ul_req_tmp->ul_config_pdu_list[pdu_index].uci_harq_pdu.ue_information.ue_information_rel8.rnti == rnti) ||
(ul_req_tmp->ul_config_pdu_list[pdu_index].uci_cqi_pdu.ue_information.ue_information_rel8.rnti == rnti) ||
(ul_req_tmp->ul_config_pdu_list[pdu_index].uci_sr_pdu.ue_information.ue_information_rel8.rnti == rnti) ||
(ul_req_tmp->ul_config_pdu_list[pdu_index].srs_pdu.srs_pdu_rel8.rnti == rnti)){
LOG_I(RRC, "remove UE %x from ul_config_pdu_list %d/%d\n", rnti, pdu_index, pdu_number);
if(pdu_index < pdu_number -1){
memcpy(&ul_req_tmp->ul_config_pdu_list[pdu_index], &ul_req_tmp->ul_config_pdu_list[pdu_index+1], (pdu_number-1-pdu_index) * sizeof(nfapi_ul_config_request_pdu_t));
}
ul_req_tmp->number_of_pdus--;
}
}
}
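The extended check above drops every ul_config PDU whose RNTI matches the released UE in any of the rel8 PDU variants (ULSCH, HARQ, CQI, SR, SRS), compacting the list in place. A generic sketch of that remove-and-compact pattern follows; the helper and the count type are illustrative, and it uses memmove because source and destination overlap (the patch itself keeps memcpy).

/* Illustrative remove-and-compact helper; iterate backwards so a shift
 * never moves an element that has not been inspected yet. */
static void remove_matching_pdus(nfapi_ul_config_request_pdu_t *list, int *count,
                                 int (*matches)(const nfapi_ul_config_request_pdu_t *))
{
  for (int i = *count - 1; i >= 0; i--) {
    if (!matches(&list[i]))
      continue;
    if (i < *count - 1)
      memmove(&list[i], &list[i + 1], (*count - 1 - i) * sizeof(list[0]));
    (*count)--;
  }
}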
......@@ -945,12 +958,21 @@ void release_UE_in_freeList(module_id_t mod_id)
PROTOCOL_CTXT_SET_BY_MODULE_ID(&ctxt, mod_id, ENB_FLAG_YES, rnti, 0, 0,mod_id);
for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
eNB_PHY = RC.eNB[mod_id][CC_id];
for (i=0; i<=NUMBER_OF_UE_MAX; i++) {
for (i=0; i<NUMBER_OF_UE_MAX; i++) {
ulsch = eNB_PHY->ulsch[i];
if((ulsch != NULL) && (ulsch->rnti == rnti)){
LOG_I(RRC, "clean_eNb_ulsch ulsch[%d] UE %x\n", i, rnti);
clean_eNb_ulsch(ulsch);
}
LOG_I(RRC, "clean_eNb_ulsch ulsch[%d] UE %x\n", i, rnti);
clean_eNb_ulsch(ulsch);
}
if(eNB_PHY->uci_vars[i].rnti == rnti){
LOG_I(MAC, "clean eNb uci_vars[%d] UE %x \n",i, rnti);
memset(&eNB_PHY->uci_vars[i],0,sizeof(LTE_eNB_UCI));
}
}
ulsch = eNB_PHY->ulsch[i];
if((ulsch != NULL) && (ulsch->rnti == rnti)){
LOG_I(RRC, "clean_eNb_ulsch ulsch[%d] UE %x\n", i, rnti);
clean_eNb_ulsch(ulsch);
}
for(j = 0; j < 10; j++){
......@@ -958,12 +980,16 @@ void release_UE_in_freeList(module_id_t mod_id)
if(ul_req_tmp){
pdu_number = ul_req_tmp->number_of_pdus;
for(int pdu_index = pdu_number-1; pdu_index >= 0; pdu_index--){
if(ul_req_tmp->ul_config_pdu_list[pdu_index].ulsch_pdu.ulsch_pdu_rel8.rnti == rnti){
LOG_I(RRC, "remove UE %x from ul_config_pdu_list %d/%d\n", rnti, pdu_index, pdu_number);
if(pdu_index < pdu_number -1){
memcpy(&ul_req_tmp->ul_config_pdu_list[pdu_index], &ul_req_tmp->ul_config_pdu_list[pdu_index+1], (pdu_number-1-pdu_index) * sizeof(nfapi_ul_config_request_pdu_t));
}
ul_req_tmp->number_of_pdus--;
if((ul_req_tmp->ul_config_pdu_list[pdu_index].ulsch_pdu.ulsch_pdu_rel8.rnti == rnti) ||
(ul_req_tmp->ul_config_pdu_list[pdu_index].uci_harq_pdu.ue_information.ue_information_rel8.rnti == rnti) ||
(ul_req_tmp->ul_config_pdu_list[pdu_index].uci_cqi_pdu.ue_information.ue_information_rel8.rnti == rnti) ||
(ul_req_tmp->ul_config_pdu_list[pdu_index].uci_sr_pdu.ue_information.ue_information_rel8.rnti == rnti) ||
(ul_req_tmp->ul_config_pdu_list[pdu_index].srs_pdu.srs_pdu_rel8.rnti == rnti)){
LOG_I(RRC, "remove UE %x from ul_config_pdu_list %d/%d\n", rnti, pdu_index, pdu_number);
if(pdu_index < pdu_number -1){
memcpy(&ul_req_tmp->ul_config_pdu_list[pdu_index], &ul_req_tmp->ul_config_pdu_list[pdu_index+1], (pdu_number-1-pdu_index) * sizeof(nfapi_ul_config_request_pdu_t));
}
ul_req_tmp->number_of_pdus--;
}
}
}
......@@ -1371,8 +1397,10 @@ rrc_eNB_process_RRCConnectionReestablishmentComplete(
SRB_configList2 = &ue_context_pP->ue_context.SRB_configList2[xid];
// get old configuration of SRB2
if (*SRB_configList2 != NULL) {
LOG_D(RRC, "SRB_configList2(%p) count is %d\n SRB_configList2->list.array[0] addr is %p",
if((*SRB_configList2)->list.count!=0){
LOG_D(RRC, "SRB_configList2(%p) count is %d\n SRB_configList2->list.array[0] addr is %p",
SRB_configList2, (*SRB_configList2)->list.count, (*SRB_configList2)->list.array[0]);
}
for (i = 0; (i < (*SRB_configList2)->list.count) && (i < 3); i++) {
if ((*SRB_configList2)->list.array[i]->srb_Identity == 2 ){
LOG_D(RRC, "get SRB2_config from (ue_context_pP->ue_context.SRB_configList2[%d])\n", xid);
......@@ -1384,9 +1412,13 @@ rrc_eNB_process_RRCConnectionReestablishmentComplete(
SRB_configList2 = &ue_context_pP->ue_context.SRB_configList2[next_xid];
DRB_configList2 = &ue_context_pP->ue_context.DRB_configList2[next_xid];
if (*SRB_configList2) {
free(*SRB_configList2);
LOG_D(RRC, "free(ue_context_pP->ue_context.SRB_configList2[%d])\n", next_xid);
if(SRB_configList2!=NULL){
if (*SRB_configList2) {
free(*SRB_configList2);
LOG_D(RRC, "free(ue_context_pP->ue_context.SRB_configList2[%d])\n", next_xid);
}
}else{
LOG_E(RRC, "SRB_configList2 is null\n");
}
*SRB_configList2 = CALLOC(1, sizeof(**SRB_configList2));
if (SRB2_config != NULL) {
......@@ -1406,9 +1438,13 @@ rrc_eNB_process_RRCConnectionReestablishmentComplete(
if (*DRB_configList2) {
free(*DRB_configList2);
LOG_D(RRC, "free(ue_context_pP->ue_context.DRB_configList2[%d])\n", next_xid);
if(DRB_configList2!=NULL){
if (*DRB_configList2) {
free(*DRB_configList2);
LOG_D(RRC, "free(ue_context_pP->ue_context.DRB_configList2[%d])\n", next_xid);
}
}else{
LOG_E(RRC, "DRB_configList2 is null\n");
}
*DRB_configList2 = CALLOC(1, sizeof(**DRB_configList2));
......@@ -1448,7 +1484,7 @@ rrc_eNB_process_RRCConnectionReestablishmentComplete(
memset(&create_tunnel_req, 0 , sizeof(create_tunnel_req));
for (j = 0, i = 0; i < NB_RB_MAX; i++) {
if (ue_context_pP->ue_context.e_rab[i].status == E_RAB_STATUS_ESTABLISHED) {
if (ue_context_pP->ue_context.e_rab[i].status == E_RAB_STATUS_ESTABLISHED || ue_context_pP->ue_context.e_rab[i].status == E_RAB_STATUS_DONE) {
create_tunnel_req.eps_bearer_id[j] = ue_context_pP->ue_context.e_rab[i].param.e_rab_id;
create_tunnel_req.sgw_S1u_teid[j] = ue_context_pP->ue_context.e_rab[i].param.gtp_teid;
......@@ -1863,35 +1899,39 @@ rrc_eNB_process_RRCConnectionReestablishmentComplete(
}
#endif
if(size==65535){
LOG_E(RRC,"RRC decode err!!! do_RRCConnectionReconfiguration\n");
put_UE_in_freelist(ctxt_pP->module_id, reestablish_rnti, 0);
return;
}else{
LOG_I(RRC,
"[eNB %d] Frame %d, Logical Channel DL-DCCH, Generate RRCConnectionReconfiguration (bytes %d, UE id %x)\n",
ctxt_pP->module_id, ctxt_pP->frame, size, ue_context_pP->ue_context.rnti);
LOG_I(RRC,
"[eNB %d] Frame %d, Logical Channel DL-DCCH, Generate RRCConnectionReconfiguration (bytes %d, UE id %x)\n",
ctxt_pP->module_id, ctxt_pP->frame, size, ue_context_pP->ue_context.rnti);
LOG_D(RRC,
"[FRAME %05d][RRC_eNB][MOD %u][][--- PDCP_DATA_REQ/%d Bytes (rrcConnectionReconfiguration to UE %x MUI %d) --->][PDCP][MOD %u][RB %u]\n",
ctxt_pP->frame, ctxt_pP->module_id, size, ue_context_pP->ue_context.rnti, rrc_eNB_mui, ctxt_pP->module_id, DCCH);
MSC_LOG_TX_MESSAGE(
MSC_RRC_ENB,
MSC_RRC_UE,
buffer,
size,
MSC_AS_TIME_FMT" rrcConnectionReconfiguration UE %x MUI %d size %u",
MSC_AS_TIME_ARGS(ctxt_pP),
ue_context_pP->ue_context.rnti,
rrc_eNB_mui,
size);
rrc_data_req(
ctxt_pP,
DCCH,
rrc_eNB_mui++,
SDU_CONFIRM_NO,
size,
buffer,
PDCP_TRANSMISSION_MODE_CONTROL);
LOG_D(RRC,
"[FRAME %05d][RRC_eNB][MOD %u][][--- PDCP_DATA_REQ/%d Bytes (rrcConnectionReconfiguration to UE %x MUI %d) --->][PDCP][MOD %u][RB %u]\n",
ctxt_pP->frame, ctxt_pP->module_id, size, ue_context_pP->ue_context.rnti, rrc_eNB_mui, ctxt_pP->module_id, DCCH);
MSC_LOG_TX_MESSAGE(
MSC_RRC_ENB,
MSC_RRC_UE,
buffer,
size,
MSC_AS_TIME_FMT" rrcConnectionReconfiguration UE %x MUI %d size %u",
MSC_AS_TIME_ARGS(ctxt_pP),
ue_context_pP->ue_context.rnti,
rrc_eNB_mui,
size);
rrc_data_req(
ctxt_pP,
DCCH,
rrc_eNB_mui++,
SDU_CONFIRM_NO,
size,
buffer,
PDCP_TRANSMISSION_MODE_CONTROL);
}
// delete UE data of the prior RNTI; the UE now uses the current RNTI.
// protocol_ctxt_t ctxt_prior = *ctxt_pP;
// ctxt_prior.rnti = reestablish_rnti;
......@@ -6251,7 +6291,7 @@ if (ue_context_p->ue_context.nb_of_modify_e_rabs > 0) {
ue_context_p,
ul_dcch_msg->message.choice.c1.choice.rrcConnectionReconfigurationComplete.rrc_TransactionIdentifier);
}
}else {
}else if(dedicated_DRB == 0){
if(ue_context_p->ue_context.reestablishment_cause == ReestablishmentCause_spare1){
rrc_eNB_send_S1AP_INITIAL_CONTEXT_SETUP_RESP(ctxt_pP,
ue_context_p);
......@@ -6265,7 +6305,15 @@ if (ue_context_p->ue_context.nb_of_modify_e_rabs > 0) {
}
}
}
}
}else if(dedicated_DRB == 2){
for (uint8_t e_rab = 0; e_rab < ue_context_p->ue_context.nb_of_e_rabs; e_rab++) {
if (ue_context_p->ue_context.e_rab[e_rab].status == E_RAB_STATUS_DONE) {
ue_context_p->ue_context.e_rab[e_rab].status = E_RAB_STATUS_ESTABLISHED;
} else {
ue_context_p->ue_context.e_rab[e_rab].status = E_RAB_STATUS_FAILED;
}
}
}
}
#else // establish a dedicated bearer
if (dedicated_DRB == 0 ) {
......
......@@ -1527,8 +1527,10 @@ static void* ru_thread( void* param ) {
// the thread can now be woken up
AssertFatal(pthread_cond_signal(&ru->proc.cond_phy_tx) == 0, "ERROR pthread_cond_signal for phy_tx thread\n");
}else{
LOG_E(PHY,"phy tx thread busy, skipping\n");
++ru->proc.instance_cnt_phy_tx;
}
else LOG_W(PHY,"phy tx thread busy, skipping\n");
pthread_mutex_unlock( &ru->proc.mutex_phy_tx );
} else {
phy_tx_end = 1;
......@@ -1810,8 +1812,10 @@ static void* eNB_thread_phy_tx( void* param ) {
// the thread can now be woken up
AssertFatal(pthread_cond_signal(&ru->proc.cond_rf_tx) == 0, "ERROR pthread_cond_signal for rf_tx thread\n");
}else{
LOG_E(PHY,"rf tx thread busy, skipping\n");
ru->proc.instance_cnt_rf_tx++;
}
else LOG_W(PHY,"rf tx thread busy, skipping\n");
pthread_mutex_unlock( &ru->proc.mutex_rf_tx );
}
if (release_thread(&proc->mutex_phy_tx,&proc->instance_cnt_phy_tx,"eNB_thread_phy_tx") < 0) break;
......@@ -1855,6 +1859,10 @@ static void* rf_tx( void* param ) {
if (ru->fh_north_out) ru->fh_north_out(ru);
}
if (release_thread(&proc->mutex_rf_tx,&proc->instance_cnt_rf_tx,"rf_tx") < 0) break;
if(proc->instance_cnt_rf_tx >= 0){
late_control=STATE_BURST_TERMINATE;
LOG_E(PHY,"detect rf tx busy change mode TX failsafe\n");
}
}
LOG_I(PHY, "Exiting rf TX\n");
......
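The ru_thread, eNB_thread_phy_tx, and rf_tx changes above all use the same convention: instance_cnt == -1 means the worker is idle, so the producer either signals the condition variable or, if the worker is still busy, just increments the counter and logs it; rf_tx then treats a still non-negative counter after release_thread() as a backlog and enters the TX failsafe. A minimal generic sketch of that wake-up pattern (names are illustrative, not the actual RU/eNB structures):

#include <pthread.h>

typedef struct {
  pthread_mutex_t mutex;
  pthread_cond_t  cond;
  int             instance_cnt;   /* -1: worker idle, >= 0: requests outstanding */
} worker_sync_t;

static int wake_worker(worker_sync_t *w)
{
  pthread_mutex_lock(&w->mutex);
  int was_idle = (w->instance_cnt == -1);
  ++w->instance_cnt;                /* busy case only records the backlog */
  if (was_idle)
    pthread_cond_signal(&w->cond);  /* idle case actually wakes the worker */
  pthread_mutex_unlock(&w->mutex);
  return was_idle ? 0 : -1;         /* -1 lets the caller log "busy, skipping" */
}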