Commit 45655197 authored by wujing

Merge ues_test branch into sp3_tdd_config1

parents 54cf2d9a 7f03a6e3
......@@ -650,6 +650,15 @@ void itti_mark_task_ready(task_id_t task_id)
/* Mark the thread as using LFDS queue */
lfds611_queue_use(itti_desc.tasks[task_id].message_queue);
#if defined(UE_EXPANSION) || defined(RTAI)
/* Assign low priority to created threads */
{
struct sched_param sched_param;
sched_param.sched_priority = sched_get_priority_min(SCHED_FIFO) + 1;
sched_setscheduler(0, SCHED_FIFO, &sched_param);
}
#endif
itti_desc.threads[thread_id].task_state = TASK_STATE_READY;
itti_desc.ready_tasks ++;
......
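For reference, a minimal standalone sketch of the thread-priority pattern used in the itti_mark_task_ready() hunk above, assuming a Linux target where sched_setscheduler() acts on the calling thread; the error handling and the set_low_rt_priority() wrapper name are illustrative additions, not part of the patch:

#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Give the calling thread the lowest usable SCHED_FIFO priority, one step
 * above the minimum, mirroring what the ITTI task threads do above. */
static int set_low_rt_priority(void)
{
    struct sched_param sp;
    memset(&sp, 0, sizeof(sp));
    sp.sched_priority = sched_get_priority_min(SCHED_FIFO) + 1;
    if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0) {
        fprintf(stderr, "sched_setscheduler failed: %s\n", strerror(errno));
        return -1;
    }
    return 0;
}

Note that SCHED_FIFO normally requires CAP_SYS_NICE or root privileges, so this call can fail when the softmodem is run unprivileged.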
......@@ -541,13 +541,14 @@ check_ul_failure(module_id_t module_idP, int CC_id, int UE_id,
"UE %d rnti %x: sent PDCCH order for RAPROC waiting (failure timer %d) \n",
UE_id, rnti,
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer);
if ((UE_list->UE_sched_ctrl[UE_id].ul_failure_timer % 40) == 0)
UE_list->UE_sched_ctrl[UE_id].ra_pdcch_order_sent = 0; // resend every 4 frames
if ((UE_list->UE_sched_ctrl[UE_id].ul_failure_timer % 80) == 0)
UE_list->UE_sched_ctrl[UE_id].ra_pdcch_order_sent = 0; // resend every 8 frames
}
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer++;
// check threshold
if (UE_list->UE_sched_ctrl[UE_id].ul_failure_timer > 20000) {
if (UE_list->UE_sched_ctrl[UE_id].ul_failure_timer > 4000) {
// note: ul_failure_timer should probably be less than the UE radio link failure time (see T310/N310/N311)
// inform RRC of failure and clear timer
LOG_I(MAC,
"UE %d rnti %x: UL Failure after repeated PDCCH orders: Triggering RRC \n",
......
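Context for the threshold changes above: check_ul_failure() increments ul_failure_timer once per call, i.e. once per subframe (1 ms) as implied by the existing "resend every N frames" comments, so the PDCCH-order resend interval grows from 40 subframes (4 frames) to 80 subframes (8 frames), and the RRC-notification threshold drops from 20000 subframes (about 20 s) to 4000 subframes (about 4 s).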
......@@ -1672,6 +1672,8 @@ initiate_ra_proc(module_id_t module_idP,
struct PRACH_ConfigSIB_v1310 *ext4_prach = NULL;
PRACH_ParametersListCE_r13_t *prach_ParametersListCE_r13 = NULL;
static uint8_t failure_cnt = 0;
if (cc->radioResourceConfigCommon_BR
&& cc->radioResourceConfigCommon_BR->ext4) {
......@@ -1783,6 +1785,7 @@ initiate_ra_proc(module_id_t module_idP,
}
ra[i].RA_rnti = ra_rnti;
ra[i].preamble_index = preamble_index;
failure_cnt = 0;
LOG_D(MAC,
"[eNB %d][RAPROC] CC_id %d Frame %d Activating RAR generation in Frame %d, subframe %d for process %d, rnti %x, state %d\n",
module_idP, CC_id, frameP, ra[i].Msg2_frame,
......@@ -1795,6 +1798,13 @@ initiate_ra_proc(module_id_t module_idP,
LOG_E(MAC,
"[eNB %d][RAPROC] FAILURE: CC_id %d Frame %d Initiating RA procedure for preamble index %d\n",
module_idP, CC_id, frameP, preamble_index);
failure_cnt++;
if(failure_cnt > 20) {
LOG_E(MAC,"[eNB %d][RAPROC] CC_id %d Frame %d Clear Random access information\n", module_idP, CC_id, frameP);
clear_ra_proc(module_idP, CC_id, frameP);
}
}
void
......@@ -1817,6 +1827,22 @@ cancel_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP,
ra[i].RRC_timer = 20;
ra[i].rnti = 0;
ra[i].msg3_round = 0;
LOG_I(MAC,"[eNB %d][RAPROC] CC_id %d Frame %d Canceled RA procedure for UE rnti %x\n", module_idP, CC_id, frameP, rnti);
}
}
}
void clear_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP)
{
unsigned char i;
RA_t *ra = (RA_t *) & RC.mac[module_idP]->common_channels[CC_id].ra[0];
for (i = 0; i < NB_RA_PROC_MAX; i++) {
LOG_D(MAC,"[eNB %d][RAPROC] CC_id %d Frame %d Clear Random access information rnti %x\n", module_idP, CC_id, frameP, ra[i].rnti);
ra[i].state = IDLE;
ra[i].timing_offset = 0;
ra[i].RRC_timer = 20;
ra[i].rnti = 0;
ra[i].msg3_round = 0;
}
}
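The new clear_ra_proc() differs from cancel_ra_proc() in scope: cancel_ra_proc() resets only the RA process whose rnti matches the given UE, while clear_ra_proc() unconditionally resets every entry of the ra[] array for the component carrier, which is why initiate_ra_proc() calls it after more than 20 consecutive RA failures.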
......@@ -2273,16 +2273,8 @@ int rrc_mac_remove_ue(module_id_t mod_idP, rnti_t rntiP)
eNB_dlsch_info[mod_idP][pCC_id][UE_id].serving_num = 0;
// check if this has an RA process active
RA_t *ra;
for (i = 0; i < NB_RA_PROC_MAX; i++) {
ra = (RA_t *) & RC.mac[mod_idP]->common_channels[pCC_id].ra[i];
if (ra->rnti == rntiP) {
ra->state = IDLE;
ra->timing_offset = 0;
ra->RRC_timer = 20;
ra->rnti = 0;
//break;
}
if(find_RA_id(mod_idP, pCC_id, rntiP) != -1) {
cancel_ra_proc(mod_idP, pCC_id, 0, rntiP);
}
return 0;
......
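The refactored rrc_mac_remove_ue() above relies on find_RA_id(), which is not shown in this diff. A hypothetical sketch of the expected helper, assumed here to return the index of the active RA process matching the RNTI, or -1 when none exists:

/* Hypothetical helper (not part of this diff): locate the RA process of a
 * component carrier that is currently associated with the given RNTI. */
static int find_RA_id(module_id_t mod_idP, int CC_idP, rnti_t rntiP)
{
    RA_t *ra = (RA_t *) &RC.mac[mod_idP]->common_channels[CC_idP].ra[0];
    int i;

    for (i = 0; i < NB_RA_PROC_MAX; i++) {
        if (ra[i].rnti == rntiP && ra[i].state != IDLE)
            return i;
    }
    return -1;
}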
......@@ -165,18 +165,16 @@ rx_sdu(const module_id_t enb_mod_idP,
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid],
ul_cqi);
// AssertFatal(1==0,"ulsch in error\n");
if (UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid] ==
3) {
UE_list->UE_sched_ctrl[UE_id].ul_scheduled &=
(~(1 << harq_pid));
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid] =
0;
if (UE_list->UE_sched_ctrl[UE_id].
ul_consecutive_errors++ == 10)
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer = 1;
} else
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid]++;
// AssertFatal(1==0,"ulsch in error\n");
if (UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid] == 3) {
UE_list->UE_sched_ctrl[UE_id].ul_scheduled &= (~(1 << harq_pid));
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid] = 0;
if (UE_list->UE_sched_ctrl[UE_id].ul_consecutive_errors++ == 10)
UE_list->UE_sched_ctrl[UE_id].ul_failure_timer = 1;
if(find_RA_id(enb_mod_idP, CC_idP, current_rnti) != -1)
cancel_ra_proc(enb_mod_idP, CC_idP, frameP, current_rnti);
} else
UE_list->UE_sched_ctrl[UE_id].round_UL[CC_idP][harq_pid]++;
return;
}
......@@ -204,7 +202,7 @@ rx_sdu(const module_id_t enb_mod_idP,
(int) mac->common_channels[CC_idP].
radioResourceConfigCommon->rach_ConfigCommon.
maxHARQ_Msg3Tx);
if (ra[RA_id].msg3_round ==
if (ra[RA_id].msg3_round >=
mac->common_channels[CC_idP].radioResourceConfigCommon->
rach_ConfigCommon.maxHARQ_Msg3Tx - 1) {
cancel_ra_proc(enb_mod_idP, CC_idP, frameP, current_rnti);
......@@ -309,7 +307,7 @@ rx_sdu(const module_id_t enb_mod_idP,
// prepare transmission of Msg4(RRCConnectionReconfiguration)
ra->state = MSGCRNTI;
LOG_I(MAC,
"[eNB %d] Frame %d, Subframe %d CC_id %d : (rnti %x UE_id %d) RRCConnectionReconfiguration(Msg4)",
"[eNB %d] Frame %d, Subframe %d CC_id %d : (rnti %x UE_id %d) RRCConnectionReconfiguration(Msg4)\n",
enb_mod_idP, frameP, subframeP, CC_idP, old_rnti, old_UE_id);
//
UE_id = old_UE_id;
......@@ -2090,7 +2088,8 @@ void schedule_ulsch_rnti(module_id_t module_idP,
0, // ul_tx_mode
0, // current_tx_nb
0, // n_srs
get_TBS_UL(UE_template->mcs_UL[harq_pid], ulsch_ue_select[CC_id].list[ulsch_ue_num].nb_rb)
// get_TBS_UL(UE_template->mcs_UL[harq_pid], ulsch_ue_select[CC_id].list[ulsch_ue_num].nb_rb)
UE_template->TBS_UL[harq_pid]
);
#ifdef Rel14
if (UE_template->rach_resource_type>0) { // This is a BL/CE UE allocation
......
......@@ -2501,7 +2501,15 @@ void ulsch_scheduler_pre_ue_select(
ue_first_num[CC_id]++;
continue;
}
if ( (ulsch_ue_select[CC_id].ue_num+ul_inactivity_num[CC_id] ) < ulsch_ue_max_num[CC_id] ) {
UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
if ( ((UE_sched_ctl->ul_inactivity_timer>20)&&(UE_sched_ctl->ul_scheduled==0)) ||
((UE_sched_ctl->ul_inactivity_timer>10)&&(UE_sched_ctl->ul_scheduled==0)&&(mac_eNB_get_rrc_status(module_idP,UE_RNTI(module_idP,UE_id)) < RRC_CONNECTED))) {
first_ue_id[CC_id][ue_first_num[CC_id]]= UE_id;
first_ue_total[CC_id] [ue_first_num[CC_id]] = 0;
ue_first_num[CC_id]++;
continue;
}
/*if ( (ulsch_ue_select[CC_id].ue_num+ul_inactivity_num[CC_id] ) < ulsch_ue_max_num[CC_id] ) {
UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
uint8_t ul_period = 0;
if (cc->tdd_Config) {
......@@ -2515,7 +2523,7 @@ void ulsch_scheduler_pre_ue_select(
ul_inactivity_num[CC_id] ++;
continue;
}
}
}*/
}
}
......@@ -2590,7 +2598,10 @@ void ulsch_scheduler_pre_ue_select(
HI_DCI0_req = &eNB->HI_DCI0_req[CC_id][subframeP].hi_dci0_request_body;
//SR BSR
if ( (UE_list->UE_template[CC_id][UE_id].ul_total_buffer > 0) || (UE_list->UE_template[CC_id][UE_id].ul_SR > 0) ) {
UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
if ( (UE_list->UE_template[CC_id][UE_id].ul_total_buffer > 0) || (UE_list->UE_template[CC_id][UE_id].ul_SR > 0) ||
((UE_sched_ctl->ul_inactivity_timer>20)&&(UE_sched_ctl->ul_scheduled==0)) ||
((UE_sched_ctl->ul_inactivity_timer>10)&&(UE_sched_ctl->ul_scheduled==0)&&(mac_eNB_get_rrc_status(module_idP,UE_RNTI(module_idP,UE_id)) < RRC_CONNECTED)) ){
hi_dci0_pdu = &HI_DCI0_req->hi_dci0_pdu_list[HI_DCI0_req->number_of_dci+HI_DCI0_req->number_of_hi];
format_flag = 2;
if (CCE_allocation_infeasible(module_idP,CC_id,format_flag,subframeP,aggregation,rnti) == 1) {
......@@ -2613,7 +2624,7 @@ void ulsch_scheduler_pre_ue_select(
}
}
//inactivity UE
if ( (ulsch_ue_select[CC_id].ue_num+ul_inactivity_num[CC_id]) < ulsch_ue_max_num[CC_id] ) {
/* if ( (ulsch_ue_select[CC_id].ue_num+ul_inactivity_num[CC_id]) < ulsch_ue_max_num[CC_id] ) {
UE_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
uint8_t ul_period = 0;
if (cc->tdd_Config) {
......@@ -2627,7 +2638,7 @@ void ulsch_scheduler_pre_ue_select(
ul_inactivity_num[CC_id]++;
continue;
}
}
}*/
}
for ( CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++ ) {
......
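Net effect of the ulsch_scheduler_pre_ue_select() changes above: inactive UEs (no UL currently scheduled and ul_inactivity_timer above 20, or above 10 when the UE is not yet RRC_CONNECTED) are now folded into the first_ue_id[] priority list and into the SR/BSR DCI0 condition, while the two former capacity-limited "inactivity UE" passes are commented out, so such UEs are polled through the normal selection path instead of a separate one.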
......@@ -299,6 +299,9 @@ void cancel_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP,
@param Msg3_frame frame where scheduling takes place
@param Msg3_subframe subframe where scheduling takes place
*/
void clear_ra_proc(module_id_t module_idP, int CC_id, frame_t frameP);
void set_msg3_subframe(module_id_t Mod_id,
int CC_id,
int frame,
......
......@@ -89,6 +89,7 @@ fill_rar(const module_id_t module_idP,
ra->msg3_TPC = 3;
ra->msg3_ULdelay = 0;
ra->msg3_cqireq = 0;
ra->msg3_round = 0;
rar[2] |= ((ra->msg3_mcs & 0x8) >> 3); // mcs 10
rar[3] =
(((ra->msg3_mcs & 0x7) << 5)) | ((ra->msg3_TPC & 7) << 2) |
......
......@@ -550,8 +550,10 @@ rlc_am_mac_status_indication (
}
rlc->last_absolute_subframe_status_indication = PROTOCOL_CTXT_TIME_MILLI_SECONDS(ctxt_pP);
if (tb_sizeP > 0)
rlc->nb_bytes_requested_by_mac = tb_sizeP;
if (tb_sizeP > 0) {
rlc->nb_bytes_requested_by_mac = tb_sizeP;
}
status_resp.buffer_occupancy_in_bytes = rlc_am_get_buffer_occupancy_in_bytes(ctxt_pP, rlc);
......
......@@ -396,14 +396,20 @@ rlc_am_receive_process_data_pdu (
}
if (pdu_info_p->sn == rlc_pP->vr_r) {
mem_block_t* cursor_p = rlc_pP->receiver_buffer.head;
rlc_am_rx_pdu_management_t * pdu_cursor_mgnt_p = (rlc_am_rx_pdu_management_t *) (cursor_p->data);
if( (((rlc_am_rx_pdu_management_t*)(tb_pP->data))->all_segments_received) == (pdu_cursor_mgnt_p->all_segments_received)){
if (((rlc_am_rx_pdu_management_t*)(tb_pP->data))->all_segments_received) {
rlc_am_rx_update_vr_r(ctxt_pP, rlc_pP, tb_pP);
rlc_pP->vr_mr = (rlc_pP->vr_r + RLC_AM_WINDOW_SIZE) & RLC_AM_SN_MASK;
}
reassemble = rlc_am_rx_check_vr_reassemble(ctxt_pP, rlc_pP);
//TODO : optimization : check whether a reassembly is needed by looking at LI, FI, SO, etc...
}else{
LOG_E(RLC, "BAD all_segments_received!!! discard buffer!!!\n");
/* Discard received block if out of window, duplicate or header error */
free_mem_block (tb_pP, __func__);
}
}
//FNA: fix check VrX out of receiving window
......
......@@ -348,7 +348,7 @@ rrc_rx_tx(
}
if (ue_context_p->ue_context.ul_failure_timer>0) {
ue_context_p->ue_context.ul_failure_timer++;
if (ue_context_p->ue_context.ul_failure_timer >= 8) {
if (ue_context_p->ue_context.ul_failure_timer >= 20000) {
// remove UE after 20 seconds after MAC has indicated UL failure
LOG_I(RRC,"Removing UE %x instance\n",ue_context_p->ue_context.rnti);
ue_to_be_removed = ue_context_p;
......@@ -513,14 +513,14 @@ rrc_rx_tx(
}
}
if (ue_to_be_removed) {
if(ue_to_be_removed->ue_context.ul_failure_timer >= 8) {
if(ue_to_be_removed->ue_context.ul_failure_timer >= 20000) {
ue_to_be_removed->ue_context.ue_release_timer_s1 = 1;
ue_to_be_removed->ue_context.ue_release_timer_thres_s1 = 200;
ue_to_be_removed->ue_context.ue_release_timer = 0;
ue_to_be_removed->ue_context.ue_reestablishment_timer = 0;
}
rrc_eNB_free_UE(ctxt_pP->module_id,ue_to_be_removed);
if(ue_to_be_removed->ue_context.ul_failure_timer >= 8){
if(ue_to_be_removed->ue_context.ul_failure_timer >= 20000){
ue_to_be_removed->ue_context.ul_failure_timer = 0;
}
}
......
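Assuming rrc_rx_tx() runs once per millisecond TTI, as the existing "20 seconds" comment implies, raising the ul_failure_timer threshold from 8 to 20000 makes the removal delay match that comment (20000 increments at 1 ms each = 20 s after MAC reports UL failure), and the same 20000 threshold is applied consistently in the ue_to_be_removed handling at the end of the hunk.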
......@@ -1527,8 +1527,10 @@ static void* ru_thread( void* param ) {
// the thread can now be woken up
AssertFatal(pthread_cond_signal(&ru->proc.cond_phy_tx) == 0, "ERROR pthread_cond_signal for phy_tx thread\n");
}else{
LOG_E(PHY,"phy tx thread busy, skipping\n");
++ru->proc.instance_cnt_phy_tx;
}
else LOG_W(PHY,"phy tx thread busy, skipping\n");
pthread_mutex_unlock( &ru->proc.mutex_phy_tx );
} else {
phy_tx_end = 1;
......@@ -1810,8 +1812,10 @@ static void* eNB_thread_phy_tx( void* param ) {
// the thread can now be woken up
AssertFatal(pthread_cond_signal(&ru->proc.cond_rf_tx) == 0, "ERROR pthread_cond_signal for rf_tx thread\n");
}else{
LOG_E(PHY,"rf tx thread busy, skipping\n");
ru->proc.instance_cnt_rf_tx++;
}
else LOG_W(PHY,"rf tx thread busy, skipping\n");
pthread_mutex_unlock( &ru->proc.mutex_rf_tx );
}
if (release_thread(&proc->mutex_phy_tx,&proc->instance_cnt_phy_tx,"eNB_thread_phy_tx") < 0) break;
......@@ -1855,6 +1859,10 @@ static void* rf_tx( void* param ) {
if (ru->fh_north_out) ru->fh_north_out(ru);
}
if (release_thread(&proc->mutex_rf_tx,&proc->instance_cnt_rf_tx,"rf_tx") < 0) break;
if(proc->instance_cnt_rf_tx >= 0){
late_control=STATE_BURST_TERMINATE;
LOG_E(PHY,"detect rf tx busy change mode TX failsafe\n");
}
}
LOG_I(PHY, "Exiting rf TX\n");
......