Commit 8eb9d0b4 authored by hardy's avatar hardy

Merge remote-tracking branch 'origin/fixes-mac-sched-nfapi' into integration_2020_wk42

parents 4d14cc63 ff48c369
...@@ -348,7 +348,15 @@ int wake_eNB_rxtx(PHY_VARS_eNB *eNB, uint16_t sfn, uint16_t sf) { ...@@ -348,7 +348,15 @@ int wake_eNB_rxtx(PHY_VARS_eNB *eNB, uint16_t sfn, uint16_t sf) {
old_sf = sf; old_sf = sf;
old_sfn = sfn; old_sfn = sfn;
if (old_sf == 0 && old_sfn % 100==0) LOG_W( PHY,"[eNB] sfn/sf:%d%d old_sfn/sf:%d%d proc[rx:%d%d]\n", sfn, sf, old_sfn, old_sf, proc->frame_rx, proc->subframe_rx); if (old_sf == 0 && old_sfn % 100==0)
LOG_D(PHY,
"[eNB] sfn/sf:%d%d old_sfn/sf:%d%d proc[rx:%d%d]\n",
sfn,
sf,
old_sfn,
old_sf,
proc->frame_rx,
proc->subframe_rx);
} }
++L1_proc->instance_cnt; ++L1_proc->instance_cnt;
...@@ -435,6 +443,7 @@ int phy_rach_indication(struct nfapi_vnf_p7_config *config, nfapi_rach_indicatio ...@@ -435,6 +443,7 @@ int phy_rach_indication(struct nfapi_vnf_p7_config *config, nfapi_rach_indicatio
} }
if(index == -1){ if(index == -1){
LOG_E(MAC,"phy_rach_indication : num of rach reach max \n"); LOG_E(MAC,"phy_rach_indication : num of rach reach max \n");
pthread_mutex_unlock(&eNB->UL_INFO_mutex);
return 0; return 0;
} }
UL_RCC_INFO.rach_ind[index] = *ind; UL_RCC_INFO.rach_ind[index] = *ind;
...@@ -499,6 +508,7 @@ int phy_harq_indication(struct nfapi_vnf_p7_config *config, nfapi_harq_indicatio ...@@ -499,6 +508,7 @@ int phy_harq_indication(struct nfapi_vnf_p7_config *config, nfapi_harq_indicatio
} }
if(index == -1){ if(index == -1){
LOG_E(MAC,"phy_harq_indication : num of harq reach max \n"); LOG_E(MAC,"phy_harq_indication : num of harq reach max \n");
pthread_mutex_unlock(&eNB->UL_INFO_mutex);
return 0; return 0;
} }
UL_RCC_INFO.harq_ind[index] = *ind; UL_RCC_INFO.harq_ind[index] = *ind;
...@@ -538,6 +548,7 @@ int phy_crc_indication(struct nfapi_vnf_p7_config *config, nfapi_crc_indication_ ...@@ -538,6 +548,7 @@ int phy_crc_indication(struct nfapi_vnf_p7_config *config, nfapi_crc_indication_
} }
if(index == -1){ if(index == -1){
LOG_E(MAC,"phy_crc_indication : num of crc reach max \n"); LOG_E(MAC,"phy_crc_indication : num of crc reach max \n");
pthread_mutex_unlock(&eNB->UL_INFO_mutex);
return 0; return 0;
} }
UL_RCC_INFO.crc_ind[index] = *ind; UL_RCC_INFO.crc_ind[index] = *ind;
...@@ -603,6 +614,7 @@ int phy_rx_indication(struct nfapi_vnf_p7_config *config, nfapi_rx_indication_t ...@@ -603,6 +614,7 @@ int phy_rx_indication(struct nfapi_vnf_p7_config *config, nfapi_rx_indication_t
} }
if(index == -1){ if(index == -1){
LOG_E(MAC,"phy_rx_indication : num of rx reach max \n"); LOG_E(MAC,"phy_rx_indication : num of rx reach max \n");
pthread_mutex_unlock(&eNB->UL_INFO_mutex);
return 0; return 0;
} }
UL_RCC_INFO.rx_ind[index] = *ind; UL_RCC_INFO.rx_ind[index] = *ind;
...@@ -686,6 +698,7 @@ int phy_sr_indication(struct nfapi_vnf_p7_config *config, nfapi_sr_indication_t ...@@ -686,6 +698,7 @@ int phy_sr_indication(struct nfapi_vnf_p7_config *config, nfapi_sr_indication_t
} }
if(index == -1){ if(index == -1){
LOG_E(MAC,"phy_sr_indication : num of sr reach max \n"); LOG_E(MAC,"phy_sr_indication : num of sr reach max \n");
pthread_mutex_unlock(&eNB->UL_INFO_mutex);
return 0; return 0;
} }
UL_RCC_INFO.sr_ind[index] = *ind; UL_RCC_INFO.sr_ind[index] = *ind;
...@@ -737,6 +750,7 @@ int phy_cqi_indication(struct nfapi_vnf_p7_config *config, nfapi_cqi_indication_ ...@@ -737,6 +750,7 @@ int phy_cqi_indication(struct nfapi_vnf_p7_config *config, nfapi_cqi_indication_
} }
if(index == -1){ if(index == -1){
LOG_E(MAC,"phy_cqi_indication : num of cqi reach max \n"); LOG_E(MAC,"phy_cqi_indication : num of cqi reach max \n");
pthread_mutex_unlock(&eNB->UL_INFO_mutex);
return 0; return 0;
} }
UL_RCC_INFO.cqi_ind[index] = *ind; UL_RCC_INFO.cqi_ind[index] = *ind;
......
...@@ -955,7 +955,7 @@ void vnf_handle_ul_node_sync(void *pRecvMsg, int recvMsgLen, vnf_p7_t* vnf_p7) ...@@ -955,7 +955,7 @@ void vnf_handle_ul_node_sync(void *pRecvMsg, int recvMsgLen, vnf_p7_t* vnf_p7)
struct timespec ts; struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts); clock_gettime(CLOCK_MONOTONIC, &ts);
NFAPI_TRACE(NFAPI_TRACE_NOTE, "(%4d/%1d) %d.%d PNF to VNF phy_id:%2d (t1/2/3/4:%8u, %8u, %8u, %8u) txrx:%4u procT:%3u latency(us):%4d(avg:%4d) offset(us):%8d filtered(us):%8d wrap[t1:%u t2:%u]\n", NFAPI_TRACE(NFAPI_TRACE_INFO, "(%4d/%1d) %d.%d PNF to VNF phy_id:%2d (t1/2/3/4:%8u, %8u, %8u, %8u) txrx:%4u procT:%3u latency(us):%4d(avg:%4d) offset(us):%8d filtered(us):%8d wrap[t1:%u t2:%u]\n",
NFAPI_SFNSF2SFN(phy->sfn_sf), NFAPI_SFNSF2SF(phy->sfn_sf), ts.tv_sec, ts.tv_nsec, ind.header.phy_id, NFAPI_SFNSF2SFN(phy->sfn_sf), NFAPI_SFNSF2SF(phy->sfn_sf), ts.tv_sec, ts.tv_nsec, ind.header.phy_id,
ind.t1, ind.t2, ind.t3, t4, ind.t1, ind.t2, ind.t3, t4,
tx_2_rx, pnf_proc_time, latency, phy->average_latency, phy->sf_offset, phy->sf_offset_filtered, tx_2_rx, pnf_proc_time, latency, phy->average_latency, phy->sf_offset, phy->sf_offset_filtered,
...@@ -1016,7 +1016,7 @@ void vnf_handle_ul_node_sync(void *pRecvMsg, int recvMsgLen, vnf_p7_t* vnf_p7) ...@@ -1016,7 +1016,7 @@ void vnf_handle_ul_node_sync(void *pRecvMsg, int recvMsgLen, vnf_p7_t* vnf_p7)
{ {
phy->adjustment = NFAPI_SFNSF2DEC(new_sfn_sf) - NFAPI_SFNSF2DEC(curr_sfn_sf); phy->adjustment = NFAPI_SFNSF2DEC(new_sfn_sf) - NFAPI_SFNSF2DEC(curr_sfn_sf);
NFAPI_TRACE(NFAPI_TRACE_NOTE, "PNF to VNF phy_id:%d adjustment%d phy->previous_sf_offset_filtered:%d phy->previous_sf_offset_filtered:%d phy->sf_offset_trend:%d\n", ind.header.phy_id, phy->adjustment, phy->previous_sf_offset_filtered, phy->previous_sf_offset_filtered, phy->sf_offset_trend); NFAPI_TRACE(NFAPI_TRACE_INFO, "PNF to VNF phy_id:%d adjustment%d phy->previous_sf_offset_filtered:%d phy->previous_sf_offset_filtered:%d phy->sf_offset_trend:%d\n", ind.header.phy_id, phy->adjustment, phy->previous_sf_offset_filtered, phy->previous_sf_offset_filtered, phy->sf_offset_trend);
phy->previous_t1 = 0; phy->previous_t1 = 0;
phy->previous_t2 = 0; phy->previous_t2 = 0;
......
...@@ -44,7 +44,6 @@ ...@@ -44,7 +44,6 @@
#include "common/ran_context.h" #include "common/ran_context.h"
extern FILL_UL_INFO_MUTEX_t fill_ul_mutex;
extern void openair_rrc_top_init_ue( int eMBMS_active, char *uecap_xer, uint8_t cba_group_active, uint8_t HO_active); extern void openair_rrc_top_init_ue( int eMBMS_active, char *uecap_xer, uint8_t cba_group_active, uint8_t HO_active);
void dl_phy_sync_success(module_id_t module_idP, frame_t frameP, unsigned char eNB_index, uint8_t first_sync) { //init as MR void dl_phy_sync_success(module_id_t module_idP, frame_t frameP, unsigned char eNB_index, uint8_t first_sync) { //init as MR
...@@ -90,16 +89,6 @@ mac_top_init_ue(int eMBMS_active, char *uecap_xer, ...@@ -90,16 +89,6 @@ mac_top_init_ue(int eMBMS_active, char *uecap_xer,
UE_mac_inst = NULL; UE_mac_inst = NULL;
} }
// mutex below are used for multiple UE's L2 FAPI simulation.
if (NFAPI_MODE == NFAPI_UE_STUB_PNF) {
pthread_mutex_init(&fill_ul_mutex.rx_mutex,NULL);
pthread_mutex_init(&fill_ul_mutex.crc_mutex,NULL);
pthread_mutex_init(&fill_ul_mutex.sr_mutex,NULL);
pthread_mutex_init(&fill_ul_mutex.harq_mutex,NULL);
pthread_mutex_init(&fill_ul_mutex.cqi_mutex,NULL);
pthread_mutex_init(&fill_ul_mutex.rach_mutex,NULL);
}
LOG_I(MAC, "[MAIN] calling RRC\n"); LOG_I(MAC, "[MAIN] calling RRC\n");
openair_rrc_top_init_ue(eMBMS_active, uecap_xer, cba_group_active, openair_rrc_top_init_ue(eMBMS_active, uecap_xer, cba_group_active,
HO_active); HO_active);
......
...@@ -69,6 +69,70 @@ int get_rbg_size_last(module_id_t Mod_id, int CC_id) { ...@@ -69,6 +69,70 @@ int get_rbg_size_last(module_id_t Mod_id, int CC_id) {
return RBGsize; return RBGsize;
} }
/* Try to reserve resources for a pending DL HARQ retransmission of UE_id on
 * CC_id in the given frame/subframe.
 *
 * @param start_rbg     first RBG index to consider in rbgalloc_mask
 * @param n_rbg_sched   in/out: number of still-allocatable RBGs; decreased by
 *                      the number of RBGs consumed on success
 * @param rbgalloc_mask in/out: per-RBG availability mask (1 = free); bits used
 *                      for the retransmission are cleared on success
 * @return true if the retransmission was fully allocated (CCE + RBGs),
 *         false if there is no pending retransmission or resources are short.
 * On any failure path nothing is modified (no partial allocation). */
bool try_allocate_harq_retransmission(module_id_t Mod_id,
                                      int CC_id,
                                      int frame,
                                      int subframe,
                                      int UE_id,
                                      int start_rbg,
                                      int *n_rbg_sched,
                                      uint8_t *rbgalloc_mask) {
  const int N_RBG = to_rbg(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);
  const int RBGsize = get_min_rb_unit(Mod_id, CC_id);
  const int RBGlastsize = get_rbg_size_last(Mod_id, CC_id);
  UE_info_t *UE_info = &RC.mac[Mod_id]->UE_info;
  // check whether there are HARQ retransmissions
  const COMMON_channels_t *cc = &RC.mac[Mod_id]->common_channels[CC_id];
  const uint8_t harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config, frame, subframe);
  UE_sched_ctrl_t *ue_ctrl = &UE_info->UE_sched_ctrl[UE_id];
  // a retransmission must reuse the RB count of the initial transmission;
  // nb_rb == 0 means no retransmission is pending for this HARQ process
  const int nb_rb = UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid];
  if (nb_rb == 0) {
    return false;
  }
  /* RBGs needed = ceil(nb_rb / RBGsize). The previous expression
   * (nb_rb + (nb_rb % RBGsize)) / RBGsize under-counts whenever
   * 2 * (nb_rb % RBGsize) < RBGsize (e.g. nb_rb=4, RBGsize=3 gave 1 RBG
   * although 2 are required), which could grant a retransmission fewer
   * RBs than its TB occupies. */
  int nb_rbg = (nb_rb + RBGsize - 1) / RBGsize;
  // needs more RBGs than we can allocate
  if (nb_rbg > *n_rbg_sched) {
    LOG_D(MAC,
          "retransmission of UE %d needs more RBGs (%d) than we have (%d)\n",
          UE_id, nb_rbg, *n_rbg_sched);
    return false;
  }
  // ensure that the number of RBs can be contained by the RBGs (!), i.e.
  // if we allocate the last RBG this one should have the full RBGsize
  // (the last RBG of the bandwidth can be smaller than the regular RBG size)
  if ((nb_rb % RBGsize) == 0 && nb_rbg == *n_rbg_sched
      && rbgalloc_mask[N_RBG - 1] && RBGlastsize != RBGsize) {
    LOG_D(MAC,
          "retransmission of UE %d needs %d RBs, but the last RBG %d is too small (%d, normal %d)\n",
          UE_id, nb_rb, N_RBG - 1, RBGlastsize, RBGsize);
    return false;
  }
  // a DCI needs CCE resources; without them the allocation cannot be signaled
  const uint8_t cqi = ue_ctrl->dl_cqi[CC_id];
  const int idx = CCE_try_allocate_dlsch(Mod_id, CC_id, subframe, UE_id, cqi);
  if (idx < 0) { // cannot allocate CCE
    LOG_D(MAC, "cannot allocate UE %d: no CCE can be allocated\n", UE_id);
    return false;
  }
  ue_ctrl->pre_dci_dl_pdu_idx = idx;
  // retransmissions: directly allocate
  *n_rbg_sched -= nb_rbg;
  ue_ctrl->pre_nb_available_rbs[CC_id] += nb_rb;
  // claim nb_rbg free RBGs starting at start_rbg, skipping occupied ones.
  // NOTE(review): assumes rbgalloc_mask has at least nb_rbg set bits from
  // start_rbg onwards (implied by nb_rbg <= *n_rbg_sched) — confirm callers
  // keep n_rbg_sched in sync with the mask, else this reads past the mask
  for (; nb_rbg > 0; start_rbg++) {
    if (!rbgalloc_mask[start_rbg])
      continue;
    ue_ctrl->rballoc_sub_UE[CC_id][start_rbg] = 1;
    rbgalloc_mask[start_rbg] = 0;
    nb_rbg--;
  }
  LOG_D(MAC,
        "%4d.%d n_rbg_sched %d after retransmission reservation for UE %d "
        "retx nb_rb %d pre_nb_available_rbs %d\n",
        frame, subframe, *n_rbg_sched, UE_id,
        UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid],
        ue_ctrl->pre_nb_available_rbs[CC_id]);
  return true;
}
void *rr_dl_setup(void) { void *rr_dl_setup(void) {
void *data = malloc(sizeof(int)); void *data = malloc(sizeof(int));
*(int *) data = 0; *(int *) data = 0;
...@@ -115,64 +179,25 @@ int rr_dl_run(module_id_t Mod_id, ...@@ -115,64 +179,25 @@ int rr_dl_run(module_id_t Mod_id,
const uint8_t harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config, frame, subframe); const uint8_t harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config, frame, subframe);
UE_sched_ctrl_t *ue_ctrl = &UE_info->UE_sched_ctrl[UE_id]; UE_sched_ctrl_t *ue_ctrl = &UE_info->UE_sched_ctrl[UE_id];
const uint8_t round = ue_ctrl->round[CC_id][harq_pid]; const uint8_t round = ue_ctrl->round[CC_id][harq_pid];
if (round != 8) { // retransmission: allocate if (round != 8) {
const int nb_rb = UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid]; bool r = try_allocate_harq_retransmission(Mod_id, CC_id, frame, subframe,
if (nb_rb == 0) UE_id, rbg, &n_rbg_sched,
goto skip_ue; rbgalloc_mask);
int nb_rbg = (nb_rb + (nb_rb % RBGsize)) / RBGsize; if (r) {
// needs more RBGs than we can allocate /* if there are no more RBG to give, return */
if (nb_rbg > n_rbg_sched) { if (n_rbg_sched <= 0)
LOG_D(MAC, return 0;
"retransmission of UE %d needs more RBGs (%d) than we have (%d)\n", max_num_ue--;
UE_id, nb_rbg, n_rbg_sched); if (max_num_ue == 0)
goto skip_ue; return n_rbg_sched;
} for (; !rbgalloc_mask[rbg]; rbg++) /* fast-forward */ ;
// ensure that the number of RBs can be contained by the RBGs (!), i.e.
// if we allocate the last RBG this one should have the full RBGsize
if ((nb_rb % RBGsize) == 0 && nb_rbg == n_rbg_sched
&& rbgalloc_mask[N_RBG - 1] && RBGlastsize != RBGsize) {
LOG_D(MAC,
"retransmission of UE %d needs %d RBs, but the last RBG %d is too small (%d, normal %d)\n",
UE_id, nb_rb, N_RBG - 1, RBGlastsize, RBGsize);
goto skip_ue;
}
const uint8_t cqi = ue_ctrl->dl_cqi[CC_id];
const int idx = CCE_try_allocate_dlsch(Mod_id, CC_id, subframe, UE_id, cqi);
if (idx < 0)
goto skip_ue; // cannot allocate CCE
ue_ctrl->pre_dci_dl_pdu_idx = idx;
// retransmissions: directly allocate
n_rbg_sched -= nb_rbg;
ue_ctrl->pre_nb_available_rbs[CC_id] += nb_rb;
for (; nb_rbg > 0; rbg++) {
if (!rbgalloc_mask[rbg])
continue;
ue_ctrl->rballoc_sub_UE[CC_id][rbg] = 1;
rbgalloc_mask[rbg] = 0;
nb_rbg--;
} }
LOG_D(MAC,
"%4d.%d n_rbg_sched %d after retransmission reservation for UE %d "
"round %d retx nb_rb %d pre_nb_available_rbs %d\n",
frame, subframe, n_rbg_sched, UE_id, round,
UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid],
ue_ctrl->pre_nb_available_rbs[CC_id]);
/* if there are no more RBG to give, return */
if (n_rbg_sched <= 0)
return 0;
max_num_ue--;
/* if there are no UEs that can be allocated anymore, return */
if (max_num_ue == 0)
return n_rbg_sched;
for (; !rbgalloc_mask[rbg]; rbg++) /* fast-forward */ ;
} else { } else {
if (UE_info->UE_template[CC_id][UE_id].dl_buffer_total > 0) { if (UE_info->UE_template[CC_id][UE_id].dl_buffer_total > 0) {
*cur_UE = UE_id; *cur_UE = UE_id;
cur_UE = &UE_sched.next[UE_id]; cur_UE = &UE_sched.next[UE_id];
} }
} }
skip_ue:
UE_id = next_ue_list_looped(UE_list, UE_id); UE_id = next_ue_list_looped(UE_list, UE_id);
} while (UE_id != *start_ue); } while (UE_id != *start_ue);
*cur_UE = -1; // mark end *cur_UE = -1; // mark end
...@@ -187,11 +212,12 @@ skip_ue: ...@@ -187,11 +212,12 @@ skip_ue:
cur_UE = &UE_sched.head; cur_UE = &UE_sched.head;
while (*cur_UE >= 0 && max_num_ue > 0) { while (*cur_UE >= 0 && max_num_ue > 0) {
const int UE_id = *cur_UE; const int UE_id = *cur_UE;
cur_UE = &UE_sched.next[UE_id]; // go to next
const uint8_t cqi = UE_info->UE_sched_ctrl[UE_id].dl_cqi[CC_id]; const uint8_t cqi = UE_info->UE_sched_ctrl[UE_id].dl_cqi[CC_id];
const int idx = CCE_try_allocate_dlsch(Mod_id, CC_id, subframe, UE_id, cqi); const int idx = CCE_try_allocate_dlsch(Mod_id, CC_id, subframe, UE_id, cqi);
if (idx < 0) { if (idx < 0) {
LOG_D(MAC, "cannot allocate CCE for UE %d, skipping\n", UE_id); LOG_D(MAC, "cannot allocate CCE for UE %d, skipping\n", UE_id);
// SKIP this UE in the list by marking the next as the current
*cur_UE = UE_sched.next[UE_id];
continue; continue;
} }
UE_info->UE_sched_ctrl[UE_id].pre_dci_dl_pdu_idx = idx; UE_info->UE_sched_ctrl[UE_id].pre_dci_dl_pdu_idx = idx;
...@@ -200,6 +226,7 @@ skip_ue: ...@@ -200,6 +226,7 @@ skip_ue:
const uint32_t B = UE_info->UE_template[CC_id][UE_id].dl_buffer_total; const uint32_t B = UE_info->UE_template[CC_id][UE_id].dl_buffer_total;
rb_required[UE_id] = find_nb_rb_DL(mcs, B, n_rbg_sched * RBGsize, RBGsize); rb_required[UE_id] = find_nb_rb_DL(mcs, B, n_rbg_sched * RBGsize, RBGsize);
max_num_ue--; max_num_ue--;
cur_UE = &UE_sched.next[UE_id]; // go to next
} }
*cur_UE = -1; // not all UEs might be allocated, mark end *cur_UE = -1; // not all UEs might be allocated, mark end
...@@ -284,56 +311,19 @@ int pf_wbcqi_dl_run(module_id_t Mod_id, ...@@ -284,56 +311,19 @@ int pf_wbcqi_dl_run(module_id_t Mod_id,
const uint8_t harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config, frame, subframe); const uint8_t harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config, frame, subframe);
UE_sched_ctrl_t *ue_ctrl = &UE_info->UE_sched_ctrl[UE_id]; UE_sched_ctrl_t *ue_ctrl = &UE_info->UE_sched_ctrl[UE_id];
const uint8_t round = ue_ctrl->round[CC_id][harq_pid]; const uint8_t round = ue_ctrl->round[CC_id][harq_pid];
if (round != 8) { // retransmission: allocate if (round != 8) {
const int nb_rb = UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid]; bool r = try_allocate_harq_retransmission(Mod_id, CC_id, frame, subframe,
if (nb_rb == 0) UE_id, rbg, &n_rbg_sched,
continue; rbgalloc_mask);
int nb_rbg = (nb_rb + (nb_rb % RBGsize)) / RBGsize; if (r) {
// needs more RBGs than we can allocate /* if there are no more RBG to give, return */
if (nb_rbg > n_rbg_sched) { if (n_rbg_sched <= 0)
LOG_D(MAC, return 0;
"retransmission of UE %d needs more RBGs (%d) than we have (%d)\n", max_num_ue--;
UE_id, nb_rbg, n_rbg_sched); if (max_num_ue == 0)
continue; return n_rbg_sched;
} for (; !rbgalloc_mask[rbg]; rbg++) /* fast-forward */ ;
// ensure that the number of RBs can be contained by the RBGs (!), i.e.
// if we allocate the last RBG this one should have the full RBGsize
if ((nb_rb % RBGsize) == 0 && nb_rbg == n_rbg_sched
&& rbgalloc_mask[N_RBG - 1] && RBGlastsize != RBGsize) {
LOG_D(MAC,
"retransmission of UE %d needs %d RBs, but the last RBG %d is too small (%d, normal %d)\n",
UE_id, nb_rb, N_RBG - 1, RBGlastsize, RBGsize);
continue;
} }
const uint8_t cqi = ue_ctrl->dl_cqi[CC_id];
const int idx = CCE_try_allocate_dlsch(Mod_id, CC_id, subframe, UE_id, cqi);
if (idx < 0)
continue; // cannot allocate CCE
ue_ctrl->pre_dci_dl_pdu_idx = idx;
// retransmissions: directly allocate
n_rbg_sched -= nb_rbg;
ue_ctrl->pre_nb_available_rbs[CC_id] += nb_rb;
for (; nb_rbg > 0; rbg++) {
if (!rbgalloc_mask[rbg])
continue;
ue_ctrl->rballoc_sub_UE[CC_id][rbg] = 1;
rbgalloc_mask[rbg] = 0;
nb_rbg--;
}
LOG_D(MAC,
"%4d.%d n_rbg_sched %d after retransmission reservation for UE %d "
"round %d retx nb_rb %d pre_nb_available_rbs %d\n",
frame, subframe, n_rbg_sched, UE_id, round,
UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid],
ue_ctrl->pre_nb_available_rbs[CC_id]);
/* if there are no more RBG to give, return */
if (n_rbg_sched <= 0)
return 0;
max_num_ue--;
/* if there are no UEs that can be allocated anymore, return */
if (max_num_ue == 0)
return n_rbg_sched;
for (; !rbgalloc_mask[rbg]; rbg++) /* fast-forward */ ;
} else { } else {
if (UE_info->UE_template[CC_id][UE_id].dl_buffer_total == 0) if (UE_info->UE_template[CC_id][UE_id].dl_buffer_total == 0)
continue; continue;
...@@ -436,56 +426,19 @@ int mt_wbcqi_dl_run(module_id_t Mod_id, ...@@ -436,56 +426,19 @@ int mt_wbcqi_dl_run(module_id_t Mod_id,
const uint8_t harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config, frame, subframe); const uint8_t harq_pid = frame_subframe2_dl_harq_pid(cc->tdd_Config, frame, subframe);
UE_sched_ctrl_t *ue_ctrl = &UE_info->UE_sched_ctrl[UE_id]; UE_sched_ctrl_t *ue_ctrl = &UE_info->UE_sched_ctrl[UE_id];
const uint8_t round = ue_ctrl->round[CC_id][harq_pid]; const uint8_t round = ue_ctrl->round[CC_id][harq_pid];
if (round != 8) { // retransmission: allocate if (round != 8) {
const int nb_rb = UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid]; bool r = try_allocate_harq_retransmission(Mod_id, CC_id, frame, subframe,
if (nb_rb == 0) UE_id, rbg, &n_rbg_sched,
continue; rbgalloc_mask);
int nb_rbg = (nb_rb + (nb_rb % RBGsize)) / RBGsize; if (r) {
// needs more RBGs than we can allocate /* if there are no more RBG to give, return */
if (nb_rbg > n_rbg_sched) { if (n_rbg_sched <= 0)
LOG_D(MAC, return 0;
"retransmission of UE %d needs more RBGs (%d) than we have (%d)\n", max_num_ue--;
UE_id, nb_rbg, n_rbg_sched); if (max_num_ue == 0)
continue; return n_rbg_sched;
} for (; !rbgalloc_mask[rbg]; rbg++) /* fast-forward */ ;
// ensure that the number of RBs can be contained by the RBGs (!), i.e.
// if we allocate the last RBG this one should have the full RBGsize
if ((nb_rb % RBGsize) == 0 && nb_rbg == n_rbg_sched
&& rbgalloc_mask[N_RBG - 1] && RBGlastsize != RBGsize) {
LOG_D(MAC,
"retransmission of UE %d needs %d RBs, but the last RBG %d is too small (%d, normal %d)\n",
UE_id, nb_rb, N_RBG - 1, RBGlastsize, RBGsize);
continue;
} }
const uint8_t cqi = ue_ctrl->dl_cqi[CC_id];
const int idx = CCE_try_allocate_dlsch(Mod_id, CC_id, subframe, UE_id, cqi);
if (idx < 0)
continue; // cannot allocate CCE
ue_ctrl->pre_dci_dl_pdu_idx = idx;
// retransmissions: directly allocate
n_rbg_sched -= nb_rbg;
ue_ctrl->pre_nb_available_rbs[CC_id] += nb_rb;
for (; nb_rbg > 0; rbg++) {
if (!rbgalloc_mask[rbg])
continue;
ue_ctrl->rballoc_sub_UE[CC_id][rbg] = 1;
rbgalloc_mask[rbg] = 0;
nb_rbg--;
}
LOG_D(MAC,
"%4d.%d n_rbg_sched %d after retransmission reservation for UE %d "
"round %d retx nb_rb %d pre_nb_available_rbs %d\n",
frame, subframe, n_rbg_sched, UE_id, round,
UE_info->UE_template[CC_id][UE_id].nb_rb[harq_pid],
ue_ctrl->pre_nb_available_rbs[CC_id]);
/* if there are no more RBG to give, return */
if (n_rbg_sched <= 0)
return 0;
max_num_ue--;
/* if there are no UEs that can be allocated anymore, return */
if (max_num_ue == 0)
return n_rbg_sched;
for (; !rbgalloc_mask[rbg]; rbg++) /* fast-forward */ ;
} else { } else {
if (UE_info->UE_template[CC_id][UE_id].dl_buffer_total == 0) if (UE_info->UE_template[CC_id][UE_id].dl_buffer_total == 0)
continue; continue;
...@@ -942,7 +895,10 @@ int rr_ul_run(module_id_t Mod_id, ...@@ -942,7 +895,10 @@ int rr_ul_run(module_id_t Mod_id,
&tx_power); &tx_power);
UE_template->pre_assigned_mcs_ul = mcs; UE_template->pre_assigned_mcs_ul = mcs;
rb_idx_required[UE_id] = rb_table_index; /* rb_idx_given >= 22: apparently the PHY cannot support more than 48
* RBs in the uplink. Hence, we limit every UE to 48 RBs, which is at
* index 22 */
rb_idx_required[UE_id] = min(22, rb_table_index);
//UE_template->pre_allocated_nb_rb_ul = rb_table[rb_table_index]; //UE_template->pre_allocated_nb_rb_ul = rb_table[rb_table_index];
/* only print log when PHR changed */ /* only print log when PHR changed */
static int phr = 0; static int phr = 0;
......
...@@ -336,6 +336,8 @@ void static_dl(module_id_t mod_id, ...@@ -336,6 +336,8 @@ void static_dl(module_id_t mod_id,
rbgalloc_slice_mask[rbg] = rbgalloc_mask[rbg]; rbgalloc_slice_mask[rbg] = rbgalloc_mask[rbg];
n_rbg_sched += rbgalloc_mask[rbg]; n_rbg_sched += rbgalloc_mask[rbg];
} }
if (n_rbg_sched == 0) /* no free RBGs, e.g., taken by RA */
continue;
s->s[i]->dl_algo.run(mod_id, s->s[i]->dl_algo.run(mod_id,
CC_id, CC_id,
...@@ -445,6 +447,8 @@ void static_ul(module_id_t mod_id, ...@@ -445,6 +447,8 @@ void static_ul(module_id_t mod_id,
} }
if (!last_rb_blocked) if (!last_rb_blocked)
rbs[n_contig - 1].length = p->posHigh - rbs[n_contig - 1].start + 1; rbs[n_contig - 1].length = p->posHigh - rbs[n_contig - 1].start + 1;
if (n_contig == 1 && rbs[0].length == 0) /* no RBs, e.g., taken by RA */
continue;
s->s[i]->ul_algo.run(mod_id, s->s[i]->ul_algo.run(mod_id,
CC_id, CC_id,
......
...@@ -203,17 +203,6 @@ typedef struct IF_Module_s{ ...@@ -203,17 +203,6 @@ typedef struct IF_Module_s{
pthread_mutex_t if_mutex; pthread_mutex_t if_mutex;
} IF_Module_t; } IF_Module_t;
// These mutex is used for multiple UEs L2 FAPI simulator.
// Each UEs set these value in UL and UL_INFO is shared in all UE's thread.
typedef struct {
pthread_mutex_t rx_mutex;
pthread_mutex_t crc_mutex;
pthread_mutex_t sr_mutex;
pthread_mutex_t harq_mutex;
pthread_mutex_t cqi_mutex;
pthread_mutex_t rach_mutex;
} FILL_UL_INFO_MUTEX_t;
/*Initial */ /*Initial */
IF_Module_t *IF_Module_init(int Mod_id); IF_Module_t *IF_Module_init(int Mod_id);
void IF_Module_kill(int Mod_id); void IF_Module_kill(int Mod_id);
......
...@@ -64,8 +64,6 @@ void fill_rx_indication_UE_MAC(module_id_t Mod_id, ...@@ -64,8 +64,6 @@ void fill_rx_indication_UE_MAC(module_id_t Mod_id,
nfapi_rx_indication_pdu_t *pdu; nfapi_rx_indication_pdu_t *pdu;
int timing_advance_update; int timing_advance_update;
pthread_mutex_lock(&fill_ul_mutex.rx_mutex);
UL_INFO->rx_ind.sfn_sf = frame << 4 | subframe; UL_INFO->rx_ind.sfn_sf = frame << 4 | subframe;
UL_INFO->rx_ind.rx_indication_body.tl.tag = NFAPI_RX_INDICATION_BODY_TAG; UL_INFO->rx_ind.rx_indication_body.tl.tag = NFAPI_RX_INDICATION_BODY_TAG;
UL_INFO->rx_ind.vendor_extension = ul_config_req->vendor_extension; UL_INFO->rx_ind.vendor_extension = ul_config_req->vendor_extension;
...@@ -101,7 +99,6 @@ void fill_rx_indication_UE_MAC(module_id_t Mod_id, ...@@ -101,7 +99,6 @@ void fill_rx_indication_UE_MAC(module_id_t Mod_id,
UL_INFO->rx_ind.rx_indication_body.number_of_pdus++; UL_INFO->rx_ind.rx_indication_body.number_of_pdus++;
UL_INFO->rx_ind.sfn_sf = frame << 4 | subframe; UL_INFO->rx_ind.sfn_sf = frame << 4 | subframe;
pthread_mutex_unlock(&fill_ul_mutex.rx_mutex);
} }
void fill_sr_indication_UE_MAC(int Mod_id, void fill_sr_indication_UE_MAC(int Mod_id,
...@@ -109,8 +106,6 @@ void fill_sr_indication_UE_MAC(int Mod_id, ...@@ -109,8 +106,6 @@ void fill_sr_indication_UE_MAC(int Mod_id,
int subframe, int subframe,
UL_IND_t *UL_INFO, UL_IND_t *UL_INFO,
uint16_t rnti) { uint16_t rnti) {
pthread_mutex_lock(&fill_ul_mutex.sr_mutex);
nfapi_sr_indication_t *sr_ind = &UL_INFO->sr_ind; nfapi_sr_indication_t *sr_ind = &UL_INFO->sr_ind;
nfapi_sr_indication_body_t *sr_ind_body = &sr_ind->sr_indication_body; nfapi_sr_indication_body_t *sr_ind_body = &sr_ind->sr_indication_body;
nfapi_sr_indication_pdu_t *pdu = &sr_ind_body->sr_pdu_list[sr_ind_body->number_of_srs]; nfapi_sr_indication_pdu_t *pdu = &sr_ind_body->sr_pdu_list[sr_ind_body->number_of_srs];
...@@ -139,7 +134,6 @@ void fill_sr_indication_UE_MAC(int Mod_id, ...@@ -139,7 +134,6 @@ void fill_sr_indication_UE_MAC(int Mod_id,
// UL_INFO->rx_ind.rx_indication_body.number_of_pdus++; // UL_INFO->rx_ind.rx_indication_body.number_of_pdus++;
sr_ind_body->number_of_srs++; sr_ind_body->number_of_srs++;
pthread_mutex_unlock(&fill_ul_mutex.sr_mutex);
} }
void fill_crc_indication_UE_MAC(int Mod_id, void fill_crc_indication_UE_MAC(int Mod_id,
...@@ -149,8 +143,6 @@ void fill_crc_indication_UE_MAC(int Mod_id, ...@@ -149,8 +143,6 @@ void fill_crc_indication_UE_MAC(int Mod_id,
uint8_t crc_flag, uint8_t crc_flag,
int index, int index,
uint16_t rnti) { uint16_t rnti) {
pthread_mutex_lock(&fill_ul_mutex.crc_mutex);
nfapi_crc_indication_pdu_t *pdu = nfapi_crc_indication_pdu_t *pdu =
&UL_INFO->crc_ind.crc_indication_body &UL_INFO->crc_ind.crc_indication_body
.crc_pdu_list[UL_INFO->crc_ind.crc_indication_body.number_of_crcs]; .crc_pdu_list[UL_INFO->crc_ind.crc_indication_body.number_of_crcs];
...@@ -174,8 +166,6 @@ void fill_crc_indication_UE_MAC(int Mod_id, ...@@ -174,8 +166,6 @@ void fill_crc_indication_UE_MAC(int Mod_id,
__FUNCTION__, __FUNCTION__,
pdu->rx_ue_information.rnti, pdu->rx_ue_information.rnti,
UL_INFO->crc_ind.crc_indication_body.number_of_crcs); UL_INFO->crc_ind.crc_indication_body.number_of_crcs);
pthread_mutex_unlock(&fill_ul_mutex.crc_mutex);
} }
void fill_rach_indication_UE_MAC(int Mod_id, void fill_rach_indication_UE_MAC(int Mod_id,
...@@ -184,10 +174,6 @@ void fill_rach_indication_UE_MAC(int Mod_id, ...@@ -184,10 +174,6 @@ void fill_rach_indication_UE_MAC(int Mod_id,
UL_IND_t *UL_INFO, UL_IND_t *UL_INFO,
uint8_t ra_PreambleIndex, uint8_t ra_PreambleIndex,
uint16_t ra_RNTI) { uint16_t ra_RNTI) {
LOG_D(MAC, "fill_rach_indication_UE_MAC 1 \n");
pthread_mutex_lock(&fill_ul_mutex.rach_mutex);
UL_INFO->rach_ind.rach_indication_body.number_of_preambles = 1; UL_INFO->rach_ind.rach_indication_body.number_of_preambles = 1;
UL_INFO->rach_ind.header.message_id = NFAPI_RACH_INDICATION; UL_INFO->rach_ind.header.message_id = NFAPI_RACH_INDICATION;
...@@ -232,8 +218,6 @@ void fill_rach_indication_UE_MAC(int Mod_id, ...@@ -232,8 +218,6 @@ void fill_rach_indication_UE_MAC(int Mod_id,
// should call it when we merge with that branch. // should call it when we merge with that branch.
oai_nfapi_rach_ind(&UL_INFO->rach_ind); oai_nfapi_rach_ind(&UL_INFO->rach_ind);
free(UL_INFO->rach_ind.rach_indication_body.preamble_list); free(UL_INFO->rach_ind.rach_indication_body.preamble_list);
pthread_mutex_unlock(&fill_ul_mutex.rach_mutex);
} }
void fill_ulsch_cqi_indication_UE_MAC(int Mod_id, void fill_ulsch_cqi_indication_UE_MAC(int Mod_id,
...@@ -241,7 +225,6 @@ void fill_ulsch_cqi_indication_UE_MAC(int Mod_id, ...@@ -241,7 +225,6 @@ void fill_ulsch_cqi_indication_UE_MAC(int Mod_id,
uint8_t subframe, uint8_t subframe,
UL_IND_t *UL_INFO, UL_IND_t *UL_INFO,
uint16_t rnti) { uint16_t rnti) {
pthread_mutex_lock(&fill_ul_mutex.cqi_mutex);
nfapi_cqi_indication_pdu_t *pdu = nfapi_cqi_indication_pdu_t *pdu =
&UL_INFO->cqi_ind.cqi_indication_body &UL_INFO->cqi_ind.cqi_indication_body
.cqi_pdu_list[UL_INFO->cqi_ind.cqi_indication_body.number_of_cqis]; .cqi_pdu_list[UL_INFO->cqi_ind.cqi_indication_body.number_of_cqis];
...@@ -275,7 +258,6 @@ void fill_ulsch_cqi_indication_UE_MAC(int Mod_id, ...@@ -275,7 +258,6 @@ void fill_ulsch_cqi_indication_UE_MAC(int Mod_id,
raw_pdu->pdu[0] = cqi << 4; raw_pdu->pdu[0] = cqi << 4;
UL_INFO->cqi_ind.cqi_indication_body.number_of_cqis++; UL_INFO->cqi_ind.cqi_indication_body.number_of_cqis++;
pthread_mutex_unlock(&fill_ul_mutex.cqi_mutex);
} }
void fill_ulsch_harq_indication_UE_MAC( void fill_ulsch_harq_indication_UE_MAC(
...@@ -285,8 +267,6 @@ void fill_ulsch_harq_indication_UE_MAC( ...@@ -285,8 +267,6 @@ void fill_ulsch_harq_indication_UE_MAC(
UL_IND_t *UL_INFO, UL_IND_t *UL_INFO,
nfapi_ul_config_ulsch_harq_information *harq_information, nfapi_ul_config_ulsch_harq_information *harq_information,
uint16_t rnti) { uint16_t rnti) {
pthread_mutex_lock(&fill_ul_mutex.harq_mutex);
nfapi_harq_indication_pdu_t *pdu = nfapi_harq_indication_pdu_t *pdu =
&UL_INFO->harq_ind.harq_indication_body.harq_pdu_list &UL_INFO->harq_ind.harq_indication_body.harq_pdu_list
[UL_INFO->harq_ind.harq_indication_body.number_of_harqs]; [UL_INFO->harq_ind.harq_indication_body.number_of_harqs];
...@@ -318,7 +298,6 @@ void fill_ulsch_harq_indication_UE_MAC( ...@@ -318,7 +298,6 @@ void fill_ulsch_harq_indication_UE_MAC(
} }
UL_INFO->harq_ind.harq_indication_body.number_of_harqs++; UL_INFO->harq_ind.harq_indication_body.number_of_harqs++;
pthread_mutex_unlock(&fill_ul_mutex.harq_mutex);
} }
void fill_uci_harq_indication_UE_MAC(int Mod_id, void fill_uci_harq_indication_UE_MAC(int Mod_id,
...@@ -327,8 +306,6 @@ void fill_uci_harq_indication_UE_MAC(int Mod_id, ...@@ -327,8 +306,6 @@ void fill_uci_harq_indication_UE_MAC(int Mod_id,
UL_IND_t *UL_INFO, UL_IND_t *UL_INFO,
nfapi_ul_config_harq_information *harq_information, nfapi_ul_config_harq_information *harq_information,
uint16_t rnti) { uint16_t rnti) {
pthread_mutex_lock(&fill_ul_mutex.harq_mutex);
nfapi_harq_indication_t *ind = &UL_INFO->harq_ind; nfapi_harq_indication_t *ind = &UL_INFO->harq_ind;
nfapi_harq_indication_body_t *body = &ind->harq_indication_body; nfapi_harq_indication_body_t *body = &ind->harq_indication_body;
nfapi_harq_indication_pdu_t *pdu = nfapi_harq_indication_pdu_t *pdu =
...@@ -410,7 +387,6 @@ void fill_uci_harq_indication_UE_MAC(int Mod_id, ...@@ -410,7 +387,6 @@ void fill_uci_harq_indication_UE_MAC(int Mod_id,
LOG_D(PHY, LOG_D(PHY,
"Incremented eNB->UL_INFO.harq_ind.number_of_harqs:%d\n", "Incremented eNB->UL_INFO.harq_ind.number_of_harqs:%d\n",
UL_INFO->harq_ind.harq_indication_body.number_of_harqs); UL_INFO->harq_ind.harq_indication_body.number_of_harqs);
pthread_mutex_unlock(&fill_ul_mutex.harq_mutex);
} }
void handle_nfapi_ul_pdu_UE_MAC(module_id_t Mod_id, void handle_nfapi_ul_pdu_UE_MAC(module_id_t Mod_id,
......
...@@ -17,8 +17,6 @@ ...@@ -17,8 +17,6 @@
//#include "openair1/PHY/defs.h" //#include "openair1/PHY/defs.h"
//#include "openair1/PHY/LTE_TRANSPORT/defs.h" //#include "openair1/PHY/LTE_TRANSPORT/defs.h"
// this mutex is used to set multiple UE's UL value in L2 FAPI simulator.
FILL_UL_INFO_MUTEX_t fill_ul_mutex;
//below 2 difinitions move to phy_stub_UE.c to add initialization when difinition. //below 2 difinitions move to phy_stub_UE.c to add initialization when difinition.
extern UL_IND_t *UL_INFO; extern UL_IND_t *UL_INFO;
extern nfapi_tx_request_pdu_t* tx_request_pdu_list; extern nfapi_tx_request_pdu_t* tx_request_pdu_list;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment