Commit 539ff7bf authored by Raymond Knopp

Merge branch 'NR_RRC_harq_hacks' of https://gitlab.eurecom.fr/oai/openairinterface5g into NR_RRC_harq_hacks
parents fe5cd3d2 6fa5a6cf
@@ -219,7 +219,8 @@ void nr_fill_dci(PHY_VARS_gNB *gNB,
   }
   dlsch = gNB->dlsch[dlsch_id][0];
-  int harq_pid = 0;//extract_harq_pid(i,pdu_rel15);
+  int num_slots_tdd = (gNB->frame_parms.slots_per_frame)>>(7-gNB->gNB_config.tdd_table.tdd_period.value);
+  int harq_pid = slot % num_slots_tdd;
   dlsch->slot_tx[slot] = 1;
   dlsch->harq_ids[frame%2][slot] = harq_pid;
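Instead of pinning every DL assignment to HARQ process 0, the hack derives the process ID from the transmit slot within one TDD period. The shift works because the NFAPI `tdd_period` codes of interest (5, 6, 7 for 2.5/5/10 ms) denote power-of-two fractions of a 10 ms frame. A minimal standalone sketch of the arithmetic, assuming 30 kHz SCS (20 slots per frame) and a 5 ms period; both values are assumptions for illustration:

```c
#include <stdio.h>

int main(void) {
  int slots_per_frame = 20;  /* assumption: mu = 1 (30 kHz SCS) */
  int tdd_period_value = 6;  /* assumption: NFAPI code for a 5 ms TDD period */

  /* same expression as in nr_fill_dci: 20 >> (7 - 6) = 10 slots per period */
  int num_slots_tdd = slots_per_frame >> (7 - tdd_period_value);

  for (int slot = 0; slot < slots_per_frame; slot++)
    printf("slot %2d -> harq_pid %d\n", slot, slot % num_slots_tdd);
  /* slots 0..9 get pids 0..9; slots 10..19 reuse the same pids next period */
  return 0;
}
```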
@@ -126,7 +126,7 @@ uint8_t nr_generate_pdsch(NR_gNB_DLSCH_t *dlsch,
                           time_stats_t *dlsch_interleaving_stats,
                           time_stats_t *dlsch_segmentation_stats) {
-  int harq_pid = 0;
+  int harq_pid = dlsch->harq_ids[frame%2][slot];
   NR_DL_gNB_HARQ_t *harq = dlsch->harq_processes[harq_pid];
   nfapi_nr_dl_tti_pdsch_pdu_rel15_t *rel15 = &harq->pdsch_pdu.pdsch_pdu_rel15;
   uint32_t scrambled_output[NR_MAX_NB_CODEWORDS][NR_MAX_PDSCH_ENCODED_LENGTH>>5];
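`nr_fill_dci` stores the chosen pid in `dlsch->harq_ids[frame%2][slot]` and `nr_generate_pdsch` reads it back with the same key, so the DCI and the PDSCH payload are built from the same HARQ buffer; the two-entry frame dimension is a small ring, presumably to survive one frame of pipelining between scheduling and transmission. A reduced sketch of that handshake (types trimmed to the fields actually used; the sizes are assumptions):

```c
#include <assert.h>

#define FRAME_RING 2
#define SLOTS      20  /* assumption: mu = 1 */

typedef struct { int harq_ids[FRAME_RING][SLOTS]; } dlsch_t;

/* DCI side: remember which process owns (frame, slot) */
void fill_dci(dlsch_t *d, int frame, int slot, int harq_pid) {
  d->harq_ids[frame % FRAME_RING][slot] = harq_pid;
}

/* PDSCH side: recover the same process when the slot is actually encoded */
int generate_pdsch(const dlsch_t *d, int frame, int slot) {
  return d->harq_ids[frame % FRAME_RING][slot];
}

int main(void) {
  dlsch_t d;
  fill_dci(&d, 101, 3, 3);
  assert(generate_pdsch(&d, 101, 3) == 3);
  return 0;
}
```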
@@ -377,7 +377,7 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP,
   if (scc->tdd_UL_DL_ConfigurationCommon->pattern1.nrofUplinkSymbols!=0)
     nr_ulmix_slots++;
-  if (slot_txP == 0) {
+  if ((slot_txP == 0) && (UE_list->fiveG_connected[UE_id] || get_softmodem_params()->phy_test)) {
     for (int k=0; k<nr_ulmix_slots; k++) {
       memset((void *) &UE_list->UE_sched_ctrl[UE_id].sched_pucch[k],
              0,
@@ -467,6 +467,7 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP,
   // Phytest scheduling
   if (get_softmodem_params()->phy_test && (is_xlsch_in_slot(*dlsch_in_slot_bitmap,slot_txP%num_slots_per_tdd))) {
+    ue_sched_ctl->current_harq_pid = slot_txP % num_slots_per_tdd;
     nr_update_pucch_scheduling(module_idP, UE_id, frame_txP, slot_txP, num_slots_per_tdd,&pucch_sched);
     nr_schedule_uss_dlsch_phytest(module_idP, frame_txP, slot_txP, &UE_list->UE_sched_ctrl[UE_id].sched_pucch[pucch_sched], NULL);
     // resetting ta flag
@@ -474,12 +475,12 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP,
   }
   // Test DL scheduling
-  if (get_softmodem_params()->phy_test == 0 && slot_txP == 1 && UE_list->fiveG_connected[UE_id]) {
+  if (get_softmodem_params()->phy_test == 0 && slot_txP>0 && slot_txP<7 && UE_list->fiveG_connected[UE_id]) {
+    ue_sched_ctl->current_harq_pid = slot_txP % num_slots_per_tdd;
     nr_update_pucch_scheduling(module_idP, UE_id, frame_txP, slot_txP, num_slots_per_tdd,&pucch_sched);
     nr_schedule_uss_dlsch_phytest(module_idP, frame_txP, slot_txP, &UE_list->UE_sched_ctrl[UE_id].sched_pucch[pucch_sched], NULL);
     // resetting ta flag
     gNB->ta_len = 0;
-    UE_list->fiveG_connected[UE_id] = false;
   }
@@ -494,8 +495,7 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP,
   if (is_nr_UL_slot(cc->ServingCellConfigCommon,slot_rxP)) {
     if (get_softmodem_params()->phy_test == 0) {
-      NR_sched_pucch *curr_pucch = UE_list->UE_sched_ctrl[UE_id].sched_pucch;
-      if (curr_pucch != NULL)
+      if (UE_list->fiveG_connected[UE_id])
         nr_schedule_pucch(module_idP, UE_id, frame_rxP, slot_rxP);
     schedule_nr_prach(module_idP, (frame_rxP+1)&1023, slot_rxP);
     nr_schedule_reception_msg3(module_idP, 0, frame_rxP, slot_rxP);
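Taken together, the scheduler hunks (a) reset the PUCCH bookkeeping at slot 0 only for a connected UE or in phy-test mode, (b) widen the test DL scheduling window from slot 1 alone to slots 1 through 6, with each slot claiming its own HARQ process via `current_harq_pid`, and (c) gate PUCCH scheduling in UL slots on `fiveG_connected` instead of checking the `sched_pucch` pointer. A sketch of the resulting per-period pattern, assuming `num_slots_per_tdd = 10` as in the first hunk:

```c
#include <stdio.h>

int main(void) {
  int num_slots_per_tdd = 10;  /* assumption, see the nr_fill_dci hunk */

  for (int slot_txP = 0; slot_txP < num_slots_per_tdd; slot_txP++) {
    if (slot_txP > 0 && slot_txP < 7) {
      /* one DLSCH per slot, each on its own process: pids 1..6 in flight */
      int current_harq_pid = slot_txP % num_slots_per_tdd;
      printf("slot %d: schedule DLSCH on harq_pid %d\n", slot_txP, current_harq_pid);
    }
  }
  return 0;
}
```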
@@ -309,11 +309,12 @@ int configure_fapi_dl_pdu(int Mod_idP,
   pdsch_pdu_rel15->NrOfCodewords = 1;
   int mcs = (mcsIndex!=NULL) ? *mcsIndex : 9;
+  int current_harq_pid = UE_list->UE_sched_ctrl[UE_id].current_harq_pid;
   pdsch_pdu_rel15->targetCodeRate[0] = nr_get_code_rate_dl(mcs,0);
   pdsch_pdu_rel15->qamModOrder[0] = 2;
   pdsch_pdu_rel15->mcsIndex[0] = mcs;
   pdsch_pdu_rel15->mcsTable[0] = 0;
-  pdsch_pdu_rel15->rvIndex[0] = UE_list->UE_sched_ctrl[UE_id].harq_processes[0].round;
+  pdsch_pdu_rel15->rvIndex[0] = UE_list->UE_sched_ctrl[UE_id].harq_processes[current_harq_pid].round;
   pdsch_pdu_rel15->dataScramblingId = *scc->physCellId;
   pdsch_pdu_rel15->nrOfLayers = 1;
   pdsch_pdu_rel15->transmissionScheme = 0;
@@ -365,8 +366,8 @@ int configure_fapi_dl_pdu(int Mod_idP,
   dci_pdu_rel15[0].mcs = pdsch_pdu_rel15->mcsIndex[0];
   dci_pdu_rel15[0].rv = pdsch_pdu_rel15->rvIndex[0];
   // harq pid
-  dci_pdu_rel15[0].harq_pid = 0;
-  dci_pdu_rel15[0].ndi = UE_list->UE_sched_ctrl[UE_id].harq_processes[0].ndi;
+  dci_pdu_rel15[0].harq_pid = current_harq_pid;
+  dci_pdu_rel15[0].ndi = UE_list->UE_sched_ctrl[UE_id].harq_processes[current_harq_pid].ndi;
   // DAI
   dci_pdu_rel15[0].dai[0].val = (pucch_sched->dai_c-1)&3;
@@ -376,6 +377,7 @@ int configure_fapi_dl_pdu(int Mod_idP,
   dci_pdu_rel15[0].pucch_resource_indicator = pucch_sched->resource_indicator;
   // PDSCH to HARQ TI
   dci_pdu_rel15[0].pdsch_to_harq_feedback_timing_indicator.val = pucch_sched->timing_indicator;
+  UE_list->UE_sched_ctrl[UE_id].harq_processes[current_harq_pid].feedback_slot = pucch_sched->ul_slot;
   // antenna ports
   dci_pdu_rel15[0].antenna_ports.val = 0; // nb of cdm groups w/o data 1 and dmrs port 0
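With the pid no longer hardwired to 0, the DCI carries the RV and NDI of the process actually being (re)transmitted, and the new `feedback_slot` entry records in which UL slot the corresponding PUCCH will arrive, which is what `handle_nr_uci` later matches against. A condensed sketch of that fill step; the struct and helper names outside the diff are illustrative, not the real API:

```c
#include <stdint.h>

typedef struct { uint8_t ndi; uint8_t round; uint16_t feedback_slot; } NR_UE_harq_t;
typedef struct { uint8_t harq_pid, rv, ndi; } dl_dci_t;

/* Copy per-process HARQ state into the DCI and note where feedback will land */
void fill_dci_harq_fields(NR_UE_harq_t *h, uint8_t harq_pid,
                          uint16_t pucch_ul_slot, dl_dci_t *dci) {
  dci->harq_pid = harq_pid; /* tells the UE which soft buffer to combine into */
  dci->rv  = h->round;      /* retransmission round drives the RV field       */
  dci->ndi = h->ndi;        /* toggles only when a new TB starts (see UCI)    */
  h->feedback_slot = pucch_ul_slot; /* matched against UL_info->slot-1 later  */
}
```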
@@ -113,6 +113,10 @@ void mac_top_init_gNB(void)
     UE_list->next[list_el] = list_el + 1;
     UE_list->next_ul[list_el] = list_el + 1;
     UE_list->active[list_el] = FALSE;
+    for (int list_harq = 0; list_harq < NR_MAX_NB_HARQ_PROCESSES; list_harq++) {
+      UE_list->UE_sched_ctrl[list_el].harq_processes[list_harq].round = 0;
+      UE_list->UE_sched_ctrl[list_el].harq_processes[list_harq].ndi = 0;
+    }
   }
   UE_list->next[list_el] = -1;
@@ -190,6 +190,7 @@ typedef struct NR_sched_pucch {
 typedef struct NR_UE_harq {
   uint8_t ndi;
   uint8_t round;
+  uint16_t feedback_slot;
 } NR_UE_harq_t;
 /*! \brief scheduling control information set through an API */
@@ -199,6 +200,7 @@ typedef struct {
   NR_sched_pucch *sched_pucch;
   uint16_t ta_timer;
   int16_t ta_update;
+  uint8_t current_harq_pid;
   NR_UE_harq_t harq_processes[NR_MAX_NB_HARQ_PROCESSES];
 } NR_UE_sched_ctrl_t;
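These two new fields are the glue of the whole branch: `current_harq_pid` threads the scheduler's per-slot choice into `configure_fapi_dl_pdu`, and `feedback_slot` ties each DL process to the UL slot that will carry its ACK/NACK. A hypothetical end-to-end walk under the hack's slot-based mapping; only the names that appear in the diff are real, the rest (including the macro value) is assumed:

```c
#include <stdint.h>

#define NR_MAX_NB_HARQ_PROCESSES 16  /* assumption about the real macro's value */

typedef struct { uint8_t ndi; uint8_t round; uint16_t feedback_slot; } NR_UE_harq_t;
typedef struct {
  uint8_t current_harq_pid;
  NR_UE_harq_t harq_processes[NR_MAX_NB_HARQ_PROCESSES];
} NR_UE_sched_ctrl_t;

/* Scheduler step for one DL slot: pick the process, remember the PUCCH slot */
NR_UE_harq_t *schedule_dl_slot(NR_UE_sched_ctrl_t *sc, int slot_txP,
                               int num_slots_per_tdd, uint16_t pucch_ul_slot) {
  sc->current_harq_pid = slot_txP % num_slots_per_tdd;
  NR_UE_harq_t *h = &sc->harq_processes[sc->current_harq_pid];
  h->feedback_slot = pucch_ul_slot;  /* handle_nr_uci searches for this value */
  return h;                          /* DCI fill then reads h->round / h->ndi */
}
```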
@@ -76,8 +76,44 @@ void handle_nr_rach(NR_UL_IND_t *UL_info) {
 }
-void handle_nr_uci(NR_UL_IND_t *UL_info) {
-  // TODO
+void handle_nr_uci(NR_UL_IND_t *UL_info, NR_UE_sched_ctrl_t *sched_ctrl) {
+  int max_harq_rounds = 4; // TODO define macro
+  int num_ucis = UL_info->uci_ind.num_ucis;
+  nfapi_nr_uci_t *uci_list = UL_info->uci_ind.uci_list;
+  for (int i = 0; i < num_ucis; i++) {
+    switch (uci_list[i].pdu_type) {
+      case NFAPI_NR_UCI_PDCCH_PDU_TYPE: break;
+      case NFAPI_NR_UCI_FORMAT_0_1_PDU_TYPE: {
+        nfapi_nr_uci_pucch_pdu_format_0_1_t *uci_pdu = &uci_list[i].pucch_pdu_format_0_1;
+        // handle harq
+        int harq_idx_s = 0;
+        // iterate over received harq bits
+        for (int harq_bit = 0; harq_bit < uci_pdu->harq->num_harq; harq_bit++) {
+          // search for the right harq process
+          for (int harq_idx = harq_idx_s; harq_idx < NR_MAX_NB_HARQ_PROCESSES-1; harq_idx++) {
+            if ((UL_info->slot-1) == sched_ctrl->harq_processes[harq_idx].feedback_slot) {
+              if (uci_pdu->harq->harq_list[harq_bit].harq_value == 0)
+                sched_ctrl->harq_processes[harq_idx].round++;
+              if ((uci_pdu->harq->harq_list[harq_bit].harq_value == 1) ||
+                  (sched_ctrl->harq_processes[harq_idx].round == max_harq_rounds)) {
+                sched_ctrl->harq_processes[harq_idx].ndi ^= 1;
+                sched_ctrl->harq_processes[harq_idx].round = 0;
+              }
+              harq_idx_s = harq_idx + 1;
+              break;
+            }
+          }
+        }
+        break;
+      }
+      case NFAPI_NR_UCI_FORMAT_2_3_4_PDU_TYPE: break;
+    }
+  }
+  UL_info->uci_ind.num_ucis = 0;
 }
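The new `handle_nr_uci` walks the UCI list and, for each received HARQ bit, finds the process whose recorded `feedback_slot` matches the previous slot (`UL_info->slot-1`): a NACK advances the retransmission round, while an ACK, or hitting the round limit, toggles NDI and resets the process for a fresh TB. The per-bit update rule in isolation, as a hedged restatement of the logic above:

```c
#include <stdbool.h>
#include <stdint.h>

#define MAX_HARQ_ROUNDS 4  /* the diff hardcodes 4 with a TODO for a macro */

typedef struct { uint8_t ndi; uint8_t round; uint16_t feedback_slot; } NR_UE_harq_t;

/* Minimal restatement of the ACK/NACK handling inside handle_nr_uci */
void harq_feedback(NR_UE_harq_t *h, bool ack) {
  if (!ack)
    h->round++;                      /* NACK: retransmit with the next round  */
  if (ack || h->round == MAX_HARQ_ROUNDS) {
    h->ndi ^= 1;                     /* new-data indicator flips for a new TB */
    h->round = 0;                    /* process is free again                 */
  }
}
```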
@@ -180,7 +216,7 @@ void NR_UL_indication(NR_UL_IND_t *UL_info) {
   // clear DL/UL info for new scheduling round
   clear_nr_nfapi_information(mac,CC_id,UL_info->frame,UL_info->slot);
   handle_nr_rach(UL_info);
-  handle_nr_uci(UL_info);
+  handle_nr_uci(UL_info, &mac->UE_list.UE_sched_ctrl[0]);
   // clear HI prior to handling ULSCH
   mac->UL_dci_req[CC_id].numPdus = 0;
   handle_nr_ulsch(UL_info);
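Note that the caller wires the new argument to `UE_sched_ctrl[0]`, so HARQ feedback is tracked for the first UE only, consistent with the single-UE assumption running through the rest of the branch.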