diff --git a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_ulsch.c b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_ulsch.c index 9e8a82b37ed737b88699070064290fe4dd6d8a34..355883d0d5ff66a9ad3665495f03fe6d7326a5a2 100644 --- a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_ulsch.c +++ b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_ulsch.c @@ -264,6 +264,99 @@ void nr_process_mac_pdu( } } +void handle_nr_ul_harq(uint16_t slot, NR_UE_sched_ctrl_t *sched_ctrl, nfapi_nr_crc_t crc_pdu) { + + int max_harq_rounds = 4; // TODO define macro + uint8_t hrq_id = crc_pdu.harq_id; + NR_UE_ul_harq_t *cur_harq = &sched_ctrl->ul_harq_processes[hrq_id]; + if (cur_harq->state==ACTIVE_SCHED) { + if (!crc_pdu.tb_crc_status) { + cur_harq->ndi ^= 1; + cur_harq->round = 0; + cur_harq->state = INACTIVE; // passed -> make inactive. can be used by scheduler for next grant +#ifdef UL_HARQ_PRINT + printf("[HARQ HANDLER] Ulharq id %d crc passed, freeing it for scheduler\n",hrq_id); +#endif + } else { + cur_harq->round++; + cur_harq->state = ACTIVE_NOT_SCHED; +#ifdef UL_HARQ_PRINT + printf("[HARQ HANDLER] Ulharq id %d crc failed, requesting retransmission\n",hrq_id); +#endif + } + + if (!(cur_harq->round<max_harq_rounds)) { + cur_harq->ndi ^= 1; + cur_harq->state = INACTIVE; // failed after 4 rounds -> make inactive + cur_harq->round = 0; +#ifdef UL_HARQ_PRINT + printf("[HARQ HANDLER] Ulharq id %d crc failed in all round, freeing it for scheduler\n",hrq_id); +#endif + } + return; + } else + AssertFatal(0,"Incorrect UL HARQ process %d or invalid state %d\n",hrq_id,cur_harq->state); +} + +void handle_nr_uci(NR_UL_IND_t *UL_info, NR_UE_sched_ctrl_t *sched_ctrl, int target_snrx10) { + // TODO + int max_harq_rounds = 4; // TODO define macro + int num_ucis = UL_info->uci_ind.num_ucis; + nfapi_nr_uci_t *uci_list = UL_info->uci_ind.uci_list; + + for (int i = 0; i < num_ucis; i++) { + switch (uci_list[i].pdu_type) { + case NFAPI_NR_UCI_PDCCH_PDU_TYPE: break; + + case NFAPI_NR_UCI_FORMAT_0_1_PDU_TYPE: { + //if 
(get_softmodem_params()->phy_test == 0) { + nfapi_nr_uci_pucch_pdu_format_0_1_t *uci_pdu = &uci_list[i].pucch_pdu_format_0_1; + // handle harq + int harq_idx_s = 0; + // tpc (power control) + sched_ctrl->tpc1 = nr_get_tpc(target_snrx10,uci_pdu->ul_cqi,30); + // iterate over received harq bits + for (int harq_bit = 0; harq_bit < uci_pdu->harq->num_harq; harq_bit++) { + // search for the right harq process + for (int harq_idx = harq_idx_s; harq_idx < NR_MAX_NB_HARQ_PROCESSES; harq_idx++) { + // if the gNB received ack with a good confidence or if the max harq rounds was reached + if ((UL_info->slot-1) == sched_ctrl->harq_processes[harq_idx].feedback_slot) { + if (((uci_pdu->harq->harq_list[harq_bit].harq_value == 1) && + (uci_pdu->harq->harq_confidence_level == 0)) || + (sched_ctrl->harq_processes[harq_idx].round == max_harq_rounds)) { + // toggle NDI and reset round + sched_ctrl->harq_processes[harq_idx].ndi ^= 1; + sched_ctrl->harq_processes[harq_idx].round = 0; + } + else + sched_ctrl->harq_processes[harq_idx].round++; + sched_ctrl->harq_processes[harq_idx].is_waiting = 0; + harq_idx_s = harq_idx + 1; + break; + } + // if feedback slot processing is aborted + else if (((UL_info->slot-1) > sched_ctrl->harq_processes[harq_idx].feedback_slot) && + (sched_ctrl->harq_processes[harq_idx].is_waiting)) { + sched_ctrl->harq_processes[harq_idx].round++; + if (sched_ctrl->harq_processes[harq_idx].round == max_harq_rounds) { + sched_ctrl->harq_processes[harq_idx].ndi ^= 1; + sched_ctrl->harq_processes[harq_idx].round = 0; + } + sched_ctrl->harq_processes[harq_idx].is_waiting = 0; + } + } + } + //} + break; + } + + case NFAPI_NR_UCI_FORMAT_2_3_4_PDU_TYPE: break; + } + } + + UL_info->uci_ind.num_ucis = 0; +} + /* * When data are received on PHY and transmitted to MAC */ diff --git a/openair2/LAYER2/NR_MAC_gNB/mac_proto.h b/openair2/LAYER2/NR_MAC_gNB/mac_proto.h index 2ccfcd5b149f03198771c5688f93c7574d511edd..aba9baf9adfa6805cdf58d055663576390127c3d 100644 --- 
a/openair2/LAYER2/NR_MAC_gNB/mac_proto.h +++ b/openair2/LAYER2/NR_MAC_gNB/mac_proto.h @@ -340,4 +340,7 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP, const uint8_t ul_cqi, const uint16_t rssi); +void handle_nr_ul_harq(uint16_t slot, NR_UE_sched_ctrl_t *sched_ctrl, nfapi_nr_crc_t crc_pdu); + +void handle_nr_uci(NR_UL_IND_t *UL_info, NR_UE_sched_ctrl_t *sched_ctrl, int target_snrx10); #endif /*__LAYER2_NR_MAC_PROTO_H__*/ diff --git a/openair2/NR_PHY_INTERFACE/NR_IF_Module.c b/openair2/NR_PHY_INTERFACE/NR_IF_Module.c index 8a57fccc7b6f46ac63bc8839b4ca6fd8d686bd0c..93389c4f708cb297a855bcf1e3f97edd7ea9f405 100644 --- a/openair2/NR_PHY_INTERFACE/NR_IF_Module.c +++ b/openair2/NR_PHY_INTERFACE/NR_IF_Module.c @@ -78,100 +78,6 @@ void handle_nr_rach(NR_UL_IND_t *UL_info) { } -void handle_nr_uci(NR_UL_IND_t *UL_info, NR_UE_sched_ctrl_t *sched_ctrl, int target_snrx10) { - // TODO - int max_harq_rounds = 4; // TODO define macro - int num_ucis = UL_info->uci_ind.num_ucis; - nfapi_nr_uci_t *uci_list = UL_info->uci_ind.uci_list; - - for (int i = 0; i < num_ucis; i++) { - switch (uci_list[i].pdu_type) { - case NFAPI_NR_UCI_PDCCH_PDU_TYPE: break; - - case NFAPI_NR_UCI_FORMAT_0_1_PDU_TYPE: { - //if (get_softmodem_params()->phy_test == 0) { - nfapi_nr_uci_pucch_pdu_format_0_1_t *uci_pdu = &uci_list[i].pucch_pdu_format_0_1; - // handle harq - int harq_idx_s = 0; - // tpc (power control) - sched_ctrl->tpc1 = nr_get_tpc(target_snrx10,uci_pdu->ul_cqi,30); - // iterate over received harq bits - for (int harq_bit = 0; harq_bit < uci_pdu->harq->num_harq; harq_bit++) { - // search for the right harq process - for (int harq_idx = harq_idx_s; harq_idx < NR_MAX_NB_HARQ_PROCESSES; harq_idx++) { - // if the gNB received ack with a good confidence or if the max harq rounds was reached - if ((UL_info->slot-1) == sched_ctrl->harq_processes[harq_idx].feedback_slot) { - if (((uci_pdu->harq->harq_list[harq_bit].harq_value == 1) && - (uci_pdu->harq->harq_confidence_level == 0)) || - 
(sched_ctrl->harq_processes[harq_idx].round == max_harq_rounds)) { - // toggle NDI and reset round - sched_ctrl->harq_processes[harq_idx].ndi ^= 1; - sched_ctrl->harq_processes[harq_idx].round = 0; - } - else - sched_ctrl->harq_processes[harq_idx].round++; - sched_ctrl->harq_processes[harq_idx].is_waiting = 0; - harq_idx_s = harq_idx + 1; - break; - } - // if feedback slot processing is aborted - else if (((UL_info->slot-1) > sched_ctrl->harq_processes[harq_idx].feedback_slot) && - (sched_ctrl->harq_processes[harq_idx].is_waiting)) { - sched_ctrl->harq_processes[harq_idx].round++; - if (sched_ctrl->harq_processes[harq_idx].round == max_harq_rounds) { - sched_ctrl->harq_processes[harq_idx].ndi ^= 1; - sched_ctrl->harq_processes[harq_idx].round = 0; - } - sched_ctrl->harq_processes[harq_idx].is_waiting = 0; - } - } - } - //} - break; - } - - case NFAPI_NR_UCI_FORMAT_2_3_4_PDU_TYPE: break; - } - } - - UL_info->uci_ind.num_ucis = 0; -} - -void handle_nr_ul_harq(uint16_t slot, NR_UE_sched_ctrl_t *sched_ctrl, nfapi_nr_crc_t crc_pdu) { - - int max_harq_rounds = 4; // TODO define macro - uint8_t hrq_id = crc_pdu.harq_id; - NR_UE_ul_harq_t *cur_harq = &sched_ctrl->ul_harq_processes[hrq_id]; - if (cur_harq->state==ACTIVE_SCHED) { - if (!crc_pdu.tb_crc_status) { - cur_harq->ndi ^= 1; - cur_harq->round = 0; - cur_harq->state = INACTIVE; // passed -> make inactive. 
can be used by scheduder for next grant -#ifdef UL_HARQ_PRINT - printf("[HARQ HANDLER] Ulharq id %d crc passed, freeing it for scheduler\n",hrq_id); -#endif - } else { - cur_harq->round++; - cur_harq->state = ACTIVE_NOT_SCHED; -#ifdef UL_HARQ_PRINT - printf("[HARQ HANDLER] Ulharq id %d crc failed, requesting retransmission\n",hrq_id); -#endif - } - - if (!(cur_harq->round<max_harq_rounds)) { - cur_harq->ndi ^= 1; - cur_harq->state = INACTIVE; // failed after 4 rounds -> make inactive - cur_harq->round = 0; -#ifdef UL_HARQ_PRINT - printf("[HARQ HANDLER] Ulharq id %d crc failed in all round, freeing it for scheduler\n",hrq_id); -#endif - } - return; - } else - AssertFatal(0,"Incorrect UL HARQ process %d or invalid state %d\n",hrq_id,cur_harq->state); -} - - void handle_nr_ulsch(NR_UL_IND_t *UL_info, NR_UE_sched_ctrl_t *sched_ctrl) { if(nfapi_mode == 1) { if (UL_info->crc_ind.number_crcs>0) {