Commit 217f949f authored by Robert Schmidt

Merge remote-tracking branch 'origin/NR_pdcch_improvements' into integration_2022_wk06

parents fecba399 46f35e0f
......@@ -500,6 +500,12 @@ int get_subband_size(int NPRB,int size) {
}
// from start symbol index and nb of symbols to symbol occupation bitmap in a slot
uint16_t SL_to_bitmap(int startSymbolIndex, int nrOfSymbols) {
return ((1<<nrOfSymbols)-1)<<startSymbolIndex;
}
int get_SLIV(uint8_t S, uint8_t L) {
return ( (uint16_t)(((L-1)<=7)? (14*(L-1)+S) : (14*(15-L)+(13-S))) );
}
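For concreteness (illustration, not part of the diff): SL_to_bitmap(2, 4) returns ((1<<4)-1)<<2 = 0b111100, i.e. symbols 2..5 of the slot marked occupied, while get_SLIV(2, 4) takes the (L-1)<=7 branch and returns 14*(4-1)+2 = 44, the TS 38.214 start-and-length indicator. The inverse SLIV2SL() already exists in the tree (declared in the next hunk) but is not shown here; a sketch of the standard decoding, assuming the usual validity constraint S+L <= 14 (the function name is hypothetical):
void SLIV2SL_sketch(int SLIV, int *S, int *L) {
  const int q = SLIV / 14, r = SLIV % 14;
  if (r + q + 1 <= 14) { *S = r; *L = q + 1; } // was encoded as 14*(L-1)+S
  else { *S = 13 - r; *L = 15 - q; }           // was encoded as 14*(15-L)+(13-S)
}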
......
......@@ -78,6 +78,7 @@ uint32_t nr_get_code_rate(uint8_t Imcs, uint8_t table_idx);
int get_subband_size(int NPRB,int size);
void SLIV2SL(int SLIV,int *S,int *L);
int get_dmrs_port(int nl, uint16_t dmrs_ports);
uint16_t SL_to_bitmap(int startSymbolIndex, int nrOfSymbols);
int get_nb_periods_per_frame(uint8_t tdd_period);
#define CEILIDIV(a,b) ((a+b-1)/b)
......
......@@ -1122,11 +1122,11 @@ int pnf_phy_ul_dci_req(gNB_L1_rxtx_proc_t *proc, nfapi_pnf_p7_config_t *pnf_p7,
proc = &gNB->proc.L1_proc;
if (req->numPdus > 0) {
if (req->ul_dci_pdu_list[req->numPdus-1].PDUType == 0) { // copy only the last PDU (PHY can have only one UL PDCCH pdu)
msgTx->ul_pdcch_pdu = req->ul_dci_pdu_list[req->numPdus-1]; // copy the last pdu
}
else {
LOG_E(PHY,"[PNF] UL_DCI_REQ sfn_slot:%d PDU[%d] - unknown pdu type:%d\n", NFAPI_SFNSLOT2DEC(req->SFN, req->Slot), req->numPdus-1, req->ul_dci_pdu_list[req->numPdus-1].PDUType);
for (int i=0; i<req->numPdus; i++) {
if (req->ul_dci_pdu_list[i].PDUType == NFAPI_NR_DL_TTI_PDCCH_PDU_TYPE) // only possible value 0: PDCCH PDU
msgTx->ul_pdcch_pdu[i] = req->ul_dci_pdu_list[i];
else
LOG_E(PHY,"[PNF] UL_DCI_REQ sfn_slot:%d PDU[%d] - unknown pdu type:%d\n", NFAPI_SFNSLOT2DEC(req->SFN, req->Slot), req->numPdus-1, req->ul_dci_pdu_list[req->numPdus-1].PDUType);
}
}
......@@ -1207,8 +1207,7 @@ int pnf_phy_dl_tti_req(gNB_L1_rxtx_proc_t *proc, nfapi_pnf_p7_config_t *pnf_p7,
processingData_L1tx_t *msgTx = (processingData_L1tx_t *)NotifiedFifoData(res);
if (dl_tti_pdu_list[i].PDUType == NFAPI_NR_DL_TTI_PDCCH_PDU_TYPE) {
// we trust the scheduler sends only one PDCCH PDU per slot
msgTx->pdcch_pdu = dl_tti_pdu_list[i].pdcch_pdu; // fills the last received PDCCH PDU
msgTx->pdcch_pdu[i] = dl_tti_pdu_list[i].pdcch_pdu; // copies all the received PDCCH PDUs
}
else if (dl_tti_pdu_list[i].PDUType == NFAPI_NR_DL_TTI_SSB_PDU_TYPE) {
//NFAPI_TRACE(NFAPI_TRACE_INFO, "%s() PDU:%d BCH: pdu_index:%u pdu_length:%d sdu_length:%d BCH_SDU:%x,%x,%x\n", __FUNCTION__, i, pdu_index, bch_pdu->bch_pdu_rel8.length, tx_request_pdu[sfn][sf][pdu_index]->segments[0].segment_length, sdu[0], sdu[1], sdu[2]);
......
......@@ -1402,9 +1402,6 @@ typedef struct
} nfapi_nr_ul_dci_request_t;
*/
// normally one PDU per coreset per BWP
#define NFAPI_NR_MAX_UL_DCI_PDUS 4
typedef struct {
/// only possible value 0: PDCCH PDU
uint16_t PDUType;
......@@ -1418,7 +1415,7 @@ typedef struct {
uint16_t SFN;
uint16_t Slot;
uint8_t numPdus;
nfapi_nr_ul_dci_request_pdus_t ul_dci_pdu_list[NFAPI_NR_MAX_UL_DCI_PDUS];
nfapi_nr_ul_dci_request_pdus_t ul_dci_pdu_list[NFAPI_NR_MAX_NB_CORESETS];
} nfapi_nr_ul_dci_request_t;
//3.4.5 slot_errors
......
......@@ -32,7 +32,7 @@
#include "PHY/NR_REFSIG/ul_ref_seq_nr.h"
#include "PHY/NR_REFSIG/refsig_defs_ue.h"
#include "PHY/NR_REFSIG/nr_refsig.h"
#include <openair1/PHY/MODULATION/nr_modulation.h>
#include "PHY/MODULATION/nr_modulation.h"
#if 0
void phy_config_harq_ue(module_id_t Mod_id,
......
......@@ -250,20 +250,16 @@ void nr_generate_dci(nfapi_nr_dl_tti_pdcch_pdu_rel15_t *pdcch_pdu_rel15,
} // for (int d=0;d<pdcch_pdu_rel15->numDlDci;d++)
}
void nr_generate_dci_top(nfapi_nr_dl_tti_pdcch_pdu *pdcch_pdu,
nfapi_nr_dl_tti_pdcch_pdu *ul_dci_pdu,
void nr_generate_dci_top(processingData_L1tx_t *msgTx,
uint32_t **gold_pdcch_dmrs,
int32_t *txdataF,
int16_t amp,
NR_DL_FRAME_PARMS *frame_parms) {
AssertFatal(pdcch_pdu!=NULL || ul_dci_pdu!=NULL,"At least one pointer has to be !NULL\n");
for (int i=0; i<msgTx->num_ul_pdcch; i++)
nr_generate_dci(&msgTx->ul_pdcch_pdu[i].pdcch_pdu.pdcch_pdu_rel15,gold_pdcch_dmrs,txdataF,amp,frame_parms);
for (int i=0; i<msgTx->num_dl_pdcch; i++)
nr_generate_dci(&msgTx->pdcch_pdu[i].pdcch_pdu_rel15,gold_pdcch_dmrs,txdataF,amp,frame_parms);
if (pdcch_pdu) {
nr_generate_dci(&pdcch_pdu->pdcch_pdu_rel15,gold_pdcch_dmrs,txdataF,amp,frame_parms);
}
if (ul_dci_pdu) {
nr_generate_dci(&ul_dci_pdu->pdcch_pdu_rel15,gold_pdcch_dmrs,txdataF,amp,frame_parms);
}
}
......@@ -29,13 +29,11 @@ uint16_t nr_get_dci_size(nfapi_nr_dci_format_e format,
nfapi_nr_rnti_type_e rnti_type,
uint16_t N_RB);
void nr_generate_dci_top(
nfapi_nr_dl_tti_pdcch_pdu *pdcch_pdu,
nfapi_nr_dl_tti_pdcch_pdu *ul_pdcch_pdu,
uint32_t **gold_pdcch_dmrs,
int32_t *txdataF,
int16_t amp,
NR_DL_FRAME_PARMS *frame_parms);
void nr_generate_dci_top(processingData_L1tx_t *msgTx,
uint32_t **gold_pdcch_dmrs,
int32_t *txdataF,
int16_t amp,
NR_DL_FRAME_PARMS *frame_parms);
void nr_pdcch_scrambling(uint32_t *in,
uint32_t size,
......
......@@ -951,12 +951,14 @@ typedef struct processingData_L1tx {
int slot;
openair0_timestamp timestamp_tx;
PHY_VARS_gNB *gNB;
nfapi_nr_dl_tti_pdcch_pdu pdcch_pdu;
nfapi_nr_ul_dci_request_pdus_t ul_pdcch_pdu;
nfapi_nr_dl_tti_pdcch_pdu pdcch_pdu[NFAPI_NR_MAX_NB_CORESETS];
nfapi_nr_ul_dci_request_pdus_t ul_pdcch_pdu[NFAPI_NR_MAX_NB_CORESETS];
NR_gNB_CSIRS_t csirs_pdu[NUMBER_OF_NR_CSIRS_MAX];
NR_gNB_DLSCH_t *dlsch[NUMBER_OF_NR_DLSCH_MAX][2];
NR_gNB_SSB_t ssb[64];
uint16_t num_pdsch_slot;
int num_dl_pdcch;
int num_ul_pdcch;
time_stats_t phy_proc_tx;
} processingData_L1tx_t;
......
......@@ -166,10 +166,9 @@ void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO){
res = pullTpool(gNB->L1_tx_free, gNB->threadPool);
processingData_L1tx_t *msgTx = (processingData_L1tx_t *)NotifiedFifoData(res);
int pdcch_received=0;
msgTx->num_pdsch_slot=0;
msgTx->pdcch_pdu.pdcch_pdu_rel15.numDlDci = 0;
msgTx->ul_pdcch_pdu.pdcch_pdu.pdcch_pdu_rel15.numDlDci = 0;
msgTx->num_dl_pdcch=0;
msgTx->num_ul_pdcch=number_ul_dci_pdu;
msgTx->slot = slot;
msgTx->frame = frame;
......@@ -183,16 +182,14 @@ void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO){
break;
case NFAPI_NR_DL_TTI_PDCCH_PDU_TYPE:
AssertFatal(pdcch_received == 0, "pdcch_received is not 0, we can only handle one PDCCH PDU per slot\n");
msgTx->pdcch_pdu = dl_tti_pdu->pdcch_pdu;
pdcch_received = 1;
LOG_D(PHY,"frame %d, slot %d, Got NFAPI_NR_DL_TTI_PDCCH_PDU_TYPE for %d.%d\n",frame,slot,DL_req->SFN,DL_req->Slot);
msgTx->pdcch_pdu[msgTx->num_dl_pdcch] = dl_tti_pdu->pdcch_pdu;
msgTx->num_dl_pdcch++;
break;
case NFAPI_NR_DL_TTI_CSI_RS_PDU_TYPE:
LOG_D(PHY,"frame %d, slot %d, Got NFAPI_NR_DL_TTI_CSI_RS_PDU_TYPE for %d.%d\n",frame,slot,DL_req->SFN,DL_req->Slot);
handle_nfapi_nr_csirs_pdu(msgTx,frame,slot,
&dl_tti_pdu->csi_rs_pdu);
handle_nfapi_nr_csirs_pdu(msgTx,frame,slot,&dl_tti_pdu->csi_rs_pdu);
break;
case NFAPI_NR_DL_TTI_PDSCH_PDU_TYPE:
......@@ -203,13 +200,13 @@ void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO){
pduIndex,TX_req->pdu_list[pduIndex].num_TLV);
uint8_t *sdu = (uint8_t *)TX_req->pdu_list[pduIndex].TLVs[0].value.direct;
AssertFatal(msgTx->num_pdsch_slot < gNB->number_of_nr_dlsch_max,"Number of PDSCH PDUs %d exceeded the limit %d\n",
msgTx->num_pdsch_slot,gNB->number_of_nr_dlsch_max);
msgTx->num_pdsch_slot,gNB->number_of_nr_dlsch_max);
handle_nr_nfapi_pdsch_pdu(msgTx,&dl_tti_pdu->pdsch_pdu, sdu);
}
}
if (number_ul_dci_pdu > 0)
msgTx->ul_pdcch_pdu = UL_dci_req->ul_dci_pdu_list[number_ul_dci_pdu-1]; // copy the last pdu
for (int i=0; i<number_ul_dci_pdu; i++)
msgTx->ul_pdcch_pdu[i] = UL_dci_req->ul_dci_pdu_list[i];
pushNotifiedFIFO(gNB->L1_tx_filled,res);
}
......
......@@ -158,18 +158,15 @@ void phy_procedures_gNB_TX(processingData_L1tx_t *msgTx,
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_gNB_COMMON_TX,0);
int num_dl_dci = msgTx->pdcch_pdu.pdcch_pdu_rel15.numDlDci;
int num_ul_dci = msgTx->ul_pdcch_pdu.pdcch_pdu.pdcch_pdu_rel15.numDlDci;
int num_pdcch_pdus = msgTx->num_ul_pdcch + msgTx->num_dl_pdcch;
if (num_dl_dci > 0 || num_ul_dci > 0) {
LOG_D(PHY, "[gNB %d] Frame %d slot %d Calling nr_generate_dci_top (number of UL/DL DCI %d/%d)\n",
gNB->Mod_id, frame, slot, num_ul_dci, num_dl_dci);
if (num_pdcch_pdus > 0) {
LOG_D(PHY, "[gNB %d] Frame %d slot %d Calling nr_generate_dci_top (number of UL/DL PDCCH PDUs %d/%d)\n",
gNB->Mod_id, frame, slot, msgTx->num_ul_pdcch, msgTx->num_dl_pdcch);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_gNB_PDCCH_TX,1);
nr_generate_dci_top(
num_dl_dci > 0 ? &msgTx->pdcch_pdu : NULL,
num_ul_dci > 0 ? &msgTx->ul_pdcch_pdu.pdcch_pdu : NULL,
nr_generate_dci_top(msgTx,
gNB->nr_gold_pdcch_dmrs[slot],
&gNB->common_vars.txdataF[0][txdataF_offset],
AMP, fp);
......
......@@ -1053,8 +1053,6 @@ int main(int argc, char **argv)
while ((round<num_rounds) && (UE_harq_process->ack==0)) {
memset(RC.nrmac[0]->cce_list[1][0],0,MAX_NUM_CCE*sizeof(int));
memset(RC.nrmac[0]->cce_list[1][1],0,MAX_NUM_CCE*sizeof(int));
clear_nr_nfapi_information(RC.nrmac[0], 0, frame, slot);
UE_info->UE_sched_ctrl[0].harq_processes[harq_pid].ndi = !(trial&1);
......@@ -1062,7 +1060,7 @@ int main(int argc, char **argv)
UE_info->UE_sched_ctrl[0].harq_processes[harq_pid].round = round;
for (int i=0; i<MAX_NUM_CORESET; i++)
gNB_mac->UE_info.num_pdcch_cand[0][i] = 0;
gNB_mac->pdcch_cand[i] = 0;
if (css_flag == 0) {
nr_schedule_ue_spec(0, frame, slot);
......
......@@ -536,7 +536,6 @@ int rrc_mac_config_req_gNB(module_id_t Mod_idP,
if (CellGroup) {
const NR_ServingCellConfig_t *servingCellConfig = CellGroup->spCellConfig->spCellConfigDedicated;
const struct NR_ServingCellConfig__downlinkBWP_ToAddModList *bwpList = servingCellConfig->downlinkBWP_ToAddModList;
if(bwpList) {
AssertFatal(bwpList->list.count > 0, "downlinkBWP_ToAddModList has no BWPs!\n");
......@@ -607,6 +606,7 @@ int rrc_mac_config_req_gNB(module_id_t Mod_idP,
ra->msg3_dcch_dtch = false;
LOG_I(NR_MAC,"Added new RA process for UE RNTI %04x with initial CellGroup\n", rnti);
} else { // CellGroup has been updated
NR_ServingCellConfigCommon_t *scc = RC.nrmac[Mod_idP]->common_channels[0].ServingCellConfigCommon;
const int UE_id = find_nr_UE_id(Mod_idP,rnti);
int target_ss;
UE_info->CellGroup[UE_id] = CellGroup;
......@@ -627,19 +627,30 @@ int rrc_mac_config_req_gNB(module_id_t Mod_idP,
}
// update coreset/searchspace
void *bwpd = NULL;
NR_BWP_t *genericParameters = NULL;
target_ss = NR_SearchSpace__searchSpaceType_PR_common;
if ((sched_ctrl->active_bwp)) {
target_ss = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
bwpd = (void*)sched_ctrl->active_bwp->bwp_Dedicated;
genericParameters = &sched_ctrl->active_bwp->bwp_Common->genericParameters;
}
else if (CellGroup->spCellConfig &&
CellGroup->spCellConfig->spCellConfigDedicated &&
(CellGroup->spCellConfig->spCellConfigDedicated->initialDownlinkBWP)) {
target_ss = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
bwpd = (void*)CellGroup->spCellConfig->spCellConfigDedicated->initialDownlinkBWP;
genericParameters = &scc->downlinkConfigCommon->initialDownlinkBWP->genericParameters;
}
else
AssertFatal(1==0,"Either initial BWP or active BWP should always be present\n");
sched_ctrl->search_space = get_searchspace(scc, bwpd, target_ss);
sched_ctrl->coreset = get_coreset(Mod_idP, scc, bwpd, sched_ctrl->search_space, target_ss);
sched_ctrl->sched_pdcch = set_pdcch_structure(RC.nrmac[Mod_idP],
sched_ctrl->search_space,
sched_ctrl->coreset,
scc,
genericParameters,
NULL);
sched_ctrl->maxL = 2;
}
}
......
......@@ -68,7 +68,7 @@ void clear_nr_nfapi_information(gNB_MAC_INST * gNB,
const int num_slots = nr_slots_per_frame[*scc->ssbSubcarrierSpacing];
nfapi_nr_dl_tti_request_t *DL_req = &gNB->DL_req[0];
nfapi_nr_dl_tti_pdcch_pdu_rel15_t ***pdcch = (nfapi_nr_dl_tti_pdcch_pdu_rel15_t ***)gNB->pdcch_pdu_idx[CC_idP];
nfapi_nr_dl_tti_pdcch_pdu_rel15_t **pdcch = (nfapi_nr_dl_tti_pdcch_pdu_rel15_t **)gNB->pdcch_pdu_idx[CC_idP];
nfapi_nr_ul_tti_request_t *future_ul_tti_req =
&gNB->UL_tti_req_ahead[CC_idP][(slotP + num_slots - 1) % num_slots];
nfapi_nr_ul_dci_request_t *UL_dci_req = &gNB->UL_dci_req[0];
......@@ -81,7 +81,7 @@ void clear_nr_nfapi_information(gNB_MAC_INST * gNB,
DL_req[CC_idP].dl_tti_request_body.nPDUs = 0;
DL_req[CC_idP].dl_tti_request_body.nGroup = 0;
//DL_req[CC_idP].dl_tti_request_body.transmission_power_pcfich = 6000;
memset(pdcch, 0, sizeof(**pdcch) * MAX_NUM_BWP * MAX_NUM_CORESET);
memset(pdcch, 0, sizeof(*pdcch) * MAX_NUM_CORESET);
UL_dci_req[CC_idP].SFN = frameP;
UL_dci_req[CC_idP].Slot = slotP;
......@@ -278,7 +278,6 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP,
protocol_ctxt_t ctxt={0};
PROTOCOL_CTXT_SET_BY_MODULE_ID(&ctxt, module_idP, ENB_FLAG_YES, NOT_A_RNTI, frame, slot,module_idP);
const int bwp_id = 1;
char stats_output[16384];
gNB_MAC_INST *gNB = RC.nrmac[module_idP];
......@@ -309,13 +308,8 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP,
nr_rrc_trigger(&ctxt, 0 /*CC_id*/, frame, slot >> *scc->ssbSubcarrierSpacing);
}
memset(RC.nrmac[module_idP]->cce_list[0][0],0,MAX_NUM_CCE*sizeof(int)); // coreset0
memset(RC.nrmac[module_idP]->cce_list[0][1],0,MAX_NUM_CCE*sizeof(int)); // coreset1 on initialBWP
memset(RC.nrmac[module_idP]->cce_list[bwp_id][1],0,MAX_NUM_CCE*sizeof(int)); // coresetid 1
NR_UE_info_t *UE_info = &RC.nrmac[module_idP]->UE_info;
for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id])
for (int i=0; i<MAX_NUM_CORESET; i++)
UE_info->num_pdcch_cand[UE_id][i] = 0;
for (int i=0; i<MAX_NUM_CORESET; i++)
RC.nrmac[module_idP]->pdcch_cand[i] = 0;
for (int CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
//mbsfn_status[CC_id] = 0;
......
......@@ -314,7 +314,7 @@ void fill_ssb_vrb_map (NR_COMMON_channels_t *cc, int rbStart, uint16_t symStart
uint16_t *vrb_map = cc[CC_id].vrb_map;
for (int rb = 0; rb < 20; rb++)
vrb_map[rbStart + rb] = 15<<symStart;
vrb_map[rbStart + rb] = SL_to_bitmap(symStart, 4);
}
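This change is behavior-preserving: an SSB occupies 4 consecutive OFDM symbols, and SL_to_bitmap(symStart, 4) expands to ((1<<4)-1)<<symStart = 15<<symStart, exactly the literal it replaces, but now states the intent of the mask.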
......@@ -329,7 +329,9 @@ uint32_t schedule_control_sib1(module_id_t module_id,
int num_total_bytes) {
gNB_MAC_INST *gNB_mac = RC.nrmac[module_id];
uint16_t *vrb_map = RC.nrmac[module_id]->common_channels[CC_id].vrb_map;
NR_COMMON_channels_t *cc = &gNB_mac->common_channels[CC_id];
NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon;
uint16_t *vrb_map = cc->vrb_map;
if (gNB_mac->sched_ctrlCommon == NULL){
LOG_D(NR_MAC,"schedule_control_common: Filling nr_mac->sched_ctrlCommon\n");
......@@ -340,6 +342,12 @@ uint32_t schedule_control_sib1(module_id_t module_id,
fill_coresetZero(gNB_mac->sched_ctrlCommon->coreset,type0_PDCCH_CSS_config);
gNB_mac->cset0_bwp_start = type0_PDCCH_CSS_config->cset_start_rb;
gNB_mac->cset0_bwp_size = type0_PDCCH_CSS_config->num_rbs;
gNB_mac->sched_ctrlCommon->sched_pdcch = set_pdcch_structure(NULL,
gNB_mac->sched_ctrlCommon->search_space,
gNB_mac->sched_ctrlCommon->coreset,
scc,
NULL,
type0_PDCCH_CSS_config);
}
gNB_mac->sched_ctrlCommon->pdsch_semi_static.time_domain_allocation = time_domain_allocation;
......@@ -354,13 +362,13 @@ uint32_t schedule_control_sib1(module_id_t module_id,
if (nr_of_candidates>0) break; // choosing the lower value of aggregation level available
}
AssertFatal(nr_of_candidates>0,"nr_of_candidates is 0\n");
gNB_mac->sched_ctrlCommon->cce_index = allocate_nr_CCEs(RC.nrmac[module_id],
NULL,
gNB_mac->sched_ctrlCommon->coreset,
gNB_mac->sched_ctrlCommon->aggregation_level,
0,
candidate_idx,
nr_of_candidates);
gNB_mac->sched_ctrlCommon->cce_index = find_pdcch_candidate(gNB_mac,
CC_id,
gNB_mac->sched_ctrlCommon->aggregation_level,
nr_of_candidates,
&gNB_mac->sched_ctrlCommon->sched_pdcch,
gNB_mac->sched_ctrlCommon->coreset,
0);
AssertFatal(gNB_mac->sched_ctrlCommon->cce_index >= 0, "Could not find CCE for coreset0\n");
......@@ -384,7 +392,7 @@ uint32_t schedule_control_sib1(module_id_t module_id,
int rbSize = 0;
uint32_t TBS = 0;
do {
if(rbSize < bwpSize && !vrb_map[rbStart + rbSize])
if(rbSize < bwpSize && !(vrb_map[rbStart + rbSize]&SL_to_bitmap(startSymbolIndex, nrOfSymbols)))
rbSize++;
else{
if (gNB_mac->sched_ctrlCommon->sched_pdsch.mcs<10)
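With this merge, vrb_map entries are per-PRB symbol bitmaps rather than booleans: an RB is only busy for a new allocation if the two symbol masks overlap. A hypothetical predicate (not in the diff) capturing the rule applied above and in the phytest preprocessors further down:
static inline bool prb_is_free(uint16_t vrb_map_entry, int startSymbolIndex, int nrOfSymbols) {
  // free iff none of the requested symbols are already marked in this PRB
  return (vrb_map_entry & SL_to_bitmap(startSymbolIndex, nrOfSymbols)) == 0;
}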
......@@ -411,16 +419,21 @@ uint32_t schedule_control_sib1(module_id_t module_id,
LOG_D(MAC,"dmrs_length %d\n",dmrs_length);
LOG_D(MAC,"N_PRB_DMRS = %d\n",N_PRB_DMRS);
LOG_D(MAC,"mappingtype = %d\n", mappingtype);
// Mark the corresponding RBs as used
fill_pdcch_vrb_map(gNB_mac,
CC_id,
&gNB_mac->sched_ctrlCommon->sched_pdcch,
gNB_mac->sched_ctrlCommon->cce_index,
gNB_mac->sched_ctrlCommon->aggregation_level);
for (int rb = 0; rb < gNB_mac->sched_ctrlCommon->sched_pdsch.rbSize; rb++) {
vrb_map[rb + rbStart] = 1;
vrb_map[rb + rbStart] = SL_to_bitmap(startSymbolIndex, nrOfSymbols);
}
return TBS;
}
void nr_fill_nfapi_dl_sib1_pdu(int Mod_idP,
nfapi_nr_dl_tti_request_body_t *dl_req,
int pdu_index,
NR_Type0_PDCCH_CSS_config_t *type0_PDCCH_CSS_config,
uint32_t TBS,
int StartSymbolIndex,
......@@ -437,13 +450,10 @@ void nr_fill_nfapi_dl_sib1_pdu(int Mod_idP,
dl_tti_pdcch_pdu->PDUSize = (uint8_t)(2+sizeof(nfapi_nr_dl_tti_pdcch_pdu));
dl_req->nPDUs += 1;
nfapi_nr_dl_tti_pdcch_pdu_rel15_t *pdcch_pdu_rel15 = &dl_tti_pdcch_pdu->pdcch_pdu.pdcch_pdu_rel15;
nr_configure_pdcch(NULL,
pdcch_pdu_rel15,
gNB_mac->sched_ctrlCommon->search_space,
nr_configure_pdcch(pdcch_pdu_rel15,
gNB_mac->sched_ctrlCommon->coreset,
scc,
NULL,
type0_PDCCH_CSS_config);
&gNB_mac->sched_ctrlCommon->sched_pdcch);
nfapi_nr_dl_tti_request_pdu_t *dl_tti_pdsch_pdu = &dl_req->dl_tti_pdu_list[dl_req->nPDUs];
memset((void*)dl_tti_pdsch_pdu,0,sizeof(nfapi_nr_dl_tti_request_pdu_t));
......@@ -456,7 +466,7 @@ void nr_fill_nfapi_dl_sib1_pdu(int Mod_idP,
pdsch_pdu_rel15->pduBitmap = 0;
pdsch_pdu_rel15->rnti = SI_RNTI;
pdsch_pdu_rel15->pduIndex = gNB_mac->pdu_index[0]++;
pdsch_pdu_rel15->pduIndex = pdu_index;
pdsch_pdu_rel15->BWPSize = type0_PDCCH_CSS_config->num_rbs;
pdsch_pdu_rel15->BWPStart = type0_PDCCH_CSS_config->cset_start_rb;
......@@ -623,7 +633,8 @@ void schedule_nr_sib1(module_id_t module_idP, frame_t frameP, sub_frame_t slotP)
candidate_idx, sib1_sdu_length);
nfapi_nr_dl_tti_request_body_t *dl_req = &gNB_mac->DL_req[CC_id].dl_tti_request_body;
nr_fill_nfapi_dl_sib1_pdu(module_idP, dl_req, type0_PDCCH_CSS_config, TBS, startSymbolIndex, nrOfSymbols, dlDmrsSymbPos);
int pdu_index = gNB_mac->pdu_index[0]++;
nr_fill_nfapi_dl_sib1_pdu(module_idP, dl_req, pdu_index, type0_PDCCH_CSS_config, TBS, startSymbolIndex, nrOfSymbols, dlDmrsSymbPos);
const int ntx_req = gNB_mac->TX_req[CC_id].Number_of_PDUs;
nfapi_nr_pdu_t *tx_req = &gNB_mac->TX_req[CC_id].pdu_list[ntx_req];
......@@ -633,7 +644,7 @@ void schedule_nr_sib1(module_id_t module_idP, frame_t frameP, sub_frame_t slotP)
memcpy(tx_req->TLVs[0].value.direct, sib1_payload, sib1_sdu_length);
tx_req->PDU_length = TBS;
tx_req->PDU_index = gNB_mac->pdu_index[0]++;
tx_req->PDU_index = pdu_index;
tx_req->num_TLV = 1;
tx_req->TLVs[0].length = TBS + 2;
gNB_mac->TX_req[CC_id].Number_of_PDUs++;
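The pdu_index rework fixes a pairing bug: previously gNB_mac->pdu_index[0] was post-incremented twice, once in nr_fill_nfapi_dl_sib1_pdu() for the PDSCH PDU and once here for the TX_req PDU, so the two PDUs carried different indices. Allocating the index once in schedule_nr_sib1() and passing it down keeps pduIndex in the DL_TTI.request and PDU_index in the TX_DATA.request referring to the same payload.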
......
......@@ -277,6 +277,14 @@ void nr_preprocessor_phytest(module_id_t module_id,
__func__,
UE_id);
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
const int tda = sched_ctrl->active_bwp ? RC.nrmac[module_id]->preferred_dl_tda[sched_ctrl->active_bwp->bwp_Id][slot] : 1;
const long f = sched_ctrl->active_bwp ? sched_ctrl->search_space->searchSpaceType->choice.ue_Specific->dci_Formats : 0;
NR_pdsch_semi_static_t *ps = &sched_ctrl->pdsch_semi_static;
ps->nrOfLayers = target_dl_Nl;
if (ps->time_domain_allocation != tda)
nr_set_pdsch_semi_static(
scc, UE_info->CellGroup[UE_id], sched_ctrl->active_bwp, NULL, tda, f, ps);
/* find largest unallocated chunk */
const int bwpSize = NRRIV2BW(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, MAX_BWP_SIZE);
const int BWPStart = NRRIV2PRBOFFSET(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, MAX_BWP_SIZE);
......@@ -288,11 +296,14 @@ void nr_preprocessor_phytest(module_id_t module_id,
/* loop ensures that we allocate exactly target_dl_bw, or return */
while (true) {
/* advance to first free RB */
while (rbStart < bwpSize && vrb_map[rbStart + BWPStart])
while (rbStart < bwpSize &&
(vrb_map[rbStart + BWPStart]&SL_to_bitmap(ps->startSymbolIndex, ps->nrOfSymbols)))
rbStart++;
rbSize = 1;
/* iterate until we are at target_dl_bw or no available RBs */
while (rbStart + rbSize < bwpSize && !vrb_map[rbStart + rbSize + BWPStart] && rbSize < target_dl_bw)
while (rbStart + rbSize < bwpSize &&
!(vrb_map[rbStart + rbSize + BWPStart]&SL_to_bitmap(ps->startSymbolIndex, ps->nrOfSymbols)) &&
rbSize < target_dl_bw)
rbSize++;
/* found target_dl_bw? */
if (rbSize == target_dl_bw)
......@@ -333,16 +344,17 @@ void nr_preprocessor_phytest(module_id_t module_id,
AssertFatal(nr_of_candidates>0,"nr_of_candidates is 0\n");
const int cid = sched_ctrl->coreset->controlResourceSetId;
const uint16_t Y = UE_info->Y[UE_id][cid][slot];
const int m = UE_info->num_pdcch_cand[UE_id][cid];
sched_ctrl->cce_index = allocate_nr_CCEs(RC.nrmac[module_id],
sched_ctrl->active_bwp,
sched_ctrl->coreset,
sched_ctrl->aggregation_level,
Y,
m,
nr_of_candidates);
AssertFatal(sched_ctrl->cce_index >= 0,
const uint16_t Y = get_Y(cid%3, slot, UE_info->rnti[UE_id]);
int CCEIndex = find_pdcch_candidate(RC.nrmac[module_id],
CC_id,
sched_ctrl->aggregation_level,
nr_of_candidates,
&sched_ctrl->sched_pdcch,
sched_ctrl->coreset,
Y);
AssertFatal(CCEIndex >= 0,
"%s(): could not find CCE for UE %d\n",
__func__,
UE_id);
......@@ -356,28 +368,26 @@ void nr_preprocessor_phytest(module_id_t module_id,
rnti,
frame,
slot);
UE_info->num_pdcch_cand[UE_id][cid]--;
int *cce_list = RC.nrmac[module_id]->cce_list[sched_ctrl->active_bwp->bwp_Id][cid];
for (int i = 0; i < sched_ctrl->aggregation_level; i++)
cce_list[sched_ctrl->cce_index + i] = 0;
RC.nrmac[module_id]->pdcch_cand[cid]--;
return;
}
sched_ctrl->cce_index = CCEIndex;
fill_pdcch_vrb_map(RC.nrmac[module_id],
CC_id,
&sched_ctrl->sched_pdcch,
CCEIndex,
sched_ctrl->aggregation_level);
//AssertFatal(alloc,
// "could not find uplink slot for PUCCH (RNTI %04x@%d.%d)!\n",
// rnti, frame, slot);
NR_sched_pdsch_t *sched_pdsch = &sched_ctrl->sched_pdsch;
NR_pdsch_semi_static_t *ps = &sched_ctrl->pdsch_semi_static;
sched_pdsch->pucch_allocation = alloc;
sched_pdsch->rbStart = rbStart;
sched_pdsch->rbSize = rbSize;
const int tda = sched_ctrl->active_bwp ? RC.nrmac[module_id]->preferred_dl_tda[sched_ctrl->active_bwp->bwp_Id][slot] : 1;
const long f = sched_ctrl->active_bwp ? sched_ctrl->search_space->searchSpaceType->choice.ue_Specific->dci_Formats : 0;
ps->nrOfLayers = target_dl_Nl;
if (ps->time_domain_allocation != tda)
nr_set_pdsch_semi_static(
scc, UE_info->CellGroup[UE_id], sched_ctrl->active_bwp, NULL, tda, f, ps);
sched_pdsch->mcs = target_dl_mcs;
sched_pdsch->Qm = nr_get_Qm_dl(sched_pdsch->mcs, ps->mcsTableIdx);
......@@ -397,7 +407,7 @@ void nr_preprocessor_phytest(module_id_t module_id,
/* mark the corresponding RBs as used */
for (int rb = 0; rb < sched_pdsch->rbSize; rb++)
vrb_map[rb + sched_pdsch->rbStart + BWPStart] = 1;
vrb_map[rb + sched_pdsch->rbStart + BWPStart] = SL_to_bitmap(ps->startSymbolIndex, ps->nrOfSymbols);
if ((frame&127) == 0) LOG_D(MAC,"phytest: %d.%d DL mcs %d, DL rbStart %d, DL rbSize %d\n", frame, slot, sched_pdsch->mcs, rbStart,rbSize);
}
......@@ -500,20 +510,21 @@ bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_
AssertFatal(nr_of_candidates>0,"nr_of_candidates is 0\n");
const int cid = sched_ctrl->coreset->controlResourceSetId;
const uint16_t Y = UE_info->Y[UE_id][cid][slot];
const int m = UE_info->num_pdcch_cand[UE_id][cid];
sched_ctrl->cce_index = allocate_nr_CCEs(RC.nrmac[module_id],
sched_ctrl->active_bwp,
sched_ctrl->coreset,
sched_ctrl->aggregation_level,
Y,
m,
nr_of_candidates);
const uint16_t Y = get_Y(cid%3, slot, UE_info->rnti[UE_id]);
int CCEIndex = find_pdcch_candidate(nr_mac,
CC_id,
sched_ctrl->aggregation_level,
nr_of_candidates,
&sched_ctrl->sched_pdcch,
sched_ctrl->coreset,
Y);
if (sched_ctrl->cce_index < 0) {
LOG_E(MAC, "%s(): CCE list not empty, couldn't schedule PUSCH\n", __func__);
nr_mac->pdcch_cand[cid]--;
return false;
}
UE_info->num_pdcch_cand[UE_id][cid]++;
const int mcs = target_ul_mcs;
NR_sched_pusch_t *sched_pusch = &sched_ctrl->sched_pusch;
......@@ -542,6 +553,12 @@ bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_
>> 3;
/* mark the corresponding RBs as used */
fill_pdcch_vrb_map(nr_mac,
CC_id,
&sched_ctrl->sched_pdcch,
CCEIndex,
sched_ctrl->aggregation_level);
for (int rb = rbStart; rb < rbStart + rbSize; rb++)
vrb_map_UL[rb+BWPStart] = 1;
return true;
......
......@@ -141,7 +141,6 @@ void nr_schedule_pucch(int Mod_idP,
for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
if (sched_ctrl->ul_failure==1 && get_softmodem_params()->phy_test==0) continue;
const int n = sizeof(sched_ctrl->sched_pucch) / sizeof(*sched_ctrl->sched_pucch);
for (int i = 0; i < n; i++) {
NR_sched_pucch_t *curr_pucch = &UE_info->UE_sched_ctrl[UE_id].sched_pucch[i];
......
......@@ -711,7 +711,8 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP,
UE_info->mac_stats[UE_id].ulsch_DTX++;
}
if (UE_info->UE_sched_ctrl[UE_id].pusch_consecutive_dtx_cnt >= pusch_failure_thres) {
LOG_D(NR_MAC,"Detected UL Failure on PUSCH, stopping scheduling\n");
LOG_W(NR_MAC,"Detected UL Failure on PUSCH after %d PUSCH DTX, stopping scheduling\n",
UE_info->UE_sched_ctrl[UE_id].pusch_consecutive_dtx_cnt);
UE_info->UE_sched_ctrl[UE_id].ul_failure = 1;
nr_mac_gNB_rrc_ul_failure(gnb_mod_idP,CC_idP,frameP,slotP,rntiP);
}
......@@ -1019,13 +1020,38 @@ bool allocate_ul_retransmission(module_id_t module_id,
sched_ctrl->pusch_semi_static = temp_ps;
}
/* Find free CCE */
bool freeCCE = find_free_CCE(module_id, slot, UE_id);
if (!freeCCE) {
/* Find a free CCE */
const int cid = sched_ctrl->coreset->controlResourceSetId;
const uint16_t Y = get_Y(cid%3, slot, UE_info->rnti[UE_id]);
uint8_t nr_of_candidates;
for (int i=0; i<5; i++) {
// for now taking the lowest value among the available aggregation levels
find_aggregation_candidates(&sched_ctrl->aggregation_level,
&nr_of_candidates,
sched_ctrl->search_space,
1<<i);
if(nr_of_candidates>0) break;
}
int CCEIndex = find_pdcch_candidate(RC.nrmac[module_id],
CC_id,
sched_ctrl->aggregation_level,
nr_of_candidates,
&sched_ctrl->sched_pdcch,
sched_ctrl->coreset,
Y);
if (CCEIndex<0) {
LOG_D(NR_MAC, "%4d.%2d no free CCE for retransmission UL DCI UE %04x\n", frame, slot, UE_info->rnti[UE_id]);
return false;
}
sched_ctrl->cce_index = CCEIndex;
fill_pdcch_vrb_map(RC.nrmac[module_id],
CC_id,
&sched_ctrl->sched_pdcch,
CCEIndex,
sched_ctrl->aggregation_level);
/* frame/slot in sched_pusch has been set previously. In the following, we
* overwrite the information in the retransmission information before storing
* as the new scheduling instruction */
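The candidate search above tries aggregation levels 1<<i for i = 0..4 (AL 1, 2, 4, 8, 16) and keeps the lowest level for which the search space configures at least one candidate; the same find_aggregation_candidates()/find_pdcch_candidate()/fill_pdcch_vrb_map() sequence recurs twice more in pf_ul() below, for the BSR-0 preallocation and for the data path.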
......@@ -1140,8 +1166,27 @@ void pf_ul(module_id_t module_id,
* based on data to transmit) */
if (B == 0 && do_sched) {
/* if no data, pre-allocate 5RB */
bool freeCCE = find_free_CCE(module_id, slot, UE_id);
if (!freeCCE) {
/* Find a free CCE */
const int cid = sched_ctrl->coreset->controlResourceSetId;
const uint16_t Y = get_Y(cid%3, slot, UE_info->rnti[UE_id]);
uint8_t nr_of_candidates;
for (int i=0; i<5; i++) {
// for now taking the lowest value among the available aggregation levels
find_aggregation_candidates(&sched_ctrl->aggregation_level,
&nr_of_candidates,
sched_ctrl->search_space,
1<<i);
if(nr_of_candidates>0) break;
}
int CCEIndex = find_pdcch_candidate(RC.nrmac[module_id],
CC_id,
sched_ctrl->aggregation_level,
nr_of_candidates,
&sched_ctrl->sched_pdcch,
sched_ctrl->coreset,
Y);
if (CCEIndex<0) {
LOG_D(NR_MAC, "%4d.%2d no free CCE for UL DCI UE %04x (BSR 0)\n", frame, slot, UE_info->rnti[UE_id]);
continue;
}
......@@ -1158,6 +1203,13 @@ void pf_ul(module_id_t module_id,
return;
}
sched_ctrl->cce_index = CCEIndex;
fill_pdcch_vrb_map(RC.nrmac[module_id],
CC_id,
&sched_ctrl->sched_pdcch,
CCEIndex,
sched_ctrl->aggregation_level);
/* Save PUSCH field */
/* we want to avoid a lengthy deduction of DMRS and other parameters in
* every TTI if we can save it, so check whether dci_format, TDA, or
......@@ -1223,8 +1275,27 @@ void pf_ul(module_id_t module_id,
*max = UE_sched.next[*max];
*p = -1;
bool freeCCE = find_free_CCE(module_id, slot, UE_id);
if (!freeCCE) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
const int cid = sched_ctrl->coreset->controlResourceSetId;
const uint16_t Y = get_Y(cid%3, slot, UE_info->rnti[UE_id]);
uint8_t nr_of_candidates;
for (int i=0; i<5; i++) {
// for now taking the lowest value among the available aggregation levels
find_aggregation_candidates(&sched_ctrl->aggregation_level,
&nr_of_candidates,
sched_ctrl->search_space,
1<<i);
if(nr_of_candidates>0) break;
}
int CCEIndex = find_pdcch_candidate(RC.nrmac[module_id],
CC_id,
sched_ctrl->aggregation_level,
nr_of_candidates,
&sched_ctrl->sched_pdcch,
sched_ctrl->coreset,
Y);
if (CCEIndex<0) {
LOG_D(NR_MAC, "%4d.%2d no free CCE for UL DCI UE %04x\n", frame, slot, UE_info->rnti[UE_id]);
continue;
}
......@@ -1235,7 +1306,6 @@ void pf_ul(module_id_t module_id,
if (max_num_ue < 0)
return;
NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
NR_CellGroupConfig_t *cg = UE_info->CellGroup[UE_id];
NR_BWP_UplinkDedicated_t *ubwpd= cg ? cg->spCellConfig->spCellConfigDedicated->uplinkConfig->initialUplinkBWP:NULL;
NR_BWP_t *genericParameters = sched_ctrl->active_ubwp ? &sched_ctrl->active_ubwp->bwp_Common->genericParameters : &scc->uplinkConfigCommon->initialUplinkBWP->genericParameters;
......@@ -1288,6 +1358,14 @@ void pf_ul(module_id_t module_id,
rbSize, sched_pusch->tb_size, sched_ctrl->estimated_ul_buffer, sched_ctrl->sched_ul_bytes, B,sched_ctrl->cce_index,ps->num_dmrs_symb,ps->N_PRB_DMRS);
/* Mark the corresponding RBs as used */
sched_ctrl->cce_index = CCEIndex;
fill_pdcch_vrb_map(RC.nrmac[module_id],
CC_id,
&sched_ctrl->sched_pdcch,
CCEIndex,
sched_ctrl->aggregation_level);
n_rb_sched -= sched_pusch->rbSize;
for (int rb = 0; rb < sched_ctrl->sched_pusch.rbSize; rb++)
rballoc_mask[rb + sched_ctrl->sched_pusch.rbStart] = 0;
......@@ -1447,7 +1525,8 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot)
/* a PDCCH PDU groups DCIs per BWP and CORESET. Save a pointer to each
* allocated PDCCH so we can easily allocate UE's DCIs independent of any
* CORESET order */
nfapi_nr_dl_tti_pdcch_pdu_rel15_t *pdcch_pdu_bwp_coreset[MAX_NUM_BWP][MAX_NUM_CORESET] = {{0}};
nfapi_nr_dl_tti_pdcch_pdu_rel15_t *pdcch_pdu_coreset[MAX_NUM_CORESET] = {0};
NR_ServingCellConfigCommon_t *scc = RC.nrmac[module_id]->common_channels[0].ServingCellConfigCommon;
NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info;
......@@ -1686,7 +1765,7 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot)
NR_SearchSpace_t *ss = (sched_ctrl->active_bwp || ubwpd) ? sched_ctrl->search_space: RC.nrmac[module_id]->sched_ctrlCommon->search_space;
NR_ControlResourceSet_t *coreset = (sched_ctrl->active_bwp || ubwpd) ? sched_ctrl->coreset: RC.nrmac[module_id]->sched_ctrlCommon->coreset;
const int coresetid = coreset->controlResourceSetId;
nfapi_nr_dl_tti_pdcch_pdu_rel15_t *pdcch_pdu = pdcch_pdu_bwp_coreset[bwpid][coresetid];
nfapi_nr_dl_tti_pdcch_pdu_rel15_t *pdcch_pdu = pdcch_pdu_coreset[coresetid];
if (!pdcch_pdu) {
nfapi_nr_ul_dci_request_pdus_t *ul_dci_request_pdu = &ul_dci_req->ul_dci_pdu_list[ul_dci_req->numPdus];
memset(ul_dci_request_pdu, 0, sizeof(nfapi_nr_ul_dci_request_pdus_t));
......@@ -1694,8 +1773,8 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot)
ul_dci_request_pdu->PDUSize = (uint8_t)(2+sizeof(nfapi_nr_dl_tti_pdcch_pdu));
pdcch_pdu = &ul_dci_request_pdu->pdcch_pdu.pdcch_pdu_rel15;
ul_dci_req->numPdus += 1;
nr_configure_pdcch(nr_mac, pdcch_pdu, ss, coreset, scc, genericParameters, NULL);
pdcch_pdu_bwp_coreset[bwpid][coresetid] = pdcch_pdu;
nr_configure_pdcch(pdcch_pdu, coreset, genericParameters, &sched_ctrl->sched_pdcch);
pdcch_pdu_coreset[coresetid] = pdcch_pdu;
}
LOG_D(NR_MAC,"Configuring ULDCI/PDCCH in %d.%d at CCE %d, rnti %x\n", frame,slot,sched_ctrl->cce_index,rnti);
......
......@@ -248,13 +248,31 @@ void find_search_space(int ss_type,
NR_BWP_Downlink_t *bwp,
NR_SearchSpace_t *ss);
void nr_configure_pdcch(gNB_MAC_INST *gNB_mac,
nfapi_nr_dl_tti_pdcch_pdu_rel15_t *pdcch_pdu,
NR_SearchSpace_t *ss,
void nr_configure_pdcch(nfapi_nr_dl_tti_pdcch_pdu_rel15_t *pdcch_pdu,
NR_ControlResourceSet_t *coreset,
NR_ServingCellConfigCommon_t *scc,
NR_BWP_t *bwp,
NR_Type0_PDCCH_CSS_config_t *type0_PDCCH_CSS_config);
NR_sched_pdcch_t *pdcch);
NR_sched_pdcch_t set_pdcch_structure(gNB_MAC_INST *gNB_mac,
NR_SearchSpace_t *ss,
NR_ControlResourceSet_t *coreset,
NR_ServingCellConfigCommon_t *scc,
NR_BWP_t *bwp,
NR_Type0_PDCCH_CSS_config_t *type0_PDCCH_CSS_config);
int find_pdcch_candidate(gNB_MAC_INST *mac,
int cc_id,
int aggregation,
int nr_of_candidates,
NR_sched_pdcch_t *pdcch,
NR_ControlResourceSet_t *coreset,
uint16_t Y);
void fill_pdcch_vrb_map(gNB_MAC_INST *mac,
int CC_id,
NR_sched_pdcch_t *pdcch,
int first_cce,
int aggregation);
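Taken together, these prototypes define the reworked PDCCH allocation flow: set_pdcch_structure() precomputes the FAPI-level CORESET parameters once per search space, find_pdcch_candidate() hashes into the candidates of a given aggregation level, and fill_pdcch_vrb_map() marks the chosen CCEs in the VRB map. A minimal sketch of a per-slot call sequence (variable names assumed, not from the diff):
NR_sched_pdcch_t pdcch = set_pdcch_structure(mac, ss, coreset, scc, genericParameters, NULL);
int cce = find_pdcch_candidate(mac, CC_id, aggregation_level, nr_of_candidates,
                               &pdcch, coreset,
                               get_Y(coreset->controlResourceSetId % 3, slot, rnti));
if (cce >= 0) // mark the CCEs of the winning candidate as used
  fill_pdcch_vrb_map(mac, CC_id, &pdcch, cce, aggregation_level);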
void fill_dci_pdu_rel15(const NR_ServingCellConfigCommon_t *scc,
const NR_CellGroupConfig_t *CellGroup,
......@@ -300,6 +318,8 @@ void nr_set_pusch_semi_static(const NR_ServingCellConfigCommon_t *scc,
uint8_t num_dmrs_cdm_grps_no_data,
NR_pusch_semi_static_t *ps);
uint16_t get_Y(int cid, int slot, rnti_t rnti);
uint8_t nr_get_tpc(int target, uint8_t cqi, int incr);
int get_spf(nfapi_nr_config_request_scf_t *cfg);
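get_Y() presumably implements the CCE index hashing of TS 38.213 section 10.1: Y_{p,n} = (A_p * Y_{p,n-1}) mod D with Y_{p,-1} = n_RNTI, A_p in {39827, 39829, 39839} selected by p mod 3 (hence the get_Y(cid%3, slot, rnti) call sites in this diff), and D = 65537. A minimal sketch under that assumption:
uint16_t get_Y(int cid, int slot, rnti_t rnti) {
  static const uint32_t A[3] = {39827, 39829, 39839};
  const uint32_t D = 65537;
  uint32_t Y = rnti; // Y_{p,-1} = n_RNTI
  for (int s = 0; s <= slot; s++)
    Y = (A[cid] * Y) % D;
  return Y;
}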
......
......@@ -73,7 +73,7 @@
/* Defs */
#define MAX_NUM_BWP 2
#define MAX_NUM_CORESET 2
#define MAX_NUM_CORESET 12
#define MAX_NUM_CCE 90
#define MAX_HARQ_ROUNDS 4
/*!\brief Maximum number of random access process */
......@@ -103,6 +103,20 @@ typedef struct NR_preamble_ue {
uint8_t *preamble_list;
} NR_preamble_ue_t;
typedef struct NR_sched_pdcch {
uint16_t BWPSize;
uint16_t BWPStart;
uint8_t CyclicPrefix;
uint8_t SubcarrierSpacing;
uint8_t StartSymbolIndex;
uint8_t CceRegMappingType;
uint8_t RegBundleSize;
uint8_t InterleaverSize;
uint16_t ShiftIndex;
uint8_t DurationSymbols;
int n_rb;
} NR_sched_pdcch_t;
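NR_sched_pdcch_t caches the CORESET/BWP geometry (size, start, CCE-to-REG mapping, interleaving, duration) that nr_configure_pdcch() previously rederived on every call; with this merge, set_pdcch_structure() fills it once and nr_configure_pdcch() only copies it into the FAPI PDU.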
/*! \brief gNB template for the Random access information */
typedef struct {
/// Flag to indicate this process is active
......@@ -165,6 +179,9 @@ typedef struct {
int mac_pdu_length;
/// RA search space
NR_SearchSpace_t *ra_ss;
/// RA Coreset
NR_ControlResourceSet_t *coreset;
NR_sched_pdcch_t sched_pdcch;
// Beam index
uint8_t beam_id;
/// CellGroup for UE that is to come (NSA is non-null, null for SA)
......@@ -300,7 +317,6 @@ typedef struct UE_info {
pdschTciStatesActDeact_t pdsch_TCI_States_ActDeact;
} NR_UE_mac_ce_ctrl_t;
typedef struct NR_sched_pucch {
int frame;
int ul_slot;
......@@ -535,6 +551,7 @@ typedef struct {
/// CCE index and aggregation, should be coherent with cce_list
NR_SearchSpace_t *search_space;
NR_ControlResourceSet_t *coreset;
NR_sched_pdcch_t sched_pdcch;
/// CCE index and Aggr. Level are shared for PUSCH/PDSCH allocation decisions
/// corresponding to the sched_pusch/sched_pdsch structures below
......@@ -652,12 +669,11 @@ typedef struct {
rnti_t rnti[MAX_MOBILES_PER_GNB];
NR_CellGroupConfig_t *CellGroup[MAX_MOBILES_PER_GNB];
/// CCE indexing
int Y[MAX_MOBILES_PER_GNB][3][160];
int m[MAX_MOBILES_PER_GNB];
int num_pdcch_cand[MAX_MOBILES_PER_GNB][MAX_NUM_CORESET];
// UE selected beam index
uint8_t UE_beam_index[MAX_MOBILES_PER_GNB];
bool Msg4_ACKed[MAX_MOBILES_PER_GNB];
} NR_UE_info_t;
typedef void (*nr_pp_impl_dl)(module_id_t mod_id,
......@@ -705,7 +721,7 @@ typedef struct gNB_MAC_INST_s {
/// a PDCCH PDU groups DCIs per BWP and CORESET. The following structure
/// keeps pointers to PDCCH PDUs within DL_req so that we can easily track
/// PDCCH PDUs per CC/BWP/CORESET
nfapi_nr_dl_tti_pdcch_pdu_rel15_t *pdcch_pdu_idx[NFAPI_CC_MAX][MAX_NUM_BWP][MAX_NUM_CORESET];
nfapi_nr_dl_tti_pdcch_pdu_rel15_t *pdcch_pdu_idx[NFAPI_CC_MAX][MAX_NUM_CORESET];
/// NFAPI UL TTI Request Structure, simple pointer into structure
/// UL_tti_req_ahead for current frame/slot
nfapi_nr_ul_tti_request_t *UL_tti_req[NFAPI_CC_MAX];
......@@ -716,7 +732,7 @@ typedef struct gNB_MAC_INST_s {
nfapi_nr_ul_dci_request_t UL_dci_req[NFAPI_CC_MAX];
/// NFAPI DL PDU structure
nfapi_nr_tx_data_request_t TX_req[NFAPI_CC_MAX];
int pdcch_cand[MAX_NUM_CORESET];
NR_UE_info_t UE_info;
/// UL handle
......@@ -748,8 +764,6 @@ typedef struct gNB_MAC_INST_s {
time_stats_t rx_ulsch_sdu; // include rlc_data_ind
/// processing time of eNB PCH scheduler
time_stats_t schedule_pch;
/// CCE lists
int cce_list[MAX_NUM_BWP][MAX_NUM_CORESET][MAX_NUM_CCE];
/// list of allocated beams per period
int16_t *tdd_beam_association;
......