Commit aad8facd authored by hardy

Merge remote-tracking branch 'origin/NR_multiplexing_HARQ_CSI_PUCCH' into integration_2021_wk20_a

parents 751ad576 697bfa3c
@@ -1621,7 +1621,7 @@ void nr_decode_pucch2(PHY_VARS_gNB *gNB,
   if (pucch_pdu->bit_len_harq>0) {
     int harq_bytes=pucch_pdu->bit_len_harq>>3;
     if ((pucch_pdu->bit_len_harq&7) > 0) harq_bytes++;
-    uci_pdu->pduBitmap|=1;
+    uci_pdu->pduBitmap|=2;
     uci_pdu->harq.harq_payload = (uint8_t*)malloc(harq_bytes);
     uci_pdu->harq.harq_crc = decoderState;
     int i=0;
@@ -1635,7 +1635,7 @@ void nr_decode_pucch2(PHY_VARS_gNB *gNB,
   }
   if (pucch_pdu->sr_flag == 1) {
-    uci_pdu->pduBitmap|=2;
+    uci_pdu->pduBitmap|=1;
     uci_pdu->sr.sr_bit_len = 1;
     uci_pdu->sr.sr_payload = malloc(1);
     uci_pdu->sr.sr_payload[0] = decodedPayload[0]&1;
...
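Note on the swapped bitmap values above: in the SCF FAPI UCI indication convention, pduBitmap bit 0 flags a present SR payload and bit 1 a present HARQ payload (bits 2/3 the CSI parts), so the HARQ branch should set |=2 and the SR branch |=1. A minimal sketch of that convention; the macro names are illustrative, not taken from the OAI tree:

    /* Hedged sketch of the pduBitmap layout assumed above; names are hypothetical. */
    #define UCI_BITMAP_SR    (1U << 0)  /* bit 0: SR payload present         */
    #define UCI_BITMAP_HARQ  (1U << 1)  /* bit 1: HARQ-ACK payload present   */
    #define UCI_BITMAP_CSI1  (1U << 2)  /* bit 2: CSI Part 1 payload present */
    #define UCI_BITMAP_CSI2  (1U << 3)  /* bit 3: CSI Part 2 payload present */

    /* With these names, the decoder above now effectively does:
     *   uci_pdu->pduBitmap |= UCI_BITMAP_HARQ;  // HARQ branch, was |=1
     *   uci_pdu->pduBitmap |= UCI_BITMAP_SR;    // SR branch,   was |=2
     */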
@@ -1172,9 +1172,11 @@ void nr_generate_Msg4(module_id_t module_idP, int CC_id, frame_t frameP, sub_fra
   harq->is_waiting = true;
   ra->harq_pid = current_harq_pid;
-  nr_acknack_scheduling(module_idP, UE_id, frameP, slotP);
-  harq->feedback_slot = sched_ctrl->sched_pucch->ul_slot;
-  harq->feedback_frame = sched_ctrl->sched_pucch->frame;
+  int alloc = nr_acknack_scheduling(module_idP, UE_id, frameP, slotP);
+  AssertFatal(alloc>=0,"Couldn't find a pucch allocation for ack nack (msg4)\n");
+  NR_sched_pucch_t *pucch = &sched_ctrl->sched_pucch[alloc];
+  harq->feedback_slot = pucch->ul_slot;
+  harq->feedback_frame = pucch->frame;
   // Bytes to be transmitted
   uint8_t *buf = (uint8_t *) harq->tb;
@@ -1376,10 +1378,10 @@ void nr_generate_Msg4(module_id_t module_idP, int CC_id, frame_t frameP, sub_fra
   dci_payload.rv = pdsch_pdu_rel15->rvIndex[0];
   dci_payload.harq_pid = current_harq_pid;
   dci_payload.ndi = harq->ndi;
-  dci_payload.dai[0].val = (sched_ctrl->sched_pucch->dai_c-1)&3;
+  dci_payload.dai[0].val = (pucch->dai_c-1)&3;
   dci_payload.tpc = sched_ctrl->tpc1; // TPC for PUCCH: table 7.2.1-1 in 38.213
-  dci_payload.pucch_resource_indicator = sched_ctrl->sched_pucch->resource_indicator;
-  dci_payload.pdsch_to_harq_feedback_timing_indicator.val = sched_ctrl->sched_pucch->timing_indicator;
+  dci_payload.pucch_resource_indicator = pucch->resource_indicator;
+  dci_payload.pdsch_to_harq_feedback_timing_indicator.val = pucch->timing_indicator;
   LOG_D(NR_MAC,
         "[RAPROC] DCI type 1 payload: freq_alloc %d (%d,%d,%d), time_alloc %d, vrb to prb %d, mcs %d tb_scaling %d \n",
...
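Side note on the DAI expression kept in the hunk above: the counter DAI carried in the fallback DCI is a 2-bit field whose value is (number of scheduled PDSCHs - 1) modulo 4, which is exactly what (pucch->dai_c-1)&3 computes for dai_c >= 1. A tiny, self-contained check of that mapping (illustrative only):

    /* Hedged sketch: prints the 2-bit DAI field value for accumulated dai_c counts. */
    #include <stdio.h>

    int main(void) {
      for (int dai_c = 1; dai_c <= 6; dai_c++)
        printf("dai_c=%d -> DAI field=%d\n", dai_c, (dai_c - 1) & 3);
      return 0;
    }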
@@ -492,8 +492,8 @@ bool allocate_dl_retransmission(module_id_t module_id,
   /* Find PUCCH occasion: if it fails, undo CCE allocation (undoing PUCCH
    * allocation after CCE alloc fail would be more complex) */
-  const bool alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot);
-  if (!alloc) {
+  const int alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot);
+  if (alloc<0) {
     LOG_D(MAC,
           "%s(): could not find PUCCH for UE %d/%04x@%d.%d\n",
           __func__,
@@ -509,6 +509,8 @@ bool allocate_dl_retransmission(module_id_t module_id,
     return false;
   }
+  sched_ctrl->sched_pdsch.pucch_allocation = alloc;
   /* just reuse from previous scheduling opportunity, set new start RB */
   sched_ctrl->sched_pdsch = *retInfo;
   sched_ctrl->sched_pdsch.rbStart = rbStart;
@@ -615,8 +617,8 @@ void pf_dl(module_id_t module_id,
   /* Find PUCCH occasion: if it fails, undo CCE allocation (undoing PUCCH
    * allocation after CCE alloc fail would be more complex) */
-  const bool alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot);
-  if (!alloc) {
+  const int alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot);
+  if (alloc<0) {
     LOG_D(MAC,
           "%s(): could not find PUCCH for UE %d/%04x@%d.%d\n",
           __func__,
@@ -648,7 +650,7 @@ void pf_dl(module_id_t module_id,
                  scc, UE_info->secondaryCellGroup[UE_id], sched_ctrl->active_bwp, tda, num_dmrs_cdm_grps_no_data, ps);
   sched_pdsch->Qm = nr_get_Qm_dl(sched_pdsch->mcs, ps->mcsTableIdx);
   sched_pdsch->R = nr_get_code_rate_dl(sched_pdsch->mcs, ps->mcsTableIdx);
+  sched_pdsch->pucch_allocation = alloc;
   uint32_t TBS = 0;
   uint16_t rbSize;
   const int oh = 2 + (sched_ctrl->num_total_bytes >= 256)
@@ -811,7 +813,7 @@ void nr_schedule_ue_spec(module_id_t module_id,
   NR_UE_harq_t *harq = &sched_ctrl->harq_processes[current_harq_pid];
   DevAssert(!harq->is_waiting);
   add_tail_nr_list(&sched_ctrl->feedback_dl_harq, current_harq_pid);
-  NR_sched_pucch_t *pucch = &sched_ctrl->sched_pucch[0];
+  NR_sched_pucch_t *pucch = &sched_ctrl->sched_pucch[sched_pdsch->pucch_allocation];
   harq->feedback_frame = pucch->frame;
   harq->feedback_slot = pucch->ul_slot;
   harq->is_waiting = true;
...
@@ -339,8 +339,8 @@ void nr_preprocessor_phytest(module_id_t module_id,
           __func__,
           UE_id);
-  const bool alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot);
-  if (!alloc) {
+  const int alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot);
+  if (alloc < 0) {
     LOG_D(MAC,
           "%s(): could not find PUCCH for UE %d/%04x@%d.%d\n",
           __func__,
@@ -355,12 +355,13 @@ void nr_preprocessor_phytest(module_id_t module_id,
     return;
   }
-  AssertFatal(alloc,
-              "could not find uplink slot for PUCCH (RNTI %04x@%d.%d)!\n",
-              rnti, frame, slot);
+  //AssertFatal(alloc,
+  //            "could not find uplink slot for PUCCH (RNTI %04x@%d.%d)!\n",
+  //            rnti, frame, slot);
   NR_sched_pdsch_t *sched_pdsch = &sched_ctrl->sched_pdsch;
   NR_pdsch_semi_static_t *ps = &sched_ctrl->pdsch_semi_static;
+  sched_pdsch->pucch_allocation = alloc;
   sched_pdsch->rbStart = rbStart;
   sched_pdsch->rbSize = rbSize;
   const int tda = RC.nrmac[module_id]->preferred_dl_tda[sched_ctrl->active_bwp->bwp_Id][slot];
...
@@ -553,9 +553,6 @@ void nr_csi_meas_reporting(int Mod_idP,
       for (int i = start; i < start + len; ++i) {
         vrb_map_UL[i] |= mask;
       }
-      AssertFatal(!curr_pucch->simultaneous_harqcsi,
-                  "UE %04x has simultaneous HARQ/CSI configured, but we don't support that\n",
-                  UE_info->rnti[UE_id]);
     }
   }
 }
@@ -1088,10 +1085,14 @@ void handle_nr_uci_pucch_2_3_4(module_id_t mod_id,
 // function to update pucch scheduling parameters in UE list when a USS DL is scheduled
-bool nr_acknack_scheduling(int mod_id,
-                           int UE_id,
-                           frame_t frame,
-                           sub_frame_t slot)
+// this function returns an index to NR_sched_pucch structure
+// currently this structure contains PUCCH0 at index 0 and PUCCH2 at index 1
+// if the function returns -1 it was not possible to schedule acknack
+// when current pucch is ready to be scheduled nr_fill_nfapi_pucch is called
+int nr_acknack_scheduling(int mod_id,
+                          int UE_id,
+                          frame_t frame,
+                          sub_frame_t slot)
 {
   const NR_ServingCellConfigCommon_t *scc = RC.nrmac[mod_id]->common_channels->ServingCellConfigCommon;
   const int n_slots_frame = nr_slots_per_frame[*scc->ssbSubcarrierSpacing];
@@ -1101,6 +1102,7 @@ bool nr_acknack_scheduling(int mod_id,
   const int nr_slots_period = tdd->nrofDownlinkSlots + tdd->nrofUplinkSlots + nr_mix_slots;
   const int first_ul_slot_tdd = tdd->nrofDownlinkSlots + nr_slots_period * (slot / nr_slots_period);
   const int CC_id = 0;
+  NR_sched_pucch_t *csi_pucch;
   AssertFatal(slot < first_ul_slot_tdd + (tdd->nrofUplinkSymbols != 0),
               "cannot handle multiple TDD periods (yet): slot %d first_ul_slot_tdd %d nrofUplinkSlots %ld\n",
@@ -1120,7 +1122,6 @@ bool nr_acknack_scheduling(int mod_id,
         "%s(): csi_bits %d in sched_pucch[0]\n",
         __func__,
         pucch->csi_bits);
-
   /* if the currently allocated PUCCH of this UE is full, allocate it */
   if (pucch->dai_c == 2) {
     /* advance the UL slot information in PUCCH by one so we won't schedule in
@@ -1132,17 +1133,13 @@ bool nr_acknack_scheduling(int mod_id,
     pucch->frame = s == n_slots_frame - 1 ? (f + 1) % 1024 : f;
     pucch->ul_slot = (s + 1) % n_slots_frame;
     // we assume that only two indices over the array sched_pucch exist
-    NR_sched_pucch_t *csi_pucch = &sched_ctrl->sched_pucch[1];
+    csi_pucch = &sched_ctrl->sched_pucch[1];
     // skip the CSI PUCCH if it is present and if in the next frame/slot
+    // and if we don't multiplex
     if (csi_pucch->csi_bits > 0
         && csi_pucch->frame == pucch->frame
-        && csi_pucch->ul_slot == pucch->ul_slot) {
-      AssertFatal(!csi_pucch->simultaneous_harqcsi,
-                  "%s(): %d.%d cannot handle simultaneous_harqcsi, but found for UE %d\n",
-                  __func__,
-                  pucch->frame,
-                  pucch->ul_slot,
-                  UE_id);
+        && csi_pucch->ul_slot == pucch->ul_slot
+        && !csi_pucch->simultaneous_harqcsi) {
       nr_fill_nfapi_pucch(mod_id, frame, slot, csi_pucch, UE_id);
       memset(csi_pucch, 0, sizeof(*csi_pucch));
       pucch->frame = s >= n_slots_frame - 2 ? (f + 1) % 1024 : f;
@@ -1156,7 +1153,7 @@ bool nr_acknack_scheduling(int mod_id,
   if ((pucch->frame == frame
        && (pucch->ul_slot >= first_ul_slot_tdd + nr_ulmix_slots))
       || (pucch->frame == frame + 1))
-    return false;
+    return -1;
   // this is hardcoded for now as ue specific
   NR_SearchSpace__searchSpaceType_PR ss_type = NR_SearchSpace__searchSpaceType_PR_ue_Specific;
@@ -1190,7 +1187,7 @@ bool nr_acknack_scheduling(int mod_id,
     pucch->timing_indicator = i;
     pucch->dai_c++;
     // retain old resource indicator, and we are good
-    return true;
+    return 0;
   }
   /* we need to find a new PUCCH occasion */
@@ -1213,30 +1210,6 @@ bool nr_acknack_scheduling(int mod_id,
   // advance ul_slot if it is not reachable by UE
   pucch->ul_slot = max(pucch->ul_slot, slot + pdsch_to_harq_feedback[0]);
-  // is there already CSI in this slot?
-  NR_sched_pucch_t *csi_pucch = &sched_ctrl->sched_pucch[1];
-  // skip the CSI PUCCH if it is present and if in the next frame/slot
-  if (csi_pucch->csi_bits > 0
-      && csi_pucch->frame == pucch->frame
-      && csi_pucch->ul_slot == pucch->ul_slot) {
-    AssertFatal(!csi_pucch->simultaneous_harqcsi,
-                "%s(): %d.%d cannot handle simultaneous_harqcsi, but found for UE %d\n",
-                __func__,
-                pucch->frame,
-                pucch->ul_slot,
-                UE_id);
-    nr_fill_nfapi_pucch(mod_id, frame, slot, csi_pucch, UE_id);
-    memset(csi_pucch, 0, sizeof(*csi_pucch));
-    /* advance the UL slot information in PUCCH by one so we won't schedule in
-     * the same slot again */
-    const int f = pucch->frame;
-    const int s = pucch->ul_slot;
-    memset(pucch, 0, sizeof(*pucch));
-    pucch->frame = s == n_slots_frame - 1 ? (f + 1) % 1024 : f;
-    pucch->ul_slot = (s + 1) % n_slots_frame;
-    return nr_acknack_scheduling(mod_id, UE_id, frame, slot);
-  }
   // Find the right timing_indicator value.
   int i = 0;
   while (i < 8) {
@@ -1252,8 +1225,38 @@ bool nr_acknack_scheduling(int mod_id,
           slot,
           UE_id,
           pucch->ul_slot);
-    return false;
+    return -1;
+  }
+
+  // is there already CSI in this slot?
+  csi_pucch = &sched_ctrl->sched_pucch[1];
+  if (csi_pucch->csi_bits > 0
+      && csi_pucch->frame == pucch->frame
+      && csi_pucch->ul_slot == pucch->ul_slot) {
+    // skip the CSI PUCCH if it is present and if in the next frame/slot
+    // and if we don't multiplex
+    // FIXME currently we support at most 11 bits in pucch2 so skip also in that case
+    if(!csi_pucch->simultaneous_harqcsi
+       || ((csi_pucch->csi_bits + csi_pucch->dai_c) >= 11)) {
+      nr_fill_nfapi_pucch(mod_id, frame, slot, csi_pucch, UE_id);
+      memset(csi_pucch, 0, sizeof(*csi_pucch));
+      /* advance the UL slot information in PUCCH by one so we won't schedule in
+       * the same slot again */
+      const int f = pucch->frame;
+      const int s = pucch->ul_slot;
+      memset(pucch, 0, sizeof(*pucch));
+      pucch->frame = s == n_slots_frame - 1 ? (f + 1) % 1024 : f;
+      pucch->ul_slot = (s + 1) % n_slots_frame;
+      return nr_acknack_scheduling(mod_id, UE_id, frame, slot);
+    }
+    // multiplexing harq and csi in a pucch
+    else {
+      csi_pucch->timing_indicator = i;
+      csi_pucch->dai_c++;
+      return 1;
+    }
   }
   pucch->timing_indicator = i; // index in the list of timing indicators
   pucch->dai_c++;
@@ -1272,7 +1275,7 @@ bool nr_acknack_scheduling(int mod_id,
       LOG_W(MAC, "symbol 0x%x is not free for PUCCH alloc in vrb_map_UL at RB %ld and slot %d.%d\n", symb, resource->startingPRB, pucch->frame, pucch->ul_slot);
     vrb_map_UL[resource->startingPRB] |= symb;
   }
-  return true;
+  return 0;
 }
...
@@ -186,10 +186,10 @@ void nr_csi_meas_reporting(int Mod_idP,
                            frame_t frameP,
                            sub_frame_t slotP);
-bool nr_acknack_scheduling(int Mod_idP,
+int nr_acknack_scheduling(int Mod_idP,
                            int UE_id,
                            frame_t frameP,
                            sub_frame_t slotP);
 void get_pdsch_to_harq_feedback(int Mod_idP,
                                 int UE_id,
...
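The prototype change above makes the PUCCH index explicit to all callers. A hedged sketch of the expected calling pattern, assembled from the call sites patched in this commit (the variable names mirror those call sites; this fragment relies on the OAI MAC types from the surrounding code and is not compilable on its own):

    /* Illustrative fragment, mirroring the patched callers. */
    int alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot);
    if (alloc < 0) {
      /* no PUCCH occasion could be found: undo the CCE allocation and give up */
      return false;
    }
    /* remember which sched_pucch entry (0 = PUCCH0, 1 = PUCCH2 carrying
     * HARQ multiplexed with CSI) holds the feedback for this PDSCH */
    sched_ctrl->sched_pdsch.pucch_allocation = alloc;
    NR_sched_pucch_t *pucch = &sched_ctrl->sched_pucch[alloc];
    harq->feedback_frame = pucch->frame;
    harq->feedback_slot  = pucch->ul_slot;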
@@ -389,6 +389,9 @@ typedef struct NR_sched_pdsch {
   /// DL HARQ PID to use for this UE, or -1 for "any new"
   int8_t dl_harq_pid;
+  // pucch format allocation
+  uint8_t pucch_allocation;
   /// the Time Domain Allocation used for this transmission. Note that this is
   /// only important for retransmissions; otherwise, the TDA in
   /// NR_pdsch_semi_static_t has precedence
...
@@ -1009,10 +1009,11 @@ void fill_default_secondaryCellGroup(NR_ServingCellConfigCommon_t *servingcellco
   pucchfmt2->interslotFrequencyHopping=NULL;
   pucchfmt2->additionalDMRS=NULL;
   pucchfmt2->maxCodeRate=calloc(1,sizeof(*pucchfmt2->maxCodeRate));
-  *pucchfmt2->maxCodeRate=NR_PUCCH_MaxCodeRate_zeroDot25;
+  *pucchfmt2->maxCodeRate=NR_PUCCH_MaxCodeRate_zeroDot35;
   pucchfmt2->nrofSlots=NULL;
   pucchfmt2->pi2BPSK=NULL;
-  pucchfmt2->simultaneousHARQ_ACK_CSI=NULL;
+  pucchfmt2->simultaneousHARQ_ACK_CSI=calloc(1,sizeof(*pucchfmt2->simultaneousHARQ_ACK_CSI));
+  *pucchfmt2->simultaneousHARQ_ACK_CSI=NR_PUCCH_FormatConfig__simultaneousHARQ_ACK_CSI_true;
   // for scheduling requestresource
   pucch_Config->schedulingRequestResourceToAddModList = calloc(1,sizeof(*pucch_Config->schedulingRequestResourceToAddModList));
...
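The last hunk raises the default PUCCH format 2 max code rate from 0.25 to 0.35 and enables simultaneousHARQ_ACK_CSI, which is what lets the multiplexed HARQ bits ride on the CSI PUCCH2 occasion. A rough, hedged capacity check (assuming QPSK and 8 data subcarriers per PRB per symbol for format 2, i.e. 16 coded bits per PRB-symbol; the resource size below is hypothetical, not taken from this commit):

    /* Rough capacity estimate for PUCCH format 2 under a max code rate;
     * the 16-coded-bits-per-PRB-symbol figure is an assumption. */
    #include <stdio.h>

    int main(void) {
      const double max_code_rate = 0.35;   /* NR_PUCCH_MaxCodeRate_zeroDot35 */
      const int n_prb = 8, n_symb = 1;     /* hypothetical PUCCH2 resource   */
      const int coded_bits = 16 * n_prb * n_symb;
      /* UCI payload + CRC must fit within max_code_rate * coded_bits */
      printf("max UCI+CRC bits ~ %d\n", (int)(max_code_rate * coded_bits));
      return 0;
    }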