Commit 393363e2 authored by Sakthivel Velumani's avatar Sakthivel Velumani Committed by francescomani

Refactor PUSCH RE mapping

RE mapping time is reduced by 10x. Functions are made generic to be
later reused for PDSCH RE mapping at gNB.
parent add341e2
......@@ -85,6 +85,13 @@ void set_ptrs_symb_idx(uint16_t *ptrs_symbols,
}
}
/*
  Returns the subcarrier index, relative to the start of the allocation, of
  the first PTRS RE. The RB-level offset is derived from the RNTI modulo the
  PTRS frequency density K_ptrs (cf. TS 38.211 clause 6.4.1.2.2 — confirm
  against the exact spec table).
*/
unsigned int get_first_ptrs_re(const rnti_t rnti, const uint8_t K_ptrs, const uint16_t nRB, const uint8_t k_RE_ref)
{
  // when nRB is a multiple of K_ptrs, the modulus falls back to K_ptrs itself
  const uint16_t rem = nRB % K_ptrs;
  const uint16_t k_RB_ref = rnti % (rem ? rem : K_ptrs);
  return k_RE_ref + k_RB_ref * NR_NB_SC_PER_RB;
}
/*******************************************************************
*
* NAME : is_ptrs_subcarrier
......
......@@ -53,6 +53,8 @@ void set_ptrs_symb_idx(uint16_t *ptrs_symbols,
uint8_t L_ptrs,
uint16_t dmrs_symb_pos);
unsigned int get_first_ptrs_re(const rnti_t rnti, const uint8_t K_ptrs, const uint16_t nRB, const uint8_t k_RE_ref);
uint8_t is_ptrs_subcarrier(uint16_t k,
uint16_t n_rnti,
uint8_t K_ptrs,
......
......@@ -85,6 +85,424 @@ void nr_pusch_codeword_scrambling(uint8_t *in, uint32_t size, uint32_t Nid, uint
nr_codeword_scrambling(in, size, 0, Nid, n_RNTI, out);
}
/*
The function pointers are set once before calling the mapping function for
all symbols based on different parameters. Then the mapping is done for
each symbol by calling the function pointers.
*/
static void (*map_dmrs_ptr)(const unsigned int, const c16_t *, c16_t *);
static void (*map_data_dmrs_ptr)(const c16_t *, c16_t *);
/*
The following set of functions map dmrs and/or data REs in one RB based on
configuration of DMRS type, number of CDM groups with no data and delta.
Any combination of these parameters not covered below is not applicable
and is therefore not handled here.
*/
/*
DMRS mapping in a RB for Type 1.
*/
/*
  DMRS mapping in one RB for configuration type 1: six DMRS REs on a comb of
  every other subcarrier, starting at subcarrier `delta`.
*/
static void map_dmrs_type1_cdm1_rb(const unsigned int delta, const c16_t *dmrs, c16_t *out)
{
  // type 1 DMRS occupies subcarriers delta, delta+2, ..., delta+10
  c16_t *re = out + delta;
  for (int i = 0; i < 6; i++) {
    *re = dmrs[i];
    re += 2;
  }
}
/*
Data in DMRS symbol for Type 1, NumCDMGroupNoData = 1 and delta 0 (antenna port 0 and 1).
There is no data in DMRS symbol for other scenarios in type 1.
*/
/*
  Data REs in a DMRS symbol for type 1, NumCDMGroupNoData = 1, delta = 0
  (antenna ports 0 and 1): data fills the odd subcarriers 1, 3, ..., 11 left
  free by the DMRS comb. No other type 1 configuration carries data in a
  DMRS symbol.
*/
static void map_data_dmrs_type1_cdm1_rb(const c16_t *data, c16_t *out)
{
  for (int i = 0; i < 6; i++)
    out[2 * i + 1] = data[i];
}
#define NR_DMRS_TYPE2_CDM_GRP_SIZE 2
#define NR_DMRS_TYPE2_NUM_CDM_GRP 3
/*
Map DMRS for type 2
*/
/*
  DMRS mapping in one RB for configuration type 2: one CDM pair of REs at
  subcarrier `delta` and a second pair six subcarriers higher.
*/
static void map_dmrs_type2_rb(const unsigned int delta, const c16_t *dmrs, c16_t *out)
{
  for (int half = 0; half < 2; half++) {
    memcpy(out + delta + half * (NR_DMRS_TYPE2_CDM_GRP_SIZE * NR_DMRS_TYPE2_NUM_CDM_GRP),
           dmrs + half * NR_DMRS_TYPE2_CDM_GRP_SIZE,
           sizeof(c16_t) * NR_DMRS_TYPE2_CDM_GRP_SIZE);
  }
}
/*
Map data if type 2, delta 0 NumCDMGrpNoData 1.
*/
/*
  Data REs in a type 2 DMRS symbol for delta = 0, NumCDMGrpNoData = 1:
  two runs of four data REs at subcarriers 2..5 and 8..11 (DMRS occupies
  0,1 and 6,7).
*/
static void map_data_dmrs_type2_rb_f1(const c16_t *data, c16_t *out)
{
  memcpy(out + NR_DMRS_TYPE2_CDM_GRP_SIZE,
         data,
         sizeof(c16_t) * NR_DMRS_TYPE2_CDM_GRP_SIZE * 2);
  memcpy(out + NR_DMRS_TYPE2_CDM_GRP_SIZE * 4,
         data + NR_DMRS_TYPE2_CDM_GRP_SIZE * 2,
         sizeof(c16_t) * NR_DMRS_TYPE2_CDM_GRP_SIZE * 2);
}
/*
Map data if type 2, delta (0, 2) NumCDMGrpNoData 2, or delta 2 NumCDMGrpNoData 1.
*/
/*
  Data REs in a type 2 DMRS symbol for delta in {0, 2} with NumCDMGrpNoData = 2,
  or delta = 2 with NumCDMGrpNoData = 1: two runs of two data REs at
  subcarriers 4..5 and 10..11.
*/
static void map_data_dmrs_type2_rb_f2(const c16_t *data, c16_t *out)
{
  memcpy(out + NR_DMRS_TYPE2_CDM_GRP_SIZE * 2,
         data,
         sizeof(c16_t) * NR_DMRS_TYPE2_CDM_GRP_SIZE);
  memcpy(out + NR_DMRS_TYPE2_CDM_GRP_SIZE * 5,
         data + NR_DMRS_TYPE2_CDM_GRP_SIZE,
         sizeof(c16_t) * NR_DMRS_TYPE2_CDM_GRP_SIZE);
}
/*
Map data if type 2, delta 4 NumCDMGrpNoData 1.
*/
/*
  Data REs in a type 2 DMRS symbol for delta = 4, NumCDMGrpNoData = 1:
  two runs of two data REs at subcarriers 2..3 and 8..9.
*/
static void map_data_dmrs_type2_rb_f3(const c16_t *data, c16_t *out)
{
  memcpy(out + NR_DMRS_TYPE2_CDM_GRP_SIZE,
         data,
         sizeof(c16_t) * NR_DMRS_TYPE2_CDM_GRP_SIZE);
  memcpy(out + NR_DMRS_TYPE2_CDM_GRP_SIZE * 4,
         data + NR_DMRS_TYPE2_CDM_GRP_SIZE,
         sizeof(c16_t) * NR_DMRS_TYPE2_CDM_GRP_SIZE);
}
/*
Map data and PTRS in RB
*/
/*
  Map one RB containing a single PTRS RE at subcarrier `ptrsIdx`, with data
  REs filling the remaining 11 subcarriers of the RB.
*/
static void map_data_ptrs(const unsigned int ptrsIdx, const c16_t *data, const c16_t *ptrs, c16_t *out)
{
  /* data REs before the PTRS subcarrier */
  memcpy(out, data, sizeof(c16_t) * ptrsIdx);
  data += ptrsIdx;
  /* single PTRS RE */
  *(out + ptrsIdx) = *ptrs;
  /* data REs after the PTRS subcarrier.
     FIX: the subtraction must apply to the RE count, not the byte count.
     The original `sizeof(c16_t) * NR_NB_SC_PER_RB - ptrsIdx - 1` copies
     (sizeof(c16_t)*12 - ptrsIdx - 1) bytes, overrunning the RB by up to
     3 REs' worth of bytes. */
  memcpy(out + ptrsIdx + 1, data, sizeof(c16_t) * (NR_NB_SC_PER_RB - ptrsIdx - 1));
}
/*
Map data only in RB
*/
/*
  Map one full RB (NR_NB_SC_PER_RB = 12 REs) of data, no DMRS/PTRS.
*/
static void map_data_rb(const c16_t *data, c16_t *out)
{
  for (unsigned int re = 0; re < NR_NB_SC_PER_RB; re++)
    out[re] = data[re];
}
/*
This function is used for a PRB which has the DC subcarrier
that is not first subcarrier.
*/
/*
  Handles the one RB that straddles the DC subcarrier, i.e. wraps from the
  top of the frequency buffer back to the bottom.

  right_dc     : number of REs of the straddling RB located after the wrap
                 point; 0 means the RB boundary coincides with the wrap, in
                 which case only the output pointer is rewound.
  fft_size     : OFDM symbol size, used to wrap the output pointer.
  dmrs_per_rb  : DMRS REs per RB, used to advance *dmrs.
  data_per_rb  : data REs per RB in a DMRS symbol, used to advance *data.
  delta        : DMRS frequency shift for the used CDM group.
  ptrsIdx      : RE offset of the PTRS subcarrier inside the RB.
  rb           : in/out RB counter; incremented only when an RB is mapped here.
  ptrs/dmrs/data : in/out sequence pointers; which of them are non-NULL
                 selects the mapping performed (see branch order below).
  out          : in/out pointer to the current RE in the output symbol.

  The RB is first assembled contiguously in a stack buffer via the same
  mapping helpers/function pointers as regular RBs, then copied back in two
  pieces: `left_dc` REs before the wrap and `right_dc` REs after it.
*/
static void map_over_dc(const unsigned int right_dc,
                        const unsigned int fft_size,
                        const unsigned int dmrs_per_rb,
                        const unsigned int data_per_rb,
                        const unsigned int delta,
                        const unsigned int ptrsIdx,
                        unsigned int *rb,
                        const c16_t **ptrs,
                        const c16_t **dmrs,
                        const c16_t **data,
                        c16_t **out)
{
  if (right_dc) {
    c16_t *out_tmp = *out;
    c16_t tmp_out_buf[NR_NB_SC_PER_RB];
    const unsigned int left_dc = NR_NB_SC_PER_RB - right_dc;
    /* copy out to temp buffer: lower part, then the wrapped upper part */
    memcpy(tmp_out_buf, out_tmp, sizeof(c16_t) * left_dc);
    out_tmp -= (fft_size - left_dc); // jump to the wrapped position
    memcpy(tmp_out_buf + left_dc, out_tmp, sizeof(c16_t) * right_dc);
    /* map on to temp buffer; branch selection mirrors the caller's use:
       (dmrs && data) = data REs in a DMRS symbol, (dmrs) = DMRS REs,
       (ptrs) = data + one PTRS RE, (data) = data only */
    if (dmrs && data) {
      map_data_dmrs_ptr(*data, tmp_out_buf);
      *data += data_per_rb;
    } else if (dmrs) {
      map_dmrs_ptr(delta, *dmrs, tmp_out_buf);
      *dmrs += dmrs_per_rb;
    } else if (ptrs) {
      map_data_ptrs(ptrsIdx, *data, *ptrs, tmp_out_buf);
      *data += (NR_NB_SC_PER_RB - 1); // one RE of the RB was PTRS, not data
      *ptrs += 1;
    } else if (data) {
      map_data_rb(*data, tmp_out_buf);
      *data += NR_NB_SC_PER_RB;
    } else {
      DevAssert(false);
    }
    /* copy back to out buffer in the same two pieces */
    out_tmp = *out;
    memcpy(out_tmp, tmp_out_buf, sizeof(c16_t) * left_dc);
    out_tmp -= (fft_size - left_dc);
    memcpy(out_tmp, tmp_out_buf + left_dc, sizeof(c16_t) * right_dc);
    out_tmp += right_dc; // leave *out just past the straddling RB
    *out = out_tmp;
    *rb += 1; // this RB is now fully mapped
  } else {
    /* clean wrap between two RBs: just rewind the output pointer */
    *out = (*out) - fft_size;
  }
}
/*
Map all REs in one OFDM symbol
*/
/*
  Map all REs (DMRS, PTRS and/or data) of one OFDM symbol into `out`.
  The allocation may wrap around the DC subcarrier: whole RBs below the wrap
  point are mapped first, the RB straddling DC is handled by map_over_dc(),
  then the remaining RBs. `*data` is advanced by the number of data REs
  consumed in this symbol so the caller can chain symbols.
*/
static void map_current_symbol(const rnti_t rnti,
                               const unsigned int K_ptrs,
                               const unsigned int k_RE_ref,
                               const unsigned int nb_rb,
                               const unsigned int bwp_start,
                               const unsigned int start_rb,
                               const unsigned int first_sc_offset,
                               const pusch_dmrs_type_t dmrs_type,
                               const unsigned int fft_size,
                               const bool dmrs_symbol,
                               const bool ptrs_symbol,
                               const c16_t *dmrs_seq,
                               const c16_t *ptrs_seq,
                               const unsigned int delta,
                               const c16_t **data,
                               c16_t *out)
{
  const unsigned int abs_start_rb = bwp_start + start_rb;
  const unsigned int start_sc = (first_sc_offset + abs_start_rb * NR_NB_SC_PER_RB) % fft_size;
  /* does the allocation run past the end of the frequency buffer (DC wrap)? */
  const bool cross_dc = start_sc + nb_rb * NR_NB_SC_PER_RB > fft_size;
  /* REs of the straddling RB that lie beyond the wrap point */
  const unsigned int rb_over_dc = cross_dc ? (fft_size - start_sc) % NR_NB_SC_PER_RB : 0;
  /* number of whole RBs that fit before the wrap point */
  const unsigned int stop_rb = cross_dc ? (fft_size - start_sc) / NR_NB_SC_PER_RB : nb_rb;
  const c16_t *data_tmp = *data;
  if (dmrs_symbol) {
    const unsigned int dmrs_per_rb = (dmrs_type == pusch_dmrs_type1) ? 6 : 4;
    const unsigned int data_per_rb = NR_NB_SC_PER_RB - dmrs_per_rb;
    /* dmrs_seq holds modulated DMRS for the whole band; skip to our first RB */
    const c16_t *p_mod_dmrs = dmrs_seq + abs_start_rb * dmrs_per_rb;
    c16_t *out_tmp = out + start_sc;
    unsigned int rb = 0;
    /* map DMRS REs below DC */
    for (; rb < stop_rb; rb++) {
      map_dmrs_ptr(delta, p_mod_dmrs, out_tmp);
      p_mod_dmrs += dmrs_per_rb;
      out_tmp += NR_NB_SC_PER_RB;
    }
    /* map RB at DC (or just rewind the pointer) */
    map_over_dc(rb_over_dc, fft_size, dmrs_per_rb, data_per_rb, delta, 0, &rb, NULL, &p_mod_dmrs, NULL, &out_tmp);
    /* map DMRS REs above DC */
    for (; rb < nb_rb; rb++) {
      map_dmrs_ptr(delta, p_mod_dmrs, out_tmp);
      p_mod_dmrs += dmrs_per_rb;
      out_tmp += NR_NB_SC_PER_RB;
    }
    /* second pass: data REs in the DMRS symbol, if this config carries any */
    if (map_data_dmrs_ptr) {
      c16_t *out_tmp = out + start_sc;
      unsigned int rb = 0;
      for (; rb < stop_rb; rb++) {
        map_data_dmrs_ptr(data_tmp, out_tmp);
        data_tmp += data_per_rb;
        out_tmp += NR_NB_SC_PER_RB;
      }
      map_over_dc(rb_over_dc, fft_size, dmrs_per_rb, data_per_rb, delta, 0, &rb, NULL, &p_mod_dmrs, &data_tmp, &out_tmp);
      for (; rb < nb_rb; rb++) {
        map_data_dmrs_ptr(data_tmp, out_tmp);
        data_tmp += data_per_rb;
        out_tmp += NR_NB_SC_PER_RB;
      }
    }
  } else if (ptrs_symbol) {
    const unsigned int first_ptrs_re = get_first_ptrs_re(rnti, K_ptrs, nb_rb, k_RE_ref) + start_sc;
    /* FIX: recover the RE offset within the RB and the index of the first
       PTRS-bearing RB. first_ptrs_re >= start_sc by construction, so the
       operands must be (first_ptrs_re - start_sc); the original
       (start_sc - first_ptrs_re) underflows the unsigned subtraction and
       yields garbage offsets. */
    const unsigned int ptrs_idx_re = (first_ptrs_re - start_sc) % NR_NB_SC_PER_RB;
    unsigned int ptrs_idx_rb = (first_ptrs_re - start_sc) / NR_NB_SC_PER_RB;
    unsigned int rb = 0;
    c16_t *out_tmp = out + start_sc;
    /* data-only RBs before the first PTRS-bearing RB.
       FIX: write through out_tmp — the original wrote through `out`, which
       is the symbol start, overwriting the same RB every iteration while
       out_tmp advanced unused. */
    for (; rb < ptrs_idx_rb; rb++) {
      map_data_rb(data_tmp, out_tmp);
      data_tmp += NR_NB_SC_PER_RB;
      out_tmp += NR_NB_SC_PER_RB;
    }
    const c16_t *p_mod_ptrs = ptrs_seq;
    ptrs_idx_rb = 0; // from here, count RBs since the first PTRS RB
    for (; rb < stop_rb; rb++) {
      if (ptrs_idx_rb % K_ptrs) {
        /* RB without PTRS: plain data */
        map_data_rb(data_tmp, out_tmp);
        data_tmp += NR_NB_SC_PER_RB;
        out_tmp += NR_NB_SC_PER_RB;
      } else {
        /* every K_ptrs-th RB carries one PTRS RE */
        map_data_ptrs(ptrs_idx_re, data_tmp, p_mod_ptrs, out_tmp);
        p_mod_ptrs++;
        data_tmp += (NR_NB_SC_PER_RB - 1);
        out_tmp += NR_NB_SC_PER_RB;
      }
      ptrs_idx_rb++;
    }
    /* RB at DC: with or without a PTRS RE depending on the RB counter */
    if (ptrs_idx_rb % K_ptrs) {
      map_over_dc(rb_over_dc, fft_size, 0, 0, delta, 0, &rb, NULL, NULL, &data_tmp, &out_tmp);
    } else {
      map_over_dc(rb_over_dc, fft_size, 0, 0, delta, ptrs_idx_re, &rb, &p_mod_ptrs, NULL, &data_tmp, &out_tmp);
    }
    for (; rb < nb_rb; rb++) {
      if (ptrs_idx_rb % K_ptrs) {
        map_data_rb(data_tmp, out_tmp);
        data_tmp += NR_NB_SC_PER_RB;
        out_tmp += NR_NB_SC_PER_RB;
      } else {
        map_data_ptrs(ptrs_idx_re, data_tmp, p_mod_ptrs, out_tmp);
        p_mod_ptrs++;
        data_tmp += (NR_NB_SC_PER_RB - 1);
        out_tmp += NR_NB_SC_PER_RB;
      }
      ptrs_idx_rb++;
    }
  } else {
    /* plain data symbol */
    unsigned int rb = 0;
    c16_t *out_tmp = out + start_sc;
    for (; rb < stop_rb; rb++) {
      map_data_rb(data_tmp, out_tmp);
      data_tmp += NR_NB_SC_PER_RB;
      out_tmp += NR_NB_SC_PER_RB;
    }
    map_over_dc(rb_over_dc, fft_size, 0, 0, delta, 0, &rb, NULL, NULL, &data_tmp, &out_tmp);
    for (; rb < nb_rb; rb++) {
      map_data_rb(data_tmp, out_tmp);
      data_tmp += NR_NB_SC_PER_RB;
      out_tmp += NR_NB_SC_PER_RB;
    }
  }
  /* report how far the data sequence was consumed */
  *data = data_tmp;
}
/*
TS 38.211 table 6.4.1.1.3-1 and 2
*/
/*
  Applies the OCC/amplitude weighting of TS 38.211 tables 6.4.1.1.3-1/2 to a
  modulated DMRS vector: each complex symbol is scaled by Wf[i%2] * Wt * AMP.
  dmrs_port and dmrs_type are currently unused here but kept for interface
  stability with future per-port handling.
*/
static void dmrs_amp_mult(const uint32_t dmrs_port,
                          const int Wt,
                          const int Wf[2],
                          const c16_t *mod_dmrs,
                          c16_t *mod_dmrs_out,
                          const uint32_t n_dmrs,
                          const pusch_dmrs_type_t dmrs_type)
{
  /* One 128-bit SIMD lane of multipliers: the weight alternates between
     Wf[0] and Wf[1] per complex symbol, duplicated for real and imaginary
     parts. */
  int16_t alpha_dmrs[8] __attribute((aligned(16)));
  const int16_t a_even = Wf[0] * Wt * AMP;
  const int16_t a_odd = Wf[1] * Wt * AMP;
  for (int_fast8_t i = 0; i < sizeofArray(alpha_dmrs); i += 4) {
    alpha_dmrs[i] = a_even;     // real part, even-indexed symbol
    alpha_dmrs[i + 1] = a_even; // imag part, even-indexed symbol
    alpha_dmrs[i + 2] = a_odd;  // real part, odd-indexed symbol
    alpha_dmrs[i + 3] = a_odd;  // imag part, odd-indexed symbol
  }
  /* multiply mod_dmrs by the repeating alpha pattern, 4 symbols at a time */
  mult_real_vector_single_vector(mod_dmrs, alpha_dmrs, mod_dmrs_out, n_dmrs);
}
/*
Holds params needed for PUSCH resource mapping
*/
typedef struct {
  rnti_t rnti; // UE RNTI; used to derive the first PTRS RE position
  unsigned int K_ptrs; // PTRS frequency density (one PTRS RE every K_ptrs RBs)
  unsigned int k_RE_ref; // PTRS RE offset within the RB
  unsigned int first_sc_offset; // first carrier offset of the frequency buffer
  unsigned int fft_size; // OFDM symbol size (number of subcarriers)
  unsigned int num_rb_max; // carrier bandwidth in RBs (gold sequence sizing)
  unsigned int symbols_per_slot; // OFDM symbols per slot
  unsigned int slot; // slot number (NOTE: map_symbols also takes slot as an argument)
  unsigned int dmrs_scrambling_id; // UL DMRS scrambling identity
  unsigned int scid; // DMRS sequence initialization index
  unsigned int dmrs_port; // DMRS antenna port for this layer
  int Wt; // time-domain OCC weight for the selected l'
  int *Wf; // pointer to the 2-entry frequency-domain OCC weights
  unsigned int dmrs_symb_pos; // bitmap of DMRS symbol positions in the slot
  unsigned int ptrs_symb_pos; // bitmap of PTRS symbol positions in the slot
  unsigned int pdu_bit_map; // FAPI PDU bitmap (e.g. PUSCH_PDU_BITMAP_PUSCH_PTRS)
  transformPrecoder_t transform_precoding; // transform precoding enabled/disabled
  unsigned int bwp_start; // first RB of the bandwidth part
  unsigned int start_rb; // first allocated RB relative to the BWP
  unsigned int nb_rb; // number of allocated RBs
  unsigned int start_symbol; // first allocated OFDM symbol
  unsigned int num_symbols; // number of allocated OFDM symbols
  pusch_dmrs_type_t dmrs_type; // DMRS configuration type 1 or 2
  unsigned int delta; // DMRS frequency shift of the used CDM group
  unsigned int num_cdm_no_data; // number of CDM groups without data
} nr_phy_pxsch_params_t;
/*
Map ULSCH data and DMRS in all of the scheduled symbols and PRBs
*/
/*
  Map ULSCH data, DMRS and PTRS REs for all scheduled symbols of one layer
  into the frequency-domain buffer `out` (one fft_size span per symbol).
  dmrs_seq is the precomputed low-PAPR DMRS sequence, used only with
  transform precoding; data is the modulated (and possibly DFT-precoded)
  data sequence for this layer.
*/
static void map_symbols(const nr_phy_pxsch_params_t p,
                        const unsigned int slot,
                        const c16_t *dmrs_seq,
                        const c16_t *data,
                        c16_t *out)
{
  // assign the per-RB mapping function pointers once, based on DMRS type,
  // delta and the number of CDM groups without data
  if (p.dmrs_type == pusch_dmrs_type1) {
    map_dmrs_ptr = map_dmrs_type1_cdm1_rb;
    // only delta 0 with one CDM group without data carries data in DMRS symbols
    map_data_dmrs_ptr = (p.delta == 0 && p.num_cdm_no_data == 1) ? map_data_dmrs_type1_cdm1_rb : NULL;
  } else {
    map_dmrs_ptr = map_dmrs_type2_rb;
    if (p.delta == 0 && p.num_cdm_no_data == 1) {
      map_data_dmrs_ptr = map_data_dmrs_type2_rb_f1;
    } else if ((p.delta == 0 && p.num_cdm_no_data == 2) || (p.delta == 2 && p.num_cdm_no_data == 2)
               || (p.delta == 2 && p.num_cdm_no_data == 1)) {
      map_data_dmrs_ptr = map_data_dmrs_type2_rb_f2;
    } else if (p.delta == 4 && p.num_cdm_no_data == 1) {
      map_data_dmrs_ptr = map_data_dmrs_type2_rb_f3;
    } else { // no data in DMRS symbols for other combinations
      map_data_dmrs_ptr = NULL;
    }
  }
  // DMRS REs per symbol over the whole band up to the end of the allocation
  // (the sequence is generated from RB 0, then offset at mapping time)
  const unsigned int n_dmrs = (p.bwp_start + p.start_rb + p.nb_rb) * ((p.dmrs_type == pusch_dmrs_type1) ? 6 : 4);
  const c16_t *cur_data = data; // advanced by map_current_symbol each symbol
  for (int l = p.start_symbol; l < p.start_symbol + p.num_symbols; l++) {
    const bool dmrs_symbol = is_dmrs_symbol(l, p.dmrs_symb_pos);
    const bool ptrs_symbol = is_ptrs_symbol(l, p.ptrs_symb_pos);
    // VLA sizes rounded up to a multiple of 4 for the SIMD helpers.
    // NOTE(review): these buffers are passed to map_current_symbol even on
    // symbols where they were not filled; they are only read on DMRS/PTRS
    // symbols, where the branches below initialize them.
    c16_t mod_dmrs_amp[ALNARS_16_4(n_dmrs)] __attribute((aligned(16)));
    c16_t mod_ptrs_amp[ALNARS_16_4(p.nb_rb)] __attribute((aligned(16)));
    const uint32_t *gold = NULL;
    if (dmrs_symbol || ptrs_symbol) {
      // gold sequence for this symbol, shared by DMRS and PTRS modulation
      gold = nr_gold_pusch(p.num_rb_max, p.symbols_per_slot, p.dmrs_scrambling_id, p.scid, slot, l);
    }
    if (dmrs_symbol) {
      c16_t mod_dmrs[ALNARS_16_4(n_dmrs)] __attribute((aligned(16)));
      if (p.transform_precoding == transformPrecoder_disabled) {
        // CP-OFDM: QPSK-modulate the gold sequence, then apply OCC/amplitude
        nr_modulation(gold, n_dmrs * 2, DMRS_MOD_ORDER, (int16_t *)mod_dmrs);
        dmrs_amp_mult(p.dmrs_port, p.Wt, p.Wf, mod_dmrs, mod_dmrs_amp, n_dmrs, p.dmrs_type);
      } else {
        // DFT-s-OFDM: use the precomputed low-PAPR DMRS sequence
        dmrs_amp_mult(p.dmrs_port, p.Wt, p.Wf, dmrs_seq, mod_dmrs_amp, n_dmrs, p.dmrs_type);
      }
    } else if ((p.pdu_bit_map & PUSCH_PDU_BITMAP_PUSCH_PTRS) && ptrs_symbol) {
      AssertFatal(p.transform_precoding == transformPrecoder_disabled, "PTRS NOT SUPPORTED IF TRANSFORM PRECODING IS ENABLED\n");
      c16_t mod_ptrs[ALNARS_16_4(p.nb_rb)] __attribute((aligned(16)));
      nr_modulation(gold, p.nb_rb, DMRS_MOD_ORDER, (int16_t *)mod_ptrs);
      const unsigned int beta_ptrs = 1; // temp value until power control is implemented
      multadd_complex_vector_real_scalar((int16_t *)mod_ptrs, beta_ptrs * AMP, (int16_t *)mod_ptrs_amp, 1, p.nb_rb);
    }
    map_current_symbol(p.rnti,
                       p.K_ptrs,
                       p.k_RE_ref,
                       p.nb_rb,
                       p.bwp_start,
                       p.start_rb,
                       p.first_sc_offset,
                       p.dmrs_type,
                       p.fft_size,
                       dmrs_symbol,
                       ptrs_symbol,
                       mod_dmrs_amp,
                       mod_ptrs_amp,
                       p.delta,
                       &cur_data, // increments every symbol
                       out + l * p.fft_size);
  }
}
void nr_ue_ulsch_procedures(PHY_VARS_NR_UE *UE,
const unsigned char harq_pid,
const uint32_t frame,
......@@ -95,11 +513,8 @@ void nr_ue_ulsch_procedures(PHY_VARS_NR_UE *UE,
{
LOG_D(PHY,"nr_ue_ulsch_procedures hard_id %d %d.%d\n",harq_pid,frame,slot);
int Wf[2], Wt[2];
int l_prime[2], delta;
int l_prime[2];
uint8_t nb_dmrs_re_per_rb;
int i;
int sample_offsetF, N_RE_prime;
NR_DL_FRAME_PARMS *frame_parms = &UE->frame_parms;
......@@ -143,17 +558,18 @@ void nr_ue_ulsch_procedures(PHY_VARS_NR_UE *UE,
LOG_D(PHY,"ulsch TX %x : start_rb %d nb_rb %d mod_order %d Nl %d Tpmi %d bwp_start %d start_sc %d start_symbol %d num_symbols %d cdmgrpsnodata %d num_dmrs %d dmrs_re_per_rb %d\n",
rnti,start_rb,nb_rb,mod_order,Nl,pusch_pdu->Tpmi,pusch_pdu->bwp_start,start_sc,start_symbol,number_of_symbols,cdm_grps_no_data,number_dmrs_symbols,nb_dmrs_re_per_rb);
// TbD num_of_mod_symbols is set but never used
N_RE_prime = NR_NB_SC_PER_RB*number_of_symbols - nb_dmrs_re_per_rb*number_dmrs_symbols - N_PRB_oh;
const uint32_t N_RE_prime = NR_NB_SC_PER_RB * number_of_symbols - nb_dmrs_re_per_rb * number_dmrs_symbols - N_PRB_oh;
harq_process_ul_ue->num_of_mod_symbols = N_RE_prime*nb_rb;
/////////////////////////PTRS parameters' initialization/////////////////////////
///////////
uint8_t L_ptrs, K_ptrs = 0;
unsigned int K_ptrs = 0, k_RE_ref = 0;
uint32_t unav_res = 0;
if (pusch_pdu->pdu_bit_map & PUSCH_PDU_BITMAP_PUSCH_PTRS) {
K_ptrs = pusch_pdu->pusch_ptrs.ptrs_freq_density;
L_ptrs = 1 << pusch_pdu->pusch_ptrs.ptrs_time_density;
k_RE_ref = pusch_pdu->pusch_ptrs.ptrs_ports_list[0].ptrs_re_offset;
uint8_t L_ptrs = 1 << pusch_pdu->pusch_ptrs.ptrs_time_density;
ulsch_ue->ptrs_symbols = 0;
......@@ -239,9 +655,9 @@ void nr_ue_ulsch_procedures(PHY_VARS_NR_UE *UE,
/////////////////////////ULSCH layer mapping/////////////////////////
///////////
const int sz = available_bits / mod_order / Nl;
c16_t tx_layers[Nl][sz];
c16_t ulsch_mod[Nl][sz];
nr_ue_layer_mapping(d_mod, Nl, sz, tx_layers);
nr_ue_layer_mapping(d_mod, Nl, sz, ulsch_mod);
///////////
////////////////////////////////////////////////////////////////////////
......@@ -255,8 +671,8 @@ void nr_ue_ulsch_procedures(PHY_VARS_NR_UE *UE,
uint8_t u = 0, v = 0;
c16_t *dmrs_seq = NULL;
/// Transform-coded "y"-sequences (for definition see 38-211 V15.3.0 2018-09, subsection 6.3.1.4)
c16_t y[max_num_re] __attribute__((aligned(16)));
memset(y, 0, sizeof(y));
c16_t ulsch_mod_tp[max_num_re] __attribute__((aligned(16)));
memset(ulsch_mod_tp, 0, sizeof(ulsch_mod_tp));
if (pusch_pdu->transform_precoding == transformPrecoder_enabled) {
......@@ -281,7 +697,7 @@ void nr_ue_ulsch_procedures(PHY_VARS_NR_UE *UE,
/* In the symbol with DMRS no data would be transmitted CDM groups is 2*/
continue;
nr_dft(&y[y_offset], &tx_layers[0][y_offset], nb_re_pusch);
nr_dft(&ulsch_mod_tp[y_offset], &ulsch_mod[0][y_offset], nb_re_pusch);
y_offset = y_offset + nb_re_pusch;
......@@ -299,7 +715,7 @@ void nr_ue_ulsch_procedures(PHY_VARS_NR_UE *UE,
printf("NR_ULSCH_UE: available_bits: %u, mod_order: %d", available_bits,mod_order);
for (int ll = 0; ll < (available_bits/mod_order); ll++) {
debug_symbols[ll] = ulsch_ue->y[ll];
debug_symbols[ll] = ulsch_ue->ulsch_mod_tp[ll];
}
printf("NR_ULSCH_UE: numSym: %d, num_dmrs_sym: %d", number_of_symbols,number_dmrs_symbols);
......@@ -307,8 +723,8 @@ void nr_ue_ulsch_procedures(PHY_VARS_NR_UE *UE,
nr_idft(&debug_symbols[offset], nb_re_pusch);
offset = offset + nb_re_pusch;
}
LOG_M("preDFT_all_symbols.m","UE_preDFT", tx_layers[0],number_of_symbols*nb_re_pusch,1,1);
LOG_M("postDFT_all_symbols.m","UE_postDFT", y,number_of_symbols*nb_re_pusch,1,1);
LOG_M("preDFT_all_symbols.m", "UE_preDFT", ulsch_mod[0], number_of_symbols * nb_re_pusch, 1, 1);
LOG_M("postDFT_all_symbols.m", "UE_postDFT", ulsch_mod_tp, number_of_symbols * nb_re_pusch, 1, 1);
LOG_M("DEBUG_IDFT_SYMBOLS.m","UE_Debug_IDFT", debug_symbols,number_of_symbols*nb_re_pusch,1,1);
LOG_M("UE_DMRS_SEQ.m","UE_DMRS_SEQ", dmrs_seq,nb_re_pusch,1,1);
#endif
......@@ -323,167 +739,57 @@ void nr_ue_ulsch_procedures(PHY_VARS_NR_UE *UE,
/////////////////////////ULSCH RE mapping/////////////////////////
///////////
const int encoded_length = frame_parms->N_RB_UL * 14 * NR_NB_SC_PER_RB * mod_order * Nl;
c16_t tx_precoding[Nl][encoded_length];
const int slot_sz = frame_parms->ofdm_symbol_size * frame_parms->symbols_per_slot;
c16_t tx_precoding[Nl][slot_sz];
memset(tx_precoding, 0, sizeof(tx_precoding));
for (int nl=0; nl < Nl; nl++) {
uint8_t k_prime = 0;
uint16_t m = 0;
for (int nl = 0; nl < Nl; nl++) {
#ifdef DEBUG_PUSCH_MAPPING
printf("NR_ULSCH_UE: Value of CELL ID %d /t, u %d \n", frame_parms->Nid_cell, u);
#endif
int dmrs_port = get_dmrs_port(nl,pusch_pdu->dmrs_ports);
// DMRS params for this dmrs port
const uint8_t dmrs_port = get_dmrs_port(nl, pusch_pdu->dmrs_ports);
const uint8_t delta = get_delta(dmrs_port, dmrs_type);
int Wt[2];
int Wf[2];
get_Wt(Wt, dmrs_port, dmrs_type);
get_Wf(Wf, dmrs_port, dmrs_type);
delta = get_delta(dmrs_port, dmrs_type);
for (int l=start_symbol; l<start_symbol+number_of_symbols; l++) {
uint16_t k = start_sc;
uint16_t n = 0;
uint8_t is_dmrs_sym = 0;
uint8_t is_ptrs_sym = 0;
uint16_t dmrs_idx = 0, ptrs_idx = 0;
uint16_t n_dmrs = (pusch_pdu->bwp_start + start_rb + nb_rb) * ((dmrs_type == pusch_dmrs_type1) ? 6 : 4);
c16_t mod_dmrs[n_dmrs] __attribute((aligned(16)));
c16_t mod_dmrs_amp[n_dmrs] __attribute((aligned(16)));
c16_t *data = (pusch_pdu->transform_precoding == transformPrecoder_enabled) ? ulsch_mod_tp : ulsch_mod[nl];
nr_phy_pxsch_params_t params = {.rnti = rnti,
.K_ptrs = K_ptrs,
.k_RE_ref = k_RE_ref,
.first_sc_offset = frame_parms->first_carrier_offset,
.fft_size = frame_parms->ofdm_symbol_size,
.num_rb_max = frame_parms->N_RB_UL,
.symbols_per_slot = frame_parms->symbols_per_slot,
.dmrs_scrambling_id = pusch_pdu->ul_dmrs_scrambling_id,
.scid = pusch_pdu->scid,
.dmrs_port = dmrs_port,
.Wt = Wt[l_prime[0]],
.Wf = Wf,
.dmrs_symb_pos = ul_dmrs_symb_pos,
.ptrs_symb_pos = ulsch_ue->ptrs_symbols,
.pdu_bit_map = pusch_pdu->pdu_bit_map,
.transform_precoding = pusch_pdu->transform_precoding,
.bwp_start = pusch_pdu->bwp_start,
.start_rb = start_rb,
.nb_rb = nb_rb,
.start_symbol = start_symbol,
.num_symbols = number_of_symbols,
.dmrs_type = dmrs_type,
.delta = delta,
.num_cdm_no_data = cdm_grps_no_data};
map_symbols(params, slot, dmrs_seq, data, tx_precoding[nl]);
c16_t mod_ptrs[nb_rb] __attribute((aligned(16))); // assume maximum number of PTRS per pusch allocation
c16_t mod_ptrs_amp[nb_rb] __attribute((aligned(16))); // assume maximum number of PTRS per pusch allocation
if ((ul_dmrs_symb_pos >> l) & 0x01) {
is_dmrs_sym = 1;
if (pusch_pdu->transform_precoding == transformPrecoder_disabled){
if (dmrs_type == pusch_dmrs_type1)
dmrs_idx = (pusch_pdu->bwp_start + start_rb)*6;
else
dmrs_idx = (pusch_pdu->bwp_start + start_rb)*4;
// TODO: performance improvement, we can skip the modulation of DMRS symbols outside the bandwidth part
// Perform this on gold sequence, not required when SC FDMA operation is done,
LOG_D(PHY,"DMRS in symbol %d\n",l);
const uint32_t *gold = nr_gold_pusch(frame_parms->N_RB_UL,
frame_parms->symbols_per_slot,
pusch_pdu->ul_dmrs_scrambling_id,
pusch_pdu->scid,
slot,
l);
nr_modulation(gold,
n_dmrs * 2,
DMRS_MOD_ORDER,
(int16_t *)mod_dmrs); // currently only codeword 0 is modulated. Qm = 2 as DMRS is QPSK modulated
const int tmp = Wt[l_prime[0]] * Wf[k_prime] * AMP;
//todo: mult only needed
multadd_complex_vector_real_scalar((int16_t*)mod_dmrs,tmp,(int16_t*)mod_dmrs_amp,1,n_dmrs*2);
} else {
dmrs_idx = 0;
}
} else if (pusch_pdu->pdu_bit_map & PUSCH_PDU_BITMAP_PUSCH_PTRS) {
AssertFatal(pusch_pdu->transform_precoding == transformPrecoder_disabled, "PTRS NOT SUPPORTED IF TRANSFORM PRECODING IS ENABLED\n");
if(is_ptrs_symbol(l, ulsch_ue->ptrs_symbols)) {
is_ptrs_sym = 1;
const uint32_t *gold = nr_gold_pusch(frame_parms->N_RB_UL,
frame_parms->symbols_per_slot,
pusch_pdu->ul_dmrs_scrambling_id,
pusch_pdu->scid,
slot,
l);
nr_modulation(gold, nb_rb, DMRS_MOD_ORDER, (int16_t *)mod_ptrs);
const uint16_t beta_ptrs = 1; // temp value until power control is implemented
multadd_complex_vector_real_scalar((int16_t*)mod_ptrs,beta_ptrs*AMP,(int16_t*)mod_ptrs_amp,1,nb_rb);
}
}
for (i=0; i< nb_rb*NR_NB_SC_PER_RB; i++) {
uint8_t is_dmrs = 0;
uint8_t is_ptrs = 0;
sample_offsetF = l*frame_parms->ofdm_symbol_size + k;
if (is_dmrs_sym) {
if (k == ((start_sc+get_dmrs_freq_idx_ul(n, k_prime, delta, dmrs_type))%frame_parms->ofdm_symbol_size))
is_dmrs = 1;
} else if (is_ptrs_sym) {
is_ptrs = is_ptrs_subcarrier(k,
rnti,
K_ptrs,
nb_rb,
pusch_pdu->pusch_ptrs.ptrs_ports_list[0].ptrs_re_offset,
start_sc,
frame_parms->ofdm_symbol_size);
}
if (is_dmrs == 1) {
// if transform precoding is enabled
const int tmp = Wt[l_prime[0]] * Wf[k_prime] * AMP;
if (pusch_pdu->transform_precoding == transformPrecoder_enabled)
tx_precoding[nl][sample_offsetF] = c16mulRealShift(dmrs_seq[dmrs_idx], tmp, 15);
else
tx_precoding[nl][sample_offsetF] = mod_dmrs_amp[dmrs_idx];
#ifdef DEBUG_PUSCH_MAPPING
printf("DMRS: Layer: %d\t, dmrs_idx %d\t l %d \t k %d \t k_prime %d \t n %d \t dmrs: %d %d\n",
nl,
dmrs_idx,
l,
k,
k_prime,
n,
tx_precoding[nl][sample_offsetF].r,
tx_precoding[nl][sample_offsetF].i);
#endif
dmrs_idx++;
k_prime++;
k_prime &= 1;
n += (k_prime) ? 0 : 1;
} else if (is_ptrs == 1) {
tx_precoding[nl][sample_offsetF] = mod_ptrs_amp[ptrs_idx];
ptrs_idx++;
} else if (!is_dmrs_sym
|| allowed_xlsch_re_in_dmrs_symbol(k, start_sc, frame_parms->ofdm_symbol_size, cdm_grps_no_data, dmrs_type)) {
if (pusch_pdu->transform_precoding == transformPrecoder_disabled)
tx_precoding[nl][sample_offsetF] = tx_layers[nl][m];
else
tx_precoding[nl][sample_offsetF] = y[m];
#ifdef DEBUG_PUSCH_MAPPING
printf("DATA: layer %d\t m %d\t l %d \t k %d \t tx_precoding: %d %d\n",
nl,
m,
l,
k,
tx_precoding[nl][sample_offsetF].r,
tx_precoding[nl][sample_offsetF].i);
#endif
m++;
} else {
tx_precoding[nl][sample_offsetF] = (c16_t){0};
}
if (++k >= frame_parms->ofdm_symbol_size)
k -= frame_parms->ofdm_symbol_size;
} // for (i=0; i< nb_rb*NR_NB_SC_PER_RB; i++)
} // for (l=start_symbol; l<start_symbol+number_of_symbols; l++)
} // for (nl=0; nl < Nl; nl++)
/////////////////////////ULSCH precoding/////////////////////////
///////////
/// Layer Precoding and Antenna port mapping
// tx_layers 0-3 are mapped on antenna ports
// ulsch_mod 0-3 are mapped on antenna ports
// The precoding info is supported by nfapi such as num_prgs, prg_size, prgs_list and pm_idx
// The same precoding matrix is applied on prg_size RBs, Thus
// pmi = prgs_list[rbidx/prg_size].pm_idx, rbidx =0,...,rbSize-1
......@@ -554,7 +860,7 @@ void nr_ue_ulsch_procedures(PHY_VARS_NR_UE *UE,
for (int i = 0; i < NR_NB_SC_PER_RB; i++) {
int32_t re_offset = l * frame_parms->ofdm_symbol_size + k;
txdataF[ap][re_offset] = nr_layer_precoder(encoded_length, tx_precoding, W_prec, pusch_pdu->nrOfLayers, re_offset);
txdataF[ap][re_offset] = nr_layer_precoder(slot_sz, tx_precoding, W_prec, pusch_pdu->nrOfLayers, re_offset);
if (++k >= frame_parms->ofdm_symbol_size) {
k -= frame_parms->ofdm_symbol_size;
}
......
......@@ -24,6 +24,18 @@
#include <simde/simde-common.h>
#include <simde/x86/sse.h>
/*
  Fixed-point multiply of the complex vector x by the repeating 8-entry int16
  pattern in alpha (a single SIMD vector), writing the result to y.
  Processes ceil(N/4) full SIMD vectors of 4 complex samples, so x and y must
  be padded to a multiple of 4 complex elements (see ALNARS_16_4) —
  NOTE(review): confirm all call sites allocate with that padding.
*/
void mult_real_vector_single_vector(const c16_t *x, const int16_t *alpha, c16_t *y, const unsigned int N)
{
  const simd_q15_t *alpha_vec = (const simd_q15_t *)alpha;
  const simd_q15_t *src = (const simd_q15_t *)x;
  simd_q15_t *dst = (simd_q15_t *)y;
  /* ceil(N/4) SIMD multiplications */
  for (uint_fast32_t remaining = (N + 3) / 4; remaining > 0; remaining--) {
    *dst = mulhi_int16(*src, *alpha_vec);
    dst++;
    src++;
  }
}
void multadd_complex_vector_real_scalar(int16_t *x,
int16_t alpha,
int16_t *y,
......@@ -35,19 +47,20 @@ void multadd_complex_vector_real_scalar(int16_t *x,
int n;
alpha_128 = set1_int16(alpha);
const uint32_t num_adds = (N + 3) / 4; // ceil(N/4)
if (zero_flag == 1)
for (n=0; n<N>>2; n++) {
if (zero_flag == 1) {
for (n = 0; n < num_adds; n++) {
// print_shorts("x_128[n]=", &x_128[n]);
// print_shorts("alpha_128", &alpha_128);
y_128[n] = mulhi_int16(x_128[n],alpha_128);
// print_shorts("y_128[n]=", &y_128[n]);
}
else
for (n=0; n<N>>2; n++) {
} else {
for (n = 0; n < num_adds; n++) {
y_128[n] = adds_int16(y_128[n],mulhi_int16(x_128[n],alpha_128));
}
}
simde_mm_empty();
simde_m_empty();
......
......@@ -52,6 +52,9 @@
extern "C" {
#endif
/* Round `a` up to the next multiple of `b` (for SIMD-friendly array sizes).
   FIX: parenthesize the macro arguments — with the original expansion an
   argument containing a low-precedence operator (e.g. a ternary) mis-parses. */
#define ALIGNARRAYSIZE(a, b) ((((a) + (b) - 1) / (b)) * (b))
/* Round up to a multiple of 4 (four 16-bit complex samples per 128-bit lane). */
#define ALNARS_16_4(a) ALIGNARRAYSIZE(a, 4)
typedef struct complexd {
double r;
double i;
......@@ -821,6 +824,8 @@ double interp(double x, double *xs, double *ys, int count);
void simde_mm128_separate_real_imag_parts(simde__m128i *out_re, simde__m128i *out_im, simde__m128i in0, simde__m128i in1);
void simde_mm256_separate_real_imag_parts(simde__m256i *out_re, simde__m256i *out_im, simde__m256i in0, simde__m256i in1);
void mult_real_vector_single_vector(const c16_t *x, const int16_t *alpha, c16_t *y, const unsigned int N);
#ifdef __cplusplus
}
#endif
......
......@@ -1421,17 +1421,6 @@ int main(int argc, char *argv[])
errors_decoding++;
}
}
if (n_trials == 1) {
for (int r = 0; r < UE->ul_harq_processes[harq_pid].C; r++)
for (int i = 0; i < UE->ul_harq_processes[harq_pid].K >> 3; i++) {
if ((UE->ul_harq_processes[harq_pid].c[r][i] ^ ulsch_gNB->harq_process->c[r][i]) != 0)
printf("************");
/*printf("r %d: in[%d] %x, out[%d] %x (%x)\n",r,
i,UE->ul_harq_processes[harq_pid].c[r][i],
i,ulsch_gNB->harq_process->c[r][i],
UE->ul_harq_processes[harq_pid].c[r][i]^ulsch_gNB->harq_process->c[r][i]);*/
}
}
if (errors_decoding > 0 && error_flag == 0) {
n_false_positive++;
if (n_trials==1)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment