Commit 00d9af7a authored by Thomas Schlichter

Merge remote-tracking branch 'eurecom/develop' into fhg-ra2

Conflicts:
	openair1/PHY/NR_UE_TRANSPORT/nr_dlsch_demodulation.c
	openair2/LAYER2/NR_MAC_UE/nr_ue_dci_configuration.c
	openair2/LAYER2/NR_MAC_UE/nr_ue_procedures.c
parents 90dc9e95 f7dc17ec
......@@ -1830,6 +1830,7 @@ set (MAC_SRC
${MAC_DIR}/eNB_scheduler_fairRR.c
${MAC_DIR}/eNB_scheduler_phytest.c
${MAC_DIR}/pre_processor.c
${MAC_DIR}/slicing/slicing.c
${MAC_DIR}/config.c
${MAC_DIR}/config_ue.c
)
......
......@@ -1120,6 +1120,7 @@ typedef struct
uint16_t ul_dmrs_symb_pos;
uint8_t dmrs_config_type;
uint16_t ul_dmrs_scrambling_id;
uint16_t pusch_identity;
uint8_t scid;
uint8_t num_dmrs_cdm_grps_no_data;
uint16_t dmrs_ports;//DMRS ports. [TS38.212 7.3.1.1.2] provides description between DCI 0-1 content and DMRS ports. Bitmap occupying the 11 LSBs with: bit 0: antenna port 1000 bit 11: antenna port 1011 and for each bit 0: DMRS port not used 1: DMRS port used
......
......@@ -197,8 +197,10 @@ decoder_node_t *add_nodes(int level, int first_leaf_index, t_nrPolar_params *pol
}
for (int i=0;i<Nv;i++) {
if (polarParams->information_bit_pattern[i+first_leaf_index]>0)
all_frozen_below=0;
if (polarParams->information_bit_pattern[i+first_leaf_index]>0) {
all_frozen_below=0;
break;
}
}
if (all_frozen_below==0)
......
......@@ -2006,11 +2006,6 @@ void fill_ulsch(PHY_VARS_eNB *eNB,int UE_id,nfapi_ul_config_ulsch_pdu *ulsch_pdu
return;
}
//AssertFatal(ulsch->harq_processes[harq_pid]->nb_rb>0,"nb_rb = 0\n");
if(ulsch->harq_processes[harq_pid]->nb_rb == 0) {
LOG_E(PHY, "fill_ulsch UE_id %d nb_rb = 0\n", UE_id);
}
ulsch->harq_processes[harq_pid]->frame = frame;
ulsch->harq_processes[harq_pid]->subframe = subframe;
ulsch->harq_processes[harq_pid]->handled = 0;
......
......@@ -183,9 +183,9 @@ int nr_rx_pdsch(PHY_VARS_NR_UE *ue,
beamforming_mode = ue->transmission_mode[eNB_id] < 7 ? 0 :ue->transmission_mode[eNB_id];
break;
default:
AssertFatal(1 == 0, "[UE][FATAL] nr_tti_rx %d: Unknown PDSCH format %d\n", nr_tti_rx, type);
return(-1);
default:
LOG_E(PHY, "[UE][FATAL] nr_tti_rx %d: Unknown PDSCH format %d\n", nr_tti_rx, type);
return -1;
break;
}
......@@ -222,7 +222,8 @@ int nr_rx_pdsch(PHY_VARS_NR_UE *ue,
printf("[DEMOD] I am assuming only TB1 is active, it is in cw %d\n", codeword_TB1);
#endif
AssertFatal(1 == 0, "[UE][FATAL] DLSCH: TB0 not active and TB1 active case is not supported\n");
LOG_E(PHY, "[UE][FATAL] DLSCH: TB0 not active and TB1 active case is not supported\n");
return -1;
} else {
LOG_E(PHY,"[UE][FATAL] nr_tti_rx %d: no active DLSCH\n", nr_tti_rx);
......@@ -241,8 +242,10 @@ int nr_rx_pdsch(PHY_VARS_NR_UE *ue,
return (-1);
}
if (dlsch0_harq == NULL)
AssertFatal(1 == 0, "Done\n");
if (dlsch0_harq == NULL) {
LOG_E(PHY, "Done\n");
return -1;
}
dlsch0_harq->Qm = nr_get_Qm_dl(dlsch[0]->harq_processes[harq_pid]->mcs, dlsch[0]->harq_processes[harq_pid]->mcs_table);
dlsch0_harq->R = nr_get_code_rate_dl(dlsch[0]->harq_processes[harq_pid]->mcs, dlsch[0]->harq_processes[harq_pid]->mcs_table);
......
......@@ -160,6 +160,7 @@ void nr_schedule_response(NR_Sched_Rsp_t *Sched_INFO){
gNB->dlsch[i][0]->harq_mask=0;
}
gNB->pdcch_pdu = NULL;
gNB->ul_dci_pdu = NULL;
gNB->pbch_configured=0;
for (int i=0;i<number_dl_pdu;i++) {
......
......@@ -115,7 +115,16 @@ int flexran_agent_unregister_mac_xface(mid_t mod_id);
/* Inform controller about possibility to update slice configuration */
void flexran_agent_slice_update(mid_t mod_id);
/* return a pointer to the current config */
Protocol__FlexSliceConfig *flexran_agent_get_slice_config(mid_t mod_id);
/* marks slice_config so that it can be applied later. Takes ownership of the
* FlexSliceConfig message */
void prepare_update_slice_config(mid_t mod_id, Protocol__FlexSliceConfig **slice);
/* inserts a new ue_config into the structure keeping ue to slice association
* updates and marks so it can be applied. Takes ownership of the FlexUeConfig message */
void prepare_ue_slice_assoc_update(mid_t mod_id, Protocol__FlexUeConfig **ue_config);
/* free slice_config part of flexCellConfig, filled in
* flexran_agent_fill_mac_cell_config() */
void flexran_agent_destroy_mac_slice_config(Protocol__FlexCellConfig *conf);
#endif
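A minimal usage sketch of the ownership-transfer interface declared above (not part of the commit): the double-pointer arguments let the callee take over the FlexSliceConfig/FlexUeConfig messages. The header name and whether the callee clears the caller's pointer are assumptions.
#include "flexran_agent_mac.h"   /* assumed location of the declarations above */
/* hypothetical caller handing a slice config over to the agent */
static void example_handover_slice_config(mid_t mod_id,
                                          Protocol__FlexSliceConfig *cfg) {
  /* the callee takes ownership; the caller must not free cfg afterwards */
  prepare_update_slice_config(mod_id, &cfg);
  /* cfg may have been cleared by the callee and should not be reused here */
}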
......@@ -103,43 +103,11 @@ int load_dl_scheduler_function(mid_t mod_id, const char *function_name);
/*** Functions for handling a slice config ***/
/* allocate memory for a Protocol__FlexSliceConfig structure with n_dl DL slice
* configs and m_ul UL slice configs */
Protocol__FlexSliceConfig *flexran_agent_create_slice_config(int n_dl, int m_ul);
/* read the general slice parameters via RAN into the given
* Protocol__FlexSliceConfig struct */
void flexran_agent_read_slice_config(mid_t mod_id, Protocol__FlexSliceConfig *s);
/* read the DL slice config via the RAN into a given Protocol__FlexDlSlice
* struct */
void flexran_agent_read_slice_dl_config(mid_t mod_id, int slice_idx, Protocol__FlexDlSlice *dl_slice);
/* read the UL slice config via the RAN into a given Protocol__FlexUlSlice
* struct */
void flexran_agent_read_slice_ul_config(mid_t mod_id, int slice_idx, Protocol__FlexUlSlice *ul_slice);
/* reads content of slice over the sc_update structure, so that it can be
* applied later by performing a diff between slice_config and sc_update */
void prepare_update_slice_config(mid_t mod_id, Protocol__FlexSliceConfig *slice);
/* apply generic slice parameters (e.g. intra-/interslice sharing activated or
* not) if there are changes. Returns the number of changed parameters. */
int apply_new_slice_config(mid_t mod_id, Protocol__FlexSliceConfig *olds, Protocol__FlexSliceConfig *news);
/* apply new configuration of slice in DL if there are changes between the
* parameters. Returns the number of changed parameters. */
int apply_new_slice_dl_config(mid_t mod_id, Protocol__FlexDlSlice *oldc, Protocol__FlexDlSlice *newc);
/* apply new configuration of slice in UL if there are changes between the
* parameters. Returns the number of changed parameters. */
int apply_new_slice_ul_config(mid_t mod_id, Protocol__FlexUlSlice *oldc, Protocol__FlexUlSlice *newc);
/* inserts a new ue_config into the structure keeping ue to slice association
* updates and marks so it can be applied */
void prepare_ue_slice_assoc_update(mid_t mod_id, Protocol__FlexUeConfig *ue_config);
/* Prepare the application of a slicing config */
void apply_update_dl_slice_config(mid_t mod_id, Protocol__FlexSliceDlUlConfig *slice);
void apply_update_ul_slice_config(mid_t mod_id, Protocol__FlexSliceDlUlConfig *slice);
/* apply a new association between a UE and a slice (both DL and UL) */
int apply_ue_slice_assoc_update(mid_t mod_id);
int apply_ue_slice_assoc_update(mid_t mod_id, Protocol__FlexUeConfig *ue_config);
#endif /*FLEXRAN_AGENT_MAC_INTERNAL_H_*/
......@@ -29,9 +29,3 @@
#include "flexran_agent_common_internal.h"
#include "flexran_agent_mac_internal.h"
int flexran_verify_dl_slice(mid_t mod_id, Protocol__FlexDlSlice *dls);
int flexran_verify_group_dl_slices(mid_t mod_id, Protocol__FlexDlSlice **existing,
int n_ex, Protocol__FlexDlSlice **update, int n_up);
int flexran_verify_ul_slice(mid_t mod_id, Protocol__FlexUlSlice *uls);
int flexran_verify_group_ul_slices(mid_t mod_id, Protocol__FlexUlSlice **existing,
int n_ex, Protocol__FlexUlSlice **update, int n_up);
......@@ -125,6 +125,63 @@ void flexran_agent_s1ap_destroy_stats_reply(Protocol__FlexStatsReply *reply) {
}
}
void flexran_agent_handle_mme_update(mid_t mod_id,
size_t n_mme,
Protocol__FlexS1apMme **mme) {
if (n_mme == 0 || n_mme > 1) {
LOG_E(FLEXRAN_AGENT, "cannot handle %lu MMEs yet\n", n_mme);
return;
}
if (!mme[0]->s1_ip) {
LOG_E(FLEXRAN_AGENT, "no S1 IP present, cannot handle request\n");
return;
}
if (mme[0]->has_state
&& mme[0]->state == PROTOCOL__FLEX_MME_STATE__FLMMES_DISCONNECTED) {
int rc = flexran_remove_s1ap_mme(mod_id, 1, &mme[0]->s1_ip);
if (rc == 0)
LOG_I(FLEXRAN_AGENT, "remove MME at IP %s\n", mme[0]->s1_ip);
else
LOG_W(FLEXRAN_AGENT,
"could not remove MME: flexran_remove_s1ap_mme() returned %d\n",
rc);
} else {
int rc = flexran_add_s1ap_mme(mod_id, 1, &mme[0]->s1_ip);
if (rc == 0)
LOG_I(FLEXRAN_AGENT, "add MME at IP %s\n", mme[0]->s1_ip);
else
LOG_W(FLEXRAN_AGENT,
"could not add MME: flexran_add_s1ap_mme() returned %d\n",
rc);
}
}
void flexran_agent_handle_plmn_update(mid_t mod_id,
int CC_id,
size_t n_plmn,
Protocol__FlexPlmn **plmn_id) {
if (n_plmn < 1 || n_plmn > 6) {
LOG_E(FLEXRAN_AGENT, "cannot handle %lu PLMNs\n", n_plmn);
return;
}
/* We assume the controller has checked all the parameters within each
* plmn_id */
int rc = flexran_set_new_plmn_id(mod_id, CC_id, n_plmn, plmn_id);
if (rc == 0) {
LOG_I(FLEXRAN_AGENT, "set %lu new PLMNs:\n", n_plmn);
for (int i = 0; i < (int)n_plmn; ++i)
LOG_I(FLEXRAN_AGENT, " MCC %d MNC %d MNC length %d\n",
plmn_id[i]->mcc, plmn_id[i]->mnc, plmn_id[0]->mnc_length);
} else {
LOG_W(FLEXRAN_AGENT,
"could not set new PLMN configuration: flexran_set_new_plmn_id() returned %d\n",
rc);
}
}
int flexran_agent_register_s1ap_xface(mid_t mod_id) {
if (agent_s1ap_xface[mod_id]) {
LOG_E(FLEXRAN_AGENT, "S1AP agent CM for eNB %d is already registered\n", mod_id);
......
......@@ -60,6 +60,17 @@ int flexran_agent_s1ap_stats_reply(mid_t mod_id,
/* Free allocated S1AP stats message */
void flexran_agent_s1ap_destroy_stats_reply(Protocol__FlexStatsReply *reply);
/* Add or remove MME updates */
void flexran_agent_handle_mme_update(mid_t mod_id,
size_t n_mme,
Protocol__FlexS1apMme **mme);
/* Set a new PLMN configuration */
void flexran_agent_handle_plmn_update(mid_t mod_id,
int CC_id,
size_t n_plmn,
Protocol__FlexPlmn **plmn_id);
/* Register technology specific interface callbacks */
int flexran_agent_register_s1ap_xface(mid_t mod_id);
......
......@@ -58,6 +58,7 @@
#define CONFIG_STRING_MACRLC_SCHED_MODE "scheduler_mode"
#define CONFIG_MACRLC_PUSCH10xSNR "puSch10xSnr"
#define CONFIG_MACRLC_PUCCH10xSNR "puCch10xSnr"
#define CONFIG_STRING_MACRLC_DEFAULT_SCHED_DL_ALGO "default_sched_dl_algo"
/*-------------------------------------------------------------------------------------------------------------------------------------------------------*/
/* MacRLC configuration parameters */
......@@ -84,6 +85,7 @@
{CONFIG_STRING_MACRLC_SCHED_MODE, NULL, 0, strptr:NULL, defstrval:"default", TYPE_STRING, 0}, \
{CONFIG_MACRLC_PUSCH10xSNR, NULL, 0, iptr:NULL, defintval:200, TYPE_INT, 0}, \
{CONFIG_MACRLC_PUCCH10xSNR, NULL, 0, iptr:NULL, defintval:200, TYPE_INT, 0}, \
{CONFIG_STRING_MACRLC_DEFAULT_SCHED_DL_ALGO, NULL, 0, strptr:NULL, defstrval:"round_robin_dl", TYPE_STRING, 0}, \
}
#define MACRLC_CC_IDX 0
#define MACRLC_TRANSPORT_N_PREFERENCE_IDX 1
......@@ -105,6 +107,7 @@
#define MACRLC_SCHED_MODE_IDX 17
#define MACRLC_PUSCH10xSNR_IDX 18
#define MACRLC_PUCCH10xSNR_IDX 19
#define MACRLC_DEFAULT_SCHED_DL_ALGO_IDX 20
/*---------------------------------------------------------------------------------------------------------------------------------------------------------*/
#endif
......@@ -61,82 +61,30 @@ enum flex_qam {
//
// Slice config related structures and enums
//
enum flex_dl_sorting {
CR_ROUND = 0; // Highest HARQ first
CR_SRB12 = 1; // Highest SRB1+2 first
CR_HOL = 2; // Highest HOL first
CR_LC = 3; // Greatest RLC buffer first
CR_CQI = 4; // Highest CQI first
CR_LCP = 5; // Highest LC priority first
}
enum flex_ul_sorting {
CRU_ROUND = 0; // Highest HARQ first
CRU_BUF = 1; // Highest BSR first
CRU_BTS = 2; // More bytes to schedule first
CRU_MCS = 3; // Highest MCS first
CRU_LCP = 4; // Highest LC priority first
CRU_HOL = 5; // Highest HOL first
}
enum flex_dl_accounting_policy {
POL_FAIR = 0;
POL_GREEDY = 1;
POL_NUM = 2;
}
enum flex_ul_accounting_policy {
POLU_FAIR = 0;
POLU_GREEDY = 1;
POLU_NUM = 2;
}
enum flex_slice_label {
xMBB = 0;
URLLC = 1;
mMTC = 2;
xMTC = 3;
Other = 4;
}
message flex_dl_slice {
optional uint32 id = 1;
optional flex_slice_label label = 2;
// should be between 0 and 100
optional uint32 percentage = 3;
// whether this slice should be exempted form interslice sharing
optional bool isolation = 4;
// increasing value means increasing prio
optional uint32 priority = 5;
// min and max RB to use (in frequency) in the range [0, N_RBG_MAX]
optional uint32 position_low = 6;
optional uint32 position_high = 7;
// maximum MCS to be allowed in this slice
optional uint32 maxmcs = 8;
repeated flex_dl_sorting sorting = 9;
optional flex_dl_accounting_policy accounting = 10;
optional string scheduler_name = 11;
}
message flex_ul_slice {
optional uint32 id = 1;
optional flex_slice_label label = 2;
// should be between 0 and 100
optional uint32 percentage = 3;
// whether this slice should be exempted form interslice sharing
optional bool isolation = 4;
// increasing value means increasing prio
optional uint32 priority = 5;
// RB start to use (in frequency) in the range [0, N_RB_MAX]
optional uint32 first_rb = 6;
// TODO RB number
//optional uint32 length_rb = 7;
// maximum MCS to be allowed in this slice
optional uint32 maxmcs = 8;
repeated flex_ul_sorting sorting = 9;
optional flex_ul_accounting_policy accounting = 10;
optional string scheduler_name = 11;
enum flex_slice_algorithm {
None = 0;
Static = 1;
NVS = 2;
}
message flex_slice_static {
optional uint32 posLow = 1;
optional uint32 posHigh = 2;
}
message flex_slice {
optional uint32 id = 1;
optional string label = 2;
optional string scheduler = 3;
oneof params {
flex_slice_static static = 10;
}
}
message flex_slice_dl_ul_config {
optional flex_slice_algorithm algorithm = 1;
repeated flex_slice slices = 2;
optional string scheduler = 3; // if no slicing
}
//
......
......@@ -49,14 +49,8 @@ message flex_cell_config {
}
message flex_slice_config {
// whether remaining RBs after first intra-slice allocation will
// be allocated to UEs of the same slice
optional bool intraslice_share_active = 3;
// whether remaining RBs after slice allocation will be allocated
// to UEs of another slice. Isolated slices will be ignored.
optional bool interslice_share_active = 4;
repeated flex_dl_slice dl = 1;
repeated flex_ul_slice ul = 2;
optional flex_slice_dl_ul_config dl = 6;
optional flex_slice_dl_ul_config ul = 7;
}
message flex_ue_config {
......
......@@ -273,7 +273,6 @@ void *eNB_app_task(void *args_p) {
if (EPC_MODE_ENABLED) {
LOG_I(ENB_APP, "[eNB %d] Received %s: associated MME %d\n", instance, ITTI_MSG_NAME (msg_p),
S1AP_REGISTER_ENB_CNF(msg_p).nb_mme);
DevAssert(register_enb_pending > 0);
register_enb_pending--;
/* Check if at least eNB is registered with one MME */
......
......@@ -29,6 +29,7 @@
#include <string.h>
#include <inttypes.h>
#include <dlfcn.h>
#include "common/utils/LOG/log.h"
#include "assertions.h"
......@@ -260,6 +261,16 @@ void RCconfig_macrlc(int macrlc_has_f1[MAX_MAC_INST]) {
global_scheduler_mode=SCHED_MODE_DEFAULT;
printf("sched mode = default %d [%s]\n",global_scheduler_mode,*(MacRLC_ParamList.paramarray[j][MACRLC_SCHED_MODE_IDX].strptr));
}
char *s = *MacRLC_ParamList.paramarray[j][MACRLC_DEFAULT_SCHED_DL_ALGO_IDX].strptr;
void *d = dlsym(NULL, s);
AssertFatal(d, "%s(): no default scheduler DL algo '%s' found\n", __func__, s);
// release default, add new
pp_impl_param_t *dl_pp = &RC.mac[j]->pre_processor_dl;
dl_pp->dl_algo.unset(&dl_pp->dl_algo.data);
dl_pp->dl_algo = *(default_sched_dl_algo_t *) d;
dl_pp->dl_algo.data = dl_pp->dl_algo.setup();
LOG_I(ENB_APP, "using default scheduler DL algo '%s'\n", dl_pp->dl_algo.name);
}// j=0..num_inst
} /*else {// MacRLC_ParamList.numelt > 0 // ignore it
......
......@@ -306,6 +306,7 @@ int flexran_agent_destroy_enb_config_reply(Protocol__FlexranMessage *msg) {
if (reply->cell_config[i]->mbsfn_subframe_config_sfalloc)
free(reply->cell_config[i]->mbsfn_subframe_config_sfalloc);
/* si_config is shared between MAC and RRC, free here */
if (reply->cell_config[i]->si_config) {
for(int j = 0; j < reply->cell_config[i]->si_config->n_si_message; j++) {
free(reply->cell_config[i]->si_config->si_message[j]);
......@@ -316,26 +317,7 @@ int flexran_agent_destroy_enb_config_reply(Protocol__FlexranMessage *msg) {
}
if (reply->cell_config[i]->slice_config) {
for (int j = 0; j < reply->cell_config[i]->slice_config->n_dl; ++j) {
if (reply->cell_config[i]->slice_config->dl[j]->n_sorting > 0)
free(reply->cell_config[i]->slice_config->dl[j]->sorting);
free(reply->cell_config[i]->slice_config->dl[j]->scheduler_name);
free(reply->cell_config[i]->slice_config->dl[j]);
}
free(reply->cell_config[i]->slice_config->dl);
for (int j = 0; j < reply->cell_config[i]->slice_config->n_ul; ++j) {
if (reply->cell_config[i]->slice_config->ul[j]->n_sorting > 0)
free(reply->cell_config[i]->slice_config->ul[j]->sorting);
free(reply->cell_config[i]->slice_config->ul[j]->scheduler_name);
free(reply->cell_config[i]->slice_config->ul[j]);
}
free(reply->cell_config[i]->slice_config->ul);
free(reply->cell_config[i]->slice_config);
flexran_agent_destroy_mac_slice_config(reply->cell_config[i]);
}
free(reply->cell_config[i]);
......@@ -893,31 +875,40 @@ int flexran_agent_handle_enb_config_reply(mid_t mod_id, const void *params, Prot
Protocol__FlexranMessage *input = (Protocol__FlexranMessage *)params;
Protocol__FlexEnbConfigReply *enb_config = input->enb_config_reply_msg;
if (enb_config->n_cell_config == 0) {
LOG_W(FLEXRAN_AGENT,
"received enb_config_reply message does not contain a cell_config\n");
*msg = NULL;
return 0;
}
if (enb_config->n_cell_config > 1)
LOG_W(FLEXRAN_AGENT, "ignoring slice configs for other cell except cell 0\n");
if (flexran_agent_get_mac_xface(mod_id) && enb_config->cell_config[0]->slice_config) {
prepare_update_slice_config(mod_id, enb_config->cell_config[0]->slice_config);
} else if (enb_config->cell_config[0]->has_eutra_band
&& enb_config->cell_config[0]->has_dl_freq
&& enb_config->cell_config[0]->has_ul_freq
&& enb_config->cell_config[0]->has_dl_bandwidth) {
initiate_soft_restart(mod_id, enb_config->cell_config[0]);
} else if (flexran_agent_get_rrc_xface(mod_id)
&& enb_config->cell_config[0]->has_x2_ho_net_control) {
LOG_I(FLEXRAN_AGENT,
"setting X2 HO NetControl to %d\n",
enb_config->cell_config[0]->x2_ho_net_control);
const int rc = flexran_set_x2_ho_net_control(mod_id, enb_config->cell_config[0]->x2_ho_net_control);
if (rc < 0)
LOG_E(FLEXRAN_AGENT, "Error in configuring X2 handover controlled by network");
if (enb_config->n_cell_config > 0) {
if (flexran_agent_get_mac_xface(mod_id) && enb_config->cell_config[0]->slice_config) {
prepare_update_slice_config(mod_id, &enb_config->cell_config[0]->slice_config);
}
if (enb_config->cell_config[0]->has_eutra_band
&& enb_config->cell_config[0]->has_dl_freq
&& enb_config->cell_config[0]->has_ul_freq
&& enb_config->cell_config[0]->has_dl_bandwidth) {
initiate_soft_restart(mod_id, enb_config->cell_config[0]);
}
if (flexran_agent_get_rrc_xface(mod_id)
&& enb_config->cell_config[0]->has_x2_ho_net_control) {
LOG_I(FLEXRAN_AGENT,
"setting X2 HO NetControl to %d\n",
enb_config->cell_config[0]->x2_ho_net_control);
const int rc = flexran_set_x2_ho_net_control(mod_id, enb_config->cell_config[0]->x2_ho_net_control);
if (rc < 0)
LOG_E(FLEXRAN_AGENT, "Error in configuring X2 handover controlled by network");
}
if (flexran_agent_get_rrc_xface(mod_id) && enb_config->cell_config[0]->n_plmn_id > 0) {
flexran_agent_handle_plmn_update(mod_id,
0,
enb_config->cell_config[0]->n_plmn_id,
enb_config->cell_config[0]->plmn_id);
}
}
if (flexran_agent_get_s1ap_xface(mod_id) && enb_config->s1ap) {
flexran_agent_handle_mme_update(mod_id,
enb_config->s1ap->n_mme,
enb_config->s1ap->mme);
}
*msg = NULL;
......@@ -930,7 +921,11 @@ int flexran_agent_handle_ue_config_reply(mid_t mod_id, const void *params, Proto
Protocol__FlexUeConfigReply *ue_config_reply = input->ue_config_reply_msg;
for (i = 0; flexran_agent_get_mac_xface(mod_id) && i < ue_config_reply->n_ue_config; i++)
prepare_ue_slice_assoc_update(mod_id, ue_config_reply->ue_config[i]);
prepare_ue_slice_assoc_update(mod_id, &ue_config_reply->ue_config[i]);
/* prepare_ue_slice_assoc_update takes ownership of the individual
* FlexUeConfig messages. Therefore, set n_ue_config to zero so that they are
* not accidentally freed twice */
ue_config_reply->n_ue_config = 0;
*msg = NULL;
return 0;
......
......@@ -573,10 +573,7 @@ schedule_ue_spec(module_id_t module_idP,
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR,
VCD_FUNCTION_IN);
start_meas(&eNB->schedule_dlsch_preprocessor);
dlsch_scheduler_pre_processor(module_idP,
CC_id,
frameP,
subframeP);
eNB->pre_processor_dl.dl(module_idP, CC_id, frameP, subframeP);
stop_meas(&eNB->schedule_dlsch_preprocessor);
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR,
VCD_FUNCTION_OUT);
......@@ -588,6 +585,7 @@ schedule_ue_spec(module_id_t module_idP,
UE_sched_ctrl_t *ue_sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
UE_TEMPLATE *ue_template = &UE_info->UE_template[CC_id][UE_id];
eNB_UE_STATS *eNB_UE_stats = &UE_info->eNB_UE_stats[CC_id][UE_id];
eNB_UE_stats->TBS = 0;
const rnti_t rnti = ue_template->rnti;
// If TDD
......@@ -679,15 +677,15 @@ schedule_ue_spec(module_id_t module_idP,
const uint32_t rbc = allocate_prbs_sub(
nb_rb, N_RB_DL, N_RBG, ue_sched_ctrl->rballoc_sub_UE[CC_id]);
if (nb_rb > ue_sched_ctrl->pre_nb_available_rbs[CC_id]) {
LOG_D(MAC,
"[eNB %d] Frame %d CC_id %d : don't schedule UE %d, its retransmission takes more resources than we have\n",
if (nb_rb > ue_sched_ctrl->pre_nb_available_rbs[CC_id])
LOG_W(MAC,
"[eNB %d] %d.%d CC_id %d : should not schedule UE %d, its "
"retransmission takes more resources than we have\n",
module_idP,
frameP,
subframeP,
CC_id,
UE_id);
continue;
}
/* CDRX */
ue_sched_ctrl->harq_rtt_timer[CC_id][harq_pid] = 1; // restart HARQ RTT timer
......@@ -799,6 +797,7 @@ schedule_ue_spec(module_id_t module_idP,
eNB_UE_stats->rbs_used_retx = nb_rb;
eNB_UE_stats->total_rbs_used_retx += nb_rb;
eNB_UE_stats->dlsch_mcs2 = eNB_UE_stats->dlsch_mcs1;
eNB_UE_stats->TBS = TBS;
} else {
// Now check RLC information to compute number of required RBs
// get maximum TBS size for RLC request
......@@ -842,13 +841,18 @@ schedule_ue_spec(module_id_t module_idP,
if (ue_sched_ctrl->dl_lc_bytes[i] == 0) // no data in this LC!
continue;
LOG_D(MAC, "[eNB %d] SFN/SF %d.%d, LC%d->DLSCH CC_id %d, Requesting %d bytes from RLC (RRC message)\n",
const uint32_t data =
min(ue_sched_ctrl->dl_lc_bytes[i],
TBS - ta_len - header_length_total - sdu_length_total - 3);
LOG_D(MAC,
"[eNB %d] SFN/SF %d.%d, LC%d->DLSCH CC_id %d, Requesting %d "
"bytes from RLC (RRC message)\n",
module_idP,
frameP,
subframeP,
lcid,
CC_id,
TBS - ta_len - header_length_total - sdu_length_total - 3);
data);
sdu_lengths[num_sdus] = mac_rlc_data_req(module_idP,
rnti,
......@@ -857,7 +861,7 @@ schedule_ue_spec(module_id_t module_idP,
ENB_FLAG_YES,
MBMS_FLAG_NO,
lcid,
TBS - ta_len - header_length_total - sdu_length_total - 3,
data,
(char *)&dlsch_buffer[sdu_length_total],
0,
0
......
......@@ -2131,33 +2131,36 @@ dump_ue_list(UE_list_t *listP) {
* Add a UE to UE_list listP
*/
inline void add_ue_list(UE_list_t *listP, int UE_id) {
if (listP->head == -1) {
listP->head = UE_id;
listP->next[UE_id] = -1;
} else {
int i = listP->head;
while (listP->next[i] >= 0)
i = listP->next[i];
listP->next[i] = UE_id;
listP->next[UE_id] = -1;
}
int *cur = &listP->head;
while (*cur >= 0)
cur = &listP->next[*cur];
*cur = UE_id;
}
//------------------------------------------------------------------------------
/*
* Remove a UE from the UE_list listP, return the previous element
* Remove a UE from the UE_list listP
*/
inline int remove_ue_list(UE_list_t *listP, int UE_id) {
listP->next[UE_id] = -1;
if (listP->head == UE_id) {
listP->head = listP->next[UE_id];
return -1;
}
int *cur = &listP->head;
while (*cur != -1 && *cur != UE_id)
cur = &listP->next[*cur];
if (*cur == -1)
return 0;
int *next = &listP->next[*cur];
*cur = listP->next[*cur];
*next = -1;
return 1;
}
int previous = prev(listP, UE_id);
if (previous != -1)
listP->next[previous] = listP->next[UE_id];
return previous;
//------------------------------------------------------------------------------
/*
* Initialize the UE_list listP
*/
inline void init_ue_list(UE_list_t *listP) {
listP->head = -1;
for (int i = 0; i < MAX_MOBILES_PER_ENB; ++i)
listP->next[i] = -1;
}
//------------------------------------------------------------------------------
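The rewritten add_ue_list()/remove_ue_list() above replace the head/tail special cases with the pointer-to-pointer traversal idiom. Below is a standalone sketch of the same idiom, assuming nothing from OAI (struct name, array size and the main() driver are illustrative only):
#include <stdio.h>
#define N_IDS 8
struct id_list { int head; int next[N_IDS]; };
static void list_append(struct id_list *l, int id) {
  int *cur = &l->head;
  while (*cur >= 0)            /* follow links until the terminating -1 */
    cur = &l->next[*cur];
  *cur = id;                   /* link the new tail; next[id] is already -1 */
}
static int list_remove(struct id_list *l, int id) {
  int *cur = &l->head;
  while (*cur != -1 && *cur != id)
    cur = &l->next[*cur];
  if (*cur == -1)
    return 0;                  /* id not in the list */
  *cur = l->next[id];          /* predecessor (or head) now skips id */
  l->next[id] = -1;
  return 1;
}
int main(void) {
  struct id_list l;
  l.head = -1;
  for (int i = 0; i < N_IDS; ++i)
    l.next[i] = -1;
  list_append(&l, 3);
  list_append(&l, 5);
  list_remove(&l, 3);
  for (int i = l.head; i >= 0; i = l.next[i])
    printf("%d\n", i);         /* prints: 5 */
  return 0;
}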
......@@ -2196,6 +2199,12 @@ add_new_ue(module_id_t mod_idP,
UE_info->active[UE_id] = TRUE;
add_ue_list(&UE_info->list, UE_id);
dump_ue_list(&UE_info->list);
pp_impl_param_t* dl = &RC.mac[mod_idP]->pre_processor_dl;
if (dl->slices) // inform slice implementation about new UE
dl->add_UE(dl->slices, UE_id);
pp_impl_param_t* ul = &RC.mac[mod_idP]->pre_processor_ul;
if (ul->slices) // inform slice implementation about new UE
ul->add_UE(ul->slices, UE_id);
if (IS_SOFTMODEM_IQPLAYER)// not specific to record/playback ?
UE_info->UE_template[cc_idP][UE_id].pre_assigned_mcs_ul = 0;
UE_info->UE_template[cc_idP][UE_id].rach_resource_type = rach_resource_type;
......@@ -2259,6 +2268,12 @@ rrc_mac_remove_ue(module_id_t mod_idP,
UE_info->num_UEs--;
remove_ue_list(&UE_info->list, UE_id);
pp_impl_param_t* dl = &RC.mac[mod_idP]->pre_processor_dl;
if (dl->slices) // inform slice implementation about removed UE
dl->remove_UE(dl->slices, UE_id);
pp_impl_param_t* ul = &RC.mac[mod_idP]->pre_processor_ul;
if (ul->slices) // inform slice implementation about removed UE
ul->remove_UE(ul->slices, UE_id);
/* Clear all remaining pending transmissions */
memset(&UE_info->UE_template[pCC_id][UE_id], 0, sizeof(UE_TEMPLATE));
......
......@@ -1301,11 +1301,11 @@ schedule_ulsch_rnti(module_id_t module_idP,
/*
* ULSCH preprocessor: set UE_template->
* pre_allocated_nb_rb_ul[slice_idx]
* pre_allocated_nb_rb_ul
* pre_assigned_mcs_ul
* pre_allocated_rb_table_index_ul
*/
ulsch_scheduler_pre_processor(module_idP, CC_id, frameP, subframeP, sched_frame, sched_subframeP);
mac->pre_processor_ul.ul(module_idP, CC_id, frameP, subframeP, sched_frame, sched_subframeP);
for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
if (UE_info->UE_template[CC_id][UE_id].rach_resource_type > 0)
......
......@@ -155,9 +155,6 @@
/*!\brief minimum MAC data needed for transmitting 1 min RLC PDU size + 1 byte MAC subHeader */
#define MIN_MAC_HDR_RLC_SIZE (1 + MIN_RLC_PDU_SIZE)
/*!\brief maximum number of slices / groups */
#define MAX_NUM_SLICES 10
#define U_PLANE_INACTIVITY_VALUE 0 /* defined 10ms order (zero means infinity) */
......@@ -543,7 +540,6 @@ typedef enum {
SCHED_MODE_FAIR_RR /// fair round robin
} SCHEDULER_MODES;
/*! \brief temporary struct for ULSCH sched */
typedef struct {
rnti_t rnti;
......@@ -1150,9 +1146,6 @@ typedef struct {
UE_list_t list;
int num_UEs;
boolean_t active[MAX_MOBILES_PER_ENB];
/// Sorting criteria for the UE list in the MAC preprocessor
uint16_t sorting_criteria[MAX_NUM_SLICES][CR_NUM];
} UE_info_t;
/*! \brief deleting control information*/
......@@ -1172,107 +1165,88 @@ typedef struct {
} UE_free_list_t;
/**
* slice specific scheduler for the DL
* describes contiguous RBs
*/
typedef void (*slice_scheduler_dl)(module_id_t mod_id,
int slice_idx,
frame_t frame,
sub_frame_t subframe,
int *mbsfn_flag);
typedef struct {
slice_id_t id;
/// RB share for each slice
float pct;
/// whether this slice is isolated from the others
int isol;
int prio;
/// Frequency ranges for slice positioning
int pos_low;
int pos_high;
// max mcs for each slice
int maxmcs;
/// criteria for sorting policies of the slices
uint32_t sorting;
/// Accounting policy (just greedy(1) or fair(0) setting for now)
int accounting;
/// name of available scheduler
char *sched_name;
/// pointer to the slice specific scheduler in DL
slice_scheduler_dl sched_cb;
} slice_sched_conf_dl_t;
typedef void (*slice_scheduler_ul)(module_id_t mod_id,
int slice_idx,
frame_t frame,
sub_frame_t subframe,
unsigned char sched_subframe,
uint16_t *first_rb);
typedef struct {
slice_id_t id;
/// RB share for each slice
float pct;
// MAX MCS for each slice
int maxmcs;
/// criteria for sorting policies of the slices
uint32_t sorting;
/// starting RB (RB offset) of UL scheduling
int first_rb;
/// name of available scheduler
char *sched_name;
/// pointer to the slice specific scheduler in UL
slice_scheduler_ul sched_cb;
} slice_sched_conf_ul_t;
int start;
int length;
} contig_rbs_t;
/**
* definition of a scheduling algorithm implementation used in the
* default DL scheduler
*/
typedef struct {
/// counter used to indicate when all slices have pre-allocated UEs
//int slice_counter;
/// indicates whether remaining RBs after first intra-slice allocation will
/// be allocated to UEs of the same slice
int intraslice_share_active;
/// indicates whether remaining RBs after slice allocation will be
/// allocated to UEs of another slice. Isolated slices will be ignored
int interslice_share_active;
/// number of active DL slices
int n_dl;
slice_sched_conf_dl_t dl[MAX_NUM_SLICES];
/// number of active UL slices
int n_ul;
slice_sched_conf_ul_t ul[MAX_NUM_SLICES];
/// common rb allocation list between slices
uint8_t rballoc_sub[NFAPI_CC_MAX][N_RBG_MAX];
} slice_info_t;
char *name;
void *(*setup)(void);
void (*unset)(void **);
int (*run)(
module_id_t, int, int, int, UE_list_t *, int, int, uint8_t *, void *);
void *data;
} default_sched_dl_algo_t;
/**
* describes contiguous RBs
* definition of a scheduling algorithm implementation used in the
* default UL scheduler
*/
typedef struct {
int start;
int length;
} contig_rbs_t;
char *name;
void *(*setup)(void);
void (*unset)(void **);
int (*run)(
module_id_t, int, int, int, int, int, UE_list_t *, int, int, contig_rbs_t *, void *);
void *data;
} default_sched_ul_algo_t;
typedef void (*pp_impl_dl)(module_id_t mod_id,
int CC_id,
frame_t frame,
sub_frame_t subframe);
typedef void (*pp_impl_ul)(module_id_t mod_id,
int CC_id,
frame_t frame,
sub_frame_t subframe,
frame_t sched_frame,
sub_frame_t sched_subframe);
struct slice_info_s;
typedef struct {
int algorithm;
/// inform the slice algorithm about a new UE
void (*add_UE)(struct slice_info_s *s, int UE_id);
/// inform the slice algorithm about a UE that disconnected
void (*remove_UE)(struct slice_info_s *s, int UE_id);
/// move a UE to a slice in DL/UL, -1 means don't move (no-op).
void (*move_UE)(struct slice_info_s *s, int UE_id, int idx);
/// Adds a new slice through admission control. slice_params are
/// algorithm-specific parameters. sched is either a default_sched_ul_algo_t
/// or default_sched_dl_algo_t, depending on whether this implementation
/// handles UL/DL. If slice at index exists, updates existing
/// slice. Returns index of new slice or -1 on failure.
int (*addmod_slice)(struct slice_info_s *s,
int id,
char *label,
void *sched,
void *slice_params);
/// Removes the slice at slice_idx. Returns 1 if successful, 0 if not.
int (*remove_slice)(struct slice_info_s *s, uint8_t slice_idx);
union {
pp_impl_dl dl;
pp_impl_ul ul;
};
union {
default_sched_ul_algo_t ul_algo;
default_sched_dl_algo_t dl_algo;
};
void (*destroy)(struct slice_info_s **s);
struct slice_info_s *slices;
} pp_impl_param_t;
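As an illustration of the default_sched_dl_algo_t hook defined above, a skeleton DL algorithm could be wired up as sketched below. Only the function-pointer shapes come from the struct; the parameter names in run(), the state handling and the name "my_rr_dl" are assumptions, and the OAI types are expected to come from this MAC header.
#include <stdint.h>
#include <stdlib.h>
static void *my_dl_setup(void) {
  return calloc(1, sizeof(int));     /* per-algorithm state, here a dummy int */
}
static void my_dl_unset(void **data) {
  free(*data);
  *data = NULL;
}
static int my_dl_run(module_id_t mod_id, int CC_id, int frame, int subframe,
                     UE_list_t *UE_list, int max_num_ue, int n_rbg,
                     uint8_t *rbgalloc_mask, void *data) {
  /* decide which RBGs flagged in rbgalloc_mask go to which UE in UE_list;
   * the meaning of the int parameters is inferred from the callers */
  return 0;                          /* e.g. number of RBGs actually used */
}
default_sched_dl_algo_t my_rr_dl = {
  .name  = "my_rr_dl",
  .setup = my_dl_setup,
  .unset = my_dl_unset,
  .run   = my_dl_run,
};
mac_top_init_eNB() and RCconfig_macrlc() in this commit then resolve such an algorithm by name via dlsym() and copy it into pre_processor_dl.dl_algo.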
/*! \brief eNB common channels */
typedef struct {
......@@ -1395,9 +1369,6 @@ typedef struct eNB_MAC_INST_s {
uint32_t ul_handle;
UE_info_t UE_info;
/// slice-related configuration
slice_info_t slice_info;
///subband bitmap configuration
SBMAP_CONF sbmap_conf;
/// CCE table used to build DCI scheduling information
......@@ -1432,6 +1403,11 @@ typedef struct eNB_MAC_INST_s {
UE_free_list_t UE_free_list;
/// for scheduling selection
SCHEDULER_MODES scheduler_mode;
/// Default scheduler: Pre-processor implementation. Algorithms for UL/DL
/// are called by ULSCH/DLSCH, respectively. Pre-processor implementation can
/// encapsulate slicing.
pp_impl_param_t pre_processor_dl;
pp_impl_param_t pre_processor_ul;
int32_t puSch10xSnr;
int32_t puCch10xSnr;
......
......@@ -187,8 +187,6 @@ void add_msg3(module_id_t module_idP, int CC_id, RA_t *ra, frame_t frameP,
void init_UE_info(UE_info_t *UE_info);
void init_slice_info(slice_info_t *sli);
int mac_top_init(int eMBMS_active, char *uecap_xer,
uint8_t cba_group_active, uint8_t HO_active);
......@@ -661,6 +659,7 @@ int prev(UE_list_t *listP, int nodeP);
void add_ue_list(UE_list_t *listP, int UE_id);
int remove_ue_list(UE_list_t *listP, int UE_id);
void dump_ue_list(UE_list_t *listP);
void init_ue_list(UE_list_t *listP);
int UE_num_active_CC(UE_info_t *listP, int ue_idP);
int UE_PCCID(module_id_t mod_idP, int ue_idP);
rnti_t UE_RNTI(module_id_t mod_idP, int ue_idP);
......@@ -675,10 +674,10 @@ void set_ul_DAI(int module_idP,
void ulsch_scheduler_pre_processor(module_id_t module_idP,
int CC_id,
int frameP,
frame_t frameP,
sub_frame_t subframeP,
int sched_frameP,
unsigned char sched_subframeP);
frame_t sched_frameP,
sub_frame_t sched_subframeP);
int phy_stats_exist(module_id_t Mod_id, int rnti);
......
......@@ -45,11 +45,8 @@ extern RAN_CONTEXT_t RC;
void init_UE_info(UE_info_t *UE_info)
{
int list_el;
UE_info->num_UEs = 0;
UE_info->list.head = -1;
for (list_el = 0; list_el < MAX_MOBILES_PER_ENB; list_el++)
UE_info->list.next[list_el] = -1;
init_ue_list(&UE_info->list);
memset(UE_info->DLSCH_pdu, 0, sizeof(UE_info->DLSCH_pdu));
memset(UE_info->UE_template, 0, sizeof(UE_info->UE_template));
memset(UE_info->eNB_UE_stats, 0, sizeof(UE_info->eNB_UE_stats));
......@@ -57,32 +54,6 @@ void init_UE_info(UE_info_t *UE_info)
memset(UE_info->active, 0, sizeof(UE_info->active));
}
void init_slice_info(slice_info_t *sli)
{
sli->intraslice_share_active = 1;
sli->interslice_share_active = 1;
sli->n_dl = 1;
memset(sli->dl, 0, sizeof(slice_sched_conf_dl_t) * MAX_NUM_SLICES);
sli->dl[0].pct = 1.0;
sli->dl[0].prio = 10;
sli->dl[0].pos_high = N_RBG_MAX;
sli->dl[0].maxmcs = 28;
sli->dl[0].sorting = 0x012345;
sli->dl[0].sched_name = "schedule_ue_spec";
sli->dl[0].sched_cb = dlsym(NULL, sli->dl[0].sched_name);
AssertFatal(sli->dl[0].sched_cb, "DLSCH scheduler callback is NULL\n");
sli->n_ul = 1;
memset(sli->ul, 0, sizeof(slice_sched_conf_ul_t) * MAX_NUM_SLICES);
sli->ul[0].pct = 1.0;
sli->ul[0].maxmcs = 20;
sli->ul[0].sorting = 0x0123;
sli->ul[0].sched_name = "schedule_ulsch_rnti";
sli->ul[0].sched_cb = dlsym(NULL, sli->ul[0].sched_name);
AssertFatal(sli->ul[0].sched_cb, "ULSCH scheduler callback is NULL\n");
}
void mac_top_init_eNB(void)
{
module_id_t i, j;
......@@ -129,8 +100,23 @@ void mac_top_init_eNB(void)
mac[i]->if_inst = IF_Module_init(i);
mac[i]->pre_processor_dl.algorithm = 0;
mac[i]->pre_processor_dl.dl = dlsch_scheduler_pre_processor;
char *s = "round_robin_dl";
void *d = dlsym(NULL, s);
AssertFatal(d, "%s(): no scheduler algo '%s' found\n", __func__, s);
mac[i]->pre_processor_dl.dl_algo = *(default_sched_dl_algo_t *) d;
mac[i]->pre_processor_dl.dl_algo.data = mac[i]->pre_processor_dl.dl_algo.setup();
mac[i]->pre_processor_ul.algorithm = 0;
mac[i]->pre_processor_ul.ul = ulsch_scheduler_pre_processor;
s = "round_robin_ul";
d = dlsym(NULL, s);
AssertFatal(d, "%s(): no scheduler algo '%s' found\n", __func__, s);
mac[i]->pre_processor_ul.ul_algo = *(default_sched_ul_algo_t *) d;
mac[i]->pre_processor_ul.ul_algo.data = mac[i]->pre_processor_ul.ul_algo.setup();
init_UE_info(&mac[i]->UE_info);
init_slice_info(&mac[i]->slice_info);
}
RC.mac = mac;
......
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
/*!
* \file slicing.h
* \brief General slice definition and helper parameters
* \author Robert Schmidt
* \date 2020
* \email robert.schmidt@eurecom.fr
*/
#ifndef __SLICING_H__
#define __SLICING_H__
#include "openair2/LAYER2/MAC/mac.h"
typedef struct slice_s {
/// Arbitrary ID
slice_id_t id;
/// Arbitrary label
char *label;
union {
default_sched_dl_algo_t dl_algo;
default_sched_ul_algo_t ul_algo;
};
/// A specific algorithm's implementation parameters
void *algo_data;
/// Internal data that might be kept alongside a slice's params
void *int_data;
// list of users in this slice
UE_list_t UEs;
} slice_t;
typedef struct slice_info_s {
uint8_t num;
slice_t **s;
uint8_t UE_assoc_slice[MAX_MOBILES_PER_ENB];
} slice_info_t;
int slicing_get_UE_slice_idx(slice_info_t *si, int UE_id);
#define STATIC_SLICING 10
/* only four static slices for UL, DL resp. (not enough DCIs) */
#define MAX_STATIC_SLICES 4
typedef struct {
uint16_t posLow;
uint16_t posHigh;
} static_slice_param_t;
pp_impl_param_t static_dl_init(module_id_t mod_id, int CC_id);
pp_impl_param_t static_ul_init(module_id_t mod_id, int CC_id);
#endif /* __SLICING_H__ */
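A hedged sketch of how the static slicing parameters above might be fed through the pre-processor's addmod_slice() callback; apart from the types declared in slicing.h (and mac.h behind it), everything here is an assumption, in particular the call site, the label handling, the meaning of a NULL sched argument and the availability of the global RAN context RC.
#include <stdlib.h>
#include <string.h>
#include "slicing.h"                 /* static_slice_param_t, pp_impl_param_t */
static void example_add_static_dl_slice(module_id_t mod_id) {
  /* assumes the DL pre-processor was set up for slicing, e.g. static_dl_init() */
  pp_impl_param_t *dl = &RC.mac[mod_id]->pre_processor_dl;
  static_slice_param_t *p = malloc(sizeof(*p));
  p->posLow = 0;                     /* first RBG of the slice */
  p->posHigh = 10;                   /* last RBG of the slice */
  char *label = strdup("example");
  /* NULL sched is assumed to mean "keep the current DL algorithm" */
  int idx = dl->addmod_slice(dl->slices, /* id */ 1, label, NULL, p);
  if (idx < 0) {                     /* admission control rejected the slice */
    free(label);
    free(p);
  }
}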
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
/*!
* \file slicing_internal.h
* \brief Internal slice helper functions
* \author Robert Schmidt
* \date 2020
* \email robert.schmidt@eurecom.fr
*/
#ifndef __SLICING_INTERNAL_H__
#define __SLICING_INTERNAL_H__
#include "slicing.h"
void slicing_add_UE(slice_info_t *si, int UE_id);
void _remove_UE(slice_t **s, uint8_t *assoc, int UE_id);
void slicing_remove_UE(slice_info_t *si, int UE_id);
void _move_UE(slice_t **s, uint8_t *assoc, int UE_id, int to);
void slicing_move_UE(slice_info_t *si, int UE_id, int idx);
slice_t *_add_slice(uint8_t *n, slice_t **s);
slice_t *_remove_slice(uint8_t *n, slice_t **s, uint8_t *assoc, int idx);
#endif /* __SLICING_INTERNAL_H__ */
......@@ -251,9 +251,8 @@ typedef struct {
uint8_t short_messages; //8 bits
uint8_t tb_scaling; //2 bits
uint8_t pucch_resource_indicator; //3 bits
uint8_t dmrs_sequence_initialization; //1 bit
uint8_t system_info_indicator; //1 bit
uint8_t ulsch_indicator;
uint8_t slot_format_indicator_count;
uint8_t *slot_format_indicators;
......@@ -290,6 +289,7 @@ typedef struct {
dci_field_t cloded_loop_indicator; //variable
dci_field_t ul_sul_indicator; //variable
dci_field_t antenna_ports; //variable
dci_field_t dmrs_sequence_initialization;
dci_field_t reserved; //1_0/C-RNTI:10 bits, 1_0/P-RNTI: 6 bits, 1_0/SI-&RA-RNTI: 16 bits
} dci_pdu_rel15_t;
......
......@@ -77,7 +77,8 @@ int is_nr_DL_slot(NR_ServingCellConfigCommon_t *scc,slot_t slotP);
int is_nr_UL_slot(NR_ServingCellConfigCommon_t *scc,slot_t slotP);
uint16_t nr_dci_size(NR_CellGroupConfig_t *secondaryCellGroup,
uint16_t nr_dci_size(NR_ServingCellConfigCommon_t *scc,
NR_CellGroupConfig_t *secondaryCellGroup,
dci_pdu_rel15_t *dci_pdu,
nr_dci_format_t format,
nr_rnti_type_t rnti_type,
......@@ -103,6 +104,8 @@ uint8_t compute_nr_root_seq(NR_RACH_ConfigCommon_t *rach_config,
uint8_t nb_preambles,
uint8_t unpaired);
int ul_ant_bits(NR_DMRS_UplinkConfig_t *NR_DMRS_UplinkConfig,long transformPrecoder);
int get_format0(uint8_t index, uint8_t unpaired);
uint16_t get_NCS(uint8_t index, uint16_t format, uint8_t restricted_set_config);
......
......@@ -691,7 +691,7 @@ void nr_generate_Msg2(module_id_t module_idP,
pdcch_pdu_rel15->StartSymbolIndex,
pdcch_pdu_rel15->DurationSymbols);
fill_dci_pdu_rel15(secondaryCellGroup,pdcch_pdu_rel15, &dci_pdu_rel15[0], dci_formats, rnti_types,dci10_bw,ra->bwp_id);
fill_dci_pdu_rel15(scc,secondaryCellGroup,pdcch_pdu_rel15, &dci_pdu_rel15[0], dci_formats, rnti_types,dci10_bw,ra->bwp_id);
dl_req->nPDUs+=2;
......