Commit e2223d71 authored by Robert Schmidt, committed by Chieh-Chun Chen

Add 5G preprocessor with NVS slicing

parent 3f3a9869
......@@ -1366,6 +1366,7 @@ set (MAC_NR_SRC
${NR_GNB_MAC_DIR}/mac_rrc_dl_handler.c
${NR_GNB_MAC_DIR}/mac_rrc_ul_direct.c
${NR_GNB_MAC_DIR}/mac_rrc_ul_f1ap.c
${NR_GNB_MAC_DIR}/slicing/nr_slicing.c
)
......
......@@ -274,7 +274,7 @@ nr_preprocessor_phytest()], multiple users in FR1
2) Checks the quantity of waiting data in RLC
3) Either sets up resource allocation directly (e.g., for a single UE,
phytest), or calls into a function to perform the actual resource allocation.
Currently, this is done using pf_dl() which implements a basic
Currently, this is done using nr_pf_dl() which implements a basic
proportional fair scheduler:
* for every UE, check for retransmission and allocate as necessary
* Calculate the PF coefficient and put eligible UEs into a list
......
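To make the flow in the documentation excerpt above concrete, here is a minimal, hedged sketch of a DL preprocessor using the names introduced in this commit; the authoritative implementation is nr_fr1_dlsch_preprocessor further down, and the resource-map handling is elided here.
/* Hedged sketch only: illustrates the preprocessor steps, not the real code. */
static void dl_preprocessor_sketch(module_id_t mod_id, frame_t frame, sub_frame_t slot)
{
  gNB_MAC_INST *mac = RC.nrmac[mod_id];
  /* 1) check the quantity of waiting data in RLC for every UE */
  nr_store_dlsch_buffer(mod_id, frame, slot);
  /* 2) derive the set of free PRBs from the vrb_map (elided here) */
  uint16_t rballoc_mask[275] = {0}; /* per-RB bitmap of free symbols; 275 is an illustrative upper bound */
  int n_rb_sched = 0;               /* number of schedulable RBs, computed in step 2 */
  /* 3) hand over to the configured scheduling algorithm (nr_pf_dl by default) */
  mac->pre_processor_dl.dl_algo.run(mod_id, frame, slot,
                                    mac->UE_info.list,
                                    2 /* max_num_ue, illustrative value */,
                                    n_rb_sched,
                                    rballoc_mask,
                                    mac->pre_processor_dl.dl_algo.data);
}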
......@@ -723,7 +723,7 @@ int main(int argc, char **argv)
nr_mac_add_test_ue(RC.nrmac[0], secondaryCellGroup->spCellConfig->reconfigurationWithSync->newUE_Identity, secondaryCellGroup);
// reset preprocessor to the one of DLSIM after it has been set during
// nr_mac_config_scc()
gNB_mac->pre_processor_dl = nr_dlsim_preprocessor;
gNB_mac->pre_processor_dl.dl = nr_dlsim_preprocessor;
phy_init_nr_gNB(gNB);
N_RB_DL = gNB->frame_parms.N_RB_DL;
NR_UE_info_t *UE_info = RC.nrmac[0]->UE_info.list[0];
......
......@@ -818,7 +818,7 @@ void nr_mac_config_scc(gNB_MAC_INST *nrmac, NR_ServingCellConfigCommon_t *scc, c
}
if (get_softmodem_params()->phy_test) {
nrmac->pre_processor_dl = nr_preprocessor_phytest;
nrmac->pre_processor_dl.dl = nr_preprocessor_phytest;
nrmac->pre_processor_ul = nr_ul_preprocessor_phytest;
} else {
nrmac->pre_processor_dl = nr_init_fr1_dlsch_preprocessor(0);
......
......@@ -317,7 +317,7 @@ int nr_write_ce_dlsch_pdu(module_id_t module_idP,
return offset;
}
static void nr_store_dlsch_buffer(module_id_t module_id, frame_t frame, sub_frame_t slot)
void nr_store_dlsch_buffer(module_id_t module_id, frame_t frame, sub_frame_t slot)
{
UE_iterator(RC.nrmac[module_id]->UE_info.list, UE) {
NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
......@@ -589,13 +589,30 @@ static int comparator(const void *p, const void *q) {
return ((UEsched_t*)p)->coef < ((UEsched_t*)q)->coef;
}
static void pf_dl(module_id_t module_id,
static void *nr_pf_dl_setup(void)
{
void *data = malloc(MAX_MOBILES_PER_GNB * sizeof(float));
AssertFatal(data, "%s(): could not allocate data\n", __func__);
for (int i = 0; i < MAX_MOBILES_PER_GNB; i++)
((float *) data)[i] = 0.0f;
return data;
}
static void nr_pf_dl_unset(void **data)
{
DevAssert(data);
if (*data)
free(*data);
*data = NULL;
}
static void nr_pf_dl(module_id_t module_id,
frame_t frame,
sub_frame_t slot,
NR_UE_info_t **UE_list,
int max_num_ue,
int n_rb_sched,
uint16_t *rballoc_mask)
uint16_t *rballoc_mask,
void *data)
{
gNB_MAC_INST *mac = RC.nrmac[module_id];
NR_ServingCellConfigCommon_t *scc=mac->common_channels[0].ServingCellConfigCommon;
......@@ -827,6 +844,13 @@ static void pf_dl(module_id_t module_id,
iterator++;
}
}
nr_dl_sched_algo_t nr_proportional_fair_wbcqi_dl = {
.name = "nr_proportional_fair_wbcqi_dl",
.setup = nr_pf_dl_setup,
.unset = nr_pf_dl_unset,
.run = nr_pf_dl,
.data = NULL
};
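The nr_dl_sched_algo_t descriptor above makes the DL scheduling algorithm pluggable. As a hedged illustration that is not part of this commit, a hypothetical alternative algorithm would be registered the same way; all nr_rr_dl_* names below are invented.
/* Hypothetical example: an alternative algorithm only needs to provide the
 * same four members to be usable by the preprocessor. */
static void *nr_rr_dl_setup(void)
{
  return NULL; /* this hypothetical algorithm keeps no per-call state */
}
static void nr_rr_dl_unset(void **data)
{
  (void) data; /* nothing to free */
}
static void nr_rr_dl(module_id_t module_id,
                     frame_t frame,
                     sub_frame_t slot,
                     NR_UE_info_t **UE_list,
                     int max_num_ue,
                     int n_rb_sched,
                     uint16_t *rballoc_mask,
                     void *data)
{
  /* scheduling body omitted; same contract as nr_pf_dl() above */
}
nr_dl_sched_algo_t nr_round_robin_dl_example = {
  .name = "nr_round_robin_dl_example",
  .setup = nr_rr_dl_setup,
  .unset = nr_rr_dl_unset,
  .run = nr_rr_dl,
  .data = NULL
};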
static void nr_fr1_dlsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t slot)
{
......@@ -878,16 +902,17 @@ static void nr_fr1_dlsch_preprocessor(module_id_t module_id, frame_t frame, sub_
int max_sched_ues = bw / (average_agg_level * NR_NB_REG_PER_CCE);
/* proportional fair scheduling algorithm */
pf_dl(module_id,
RC.nrmac[module_id]->pre_processor_dl.dl_algo.run(module_id,
frame,
slot,
UE_info->list,
max_sched_ues,
n_rb_sched,
rballoc_mask);
rballoc_mask,
RC.nrmac[module_id]->pre_processor_dl.dl_algo.data);
}
nr_pp_impl_dl nr_init_fr1_dlsch_preprocessor(int CC_id) {
nr_pp_impl_param_dl_t nr_init_fr1_dlsch_preprocessor(int CC_id) {
/* during initialization: no mutex needed */
/* in the PF algorithm, we have to use the TBsize to compute the coefficient.
* This would include the number of DMRS symbols, which in turn depends on
......@@ -912,7 +937,12 @@ nr_pp_impl_dl nr_init_fr1_dlsch_preprocessor(int CC_id) {
}
}
return nr_fr1_dlsch_preprocessor;
nr_pp_impl_param_dl_t impl;
memset(&impl, 0, sizeof(impl));
impl.dl = nr_fr1_dlsch_preprocessor;
impl.dl_algo = nr_proportional_fair_wbcqi_dl;
impl.dl_algo.data = impl.dl_algo.setup();
return impl;
}
void nr_schedule_ue_spec(module_id_t module_id,
......@@ -930,7 +960,10 @@ void nr_schedule_ue_spec(module_id_t module_id,
return;
/* PREPROCESSOR */
gNB_mac->pre_processor_dl(module_id, frame, slot);
pthread_mutex_lock(&gNB_mac->UE_info.mutex);
gNB_mac->pre_processor_dl.dl(module_id, frame, slot);
pthread_mutex_unlock(&gNB_mac->UE_info.mutex);
const int CC_id = 0;
NR_ServingCellConfigCommon_t *scc = gNB_mac->common_channels[CC_id].ServingCellConfigCommon;
NR_UEs_t *UE_info = &gNB_mac->UE_info;
......
......@@ -76,7 +76,7 @@ void nr_schedule_ue_spec(module_id_t module_id,
nfapi_nr_tx_data_request_t *TX_req);
/* \brief default FR1 DL preprocessor init routine, returns preprocessor to call */
nr_pp_impl_dl nr_init_fr1_dlsch_preprocessor(int CC_id);
nr_pp_impl_param_dl_t nr_init_fr1_dlsch_preprocessor(int CC_id);
void schedule_nr_sib1(module_id_t module_idP,
frame_t frameP,
......
......@@ -259,7 +259,7 @@ void mac_top_init_gNB(ngran_node_t node_type,
uid_linear_allocator_init(&RC.nrmac[i]->UE_info.uid_allocator);
if (get_softmodem_params()->phy_test) {
RC.nrmac[i]->pre_processor_dl = nr_preprocessor_phytest;
RC.nrmac[i]->pre_processor_dl.dl = nr_preprocessor_phytest;
RC.nrmac[i]->pre_processor_ul = nr_ul_preprocessor_phytest;
} else {
RC.nrmac[i]->pre_processor_dl = nr_init_fr1_dlsch_preprocessor(0);
......
......@@ -716,6 +716,8 @@ typedef struct {
float ul_thr_ue;
float dl_thr_ue;
long pdsch_HARQ_ACK_Codebook;
/// ID of the DL slice this UE is associated with
slice_id_t dl_id;
} NR_UE_info_t;
typedef struct {
......@@ -730,6 +732,19 @@ typedef struct {
#define UE_iterator(BaSe, VaR) NR_UE_info_t ** VaR##pptr=BaSe, *VaR; while ((VaR=*(VaR##pptr++)))
/**
* definition of a scheduling algorithm implementation used in the
* default DL scheduler
*/
typedef struct {
char *name;
void *(*setup)(void);
void (*unset)(void **);
void (*run)(
module_id_t, frame_t, sub_frame_t, NR_UE_info_t **, int, int, uint16_t *, void *);
void *data;
} nr_dl_sched_algo_t;
typedef void (*nr_pp_impl_dl)(module_id_t mod_id,
frame_t frame,
sub_frame_t slot);
......@@ -737,6 +752,43 @@ typedef bool (*nr_pp_impl_ul)(module_id_t mod_id,
frame_t frame,
sub_frame_t slot);
struct nr_slice_info_s;
struct nr_slice_s;
typedef struct {
int algorithm;
/// inform the slice algorithm about a new UE
void (*add_UE)(struct nr_slice_info_s *s, NR_UE_info_t **UE_list);
/// inform the slice algorithm about a UE that disconnected
void (*remove_UE)(struct nr_slice_info_s *s, NR_UE_info_t* rm_ue, int idx);
/// move a UE to a slice in DL/UL, -1 means don't move (no-op).
void (*move_UE)(struct nr_slice_info_s *s, NR_UE_info_t* assoc_ue, int old_idx, int new_idx);
/// get the index of the slice the UE is associated with
int (*get_UE_slice_idx)(struct nr_slice_info_s *s, uint16_t rnti);
/// get the UE's index within the given slice
int (*get_UE_idx)(struct nr_slice_s *si, uint16_t rnti);
/// Adds a new slice through admission control. slice_params are
/// algorithm-specific parameters. sched points to the scheduling algorithm
/// (an nr_dl_sched_algo_t for this DL implementation). If a slice with the
/// given id already exists, it is updated instead. Returns the index of the
/// slice or a negative value on failure.
int (*addmod_slice)(struct nr_slice_info_s *s,
int id,
char *label,
void *sched,
void *slice_params);
/// Removes the slice at index slice_idx. Returns 1 if successful, 0 if not.
int (*remove_slice)(struct nr_slice_info_s *s, uint8_t slice_idx);
nr_pp_impl_dl dl;
nr_dl_sched_algo_t dl_algo;
void (*destroy)(struct nr_slice_info_s **s);
struct nr_slice_info_s *slices;
} nr_pp_impl_param_dl_t;
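As a hedged usage sketch (assumed calling context, not code from this commit), the nr_pp_impl_param_dl_t callbacks could be exercised as follows to switch the gNB DL preprocessor to NVS slicing and add a rate slice; error handling and locking are simplified.
/* Hedged sketch: enable NVS slicing, shrink the default slice, add a rate slice. */
gNB_MAC_INST *mac = RC.nrmac[0];
mac->pre_processor_dl = nvs_nr_dl_init(0 /* mod_id */, 0 /* CC_id */);

/* shrink the default slice (id 0) so that a new slice passes admission control */
nvs_nr_slice_param_t *def = malloc(sizeof(*def));
def->type = NVS_RES;
def->pct_reserved = 0.5f;
mac->pre_processor_dl.addmod_slice(mac->pre_processor_dl.slices, 0, NULL, NULL, def);

/* add a 10 Mbps rate slice with id 1 */
nvs_nr_slice_param_t *p = malloc(sizeof(*p));
p->type = NVS_RATE;
p->Mbps_reserved = 10.0f;
p->Mbps_reference = 100.0f;
int idx = mac->pre_processor_dl.addmod_slice(mac->pre_processor_dl.slices,
                                             1 /* slice id */,
                                             strdup("example-slice"),
                                             &mac->pre_processor_dl.dl_algo,
                                             p);
if (idx < 0)
  LOG_E(NR_MAC, "slice creation failed (%d)\n", idx);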
typedef struct f1_config_t {
f1ap_setup_req_t *setup_req;
f1ap_setup_resp_t *setup_resp;
......@@ -835,7 +887,7 @@ typedef struct gNB_MAC_INST_s {
uint32_t ulsch_max_frame_inactivity;
/// DL preprocessor for differentiated scheduling
nr_pp_impl_dl pre_processor_dl;
nr_pp_impl_param_dl_t pre_processor_dl;
/// UL preprocessor for differentiated scheduling
nr_pp_impl_ul pre_processor_ul;
......
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
/*!
* \file nr_slicing.c
* \brief Generic NR Slicing helper functions and Static Slicing Implementation
* \author Robert Schmidt
* \date 2021
* \email robert.schmidt@eurecom.fr
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <dlfcn.h>
#include "assertions.h"
#include "common/utils/LOG/log.h"
#include "NR_MAC_COMMON/nr_mac_extern.h"
#include "NR_MAC_COMMON/nr_mac.h"
#include "openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h"
#include "openair2/LAYER2/NR_MAC_gNB/mac_proto.h"
#include "openair2/LAYER2/RLC/rlc.h"
#include "nr_slicing.h"
#include "nr_slicing_internal.h"
#include "executables/softmodem-common.h"
extern RAN_CONTEXT_t RC;
#define RET_FAIL(ret, x...) do { LOG_E(MAC, x); return ret; } while (0)
int nr_slicing_get_UE_slice_idx(nr_slice_info_t *si, uint16_t rnti) {
for (int s_len = 0; s_len < si->num; s_len++) {
// do not use UE_iterator here: it stops at the first NULL entry and would miss UEs stored after a gap
for (int i = 0; i < MAX_MOBILES_PER_GNB; i++) {
if (si->s[s_len]->UE_list[i] != NULL) {
if (si->s[s_len]->UE_list[i]->rnti == rnti) {
return si->s[s_len]->id;
}
}
}
}
LOG_E(NR_MAC, "cannot find slice idx for UE rnti 0x%04x\n", rnti);
return -99;
}
int nr_slicing_get_UE_idx(nr_slice_t *si, uint16_t rnti) {
for (int i = 0; i < MAX_MOBILES_PER_GNB; i++) {
// check for NULL: with multiple UEs, an earlier UE may already have been moved, leaving a gap
if (si->UE_list[i] != NULL) {
if (si->UE_list[i]->rnti == rnti)
return i;
}
}
LOG_E(NR_MAC, "cannot find ue idx for UE rnti 0x%04x\n", rnti);
return -99;
}
void nr_slicing_add_UE(nr_slice_info_t *si, NR_UE_info_t **UE_list) {
// Add all the connected UEs to the first slice 0
UE_iterator(UE_list, UE) {
if (UE) {
UE->dl_id = 0;
for (int i = 0; i < MAX_MOBILES_PER_GNB; i++) {
if (si->s[0]->UE_list[i] == NULL) {
si->s[0]->UE_list[i] = UE;
LOG_D(NR_MAC, "%s(), add UE_list[%d], rnti 0x%04x to slice idx 0\n", __func__, i, si->s[0]->UE_list[i]->rnti);
break;
}
}
}
}
}
void nr_slicing_remove_UE(nr_slice_info_t *si, NR_UE_info_t* rm_ue, int idx) {
for (int i = 0; i < MAX_MOBILES_PER_GNB; i++) {
// check for NULL: with multiple UEs, an earlier UE may already have been moved, leaving a gap
if(si->s[idx]->UE_list[i] != NULL) {
if (si->s[idx]->UE_list[i]->rnti == rm_ue->rnti) {
si->s[idx]->UE_list[i] = NULL;
rm_ue->dl_id = -1;
break;
}
}
}
}
void nr_slicing_move_UE(nr_slice_info_t *si, NR_UE_info_t* assoc_ue, int old_idx, int new_idx) {
DevAssert(new_idx >= -1 && new_idx < si->num);
DevAssert(old_idx >= -1 && old_idx < si->num);
// remove UE from old slice
nr_slicing_remove_UE(si, assoc_ue, old_idx);
// add UE to new slice
for (int i = 0; i < MAX_MOBILES_PER_GNB; i++) {
if (si->s[new_idx]->UE_list[i] == NULL) {
assoc_ue->dl_id = si->s[new_idx]->id;
si->s[new_idx]->UE_list[i] = assoc_ue;
break;
}
}
}
int _nr_exists_slice(uint8_t n, nr_slice_t **s, int id) {
for (int i = 0; i < n; ++i)
if (s[i]->id == id)
return i;
return -1;
}
nr_slice_t *_nr_add_slice(uint8_t *n, nr_slice_t **s) {
s[*n] = calloc(1, sizeof(nr_slice_t));
if (!s[*n])
return NULL;
create_nr_list(&s[*n]->UEs, MAX_MOBILES_PER_GNB);
*n += 1;
return s[*n - 1];
}
nr_slice_t *_nr_remove_slice(uint8_t *n, nr_slice_t **s, int idx) {
if (idx >= *n)
return NULL;
nr_slice_t *sr = s[idx];
for (int i = idx + 1; i < *n; ++i)
s[i - 1] = s[i];
*n -= 1;
s[*n] = NULL;
if (sr->label)
free(sr->label);
return sr;
}
/************************* NVS Slicing Implementation **************************/
typedef struct {
float exp; // exponential weight. mov. avg for weight calc
int rb; // number of RBs this slice has been scheduled in last round
float eff; // effective rate for rate slices
float beta_eff; // averaging coeff so we average over roughly one second
int active; // activity state for rate slices
} _nvs_int_t;
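These fields drive the NVS weight computation in nvs_nr_dl() further below. As a hedged restatement of that logic (a compact mirror, not a replacement): each slot, a slice's exponentially averaged served share exp is updated with factor BETA, and the slice with the largest weight reserved/exp is scheduled.
/* Hedged restatement of the per-slice NVS weight; nvs_nr_dl() below is the
 * authoritative version, including the activity handling for rate slices. */
static float nvs_weight_sketch(const nvs_nr_slice_param_t *p, _nvs_int_t *ip, float inst)
{
  ip->exp = (1.0f - BETA) * ip->exp + BETA * inst; /* moving average of the served share */
  if (p->type == NVS_RATE) /* inst is the instantaneous rate in Mbps */
    return p->Mbps_reserved * min(1.0f, ip->eff / p->Mbps_reference) / ip->exp;
  else /* NVS_RES: inst is the fraction of RBs served */
    return p->pct_reserved / ip->exp;
}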
int _nvs_nr_admission_control(const nr_slice_info_t *si,
const nvs_nr_slice_param_t *p,
int idx)
{
if (p->type != NVS_RATE && p->type != NVS_RES)
RET_FAIL(-1, "%s(): invalid slice type %d\n", __func__, p->type);
if (p->type == NVS_RATE && p->Mbps_reserved > p->Mbps_reference)
RET_FAIL(-1,
"%s(): a rate slice cannot reserve more than the reference rate\n",
__func__);
if (p->type == NVS_RES && p->pct_reserved > 1.0f)
RET_FAIL(-1, "%s(): cannot reserve more than 1.0\n", __func__);
float sum_req = 0.0f;
for (int i = 0; i < si->num; ++i) {
const nvs_nr_slice_param_t *sp = i == idx ? p : si->s[i]->algo_data;
if (sp->type == NVS_RATE)
sum_req += sp->Mbps_reserved / sp->Mbps_reference;
else
sum_req += sp->pct_reserved;
}
if (idx < 0) { /* not an existing slice */
if (p->type == NVS_RATE)
sum_req += p->Mbps_reserved / p->Mbps_reference;
else
sum_req += p->pct_reserved;
}
if (sum_req > 1.0)
RET_FAIL(-3,
"%s(): admission control failed: sum of resources is %f > 1.0\n",
__func__, sum_req);
return 0;
}
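As a worked example of this check (numbers are illustrative only): with an existing rate slice reserving 25 Mbps against a 100 Mbps reference (contribution 0.25) and a resource slice with pct_reserved = 0.5, the sum is 0.75, so a further resource slice of 0.2 would be admitted (0.95 <= 1.0), whereas one of 0.3 would be rejected with -3 (1.05 > 1.0).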
int addmod_nvs_nr_slice_dl(nr_slice_info_t *si,
int id,
char *label,
void *algo,
void *slice_params_dl)
{
nvs_nr_slice_param_t *dl = slice_params_dl;
int index = _nr_exists_slice(si->num, si->s, id);
if (index < 0 && si->num >= MAX_NVS_SLICES)
RET_FAIL(-2, "%s(): cannot handle more than %d slices\n", __func__, MAX_NVS_SLICES);
if (index < 0 && !dl)
RET_FAIL(-100, "%s(): no parameters for new slice %d, aborting\n", __func__, id);
if (dl) {
int rc = _nvs_nr_admission_control(si, dl, index);
if (rc < 0)
return rc;
}
nr_slice_t *s = NULL;
if (index >= 0) {
s = si->s[index];
if (label) {
if (s->label) free(s->label);
s->label = label;
}
if (algo && s->dl_algo.run != ((nr_dl_sched_algo_t*)algo)->run) {
s->dl_algo.unset(&s->dl_algo.data);
s->dl_algo = *(nr_dl_sched_algo_t*) algo;
if (!s->dl_algo.data)
s->dl_algo.data = s->dl_algo.setup();
}
if (dl) {
free(s->algo_data);
s->algo_data = dl;
} else { /* we have no parameters: we are done */
return index;
}
} else {
if (!algo)
RET_FAIL(-14, "%s(): no scheduler algorithm provided\n", __func__);
s = _nr_add_slice(&si->num, si->s);
if (!s)
RET_FAIL(-4, "%s(): cannot allocate memory for slice\n", __func__);
s->int_data = malloc(sizeof(_nvs_int_t));
if (!s->int_data)
RET_FAIL(-5, "%s(): cannot allocate memory for slice internal data\n", __func__);
s->id = id;
s->label = label;
s->dl_algo = *(nr_dl_sched_algo_t*) algo;
if (!s->dl_algo.data)
s->dl_algo.data = s->dl_algo.setup();
s->algo_data = dl;
}
_nvs_int_t *nvs_p = s->int_data;
/* reset all slice-internal parameters */
nvs_p->rb = 0;
nvs_p->active = 0;
if (dl->type == NVS_RATE) {
nvs_p->exp = dl->Mbps_reserved / dl->Mbps_reference;
nvs_p->eff = dl->Mbps_reference;
} else {
nvs_p->exp = dl->pct_reserved;
nvs_p->eff = 0; // not used
}
// scale beta so we (roughly) average the eff rate over 1s
nvs_p->beta_eff = BETA / nvs_p->exp;
return index < 0 ? si->num - 1 : index;
}
//int addmod_nvs_slice_ul(nr_slice_info_t *si,
// int id,
// char *label,
// void *slice_params_ul) {
// nvs_nr_slice_param_t *sp = slice_params_ul;
// int index = _nr_exists_slice(si->num, si->s, id);
// if (index < 0 && si->num >= MAX_NVS_SLICES)
// RET_FAIL(-2, "%s(): cannot handle more than %d slices\n", __func__, MAX_NVS_SLICES);
//
// int rc = _nvs_admission_control(si->num, si->s, sp, index);
// if (rc < 0)
// return rc;
//
// nr_slice_t *ns = NULL;
// if (index < 0) {
// ns = _add_slice(&si->num, si->s);
// if (!ns)
// RET_FAIL(-4, "%s(): cannot allocate memory for slice\n", __func__);
// ns->id = id;
// ns->int_data = malloc(sizeof(_nvs_int_t));
// if (!ns->int_data)
// RET_FAIL(-5, "%s(): cannot allocate memory for slice internal data\n",
// __func__);
// } else {
// ns = si->s[index];
// free(ns->algo_data);
// }
// if (label) {
// if (ns->label)
// free(ns->label);
// ns->label = label;
// }
// ns->algo_data = sp;
// _nvs_int_t *nvs_p = ns->int_data;
// nvs_p->rb = 0;
// nvs_p->active = 0;
// if (sp->type == NVS_RATE) {
// nvs_p->exp = sp->Mbps_reserved;
// nvs_p->eff = sp->Mbps_reference;
// } else {
// nvs_p->exp = sp->pct_reserved;
// nvs_p->eff = 0; // not used
// }
//
// return si->num - 1;
//}
int remove_nvs_nr_slice_dl(nr_slice_info_t *si, uint8_t slice_idx)
{
if (slice_idx == 0 && si->num <= 1)
return 0;
UE_iterator(si->s[slice_idx]->UE_list, rm_ue) {
nr_slicing_remove_UE(si, rm_ue, slice_idx);
rm_ue->dl_id = si->s[0]->id;
LOG_D(NR_MAC, "%s(), move UE rnti 0x%04x in slice ID %d idx %d to slice ID %d idx %d\n",
__func__, rm_ue->rnti, si->s[slice_idx]->id, slice_idx, si->s[0]->id, 0);
for (int i = 0; i < MAX_MOBILES_PER_GNB; i++) {
if (si->s[0]->UE_list[i] == NULL) {
si->s[0]->UE_list[i] = rm_ue;
break;
}
}
}
nr_slice_t *sr = _nr_remove_slice(&si->num, si->s, slice_idx);
if (!sr)
return 0;
free(sr->algo_data);
free(sr->int_data);
sr->dl_algo.unset(&sr->dl_algo.data);
free(sr);
return 1;
}
//int remove_nvs_slice_ul(nr_slice_info_t *si, uint8_t slice_idx) {
// if (slice_idx == 0)
// return 0;
// nr_slice_t *sr = _remove_slice(&si->num, si->s, si->UE_assoc_slice, slice_idx);
// if (!sr)
// return 0;
// free(sr->algo_data);
// free(sr->int_data);
// free(sr);
// return 1;
//}
extern void nr_store_dlsch_buffer(module_id_t, frame_t, sub_frame_t);
void nvs_nr_dl(module_id_t mod_id,
frame_t frame,
sub_frame_t slot)
{
gNB_MAC_INST *nrmac = RC.nrmac[mod_id];
NR_UEs_t *UE_info = &nrmac->UE_info;
if (UE_info->list[0] == NULL) /* no UEs at all -> don't bother */
return;
/* check if we are supposed to schedule something */
NR_ServingCellConfigCommon_t *scc = RC.nrmac[mod_id]->common_channels[0].ServingCellConfigCommon;
const int CC_id = 0;
/* Get bwpSize and TDA from the first UE */
/* This is temporary and assumes all UEs have the same BWP and TDA */
NR_UE_info_t *first_UE=UE_info->list[0];
NR_UE_sched_ctrl_t *sched_ctrl = &first_UE->UE_sched_ctrl;
NR_UE_DL_BWP_t *current_BWP = &first_UE->current_DL_BWP;
const int tda = get_dl_tda(RC.nrmac[mod_id], scc, slot);
int startSymbolIndex, nrOfSymbols;
const int coresetid = sched_ctrl->coreset->controlResourceSetId;
const struct NR_PDSCH_TimeDomainResourceAllocationList *tdaList = get_dl_tdalist(current_BWP, coresetid, sched_ctrl->search_space->searchSpaceType->present, TYPE_C_RNTI_);
AssertFatal(tda < tdaList->list.count, "time_domain_allocation %d>=%d\n", tda, tdaList->list.count);
const int startSymbolAndLength = tdaList->list.array[tda]->startSymbolAndLength;
SLIV2SL(startSymbolAndLength, &startSymbolIndex, &nrOfSymbols);
const uint16_t bwpSize = coresetid == 0 ? RC.nrmac[mod_id]->cset0_bwp_size : current_BWP->BWPSize;
const uint16_t BWPStart = coresetid == 0 ? RC.nrmac[mod_id]->cset0_bwp_start : current_BWP->BWPStart;
const uint16_t slbitmap = SL_to_bitmap(startSymbolIndex, nrOfSymbols);
uint16_t *vrb_map = RC.nrmac[mod_id]->common_channels[CC_id].vrb_map;
uint16_t rballoc_mask[bwpSize];
int n_rb_sched = 0;
for (int i = 0; i < bwpSize; i++) {
// calculate mask: init with "NOT" vrb_map:
// if a symbol of this RB is blocked (1) in vrb_map, the corresponding mask bit is 0
rballoc_mask[i] = (~vrb_map[i+BWPStart])&0x3fff; // bitwise NOT, keep the 14 symbol bits
// if all the pdsch symbols are free
if ((rballoc_mask[i]&slbitmap) == slbitmap) {
n_rb_sched++;
}
}
/* Retrieve amount of data to send */
nr_store_dlsch_buffer(mod_id, frame, slot);
nr_slice_info_t *si = RC.nrmac[mod_id]->pre_processor_dl.slices;
int bytes_last_round = 0;
UE_iterator(UE_info->list, UE) {
const NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl;
bytes_last_round += UE->mac_stats.dl.current_bytes;
int s_idx;
for (s_idx = 0; s_idx < si->num; s_idx++)
if (si->s[s_idx]->id == UE->dl_id)
break;
DevAssert(s_idx >= 0 && s_idx < si->num);
/* if UE has data or retransmission, mark respective slice as active */
const int retx_pid = sched_ctrl->retrans_dl_harq.head;
const bool active = sched_ctrl->num_total_bytes > 0 || retx_pid >= 0;
((_nvs_int_t *)si->s[s_idx]->int_data)->active |= active;
}
float maxw = 0.0f;
int maxidx = -1;
for (int i = 0; i < si->num; ++i) {
nr_slice_t *s = si->s[i];
nvs_nr_slice_param_t *p = s->algo_data;
_nvs_int_t *ip = s->int_data;
float w = 0.0f;
if (p->type == NVS_RATE) {
/* if this slice has been marked as inactive, skip it so that its
* exponentially averaged rate is not uselessly driven down */
if (!ip->active)
continue;
float inst = 0.0f;
if (ip->rb > 0) { /* it was scheduled last round */
/* inst rate in Mbps: bytes in last round * 8 (bit) / 1e6 (-> Mbit) * 1000 (per 1 ms) = bytes * 8 / 1000 */
inst = (float) bytes_last_round * 8 / 1000;
ip->eff = (1.0f - ip->beta_eff) * ip->eff + ip->beta_eff * inst;
//LOG_W(NR_MAC, "i %d slice %d ip->rb %d inst %f ip->eff %f\n", i, s->id, ip->rb, inst, ip->eff);
ip->rb = 0;
}
ip->exp = (1 - BETA) * ip->exp + BETA * inst;
const float rsv = p->Mbps_reserved * min(1.0f, ip->eff / p->Mbps_reference);
w = rsv / ip->exp;
} else {
float inst = (float)ip->rb / bwpSize;
ip->exp = (1.0f - BETA) * ip->exp + BETA * inst;
w = p->pct_reserved / ip->exp;
}
//LOG_I(NR_MAC, "i %d slice %d type %d ip->exp %f w %f\n", i, s->id, p->type, ip->exp, w);
ip->rb = 0;
if (w > maxw + 0.001f) {
maxw = w;
maxidx = i;
}
}
if (maxidx < 0)
return;
((_nvs_int_t *)si->s[maxidx]->int_data)->rb = n_rb_sched;
//int rbg_rem = n_rb_sched;
if (si->s[maxidx]->UE_list[0] != NULL) {
/*rbg_rem = */
LOG_D(NR_MAC, "%4d.%2d scheduling slice idx %d ID %d (first UE rnti 0x%04x)\n", frame, slot, maxidx, si->s[maxidx]->id, si->s[maxidx]->UE_list[0]->rnti);
si->s[maxidx]->dl_algo.run(mod_id,
frame,
slot,
si->s[maxidx]->UE_list,
2, // max_num_ue
n_rb_sched,
rballoc_mask,
si->s[maxidx]->dl_algo.data);
} else {
LOG_D(NR_MAC, "%4d.%2d not scheduling slice idx %d ID %d (no UEs)\n", frame, slot, maxidx, si->s[maxidx]->id);
}
// TODO
//if (rbg_rem == n_rbg_sched) // if no RBGs have been used mark as inactive
// ((_nvs_int_t *)si->s[maxidx]->int_data)->active = 0;
}
/*
void nvs_ul(module_id_t mod_id,
int CC_id,
frame_t frame,
sub_frame_t subframe,
frame_t sched_frame,
sub_frame_t sched_subframe) {
ulsch_scheduler_pre_processor(mod_id, CC_id, frame, subframe, sched_frame, sched_subframe);
}
*/
void nvs_nr_destroy(nr_slice_info_t **si) {
const int n_dl = (*si)->num;
(*si)->num = 0;
for (int i = 0; i < n_dl; ++i) {
nr_slice_t *s = (*si)->s[i];
if (s->label)
free(s->label);
free(s->algo_data);
free(s->int_data);
free(s);
}
free((*si)->s);
}
nr_pp_impl_param_dl_t nvs_nr_dl_init(module_id_t mod_id, int CC_id)
{
nr_slice_info_t *si = calloc(1, sizeof(nr_slice_info_t));
DevAssert(si);
si->num = 0;
si->s = calloc(MAX_NVS_SLICES, sizeof(nr_slice_t));
DevAssert(si->s);
for (int i = 0; i < MAX_MOBILES_PER_GNB; ++i)
si->UE_assoc_slice[i] = -1;
/* insert default slice, all resources */
nvs_nr_slice_param_t *dlp = malloc(sizeof(nvs_nr_slice_param_t));
DevAssert(dlp);
dlp->type = NVS_RES;
dlp->pct_reserved = 1.0f;
nr_dl_sched_algo_t *algo = &RC.nrmac[mod_id]->pre_processor_dl.dl_algo;
algo->data = NULL;
const int rc = addmod_nvs_nr_slice_dl(si, 0, strdup("default"), algo, dlp);
DevAssert(0 == rc);
NR_UEs_t *UE_info = &RC.nrmac[mod_id]->UE_info;
nr_slicing_add_UE(si, UE_info->list);
nr_pp_impl_param_dl_t nvs;
nvs.algorithm = NVS_SLICING;
nvs.add_UE = nr_slicing_add_UE;
nvs.remove_UE = nr_slicing_remove_UE;
nvs.move_UE = nr_slicing_move_UE;
nvs.get_UE_slice_idx = nr_slicing_get_UE_slice_idx;
nvs.get_UE_idx = nr_slicing_get_UE_idx;
nvs.addmod_slice = addmod_nvs_nr_slice_dl;
nvs.remove_slice = remove_nvs_nr_slice_dl;
nvs.dl = nvs_nr_dl;
// current DL algo becomes default scheduler
nvs.dl_algo = *algo;
nvs.destroy = nvs_nr_destroy;
nvs.slices = si;
return nvs;
}
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
/*!
* \file nr_slicing.h
* \brief General NR slice definition and helper parameters
* \author Robert Schmidt
* \date 2021
* \email robert.schmidt@eurecom.fr
*/
#ifndef NR_SLICING_H__
#define NR_SLICING_H__
#include "openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h"
typedef struct nr_slice_s {
/// Arbitrary ID
slice_id_t id;
/// Arbitrary label
char *label;
nr_dl_sched_algo_t dl_algo;
/// A specific algorithm's implementation parameters
void *algo_data;
/// Internal data that might be kept alongside a slice's params
void *int_data;
// list of users in this slice
NR_list_t UEs;
NR_UE_info_t *UE_list[MAX_MOBILES_PER_GNB+1];
} nr_slice_t;
typedef struct nr_slice_info_s {
uint8_t num;
nr_slice_t **s;
uint8_t UE_assoc_slice[MAX_MOBILES_PER_GNB+1];
} nr_slice_info_t;
#define NVS_SLICING 20
/* arbitrary upper limit, increase if you want to instantiate more slices */
#define MAX_NVS_SLICES 10
/* window for slice weight averaging -> 1s for fine granularity */
#define BETA 0.001f
typedef struct {
enum nvs_type {NVS_RATE, NVS_RES} type;
union {
struct { float Mbps_reserved; float Mbps_reference; };
struct { float pct_reserved; };
};
} nvs_nr_slice_param_t;
nr_pp_impl_param_dl_t nvs_nr_dl_init(module_id_t mod_id, int CC_id);
#endif /* NR_SLICING_H__ */
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
/*!
* \file nr_slicing_internal.h
* \brief Internal NR slice helper functions
* \author Robert Schmidt
* \date 2021
* \email robert.schmidt@eurecom.fr
*/
#ifndef NR_SLICING_INTERNAL_H__
#define NR_SLICING_INTERNAL_H__
#include "nr_slicing.h"
void nr_slicing_add_UE(nr_slice_info_t *si, NR_UE_info_t **UE_list);
void nr_slicing_remove_UE(nr_slice_info_t *si, NR_UE_info_t* rm_ue, int idx);
void nr_slicing_move_UE(nr_slice_info_t *si, NR_UE_info_t* assoc_ue, int old_idx, int new_idx);
int nr_slicing_get_UE_slice_idx(nr_slice_info_t *si, uint16_t rnti);
int nr_slicing_get_UE_idx(nr_slice_t *si, uint16_t rnti);
nr_slice_t *_nr_add_slice(uint8_t *n, nr_slice_t **s);
nr_slice_t *_nr_remove_slice(uint8_t *n, nr_slice_t **s, int idx);
#endif /* NR_SLICING_INTERNAL_H__ */