Commit 7a8ef65b authored by hardy

Merge remote-tracking branch 'origin/nr_pucch_pusch_sameslot' into integration_2021_wk51_a

parents 06b9e3f3 6e2518ec
@@ -233,20 +233,15 @@ nrUE_params_t *get_nrUE_params(void) {
/* initialize thread pools used for NRUE processing parallelization */
void init_tpools(uint8_t nun_dlsch_threads) {
char *params = NULL;
if (IS_SOFTMODEM_RFSIM) {
params = calloc(1,2);
memcpy(params,"N",1);
}
else {
params = calloc(1,(NR_RX_NB_TH*NR_NB_TH_SLOT*3)+1);
for (int i=0; i<NR_RX_NB_TH*NR_NB_TH_SLOT; i++) {
memcpy(params+(i*3),"-1,",3);
}
}
initTpool(params, &(nrUE_params.Tpool), false);
free(params);
init_dlsch_tpool( nun_dlsch_threads);
}
static void get_options(void) {
nrUE_params.ofdm_offset_divisor = 8;
......
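For context, a minimal standalone sketch of the parameter string that init_tpools() above hands to initTpool(): a single "N" under the RF simulator, otherwise one "-1," entry per RX worker. This is illustrative only; the NR_RX_NB_TH and NR_NB_TH_SLOT values and the helper name build_tpool_params are assumptions, not part of the commit.

/* Illustrative sketch only: prints the params string init_tpools() builds.
 * NR_RX_NB_TH and NR_NB_TH_SLOT are assumed values for the demo. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_RX_NB_TH 2      /* assumed for the demo */
#define NR_NB_TH_SLOT 2    /* assumed for the demo */

static char *build_tpool_params(bool rfsim) {   /* hypothetical helper */
  char *params;
  if (rfsim) {
    params = calloc(1, 2);
    memcpy(params, "N", 1);                     /* RF simulator case */
  } else {
    params = calloc(1, (NR_RX_NB_TH * NR_NB_TH_SLOT * 3) + 1);
    for (int i = 0; i < NR_RX_NB_TH * NR_NB_TH_SLOT; i++)
      memcpy(params + (i * 3), "-1,", 3);       /* one "-1," entry per worker */
  }
  return params;
}

int main(void) {
  char *p = build_tpool_params(false);
  printf("thread-pool params: \"%s\"\n", p);    /* prints "-1,-1,-1,-1," */
  free(p);
  return 0;
}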
@@ -57,6 +57,7 @@
/// UL_CONFIG_REQ
#define FAPI_NR_UL_CONFIG_LIST_NUM 10
#define FAPI_NR_UL_CONFIG_TYPE_DONE 0x00
#define FAPI_NR_UL_CONFIG_TYPE_PRACH 0x01
#define FAPI_NR_UL_CONFIG_TYPE_PUCCH 0x02
#define FAPI_NR_UL_CONFIG_TYPE_PUSCH 0x03
......
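A side note on the new FAPI_NR_UL_CONFIG_TYPE_DONE value above: since it is defined as 0x00, zeroing the PDU list with memset (as the clearing code further down does) leaves every entry reading as done/empty. The sketch below only illustrates that property; the ul_entry_t stand-in type is hypothetical.

/* Illustrative sketch (hypothetical stand-in type): a zeroed list reads as
 * all FAPI_NR_UL_CONFIG_TYPE_DONE, i.e. nothing pending. */
#include <stdio.h>
#include <string.h>

#define FAPI_NR_UL_CONFIG_LIST_NUM 10
#define FAPI_NR_UL_CONFIG_TYPE_DONE 0x00
#define FAPI_NR_UL_CONFIG_TYPE_PUCCH 0x02

typedef struct { unsigned char pdu_type; } ul_entry_t;  /* stand-in element */

int main(void) {
  ul_entry_t list[FAPI_NR_UL_CONFIG_LIST_NUM];
  memset(list, 0, sizeof(list));                   /* all entries now "done" */
  list[0].pdu_type = FAPI_NR_UL_CONFIG_TYPE_PUCCH; /* queue one PUCCH PDU */
  for (int i = 0; i < FAPI_NR_UL_CONFIG_LIST_NUM; i++)
    printf("entry %d: %s\n", i,
           list[i].pdu_type == FAPI_NR_UL_CONFIG_TYPE_DONE ? "done/empty" : "pending");
  return 0;
}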
@@ -153,8 +153,10 @@ int8_t nr_ue_scheduled_response(nr_scheduled_response_t *scheduled_response){
if (scheduled_response->ul_config != NULL){
fapi_nr_ul_config_request_t *ul_config = scheduled_response->ul_config;
int pdu_done = 0;
pthread_mutex_lock(&ul_config->mutex_ul_config);
LOG_D(PHY, "%d.%d ul S ul_config %p pdu_done %d number_pdus %d\n", scheduled_response->frame, slot, ul_config, pdu_done, ul_config->number_pdus);
for (i = 0; i < ul_config->number_pdus; ++i){
AssertFatal(ul_config->ul_config_list[i].pdu_type <= FAPI_NR_UL_CONFIG_TYPES,"pdu_type %d out of bounds\n",ul_config->ul_config_list[i].pdu_type);
@@ -168,6 +170,7 @@ int8_t nr_ue_scheduled_response(nr_scheduled_response_t *scheduled_response){
nfapi_nr_ue_pusch_pdu_t *pusch_config_pdu;
/* PUCCH */
fapi_nr_ul_config_pucch_pdu *pucch_config_pdu;
LOG_D(PHY, "%d.%d ul B ul_config %p t %d pdu_done %d number_pdus %d\n", scheduled_response->frame, slot, ul_config, pdu_type, pdu_done, ul_config->number_pdus);
switch (pdu_type){
@@ -189,10 +192,13 @@ int8_t nr_ue_scheduled_response(nr_scheduled_response_t *scheduled_response){
if (scheduled_response->tx_request) {
for (int j=0; j<scheduled_response->tx_request->number_of_pdus; j++) {
fapi_nr_tx_request_body_t *tx_req_body = &scheduled_response->tx_request->tx_request_body[j];
if (tx_req_body->pdu_index == i) {
if ((tx_req_body->pdu_index == i) && (tx_req_body->pdu_length > 0)) {
LOG_D(PHY,"%d.%d Copying %d bytes to harq_process_ul_ue->a (harq_pid %d)\n",scheduled_response->frame,slot,tx_req_body->pdu_length,current_harq_pid);
memcpy(harq_process_ul_ue->a, tx_req_body->pdu, tx_req_body->pdu_length);
harq_process_ul_ue->status = ACTIVE;
ul_config->ul_config_list[i].pdu_type = FAPI_NR_UL_CONFIG_TYPE_DONE; // do not handle it again
pdu_done++;
LOG_D(PHY, "%d.%d ul A ul_config %p t %d pdu_done %d number_pdus %d\n", scheduled_response->frame, slot, ul_config, pdu_type, pdu_done, ul_config->number_pdus);
break;
}
}
@@ -216,6 +222,9 @@ int8_t nr_ue_scheduled_response(nr_scheduled_response_t *scheduled_response){
memcpy((void*)&(pucch_vars->pucch_pdu[j]), (void*)pucch_config_pdu, sizeof(fapi_nr_ul_config_pucch_pdu));
pucch_vars->active[j] = true;
found = true;
ul_config->ul_config_list[i].pdu_type = FAPI_NR_UL_CONFIG_TYPE_DONE; // do not handle it again
pdu_done++;
LOG_D(PHY, "%d.%d ul A ul_config %p t %d pdu_done %d number_pdus %d\n", scheduled_response->frame, slot, ul_config, pdu_type, pdu_done, ul_config->number_pdus);
break;
}
}
@@ -227,18 +236,34 @@ int8_t nr_ue_scheduled_response(nr_scheduled_response_t *scheduled_response){
// prach config pdu
prach_config_pdu = &ul_config->ul_config_list[i].prach_config_pdu;
memcpy((void*)&(PHY_vars_UE_g[module_id][cc_id]->prach_vars[gNB_id]->prach_pdu), (void*)prach_config_pdu, sizeof(fapi_nr_ul_config_prach_pdu));
ul_config->ul_config_list[i].pdu_type = FAPI_NR_UL_CONFIG_TYPE_DONE; // do not handle it again
pdu_done++;
LOG_D(PHY, "%d.%d ul A ul_config %p t %d pdu_done %d number_pdus %d\n", scheduled_response->frame, slot, ul_config, pdu_type, pdu_done, ul_config->number_pdus);
break;
case (FAPI_NR_UL_CONFIG_TYPE_DONE):
pdu_done++; // count the number of PDUs already processed
LOG_D(PHY, "%d.%d ul A ul_config %p t %d pdu_done %d number_pdus %d\n", scheduled_response->frame, slot, ul_config, pdu_type, pdu_done, ul_config->number_pdus);
break;
default:
ul_config->ul_config_list[i].pdu_type = FAPI_NR_UL_CONFIG_TYPE_DONE; // do not handle it again
pdu_done++; // count the number of PDUs processed
LOG_D(PHY, "%d.%d ul A ul_config %p t %d pdu_done %d number_pdus %d\n", scheduled_response->frame, slot, ul_config, pdu_type, pdu_done, ul_config->number_pdus);
break;
}
}
// Clear the fields when all the config PDUs are done
if (pdu_done == ul_config->number_pdus) {
if (scheduled_response->tx_request)
scheduled_response->tx_request->number_of_pdus = 0;
ul_config->sfn = 0;
ul_config->slot = 0;
ul_config->number_pdus = 0;
LOG_D(PHY, "%d.%d clear ul_config %p\n", scheduled_response->frame, slot, ul_config);
memset(ul_config->ul_config_list, 0, sizeof(ul_config->ul_config_list));
}
pthread_mutex_unlock(&ul_config->mutex_ul_config);
}
}
......
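The hunk above follows one pattern throughout: take the ul_config mutex, walk the PDU list, mark each handled entry FAPI_NR_UL_CONFIG_TYPE_DONE and count it in pdu_done, then reset the whole request once every PDU is done. A minimal standalone sketch of that bookkeeping, with hypothetical ul_cfg_t/process_ul_config names and deliberately simplified types:

/* Minimal standalone sketch of the mark-done / count / clear pattern
 * (hypothetical types and names; not the OAI structures themselves). */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define TYPE_DONE  0x00
#define TYPE_PUCCH 0x02
#define LIST_NUM   10

typedef struct {
  pthread_mutex_t mutex;
  int number_pdus;
  struct { int pdu_type; } list[LIST_NUM];
} ul_cfg_t;

static void process_ul_config(ul_cfg_t *cfg) {
  int pdu_done = 0;
  pthread_mutex_lock(&cfg->mutex);
  for (int i = 0; i < cfg->number_pdus; i++) {
    switch (cfg->list[i].pdu_type) {
      case TYPE_DONE:
        pdu_done++;                         /* already handled in an earlier pass */
        break;
      default:
        /* ... hand the PDU to PHY here ... */
        cfg->list[i].pdu_type = TYPE_DONE;  /* do not handle it again */
        pdu_done++;
        break;
    }
  }
  if (pdu_done == cfg->number_pdus) {       /* everything consumed: reset for reuse */
    cfg->number_pdus = 0;
    memset(cfg->list, 0, sizeof(cfg->list));
  }
  pthread_mutex_unlock(&cfg->mutex);
}

int main(void) {
  ul_cfg_t cfg = { .mutex = PTHREAD_MUTEX_INITIALIZER, .number_pdus = 2 };
  cfg.list[0].pdu_type = TYPE_PUCCH;
  cfg.list[1].pdu_type = TYPE_PUCCH;
  process_ul_config(&cfg);
  printf("number_pdus after processing: %d\n", cfg.number_pdus);  /* prints 0 */
  return 0;
}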
@@ -60,6 +60,12 @@ static prach_association_pattern_t prach_assoc_pattern;
static ssb_list_info_t ssb_list;
void fill_ul_config(fapi_nr_ul_config_request_t *ul_config, frame_t frame_tx, int slot_tx, uint8_t pdu_type){
// clear ul_config for new frame/slot
if ((ul_config->slot != slot_tx || ul_config->sfn != frame_tx) && ul_config->number_pdus != 0) {
LOG_D(MAC, "%d.%d %d.%d f clear ul_config %p t %d pdu %d\n", frame_tx, slot_tx, ul_config->sfn, ul_config->slot, ul_config, pdu_type, ul_config->number_pdus);
ul_config->number_pdus = 0;
memset(ul_config->ul_config_list, 0, sizeof(ul_config->ul_config_list));
}
ul_config->ul_config_list[ul_config->number_pdus].pdu_type = pdu_type;
ul_config->slot = slot_tx;
ul_config->sfn = frame_tx;
@@ -931,8 +937,8 @@ NR_UE_L2_STATE_t nr_ue_scheduler(nr_downlink_indication_t *dl_info, nr_uplink_in
} else if (ul_info) {
int cc_id = ul_info->cc_id;
frame_t rx_frame = ul_info->frame_rx;
slot_t rx_slot = ul_info->slot_rx;
//frame_t rx_frame = ul_info->frame_rx;
//slot_t rx_slot = ul_info->slot_rx;
frame_t frame_tx = ul_info->frame_tx;
slot_t slot_tx = ul_info->slot_tx;
module_id_t mod_id = ul_info->module_id;
@@ -1020,7 +1026,7 @@ NR_UE_L2_STATE_t nr_ue_scheduler(nr_downlink_indication_t *dl_info, nr_uplink_in
}
pthread_mutex_unlock(&ul_config->mutex_ul_config); // avoid double lock
fill_scheduled_response(&scheduled_response, NULL, ul_config, &tx_req, mod_id, cc_id, rx_frame, rx_slot, ul_info->thread_id);
fill_scheduled_response(&scheduled_response, NULL, ul_config, &tx_req, mod_id, cc_id, frame_tx, slot_tx, ul_info->thread_id);
if(mac->if_module != NULL && mac->if_module->scheduled_response != NULL){
mac->if_module->scheduled_response(&scheduled_response);
}
......
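Two things change in this file: fill_ul_config() now flushes entries left over from an earlier TX occasion before queuing a new PDU, and the scheduled response is filled with frame_tx/slot_tx rather than the RX timing. A standalone sketch of the flush guard, with hypothetical names (ul_cfg_t, fill) and PDU-type values taken from the defines above:

/* Illustrative sketch (hypothetical types): a new TX frame/slot with leftover
 * entries triggers a flush before the next PDU is queued. */
#include <stdio.h>
#include <string.h>

#define LIST_NUM 10

typedef struct {
  int sfn, slot, number_pdus;
  struct { int pdu_type; } list[LIST_NUM];
} ul_cfg_t;

static void fill(ul_cfg_t *c, int frame_tx, int slot_tx, int pdu_type) {
  /* clear stale entries queued for a previous frame/slot */
  if ((c->slot != slot_tx || c->sfn != frame_tx) && c->number_pdus != 0) {
    c->number_pdus = 0;
    memset(c->list, 0, sizeof(c->list));
  }
  c->list[c->number_pdus].pdu_type = pdu_type;
  c->sfn = frame_tx;
  c->slot = slot_tx;
  c->number_pdus++;
}

int main(void) {
  ul_cfg_t c = {0};
  fill(&c, 100, 8, 0x03);   /* PUSCH queued for 100.8 */
  fill(&c, 100, 9, 0x02);   /* new slot 100.9: the stale entry is flushed first */
  printf("pdus=%d sfn=%d slot=%d\n", c.number_pdus, c.sfn, c.slot);  /* 1 100 9 */
  return 0;
}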
@@ -68,64 +68,6 @@ typedef enum {
} UE_MAC_Tx_ind_type_e;
// *** UE_UL_Config.request related structures
typedef struct{
//module_id_t module_idP;
//int CC_id;
//frame_t frameP;
uint8_t eNB_id;
//uint16_t rnti;
//sub_frame_t subframe_tx;
uint32_t SR_payload; //0 or 1
}UE_MAC_ul_config_SR;
typedef struct{
//module_id_t module_idP;
//int CC_id;
//frame_t frameP;
uint8_t eNB_indexP;
//sub_frame_t subframeP;
uint8_t ra_RACH_MaskIndex;
int8_t ra_PREAMBLE_RECEIVED_TARGET_POWER;
uint8_t ra_TDD_map_index;
uint16_t ra_RNTI;
uint8_t *Msg3;
}UE_MAC_ul_config_rach;
typedef struct {
union {
UE_MAC_ul_config_rach ue_rach_config;
//UE_MAC_ul_config_ULSCH ue_ULSCH_pdu;
UE_MAC_ul_config_SR ue_SR_config;
};
} UE_MAC_ul_config_request_list;
typedef struct {
nfapi_tl_t tl;
uint16_t length_list;
UE_MAC_ul_config_request_list* ue_ul_config_list;
} UE_MAC_ul_config_request_body_t;
typedef struct {
//nfapi_p7_message_header_t header;
uint16_t sfn_sf;
UE_MAC_ul_config_request_body_t ue_ul_config_request_body; //nfapi_dl_config_request_body_t
} UE_MAC_ul_config_request_t;
// *** UE_Tx.request related structures
typedef struct {
......