Commit 76d299f1 authored by Remi Hardy

Integration_2021_wk45_c

MR !1307: WIP: [CI] real time stats and monitoring update
-MR !1280 (closed): MAC statistics in a separate thread
-log X1AP-PDU messages for eNB
-update real time stats collection (read from the stats log file instead of stdout); a sketch of the idea follows this list
-update runtime monitoring (conf file, pages)
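A minimal sketch of the stats-collection idea, assuming the softmodem periodically rewrites a stats file that the CI can poll; the file name (nrMAC_stats.log) and the counter name are illustrative placeholders, not the exact identifiers used by these scripts:

import re
import time

def poll_stats(path='nrMAC_stats.log', period=1.0, duration=10.0):
    """Poll a periodically rewritten stats file instead of scraping stdout."""
    samples = []
    deadline = time.time() + duration
    while time.time() < deadline:
        try:
            with open(path) as f:
                text = f.read()
        except FileNotFoundError:
            time.sleep(period)  # the softmodem may not have written the file yet
            continue
        # hypothetical counter line, e.g. "dlsch_total_bytes 123456"
        match = re.search(r'dlsch_total_bytes\s+(\d+)', text)
        if match:
            samples.append(int(match.group(1)))
        time.sleep(period)
    return samples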

MR !1301: Fix for blocked process in FR2

MR !1312: CI: adding NSA-B200 sanity check pipeline to automatic regression test-suite
-Validate that all required routes are present
-Copy the image from a "build" server to a "test" server
-Fixes for the log collection on the CoreNetwork components

MR !1282: Add MCS from HARQ BLER
-Estimates the correct MCS using an EMA, kept between the target thresholds dl_bler_target_upper and dl_bler_target_lower (conf options) for the 1st retransmission and below dl_rd2_bler_threshold for the 2nd retransmission (a sketch follows this list)
-Maximum MCS is configurable
-On the 3rd retransmission, reduce MCS by 5
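A minimal Python sketch of the adaptation described above, assuming an exponential moving average (EMA) over first-transmission HARQ feedback; the smoothing factor, step sizes and default thresholds are illustrative (the real logic lives in the MAC scheduler, in C):

def update_mcs(mcs, bler_ema, ack, max_mcs=28,
               dl_bler_target_upper=0.15, dl_bler_target_lower=0.05,
               alpha=0.1):
    # EMA over HARQ feedback for the 1st transmission: a NACK counts as an error
    bler_ema = (1 - alpha) * bler_ema + alpha * (0.0 if ack else 1.0)
    if bler_ema > dl_bler_target_upper and mcs > 0:
        mcs -= 1    # BLER above the upper target: back off
    elif bler_ema < dl_bler_target_lower and mcs < max_mcs:
        mcs += 1    # BLER below the lower target: push the rate
    return mcs, bler_ema

# example: a burst of NACKs drives the MCS down toward robustness
mcs, bler = 20, 0.0
for ack in [False] * 10:
    mcs, bler = update_mcs(mcs, bler, ack)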

Other commits in the integration branch:
-push DL traffic to 60 Mbps
-solve long ping time in SA
parents 6e5527e0 0de1ebde
...@@ -218,6 +218,26 @@ pipeline {
}
}
}
stage ("NSA B200 Sanity Check") {
when { expression {doMandatoryTests} }
steps {
script {
triggerSlaveJob ('RAN-NSA-B200-Module-LTEBOX-Container', 'Test-NSA-B200')
}
}
post {
always {
script {
finalizeSlaveJob('RAN-NSA-B200-Module-LTEBOX-Container')
}
}
failure {
script {
currentBuild.result = 'FAILURE'
}
}
}
}
}
}
stage ("Images Push to Registries") {
...@@ -227,8 +247,14 @@ pipeline {
triggerSlaveJob ('RAN-DockerHub-Push', 'Push-to-Docker-Hub')
}
post {
always {
script {
echo "Push to Docker-Hub OK"
}
}
failure {
script {
echo "Push to Docker-Hub KO"
currentBuild.result = 'FAILURE'
}
}
...@@ -281,6 +307,11 @@ pipeline {
// ---- Slave Job functions
def triggerSlaveJob (jobName, gitlabStatusName) {
if ("MERGE".equals(env.gitlabActionType)) {
MR_NUMBER = env.gitlabMergeRequestIid
} else {
MR_NUMBER = 'develop'
}
// Workaround for the "cancelled" GitLab pipeline notification
// The slave job is triggered with the propagate false so the following commands are executed
// Its status is now PASS/SUCCESS from a stage pipeline point of view
...@@ -290,6 +321,7 @@ def triggerSlaveJob (jobName, gitlabStatusName) {
string(name: 'eNB_Repository', value: String.valueOf(GIT_URL)),
string(name: 'eNB_Branch', value: String.valueOf(env.gitlabSourceBranch)),
string(name: 'eNB_CommitID', value: String.valueOf(env.gitlabMergeRequestLastCommit)),
string(name: 'eNB_MR', value: String.valueOf(MR_NUMBER)),
booleanParam(name: 'eNB_mergeRequest', value: "MERGE".equals(env.gitlabActionType)),
string(name: 'eNB_TargetBranch', value: String.valueOf(env.gitlabTargetBranch))
], propagate: false
...@@ -306,6 +338,11 @@ def triggerSlaveJob (jobName, gitlabStatusName) {
}
def triggerSlaveJobNoGitLab (jobName) {
if ("MERGE".equals(env.gitlabActionType)) {
MR_NUMBER = env.gitlabMergeRequestIid
} else {
MR_NUMBER = 'develop'
}
// Workaround for the "cancelled" GitLab pipeline notification
// The slave job is triggered with the propagate false so the following commands are executed
// Its status is now PASS/SUCCESS from a stage pipeline point of view
...@@ -315,6 +352,7 @@ def triggerSlaveJobNoGitLab (jobName) {
string(name: 'eNB_Repository', value: String.valueOf(GIT_URL)),
string(name: 'eNB_Branch', value: String.valueOf(env.gitlabSourceBranch)),
string(name: 'eNB_CommitID', value: String.valueOf(env.gitlabMergeRequestLastCommit)),
string(name: 'eNB_MR', value: String.valueOf(MR_NUMBER)),
booleanParam(name: 'eNB_mergeRequest', value: "MERGE".equals(env.gitlabActionType)),
string(name: 'eNB_TargetBranch', value: String.valueOf(env.gitlabTargetBranch))
], propagate: false
......
...@@ -51,270 +51,307 @@ def StatusForDb = ""
pipeline {
agent {label pythonExecutor}
options {
disableConcurrentBuilds()
ansiColor('xterm')
lock(extra: [[resource: ciSmartPhonesResource2]], resource: ciSmartPhonesResource1)
}
stages {
stage("Build Init") {
steps {
// update the build name and description
buildName "${params.eNB_MR}"
buildDescription "Branch : ${params.eNB_Branch}"
}
}
stage ("Verify Parameters") {
steps {
script {
echo '\u2705 \u001B[32mVerify Parameters\u001B[0m'
def allParametersPresent = true
// It is already to late to check it
if (params.pythonExecutor != null) {
echo "eNB CI executor node : ${pythonExecutor}"
}
// If not present picking a default Stage Name
if (params.pipelineTestStageName == null) {
// picking default
testStageName = 'Template Test Stage'
}
if (params.SmartPhonesResource1 == null) {
allParametersPresent = false
}
if (params.SmartPhonesResource2 == null) {
allParametersPresent = false
}
// 1st eNB parameters
if (params.eNB_IPAddress == null) {
allParametersPresent = false
}
if (params.eNB_SourceCodePath == null) {
allParametersPresent = false
}
if (params.eNB_Credentials == null) {
allParametersPresent = false
}
// 2nd eNB parameters
if (params.eNB1_IPAddress == null) {
allParametersPresent = false
}
if (params.eNB1_SourceCodePath == null) {
allParametersPresent = false
}
if (params.eNB1_Credentials == null) {
allParametersPresent = false
}
// 3rd eNB parameters
if (params.eNB2_IPAddress == null) {
allParametersPresent = false
}
if (params.eNB2_SourceCodePath == null) {
allParametersPresent = false
}
if (params.eNB2_Credentials == null) {
allParametersPresent = false
}
// the following 4 parameters should be pushed by the master trigger
// if not present, take the job GIT variables (used for developing)
if (params.eNB_Repository == null) {
eNB_Repository = env.GIT_URL
} else {
eNB_Repository = params.eNB_Repository
}
echo "eNB_Repository : ${eNB_Repository}"
if (params.eNB_Branch == null) {
eNB_Branch = env.GIT_BRANCH
} else {
eNB_Branch = params.eNB_Branch
}
echo "eNB_Branch : ${eNB_Branch}"
if (params.eNB_CommitID == null) {
eNB_CommitID = env.GIT_COMMIT
} else {
eNB_CommitID = params.eNB_CommitID
}
echo "eNB_CommitID : ${eNB_CommitID}"
if (params.eNB_AllowMergeRequestProcess!= null) {
eNB_AllowMergeRequestProcess = params.eNB_AllowMergeRequestProcess
if (eNB_AllowMergeRequestProcess) {
if (params.eNB_TargetBranch != null) {
eNB_TargetBranch = params.eNB_TargetBranch
} else {
eNB_TargetBranch = 'develop'
}
echo "eNB_TargetBranch : ${eNB_TargetBranch}"
}
}
if (params.EPC_IPAddress == null) {
allParametersPresent = false
}
if (params.EPC_Type == null) {
allParametersPresent = false
}
if (params.EPC_SourceCodePath == null) {
allParametersPresent = false
}
if (params.EPC_Credentials == null) {
allParametersPresent = false
}
if (params.ADB_IPAddress == null) {
allParametersPresent = false
}
if (params.ADB_Credentials == null) {
allParametersPresent = false
}
if (allParametersPresent) {
echo "All parameters are present"
if (eNB_AllowMergeRequestProcess) {
sh "git fetch"
sh "./ci-scripts/doGitLabMerge.sh --src-branch ${eNB_Branch} --src-commit ${eNB_CommitID} --target-branch ${eNB_TargetBranch} --target-commit latest"
} else {
sh "git fetch"
sh "git checkout -f ${eNB_CommitID}"
}
} else {
echo "Some parameters are missing"
sh "./ci-scripts/fail.sh"
}
}
}
}
stage ("Build and Test") {
steps {
script {
dir ('ci-scripts') {
echo "\u2705 \u001B[32m${testStageName}\u001B[0m"
// If not present picking a default XML file
if (params.pythonTestXmlFile == null) {
// picking default
testXMLFile = 'xml_files/enb_usrpB210_band7_50PRB.xml'
echo "Test XML file(default): ${testXMLFile}"
mainPythonAllXmlFiles += "--XMLTestFile=" + testXMLFile + " "
} else {
String[] myXmlTestSuite = testXMLFile.split("\\r?\\n")
for (xmlFile in myXmlTestSuite) {
if (fileExists(xmlFile)) {
mainPythonAllXmlFiles += "--XMLTestFile=" + xmlFile + " "
echo "Test XML file : ${xmlFile}"
}
}
}
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password'],
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB1_Credentials}", usernameVariable: 'eNB1_Username', passwordVariable: 'eNB1_Password'],
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB2_Credentials}", usernameVariable: 'eNB2_Username', passwordVariable: 'eNB2_Password'],
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password'],
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.ADB_Credentials}", usernameVariable: 'ADB_Username', passwordVariable: 'ADB_Password']
]) {
sh "python3 main.py --mode=InitiateHtml --ranRepository=${eNB_Repository} --ranBranch=${eNB_Branch} --ranCommitID=${eNB_CommitID} --ranAllowMerge=${eNB_AllowMergeRequestProcess} --ranTargetBranch=${eNB_TargetBranch} --ADBIPAddress=${params.ADB_IPAddress} --ADBUserName=${ADB_Username} --ADBPassword=${ADB_Password} ${mainPythonAllXmlFiles}"
String[] myXmlTestSuite = testXMLFile.split("\\r?\\n")
for (xmlFile in myXmlTestSuite) {
if (fileExists(xmlFile)) {
try {
sh "python3 main.py --mode=TesteNB --ranRepository=${eNB_Repository} --ranBranch=${eNB_Branch} --ranCommitID=${eNB_CommitID} --ranAllowMerge=${eNB_AllowMergeRequestProcess} --ranTargetBranch=${eNB_TargetBranch} --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath} --eNB1IPAddress=${params.eNB1_IPAddress} --eNB1UserName=${eNB1_Username} --eNB1Password=${eNB1_Password} --eNB1SourceCodePath=${params.eNB1_SourceCodePath} --eNB2IPAddress=${params.eNB2_IPAddress} --eNB2UserName=${eNB2_Username} --eNB2Password=${eNB2_Password} --eNB2SourceCodePath=${params.eNB2_SourceCodePath} --EPCIPAddress=${params.EPC_IPAddress} --EPCType=${params.EPC_Type} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --ADBIPAddress=${params.ADB_IPAddress} --ADBUserName=${ADB_Username} --ADBPassword=${ADB_Password} --XMLTestFile=${xmlFile}"
} catch (Exception e) {
currentBuild.result = 'FAILURE'
buildStageStatus = false
}
}
}
sh "python3 main.py --mode=FinalizeHtml --finalStatus=${buildStageStatus} --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password}"
}
}
}
}
}
stage('Log Collection') {
parallel {
stage('Log Collection (eNB - Build)') {
steps {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (eNB - Build)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectBuild --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath}"
echo '\u2705 \u001B[32mLog Transfer (eNB - Build)\u001B[0m'
sh "sshpass -p \'${eNB_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${eNB_Username}@${params.eNB_IPAddress}:${eNB_SourceCodePath}/cmake_targets/build.log.zip ./build.log.${env.BUILD_ID}.zip || true"
}
script {
if(fileExists("build.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "build.log.${env.BUILD_ID}.zip"
}
}
}
}
stage('Log Collection (eNB - Run)') {
steps {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (eNB - Run)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollecteNB --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath}"
echo '\u2705 \u001B[32mLog Transfer (eNB - Run)\u001B[0m'
sh "sshpass -p \'${eNB_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${eNB_Username}@${params.eNB_IPAddress}:${eNB_SourceCodePath}/cmake_targets/enb.log.zip ./enb.log.${env.BUILD_ID}.zip || true"
}
script {
if(fileExists("enb.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "enb.log.${env.BUILD_ID}.zip"
}
}
}
}
stage('Log Collection (CN)') {
// Bypassing this stage if EPC server is not defined
when {
expression { params.EPC_IPAddress != "none" }
}
steps {
script {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (HSS)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectHSS --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
if (params.EPC_Type != 'OAICN5G') {
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/hss.log.zip ./hss.log.${env.BUILD_ID}.zip || true"
}
echo '\u2705 \u001B[32mLog Collection (MME or AMF)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectMME --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
if (params.EPC_Type == 'OAICN5G') {
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/mme.log.zip ./amf.log.${env.BUILD_ID}.zip || true"
} else {
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/mme.log.zip ./mme.log.${env.BUILD_ID}.zip || true"
}
echo '\u2705 \u001B[32mLog Collection (SPGW or SMF/UPF)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectSPGW --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
if (params.EPC_Type == 'OAICN5G') {
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/spgw.log.zip ./smf-upf.log.${env.BUILD_ID}.zip || true"
} else {
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/spgw.log.zip ./spgw.log.${env.BUILD_ID}.zip || true"
}
}
if(fileExists("hss.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "hss.log.${env.BUILD_ID}.zip"
}
if(fileExists("mme.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "mme.log.${env.BUILD_ID}.zip"
}
if(fileExists("spgw.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "spgw.log.${env.BUILD_ID}.zip"
}
if(fileExists("amf.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "amf.log.${env.BUILD_ID}.zip"
}
if(fileExists("smf-upf.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "smf-upf.log.${env.BUILD_ID}.zip"
}
echo '\u2705 \u001B[32mLog Collection for CoreNetwork Done!\u001B[0m'
}
}
}
stage ("SQL Collect"){
when {
expression { DataBaseHost != "none" }
}
agent {label DataBaseHost}
steps {
script {
if (currentBuild.result=='FAILURE') {StatusForDb = 'FAIL'} else {StatusForDb = 'PASS'}
sh "python3 /home/oaicicd/mysql/sql_connect.py ${JOB_NAME} ${params.eNB_MR} ${params.eNB_Branch} ${env.BUILD_ID} ${env.BUILD_URL} ${StatusForDb} ''"
}
}
}
}
}
}
post {
always {
script {
if(fileExists("ci-scripts/test_results.html")) {
sh "mv ci-scripts/test_results.html test_results-${JOB_NAME}.html"
sh "sed -i -e 's#TEMPLATE_JOB_NAME#${JOB_NAME}#' -e 's@build #TEMPLATE_BUILD_ID@build #${BUILD_ID}@' -e 's#Build-ID: TEMPLATE_BUILD_ID#Build-ID: <a href=\"${BUILD_URL}\">${BUILD_ID}</a>#' -e 's#TEMPLATE_STAGE_NAME#${testStageName}#' test_results-${JOB_NAME}.html"
archiveArtifacts "test_results-${JOB_NAME}.html"
}
if (params.pipelineZipsConsoleLog != null) {
if (params.pipelineZipsConsoleLog) {
echo "Archiving Jenkins console log"
sh "wget --no-check-certificate --no-proxy ${env.JENKINS_URL}/job/${env.JOB_NAME}/${env.BUILD_ID}/consoleText -O consoleText.log || true"
sh "zip -m consoleText.log.${env.BUILD_ID}.zip consoleText.log || true"
if(fileExists("consoleText.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "consoleText.log.${env.BUILD_ID}.zip"
}
}
}
}
}
}
}
...@@ -101,10 +101,43 @@ class Containerize():
self.cliContName = ''
self.cliOptions = ''
self.imageToCopy = ''
self.registrySvrId = ''
self.testSvrId = ''
#-----------------------------------------------------------
# Container management functions
#-----------------------------------------------------------
def _createWorkspace(self, sshSession, password, sourcePath):
# on RedHat/CentOS .git extension is mandatory
result = re.search('([a-zA-Z0-9\:\-\.\/])+\.git', self.ranRepository)
if result is not None:
full_ran_repo_name = self.ranRepository.replace('git/', 'git')
else:
full_ran_repo_name = self.ranRepository + '.git'
sshSession.command('mkdir -p ' + sourcePath, '\$', 5)
sshSession.command('cd ' + sourcePath, '\$', 5)
sshSession.command('if [ ! -e .git ]; then stdbuf -o0 git clone ' + full_ran_repo_name + ' .; else stdbuf -o0 git fetch --prune; fi', '\$', 600)
# Raphael: here add a check if git clone or git fetch went smoothly
sshSession.command('git config user.email "jenkins@openairinterface.org"', '\$', 5)
sshSession.command('git config user.name "OAI Jenkins"', '\$', 5)
sshSession.command('echo ' + password + ' | sudo -S git clean -x -d -ff', '\$', 30)
sshSession.command('mkdir -p cmake_targets/log', '\$', 5)
# if the commit ID is provided use it to point to it
if self.ranCommitID != '':
sshSession.command('git checkout -f ' + self.ranCommitID, '\$', 30)
# if the branch is not develop, then it is a merge request and we need to do
# the potential merge. Note that merge conflicts should already been checked earlier
if (self.ranAllowMerge):
if self.ranTargetBranch == '':
if (self.ranBranch != 'develop') and (self.ranBranch != 'origin/develop'):
sshSession.command('git merge --ff origin/develop -m "Temporary merge for CI"', '\$', 5)
else:
logging.debug('Merging with the target branch: ' + self.ranTargetBranch)
sshSession.command('git merge --ff origin/' + self.ranTargetBranch + ' -m "Temporary merge for CI"', '\$', 5)
def BuildImage(self, HTML):
if self.ranRepository == '' or self.ranBranch == '' or self.ranCommitID == '':
HELP.GenericHelp(CONST.Version)
...@@ -173,53 +206,28 @@ class Containerize():
self.testCase_id = HTML.testCase_id
self._createWorkspace(mySSH, lPassWord, lSourcePath)
# if asterix, copy the entitlement and subscription manager configurations
if self.host == 'Red Hat':
mySSH.command('mkdir -p tmp/ca/', '\$', 5)
mySSH.command('mkdir -p tmp/entitlement/', '\$', 5)
mySSH.command('sudo cp /etc/rhsm/ca/redhat-uep.pem tmp/ca/', '\$', 5)
mySSH.command('sudo cp /etc/pki/entitlement/*.pem tmp/entitlement/', '\$', 5)
sharedimage = 'ran-build'
sharedTag = 'develop'
forceSharedImageBuild = False
imageTag = 'develop'
if (self.ranAllowMerge):
imageTag = 'ci-temp'
if self.ranTargetBranch == 'develop':
mySSH.command('git diff HEAD..origin/develop -- docker/Dockerfile.ran' + self.dockerfileprefix + ' | grep --colour=never -i INDEX', '\$', 5)
result = re.search('index', mySSH.getBefore())
if result is not None:
forceSharedImageBuild = True
sharedTag = 'ci-temp'
else:
forceSharedImageBuild = True
# Let's remove any previous run artifacts if still there
mySSH.command(self.cli + ' image prune --force', '\$', 30)
if forceSharedImageBuild:
...@@ -397,6 +405,56 @@ class Containerize():
HTML.CreateHtmlTabFooter(False)
sys.exit(1)
def Copy_Image_to_Test_Server(self, HTML):
imageTag = 'develop'
if (self.ranAllowMerge):
imageTag = 'ci-temp'
lSsh = SSH.SSHConnection()
# Going to the Docker Registry server
if self.registrySvrId == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
elif self.registrySvrId == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
elif self.registrySvrId == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSsh.open(lIpAddr, lUserName, lPassWord)
lSsh.command('docker save ' + self.imageToCopy + ':' + imageTag + ' | gzip > ' + self.imageToCopy + '-' + imageTag + '.tar.gz', '\$', 60)
lSsh.copyin(lIpAddr, lUserName, lPassWord, '~/' + self.imageToCopy + '-' + imageTag + '.tar.gz', '.')
lSsh.command('rm ' + self.imageToCopy + '-' + imageTag + '.tar.gz', '\$', 60)
lSsh.close()
# Going to the Test Server
if self.testSvrId == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
elif self.testSvrId == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
elif self.testSvrId == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSsh.open(lIpAddr, lUserName, lPassWord)
lSsh.copyout(lIpAddr, lUserName, lPassWord, './' + self.imageToCopy + '-' + imageTag + '.tar.gz', '~')
lSsh.command('docker rmi ' + self.imageToCopy + ':' + imageTag, '\$', 10)
lSsh.command('docker load < ' + self.imageToCopy + '-' + imageTag + '.tar.gz', '\$', 60)
lSsh.command('rm ' + self.imageToCopy + '-' + imageTag + '.tar.gz', '\$', 60)
lSsh.close()
if os.path.isfile('./' + self.imageToCopy + '-' + imageTag + '.tar.gz'):
os.remove('./' + self.imageToCopy + '-' + imageTag + '.tar.gz')
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
def DeployObject(self, HTML, EPC):
if self.eNB_serverId[self.eNB_instance] == '0':
lIpAddr = self.eNBIPAddress
...@@ -417,35 +475,33 @@ class Containerize():
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
logging.debug('\u001B[1m Deploying OAI Object on server: ' + lIpAddr + '\u001B[0m')
mySSH = SSH.SSHConnection()
mySSH.open(lIpAddr, lUserName, lPassWord)
# Putting the CPUs in a good state, we do that only on a few servers
mySSH.command('hostname', '\$', 5)
result = re.search('obelix|asterix', mySSH.getBefore())
if result is not None:
mySSH.command('if command -v cpupower &> /dev/null; then echo ' + lPassWord + ' | sudo -S cpupower idle-set -D 0; fi', '\$', 5)
time.sleep(5)
self._createWorkspace(mySSH, lPassWord, lSourcePath)
mySSH.command('cd ' + lSourcePath + '/' + self.yamlPath[self.eNB_instance], '\$', 5)
mySSH.command('cp docker-compose.yml ci-docker-compose.yml', '\$', 5)
imageTag = 'develop'
if (self.ranAllowMerge):
imageTag = 'ci-temp'
mySSH.command('sed -i -e "s/image: oai-enb:latest/image: oai-enb:' + imageTag + '/" ci-docker-compose.yml', '\$', 2)
mySSH.command('sed -i -e "s/image: oai-gnb:latest/image: oai-gnb:' + imageTag + '/" ci-docker-compose.yml', '\$', 2)
localMmeIpAddr = EPC.MmeIPAddress
mySSH.command('sed -i -e "s/CI_MME_IP_ADDR/' + localMmeIpAddr + '/" ci-docker-compose.yml', '\$', 2)
# if self.flexranCtrlDeployed:
# mySSH.command('sed -i -e "s/FLEXRAN_ENABLED:.*/FLEXRAN_ENABLED: \'yes\'/" ci-docker-compose.yml', '\$', 2)
# mySSH.command('sed -i -e "s/CI_FLEXRAN_CTL_IP_ADDR/' + self.flexranCtrlIpAddress + '/" ci-docker-compose.yml', '\$', 2)
# else:
# mySSH.command('sed -i -e "s/FLEXRAN_ENABLED:.*$/FLEXRAN_ENABLED: \'no\'/" ci-docker-compose.yml', '\$', 2)
# mySSH.command('sed -i -e "s/CI_FLEXRAN_CTL_IP_ADDR/127.0.0.1/" ci-docker-compose.yml', '\$', 2)
# Currently support only one
mySSH.command('docker-compose --file ci-docker-compose.yml config --services | sed -e "s@^@service=@" 2>&1', '\$', 10)
result = re.search('service=(?P<svc_name>[a-zA-Z0-9\_]+)', mySSH.getBefore())
if result is not None:
svcName = result.group('svc_name')
mySSH.command('docker-compose --file ci-docker-compose.yml up -d ' + svcName, '\$', 10)
# Checking Status
mySSH.command('docker-compose --file ci-docker-compose.yml config', '\$', 5)
...@@ -459,7 +515,7 @@ class Containerize():
time.sleep(5)
cnt = 0
while (cnt < 3):
mySSH.command('docker inspect --format="{{.State.Health.Status}}" ' + containerName, '\$', 5)
unhealthyNb = mySSH.getBefore().count('unhealthy')
healthyNb = mySSH.getBefore().count('healthy') - unhealthyNb
startingNb = mySSH.getBefore().count('starting')
...@@ -528,12 +584,9 @@ class Containerize():
time.sleep(5)
mySSH.command('docker logs ' + containerName + ' > ' + lSourcePath + '/cmake_targets/' + self.eNB_logFile[self.eNB_instance], '\$', 30)
mySSH.command('docker rm -f ' + containerName, '\$', 30)
# Forcing the down now to remove the networks and any artifacts
mySSH.command('docker-compose --file ci-docker-compose.yml down', '\$', 5)
mySSH.close()
# Analyzing log file!
...@@ -841,3 +894,114 @@ class Containerize():
else:
self.exitStatus = 1
HTML.CreateHtmlTestRowQueue(self.cliOptions, 'KO', 1, html_queue)
def CheckAndAddRoute(self, svrName, ipAddr, userName, password):
logging.debug('Checking IP routing on ' + svrName)
mySSH = SSH.SSHConnection()
if svrName == 'porcepix':
mySSH.open(ipAddr, userName, password)
# Check if route to asterix gnb exists
mySSH.command('ip route | grep --colour=never "192.168.68.64/26"', '\$', 10)
result = re.search('192.168.18.194', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.64/26 via 192.168.18.194 dev eno1', '\$', 10)
# Check if route to obelix enb exists
mySSH.command('ip route | grep --colour=never "192.168.68.128/26"', '\$', 10)
result = re.search('192.168.18.193', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.128/26 via 192.168.18.193 dev eno1', '\$', 10)
# Check if route to nepes gnb exists
mySSH.command('ip route | grep --colour=never "192.168.68.192/26"', '\$', 10)
result = re.search('192.168.18.209', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.192/26 via 192.168.18.209 dev eno1', '\$', 10)
# Check if forwarding is enabled
mySSH.command('sysctl net.ipv4.conf.all.forwarding', '\$', 10)
result = re.search('net.ipv4.conf.all.forwarding = 1', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S sysctl net.ipv4.conf.all.forwarding=1', '\$', 10)
# Check if iptables forwarding is accepted
mySSH.command('echo ' + password + ' | sudo -S iptables -L', '\$', 10)
result = re.search('Chain FORWARD .*policy ACCEPT', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S iptables -P FORWARD ACCEPT', '\$', 10)
mySSH.close()
if svrName == 'asterix':
mySSH.open(ipAddr, userName, password)
# Check if route to porcepix epc exists
mySSH.command('ip route | grep --colour=never "192.168.61.192/26"', '\$', 10)
result = re.search('192.168.18.210', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.61.192/26 via 192.168.18.210 dev em1', '\$', 10)
# Check if route to porcepix cn5g exists
mySSH.command('ip route | grep --colour=never "192.168.70.128/26"', '\$', 10)
result = re.search('192.168.18.210', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.70.128/26 via 192.168.18.210 dev em1', '\$', 10)
# Check if X2 route to obelix enb exists
mySSH.command('ip route | grep --colour=never "192.168.68.128/26"', '\$', 10)
result = re.search('192.168.18.193', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.128/26 via 192.168.18.193 dev em1', '\$', 10)
# Check if forwarding is enabled
mySSH.command('sysctl net.ipv4.conf.all.forwarding', '\$', 10)
result = re.search('net.ipv4.conf.all.forwarding = 1', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S sysctl net.ipv4.conf.all.forwarding=1', '\$', 10)
# Check if iptables forwarding is accepted
mySSH.command('echo ' + password + ' | sudo -S iptables -L', '\$', 10)
result = re.search('Chain FORWARD .*policy ACCEPT', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S iptables -P FORWARD ACCEPT', '\$', 10)
mySSH.close()
if svrName == 'obelix':
mySSH.open(ipAddr, userName, password)
# Check if route to porcepix epc exists
mySSH.command('ip route | grep --colour=never "192.168.61.192/26"', '\$', 10)
result = re.search('192.168.18.210', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.61.192/26 via 192.168.18.210 dev eno1', '\$', 10)
# Check if X2 route to asterix gnb exists
mySSH.command('ip route | grep --colour=never "192.168.68.64/26"', '\$', 10)
result = re.search('192.168.18.194', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.64/26 via 192.168.18.194 dev eno1', '\$', 10)
# Check if X2 route to nepes gnb exists
mySSH.command('ip route | grep --colour=never "192.168.68.192/26"', '\$', 10)
result = re.search('192.168.18.209', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.192/26 via 192.168.18.209 dev eno1', '\$', 10)
# Check if forwarding is enabled
mySSH.command('sysctl net.ipv4.conf.all.forwarding', '\$', 10)
result = re.search('net.ipv4.conf.all.forwarding = 1', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S sysctl net.ipv4.conf.all.forwarding=1', '\$', 10)
# Check if iptables forwarding is accepted
mySSH.command('echo ' + password + ' | sudo -S iptables -L', '\$', 10)
result = re.search('Chain FORWARD .*policy ACCEPT', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S iptables -P FORWARD ACCEPT', '\$', 10)
mySSH.close()
if svrName == 'nepes':
mySSH.open(ipAddr, userName, password)
# Check if route to porcepix epc exists
mySSH.command('ip route | grep --colour=never "192.168.61.192/26"', '\$', 10)
result = re.search('192.168.18.210', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.61.192/26 via 192.168.18.210 dev enp0s31f6', '\$', 10)
# Check if X2 route to obelix enb exists
mySSH.command('ip route | grep --colour=never "192.168.68.128/26"', '\$', 10)
result = re.search('192.168.18.193', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.128/26 via 192.168.18.193 dev enp0s31f6', '\$', 10)
# Check if forwarding is enabled
mySSH.command('sysctl net.ipv4.conf.all.forwarding', '\$', 10)
result = re.search('net.ipv4.conf.all.forwarding = 1', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S sysctl net.ipv4.conf.all.forwarding=1', '\$', 10)
# Check if iptables forwarding is accepted
mySSH.command('echo ' + password + ' | sudo -S iptables -L', '\$', 10)
result = re.search('Chain FORWARD .*policy ACCEPT', mySSH.getBefore())
if result is None:
mySSH.command('echo ' + password + ' | sudo -S iptables -P FORWARD ACCEPT', '\$', 10)
mySSH.close()
...@@ -63,7 +63,7 @@ class Module_UE:
#if not it will be started
def CheckCMProcess(self,CNType):
HOST=self.HostUsername+'@'+self.HostIPAddress
COMMAND="ps aux | grep --colour=never " + self.Process['Name'] + " | grep -v grep "
logging.debug(COMMAND)
ssh = subprocess.Popen(["ssh", "%s" % HOST, COMMAND],shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
result = ssh.stdout.readlines()
...@@ -81,7 +81,7 @@ class Module_UE:
#checking the process
time.sleep(5)
HOST=self.HostUsername+'@'+self.HostIPAddress
COMMAND="ps aux | grep --colour=never " + self.Process['Name'] + " | grep -v grep "
logging.debug(COMMAND)
ssh = subprocess.Popen(["ssh", "%s" % HOST, COMMAND],shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
result = ssh.stdout.readlines()
...@@ -108,7 +108,7 @@ class Module_UE:
response= []
tentative = 3
while (len(response)==0) and (tentative>0):
COMMAND="ip a show dev " + self.UENetwork + " | grep --colour=never inet | grep " + self.UENetwork
logging.debug(COMMAND)
ssh = subprocess.Popen(["ssh", "%s" % HOST, COMMAND],shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
response = ssh.stdout.readlines()
response= [] response= []
tentative = 3 tentative = 3
while (len(response)==0) and (tentative>0): while (len(response)==0) and (tentative>0):
COMMAND="ip a show dev " + self.UENetwork + " | grep mtu" COMMAND="ip a show dev " + self.UENetwork + " | grep --colour=never mtu"
logging.debug(COMMAND) logging.debug(COMMAND)
ssh = subprocess.Popen(["ssh", "%s" % HOST, COMMAND],shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE) ssh = subprocess.Popen(["ssh", "%s" % HOST, COMMAND],shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
response = ssh.stdout.readlines() response = ssh.stdout.readlines()
......
...@@ -192,16 +192,16 @@ class OaiCiTest():
result = re.search('LAST_BUILD_INFO', SSH.getBefore())
if result is not None:
mismatch = False
SSH.command('grep --colour=never SRC_COMMIT LAST_BUILD_INFO.txt', '\$', 2)
result = re.search(self.ranCommitID, SSH.getBefore())
if result is None:
mismatch = True
SSH.command('grep --colour=never MERGED_W_TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
if self.ranAllowMerge:
result = re.search('YES', SSH.getBefore())
if result is None:
mismatch = True
SSH.command('grep --colour=never TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
if self.ranTargetBranch == '':
result = re.search('develop', SSH.getBefore())
else:
...@@ -451,13 +451,13 @@ class OaiCiTest():
SSH = sshconnection.SSHConnection()
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
# b2xx_fx3_utils reset procedure
SSH.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 180)
result = re.search('type: b200', SSH.getBefore())
if result is not None:
logging.debug('Found a B2xx device --> resetting it')
SSH.command('echo ' + self.UEPassword + ' | sudo -S b2xx_fx3_utils --reset-device', '\$', 10)
# Reloading FGPA bin firmware
SSH.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 180)
result = re.search('type: n3xx', str(SSH.getBefore()))
if result is not None:
logging.debug('Found a N3xx device --> resetting it')
...@@ -660,7 +660,7 @@ class OaiCiTest():
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
count = 0
while count < 5:
SSH.command('echo ' + self.ADBPassword + ' | sudo -S lsof | grep --colour=never ttyUSB0', '\$', 10)
result = re.search('picocom', SSH.getBefore())
if result is None:
count = 10
...@@ -1328,7 +1328,7 @@ class OaiCiTest():
SSH = sshconnection.SSHConnection()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
SSH.command('lsusb | egrep --colour=never "Future Technology Devices International, Ltd FT2232C" | sed -e "s#:.*##" -e "s# #_#g"', '\$', 15)
#self.CatMDevices = re.findall("\\\\r\\\\n([A-Za-z0-9_]+)",SSH.getBefore())
self.CatMDevices = re.findall("\\\\r\\\\n([A-Za-z0-9_]+)",SSH.getBefore())
else:
...@@ -1583,7 +1583,7 @@ class OaiCiTest():
if re.match('OAI-Rel14-Docker', EPC.Type, re.IGNORECASE):
Target = EPC.MmeIPAddress
elif re.match('OAICN5G', EPC.Type, re.IGNORECASE):
Target = EPC.MmeIPAddress
else:
Target = EPC.IPAddress
#ping from module NIC rather than IP address to make sure round trip is over the air
...@@ -2325,7 +2325,7 @@ class OaiCiTest():
server_filename = 'iperf_server_' + self.testCase_id + '_' + self.ue_id + '.log'
SSH.command('docker exec -it prod-trf-gen /bin/bash -c "killall --signal SIGKILL iperf"', '\$', 5)
iperf_cmd = 'echo $USER; nohup bin/iperf -s -u 2>&1 > ' + server_filename
cmd = 'docker exec -d prod-trf-gen /bin/bash -c \"' + iperf_cmd + '\"'
SSH.command(cmd,'\$',5)
SSH.close()
UhdVersion = result.group('uhd_version') UhdVersion = result.group('uhd_version')
logging.debug('UHD Version is: ' + UhdVersion) logging.debug('UHD Version is: ' + UhdVersion)
HTML.UhdVersion[idx]=UhdVersion HTML.UhdVersion[idx]=UhdVersion
SSH.command('echo ' + Password + ' | sudo -S uhd_find_devices', '\$', 90) SSH.command('echo ' + Password + ' | sudo -S uhd_find_devices', '\$', 180)
usrp_boards = re.findall('product: ([0-9A-Za-z]+)\\\\r\\\\n', SSH.getBefore()) usrp_boards = re.findall('product: ([0-9A-Za-z]+)\\\\r\\\\n', SSH.getBefore())
count = 0 count = 0
for board in usrp_boards: for board in usrp_boards:
......
...@@ -140,7 +140,7 @@ class PhySim: ...@@ -140,7 +140,7 @@ class PhySim:
logging.debug('oai-physim size is unknown') logging.debug('oai-physim size is unknown')
# logging to OC Cluster and then switch to corresponding project # logging to OC Cluster and then switch to corresponding project
mySSH.command(f'oc login -u {ocUserName} -p {ocPassword}', '\$', 6) mySSH.command(f'oc login -u {ocUserName} -p {ocPassword}', '\$', 30)
if mySSH.getBefore().count('Login successful.') == 0: if mySSH.getBefore().count('Login successful.') == 0:
logging.error('\u001B[1m OC Cluster Login Failed\u001B[0m') logging.error('\u001B[1m OC Cluster Login Failed\u001B[0m')
mySSH.close() mySSH.close()
...@@ -149,7 +149,7 @@ class PhySim: ...@@ -149,7 +149,7 @@ class PhySim:
return return
else: else:
logging.debug('\u001B[1m Login to OC Cluster Successfully\u001B[0m') logging.debug('\u001B[1m Login to OC Cluster Successfully\u001B[0m')
mySSH.command(f'oc project {ocProjectName}', '\$', 6) mySSH.command(f'oc project {ocProjectName}', '\$', 30)
if mySSH.getBefore().count(f'Already on project "{ocProjectName}"') == 0 and mySSH.getBefore().count(f'Now using project "{self.OCProjectName}"') == 0: if mySSH.getBefore().count(f'Already on project "{ocProjectName}"') == 0 and mySSH.getBefore().count(f'Now using project "{self.OCProjectName}"') == 0:
logging.error(f'\u001B[1m Unable to access OC project {ocProjectName}\u001B[0m') logging.error(f'\u001B[1m Unable to access OC project {ocProjectName}\u001B[0m')
mySSH.close() mySSH.close()
...@@ -160,7 +160,7 @@ class PhySim: ...@@ -160,7 +160,7 @@ class PhySim:
logging.debug(f'\u001B[1m Now using project {ocProjectName}\u001B[0m') logging.debug(f'\u001B[1m Now using project {ocProjectName}\u001B[0m')
# Tag the image and push to the OC cluster # Tag the image and push to the OC cluster
mySSH.command('oc whoami -t | sudo podman login -u ' + ocUserName + ' --password-stdin https://default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/ --tls-verify=false', '\$', 6) mySSH.command('oc whoami -t | sudo podman login -u ' + ocUserName + ' --password-stdin https://default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/ --tls-verify=false', '\$', 30)
if mySSH.getBefore().count('Login Succeeded!') == 0: if mySSH.getBefore().count('Login Succeeded!') == 0:
logging.error('\u001B[1m Podman Login to OC Cluster Registry Failed\u001B[0m') logging.error('\u001B[1m Podman Login to OC Cluster Registry Failed\u001B[0m')
mySSH.close() mySSH.close()
...@@ -170,7 +170,7 @@ class PhySim: ...@@ -170,7 +170,7 @@ class PhySim:
else: else:
logging.debug('\u001B[1m Podman Login to OC Cluster Registry Successfully\u001B[0m') logging.debug('\u001B[1m Podman Login to OC Cluster Registry Successfully\u001B[0m')
time.sleep(2) time.sleep(2)
mySSH.command('oc create -f openshift/oai-physim-image-stream.yml', '\$', 6) mySSH.command('oc create -f openshift/oai-physim-image-stream.yml', '\$', 30)
if mySSH.getBefore().count('(AlreadyExists):') == 0 and mySSH.getBefore().count('created') == 0: if mySSH.getBefore().count('(AlreadyExists):') == 0 and mySSH.getBefore().count('created') == 0:
logging.error(f'\u001B[1m Image Stream "oai-physim" Creation Failed on OC Cluster {ocProjectName}\u001B[0m') logging.error(f'\u001B[1m Image Stream "oai-physim" Creation Failed on OC Cluster {ocProjectName}\u001B[0m')
mySSH.close() mySSH.close()
...@@ -180,9 +180,9 @@ class PhySim: ...@@ -180,9 +180,9 @@ class PhySim:
else: else:
logging.debug(f'\u001B[1m Image Stream "oai-physim" created on OC project {ocProjectName}\u001B[0m') logging.debug(f'\u001B[1m Image Stream "oai-physim" created on OC project {ocProjectName}\u001B[0m')
time.sleep(2) time.sleep(2)
mySSH.command(f'sudo podman tag oai-physim:{imageTag} default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag}', '\$', 6) mySSH.command(f'sudo podman tag oai-physim:{imageTag} default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag}', '\$', 30)
time.sleep(2) time.sleep(2)
mySSH.command(f'sudo podman push default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag} --tls-verify=false', '\$', 30) mySSH.command(f'sudo podman push default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag} --tls-verify=false', '\$', 180)
if mySSH.getBefore().count('Storing signatures') == 0: if mySSH.getBefore().count('Storing signatures') == 0:
logging.error('\u001B[1m Image "oai-physim" push to OC Cluster Registry Failed\u001B[0m') logging.error('\u001B[1m Image "oai-physim" push to OC Cluster Registry Failed\u001B[0m')
mySSH.close() mySSH.close()
...@@ -195,18 +195,18 @@ class PhySim: ...@@ -195,18 +195,18 @@ class PhySim:
# Using helm charts deployment # Using helm charts deployment
time.sleep(5) time.sleep(5)
mySSH.command(f'sed -i -e "s#TAG#{imageTag}#g" ./charts/physims/values.yaml', '\$', 6) mySSH.command(f'sed -i -e "s#TAG#{imageTag}#g" ./charts/physims/values.yaml', '\$', 6)
mySSH.command('helm install physim ./charts/physims/ | tee -a cmake_targets/log/physim_helm_summary.txt 2>&1', '\$', 6) mySSH.command('helm install physim ./charts/physims/ | tee -a cmake_targets/log/physim_helm_summary.txt 2>&1', '\$', 30)
if mySSH.getBefore().count('STATUS: deployed') == 0: if mySSH.getBefore().count('STATUS: deployed') == 0:
logging.error('\u001B[1m Deploying PhySim Failed using helm chart on OC Cluster\u001B[0m') logging.error('\u001B[1m Deploying PhySim Failed using helm chart on OC Cluster\u001B[0m')
mySSH.command('helm uninstall physim >> cmake_targets/log/physim_helm_summary.txt 2>&1', '\$', 6) mySSH.command('helm uninstall physim >> cmake_targets/log/physim_helm_summary.txt 2>&1', '\$', 30)
isFinished1 = False isFinished1 = False
while(isFinished1 == False): while(isFinished1 == False):
time.sleep(20) time.sleep(20)
mySSH.command('oc get pods -l app.kubernetes.io/instance=physim', '\$', 6, resync=True) mySSH.command('oc get pods -l app.kubernetes.io/instance=physim', '\$', 6, resync=True)
if re.search('No resources found', mySSH.getBefore()): if re.search('No resources found', mySSH.getBefore()):
isFinished1 = True isFinished1 = True
mySSH.command(f'sudo podman rmi default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag}', '\$', 6) mySSH.command(f'sudo podman rmi default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag}', '\$', 30)
mySSH.command('oc delete is oai-physim', '\$', 6) mySSH.command('oc delete is oai-physim', '\$', 30)
mySSH.close() mySSH.close()
self.AnalyzeLogFile_phySim(HTML) self.AnalyzeLogFile_phySim(HTML)
RAN.prematureExit = True RAN.prematureExit = True
...@@ -217,7 +217,7 @@ class PhySim: ...@@ -217,7 +217,7 @@ class PhySim:
count = 0 count = 0
while(count < 2 and isRunning == False): while(count < 2 and isRunning == False):
time.sleep(60) time.sleep(60)
mySSH.command('oc get pods -o wide -l app.kubernetes.io/instance=physim | tee -a cmake_targets/log/physim_pods_summary.txt', '\$', 6, resync=True) mySSH.command('oc get pods -o wide -l app.kubernetes.io/instance=physim | tee -a cmake_targets/log/physim_pods_summary.txt', '\$', 30, resync=True)
if mySSH.getBefore().count('Running') == 12: if mySSH.getBefore().count('Running') == 12:
logging.debug('\u001B[1m Running the physim test Scenarios\u001B[0m') logging.debug('\u001B[1m Running the physim test Scenarios\u001B[0m')
isRunning = True isRunning = True
......
...@@ -255,7 +255,7 @@ MACRLCs = ( ...@@ -255,7 +255,7 @@ MACRLCs = (
tr_n_preference = "local_RRC"; tr_n_preference = "local_RRC";
# pusch_TargetSNRx10 = 200; # pusch_TargetSNRx10 = 200;
# pucch_TargetSNRx10 = 150; # pucch_TargetSNRx10 = 150;
ulsch_max_slots_inactivity=20; ulsch_max_frame_inactivity = 1;
} }
); );
......
...@@ -53,7 +53,7 @@ import constants as CONST ...@@ -53,7 +53,7 @@ import constants as CONST
class EPCManagement(): class EPCManagement():
def __init__(self): def __init__(self):
self.IPAddress = '' self.IPAddress = ''
self.UserName = '' self.UserName = ''
self.Password = '' self.Password = ''
...@@ -62,7 +62,6 @@ class EPCManagement(): ...@@ -62,7 +62,6 @@ class EPCManagement():
self.PcapFileName = '' self.PcapFileName = ''
self.testCase_id = '' self.testCase_id = ''
self.MmeIPAddress = '' self.MmeIPAddress = ''
self.AmfIPAddress = ''
self.containerPrefix = 'prod' self.containerPrefix = 'prod'
self.mmeConfFile = 'mme.conf' self.mmeConfFile = 'mme.conf'
self.yamlPath = '' self.yamlPath = ''
...@@ -228,6 +227,7 @@ class EPCManagement(): ...@@ -228,6 +227,7 @@ class EPCManagement():
sys.exit('Insufficient EPC Parameters') sys.exit('Insufficient EPC Parameters')
mySSH = SSH.SSHConnection() mySSH = SSH.SSHConnection()
mySSH.open(self.IPAddress, self.UserName, self.Password) mySSH.open(self.IPAddress, self.UserName, self.Password)
html_cell = '<pre style="background-color:white">\n'
if re.match('ltebox', self.Type, re.IGNORECASE): if re.match('ltebox', self.Type, re.IGNORECASE):
logging.debug('Using the SABOX simulated HSS') logging.debug('Using the SABOX simulated HSS')
mySSH.command('if [ -d ' + self.SourceCodePath + '/scripts ]; then echo ' + self.Password + ' | sudo -S rm -Rf ' + self.SourceCodePath + '/scripts ; fi', '\$', 5) mySSH.command('if [ -d ' + self.SourceCodePath + '/scripts ]; then echo ' + self.Password + ' | sudo -S rm -Rf ' + self.SourceCodePath + '/scripts ; fi', '\$', 5)
...@@ -238,16 +238,48 @@ class EPCManagement(): ...@@ -238,16 +238,48 @@ class EPCManagement():
logging.debug('Using the sabox') logging.debug('Using the sabox')
mySSH.command('cd /opt/ltebox/tools', '\$', 5) mySSH.command('cd /opt/ltebox/tools', '\$', 5)
mySSH.command('echo ' + self.Password + ' | sudo -S ./start_sabox', '\$', 5) mySSH.command('echo ' + self.Password + ' | sudo -S ./start_sabox', '\$', 5)
html_cell += 'N/A\n'
elif re.match('OAICN5G', self.Type, re.IGNORECASE): elif re.match('OAICN5G', self.Type, re.IGNORECASE):
logging.debug('Starting OAI CN5G') logging.debug('Starting OAI CN5G')
mySSH.command('if [ -d ' + self.SourceCodePath + '/scripts ]; then echo ' + self.Password + ' | sudo -S rm -Rf ' + self.SourceCodePath + '/scripts ; fi', '\$', 5) mySSH.command('if [ -d ' + self.SourceCodePath + '/scripts ]; then echo ' + self.Password + ' | sudo -S rm -Rf ' + self.SourceCodePath + '/scripts ; fi', '\$', 5)
mySSH.command('mkdir -p ' + self.SourceCodePath + '/scripts', '\$', 5) mySSH.command('mkdir -p ' + self.SourceCodePath + '/scripts', '\$', 5)
mySSH.command('cd /opt/oai-cn5g-fed/docker-compose', '\$', 5) mySSH.command('cd /opt/oai-cn5g-fed/docker-compose', '\$', 5)
mySSH.command('./core-network.sh start nrf spgwu', '\$', 60) mySSH.command('./core-network.sh start nrf spgwu', '\$', 60)
time.sleep(2)
mySSH.command('docker-compose -p 5gcn ps -a', '\$', 60)
if mySSH.getBefore().count('Up (healthy)') != 6:
logging.error('Not all containers are healthy')
else:
logging.debug('OK')
mySSH.command('docker-compose config | grep --colour=never image', '\$', 10)
listOfImages = mySSH.getBefore()
for imageLine in listOfImages.split('\\r\\n'):
res1 = re.search('image: (?P<name>[a-zA-Z0-9\-]+):(?P<tag>[a-zA-Z0-9\-]+)', str(imageLine))
res2 = re.search('mysql', str(imageLine))
if res1 is not None and res2 is None:
html_cell += res1.group('name') + ':' + res1.group('tag') + ' '
nbChars = len(res1.group('name')) + len(res1.group('tag')) + 2
while (nbChars < 32):
html_cell += ' '
nbChars += 1
mySSH.command('docker image inspect --format="Size = {{.Size}} bytes" ' + res1.group('name') + ':' + res1.group('tag'), '\$', 10)
res3 = re.search('Size *= *(?P<size>[0-9\-]*) *bytes', mySSH.getBefore())
if res3 is not None:
imageSize = int(res3.group('size'))
imageSize = int(imageSize/(1024*1024))
html_cell += str(imageSize) + ' MBytes '
mySSH.command('docker image inspect --format="Date = {{.Created}}" ' + res1.group('name') + ':' + res1.group('tag'), '\$', 10)
res4 = re.search('Date *= *(?P<date>[0-9\-]*)T', mySSH.getBefore())
if res4 is not None:
html_cell += '(' + res4.group('date') + ')'
html_cell += '\n'
else: else:
logging.error('This option should not occur!') logging.error('This option should not occur!')
html_cell += '</pre>'
mySSH.close() mySSH.close()
HTML.CreateHtmlTestRow(self.Type, 'OK', CONST.ALL_PROCESSES_OK) html_queue = SimpleQueue()
html_queue.put(html_cell)
HTML.CreateHtmlTestRowQueue(self.Type, 'OK', 1, html_queue)
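Note: the image-table rendering added above condenses three regex passes over docker output. A minimal standalone sketch of the same logic, using illustrative command output rather than a live bench (the sample strings below are made up):

import re

# Illustrative stand-ins for mySSH.getBefore() after each docker command
compose_out = 'image: oai-amf:develop\r\nimage: mysql:5.7'
size_out = 'Size = 367001600 bytes'         # docker image inspect --format="Size = {{.Size}} bytes"
date_out = 'Date = 2021-11-05T10:00:00Z'    # docker image inspect --format="Date = {{.Created}}"

html_cell = '<pre style="background-color:white">\n'
for imageLine in compose_out.split('\r\n'):
    res1 = re.search('image: (?P<name>[a-zA-Z0-9\-]+):(?P<tag>[a-zA-Z0-9\-]+)', imageLine)
    res2 = re.search('mysql', imageLine)
    if res1 is not None and res2 is None:
        # pad to a 32-char column, as the while loop above does
        html_cell += (res1.group('name') + ':' + res1.group('tag')).ljust(32)
        res3 = re.search('Size *= *(?P<size>[0-9\-]*) *bytes', size_out)
        if res3 is not None:
            html_cell += str(int(res3.group('size')) // (1024*1024)) + ' MBytes '
        res4 = re.search('Date *= *(?P<date>[0-9\-]*)T', date_out)
        if res4 is not None:
            html_cell += '(' + res4.group('date') + ')'
        html_cell += '\n'
html_cell += '</pre>'
print(html_cell)  # one line per non-mysql image: 'oai-amf:develop ... 350 MBytes (2021-11-05)'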
def SetAmfIPAddress(self): def SetAmfIPAddress(self):
# Not an error if we don't need an 5GCN # Not an error if we don't need an 5GCN
...@@ -371,7 +403,7 @@ class EPCManagement(): ...@@ -371,7 +403,7 @@ class EPCManagement():
elif re.match('OAI-Rel14-CUPS', self.Type, re.IGNORECASE): elif re.match('OAI-Rel14-CUPS', self.Type, re.IGNORECASE):
mySSH.command('echo ' + self.Password + ' | sudo -S killall --signal SIGINT oai_hss || true', '\$', 5) mySSH.command('echo ' + self.Password + ' | sudo -S killall --signal SIGINT oai_hss || true', '\$', 5)
time.sleep(2) time.sleep(2)
mySSH.command('stdbuf -o0 ps -aux | grep hss | grep -v grep', '\$', 5) mySSH.command('stdbuf -o0 ps -aux | grep --colour=never hss | grep -v grep', '\$', 5)
result = re.search('oai_hss -j', mySSH.getBefore()) result = re.search('oai_hss -j', mySSH.getBefore())
if result is not None: if result is not None:
mySSH.command('echo ' + self.Password + ' | sudo -S killall --signal SIGKILL oai_hss || true', '\$', 5) mySSH.command('echo ' + self.Password + ' | sudo -S killall --signal SIGKILL oai_hss || true', '\$', 5)
...@@ -379,7 +411,7 @@ class EPCManagement(): ...@@ -379,7 +411,7 @@ class EPCManagement():
elif re.match('OAI', self.Type, re.IGNORECASE): elif re.match('OAI', self.Type, re.IGNORECASE):
mySSH.command('echo ' + self.Password + ' | sudo -S killall --signal SIGINT run_hss oai_hss || true', '\$', 5) mySSH.command('echo ' + self.Password + ' | sudo -S killall --signal SIGINT run_hss oai_hss || true', '\$', 5)
time.sleep(2) time.sleep(2)
mySSH.command('stdbuf -o0 ps -aux | grep hss | grep -v grep', '\$', 5) mySSH.command('stdbuf -o0 ps -aux | grep --colour=never hss | grep -v grep', '\$', 5)
result = re.search('\/bin\/bash .\/run_', mySSH.getBefore()) result = re.search('\/bin\/bash .\/run_', mySSH.getBefore())
if result is not None: if result is not None:
mySSH.command('echo ' + self.Password + ' | sudo -S killall --signal SIGKILL run_hss oai_hss || true', '\$', 5) mySSH.command('echo ' + self.Password + ' | sudo -S killall --signal SIGKILL run_hss oai_hss || true', '\$', 5)
...@@ -465,6 +497,7 @@ class EPCManagement(): ...@@ -465,6 +497,7 @@ class EPCManagement():
def Terminate5GCN(self, HTML): def Terminate5GCN(self, HTML):
mySSH = SSH.SSHConnection() mySSH = SSH.SSHConnection()
mySSH.open(self.IPAddress, self.UserName, self.Password) mySSH.open(self.IPAddress, self.UserName, self.Password)
message = ''
if re.match('ltebox', self.Type, re.IGNORECASE): if re.match('ltebox', self.Type, re.IGNORECASE):
logging.debug('Terminating SA BOX') logging.debug('Terminating SA BOX')
mySSH.command('cd /opt/ltebox/tools', '\$', 5) mySSH.command('cd /opt/ltebox/tools', '\$', 5)
...@@ -475,15 +508,31 @@ class EPCManagement(): ...@@ -475,15 +508,31 @@ class EPCManagement():
time.sleep(1) time.sleep(1)
mySSH.command('echo ' + self.Password + ' | sudo -S screen -S simulated_5g_hss -X quit', '\$', 5) mySSH.command('echo ' + self.Password + ' | sudo -S screen -S simulated_5g_hss -X quit', '\$', 5)
elif re.match('OAICN5G', self.Type, re.IGNORECASE): elif re.match('OAICN5G', self.Type, re.IGNORECASE):
self.LogCollectOAICN5G() logging.debug('OAI CN5G Collecting Log files to workspace')
mySSH.command('echo ' + self.Password + ' | sudo -S rm -rf ' + self.SourceCodePath + '/logs', '\$', 5)
mySSH.command('mkdir ' + self.SourceCodePath + '/logs','\$', 5)
containers_list=['oai-smf','oai-spgwu','oai-amf','oai-nrf']
for c in containers_list:
mySSH.command('docker logs ' + c + ' > ' + self.SourceCodePath + '/logs/' + c + '.log', '\$', 5)
logging.debug('Terminating OAI CN5G') logging.debug('Terminating OAI CN5G')
mySSH.command('cd /opt/oai-cn5g-fed/docker-compose', '\$', 5) mySSH.command('cd /opt/oai-cn5g-fed/docker-compose', '\$', 5)
mySSH.command('docker-compose down', '\$', 5)
mySSH.command('./core-network.sh stop nrf spgwu', '\$', 60) mySSH.command('./core-network.sh stop nrf spgwu', '\$', 60)
time.sleep(2)
mySSH.command('tshark -r /tmp/oai-cn5g.pcap | egrep --colour=never "Tracking area update" ','\$', 30)
result = re.search('Tracking area update request', mySSH.getBefore())
if result is not None:
message = 'UE requested ' + str(mySSH.getBefore().count('Tracking area update request')) + ' Tracking area update request(s)'
else:
message = 'No Tracking area update request'
logging.debug(message)
else: else:
logging.error('This should not happen!') logging.error('This should not happen!')
mySSH.close() mySSH.close()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK) html_queue = SimpleQueue()
html_cell = '<pre style="background-color:white">' + message + '</pre>'
html_queue.put(html_cell)
HTML.CreateHtmlTestRowQueue(self.Type, 'OK', 1, html_queue)
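The tracking-area-update reporting above is just a substring count over the tshark output; a minimal sketch with a made-up capture excerpt:

import re

before = 'Tracking area update request\r\nTracking area update accept\r\nTracking area update request'  # illustrative
if re.search('Tracking area update request', before) is not None:
    message = 'UE requested ' + str(before.count('Tracking area update request')) + ' Tracking area update request(s)'
else:
    message = 'No Tracking area update request'
print(message)  # -> UE requested 2 Tracking area update request(s)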
def DeployEpc(self, HTML): def DeployEpc(self, HTML):
logging.debug('Trying to deploy') logging.debug('Trying to deploy')
...@@ -529,6 +578,10 @@ class EPCManagement(): ...@@ -529,6 +578,10 @@ class EPCManagement():
mySSH.copyout(self.IPAddress, self.UserName, self.Password, './' + self.yamlPath + '/mme_fd.sprint.conf', self.SourceCodePath + '/scripts') mySSH.copyout(self.IPAddress, self.UserName, self.Password, './' + self.yamlPath + '/mme_fd.sprint.conf', self.SourceCodePath + '/scripts')
mySSH.copyout(self.IPAddress, self.UserName, self.Password, './' + self.yamlPath + '/redis_extern.conf', self.SourceCodePath + '/scripts') mySSH.copyout(self.IPAddress, self.UserName, self.Password, './' + self.yamlPath + '/redis_extern.conf', self.SourceCodePath + '/scripts')
mySSH.command('chmod a+x ' + self.SourceCodePath + '/scripts/entrypoint.sh', '\$', 5) mySSH.command('chmod a+x ' + self.SourceCodePath + '/scripts/entrypoint.sh', '\$', 5)
else:
mySSH.copyout(self.IPAddress, self.UserName, self.Password, './' + self.yamlPath + '/entrypoint.sh', self.SourceCodePath + '/scripts')
mySSH.copyout(self.IPAddress, self.UserName, self.Password, './' + self.yamlPath + '/mme.conf', self.SourceCodePath + '/scripts')
mySSH.command('chmod 775 entrypoint.sh', '\$', 60)
mySSH.command('wget --quiet --tries=3 --retry-connrefused https://raw.githubusercontent.com/OPENAIRINTERFACE/openair-hss/develop/src/hss_rel14/db/oai_db.cql', '\$', 30) mySSH.command('wget --quiet --tries=3 --retry-connrefused https://raw.githubusercontent.com/OPENAIRINTERFACE/openair-hss/develop/src/hss_rel14/db/oai_db.cql', '\$', 30)
mySSH.command('docker-compose down', '\$', 60) mySSH.command('docker-compose down', '\$', 60)
mySSH.command('docker-compose up -d db_init', '\$', 60) mySSH.command('docker-compose up -d db_init', '\$', 60)
...@@ -572,6 +625,30 @@ class EPCManagement(): ...@@ -572,6 +625,30 @@ class EPCManagement():
listOfContainers += ' prod-trf-gen' listOfContainers += ' prod-trf-gen'
expectedHealthyContainers += 1 expectedHealthyContainers += 1
mySSH.command('docker-compose config | grep --colour=never image', '\$', 10)
html_cell = '<pre style="background-color:white">\n'
listOfImages = mySSH.getBefore()
for imageLine in listOfImages.split('\\r\\n'):
res1 = re.search('image: (?P<name>[a-zA-Z0-9\-]+):(?P<tag>[a-zA-Z0-9\-]+)', str(imageLine))
res2 = re.search('cassandra|redis', str(imageLine))
if res1 is not None and res2 is None:
html_cell += res1.group('name') + ':' + res1.group('tag') + ' '
nbChars = len(res1.group('name')) + len(res1.group('tag')) + 2
while (nbChars < 32):
html_cell += ' '
nbChars += 1
mySSH.command('docker image inspect --format="Size = {{.Size}} bytes" ' + res1.group('name') + ':' + res1.group('tag'), '\$', 10)
res3 = re.search('Size *= *(?P<size>[0-9\-]*) *bytes', mySSH.getBefore())
if res3 is not None:
imageSize = int(res3.group('size'))
imageSize = int(imageSize/(1024*1024))
html_cell += str(imageSize) + ' MBytes '
mySSH.command('docker image inspect --format="Date = {{.Created}}" ' + res1.group('name') + ':' + res1.group('tag'), '\$', 10)
res4 = re.search('Date *= *(?P<date>[0-9\-]*)T', mySSH.getBefore())
if res4 is not None:
html_cell += '(' + res4.group('date') + ')'
html_cell += '\n'
html_cell += '</pre>'
# Checking if all are healthy # Checking if all are healthy
cnt = 0 cnt = 0
while (cnt < 3): while (cnt < 3):
...@@ -587,6 +664,8 @@ class EPCManagement(): ...@@ -587,6 +664,8 @@ class EPCManagement():
logging.debug(' -- ' + str(healthyNb) + ' healthy container(s)') logging.debug(' -- ' + str(healthyNb) + ' healthy container(s)')
logging.debug(' -- ' + str(unhealthyNb) + ' unhealthy container(s)') logging.debug(' -- ' + str(unhealthyNb) + ' unhealthy container(s)')
logging.debug(' -- ' + str(startingNb) + ' still starting container(s)') logging.debug(' -- ' + str(startingNb) + ' still starting container(s)')
html_queue = SimpleQueue()
html_queue.put(html_cell)
if healthyNb == expectedHealthyContainers: if healthyNb == expectedHealthyContainers:
mySSH.command('docker exec -d prod-oai-hss /bin/bash -c "nohup tshark -i any -f \'port 9042 or port 3868\' -w /tmp/hss_check_run.pcap 2>&1 > /dev/null"', '\$', 5) mySSH.command('docker exec -d prod-oai-hss /bin/bash -c "nohup tshark -i any -f \'port 9042 or port 3868\' -w /tmp/hss_check_run.pcap 2>&1 > /dev/null"', '\$', 5)
if self.isMagmaUsed: if self.isMagmaUsed:
...@@ -598,11 +677,11 @@ class EPCManagement(): ...@@ -598,11 +677,11 @@ class EPCManagement():
mySSH.command('docker exec -d prod-oai-spgwu-tiny /bin/bash -c "nohup tshark -i any -f \'port 8805\' -w /tmp/spgwu_check_run.pcap 2>&1 > /dev/null"', '\$', 10) mySSH.command('docker exec -d prod-oai-spgwu-tiny /bin/bash -c "nohup tshark -i any -f \'port 8805\' -w /tmp/spgwu_check_run.pcap 2>&1 > /dev/null"', '\$', 10)
mySSH.close() mySSH.close()
logging.debug('Deployment OK') logging.debug('Deployment OK')
HTML.CreateHtmlTestRow(self.Type, 'OK', CONST.ALL_PROCESSES_OK) HTML.CreateHtmlTestRowQueue(self.Type, 'OK', 1, html_queue)
else: else:
mySSH.close() mySSH.close()
logging.debug('Deployment went wrong') logging.debug('Deployment went wrong')
HTML.CreateHtmlTestRow(self.Type, 'KO', CONST.INVALID_PARAMETER) HTML.CreateHtmlTestRowQueue(self.Type, 'KO', 1, html_queue)
def UndeployEpc(self, HTML): def UndeployEpc(self, HTML):
logging.debug('Trying to undeploy') logging.debug('Trying to undeploy')
...@@ -639,6 +718,13 @@ class EPCManagement(): ...@@ -639,6 +718,13 @@ class EPCManagement():
mySSH.command('docker cp prod-magma-mme:/tmp/mme_check_run.pcap mme_' + self.testCase_id + '.pcap', '\$', 60) mySSH.command('docker cp prod-magma-mme:/tmp/mme_check_run.pcap mme_' + self.testCase_id + '.pcap', '\$', 60)
else: else:
mySSH.command('docker cp prod-oai-mme:/tmp/mme_check_run.pcap mme_' + self.testCase_id + '.pcap', '\$', 60) mySSH.command('docker cp prod-oai-mme:/tmp/mme_check_run.pcap mme_' + self.testCase_id + '.pcap', '\$', 60)
mySSH.command('tshark -r mme_' + self.testCase_id + '.pcap | egrep --colour=never "Tracking area update"', '\$', 60)
result = re.search('Tracking area update request', mySSH.getBefore())
if result is not None:
message = 'UE requested ' + str(mySSH.getBefore().count('Tracking area update request')) + ' Tracking area update request(s)'
else:
message = 'No Tracking area update request'
logging.debug(message)
mySSH.command('docker cp prod-oai-spgwc:/tmp/spgwc_check_run.pcap spgwc_' + self.testCase_id + '.pcap', '\$', 60) mySSH.command('docker cp prod-oai-spgwc:/tmp/spgwc_check_run.pcap spgwc_' + self.testCase_id + '.pcap', '\$', 60)
mySSH.command('docker cp prod-oai-spgwu-tiny:/tmp/spgwu_check_run.pcap spgwu_' + self.testCase_id + '.pcap', '\$', 60) mySSH.command('docker cp prod-oai-spgwu-tiny:/tmp/spgwu_check_run.pcap spgwu_' + self.testCase_id + '.pcap', '\$', 60)
# Remove all # Remove all
...@@ -665,12 +751,15 @@ class EPCManagement(): ...@@ -665,12 +751,15 @@ class EPCManagement():
mySSH.command('docker inspect --format=\'{{.Name}}\' prod-oai-public-net prod-oai-private-net', '\$', 10) mySSH.command('docker inspect --format=\'{{.Name}}\' prod-oai-public-net prod-oai-private-net', '\$', 10)
noMoreNetworkNb = mySSH.getBefore().count('No such object') noMoreNetworkNb = mySSH.getBefore().count('No such object')
mySSH.close() mySSH.close()
html_queue = SimpleQueue()
html_cell = '<pre style="background-color:white">' + message + '</pre>'
html_queue.put(html_cell)
if noMoreContainerNb == nbContainers and noMoreNetworkNb == 2: if noMoreContainerNb == nbContainers and noMoreNetworkNb == 2:
logging.debug('Undeployment OK') logging.debug('Undeployment OK')
HTML.CreateHtmlTestRow(self.Type, 'OK', CONST.ALL_PROCESSES_OK) HTML.CreateHtmlTestRowQueue(self.Type, 'OK', 1, html_queue)
else: else:
logging.debug('Undeployment went wrong') logging.debug('Undeployment went wrong')
HTML.CreateHtmlTestRow(self.Type, 'KO', CONST.INVALID_PARAMETER) HTML.CreateHtmlTestRowQueue(self.Type, 'KO', 1, html_queue)
def LogCollectHSS(self): def LogCollectHSS(self):
mySSH = SSH.SSHConnection() mySSH = SSH.SSHConnection()
...@@ -689,6 +778,8 @@ class EPCManagement(): ...@@ -689,6 +778,8 @@ class EPCManagement():
mySSH.command('docker cp ' + self.containerPrefix + '-oai-hss:/openair-hss/hss_check_run.log .', '\$', 60) mySSH.command('docker cp ' + self.containerPrefix + '-oai-hss:/openair-hss/hss_check_run.log .', '\$', 60)
mySSH.command('docker cp ' + self.containerPrefix + '-oai-hss:/tmp/hss_check_run.pcap .', '\$', 60) mySSH.command('docker cp ' + self.containerPrefix + '-oai-hss:/tmp/hss_check_run.pcap .', '\$', 60)
mySSH.command('zip hss.log.zip hss_check_run.*', '\$', 60) mySSH.command('zip hss.log.zip hss_check_run.*', '\$', 60)
elif re.match('OAICN5G', self.Type, re.IGNORECASE):
logging.debug('LogCollect is bypassed for that variant')
elif re.match('OAI', self.Type, re.IGNORECASE) or re.match('OAI-Rel14-CUPS', self.Type, re.IGNORECASE): elif re.match('OAI', self.Type, re.IGNORECASE) or re.match('OAI-Rel14-CUPS', self.Type, re.IGNORECASE):
mySSH.command('zip hss.log.zip hss*.log', '\$', 60) mySSH.command('zip hss.log.zip hss*.log', '\$', 60)
mySSH.command('echo ' + self.Password + ' | sudo -S rm hss*.log', '\$', 5) mySSH.command('echo ' + self.Password + ' | sudo -S rm hss*.log', '\$', 5)
...@@ -719,6 +810,11 @@ class EPCManagement(): ...@@ -719,6 +810,11 @@ class EPCManagement():
mySSH.command('docker cp ' + self.containerPrefix + '-oai-mme:/openair-mme/mme_check_run.log .', '\$', 60) mySSH.command('docker cp ' + self.containerPrefix + '-oai-mme:/openair-mme/mme_check_run.log .', '\$', 60)
mySSH.command('docker cp ' + self.containerPrefix + '-oai-mme:/tmp/mme_check_run.pcap .', '\$', 60) mySSH.command('docker cp ' + self.containerPrefix + '-oai-mme:/tmp/mme_check_run.pcap .', '\$', 60)
mySSH.command('zip mme.log.zip mme_check_run.*', '\$', 60) mySSH.command('zip mme.log.zip mme_check_run.*', '\$', 60)
elif re.match('OAICN5G', self.Type, re.IGNORECASE):
mySSH.command('cd ' + self.SourceCodePath + '/logs','\$', 5)
mySSH.command('cp -f /tmp/oai-cn5g.pcap .','\$', 30)
mySSH.command('zip mme.log.zip oai-amf.log oai-nrf.log oai-cn5g.pcap','\$', 30)
mySSH.command('mv mme.log.zip ' + self.SourceCodePath + '/scripts','\$', 30)
elif re.match('OAI', self.Type, re.IGNORECASE) or re.match('OAI-Rel14-CUPS', self.Type, re.IGNORECASE): elif re.match('OAI', self.Type, re.IGNORECASE) or re.match('OAI-Rel14-CUPS', self.Type, re.IGNORECASE):
mySSH.command('zip mme.log.zip mme*.log', '\$', 60) mySSH.command('zip mme.log.zip mme*.log', '\$', 60)
mySSH.command('echo ' + self.Password + ' | sudo -S rm mme*.log', '\$', 5) mySSH.command('echo ' + self.Password + ' | sudo -S rm mme*.log', '\$', 5)
...@@ -748,6 +844,10 @@ class EPCManagement(): ...@@ -748,6 +844,10 @@ class EPCManagement():
mySSH.command('docker cp ' + self.containerPrefix + '-oai-spgwc:/tmp/spgwc_check_run.pcap .', '\$', 60) mySSH.command('docker cp ' + self.containerPrefix + '-oai-spgwc:/tmp/spgwc_check_run.pcap .', '\$', 60)
mySSH.command('docker cp ' + self.containerPrefix + '-oai-spgwu-tiny:/tmp/spgwu_check_run.pcap .', '\$', 60) mySSH.command('docker cp ' + self.containerPrefix + '-oai-spgwu-tiny:/tmp/spgwu_check_run.pcap .', '\$', 60)
mySSH.command('zip spgw.log.zip spgw*_check_run.*', '\$', 60) mySSH.command('zip spgw.log.zip spgw*_check_run.*', '\$', 60)
elif re.match('OAICN5G', self.Type, re.IGNORECASE):
mySSH.command('cd ' + self.SourceCodePath + '/logs','\$', 5)
mySSH.command('zip spgw.log.zip oai-smf.log oai-spgwu.log','\$', 30)
mySSH.command('mv spgw.log.zip ' + self.SourceCodePath + '/scripts','\$', 30)
elif re.match('OAI', self.Type, re.IGNORECASE) or re.match('OAI-Rel14-CUPS', self.Type, re.IGNORECASE): elif re.match('OAI', self.Type, re.IGNORECASE) or re.match('OAI-Rel14-CUPS', self.Type, re.IGNORECASE):
mySSH.command('zip spgw.log.zip spgw*.log', '\$', 60) mySSH.command('zip spgw.log.zip spgw*.log', '\$', 60)
mySSH.command('echo ' + self.Password + ' | sudo -S rm spgw*.log', '\$', 5) mySSH.command('echo ' + self.Password + ' | sudo -S rm spgw*.log', '\$', 5)
...@@ -757,17 +857,3 @@ class EPCManagement(): ...@@ -757,17 +857,3 @@ class EPCManagement():
else: else:
logging.error('This option should not occur!') logging.error('This option should not occur!')
mySSH.close() mySSH.close()
def LogCollectOAICN5G(self):
mySSH = SSH.SSHConnection()
mySSH.open(self.IPAddress, self.UserName, self.Password)
logging.debug('OAI CN5G Collecting Log files to workspace')
mySSH.command('echo ' + self.Password + ' | sudo rm -rf ' + self.SourceCodePath + '/logs', '\$', 5)
mySSH.command('mkdir ' + self.SourceCodePath + '/logs','\$', 5)
containers_list=['oai-smf','oai-spgwu','oai-amf','oai-nrf']
for c in containers_list:
mySSH.command('docker logs ' + c + ' > ' + self.SourceCodePath + '/logs/' + c + '.log', '\$', 5)
mySSH.command('cd ' + self.SourceCodePath + '/logs', '\$', 5)
mySSH.command('zip oai-cn5g.log.zip *.log', '\$', 60)
mySSH.close()
...@@ -410,6 +410,17 @@ def GetParametersFromXML(action): ...@@ -410,6 +410,17 @@ def GetParametersFromXML(action):
if (string_field is not None): if (string_field is not None):
CONTAINERS.cliOptions = string_field CONTAINERS.cliOptions = string_field
elif action == 'Copy_Image_to_Test':
string_field = test.findtext('image_name')
if (string_field is not None):
CONTAINERS.imageToCopy = string_field
string_field = test.findtext('registry_svr_id')
if (string_field is not None):
CONTAINERS.registrySvrId = string_field
string_field = test.findtext('test_svr_id')
if (string_field is not None):
CONTAINERS.testSvrId = string_field
else: # ie action == 'Run_PhySim': else: # ie action == 'Run_PhySim':
ldpc.runargs = test.findtext('physim_run_args') ldpc.runargs = test.findtext('physim_run_args')
...@@ -734,6 +745,22 @@ elif re.match('^TesteNB$', mode, re.IGNORECASE) or re.match('^TestUE$', mode, re ...@@ -734,6 +745,22 @@ elif re.match('^TesteNB$', mode, re.IGNORECASE) or re.match('^TestUE$', mode, re
HTML.SethtmlUEConnected(len(CiTestObj.UEDevices) + len(CiTestObj.CatMDevices)) HTML.SethtmlUEConnected(len(CiTestObj.UEDevices) + len(CiTestObj.CatMDevices))
HTML.CreateHtmlTabHeader() HTML.CreateHtmlTabHeader()
# On CI bench w/ containers, we need to validate if IP routes are set
if EPC.IPAddress == '192.168.18.210':
CONTAINERS.CheckAndAddRoute('porcepix', EPC.IPAddress, EPC.UserName, EPC.Password)
if CONTAINERS.eNBIPAddress == '192.168.18.194':
CONTAINERS.CheckAndAddRoute('asterix', CONTAINERS.eNBIPAddress, CONTAINERS.eNBUserName, CONTAINERS.eNBPassword)
if CONTAINERS.eNB1IPAddress == '192.168.18.194':
CONTAINERS.CheckAndAddRoute('asterix', CONTAINERS.eNB1IPAddress, CONTAINERS.eNB1UserName, CONTAINERS.eNB1Password)
if CONTAINERS.eNBIPAddress == '192.168.18.193':
CONTAINERS.CheckAndAddRoute('obelix', CONTAINERS.eNBIPAddress, CONTAINERS.eNBUserName, CONTAINERS.eNBPassword)
if CONTAINERS.eNB1IPAddress == '192.168.18.193':
CONTAINERS.CheckAndAddRoute('obelix', CONTAINERS.eNB1IPAddress, CONTAINERS.eNB1UserName, CONTAINERS.eNB1Password)
if CONTAINERS.eNBIPAddress == '192.168.18.209':
CONTAINERS.CheckAndAddRoute('nepes', CONTAINERS.eNBIPAddress, CONTAINERS.eNBUserName, CONTAINERS.eNBPassword)
if CONTAINERS.eNB1IPAddress == '192.168.18.209':
CONTAINERS.CheckAndAddRoute('nepes', CONTAINERS.eNB1IPAddress, CONTAINERS.eNB1UserName, CONTAINERS.eNB1Password)
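CheckAndAddRoute itself lives in the containerization module and is not part of this diff; as a rough, assumption-laden sketch of what such a helper usually amounts to (the function name, route and gateway arguments here are hypothetical, only the SSHConnection API is taken from these scripts):

import sshconnection

def check_and_add_route_sketch(ipaddr, username, password, route, gateway):
    # Hypothetical helper: ensure a static route to the bench subnet exists, add it if missing
    ssh = sshconnection.SSHConnection()
    ssh.open(ipaddr, username, password)
    ssh.command('ip route show ' + route, '\$', 10)
    if ssh.getBefore().count(route) == 0:
        ssh.command('echo ' + password + ' | sudo -S ip route add ' + route + ' via ' + gateway, '\$', 10)
    ssh.close()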
CiTestObj.FailReportCnt = 0 CiTestObj.FailReportCnt = 0
RAN.prematureExit=True RAN.prematureExit=True
HTML.startTime=int(round(time.time() * 1000)) HTML.startTime=int(round(time.time() * 1000))
...@@ -853,6 +880,8 @@ elif re.match('^TesteNB$', mode, re.IGNORECASE) or re.match('^TestUE$', mode, re ...@@ -853,6 +880,8 @@ elif re.match('^TesteNB$', mode, re.IGNORECASE) or re.match('^TestUE$', mode, re
HTML=ldpc.Run_PhySim(HTML,CONST,id) HTML=ldpc.Run_PhySim(HTML,CONST,id)
elif action == 'Build_Image': elif action == 'Build_Image':
CONTAINERS.BuildImage(HTML) CONTAINERS.BuildImage(HTML)
elif action == 'Copy_Image_to_Test':
CONTAINERS.Copy_Image_to_Test_Server(HTML)
elif action == 'Deploy_Object': elif action == 'Deploy_Object':
CONTAINERS.DeployObject(HTML, EPC) CONTAINERS.DeployObject(HTML, EPC)
elif action == 'Undeploy_Object': elif action == 'Undeploy_Object':
......
...@@ -165,16 +165,16 @@ class RANManagement(): ...@@ -165,16 +165,16 @@ class RANManagement():
result = re.search('LAST_BUILD_INFO', mySSH.getBefore()) result = re.search('LAST_BUILD_INFO', mySSH.getBefore())
if result is not None: if result is not None:
mismatch = False mismatch = False
mySSH.command('grep SRC_COMMIT LAST_BUILD_INFO.txt', '\$', 2) mySSH.command('grep --colour=never SRC_COMMIT LAST_BUILD_INFO.txt', '\$', 2)
result = re.search(self.ranCommitID, mySSH.getBefore()) result = re.search(self.ranCommitID, mySSH.getBefore())
if result is None: if result is None:
mismatch = True mismatch = True
mySSH.command('grep MERGED_W_TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2) mySSH.command('grep --colour=never MERGED_W_TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
if (self.ranAllowMerge): if (self.ranAllowMerge):
result = re.search('YES', mySSH.getBefore()) result = re.search('YES', mySSH.getBefore())
if result is None: if result is None:
mismatch = True mismatch = True
mySSH.command('grep TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2) mySSH.command('grep --colour=never TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
if self.ranTargetBranch == '': if self.ranTargetBranch == '':
result = re.search('develop', mySSH.getBefore()) result = re.search('develop', mySSH.getBefore())
else: else:
...@@ -423,13 +423,13 @@ class RANManagement(): ...@@ -423,13 +423,13 @@ class RANManagement():
# do not reset board twice in IF4.5 case # do not reset board twice in IF4.5 case
result = re.search('^rru|^enb|^du.band', str(config_file)) result = re.search('^rru|^enb|^du.band', str(config_file))
if result is not None: if result is not None:
mySSH.command('echo ' + lPassWord + ' | sudo -S uhd_find_devices', '\$', 90) mySSH.command('echo ' + lPassWord + ' | sudo -S uhd_find_devices', '\$', 180)
result = re.search('type: b200', mySSH.getBefore()) result = re.search('type: b200', mySSH.getBefore())
if result is not None: if result is not None:
logging.debug('Found a B2xx device --> resetting it') logging.debug('Found a B2xx device --> resetting it')
mySSH.command('echo ' + lPassWord + ' | sudo -S b2xx_fx3_utils --reset-device', '\$', 10) mySSH.command('echo ' + lPassWord + ' | sudo -S b2xx_fx3_utils --reset-device', '\$', 10)
# Reloading FGPA bin firmware # Reloading FGPA bin firmware
mySSH.command('echo ' + lPassWord + ' | sudo -S uhd_find_devices', '\$', 90) mySSH.command('echo ' + lPassWord + ' | sudo -S uhd_find_devices', '\$', 180)
# Make a copy and adapt to EPC / eNB IP addresses # Make a copy and adapt to EPC / eNB IP addresses
mySSH.command('cp ' + full_config_file + ' ' + ci_full_config_file, '\$', 5) mySSH.command('cp ' + full_config_file + ' ' + ci_full_config_file, '\$', 5)
localMmeIpAddr = EPC.MmeIPAddress localMmeIpAddr = EPC.MmeIPAddress
...@@ -446,7 +446,7 @@ class RANManagement(): ...@@ -446,7 +446,7 @@ class RANManagement():
else: else:
mySSH.command('sed -i -e \'s/FLEXRAN_ENABLED.*;/FLEXRAN_ENABLED = "no";/\' ' + ci_full_config_file, '\$', 2); mySSH.command('sed -i -e \'s/FLEXRAN_ENABLED.*;/FLEXRAN_ENABLED = "no";/\' ' + ci_full_config_file, '\$', 2);
self.eNBmbmsEnables[int(self.eNB_instance)] = False self.eNBmbmsEnables[int(self.eNB_instance)] = False
mySSH.command('grep enable_enb_m2 ' + ci_full_config_file, '\$', 2); mySSH.command('grep --colour=never enable_enb_m2 ' + ci_full_config_file, '\$', 2);
result = re.search('yes', mySSH.getBefore()) result = re.search('yes', mySSH.getBefore())
if result is not None: if result is not None:
self.eNBmbmsEnables[int(self.eNB_instance)] = True self.eNBmbmsEnables[int(self.eNB_instance)] = True
...@@ -593,8 +593,12 @@ class RANManagement(): ...@@ -593,8 +593,12 @@ class RANManagement():
lPassWord = self.eNBPassword lPassWord = self.eNBPassword
mySSH = SSH.SSHConnection() mySSH = SSH.SSHConnection()
mySSH.open(lIpAddr, lUserName, lPassWord) mySSH.open(lIpAddr, lUserName, lPassWord)
mySSH.command('stdbuf -o0 ps -aux | grep --color=never ' + self.air_interface[self.eNB_instance] + ' | grep -v grep', '\$', 5) if self.air_interface[self.eNB_instance] == '':
result = re.search(self.air_interface[self.eNB_instance], mySSH.getBefore()) pattern = 'softmodem'
else:
pattern = self.air_interface[self.eNB_instance]
mySSH.command('stdbuf -o0 ps -aux | grep --color=never ' + pattern + ' | grep -v grep', '\$', 5)
result = re.search(pattern, mySSH.getBefore())
if result is None: if result is None:
logging.debug('\u001B[1;37;41m eNB Process Not Found! \u001B[0m') logging.debug('\u001B[1;37;41m eNB Process Not Found! \u001B[0m')
status_queue.put(CONST.ENB_PROCESS_FAILED) status_queue.put(CONST.ENB_PROCESS_FAILED)
...@@ -734,8 +738,8 @@ class RANManagement(): ...@@ -734,8 +738,8 @@ class RANManagement():
mySSH.command('echo ' + self.eNBPassword + ' | sudo -S mv /tmp/enb_*.pcap .','\$',20) mySSH.command('echo ' + self.eNBPassword + ' | sudo -S mv /tmp/enb_*.pcap .','\$',20)
mySSH.command('echo ' + self.eNBPassword + ' | sudo -S mv /tmp/gnb_*.pcap .','\$',20) mySSH.command('echo ' + self.eNBPassword + ' | sudo -S mv /tmp/gnb_*.pcap .','\$',20)
mySSH.command('echo ' + self.eNBPassword + ' | sudo -S rm -f enb.log.zip', '\$', 5) mySSH.command('echo ' + self.eNBPassword + ' | sudo -S rm -f enb.log.zip', '\$', 5)
mySSH.command('echo ' + self.eNBPassword + ' | sudo -S zip enb.log.zip enb*.log core* enb_*record.raw enb_*.pcap gnb_*.pcap enb_*txt physim_*.log *stats.log *monitor.pickle *monitor.png', '\$', 60) mySSH.command('echo ' + self.eNBPassword + ' | sudo -S zip enb.log.zip enb*.log core* enb_*record.raw enb_*.pcap gnb_*.pcap enb_*txt physim_*.log *stats.log *monitor.pickle *monitor*.png', '\$', 60)
mySSH.command('echo ' + self.eNBPassword + ' | sudo -S rm enb*.log core* enb_*record.raw enb_*.pcap gnb_*.pcap enb_*txt physim_*.log *stats.log *.pickle *.png', '\$', 5) mySSH.command('echo ' + self.eNBPassword + ' | sudo -S rm enb*.log core* enb_*record.raw enb_*.pcap gnb_*.pcap enb_*txt physim_*.log *stats.log *monitor.pickle *monitor*.png', '\$', 5)
mySSH.close() mySSH.close()
def AnalyzeLogFile_eNB(self, eNBlogFile, HTML): def AnalyzeLogFile_eNB(self, eNBlogFile, HTML):
...@@ -792,23 +796,11 @@ class RANManagement(): ...@@ -792,23 +796,11 @@ class RANManagement():
pb_receiving_samples_cnt = 0 pb_receiving_samples_cnt = 0
#count "removing UE" msg #count "removing UE" msg
removing_ue = 0 removing_ue = 0
#count"X2AP-PDU"
x2ap_pdu = 0
#NSA specific log markers #NSA specific log markers
nsa_markers ={'SgNBReleaseRequestAcknowledge': [],'FAILURE': [], 'scgFailureInformationNR-r15': [], 'SgNBReleaseRequest': []} nsa_markers ={'SgNBReleaseRequestAcknowledge': [],'FAILURE': [], 'scgFailureInformationNR-r15': [], 'SgNBReleaseRequest': []}
#the datalog config file has to be loaded
datalog_rt_stats_file='datalog_rt_stats.yaml'
if (os.path.isfile(datalog_rt_stats_file)):
yaml_file=datalog_rt_stats_file
elif (os.path.isfile('ci-scripts/'+datalog_rt_stats_file)):
yaml_file='ci-scripts/'+datalog_rt_stats_file
else:
logging.error("Datalog RT stats yaml file cannot be found")
sys.exit("Datalog RT stats yaml file cannot be found")
with open(yaml_file,'r') as f:
datalog_rt_stats = yaml.load(f,Loader=yaml.FullLoader)
rt_keys = datalog_rt_stats['Ref'] #we use the keys from the Ref field
line_cnt=0 #log file line counter line_cnt=0 #log file line counter
for line in enb_log_file.readlines(): for line in enb_log_file.readlines():
line_cnt+=1 line_cnt+=1
...@@ -975,15 +967,7 @@ class RANManagement(): ...@@ -975,15 +967,7 @@ class RANManagement():
if result is not None: if result is not None:
#remove 1- all useless char before relevant info (ulsch or dlsch) 2- trailing char #remove 1- all useless char before relevant info (ulsch or dlsch) 2- trailing char
dlsch_ulsch_stats[k]=re.sub(r'^.*\]\s+', r'' , line.rstrip()) dlsch_ulsch_stats[k]=re.sub(r'^.*\]\s+', r'' , line.rstrip())
#real time statistics for gNB
for k in rt_keys:
result = re.search(k, line)
if result is not None:
#remove 1- all useless char before relevant info 2- trailing char
line=line.replace('[0m','')
tmp=re.match(rf'^.*?(\b{k}\b.*)',line.rstrip()) #from python 3.6 we can use literal string interpolation for the variable k, using rf' in the regex
if tmp!=None: #with ULULULUULULULLLL at the head of the line, we skip it
real_time_stats[k]=tmp.group(1)
#count "problem receiving samples" msg #count "problem receiving samples" msg
result = re.search('\[PHY\]\s+problem receiving samples', str(line)) result = re.search('\[PHY\]\s+problem receiving samples', str(line))
...@@ -993,7 +977,10 @@ class RANManagement(): ...@@ -993,7 +977,10 @@ class RANManagement():
result = re.search('\[MAC\]\s+Removing UE', str(line)) result = re.search('\[MAC\]\s+Removing UE', str(line))
if result is not None: if result is not None:
removing_ue += 1 removing_ue += 1
#count "X2AP-PDU"
result = re.search('X2AP-PDU', str(line))
if result is not None:
x2ap_pdu += 1
#nsa markers logging #nsa markers logging
for k in nsa_markers: for k in nsa_markers:
result = re.search(k, line) result = re.search(k, line)
...@@ -1001,7 +988,55 @@ class RANManagement(): ...@@ -1001,7 +988,55 @@ class RANManagement():
nsa_markers[k].append(line_cnt) nsa_markers[k].append(line_cnt)
enb_log_file.close() enb_log_file.close()
logging.debug(' File analysis completed')
#the following part takes the *_stats.log files as source (not the stdout log file)
#the datalog config file has to be loaded
datalog_rt_stats_file='datalog_rt_stats.yaml'
if (os.path.isfile(datalog_rt_stats_file)):
yaml_file=datalog_rt_stats_file
elif (os.path.isfile('ci-scripts/'+datalog_rt_stats_file)):
yaml_file='ci-scripts/'+datalog_rt_stats_file
else:
logging.error("Datalog RT stats yaml file cannot be found")
sys.exit("Datalog RT stats yaml file cannot be found")
with open(yaml_file,'r') as f:
datalog_rt_stats = yaml.load(f,Loader=yaml.FullLoader)
rt_keys = datalog_rt_stats['Ref'] #we use the keys from the Ref field
if (os.path.isfile('./nrL1_stats.log')) and (os.path.isfile('./nrMAC_stats.log')):
stat_files_present=True
else:
stat_files_present=False
logging.debug("NR Stats files for RT analysis not found")
if stat_files_present:
nrL1_stats = open('./nrL1_stats.log', 'r')
nrMAC_stats = open('./nrMAC_stats.log', 'r')
for line in nrL1_stats.readlines():
for k in rt_keys:
result = re.search(k, line)
if result is not None:
#remove 1- all useless char before relevant info 2- trailing char
tmp=re.match(rf'^.*?(\b{k}\b.*)',line.rstrip()) #from python 3.6 we can use literal string interpolation for the variable k, using rf' in the regex
if tmp!=None:
real_time_stats[k]=tmp.group(1)
for line in nrMAC_stats.readlines():
for k in rt_keys:
result = re.search(k, line)
if result is not None:
#remove 1- all useless char before relevant info 2- trailing char
tmp=re.match(rf'^.*?(\b{k}\b.*)',line.rstrip()) #from python 3.6 we can use literal string interpolation for the variable k, using rf' in the regex
if tmp!=None:
real_time_stats[k]=tmp.group(1)
nrL1_stats.close()
nrMAC_stats.close()
#stdout log file and stat log files analysis completed
logging.debug(' File analysis (stdout, stats) completed')
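The rf-string match used on both stats files anchors on the metric key and keeps everything from the key onwards, which drops any log prefix; a minimal sketch with an illustrative nrL1_stats.log line:

import re

k = 'feprx'
line = '[PHY] feprx: 45.30 us; 1024; 60.10 us;'  # illustrative stats-log line
tmp = re.match(rf'^.*?(\b{k}\b.*)', line.rstrip())
if tmp is not None:
    print(tmp.group(1))  # -> feprx: 45.30 us; 1024; 60.10 us;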
#post processing depending on the node type
if (self.air_interface[self.eNB_instance] == 'lte-softmodem') or (self.air_interface[self.eNB_instance] == 'ocp-enb'): if (self.air_interface[self.eNB_instance] == 'lte-softmodem') or (self.air_interface[self.eNB_instance] == 'ocp-enb'):
nodeB_prefix = 'e' nodeB_prefix = 'e'
else: else:
...@@ -1087,6 +1122,11 @@ class RANManagement(): ...@@ -1087,6 +1122,11 @@ class RANManagement():
htmlMsg = statMsg+'\n' htmlMsg = statMsg+'\n'
logging.debug(statMsg) logging.debug(statMsg)
htmleNBFailureMsg += htmlMsg htmleNBFailureMsg += htmlMsg
#X2AP-PDU log
statMsg = 'X2AP-PDU msg count = '+str(x2ap_pdu)
htmlMsg = statMsg+'\n'
logging.debug(statMsg)
htmleNBFailureMsg += htmlMsg
#nsa markers #nsa markers
statMsg = 'logfile line count = ' + str(line_cnt) statMsg = 'logfile line count = ' + str(line_cnt)
htmlMsg = statMsg+'\n' htmlMsg = statMsg+'\n'
......
...@@ -60,7 +60,8 @@ class SSHConnection(): ...@@ -60,7 +60,8 @@ class SSHConnection():
connect_status = False connect_status = False
while count < 4: while count < 4:
self.ssh = pexpect.spawn('ssh -o PubkeyAuthentication=no {}@{}'.format(username,ipaddress)) self.ssh = pexpect.spawn('ssh -o PubkeyAuthentication=no {}@{}'.format(username,ipaddress))
self.ssh.timeout = 5 # Longer timeout at connection due to asterix slowness
self.ssh.timeout = 25
self.sshresponse = self.ssh.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', 'Last login', pexpect.EOF, pexpect.TIMEOUT]) self.sshresponse = self.ssh.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', 'Last login', pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0: if self.sshresponse == 0:
self.ssh.sendline('yes') self.ssh.sendline('yes')
......
...@@ -17,9 +17,15 @@ class StatMonitor(): ...@@ -17,9 +17,15 @@ class StatMonitor():
def __init__(self,cfg_file): def __init__(self,cfg_file):
with open(cfg_file,'r') as file: with open(cfg_file,'r') as file:
self.d = yaml.load(file) self.d = yaml.load(file)
for node in self.d: for node in self.d:#so far we have enb or gnb as nodes
for metric in self.d[node]: for metric_l1 in self.d[node]: #first level of metric keys
self.d[node][metric]=[] if metric_l1!="graph": #graph is a reserved word to configure graph paging, so it is disregarded
if self.d[node][metric_l1] is None:#first level is None -> create array
self.d[node][metric_l1]=[]
else: #first level is not None -> there is a second level -> create array
for metric_l2 in self.d[node][metric_l1]:
self.d[node][metric_l1][metric_l2]=[]
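With the two-level datalog layout introduced later in this MR (see the updated yaml), the constructor above initializes every leaf metric to an empty list and leaves the reserved graph section untouched; a minimal sketch on a trimmed, illustrative config:

import yaml

cfg = yaml.safe_load('''
gnb:
  dlsch_err:
  rt:
    feprx:
  graph:
    page1:
      dlsch_err:
''')
for node in cfg:
    for metric_l1 in cfg[node]:
        if metric_l1 != "graph":
            if cfg[node][metric_l1] is None:
                cfg[node][metric_l1] = []        # one-level metric
            else:
                for metric_l2 in cfg[node][metric_l1]:
                    cfg[node][metric_l1][metric_l2] = []  # two-level metric
print(cfg['gnb'])  # -> {'dlsch_err': [], 'rt': {'feprx': []}, 'graph': {'page1': {'dlsch_err': None}}}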
def process_gnb (self,node_type,output): def process_gnb (self,node_type,output):
...@@ -36,6 +42,11 @@ class StatMonitor(): ...@@ -36,6 +42,11 @@ class StatMonitor():
percentage=float(result.group(2))/float(result.group(1)) percentage=float(result.group(2))/float(result.group(1))
self.d[node_type]['ulsch_err_perc_round_1'].append(percentage) self.d[node_type]['ulsch_err_perc_round_1'].append(percentage)
for k in self.d[node_type]['rt']:
result=re.match(rf'^.*\b{k}\b:\s+([0-9\.]+) us;\s+([0-9]+);\s+([0-9\.]+) us;',tmp)
if result is not None:
self.d[node_type]['rt'][k].append(float(result.group(3)))
def process_enb (self,node_type,output): def process_enb (self,node_type,output):
for line in output: for line in output:
...@@ -62,23 +73,37 @@ class StatMonitor(): ...@@ -62,23 +73,37 @@ class StatMonitor():
def graph(self,node_type): def graph(self,node_type):
col = 1 for page in self.d[node_type]['graph']:#work out a set of graphs per page
figure, axis = plt.subplots(len(self.d[node_type]), col ,figsize=(10, 10)) col = 1
i=0 figure, axis = plt.subplots(len(self.d[node_type]['graph'][page]), col ,figsize=(10, 10))
for metric in self.d[node_type]: i=0
major_ticks = np.arange(0, len(self.d[node_type][metric])+1, 1) for m in self.d[node_type]['graph'][page]:#metric may refer to 1 level or 2 levels
axis[i].set_xticks(major_ticks) metric_path=m.split('.')
axis[i].set_xticklabels([]) if len(metric_path)==1:#1 level
axis[i].plot(self.d[node_type][metric],marker='o') metric_l1=metric_path[0]
axis[i].set_xlabel('time') major_ticks = np.arange(0, len(self.d[node_type][metric_l1])+1, 1)
axis[i].set_ylabel(metric) axis[i].set_xticks(major_ticks)
axis[i].set_title(metric) axis[i].set_xticklabels([])
i+=1 axis[i].plot(self.d[node_type][metric_l1],marker='o')
axis[i].set_xlabel('time')
plt.tight_layout() axis[i].set_ylabel(metric_l1)
# Combine all the operations and display axis[i].set_title(metric_l1)
plt.savefig(node_type+'_stats_monitor.png')
plt.show() else:#2 levels
metric_l1=metric_path[0]
metric_l2=metric_path[1]
major_ticks = np.arange(0, len(self.d[node_type][metric_l1][metric_l2])+1, 1)
axis[i].set_xticks(major_ticks)
axis[i].set_xticklabels([])
axis[i].plot(self.d[node_type][metric_l1][metric_l2],marker='o')
axis[i].set_xlabel('time')
axis[i].set_ylabel(metric_l2)
axis[i].set_title(metric_l2)
i+=1
plt.tight_layout()
#save as png
plt.savefig(node_type+'_stats_monitor_'+page+'.png')
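Page entries address second-level metrics with a dot (e.g. rt.feprx), which the split('.') above resolves into one or two dictionary lookups; a minimal sketch with illustrative data:

d = {'gnb': {'dlsch_err': [1, 2], 'rt': {'feprx': [45.3, 44.9]}}}
for m in ['dlsch_err', 'rt.feprx']:
    metric_path = m.split('.')
    if len(metric_path) == 1:      # one-level metric
        series = d['gnb'][metric_path[0]]
    else:                          # two-level metric, e.g. under 'rt'
        series = d['gnb'][metric_path[0]][metric_path[1]]
    print(m, series)  # the series is what gets plotted per subplot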
if __name__ == "__main__": if __name__ == "__main__":
...@@ -88,7 +113,7 @@ if __name__ == "__main__": ...@@ -88,7 +113,7 @@ if __name__ == "__main__":
mon=StatMonitor(cfg_filename) mon=StatMonitor(cfg_filename)
#collect stats until the modem process is stopped #collect stats until the modem process is stopped
CMD='ps aux | grep mode | grep -v grep' CMD='ps aux | grep modem | grep -v grep'
process=subprocess.Popen(CMD, shell=True, stdout=subprocess.PIPE) process=subprocess.Popen(CMD, shell=True, stdout=subprocess.PIPE)
output = process.stdout.readlines() output = process.stdout.readlines()
while len(output)!=0 : while len(output)!=0 :
......
...@@ -2,10 +2,50 @@ enb : ...@@ -2,10 +2,50 @@ enb :
PHR: PHR:
bler: bler:
mcsoff: mcsoff:
mcs: mcs:
graph:
page1:
PHR:
bler:
mcsoff:
mcs:
gnb : gnb :
dlsch_err: dlsch_err:
dlsch_err_perc_round_1: dlsch_err_perc_round_1:
ulsch_err: ulsch_err:
ulsch_err_perc_round_1: ulsch_err_perc_round_1:
\ No newline at end of file rt :
feprx:
feptx_prec:
feptx_ofdm:
feptx_total:
L1 Tx processing thread 0:
L1 Tx processing thread 1:
DLSCH encoding:
L1 Rx processing:
PUSCH inner-receiver:
PUSCH decoding:
DL & UL scheduling timing stats:
UL Indication:
graph :
page1:
dlsch_err:
dlsch_err_perc_round_1:
ulsch_err:
ulsch_err_perc_round_1:
page2:
rt.feprx:
rt.feptx_prec:
rt.feptx_ofdm:
rt.feptx_total:
page3:
rt.L1 Tx processing thread 0:
rt.L1 Tx processing thread 1:
rt.DLSCH encoding:
rt.L1 Rx processing:
page4:
rt.PUSCH inner-receiver:
rt.PUSCH decoding:
rt.DL & UL scheduling timing stats:
rt.UL Indication:
\ No newline at end of file
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
- IdleSleep - IdleSleep
- Perform_X2_Handover - Perform_X2_Handover
- Build_Image - Build_Image
- Copy_Image_to_Test
- Deploy_Object - Deploy_Object
- Undeploy_Object - Undeploy_Object
- Cppcheck_Analysis - Cppcheck_Analysis
......
<!--
Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The OpenAirInterface Software Alliance licenses this file to You under
the OAI Public License, Version 1.1 (the "License"); you may not use this file
except in compliance with the License.
You may obtain a copy of the License at
http://www.openairinterface.org/?page_id=698
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information about the OpenAirInterface (OAI) Software Alliance:
contact@openairinterface.org
-->
<testCaseList>
<htmlTabRef>TEST-NSA-FR1-TM1-B200</htmlTabRef>
<htmlTabName>NSA SanityCheck with QUECTEL</htmlTabName>
<htmlTabIcon>tasks</htmlTabIcon>
<repeatCount>1</repeatCount>
<TestCaseRequestedList>
000001
010002
030000
030101
000001
030102
000001
010000
000001
050000
050001
070000
070001
010002
000001
030202
030201
</TestCaseRequestedList>
<TestCaseExclusionList></TestCaseExclusionList>
<testCase id="010000">
<class>Initialize_UE</class>
<desc>Initialize Quectel</desc>
<id>idefix</id>
<UE_Trace>yes</UE_Trace>
</testCase>
<testCase id="010002">
<class>Terminate_UE</class>
<desc>Terminate Quectel</desc>
<id>idefix</id>
</testCase>
<testCase id="030000">
<class>Copy_Image_to_Test</class>
<desc>Copy gNB image to test server</desc>
<image_name>oai-gnb</image_name>
<registry_svr_id>0</registry_svr_id>
<test_svr_id>1</test_svr_id>
</testCase>
<testCase id="030101">
<class>Deploy_Object</class>
<desc>Deploy eNB (FDD/Band7/5MHz/B200) in a container</desc>
<yaml_path>ci-scripts/yaml_files/nsa_b200_enb</yaml_path>
<eNB_instance>0</eNB_instance>
<eNB_serverId>0</eNB_serverId>
</testCase>
<testCase id="030102">
<class>Deploy_Object</class>
<desc>Deploy gNB (TDD/Band78/40MHz/B200) in a container</desc>
<yaml_path>ci-scripts/yaml_files/nsa_b200_gnb</yaml_path>
<eNB_instance>1</eNB_instance>
<eNB_serverId>1</eNB_serverId>
</testCase>
<testCase id="000001">
<class>IdleSleep</class>
<desc>Sleep</desc>
<idle_sleep_time_in_sec>5</idle_sleep_time_in_sec>
</testCase>
<testCase id="000002">
<class>IdleSleep</class>
<desc>Sleep</desc>
<idle_sleep_time_in_sec>20</idle_sleep_time_in_sec>
</testCase>
<testCase id="050000">
<class>Ping</class>
<desc>Ping: 20pings in 20sec</desc>
<id>idefix</id>
<ping_args>-c 20</ping_args>
<ping_packetloss_threshold>1</ping_packetloss_threshold>
</testCase>
<testCase id="050001">
<class>Ping</class>
<desc>Ping: 100pings in 20sec</desc>
<id>idefix</id>
<ping_args>-c 100 -i 0.2</ping_args>
<ping_packetloss_threshold>1</ping_packetloss_threshold>
</testCase>
<testCase id="070000">
<class>Iperf</class>
<desc>iperf (DL/20Mbps/UDP)(60 sec)(single-ue profile)</desc>
<iperf_args>-u -b 20M -t 60</iperf_args>
<direction>DL</direction>
<id>idefix</id>
<iperf_packetloss_threshold>3</iperf_packetloss_threshold>
<iperf_profile>single-ue</iperf_profile>
</testCase>
<testCase id="070001">
<class>Iperf</class>
<desc>iperf (UL/2Mbps/UDP)(60 sec)(single-ue profile)</desc>
<iperf_args>-u -b 2M -t 60</iperf_args>
<direction>UL</direction>
<id>idefix</id>
<iperf_packetloss_threshold>1</iperf_packetloss_threshold>
<iperf_profile>single-ue</iperf_profile>
</testCase>
<testCase id="030201">
<class>Undeploy_Object</class>
<desc>Undeploy eNB</desc>
<yaml_path>ci-scripts/yaml_files/nsa_b200_enb</yaml_path>
<eNB_instance>0</eNB_instance>
<eNB_serverId>0</eNB_serverId>
</testCase>
<testCase id="030202">
<class>Undeploy_Object</class>
<desc>Undeploy gNB</desc>
<yaml_path>ci-scripts/yaml_files/nsa_b200_gnb</yaml_path>
<eNB_instance>1</eNB_instance>
<eNB_serverId>1</eNB_serverId>
</testCase>
</testCaseList>
...@@ -113,8 +113,8 @@ ...@@ -113,8 +113,8 @@
<testCase id="070000"> <testCase id="070000">
<class>Iperf</class> <class>Iperf</class>
<desc>iperf (DL/20Mbps/UDP)(60 sec)(single-ue profile)</desc> <desc>iperf (DL/60Mbps/UDP)(60 sec)(single-ue profile)</desc>
<iperf_args>-u -b 20M -t 60</iperf_args> <iperf_args>-u -b 60M -t 60</iperf_args>
<direction>DL</direction> <direction>DL</direction>
<id>nrmodule2_quectel</id> <id>nrmodule2_quectel</id>
<iperf_packetloss_threshold>5</iperf_packetloss_threshold> <iperf_packetloss_threshold>5</iperf_packetloss_threshold>
......
...@@ -110,8 +110,8 @@ ...@@ -110,8 +110,8 @@
<testCase id="070000"> <testCase id="070000">
<class>Iperf</class> <class>Iperf</class>
<desc>iperf (DL/20Mbps/UDP)(60 sec)(single-ue profile)</desc> <desc>iperf (DL/60Mbps/UDP)(60 sec)(single-ue profile)</desc>
<iperf_args>-u -b 20M -t 60</iperf_args> <iperf_args>-u -b 60M -t 60</iperf_args>
<direction>DL</direction> <direction>DL</direction>
<id>idefix</id> <id>idefix</id>
<iperf_packetloss_threshold>3</iperf_packetloss_threshold> <iperf_packetloss_threshold>3</iperf_packetloss_threshold>
......
...@@ -98,8 +98,8 @@ ...@@ -98,8 +98,8 @@
<testCase id="070000"> <testCase id="070000">
<class>Iperf</class> <class>Iperf</class>
<desc>iperf (DL/20Mbps/UDP)(60 sec)(single-ue profile)</desc> <desc>iperf (DL/60Mbps/UDP)(60 sec)(single-ue profile)</desc>
<iperf_args>-u -b 20M -t 60</iperf_args> <iperf_args>-u -b 60M -t 60</iperf_args>
<direction>DL</direction> <direction>DL</direction>
<id>nrmodule2_quectel</id> <id>nrmodule2_quectel</id>
<iperf_packetloss_threshold>5</iperf_packetloss_threshold> <iperf_packetloss_threshold>5</iperf_packetloss_threshold>
......
...@@ -111,6 +111,9 @@ services: ...@@ -111,6 +111,9 @@ services:
TAC_LB_SGW_TEST_0: '03' TAC_LB_SGW_TEST_0: '03'
TAC_HB_SGW_TEST_0: '00' TAC_HB_SGW_TEST_0: '00'
SGW_IPV4_ADDRESS_FOR_S11_TEST_0: 0.0.0.0 SGW_IPV4_ADDRESS_FOR_S11_TEST_0: 0.0.0.0
volumes:
- ./mme.conf:/openair-mme/etc/mme.conf
- ./entrypoint.sh:/openair-mme/bin/entrypoint.sh
healthcheck: healthcheck:
test: /bin/bash -c "pgrep oai_mme" test: /bin/bash -c "pgrep oai_mme"
interval: 10s interval: 10s
......
#!/bin/bash
set -euo pipefail
# First see if all interfaces are up
ifconfig
# S10 might be on loopback --> needs bring-up
if [[ "$MME_INTERFACE_NAME_FOR_S10" == *"lo:"* ]]
then
ifconfig ${MME_INTERFACE_NAME_FOR_S10} ${MME_IPV4_ADDRESS_FOR_S10} up
fi
LIST_OF_NETWORKS=`ifconfig -s | egrep -v "^Iface|^lo" | cut -d' ' -f1`
for if_name in $LIST_OF_NETWORKS
do
IF_IP_ADDR=`ifconfig $if_name | grep inet | sed -e "s# *inet#inet#" | cut -d' ' -f2`
if [[ "${IF_IP_ADDR}" == "${MME_IPV4_ADDRESS_FOR_S1_MME}" ]]; then
echo "S1C is on $if_name"
MME_INTERFACE_NAME_FOR_S1_MME=$if_name
fi
if [[ "${IF_IP_ADDR}" == "${MME_IPV4_ADDRESS_FOR_S10}" ]]; then
echo "S10 is on $if_name"
MME_INTERFACE_NAME_FOR_S10=$if_name
fi
if [[ "${IF_IP_ADDR}" == "${MME_IPV4_ADDRESS_FOR_S11}" ]]; then
echo "S11 is on $if_name"
MME_INTERFACE_NAME_FOR_S11=$if_name
fi
done
CONFIG_DIR="/openair-mme/etc"
for c in ${CONFIG_DIR}/mme_fd.conf; do
#echo "entrypoint.sh process config file $c"
sed -i -e "s#@TAC-LB#@TAC_LB#" -e "s#TAC-HB_#TAC_HB_#" ${c}
# grep variable names (format: @VAR@) from the template to be rendered
VARS=$(grep -oP '@[a-zA-Z0-9_]+@' ${c} | sort | uniq | xargs)
#echo "entrypoint.sh process vars $VARS"
# create sed expressions for substituting each occurrence of @VAR@
# with the value of the environment variable "VAR"
EXPRESSIONS=""
for v in ${VARS}; do
#echo "var is $v"
NEW_VAR=`echo $v | sed -e "s#@##g"`
#echo "NEW_VAR is $NEW_VAR"
if [[ "${!NEW_VAR}x" == "x" ]]; then
echo "Error: Environment variable '${NEW_VAR}' is not set." \
"Config file '$(basename $c)' requires all of $VARS."
exit 1
fi
# Some fields require CIDR format
if [[ "${NEW_VAR}" == "MME_IPV4_ADDRESS_FOR_S1_MME" ]] || \
[[ "${NEW_VAR}" == "MME_IPV4_ADDRESS_FOR_S11" ]] || \
[[ "${NEW_VAR}" == "MME_IPV4_ADDRESS_FOR_S10" ]]; then
EXPRESSIONS="${EXPRESSIONS};s|${v}|${!NEW_VAR}/24|g"
else
EXPRESSIONS="${EXPRESSIONS};s|${v}|${!NEW_VAR}|g"
fi
done
EXPRESSIONS="${EXPRESSIONS#';'}"
# render template and inline replace config file
sed -i "${EXPRESSIONS}" ${c}
done
pushd /openair-mme/scripts
./check_mme_s6a_certificate ${PREFIX} ${MME_FQDN}
popd
exec "$@"
MME :
{
REALM = "openairinterface.org"; # YOUR REALM HERE
INSTANCE = 1; # 0 is the default
PID_DIRECTORY = "/var/run"; # /var/run is the default
MAX_S1_ENB = 64;
MAX_UE = 4096;
RELATIVE_CAPACITY = 10;
EMERGENCY_ATTACH_SUPPORTED = "no";
UNAUTHENTICATED_IMSI_SUPPORTED = "no";
DUMMY_HANDOVER_FORWARDING_ENABLED = "yes";
EPS_NETWORK_FEATURE_SUPPORT_IMS_VOICE_OVER_PS_SESSION_IN_S1 = "no"; # DO NOT CHANGE
EPS_NETWORK_FEATURE_SUPPORT_EMERGENCY_BEARER_SERVICES_IN_S1_MODE = "no"; # DO NOT CHANGE
EPS_NETWORK_FEATURE_SUPPORT_LOCATION_SERVICES_VIA_EPC = "no"; # DO NOT CHANGE
EPS_NETWORK_FEATURE_SUPPORT_EXTENDED_SERVICE_REQUEST = "no"; # DO NOT CHANGE
# Interval for displaying statistics about the whole system (expressed in seconds)
MME_STATISTIC_TIMER = 10;
MME_MOBILITY_COMPLETION_TIMER = 2; # Amount of time in seconds the source MME waits to release resources after HANDOVER/TAU is complete (with or without optimization).
MME_S10_HANDOVER_COMPLETION_TIMER = 2; # Amount of time in seconds the target MME waits to check if a handover/TAU process has completed successfully.
IP_CAPABILITY = "IPV4V6";
INTERTASK_INTERFACE :
{
ITTI_QUEUE_SIZE = 2000000;
};
S6A :
{
S6A_CONF = "/openair-mme/etc/mme_fd.conf";
HSS_HOSTNAME = "hss.openairinterface.org"; # THE HSS FQDN ex: hss.epc.mnc001.mcc001.3gppnetwork.org
HSS_REALM = "openairinterface.org"; # THE HSS REALM ex: epc.mnc001.mcc001.3gppnetwork.org
};
SCTP :
{
SCTP_INSTREAMS = 8;
SCTP_OUTSTREAMS = 8;
};
S1AP :
{
S1AP_OUTCOME_TIMER = 10;
};
GUMMEI_LIST = (
{MCC="208" ; MNC="97"; MME_GID="32768" ; MME_CODE="3"; } # YOUR GUMMEI CONFIG HERE
);
TAI_LIST = (
{MCC="208" ; MNC="97"; TAC = "1"; }, # YOUR TAI CONFIG HERE
{MCC="208" ; MNC="97"; TAC = "2"; }, # YOUR TAI CONFIG HERE
{MCC="208" ; MNC="97"; TAC = "3"; } # YOUR TAI CONFIG HERE
);
NAS :
{
ORDERED_SUPPORTED_INTEGRITY_ALGORITHM_LIST = [ "EIA2" , "EIA1" , "EIA0" ];
ORDERED_SUPPORTED_CIPHERING_ALGORITHM_LIST = [ "EEA0" , "EEA1" , "EEA2" ];
T3402 = 12;
T3412 = 0;
T3422 = 6;
T3450 = 6;
T3460 = 6;
T3470 = 6;
T3485 = 3;
T3486 = 3;
T3489 = 4;
T3495 = 3;
NAS_FORCE_TAU = 0;
STRICT_FILLER_BITS_CHECK = "yes";
};
NETWORK_INTERFACES :
{
# MME bound interface for S1-C or S1-MME communication (S1AP); can be an ethernet or virtual ethernet interface, wireless interfaces are not advised
MME_INTERFACE_NAME_FOR_S1_MME = "eth0"; # YOUR NETWORK CONFIG HERE
MME_IPV4_ADDRESS_FOR_S1_MME = "192.168.61.195/24"; # CIDR, YOUR NETWORK CONFIG HERE
# MME_IPV6_ADDRESS_FOR_S1_MME = "fd00::191/118"; # YOUR NETWORK CONFIG HERE
# MME bound interface for S11 communication (GTPV2-C)
MME_INTERFACE_NAME_FOR_S11 = "eth0"; # YOUR NETWORK CONFIG HERE
MME_IPV4_ADDRESS_FOR_S11 = "192.168.61.195/24"; # CIDR, YOUR NETWORK CONFIG HERE
# MME_IPV6_ADDRESS_FOR_S11 = "fd00:0:0:4::191/64";
MME_PORT_FOR_S11 = 2123; # YOUR NETWORK CONFIG HERE
#S10 Interface
MME_INTERFACE_NAME_FOR_S10 = "lo"; # YOUR NETWORK CONFIG HERE
MME_IPV4_ADDRESS_FOR_S10 = "127.0.0.10/24"; # CIDR, YOUR NETWORK CONFIG HERE
# MME_IPV6_ADDRESS_FOR_S10 = "fd00:0:0:4::191/64";
MME_PORT_FOR_S10 = 2123; # YOUR NETWORK CONFIG HERE
};
LOGGING :
{
# OUTPUT choice in { "CONSOLE", `path to file`", "`IPv4@`:`TCP port num`"}
# `path to file` must start with '.' or '/'
# if TCP stream choice, then you can easily dump the traffic on the remote or local host: nc -l `TCP port num` > received.txt
OUTPUT = "CONSOLE";
THREAD_SAFE = "no"; # THREAD_SAFE choice in { "yes", "no" }, safe to let 'no'
COLOR = "yes"; # COLOR choice in { "yes", "no" } means use of ANSI styling codes or no
# Log level choice in { "EMERGENCY", "ALERT", "CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG", "TRACE"}
SCTP_LOG_LEVEL = "TRACE";
S10_LOG_LEVEL = "TRACE";
S11_LOG_LEVEL = "TRACE";
# NEW LOGS FOR MCE
SM_LOG_LEVEL = "TRACE";
MCE_APP_LOG_LEVEL = "TRACE";
M2AP_LOG_LEVEL = "TRACE";
GTPV2C_LOG_LEVEL = "TRACE";
UDP_LOG_LEVEL = "DEBUG";
S1AP_LOG_LEVEL = "DEBUG";
NAS_LOG_LEVEL = "TRACE";
MME_APP_LOG_LEVEL = "TRACE";
S6A_LOG_LEVEL = "TRACE";
UTIL_LOG_LEVEL = "ERROR";
MSC_LOG_LEVEL = "ERROR";
ITTI_LOG_LEVEL = "ERROR";
ASN1_VERBOSITY = "annoying";
};
# WRR_LIST_SELECTION = (
# {ID="tac-lb03.tac-hb00.tac.epc.mnc001.mcc001.3gppnetwork.org" ; SGW_IP_ADDRESS_FOR_S11="192.168.61.196";},
# {ID="tac-lb01.tac-hb00.tac.epc.mnc097.mcc208.3gppnetwork.org" ; SGW_IP_ADDRESS_FOR_S11="192.168.61.196";},
# {ID="tac-lb02.tac-hb00.tac.epc.mnc097.mcc208.3gppnetwork.org" ; MME_IP_ADDRESS_FOR_S10="0.0.0.0";},
# {ID="tac-lb03.tac-hb00.tac.epc.mnc097.mcc208.3gppnetwork.org" ; MME_IP_ADDRESS_FOR_S10="0.0.0.0";}
# );
WRR_LIST_SELECTION = (
{ID="tac-lb01.tac-hb00.tac.epc.mnc097.mcc208.3gppnetwork.org" ; SGW_IP_ADDRESS_FOR_S11="192.168.61.196";}
);
};
...@@ -56,9 +56,7 @@ MME : ...@@ -56,9 +56,7 @@ MME :
# ------- MME served TAIs # ------- MME served TAIs
TAI_LIST = ( TAI_LIST = (
{MCC="208" ; MNC="97"; TAC = "1"; }, {MCC="208" ; MNC="97"; TAC = "1"; }
{MCC="208" ; MNC="97"; TAC = "2"; },
{MCC="208" ; MNC="97"; TAC = "3"; }
); );
TAC_LIST = ( TAC_LIST = (
......
version: '3.8'
services:
enb_mono_fdd:
image: oai-enb:latest
privileged: true
container_name: nsa-b200-enb
environment:
USE_FDD_MONO: 'yes'
USE_B2XX: 'yes'
ENB_NAME: eNB-in-docker
MCC: '222'
MNC: '01'
MNC_LENGTH: 2
TAC: 1
UTRA_BAND_ID: 7
DL_FREQUENCY_IN_MHZ: 2680
UL_FREQUENCY_OFFSET_IN_MHZ: 120
NID_CELL: 0
NB_PRB: 25
ENABLE_MEASUREMENT_REPORTS: 'yes'
ENABLE_X2: 'yes'
MME_S1C_IP_ADDRESS: 192.168.18.210
ENB_S1C_IF_NAME: eth0
ENB_S1C_IP_ADDRESS: 192.168.68.130
ENB_S1U_IF_NAME: eth0
ENB_S1U_IP_ADDRESS: 192.168.68.130
ENB_X2_IP_ADDRESS: 192.168.68.130
RRC_INACTIVITY_THRESHOLD: 0
FLEXRAN_ENABLED: 'no'
FLEXRAN_INTERFACE_NAME: eth0
FLEXRAN_IPV4_ADDRESS: 192.168.18.210
THREAD_PARALLEL_CONFIG: PARALLEL_RU_L1_TRX_SPLIT
volumes:
- /dev:/dev
networks:
public_net:
ipv4_address: 192.168.68.130
healthcheck:
# pgrep does NOT work
test: /bin/bash -c "ps aux | grep -v grep | grep -c softmodem"
interval: 10s
timeout: 5s
retries: 5
networks:
public_net:
name: nsa-b200-enb-net
ipam:
config:
- subnet: 192.168.68.128/26
driver_opts:
com.docker.network.bridge.name: "nsa-enb-net"
version: '3.8'
services:
gnb_mono_tdd:
image: oai-gnb:latest
privileged: true
container_name: nsa-b200-gnb
environment:
USE_NSA_TDD_MONO: 'yes'
USE_B2XX: 'yes'
GNB_NAME: gNB-in-docker
MCC: '222'
MNC: '01'
MNC_LENGTH: 2
TAC: 1
ENABLE_X2: 'yes'
ENB_X2_IP_ADDRESS: 192.168.68.130
MME_S1C_IP_ADDRESS: 192.168.18.210
GNB_S1C_IF_NAME: eth0
GNB_S1C_IP_ADDRESS: 192.168.68.194
GNB_S1U_IF_NAME: eth0
GNB_S1U_IP_ADDRESS: 192.168.68.194
GNB_X2_IP_ADDRESS: 192.168.68.194
RRC_INACTIVITY_THRESHOLD: 0
FLEXRAN_ENABLED: 'no'
FLEXRAN_INTERFACE_NAME: eth0
FLEXRAN_IPV4_ADDRESS: 192.168.18.210
THREAD_PARALLEL_CONFIG: PARALLEL_RU_L1_TRX_SPLIT
USE_ADDITIONAL_OPTIONS: '-E -q'
volumes:
- /dev:/dev
networks:
public_net:
ipv4_address: 192.168.68.194
#entrypoint: /bin/bash -c "sleep infinity"
healthcheck:
# pgrep does NOT work
test: /bin/bash -c "ps aux | grep -v grep | grep -c softmodem"
interval: 10s
timeout: 5s
retries: 5
networks:
public_net:
name: nsa-b200-gnb-net
ipam:
config:
- subnet: 192.168.68.192/26
driver_opts:
com.docker.network.bridge.name: "nsa-gnb-net"
...@@ -104,7 +104,7 @@ The MAC layer implements a subset of the **3GPP 36.321** release v8.6 in support ...@@ -104,7 +104,7 @@ The MAC layer implements a subset of the **3GPP 36.321** release v8.6 in support
- RLC interface (AM, UM) - RLC interface (AM, UM)
- UL power control - UL power control
- Link adaptation - Link adaptation
- Connected DRX (CDRX) support for FDD LTE UE. Compatible with R13 from 3GPP. Support for Cat-M1 UE coming soon. - Connected DRX (CDRX) support for FDD LTE UE. Compatible with R13 from 3GPP. Support for Cat-M1 UE coming soon.
## eNB RLC Layer ## ## eNB RLC Layer ##
...@@ -206,7 +206,7 @@ The Physical layer implements **3GPP 36.211**, **36.212**, **36.213** and provid ...@@ -206,7 +206,7 @@ The Physical layer implements **3GPP 36.211**, **36.212**, **36.213** and provid
- PRACH preamble format 0 - PRACH preamble format 0
- All downlink (DL) channels are supported: PSS, SSS, PBCH, PCFICH, PHICH, PDCCH, PDSCH, PMCH - All downlink (DL) channels are supported: PSS, SSS, PBCH, PCFICH, PHICH, PDCCH, PDSCH, PMCH
- All uplink (UL) channels are supported: PRACH, PUSCH, PUCCH (format 1/1a/1b), SRS, DRS - All uplink (UL) channels are supported: PRACH, PUSCH, PUCCH (format 1/1a/1b), SRS, DRS
- LTE MBMS-dedicated cell (feMBMS) procedures subset for LTE release 14 (experimental) - LTE MBMS-dedicated cell (feMBMS) procedures subset for LTE release 14 (experimental)
- LTE non-MBSFN subframe (feMBMS) Carrier Acquisition Subframe-CAS procedures (PSS/SSS/PBCH/PDSCH) (experimental) - LTE non-MBSFN subframe (feMBMS) Carrier Acquisition Subframe-CAS procedures (PSS/SSS/PBCH/PDSCH) (experimental)
- LTE MBSFN subframe channel (feMBMS): PMCH (CS@1.25kHz) (channel estimation for 25MHz bandwidth) (experimental) - LTE MBSFN subframe channel (feMBMS): PMCH (CS@1.25kHz) (channel estimation for 25MHz bandwidth) (experimental)
...@@ -313,6 +313,7 @@ The following features are valid for the gNB and the 5G-NR UE. ...@@ -313,6 +313,7 @@ The following features are valid for the gNB and the 5G-NR UE.
- MAC downlink scheduler - MAC downlink scheduler
- phy-test scheduler (fixed allocation and usable also without UE) - phy-test scheduler (fixed allocation and usable also without UE)
- regular scheduler with dynamic allocation - regular scheduler with dynamic allocation
- MCS adaptation from HARQ BLER
- MAC header generation (including timing advance) - MAC header generation (including timing advance)
- ACK / NACK handling and HARQ procedures for downlink - ACK / NACK handling and HARQ procedures for downlink
- MAC uplink scheduler - MAC uplink scheduler
...@@ -398,7 +399,7 @@ The following features are valid for the gNB and the 5G-NR UE. ...@@ -398,7 +399,7 @@ The following features are valid for the gNB and the 5G-NR UE.
- Creates TUN interface to PDCP to inject and receive user-plane traffic - Creates TUN interface to PDCP to inject and receive user-plane traffic
- No connection to the core network - No connection to the core network
* Supporting Standalone (SA) mode: * Supporting Standalone (SA) mode:
- UE can register with the 5G Core Network, establish a PDU Session and exchange user-plane traffic - UE can register with the 5G Core Network, establish a PDU Session and exchange user-plane traffic
## NR UE PHY Layer ## ## NR UE PHY Layer ##
...@@ -484,7 +485,7 @@ The following features are valid for the gNB and the 5G-NR UE. ...@@ -484,7 +485,7 @@ The following features are valid for the gNB and the 5G-NR UE.
- Interfaces with PDCP, MAC - Interfaces with PDCP, MAC
**UE PDCP** **UE PDCP**
* Tx/Rx operations according to 38.323 Rel.16 * Tx/Rx operations according to 38.323 Rel.16
- Integrity protection and ciphering procedures - Integrity protection and ciphering procedures
- Sequence number management, SDU discard and in-order delivery - Sequence number management, SDU discard and in-order delivery
- Radio bearer establishment/handling and association with PDCP entities - Radio bearer establishment/handling and association with PDCP entities
......
...@@ -108,6 +108,7 @@ time_stats_t softmodem_stats_rx_sf; // total rx time ...@@ -108,6 +108,7 @@ time_stats_t softmodem_stats_rx_sf; // total rx time
//#define TICK_TO_US(ts) (ts.diff) //#define TICK_TO_US(ts) (ts.diff)
#define TICK_TO_US(ts) (ts.trials==0?0:ts.diff/ts.trials) #define TICK_TO_US(ts) (ts.trials==0?0:ts.diff/ts.trials)
#define L1STATSSTRLEN 16384
void tx_func(void *param) { void tx_func(void *param) {
...@@ -329,46 +330,63 @@ void rx_func(void *param) { ...@@ -329,46 +330,63 @@ void rx_func(void *param) {
); );
#endif #endif
} }
static void *process_stats_thread(void *param) { static void dump_L1_meas_stats(PHY_VARS_gNB *gNB, RU_t *ru, char *output) {
int stroff = 0;
PHY_VARS_gNB *gNB = (PHY_VARS_gNB *)param; stroff += print_meas_log(gNB->phy_proc_tx_0, "L1 Tx processing thread 0", NULL, NULL, output);
stroff += print_meas_log(gNB->phy_proc_tx_1, "L1 Tx processing thread 1", NULL, NULL, output+stroff);
stroff += print_meas_log(&gNB->dlsch_encoding_stats, "DLSCH encoding", NULL, NULL, output+stroff);
stroff += print_meas_log(&gNB->phy_proc_rx, "L1 Rx processing", NULL, NULL, output+stroff);
stroff += print_meas_log(&gNB->ul_indication_stats, "UL Indication", NULL, NULL, output+stroff);
stroff += print_meas_log(&gNB->rx_pusch_stats, "PUSCH inner-receiver", NULL, NULL, output+stroff);
stroff += print_meas_log(&gNB->ulsch_decoding_stats, "PUSCH decoding", NULL, NULL, output+stroff);
if (ru->feprx) stroff += print_meas_log(&ru->ofdm_demod_stats,"feprx",NULL,NULL, output+stroff);
if (ru->feptx_ofdm) {
stroff += print_meas_log(&ru->precoding_stats,"feptx_prec",NULL,NULL, output+stroff);
stroff += print_meas_log(&ru->txdataF_copy_stats,"txdataF_copy",NULL,NULL, output+stroff);
stroff += print_meas_log(&ru->ofdm_mod_stats,"feptx_ofdm",NULL,NULL, output+stroff);
stroff += print_meas_log(&ru->ofdm_total_stats,"feptx_total",NULL,NULL, output+stroff);
}
reset_meas(&gNB->dlsch_encoding_stats); if (ru->fh_north_asynch_in) stroff += print_meas_log(&ru->rx_fhaul,"rx_fhaul",NULL,NULL, output+stroff);
reset_meas(&gNB->phy_proc_rx);
reset_meas(&gNB->ul_indication_stats);
reset_meas(&gNB->rx_pusch_stats);
reset_meas(&gNB->ulsch_decoding_stats);
wait_sync("process_stats_thread"); stroff += print_meas_log(&ru->tx_fhaul,"tx_fhaul",NULL,NULL, output+stroff);
while(!oai_exit) if (ru->fh_north_out) {
{ stroff += print_meas_log(&ru->compression,"compression",NULL,NULL, output+stroff);
sleep(1); stroff += print_meas_log(&ru->transport,"transport",NULL,NULL, output+stroff);
print_meas(gNB->phy_proc_tx_0, "L1 Tx processing thread 0", NULL, NULL);
print_meas(gNB->phy_proc_tx_1, "L1 Tx processing thread 1", NULL, NULL);
print_meas(&gNB->dlsch_encoding_stats, "DLSCH encoding", NULL, NULL);
print_meas(&gNB->phy_proc_rx, "L1 Rx processing", NULL, NULL);
print_meas(&gNB->ul_indication_stats, "UL Indication", NULL, NULL);
print_meas(&gNB->rx_pusch_stats, "PUSCH inner-receiver", NULL, NULL);
print_meas(&gNB->ulsch_decoding_stats, "PUSCH decoding", NULL, NULL);
} }
return(NULL);
} }
void *nrL1_stats_thread(void *param) { void *nrL1_stats_thread(void *param) {
PHY_VARS_gNB *gNB = (PHY_VARS_gNB *)param; PHY_VARS_gNB *gNB = (PHY_VARS_gNB *)param;
RU_t *ru = RC.ru[0];
char output[L1STATSSTRLEN];
memset(output,0,L1STATSSTRLEN);
wait_sync("L1_stats_thread"); wait_sync("L1_stats_thread");
FILE *fd; FILE *fd;
fd=fopen("nrL1_stats.log","w");
AssertFatal(fd!=NULL,"Cannot open nrL1_stats.log\n");
reset_meas(gNB->phy_proc_tx_0);
reset_meas(gNB->phy_proc_tx_1);
reset_meas(&gNB->dlsch_encoding_stats);
reset_meas(&gNB->phy_proc_rx);
reset_meas(&gNB->ul_indication_stats);
reset_meas(&gNB->rx_pusch_stats);
reset_meas(&gNB->ulsch_decoding_stats);
while (!oai_exit) { while (!oai_exit) {
sleep(1); sleep(1);
fd=fopen("nrL1_stats.log","w");
AssertFatal(fd!=NULL,"Cannot open nrL1_stats.log\n");
dump_nr_I0_stats(fd,gNB); dump_nr_I0_stats(fd,gNB);
dump_pdsch_stats(fd,gNB); dump_pdsch_stats(fd,gNB);
dump_pusch_stats(fd,gNB); dump_pusch_stats(fd,gNB);
// nr_dump_uci_stats(fd,eNB,eNB->proc.L1_proc_tx.frame_tx); dump_L1_meas_stats(gNB, ru, output);
fclose(fd); fprintf(fd,"%s\n",output);
fflush(fd);
fseek(fd,0,SEEK_SET);
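// rewind so the next 1-second dump overwrites nrL1_stats.log from the start (the file stays open across iterations)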
} }
fclose(fd);
return(NULL); return(NULL);
} }
...@@ -429,11 +447,10 @@ void init_gNB_Tpool(int inst) { ...@@ -429,11 +447,10 @@ void init_gNB_Tpool(int inst) {
initNotifiedFIFO(gNB->resp_RU_tx); initNotifiedFIFO(gNB->resp_RU_tx);
notifiedFIFO_elt_t *msgRUTx = newNotifiedFIFO_elt(sizeof(processingData_RU_t),0,gNB->resp_RU_tx,ru_tx_func); notifiedFIFO_elt_t *msgRUTx = newNotifiedFIFO_elt(sizeof(processingData_RU_t),0,gNB->resp_RU_tx,ru_tx_func);
processingData_RU_t *msgData = (processingData_RU_t*)msgRUTx->msgData; processingData_RU_t *msgData = (processingData_RU_t*)msgRUTx->msgData;
msgData->next_slot = sf_ahead*gNB->frame_parms.slots_per_subframe; // first Tx slot int first_tx_slot = sf_ahead*gNB->frame_parms.slots_per_subframe;
msgData->next_slot = get_next_downlink_slot(gNB, &gNB->gNB_config, 0, first_tx_slot-1);
pushNotifiedFIFO(gNB->resp_RU_tx,msgRUTx); // to unblock the process in the beginning pushNotifiedFIFO(gNB->resp_RU_tx,msgRUTx); // to unblock the process in the beginning
// Stats measurement thread
if(opp_enabled == 1) threadCreate(&proc->process_stats_thread, process_stats_thread,(void *)gNB, "time_meas", -1, OAI_PRIORITY_RT_LOW);
threadCreate(&proc->L1_stats_thread,nrL1_stats_thread,(void*)gNB,"L1_stats",-1,OAI_PRIORITY_RT_LOW); threadCreate(&proc->L1_stats_thread,nrL1_stats_thread,(void*)gNB,"L1_stats",-1,OAI_PRIORITY_RT_LOW);
} }
......
...@@ -1508,7 +1508,6 @@ void init_RU_proc(RU_t *ru) { ...@@ -1508,7 +1508,6 @@ void init_RU_proc(RU_t *ru) {
if (ru->feptx_ofdm) nr_init_feptx_thread(ru); if (ru->feptx_ofdm) nr_init_feptx_thread(ru);
} }
if (opp_enabled == 1) threadCreate(&ru->ru_stats_thread,ru_stats_thread,(void *)ru, "emulateRF", -1, OAI_PRIORITY_RT_LOW);
} }
void kill_NR_RU_proc(int inst) { void kill_NR_RU_proc(int inst) {
......
...@@ -131,6 +131,49 @@ void print_meas(time_stats_t *ts, ...@@ -131,6 +131,49 @@ void print_meas(time_stats_t *ts,
} }
} }
int print_meas_log(time_stats_t *ts,
const char *name,
time_stats_t *total_exec_time,
time_stats_t *sf_exec_time,
char *output)
{
int stroff = 0;
static int first_time = 0;
static double cpu_freq_GHz = 0.0;
if (cpu_freq_GHz == 0.0)
cpu_freq_GHz = get_cpu_freq_GHz();
if (first_time == 0) {
first_time=1;
if ((total_exec_time == NULL) || (sf_exec_time== NULL))
stroff += sprintf(output, "%25s %25s %25s %25s %25s %6f\n","Name","Total","Per Trials", "Num Trials","CPU_F_GHz", cpu_freq_GHz);
else
stroff += sprintf(output+stroff, "%25s %25s %25s %20s %15s %6f\n","Name","Total","Average/Frame","Trials", "CPU_F_GHz", cpu_freq_GHz);
}
if (ts->trials>0) {
//printf("%20s: total: %10.3f ms, average: %10.3f us (%10d trials)\n", name, ts->diff/cpu_freq_GHz/1000000.0, ts->diff/ts->trials/cpu_freq_GHz/1000.0, ts->trials);
if ((total_exec_time == NULL) || (sf_exec_time== NULL)) {
stroff += sprintf(output+stroff, "%25s: %15.3f us; %15d; %15.3f us;\n",
name,
(ts->diff/ts->trials/cpu_freq_GHz/1000.0),
ts->trials,
ts->max/cpu_freq_GHz/1000.0);
} else {
stroff += sprintf(output+stroff, "%25s: %15.3f ms (%5.2f%%); %15.3f us (%5.2f%%); %15d;\n",
name,
(ts->diff/cpu_freq_GHz/1000000.0),
((ts->diff/cpu_freq_GHz/1000000.0)/(total_exec_time->diff/cpu_freq_GHz/1000000.0))*100, // percentage
(ts->diff/ts->trials/cpu_freq_GHz/1000.0),
((ts->diff/ts->trials/cpu_freq_GHz/1000.0)/(sf_exec_time->diff/sf_exec_time->trials/cpu_freq_GHz/1000.0))*100, // percentage
ts->trials);
}
}
return stroff;
}
double get_time_meas_us(time_stats_t *ts) double get_time_meas_us(time_stats_t *ts)
{ {
static double cpu_freq_GHz = 0.0; static double cpu_freq_GHz = 0.0;
......
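As a usage note: print_meas_log renders into a caller-provided buffer and returns the number of characters written, so successive calls chain by offset. A minimal sketch, assuming a PHY_VARS_gNB *gNB and an open FILE *fd as in the surrounding code:

    char buf[4096] = {0};
    int off = 0;
    off += print_meas_log(&gNB->phy_proc_rx, "L1 Rx processing", NULL, NULL, buf + off);
    off += print_meas_log(&gNB->rx_pusch_stats, "PUSCH inner-receiver", NULL, NULL, buf + off);
    fprintf(fd, "%s\n", buf); /* dump the accumulated block to the stats log */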
...@@ -88,6 +88,7 @@ static inline void stop_meas(time_stats_t *ts) __attribute__((always_inline)); ...@@ -88,6 +88,7 @@ static inline void stop_meas(time_stats_t *ts) __attribute__((always_inline));
void print_meas_now(time_stats_t *ts, const char *name, FILE *file_name); void print_meas_now(time_stats_t *ts, const char *name, FILE *file_name);
void print_meas(time_stats_t *ts, const char *name, time_stats_t *total_exec_time, time_stats_t *sf_exec_time); void print_meas(time_stats_t *ts, const char *name, time_stats_t *total_exec_time, time_stats_t *sf_exec_time);
int print_meas_log(time_stats_t *ts, const char *name, time_stats_t *total_exec_time, time_stats_t *sf_exec_time, char *output);
double get_time_meas_us(time_stats_t *ts); double get_time_meas_us(time_stats_t *ts);
double get_cpu_freq_GHz(void); double get_cpu_freq_GHz(void);
......
...@@ -60,6 +60,11 @@ ...@@ -60,6 +60,11 @@
#define CONFIG_STRING_MACRLC_PUCCHTARGETSNRX10 "pucch_TargetSNRx10" #define CONFIG_STRING_MACRLC_PUCCHTARGETSNRX10 "pucch_TargetSNRx10"
#define CONFIG_STRING_MACRLC_PUCCHFAILURETHRES "pucch_FailureThres" #define CONFIG_STRING_MACRLC_PUCCHFAILURETHRES "pucch_FailureThres"
#define CONFIG_STRING_MACRLC_PUSCHFAILURETHRES "pusch_FailureThres" #define CONFIG_STRING_MACRLC_PUSCHFAILURETHRES "pusch_FailureThres"
#define CONFIG_STRING_MACRLC_DL_BLER_TARGET_UPPER "dl_bler_target_upper"
#define CONFIG_STRING_MACRLC_DL_BLER_TARGET_LOWER "dl_bler_target_lower"
#define CONFIG_STRING_MACRLC_DL_RD2_BLER_THRESHOLD "dl_rd2_bler_threshold"
#define CONFIG_STRING_MACRLC_DL_MAX_MCS "dl_max_mcs"
/*-------------------------------------------------------------------------------------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------------------------------------------------------------------------------------*/
/* MacRLC configuration parameters */ /* MacRLC configuration parameters */
...@@ -88,6 +93,10 @@ ...@@ -88,6 +93,10 @@
{CONFIG_STRING_MACRLC_PUCCHTARGETSNRX10, NULL, 0, iptr:NULL, defintval:150, TYPE_INT, 0}, \ {CONFIG_STRING_MACRLC_PUCCHTARGETSNRX10, NULL, 0, iptr:NULL, defintval:150, TYPE_INT, 0}, \
{CONFIG_STRING_MACRLC_PUCCHFAILURETHRES, NULL, 0, iptr:NULL, defintval:10, TYPE_INT, 0}, \ {CONFIG_STRING_MACRLC_PUCCHFAILURETHRES, NULL, 0, iptr:NULL, defintval:10, TYPE_INT, 0}, \
{CONFIG_STRING_MACRLC_PUSCHFAILURETHRES, NULL, 0, iptr:NULL, defintval:10, TYPE_INT, 0}, \ {CONFIG_STRING_MACRLC_PUSCHFAILURETHRES, NULL, 0, iptr:NULL, defintval:10, TYPE_INT, 0}, \
{CONFIG_STRING_MACRLC_DL_BLER_TARGET_UPPER, "Upper threshold of BLER to decrease DL MCS", 0, dblptr:NULL, defdblval:0.15, TYPE_DOUBLE, 0}, \
{CONFIG_STRING_MACRLC_DL_BLER_TARGET_LOWER, "Lower threshold of BLER to increase DL MCS", 0, dblptr:NULL, defdblval:0.05, TYPE_DOUBLE, 0}, \
{CONFIG_STRING_MACRLC_DL_RD2_BLER_THRESHOLD, "Threshold of RD2/RETX2 BLER to decrease DL MCS", 0, dblptr:NULL, defdblval:0.01, TYPE_DOUBLE, 0}, \
{CONFIG_STRING_MACRLC_DL_MAX_MCS, "Maximum DL MCS that should be used", 0, u8ptr:NULL, defintval:28, TYPE_UINT8, 0}, \
} }
#define MACRLC_CC_IDX 0 #define MACRLC_CC_IDX 0
#define MACRLC_TRANSPORT_N_PREFERENCE_IDX 1 #define MACRLC_TRANSPORT_N_PREFERENCE_IDX 1
...@@ -111,5 +120,10 @@ ...@@ -111,5 +120,10 @@
#define MACRLC_PUCCHTARGETSNRX10_IDX 19 #define MACRLC_PUCCHTARGETSNRX10_IDX 19
#define MACRLC_PUCCHFAILURETHRES_IDX 20 #define MACRLC_PUCCHFAILURETHRES_IDX 20
#define MACRLC_PUSCHFAILURETHRES_IDX 21 #define MACRLC_PUSCHFAILURETHRES_IDX 21
#define MACRLC_DL_BLER_TARGET_UPPER_IDX 22
#define MACRLC_DL_BLER_TARGET_LOWER_IDX 23
#define MACRLC_DL_RD2_BLER_THRESHOLD_IDX 24
#define MACRLC_DL_MAX_MCS_IDX 25
/*---------------------------------------------------------------------------------------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------------------------------------------------------------------------------------*/
#endif #endif
...@@ -755,6 +755,10 @@ void RCconfig_nr_macrlc() { ...@@ -755,6 +755,10 @@ void RCconfig_nr_macrlc() {
AssertFatal(1==0,"MACRLC %d: %s unknown southbound midhaul\n",j,*(MacRLC_ParamList.paramarray[j][MACRLC_TRANSPORT_S_PREFERENCE_IDX].strptr)); AssertFatal(1==0,"MACRLC %d: %s unknown southbound midhaul\n",j,*(MacRLC_ParamList.paramarray[j][MACRLC_TRANSPORT_S_PREFERENCE_IDX].strptr));
} }
RC.nrmac[j]->ulsch_max_frame_inactivity = *(MacRLC_ParamList.paramarray[j][MACRLC_ULSCH_MAX_FRAME_INACTIVITY].uptr); RC.nrmac[j]->ulsch_max_frame_inactivity = *(MacRLC_ParamList.paramarray[j][MACRLC_ULSCH_MAX_FRAME_INACTIVITY].uptr);
RC.nrmac[j]->dl_bler_target_upper = *(MacRLC_ParamList.paramarray[j][MACRLC_DL_BLER_TARGET_UPPER_IDX].dblptr);
RC.nrmac[j]->dl_bler_target_lower = *(MacRLC_ParamList.paramarray[j][MACRLC_DL_BLER_TARGET_LOWER_IDX].dblptr);
RC.nrmac[j]->dl_rd2_bler_threshold = *(MacRLC_ParamList.paramarray[j][MACRLC_DL_RD2_BLER_THRESHOLD_IDX].dblptr);
RC.nrmac[j]->dl_max_mcs = *(MacRLC_ParamList.paramarray[j][MACRLC_DL_MAX_MCS_IDX].u8ptr);
RC.nrmac[j]->num_ulprbbl = num_prbbl; RC.nrmac[j]->num_ulprbbl = num_prbbl;
LOG_I(NR_MAC,"Blacklisted PRBS %d\n",num_prbbl); LOG_I(NR_MAC,"Blacklisted PRBS %d\n",num_prbbl);
memcpy(RC.nrmac[j]->ulprbbl,prbbl,275*sizeof(prbbl[0])); memcpy(RC.nrmac[j]->ulprbbl,prbbl,275*sizeof(prbbl[0]));
......
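For reference, a hedged sketch of how the four new options could appear in the MACRLCs section of a gNB configuration file; the option names come from the defines above, the values are the documented defaults, and all other mandatory fields are elided:

    MACRLCs = (
      {
        # ... transport and interface settings elided ...
        dl_bler_target_upper  = 0.15;  # decrease DL MCS when the filtered 1st-retx BLER exceeds this
        dl_bler_target_lower  = 0.05;  # increase DL MCS when the filtered 1st-retx BLER falls below this
        dl_rd2_bler_threshold = 0.01;  # ceiling on the 2nd-retx BLER
        dl_max_mcs            = 28;    # maximum DL MCS the scheduler may assign
      }
    );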
...@@ -382,6 +382,74 @@ int nr_write_ce_dlsch_pdu(module_id_t module_idP, ...@@ -382,6 +382,74 @@ int nr_write_ce_dlsch_pdu(module_id_t module_idP,
return offset; return offset;
} }
#define BLER_UPDATE_FRAME 10
#define BLER_FILTER 0.9f
int get_mcs_from_bler(module_id_t mod_id, int CC_id, frame_t frame, sub_frame_t slot, int UE_id)
{
gNB_MAC_INST *nrmac = RC.nrmac[mod_id];
const NR_ServingCellConfigCommon_t *scc = nrmac->common_channels[CC_id].ServingCellConfigCommon;
const int n = nr_slots_per_frame[*scc->ssbSubcarrierSpacing];
NR_DL_bler_stats_t *bler_stats = &nrmac->UE_info.UE_sched_ctrl[UE_id].dl_bler_stats;
/* first call: everything is zero. Initialize to sensible default */
if (bler_stats->last_frame_slot == 0 && bler_stats->mcs == 0) {
bler_stats->last_frame_slot = frame * n + slot;
bler_stats->mcs = 9;
bler_stats->bler = (nrmac->dl_bler_target_lower + nrmac->dl_bler_target_upper) / 2;
bler_stats->rd2_bler = nrmac->dl_rd2_bler_threshold;
}
const int now = frame * n + slot;
int diff = now - bler_stats->last_frame_slot;
if (diff < 0) // wrap around
diff += 1024 * n;
const uint8_t old_mcs = bler_stats->mcs;
const NR_mac_stats_t *stats = &nrmac->UE_info.mac_stats[UE_id];
// TODO put back this condition when relevant
/*const int dret3x = stats->dlsch_rounds[3] - bler_stats->dlsch_rounds[3];
if (dret3x > 0) {
if there is a third retransmission, decrease MCS for stabilization and
restart averaging window to stabilize transmission
bler_stats->last_frame_slot = now;
bler_stats->mcs = max(9, bler_stats->mcs - 1);
memcpy(bler_stats->dlsch_rounds, stats->dlsch_rounds, sizeof(stats->dlsch_rounds));
LOG_D(MAC, "%4d.%2d: %d retx in 3rd round, setting MCS to %d and restarting window\n", frame, slot, dret3x, bler_stats->mcs);
return bler_stats->mcs;
}*/
if (diff < BLER_UPDATE_FRAME * n)
return old_mcs; // no update
// last update is longer than x frames ago
const int dtx = stats->dlsch_rounds[0] - bler_stats->dlsch_rounds[0];
const int dretx = stats->dlsch_rounds[1] - bler_stats->dlsch_rounds[1];
const int dretx2 = stats->dlsch_rounds[2] - bler_stats->dlsch_rounds[2];
const float bler_window = dtx > 0 ? (float) dretx / dtx : bler_stats->bler;
const float rd2_bler_wnd = dtx > 0 ? (float) dretx2 / dtx : bler_stats->rd2_bler;
bler_stats->bler = BLER_FILTER * bler_stats->bler + (1 - BLER_FILTER) * bler_window;
bler_stats->rd2_bler = BLER_FILTER / 4 * bler_stats->rd2_bler + (1 - BLER_FILTER / 4) * rd2_bler_wnd;
int new_mcs = old_mcs;
// TODO put back this condition when relevant
/* first ensure that number of 2nd retx is below threshold. If this is the
* case, use 1st retx to adjust faster
if (bler_stats->rd2_bler > nrmac->dl_rd2_bler_threshold && old_mcs > 6) {
new_mcs -= 2;
} else if (bler_stats->rd2_bler < nrmac->dl_rd2_bler_threshold) {*/
if (bler_stats->bler < nrmac->dl_bler_target_lower && old_mcs < nrmac->dl_max_mcs && dtx > 9)
new_mcs += 1;
else if (bler_stats->bler > nrmac->dl_bler_target_upper && old_mcs > 6)
new_mcs -= 1;
// else we are within threshold boundaries
bler_stats->last_frame_slot = now;
bler_stats->mcs = new_mcs;
memcpy(bler_stats->dlsch_rounds, stats->dlsch_rounds, sizeof(stats->dlsch_rounds));
LOG_D(MAC, "%4d.%2d MCS %d -> %d (dtx %d, dretx %d, BLER wnd %.3f avg %.6f, dretx2 %d, RD2 BLER wnd %.3f avg %.6f)\n",
frame, slot, old_mcs, new_mcs, dtx, dretx, bler_window, bler_stats->bler, dretx2, rd2_bler_wnd, bler_stats->rd2_bler);
return new_mcs;
}
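To make the filter concrete, a self-contained sketch of one EMA update with invented counters (BLER_FILTER = 0.9 as defined above; 0.05 and 0.15 are the default dl_bler_target_lower/upper):

    #include <stdio.h>

    int main(void) {
      const float BLER_FILTER = 0.9f;
      float bler = 0.060f;                    /* previous filtered BLER */
      const int dtx = 40, dretx = 6;          /* invented: initial-TX and 1st-retx counts in the window */
      const float window = (float)dretx / dtx;                /* instantaneous BLER = 0.15 */
      bler = BLER_FILTER * bler + (1 - BLER_FILTER) * window; /* 0.9*0.060 + 0.1*0.15 = 0.069 */
      /* 0.05 < 0.069 < 0.15: inside the target band, so the MCS would stay unchanged */
      printf("filtered BLER = %.3f\n", bler);
      return 0;
    }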
void nr_store_dlsch_buffer(module_id_t module_id, void nr_store_dlsch_buffer(module_id_t module_id,
frame_t frame, frame_t frame,
sub_frame_t slot) { sub_frame_t slot) {
...@@ -630,8 +698,8 @@ void pf_dl(module_id_t module_id, ...@@ -630,8 +698,8 @@ void pf_dl(module_id_t module_id,
continue; continue;
/* Calculate coeff */ /* Calculate coeff */
sched_pdsch->mcs = 9;
ps->nrOfLayers = 1; ps->nrOfLayers = 1;
sched_pdsch->mcs = get_mcs_from_bler(module_id, /* CC_id = */ 0, frame, slot, UE_id);
uint32_t tbs = pf_tbs[ps->mcsTableIdx][sched_pdsch->mcs]; uint32_t tbs = pf_tbs[ps->mcsTableIdx][sched_pdsch->mcs];
coeff_ue[UE_id] = (float) tbs / thr_ue[UE_id]; coeff_ue[UE_id] = (float) tbs / thr_ue[UE_id];
LOG_D(NR_MAC,"b %d, thr_ue[%d] %f, tbs %d, coeff_ue[%d] %f\n", LOG_D(NR_MAC,"b %d, thr_ue[%d] %f, tbs %d, coeff_ue[%d] %f\n",
......
...@@ -81,22 +81,28 @@ void dump_mac_stats(gNB_MAC_INST *gNB, char *output, int strlen) ...@@ -81,22 +81,28 @@ void dump_mac_stats(gNB_MAC_INST *gNB, char *output, int strlen)
for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) { for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
stroff+=sprintf(output+stroff,"UE ID %d RNTI %04x (%d/%d) PH %d dB PCMAX %d dBm\n", const NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
NR_mac_stats_t *stats = &UE_info->mac_stats[UE_id];
const int avg_rsrp = stats->num_rsrp_meas > 0 ? stats->cumul_rsrp / stats->num_rsrp_meas : 0;
stroff+=sprintf(output+stroff,"UE ID %d RNTI %04x (%d/%d) PH %d dB PCMAX %d dBm, average RSRP %d (%d meas)\n",
UE_id, UE_id,
UE_info->rnti[UE_id], UE_info->rnti[UE_id],
num++, num++,
UE_info->num_UEs, UE_info->num_UEs,
UE_info->UE_sched_ctrl[UE_id].ph, sched_ctrl->ph,
UE_info->UE_sched_ctrl[UE_id].pcmax); sched_ctrl->pcmax,
avg_rsrp,
stats->num_rsrp_meas);
NR_mac_stats_t *stats = &UE_info->mac_stats[UE_id]; stroff+=sprintf(output+stroff,"UE %d: dlsch_rounds %d/%d/%d/%d, dlsch_errors %d, pucch0_DTX %d, BLER %.5f MCS %d\n",
const int avg_rsrp = stats->num_rsrp_meas > 0 ? stats->cumul_rsrp / stats->num_rsrp_meas : 0;
stroff+=sprintf(output+stroff,"UE %d: dlsch_rounds %d/%d/%d/%d, dlsch_errors %d, pucch0_DTX %d average RSRP %d (%d meas)\n",
UE_id, UE_id,
stats->dlsch_rounds[0], stats->dlsch_rounds[1], stats->dlsch_rounds[0], stats->dlsch_rounds[1],
stats->dlsch_rounds[2], stats->dlsch_rounds[3], stats->dlsch_errors, stats->dlsch_rounds[2], stats->dlsch_rounds[3],
stats->dlsch_errors,
stats->pucch0_DTX, stats->pucch0_DTX,
avg_rsrp, stats->num_rsrp_meas); sched_ctrl->dl_bler_stats.bler,
sched_ctrl->dl_bler_stats.mcs);
stats->num_rsrp_meas = 0; stats->num_rsrp_meas = 0;
stats->cumul_rsrp = 0 ; stats->cumul_rsrp = 0 ;
stroff+=sprintf(output+stroff,"UE %d: dlsch_total_bytes %d\n", UE_id, stats->dlsch_total_bytes); stroff+=sprintf(output+stroff,"UE %d: dlsch_total_bytes %d\n", UE_id, stats->dlsch_total_bytes);
...@@ -121,7 +127,7 @@ void dump_mac_stats(gNB_MAC_INST *gNB, char *output, int strlen) ...@@ -121,7 +127,7 @@ void dump_mac_stats(gNB_MAC_INST *gNB, char *output, int strlen)
} }
} }
} }
print_meas(&gNB->eNB_scheduler, "DL & UL scheduling timing stats", NULL, NULL); print_meas_log(&gNB->eNB_scheduler, "DL & UL scheduling timing stats", NULL, NULL, output+stroff);
} }
......
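Put together, one per-UE block emitted by the format strings above would look roughly like this (all values invented):

    UE ID 0 RNTI 1234 (1/1) PH 40 dB PCMAX 23 dBm, average RSRP -88 (16 meas)
    UE 0: dlsch_rounds 1000/60/4/0, dlsch_errors 0, pucch0_DTX 2, BLER 0.06900 MCS 9
    UE 0: dlsch_total_bytes 1048576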
...@@ -422,6 +422,13 @@ typedef struct NR_UE_harq { ...@@ -422,6 +422,13 @@ typedef struct NR_UE_harq {
//! fixme : need to enhance for the multiple TB CQI report //! fixme : need to enhance for the multiple TB CQI report
typedef struct NR_DL_bler_stats {
frame_t last_frame_slot;
float bler;
float rd2_bler;
uint8_t mcs;
int dlsch_rounds[8];
} NR_DL_bler_stats_t;
// //
/*! As per spec 38.214 section 5.2.1.4.2 /*! As per spec 38.214 section 5.2.1.4.2
...@@ -565,6 +572,9 @@ typedef struct { ...@@ -565,6 +572,9 @@ typedef struct {
/// per-LC status data /// per-LC status data
mac_rlc_status_resp_t rlc_status[MAX_NUM_LCID]; mac_rlc_status_resp_t rlc_status[MAX_NUM_LCID];
/// DL BLER statistics estimated from HARQ feedback, used for MCS adaptation
NR_DL_bler_stats_t dl_bler_stats;
int lcid_mask; int lcid_mask;
int lcid_to_schedule; int lcid_to_schedule;
uint16_t ta_frame; uint16_t ta_frame;
...@@ -763,6 +773,10 @@ typedef struct gNB_MAC_INST_s { ...@@ -763,6 +773,10 @@ typedef struct gNB_MAC_INST_s {
NR_Type0_PDCCH_CSS_config_t type0_PDCCH_CSS_config[64]; NR_Type0_PDCCH_CSS_config_t type0_PDCCH_CSS_config[64];
bool first_MIB; bool first_MIB;
double dl_bler_target_upper;
double dl_bler_target_lower;
double dl_rd2_bler_threshold;
uint8_t dl_max_mcs;
} gNB_MAC_INST; } gNB_MAC_INST;
#endif /*__LAYER2_NR_MAC_GNB_H__ */ #endif /*__LAYER2_NR_MAC_GNB_H__ */