Commit 198df43e authored by Robert Schmidt

Merge remote-tracking branch 'origin/build-cluster-hotfix' into integration_2022_wk37

parents 2715a705 b6c2dd78
......@@ -136,16 +136,16 @@ pipeline {
}
}
}
stage ("RHEL8 Build") {
stage ("RHEL8-Cluster-Image-Builder") {
steps {
script {
triggerSlaveJob ('RAN-RHEL8-Image-Builder', 'RHEL8-Images-Build')
triggerSlaveJob ('RAN-RHEL8-Cluster-Image-Builder', 'RHEL8-Cluster-Image-Builder')
}
}
post {
always {
script {
finalizeSlaveJob('RAN-RHEL8-Image-Builder')
finalizeSlaveJob('RAN-RHEL8-Cluster-Image-Builder')
}
}
failure {
......
......@@ -90,14 +90,23 @@ class Cluster:
logging.error('error while creating buildconfig: ' + sshSession.getBefore())
return False
def _recreate_is(self, sshSession, name, newTag, filename):
sshSession.command(f'sed -i -e "s#tag: *latest#tag: {newTag}#" {filename}', '\$', 5)
sshSession.command(f'oc delete -f {filename}', '\$', 5)
sshSession.command(f'oc create -f {filename}', '\$', 5)
def _recreate_is_tag(self, sshSession, name, newTag, filename):
sshSession.command(f'oc describe is {name}', '\$', 5)
if sshSession.getBefore().count('NotFound') > 0:
sshSession.command(f'oc create -f {filename}', '\$', 5)
before = sshSession.getBefore()
if re.search(f'imagestream.image.openshift.io/{name} created', before) is None:
logging.error('error while creating imagestream: ' + sshSession.getBefore())
return False
else:
logging.debug(f'-> imagestream {name} found')
image = f'{name}:{newTag}'
sshSession.command(f'oc delete istag {image}', '\$', 5) # we don't care if this fails, e.g., if it is missing
sshSession.command(f'oc create istag {image}', '\$', 5)
before = sshSession.getBefore()
if re.search('imagestream.image.openshift.io/[a-zA-Z\-0-9]+ created', before) is not None:
if re.search(f'imagestreamtag.image.openshift.io/{image} created', before) is not None:
return True
logging.error('error while creating imagestream: ' + sshSession.getBefore())
logging.error('error while creating imagestreamtag: ' + sshSession.getBefore())
return False
def _start_build(self, sshSession, name):
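For reference, the new `_recreate_is_tag` helper above boils down to the following `oc` sequence (a sketch only; `ran-base` and `ci-temp` stand in for the imagestream name and tag passed by the caller):

```bash
# create the imagestream from its YAML definition only if it does not exist yet
oc describe is ran-base || oc create -f openshift/ran-base-is.yaml
# recreate the imagestreamtag for the requested tag; the delete may fail harmlessly if the tag is absent
oc delete istag ran-base:ci-temp || true
oc create istag ran-base:ci-temp
```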
......@@ -125,7 +134,7 @@ class Cluster:
while timeout_sec > 0:
# check status
for j in jobs:
sshSession.command(f'oc get pods | grep {j}', '\$', 5, silent = True)
sshSession.command(f'oc get pods | grep {j}', '\$', 10, silent = True)
if sshSession.getBefore().count('Completed') > 0: jobs.remove(j)
if sshSession.getBefore().count('Error') > 0:
logging.error(f'error for job {j}: ' + sshSession.getBefore())
......@@ -141,18 +150,6 @@ class Cluster:
def _retag_image_statement(self, sshSession, oldImage, newImage, newTag, filename):
sshSession.command(f'sed -i -e "s#{oldImage}:latest#{newImage}:{newTag}#" {filename}', '\$', 5)
def _pull_image(self, sshSession, image, tag):
sshSession.command(f'oc whoami -t | sudo podman login -u oaicicd --password-stdin https://{self.OCRegistry} --tls-verify=false', '\$', 5, silent=True)
if sshSession.getBefore().count('Login Succeeded!') == 0:
return None
imageName = f'{self.OCRegistry}{self.OCProjectName}/{image}:{tag}'
sshSession.command(f'sudo podman pull {imageName} --tls-verify=false', '\$', 300)
pullResult = sshSession.getBefore()
sshSession.command(f'sudo podman logout https://{self.OCRegistry}', '\$', 10, silent=True)
if pullResult.count('Storing signatures') == 0:
return None
return imageName
def _get_image_size(self, sshSession, image, tag):
# get the SHA of the image we built using the image name and its tag
sshSession.command(f'oc describe is {image} | grep -A4 {tag}', '\$', 5)
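The removed `_pull_image` helper above corresponds roughly to the manual sequence below (a sketch; `<registry>`, `<project>`, `<image>` and `<tag>` are placeholders for `self.OCRegistry`, `self.OCProjectName` and the image name/tag):

```bash
# log podman into the cluster-internal registry using the current oc token
oc whoami -t | sudo podman login -u oaicicd --password-stdin https://<registry> --tls-verify=false
# pull the built image, then drop the registry credentials again
sudo podman pull <registry>/<project>/<image>:<tag> --tls-verify=false
sudo podman logout https://<registry>
```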
......@@ -216,7 +213,7 @@ class Cluster:
# Workaround for some servers, we need to erase completely the workspace
if self.forcedWorkspaceCleanup:
mySSH.command(f'sudo rm -Rf {lSourcePath}', '\$', 15)
mySSH.command(f'rm -Rf {lSourcePath}', '\$', 15)
cls_containerize.CreateWorkspace(mySSH, lSourcePath, self.ranRepository, self.ranCommitID, self.ranTargetBranch, self.ranAllowMerge)
# we don't necessarily need a forced workspace cleanup, but in
......@@ -259,7 +256,7 @@ class Cluster:
status = True # flag to abandon compiling if any image fails
attemptedImages = []
if forceBaseImageBuild:
self._recreate_is(mySSH, 'ran-base', baseTag, 'openshift/ran-base-is.yaml')
self._recreate_is_tag(mySSH, 'ran-base', baseTag, 'openshift/ran-base-is.yaml')
self._recreate_bc(mySSH, 'ran-base', baseTag, 'openshift/ran-base-bc.yaml')
ranbase_job = self._start_build(mySSH, 'ran-base')
attemptedImages += ['ran-base']
......@@ -277,13 +274,13 @@ class Cluster:
status = False
if status:
self._recreate_is(mySSH, 'oai-physim', imageTag, 'openshift/oai-physim-is.yaml')
self._recreate_is_tag(mySSH, 'oai-physim', imageTag, 'openshift/oai-physim-is.yaml')
self._recreate_bc(mySSH, 'oai-physim', imageTag, 'openshift/oai-physim-bc.yaml')
self._retag_image_statement(mySSH, 'ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.phySim.rhel8.2')
physim_job = self._start_build(mySSH, 'oai-physim')
attemptedImages += ['oai-physim']
self._recreate_is(mySSH, 'ran-build', imageTag, 'openshift/ran-build-is.yaml')
self._recreate_is_tag(mySSH, 'ran-build', imageTag, 'openshift/ran-build-is.yaml')
self._recreate_bc(mySSH, 'ran-build', imageTag, 'openshift/ran-build-bc.yaml')
self._retag_image_statement(mySSH, 'ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.build.rhel8.2')
ranbuild_job = self._start_build(mySSH, 'ran-build')
......@@ -296,28 +293,28 @@ class Cluster:
mySSH.command(f'oc logs {physim_job} > cmake_targets/log/oai-physim.log', '\$', 10)
if status:
self._recreate_is(mySSH, 'oai-enb', imageTag, 'openshift/oai-enb-is.yaml')
self._recreate_is_tag(mySSH, 'oai-enb', imageTag, 'openshift/oai-enb-is.yaml')
self._recreate_bc(mySSH, 'oai-enb', imageTag, 'openshift/oai-enb-bc.yaml')
self._retag_image_statement(mySSH, 'ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.eNB.rhel8.2')
self._retag_image_statement(mySSH, 'ran-build', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-build', imageTag, 'docker/Dockerfile.eNB.rhel8.2')
enb_job = self._start_build(mySSH, 'oai-enb')
attemptedImages += ['oai-enb']
self._recreate_is(mySSH, 'oai-gnb', imageTag, 'openshift/oai-gnb-is.yaml')
self._recreate_is_tag(mySSH, 'oai-gnb', imageTag, 'openshift/oai-gnb-is.yaml')
self._recreate_bc(mySSH, 'oai-gnb', imageTag, 'openshift/oai-gnb-bc.yaml')
self._retag_image_statement(mySSH, 'ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.gNB.rhel8.2')
self._retag_image_statement(mySSH, 'ran-build', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-build', imageTag, 'docker/Dockerfile.gNB.rhel8.2')
gnb_job = self._start_build(mySSH, 'oai-gnb')
attemptedImages += ['oai-gnb']
self._recreate_is(mySSH, 'oai-lte-ue', imageTag, 'openshift/oai-lte-ue-is.yaml')
self._recreate_is_tag(mySSH, 'oai-lte-ue', imageTag, 'openshift/oai-lte-ue-is.yaml')
self._recreate_bc(mySSH, 'oai-lte-ue', imageTag, 'openshift/oai-lte-ue-bc.yaml')
self._retag_image_statement(mySSH, 'ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.lteUE.rhel8.2')
self._retag_image_statement(mySSH, 'ran-build', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-build', imageTag, 'docker/Dockerfile.lteUE.rhel8.2')
lteue_job = self._start_build(mySSH, 'oai-lte-ue')
attemptedImages += ['oai-lte-ue']
self._recreate_is(mySSH, 'oai-nr-ue', imageTag, 'openshift/oai-nr-ue-is.yaml')
self._recreate_is_tag(mySSH, 'oai-nr-ue', imageTag, 'openshift/oai-nr-ue-is.yaml')
self._recreate_bc(mySSH, 'oai-nr-ue', imageTag, 'openshift/oai-nr-ue-bc.yaml')
self._retag_image_statement(mySSH, 'ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.nrUE.rhel8.2')
self._retag_image_statement(mySSH, 'ran-build', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-build', imageTag, 'docker/Dockerfile.nrUE.rhel8.2')
......@@ -348,6 +345,9 @@ class Cluster:
imageSize[image] = f'{sizeMb:.1f} Mbytes (uncompressed: ~{sizeMb*2.5:.1f} Mbytes)'
logging.info(f'\u001B[1m{image} size is {imageSize[image]}\u001B[0m')
grep_exp = "\|".join(attemptedImages)
mySSH.command(f'oc get images | grep -e \'{grep_exp}\' > cmake_targets/log/image_registry.log', '\$', 10);
build_log_name = f'build_log_{self.testCase_id}'
cls_containerize.CopyLogsToExecutor(mySSH, lSourcePath, build_log_name, lIpAddr, 'oaicicd', CONST.CI_NO_PASSWORD)
......
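For illustration, with a hypothetical `attemptedImages = ['ran-base', 'ran-build', 'oai-gnb']`, the joined `grep_exp` yields the escaped alternation used by the registry-log command:

```bash
# grep_exp = "ran-base\|ran-build\|oai-gnb" (basic-regex alternation)
oc get images | grep -e 'ran-base\|ran-build\|oai-gnb' > cmake_targets/log/image_registry.log
```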
......@@ -67,7 +67,7 @@ def CreateWorkspace(sshSession, sourcePath, ranRepository, ranCommitID, ranTarge
sshSession.command('git config user.email "jenkins@openairinterface.org"', '\$', 5)
sshSession.command('git config user.name "OAI Jenkins"', '\$', 5)
sshSession.command('sudo git clean -x -d -ff', '\$', 30)
sshSession.command('git clean -x -d -ff', '\$', 30)
sshSession.command('mkdir -p cmake_targets/log', '\$', 5)
# if the commit ID is provided use it to point to it
if ranCommitID != '':
......
......@@ -115,30 +115,6 @@ class PhySim:
mySSH.command('git merge --ff origin/' + self.ranTargetBranch + ' -m "Temporary merge for CI"', '\$', 5)
else:
imageTag = "develop"
# Check if image is exist on the Red Hat server, before pushing it to OC cluster
mySSH.command('sudo podman image inspect --format="Size = {{.Size}} bytes" oai-physim:' + imageTag, '\$', 60)
if mySSH.getBefore().count('no such image') != 0:
logging.error('\u001B[1m No such image oai-physim\u001B[0m')
mySSH.close()
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.PHYSIM_IMAGE_ABSENT)
RAN.prematureExit = True
return
else:
result = re.search('Size *= *(?P<size>[0-9\-]+) *bytes', mySSH.getBefore())
if result is not None:
imageSize = float(result.group('size'))
imageSize = imageSize / 1000
if imageSize < 1000:
logging.debug('\u001B[1m oai-physim size is ' + ('%.0f' % imageSize) + ' kbytes\u001B[0m')
else:
imageSize = imageSize / 1000
if imageSize < 1000:
logging.debug('\u001B[1m oai-physim size is ' + ('%.0f' % imageSize) + ' Mbytes\u001B[0m')
else:
imageSize = imageSize / 1000
logging.debug('\u001B[1m oai-physim is ' + ('%.3f' % imageSize) + ' Gbytes\u001B[0m')
else:
logging.debug('oai-physim size is unknown')
# logging to OC Cluster and then switch to corresponding project
mySSH.command(f'oc login -u {ocUserName} -p {ocPassword} --server https://api.oai.cs.eurecom.fr:6443', '\$', 30)
......@@ -161,46 +137,7 @@ class PhySim:
else:
logging.debug(f'\u001B[1m Now using project {ocProjectName}\u001B[0m')
# Tag the image and push to the OC cluster
mySSH.command('oc whoami -t | sudo podman login -u ' + ocUserName + ' --password-stdin https://default-route-openshift-image-registry.apps.oai.cs.eurecom.fr/ --tls-verify=false', '\$', 30)
if mySSH.getBefore().count('Login Succeeded!') == 0:
logging.error('\u001B[1m Podman Login to OC Cluster Registry Failed\u001B[0m')
mySSH.command('oc logout', '\$', 30)
mySSH.close()
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.OC_LOGIN_FAIL)
RAN.prematureExit = True
return
else:
logging.debug('\u001B[1m Podman Login to OC Cluster Registry Successfully\u001B[0m')
time.sleep(2)
mySSH.command('oc create -f openshift/oai-physim-is.yaml || true', '\$', 30)
if mySSH.getBefore().count('(AlreadyExists):') == 0 and mySSH.getBefore().count('created') == 0:
logging.error(f'\u001B[1m Image Stream "oai-physim" Creation Failed on OC Cluster {ocProjectName}\u001B[0m')
mySSH.command('sudo podman logout https://default-route-openshift-image-registry.apps.oai.cs.eurecom.fr/', '\$', 6)
mySSH.command('oc logout', '\$', 30)
mySSH.close()
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.OC_IS_FAIL)
RAN.prematureExit = True
return
else:
logging.debug(f'\u001B[1m Image Stream "oai-physim" created on OC project {ocProjectName}\u001B[0m')
time.sleep(2)
mySSH.command(f'sudo podman tag oai-physim:{imageTag} default-route-openshift-image-registry.apps.oai.cs.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag}', '\$', 30)
time.sleep(2)
mySSH.command(f'sudo podman push default-route-openshift-image-registry.apps.oai.cs.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag} --tls-verify=false', '\$', 180)
if mySSH.getBefore().count('Storing signatures') == 0:
logging.error('\u001B[1m Image "oai-physim" push to OC Cluster Registry Failed\u001B[0m')
mySSH.command('sudo podman logout https://default-route-openshift-image-registry.apps.oai.cs.eurecom.fr/', '\$', 6)
mySSH.command('oc logout', '\$', 30)
mySSH.close()
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.OC_IS_FAIL)
RAN.prematureExit = True
return
else:
logging.debug('\u001B[1m Image "oai-physim" push to OC Cluster Registry Successfully\u001B[0m')
# Using helm charts deployment
time.sleep(5)
mySSH.command(f'grep -rl OAICICD_PROJECT ./charts/ | xargs sed -i -e "s#OAICICD_PROJECT#{ocProjectName}#"', '\$', 30)
mySSH.command(f'sed -i -e "s#TAG#{imageTag}#g" ./charts/physims/values.yaml', '\$', 6)
mySSH.command('helm install physim ./charts/physims/ 2>&1 | tee -a cmake_targets/log/physim_helm_summary.txt', '\$', 30)
......@@ -213,9 +150,6 @@ class PhySim:
mySSH.command('oc get pods -l app.kubernetes.io/instance=physim', '\$', 6, resync=True)
if re.search('No resources found', mySSH.getBefore()):
isFinished1 = True
mySSH.command(f'sudo podman rmi default-route-openshift-image-registry.apps.oai.cs.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag}', '\$', 30)
mySSH.command('oc delete is oai-physim', '\$', 30)
mySSH.command('sudo podman logout https://default-route-openshift-image-registry.apps.oai.cs.eurecom.fr/', '\$', 6)
mySSH.command('oc logout', '\$', 30)
mySSH.close()
self.AnalyzeLogFile_phySim(HTML)
......@@ -244,9 +178,6 @@ class PhySim:
mySSH.command('oc get pods -l app.kubernetes.io/instance=physim', '\$', 6, resync=True)
if re.search('No resources found', mySSH.getBefore()):
isFinished1 = True
mySSH.command(f'sudo podman rmi default-route-openshift-image-registry.apps.oai.cs.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag}', '\$', 6)
mySSH.command('oc delete is oai-physim', '\$', 6)
mySSH.command('sudo podman logout https://default-route-openshift-image-registry.apps.oai.cs.eurecom.fr/', '\$', 6)
mySSH.command('oc logout', '\$', 30)
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.OC_PHYSIM_DEPLOY_FAIL)
HTML.CreateHtmlTestRowPhySimTestResult(self.testSummary,self.testResult)
......@@ -304,10 +235,6 @@ class PhySim:
isFinished1 = True
if isFinished1 == True:
logging.debug('\u001B[1m UnDeployed PhySim Successfully on OC Cluster\u001B[0m')
mySSH.command(f'sudo podman rmi default-route-openshift-image-registry.apps.oai.cs.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag}', '\$', 6)
mySSH.command('oc delete is oai-physim', '\$', 6)
logging.debug('\u001B[1m Deleted the Image and ImageStream\u001B[0m')
mySSH.command('sudo podman logout https://default-route-openshift-image-registry.apps.oai.cs.eurecom.fr/', '\$', 6)
mySSH.command('oc logout', '\$', 6)
mySSH.close()
self.AnalyzeLogFile_phySim(HTML)
......
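The tag-and-push step removed from the PhySim class corresponded roughly to the sketch below (`<project>` and `<tag>` are placeholders matching the removed commands above):

```bash
# tag the locally built image for the cluster registry, then push it
sudo podman tag oai-physim:<tag> default-route-openshift-image-registry.apps.oai.cs.eurecom.fr/<project>/oai-physim:<tag>
sudo podman push default-route-openshift-image-registry.apps.oai.cs.eurecom.fr/<project>/oai-physim:<tag> --tls-verify=false
```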
......@@ -12,6 +12,8 @@
</tr>
</table>
[[_TOC_]]
# 1. Introduction
## General
......@@ -119,13 +121,14 @@ The `<image>` could be `oai-gnb`, and the `<tag>` `ci-temp`.
# 6. Deployment using HELM Charts
**CAUTION: even more experimental.**
Helm charts are located in another repository:
Helm charts are located under `charts`. Assuming that the image is already in the image
registry, the physims can be deployed as shown in the following steps:
```bash
git clone https://github.com/OPENAIRINTERFACE/openair-k8s.git
cd openair-k8s
git checkout helm-deployment-S6a-S1C-S1U-in-network-18-with-enb
helm install mme /path-to-your-cloned/openair-k8s/charts/oai-mme/
grep -rl OAICICD_PROJECT ./charts/ | xargs sed -i -e "s#OAICICD_PROJECT#oaicicd-ran#" # select the correct project
sed -i -e "s#TAG#ci-temp#g" ./charts/physims/values.yaml # select the correct tag
helm install physim ./charts/physims/ # deploy
oc get pods # get the list of deployed containers
oc logs <pod> # inspect the logs of a pod
helm uninstall physim # undeploy
```
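While the charts are deployed, the CI polls the physim pods through a label selector; the same check can be run by hand (sketch):

```bash
# list only the pods belonging to the physim helm release
oc get pods -l app.kubernetes.io/instance=physim
```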
......@@ -28,5 +28,3 @@ metadata:
spec:
lookupPolicy:
local: true
status:
tag: latest
......@@ -28,6 +28,3 @@ metadata:
spec:
lookupPolicy:
local: true
status:
tag: latest
......@@ -28,5 +28,3 @@ metadata:
spec:
lookupPolicy:
local: true
status:
tag: latest
......@@ -28,5 +28,3 @@ metadata:
spec:
lookupPolicy:
local: true
status:
tag: latest
......@@ -28,5 +28,3 @@ metadata:
spec:
lookupPolicy:
local: true
status:
tag: latest
......@@ -27,5 +27,3 @@ metadata:
spec:
lookupPolicy:
local: true
status:
tag: latest
......@@ -28,6 +28,3 @@ metadata:
spec:
lookupPolicy:
local: true
status:
tag: latest