Commit 8773e423 authored by Robert Schmidt's avatar Robert Schmidt

Merge branch 'integration_2022_wk47' into 'develop'

integration_2022_wk47

See merge request oai/openairinterface5g!1832

!1824 NR msg4 retransmission fix
!1827 Print ASN1 messages to console only if DEBUG_ASN1 flag is enabled
!1794 Replace gaussdouble() by gaussZiggurat()
!1829 bugfix in SRS configuration
!1830 Fix infinite loop in ULSCH scheduler when scheduling for multiple UEs
!1821 Send RRCReconfiguration when RA with Msg3 through DCCH also when the UE is in the InitialBWP (review pending)
!1797 NR UE move ULSCH PHY memory to stack
!1792 NR SA Tutorials v5
!1826 Update docker documentation
!1825 Fixes to improve CI
parents 1576f353 68de3b71
......@@ -2545,7 +2545,6 @@ target_link_libraries(nrscope ${XFORMS_LIBRARIES})
add_library(rfsimulator MODULE
${OPENAIR_DIR}/radio/rfsimulator/simulator.c
${OPENAIR_DIR}/radio/rfsimulator/apply_channelmod.c
${OPENAIR_DIR}/radio/rfsimulator/new_channel_sim.c
${OPENAIR1_DIR}/PHY/TOOLS/signal_energy.c
)
target_link_libraries(rfsimulator SIMU_COMMON ${ATLAS_LIBRARIES})
......
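For context, the `rfsimulator` target above is built as a loadable radio module that stands in for real RF hardware. A minimal usage sketch follows; the config file path is only an assumption and should be adapted to the local setup:

```
# Run the 5G softmodem against the RF simulator instead of a physical SDR
# (typically from cmake_targets/ran_build/build; the config path is an assumption)
sudo ./nr-softmodem --rfsim --sa -O ../../../targets/PROJECTS/GENERIC-NR-5GC/CONF/gnb.sa.band78.fr1.106PRB.usrpb210.conf
```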
......@@ -36,11 +36,20 @@ Please see [NOTICE](NOTICE.md) file for third party software that is included in
* [How to build](./doc/BUILD.md)
* [How to run the modems](./doc/RUNMODEM.md)
More information and documentation can be found in the [doc folder](./doc).
Unfortunately, not all information is available there; documentation for
specific sub-systems may instead live in the corresponding sub-directories.
To find all READMEs, this command might be handy:
```
find . -iname "readme*"
```
# RAN repository structure #
The OpenAirInterface (OAI) software is composed of the following parts:
<pre>
```
openairinterface5g
├── ci-scripts        : Meta-scripts used by the OSA CI process. Also contains configuration files used day-to-day by the CI.
├── cmake_targets : Build utilities to compile (simulation, emulation and real-time platforms), and generated build files.
......@@ -88,5 +97,6 @@ openairinterface5g
│   ├── TEST
│   ├── UDP
│   └── UTILS
├── radio : drivers for various radios such as USRP, AW2S, RFsim, ...
└── targets : Top-level wrappers for unitary simulation for PHY channels, system-level emulation (eNB-UE with and without S1), and realtime eNB and UE and RRH GW.
</pre>
```
......@@ -16,7 +16,7 @@ icon: http://www.openairinterface.org/wp-content/uploads/2015/06/cropped-oai_fin
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.1
version: 1.0.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
......
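The chart version bump above (0.1.1 to 1.0.0) can be sanity-checked locally before pushing. A minimal sketch, assuming a chart directory path that is only a placeholder:

```
# Show the chart metadata, including the bumped version (chart path is an assumption)
helm show chart ./charts/physims/oai-dlsim-100rb-tm2
# Basic template/metadata sanity check; warnings about missing global.* values are expected
helm lint ./charts/physims/oai-dlsim-100rb-tm2
```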
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-dlsim-100rb-tm2.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-dlsim-100rb-tm2.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-dlsim-100rb-tm2.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-dlsim-100rb-tm2.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-dlsim-100rb-tm2.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-dlsim-100rb-tm2.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-dlsim-100rb-tm2.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,8 +20,6 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
......@@ -39,7 +29,9 @@ spec:
cmake_targets/autotests/run_exec_autotests.bash -g "dlsim.100rb+tm2" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
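Since the workload above now runs as a one-shot Job with `restartPolicy: Never`, the container signals the end of the autotest run by echoing `FINISHED` before sleeping. A minimal sketch of how a run could be followed once deployed; the namespace is an assumption, and the Job name comes from `{{ .Chart.Name }}`:

```
# Follow the autotest output; the container prints "FINISHED" when the run is done
kubectl logs -n physim -f job/oai-dlsim-100rb-tm2
# Or poll for the marker non-interactively (namespace is an assumption)
kubectl logs -n physim job/oai-dlsim-100rb-tm2 | grep -q FINISHED && echo "autotest done"
```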
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-dlsim-100rb-tm2.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-dlsim-100rb-tm2
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-dlsim-100rb-tm2.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
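With the securityContext and resources defaults shown above, individual settings can still be overridden per install. A minimal sketch; the chart path and release name are placeholders:

```
# Override container security and resource settings at install time (path/names are assumptions)
helm install dlsim-100rb-tm2 ./charts/physims/oai-dlsim-100rb-tm2 \
  --set securityContext.privileged=true \
  --set resources.requests.cpu=100m \
  --set resources.requests.memory=128Mi
```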
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-dlsim-basic.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-dlsim-basic.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-dlsim-basic.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-dlsim-basic.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-dlsim-basic.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-dlsim-basic.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-dlsim-basic.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,8 +20,6 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
......@@ -39,7 +29,9 @@ spec:
cmake_targets/autotests/run_exec_autotests.bash -g "dlsim.basic" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-dlsim-basic.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-dlsim-basic
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-dlsim-basic.selectorLabels" . | nindent 4 }}
......@@ -27,31 +27,15 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
privileged: false
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
nodeSelector: {}
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-ldpctest.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-ldpctest.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-ldpctest.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-ldpctest.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-ldpctest.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-ldpctest.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-ldpctest.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,18 +20,18 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
command: ["/bin/sh", "-c"]
args:
- >
cmake_targets/autotests/run_exec_autotests.bash -g "ldpctest" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-ldpctest.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-ldpctest
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-ldpctest.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-dlschsim.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-dlschsim.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-dlschsim.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-dlschsim.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-dlschsim.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-dlschsim.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-dlschsim.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,18 +20,18 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
command: ["/bin/sh", "-c"]
args:
- >
cmake_targets/autotests/run_exec_autotests.bash -g "nr_dlschsim" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-dlschsim.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-dlschsim
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-nr-dlschsim.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-dlsim-basic.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-dlsim-basic.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-dlsim-basic.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-dlsim-basic.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-dlsim-basic.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-dlsim-basic.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-dlsim-basic.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,8 +20,6 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
......@@ -39,7 +29,9 @@ spec:
cmake_targets/autotests/run_exec_autotests.bash -g "nr_dlsim.basic" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-dlsim-basic.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-dlsim-basic
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-nr-dlsim-basic.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-dlsim-dmrs-ptrs.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-dlsim-dmrs-ptrs.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-dlsim-dmrs-ptrs.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-dlsim-dmrs-ptrs.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-dlsim-dmrs-ptrs.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-dlsim-dmrs-ptrs.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-dlsim-dmrs-ptrs.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,8 +20,6 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
......@@ -39,7 +29,9 @@ spec:
cmake_targets/autotests/run_exec_autotests.bash -g "nr_dlsim.dmrs+ptrs" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-dlsim-dmrs-ptrs.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-dlsim-dmrs-ptrs
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-nr-dlsim-dmrs-ptrs.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-dlsim-mcs-mimo.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-dlsim-mcs-mimo.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-dlsim-mcs-mimo.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-dlsim-mcs-mimo.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-dlsim-mcs-mimo.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-dlsim-mcs-mimo.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-dlsim-mcs-mimo.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,8 +20,6 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
......@@ -39,7 +29,9 @@ spec:
cmake_targets/autotests/run_exec_autotests.bash -g "nr_dlsim.mcs+mimo" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-dlsim-mcs-mimo.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-dlsim-mcs-mimo
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-nr-dlsim-mcs-mimo.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-dlsim-offset.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-dlsim-offset.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-dlsim-offset.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-dlsim-offset.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-dlsim-offset.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-dlsim-offset.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-dlsim-offset.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,8 +20,6 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
......@@ -39,7 +29,9 @@ spec:
cmake_targets/autotests/run_exec_autotests.bash -g "nr_dlsim.offset" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-dlsim-offset.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-dlsim-offset
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-nr-dlsim-offset.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-pbchsim-106rb.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-pbchsim-106rb.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-pbchsim-106rb.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-pbchsim-106rb.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-pbchsim-106rb.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-pbchsim-106rb.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-pbchsim-106rb.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,8 +20,6 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
......@@ -39,7 +29,9 @@ spec:
cmake_targets/autotests/run_exec_autotests.bash -g "nr_pbchsim.106rb" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-pbchsim-106rb.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-pbchsim-106rb
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-nr-pbchsim-106rb.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-pbchsim-217rb.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-pbchsim-217rb.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-pbchsim-217rb.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-pbchsim-217rb.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-pbchsim-217rb.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-pbchsim-217rb.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-pbchsim-217rb.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,8 +20,6 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
......@@ -39,7 +29,9 @@ spec:
cmake_targets/autotests/run_exec_autotests.bash -g "nr_pbchsim.217rb" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-pbchsim-217rb.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-pbchsim-217rb
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-nr-pbchsim-217rb.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-pbchsim-273rb.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-pbchsim-273rb.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-pbchsim-273rb.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-pbchsim-273rb.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-pbchsim-273rb.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-pbchsim-273rb.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-pbchsim-273rb.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,8 +20,6 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
......@@ -39,7 +29,9 @@ spec:
cmake_targets/autotests/run_exec_autotests.bash -g "nr_pbchsim.273rb" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-pbchsim-273rb.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-pbchsim-273rb
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-nr-pbchsim-273rb.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-prachsim.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-prachsim.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-prachsim.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-prachsim.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-prachsim.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-prachsim.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-prachsim.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,18 +20,18 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
command: ["/bin/sh", "-c"]
args:
- >
cmake_targets/autotests/run_exec_autotests.bash -g "nr_prachsim" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-prachsim.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-prachsim
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-nr-prachsim.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-pucchsim.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-pucchsim.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-pucchsim.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-pucchsim.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-pucchsim.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-pucchsim.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-pucchsim.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,18 +20,18 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
command: ["/bin/sh", "-c"]
args:
- >
cmake_targets/autotests/run_exec_autotests.bash -g "nr_pucchsim" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-pucchsim.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-pucchsim
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-nr-pucchsim.selectorLabels" . | nindent 4 }}
......@@ -27,15 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
privileged: false
service:
type: ClusterIP
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-ulschsim.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-ulschsim.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-ulschsim.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-ulschsim.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-ulschsim.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-ulschsim.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-ulschsim.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,18 +20,18 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
command: ["/bin/sh", "-c"]
args:
- >
cmake_targets/autotests/run_exec_autotests.bash -g "nr_ulschsim" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-ulschsim.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-ulschsim
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-nr-ulschsim.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-ulsim-3gpp.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-ulsim-3gpp.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-ulsim-3gpp.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-ulsim-3gpp.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-ulsim-3gpp.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-ulsim-3gpp.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-ulsim-3gpp.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,8 +20,6 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
......@@ -39,7 +29,9 @@ spec:
cmake_targets/autotests/run_exec_autotests.bash -g "nr_ulsim.3gpp" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-ulsim-3gpp.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-ulsim-3gpp
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.Port }}
protocol: TCP
selector:
{{- include "oai-nr-ulsim-3gpp.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-ulsim-mimo.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-ulsim-mimo.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-ulsim-mimo.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-ulsim-mimo.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-ulsim-mimo.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-ulsim-mimo.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-ulsim-mimo.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,8 +20,6 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
......@@ -39,7 +29,9 @@ spec:
cmake_targets/autotests/run_exec_autotests.bash -g "nr_ulsim.mimo" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-ulsim-mimo.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-ulsim-mimo
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.port }}
protocol: TCP
selector:
{{- include "oai-nr-ulsim-mimo.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-ulsim-misc.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-ulsim-misc.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-ulsim-misc.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-ulsim-misc.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-ulsim-misc.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-ulsim-misc.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-ulsim-misc.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,8 +20,6 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
......@@ -39,7 +29,9 @@ spec:
cmake_targets/autotests/run_exec_autotests.bash -g "nr_ulsim.misc" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-ulsim-misc.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-ulsim-misc
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.port }}
protocol: TCP
selector:
{{- include "oai-nr-ulsim-misc.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-nr-ulsim-sc-fdma.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-nr-ulsim-sc-fdma.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-nr-ulsim-sc-fdma.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-nr-ulsim-sc-fdma.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-ulsim-sc-fdma.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-nr-ulsim-sc-fdma.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-nr-ulsim-sc-fdma.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -39,7 +31,9 @@ spec:
cmake_targets/autotests/run_exec_autotests.bash -g "nr_ulsim.sc-fdma" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-nr-ulsim-sc-fdma.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-nr-ulsim-sc-fdma
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.port }}
protocol: TCP
selector:
{{- include "oai-nr-ulsim-sc-fdma.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-polartest.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-polartest.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-polartest.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-polartest.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-polartest.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-polartest.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-polartest.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,18 +20,18 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
command: ["/bin/sh", "-c"]
args:
- >
- >
cmake_targets/autotests/run_exec_autotests.bash -g "polartest" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-polartest.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-polartest
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.port }}
protocol: TCP
selector:
{{- include "oai-polartest.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-smallblocktest.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-smallblocktest.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-smallblocktest.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-smallblocktest.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-smallblocktest.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-smallblocktest.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-smallblocktest.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,18 +20,18 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
command: ["/bin/sh", "-c"]
args:
- >
- >
cmake_targets/autotests/run_exec_autotests.bash -g "smallblocktest" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-smallblocktest.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-smallblocktest
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.port }}
protocol: TCP
selector:
{{- include "oai-smallblocktest.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oai-ulsim.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oai-ulsim.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oai-ulsim.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oai-ulsim.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
apiVersion: apps/v1
kind: Deployment
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-ulsim.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "oai-ulsim.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
labels:
{{- include "oai-ulsim.selectorLabels" . | nindent 8 }}
app: physim
spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
......@@ -28,18 +20,18 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: 80
env:
- name: OPENAIR_DIR
value: /opt/oai-physim
command: ["/bin/sh", "-c"]
args:
- >
- >
cmake_targets/autotests/run_exec_autotests.bash -g "ulsim" -d bin/ &&
echo "FINISHED" && sleep infinity
dnsPolicy: ClusterFirst
restartPolicy: Always
restartPolicy: Never
schedulerName: default-scheduler
serviceAccountName: {{ .Values.global.serviceAccountName }}
terminationGracePeriodSeconds: 30
nodeSelector:
{{- toYaml .Values.global.nodeSelector | nindent 12 }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
name: {{ .Chart.Name }}
labels:
{{- include "oai-ulsim.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if contains "ClusterIP" .Values.service.type }}
clusterIP: None
{{- end }}
ports:
- name: oai-ulsim
# Port accessible outside cluster
port: {{ .Values.service.port }}
# Port to forward to inside the pod
targetPort: {{ .Values.service.port }}
protocol: TCP
selector:
{{- include "oai-ulsim.selectorLabels" . | nindent 4 }}
......@@ -27,19 +27,7 @@ podSecurityContext:
runAsGroup: 0
securityContext:
privileged: true
capabilities:
add:
- SYS_CAP_PTRACE
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
privileged: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
......
......@@ -6,7 +6,7 @@ rules:
- apiGroups:
- security.openshift.io
resourceNames:
- privileged
- anyuid
resources:
- securitycontextconstraints
verbs:
......
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.global.serviceAccountName }}
name: oai-physim-sa #{{ .Values.global.serviceAccountName }}
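For reference, the role change above (privileged → anyuid) combined with the hard-coded service account corresponds roughly to the following manual OpenShift step; the project name is a placeholder and the RBAC binding in the chart is the actual mechanism used:

```
# Manual equivalent of granting the anyuid SCC to the physim service account.
# <project> stands for the OAICICD_PROJECT namespace.
oc adm policy add-scc-to-user anyuid -z oai-physim-sa -n <project>
```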
......@@ -5,137 +5,12 @@
global:
serviceAccountName: oai-physim-sa
namespace: "OAICICD_PROJECT"
image:
image:
registry: local
repository: image-registry.openshift-image-registry.svc:5000/oaicicd-ran/oai-physim
version: TAG
# pullPolicy: IfNotPresent or Never or Always
pullPolicy: Always
nodeSelector:
worker: "telco"
## Declaring values specific to the corresponding physim to override
dlsim.basic:
replicaCount: 1
service:
type: ClusterIP
port: 80
dlsim.100rb+tm2:
replicaCount: 1
service:
type: ClusterIP
port: 80
ulsim:
replicaCount: 1
service:
type: ClusterIP
port: 80
ldpctest:
replicaCount: 1
service:
type: ClusterIP
port: 80
polartest:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-pbchsim.106rb:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-pbchsim.217rb:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-pbchsim.273rb:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-dlsim.basic:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-dlsim.dmrs+ptrs:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-dlsim.mcs+mimo:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-dlsim.offset:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-dlschsim:
replicaCount: 1
service:
type: ClusterIP
port: 80
smallblocktest:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-ulschsim:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-pucchsim:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-ulsim.3gpp:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-ulsim.mimo:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-ulsim.misc:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-ulsim.sc-fdma:
replicaCount: 1
service:
type: ClusterIP
port: 80
nr-prachsim:
replicaCount: 1
service:
type: ClusterIP
port: 80
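With the per-simulator service/replica blocks dropped from values.yaml in this hunk, the remaining knobs are the global ones the Job templates reference via `.Values.global.*`. A hedged example of overriding them at install time (release name matches the CI, values are placeholders):

```
# Override the global values consumed by the physim Job templates.
# "telco" matches the default shown above; adjust for the target cluster.
helm install physim ./charts/physims/ \
  --set global.serviceAccountName=oai-physim-sa \
  --set global.nodeSelector.worker=telco \
  --wait
```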
......@@ -332,7 +332,7 @@ pipeline {
script {
if (doMandatoryTests) {
dir ('archives') {
sh "if [ -d rf_sim/test ] || [ -d l2_sim/test ]; then zip -r -qq vm_tests_logs.zip */test ; fi"
sh "if [ -d rf_sim/test ] || [ -d rf5g_sim/test ] || [ -d l2_sim/test ]; then zip -r -qq vm_tests_logs.zip */test ; fi"
}
if(fileExists('archives/vm_tests_logs.zip')) {
archiveArtifacts artifacts: 'archives/vm_tests_logs.zip'
......
......@@ -139,14 +139,14 @@ class PhySim:
# Using helm charts deployment
mySSH.command(f'grep -rl OAICICD_PROJECT ./charts/ | xargs sed -i -e "s#OAICICD_PROJECT#{ocProjectName}#"', '\$', 30)
mySSH.command(f'sed -i -e "s#TAG#{imageTag}#g" ./charts/physims/values.yaml', '\$', 6)
mySSH.command('helm install physim ./charts/physims/ 2>&1 | tee -a cmake_targets/log/physim_helm_summary.txt', '\$', 30)
mySSH.command('helm install physim ./charts/physims/ --wait 2>&1 | tee -a cmake_targets/log/physim_helm_summary.txt', '\$', 30)
if mySSH.getBefore().count('STATUS: deployed') == 0:
logging.error('\u001B[1m Deploying PhySim Failed using helm chart on OC Cluster\u001B[0m')
mySSH.command('helm uninstall physim >> cmake_targets/log/physim_helm_summary.txt 2>&1', '\$', 30)
mySSH.command('helm uninstall physim | tee -a cmake_targets/log/physim_helm_summary.txt 2>&1', '\$', 30)
isFinished1 = False
while(isFinished1 == False):
time.sleep(20)
mySSH.command('oc get pods -l app.kubernetes.io/instance=physim', '\$', 6, resync=True)
mySSH.command('oc get pods -l app=physim', '\$', 6, resync=True)
if re.search('No resources found', mySSH.getBefore()):
isFinished1 = True
mySSH.command('oc logout', '\$', 30)
......@@ -158,24 +158,27 @@ class PhySim:
logging.debug('\u001B[1m Deployed PhySim Successfully using helm chart\u001B[0m')
isRunning = False
count = 0
# Check whether all containers reach Running or Completed state within 2 minutes
while(count < 2 and isRunning == False):
time.sleep(60)
mySSH.command('oc get pods -o wide -l app.kubernetes.io/instance=physim | tee -a cmake_targets/log/physim_pods_summary.txt', '\$', 30, resync=True)
if mySSH.getBefore().count('Running') == 21:
mySSH.command('oc get pods -o wide -l app=physim | tee -a cmake_targets/log/physim_pods_summary.txt', '\$', 30, resync=True)
running_count = mySSH.getBefore().count('Running')
completed_count = mySSH.getBefore().count('Completed')
if (running_count + completed_count) == 21:
logging.debug('\u001B[1m Running the physim test Scenarios\u001B[0m')
isRunning = True
podNames = re.findall('oai-[\S\d\w]+', mySSH.getBefore())
count +=1
if isRunning == False:
logging.error('\u001B[1m Some PODS Running FAILED \u001B[0m')
mySSH.command('oc get pods -l app.kubernetes.io/instance=physim 2>&1 | tee -a cmake_targets/log/physim_pods_summary.txt', '\$', 6)
logging.error('\u001B[1m Some pods are in Error state \u001B[0m')
mySSH.command('oc get pods -l app=physim 2>&1 | tee -a cmake_targets/log/physim_pods_summary.txt', '\$', 6)
mySSH.command('for pod in $(oc get pods | tail -n +2 | awk \'{print $1}\'); do oc describe pod $pod >> cmake_targets/log/physim_pods_summary.txt; done', '\$', 10)
mySSH.command('helm uninstall physim 2>&1 >> cmake_targets/log/physim_helm_summary.txt', '\$', 6)
mySSH.command('helm uninstall physim | tee -a cmake_targets/log/physim_helm_summary.txt 2>&1', '\$', 6)
self.AnalyzeLogFile_phySim()
isFinished1 = False
while(isFinished1 == False):
time.sleep(20)
mySSH.command('oc get pods -l app.kubernetes.io/instance=physim', '\$', 6, resync=True)
mySSH.command('oc get pods -l app=physim', '\$', 6, resync=True)
if re.search('No resources found', mySSH.getBefore()):
isFinished1 = True
mySSH.command('oc logout', '\$', 30)
......@@ -188,7 +191,7 @@ class PhySim:
isFinished = False
# doing a deep copy!
tmpPodNames = podNames.copy()
while(count < 15 and isFinished == False):
while(count < 9 and isFinished == False):
time.sleep(60)
for podName in tmpPodNames:
mySSH.command2(f'oc logs --tail=1 {podName} 2>&1', 6, silent=True)
......@@ -230,7 +233,7 @@ class PhySim:
isFinished1 = False
while(isFinished1 == False):
time.sleep(20)
mySSH.command('oc get pods -l app.kubernetes.io/instance=physim', '\$', 6, resync=True)
mySSH.command('oc get pods -l app=physim', '\$', 6, resync=True)
if re.search('No resources found', mySSH.getBefore()):
isFinished1 = True
if isFinished1 == True:
......
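Since the selector changes above move from the instance label to `app=physim`, ad-hoc inspection of the test pods should use the same label; for example:

```
# Watch the physim pods with the label selector now used by the CI code.
oc get pods -l app=physim -o wide -w
```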
......@@ -211,8 +211,8 @@ MACRLCs = (
num_cc = 1;
tr_s_preference = "local_L1";
tr_n_preference = "local_RRC";
pusch_TargetSNRx10 = 250;
pucch_TargetSNRx10 = 250;
pusch_TargetSNRx10 = 220;
pucch_TargetSNRx10 = 220;
ulsch_max_frame_inactivity = 10;
}
);
......
......@@ -25,7 +25,6 @@ gNBs =
pdsch_AntennaPorts_XP = 2;
pusch_AntennaPorts = 2;
ul_prbblacklist = "51,52,53,54"
do_SRS = 1;
pdcch_ConfigSIB1 = (
......@@ -210,8 +209,8 @@ MACRLCs = (
num_cc = 1;
tr_s_preference = "local_L1";
tr_n_preference = "local_RRC";
# pusch_TargetSNRx10 = 200;
# pucch_TargetSNRx10 = 150;
pusch_TargetSNRx10 = 220;
pucch_TargetSNRx10 = 200;
ulsch_max_frame_inactivity = 0;
}
);
......@@ -220,7 +219,7 @@ L1s = (
{
num_cc = 1;
tr_n_preference = "local_mac";
prach_dtx_threshold = 120;
prach_dtx_threshold = 150;
# pucch0_dtx_threshold = 150;
}
);
......@@ -234,7 +233,7 @@ RUs = (
att_rx = 0;
bands = [78];
max_pdschReferenceSignalPower = -27;
max_rxgain = 75;
max_rxgain = 68;
eNB_instances = [0];
##beamforming 1x2 matrix: 1 layer x 2 antennas
bf_weights = [0x00007fff, 0x0000];
......
......@@ -209,8 +209,8 @@ MACRLCs = (
num_cc = 1;
tr_s_preference = "local_L1";
tr_n_preference = "local_RRC";
# pusch_TargetSNRx10 = 200;
# pucch_TargetSNRx10 = 150;
pusch_TargetSNRx10 = 220;
pucch_TargetSNRx10 = 200;
ulsch_max_frame_inactivity = 0;
}
);
......@@ -219,7 +219,7 @@ L1s = (
{
num_cc = 1;
tr_n_preference = "local_mac";
prach_dtx_threshold = 120;
prach_dtx_threshold = 150;
# pucch0_dtx_threshold = 150;
}
);
......@@ -233,7 +233,7 @@ RUs = (
att_rx = 0;
bands = [78];
max_pdschReferenceSignalPower = -27;
max_rxgain = 75;
max_rxgain = 68;
eNB_instances = [0];
##beamforming 1x2 matrix: 1 layer x 2 antennas
bf_weights = [0x00007fff, 0x0000];
......
......@@ -8,7 +8,7 @@ ColNames :
- Average vs Reference Deviation (Reference Value; Acceptability Threshold)
Ref :
feprx : 120.0
feptx_prec : 16.0
feptx_prec : 30.0
feptx_ofdm : 50.0
feptx_total : 120.0
L1 Tx processing : 300.0
......
......@@ -45,7 +45,7 @@
<testCase id="040000">
<class>Initialize_eNB</class>
<desc>Initialize gNB</desc>
<Initialize_eNB_args>-O ci-scripts/conf_files/gnb.band78.sa.fr1.106PRB.2x2.usrpn310.asue.conf --sa -q --usrp-tx-thread-config 1 --T_stdout 2 --tune-offset 30000000 --log_config.global_log_options level,nocolor,time</Initialize_eNB_args>
<Initialize_eNB_args>-O ci-scripts/conf_files/gnb.band78.sa.fr1.106PRB.2x2.usrpn310.asue.conf --sa -q --usrp-tx-thread-config 1 --T_stdout 2 --tune-offset 30000000 --thread-pool 1,3,5,7,9,11,13,15 --log_config.global_log_options level,nocolor,time</Initialize_eNB_args>
<eNB_instance>0</eNB_instance>
<eNB_serverId>0</eNB_serverId>
<air_interface>nr</air_interface>
......@@ -53,6 +53,7 @@
<eNB_Stats>yes</eNB_Stats>
<rt_stats_cfg>datalog_rt_stats.2x2.yaml</rt_stats_cfg>
<USRP_IPAddress>172.21.19.14</USRP_IPAddress>
<cmd_prefix>numactl --cpunodebind=netdev:ens2f0np0 --membind=netdev:ens2f0np0</cmd_prefix>
</testCase>
<testCase id="050000">
......
......@@ -119,8 +119,8 @@
<testCase id="070001">
<class>Iperf</class>
<desc>iperf (UL/3Mbps/UDP)(60 sec)(single-ue profile)</desc>
<iperf_args>-u -b 3M -t 60</iperf_args>
<desc>iperf (UL/7Mbps/UDP)(60 sec)(single-ue profile)</desc>
<iperf_args>-u -b 7M -t 60</iperf_args>
<direction>UL</direction>
<id>nrmodule2_quectel</id>
<iperf_packetloss_threshold>5</iperf_packetloss_threshold>
......
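On the traffic-generator side, the updated uplink profile corresponds roughly to the client invocation below; the server address is a placeholder, not taken from this merge request:

```
# UDP uplink at 7 Mbps for 60 seconds, matching the updated <iperf_args> above.
# 192.168.0.1 is a placeholder for the iperf server address.
iperf -c 192.168.0.1 -u -b 7M -t 60
```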
#!/bin/bash
#set -xv
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
......@@ -68,7 +69,7 @@ function test_run() {
mkdir -p $log_dir
echo "" > $temp_exec_log
echo "" > $log_file
echo "start at $(date)" > "$log_file"
#echo "log_dir = $log_dir"
#echo "log_file = $log_file"
#echo "exec_file = $exec_file"
......@@ -92,78 +93,61 @@ function test_run() {
tags_array_index=0
for main_exec_args_array_index in "${main_exec_args_array[@]}"
do
global_result=1
global_result=PASS
result_string=""
PROPER_DESC=`echo ${desc_array[$tags_array_index]} | sed -e "s@^.*Test cases.*(Test@Test@" -e "s@^ *(@@" -e"s/)$//" -e "s/),$//"`
echo_info "$PROPER_DESC"
for (( run_index=1; run_index <= $nruns; run_index++ ))
do
temp_exec_log=$log_dir/test.$test_case_name.${tags_array[$tags_array_index]}.run_$run_index
echo "" > $temp_exec_log
temp_exec_log="$log_dir/test.$test_case_name.${tags_array[$tags_array_index]}.run_$run_index"
echo "Executing test case $test_case_name.${tags_array[$tags_array_index]}, Run Index = $run_index, Execution Log file = $temp_exec_log"
echo "-----------------------------------------------------------------------------" >> $temp_exec_log 2>&1
echo "<EXECUTION LOG Test Case = $test_case_name.${tags_array[$tags_array_index]}, Run = $run_index >" >> $temp_exec_log 2>&1
echo "Executing $main_exec $main_exec_args_array_index " | tee $temp_exec_log
{ uname -a ; eval "$main_exec $main_exec_args_array_index" ;} >> $temp_exec_log 2>&1
echo "-----------------------------------------------------------------------------" > "$temp_exec_log"
echo "<EXECUTION LOG Test Case = $test_case_name.${tags_array[$tags_array_index]}, Run = $run_index >" >> "$temp_exec_log"
echo "Executing $main_exec $main_exec_args_array_index at $(date)" >> "$temp_exec_log"
uname -a >> "$temp_exec_log"
time "$main_exec" $main_exec_args_array_index >> "$temp_exec_log" 2>&1 &
done
echo "</EXECUTION LOG Test Case = $test_case_name.${tags_array[$tags_array_index]}, Run = $run_index >" >> $temp_exec_log 2>&1
cat $temp_exec_log >> $log_file 2>&1
wait
for (( run_index=1; run_index <= $nruns; run_index++ ))
do
echo "</EXECUTION LOG Test Case = $test_case_name.${tags_array[$tags_array_index]}, Run = $run_index >" >> $temp_exec_log 2>&1
cat "$temp_exec_log" >> "$log_file" 2>&1
result=1
result=PASS
for search_expr in "${search_expr_array[@]}"
do
search_result=`grep -E "$search_expr" $temp_exec_log`
if [ -z "$search_result" ]; then
let "result = result & 0"
else
let "result = result & 1"
fi
grep -Eq "$search_expr" $temp_exec_log || result=FAIL
done
#If a negative search expression matches, the program has crashed and the test case fails even if the positive checks passed
search_result=`grep -iE "$search_expr_negative" $temp_exec_log`
if [ -n "$search_result" ]; then
result=0
fi
let "global_result = global_result & result"
#echo "result = $result"
if [ "$result" -eq "0" ]; then
result_string=$result_string" Run_$run_index =FAIL"
grep -q -iE "$search_expr_negative" $temp_exec_log && result=FAIL
if [ "$result" != PASS ]; then
echo_error "$test_case_name.${tags_array[$tags_array_index]} RUN = $run_index Result = FAIL"
fi
if [ "$result" -eq "1" ]; then
result_string=$result_string" Run_$run_index =PASS"
else
echo_success "$test_case_name.${tags_array[$tags_array_index]} RUN = $run_index Result = PASS"
fi
result_string=$result_string" Run_$run_index =$result"
if [ $result != PASS ] ; then
global_result=FAIL
fi
done #End of for loop (nindex)
done #End of for loop (nindex)
echo " Result String = $result_string"
echo "END at $(date)" >> "$log_file"
if [ "$result_string" == "" ]; then
echo_error "execution $test_case_name.${tags_array[$tags_array_index]} {$PROPER_DESC} Run_Result = \"$result_string\" Result = FAIL"
xUnit_fail "execution" "$test_case_name.${tags_array[$tags_array_index]}" "FAIL" "$result_string" "$xmlfile_testcase" "$PROPER_DESC"
else
if [ "$global_result" == "0" ]; then
if [ "$global_result" != PASS ]; then
echo_error "execution $test_case_name.${tags_array[$tags_array_index]} {$PROPER_DESC} Run_Result = \"$result_string\" Result = FAIL"
xUnit_fail "execution" "$test_case_name.${tags_array[$tags_array_index]}" "FAIL" "$result_string" "$xmlfile_testcase" "$PROPER_DESC"
fi
if [ "$global_result" == "1" ]; then
else
echo_success "execution $test_case_name.${tags_array[$tags_array_index]} {$PROPER_DESC} Run_Result = \"$result_string\" Result = PASS "
xUnit_success "execution" "$test_case_name.${tags_array[$tags_array_index]}" "PASS" "$result_string" "$xmlfile_testcase" "$PROPER_DESC"
fi
fi
let "tags_array_index++"
done
......
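The net effect of the loop changes above is that all runs of a test case are launched in the background and reaped with a single `wait` before results are graded. A minimal sketch of that pattern, with the script's helper variables stubbed out as placeholders:

```
#!/bin/bash
# Minimal sketch of the parallel-run pattern introduced above; main_exec, args,
# log_dir and nruns stand in for the real script variables.
main_exec=./nr_ulsim
args="-n100"
log_dir=/tmp/autotest
nruns=3
mkdir -p "$log_dir"
for (( run_index=1; run_index <= nruns; run_index++ )); do
  log="$log_dir/run_$run_index.log"
  # Each run goes to the background so the runs execute concurrently.
  { time "$main_exec" $args ; } > "$log" 2>&1 &
done
wait   # Block until every background run has finished before grading results.
```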
......@@ -127,8 +127,8 @@
<desc>nr_pbchsim Test cases. (Test1: PBCH-only, 217 PRB),
(Test2: PBCH and synchronization, 217 RPB)</desc>
<main_exec>nr_pbchsim</main_exec>
<main_exec_args>-s-11 -S-8 -n10 -R217
-s-11 -S-8 -n10 -o8000 -I -R217</main_exec_args>
<main_exec_args>-s-10 -S-8 -n10 -R217
-s-10 -S-8 -n10 -o8000 -I -R217</main_exec_args>
<tags>test1 test2</tags>
<search_expr_true>PBCH test OK</search_expr_true>
<search_expr_false>segmentation fault|assertion|exiting|fatal</search_expr_false>
......@@ -139,8 +139,8 @@
<desc>nr_pbchsim Test cases. (Test1: PBCH-only, 273 PRB),
(Test2: PBCH and synchronization, 273 PRB)</desc>
<main_exec>nr_pbchsim</main_exec>
<main_exec_args>-s-11 -S-8 -n10 -R273
-s-11 -S-8 -n10 -o8000 -I -R273</main_exec_args>
<main_exec_args>-s-10 -S-8 -n10 -R273
-s-10 -S-8 -n10 -o8000 -I -R273</main_exec_args>
<tags>test1 test2</tags>
<search_expr_true>PBCH test OK</search_expr_true>
<search_expr_false>segmentation fault|assertion|exiting|fatal</search_expr_false>
......
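For reference, the updated 217-PRB case corresponds to running the simulator with roughly the arguments below (SNR sweep now starting at -10 dB); the build directory is a typical location and an assumption, not part of this change:

```
# Local invocation matching the updated nr_pbchsim test arguments above.
cd cmake_targets/ran_build/build   # typical build output directory; path is an assumption
./nr_pbchsim -s-10 -S-8 -n10 -R217
./nr_pbchsim -s-10 -S-8 -n10 -o8000 -I -R217
```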