Commit 331f3d28 authored by Raphael Defosseux

fix(ci): increased a few command timeouts for RF sim failure cases

Signed-off-by: Raphael Defosseux <raphael.defosseux@eurecom.fr>
parent 3a9e601d
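
Every change in this commit widens the `timeout` argument of a `subprocess.check_output()` call in the CI's `Containerize` class. When the wrapped shell command exceeds that limit, Python raises `subprocess.TimeoutExpired`; in the first hunk below it is caught by the broad `except Exception` and reported as a failed deployment, so a too-tight timeout turns a slow-but-successful command into a spurious CI failure. A minimal sketch of the mechanism, with an illustrative command and timeout values that are not taken from this commit:

```python
import subprocess

# Illustrative only: a command that outlives the allowed timeout.
cmd = 'sleep 5'
try:
    out = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT,
                                  universal_newlines=True, timeout=2)
except subprocess.TimeoutExpired as e:
    # check_output() kills the child and raises once the limit elapses;
    # callers like the hunks below catch this and flag the step as failed.
    print('command timed out after', e.timeout, 'seconds')
```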
@@ -638,7 +638,7 @@ class Containerize():
     cmd = 'cd ' + self.yamlPath[0] + ' && docker-compose -f docker-compose-ci.yml up -d ' + self.services[0]
     logging.debug(cmd)
     try:
-        deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=30)
+        deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=100)
     except Exception as e:
         self.exitStatus = 1
         logging.error('Could not deploy')
@@ -651,7 +651,7 @@ class Containerize():
     healthy = 0
     while (count < 10):
         count += 1
-        deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=10)
+        deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=30)
         healthy = 0
         for state in deployStatus.split('\n'):
             res = re.search('Up \(healthy\)', state)
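
This second hunk sits inside a health-check loop: the compose status is re-fetched up to ten times and each line is scanned for the `Up (healthy)` state. A standalone sketch of the pattern, assuming a hypothetical helper name, service count, and retry interval (the real interval is outside the diff's context lines):

```python
import re
import subprocess
import time

def wait_until_healthy(compose_dir, expected, attempts=10):
    # Hypothetical helper mirroring the loop above: poll `docker-compose ps`
    # and count 'Up (healthy)' rows until every expected container is healthy.
    cmd = 'cd ' + compose_dir + ' && docker-compose -f docker-compose-ci.yml ps --all'
    for _ in range(attempts):
        status = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT,
                                         universal_newlines=True, timeout=30)
        healthy = sum(1 for line in status.split('\n')
                      if re.search(r'Up \(healthy\)', line))
        if healthy == expected:
            return True
        time.sleep(10)  # assumed interval; not shown in the diff
    return False
```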
@@ -683,7 +683,7 @@ class Containerize():
     # if the containers are running, recover the logs!
     cmd = 'cd ' + self.yamlPath[0] + ' && docker-compose -f docker-compose-ci.yml ps --all'
     logging.debug(cmd)
-    deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=10)
+    deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=30)
     anyLogs = False
     for state in deployStatus.split('\n'):
         res = re.search('Name|----------', state)
@@ -697,7 +697,7 @@ class Containerize():
         cName = res.group('container_name')
         cmd = 'cd ' + self.yamlPath[0] + ' && docker logs ' + cName + ' > ' + cName + '.log 2>&1'
         logging.debug(cmd)
-        deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=10)
+        deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=30)
     if anyLogs:
         cmd = 'mkdir -p ../cmake_targets/log && mv ' + self.yamlPath[0] + '/*.log ../cmake_targets/log'
         logging.debug(cmd)
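
The third and fourth hunks belong to the log-recovery path: list every container (running or exited) with `ps --all`, pull each name out of the table, and dump its output with `docker logs` before the stack is torn down. A condensed sketch of that flow; the name extraction is hypothetical, since the real `container_name` pattern sits outside the diff's context lines:

```python
import re
import subprocess

def recover_container_logs(compose_dir):
    # List all containers for this compose project, including exited ones.
    cmd = 'cd ' + compose_dir + ' && docker-compose -f docker-compose-ci.yml ps --all'
    status = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT,
                                     universal_newlines=True, timeout=30)
    for line in status.split('\n'):
        # Skip the table header and separator rows, as the original loop does.
        if not line.strip() or re.search('Name|----------', line):
            continue
        # Assumption: the container name is the first whitespace-delimited field.
        name = line.split()[0]
        cmd = 'cd ' + compose_dir + ' && docker logs ' + name + ' > ' + name + '.log 2>&1'
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT,
                                universal_newlines=True, timeout=30)
```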
@@ -805,7 +805,7 @@ class Containerize():
     time.sleep(5)
     cmd = 'docker cp ' + self.svrContName + ':/tmp/iperf_server.log ../cmake_targets/log/iperf_server_' + HTML.testCase_id + '.log'
     logging.debug(cmd)
-    serverStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=10)
+    serverStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=30)
     # Analyze client output
     result = re.search('Server Report:', clientStatus)
...
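
The final hunk copies the iperf server log out of its container before the client output is parsed for a 'Server Report:' section. A sketch of that copy step, with placeholder names standing in for `self.svrContName` and `HTML.testCase_id`:

```python
import subprocess

# Placeholders for self.svrContName and HTML.testCase_id.
svr_cont_name = 'rfsim5g-oai-nr-ue'
test_case_id = '000000'
cmd = ('docker cp ' + svr_cont_name + ':/tmp/iperf_server.log'
       ' ../cmake_targets/log/iperf_server_' + test_case_id + '.log')
# `docker cp` is usually fast, but on a loaded CI worker 10 s can be
# too tight; this commit widens it to 30 s.
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT,
                        universal_newlines=True, timeout=30)
```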