I'm trying to create helm charts and push them to Nexus repository in parallel(multiprocessing) on 100s of folders and its working good.
But, I would like to fail the script or the build, incase the exit status is other than 0
. With my current code set up, even though the exit code returns non zero value, here 512, the build ends up as success.
Expectation: even if a single process fails, fail the build..
folder structure:
--/tmp/dir1
-- values.yaml
-- zip file to be processed
--/tmp/dir2
-- values.yaml
-- zip file to be processed
.........
--/tmp/dirN
-- values.yaml
-- zip file to be processed
code:
#!/usr/bin/env python3
import concurrent.futures
import logging
import sys
import shutil
import os
import glob
import multiprocessing as mp
import traceback
import json
from os import path
def slave(path1, path2, target, logger):
logging.basicConfig(level=logging.DEBUG, format="%(levelname)s:%(processName)s:%(message)s")
# create tmp directory where the zip files and yaml files are temporarily copied and follwing commands are ran against them.
os.makedirs(target)
logging.info("Create folder %s", target)
logging.info("Copying files to %s", target)
try:
shutil.copy(path1, target)
shutil.copy(path2, target)
except:
traceback.print_exc()
try:
result = os.system(<artifactory login command>)
if result != 0:
sys.exit(1)
except:
traceback.print_exc()
try:
# Generate Helm chart and images and push it to Docker registry
res = os.system("helm create" target " --docker-registry dockerio.com/airlines_solutions/ -i")
if res != 0:
sys.exit(1)
except:
traceback.print_exc()
def main():
logger = logging.getLogger()
processed = {}
with open('example.json', 'r') as f:
data = json.load(f)
for value in data.items():
zip = ""
yaml = ""
for line in value[1]:
if line.endswith('.zip'):
zip = line
elif line.endswith('.yaml'):
yaml = line
processed[zip] = yaml
with concurrent.futures.ProcessPoolExecutor() as executor:
for id, (path2, path1) in enumerate(processed.items(), 1):
target = path.join("/tmp", "dir" str(id))
executor.submit(slave, path1, path2, target, logger)
# waits for the parallel processes to complete & exits at the end.
executor.shutdown()
if __name__ == "__main__":
mp.set_start_method('spawn')
main()
Output:
Error: Invalid value for "manifest": Error: http2: server sent GOAWAY and closed the connection; LastStreamID=3, ErrCode=NO_ERROR, debug="". Either add it there or provide it as an option to this command
Helm chart generation failed
exit code: 512
Traceback (most recent call last):
File "/home/myud/workspace/jenkins/bin/helm.py", line 65, in worker
sys.exit(1)
SystemExit: 1
ps -ef |grep python
myud 8483 8454 0 03:36 ? 00:00:00 /home/myud/workspace/venv/bin/python -c from multiprocessing.spawn import spawn_main; spawn_main(tracker_fd=6, pipe_handle=37) --multiprocessing-fork
myud 8484 8454 0 03:36 ? 00:00:00 /home/myud/workspace/venv/bin/python -c from multiprocessing.spawn import spawn_main; spawn_main(tracker_fd=6, pipe_handle=38) --multiprocessing-fork
myud 8485 8454 0 03:36 ? 00:00:00 /home/myud/workspace/venv/bin/python -c from multiprocessing.spawn import spawn_main; spawn_main(tracker_fd=6, pipe_handle=39) --multiprocessing-fork
myud 8486 8454 0 03:36 ? 00:00:00 /home/myud/workspace/venv/bin/python -c from multiprocessing.spawn import spawn_main; spawn_main(tracker_fd=6, pipe_handle=40) --multiprocessing-fork
myud 8487 8454 0 03:36 ? 00:00:00 /home/myud/workspace/venv/bin/python -c from multiprocessing.spawn import spawn_main; spawn_main(tracker_fd=6, pipe_handle=41) --multiprocessing-fork
myud 10345 10338 0 03:41 ? 00:00:00 grep python
Edit: Tried an alternate option to capture the result of executor.submit
, but after the failure of the 1st process, it exits the script, instead of waiting for all the threads or process to complete and then fail the build.
Chart creation failed: <Future at 0x7f2460b40518 state=running>
INFO:Process-2:Create folder /tmp/dir1
INFO:Process-2:Copying files to /tmp/dir1
Error: Invalid value for "manifest": Invalid values /tmp/dir1/values.yaml , version mismatch
alternate approach
with concurrent.futures.ProcessPoolExecutor() as executor:
for id, (path2, path1) in enumerate(processed.items(), 1):
target = path.join("/tmp", "dir" str(id))
result = executor.submit(worker, path1, path2, target, logger)
if result != 0:
sys.exit(1)
# waits for the parallel processes to complete & exits at the end.
executor.shutdown()
CodePudding user response:
You should use subprocess.run
with the keyword argument check=True
. From the doc:
If check is true, and the process exits with a non-zero exit code, a CalledProcessError exception will be raised. Attributes of that exception hold the arguments, the exit code, and stdout and stderr if they were captured.
Your command will become:
process = subprocess.run(<command as a list>, check=True, capture_output=True)
result = process.stdout.decode("utf-8")
CodePudding user response:
You should check the result of executor.submit
and in case of an error stop the executor.