summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTor Brede Vekterli <vekterli@yahoo-inc.com>2016-11-14 17:56:18 +0100
committerGitHub <noreply@github.com>2016-11-14 17:56:18 +0100
commitce9b11c20d8426929e891bf5710d12aac85528ab (patch)
tree52d370e9c1787d24fa9e1d1007cb39cd249ce940
parent090f3cfe7aee8a2091d8eb40f10142640da7d14a (diff)
parent75fda85ff89499f7383f40248bd4d25c893cfd66 (diff)
Merge pull request #1083 from yahoo/vegard/make-cppunit-parallelize-work-with-valgrind
Vegard/make cppunit parallelize work with valgrind
-rwxr-xr-xcppunit-parallelize.py75
1 file changed, 48 insertions, 27 deletions
diff --git a/cppunit-parallelize.py b/cppunit-parallelize.py
index 048e9d919b6..70d1a2eca12 100755
--- a/cppunit-parallelize.py
+++ b/cppunit-parallelize.py
@@ -1,10 +1,12 @@
+#!/usr/bin/env python
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+# @author Vegard Sjonfjell
import sys
import argparse
import copy
import os
import subprocess
import time
-import collections
def parse_arguments():
argparser = argparse.ArgumentParser(description="Run Vespa cppunit tests in parallell")
@@ -26,44 +28,63 @@ def chunkify(lst, chunks):
return result
+class Process:
+ def __init__(self, cmd, group):
+ self.group = group
+ self.finished = False
+ self.output = ""
+ self.handle = subprocess.Popen(
+ cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ preexec_fn=os.setpgrp)
+
def build_processes(test_groups):
+ valgrind = os.getenv("VALGRIND")
+ testrunner = (valgrind, args.testrunner) if valgrind else (args.testrunner,)
processes = []
+
for group in test_groups:
- cmd = (args.testrunner,) + tuple(group)
- processes.append((group,
- subprocess.Popen(
- cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- preexec_fn=os.setpgrp)))
+ cmd = testrunner + tuple(group)
+ processes.append(Process(cmd, group))
+
return processes
+def cleanup_processes(processes):
+ for proc in processes:
+ try:
+ proc.handle.kill()
+ except OSError as e:
+ if e.errno != os.errno.ESRCH: # "No such process"
+ print >>sys.stderr, e.message
+
args = parse_arguments()
test_suites = subprocess.check_output((args.testrunner, "--list")).strip().split("\n")
test_suite_groups = chunkify(test_suites, args.chunks)
processes = build_processes(test_suite_groups)
-output = collections.defaultdict(str)
print "Running %d test suites in %d parallel chunks with ~%d tests each" % (len(test_suites), len(test_suite_groups), len(test_suite_groups[0]))
+processes_left = len(processes)
while True:
- prevlen = len(processes)
- for group, proc in processes:
- return_code = proc.poll()
- output[proc] += proc.stdout.read()
-
- if return_code == 0:
- processes.remove((group, proc))
- if not len(processes):
- print "All tests suites ran successfully"
- sys.exit(0)
- elif return_code is not None:
- print "One of '%s' test suites failed:" % ", ".join(group)
- print >>sys.stderr, output[proc]
- sys.exit(return_code)
+ try:
+ for proc in processes:
+ return_code = proc.handle.poll()
+ proc.output += proc.handle.stdout.read()
- if prevlen != len(processes):
- prevlen = len(processes)
- print "%d test suite(s) left" % prevlen
+ if return_code == 0:
+ proc.finished = True
+ processes_left -= 1
+ if processes_left > 0:
+ print "%d test suite(s) left" % processes_left
+ else:
+ print "All test suites ran successfully"
+ sys.exit(0)
+ elif return_code is not None:
+ print "One of '%s' test suites failed:" % ", ".join(proc.group)
+ print >>sys.stderr, proc.output
+ sys.exit(return_code)
- time.sleep(0.01)
+ time.sleep(0.01)
+ finally:
+ cleanup_processes(processes)