Commit b72048c9 authored by Stef Walter's avatar Stef Walter

Makefile.am: Use a single Makefile.am and parallel tests

Allow parallel building and testing by using a single Makefile.am

Implement parallel testing using TAP, with various drivers and
compilers living in the build/ directory.

Fix all sorts of issues that this caused, including builddir != srcdir,
leaks in tests and so on.

It would have been nice to break out all the above into separate
commits ... blush.
parent ec89646b
## Process this file with automake to produce Makefile.in
include $(top_srcdir)/Makefile.decl

NULL =

ACLOCAL_AMFLAGS = -I build/m4 ${ACLOCAL_FLAGS}

# NOTE(review): two SUBDIRS assignments survive here (the old recursive
# layout vs. the new single-Makefile layout) — this looks like old/new diff
# residue from the commit view; under make the later assignment wins.
# Confirm only the second was intended.
SUBDIRS = build egg libsecret tool po docs .

# Single-Makefile layout: build the toplevel first, then po and the docs.
SUBDIRS = \
	. \
	po \
	docs/reference/libsecret
DISTCHECK_CONFIGURE_FLAGS = \
--enable-debug=yes \
......@@ -13,9 +16,6 @@ DISTCHECK_CONFIGURE_FLAGS = \
--enable-gtk-doc \
--enable-vala
EXTRA_DIST = \
COPYING.TESTS
dist-hook:
@if test -d "$(srcdir)/.git"; \
then \
......@@ -55,3 +55,113 @@ dist-hook: dist-check-valac
# Files "make distcleancheck" tolerates: leftover gcov coverage data.
distcleancheck_listfiles = \
	find . -name '*.gc[dn][oa]' -prune -o -type f -print

TEST_SUPPRESSIONS = $(top_builddir)/build/valgrind-suppressions

# Run every test program under valgrind/memcheck with the combined
# suppressions file; exit code 33 marks a valgrind-detected error.
# Declared phony: this never produces a file named "perform-memcheck".
.PHONY: perform-memcheck
perform-memcheck: $(TEST_PROGS) $(TEST_SUPPRESSIONS)
	@$(MAKE) -C $(top_builddir)/build all
	@for test in $(TEST_PROGS); do \
		G_SLICE=always-malloc libtool --mode=execute \
		valgrind --trace-children=no --gen-suppressions=all \
		--suppressions=$(TEST_SUPPRESSIONS) \
		--leak-check=full --show-reachable=yes --num-callers=16 \
		--quiet --error-exitcode=33 \
		$(builddir)/$$test; \
	done
if WITH_COVERAGE
# Produce an lcov/genhtml HTML coverage report under build/coverage/.
# Both targets are commands, not files — declare them phony so a stray
# file named "coverage" cannot mask them.
.PHONY: coverage clear-coverage
coverage:
	mkdir -p $(top_builddir)/build/coverage
	$(LCOV) --directory . --capture --output-file $(top_builddir)/build/coverage.info
	$(GENHTML) --output-directory $(top_builddir)/build/coverage $(top_builddir)/build/coverage.info
	$(LCOV) --directory . --zerocounters
	@echo "file://$(abs_top_builddir)/build/coverage/index.html"

# Reset the coverage counters without generating a report.
clear-coverage:
	$(LCOV) --directory . --zerocounters
endif
# Preprocessor flags shared by everything built from this Makefile.
AM_CPPFLAGS = \
	-I$(top_srcdir) \
	-I$(top_srcdir)/build \
	-DSRCDIR="\"@abs_srcdir@\"" \
	-DLOCALEDIR=\""$(datadir)/locale"\" \
	-DWITH_VALGRIND \
	-DSECRET_COMPILATION \
	$(LIBGCRYPT_CFLAGS) \
	$(GLIB_CFLAGS)

# Empty initializations: the per-directory Makefile.am fragments included
# below append to these lists with +=.
bin_PROGRAMS =
BUILT_SOURCES =
check_PROGRAMS =
DISTCLEANFILES =
lib_LTLIBRARIES =
man_MANS =
nodist_noinst_DATA =
noinst_DATA =
noinst_LTLIBRARIES =
TESTS =

# Build the test programs during "make all", not only at "make check" time.
noinst_PROGRAMS = $(check_PROGRAMS)

pkgconfigdir = $(libdir)/pkgconfig
pkgconfig_DATA =
if HAVE_INTROSPECTION
include $(INTROSPECTION_MAKEFILE)

INTROSPECTION_GIRS =
# Search both srcdir and builddir for .gir includes, so that
# builddir != srcdir builds work.
INTROSPECTION_SCANNER_ARGS = $(INTROSPECTION_FLAGS) --warn-all \
	--add-include-path=$(srcdir) --add-include-path=$(builddir)
INTROSPECTION_COMPILER_ARGS = --includedir=$(srcdir) --includedir=$(builddir)

girdir = $(datadir)/gir-1.0
gir_DATA =

typelibsdir = $(libdir)/girepository-1.0
# Every installed .gir gets a compiled .typelib alongside it.
typelibs_DATA = $(gir_DATA:.gir=.typelib)

if ENABLE_VAPIGEN
include $(VAPIGEN_MAKEFILE)

VAPIGEN_VAPIS =
vapidir = $(datadir)/vala/vapi
vapi_DATA =
endif
endif
CLEANFILES = \
	$(pkgconfig_DATA) \
	$(gir_DATA) \
	$(typelibs_DATA) \
	$(BUILT_SOURCES) \
	$(vapi_DATA) \
	$(man_MANS)

EXTRA_DIST = \
	COPYING.TESTS

# Parallel-test (TAP) setup: the compiler turns raw test output into TAP,
# the driver translates TAP into automake .log/.trs results.
LOG_DRIVER = $(srcdir)/build/tap-driver
LOG_COMPILER = $(srcdir)/build/tap-compiler

# Make freshly built libraries and typelibs visible to the test programs.
TESTS_ENVIRONMENT = LD_LIBRARY_PATH=$(builddir)/.libs GI_TYPELIB_PATH=$(builddir)

TEST_EXTENSIONS = .py .js
PY_LOG_DRIVER = $(srcdir)/build/tap-driver
PY_LOG_COMPILER = $(srcdir)/build/tap-unittest
# NOTE(review): JS tests use build/test-driver while the others use
# build/tap-driver — confirm the difference is intentional.
JS_LOG_DRIVER = $(srcdir)/build/test-driver
JS_LOG_COMPILER = gjs

# Single-Makefile layout: pull in the per-directory fragments.
include build/Makefile.am
include egg/Makefile.am
include libsecret/Makefile.am
include tool/Makefile.am

if WITH_MANPAGES
include docs/man/Makefile.am
endif
# NOTE(review): everything from here to "endif" duplicates definitions that
# appear earlier in this dump (NULL, TEST_SUPPRESSIONS, perform-memcheck and
# the WITH_COVERAGE rules). This looks like old/new diff residue from the
# commit view; confirm which copy is intended and drop the other.
NULL =

TEST_SUPPRESSIONS = $(top_builddir)/build/valgrind-suppressions

# Run every test program under valgrind/memcheck using the combined
# suppressions file; exit code 33 marks a valgrind-detected error.
perform-memcheck: $(TEST_PROGS) $(TEST_SUPPRESSIONS)
	@make -C $(top_builddir)/build all
	@for test in $(TEST_PROGS); do \
		G_SLICE=always-malloc libtool --mode=execute \
		valgrind --trace-children=no --gen-suppressions=all \
		--suppressions=$(TEST_SUPPRESSIONS) \
		--leak-check=full --show-reachable=yes --num-callers=16 \
		--quiet --error-exitcode=33 \
		$(builddir)/$$test; \
	done

if WITH_COVERAGE
# Produce an lcov/genhtml HTML coverage report under build/coverage/.
coverage:
	mkdir -p $(top_builddir)/build/coverage
	$(LCOV) --directory . --capture --output-file $(top_builddir)/build/coverage.info
	$(GENHTML) --output-directory $(top_builddir)/build/coverage $(top_builddir)/build/coverage.info
	$(LCOV) --directory . --zerocounters
	@echo "file://$(abs_top_builddir)/build/coverage/index.html"

# Reset the coverage counters without generating a report.
clear-coverage:
	$(LCOV) --directory . --zerocounters
endif
\ No newline at end of file
include $(top_srcdir)/Makefile.decl

# Third-party valgrind headers bundled with the tree.
VALGRIND_CONTRIB = \
	valgrind.h \
	memcheck.h \
	$(NULL)

# NOTE(review): this list names the same suppression files twice, once
# $(srcdir)/-relative and once build/-relative — looks like interleaved
# old/new diff lines; confirm which set is intended.
SUPPRESSIONS = \
	$(srcdir)/gcrypt.supp \
	$(srcdir)/glib.supp \
	$(srcdir)/pthread.supp \
	$(srcdir)/unknown.supp \
	build/gcrypt.supp \
	build/glib.supp \
	build/pthread.supp \
	build/unknown.supp \
	$(NULL)

# Concatenate all suppression files into the single file valgrind is given
# via --suppressions.
# NOTE(review): two recipe lines survive here doing the same thing
# ($(SUPPRESSIONS) vs $^) — old/new diff residue; only one should remain.
valgrind-suppressions: $(SUPPRESSIONS)
	$(AM_V_GEN) cat $(SUPPRESSIONS) > $@
	$(AM_V_GEN) cat $^ > $@

# NOTE(review): the "EXTRA_DIST = \" / "EXTRA_DIST += \" pair below (and the
# "CLEANFILES = \" / "CLEANFILES += \" pair further down) is interleaved
# old/new diff residue — the backslash continuations as written are broken.
EXTRA_DIST = \
	valgrind \
EXTRA_DIST += \
	build/valgrind \
	build/tap-compiler \
	build/tap-driver \
	build/tap-unittest \
	build/test-driver \
	$(SUPPRESSIONS)

CLEANFILES = \
CLEANFILES += \
	valgrind-suppressions \
	$(NULL)

# Regenerate the combined suppressions file as part of every build.
all-local: valgrind-suppressions

nodist_noinst_DATA += valgrind-suppressions
#!/usr/bin/python
# Copyright (C) 2014 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
#
# This is a test output compiler which produces TAP from GTest output
# if GTest output is detected.
#
# Versions of glib later than 2.38.x output TAP natively when tests are
# run with the --tap option. However we can't depend on such a recent
# version of glib for our purposes.
#
# This implements the Test Anything Protocol (ie: TAP)
# https://metacpan.org/pod/release/PETDANCE/Test-Harness-2.64/lib/Test/Harness/TAP.pod
#
import argparse
import os
import select
import subprocess
import sys
class NullCompiler:
    """Pass-through "compiler": the test already emits TAP, so every line of
    its output is forwarded to stdout unchanged."""

    def __init__(self, command):
        # The test command line, kept for error reporting by subclasses.
        self.command = command

    def input(self, line):
        # Already TAP: emit verbatim.
        sys.stdout.write(line)

    def process(self, proc):
        """Forward proc's stdout line by line; return its exit status."""
        for line in iter(proc.stdout.readline, ""):
            self.input(line)
        proc.wait()
        return proc.returncode

    def run(self, proc, line=None):
        """Emit an optional already-read first line, then drain proc."""
        if line:
            self.input(line)
        return self.process(proc)
class GTestCompiler(NullCompiler):
def __init__(self, filename):
NullCompiler.__init__(self, filename)
self.test_num = 0
self.test_name = None
self.test_remaining = []
def input(self, line):
line = line.strip()
if line.startswith("GTest: "):
(cmd, unused, data) = line[7:].partition(": ")
cmd = cmd.strip()
data = data.strip()
if cmd == "run":
self.test_name = data
assert self.test_name in self.test_remaining, "%s %s" % (self.test_name, repr(self.test_remaining))
self.test_remaining.remove(self.test_name)
self.test_num += 1
elif cmd == "result":
if data == "OK":
print "ok %d %s" % (self.test_num, self.test_name)
if data == "FAIL":
print "not ok %d %s", (self.test_num, self.test_name)
self.test_name = None
elif cmd == "skipping":
print "ok %d # skip -- %s" % (self.test_num, self.test_name)
self.test_name = None
elif data:
print "# %s: %s" % (cmd, data)
else:
print "# %s" % cmd
elif line.startswith("(MSG: "):
print "# %s" % line[6:-1]
elif line:
print "# %s" % line
sys.stdout.flush()
def run(self, proc, output=""):
# Complete retrieval of the list of tests
output += proc.stdout.read()
proc.wait()
if proc.returncode:
raise subprocess.CalledProcessError(proc.returncode, self.command)
self.test_remaining = []
for line in output.split("\n"):
if line.startswith("/"):
self.test_remaining.append(line.strip())
if not self.test_remaining:
print "Bail out! No tests found in GTest: %s" % self.command[0]
return 0
print "1..%d" % len(self.test_remaining)
# First try to run all the tests in a batch
proc = subprocess.Popen(self.command + ["--verbose" ], close_fds=True, stdout=subprocess.PIPE)
result = self.process(proc)
if result == 0:
return 0
# Now pick up any stragglers due to failures
while True:
# Assume that the last test failed
if self.test_name:
print "not ok %d %s" % (self.test_num, self.test_name)
self.test_name = None
# Run any tests which didn't get run
if not self.test_remaining:
break
proc = subprocess.Popen(self.command + ["--verbose", "-p", self.test_remaining[0]],
close_fds=True, stdout=subprocess.PIPE)
result = self.process(proc)
# The various exit codes and signals we continue for
if result not in [ 0, 1, -4, -5, -6, -7, -8, -11 ]:
break
return result
def main(argv):
    """Entry point: sniff the test's output format and compile it to TAP."""
    parser = argparse.ArgumentParser(description='Automake TAP compiler')
    parser.add_argument('--format', metavar='FORMAT', choices=[ "auto", "GTest", "TAP" ],
                        default="auto", help='The input format to compile')
    parser.add_argument('--verbose', action='store_true',
                        default=True, help='Verbose mode (ignored)')
    parser.add_argument('command', nargs='+', help="A test command to run")
    args = parser.parse_args(argv[1:])

    command = args.command
    style = args.format
    first_line = None

    if style in ("auto", "GTest"):
        # Start the binary in listing mode and smell the first line of its
        # output to decide whether this is a GTest program.
        child = subprocess.Popen(command + ["-l", "--verbose"],
                                 close_fds=True, stdout=subprocess.PIPE)
        first_line = child.stdout.readline()
        gtest_like = ("random seed" in first_line or
                      "GTest" in first_line or
                      first_line.startswith("/"))
        style = "GTest" if gtest_like else "TAP"
    else:
        child = subprocess.Popen(command, close_fds=True, stdout=subprocess.PIPE)

    if style == "GTest":
        compiler = GTestCompiler(command)
    elif style == "TAP":
        compiler = NullCompiler(command)
    else:
        assert False, "not reached"
    # Hand over the already-read first line along with the running process.
    return compiler.run(child, first_line)


if __name__ == "__main__":
    sys.exit(main(sys.argv))
#!/usr/bin/python
# Copyright (C) 2013 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
#
# This is a TAP driver for automake
#
# In particular it leaves stderr untouched, and is cleaner than the
# one implemented in shell that is making the rounds.
#
# This implements the automake "Custom Test Driver" protocol:
# https://www.gnu.org/software/automake/manual/html_node/Custom-Test-Drivers.html
#
# This consumes the Test Anything Protocol (ie: TAP)
# https://metacpan.org/pod/release/PETDANCE/Test-Harness-2.64/lib/Test/Harness/TAP.pod
#
import argparse
import os
import select
import subprocess
import sys
class Driver:
    """Automake "custom test driver" that consumes TAP from a test command
    and produces the .log and .trs files automake expects.

    Protocol reference:
    https://www.gnu.org/software/automake/manual/html_node/Custom-Test-Drivers.html
    """
    def __init__(self, command, args):
        self.argv = command                  # test command line to execute
        self.output = ""                     # buffered, not-yet-parsed stdout
        self.test_name = args.test_name
        self.log = open(args.log_file, "w")  # verbatim output log
        self.trs = open(args.trs_file, "w")  # automake result summary
        self.color_tests = args.color_tests
        self.expect_failure = args.expect_failure
        self.reported = { }                  # test number -> result code
        self.test_plan = None                # (first, last) from the "N..M" line
        self.late_plan = False               # plan arrived after results started
        self.errored = False
        self.bail_out = False
    def report(self, code, num, *args):
        """Print one result line to the console and record it in the .trs."""
        CODES = {
            "XPASS": '\x1b[0;31m', # red
            "FAIL": '\x1b[0;31m', # red
            "PASS": '\x1b[0;32m', # grn
            "XFAIL": '\x1b[1;32m', # lgn
            "SKIP": '\x1b[1;34m', # blu
            "ERROR": '\x1b[0;35m', # mgn
        }
        # Print out to console
        if self.color_tests:
            if code in CODES:
                sys.stdout.write(CODES[code])
        sys.stdout.write(code)
        if self.color_tests:
            sys.stdout.write('\x1b[m')
        sys.stdout.write(": ")
        sys.stdout.write(self.test_name)
        sys.stdout.write(" ")
        if num:
            sys.stdout.write(str(num))
            sys.stdout.write(" ")
        for arg in args:
            sys.stdout.write(str(arg))
        sys.stdout.write("\n")
        sys.stdout.flush()
        # Book keeping
        if code in CODES:
            if num != None:
                self.reported[num] = code
            self.trs.write(":test-result: %s\n" % code)
        if code == "ERROR":
            self.errored = True
    def result_pass(self, num, description):
        # A pass is unexpected (XPASS) when the test was expected to fail.
        if self.expect_failure:
            self.report("XPASS", num, description)
        else:
            self.report("PASS", num, description)
    def result_fail(self, num, description):
        # A failure is expected (XFAIL) when the test was expected to fail.
        if self.expect_failure:
            self.report("XFAIL", num, description)
        else:
            self.report("FAIL", num, description)
    def result_skip(self, num, description, ok):
        if self.expect_failure:
            self.report("XFAIL", num, description)
        else:
            self.report("SKIP", num, description)
    def report_error(self, problem):
        self.report("ERROR", None, problem)
    def consume_test_line(self, ok, data):
        """Handle one "ok N ..." / "not ok N ..." line; data is the part
        after the ok/not-ok prefix."""
        # It's an error if the caller sends a test plan in the middle of tests
        if self.late_plan:
            self.report_error("Got tests after late TAP test plan")
            self.late_plan = False
        # Parse out a number and then description
        (num, unused, description) = data.partition(" ")
        try:
            num = int(num)
        except ValueError:
            self.report_error("Invalid test number: %s" % data)
            return
        description = description.lstrip()
        # Special case if description starts with this, then skip
        if description.lower().startswith("# skip"):
            self.result_skip(num, description, ok)
        elif ok:
            self.result_pass(num, description)
        else:
            self.result_fail(num, description)
    def consume_test_plan(self, first, last):
        """Handle a "first..last" TAP plan line."""
        # Only one test plan is supported
        if self.test_plan:
            # BUG FIX: error message said "Get a second ..."
            self.report_error("Got a second TAP test plan")
            return
        try:
            first = int(first)
            last = int(last)
        except ValueError:
            self.report_error("Invalid test plan: %s..%s" % (first, last))
            return
        self.test_plan = (first, last)
        # A plan that arrives after results have been seen is "late".
        self.late_plan = bool(self.reported)
    def consume_bail_out(self, line):
        # "Bail out!": remaining tests will be reported as skipped.
        self.bail_out = True
        self.report("SKIP", 0, line)
    def drain(self):
        """Parse all complete lines buffered in self.output, keeping any
        trailing partial line for the next read."""
        (ready, unused, self.output) = self.output.rpartition("\n")
        for line in ready.split("\n"):
            self.log.write(line)
            self.log.write("\n")
            if line.startswith("ok "):
                self.consume_test_line(True, line[3:])
            elif line.startswith("not ok "):
                self.consume_test_line(False, line[7:])
            elif line and line[0].isdigit() and ".." in line:
                (first, unused, last) = line.partition("..")
                self.consume_test_plan(first, last)
            elif line.lower().startswith("bail out!"):
                self.consume_bail_out(line)
    def execute(self):
        """Run the test command, parsing TAP from its stdout while passing
        its stderr through to our stderr and the log."""
        try:
            proc = subprocess.Popen(self.argv, close_fds=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
        except OSError as ex:
            self.report_error("Couldn't run %s: %s" % (self.argv[0], str(ex)))
            return
        outf = proc.stdout.fileno()
        errf = proc.stderr.fileno()
        rset = [outf, errf]
        while len(rset) > 0:
            ret = select.select(rset, [], [], 10)
            if outf in ret[0]:
                data = os.read(outf, 1024)
                if data == "":
                    if self.output:
                        # BUG FIX: stream ended without a trailing newline;
                        # terminate the final partial line AND parse it now —
                        # no further stdout reads will trigger drain().
                        self.output += "\n"
                        self.drain()
                    rset.remove(outf)
                else:
                    self.output += data
                    self.drain()
            if errf in ret[0]:
                data = os.read(errf, 1024)
                if data == "":
                    rset.remove(errf)
                self.log.write(data)
                sys.stderr.write(data)
        proc.wait()
        self.returncode = proc.returncode
    def run(self):
        """Execute the test, collate results, and finish the .trs file.

        Returns 1 on hard error, else 0 (automake reads pass/fail from
        the .trs file, not from our exit status).
        """
        self.execute()
        failed = False
        skipped = True
        # Basic collation of results
        for (num, code) in self.reported.items():
            if code == "ERROR":
                self.errored = True
            elif code == "FAIL" or code == "XPASS":
                failed = True
            if code != "SKIP":
                skipped = False
        # Check the plan
        if not self.errored:
            if not self.test_plan:
                if not self.bail_out:
                    if self.returncode:
                        self.report_error("Test process failed: %d" % self.returncode)
                    else:
                        self.report_error("Didn't receive a TAP test plan")
            else:
                # Account for every test number the plan promised.
                for i in range(self.test_plan[0], self.test_plan[1] + 1):
                    if i not in self.reported:
                        if self.bail_out:
                            self.report("SKIP", i, "- bailed out")
                        else:
                            self.report("ERROR", i, "- missing test")
                            skipped = False
                            self.errored = True
        # NOTE(review): both ":global-test-result:" and ":test-global-result:"
        # spellings are written below — confirm which one automake expects and
        # drop the other.
        if self.errored:
            self.trs.write(":global-test-result: ERROR\n")
            self.trs.write(":test-global-result: ERROR\n")
            self.trs.write(":recheck: no\n")
        elif failed:
            self.trs.write(":global-test-result: FAIL\n")
            self.trs.write(":test-global-result: FAIL\n")
            self.trs.write(":recheck: yes\n")
        elif skipped:
            self.trs.write(":global-test-result: SKIP\n")
            self.trs.write(":test-global-result: SKIP\n")
            self.trs.write(":recheck: no\n")
        if self.errored or failed:
            self.trs.write(":copy-in-global-log: yes\n")
        # Process result code
        return 1 if self.errored else 0
class YesNoAction(argparse.Action):
    """Argparse action that accepts automake-style "yes"/"no" option values
    and stores the corresponding boolean."""

    def __init__(self, option_strings, dest, **kwargs):
        argparse.Action.__init__(self, option_strings, dest, **kwargs)
        self.metavar = "[yes|no]"

    def __call__(self, parser, namespace, values, option_string=None):
        # An empty value, or any value containing "yes", counts as true.
        enabled = (not values) or ("yes" in values)
        setattr(namespace, self.dest, enabled)
def main(argv):
    """Parse the automake driver arguments and run the wrapped test."""
    parser = argparse.ArgumentParser(description='Automake TAP driver')
    parser.add_argument('--test-name', metavar='NAME',
                        help='The name of the test')
    parser.add_argument('--log-file', metavar='PATH.log', required=True,
                        help='The .log file the driver creates')
    parser.add_argument('--trs-file', metavar='PATH.trs', required=True,
                        help='The .trs file the driver creates')
    parser.add_argument('--color-tests', default=True, action=YesNoAction,
                        help='Whether the console output should be colorized or not')
    parser.add_argument('--expect-failure', default=False, action=YesNoAction,
                        help="Whether the tested program is expected to fail")
    parser.add_argument('--enable-hard-errors', default=False, action=YesNoAction,
                        help="Whether hard errors in the tested program are treated differently")
    parser.add_argument('command', nargs='+',
                        help="A test command line to run")
    args = parser.parse_args(argv[1:])

    # Default the test name to the command's basename when not supplied.
    args.test_name = args.test_name or os.path.basename(args.command[0])

    return Driver(args.command, args).run()


if __name__ == "__main__":
    sys.exit(main(sys.argv))
#!/usr/bin/python
#
# This is a TAP compiler for python unittest
#
# It hooks into python's standard unittest module, and produces TAP output.
#
# This produces the Test Anything Protocol (ie: TAP)
# https://metacpan.org/pod/release/PETDANCE/Test-Harness-2.64/lib/Test/Harness/TAP.pod
#
# Based on code from here:
# https://github.com/vit1251/unittest-tap-reporting
#
import argparse
import imp
import os
import sys
import time
import traceback
import unittest
def write_line(format, *args):
    """Emit one formatted TAP line to stdout, flushing immediately so the
    output interleaves correctly with the test's own writes."""
    message = format % args
    sys.stdout.write(message + "\n")
    sys.stdout.flush()
class TAPTestResult(unittest.result.TestResult):
def __init__(self):
unittest.result.TestResult.__init__(self)
self.number = 0
def addSuccess(self, test):
self.number += 1
write_line("ok %d %s", self.number, test.id())
def addSkip(self, test, reason):
self.number += 1
write_line("not ok %d # skip %s", self.number, test.id())
write_line("# %s", reason)
def addError(self, test, exc):
(etype, evalue, etraceback) = exc
traceback.print_exception(etype, evalue, etraceback, file=sys.stderr)
self.number += 1