Generated v2 prefixes

geky-bot committed 2021-01-20 02:41:58 +00:00
13 changed files with 1556 additions and 584 deletions

scripts/code.py (new executable file, +214)

@@ -0,0 +1,214 @@
#!/usr/bin/env python3
#
# Script to find code size at the function level. Basically just a thin
# wrapper around nm with some extra conveniences for comparing builds.
# Heavily inspired by Linux's Bloat-O-Meter.
#
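# Example invocations (hypothetical paths, for illustration):
#
#   ./scripts/code.py lfs.o bd/lfs_testbd.o -o sizes.csv
#   ./scripts/code.py -u sizes.csv -d sizes-prev.csv
#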
import os
import glob
import itertools as it
import subprocess as sp
import shlex
import sys
import re
import csv
import collections as co

OBJ_PATHS = ['*.o', 'bd/*.o']

def collect(paths, **args):
    results = co.defaultdict(lambda: 0)
    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)' +
        ' (?P<type>[%s])' % re.escape(args['type']) +
        ' (?P<func>.+?)$')
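
    # each nm --size-sort line looks like "00000178 T lfs_bd_read":
    # a hex size, a one-character symbol type, and the symbol name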
    for path in paths:
        # note nm-tool may contain extra args
        cmd = args['nm_tool'] + ['--size-sort', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True)
        for line in proc.stdout:
            m = pattern.match(line)
            if m:
                results[(path, m.group('func'))] += int(m.group('size'), 16)
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

    flat_results = []
    for (file, func), size in results.items():
        # map to source files
        if args.get('build_dir'):
            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # discard internal functions
        if func.startswith('__'):
            continue
        # discard .8449 suffixes created by optimizer
        func = re.sub(r'\.[0-9]+', '', func)
        flat_results.append((file, func, size))

    return flat_results

def main(**args):
    # find sizes
    if not args.get('use', None):
        # find .o files
        paths = []
        for path in args['obj_paths']:
            if os.path.isdir(path):
                path = path + '/*.o'
            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print('no .o files found in %r?' % args['obj_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        with open(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['function'],
                    int(result['size']))
                for result in r]

    total = 0
    for _, _, size in results:
        total += size

    # find previous results?
    if args.get('diff'):
        with open(args['diff']) as f:
            r = csv.DictReader(f)
            prev_results = [
                (   result['file'],
                    result['function'],
                    int(result['size']))
                for result in r]

        prev_total = 0
        for _, _, size in prev_results:
            prev_total += size

    # write results to CSV
    if args.get('output'):
        with open(args['output'], 'w') as f:
            w = csv.writer(f)
            w.writerow(['file', 'function', 'size'])
            for file, func, size in sorted(results):
                w.writerow((file, func, size))

    # print results
    def dedup_entries(results, by='function'):
        entries = co.defaultdict(lambda: 0)
        for file, func, size in results:
            entry = (file if by == 'file' else func)
            entries[entry] += size
        return entries
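
    # each diff entry is a (old, new, new-old, ratio) tuple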
    def diff_entries(olds, news):
        diff = co.defaultdict(lambda: (0, 0, 0, 0))
        for name, new in news.items():
            diff[name] = (0, new, new, 1.0)
        for name, old in olds.items():
            _, new, _, _ = diff[name]
            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
        return diff

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s' % (by, 'size'))
        else:
            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

    def print_entries(by='function'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, size in sorted(entries.items()):
                print("%-36s %7d" % (name, size))
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for old, _, _, _ in diff.values() if not old),
                sum(1 for _, new, _, _ in diff.values() if not new)))
            for name, (old, new, diff, ratio) in sorted(diff.items(),
                    key=lambda x: (-x[1][3], x)):
                if ratio or args.get('all'):
                    print("%-36s %7s %7s %+7d%s" % (name,
                        old or "-",
                        new or "-",
                        diff,
                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_totals():
        if not args.get('diff'):
            print("%-36s %7d" % ('TOTAL', total))
        else:
            ratio = (total-prev_total)/prev_total if prev_total else 1.0
            print("%-36s %7s %7s %+7d%s" % (
                'TOTAL',
                prev_total if prev_total else '-',
                total if total else '-',
                total-prev_total,
                ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='function')
        print_totals()

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(
        description="Find code size at the function level.")
    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
        help="Description of where to find *.o files. May be a directory \
            or a list of paths. Defaults to %r." % OBJ_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't compile and find code sizes, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff code size against.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('--files', action='store_true',
        help="Show file-level code sizes. Note this does not include padding! "
            "So sizes may differ from other tools.")
    parser.add_argument('-s', '--summary', action='store_true',
        help="Only show the total code size.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('--type', default='tTrRdDbB',
        help="Type of symbols to report, this uses the same single-character "
            "type-names emitted by nm. Defaults to %(default)r.")
    parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
        help="Path to the nm tool to use.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))

scripts/coverage.py (new executable file, +254)

@@ -0,0 +1,254 @@
#!/usr/bin/env python3
#
# Parse and report coverage info from .info files generated by lcov
#
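# Example invocations (hypothetical paths, for illustration):
#
#   ./scripts/coverage.py tests/*.toml.info -o coverage.csv
#   ./scripts/coverage.py -u coverage.csv -d coverage-prev.csv
#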
import os
import glob
import csv
import re
import sys
import collections as co
import bisect as b

INFO_PATHS = ['tests/*.toml.info']

def collect(paths, **args):
    file = None
    funcs = []
    lines = co.defaultdict(lambda: 0)
    pattern = re.compile(
        '^(?P<file>SF:/?(?P<file_name>.*))$'
        '|^(?P<func>FN:(?P<func_lineno>[0-9]*),(?P<func_name>.*))$'
        '|^(?P<line>DA:(?P<line_lineno>[0-9]*),(?P<line_hits>[0-9]*))$')
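
    # lcov .info records of interest: SF:<source file>,
    # FN:<lineno>,<function name>, and DA:<lineno>,<execution hits>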
    for path in paths:
        with open(path) as f:
            for line in f:
                m = pattern.match(line)
                if m and m.group('file'):
                    file = m.group('file_name')
                elif m and file and m.group('func'):
                    funcs.append((file, int(m.group('func_lineno')),
                        m.group('func_name')))
                elif m and file and m.group('line'):
                    lines[(file, int(m.group('line_lineno')))] += (
                        int(m.group('line_hits')))

    # map line numbers to functions
    funcs.sort()
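
    # funcs is sorted by (file, lineno), so bisect can find the
    # nearest function defined at or before a given line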
    def func_from_lineno(file, lineno):
        i = b.bisect(funcs, (file, lineno))
        if i and funcs[i-1][0] == file:
            return funcs[i-1][2]
        else:
            return None

    # reduce to function info
    reduced_funcs = co.defaultdict(lambda: (0, 0))
    for (file, line_lineno), line_hits in lines.items():
        func = func_from_lineno(file, line_lineno)
        if not func:
            continue
        hits, count = reduced_funcs[(file, func)]
        reduced_funcs[(file, func)] = (hits + (line_hits > 0), count + 1)

    results = []
    for (file, func), (hits, count) in reduced_funcs.items():
        # discard internal/testing functions (test_* injected with
        # internal testing)
        if func.startswith('__') or func.startswith('test_'):
            continue
        # discard .8449 suffixes created by optimizer
        func = re.sub(r'\.[0-9]+', '', func)
        results.append((file, func, hits, count))

    return results

def main(**args):
    # find coverage
    if not args.get('use'):
        # find *.info files
        paths = []
        for path in args['info_paths']:
            if os.path.isdir(path):
                path = path + '/*.info'
            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print('no .info files found in %r?' % args['info_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        with open(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['function'],
                    int(result['hits']),
                    int(result['count']))
                for result in r]

    total_hits, total_count = 0, 0
    for _, _, hits, count in results:
        total_hits += hits
        total_count += count

    # find previous results?
    if args.get('diff'):
        with open(args['diff']) as f:
            r = csv.DictReader(f)
            prev_results = [
                (   result['file'],
                    result['function'],
                    int(result['hits']),
                    int(result['count']))
                for result in r]

        prev_total_hits, prev_total_count = 0, 0
        for _, _, hits, count in prev_results:
            prev_total_hits += hits
            prev_total_count += count

    # write results to CSV
    if args.get('output'):
        with open(args['output'], 'w') as f:
            w = csv.writer(f)
            w.writerow(['file', 'function', 'hits', 'count'])
            for file, func, hits, count in sorted(results):
                w.writerow((file, func, hits, count))

    # print results
    def dedup_entries(results, by='function'):
        entries = co.defaultdict(lambda: (0, 0))
        for file, func, hits, count in results:
            entry = (file if by == 'file' else func)
            entry_hits, entry_count = entries[entry]
            entries[entry] = (entry_hits + hits, entry_count + count)
        return entries
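
    # each diff entry is (old_hits, old_count, new_hits, new_count,
    # diff_hits, diff_count, and the change in hit ratio)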
    def diff_entries(olds, news):
        diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0))
        for name, (new_hits, new_count) in news.items():
            diff[name] = (
                0, 0,
                new_hits, new_count,
                new_hits, new_count,
                (new_hits/new_count if new_count else 1.0) - 1.0)
        for name, (old_hits, old_count) in olds.items():
            _, _, new_hits, new_count, _, _, _ = diff[name]
            diff[name] = (
                old_hits, old_count,
                new_hits, new_count,
                new_hits-old_hits, new_count-old_count,
                ((new_hits/new_count if new_count else 1.0)
                    - (old_hits/old_count if old_count else 1.0)))
        return diff

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %19s' % (by, 'hits/line'))
        else:
            print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))

    def print_entries(by='function'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, (hits, count) in sorted(entries.items()):
                print("%-36s %11s %7s" % (name,
                    '%d/%d' % (hits, count)
                        if count else '-',
                    '%.1f%%' % (100*hits/count)
                        if count else '-'))
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for _, old, _, _, _, _, _ in diff.values() if not old),
                sum(1 for _, _, _, new, _, _, _ in diff.values() if not new)))
            for name, (
                    old_hits, old_count,
                    new_hits, new_count,
                    diff_hits, diff_count, ratio) in sorted(diff.items(),
                        key=lambda x: (-x[1][6], x)):
                if ratio or args.get('all'):
                    print("%-36s %11s %7s %11s %7s %11s%s" % (name,
                        '%d/%d' % (old_hits, old_count)
                            if old_count else '-',
                        '%.1f%%' % (100*old_hits/old_count)
                            if old_count else '-',
                        '%d/%d' % (new_hits, new_count)
                            if new_count else '-',
                        '%.1f%%' % (100*new_hits/new_count)
                            if new_count else '-',
                        '%+d/%+d' % (diff_hits, diff_count),
                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_totals():
        if not args.get('diff'):
            print("%-36s %11s %7s" % ('TOTAL',
                '%d/%d' % (total_hits, total_count)
                    if total_count else '-',
                '%.1f%%' % (100*total_hits/total_count)
                    if total_count else '-'))
        else:
            ratio = ((total_hits/total_count
                    if total_count else 1.0)
                - (prev_total_hits/prev_total_count
                    if prev_total_count else 1.0))
            print("%-36s %11s %7s %11s %7s %11s%s" % ('TOTAL',
                '%d/%d' % (prev_total_hits, prev_total_count)
                    if prev_total_count else '-',
                '%.1f%%' % (100*prev_total_hits/prev_total_count)
                    if prev_total_count else '-',
                '%d/%d' % (total_hits, total_count)
                    if total_count else '-',
                '%.1f%%' % (100*total_hits/total_count)
                    if total_count else '-',
                '%+d/%+d' % (total_hits-prev_total_hits,
                    total_count-prev_total_count),
                ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='function')
        print_totals()

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(
        description="Parse and report coverage info from .info files \
            generated by lcov")
    parser.add_argument('info_paths', nargs='*', default=INFO_PATHS,
        help="Description of where to find *.info files. May be a directory \
            or list of paths. *.info files will be merged to show the total \
            coverage. Defaults to %r." % INFO_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't do any work, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff coverage against.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('--files', action='store_true',
        help="Show file-level coverage.")
    parser.add_argument('-s', '--summary', action='store_true',
        help="Only show the total coverage.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    sys.exit(main(**vars(parser.parse_args())))

scripts/test.py

@@ -20,19 +20,50 @@ import pty
 import errno
 import signal
 
-TESTDIR = 'tests'
+TEST_PATHS = 'tests'
 RULES = """
+# add block devices to sources
+TESTSRC ?= $(SRC) $(wildcard bd/*.c)
+
 define FLATTEN
-tests/%$(subst /,.,$(target)): $(target)
+%(path)s%%$(subst /,.,$(target)): $(target)
     ./scripts/explode_asserts.py $$< -o $$@
 endef
-$(foreach target,$(SRC),$(eval $(FLATTEN)))
--include tests/*.d
+$(foreach target,$(TESTSRC),$(eval $(FLATTEN)))
+-include %(path)s*.d
 .SECONDARY:
-%.test: %.test.o $(foreach f,$(subst /,.,$(SRC:.c=.o)),%.$f)
+%(path)s.test: %(path)s.test.o \\
+        $(foreach t,$(subst /,.,$(TESTSRC:.c=.o)),%(path)s.$t)
     $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
+
+# needed in case builddir is different
+%(path)s%%.o: %(path)s%%.c
+    $(CC) -c -MMD $(CFLAGS) $< -o $@
 """
+COVERAGE_RULES = """
+%(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage
+
+# delete lingering coverage
+%(path)s.test: | %(path)s.info.clean
+.PHONY: %(path)s.info.clean
+%(path)s.info.clean:
+    rm -f %(path)s*.gcda
+
+# accumulate coverage info
+.PHONY: %(path)s.info
+%(path)s.info:
+    $(strip $(LCOV) -c \\
+        $(addprefix -d ,$(wildcard %(path)s*.gcda)) \\
+        --rc 'geninfo_adjust_src_path=$(shell pwd)' \\
+        -o $@)
+    $(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@
+ifdef COVERAGETARGET
+    $(strip $(LCOV) -a $@ \\
+        $(addprefix -a ,$(wildcard $(COVERAGETARGET))) \\
+        -o $(COVERAGETARGET))
+endif
+"""
 GLOBALS = """
 //////////////// AUTOGENERATED TEST ////////////////
@@ -119,6 +150,8 @@ class TestCase:
         self.if_ = config.get('if', None)
         self.in_ = config.get('in', None)
 
+        self.result = None
+
     def __str__(self):
         if hasattr(self, 'permno'):
             if any(k not in self.case.defines for k in self.defines):
@@ -179,7 +212,7 @@ class TestCase:
                 len(self.filter) >= 2 and
                 self.filter[1] != self.permno):
             return False
-        elif args.get('no_internal', False) and self.in_ is not None:
+        elif args.get('no_internal') and self.in_ is not None:
             return False
         elif self.if_ is not None:
             if_ = self.if_
@@ -213,7 +246,7 @@ class TestCase:
         try:
             with open(disk, 'w') as f:
                 f.truncate(0)
-            if args.get('verbose', False):
+            if args.get('verbose'):
                 print('truncate --size=0', disk)
         except FileNotFoundError:
             pass
@@ -237,14 +270,14 @@ class TestCase:
                 '-ex', 'r'])
             ncmd.extend(['--args'] + cmd)
 
-            if args.get('verbose', False):
+            if args.get('verbose'):
                 print(' '.join(shlex.quote(c) for c in ncmd))
             signal.signal(signal.SIGINT, signal.SIG_IGN)
             sys.exit(sp.call(ncmd))
 
         # run test case!
         mpty, spty = pty.openpty()
-        if args.get('verbose', False):
+        if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))
         proc = sp.Popen(cmd, stdout=spty, stderr=spty)
         os.close(spty)
@@ -260,7 +293,7 @@ class TestCase:
                     break
                 raise
             stdout.append(line)
-            if args.get('verbose', False):
+            if args.get('verbose'):
                 sys.stdout.write(line)
 
             # intercept asserts
             m = re.match(
@@ -299,7 +332,7 @@ class ValgrindTestCase(TestCase):
         return not self.leaky and super().shouldtest(**args)
 
     def test(self, exec=[], **args):
-        verbose = args.get('verbose', False)
+        verbose = args.get('verbose')
         uninit = (self.defines.get('LFS2_ERASE_VALUE', None) == -1)
         exec = [
             'valgrind',
@@ -351,12 +384,17 @@ class TestSuite:
         self.name = os.path.basename(path)
         if self.name.endswith('.toml'):
             self.name = self.name[:-len('.toml')]
-        self.path = path
+        if args.get('build_dir'):
+            self.toml = path
+            self.path = args['build_dir'] + '/' + path
+        else:
+            self.toml = path
+            self.path = path
         self.classes = classes
         self.defines = defines.copy()
         self.filter = filter
 
-        with open(path) as f:
+        with open(self.toml) as f:
             # load tests
             config = toml.load(f)
@@ -467,7 +505,7 @@ class TestSuite:
     def build(self, **args):
         # build test files
-        tf = open(self.path + '.test.c.t', 'w')
+        tf = open(self.path + '.test.tc', 'w')
         tf.write(GLOBALS)
         if self.code is not None:
             tf.write('#line %d "%s"\n' % (self.code_lineno, self.path))
@@ -477,7 +515,7 @@ class TestSuite:
         for case in self.cases:
             if case.in_ not in tfs:
                 tfs[case.in_] = open(self.path+'.'+
-                    case.in_.replace('/', '.')+'.t', 'w')
+                    re.sub('(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w')
                 tfs[case.in_].write('#line 1 "%s"\n' % case.in_)
                 with open(case.in_) as f:
                     for line in f:
@@ -516,25 +554,33 @@ class TestSuite:
         # write makefiles
         with open(self.path + '.mk', 'w') as mk:
-            mk.write(RULES.replace(4*' ', '\t'))
+            mk.write(RULES.replace(4*' ', '\t') % dict(path=self.path))
             mk.write('\n')
 
+            # add coverage hooks?
+            if args.get('coverage'):
+                mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict(
+                    path=self.path))
+                mk.write('\n')
+
             # add truely global defines globally
             for k, v in sorted(self.defines.items()):
-                mk.write('%s: override CFLAGS += -D%s=%r\n' % (
-                    self.path+'.test', k, v))
+                mk.write('%s.test: override CFLAGS += -D%s=%r\n'
+                    % (self.path, k, v))
 
             for path in tfs:
                 if path is None:
                     mk.write('%s: %s | %s\n' % (
                         self.path+'.test.c',
-                        self.path,
-                        self.path+'.test.c.t'))
+                        self.toml,
+                        self.path+'.test.tc'))
                 else:
                     mk.write('%s: %s %s | %s\n' % (
                         self.path+'.'+path.replace('/', '.'),
-                        self.path, path,
-                        self.path+'.'+path.replace('/', '.')+'.t'))
+                        self.toml,
+                        path,
+                        self.path+'.'+re.sub('(\.c)?$', '.tc',
+                            path.replace('/', '.'))))
                 mk.write('\t./scripts/explode_asserts.py $| -o $@\n')
 
         self.makefile = self.path + '.mk'
@@ -557,7 +603,7 @@ class TestSuite:
                 if not args.get('verbose', True):
                     sys.stdout.write(FAIL)
                     sys.stdout.flush()
-                if not args.get('keep_going', False):
+                if not args.get('keep_going'):
                     if not args.get('verbose', True):
                         sys.stdout.write('\n')
                     raise
@@ -579,30 +625,30 @@ def main(**args):
     # and what class of TestCase to run
     classes = []
-    if args.get('normal', False):
+    if args.get('normal'):
         classes.append(TestCase)
-    if args.get('reentrant', False):
+    if args.get('reentrant'):
         classes.append(ReentrantTestCase)
-    if args.get('valgrind', False):
+    if args.get('valgrind'):
         classes.append(ValgrindTestCase)
     if not classes:
         classes = [TestCase]
 
     suites = []
-    for testpath in args['testpaths']:
+    for testpath in args['test_paths']:
         # optionally specified test case/perm
         testpath, *filter = testpath.split('#')
         filter = [int(f) for f in filter]
 
         # figure out the suite's toml file
         if os.path.isdir(testpath):
-            testpath = testpath + '/test_*.toml'
+            testpath = testpath + '/*.toml'
         elif os.path.isfile(testpath):
             testpath = testpath
         elif testpath.endswith('.toml'):
-            testpath = TESTDIR + '/' + testpath
+            testpath = TEST_PATHS + '/' + testpath
         else:
-            testpath = TESTDIR + '/' + testpath + '.toml'
+            testpath = TEST_PATHS + '/' + testpath + '.toml'
 
         # find tests
         for path in glob.glob(testpath):
@@ -628,7 +674,7 @@ def main(**args):
         list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
         [target for target in targets])
     mpty, spty = pty.openpty()
-    if args.get('verbose', False):
+    if args.get('verbose'):
         print(' '.join(shlex.quote(c) for c in cmd))
     proc = sp.Popen(cmd, stdout=spty, stderr=spty)
     os.close(spty)
@@ -642,14 +688,14 @@ def main(**args):
                 break
             raise
         stdout.append(line)
-        if args.get('verbose', False):
+        if args.get('verbose'):
             sys.stdout.write(line)
 
         # intercept warnings
         m = re.match(
             '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
             .format('(?:\033\[[\d;]*.| )*', 'warning'),
             line)
-        if m and not args.get('verbose', False):
+        if m and not args.get('verbose'):
             try:
                 with open(m.group(1)) as f:
                     lineno = int(m.group(2))
@@ -662,27 +708,26 @@ def main(**args):
             except:
                 pass
 
     proc.wait()
     if proc.returncode != 0:
-        if not args.get('verbose', False):
+        if not args.get('verbose'):
             for line in stdout:
                 sys.stdout.write(line)
-        sys.exit(-3)
+        sys.exit(-1)
 
     print('built %d test suites, %d test cases, %d permutations' % (
         len(suites),
         sum(len(suite.cases) for suite in suites),
         sum(len(suite.perms) for suite in suites)))
 
-    filtered = 0
+    total = 0
     for suite in suites:
         for perm in suite.perms:
-            filtered += perm.shouldtest(**args)
-    if filtered != sum(len(suite.perms) for suite in suites):
-        print('filtered down to %d permutations' % filtered)
+            total += perm.shouldtest(**args)
+    if total != sum(len(suite.perms) for suite in suites):
+        print('filtered down to %d permutations' % total)
 
     # only requested to build?
-    if args.get('build', False):
+    if args.get('build'):
         return 0
 
     print('====== testing ======')
@@ -697,15 +742,12 @@ def main(**args):
     failed = 0
     for suite in suites:
         for perm in suite.perms:
-            if not hasattr(perm, 'result'):
-                continue
-
             if perm.result == PASS:
                 passed += 1
-            else:
+            elif isinstance(perm.result, TestFailure):
                 sys.stdout.write(
                     "\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m "
-                    "{perm} failed with {returncode}\n".format(
-                        perm=perm, path=perm.suite.path, lineno=perm.lineno,
-                        returncode=perm.result.returncode or 0))
+                    "{perm} failed\n".format(
+                        perm=perm, path=perm.suite.path, lineno=perm.lineno))
 
                 if perm.result.stdout:
@@ -723,11 +765,33 @@ def main(**args):
                 sys.stdout.write('\n')
                 failed += 1
 
-    if args.get('gdb', False):
+    if args.get('coverage'):
+        # collect coverage info
+        # why -j1? lcov doesn't work in parallel because of gcov limitations
+        cmd = (['make', '-j1', '-f', 'Makefile'] +
+            list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
+            (['COVERAGETARGET=%s' % args['coverage']]
+                if isinstance(args['coverage'], str) else []) +
+            [suite.path + '.info' for suite in suites
+                if any(perm.result == PASS for perm in suite.perms)])
+        if args.get('verbose'):
+            print(' '.join(shlex.quote(c) for c in cmd))
+        proc = sp.Popen(cmd,
+            stdout=sp.PIPE if not args.get('verbose') else None,
+            stderr=sp.STDOUT if not args.get('verbose') else None,
+            universal_newlines=True)
+        proc.wait()
+        if proc.returncode != 0:
+            if not args.get('verbose'):
+                for line in proc.stdout:
+                    sys.stdout.write(line)
+            sys.exit(-1)
+
+    if args.get('gdb'):
         failure = None
         for suite in suites:
             for perm in suite.perms:
-                if getattr(perm, 'result', PASS) != PASS:
+                if isinstance(perm.result, TestFailure):
                     failure = perm.result
         if failure is not None:
             print('======= gdb ======')
@@ -735,20 +799,22 @@ def main(**args):
             failure.case.test(failure=failure, **args)
         sys.exit(0)
 
-    print('tests passed: %d' % passed)
-    print('tests failed: %d' % failed)
+    print('tests passed %d/%d (%.2f%%)' % (passed, total,
+        100*(passed/total if total else 1.0)))
+    print('tests failed %d/%d (%.2f%%)' % (failed, total,
+        100*(failed/total if total else 1.0)))
     return 1 if failed > 0 else 0
 if __name__ == "__main__":
     import argparse
     parser = argparse.ArgumentParser(
         description="Run parameterized tests in various configurations.")
-    parser.add_argument('testpaths', nargs='*', default=[TESTDIR],
+    parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS],
         help="Description of test(s) to run. By default, this is all tests \
             found in the \"{0}\" directory. Here, you can specify a different \
-            directory of tests, a specific file, a suite by name, and even a \
-            specific test case by adding brackets. For example \
-            \"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TESTDIR))
+            directory of tests, a specific file, a suite by name, and even \
+            specific test cases and permutations. For example \
+            \"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TEST_PATHS))
     parser.add_argument('-D', action='append', default=[],
         help="Overriding parameter definitions.")
     parser.add_argument('-v', '--verbose', action='store_true',
@@ -769,10 +835,19 @@ if __name__ == "__main__":
help="Run tests normally.")
parser.add_argument('-r', '--reentrant', action='store_true',
help="Run reentrant tests with simulated power-loss.")
parser.add_argument('-V', '--valgrind', action='store_true',
parser.add_argument('--valgrind', action='store_true',
help="Run non-leaky tests under valgrind to check for memory leaks.")
parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '),
parser.add_argument('--exec', default=[], type=lambda e: e.split(),
help="Run tests with another executable prefixed on the command line.")
parser.add_argument('-d', '--disk',
parser.add_argument('--disk',
help="Specify a file to use for persistent/reentrant tests.")
parser.add_argument('--coverage', type=lambda x: x if x else True,
nargs='?', const='',
help="Collect coverage information during testing. This uses lcov/gcov \
to accumulate coverage information into *.info files. May also \
a path to a *.info file to accumulate coverage info into.")
parser.add_argument('--build-dir',
help="Build relative to the specified directory instead of the \
current directory.")
sys.exit(main(**vars(parser.parse_args())))
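
A plausible end-to-end workflow with these scripts (paths and CSV names are illustrative):

    ./scripts/test.py --coverage
    ./scripts/coverage.py tests/*.toml.info -o coverage.csv
    ./scripts/coverage.py -u coverage.csv -d coverage-prev.csv
    ./scripts/code.py -o sizes.csv -d sizes-prev.csv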