Rewrote explode_asserts.py to be more efficient

Normally I wouldn't consider optimizing this sort of script, but
explode_asserts.py proved to be terribly inefficient and dominated
the build time for running tests. It was slow enough to be distracting
when attempting to test patches while debugging. Just running
explode_asserts.py was ~10x slower than the rest of the compilation
process.

After implementing a proper tokenizer and switching to a handwritten
recursive descent parser, I was able to speed up explode_asserts.py
by ~5x and make test compilation much more tolerable.
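
The core of the new approach is to lex the entire input once with a
single regex alternation of named groups, then let the recursive
descent parser walk an integer offset into the resulting token list.
A minimal sketch of the idea (lexemes trimmed down; tokenize is an
illustrative name, the real lexer lives in the Parse class below):

    import re

    LEXEMES = {
        'ws':     r'\s+',
        'assert': r'assert',
        'paren':  r'[()]',
    }
    TOKEN = re.compile('|'.join(
        '(?P<%s>%s)' % (n, p) for n, p in LEXEMES.items()))

    def tokenize(data):
        tokens, off = [], 0
        for m in TOKEN.finditer(data):
            if m.start() > off:
                # text between lexemes becomes a catch-all token so
                # it can be written back out verbatim
                tokens.append((None, data[off:m.start()]))
            tokens.append((m.lastgroup, m.group()))
            off = m.end()
        if off < len(data):
            tokens.append((None, data[off:]))
        return tokens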

I don't think this was a limitation of parsy; rather, switching
to a recursive descent parser made it much easier to find the hotspots
where parsing was wasting cycles (string slicing, for one).
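
For a sense of why slicing hurts: chopping consumed input off the
front of a string copies the remainder every time, so consuming n
bytes one slice at a time is O(n^2), while tracking an explicit
cursor is linear. Illustrative only (consume is a stand-in for
whatever the parser does with each character):

    # quadratic: each step copies everything after the first character
    while data:
        c, data = data[0], data[1:]
        consume(c)

    # linear: same traversal, but with a cursor instead of slices
    off = 0
    while off < len(data):
        consume(data[off])
        off += 1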

It's interesting to note that while the assert patterns could be
parsed by an LL(1) parser (by dumping already-seen tokens when a
pattern fails), I didn't bother, as the patterns are much easier to
write with LL(k), and parsing asserts is predicated on the "assert"
string.
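
Concretely, the LL(k) behaviour is just saving and restoring the
token offset: each assert pattern is attempted in turn, and on
failure the offset is rewound before trying the next. This is what
push/pop implement in passert below, roughly:

    state = p.push()              # remember the current token offset
    for pattern in [pastr, pamem, paint, pabool]:
        try:
            return pattern(p)     # first pattern to parse wins
        except ParseFailure:
            p.pop(state)          # rewind and try the next pattern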

A few other tweaks:
- allowed combining different test modes in one run
- added a --no-internal option
- changed test_.py to start counting cases from 1
- added assert(memcmp(a, b) == 0) matching (example below)
- added better handling of string escapes in assert messages
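
As an example of the memcmp matching, an assert like this (buffer is
a hypothetical test variable):

    assert(memcmp(buffer, "hello", 5) == 0);

now gets rewritten into a call to one of the generated verbose
macros, roughly:

    __LFS_ASSERT_MEM_EQ(__FILE__, __LINE__, buffer, "hello", 5);

which on failure prints both sides (escaped, truncated to --maxwidth
bytes) before raising SIGABRT.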

time to run tests:
before: 1m31.122s
after:  0m41.447s
Author: Christopher Haster
Date:   2020-01-25 15:59:17 -06:00
Parent: a5d614fbfb
Commit: b9d0695e0a

2 changed files with 403 additions and 236 deletions

scripts/explode_asserts.py

@@ -1,208 +1,372 @@
#!/usr/bin/env python3
import parsy as p
import re
import io
import sys
ASSERT_PATTERN = p.string('LFS_ASSERT') | p.string('assert')
ASSERT_CHARS = 'La'
ASSERT_TARGET = '__LFS_ASSERT_{TYPE}_{COMP}'
ASSERT_TESTS = {
'int': """
__typeof__({lh}) _lh = {lh};
__typeof__({lh}) _rh = (__typeof__({lh})){rh};
if (!(_lh {op} _rh)) {{
printf("%s:%d:assert: "
"assert failed with %"PRIiMAX", expected {comp} %"PRIiMAX"\\n",
{file}, {line}, (intmax_t)_lh, (intmax_t)_rh);
PATTERN = ['LFS_ASSERT', 'assert']
PREFIX = 'LFS'
MAXWIDTH = 16
ASSERT = "__{PREFIX}_ASSERT_{TYPE}_{COMP}"
FAIL = """
__attribute__((unused))
static void __{prefix}_assert_fail_{type}(
const char *file, int line, const char *comp,
{ctype} lh, size_t lsize,
{ctype} rh, size_t rsize) {{
printf("%s:%d:assert: assert failed with ", file, line);
__{prefix}_assert_print_{type}(lh, lsize);
printf(", expected %s ", comp);
__{prefix}_assert_print_{type}(rh, rsize);
printf("\\n");
fflush(NULL);
raise(SIGABRT);
}}
""",
'str': """
const char *_lh = {lh};
const char *_rh = {rh};
if (!(strcmp(_lh, _rh) {op} 0)) {{
printf("%s:%d:assert: "
"assert failed with \\\"%s\\\", expected {comp} \\\"%s\\\"\\n",
{file}, {line}, _lh, _rh);
fflush(NULL);
raise(SIGABRT);
}}
""",
'bool': """
bool _lh = !!({lh});
bool _rh = !!({rh});
if (!(_lh {op} _rh)) {{
printf("%s:%d:assert: "
"assert failed with %s, expected {comp} %s\\n",
{file}, {line}, _lh ? "true" : "false", _rh ? "true" : "false");
fflush(NULL);
raise(SIGABRT);
}}
""",
}}
"""
COMP = {
'==': 'eq',
'!=': 'ne',
'<=': 'le',
'>=': 'ge',
'<': 'lt',
'>': 'gt',
}
def mkassert(lh, rh='true', type='bool', comp='eq'):
return ((ASSERT_TARGET + "({lh}, {rh}, __FILE__, __LINE__, __func__)")
.format(
type=type, TYPE=type.upper(),
comp=comp, COMP=comp.upper(),
lh=lh.strip(' '),
rh=rh.strip(' ')))
TYPE = {
'int': {
'ctype': 'intmax_t',
'fail': FAIL,
'print': """
__attribute__((unused))
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
(void)size;
printf("%"PRIiMAX, v);
}}
""",
'assert': """
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
do {{
__typeof__(lh) _lh = lh;
__typeof__(lh) _rh = (__typeof__(lh))rh;
if (!(_lh {op} _rh)) {{
__{prefix}_assert_fail_{type}(file, line, "{comp}",
(intmax_t)_lh, 0, (intmax_t)_rh, 0);
}}
}} while (0)
"""
},
'bool': {
'ctype': 'bool',
'fail': FAIL,
'print': """
__attribute__((unused))
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
(void)size;
printf("%s", v ? "true" : "false");
}}
""",
'assert': """
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
do {{
bool _lh = !!(lh);
bool _rh = !!(rh);
if (!(_lh {op} _rh)) {{
__{prefix}_assert_fail_{type}(file, line, "{comp}",
_lh, 0, _rh, 0);
}}
}} while (0)
"""
},
'mem': {
'ctype': 'const void *',
'fail': FAIL,
'print': """
__attribute__((unused))
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
const uint8_t *s = v;
printf("\\\"");
for (size_t i = 0; i < size && i < {maxwidth}; i++) {{
if (s[i] >= ' ' && s[i] <= '~') {{
printf("%c", s[i]);
}} else {{
printf("\\\\x%02x", s[i]);
}}
}}
if (size > {maxwidth}) {{
printf("...");
}}
printf("\\\"");
}}
""",
'assert': """
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh, size)
do {{
const void *_lh = lh;
const void *_rh = rh;
if (!(memcmp(_lh, _rh, size) {op} 0)) {{
__{prefix}_assert_fail_{type}(file, line, "{comp}",
_lh, size, _rh, size);
}}
}} while (0)
"""
},
'str': {
'ctype': 'const char *',
'fail': FAIL,
'print': """
__attribute__((unused))
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
__{prefix}_assert_print_mem(v, size);
}}
""",
'assert': """
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
do {{
const char *_lh = lh;
const char *_rh = rh;
if (!(strcmp(_lh, _rh) {op} 0)) {{
__{prefix}_assert_fail_{type}(file, line, "{comp}",
_lh, strlen(_lh), _rh, strlen(_rh));
}}
}} while (0)
"""
}
}
def mkdecl(type, comp, op):
return ((
"#define "+ASSERT_TARGET+"(lh, rh, file, line, func)"
" do {{"+re.sub('\s+', ' ', ASSERT_TESTS[type])+"}} while (0)\n")
.format(
type=type, TYPE=type.upper(),
comp=comp, COMP=comp.upper(),
lh='lh', rh='rh', op=op,
file='file', line='line', func='func'))
def mkdecls(outf, maxwidth=16):
outf.write("#include <stdio.h>\n")
outf.write("#include <stdbool.h>\n")
outf.write("#include <stdint.h>\n")
outf.write("#include <inttypes.h>\n")
outf.write("#include <signal.h>\n")
# add custom until combinator
def until(self, end):
return end.should_fail('should fail').then(self).many()
p.Parser.until = until
for type, desc in sorted(TYPE.items()):
format = {
'type': type.lower(), 'TYPE': type.upper(),
'ctype': desc['ctype'],
'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
'maxwidth': maxwidth,
}
outf.write(re.sub('\s+', ' ',
desc['print'].strip().format(**format))+'\n')
outf.write(re.sub('\s+', ' ',
desc['fail'].strip().format(**format))+'\n')
pcomp = (
p.string('==').tag('eq') |
p.string('!=').tag('ne') |
p.string('<=').tag('le') |
p.string('>=').tag('ge') |
p.string('<').tag('lt') |
p.string('>').tag('gt'));
for op, comp in sorted(COMP.items()):
format.update({
'comp': comp.lower(), 'COMP': comp.upper(),
'op': op,
})
outf.write(re.sub('\s+', ' ',
desc['assert'].strip().format(**format))+'\n')
plogic = p.string('&&') | p.string('||')
@p.generate
def pstrassert():
yield ASSERT_PATTERN + p.regex('\s*') + p.string('(') + p.regex('\s*')
yield p.string('strcmp') + p.regex('\s*') + p.string('(') + p.regex('\s*')
lh = yield pexpr.until(p.string(',') | p.string(')') | plogic)
yield p.string(',') + p.regex('\s*')
rh = yield pexpr.until(p.string(')') | plogic)
yield p.string(')') + p.regex('\s*')
op = yield pcomp
yield p.regex('\s*') + p.string('0') + p.regex('\s*') + p.string(')')
return mkassert(''.join(lh), ''.join(rh), 'str', op[0])
@p.generate
def pintassert():
yield ASSERT_PATTERN + p.regex('\s*') + p.string('(') + p.regex('\s*')
lh = yield pexpr.until(pcomp | p.string(')') | plogic)
op = yield pcomp
rh = yield pexpr.until(p.string(')') | plogic)
yield p.string(')')
return mkassert(''.join(lh), ''.join(rh), 'int', op[0])
@p.generate
def pboolassert():
yield ASSERT_PATTERN + p.regex('\s*') + p.string('(') + p.regex('\s*')
expr = yield pexpr.until(p.string(')'))
yield p.string(')')
return mkassert(''.join(expr), 'true', 'bool', 'eq')
passert = p.peek(ASSERT_PATTERN) >> (pstrassert | pintassert | pboolassert)
@p.generate
def pcomment1():
yield p.string('//')
s = yield p.regex('[^\\n]*')
yield p.string('\n')
return '//' + s + '\n'
@p.generate
def pcomment2():
yield p.string('/*')
s = yield p.regex('((?!\*/).)*')
yield p.string('*/')
return '/*' + ''.join(s) + '*/'
@p.generate
def pcomment3():
yield p.string('#')
s = yield p.regex('[^\\n]*')
yield p.string('\n')
return '#' + s + '\n'
pws = p.regex('\s+') | pcomment1 | pcomment2 | pcomment3
@p.generate
def pstring():
q = yield p.regex('["\']')
s = yield (p.string('\\%s' % q) | p.regex('[^%s]' % q)).many()
yield p.string(q)
return q + ''.join(s) + q
@p.generate
def pnested():
l = yield p.string('(')
n = yield pexpr.until(p.string(')'))
r = yield p.string(')')
return l + ''.join(n) + r
pexpr = (
# shortcut for a bit better performance
p.regex('[^%s/#\'"():;{}=><,&|-]+' % ASSERT_CHARS) |
pws |
passert |
pstring |
pnested |
p.string('->') |
p.regex('.', re.DOTALL))
@p.generate
def pstmt():
ws = yield pws.many()
lh = yield pexpr.until(p.string('=>') | p.regex('[:;{}]'))
op = yield p.string('=>').optional()
if op == '=>':
rh = yield pstmt
return ''.join(ws) + mkassert(''.join(lh), rh, 'int', 'eq')
def mkassert(type, comp, lh, rh, size=None):
format = {
'type': type.lower(), 'TYPE': type.upper(),
'comp': comp.lower(), 'COMP': comp.upper(),
'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
'lh': lh.strip(),
'rh': rh.strip(),
'size': size,
}
if size:
return ((ASSERT + '(__FILE__, __LINE__, {lh}, {rh}, {size})')
.format(**format))
else:
return ''.join(ws) + ''.join(lh)
return ((ASSERT + '(__FILE__, __LINE__, {lh}, {rh})')
.format(**format))
# simple recursive descent parser
LEX = {
'ws': [r'(?:\s|\n|#.*?\n|//.*?\n|/\*.*?\*/)+'],
'assert': PATTERN,
    'string': [r'"(?:\\.|[^"])*"', r"'(?:\\.|[^'])'"],
'arrow': ['=>'],
'paren': ['\(', '\)'],
'op': ['strcmp', 'memcmp', '->'],
'comp': ['==', '!=', '<=', '>=', '<', '>'],
'logic': ['\&\&', '\|\|'],
'sep': [':', ';', '\{', '\}', ','],
}
class ParseFailure(Exception):
def __init__(self, expected, found):
self.expected = expected
self.found = found
def __str__(self):
return "expected %r, found %s..." % (
self.expected, repr(self.found)[:70])
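# tokenize the entire input up front with one regex alternation of
# named groups, then parse by walking an offset into the token list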
class Parse:
def __init__(self, inf, lexemes):
p = '|'.join('(?P<%s>%s)' % (n, '|'.join(l))
for n, l in lexemes.items())
p = re.compile(p, re.DOTALL)
data = inf.read()
tokens = []
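        # text between recognized lexemes is kept as catch-all (None)
        # tokens so it can be written back out verbatim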
while True:
m = p.search(data)
if m:
if m.start() > 0:
tokens.append((None, data[:m.start()]))
tokens.append((m.lastgroup, m.group()))
data = data[m.end():]
else:
tokens.append((None, data))
break
self.tokens = tokens
self.off = 0
def lookahead(self, *pattern):
if self.off < len(self.tokens):
token = self.tokens[self.off]
if token[0] in pattern or token[1] in pattern:
self.m = token[1]
return self.m
self.m = None
return self.m
def accept(self, *patterns):
m = self.lookahead(*patterns)
if m is not None:
self.off += 1
return m
def expect(self, *patterns):
m = self.accept(*patterns)
if not m:
raise ParseFailure(patterns, self.tokens[self.off:])
return m
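    # push/pop save and restore the parse position, giving the
    # parser cheap backtracking over the pre-lexed token list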
def push(self):
return self.off
def pop(self, state):
self.off = state
def passert(p):
def pastr(p):
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
p.expect('strcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
lh = pexpr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
rh = pexpr(p) ; p.accept('ws')
p.expect(')') ; p.accept('ws')
comp = p.expect('comp') ; p.accept('ws')
p.expect('0') ; p.accept('ws')
p.expect(')')
return mkassert('str', COMP[comp], lh, rh)
def pamem(p):
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
p.expect('memcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
lh = pexpr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
rh = pexpr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
size = pexpr(p) ; p.accept('ws')
p.expect(')') ; p.accept('ws')
comp = p.expect('comp') ; p.accept('ws')
p.expect('0') ; p.accept('ws')
p.expect(')')
return mkassert('mem', COMP[comp], lh, rh, size)
def paint(p):
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
lh = pexpr(p) ; p.accept('ws')
comp = p.expect('comp') ; p.accept('ws')
rh = pexpr(p) ; p.accept('ws')
p.expect(')')
return mkassert('int', COMP[comp], lh, rh)
def pabool(p):
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
lh = pexprs(p) ; p.accept('ws')
p.expect(')')
return mkassert('bool', 'eq', lh, 'true')
def pa(p):
return p.expect('assert')
state = p.push()
lastf = None
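    # try each assert pattern in order, rewinding on failure; the
    # bare pa() fallback passes the assert token through untouched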
for pa in [pastr, pamem, paint, pabool, pa]:
try:
return pa(p)
except ParseFailure as f:
p.pop(state)
lastf = f
else:
raise lastf
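# parse a balanced expression, recursing into parentheses and
# rewriting any asserts found along the way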
def pexpr(p):
res = []
while True:
if p.accept('('):
res.append(p.m)
while True:
res.append(pexprs(p))
if p.accept('sep'):
res.append(p.m)
else:
break
res.append(p.expect(')'))
elif p.lookahead('assert'):
res.append(passert(p))
elif p.accept('assert', 'ws', 'string', 'op', None):
res.append(p.m)
else:
return ''.join(res)
def pexprs(p):
res = []
while True:
res.append(pexpr(p))
if p.accept('comp', 'logic', ','):
res.append(p.m)
else:
return ''.join(res)
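# statements of the form "lh => rh" are rewritten into int asserts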
def pstmt(p):
ws = p.accept('ws') or ''
lh = pexprs(p)
if p.accept('=>'):
rh = pexprs(p)
return ws + mkassert('int', 'eq', lh, rh)
else:
return ws + lh
@p.generate
def pstmts():
a = yield pstmt
b = yield (p.regex('[:;{}]') + pstmt).many()
return [a] + b
def main(args):
inf = open(args.input, 'r') if args.input else sys.stdin
outf = open(args.output, 'w') if args.output else sys.stdout
# parse C code
input = inf.read()
stmts = pstmts.parse(input)
lexemes = LEX.copy()
if args.pattern:
lexemes['assert'] = args.pattern
p = Parse(inf, lexemes)
# write extra verbose asserts
outf.write("#include <stdbool.h>\n")
outf.write("#include <stdint.h>\n")
outf.write("#include <inttypes.h>\n")
outf.write("#include <signal.h>\n")
outf.write(mkdecl('int', 'eq', '=='))
outf.write(mkdecl('int', 'ne', '!='))
outf.write(mkdecl('int', 'lt', '<'))
outf.write(mkdecl('int', 'gt', '>'))
outf.write(mkdecl('int', 'le', '<='))
outf.write(mkdecl('int', 'ge', '>='))
outf.write(mkdecl('str', 'eq', '=='))
outf.write(mkdecl('str', 'ne', '!='))
outf.write(mkdecl('str', 'lt', '<'))
outf.write(mkdecl('str', 'gt', '>'))
outf.write(mkdecl('str', 'le', '<='))
outf.write(mkdecl('str', 'ge', '>='))
outf.write(mkdecl('bool', 'eq', '=='))
mkdecls(outf, maxwidth=args.maxwidth)
if args.input:
outf.write("#line %d \"%s\"\n" % (1, args.input))
# write parsed statements
for stmt in stmts:
outf.write(stmt)
# parse and write out stmt at a time
try:
while True:
outf.write(pstmt(p))
if p.accept('sep'):
outf.write(p.m)
else:
break
except ParseFailure as f:
pass
for i in range(p.off, len(p.tokens)):
outf.write(p.tokens[i][1])
if __name__ == "__main__":
import argparse
@@ -210,6 +374,10 @@ if __name__ == "__main__":
description="Cpp step that increases assert verbosity")
parser.add_argument('input', nargs='?',
help="Input C file after cpp.")
parser.add_argument('-o', '--output',
parser.add_argument('-o', '--output', required=True,
help="Output C file.")
parser.add_argument('-p', '--pattern', action='append',
help="Patterns to search for starting an assert statement.")
parser.add_argument('--maxwidth', default=MAXWIDTH, type=int,
help="Maximum number of characters to display for strcmp and memcmp.")
main(parser.parse_args())

scripts/test.py

@@ -4,24 +4,6 @@
# .toml files stored in the tests directory.
#
# TODO
# x nargs > 1?
# x show perm config on failure
# x filtering
# n show perm config on verbose?
# x better lineno tracking for cases?
# n non-int perms?
# x different path format?
# - suite.prologue, suite.epilogue
# x in
# x change BLOCK_CYCLES to -1 by default
# x change persist behaviour
# x config chaining correct
# - why can't gdb see my defines?
# - say no to internal?
# x buffering stdout issues?
# - debug fast
import toml
import glob
import re
@@ -125,6 +107,7 @@ class TestFailure(Exception):
class TestCase:
def __init__(self, config, filter=filter,
suite=None, caseno=None, lineno=None, **_):
self.config = config
self.filter = filter
self.suite = suite
self.caseno = caseno
@@ -150,8 +133,10 @@ class TestCase:
return '%s#%d' % (
self.suite.name, self.caseno)
def permute(self, defines, permno=None, **_):
ncase = copy.copy(self)
def permute(self, class_=None, defines={}, permno=None, **_):
ncase = (class_ or type(self))(self.config)
for k, v in self.__dict__.items():
setattr(ncase, k, v)
ncase.case = self
ncase.perms = [ncase]
ncase.permno = permno
@@ -194,6 +179,8 @@ class TestCase:
len(self.filter) >= 2 and
self.filter[1] != self.permno):
return False
elif args.get('no_internal', False) and self.in_ is not None:
return False
elif self.if_ is not None:
return eval(self.if_, None, self.defines.copy())
else:
@@ -210,6 +197,8 @@ class TestCase:
if persist != 'noerase':
try:
os.remove(self.suite.path + '.disk')
if args.get('verbose', False):
print('rm', self.suite.path + '.disk')
except FileNotFoundError:
pass
@@ -225,7 +214,7 @@ class TestCase:
if gdb == 'assert':
ncmd.extend(['-ex', 'r'])
if failure.assert_:
ncmd.extend(['-ex', 'up'])
ncmd.extend(['-ex', 'up 2'])
elif gdb == 'start':
ncmd.extend([
'-ex', 'b %s:%d' % (self.suite.path, self.code_lineno),
@@ -331,13 +320,15 @@ class ReentrantTestCase(TestCase):
raise
class TestSuite:
def __init__(self, path, filter=None, TestCase=TestCase, **args):
def __init__(self, path, classes=[TestCase], defines={},
filter=None, **args):
self.name = os.path.basename(path)
if self.name.endswith('.toml'):
self.name = self.name[:-len('.toml')]
self.path = path
self.classes = classes
self.defines = defines
self.filter = filter
self.TestCase = TestCase
with open(path) as f:
# load tests
@@ -356,7 +347,9 @@ class TestSuite:
code_linenos.reverse()
# grab global config
self.defines = config.get('define', {})
for k, v in config.get('define', {}).items():
if k not in self.defines:
self.defines[k] = v
self.code = config.get('code', None)
if self.code is not None:
self.code_lineno = code_linenos.pop()
@@ -372,8 +365,8 @@ class TestSuite:
if k not in case:
case[k] = v
# initialize test case
self.cases.append(self.TestCase(case, filter=filter,
suite=self, caseno=i, lineno=lineno, **args))
self.cases.append(TestCase(case, filter=filter,
suite=self, caseno=i+1, lineno=lineno, **args))
def __str__(self):
return self.name
@@ -381,14 +374,14 @@ class TestSuite:
def __lt__(self, other):
return self.name < other.name
def permute(self, defines={}, **args):
def permute(self, **args):
for case in self.cases:
# let's find all parameterized definitions, in one of [args.D,
# suite.defines, case.defines, DEFINES]. Note that each of these
# can be either a dict of defines, or a list of dicts, expressing
# an initial set of permutations.
pending = [{}]
for inits in [defines, self.defines, case.defines, DEFINES]:
for inits in [self.defines, case.defines, DEFINES]:
if not isinstance(inits, list):
inits = [inits]
@@ -422,8 +415,10 @@ class TestSuite:
# generate permutations
case.perms = []
for i, perm in enumerate(expanded):
case.perms.append(case.permute(perm, permno=i, **args))
for i, (class_, defines) in enumerate(
it.product(self.classes, expanded)):
case.perms.append(case.permute(
class_, defines, permno=i+1, **args))
# also track non-unique defines
case.defines = {}
@@ -519,16 +514,12 @@ class TestSuite:
self.target = self.path + '.test'
return self.makefile, self.target
def test(self, caseno=None, permno=None, **args):
def test(self, **args):
# run test suite!
if not args.get('verbose', True):
sys.stdout.write(self.name + ' ')
sys.stdout.flush()
for perm in self.perms:
if caseno is not None and perm.caseno != caseno:
continue
if permno is not None and perm.permno != permno:
continue
if not perm.shouldtest(**args):
continue
@@ -553,6 +544,23 @@ class TestSuite:
sys.stdout.write('\n')
def main(**args):
# figure out explicit defines
defines = {}
for define in args['D']:
k, v, *_ = define.split('=', 2) + ['']
defines[k] = v
# and what class of TestCase to run
classes = []
if args.get('normal', False):
classes.append(TestCase)
if args.get('reentrant', False):
classes.append(ReentrantTestCase)
if args.get('valgrind', False):
classes.append(ValgrindTestCase)
if not classes:
classes = [TestCase]
suites = []
for testpath in args['testpaths']:
# optionally specified test case/perm
@@ -571,27 +579,14 @@ def main(**args):
# find tests
for path in glob.glob(testpath):
if args.get('valgrind', False):
TestCase_ = ValgrindTestCase
elif args.get('reentrant', False):
TestCase_ = ReentrantTestCase
else:
TestCase_ = TestCase
suites.append(TestSuite(path,
filter=filter, TestCase=TestCase_, **args))
suites.append(TestSuite(path, classes, defines, filter, **args))
# sort for reproducibility
suites = sorted(suites)
# generate permutations
defines = {}
for define in args['D']:
k, v, *_ = define.split('=', 2) + ['']
defines[k] = v
for suite in suites:
suite.permute(defines, **args)
suite.permute(**args)
# build tests in parallel
print('====== building ======')
@@ -736,10 +731,14 @@ if __name__ == "__main__":
parser.add_argument('-g', '--gdb', choices=['init', 'start', 'assert'],
nargs='?', const='assert',
help="Drop into gdb on test failure.")
parser.add_argument('--valgrind', action='store_true',
help="Run non-leaky tests under valgrind to check for memory leaks.")
parser.add_argument('--reentrant', action='store_true',
parser.add_argument('--no-internal', action='store_true',
help="Don't run tests that require internal knowledge.")
parser.add_argument('-n', '--normal', action='store_true',
help="Run tests normally.")
parser.add_argument('-r', '--reentrant', action='store_true',
help="Run reentrant tests with simulated power-loss.")
parser.add_argument('-V', '--valgrind', action='store_true',
help="Run non-leaky tests under valgrind to check for memory leaks.")
parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '),
help="Run tests with another executable prefixed on the command line.")
sys.exit(main(**vars(parser.parse_args())))