Rewrote explode_asserts.py to be more efficient

Normally I wouldn't consider optimizing this sort of script, but
explode_asserts.py proved to be terribly inefficient and dominated
the build time for running tests. It was slow enough to be distracting
when attempting to test patches while debugging. Just running
explode_asserts.py was ~10x slower than the rest of the compilation
process.

After implementing a proper tokenizer and switching to a handwritten
recursive descent parser, I was able to speed up explode_asserts.py
by ~5x and make test compilation much more tolerable.

I don't think this was a limitation of parsy, but rather switching
to a recursive descent parser made it much easier to find the hotspots
where parsing was wasting cycles (string slicing for one).

It's interesting to note that while the assert patterns can be parsed
with an LL(1) parser (by dumping seen tokens if a pattern fails),
I didn't bother as it's much easier to write the patterns with LL(k)
and parsing asserts is predicated by the "assert" string.

A few other tweaks:
- allowed combining different test modes in one run
- added a --no-internal option
- changed test_.py to start counting cases from 1
- added assert(memcmp(a, b) == 0) matching
- added better handling of string escapes in assert messages

time to run tests:
before: 1m31.122s
after:  0m41.447s
This commit is contained in:
Christopher Haster
2020-01-25 15:59:17 -06:00
parent a5d614fbfb
commit b9d0695e0a
2 changed files with 403 additions and 236 deletions

View File

@@ -1,208 +1,372 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
import parsy as p
import re import re
import io
import sys import sys
ASSERT_PATTERN = p.string('LFS_ASSERT') | p.string('assert') PATTERN = ['LFS_ASSERT', 'assert']
ASSERT_CHARS = 'La' PREFIX = 'LFS'
ASSERT_TARGET = '__LFS_ASSERT_{TYPE}_{COMP}' MAXWIDTH = 16
ASSERT_TESTS = {
'int': """ ASSERT = "__{PREFIX}_ASSERT_{TYPE}_{COMP}"
__typeof__({lh}) _lh = {lh}; FAIL = """
__typeof__({lh}) _rh = (__typeof__({lh})){rh}; __attribute__((unused))
if (!(_lh {op} _rh)) {{ static void __{prefix}_assert_fail_{type}(
printf("%s:%d:assert: " const char *file, int line, const char *comp,
"assert failed with %"PRIiMAX", expected {comp} %"PRIiMAX"\\n", {ctype} lh, size_t lsize,
{file}, {line}, (intmax_t)_lh, (intmax_t)_rh); {ctype} rh, size_t rsize) {{
fflush(NULL); printf("%s:%d:assert: assert failed with ", file, line);
raise(SIGABRT); __{prefix}_assert_print_{type}(lh, lsize);
}} printf(", expected %s ", comp);
""", __{prefix}_assert_print_{type}(rh, rsize);
'str': """ printf("\\n");
const char *_lh = {lh}; fflush(NULL);
const char *_rh = {rh}; raise(SIGABRT);
if (!(strcmp(_lh, _rh) {op} 0)) {{ }}
printf("%s:%d:assert: " """
"assert failed with \\\"%s\\\", expected {comp} \\\"%s\\\"\\n",
{file}, {line}, _lh, _rh); COMP = {
fflush(NULL); '==': 'eq',
raise(SIGABRT); '!=': 'ne',
}} '<=': 'le',
""", '>=': 'ge',
'bool': """ '<': 'lt',
bool _lh = !!({lh}); '>': 'gt',
bool _rh = !!({rh});
if (!(_lh {op} _rh)) {{
printf("%s:%d:assert: "
"assert failed with %s, expected {comp} %s\\n",
{file}, {line}, _lh ? "true" : "false", _rh ? "true" : "false");
fflush(NULL);
raise(SIGABRT);
}}
""",
} }
def mkassert(lh, rh='true', type='bool', comp='eq'): TYPE = {
return ((ASSERT_TARGET + "({lh}, {rh}, __FILE__, __LINE__, __func__)") 'int': {
.format( 'ctype': 'intmax_t',
type=type, TYPE=type.upper(), 'fail': FAIL,
comp=comp, COMP=comp.upper(), 'print': """
lh=lh.strip(' '), __attribute__((unused))
rh=rh.strip(' '))) static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
(void)size;
printf("%"PRIiMAX, v);
}}
""",
'assert': """
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
do {{
__typeof__(lh) _lh = lh;
__typeof__(lh) _rh = (__typeof__(lh))rh;
if (!(_lh {op} _rh)) {{
__{prefix}_assert_fail_{type}(file, line, "{comp}",
(intmax_t)_lh, 0, (intmax_t)_rh, 0);
}}
}} while (0)
"""
},
'bool': {
'ctype': 'bool',
'fail': FAIL,
'print': """
__attribute__((unused))
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
(void)size;
printf("%s", v ? "true" : "false");
}}
""",
'assert': """
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
do {{
bool _lh = !!(lh);
bool _rh = !!(rh);
if (!(_lh {op} _rh)) {{
__{prefix}_assert_fail_{type}(file, line, "{comp}",
_lh, 0, _rh, 0);
}}
}} while (0)
"""
},
'mem': {
'ctype': 'const void *',
'fail': FAIL,
'print': """
__attribute__((unused))
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
const uint8_t *s = v;
printf("\\\"");
for (size_t i = 0; i < size && i < {maxwidth}; i++) {{
if (s[i] >= ' ' && s[i] <= '~') {{
printf("%c", s[i]);
}} else {{
printf("\\\\x%02x", s[i]);
}}
}}
if (size > {maxwidth}) {{
printf("...");
}}
printf("\\\"");
}}
""",
'assert': """
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh, size)
do {{
const void *_lh = lh;
const void *_rh = rh;
if (!(memcmp(_lh, _rh, size) {op} 0)) {{
__{prefix}_assert_fail_{type}(file, line, "{comp}",
_lh, size, _rh, size);
}}
}} while (0)
"""
},
'str': {
'ctype': 'const char *',
'fail': FAIL,
'print': """
__attribute__((unused))
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
__{prefix}_assert_print_mem(v, size);
}}
""",
'assert': """
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
do {{
const char *_lh = lh;
const char *_rh = rh;
if (!(strcmp(_lh, _rh) {op} 0)) {{
__{prefix}_assert_fail_{type}(file, line, "{comp}",
_lh, strlen(_lh), _rh, strlen(_rh));
}}
}} while (0)
"""
}
}
def mkdecl(type, comp, op): def mkdecls(outf, maxwidth=16):
return (( outf.write("#include <stdio.h>\n")
"#define "+ASSERT_TARGET+"(lh, rh, file, line, func)" outf.write("#include <stdbool.h>\n")
" do {{"+re.sub('\s+', ' ', ASSERT_TESTS[type])+"}} while (0)\n") outf.write("#include <stdint.h>\n")
.format( outf.write("#include <inttypes.h>\n")
type=type, TYPE=type.upper(), outf.write("#include <signal.h>\n")
comp=comp, COMP=comp.upper(),
lh='lh', rh='rh', op=op,
file='file', line='line', func='func'))
# add custom until combinator for type, desc in sorted(TYPE.items()):
def until(self, end): format = {
return end.should_fail('should fail').then(self).many() 'type': type.lower(), 'TYPE': type.upper(),
p.Parser.until = until 'ctype': desc['ctype'],
'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
'maxwidth': maxwidth,
}
outf.write(re.sub('\s+', ' ',
desc['print'].strip().format(**format))+'\n')
outf.write(re.sub('\s+', ' ',
desc['fail'].strip().format(**format))+'\n')
pcomp = ( for op, comp in sorted(COMP.items()):
p.string('==').tag('eq') | format.update({
p.string('!=').tag('ne') | 'comp': comp.lower(), 'COMP': comp.upper(),
p.string('<=').tag('le') | 'op': op,
p.string('>=').tag('ge') | })
p.string('<').tag('lt') | outf.write(re.sub('\s+', ' ',
p.string('>').tag('gt')); desc['assert'].strip().format(**format))+'\n')
plogic = p.string('&&') | p.string('||') def mkassert(type, comp, lh, rh, size=None):
format = {
@p.generate 'type': type.lower(), 'TYPE': type.upper(),
def pstrassert(): 'comp': comp.lower(), 'COMP': comp.upper(),
yield ASSERT_PATTERN + p.regex('\s*') + p.string('(') + p.regex('\s*') 'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
yield p.string('strcmp') + p.regex('\s*') + p.string('(') + p.regex('\s*') 'lh': lh.strip(),
lh = yield pexpr.until(p.string(',') | p.string(')') | plogic) 'rh': rh.strip(),
yield p.string(',') + p.regex('\s*') 'size': size,
rh = yield pexpr.until(p.string(')') | plogic) }
yield p.string(')') + p.regex('\s*') if size:
op = yield pcomp return ((ASSERT + '(__FILE__, __LINE__, {lh}, {rh}, {size})')
yield p.regex('\s*') + p.string('0') + p.regex('\s*') + p.string(')') .format(**format))
return mkassert(''.join(lh), ''.join(rh), 'str', op[0])
@p.generate
def pintassert():
yield ASSERT_PATTERN + p.regex('\s*') + p.string('(') + p.regex('\s*')
lh = yield pexpr.until(pcomp | p.string(')') | plogic)
op = yield pcomp
rh = yield pexpr.until(p.string(')') | plogic)
yield p.string(')')
return mkassert(''.join(lh), ''.join(rh), 'int', op[0])
@p.generate
def pboolassert():
yield ASSERT_PATTERN + p.regex('\s*') + p.string('(') + p.regex('\s*')
expr = yield pexpr.until(p.string(')'))
yield p.string(')')
return mkassert(''.join(expr), 'true', 'bool', 'eq')
passert = p.peek(ASSERT_PATTERN) >> (pstrassert | pintassert | pboolassert)
@p.generate
def pcomment1():
yield p.string('//')
s = yield p.regex('[^\\n]*')
yield p.string('\n')
return '//' + s + '\n'
@p.generate
def pcomment2():
yield p.string('/*')
s = yield p.regex('((?!\*/).)*')
yield p.string('*/')
return '/*' + ''.join(s) + '*/'
@p.generate
def pcomment3():
yield p.string('#')
s = yield p.regex('[^\\n]*')
yield p.string('\n')
return '#' + s + '\n'
pws = p.regex('\s+') | pcomment1 | pcomment2 | pcomment3
@p.generate
def pstring():
q = yield p.regex('["\']')
s = yield (p.string('\\%s' % q) | p.regex('[^%s]' % q)).many()
yield p.string(q)
return q + ''.join(s) + q
@p.generate
def pnested():
l = yield p.string('(')
n = yield pexpr.until(p.string(')'))
r = yield p.string(')')
return l + ''.join(n) + r
pexpr = (
# shortcut for a bit better performance
p.regex('[^%s/#\'"():;{}=><,&|-]+' % ASSERT_CHARS) |
pws |
passert |
pstring |
pnested |
p.string('->') |
p.regex('.', re.DOTALL))
@p.generate
def pstmt():
ws = yield pws.many()
lh = yield pexpr.until(p.string('=>') | p.regex('[:;{}]'))
op = yield p.string('=>').optional()
if op == '=>':
rh = yield pstmt
return ''.join(ws) + mkassert(''.join(lh), rh, 'int', 'eq')
else: else:
return ''.join(ws) + ''.join(lh) return ((ASSERT + '(__FILE__, __LINE__, {lh}, {rh})')
.format(**format))
# simple recursive descent parser
LEX = {
'ws': [r'(?:\s|\n|#.*?\n|//.*?\n|/\*.*?\*/)+'],
'assert': PATTERN,
'string': [r'"(?:\\.|[^"])*"', r"'(?:\\.|[^'])\'"],
'arrow': ['=>'],
'paren': ['\(', '\)'],
'op': ['strcmp', 'memcmp', '->'],
'comp': ['==', '!=', '<=', '>=', '<', '>'],
'logic': ['\&\&', '\|\|'],
'sep': [':', ';', '\{', '\}', ','],
}
class ParseFailure(Exception):
def __init__(self, expected, found):
self.expected = expected
self.found = found
def __str__(self):
return "expected %r, found %s..." % (
self.expected, repr(self.found)[:70])
class Parse:
def __init__(self, inf, lexemes):
p = '|'.join('(?P<%s>%s)' % (n, '|'.join(l))
for n, l in lexemes.items())
p = re.compile(p, re.DOTALL)
data = inf.read()
tokens = []
while True:
m = p.search(data)
if m:
if m.start() > 0:
tokens.append((None, data[:m.start()]))
tokens.append((m.lastgroup, m.group()))
data = data[m.end():]
else:
tokens.append((None, data))
break
self.tokens = tokens
self.off = 0
def lookahead(self, *pattern):
if self.off < len(self.tokens):
token = self.tokens[self.off]
if token[0] in pattern or token[1] in pattern:
self.m = token[1]
return self.m
self.m = None
return self.m
def accept(self, *patterns):
m = self.lookahead(*patterns)
if m is not None:
self.off += 1
return m
def expect(self, *patterns):
m = self.accept(*patterns)
if not m:
raise ParseFailure(patterns, self.tokens[self.off:])
return m
def push(self):
return self.off
def pop(self, state):
self.off = state
def passert(p):
def pastr(p):
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
p.expect('strcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
lh = pexpr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
rh = pexpr(p) ; p.accept('ws')
p.expect(')') ; p.accept('ws')
comp = p.expect('comp') ; p.accept('ws')
p.expect('0') ; p.accept('ws')
p.expect(')')
return mkassert('str', COMP[comp], lh, rh)
def pamem(p):
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
p.expect('memcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
lh = pexpr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
rh = pexpr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
size = pexpr(p) ; p.accept('ws')
p.expect(')') ; p.accept('ws')
comp = p.expect('comp') ; p.accept('ws')
p.expect('0') ; p.accept('ws')
p.expect(')')
return mkassert('mem', COMP[comp], lh, rh, size)
def paint(p):
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
lh = pexpr(p) ; p.accept('ws')
comp = p.expect('comp') ; p.accept('ws')
rh = pexpr(p) ; p.accept('ws')
p.expect(')')
return mkassert('int', COMP[comp], lh, rh)
def pabool(p):
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
lh = pexprs(p) ; p.accept('ws')
p.expect(')')
return mkassert('bool', 'eq', lh, 'true')
def pa(p):
return p.expect('assert')
state = p.push()
lastf = None
for pa in [pastr, pamem, paint, pabool, pa]:
try:
return pa(p)
except ParseFailure as f:
p.pop(state)
lastf = f
else:
raise lastf
def pexpr(p):
res = []
while True:
if p.accept('('):
res.append(p.m)
while True:
res.append(pexprs(p))
if p.accept('sep'):
res.append(p.m)
else:
break
res.append(p.expect(')'))
elif p.lookahead('assert'):
res.append(passert(p))
elif p.accept('assert', 'ws', 'string', 'op', None):
res.append(p.m)
else:
return ''.join(res)
def pexprs(p):
res = []
while True:
res.append(pexpr(p))
if p.accept('comp', 'logic', ','):
res.append(p.m)
else:
return ''.join(res)
def pstmt(p):
ws = p.accept('ws') or ''
lh = pexprs(p)
if p.accept('=>'):
rh = pexprs(p)
return ws + mkassert('int', 'eq', lh, rh)
else:
return ws + lh
@p.generate
def pstmts():
a = yield pstmt
b = yield (p.regex('[:;{}]') + pstmt).many()
return [a] + b
def main(args): def main(args):
inf = open(args.input, 'r') if args.input else sys.stdin inf = open(args.input, 'r') if args.input else sys.stdin
outf = open(args.output, 'w') if args.output else sys.stdout outf = open(args.output, 'w') if args.output else sys.stdout
# parse C code lexemes = LEX.copy()
input = inf.read() if args.pattern:
stmts = pstmts.parse(input) lexemes['assert'] = args.pattern
p = Parse(inf, lexemes)
# write extra verbose asserts # write extra verbose asserts
outf.write("#include <stdbool.h>\n") mkdecls(outf, maxwidth=args.maxwidth)
outf.write("#include <stdint.h>\n")
outf.write("#include <inttypes.h>\n")
outf.write("#include <signal.h>\n")
outf.write(mkdecl('int', 'eq', '=='))
outf.write(mkdecl('int', 'ne', '!='))
outf.write(mkdecl('int', 'lt', '<'))
outf.write(mkdecl('int', 'gt', '>'))
outf.write(mkdecl('int', 'le', '<='))
outf.write(mkdecl('int', 'ge', '>='))
outf.write(mkdecl('str', 'eq', '=='))
outf.write(mkdecl('str', 'ne', '!='))
outf.write(mkdecl('str', 'lt', '<'))
outf.write(mkdecl('str', 'gt', '>'))
outf.write(mkdecl('str', 'le', '<='))
outf.write(mkdecl('str', 'ge', '>='))
outf.write(mkdecl('bool', 'eq', '=='))
if args.input: if args.input:
outf.write("#line %d \"%s\"\n" % (1, args.input)) outf.write("#line %d \"%s\"\n" % (1, args.input))
# write parsed statements # parse and write out stmt at a time
for stmt in stmts: try:
outf.write(stmt) while True:
outf.write(pstmt(p))
if p.accept('sep'):
outf.write(p.m)
else:
break
except ParseFailure as f:
pass
for i in range(p.off, len(p.tokens)):
outf.write(p.tokens[i][1])
if __name__ == "__main__": if __name__ == "__main__":
import argparse import argparse
@@ -210,6 +374,10 @@ if __name__ == "__main__":
description="Cpp step that increases assert verbosity") description="Cpp step that increases assert verbosity")
parser.add_argument('input', nargs='?', parser.add_argument('input', nargs='?',
help="Input C file after cpp.") help="Input C file after cpp.")
parser.add_argument('-o', '--output', parser.add_argument('-o', '--output', required=True,
help="Output C file.") help="Output C file.")
parser.add_argument('-p', '--pattern', action='append',
help="Patterns to search for starting an assert statement.")
parser.add_argument('--maxwidth', default=MAXWIDTH, type=int,
help="Maximum number of characters to display for strcmp and memcmp.")
main(parser.parse_args()) main(parser.parse_args())

View File

@@ -4,24 +4,6 @@
# .toml files stored in the tests directory. # .toml files stored in the tests directory.
# #
# TODO
# x nargs > 1?
# x show perm config on failure
# x filtering
# n show perm config on verbose?
# x better lineno tracking for cases?
# n non-int perms?
# x different path format?
# - suite.prologue, suite.epilogue
# x in
# x change BLOCK_CYCLES to -1 by default
# x change persist behaviour
# x config chaining correct
# - why can't gdb see my defines?
# - say no to internal?
# x buffering stdout issues?
# - debug fast
import toml import toml
import glob import glob
import re import re
@@ -125,6 +107,7 @@ class TestFailure(Exception):
class TestCase: class TestCase:
def __init__(self, config, filter=filter, def __init__(self, config, filter=filter,
suite=None, caseno=None, lineno=None, **_): suite=None, caseno=None, lineno=None, **_):
self.config = config
self.filter = filter self.filter = filter
self.suite = suite self.suite = suite
self.caseno = caseno self.caseno = caseno
@@ -150,8 +133,10 @@ class TestCase:
return '%s#%d' % ( return '%s#%d' % (
self.suite.name, self.caseno) self.suite.name, self.caseno)
def permute(self, defines, permno=None, **_): def permute(self, class_=None, defines={}, permno=None, **_):
ncase = copy.copy(self) ncase = (class_ or type(self))(self.config)
for k, v in self.__dict__.items():
setattr(ncase, k, v)
ncase.case = self ncase.case = self
ncase.perms = [ncase] ncase.perms = [ncase]
ncase.permno = permno ncase.permno = permno
@@ -194,6 +179,8 @@ class TestCase:
len(self.filter) >= 2 and len(self.filter) >= 2 and
self.filter[1] != self.permno): self.filter[1] != self.permno):
return False return False
elif args.get('no_internal', False) and self.in_ is not None:
return False
elif self.if_ is not None: elif self.if_ is not None:
return eval(self.if_, None, self.defines.copy()) return eval(self.if_, None, self.defines.copy())
else: else:
@@ -210,6 +197,8 @@ class TestCase:
if persist != 'noerase': if persist != 'noerase':
try: try:
os.remove(self.suite.path + '.disk') os.remove(self.suite.path + '.disk')
if args.get('verbose', False):
print('rm', self.suite.path + '.disk')
except FileNotFoundError: except FileNotFoundError:
pass pass
@@ -225,7 +214,7 @@ class TestCase:
if gdb == 'assert': if gdb == 'assert':
ncmd.extend(['-ex', 'r']) ncmd.extend(['-ex', 'r'])
if failure.assert_: if failure.assert_:
ncmd.extend(['-ex', 'up']) ncmd.extend(['-ex', 'up 2'])
elif gdb == 'start': elif gdb == 'start':
ncmd.extend([ ncmd.extend([
'-ex', 'b %s:%d' % (self.suite.path, self.code_lineno), '-ex', 'b %s:%d' % (self.suite.path, self.code_lineno),
@@ -331,13 +320,15 @@ class ReentrantTestCase(TestCase):
raise raise
class TestSuite: class TestSuite:
def __init__(self, path, filter=None, TestCase=TestCase, **args): def __init__(self, path, classes=[TestCase], defines={},
filter=None, **args):
self.name = os.path.basename(path) self.name = os.path.basename(path)
if self.name.endswith('.toml'): if self.name.endswith('.toml'):
self.name = self.name[:-len('.toml')] self.name = self.name[:-len('.toml')]
self.path = path self.path = path
self.classes = classes
self.defines = defines
self.filter = filter self.filter = filter
self.TestCase = TestCase
with open(path) as f: with open(path) as f:
# load tests # load tests
@@ -356,7 +347,9 @@ class TestSuite:
code_linenos.reverse() code_linenos.reverse()
# grab global config # grab global config
self.defines = config.get('define', {}) for k, v in config.get('define', {}).items():
if k not in self.defines:
self.defines[k] = v
self.code = config.get('code', None) self.code = config.get('code', None)
if self.code is not None: if self.code is not None:
self.code_lineno = code_linenos.pop() self.code_lineno = code_linenos.pop()
@@ -372,8 +365,8 @@ class TestSuite:
if k not in case: if k not in case:
case[k] = v case[k] = v
# initialize test case # initialize test case
self.cases.append(self.TestCase(case, filter=filter, self.cases.append(TestCase(case, filter=filter,
suite=self, caseno=i, lineno=lineno, **args)) suite=self, caseno=i+1, lineno=lineno, **args))
def __str__(self): def __str__(self):
return self.name return self.name
@@ -381,14 +374,14 @@ class TestSuite:
def __lt__(self, other): def __lt__(self, other):
return self.name < other.name return self.name < other.name
def permute(self, defines={}, **args): def permute(self, **args):
for case in self.cases: for case in self.cases:
# lets find all parameterized definitions, in one of [args.D, # lets find all parameterized definitions, in one of [args.D,
# suite.defines, case.defines, DEFINES]. Note that each of these # suite.defines, case.defines, DEFINES]. Note that each of these
# can be either a dict of defines, or a list of dicts, expressing # can be either a dict of defines, or a list of dicts, expressing
# an initial set of permutations. # an initial set of permutations.
pending = [{}] pending = [{}]
for inits in [defines, self.defines, case.defines, DEFINES]: for inits in [self.defines, case.defines, DEFINES]:
if not isinstance(inits, list): if not isinstance(inits, list):
inits = [inits] inits = [inits]
@@ -422,8 +415,10 @@ class TestSuite:
# generate permutations # generate permutations
case.perms = [] case.perms = []
for i, perm in enumerate(expanded): for i, (class_, defines) in enumerate(
case.perms.append(case.permute(perm, permno=i, **args)) it.product(self.classes, expanded)):
case.perms.append(case.permute(
class_, defines, permno=i+1, **args))
# also track non-unique defines # also track non-unique defines
case.defines = {} case.defines = {}
@@ -519,16 +514,12 @@ class TestSuite:
self.target = self.path + '.test' self.target = self.path + '.test'
return self.makefile, self.target return self.makefile, self.target
def test(self, caseno=None, permno=None, **args): def test(self, **args):
# run test suite! # run test suite!
if not args.get('verbose', True): if not args.get('verbose', True):
sys.stdout.write(self.name + ' ') sys.stdout.write(self.name + ' ')
sys.stdout.flush() sys.stdout.flush()
for perm in self.perms: for perm in self.perms:
if caseno is not None and perm.caseno != caseno:
continue
if permno is not None and perm.permno != permno:
continue
if not perm.shouldtest(**args): if not perm.shouldtest(**args):
continue continue
@@ -553,6 +544,23 @@ class TestSuite:
sys.stdout.write('\n') sys.stdout.write('\n')
def main(**args): def main(**args):
# figure out explicit defines
defines = {}
for define in args['D']:
k, v, *_ = define.split('=', 2) + ['']
defines[k] = v
# and what class of TestCase to run
classes = []
if args.get('normal', False):
classes.append(TestCase)
if args.get('reentrant', False):
classes.append(ReentrantTestCase)
if args.get('valgrind', False):
classes.append(ValgrindTestCase)
if not classes:
classes = [TestCase]
suites = [] suites = []
for testpath in args['testpaths']: for testpath in args['testpaths']:
# optionally specified test case/perm # optionally specified test case/perm
@@ -571,27 +579,14 @@ def main(**args):
# find tests # find tests
for path in glob.glob(testpath): for path in glob.glob(testpath):
if args.get('valgrind', False): suites.append(TestSuite(path, classes, defines, filter, **args))
TestCase_ = ValgrindTestCase
elif args.get('reentrant', False):
TestCase_ = ReentrantTestCase
else:
TestCase_ = TestCase
suites.append(TestSuite(path,
filter=filter, TestCase=TestCase_, **args))
# sort for reproducability # sort for reproducability
suites = sorted(suites) suites = sorted(suites)
# generate permutations # generate permutations
defines = {}
for define in args['D']:
k, v, *_ = define.split('=', 2) + ['']
defines[k] = v
for suite in suites: for suite in suites:
suite.permute(defines, **args) suite.permute(**args)
# build tests in parallel # build tests in parallel
print('====== building ======') print('====== building ======')
@@ -736,10 +731,14 @@ if __name__ == "__main__":
parser.add_argument('-g', '--gdb', choices=['init', 'start', 'assert'], parser.add_argument('-g', '--gdb', choices=['init', 'start', 'assert'],
nargs='?', const='assert', nargs='?', const='assert',
help="Drop into gdb on test failure.") help="Drop into gdb on test failure.")
parser.add_argument('--valgrind', action='store_true', parser.add_argument('--no-internal', action='store_true',
help="Run non-leaky tests under valgrind to check for memory leaks.") help="Don't run tests that require internal knowledge.")
parser.add_argument('--reentrant', action='store_true', parser.add_argument('-n', '--normal', action='store_true',
help="Run tests normally.")
parser.add_argument('-r', '--reentrant', action='store_true',
help="Run reentrant tests with simulated power-loss.") help="Run reentrant tests with simulated power-loss.")
parser.add_argument('-V', '--valgrind', action='store_true',
help="Run non-leaky tests under valgrind to check for memory leaks.")
parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '), parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '),
help="Run tests with another executable prefixed on the command line.") help="Run tests with another executable prefixed on the command line.")
sys.exit(main(**vars(parser.parse_args()))) sys.exit(main(**vars(parser.parse_args())))