Created initial implementation of revamped test.py

This is the start of reworking littlefs's testing framework based on
lessons learned from the initial testing framework.

1. The testing framework needs to be _flexible_. It was hacky, which by
   itself isn't a downside, but it wasn't _flexible_. This limited what
   could be done with the tests and there ended up being many
   workarounds just to reproduce bugs.

   The idea behind this revamped framework is to separate the
   description of tests (tests/test_dirs.toml) and the running of tests
   (scripts/test.py).

   Now, with the logic moved entirely to python, it's possible to run
   the test under varying environments. In addition to the "just don't
   assert" run, I'm also looking to run the tests in valgrind for memory
   checking, and an environment with simulated power-loss.

   The test description can also contain abstract attributes that help
   control how tests can be run, such as "leaky" to identify tests where
   memory leaks are expected. This keeps test limitations at a minimum
   without limiting how the tests can be run.

2. Multi-stage-process tests didn't really add value and limited what
   the testing environment could do.

   Unmounting + mounting can be done in a single process to test the
   same logic. It would be really difficult to make this fail only
   when memory is zeroed, though that can still be caught by
   power-resilient tests.

   Requiring every test to be a single process adds several options
   for test execution, such as using a RAM-backed block device for
   speed, or even running the tests on a device.

3. Added fancy assert interception. This wasn't really a requirement,
   but something I've been wanting to experiment with for a while.

   During testing, scripts/explode_asserts.py is added to the build
   process. This is a custom C-preprocessor that parses out assert
   statements and replaces them with _very_ verbose asserts that
   wouldn't normally be possible with just C macros.

   It even goes as far as to report the arguments to strcmp, since the
   lack of visibility here was very annoying.

   tests_/test_dirs.toml:186:assert: assert failed with "..", expected eq "..."
       assert(strcmp(info.name, "...") == 0);

   One downside is that simply parsing C in python is slower than the
   entire rest of the compilation, but fortunately this can be
   alleviated by parallelizing the test builds through make.

Other neat bits:
- All generated files are a suffix of the test description, this helps
  cleanup and means it's (theoretically) possible to parallelize the
  tests.
- The generated test.c is base64-encoded and embedded in an ad-hoc
  Makefile; this means it doesn't force a rebuild of tests all the time.
- Test parameterizing is now easier.
- Hopefully this framework can be repurposed also for benchmarks in the
  future.
This commit is contained in:
Christopher Haster
2019-12-28 23:13:59 -06:00
parent ce2c01f098
commit f42e007709
6 changed files with 932 additions and 13 deletions

View File

@@ -59,15 +59,20 @@ test: \
test_corrupt
@rm test.c
test_%: tests/test_%.sh
ifdef QUIET
@./$< | sed -nu '/^[-=]/p'
else
./$<
endif
test_:
./scripts/test_.py $(TFLAGS)
-include $(DEP)
%?:
@echo '$($*)'
lfs: $(OBJ)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@

View File

@@ -158,9 +158,9 @@ int lfs_emubd_read(const struct lfs_config *cfg, lfs_block_t block,
uint8_t *data = buffer;
// Check if read is valid
assert(off % cfg->read_size == 0);
assert(size % cfg->read_size == 0);
assert(block < cfg->block_count);
LFS_ASSERT(off % cfg->read_size == 0);
LFS_ASSERT(size % cfg->read_size == 0);
LFS_ASSERT(block < cfg->block_count);
// Zero out buffer for debugging
memset(data, 0, size);
@@ -213,9 +213,9 @@ int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
const uint8_t *data = buffer;
// Check if write is valid
assert(off % cfg->prog_size == 0);
assert(size % cfg->prog_size == 0);
assert(block < cfg->block_count);
LFS_ASSERT(off % cfg->prog_size == 0);
LFS_ASSERT(size % cfg->prog_size == 0);
LFS_ASSERT(block < cfg->block_count);
// Program data
snprintf(emu->child, LFS_NAME_MAX, "%" PRIx32, block);
@@ -228,7 +228,7 @@ int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
}
// Check that file was erased
assert(f);
LFS_ASSERT(f);
int err = fseek(f, off, SEEK_SET);
if (err) {
@@ -287,7 +287,7 @@ int lfs_emubd_erase(const struct lfs_config *cfg, lfs_block_t block) {
lfs_emubd_t *emu = cfg->context;
// Check if erase is valid
assert(block < cfg->block_count);
LFS_ASSERT(block < cfg->block_count);
// Erase the block
snprintf(emu->child, LFS_NAME_MAX, "%" PRIx32, block);

View File

@@ -51,28 +51,28 @@ extern "C"
// Logging functions
#ifdef LFS_YES_TRACE
#define LFS_TRACE(fmt, ...) \
printf("lfs_trace:%d: " fmt "\n", __LINE__, __VA_ARGS__)
printf("%s:%d:trace: " fmt "\n", __FILE__, __LINE__, __VA_ARGS__)
#else
#define LFS_TRACE(fmt, ...)
#endif
#ifndef LFS_NO_DEBUG
#define LFS_DEBUG(fmt, ...) \
printf("lfs_debug:%d: " fmt "\n", __LINE__, __VA_ARGS__)
printf("%s:%d:debug: " fmt "\n", __FILE__, __LINE__, __VA_ARGS__)
#else
#define LFS_DEBUG(fmt, ...)
#endif
#ifndef LFS_NO_WARN
#define LFS_WARN(fmt, ...) \
printf("lfs_warn:%d: " fmt "\n", __LINE__, __VA_ARGS__)
printf("%s:%d:warn: " fmt "\n", __FILE__, __LINE__, __VA_ARGS__)
#else
#define LFS_WARN(fmt, ...)
#endif
#ifndef LFS_NO_ERROR
#define LFS_ERROR(fmt, ...) \
printf("lfs_error:%d: " fmt "\n", __LINE__, __VA_ARGS__)
printf("%s:%d:error: " fmt "\n", __FILE__, __LINE__, __VA_ARGS__)
#else
#define LFS_ERROR(fmt, ...)
#endif

211
scripts/explode_asserts.py Executable file
View File

@@ -0,0 +1,211 @@
#!/usr/bin/env python3
import parsy as p
import re
import io
import sys
# tokens that begin an assert statement in the input C source
ASSERT_PATTERN = p.string('LFS_ASSERT') | p.string('assert')
# first characters of the assert tokens above; used by pexpr's fast-path
# regex so it stops before a possible assert begins
ASSERT_CHARS = 'La'
# name template for the generated assert macros, e.g. __LFS_ASSERT_INT_EQ
ASSERT_TARGET = '__LFS_ASSERT_{TYPE}_{COMP}'
# C templates for the verbose assert bodies, keyed by operand type;
# {lh}/{rh}/{op}/{comp}/{file}/{line} are filled in by .format()
ASSERT_TESTS = {
'int': """
__typeof__({lh}) _lh = {lh};
__typeof__({lh}) _rh = (__typeof__({lh})){rh};
if (!(_lh {op} _rh)) {{
printf("%s:%d:assert: "
"assert failed with %"PRIiMAX", expected {comp} %"PRIiMAX"\\n",
{file}, {line}, (intmax_t)_lh, (intmax_t)_rh);
exit(-2);
}}
""",
'str': """
const char *_lh = {lh};
const char *_rh = {rh};
if (!(strcmp(_lh, _rh) {op} 0)) {{
printf("%s:%d:assert: "
"assert failed with \\\"%s\\\", expected {comp} \\\"%s\\\"\\n",
{file}, {line}, _lh, _rh);
exit(-2);
}}
""",
'bool': """
bool _lh = !!({lh});
bool _rh = !!({rh});
if (!(_lh {op} _rh)) {{
printf("%s:%d:assert: "
"assert failed with %s, expected {comp} %s\\n",
{file}, {line}, _lh ? "true" : "false", _rh ? "true" : "false");
exit(-2);
}}
""",
}
def mkassert(lh, rh='true', type='bool', comp='eq'):
    """Render a call to one of the generated __LFS_ASSERT_* macros.

    Forwards __FILE__/__LINE__/__func__ so the macro can report where the
    assert failed. Operands are stripped of surrounding spaces.
    """
    call = ASSERT_TARGET + "({lh}, {rh}, __FILE__, __LINE__, __func__)"
    return call.format(
        type=type, TYPE=type.upper(),
        comp=comp, COMP=comp.upper(),
        lh=lh.strip(' '),
        rh=rh.strip(' '))
def mkdecl(type, comp, op):
    """Emit one single-line #define for a __LFS_ASSERT_<TYPE>_<COMP> macro.

    The multi-line C template is collapsed onto one line (a #define must be
    a single logical line) before the placeholders are filled in.
    """
    # raw string: '\s+' is an invalid escape sequence in modern Python
    body = re.sub(r'\s+', ' ', ASSERT_TESTS[type])
    return (
        "#define "+ASSERT_TARGET+"(lh, rh, file, line, func)"
        " do {{"+body+"}} while (0)\n").format(
            type=type, TYPE=type.upper(),
            comp=comp, COMP=comp.upper(),
            lh='lh', rh='rh', op=op,
            file='file', line='line', func='func')
# add a custom `until` combinator: apply `self` repeatedly for as long as
# `stop` would NOT match at the current position (without consuming `stop`)
def until(self, stop):
    return stop.should_fail('should fail').then(self).many()
p.Parser.until = until
# comparison operators, tagged with the name used to select the assert
# macro; the two-character operators must be tried before their
# one-character prefixes so '<=' is not consumed as '<'
# (removed a stray trailing semicolon — a no-op in Python)
pcomp = (
    p.string('==').tag('eq') |
    p.string('!=').tag('ne') |
    p.string('<=').tag('le') |
    p.string('>=').tag('ge') |
    p.string('<').tag('lt') |
    p.string('>').tag('gt'))

# logical operators; they terminate an assert operand
plogic = p.string('&&') | p.string('||')
@p.generate
def pstrassert():
    # matches assert(strcmp(a, b) <op> 0) and rewrites it as a verbose
    # string-comparison assert that reports both operands
    yield ASSERT_PATTERN + p.regex(r'\s*') + p.string('(') + p.regex(r'\s*')
    yield p.string('strcmp') + p.regex(r'\s*') + p.string('(') + p.regex(r'\s*')
    left = yield pexpr.until(p.string(',') | p.string(')') | plogic)
    yield p.string(',') + p.regex(r'\s*')
    right = yield pexpr.until(p.string(')') | plogic)
    yield p.string(')') + p.regex(r'\s*')
    op = yield pcomp
    yield p.regex(r'\s*') + p.string('0') + p.regex(r'\s*') + p.string(')')
    return mkassert(''.join(left), ''.join(right), 'str', op[0])
@p.generate
def pintassert():
    # matches assert(lh <op> rh) and rewrites it as a verbose integer
    # comparison assert
    yield ASSERT_PATTERN + p.regex(r'\s*') + p.string('(') + p.regex(r'\s*')
    left = yield pexpr.until(pcomp | p.string(')') | plogic)
    op = yield pcomp
    right = yield pexpr.until(p.string(')') | plogic)
    yield p.string(')')
    return mkassert(''.join(left), ''.join(right), 'int', op[0])
@p.generate
def pboolassert():
    # fallback: assert(expr) with no recognized comparison becomes a
    # boolean assert against `true`
    yield ASSERT_PATTERN + p.regex(r'\s*') + p.string('(') + p.regex(r'\s*')
    expr = yield pexpr.until(p.string(')'))
    yield p.string(')')
    return mkassert(''.join(expr), 'true', 'bool', 'eq')

# peek for an assert token, then try the most specific form first
passert = p.peek(ASSERT_PATTERN) >> (pstrassert | pintassert | pboolassert)
@p.generate
def pcomment1():
    # C++-style line comment, passed through verbatim
    yield p.string('//')
    text = yield p.regex(r'[^\n]*')
    yield p.string('\n')
    return '//' + text + '\n'

@p.generate
def pcomment2():
    # C-style block comment, passed through verbatim
    yield p.string('/*')
    text = yield p.regex(r'((?!\*/).)*')
    yield p.string('*/')
    return '/*' + ''.join(text) + '*/'

@p.generate
def pcomment3():
    # preprocessor directive line, passed through verbatim
    yield p.string('#')
    text = yield p.regex(r'[^\n]*')
    yield p.string('\n')
    return '#' + text + '\n'

# "whitespace": real whitespace, comments, or preprocessor lines
pws = p.regex(r'\s+') | pcomment1 | pcomment2 | pcomment3
@p.generate
def pstring():
    # a C string or char literal, honoring backslash-escaped quotes
    # NOTE(review): an escaped backslash just before the closing quote
    # (e.g. "x\\") may be mis-parsed — verify against real inputs
    quote = yield p.regex('["\']')
    chars = yield (p.string('\\%s' % quote) | p.regex('[^%s]' % quote)).many()
    yield p.string(quote)
    return quote + ''.join(chars) + quote

@p.generate
def pnested():
    # a parenthesized subexpression, returned verbatim
    lparen = yield p.string('(')
    inner = yield pexpr.until(p.string(')'))
    rparen = yield p.string(')')
    return lparen + ''.join(inner) + rparen
# a single expression token; alternatives are ordered so the cheap regex
# fast-path swallows runs of "boring" characters before the heavier
# parsers (asserts, strings, nesting) are tried
pexpr = (
    # shortcut for a bit better performance
    p.regex('[^%s/#\'"();{}=><,&|-]+' % ASSERT_CHARS) |
    pws |
    passert |
    pstring |
    pnested |
    p.string('->') |
    p.regex('.', re.DOTALL))
@p.generate
def pstmt():
    # one statement; the "lh => rh" test shorthand becomes an int eq assert
    leading = yield pws.many()
    lh = yield pexpr.until(p.string('=>') | p.regex('[;{}]'))
    arrow = yield p.string('=>').optional()
    if arrow is not None:
        rh = yield pstmt
        return ''.join(leading) + mkassert(''.join(lh), rh, 'int', 'eq')
    else:
        return ''.join(leading) + ''.join(lh)

@p.generate
def pstmts():
    # a sequence of statements separated/terminated by ';', '{' or '}'
    first = yield pstmt
    rest = yield (p.regex('[;{}]') + pstmt).many()
    return [first] + rest
def main(args):
    """Run the explode-asserts preprocessor step.

    Reads C source (file or stdin), replaces assert()/LFS_ASSERT()
    statements with verbose self-reporting checks, prepends the macro
    definitions they rely on, and writes the result (file or stdout).
    """
    inf = open(args.input, 'r') if args.input else sys.stdin
    outf = open(args.output, 'w') if args.output else sys.stdout
    try:
        # parse C code (local renamed from `input` to avoid shadowing the
        # builtin of the same name)
        source = inf.read()
        stmts = pstmts.parse(source)

        # write extra verbose asserts
        outf.write("#include <stdbool.h>\n")
        outf.write("#include <stdint.h>\n")
        outf.write("#include <inttypes.h>\n")
        # declaration order matches the original explicit list:
        # int eq..ge, str eq..ge, then bool eq
        for type in ['int', 'str']:
            for comp, op in [
                    ('eq', '=='), ('ne', '!='),
                    ('lt', '<'), ('gt', '>'),
                    ('le', '<='), ('ge', '>=')]:
                outf.write(mkdecl(type, comp, op))
        outf.write(mkdecl('bool', 'eq', '=='))
        if args.input:
            # keep compiler diagnostics pointing at the original file
            outf.write("#line %d \"%s\"\n" % (1, args.input))

        # write parsed statements
        for stmt in stmts:
            outf.write(stmt)
    finally:
        # only close streams we opened; leave stdin/stdout untouched
        if args.input:
            inf.close()
        if args.output:
            outf.close()
if __name__ == "__main__":
    # command-line entry point
    import argparse
    argparser = argparse.ArgumentParser(
        description="Cpp step that increases assert verbosity")
    argparser.add_argument('input', nargs='?',
        help="Input C file after cpp.")
    argparser.add_argument('-o', '--output',
        help="Output C file.")
    main(argparser.parse_args())

494
scripts/test_.py Executable file
View File

@@ -0,0 +1,494 @@
#!/usr/bin/env python3
# TODO
# -v --verbose
# --color
# --gdb
# --reentrant
import toml
import glob
import re
import os
import io
import itertools as it
import collections.abc as abc
import subprocess as sp
import base64
import sys
import copy
# directory containing the toml test descriptions
TEST_DIR = 'tests_'
# extra make rules written into each generated per-suite makefile; they
# flatten sources, run explode_asserts.py over them, and link the .test
# binary (note: `cat <(echo ...)` requires a bash-compatible shell)
RULES = """
define FLATTEN
%$(subst /,.,$(target:.c=.t.c)): $(target)
cat <(echo '#line 1 "$$<"') $$< > $$@
endef
$(foreach target,$(SRC),$(eval $(FLATTEN)))
-include tests_/*.d
%.c: %.t.c
./scripts/explode_asserts.py $< -o $@
%.test: %.test.o $(foreach f,$(subst /,.,$(SRC:.c=.o)),%.test.$f)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
"""
# header emitted at the top of every generated test.c
GLOBALS = """
//////////////// AUTOGENERATED TEST ////////////////
#include "lfs.h"
#include "emubd/lfs_emubd.h"
#include <stdio.h>
"""
# default configuration defines; overridable per-suite, per-case, or from
# the command line via -D (values may be C expressions as strings)
DEFINES = {
"LFS_READ_SIZE": 16,
"LFS_PROG_SIZE": "LFS_READ_SIZE",
"LFS_BLOCK_SIZE": 512,
"LFS_BLOCK_COUNT": 1024,
"LFS_BLOCK_CYCLES": 1024,
"LFS_CACHE_SIZE": "(64 % LFS_PROG_SIZE == 0 ? 64 : LFS_PROG_SIZE)",
"LFS_LOOKAHEAD_SIZE": 16,
}
# C emitted before each test case body: scratch state and emubd setup
PROLOGUE = """
// prologue
__attribute__((unused)) lfs_t lfs;
__attribute__((unused)) lfs_emubd_t bd;
__attribute__((unused)) lfs_file_t file;
__attribute__((unused)) lfs_dir_t dir;
__attribute__((unused)) struct lfs_info info;
__attribute__((unused)) uint8_t buffer[1024];
__attribute__((unused)) char path[1024];
__attribute__((unused)) const struct lfs_config cfg = {
.context = &bd,
.read = &lfs_emubd_read,
.prog = &lfs_emubd_prog,
.erase = &lfs_emubd_erase,
.sync = &lfs_emubd_sync,
.read_size = LFS_READ_SIZE,
.prog_size = LFS_PROG_SIZE,
.block_size = LFS_BLOCK_SIZE,
.block_count = LFS_BLOCK_COUNT,
.block_cycles = LFS_BLOCK_CYCLES,
.cache_size = LFS_CACHE_SIZE,
.lookahead_size = LFS_LOOKAHEAD_SIZE,
};
lfs_emubd_create(&cfg, "blocks");
"""
# C emitted after each test case body: emubd teardown
EPILOGUE = """
// epilogue
lfs_emubd_destroy(&cfg);
"""
# ANSI-colored pass/fail markers used in terse (non-verbose) output
PASS = '\033[32m✓\033[0m'
FAIL = '\033[31m✗\033[0m'
class TestFailure(Exception):
    """Raised when a test permutation fails; carries context for reporting."""
    def __init__(self, case, stdout=None, assert_=None):
        self.case = case        # the failing test case/permutation
        self.stdout = stdout    # captured output lines, if any
        self.assert_ = assert_  # parsed assert-info dict, if one was found
# A single test case parsed from a [[case]] table in a suite's toml file.
# Knows how to emit itself as a C function (build) and how to run the
# compiled suite binary for one permutation (test).
class TestCase:
def __init__(self, suite, config, caseno=None, lineno=None, **_):
self.suite = suite
self.caseno = caseno
# line in the toml file where this case's code block starts
self.lineno = lineno
self.code = config['code']
self.defines = config.get('define', {})
# leaky=true marks cases with expected leaks; test() skips valgrind for them
self.leaky = config.get('leaky', False)
# render as suite[caseno] or, once permuted, suite[caseno,permno]
def __str__(self):
if hasattr(self, 'permno'):
return '%s[%d,%d]' % (self.suite.name, self.caseno, self.permno)
else:
return '%s[%d]' % (self.suite.name, self.caseno)
# shallow-copy this case into a permutation with concrete defines;
# the copy keeps a backref to the original in .case
def permute(self, defines, permno=None, **_):
ncase = copy.copy(self)
ncase.case = self
ncase.perms = [ncase]
ncase.permno = permno
ncase.defines = defines
return ncase
# emit this case as a C function: defines shared by every permutation
# become #defines, varying ones become int function parameters
def build(self, f, **_):
# prologue
f.write('void test_case%d(' % self.caseno)
defines = self.perms[0].defines
first = True
for k, v in sorted(defines.items()):
if not all(perm.defines[k] == v for perm in self.perms):
if not first:
f.write(',')
else:
first = False
f.write('\n')
f.write(8*' '+'int %s' % k)
f.write(') {\n')
defines = self.perms[0].defines
for k, v in sorted(defines.items()):
if all(perm.defines[k] == v for perm in self.perms):
f.write(4*' '+'#define %s %s\n' % (k, v))
f.write(PROLOGUE)
f.write('\n')
f.write(4*' '+'// test case %d\n' % self.caseno)
# #line keeps compiler/assert diagnostics pointing at the toml file
f.write(4*' '+'#line %d "%s"\n' % (self.lineno, self.suite.path))
# test case goes here
f.write(self.code)
# epilogue
f.write(EPILOGUE)
f.write('\n')
defines = self.perms[0].defines
for k, v in sorted(defines.items()):
if all(perm.defines[k] == v for perm in self.perms):
f.write(4*' '+'#undef %s\n' % k)
f.write('}\n')
# run this permutation's compiled binary, optionally under valgrind;
# on failure, try to parse the verbose assert output and raise TestFailure
def test(self, **args):
cmd = ['./%s.test' % self.suite.path,
repr(self.caseno), repr(self.permno)]
# run in valgrind?
if args.get('valgrind', False) and not self.leaky:
cmd = ['valgrind',
'--leak-check=full',
'--error-exitcode=4',
'-q'] + cmd
# run test case!
stdout = []
if args.get('verbose', False):
print(' '.join(cmd))
proc = sp.Popen(cmd,
universal_newlines=True,
bufsize=1,
stdout=sp.PIPE,
stderr=sp.STDOUT)
for line in iter(proc.stdout.readline, ''):
stdout.append(line)
if args.get('verbose', False):
sys.stdout.write(line)
proc.wait()
if proc.returncode != 0:
# failed, try to parse assert?
assert_ = None
for line in stdout:
try:
# matches "path:lineno:assert: message" as emitted by the
# exploded asserts; non-matching lines raise in the except below
m = re.match('^([^:\\n]+):([0-9]+):assert: (.*)$', line)
# found an assert, print info from file
with open(m.group(1)) as f:
lineno = int(m.group(2))
line = next(it.islice(f, lineno-1, None)).strip('\n')
assert_ = {
'path': m.group(1),
'lineno': lineno,
'line': line,
'message': m.group(3),
}
except:
pass
self.result = TestFailure(self, stdout, assert_)
raise self.result
else:
self.result = PASS
return self.result
# A suite of test cases loaded from one toml file. Responsible for
# permuting defines, generating/compiling test.c, and running the cases.
class TestSuite:
def __init__(self, path, TestCase=TestCase, **args):
self.name = os.path.basename(path)
if self.name.endswith('.toml'):
self.name = self.name[:-len('.toml')]
self.path = path
# the TestCase class is injectable so subclasses can extend behavior
self.TestCase = TestCase
with open(path) as f:
# load tests
config = toml.load(f)
# find line numbers (toml doesn't report them; re-scan the file for
# the start of each code block)
f.seek(0)
linenos = []
for i, line in enumerate(f):
if re.match(r'^\s*code\s*=\s*(\'\'\'|""")', line):
linenos.append(i + 2)
# grab global config
self.defines = config.get('define', {})
# create initial test cases
self.cases = []
for i, (case, lineno) in enumerate(zip(config['case'], linenos)):
self.cases.append(self.TestCase(
self, case, caseno=i, lineno=lineno, **args))
def __str__(self):
return self.name
# suites sort by name for reproducible ordering
def __lt__(self, other):
return self.name < other.name
# expand every case into concrete permutations of its defines;
# iterable define values produce one permutation per element
def permute(self, defines={}, **args):
for case in self.cases:
# let's find all parameterized definitions, in one of
# - args.D (defines)
# - suite.defines
# - case.defines
# - DEFINES
initial = {}
for define in it.chain(
defines.items(),
self.defines.items(),
case.defines.items(),
DEFINES.items()):
if define[0] not in initial:
try:
# eval lets toml express ranges, e.g. 'range(0, 100, 3)'
initial[define[0]] = eval(define[1])
except:
initial[define[0]] = define[1]
# expand permutations
expanded = []
pending = [initial]
while pending:
perm = pending.pop()
for k, v in sorted(perm.items()):
if not isinstance(v, str) and isinstance(v, abc.Iterable):
for nv in reversed(v):
nperm = perm.copy()
nperm[k] = nv
pending.append(nperm)
break
else:
expanded.append(perm)
case.perms = []
for i, defines in enumerate(expanded):
case.perms.append(case.permute(defines, permno=i, **args))
self.perms = [perm for case in self.cases for perm in case.perms]
return self.perms
# generate the suite's test.c and an ad-hoc makefile that embeds it
# base64-encoded (avoids gratuitous rebuilds); returns (makefile, target)
def build(self, **args):
# build test.c
f = io.StringIO()
f.write(GLOBALS)
for case in self.cases:
f.write('\n')
case.build(f, **args)
f.write('\n')
f.write('int main(int argc, char **argv) {\n')
f.write(4*' '+'int case_ = (argc == 3) ? atoi(argv[1]) : 0;\n')
f.write(4*' '+'int perm = (argc == 3) ? atoi(argv[2]) : 0;\n')
for perm in self.perms:
f.write(4*' '+'if (argc != 3 || '
'(case_ == %d && perm == %d)) { ' % (
perm.caseno, perm.permno))
f.write('test_case%d(' % perm.caseno)
first = True
# NOTE(review): the generator's `perm` shadows the outer loop's
# `perm`; `perm.case.perms` is evaluated in the outer scope before
# the generator runs, so this works — but verify, it's fragile
for k, v in sorted(perm.defines.items()):
if not all(perm.defines[k] == v for perm in perm.case.perms):
if not first:
f.write(', ')
else:
first = False
f.write(str(v))
f.write('); }\n')
f.write('}\n')
# add test-related rules
rules = RULES
# NOTE(review): converts RULES indentation to make's required tabs;
# looks like the intent is replacing the string's leading-space
# indent — confirm the replacement width against the original
rules = rules.replace(' ', '\t')
with open(self.path + '.test.mk', 'w') as mk:
mk.write(rules)
mk.write('\n')
mk.write('%s: %s\n' % (self.path+'.test.t.c', self.path))
mk.write('\tbase64 -d <<< ')
mk.write(base64.b64encode(
f.getvalue().encode('utf8')).decode('utf8'))
mk.write(' > $@\n')
self.makefile = self.path + '.test.mk'
self.target = self.path + '.test'
return self.makefile, self.target
# run all (or one selected) permutation; terse ✓/✗ output unless verbose
def test(self, caseno=None, permno=None, **args):
# run test suite!
if not args.get('verbose', True):
sys.stdout.write(self.name + ' ')
sys.stdout.flush()
for perm in self.perms:
if caseno is not None and perm.caseno != caseno:
continue
if permno is not None and perm.permno != permno:
continue
try:
perm.test(**args)
except TestFailure as failure:
if not args.get('verbose', True):
sys.stdout.write(FAIL)
sys.stdout.flush()
if not args.get('keep_going', False):
if not args.get('verbose', True):
sys.stdout.write('\n')
raise
else:
if not args.get('verbose', True):
sys.stdout.write(PASS)
sys.stdout.flush()
if not args.get('verbose', True):
sys.stdout.write('\n')
# Entry point: resolve the test path, load and permute the suites, build
# them through make, run them, and print a summary of results.
def main(**args):
testpath = args['testpath']
# optional brackets for specific test, e.g. test_dirs[0] or test_dirs[0,1]
m = re.search(r'\[(\d+)(?:,(\d+))?\]$', testpath)
if m:
caseno = int(m.group(1))
permno = int(m.group(2)) if m.group(2) is not None else None
testpath = testpath[:m.start()]
else:
caseno = None
permno = None
# figure out the suite's toml file
if os.path.isdir(testpath):
testpath = testpath + '/test_*.toml'
elif os.path.isfile(testpath):
testpath = testpath
elif testpath.endswith('.toml'):
testpath = TEST_DIR + '/' + testpath
else:
testpath = TEST_DIR + '/' + testpath + '.toml'
# find tests
suites = []
for path in glob.glob(testpath):
suites.append(TestSuite(path, **args))
# sort for reproducibility
suites = sorted(suites)
# generate permutations from -D command line overrides
defines = {}
for define in args['D']:
k, v, *_ = define.split('=', 2) + ['']
defines[k] = v
for suite in suites:
suite.permute(defines, **args)
# build tests in parallel (one makefile per suite, all passed to make)
print('====== building ======')
makefiles = []
targets = []
for suite in suites:
makefile, target = suite.build(**args)
makefiles.append(makefile)
targets.append(target)
cmd = (['make', '-f', 'Makefile'] +
list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
['CFLAGS+=-fdiagnostics-color=always'] +
[target for target in targets])
stdout = []
if args.get('verbose', False):
print(' '.join(cmd))
proc = sp.Popen(cmd,
universal_newlines=True,
bufsize=1,
stdout=sp.PIPE,
stderr=sp.STDOUT)
for line in iter(proc.stdout.readline, ''):
stdout.append(line)
if args.get('verbose', False):
sys.stdout.write(line)
proc.wait()
if proc.returncode != 0:
# build failed: dump the captured make output and bail
if not args.get('verbose', False):
for line in stdout:
sys.stdout.write(line)
sys.exit(-3)
print('built %d test suites, %d test cases, %d permutations' % (
len(suites),
sum(len(suite.cases) for suite in suites),
sum(len(suite.perms) for suite in suites)))
print('====== testing ======')
try:
for suite in suites:
suite.test(caseno, permno, **args)
except TestFailure:
# failure already recorded on the permutation; summarized below
pass
print('====== results ======')
passed = 0
failed = 0
for suite in suites:
for perm in suite.perms:
if not hasattr(perm, 'result'):
continue
if perm.result == PASS:
passed += 1
else:
sys.stdout.write("--- %s ---\n" % perm)
if perm.result.assert_:
for line in perm.result.stdout[:-1]:
sys.stdout.write(line)
sys.stdout.write(
"\033[97m{path}:{lineno}:\033[91massert:\033[0m "
"{message}\n{line}\n".format(
**perm.result.assert_))
else:
for line in perm.result.stdout:
sys.stdout.write(line)
sys.stdout.write('\n')
failed += 1
print('tests passed: %d' % passed)
print('tests failed: %d' % failed)
# command-line entry point
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Run parameterized tests in various configurations.")
parser.add_argument('testpath', nargs='?', default=TEST_DIR,
help="Description of test(s) to run. By default, this is all tests \
found in the \"{0}\" directory. Here, you can specify a different \
directory of tests, a specific file, a suite by name, and even a \
specific test case by adding brackets. For example \
\"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TEST_DIR))
parser.add_argument('-D', action='append', default=[],
help="Overriding parameter definitions.")
parser.add_argument('-v', '--verbose', action='store_true',
help="Output everything that is happening.")
parser.add_argument('-k', '--keep-going', action='store_true',
help="Run all tests instead of stopping on first error. Useful for CI.")
# parser.add_argument('--gdb', action='store_true',
# help="Run tests under gdb. Useful for debugging failures.")
parser.add_argument('--valgrind', action='store_true',
help="Run non-leaky tests under valgrind to check for memory leaks. \
Tests marked as \"leaky = true\" run normally.")
main(**vars(parser.parse_args()))

209
tests_/test_dirs.toml Normal file
View File

@@ -0,0 +1,209 @@
[[case]] # format
code = """
lfs_format(&lfs, &cfg) => 0;
"""
[[case]] # mount/unmount
code = """
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_unmount(&lfs) => 0;
"""
[[case]] # root
code = """
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
"""
[[case]] # directory creation
code = """
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "dir%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "dir%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
"""
define.N = 'range(0, 100, 3)'
[[case]] # directory removal
code = """
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
"""
define.N = 'range(3, 100, 11)'
[[case]] # file creation
code = """
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "file%03d", i);
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "file%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
"""
define.N = 'range(3, 100, 11)'
[[case]] # file removal
code = """
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
"""
define.N = 'range(0, 100, 3)'
[[case]] # error cases
code = """
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "potato") => 0;
lfs_file_open(&lfs, &file, "burito", LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "potato") => LFS_ERR_EXIST;
lfs_dir_open(&lfs, &dir, "tomato") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "burito") => LFS_ERR_NOTDIR;
lfs_file_open(&lfs, &file, "tomato", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "potato", LFS_O_RDONLY) => LFS_ERR_ISDIR;
lfs_unmount(&lfs) => 0;
"""