Compare commits


2 Commits

Author SHA1 Message Date
Christopher Haster
c1c0386bda Added test_new.toml with failure found by AFL
Found by pjsg
2020-03-26 15:27:30 -05:00
Christopher Haster
4677421aba Added "evil" tests and detection/recovery from bad pointers and infinite loops
These two features have been much requested by users, and several PRs
have even been proposed to fix them in specific cases. Before this, these
error conditions were usually caught by internal asserts; however,
asserting prevented users from implementing their own workarounds.

It's taken me a while to provide/accept a useful recovery mechanism
(returning LFS_ERR_CORRUPT instead of asserting) because my original thinking
was that these error conditions only occur due to bugs in the filesystem, and
these bugs should be fixed properly.

While I still think this is mostly true, the point has been made clear
that being able to recover from these conditions is definitely worth the
code cost. Hopefully this new behaviour helps the longevity of devices
even if the storage code fails.

Another, less important, reason I didn't want to accept fixes for these
situations was the lack of tests that prove the code's value. This has
been fixed with the new testing framework thanks to the addition of
"internal tests", which can call static C functions and take full
advantage of the filesystem's internal state.
2020-03-20 09:26:07 -05:00
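
A rough sketch of what this recovery path enables, assuming only the public littlefs API (lfs_mount, lfs_format, and the LFS_ERR_CORRUPT error code) and not taken from this diff: with bad pointers and metadata loops now reported as LFS_ERR_CORRUPT instead of tripping an assert, the application can choose its own fallback, for example reformatting and remounting.

// Hypothetical recovery sketch -- the mount_or_recover name and the
// reformat-on-corruption policy are illustrative, not part of littlefs.
#include "lfs.h"

// Mount, falling back to a destructive reformat if corruption is reported.
static int mount_or_recover(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_mount(lfs, cfg);
    if (err == LFS_ERR_CORRUPT) {
        // failures that previously hit an internal assert now surface
        // here, so the application decides what recovery means -- this
        // sketch simply discards the stored data and starts over
        err = lfs_format(lfs, cfg);
        if (err) {
            return err;
        }
        err = lfs_mount(lfs, cfg);
    }
    return err;
}

Whether a reformat, a read-only fallback, or an error report to the host is the right response is application-specific; the point of the change is that the choice is now possible.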
9 changed files with 341 additions and 1125 deletions

1163 lfs.c
File diff suppressed because it is too large

10 lfs.h

@@ -111,14 +111,12 @@ enum lfs_type {
LFS_TYPE_INLINESTRUCT = 0x201,
LFS_TYPE_SOFTTAIL = 0x600,
LFS_TYPE_HARDTAIL = 0x601,
LFS_TYPE_BRANCH = 0x681,
LFS_TYPE_MOVESTATE = 0x7ff,
// internal chip sources
LFS_FROM_NOOP = 0x000,
LFS_FROM_MOVE = 0x101,
LFS_FROM_DROP = 0x102,
LFS_FROM_USERATTRS = 0x103,
LFS_FROM_USERATTRS = 0x102,
};
// File open flags
@@ -312,11 +310,8 @@ typedef struct lfs_mdir {
uint32_t etag;
uint16_t count;
bool erased;
bool first; // TODO come on
bool split;
bool mustrelocate; // TODO not great either
lfs_block_t tail[2];
lfs_block_t branch[2];
} lfs_mdir_t;
// littlefs directory type
@@ -371,9 +366,6 @@ typedef struct lfs {
lfs_cache_t pcache;
lfs_block_t root[2];
lfs_block_t relocate_tail[2];
lfs_block_t relocate_end[2];
bool relocate_do_hack; // TODO fixme
struct lfs_mlist {
struct lfs_mlist *next;
uint16_t id;

scripts/readmdir.py

@@ -18,10 +18,9 @@ TAG_TYPES = {
'ctzstruct': (0x7ff, 0x202),
'inlinestruct': (0x7ff, 0x201),
'userattr': (0x700, 0x300),
'tail': (0x700, 0x600), # TODO rename these?
'tail': (0x700, 0x600),
'softtail': (0x7ff, 0x600),
'hardtail': (0x7ff, 0x601),
'branch': (0x7ff, 0x681),
'gstate': (0x700, 0x700),
'movestate': (0x7ff, 0x7ff),
'crc': (0x700, 0x500),
@@ -104,7 +103,7 @@ class Tag:
def mkmask(self):
return Tag(
0x780 if self.is_('tail') else 0x700 if self.isunique else 0x7ff, # TODO best way?
0x700 if self.isunique else 0x7ff,
0x3ff if self.isattr else 0,
0)
@@ -234,8 +233,8 @@ class MetadataPair:
def __lt__(self, other):
# corrupt blocks don't count
if not self or not other:
return bool(other)
if not self and other:
return True
# use sequence arithmetic to avoid overflow
return not ((other.rev - self.rev) & 0x80000000)
@@ -326,7 +325,7 @@ def main(args):
mdir.rev,
' (was %s)' % ', '.join('%d' % m.rev for m in mdir.pair[1:])
if len(mdir.pair) > 1 else '',
' (corrupted!)' if not mdir else ''))
' (corrupted)' if not mdir else ''))
if args.all:
mdir.dump_all(truncate=not args.no_truncate)
elif args.log:

scripts/readtree.py

@@ -5,7 +5,6 @@ import sys
import json
import io
import itertools as it
import collections as c
from readmdir import Tag, MetadataPair
def popc(x):
@@ -14,7 +13,7 @@ def popc(x):
def ctz(x):
return len(bin(x)) - len(bin(x).rstrip('0'))
def dumpentries(args, mdir, mdirs, f):
def dumpentries(args, mdir, f):
for k, id_ in enumerate(mdir.ids):
name = mdir[Tag('name', id_, 0)]
struct_ = mdir[Tag('struct', id_, 0)]
@@ -23,10 +22,8 @@ def dumpentries(args, mdir, mdirs, f):
id_, name.typerepr(),
json.dumps(name.data.decode('utf8')))
if struct_.is_('dirstruct'):
pair = struct.unpack('<II', struct_.data[:8].ljust(8, b'\xff'))
desc += " dir {%#x, %#x}%s" % (
pair[0], pair[1],
'?' if frozenset(pair) not in mdirs else '')
desc += " dir {%#x, %#x}" % struct.unpack(
'<II', struct_.data[:8].ljust(8, b'\xff'))
if struct_.is_('ctzstruct'):
desc += " ctz {%#x} size %d" % struct.unpack(
'<II', struct_.data[:8].ljust(8, b'\xff'))
@@ -95,17 +92,20 @@ def dumpentries(args, mdir, mdirs, f):
for c in map(chr, data[i:i+16]))))
def main(args):
superblock = None
gstate = b'\0\0\0\0\0\0\0\0\0\0\0\0'
mdirs = c.OrderedDict()
corrupted = []
cycle = False
with open(args.disk, 'rb') as f:
dirs = []
superblock = None
gstate = b''
mdirs = []
cycle = False
tail = (args.block1, args.block2)
while tail:
if frozenset(tail) in mdirs:
# cycle detected
cycle = tail
hard = False
while True:
for m in it.chain((m for d in dirs for m in d), mdirs):
if set(m.blocks) == set(tail):
# cycle detected
cycle = m.blocks
if cycle:
break
# load mdir
@@ -128,13 +128,6 @@ def main(args):
except KeyError:
mdir.tail = None
try:
mdir.branch = mdir[Tag('branch', 0, 0)]
if mdir.branch.size != 8 or mdir.branch.data == 8*b'\xff':
mdir.branch = None
except KeyError:
mdir.branch = None
# have superblock?
try:
nsuperblock = mdir[
@@ -151,59 +144,41 @@ def main(args):
except KeyError:
pass
# corrupted?
if not mdir:
corrupted.append(mdir)
# add to directories
mdirs.append(mdir)
if mdir.tail is None or not mdir.tail.is_('hardtail'):
dirs.append(mdirs)
mdirs = []
# add to metadata-pairs
mdirs[frozenset(mdir.blocks)] = mdir
tail = (struct.unpack('<II', mdir.tail.data)
if mdir.tail else None)
if mdir.tail is None:
break
# derive paths and build directories
dirs = {}
rogue = {}
pending = [('/', (args.block1, args.block2))]
tail = struct.unpack('<II', mdir.tail.data)
hard = mdir.tail.is_('hardtail')
# find paths
dirtable = {}
for dir in dirs:
dirtable[frozenset(dir[0].blocks)] = dir
pending = [("/", dirs[0])]
while pending:
path, branch = pending.pop(0)
dir = []
while branch and frozenset(branch) in mdirs:
mdir = mdirs[frozenset(branch)]
dir.append(mdir)
path, dir = pending.pop(0)
for mdir in dir:
for tag in mdir.tags:
if tag.is_('dir'):
try:
npath = path + '/' + tag.data.decode('utf8')
npath = npath.replace('//', '/')
npath = tag.data.decode('utf8')
dirstruct = mdir[Tag('dirstruct', tag.id, 0)]
npair = struct.unpack('<II', dirstruct.data)
pending.append((npath, npair))
nblocks = struct.unpack('<II', dirstruct.data)
nmdir = dirtable[frozenset(nblocks)]
pending.append(((path + '/' + npath), nmdir))
except KeyError:
pass
branch = (struct.unpack('<II', mdir.branch.data)
if mdir.branch else None)
dir[0].path = path.replace('//', '/')
if not dir:
rogue[path] = branch
else:
dirs[path] = dir
# also find orphans
not_orphans = {frozenset(mdir.blocks)
for dir in dirs.values()
for mdir in dir}
orphans = []
for pair, mdir in mdirs.items():
if pair not in not_orphans:
if len(orphans) > 0 and (pair == frozenset(
struct.unpack('<II', orphans[-1][-1].tail.data))):
orphans[-1].append(mdir)
else:
orphans.append([mdir])
# print littlefs + version info
# dump tree
version = ('?', '?')
if superblock:
version = tuple(reversed(
@@ -212,33 +187,24 @@ def main(args):
"data (truncated, if it fits)"
if not any([args.no_truncate, args.tags, args.log, args.all]) else ""))
# print gstate
badgstate = None
print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
if tag.size or not tag.isvalid:
print(" orphans >=%d" % max(tag.size, 1))
if tag.type:
if frozenset(blocks) not in mdirs:
badgstate = gstate
print(" move dir {%#x, %#x}%s id %d" % (
blocks[0], blocks[1],
'?' if frozenset(blocks) not in mdirs else '',
tag.id))
if gstate:
print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
if tag.size or not tag.isvalid:
print(" orphans >=%d" % max(tag.size, 1))
if tag.type:
print(" move dir {%#x, %#x} id %d" % (
blocks[0], blocks[1], tag.id))
# print dir info
for path, dir in it.chain(
sorted(dirs.items()),
zip(it.repeat(None), orphans)):
print("dir %s" % json.dumps(path) if path else "orphaned")
for i, dir in enumerate(dirs):
print("dir %s" % (json.dumps(dir[0].path)
if hasattr(dir[0], 'path') else '(orphan)'))
for j, mdir in enumerate(dir):
print("mdir {%#x, %#x} rev %d (was %d)%s%s" % (
mdir.blocks[0], mdir.blocks[1], mdir.rev, mdir.pair[1].rev,
' (corrupted!)' if not mdir else '',
' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data)
if mdir.tail else ''))
print("mdir {%#x, %#x} rev %d%s" % (
mdir.blocks[0], mdir.blocks[1], mdir.rev,
' (corrupted)' if not mdir else ''))
f = io.StringIO()
if args.tags:
@@ -248,38 +214,26 @@ def main(args):
elif args.all:
mdir.dump_all(f, truncate=not args.no_truncate)
else:
dumpentries(args, mdir, mdirs, f)
dumpentries(args, mdir, f)
lines = list(filter(None, f.getvalue().split('\n')))
for k, line in enumerate(lines):
print("%s %s" % (
' ' if j == len(dir)-1 else
' ' if i == len(dirs)-1 and j == len(dir)-1 else
'v' if k == len(lines)-1 else
'|' if path else '.',
'.' if j == len(dir)-1 else
'|',
line))
errcode = 0
for mdir in corrupted:
errcode = errcode or 1
print("*** corrupted mdir {%#x, %#x}! ***" % (
mdir.blocks[0], mdir.blocks[1]))
for path, pair in rogue.items():
errcode = errcode or 2
print("*** couldn't find dir %s {%#x, %#x}! ***" % (
json.dumps(path), pair[0], pair[1]))
if badgstate:
errcode = errcode or 3
print("*** bad gstate 0x%s! ***" %
''.join('%02x' % c for c in gstate))
if cycle:
print("*** cycle detected! -> {%#x, %#x} ***" % (cycle[0], cycle[1]))
if cycle:
errcode = errcode or 4
print("*** cycle detected {%#x, %#x}! ***" % (
cycle[0], cycle[1]))
return errcode
return 2
elif not all(mdir for dir in dirs for mdir in dir):
return 1
else:
return 0;
if __name__ == "__main__":
import argparse

scripts/test.py

@@ -231,7 +231,7 @@ class TestCase:
ncmd.extend(['-ex', 'r'])
if failure.assert_:
ncmd.extend(['-ex', 'up 2'])
elif gdb == 'start' or isinstance(gdb, int):
elif gdb == 'start':
ncmd.extend([
'-ex', 'b %s:%d' % (self.suite.path, self.code_lineno),
'-ex', 'r'])
@@ -329,9 +329,7 @@ class ReentrantTestCase(TestCase):
persist = 'noerase'
# exact cycle we should drop into debugger?
if gdb and failure and (
failure.cycleno == cycles or
(isinstance(gdb, int) and gdb == cycles)):
if gdb and failure and failure.cycleno == cycles:
return super().test(gdb=gdb, persist=persist, cycles=cycles,
failure=failure, **args)
@@ -762,8 +760,7 @@ if __name__ == "__main__":
help="Store disk image in a file.")
parser.add_argument('-b', '--build', action='store_true',
help="Only build the tests, do not execute.")
parser.add_argument('-g', '--gdb', metavar='{init,start,assert},CYCLE',
type=lambda n: n if n in {'init', 'start', 'assert'} else int(n, 0),
parser.add_argument('-g', '--gdb', choices=['init', 'start', 'assert'],
nargs='?', const='assert',
help="Drop into gdb on test failure.")
parser.add_argument('--no-internal', action='store_true',


@@ -246,8 +246,6 @@ code = '''
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
}
// TODO rm me
lfs_mkdir(&lfs, "a") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
@@ -258,9 +256,6 @@ code = '''
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "a") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "file%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;

tests/test_evil.toml

@@ -151,6 +151,7 @@ code = '''
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_STRUCT, 1, sizeof(struct lfs_ctz)), &ctz)
=> LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz));
lfs_ctz_fromle32(&ctz);
// rewrite block to contain bad pointer
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
@@ -245,6 +246,7 @@ code = '''
LFS_MKTAG(0x7ff, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
=> LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
lfs_pair_fromle32(pair);
// change tail-pointer to point to root
lfs_dir_fetch(&lfs, &mdir, pair) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
@@ -274,6 +276,7 @@ code = '''
LFS_MKTAG(0x7ff, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
=> LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
lfs_pair_fromle32(pair);
// change tail-pointer to point to ourself
lfs_dir_fetch(&lfs, &mdir, pair) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(

85 tests/test_new.toml (new file)

@@ -0,0 +1,85 @@
#open(1, "5file5.xxxxxxxxxxxx", 0x503) -> 0
# write(1, , 2007)[^ 1499 us] -> 2007
# write(1, , 2007)[^ 1411 us] -> 2007
# write(1, , 2007)[^ 1390 us] -> 2007
# write(1, , 2007)[^ 1401 us] -> 2007
# close(1) -> 0
# open(1, "1file1.xxxx", 0x503) -> 0
# mount
# open(0, "5file5.xxxxxxxxxxxx", 0x3) -> 0
# open(1, "5file5.xxxxxxxxxxxx", 0x503) -> 0
# close(1) -> 0
# open(1, "1file1.xxxx", 0x2) -> 0
# write(0, , 63) -> 63
#a.out: lfs.c:2169: lfs_ctz_find: Assertion `head >= 2 && head <= lfs->cfg->block_count' failed.
# close(0)Aborted
[[case]]
define.FILESIZE5 = '4*CHUNKSIZE5'
define.FILESIZE1 = '4*CHUNKSIZE1'
define.CHUNKSIZE5 = 2007
define.CHUNKSIZE1 = 63
code = '''
lfs_file_t files[2];
uint8_t chunk5[CHUNKSIZE5];
memset(chunk5, 'a', CHUNKSIZE5);
uint8_t chunk1[CHUNKSIZE1];
memset(chunk1, 'b', CHUNKSIZE1);
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &files[1], "5file5.xxxxxxxxxxxx",
LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (int i = 0; i < FILESIZE5/CHUNKSIZE5; i++) {
lfs_file_write(&lfs, &files[1], chunk5, CHUNKSIZE5) => CHUNKSIZE5;
}
lfs_file_close(&lfs, &files[1]) => 0;
lfs_file_open(&lfs, &files[1], "1file1.xxxx",
LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
// these should not change the result
// lfs_file_close(&lfs, &files[1]) => 0;
// lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &files[0], "5file5.xxxxxxxxxxxx",
LFS_O_RDWR) => 0;
lfs_file_open(&lfs, &files[1], "5file5.xxxxxxxxxxxx",
LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
lfs_file_close(&lfs, &files[1]) => 0;
lfs_file_open(&lfs, &files[1], "1file1.xxxx",
LFS_O_WRONLY) => 0;
for (int i = 0; i < FILESIZE1/CHUNKSIZE1; i++) {
lfs_file_write(&lfs, &files[1], chunk1, CHUNKSIZE1) => CHUNKSIZE1;
}
lfs_file_close(&lfs, &files[1]) => 0;
memset(chunk5, 'c', CHUNKSIZE5);
for (int i = 0; i < FILESIZE5/CHUNKSIZE5; i++) {
lfs_file_write(&lfs, &files[0], chunk5, CHUNKSIZE5) => CHUNKSIZE5;
}
lfs_file_close(&lfs, &files[0]) => 0;
lfs_unmount(&lfs) => 0;
// check results
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &files[0], "5file5.xxxxxxxxxxxx",
LFS_O_RDONLY) => 0;
for (int i = 0; i < FILESIZE5/CHUNKSIZE5; i++) {
uint8_t rchunk[CHUNKSIZE5];
lfs_file_read(&lfs, &files[0], rchunk, CHUNKSIZE5) => CHUNKSIZE5;
assert(memcmp(rchunk, chunk5, CHUNKSIZE5) == 0);
}
lfs_file_close(&lfs, &files[0]) => 0;
lfs_file_open(&lfs, &files[0], "1file1.xxxx",
LFS_O_RDONLY) => 0;
for (int i = 0; i < FILESIZE1/CHUNKSIZE1; i++) {
uint8_t rchunk[CHUNKSIZE1];
lfs_file_read(&lfs, &files[0], rchunk, CHUNKSIZE1) => CHUNKSIZE1;
assert(memcmp(rchunk, chunk1, CHUNKSIZE1) == 0);
}
lfs_file_close(&lfs, &files[0]) => 0;
lfs_unmount(&lfs) => 0;
'''

tests/test_relocations.toml

@@ -148,7 +148,7 @@ code = '''
# almost every tree operation needs a relocation
reentrant = true
# TODO fix this case, caused by non-DAG trees
#if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
define = [
{FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
@@ -210,7 +210,7 @@ code = '''
[[case]] # reentrant testing for relocations, but now with random renames!
reentrant = true
# TODO fix this case, caused by non-DAG trees
#if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
define = [
{FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},