Compare commits

..

2 Commits

Author SHA1 Message Date
Christopher Haster
c1c0386bda Added test_new.toml with failure found by AFL
Found by pjsg
2020-03-26 15:27:30 -05:00
Christopher Haster
4677421aba Added "evil" tests and detection/recovery from bad pointers and infinite loops
These two features have been much requested by users, and have even had
several PRs proposed to fix these in several cases. Before this, these
error conditions usually were caught by internal asserts, however
asserts prevented users from implementing their own workarounds.

It's taken me a while to provide/accept a useful recovery mechanism
(returning LFS_ERR_CORRUPT instead of asserting) because my original thinking
was that these error conditions only occur due to bugs in the filesystem, and
these bugs should be fixed properly.

While I still think this is mostly true, the point has been made clear
that being able to recover from these conditions is definitely worth the
code cost. Hopefully this new behaviour helps the longevity of devices
even if the storage code fails.

Another, less important, reason I didn't want to accept fixes for these
situations was the lack of tests that prove the code's value. This has
been fixed with the new testing framework thanks to the addition of
"internal tests" which can call C static functions and really take
advantage of the internal information of the filesystem.
2020-03-20 09:26:07 -05:00
9 changed files with 341 additions and 1125 deletions

1163
lfs.c

File diff suppressed because it is too large Load Diff

10
lfs.h
View File

@@ -111,14 +111,12 @@ enum lfs_type {
LFS_TYPE_INLINESTRUCT = 0x201, LFS_TYPE_INLINESTRUCT = 0x201,
LFS_TYPE_SOFTTAIL = 0x600, LFS_TYPE_SOFTTAIL = 0x600,
LFS_TYPE_HARDTAIL = 0x601, LFS_TYPE_HARDTAIL = 0x601,
LFS_TYPE_BRANCH = 0x681,
LFS_TYPE_MOVESTATE = 0x7ff, LFS_TYPE_MOVESTATE = 0x7ff,
// internal chip sources // internal chip sources
LFS_FROM_NOOP = 0x000, LFS_FROM_NOOP = 0x000,
LFS_FROM_MOVE = 0x101, LFS_FROM_MOVE = 0x101,
LFS_FROM_DROP = 0x102, LFS_FROM_USERATTRS = 0x102,
LFS_FROM_USERATTRS = 0x103,
}; };
// File open flags // File open flags
@@ -312,11 +310,8 @@ typedef struct lfs_mdir {
uint32_t etag; uint32_t etag;
uint16_t count; uint16_t count;
bool erased; bool erased;
bool first; // TODO come on
bool split; bool split;
bool mustrelocate; // TODO not great either
lfs_block_t tail[2]; lfs_block_t tail[2];
lfs_block_t branch[2];
} lfs_mdir_t; } lfs_mdir_t;
// littlefs directory type // littlefs directory type
@@ -371,9 +366,6 @@ typedef struct lfs {
lfs_cache_t pcache; lfs_cache_t pcache;
lfs_block_t root[2]; lfs_block_t root[2];
lfs_block_t relocate_tail[2];
lfs_block_t relocate_end[2];
bool relocate_do_hack; // TODO fixme
struct lfs_mlist { struct lfs_mlist {
struct lfs_mlist *next; struct lfs_mlist *next;
uint16_t id; uint16_t id;

View File

@@ -18,10 +18,9 @@ TAG_TYPES = {
'ctzstruct': (0x7ff, 0x202), 'ctzstruct': (0x7ff, 0x202),
'inlinestruct': (0x7ff, 0x201), 'inlinestruct': (0x7ff, 0x201),
'userattr': (0x700, 0x300), 'userattr': (0x700, 0x300),
'tail': (0x700, 0x600), # TODO rename these? 'tail': (0x700, 0x600),
'softtail': (0x7ff, 0x600), 'softtail': (0x7ff, 0x600),
'hardtail': (0x7ff, 0x601), 'hardtail': (0x7ff, 0x601),
'branch': (0x7ff, 0x681),
'gstate': (0x700, 0x700), 'gstate': (0x700, 0x700),
'movestate': (0x7ff, 0x7ff), 'movestate': (0x7ff, 0x7ff),
'crc': (0x700, 0x500), 'crc': (0x700, 0x500),
@@ -104,7 +103,7 @@ class Tag:
def mkmask(self): def mkmask(self):
return Tag( return Tag(
0x780 if self.is_('tail') else 0x700 if self.isunique else 0x7ff, # TODO best way? 0x700 if self.isunique else 0x7ff,
0x3ff if self.isattr else 0, 0x3ff if self.isattr else 0,
0) 0)
@@ -234,8 +233,8 @@ class MetadataPair:
def __lt__(self, other): def __lt__(self, other):
# corrupt blocks don't count # corrupt blocks don't count
if not self or not other: if not self and other:
return bool(other) return True
# use sequence arithmetic to avoid overflow # use sequence arithmetic to avoid overflow
return not ((other.rev - self.rev) & 0x80000000) return not ((other.rev - self.rev) & 0x80000000)
@@ -326,7 +325,7 @@ def main(args):
mdir.rev, mdir.rev,
' (was %s)' % ', '.join('%d' % m.rev for m in mdir.pair[1:]) ' (was %s)' % ', '.join('%d' % m.rev for m in mdir.pair[1:])
if len(mdir.pair) > 1 else '', if len(mdir.pair) > 1 else '',
' (corrupted!)' if not mdir else '')) ' (corrupted)' if not mdir else ''))
if args.all: if args.all:
mdir.dump_all(truncate=not args.no_truncate) mdir.dump_all(truncate=not args.no_truncate)
elif args.log: elif args.log:

View File

@@ -5,7 +5,6 @@ import sys
import json import json
import io import io
import itertools as it import itertools as it
import collections as c
from readmdir import Tag, MetadataPair from readmdir import Tag, MetadataPair
def popc(x): def popc(x):
@@ -14,7 +13,7 @@ def popc(x):
def ctz(x): def ctz(x):
return len(bin(x)) - len(bin(x).rstrip('0')) return len(bin(x)) - len(bin(x).rstrip('0'))
def dumpentries(args, mdir, mdirs, f): def dumpentries(args, mdir, f):
for k, id_ in enumerate(mdir.ids): for k, id_ in enumerate(mdir.ids):
name = mdir[Tag('name', id_, 0)] name = mdir[Tag('name', id_, 0)]
struct_ = mdir[Tag('struct', id_, 0)] struct_ = mdir[Tag('struct', id_, 0)]
@@ -23,10 +22,8 @@ def dumpentries(args, mdir, mdirs, f):
id_, name.typerepr(), id_, name.typerepr(),
json.dumps(name.data.decode('utf8'))) json.dumps(name.data.decode('utf8')))
if struct_.is_('dirstruct'): if struct_.is_('dirstruct'):
pair = struct.unpack('<II', struct_.data[:8].ljust(8, b'\xff')) desc += " dir {%#x, %#x}" % struct.unpack(
desc += " dir {%#x, %#x}%s" % ( '<II', struct_.data[:8].ljust(8, b'\xff'))
pair[0], pair[1],
'?' if frozenset(pair) not in mdirs else '')
if struct_.is_('ctzstruct'): if struct_.is_('ctzstruct'):
desc += " ctz {%#x} size %d" % struct.unpack( desc += " ctz {%#x} size %d" % struct.unpack(
'<II', struct_.data[:8].ljust(8, b'\xff')) '<II', struct_.data[:8].ljust(8, b'\xff'))
@@ -95,17 +92,20 @@ def dumpentries(args, mdir, mdirs, f):
for c in map(chr, data[i:i+16])))) for c in map(chr, data[i:i+16]))))
def main(args): def main(args):
superblock = None
gstate = b'\0\0\0\0\0\0\0\0\0\0\0\0'
mdirs = c.OrderedDict()
corrupted = []
cycle = False
with open(args.disk, 'rb') as f: with open(args.disk, 'rb') as f:
dirs = []
superblock = None
gstate = b''
mdirs = []
cycle = False
tail = (args.block1, args.block2) tail = (args.block1, args.block2)
while tail: hard = False
if frozenset(tail) in mdirs: while True:
# cycle detected for m in it.chain((m for d in dirs for m in d), mdirs):
cycle = tail if set(m.blocks) == set(tail):
# cycle detected
cycle = m.blocks
if cycle:
break break
# load mdir # load mdir
@@ -128,13 +128,6 @@ def main(args):
except KeyError: except KeyError:
mdir.tail = None mdir.tail = None
try:
mdir.branch = mdir[Tag('branch', 0, 0)]
if mdir.branch.size != 8 or mdir.branch.data == 8*b'\xff':
mdir.branch = None
except KeyError:
mdir.branch = None
# have superblock? # have superblock?
try: try:
nsuperblock = mdir[ nsuperblock = mdir[
@@ -151,59 +144,41 @@ def main(args):
except KeyError: except KeyError:
pass pass
# corrupted? # add to directories
if not mdir: mdirs.append(mdir)
corrupted.append(mdir) if mdir.tail is None or not mdir.tail.is_('hardtail'):
dirs.append(mdirs)
mdirs = []
# add to metadata-pairs if mdir.tail is None:
mdirs[frozenset(mdir.blocks)] = mdir break
tail = (struct.unpack('<II', mdir.tail.data)
if mdir.tail else None)
# derive paths and build directories tail = struct.unpack('<II', mdir.tail.data)
dirs = {} hard = mdir.tail.is_('hardtail')
rogue = {}
pending = [('/', (args.block1, args.block2))] # find paths
dirtable = {}
for dir in dirs:
dirtable[frozenset(dir[0].blocks)] = dir
pending = [("/", dirs[0])]
while pending: while pending:
path, branch = pending.pop(0) path, dir = pending.pop(0)
dir = [] for mdir in dir:
while branch and frozenset(branch) in mdirs:
mdir = mdirs[frozenset(branch)]
dir.append(mdir)
for tag in mdir.tags: for tag in mdir.tags:
if tag.is_('dir'): if tag.is_('dir'):
try: try:
npath = path + '/' + tag.data.decode('utf8') npath = tag.data.decode('utf8')
npath = npath.replace('//', '/')
dirstruct = mdir[Tag('dirstruct', tag.id, 0)] dirstruct = mdir[Tag('dirstruct', tag.id, 0)]
npair = struct.unpack('<II', dirstruct.data) nblocks = struct.unpack('<II', dirstruct.data)
pending.append((npath, npair)) nmdir = dirtable[frozenset(nblocks)]
pending.append(((path + '/' + npath), nmdir))
except KeyError: except KeyError:
pass pass
branch = (struct.unpack('<II', mdir.branch.data) dir[0].path = path.replace('//', '/')
if mdir.branch else None)
if not dir: # dump tree
rogue[path] = branch
else:
dirs[path] = dir
# also find orphans
not_orphans = {frozenset(mdir.blocks)
for dir in dirs.values()
for mdir in dir}
orphans = []
for pair, mdir in mdirs.items():
if pair not in not_orphans:
if len(orphans) > 0 and (pair == frozenset(
struct.unpack('<II', orphans[-1][-1].tail.data))):
orphans[-1].append(mdir)
else:
orphans.append([mdir])
# print littlefs + version info
version = ('?', '?') version = ('?', '?')
if superblock: if superblock:
version = tuple(reversed( version = tuple(reversed(
@@ -212,33 +187,24 @@ def main(args):
"data (truncated, if it fits)" "data (truncated, if it fits)"
if not any([args.no_truncate, args.tags, args.log, args.all]) else "")) if not any([args.no_truncate, args.tags, args.log, args.all]) else ""))
# print gstate if gstate:
badgstate = None print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
print("gstate 0x%s" % ''.join('%02x' % c for c in gstate)) tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0]) blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff')) if tag.size or not tag.isvalid:
if tag.size or not tag.isvalid: print(" orphans >=%d" % max(tag.size, 1))
print(" orphans >=%d" % max(tag.size, 1)) if tag.type:
if tag.type: print(" move dir {%#x, %#x} id %d" % (
if frozenset(blocks) not in mdirs: blocks[0], blocks[1], tag.id))
badgstate = gstate
print(" move dir {%#x, %#x}%s id %d" % (
blocks[0], blocks[1],
'?' if frozenset(blocks) not in mdirs else '',
tag.id))
# print dir info for i, dir in enumerate(dirs):
for path, dir in it.chain( print("dir %s" % (json.dumps(dir[0].path)
sorted(dirs.items()), if hasattr(dir[0], 'path') else '(orphan)'))
zip(it.repeat(None), orphans)):
print("dir %s" % json.dumps(path) if path else "orphaned")
for j, mdir in enumerate(dir): for j, mdir in enumerate(dir):
print("mdir {%#x, %#x} rev %d (was %d)%s%s" % ( print("mdir {%#x, %#x} rev %d%s" % (
mdir.blocks[0], mdir.blocks[1], mdir.rev, mdir.pair[1].rev, mdir.blocks[0], mdir.blocks[1], mdir.rev,
' (corrupted!)' if not mdir else '', ' (corrupted)' if not mdir else ''))
' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data)
if mdir.tail else ''))
f = io.StringIO() f = io.StringIO()
if args.tags: if args.tags:
@@ -248,38 +214,26 @@ def main(args):
elif args.all: elif args.all:
mdir.dump_all(f, truncate=not args.no_truncate) mdir.dump_all(f, truncate=not args.no_truncate)
else: else:
dumpentries(args, mdir, mdirs, f) dumpentries(args, mdir, f)
lines = list(filter(None, f.getvalue().split('\n'))) lines = list(filter(None, f.getvalue().split('\n')))
for k, line in enumerate(lines): for k, line in enumerate(lines):
print("%s %s" % ( print("%s %s" % (
' ' if j == len(dir)-1 else ' ' if i == len(dirs)-1 and j == len(dir)-1 else
'v' if k == len(lines)-1 else 'v' if k == len(lines)-1 else
'|' if path else '.', '.' if j == len(dir)-1 else
'|',
line)) line))
errcode = 0 if cycle:
for mdir in corrupted: print("*** cycle detected! -> {%#x, %#x} ***" % (cycle[0], cycle[1]))
errcode = errcode or 1
print("*** corrupted mdir {%#x, %#x}! ***" % (
mdir.blocks[0], mdir.blocks[1]))
for path, pair in rogue.items():
errcode = errcode or 2
print("*** couldn't find dir %s {%#x, %#x}! ***" % (
json.dumps(path), pair[0], pair[1]))
if badgstate:
errcode = errcode or 3
print("*** bad gstate 0x%s! ***" %
''.join('%02x' % c for c in gstate))
if cycle: if cycle:
errcode = errcode or 4 return 2
print("*** cycle detected {%#x, %#x}! ***" % ( elif not all(mdir for dir in dirs for mdir in dir):
cycle[0], cycle[1])) return 1
else:
return errcode return 0;
if __name__ == "__main__": if __name__ == "__main__":
import argparse import argparse

View File

@@ -231,7 +231,7 @@ class TestCase:
ncmd.extend(['-ex', 'r']) ncmd.extend(['-ex', 'r'])
if failure.assert_: if failure.assert_:
ncmd.extend(['-ex', 'up 2']) ncmd.extend(['-ex', 'up 2'])
elif gdb == 'start' or isinstance(gdb, int): elif gdb == 'start':
ncmd.extend([ ncmd.extend([
'-ex', 'b %s:%d' % (self.suite.path, self.code_lineno), '-ex', 'b %s:%d' % (self.suite.path, self.code_lineno),
'-ex', 'r']) '-ex', 'r'])
@@ -329,9 +329,7 @@ class ReentrantTestCase(TestCase):
persist = 'noerase' persist = 'noerase'
# exact cycle we should drop into debugger? # exact cycle we should drop into debugger?
if gdb and failure and ( if gdb and failure and failure.cycleno == cycles:
failure.cycleno == cycles or
(isinstance(gdb, int) and gdb == cycles)):
return super().test(gdb=gdb, persist=persist, cycles=cycles, return super().test(gdb=gdb, persist=persist, cycles=cycles,
failure=failure, **args) failure=failure, **args)
@@ -762,8 +760,7 @@ if __name__ == "__main__":
help="Store disk image in a file.") help="Store disk image in a file.")
parser.add_argument('-b', '--build', action='store_true', parser.add_argument('-b', '--build', action='store_true',
help="Only build the tests, do not execute.") help="Only build the tests, do not execute.")
parser.add_argument('-g', '--gdb', metavar='{init,start,assert},CYCLE', parser.add_argument('-g', '--gdb', choices=['init', 'start', 'assert'],
type=lambda n: n if n in {'init', 'start', 'assert'} else int(n, 0),
nargs='?', const='assert', nargs='?', const='assert',
help="Drop into gdb on test failure.") help="Drop into gdb on test failure.")
parser.add_argument('--no-internal', action='store_true', parser.add_argument('--no-internal', action='store_true',

View File

@@ -246,8 +246,6 @@ code = '''
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0; LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0; lfs_file_close(&lfs, &file) => 0;
} }
// TODO rm me
lfs_mkdir(&lfs, "a") => 0;
lfs_unmount(&lfs) => 0; lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0; lfs_mount(&lfs, &cfg) => 0;
@@ -258,9 +256,6 @@ code = '''
lfs_dir_read(&lfs, &dir, &info) => 1; lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR); assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0); assert(strcmp(info.name, "..") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "a") == 0);
for (int i = 0; i < N; i++) { for (int i = 0; i < N; i++) {
sprintf(path, "file%03d", i); sprintf(path, "file%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1; lfs_dir_read(&lfs, &dir, &info) => 1;

View File

@@ -151,6 +151,7 @@ code = '''
LFS_MKTAG(0x700, 0x3ff, 0), LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_STRUCT, 1, sizeof(struct lfs_ctz)), &ctz) LFS_MKTAG(LFS_TYPE_STRUCT, 1, sizeof(struct lfs_ctz)), &ctz)
=> LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz)); => LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz));
lfs_ctz_fromle32(&ctz);
// rewrite block to contain bad pointer // rewrite block to contain bad pointer
uint8_t bbuffer[LFS_BLOCK_SIZE]; uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0; cfg.read(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
@@ -245,6 +246,7 @@ code = '''
LFS_MKTAG(0x7ff, 0x3ff, 0), LFS_MKTAG(0x7ff, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair) LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
=> LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)); => LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
lfs_pair_fromle32(pair);
// change tail-pointer to point to root // change tail-pointer to point to root
lfs_dir_fetch(&lfs, &mdir, pair) => 0; lfs_dir_fetch(&lfs, &mdir, pair) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS( lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
@@ -274,6 +276,7 @@ code = '''
LFS_MKTAG(0x7ff, 0x3ff, 0), LFS_MKTAG(0x7ff, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair) LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
=> LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)); => LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
lfs_pair_fromle32(pair);
// change tail-pointer to point to ourself // change tail-pointer to point to ourself
lfs_dir_fetch(&lfs, &mdir, pair) => 0; lfs_dir_fetch(&lfs, &mdir, pair) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS( lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(

85
tests/test_new.toml Normal file
View File

@@ -0,0 +1,85 @@
#open(1, "5file5.xxxxxxxxxxxx", 0x503) -> 0
# write(1, , 2007)[^ 1499 us] -> 2007
# write(1, , 2007)[^ 1411 us] -> 2007
# write(1, , 2007)[^ 1390 us] -> 2007
# write(1, , 2007)[^ 1401 us] -> 2007
# close(1) -> 0
# open(1, "1file1.xxxx", 0x503) -> 0
# mount
# open(0, "5file5.xxxxxxxxxxxx", 0x3) -> 0
# open(1, "5file5.xxxxxxxxxxxx", 0x503) -> 0
# close(1) -> 0
# open(1, "1file1.xxxx", 0x2) -> 0
# write(0, , 63) -> 63
#a.out: lfs.c:2169: lfs_ctz_find: Assertion `head >= 2 && head <= lfs->cfg->block_count' failed.
# close(0)Aborted
[[case]]
define.FILESIZE5 = '4*CHUNKSIZE5'
define.FILESIZE1 = '4*CHUNKSIZE1'
define.CHUNKSIZE5 = 2007
define.CHUNKSIZE1 = 63
code = '''
lfs_file_t files[2];
uint8_t chunk5[CHUNKSIZE5];
memset(chunk5, 'a', CHUNKSIZE5);
uint8_t chunk1[CHUNKSIZE1];
memset(chunk1, 'b', CHUNKSIZE1);
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &files[1], "5file5.xxxxxxxxxxxx",
LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (int i = 0; i < FILESIZE5/CHUNKSIZE5; i++) {
lfs_file_write(&lfs, &files[1], chunk5, CHUNKSIZE5) => CHUNKSIZE5;
}
lfs_file_close(&lfs, &files[1]) => 0;
lfs_file_open(&lfs, &files[1], "1file1.xxxx",
LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
// these should not change the result
// lfs_file_close(&lfs, &files[1]) => 0;
// lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &files[0], "5file5.xxxxxxxxxxxx",
LFS_O_RDWR) => 0;
lfs_file_open(&lfs, &files[1], "5file5.xxxxxxxxxxxx",
LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
lfs_file_close(&lfs, &files[1]) => 0;
lfs_file_open(&lfs, &files[1], "1file1.xxxx",
LFS_O_WRONLY) => 0;
for (int i = 0; i < FILESIZE1/CHUNKSIZE1; i++) {
lfs_file_write(&lfs, &files[1], chunk1, CHUNKSIZE1) => CHUNKSIZE1;
}
lfs_file_close(&lfs, &files[1]) => 0;
memset(chunk5, 'c', CHUNKSIZE5);
for (int i = 0; i < FILESIZE5/CHUNKSIZE5; i++) {
lfs_file_write(&lfs, &files[0], chunk5, CHUNKSIZE5) => CHUNKSIZE5;
}
lfs_file_close(&lfs, &files[0]) => 0;
lfs_unmount(&lfs) => 0;
// check results
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &files[0], "5file5.xxxxxxxxxxxx",
LFS_O_RDONLY) => 0;
for (int i = 0; i < FILESIZE5/CHUNKSIZE5; i++) {
uint8_t rchunk[CHUNKSIZE5];
lfs_file_read(&lfs, &files[0], rchunk, CHUNKSIZE5) => CHUNKSIZE5;
assert(memcmp(rchunk, chunk5, CHUNKSIZE5) == 0);
}
lfs_file_close(&lfs, &files[0]) => 0;
lfs_file_open(&lfs, &files[0], "1file1.xxxx",
LFS_O_RDONLY) => 0;
for (int i = 0; i < FILESIZE1/CHUNKSIZE1; i++) {
uint8_t rchunk[CHUNKSIZE1];
lfs_file_read(&lfs, &files[0], rchunk, CHUNKSIZE1) => CHUNKSIZE1;
assert(memcmp(rchunk, chunk1, CHUNKSIZE1) == 0);
}
lfs_file_close(&lfs, &files[0]) => 0;
lfs_unmount(&lfs) => 0;
'''

View File

@@ -148,7 +148,7 @@ code = '''
# almost every tree operation needs a relocation # almost every tree operation needs a relocation
reentrant = true reentrant = true
# TODO fix this case, caused by non-DAG trees # TODO fix this case, caused by non-DAG trees
#if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)' if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
define = [ define = [
{FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1}, {FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1}, {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
@@ -210,7 +210,7 @@ code = '''
[[case]] # reentrant testing for relocations, but now with random renames! [[case]] # reentrant testing for relocations, but now with random renames!
reentrant = true reentrant = true
# TODO fix this case, caused by non-DAG trees # TODO fix this case, caused by non-DAG trees
#if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)' if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
define = [ define = [
{FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1}, {FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1}, {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},