mirror of
https://github.com/eledio-devices/thirdparty-littlefs.git
synced 2025-11-01 00:38:29 +01:00
Also fixed a bug in dir splitting when there's a large number of open
files, which was the main reason I was trying to make it easier to debug
disk images.
One part of the recent test changes was to move away from the
file-per-block emubd and instead simulate storage with a single
contiguous file. The file-per-block format was marginally useful
at the beginning, but as the remaining bugs get more subtle, it
becomes more useful to inspect littlefs through scripts that
make the underlying metadata more human-readable.
The key benefit of switching to a contiguous file is these same
scripts can be reused for real disk images and can even read through
/dev/sdb or similar.
- ./scripts/readblock.py disk block_size block
off data
00000000: 71 01 00 00 f0 0f ff f7 6c 69 74 74 6c 65 66 73 q.......littlefs
00000010: 2f e0 00 10 00 00 02 00 00 02 00 00 00 04 00 00 /...............
00000020: ff 00 00 00 ff ff ff 7f fe 03 00 00 20 00 04 19 ...............
00000030: 61 00 00 0c 00 62 20 30 0c 09 a0 01 00 00 64 00 a....b 0......d.
...
readblock.py prints a hex dump of a given block on disk. It's basically
just "dd if=disk bs=block_size count=1 skip=block | xxd -g1 -" but with
less typing.
- ./scripts/readmdir.py disk block_size block1 block2
off tag type id len data (truncated)
0000003b: 0020000a dir 0 10 63 6f 6c 64 63 6f 66 66 coldcoff
00000049: 20000008 dirstruct 0 8 02 02 00 00 03 02 00 00 ........
00000008: 00200409 dir 1 9 68 6f 74 63 6f 66 66 65 hotcoffe
00000015: 20000408 dirstruct 1 8 fe 01 00 00 ff 01 00 00 ........
readmdir.py prints info about the tags in a metadata pair on disk. It
can print the currently active tags as well as the raw log of the
metadata pair.
- ./scripts/readtree.py disk block_size
superblock "littlefs"
version v2.0
block_size 512
block_count 1024
name_max 255
file_max 2147483647
attr_max 1022
gstate 0x000000000000000000000000
dir "/"
mdir {0x0, 0x1} rev 3
v id 0 superblock "littlefs" inline size 24
mdir {0x77, 0x78} rev 1
id 0 dir "coffee" dir {0x1fc, 0x1fd}
dir "/coffee"
mdir {0x1fd, 0x1fc} rev 2
id 0 dir "coldcoffee" dir {0x202, 0x203}
id 1 dir "hotcoffee" dir {0x1fe, 0x1ff}
dir "/coffee/coldcoffee"
mdir {0x202, 0x203} rev 1
dir "/coffee/warmcoffee"
mdir {0x200, 0x201} rev 1
readtree.py parses the littlefs tree and prints info about the
semantics of what's on disk. This includes the superblock,
global-state, and directories/metadata-pairs. It doesn't print
the filesystem tree though, that could be a different tool.
342 lines
11 KiB
TOML
342 lines
11 KiB
TOML
[[case]] # test running a filesystem to exhaustion
# Repeatedly writes/verifies FILES pseudo-random files until the bd wears out
# (every write path eventually returns LFS_ERR_NOSPC), then checks the
# filesystem is still mountable and readable.
define.LFS_ERASE_CYCLES = 10
define.LFS_BLOCK_COUNT = 256 # small bd so it runs faster
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
define.LFS_BADBLOCK_BEHAVIOR = [
    'LFS_TESTBD_BADBLOCK_NOPROG',
    'LFS_TESTBD_BADBLOCK_NOERASE',
    'LFS_TESTBD_BADBLOCK_NOREAD',
]
define.FILES = 10
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
    lfs_mkdir(&lfs, "roadrunner") => 0;
    lfs_unmount(&lfs) => 0;

    uint32_t cycle = 0;
    while (true) {
        lfs_mount(&lfs, &cfg) => 0;
        for (uint32_t i = 0; i < FILES; i++) {
            // choose name, roughly random seed, and random 2^n size
            sprintf(path, "roadrunner/test%d", i);
            srand(cycle * i);
            size = 1 << ((rand() % 10)+2);

            lfs_file_open(&lfs, &file, path,
                    LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;

            for (lfs_size_t j = 0; j < size; j++) {
                char c = 'a' + (rand() % 26);
                lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
                assert(res == 1 || res == LFS_ERR_NOSPC);
                if (res == LFS_ERR_NOSPC) {
                    goto exhausted;
                }
            }

            err = lfs_file_close(&lfs, &file);
            assert(err == 0 || err == LFS_ERR_NOSPC);
            if (err == LFS_ERR_NOSPC) {
                goto exhausted;
            }
        }

        for (uint32_t i = 0; i < FILES; i++) {
            // check for errors; reseeding with the same cycle*i reproduces
            // the exact byte stream that was written above
            sprintf(path, "roadrunner/test%d", i);
            srand(cycle * i);
            size = 1 << ((rand() % 10)+2);

            lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
            for (lfs_size_t j = 0; j < size; j++) {
                char c = 'a' + (rand() % 26);
                char r;
                lfs_file_read(&lfs, &file, &r, 1) => 1;
                assert(r == c);
            }

            lfs_file_close(&lfs, &file) => 0;
        }
        lfs_unmount(&lfs) => 0;

        cycle += 1;
    }

exhausted:
    // should still be readable
    lfs_mount(&lfs, &cfg) => 0;
    for (uint32_t i = 0; i < FILES; i++) {
        // check for errors
        sprintf(path, "roadrunner/test%d", i);
        lfs_stat(&lfs, path, &info) => 0;
    }
    lfs_unmount(&lfs) => 0;

    LFS_WARN("completed %d cycles", cycle);
'''
|
|
|
|
[[case]] # test running a filesystem to exhaustion
# which also requires expanding superblocks
# Same as the previous case, but files live in the root directory so the
# superblock pair itself sees write traffic, and format is not followed by a
# separate mkdir/unmount step.
define.LFS_ERASE_CYCLES = 10
define.LFS_BLOCK_COUNT = 256 # small bd so it runs faster
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
define.LFS_BADBLOCK_BEHAVIOR = [
    'LFS_TESTBD_BADBLOCK_NOPROG',
    'LFS_TESTBD_BADBLOCK_NOERASE',
    'LFS_TESTBD_BADBLOCK_NOREAD',
]
define.FILES = 10
code = '''
    lfs_format(&lfs, &cfg) => 0;

    uint32_t cycle = 0;
    while (true) {
        lfs_mount(&lfs, &cfg) => 0;
        for (uint32_t i = 0; i < FILES; i++) {
            // choose name, roughly random seed, and random 2^n size
            sprintf(path, "test%d", i);
            srand(cycle * i);
            size = 1 << ((rand() % 10)+2);

            lfs_file_open(&lfs, &file, path,
                    LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;

            for (lfs_size_t j = 0; j < size; j++) {
                char c = 'a' + (rand() % 26);
                lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
                assert(res == 1 || res == LFS_ERR_NOSPC);
                if (res == LFS_ERR_NOSPC) {
                    goto exhausted;
                }
            }

            err = lfs_file_close(&lfs, &file);
            assert(err == 0 || err == LFS_ERR_NOSPC);
            if (err == LFS_ERR_NOSPC) {
                goto exhausted;
            }
        }

        for (uint32_t i = 0; i < FILES; i++) {
            // check for errors; reseeding with the same cycle*i reproduces
            // the exact byte stream that was written above
            sprintf(path, "test%d", i);
            srand(cycle * i);
            size = 1 << ((rand() % 10)+2);

            lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
            for (lfs_size_t j = 0; j < size; j++) {
                char c = 'a' + (rand() % 26);
                char r;
                lfs_file_read(&lfs, &file, &r, 1) => 1;
                assert(r == c);
            }

            lfs_file_close(&lfs, &file) => 0;
        }
        lfs_unmount(&lfs) => 0;

        cycle += 1;
    }

exhausted:
    // should still be readable
    lfs_mount(&lfs, &cfg) => 0;
    for (uint32_t i = 0; i < FILES; i++) {
        // check for errors
        sprintf(path, "test%d", i);
        lfs_stat(&lfs, path, &info) => 0;
    }
    lfs_unmount(&lfs) => 0;

    LFS_WARN("completed %d cycles", cycle);
'''
|
|
|
|
# These are a sort of high-level litmus test for wear-leveling. One definition
# of wear-leveling is that increasing a block device's space translates directly
# into increasing the block device's lifetime. This is something we can actually
# check for.
|
|
|
[[case]] # wear-level test running a filesystem to exhaustion
# Runs the exhaustion test twice: first with half the blocks pre-worn to their
# erase limit (effectively half the bd), then with the full bd. If wear-leveling
# works, doubling the usable space should roughly double the achieved cycles.
define.LFS_ERASE_CYCLES = 10
define.LFS_BLOCK_COUNT = 256 # small bd so it runs faster
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
define.LFS_BADBLOCK_BEHAVIOR = [
    'LFS_TESTBD_BADBLOCK_NOPROG',
    'LFS_TESTBD_BADBLOCK_NOERASE',
    'LFS_TESTBD_BADBLOCK_NOREAD',
]
define.FILES = 10
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
    lfs_mkdir(&lfs, "roadrunner") => 0;
    lfs_unmount(&lfs) => 0;

    uint32_t run_cycles[2];
    const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT};

    for (int run = 0; run < 2; run++) {
        // pre-wear every block outside this run's budget to the erase limit
        for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
            lfs_testbd_setwear(&cfg, b,
                    (b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
        }

        uint32_t cycle = 0;
        while (true) {
            lfs_mount(&lfs, &cfg) => 0;
            for (uint32_t i = 0; i < FILES; i++) {
                // choose name, roughly random seed, and random 2^n size
                sprintf(path, "roadrunner/test%d", i);
                srand(cycle * i);
                size = 1 << ((rand() % 10)+2);

                lfs_file_open(&lfs, &file, path,
                        LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;

                for (lfs_size_t j = 0; j < size; j++) {
                    char c = 'a' + (rand() % 26);
                    lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
                    assert(res == 1 || res == LFS_ERR_NOSPC);
                    if (res == LFS_ERR_NOSPC) {
                        goto exhausted;
                    }
                }

                err = lfs_file_close(&lfs, &file);
                assert(err == 0 || err == LFS_ERR_NOSPC);
                if (err == LFS_ERR_NOSPC) {
                    goto exhausted;
                }
            }

            for (uint32_t i = 0; i < FILES; i++) {
                // check for errors; reseeding with the same cycle*i reproduces
                // the exact byte stream that was written above
                sprintf(path, "roadrunner/test%d", i);
                srand(cycle * i);
                size = 1 << ((rand() % 10)+2);

                lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
                for (lfs_size_t j = 0; j < size; j++) {
                    char c = 'a' + (rand() % 26);
                    char r;
                    lfs_file_read(&lfs, &file, &r, 1) => 1;
                    assert(r == c);
                }

                lfs_file_close(&lfs, &file) => 0;
            }
            lfs_unmount(&lfs) => 0;

            cycle += 1;
        }

exhausted:
        // should still be readable
        lfs_mount(&lfs, &cfg) => 0;
        for (uint32_t i = 0; i < FILES; i++) {
            // check for errors
            sprintf(path, "roadrunner/test%d", i);
            lfs_stat(&lfs, path, &info) => 0;
        }
        lfs_unmount(&lfs) => 0;

        run_cycles[run] = cycle;
        LFS_WARN("completed %d blocks %d cycles",
                run_block_count[run], run_cycles[run]);
    }

    // check we increased the lifetime by 2x with ~10% error
    LFS_ASSERT(run_cycles[1] > 2*run_cycles[0]-run_cycles[0]/10);
'''
|
|
|
|
[[case]] # wear-level test + expanding superblock
# Same two-run wear-leveling litmus test as above, but files live in the root
# directory so superblock expansion is also exercised.
define.LFS_ERASE_CYCLES = 10
define.LFS_BLOCK_COUNT = 256 # small bd so it runs faster
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
define.LFS_BADBLOCK_BEHAVIOR = [
    'LFS_TESTBD_BADBLOCK_NOPROG',
    'LFS_TESTBD_BADBLOCK_NOERASE',
    'LFS_TESTBD_BADBLOCK_NOREAD',
]
define.FILES = 10
code = '''
    lfs_format(&lfs, &cfg) => 0;

    uint32_t run_cycles[2];
    const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT};

    for (int run = 0; run < 2; run++) {
        // pre-wear every block outside this run's budget to the erase limit
        for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
            lfs_testbd_setwear(&cfg, b,
                    (b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
        }

        uint32_t cycle = 0;
        while (true) {
            lfs_mount(&lfs, &cfg) => 0;
            for (uint32_t i = 0; i < FILES; i++) {
                // choose name, roughly random seed, and random 2^n size
                sprintf(path, "test%d", i);
                srand(cycle * i);
                size = 1 << ((rand() % 10)+2);

                lfs_file_open(&lfs, &file, path,
                        LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;

                for (lfs_size_t j = 0; j < size; j++) {
                    char c = 'a' + (rand() % 26);
                    lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
                    assert(res == 1 || res == LFS_ERR_NOSPC);
                    if (res == LFS_ERR_NOSPC) {
                        goto exhausted;
                    }
                }

                err = lfs_file_close(&lfs, &file);
                assert(err == 0 || err == LFS_ERR_NOSPC);
                if (err == LFS_ERR_NOSPC) {
                    goto exhausted;
                }
            }

            for (uint32_t i = 0; i < FILES; i++) {
                // check for errors; reseeding with the same cycle*i reproduces
                // the exact byte stream that was written above
                sprintf(path, "test%d", i);
                srand(cycle * i);
                size = 1 << ((rand() % 10)+2);

                lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
                for (lfs_size_t j = 0; j < size; j++) {
                    char c = 'a' + (rand() % 26);
                    char r;
                    lfs_file_read(&lfs, &file, &r, 1) => 1;
                    assert(r == c);
                }

                lfs_file_close(&lfs, &file) => 0;
            }
            lfs_unmount(&lfs) => 0;

            cycle += 1;
        }

exhausted:
        // should still be readable
        lfs_mount(&lfs, &cfg) => 0;
        for (uint32_t i = 0; i < FILES; i++) {
            // check for errors
            sprintf(path, "test%d", i);
            lfs_stat(&lfs, path, &info) => 0;
        }
        lfs_unmount(&lfs) => 0;

        run_cycles[run] = cycle;
        LFS_WARN("completed %d blocks %d cycles",
                run_block_count[run], run_cycles[run]);
    }

    // check we increased the lifetime by 2x with ~10% error
    LFS_ASSERT(run_cycles[1] > 2*run_cycles[0]-run_cycles[0]/10);
'''
|