WIP: crazy new idea, work in progress

passing non-reentrant tests already!
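In short (as far as the diff below shows): lfs_t grows a relocation chain, relocate_tail/relocate_end plus a relocate_do_hack flag, which lfs_fs_pred now walks before the normal directory scan and which lfs_fs_relocate keeps up to date, and pair comparison is split into the existing loose form and a new strict form, lfs_pair_realcmp. A minimal sketch of that split, assuming only the two helpers as they appear in the lfs.c diff; the demo main() and its block numbers are illustrative and not part of the commit:

// Sketch only: loose vs. strict metadata-pair comparison.
#include <stdint.h>
#include <stdio.h>

typedef uint32_t lfs_block_t;

// 0 ("equal") if the pairs share any block, in either order (existing helper)
static inline int lfs_pair_cmp(
        const lfs_block_t paira[2],
        const lfs_block_t pairb[2]) {
    return !(paira[0] == pairb[0] || paira[1] == pairb[1] ||
             paira[0] == pairb[1] || paira[1] == pairb[0]);
}

// 0 ("equal") only if both blocks match, allowing swapped order (new helper)
static inline int lfs_pair_realcmp(
        const lfs_block_t paira[2],
        const lfs_block_t pairb[2]) {
    return !((paira[0] == pairb[0] && paira[1] == pairb[1]) ||
             (paira[0] == pairb[1] && paira[1] == pairb[0]));
}

int main(void) {
    const lfs_block_t a[2] = {4, 5};
    const lfs_block_t b[2] = {5, 9};  // shares block 5 with a, but not block 4
    // prints "cmp=0 realcmp=1": a pair that merely shares one block looks
    // "equal" to the loose compare, while the strict compare tells them apart
    printf("cmp=%d realcmp=%d\n", lfs_pair_cmp(a, b), lfs_pair_realcmp(a, b));
    return 0;
}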
Christopher Haster
2020-03-06 20:14:27 -06:00
parent 3ee291de59
commit eecb06a9dc
5 changed files with 329 additions and 104 deletions

lfs.c (265 lines changed)

@@ -241,6 +241,13 @@ static inline int lfs_pair_cmp(
             paira[0] == pairb[1] || paira[1] == pairb[0]);
 }
 
+static inline int lfs_pair_realcmp(
+        const lfs_block_t paira[2],
+        const lfs_block_t pairb[2]) {
+    return !((paira[0] == pairb[0] && paira[1] == pairb[1]) ||
+             (paira[0] == pairb[1] && paira[1] == pairb[0]));
+}
+
 static inline bool lfs_pair_sync(
         const lfs_block_t paira[2],
         const lfs_block_t pairb[2]) {
@@ -423,6 +430,7 @@ int lfs_fs_traverseraw(lfs_t *lfs,
         int (*cb)(void *data, lfs_block_t block), void *data,
         bool includeorphans);
 static int lfs_fs_forceconsistency(lfs_t *lfs);
+static int lfs_fs_deorphan(lfs_t *lfs);
 static int lfs_deinit(lfs_t *lfs);
 #ifdef LFS_MIGRATE
 static int lfs1_traverse(lfs_t *lfs,
@@ -808,6 +816,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
                 if (err == LFS_ERR_CORRUPT) {
                     // can't continue?
                     dir->erased = false;
+                    dir->first = false;
                     break;
                 }
                 return err;
@@ -820,9 +829,11 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
             if (!lfs_tag_isvalid(tag)) {
                 dir->erased = (lfs_tag_type1(ptag) == LFS_TYPE_CRC &&
                         dir->off % lfs->cfg->prog_size == 0);
+                dir->first = false;
                 break;
             } else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
                 dir->erased = false;
+                dir->first = false;
                 break;
             }
@@ -837,6 +848,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
                 if (err) {
                     if (err == LFS_ERR_CORRUPT) {
                         dir->erased = false;
+                        dir->first = false;
                         break;
                     }
                     return err;
@@ -845,6 +857,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
                 if (crc != dcrc) {
                     dir->erased = false;
+                    dir->first = false;
                     break;
                 }
@@ -880,6 +893,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
                 if (err) {
                     if (err == LFS_ERR_CORRUPT) {
                         dir->erased = false;
+                        dir->first = false;
                         break;
                     }
                     return err;
@@ -913,6 +927,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
                 if (err) {
                     if (err == LFS_ERR_CORRUPT) {
                         dir->erased = false;
+                        dir->first = false;
                         break;
                     }
                 }
@@ -935,6 +950,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
                 if (err) {
                     if (err == LFS_ERR_CORRUPT) {
                         dir->erased = false;
+                        dir->first = false;
                         break;
                     }
                 }
@@ -948,6 +964,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
                 if (res < 0) {
                     if (res == LFS_ERR_CORRUPT) {
                         dir->erased = false;
+                        dir->first = false;
                         break;
                     }
                     return res;
@@ -1419,6 +1436,7 @@ static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
     dir->branch[0] = LFS_BLOCK_NULL;
     dir->branch[1] = LFS_BLOCK_NULL;
     dir->erased = false;
+    dir->first = true;
     dir->split = false;
 
     // don't write out yet, let caller take care of that
@@ -1442,6 +1460,7 @@ static int lfs_dir_droptail(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) {
     // }
     // steal tail's tail
+    // TODO does this tail update cause problems
     lfs_pair_tole32(tail->tail);
     err = lfs_dir_commit(lfs, dir, LFS_MKATTRS(
             {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail}));
@@ -1863,6 +1882,7 @@ relocate:
 #if 1
     // find parent?
+    // TODO do we need this if parent.tail == oldpair? The answer is no but how to organize
     ptag = lfs_fs_parent(lfs, oldpair, &parent, false);
     if (ptag < 0 && ptag != LFS_ERR_NOENT) {
         return ptag;
@@ -1880,17 +1900,56 @@ relocate:
     }
 
     // Have parent? Didn't give up? This is when we need to reinsert
-    // ourself in the threaded linked-list. This evenutally creates an
+    // ourself in the threaded linked-list. This eventually creates an
     // orphan, but we can clean that up. We need to reinsert to avoid
     // issues with cycles in non-DAG trees.
     //
     // Note if parent's tail == us we can, and must, clean ourselves up
     // without an orphan.
     if (ptag != LFS_ERR_NOENT &&
-            lfs_pair_cmp(parent.tail, oldpair) != 0) {
-        dir->tail[0] = parent.tail[0];
-        dir->tail[1] = parent.tail[1];
-        printf("tail? %x %x\n", dir->tail[0], dir->tail[1]);
+            (lfs_pair_cmp(parent.tail, oldpair) != 0 || !lfs_pair_sync(parent.tail, oldpair))) { // TODO word this so much better
+        if (lfs_pair_isnull(lfs->relocate_tail)) {
+            // not relocating yet
+            dir->tail[0] = parent.tail[0];
+            dir->tail[1] = parent.tail[1];
+        } else {
+            printf("HEEEEEEEEEEEEY\n");
+            // already relocating, we need to update the last dir in our
+            // tail of new pairs
+            lfs_mdir_t relocatedir;
+            err = lfs_dir_fetch(lfs, &relocatedir, lfs->relocate_end);
+            if (err) {
+                return err;
+            }
+
+            LFS_ASSERT(!lfs->relocate_do_hack);
+            bool oldhack = lfs->relocate_do_hack;
+            lfs->relocate_do_hack = true;
+            err = lfs_dir_commit(lfs, &relocatedir, LFS_MKATTRS(
+                    {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), parent.tail}));
+            lfs->relocate_do_hack = oldhack;
+            if (err) {
+                // TODO do we need to clean up anything?
+                return err;
+            }
+
+            dir->tail[0] = lfs->relocate_tail[0];
+            dir->tail[1] = lfs->relocate_tail[1];
+        }
+
+//        // TODO implement this
+//        if (!lfs_pair_isnull(lfs->relocate_tail)) {
+//            printf("HEEEEEY\n");
+//        }
+//        //LFS_ASSERT(lfs->relocate_len == 0);
+//        // TODO AH BUT WHAT IF WE'RE RELOCATING ALREADY
+//        dir->tail[0] = parent.tail[0];
+//        dir->tail[1] = parent.tail[1];
+//        printf("tail? %x %x\n", dir->tail[0], dir->tail[1]);
+//        lfs->relocate_tail[0] = dir->pair[0];
+//        lfs->relocate_tail[1] = dir->pair[1];
     }
 #elif 0 // TODO rm me
     // find parent?
@@ -1941,6 +2000,21 @@ relocate:
     }
 
     if (relocated) {
+        // TODO hm
+        if (lfs_pair_isnull(lfs->relocate_tail)) {
+            // TODO do this before?
+            lfs->relocate_end[0] = dir->pair[0];
+            lfs->relocate_end[1] = dir->pair[1];
+        }
+        lfs->relocate_tail[0] = dir->pair[0];
+        lfs->relocate_tail[1] = dir->pair[1];
+
+//        if (!dir->first) {
+//            // TODO is this the best way to force dir updates to be on one block?
+//            dir->pair[0] = dir->pair[0];
+//            dir->pair[1] = oldpair[1];
+//        }
+//
         // update references if we relocated
         LFS_DEBUG("Relocating %"PRIx32" %"PRIx32" -> %"PRIx32" %"PRIx32,
                 oldpair[0], oldpair[1], dir->pair[0], dir->pair[1]);
@@ -1948,6 +2022,17 @@ relocate:
         if (err) {
             return err;
         }
 
+        // TODO well this is the hackiest thing I've done in a while,
+        // needed because we may be changed during relocate (of course!)
+        //
+        // TODO we should be inserted into mlist
+        // TODO should mlist be reworked?
+        printf("refetch {%#x, %#x}\n", dir->pair[0], dir->pair[1]);
+        err = lfs_dir_fetch(lfs, dir, dir->pair);
+        if (err) {
+            return err;
+        }
     }
 
     return 0;
@@ -2135,6 +2220,12 @@ compact:
         }
     }
 
+//    printf("dir before: {%#x, %#x}, mlist before: ", olddir.pair[0], olddir.pair[1]);
+//    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
+//        printf("{%#x, %#x} ", d->m.pair[0], d->m.pair[1]);
+//    }
+//    printf("\n");
     // this complicated bit of logic is for fixing up any active
     // metadata-pairs that we may have affected
     //
@@ -2143,7 +2234,9 @@ compact:
     // we need to copy the pair so they don't get clobbered if we refetch
     // our mdir.
     for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
-        if (&d->m != dir && lfs_pair_cmp(d->m.pair, olddir.pair) == 0) {
+        if (&d->m != dir && (lfs_pair_realcmp(d->m.pair, olddir.pair) == 0 ||
+                lfs_pair_realcmp(d->m.pair, dir->pair) == 0)) { // TODO hmm, this was updated in lfs_fs_relocate? is this double work? what do we do with olddir??
+            //printf("hey {%#x, %#x} -> {%#x, %#x}\n", d->m.pair[0], d->m.pair[1], dir->pair[0], dir->pair[1]);
             d->m = *dir;
             for (int i = 0; i < attrcount; i++) {
                 if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
@@ -2168,7 +2261,8 @@ compact:
     }
     for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
-        if (lfs_pair_cmp(d->m.pair, olddir.pair) == 0) {
+        if (lfs_pair_realcmp(d->m.pair, olddir.pair) == 0 ||
+                lfs_pair_realcmp(d->m.pair, dir->pair) == 0) { // TODO hm, see above
             while (d->id >= d->m.count && !lfs_pair_isnull(d->m.branch)) {
                 //
                 //
@@ -2202,6 +2296,18 @@ compact:
         }
     }
 
+//    printf("dir after: {%#x, %#x}, mlist after: ", dir->pair[0], dir->pair[1]);
+//    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
+//        printf("{%#x, %#x} ", d->m.pair[0], d->m.pair[1]);
+//    }
+//    printf("\n");
+
+    if (!lfs->relocate_do_hack) {
+        lfs->relocate_tail[0] = LFS_BLOCK_NULL;
+        lfs->relocate_tail[1] = LFS_BLOCK_NULL;
+        lfs->relocate_end[0] = LFS_BLOCK_NULL; // TODO need these?
+        lfs->relocate_end[1] = LFS_BLOCK_NULL;
+    }
+
     return 0;
 }
@@ -2293,6 +2399,10 @@ int lfs_mkdir(lfs_t *lfs, const char *path) {
     lfs_pair_tole32(bpair);
     lfs_pair_tole32(dir.pair);
     // TODO need to change endianness of tail?
+    lfs->relocate_tail[0] = dir.pair[0]; // TODO
+    lfs->relocate_tail[1] = dir.pair[1];
+    lfs->relocate_end[0] = dir.pair[0]; // TODO
+    lfs->relocate_end[1] = dir.pair[1];
     err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS(
             {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
             {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), path},
@@ -3683,6 +3793,15 @@ int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
     // let commit clean up after move (if we're different! otherwise move
     // logic already fixed it for us)
     if (!samepair && lfs_gstate_hasmove(&lfs->gstate)) {
+        // fetch again
+        // TODO should this be in mlist?
+        err = lfs_dir_fetch(lfs, &oldcwd, oldcwd.pair);
+        if (err) {
+            lfs->mlist = prevdir.next;
+            LFS_TRACE("lfs_rename -> %d", err);
+            return err;
+        }
+
         // prep gstate and delete move id
         lfs_fs_prepmove(lfs, 0x3ff, NULL);
         err = lfs_dir_commit(lfs, &oldcwd, LFS_MKATTRS(
@@ -3893,6 +4012,11 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
     // setup default state
     lfs->root[0] = LFS_BLOCK_NULL;
     lfs->root[1] = LFS_BLOCK_NULL;
+    lfs->relocate_tail[0] = LFS_BLOCK_NULL;
+    lfs->relocate_tail[1] = LFS_BLOCK_NULL;
+    lfs->relocate_end[0] = LFS_BLOCK_NULL;
+    lfs->relocate_end[1] = LFS_BLOCK_NULL;
+    lfs->relocate_do_hack = false;
     lfs->mlist = NULL;
     lfs->seed = 0;
     lfs->gdisk = (lfs_gstate_t){0};
@@ -4270,10 +4394,33 @@ int lfs_fs_traverse(lfs_t *lfs,
 static int lfs_fs_pred(lfs_t *lfs,
         const lfs_block_t pair[2], lfs_mdir_t *pdir) {
+    // iterate over our relocation chain
+    // TODO combine these loops?
+    pdir->tail[0] = lfs->relocate_tail[0];
+    pdir->tail[1] = lfs->relocate_tail[1];
+    lfs_block_t cycle = 0;
+    while (!lfs_pair_isnull(pdir->tail) &&
+            lfs_pair_cmp(pdir->tail, lfs->relocate_end) != 0) {
+        if (cycle >= lfs->cfg->block_count/2) {
+            // loop detected
+            return LFS_ERR_CORRUPT;
+        }
+        cycle += 1;
+
+        if (lfs_pair_cmp(pdir->tail, pair) == 0) {
+            return 0;
+        }
+
+        int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
+        if (err) {
+            return err;
+        }
+    }
+
     // iterate over all directory directory entries
     pdir->tail[0] = 0;
     pdir->tail[1] = 1;
-    lfs_block_t cycle = 0;
+    cycle = 0;
     while (!lfs_pair_isnull(pdir->tail)) {
         if (cycle >= lfs->cfg->block_count/2) {
             // loop detected
@@ -4357,6 +4504,12 @@ static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
 static int lfs_fs_relocate(lfs_t *lfs,
         lfs_stag_t ptag, lfs_mdir_t *parent,
         const lfs_block_t oldpair[2], lfs_block_t newpair[2]) {
+    printf("mlist before: ");
+    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
+        printf("{%#x, %#x} ", d->m.pair[0], d->m.pair[1]);
+    }
+    printf("\n");
+
     // update internal root
     if (lfs_pair_cmp(oldpair, lfs->root) == 0) {
         LFS_DEBUG("Relocating root %"PRIx32" %"PRIx32,
@@ -4365,6 +4518,18 @@ static int lfs_fs_relocate(lfs_t *lfs,
         lfs->root[1] = newpair[1];
     }
 
+    // update relocate chain if needed
+    if (lfs_pair_cmp(oldpair, lfs->relocate_tail) == 0) {
+        lfs->relocate_tail[0] = newpair[0];
+        lfs->relocate_tail[1] = newpair[1];
+    }
+    if (lfs_pair_cmp(oldpair, lfs->relocate_end) == 0) {
+        lfs->relocate_end[0] = newpair[0];
+        lfs->relocate_end[1] = newpair[1];
+    }
+
+    // TODO is this doing double work????
     // update internally tracked dirs
     for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
         if (lfs_pair_cmp(oldpair, d->m.pair) == 0) {
@@ -4379,6 +4544,12 @@ static int lfs_fs_relocate(lfs_t *lfs,
         }
     }
 
+    printf("mlist after: ");
+    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
+        printf("{%#x, %#x} ", d->m.pair[0], d->m.pair[1]);
+    }
+    printf("\n");
+
     bool parentispred = (lfs_pair_cmp(parent->tail, oldpair) == 0);
 
     // update parent if needed
@@ -4413,43 +4584,53 @@ static int lfs_fs_relocate(lfs_t *lfs,
         }
 
         // next step, clean up orphans
-        lfs_fs_preporphans(lfs, -!parentispred);
+        // TODO hm
+        //lfs_fs_preporphans(lfs, -!parentispred);
     }
 
-    if (!parentispred) {
-        // find pred
-        int err = lfs_fs_pred(lfs, oldpair, parent);
-        if (err && err != LFS_ERR_NOENT) {
+    // TODO clean this up?
+    if (lfs_gstate_hasorphans(&lfs->gstate)) {
+        int err = lfs_fs_deorphan(lfs);
+        if (err) {
             return err;
         }
-
-        // if we can't find dir, it must be new
-        if (err != LFS_ERR_NOENT) {
-            // fix pending move in this pair? this looks like an optimization
-            // but is in fact _required_ since relocating may outdate the move.
-            uint16_t moveid = 0x3ff;
-            if (lfs_gstate_hasmovehere(&lfs->gstate, parent->pair)) {
-                moveid = lfs_tag_id(lfs->gstate.tag);
-                LFS_DEBUG("Fixing move while relocating "
-                        "%"PRIx32" %"PRIx32" %"PRIx16"\n",
-                        parent->pair[0], parent->pair[1], moveid);
-                lfs_fs_prepmove(lfs, 0x3ff, NULL);
-            }
-
-            // replace bad pair, either we clean up desync, or no desync occured
-            lfs_pair_tole32(newpair);
-            err = lfs_dir_commit(lfs, parent, LFS_MKATTRS(
-                    {LFS_MKTAG_IF(moveid != 0x3ff,
-                            LFS_TYPE_DELETE, moveid, 0)},
-                    {LFS_MKTAG(LFS_TYPE_TAIL + parent->split, 0x3ff, 8),
-                            newpair}));
-            lfs_pair_fromle32(newpair);
-            if (err) {
-                return err;
-            }
-        }
     }
+
+//
+//    if (!parentispred) {
+//        // find pred
+//        int err = lfs_fs_pred(lfs, oldpair, parent);
+//        if (err && err != LFS_ERR_NOENT) {
+//            return err;
+//        }
+//
+//        // if we can't find dir, it must be new
+//        if (err != LFS_ERR_NOENT) {
+//            // fix pending move in this pair? this looks like an optimization
+//            // but is in fact _required_ since relocating may outdate the move.
+//            uint16_t moveid = 0x3ff;
+//            if (lfs_gstate_hasmovehere(&lfs->gstate, parent->pair)) {
+//                moveid = lfs_tag_id(lfs->gstate.tag);
+//                LFS_DEBUG("Fixing move while relocating "
+//                        "%"PRIx32" %"PRIx32" %"PRIx16"\n",
+//                        parent->pair[0], parent->pair[1], moveid);
+//                lfs_fs_prepmove(lfs, 0x3ff, NULL);
+//            }
+//
+//            // replace bad pair, either we clean up desync, or no desync occured
+//            lfs_pair_tole32(newpair);
+//            err = lfs_dir_commit(lfs, parent, LFS_MKATTRS(
+//                    {LFS_MKTAG_IF(moveid != 0x3ff,
+//                            LFS_TYPE_DELETE, moveid, 0)},
+//                    {LFS_MKTAG(LFS_TYPE_TAIL + parent->split, 0x3ff, 8),
+//                            newpair}));
+//            lfs_pair_fromle32(newpair);
+//            if (err) {
+//                return err;
+//            }
+//        }
+//    }
 
     return 0;
 }
@@ -4503,12 +4684,16 @@ static int lfs_fs_deorphan(lfs_t *lfs) {
         return 0;
     }
 
+    // TODO needme?
+    LFS_ASSERT(lfs_pair_isnull(lfs->relocate_tail));
+
     // Fix any orphans
     lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
     lfs_mdir_t dir;
 
     // iterate over all directory directory entries
-    while (!lfs_pair_isnull(pdir.tail)) {
+    // TODO handle this more gracefully
+    while (lfs_gstate_hasorphans(&lfs->gstate) && !lfs_pair_isnull(pdir.tail)) {
         int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
         if (err) {
             return err;

lfs.h (7 lines changed)

@@ -117,7 +117,8 @@ enum lfs_type {
     // internal chip sources
     LFS_FROM_NOOP = 0x000,
     LFS_FROM_MOVE = 0x101,
-    LFS_FROM_USERATTRS = 0x102,
+    LFS_FROM_DROP = 0x102,
+    LFS_FROM_USERATTRS = 0x103,
 };
 
 // File open flags
@@ -311,6 +312,7 @@ typedef struct lfs_mdir {
     uint32_t etag;
     uint16_t count;
     bool erased;
+    bool first; // TODO come on
     bool split;
     lfs_block_t tail[2];
     lfs_block_t branch[2];
@@ -368,6 +370,9 @@ typedef struct lfs {
     lfs_cache_t pcache;
 
     lfs_block_t root[2];
+    lfs_block_t relocate_tail[2];
+    lfs_block_t relocate_end[2];
+    bool relocate_do_hack; // TODO fixme
     struct lfs_mlist {
         struct lfs_mlist *next;
         uint16_t id;

View File

@@ -326,7 +326,7 @@ def main(args):
             mdir.rev,
             ' (was %s)' % ', '.join('%d' % m.rev for m in mdir.pair[1:])
             if len(mdir.pair) > 1 else '',
-            ' (corrupted)' if not mdir else ''))
+            ' (corrupted!)' if not mdir else ''))
         if args.all:
             mdir.dump_all(truncate=not args.no_truncate)
         elif args.log:

View File

@@ -5,6 +5,7 @@ import sys
 import json
 import io
 import itertools as it
+import collections as c
 from readmdir import Tag, MetadataPair
 
 def popc(x):
@@ -13,7 +14,7 @@ def popc(x):
 def ctz(x):
     return len(bin(x)) - len(bin(x).rstrip('0'))
 
-def dumpentries(args, mdir, f):
+def dumpentries(args, mdir, mdirs, f):
     for k, id_ in enumerate(mdir.ids):
         name = mdir[Tag('name', id_, 0)]
         struct_ = mdir[Tag('struct', id_, 0)]
@@ -22,8 +23,10 @@ def dumpentries(args, mdir, f):
             id_, name.typerepr(),
             json.dumps(name.data.decode('utf8')))
         if struct_.is_('dirstruct'):
-            desc += " dir {%#x, %#x}" % struct.unpack(
-                '<II', struct_.data[:8].ljust(8, b'\xff'))
+            pair = struct.unpack('<II', struct_.data[:8].ljust(8, b'\xff'))
+            desc += " dir {%#x, %#x}%s" % (
+                pair[0], pair[1],
+                '?' if frozenset(pair) not in mdirs else '')
         if struct_.is_('ctzstruct'):
             desc += " ctz {%#x} size %d" % struct.unpack(
                 '<II', struct_.data[:8].ljust(8, b'\xff'))
@@ -93,19 +96,15 @@ def dumpentries(args, mdir, f):
 def main(args):
     with open(args.disk, 'rb') as f:
-        dirs = []
         superblock = None
-        gstate = b''
-        mdirs = []
+        gstate = b'\0\0\0\0\0\0\0\0\0\0\0\0'
+        mdirs = c.OrderedDict()
         cycle = False
         tail = (args.block1, args.block2)
-        hard = False
-        while True:
-            for m in it.chain((m for d in dirs for m in d), mdirs):
-                if set(m.blocks) == set(tail):
-                    # cycle detected
-                    cycle = m.blocks
-            if cycle:
+        while tail:
+            if frozenset(tail) in mdirs:
+                # cycle detected
+                cycle = tail
                 break
 
             # load mdir
@@ -128,6 +127,13 @@ def main(args):
             except KeyError:
                 mdir.tail = None
 
+            try:
+                mdir.branch = mdir[Tag('branch', 0, 0)]
+                if mdir.branch.size != 8 or mdir.branch.data == 8*b'\xff':
+                    mdir.branch = None
+            except KeyError:
+                mdir.branch = None
+
             # have superblock?
             try:
                 nsuperblock = mdir[
@@ -144,41 +150,55 @@ def main(args):
             except KeyError:
                 pass
 
-            # add to directories
-            mdirs.append(mdir)
-            if mdir.tail is None or not mdir.tail.is_('hardtail'):
-                dirs.append(mdirs)
-                mdirs = []
-
-            if mdir.tail is None:
-                break
-
-            tail = struct.unpack('<II', mdir.tail.data)
-            hard = mdir.tail.is_('hardtail')
-
-        # find paths
-        dirtable = {}
-        for dir in dirs:
-            dirtable[frozenset(dir[0].blocks)] = dir
-
-        pending = [("/", dirs[0])]
+            # add to metadata-pairs
+            mdirs[frozenset(mdir.blocks)] = mdir
+
+            tail = (struct.unpack('<II', mdir.tail.data)
+                if mdir.tail else None)
+
+        # derive paths and build directories
+        dirs = {}
+        rogue = {}
+        pending = [('/', (args.block1, args.block2))]
+
         while pending:
-            path, dir = pending.pop(0)
-            for mdir in dir:
+            path, branch = pending.pop(0)
+            dir = []
+            while branch and frozenset(branch) in mdirs:
+                mdir = mdirs[frozenset(branch)]
+                dir.append(mdir)
+
                 for tag in mdir.tags:
                     if tag.is_('dir'):
                         try:
-                            npath = tag.data.decode('utf8')
+                            npath = path + '/' + tag.data.decode('utf8')
+                            npath = npath.replace('//', '/')
                             dirstruct = mdir[Tag('dirstruct', tag.id, 0)]
-                            nblocks = struct.unpack('<II', dirstruct.data)
-                            nmdir = dirtable[frozenset(nblocks)]
-                            pending.append(((path + '/' + npath), nmdir))
+                            npair = struct.unpack('<II', dirstruct.data)
+                            pending.append((npath, npair))
                         except KeyError:
                             pass
-            dir[0].path = path.replace('//', '/')
-
-        # dump tree
+
+                branch = (struct.unpack('<II', mdir.branch.data)
+                    if mdir.branch else None)
+
+            if not dir:
+                rogue[path] = branch
+            else:
+                dirs[path] = dir
+
+        # also find orphans
+        not_orphans = {frozenset(mdir.blocks)
+            for dir in dirs.values()
+            for mdir in dir}
+        orphans = []
+        for pair, mdir in mdirs.items():
+            if pair not in not_orphans:
+                if len(orphans) > 0 and (pair == frozenset(
+                        struct.unpack('<II', orphans[-1][-1].tail.data))):
+                    orphans[-1].append(mdir)
+                else:
+                    orphans.append([mdir])
+
+        # print littlefs + version info
         version = ('?', '?')
         if superblock:
             version = tuple(reversed(
@@ -187,24 +207,30 @@ def main(args):
             "data (truncated, if it fits)"
             if not any([args.no_truncate, args.tags, args.log, args.all]) else ""))
 
-        if gstate:
+        # print gstate
         print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
         tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
         blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
         if tag.size or not tag.isvalid:
             print(" orphans >=%d" % max(tag.size, 1))
         if tag.type:
-            print(" move dir {%#x, %#x} id %d" % (
-                blocks[0], blocks[1], tag.id))
+            print(" move dir {%#x, %#x}%s id %d" % (
+                blocks[0], blocks[1],
+                '?' if frozenset(blocks) not in mdirs else '',
+                tag.id))
 
-        for i, dir in enumerate(dirs):
-            print("dir %s" % (json.dumps(dir[0].path)
-                if hasattr(dir[0], 'path') else '(orphan)'))
+        # print dir info
+        for path, dir in it.chain(
+                sorted(dirs.items()),
+                zip(it.repeat(None), orphans)):
+            print("dir %s" % json.dumps(path) if path else "orphaned")
             for j, mdir in enumerate(dir):
-                print("mdir {%#x, %#x} rev %d%s" % (
-                    mdir.blocks[0], mdir.blocks[1], mdir.rev,
-                    ' (corrupted)' if not mdir else ''))
+                print("mdir {%#x, %#x} rev %d (was %d)%s%s" % (
+                    mdir.blocks[0], mdir.blocks[1], mdir.rev, mdir.pair[1].rev,
+                    ' (corrupted!)' if not mdir else '',
+                    ' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data)
+                    if mdir.tail else ''))
 
                 f = io.StringIO()
                 if args.tags:
@@ -214,21 +240,27 @@ def main(args):
                 elif args.all:
                     mdir.dump_all(f, truncate=not args.no_truncate)
                 else:
-                    dumpentries(args, mdir, f)
+                    dumpentries(args, mdir, mdirs, f)
 
                 lines = list(filter(None, f.getvalue().split('\n')))
                 for k, line in enumerate(lines):
                     print("%s %s" % (
-                        ' ' if i == len(dirs)-1 and j == len(dir)-1 else
+                        ' ' if j == len(dir)-1 else
                         'v' if k == len(lines)-1 else
-                        '.' if j == len(dir)-1 else
-                        '|',
+                        '|' if path else '.',
                         line))
 
-        if cycle:
-            print("*** cycle detected! -> {%#x, %#x} ***" % (cycle[0], cycle[1]))
+        for path, pair in rogue.items():
+            print("*** couldn't find dir %s {%#x, %#x}! ***" % (
+                json.dumps(path), pair[0], pair[1]))
 
         if cycle:
+            print("*** cycle detected {%#x, %#x}! ***" % (
+                cycle[0], cycle[1]))
+
+        if cycle:
+            return 3
+        elif rogue:
             return 2
         elif not all(mdir for dir in dirs for mdir in dir):
             return 1

View File

@@ -231,7 +231,7 @@ class TestCase:
             ncmd.extend(['-ex', 'r'])
             if failure.assert_:
                 ncmd.extend(['-ex', 'up 2'])
-        elif gdb == 'start':
+        elif gdb == 'start' or isinstance(gdb, int):
             ncmd.extend([
                 '-ex', 'b %s:%d' % (self.suite.path, self.code_lineno),
                 '-ex', 'r'])
@@ -329,7 +329,9 @@ class ReentrantTestCase(TestCase):
             persist = 'noerase'
 
         # exact cycle we should drop into debugger?
-        if gdb and failure and failure.cycleno == cycles:
+        if gdb and failure and (
+                failure.cycleno == cycles or
+                (isinstance(gdb, int) and gdb == cycles)):
             return super().test(gdb=gdb, persist=persist, cycles=cycles,
                 failure=failure, **args)
@@ -760,7 +762,8 @@ if __name__ == "__main__":
         help="Store disk image in a file.")
     parser.add_argument('-b', '--build', action='store_true',
         help="Only build the tests, do not execute.")
-    parser.add_argument('-g', '--gdb', choices=['init', 'start', 'assert'],
+    parser.add_argument('-g', '--gdb', metavar='{init,start,assert},CYCLE',
+        type=lambda n: n if n in {'init', 'start', 'assert'} else int(n, 0),
         nargs='?', const='assert',
         help="Drop into gdb on test failure.")
     parser.add_argument('--no-internal', action='store_true',