Initial implementation of forward-looking erase-state CRCs

This change is necessary to handle out-of-order writes found by pjsg's
fuzzing work.

The problem is that (non-NOR) block devices can write pages in any
order, or even write random data in the case of a power loss. This
breaks littlefs's use of the first bit in a page to indicate the
erase state.

pjsg notes this behavior is documented in the W25Q here:
https://community.cypress.com/docs/DOC-10507

---

The basic idea here is to CRC the next page, and use this "erase-state CRC" to
check if the next page is erased and ready to accept programs.

.------------------. \   commit
|     metadata     | |
|                  | +---.
|                  | |   |
|------------------| |   |
| erase-state CRC -----. |
|------------------| | | |
|   commit CRC    ---|-|-'
|------------------| / |
|     padding      |   | padding (doesn't need CRC)
|                  |   |
|------------------| \ | next prog
|     erased?      | +-'
|        |         | |
|        v         | /
|                  |
|                  |
'------------------'

This is made a bit annoying by the fact that littlefs doesn't actually
store the page size (prog_size) in the superblock, since it doesn't
need to know the size for any other operation. We can work around this
by storing both the CRC and the size of the next page when necessary.
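
Concretely, the extra information is just a small size + CRC pair
appended to the commit's CRC tag, introduced as struct lfs_estate in
the lfs.c diff below:

    // erase-state info stored next to the commit CRC when there is a
    // following page left to program
    struct lfs_estate {
        lfs_size_t size;    // size of the next program unit (prog_size)
        uint32_t crc;       // expected CRC of that region while erased
    };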

Another interesting note is that we don't need to store any bit-tweaking
information, since we read the next page every time we would need to
know how to clobber the erase-state CRC. And since we only read
prog_size, this works really well with our caching, since cache_size
must be a multiple of prog_size.
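
As a rough sketch, condensed from the lfs_dir_fetchmatch hunk below
with error handling dropped, the bit tweaking comes straight from
re-reading the next page:

    // grab a tag-worth of bits from the next page so we can force
    // future commits to differ from the erased state
    lfs_tag_t etag;
    int err = lfs_bd_read(lfs,
            NULL, &lfs->rcache, lfs->cfg->block_size,
            dir->pair[0], dir->off, &etag, sizeof(etag));

    // perturb the valid bit if the erased state already looks "valid"
    dir->etag |= 0x80000000 & ~lfs_frombe32(etag);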

This also brings back the internal lfs_bd_crc function, which lets us
reuse some of the optimizations added to lfs_bd_cmp.
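
Its main new job is checking the expected erase-state CRC (estate, read
from the commit) against what is actually on disk, roughly (again
condensed from the diff, error handling dropped):

    // crc the full prog_size following the commit and compare against
    // the stored erase-state crc; a match means the next page is still
    // erased
    uint32_t tcrc = 0xffffffff;
    int err = lfs_bd_crc(lfs,
            NULL, &lfs->rcache, lfs->cfg->block_size,
            dir->pair[0], dir->off, estate.size, &tcrc);

    dir->erased = (tcrc == estate.crc);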

Needs some cleanup, but the idea is passing most of the relevant tests.

Author: Christopher Haster
Date:   2020-12-05 14:29:52 -06:00
Parent: 288a5cbc8d
Commit: 01a3b1f5f7

5 changed files with 416 additions and 76 deletions

@@ -80,11 +80,6 @@ int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
LFS_ASSERT(size % cfg->read_size == 0);
LFS_ASSERT(block < cfg->block_count);
// zero for reproducibility (in case file is truncated)
if (bd->cfg->erase_value != -1) {
memset(buffer, bd->cfg->erase_value, size);
}
// read
off_t res1 = lseek(bd->fd,
(off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
@@ -101,6 +96,11 @@ int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
return err;
}
// file truncated? zero for reproducibility
if (res2 < size) {
memset((uint8_t*)buffer + res2, 0, size-res2);
}
LFS_FILEBD_TRACE("lfs_filebd_read -> %d", 0);
return 0;
}

@@ -32,11 +32,8 @@ int lfs_rambd_createcfg(const struct lfs_config *cfg,
}
}
// zero for reproducibility?
if (bd->cfg->erase_value != -1) {
memset(bd->buffer, bd->cfg->erase_value,
cfg->block_size * cfg->block_count);
}
// zero for reproducibility (this matches filebd)
memset(bd->buffer, 0, cfg->block_size * cfg->block_count);
LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", 0);
return 0;

lfs.c

@@ -122,16 +122,15 @@ static int lfs_bd_cmp(lfs_t *lfs,
for (lfs_off_t i = 0; i < size; i += diff) {
uint8_t dat[8];
diff = lfs_min(size-i, sizeof(dat));
int res = lfs_bd_read(lfs,
int err = lfs_bd_read(lfs,
pcache, rcache, hint-i,
block, off+i, &dat, diff);
if (res) {
return res;
if (err) {
return err;
}
res = memcmp(dat, data + i, diff);
int res = memcmp(dat, data + i, diff);
if (res) {
return res < 0 ? LFS_CMP_LT : LFS_CMP_GT;
}
@@ -140,6 +139,27 @@ static int lfs_bd_cmp(lfs_t *lfs,
return LFS_CMP_EQ;
}
static int lfs_bd_crc(lfs_t *lfs,
const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
lfs_block_t block, lfs_off_t off, lfs_size_t size, uint32_t *crc) {
lfs_size_t diff = 0;
for (lfs_off_t i = 0; i < size; i += diff) {
uint8_t dat[8];
diff = lfs_min(size-i, sizeof(dat));
int err = lfs_bd_read(lfs,
pcache, rcache, hint-i,
block, off+i, &dat, diff);
if (err) {
return err;
}
*crc = lfs_crc(*crc, &dat, diff);
}
return 0;
}
#ifndef LFS_READONLY
static int lfs_bd_flush(lfs_t *lfs,
lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
@@ -394,6 +414,22 @@ static inline void lfs_gstate_tole32(lfs_gstate_t *a) {
a->pair[1] = lfs_tole32(a->pair[1]);
}
// operations on estate in CRC tags
struct lfs_estate {
lfs_size_t size;
uint32_t crc;
};
static void lfs_estate_fromle32(struct lfs_estate *estate) {
estate->size = lfs_fromle32(estate->size);
estate->crc = lfs_fromle32(estate->crc);
}
static void lfs_estate_tole32(struct lfs_estate *estate) {
estate->size = lfs_tole32(estate->size);
estate->crc = lfs_tole32(estate->crc);
}
// other endianness operations
static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
ctz->head = lfs_fromle32(ctz->head);
@@ -901,14 +937,10 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
}
crc = lfs_crc(crc, &tag, sizeof(tag));
tag = lfs_frombe32(tag) ^ ptag;
tag = (lfs_frombe32(tag) ^ ptag) & 0x7fffffff;
// next commit not yet programmed or we're not in valid range
if (!lfs_tag_isvalid(tag)) {
dir->erased = (lfs_tag_type1(ptag) == LFS_TYPE_CRC &&
dir->off % lfs->cfg->prog_size == 0);
break;
} else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
// out of range?
if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
dir->erased = false;
break;
}
@@ -916,11 +948,31 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
ptag = tag;
if (lfs_tag_type1(tag) == LFS_TYPE_CRC) {
lfs_off_t noff = off + sizeof(tag);
struct lfs_estate estate;
if (lfs_tag_chunk(tag) == 3) {
err = lfs_bd_read(lfs,
NULL, &lfs->rcache, lfs->cfg->block_size,
dir->pair[0], noff, &estate, sizeof(estate));
if (err) {
if (err == LFS_ERR_CORRUPT) {
dir->erased = false;
break;
}
return err;
}
crc = lfs_crc(crc, &estate, sizeof(estate));
lfs_estate_fromle32(&estate);
noff += sizeof(estate);
}
// check the crc attr
uint32_t dcrc;
err = lfs_bd_read(lfs,
NULL, &lfs->rcache, lfs->cfg->block_size,
dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc));
dir->pair[0], noff, &dcrc, sizeof(dcrc));
if (err) {
if (err == LFS_ERR_CORRUPT) {
dir->erased = false;
@@ -935,9 +987,6 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
break;
}
// reset the next bit if we need to
ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31;
// toss our crc into the filesystem seed for
// pseudorandom numbers, note we use another crc here
// as a collection function because it is sufficiently
@@ -955,15 +1004,39 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
// reset crc
crc = 0xffffffff;
continue;
}
// crc the entry first, hopefully leaving it in the cache
for (lfs_off_t j = sizeof(tag); j < lfs_tag_dsize(tag); j++) {
uint8_t dat;
// check if the next page is erased
//
// this may look inefficient, but it's surprisingly efficient
// since cache_size is probably > prog_size, so the data will
// always remain in cache for the next iteration
if (lfs_tag_chunk(tag) == 3) {
// first we get a tag-worth of bits, this is so we can
// tweak our current tag to force future writes to be
// different than the erased state
lfs_tag_t etag;
err = lfs_bd_read(lfs,
NULL, &lfs->rcache, lfs->cfg->block_size,
dir->pair[0], off+j, &dat, 1);
dir->pair[0], dir->off, &etag, sizeof(etag));
if (err) {
// TODO can we stop duplicating this error condition?
if (err == LFS_ERR_CORRUPT) {
dir->erased = false;
break;
}
return err;
}
// perturb valid bit?
dir->etag |= 0x80000000 & ~lfs_frombe32(etag);
// crc the rest of the full prog_size, including etag in case
// the actual esize is < tag size (though this shouldn't
// happen normally)
uint32_t tcrc = 0xffffffff;
err = lfs_bd_crc(lfs,
NULL, &lfs->rcache, lfs->cfg->block_size,
dir->pair[0], dir->off, estate.size, &tcrc);
if (err) {
if (err == LFS_ERR_CORRUPT) {
dir->erased = false;
@@ -972,7 +1045,31 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
return err;
}
crc = lfs_crc(crc, &dat, 1);
if (tcrc == estate.crc) {
dir->erased = true;
break;
}
} else {
// end of block commit
// TODO handle backwards compat?
dir->erased = false;
break;
}
continue;
}
// crc the entry first, hopefully leaving it in the cache
err = lfs_bd_crc(lfs,
NULL, &lfs->rcache, lfs->cfg->block_size,
dir->pair[0], off+sizeof(tag),
lfs_tag_dsize(tag)-sizeof(tag), &crc);
if (err) {
if (err == LFS_ERR_CORRUPT) {
dir->erased = false;
break;
}
return err;
}
// directory modification tags?
@@ -1338,9 +1435,18 @@ static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
#ifndef LFS_READONLY
static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
// align to program units
const lfs_off_t end = lfs_alignup(commit->off + 2*sizeof(uint32_t),
//
// this gets a bit complex as we have two types of crcs:
// - 4-word crc with estate to check following prog (middle of block)
// - 2-word crc with no following prog (end of block)
const lfs_off_t end = lfs_alignup(
lfs_min(commit->off + 4*sizeof(uint32_t), lfs->cfg->block_size),
lfs->cfg->prog_size);
// clamp erase size to at least the tag size, this gives us a full tag
// we can use to intentionally invalidate erase CRCs
const lfs_size_t esize = lfs_max(lfs->cfg->prog_size, sizeof(lfs_tag_t));
lfs_off_t off1 = 0;
uint32_t crc1 = 0;
@@ -1354,40 +1460,77 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
noff = lfs_min(noff, end - 2*sizeof(uint32_t));
}
// read erased state from next program unit
lfs_tag_t tag = 0xffffffff;
// build crc tag
lfs_tag_t etag = 0;
lfs_tag_t tag = LFS_MKTAG(LFS_TYPE_CRC + 2, 0x3ff, noff - off);
lfs_size_t size = 2*sizeof(uint32_t);
struct {
lfs_tag_t tag;
union {
struct {
uint32_t crc;
} crc2;
struct {
struct lfs_estate estate;
uint32_t crc;
} crc3;
} u;
} data;
if (noff <= lfs->cfg->block_size - esize) {
// first we get a tag-worth of bits, this is so we can
// tweak our current tag to force future writes to be
// different than the erased state
int err = lfs_bd_read(lfs,
NULL, &lfs->rcache, sizeof(tag),
commit->block, noff, &tag, sizeof(tag));
NULL, &lfs->rcache, esize,
commit->block, noff, &etag, sizeof(etag));
// TODO handle erased-as-corrupt correctly?
if (err && err != LFS_ERR_CORRUPT) {
return err;
}
// build crc tag
bool reset = ~lfs_frombe32(tag) >> 31;
tag = LFS_MKTAG(LFS_TYPE_CRC + reset, 0x3ff, noff - off);
// find expected erased state
uint32_t ecrc = 0xffffffff;
err = lfs_bd_crc(lfs,
NULL, &lfs->rcache, esize,
commit->block, noff, esize, &ecrc);
// TODO handle erased-as-corrupt correctly?
if (err && err != LFS_ERR_CORRUPT) {
return err;
}
// write out crc
uint32_t footer[2];
footer[0] = lfs_tobe32(tag ^ commit->ptag);
commit->crc = lfs_crc(commit->crc, &footer[0], sizeof(footer[0]));
footer[1] = lfs_tole32(commit->crc);
err = lfs_bd_prog(lfs,
data.u.crc3.estate.size = esize;
data.u.crc3.estate.crc = ecrc;
lfs_estate_tole32(&data.u.crc3.estate);
// indicate we include estate
tag |= LFS_MKTAG(1, 0, 0);
size += sizeof(struct lfs_estate);
}
data.tag = lfs_tobe32(tag ^ commit->ptag);
commit->crc = lfs_crc(commit->crc, &data, size-sizeof(uint32_t));
((uint32_t*)&data)[size/sizeof(uint32_t) - 1] = lfs_tole32(commit->crc);
int err = lfs_bd_prog(lfs,
&lfs->pcache, &lfs->rcache, false,
commit->block, commit->off, &footer, sizeof(footer));
commit->block, commit->off, &data, size);
if (err) {
return err;
}
// keep track of non-padding checksum to verify
if (off1 == 0) {
off1 = commit->off + sizeof(uint32_t);
//off1 = commit->off + sizeof(uint32_t);
off1 = commit->off + size-sizeof(uint32_t);
crc1 = commit->crc;
}
commit->off += sizeof(tag)+lfs_tag_size(tag);
commit->ptag = tag ^ ((lfs_tag_t)reset << 31);
commit->crc = 0xffffffff; // reset crc for next "commit"
// perturb valid bit?
commit->ptag = tag | (0x80000000 & ~lfs_frombe32(etag));
// reset crc for next commit
commit->crc = 0xffffffff;
}
// flush buffers
@@ -1400,6 +1543,7 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
lfs_off_t off = commit->begin;
lfs_off_t noff = off1;
while (off < end) {
// TODO restructure to use lfs_bd_crc?
uint32_t crc = 0xffffffff;
for (lfs_off_t i = off; i < noff+sizeof(uint32_t); i++) {
// check against written crc, may catch blocks that
@@ -1408,7 +1552,6 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
return LFS_ERR_CORRUPT;
}
// leave it up to caching to make this efficient
uint8_t dat;
err = lfs_bd_read(lfs,
NULL, &lfs->rcache, noff+sizeof(uint32_t)-i,
@@ -1425,12 +1568,16 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
return LFS_ERR_CORRUPT;
}
// skip padding
off = lfs_min(end - noff, 0x3fe) + noff;
// skip padding, note that these always contain estate
off = noff - 3*sizeof(uint32_t);
off = lfs_min(end - off, 0x3fe) + off;
if (off < end) {
off = lfs_min(off, end - 2*sizeof(uint32_t));
}
noff = off + sizeof(uint32_t);
if (noff <= lfs->cfg->block_size - esize) {
noff += 2*sizeof(uint32_t);
}
}
return 0;

@@ -99,6 +99,15 @@ class Tag:
return struct.unpack('b', struct.pack('B', self.chunk))[0]
def is_(self, type):
try:
if ' ' in type:
type1, type3 = type.split()
return (self.is_(type1) and
(self.type & ~TAG_TYPES[type1][0]) == int(type3, 0))
return self.type == int(type, 0)
except (ValueError, KeyError):
return (self.type & TAG_TYPES[type][0]) == TAG_TYPES[type][1]
def mkmask(self):
@@ -112,11 +121,16 @@ class Tag:
if hasattr(self, 'off'): ntag.off = self.off
if hasattr(self, 'data'): ntag.data = self.data
if hasattr(self, 'crc'): ntag.crc = self.crc
if hasattr(self, 'erased'): ntag.erased = self.erased
return ntag
def typerepr(self):
if self.is_('crc') and getattr(self, 'crc', 0xffffffff) != 0xffffffff:
return 'crc (bad)'
crc_status = ' (bad)'
elif self.is_('crc') and getattr(self, 'erased', False):
crc_status = ' (era)'
else:
crc_status = ''
reverse_types = {v: k for k, v in TAG_TYPES.items()}
for prefix in range(12):
@@ -124,12 +138,12 @@ class Tag:
if (mask, self.type & mask) in reverse_types:
type = reverse_types[mask, self.type & mask]
if prefix > 0:
return '%s %#0*x' % (
type, prefix//4, self.type & ((1 << prefix)-1))
return '%s %#x%s' % (
type, self.type & ((1 << prefix)-1), crc_status)
else:
return type
return '%s%s' % (type, crc_status)
else:
return '%02x' % self.type
return '%02x%s' % (self.type, crc_status)
def idrepr(self):
return repr(self.id) if self.id != 0x3ff else '.'
@@ -182,11 +196,13 @@ class MetadataPair:
while len(block) - off >= 4:
ntag, = struct.unpack('>I', block[off:off+4])
tag = Tag(int(tag) ^ ntag)
tag = Tag((int(tag) ^ ntag) & 0x7fffffff)
tag.off = off + 4
tag.data = block[off+4:off+tag.dsize]
if tag.is_('crc'):
crc = binascii.crc32(block[off:off+4+4], crc)
if tag.is_('crc 0x3'):
crc = binascii.crc32(block[off:off+4*4], crc)
elif tag.is_('crc'):
crc = binascii.crc32(block[off:off+2*4], crc)
else:
crc = binascii.crc32(block[off:off+tag.dsize], crc)
tag.crc = crc
@@ -201,9 +217,18 @@ class MetadataPair:
if not corrupt:
self.log = self.all_.copy()
# end of commit?
if tag.is_('crc 0x3'):
esize, ecrc = struct.unpack('<II', tag.data[:8])
dcrc = 0xffffffff ^ binascii.crc32(block[off:off+esize])
if ecrc == dcrc:
tag.erased = True
corrupt = True
elif tag.is_('crc 0x2'):
corrupt = True
# reset tag parsing
crc = 0
tag = Tag(int(tag) ^ ((tag.type & 1) << 31))
# find active ids
self.ids = list(it.takewhile(

tests/test_powerloss.toml (new file)

@@ -0,0 +1,171 @@
# There are already a number of tests that test general operations under
# power-loss (see the reentrant attribute). These tests are for explicitly
# testing specific corner cases.
[[case]] # only a revision count
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "notebook") => 0;
lfs_file_open(&lfs, &file, "notebook/paper",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
strcpy((char*)buffer, "hello");
size = strlen("hello");
for (int i = 0; i < 5; i++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_sync(&lfs, &file) => 0;
}
lfs_file_close(&lfs, &file) => 0;
char rbuffer[256];
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// get pair/rev count
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "notebook") => 0;
lfs_block_t pair[2] = {dir.m.pair[0], dir.m.pair[1]};
uint32_t rev = dir.m.rev;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
// write just the revision count
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, pair[1], 0, bbuffer, LFS_BLOCK_SIZE) => 0;
memcpy(bbuffer, &(uint32_t){lfs_tole32(rev+1)}, sizeof(uint32_t));
cfg.erase(&cfg, pair[1]) => 0;
cfg.prog(&cfg, pair[1], 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_mount(&lfs, &cfg) => 0;
// can read?
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
// can write?
lfs_file_open(&lfs, &file, "notebook/paper",
LFS_O_WRONLY | LFS_O_APPEND) => 0;
strcpy((char*)buffer, "goodbye");
size = strlen("goodbye");
for (int i = 0; i < 5; i++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_sync(&lfs, &file) => 0;
}
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
strcpy((char*)buffer, "hello");
size = strlen("hello");
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
strcpy((char*)buffer, "goodbye");
size = strlen("goodbye");
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # partial prog, bytes may not be written in order!
define.BYTE_OFF = ["0", "LFS_PROG_SIZE-1", "LFS_PROG_SIZE/2"]
define.BYTE_VALUE = [0x33, 0xcc]
in = "lfs.c"
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "notebook") => 0;
lfs_file_open(&lfs, &file, "notebook/paper",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
strcpy((char*)buffer, "hello");
size = strlen("hello");
for (int i = 0; i < 5; i++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_sync(&lfs, &file) => 0;
}
lfs_file_close(&lfs, &file) => 0;
char rbuffer[256];
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// imitate a partial prog, value should not matter, if littlefs
// doesn't notice the partial prog testbd will assert
// get offset to next prog
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "notebook") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_off_t off = dir.m.off;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
// tweak byte
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
bbuffer[off + BYTE_OFF] = BYTE_VALUE;
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_mount(&lfs, &cfg) => 0;
// can read?
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
// can write?
lfs_file_open(&lfs, &file, "notebook/paper",
LFS_O_WRONLY | LFS_O_APPEND) => 0;
strcpy((char*)buffer, "goodbye");
size = strlen("goodbye");
for (int i = 0; i < 5; i++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_sync(&lfs, &file) => 0;
}
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
strcpy((char*)buffer, "hello");
size = strlen("hello");
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
strcpy((char*)buffer, "goodbye");
size = strlen("goodbye");
for (int i = 0; i < 5; i++) {
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''