From 499083765c57f54f1f659126a22c43dd7fad4561 Mon Sep 17 00:00:00 2001
From: Christopher Haster
Date: Thu, 26 Nov 2020 15:27:01 -0600
Subject: [PATCH] Renamed cache_size -> buffer_size

This makes littlefs's usage of the term "cache" an entirely internal
concept and hopefully avoids some confusion about the usefulness of
throwing RAM > block_size at these buffers.

The term cache isn't entirely inaccurate; these buffers do act as
single-line caches. However, the term cache is more often used to
describe multi-line caches. Maybe multi-line caches will be added to
littlefs in the future, but at the moment the code-size cost isn't
worth it.
---
 .travis.yml                 |  4 ++--
 lfs.c                       | 38 +++++++++++++++++++-------------------
 lfs.h                       | 12 ++++++------
 scripts/test.py             |  4 ++--
 tests/test_orphans.toml     |  2 +-
 tests/test_relocations.toml |  4 ++--
 tests/test_truncate.toml    |  2 +-
 7 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index cb9b039..88ff484 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -50,7 +50,7 @@ _: &test-no-intrinsics
 _: &test-no-inline
   - make test TFLAGS+="-nrk -DLFS_INLINE_MAX=0"
 _: &test-byte-writes
-  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
+  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_BUFFER_SIZE=1"
 _: &test-block-cycles
   - make test TFLAGS+="-nrk -DLFS_BLOCK_CYCLES=1"
 _: &test-odd-block-count
@@ -220,7 +220,7 @@ jobs:
         -DLFS_BLOCK_SIZE=512
         -DLFS_BLOCK_COUNT=1024
         -DLFS_BLOCK_CYCLES=-1
-        -DLFS_CACHE_SIZE=64
+        -DLFS_BUFFER_SIZE=64
         -DLFS_LOOKAHEAD_SIZE=16
         -DLFS_NO_ASSERT -DLFS_NO_DEBUG -DLFS_NO_WARN -DLFS_NO_ERROR"
       if: branch !~ -prefix$
diff --git a/lfs.c b/lfs.c
index a33836c..09462e7 100644
--- a/lfs.c
+++ b/lfs.c
@@ -23,7 +23,7 @@
 #define LFS_CFG_BLOCK_SIZE(lfs) ((void)lfs, LFS_BLOCK_SIZE)
 #define LFS_CFG_BLOCK_COUNT(lfs) ((void)lfs, LFS_BLOCK_COUNT)
 #define LFS_CFG_BLOCK_CYCLES(lfs) ((void)lfs, LFS_BLOCK_CYCLES)
-#define LFS_CFG_CACHE_SIZE(lfs) ((void)lfs, LFS_CACHE_SIZE)
+#define LFS_CFG_BUFFER_SIZE(lfs) ((void)lfs, LFS_BUFFER_SIZE)
 #define LFS_CFG_LOOKAHEAD_SIZE(lfs) ((void)lfs, LFS_LOOKAHEAD_SIZE)
 #define LFS_CFG_READ_BUFFER(lfs) ((void)lfs, LFS_READ_BUFFER)
 #define LFS_CFG_PROG_BUFFER(lfs) ((void)lfs, LFS_PROG_BUFFER)
@@ -46,7 +46,7 @@
 #define LFS_CFG_BLOCK_SIZE(lfs) lfs->cfg->block_size
 #define LFS_CFG_BLOCK_COUNT(lfs) lfs->cfg->block_count
 #define LFS_CFG_BLOCK_CYCLES(lfs) lfs->cfg->block_cycles
-#define LFS_CFG_CACHE_SIZE(lfs) lfs->cfg->cache_size
+#define LFS_CFG_BUFFER_SIZE(lfs) lfs->cfg->buffer_size
 #define LFS_CFG_LOOKAHEAD_SIZE(lfs) lfs->cfg->lookahead_size
 #define LFS_CFG_READ_BUFFER(lfs) lfs->cfg->read_buffer
 #define LFS_CFG_PROG_BUFFER(lfs) lfs->cfg->prog_buffer
@@ -83,7 +83,7 @@ static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
 
 static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) {
     // zero to avoid information leak
-    memset(pcache->buffer, 0xff, LFS_CFG_CACHE_SIZE(lfs));
+    memset(pcache->buffer, 0xff, LFS_CFG_BUFFER_SIZE(lfs));
     pcache->block = LFS_BLOCK_NULL;
 }
 
@@ -158,7 +158,7 @@ static int lfs_cache_read(lfs_t *lfs,
                     lfs_alignup(off+hint, LFS_CFG_READ_SIZE(lfs)),
                     LFS_CFG_BLOCK_SIZE(lfs))
                 - rcache->off,
-                LFS_CFG_CACHE_SIZE(lfs));
+                LFS_CFG_BUFFER_SIZE(lfs));
         int err = LFS_CFG_BD_READ(lfs, rcache->block,
                 rcache->off, rcache->buffer, rcache->size);
         LFS_ASSERT(err <= 0);
@@ -257,10 +257,10 @@ static int lfs_cache_prog(lfs_t *lfs,
     while (size > 0) {
         if (block == pcache->block &&
                 off >= pcache->off &&
-                off < pcache->off + LFS_CFG_CACHE_SIZE(lfs)) {
+                off < pcache->off + LFS_CFG_BUFFER_SIZE(lfs)) {
             // already fits in pcache?
             lfs_size_t diff = lfs_min(size,
-                    LFS_CFG_CACHE_SIZE(lfs) - (off-pcache->off));
+                    LFS_CFG_BUFFER_SIZE(lfs) - (off-pcache->off));
             memcpy(&pcache->buffer[off-pcache->off], data, diff);
 
             data += diff;
@@ -268,7 +268,7 @@ static int lfs_cache_prog(lfs_t *lfs,
             size -= diff;
 
             pcache->size = lfs_max(pcache->size, off - pcache->off);
-            if (pcache->size == LFS_CFG_CACHE_SIZE(lfs)) {
+            if (pcache->size == LFS_CFG_BUFFER_SIZE(lfs)) {
                 // eagerly flush out pcache if we fill up
                 int err = lfs_cache_flush(lfs, pcache, rcache, validate);
                 if (err) {
@@ -694,7 +694,7 @@ static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
         rcache->block = LFS_BLOCK_INLINE;
         rcache->off = lfs_aligndown(off, LFS_CFG_READ_SIZE(lfs));
         rcache->size = lfs_min(lfs_alignup(off+hint, LFS_CFG_READ_SIZE(lfs)),
-                LFS_CFG_CACHE_SIZE(lfs));
+                LFS_CFG_BUFFER_SIZE(lfs));
         int err = lfs_dir_getslice(lfs, dir, gmask, gtag,
                 rcache->off, rcache->buffer, rcache->size);
         if (err < 0) {
@@ -1787,7 +1787,7 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
     for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
         if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
                 f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
-                f->ctz.size > LFS_CFG_CACHE_SIZE(lfs)) {
+                f->ctz.size > LFS_CFG_BUFFER_SIZE(lfs)) {
             int err = lfs_file_outline(lfs, f);
             if (err) {
                 return err;
@@ -2549,7 +2549,7 @@ static int lfs_file_opencommon(lfs_t *lfs, lfs_file_t *file,
     if (LFS_FILE_CFG_BUFFER(file)) {
         file->cache.buffer = LFS_FILE_CFG_BUFFER(file);
     } else {
-        file->cache.buffer = lfs_malloc(LFS_CFG_CACHE_SIZE(lfs));
+        file->cache.buffer = lfs_malloc(LFS_CFG_BUFFER_SIZE(lfs));
         if (!file->cache.buffer) {
             err = LFS_ERR_NOMEM;
             goto cleanup;
@@ -2566,7 +2566,7 @@ static int lfs_file_opencommon(lfs_t *lfs, lfs_file_t *file,
         file->flags |= LFS_F_INLINE;
         file->cache.block = file->ctz.head;
         file->cache.off = 0;
-        file->cache.size = LFS_CFG_CACHE_SIZE(lfs);
+        file->cache.size = LFS_CFG_BUFFER_SIZE(lfs);
 
         // don't always read (may be new/trunc file)
         if (file->ctz.size > 0) {
@@ -2696,7 +2696,7 @@ static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
     }
 
     // copy over new state of file
-    memcpy(file->cache.buffer, lfs->pcache.buffer, LFS_CFG_CACHE_SIZE(lfs));
+    memcpy(file->cache.buffer, lfs->pcache.buffer, LFS_CFG_BUFFER_SIZE(lfs));
     file->cache.block = lfs->pcache.block;
     file->cache.off = lfs->pcache.off;
     file->cache.size = lfs->pcache.size;
@@ -2989,7 +2989,7 @@ lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
     if ((file->flags & LFS_F_INLINE) &&
             lfs_max(file->pos+nsize, file->ctz.size) >
             lfs_min(0x3fe, lfs_min(
-                LFS_CFG_CACHE_SIZE(lfs), LFS_CFG_BLOCK_SIZE(lfs)/8))) {
+                LFS_CFG_BUFFER_SIZE(lfs), LFS_CFG_BLOCK_SIZE(lfs)/8))) {
         // inline file doesn't fit anymore
         int err = lfs_file_outline(lfs, file);
         if (err) {
@@ -3542,13 +3542,13 @@ static int lfs_initcommon(lfs_t *lfs) {
     // performing any arithmetic logics with them
     LFS_ASSERT(LFS_CFG_READ_SIZE(lfs) != 0);
     LFS_ASSERT(LFS_CFG_PROG_SIZE(lfs) != 0);
-    LFS_ASSERT(LFS_CFG_CACHE_SIZE(lfs) != 0);
+    LFS_ASSERT(LFS_CFG_BUFFER_SIZE(lfs) != 0);
 
     // check that block size is a multiple of cache size is a multiple
     // of prog and read sizes
-    LFS_ASSERT(LFS_CFG_CACHE_SIZE(lfs) % LFS_CFG_READ_SIZE(lfs) == 0);
-    LFS_ASSERT(LFS_CFG_CACHE_SIZE(lfs) % LFS_CFG_PROG_SIZE(lfs) == 0);
-    LFS_ASSERT(LFS_CFG_BLOCK_SIZE(lfs) % LFS_CFG_CACHE_SIZE(lfs) == 0);
+    LFS_ASSERT(LFS_CFG_BUFFER_SIZE(lfs) % LFS_CFG_READ_SIZE(lfs) == 0);
+    LFS_ASSERT(LFS_CFG_BUFFER_SIZE(lfs) % LFS_CFG_PROG_SIZE(lfs) == 0);
+    LFS_ASSERT(LFS_CFG_BLOCK_SIZE(lfs) % LFS_CFG_BUFFER_SIZE(lfs) == 0);
 
     // check that the block size is large enough to fit ctz pointers
     LFS_ASSERT(4*lfs_npw2(0xffffffff / (LFS_CFG_BLOCK_SIZE(lfs)-2*4))
@@ -3567,7 +3567,7 @@ static int lfs_initcommon(lfs_t *lfs) {
     if (LFS_CFG_READ_BUFFER(lfs)) {
         lfs->rcache.buffer = LFS_CFG_READ_BUFFER(lfs);
     } else {
-        lfs->rcache.buffer = lfs_malloc(LFS_CFG_CACHE_SIZE(lfs));
+        lfs->rcache.buffer = lfs_malloc(LFS_CFG_BUFFER_SIZE(lfs));
         if (!lfs->rcache.buffer) {
             err = LFS_ERR_NOMEM;
             goto cleanup;
@@ -3578,7 +3578,7 @@ static int lfs_initcommon(lfs_t *lfs) {
     if (LFS_CFG_PROG_BUFFER(lfs)) {
         lfs->pcache.buffer = LFS_CFG_PROG_BUFFER(lfs);
     } else {
-        lfs->pcache.buffer = lfs_malloc(LFS_CFG_CACHE_SIZE(lfs));
+        lfs->pcache.buffer = lfs_malloc(LFS_CFG_BUFFER_SIZE(lfs));
         if (!lfs->pcache.buffer) {
             err = LFS_ERR_NOMEM;
             goto cleanup;
diff --git a/lfs.h b/lfs.h
index edff0c5..ac3d421 100644
--- a/lfs.h
+++ b/lfs.h
@@ -178,12 +178,12 @@ struct lfs_cfg {
     // Set to -1 to disable block-level wear-leveling.
     int32_t block_cycles;
 
-    // Size of block caches. Each cache buffers a portion of a block in RAM.
-    // The littlefs needs a read cache, a program cache, and one additional
-    // cache per file. Larger caches can improve performance by storing more
+    // Size of internal buffers used to cache slices of blocks in RAM.
+    // The littlefs needs a read buffer, a program buffer, and one additional
+    // buffer per file. Larger buffers can improve performance by storing more
     // data and reducing the number of disk accesses. Must be a multiple of
     // the read and program sizes, and a factor of the block size.
-    lfs_size_t cache_size;
+    lfs_size_t buffer_size;
 
     // Size of the lookahead buffer in bytes. A larger lookahead buffer
     // increases the number of blocks found during an allocation pass. The
@@ -249,8 +249,8 @@ int lfs_bd_sync(void);
 #ifndef LFS_BLOCK_CYCLES
 #error "LFS_STATICCFG requires LFS_BLOCK_CYCLES"
 #endif
-#ifndef LFS_CACHE_SIZE
-#error "LFS_STATICCFG requires LFS_CACHE_SIZE"
+#ifndef LFS_BUFFER_SIZE
+#error "LFS_STATICCFG requires LFS_BUFFER_SIZE"
 #endif
 #ifndef LFS_LOOKAHEAD_SIZE
 #error "LFS_STATICCFG requires LFS_LOOKAHEAD_SIZE"
diff --git a/scripts/test.py b/scripts/test.py
index e9647a0..80a9241 100755
--- a/scripts/test.py
+++ b/scripts/test.py
@@ -78,7 +78,7 @@ DEFINES = {
     'LFS_BLOCK_SIZE': 512,
     'LFS_BLOCK_COUNT': 1024,
     'LFS_BLOCK_CYCLES': -1,
-    'LFS_CACHE_SIZE': '(64 % LFS_PROG_SIZE == 0 ? 64 : LFS_PROG_SIZE)',
+    'LFS_BUFFER_SIZE': '(64 % LFS_PROG_SIZE == 0 ? 64 : LFS_PROG_SIZE)',
     'LFS_LOOKAHEAD_SIZE': 16,
     'LFS_ERASE_VALUE': 0xff,
     'LFS_ERASE_CYCLES': 0,
@@ -107,7 +107,7 @@ PROLOGUE = """
         .block_size = LFS_BLOCK_SIZE,
         .block_count = LFS_BLOCK_COUNT,
         .block_cycles = LFS_BLOCK_CYCLES,
-        .cache_size = LFS_CACHE_SIZE,
+        .buffer_size = LFS_BUFFER_SIZE,
         .lookahead_size = LFS_LOOKAHEAD_SIZE,
     };
 
diff --git a/tests/test_orphans.toml b/tests/test_orphans.toml
index 01bc463..d5651b1 100644
--- a/tests/test_orphans.toml
+++ b/tests/test_orphans.toml
@@ -59,7 +59,7 @@ code = '''
 [[case]] # reentrant testing for orphans, basically just spam mkdir/remove
 reentrant = true
 # TODO fix this case, caused by non-DAG trees
-if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
+if = '!(DEPTH == 3 && LFS_BUFFER_SIZE != 64)'
 define = [
     {FILES=6, DEPTH=1, CYCLES=20},
     {FILES=26, DEPTH=1, CYCLES=20},
diff --git a/tests/test_relocations.toml b/tests/test_relocations.toml
index fb548dc..6b8ff89 100644
--- a/tests/test_relocations.toml
+++ b/tests/test_relocations.toml
@@ -148,7 +148,7 @@ code = '''
 # almost every tree operation needs a relocation
 reentrant = true
 # TODO fix this case, caused by non-DAG trees
-if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
+if = '!(DEPTH == 3 && LFS_BUFFER_SIZE != 64)'
 define = [
     {FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
     {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
@@ -210,7 +210,7 @@ code = '''
 [[case]] # reentrant testing for relocations, but now with random renames!
 reentrant = true
 # TODO fix this case, caused by non-DAG trees
-if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
+if = '!(DEPTH == 3 && LFS_BUFFER_SIZE != 64)'
 define = [
     {FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
     {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
diff --git a/tests/test_truncate.toml b/tests/test_truncate.toml
index 38346e2..4fffb4a 100644
--- a/tests/test_truncate.toml
+++ b/tests/test_truncate.toml
@@ -100,7 +100,7 @@ code = '''
     lfs_file_open(&lfs, &file, "sequence",
             LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
 
-    size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2);
+    size = lfs_min(lfs.cfg->buffer_size, sizeof(buffer)/2);
     lfs_size_t qsize = size / 4;
     uint8_t *wb = buffer;
     uint8_t *rb = buffer + size;
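
Migration note (a hedged sketch, not itself part of the patch): for builds
without LFS_STATICCFG the rename only changes the field name in struct
lfs_cfg; the sizing rule quoted in lfs.h is unchanged (a multiple of the
read and program sizes, and a factor of the block size). The values below
are illustrative, and the read/prog sizes and storage callbacks are omitted:

    #include "lfs.h"

    static const struct lfs_cfg cfg = {
        .block_size = 4096,     // illustrative geometry
        .block_count = 128,
        .block_cycles = 500,

        // was: .cache_size = 256,
        .buffer_size = 256,     // multiple of read/prog sizes,
                                // factor of block_size
        .lookahead_size = 16,
    };

LFS_STATICCFG builds rename the define the same way, e.g. -DLFS_CACHE_SIZE=64
becomes -DLFS_BUFFER_SIZE=64, as in the .travis.yml hunk above.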