diff --git a/lfs.c b/lfs.c
index 0417202..3fe6389 100644
--- a/lfs.c
+++ b/lfs.c
@@ -3192,11 +3192,15 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
     LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
             <= lfs->cfg->block_size);
 
-    // block_cycles = 0 is no longer supported, must either set a number
-    // of erase cycles before moving logs to another block (~500 suggested),
-    // or explicitly disable wear-leveling with -1.
+    // block_cycles = 0 is no longer supported.
+    //
+    // block_cycles is the number of erase cycles before littlefs evicts
+    // metadata logs as a part of wear leveling. Suggested values are in the
+    // range of 100-1000, or set block_cycles to -1 to disable block-level
+    // wear-leveling.
     LFS_ASSERT(lfs->cfg->block_cycles != 0);
 
+    // setup read cache
     if (lfs->cfg->read_buffer) {
         lfs->rcache.buffer = lfs->cfg->read_buffer;
     } else {
diff --git a/lfs.h b/lfs.h
index 276b5a5..6abfcdb 100644
--- a/lfs.h
+++ b/lfs.h
@@ -190,9 +190,10 @@ struct lfs_config {
     // Number of erasable blocks on the device.
     lfs_size_t block_count;
 
-    // Number of erase cycles before we should move logs to another block.
-    // Suggested values are in the range 100-1000, with large values having
-    // better performance at the cost of less consistent wear distribution.
+    // Number of erase cycles before littlefs evicts metadata logs and moves
+    // the metadata to another block. Suggested values are in the
+    // range 100-1000, with large values having better performance at the cost
+    // of less consistent wear distribution.
     //
     // Set to -1 to disable block-level wear-leveling.
     int32_t block_cycles;
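
For context, a minimal sketch of a configuration that follows the updated
documentation. This is not part of the patch: the geometry values are
arbitrary examples and the my_* block-device callbacks are hypothetical
placeholders the caller would supply; only the block_cycles field and the
lfs_config layout come from lfs.h.

#include "lfs.h"

// hypothetical block-device callbacks supplied by the caller
extern int my_read(const struct lfs_config *c, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size);
extern int my_prog(const struct lfs_config *c, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size);
extern int my_erase(const struct lfs_config *c, lfs_block_t block);
extern int my_sync(const struct lfs_config *c);

static const struct lfs_config cfg = {
    .read  = my_read,
    .prog  = my_prog,
    .erase = my_erase,
    .sync  = my_sync,

    // example geometry, device dependent
    .read_size      = 16,
    .prog_size      = 16,
    .block_size     = 4096,
    .block_count    = 128,
    .cache_size     = 64,
    .lookahead_size = 16,

    // evict metadata logs after ~500 erase cycles; -1 disables
    // block-level wear-leveling, and 0 now fails the assert in lfs_init
    .block_cycles   = 500,
};

With this patch, mounting with block_cycles = 0 trips the assert in
lfs_init instead of being silently accepted, so callers must choose either
a positive eviction threshold or -1 explicitly.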