mirror of
https://github.com/eledio-devices/thirdparty-littlefs.git
synced 2025-11-01 16:14:13 +01:00
Compare commits: v2.1.2...fix-lfs-mi (1 commit)

Commit cb3b83d647

.travis.yml (26 lines changed)
@@ -248,30 +248,34 @@ jobs:
             -m "Generated v$LFS_VERSION_MAJOR prefixes")
         git reset --hard
         # Update major version branches (vN and vN-prefix)
-        git push --atomic https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \
+        git push https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \
             v$LFS_VERSION_MAJOR \
             v$LFS_VERSION_MAJOR-prefix
+        # Create patch version tag (vN.N.N)
+        curl -f -u "$GEKY_BOT_RELEASES" -X POST \
+            https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs \
+            -d "{
+                \"ref\": \"refs/tags/$LFS_VERSION\",
+                \"sha\": \"$TRAVIS_COMMIT\"
+            }"
         # Create minor release?
         [[ "$LFS_VERSION" == *.0 ]] || exit 0
         # Build release notes
-        PREV=$(git tag --sort=-v:refname -l "v*" | head -1)
+        PREV=$(git tag --sort=-v:refname -l "v*.0" | head -1)
         if [ ! -z "$PREV" ]
         then
             echo "PREV $PREV"
-            CHANGES=$(git log --oneline $PREV.. --grep='^Merge' --invert-grep)
+            CHANGES=$'### Changes\n\n'$( \
+                git log --oneline $PREV.. --grep='^Merge' --invert-grep)
             printf "CHANGES\n%s\n\n" "$CHANGES"
         fi
-        case ${GEKY_BOT_DRAFT:-minor} in
-            true) DRAFT=true ;;
-            minor) DRAFT=$(jq -R 'endswith(".0")' <<< "$LFS_VERSION") ;;
-            false) DRAFT=false ;;
-        esac
-        # Create the release and patch version tag (vN.N.N)
+        # Create the release
         curl -f -u "$GEKY_BOT_RELEASES" -X POST \
             https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \
             -d "{
                 \"tag_name\": \"$LFS_VERSION\",
                 \"name\": \"${LFS_VERSION%.0}\",
                 \"target_commitish\": \"$TRAVIS_COMMIT\",
-                \"draft\": $DRAFT,
+                \"draft\": true,
                 \"body\": $(jq -sR '.' <<< "$CHANGES")
             }" #"
         SCRIPT
DESIGN.md (54 lines changed)
@@ -254,7 +254,7 @@ have weaknesses that limit their usefulness. But if we merge the two they can
 mutually solve each other's limitations.
 
 This is the idea behind littlefs. At the sub-block level, littlefs is built
-out of small, two block logs that provide atomic updates to metadata anywhere
+out of small, two blocks logs that provide atomic updates to metadata anywhere
 on the filesystem. At the super-block level, littlefs is a CObW tree of blocks
 that can be evicted on demand.
@@ -676,7 +676,7 @@ block, this cost is fairly reasonable.
 ---
 
 This is a new data structure, so we still have several questions. What is the
-storage overhead? Can the number of pointers exceed the size of a block? How do
+storage overage? Can the number of pointers exceed the size of a block? How do
 we store a CTZ skip-list in our metadata pairs?
 
 To find the storage overhead, we can look at the data structure as multiple
@@ -742,8 +742,8 @@ where:
 2. popcount(![x]) = the number of bits that are 1 in ![x]
 
 Initial tests of this surprising property seem to hold. As ![n] approaches
-infinity, we end up with an average overhead of 2 pointers, which matches our
-assumption from earlier. During iteration, the popcount function seems to
+infinity, we end up with an average overhead of 2 pointers, which matches what
+our assumption from earlier. During iteration, the popcount function seems to
 handle deviations from this average. Of course, just to make sure I wrote a
 quick script that verified this property for all 32-bit integers.
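The identity behind the claim in this hunk can be spot-checked directly. A minimal sketch of such a check (our own, not the author's script; `__builtin_ctz`/`__builtin_popcount` assume GCC or Clang): the pointers stored across blocks 1..n total 2n - popcount(n), so the per-block average approaches 2.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t total = 0;
    for (uint32_t n = 1; n <= 1000000; n++) {
        total += __builtin_ctz(n) + 1;  // pointers held by block n
        assert(total == 2*(uint64_t)n - (uint64_t)__builtin_popcount(n));
    }
    printf("average overhead: %f pointers/block\n", total / 1000000.0);
    return 0;
}
```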
@@ -767,7 +767,7 @@ overflow, but we can avoid this by rearranging the equation a bit:
 
 ![off = N - (B-2w/8)n - (w/8)popcount(n)][ctz-formula7]
 
-Our solution requires quite a bit of math, but computers are very good at math.
+Our solution requires quite a bit of math, but computer are very good at math.
 Now we can find both our block index and offset from a size in _O(1)_, letting
 us store CTZ skip-lists with only a pointer and size.
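In code, the O(1) lookup described here comes out roughly as below, a sketch mirroring lfs.c's `lfs_ctz_index` with w = 32 (so w/8 = 4 bytes per pointer) and B = `block_size`; `__builtin_popcount` assumes GCC or Clang:

```c
#include <stdint.h>

// Map a byte position to (block index, offset in block) in O(1),
// using the formula above.
static uint32_t ctz_index(uint32_t block_size, uint32_t *off) {
    uint32_t size = *off;
    uint32_t b = block_size - 2*4;  // B - 2w/8: data bytes per block
    uint32_t n = size / b;          // first-order estimate of the index
    if (n == 0) {
        return 0;
    }
    // correct the estimate with the popcount term
    n = (size - 4*(__builtin_popcount(n-1) + 2)) / b;
    *off = size - b*n - 4*__builtin_popcount(n);  // offset within block n
    return n;
}
```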
@@ -850,7 +850,7 @@ nearly every write to the filesystem.
 
 Normally, block allocation involves some sort of free list or bitmap stored on
 the filesystem that is updated with free blocks. However, with power
-resilience, keeping these structures consistent becomes difficult. It doesn't
+resilience, keeping these structure consistent becomes difficult. It doesn't
 help that any mistake in updating these structures can result in lost blocks
 that are impossible to recover.
@@ -894,9 +894,9 @@ high-risk error conditions.
 ---
 
 Our block allocator needs to find free blocks efficiently. You could traverse
-through every block on storage and check each one against our filesystem tree;
-however, the runtime would be abhorrent. We need to somehow collect multiple
-blocks per traversal.
+through every block on storage and check each one against our filesystem tree,
+however the runtime would be abhorrent. We need to somehow collect multiple
+blocks each traversal.
 
 Looking at existing designs, some larger filesystems that use a similar "drop
 it on the floor" strategy store a bitmap of the entire storage in [RAM]. This
@@ -920,8 +920,8 @@ a brute force traversal. Instead of a bitmap the size of storage, we keep track
 of a small, fixed-size bitmap called the lookahead buffer. During block
 allocation, we take blocks from the lookahead buffer. If the lookahead buffer
 is empty, we scan the filesystem for more free blocks, populating our lookahead
-buffer. In each scan we use an increasing offset, circling the storage as
-blocks are allocated.
+buffer. Each scan we use an increasing offset, circling the storage as blocks
+are allocated.
 
 Here's what it might look like to allocate 4 blocks on a decently busy
 filesystem with a 32 bit lookahead and a total of 128 blocks (512 KiB
@@ -950,7 +950,7 @@ alloc = 112 lookahead: ffff8000
 ```
 
 This lookahead approach has a runtime complexity of _O(n²)_ to completely
-scan storage; however, bitmaps are surprisingly compact, and in practice only
+scan storage, however, bitmaps are surprisingly compact, and in practice only
 one or two passes are usually needed to find free blocks. Additionally, the
 performance of the allocator can be optimized by adjusting the block size or
 size of the lookahead buffer, trading either write granularity or RAM for
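A toy model of this allocator (ours, not littlefs's actual implementation) makes the mechanics concrete: a small bitmap window is consumed block by block, and an exhausted window signals that the caller should rescan the filesystem tree at the next offset.

```c
#include <stdbool.h>
#include <stdint.h>

#define LOOKAHEAD    32   // bits in the window, as in the example above
#define BLOCK_COUNT 128

struct alloc {
    uint32_t lookahead;   // bitmap window: 1 = in use, 0 = free
    uint32_t off;         // block number represented by bit 0
    uint32_t i;           // next bit to scan
};

// Take one free block from the window; returns false when the window is
// exhausted and the caller must rescan the tree and advance `off`.
static bool alloc_next(struct alloc *a, uint32_t *block) {
    while (a->i < LOOKAHEAD) {
        uint32_t i = a->i++;
        if (!(a->lookahead & ((uint32_t)1 << i))) {
            a->lookahead |= (uint32_t)1 << i;   // mark allocated
            *block = (a->off + i) % BLOCK_COUNT;
            return true;
        }
    }
    return false;
}
```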
@@ -1173,9 +1173,9 @@ We may find that the new block is also bad, but hopefully after repeating this
 cycle we'll eventually find a new block where a write succeeds. If we don't,
 that means that all blocks in our storage are bad, and we've reached the end of
 our device's usable life. At this point, littlefs will return an "out of space"
-error. This is technically true, as there are no more good blocks, but as an
-added benefit it also matches the error condition expected by users of
-dynamically sized data.
+error, which is technically true, there are no more good blocks, but as an
+added benefit also matches the error condition expected by users of dynamically
+sized data.
 
 ---
@@ -1187,7 +1187,7 @@ original data even after it has been corrupted. One such mechanism for this is
 ECC is an extension to the idea of a checksum. Where a checksum such as CRC can
 detect that an error has occurred in the data, ECC can detect and actually
 correct some amount of errors. However, there is a limit to how many errors ECC
-can detect: the [Hamming bound][wikipedia-hamming-bound]. As the number of
+can detect, call the [Hamming bound][wikipedia-hamming-bound]. As the number of
 errors approaches the Hamming bound, we may still be able to detect errors, but
 can no longer fix the data. If we've reached this point the block is
 unrecoverable.
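For reference, the bound invoked here is the standard sphere-packing (Hamming) bound; stated in our words, not the littlefs docs': a binary code with $2^k$ codewords of length $n$ that corrects up to $t$ errors must satisfy

$$2^k \sum_{j=0}^{t} \binom{n}{j} \le 2^n.$$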
@@ -1202,7 +1202,7 @@ chip itself.
 In littlefs, ECC is entirely optional. Read errors can instead be prevented
 proactively by wear leveling. But it's important to note that ECC can be used
 at the block device level to modestly extend the life of a device. littlefs
-respects any errors reported by the block device, allowing a block device to
+respects any errors reported by the block device, allow a block device to
 provide additional aggressive error detection.
 
 ---
@@ -1231,7 +1231,7 @@ Generally, wear leveling algorithms fall into one of two categories:
   we need to consider all blocks, including blocks that already contain data.
 
 As a tradeoff for code size and complexity, littlefs (currently) only provides
-dynamic wear leveling. This is a best effort solution. Wear is not distributed
+dynamic wear leveling. This is a best efforts solution. Wear is not distributed
 perfectly, but it is distributed among the free blocks and greatly extends the
 life of a device.
@@ -1378,7 +1378,7 @@ We can make several improvements. First, instead of giving each file its own
 metadata pair, we can store multiple files in a single metadata pair. One way
 to do this is to directly associate a directory with a metadata pair (or a
 linked list of metadata pairs). This makes it easy for multiple files to share
-the directory's metadata pair for logging and reduces the collective storage
+the directory's metadata pair for logging and reduce the collective storage
 overhead.
 
 The strict binding of metadata pairs and directories also gives users
@@ -1816,12 +1816,12 @@ while manipulating the directory tree (foreshadowing!).
 
 ## The move problem
 
-We have one last challenge: the move problem. Phrasing the problem is simple:
+We have one last challenge. The move problem. Phrasing the problem is simple:
 
 How do you atomically move a file between two directories?
 
 In littlefs we can atomically commit to directories, but we can't create
-an atomic commit that spans multiple directories. The filesystem must go
+an atomic commit that span multiple directories. The filesystem must go
 through a minimum of two distinct states to complete a move.
 
 To make matters worse, file moves are a common form of synchronization for
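A toy model of why a move takes two states (entirely our illustration, no littlefs API involved): each directory's commit is atomic on its own, but nothing spans the pair of commits.

```c
#include <stdbool.h>
#include <stdio.h>

struct dir { bool has_file; };

// each call stands in for one atomic metadata-pair commit
static void commit(struct dir *d, bool has_file) { d->has_file = has_file; }

int main(void) {
    struct dir src = {true}, dst = {false};
    commit(&dst, true);   // state 1: file now visible in both directories
    // power loss here would leave a duplicate file
    commit(&src, false);  // state 2: move complete
    printf("src=%d dst=%d\n", src.has_file, dst.has_file);
    return 0;
}
```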
@@ -1831,13 +1831,13 @@ atomic moves right.
 So what can we do?
 
 - We definitely can't just let power-loss result in duplicated or lost files.
-  This could easily break users' code and would only reveal itself in extreme
+  This could easily break user's code and would only reveal itself in extreme
   cases. We were only able to be lazy about the threaded linked-list because
   it isn't user facing and we can handle the corner cases internally.
 
-- Some filesystems propagate COW operations up the tree until a common parent
-  is found. Unfortunately this interacts poorly with our threaded tree and
-  brings back the issue of upward propagation of wear.
+- Some filesystems propagate COW operations up the tree until finding a common
+  parent. Unfortunately this interacts poorly with our threaded tree and brings
+  back the issue of upward propagation of wear.
 
 - In a previous version of littlefs we tried to solve this problem by going
   back and forth between the source and destination, marking and unmarking the
@@ -1852,7 +1852,7 @@ introduction of a mechanism called "global state".
 ---
 
 Global state is a small set of state that can be updated from _any_ metadata
-pair. Combining global state with metadata pairs' ability to update multiple
+pair. Combining global state with metadata pair's ability to update multiple
 entries in one commit gives us a powerful tool for crafting complex atomic
 operations.
@@ -1910,7 +1910,7 @@ the filesystem is mounted.
 
 You may have noticed that global state is very expensive. We keep a copy in
 RAM and a delta in an unbounded number of metadata pairs. Even if we reset
-the global state to its initial value, we can't easily clean up the deltas on
+the global state to its initial value we can't easily clean up the deltas on
 disk. For this reason, it's very important that we keep the size of global
 state bounded and extremely small. But, even with a strict budget, global
 state is incredibly valuable.
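The mechanics can be sketched in a few lines (our gloss on the text, not littlefs's actual structs): global state is never stored whole, only as xor-deltas, so a commit to any single pair can flip it.

```c
#include <stdint.h>

typedef struct { uint32_t tag; uint32_t pair[2]; } gstate_t;

// fold one pair's on-disk delta into the in-RAM copy (done at mount)
static void gstate_xor(gstate_t *a, const gstate_t *d) {
    a->tag     ^= d->tag;
    a->pair[0] ^= d->pair[0];
    a->pair[1] ^= d->pair[1];
}
// to change global state from any metadata pair, commit delta = old ^ new
```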
Makefile (2 lines changed)
@@ -29,7 +29,7 @@ override CFLAGS += -DLFS_YES_TRACE
 endif
 override CFLAGS += -I.
 override CFLAGS += -std=c99 -Wall -pedantic
-override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef
+override CFLAGS += -Wextra -Wshadow -Wjump-misses-init
 # Remove missing-field-initializers because of GCC bug
 override CFLAGS += -Wno-missing-field-initializers

lfs_emubd.c
@@ -102,7 +102,6 @@ int lfs_emubd_create(const struct lfs_config *cfg, const char *path) {
     if (res < 1) {
         err = -errno;
         LFS_TRACE("lfs_emubd_create -> %"PRId32, err);
-        fclose(f);
         return err;
     }
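What all of these emubd hunks have in common: an early-return error path taken after fopen() must fclose() the stream first, or the emulated block device leaks one file descriptor per failed call. A self-contained sketch of the pattern (ours, simplified from the hunks):

```c
#include <errno.h>
#include <stdio.h>

static int emubd_read_sketch(const char *path, void *buf, size_t size) {
    FILE *f = fopen(path, "rb");
    if (!f) {
        return -errno;
    }
    size_t res = fread(buf, 1, size, f);
    if (res < size && !feof(f)) {
        int err = -errno;
        fclose(f);   // without this, the descriptor leaks on the error path
        return err;
    }
    fclose(f);
    return 0;
}
```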
@@ -125,7 +124,6 @@ int lfs_emubd_create(const struct lfs_config *cfg, const char *path) {
     if (res < 1) {
         err = -errno;
         LFS_TRACE("lfs_emubd_create -> %"PRId32, err);
-        fclose(f);
         return err;
     }
@@ -180,7 +178,6 @@ int lfs_emubd_read(const struct lfs_config *cfg, lfs_block_t block,
     if (err) {
         err = -errno;
         LFS_TRACE("lfs_emubd_read -> %d", err);
-        fclose(f);
         return err;
     }
@@ -188,7 +185,6 @@ int lfs_emubd_read(const struct lfs_config *cfg, lfs_block_t block,
     if (res < size && !feof(f)) {
         err = -errno;
         LFS_TRACE("lfs_emubd_read -> %d", err);
-        fclose(f);
         return err;
     }
@@ -234,7 +230,6 @@ int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
     if (err) {
         err = -errno;
         LFS_TRACE("lfs_emubd_prog -> %d", err);
-        fclose(f);
         return err;
     }
@@ -242,7 +237,6 @@ int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
     if (res < size) {
         err = -errno;
         LFS_TRACE("lfs_emubd_prog -> %d", err);
-        fclose(f);
         return err;
     }
@@ -250,7 +244,6 @@ int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
     if (err) {
         err = -errno;
         LFS_TRACE("lfs_emubd_prog -> %d", err);
-        fclose(f);
         return err;
     }
@@ -259,7 +252,6 @@ int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
     if (res < 1) {
         err = -errno;
         LFS_TRACE("lfs_emubd_prog -> %d", err);
-        fclose(f);
         return err;
     }
@@ -348,7 +340,6 @@ int lfs_emubd_sync(const struct lfs_config *cfg) {
     if (res < 1) {
         int err = -errno;
         LFS_TRACE("lfs_emubd_sync -> %d", err);
-        fclose(f);
         return err;
     }
@@ -373,7 +364,6 @@ int lfs_emubd_sync(const struct lfs_config *cfg) {
     if (res < 1) {
         err = -errno;
         LFS_TRACE("lfs_emubd_sync -> %d", err);
-        fclose(f);
         return err;
     }
@@ -398,7 +388,6 @@ int lfs_emubd_sync(const struct lfs_config *cfg) {
     if (res < 1) {
         err = -errno;
         LFS_TRACE("lfs_emubd_sync -> %d", err);
-        fclose(f);
         return err;
     }
lfs.c (118 lines changed)
@@ -7,21 +7,19 @@
 #include "lfs.h"
 #include "lfs_util.h"
 
-#define LFS_BLOCK_NULL ((lfs_block_t)-1)
-#define LFS_BLOCK_INLINE ((lfs_block_t)-2)
-
 /// Caching block device operations ///
 static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
     // do not zero, cheaper if cache is readonly or only going to be
     // written with identical data (during relocates)
     (void)lfs;
-    rcache->block = LFS_BLOCK_NULL;
+    rcache->block = 0xffffffff;
 }
 
 static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) {
     // zero to avoid information leak
     memset(pcache->buffer, 0xff, lfs->cfg->cache_size);
-    pcache->block = LFS_BLOCK_NULL;
+    pcache->block = 0xffffffff;
 }
 
 static int lfs_bd_read(lfs_t *lfs,
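For reference, the two sentinels toggled throughout this file, taken from the defines in the hunk above; on a 32-bit lfs_block_t the named constants and the literals are bit-identical:

```c
#define LFS_BLOCK_NULL   ((lfs_block_t)-1)  // == 0xffffffff, "no block"
#define LFS_BLOCK_INLINE ((lfs_block_t)-2)  // == 0xfffffffe, data inlined in metadata
```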
@@ -29,7 +27,7 @@ static int lfs_bd_read(lfs_t *lfs,
         lfs_block_t block, lfs_off_t off,
         void *buffer, lfs_size_t size) {
     uint8_t *data = buffer;
-    LFS_ASSERT(block != LFS_BLOCK_NULL);
+    LFS_ASSERT(block != 0xffffffff);
     if (off+size > lfs->cfg->block_size) {
         return LFS_ERR_CORRUPT;
     }
@@ -123,7 +121,7 @@ static int lfs_bd_cmp(lfs_t *lfs,
 
 static int lfs_bd_flush(lfs_t *lfs,
         lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
-    if (pcache->block != LFS_BLOCK_NULL && pcache->block != LFS_BLOCK_INLINE) {
+    if (pcache->block != 0xffffffff && pcache->block != 0xfffffffe) {
         LFS_ASSERT(pcache->block < lfs->cfg->block_count);
         lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size);
         int err = lfs->cfg->prog(lfs->cfg, pcache->block,
@@ -173,7 +171,7 @@ static int lfs_bd_prog(lfs_t *lfs,
         lfs_block_t block, lfs_off_t off,
         const void *buffer, lfs_size_t size) {
     const uint8_t *data = buffer;
-    LFS_ASSERT(block != LFS_BLOCK_NULL);
+    LFS_ASSERT(block != 0xffffffff);
     LFS_ASSERT(off + size <= lfs->cfg->block_size);
 
     while (size > 0) {
@@ -203,7 +201,7 @@ static int lfs_bd_prog(lfs_t *lfs,
 
         // pcache must have been flushed, either by programming and
         // entire block or manually flushing the pcache
-        LFS_ASSERT(pcache->block == LFS_BLOCK_NULL);
+        LFS_ASSERT(pcache->block == 0xffffffff);
 
         // prepare pcache, first condition can no longer fail
         pcache->block = block;
@@ -231,7 +229,7 @@ static inline void lfs_pair_swap(lfs_block_t pair[2]) {
 }
 
 static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) {
-    return pair[0] == LFS_BLOCK_NULL || pair[1] == LFS_BLOCK_NULL;
+    return pair[0] == 0xffffffff || pair[1] == 0xffffffff;
 }
 
 static inline int lfs_pair_cmp(
@@ -352,7 +350,7 @@ static inline bool lfs_gstate_hasmovehere(const struct lfs_gstate *a,
 
 static inline void lfs_gstate_xororphans(struct lfs_gstate *a,
         const struct lfs_gstate *b, bool orphans) {
-    a->tag ^= LFS_MKTAG(0x800, 0, 0) & (b->tag ^ ((uint32_t)orphans << 31));
+    a->tag ^= LFS_MKTAG(0x800, 0, 0) & (b->tag ^ (orphans << 31));
 }
 
 static inline void lfs_gstate_xormove(struct lfs_gstate *a,
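A side note on the cast in this hunk (our reasoning, not stated in the commit): `orphans` is a bool, which promotes to signed int, and shifting a 1 into bit 31 of a signed int is undefined behavior in C99; the cast keeps the whole shift in unsigned arithmetic. The same reasoning applies to the `(lfs_tag_t)` cast in the ptag hunk further down.

```c
#include <stdbool.h>
#include <stdint.h>

static uint32_t orphan_bit(bool orphans) {
    return (uint32_t)orphans << 31;  // well-defined: 0 or 0x80000000
}
```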
@@ -573,7 +571,7 @@ static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
     while (size > 0) {
         lfs_size_t diff = size;
 
-        if (pcache && pcache->block == LFS_BLOCK_INLINE &&
+        if (pcache && pcache->block == 0xfffffffe &&
                 off < pcache->off + pcache->size) {
             if (off >= pcache->off) {
                 // is already in pcache?
@@ -590,7 +588,7 @@ static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
             diff = lfs_min(diff, pcache->off-off);
         }
 
-        if (rcache->block == LFS_BLOCK_INLINE &&
+        if (rcache->block == 0xfffffffe &&
                 off < rcache->off + rcache->size) {
             if (off >= rcache->off) {
                 // is already in rcache?
@@ -608,7 +606,7 @@ static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
         }
 
         // load to cache, first condition can no longer fail
-        rcache->block = LFS_BLOCK_INLINE;
+        rcache->block = 0xfffffffe;
         rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
         rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size),
                 lfs->cfg->cache_size);
@@ -725,7 +723,7 @@ static int lfs_dir_traverse(lfs_t *lfs,
             uint16_t fromid = lfs_tag_size(tag);
             uint16_t toid = lfs_tag_id(tag);
             int err = lfs_dir_traverse(lfs,
-                    buffer, 0, LFS_BLOCK_NULL, NULL, 0, true,
+                    buffer, 0, 0xffffffff, NULL, 0, true,
                     LFS_MKTAG(0x600, 0x3ff, 0),
                     LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0),
                     fromid, fromid+1, toid-fromid+diff,
@@ -785,15 +783,15 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
     // now scan tags to fetch the actual dir and find possible match
     for (int i = 0; i < 2; i++) {
         lfs_off_t off = 0;
-        lfs_tag_t ptag = LFS_BLOCK_NULL;
+        lfs_tag_t ptag = 0xffffffff;
 
         uint16_t tempcount = 0;
-        lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
+        lfs_block_t temptail[2] = {0xffffffff, 0xffffffff};
         bool tempsplit = false;
         lfs_stag_t tempbesttag = besttag;
 
         dir->rev = lfs_tole32(dir->rev);
-        uint32_t crc = lfs_crc(LFS_BLOCK_NULL, &dir->rev, sizeof(dir->rev));
+        uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
         dir->rev = lfs_fromle32(dir->rev);
 
         while (true) {
@@ -846,7 +844,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
             }
 
             // reset the next bit if we need to
-            ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31;
+            ptag ^= (lfs_tag_chunk(tag) & 1U) << 31;
 
             // toss our crc into the filesystem seed for
             // pseudorandom numbers
|
             dir->split = tempsplit;
 
             // reset crc
-            crc = LFS_BLOCK_NULL;
+            crc = 0xffffffff;
             continue;
         }
@@ -1250,7 +1248,7 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
         }
 
         // read erased state from next program unit
-        lfs_tag_t tag = LFS_BLOCK_NULL;
+        lfs_tag_t tag = 0xffffffff;
         int err = lfs_bd_read(lfs,
                 NULL, &lfs->rcache, sizeof(tag),
                 commit->block, noff, &tag, sizeof(tag));
@@ -1275,8 +1273,8 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
         }
 
         commit->off += sizeof(tag)+lfs_tag_size(tag);
-        commit->ptag = tag ^ ((lfs_tag_t)reset << 31);
-        commit->crc = LFS_BLOCK_NULL; // reset crc for next "commit"
+        commit->ptag = tag ^ (reset << 31);
+        commit->crc = 0xffffffff; // reset crc for next "commit"
     }
 
     // flush buffers
@@ -1289,7 +1287,7 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
     lfs_off_t off = commit->begin;
     lfs_off_t noff = off1;
     while (off < end) {
-        uint32_t crc = LFS_BLOCK_NULL;
+        uint32_t crc = 0xffffffff;
         for (lfs_off_t i = off; i < noff+sizeof(uint32_t); i++) {
             // leave it up to caching to make this efficient
             uint8_t dat;
@@ -1343,10 +1341,10 @@ static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
 
     // set defaults
     dir->off = sizeof(dir->rev);
-    dir->etag = LFS_BLOCK_NULL;
+    dir->etag = 0xffffffff;
     dir->count = 0;
-    dir->tail[0] = LFS_BLOCK_NULL;
-    dir->tail[1] = LFS_BLOCK_NULL;
+    dir->tail[0] = 0xffffffff;
+    dir->tail[1] = 0xffffffff;
     dir->erased = false;
     dir->split = false;
@@ -1436,7 +1434,7 @@ static int lfs_dir_compact(lfs_t *lfs,
     // find size
     lfs_size_t size = 0;
     int err = lfs_dir_traverse(lfs,
-            source, 0, LFS_BLOCK_NULL, attrs, attrcount, false,
+            source, 0, 0xffffffff, attrs, attrcount, false,
             LFS_MKTAG(0x400, 0x3ff, 0),
             LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
             begin, end, -begin,
@@ -1528,8 +1526,8 @@ static int lfs_dir_compact(lfs_t *lfs,
     struct lfs_commit commit = {
         .block = dir->pair[1],
         .off = 0,
-        .ptag = LFS_BLOCK_NULL,
-        .crc = LFS_BLOCK_NULL,
+        .ptag = 0xffffffff,
+        .crc = 0xffffffff,
 
         .begin = 0,
         .end = lfs->cfg->block_size - 8,
@@ -1558,7 +1556,7 @@ static int lfs_dir_compact(lfs_t *lfs,
 
     // traverse the directory, this time writing out all unique tags
     err = lfs_dir_traverse(lfs,
-            source, 0, LFS_BLOCK_NULL, attrs, attrcount, false,
+            source, 0, 0xffffffff, attrs, attrcount, false,
             LFS_MKTAG(0x400, 0x3ff, 0),
             LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
             begin, end, -begin,
@@ -1684,8 +1682,8 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
     }
 
     // calculate changes to the directory
-    lfs_tag_t deletetag = LFS_BLOCK_NULL;
-    lfs_tag_t createtag = LFS_BLOCK_NULL;
+    lfs_tag_t deletetag = 0xffffffff;
+    lfs_tag_t createtag = 0xffffffff;
     for (int i = 0; i < attrcount; i++) {
         if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) {
             createtag = attrs[i].tag;
@@ -1731,7 +1729,7 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
         .block = dir->pair[0],
         .off = dir->off,
         .ptag = dir->etag,
-        .crc = LFS_BLOCK_NULL,
+        .crc = 0xffffffff,
 
         .begin = dir->off,
         .end = lfs->cfg->block_size - 8,
@@ -1815,8 +1813,8 @@ compact:
         if (lfs_pair_cmp(d->m.pair, copy.pair) == 0) {
             d->m = *dir;
             if (d->id == lfs_tag_id(deletetag)) {
-                d->m.pair[0] = LFS_BLOCK_NULL;
-                d->m.pair[1] = LFS_BLOCK_NULL;
+                d->m.pair[0] = 0xffffffff;
+                d->m.pair[1] = 0xffffffff;
             } else if (d->id > lfs_tag_id(deletetag)) {
                 d->id -= 1;
                 if (d->type == LFS_TYPE_DIR) {
@@ -2131,7 +2129,7 @@ static int lfs_ctz_find(lfs_t *lfs,
         lfs_block_t head, lfs_size_t size,
         lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
     if (size == 0) {
-        *block = LFS_BLOCK_NULL;
+        *block = 0xffffffff;
         *off = 0;
         return 0;
     }
@@ -2329,7 +2327,6 @@ int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
     file->cfg = cfg;
     file->flags = flags | LFS_F_OPENED;
     file->pos = 0;
-    file->off = 0;
     file->cache.buffer = NULL;
 
     // allocate entry for file if it doesn't exist
@@ -2429,7 +2426,7 @@ int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
 
     if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
         // load inline files
-        file->ctz.head = LFS_BLOCK_INLINE;
+        file->ctz.head = 0xfffffffe;
         file->ctz.size = lfs_tag_size(tag);
         file->flags |= LFS_F_INLINE;
         file->cache.block = file->ctz.head;
@@ -2617,7 +2614,7 @@ static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
     }
 
     // keep our reference to the rcache in sync
-    if (lfs->rcache.block != LFS_BLOCK_NULL) {
+    if (lfs->rcache.block != 0xffffffff) {
         lfs_cache_drop(lfs, &orig.cache);
         lfs_cache_drop(lfs, &lfs->rcache);
     }
@@ -2765,7 +2762,7 @@ lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
             return err;
         }
     } else {
-        file->block = LFS_BLOCK_INLINE;
+        file->block = 0xfffffffe;
         file->off = file->pos;
     }
@@ -2891,7 +2888,7 @@ lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
             return err;
         }
     } else {
-        file->block = LFS_BLOCK_INLINE;
+        file->block = 0xfffffffe;
         file->off = file->pos;
     }
@@ -2981,7 +2978,6 @@ int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
         return LFS_ERR_INVAL;
     }
 
-    lfs_off_t pos = file->pos;
     lfs_off_t oldsize = lfs_file_size(lfs, file);
     if (size < oldsize) {
         // need to flush since directly changing metadata
@@ -3004,6 +3000,8 @@ int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
         file->ctz.size = size;
         file->flags |= LFS_F_DIRTY | LFS_F_READING;
     } else if (size > oldsize) {
+        lfs_off_t pos = file->pos;
+
         // flush+seek if not already at end
         if (file->pos != oldsize) {
             int err = lfs_file_seek(lfs, file, 0, LFS_SEEK_END);
@@ -3021,13 +3019,13 @@ int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
                 return res;
             }
         }
-    }
 
-    // restore pos
-    int err = lfs_file_seek(lfs, file, pos, LFS_SEEK_SET);
-    if (err < 0) {
-        LFS_TRACE("lfs_file_truncate -> %d", err);
-        return err;
+        // restore pos
+        int err = lfs_file_seek(lfs, file, pos, LFS_SEEK_SET);
+        if (err < 0) {
+            LFS_TRACE("lfs_file_truncate -> %d", err);
+            return err;
+        }
     }
 
     LFS_TRACE("lfs_file_truncate -> %d", 0);
@@ -3369,12 +3367,6 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
     lfs->cfg = cfg;
     int err = 0;
 
-    // validate that the lfs-cfg sizes were initiated properly before
-    // performing any arithmetic logics with them
-    LFS_ASSERT(lfs->cfg->read_size != 0);
-    LFS_ASSERT(lfs->cfg->prog_size != 0);
-    LFS_ASSERT(lfs->cfg->cache_size != 0);
-
     // check that block size is a multiple of cache size is a multiple
     // of prog and read sizes
     LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0);
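The sizes these asserts guard come from the user's lfs_config. A minimal sketch with example values (illustrative numbers in the style of littlefs's README example, not taken from this commit):

```c
#include "lfs.h"

static const struct lfs_config cfg = {
    // block device callbacks (.read, .prog, .erase, .sync) omitted here
    .read_size      = 16,    // must be nonzero
    .prog_size      = 16,    // must be nonzero
    .cache_size     = 16,    // must be nonzero, multiple of read/prog sizes
    .block_size     = 4096,  // multiple of cache_size
    .block_count    = 128,
    .lookahead_size = 16,
    .block_cycles   = 500,
};
```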
@@ -3382,7 +3374,7 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
     LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);
 
     // check that the block size is large enough to fit ctz pointers
-    LFS_ASSERT(4*lfs_npw2(LFS_BLOCK_NULL / (lfs->cfg->block_size-2*4))
+    LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
             <= lfs->cfg->block_size);
 
     // block_cycles = 0 is no longer supported.
@@ -3420,10 +3412,10 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
     lfs_cache_zero(lfs, &lfs->rcache);
     lfs_cache_zero(lfs, &lfs->pcache);
 
-    // setup lookahead, must be multiple of 64-bits, 32-bit aligned
+    // setup lookahead, must be multiple of 64-bits
     LFS_ASSERT(lfs->cfg->lookahead_size > 0);
     LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 &&
-            (uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0);
+            (uintptr_t)lfs->cfg->lookahead_buffer % 8 == 0);
     if (lfs->cfg->lookahead_buffer) {
         lfs->free.buffer = lfs->cfg->lookahead_buffer;
     } else {
@@ -3454,8 +3446,8 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
     }
 
     // setup default state
-    lfs->root[0] = LFS_BLOCK_NULL;
-    lfs->root[1] = LFS_BLOCK_NULL;
+    lfs->root[0] = 0xffffffff;
+    lfs->root[1] = 0xffffffff;
     lfs->mlist = NULL;
     lfs->seed = 0;
     lfs->gstate = (struct lfs_gstate){0};
@@ -4258,7 +4250,7 @@ static int lfs1_dir_fetch(lfs_t *lfs,
             continue;
         }
 
-        uint32_t crc = LFS_BLOCK_NULL;
+        uint32_t crc = 0xffffffff;
         lfs1_dir_tole32(&test);
         lfs1_crc(&crc, &test, sizeof(test));
         lfs1_dir_fromle32(&test);
@@ -4443,8 +4435,8 @@ static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
     }
 
     lfs->lfs1 = lfs1;
-    lfs->lfs1->root[0] = LFS_BLOCK_NULL;
-    lfs->lfs1->root[1] = LFS_BLOCK_NULL;
+    lfs->lfs1->root[0] = 0xffffffff;
+    lfs->lfs1->root[1] = 0xffffffff;
 
     // setup free lookahead
     lfs->free.off = 0;
@@ -4693,7 +4685,7 @@ int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
         dir2.pair[1] = dir1.pair[1];
         dir2.rev = dir1.d.rev;
         dir2.off = sizeof(dir2.rev);
-        dir2.etag = LFS_BLOCK_NULL;
+        dir2.etag = 0xffffffff;
         dir2.count = 0;
         dir2.tail[0] = lfs->lfs1->root[0];
         dir2.tail[1] = lfs->lfs1->root[1];

tests/test_truncate.sh
@@ -107,57 +107,6 @@ scripts/test.py << TEST
 lfs_unmount(&lfs) => 0;
 TEST
 
-echo "--- Write, truncate, and read ---"
-scripts/test.py << TEST
-lfs_mount(&lfs, &cfg) => 0;
-lfs_file_open(&lfs, &file, "sequence",
-        LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
-
-lfs_size_t size = lfs.cfg->cache_size;
-lfs_size_t qsize = size / 4;
-uint8_t *wb = buffer;
-uint8_t *rb = buffer + size;
-for (lfs_off_t j = 0; j < size; ++j) {
-    wb[j] = j;
-}
-
-/* Spread sequence over size */
-lfs_file_write(&lfs, &file, wb, size) => size;
-lfs_file_size(&lfs, &file) => size;
-lfs_file_tell(&lfs, &file) => size;
-
-lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
-lfs_file_tell(&lfs, &file) => 0;
-
-/* Chop off the last quarter */
-lfs_size_t trunc = size - qsize;
-lfs_file_truncate(&lfs, &file, trunc) => 0;
-lfs_file_tell(&lfs, &file) => 0;
-lfs_file_size(&lfs, &file) => trunc;
-
-/* Read should produce first 3/4 */
-lfs_file_read(&lfs, &file, rb, size) => trunc;
-memcmp(rb, wb, trunc) => 0;
-
-/* Move to 1/4 */
-lfs_file_size(&lfs, &file) => trunc;
-lfs_file_seek(&lfs, &file, qsize, LFS_SEEK_SET) => qsize;
-lfs_file_tell(&lfs, &file) => qsize;
-
-/* Chop to 1/2 */
-trunc -= qsize;
-lfs_file_truncate(&lfs, &file, trunc) => 0;
-lfs_file_tell(&lfs, &file) => qsize;
-lfs_file_size(&lfs, &file) => trunc;
-
-/* Read should produce second quarter */
-lfs_file_read(&lfs, &file, rb, size) => trunc - qsize;
-memcmp(rb, wb + qsize, trunc - qsize) => 0;
-
-lfs_file_close(&lfs, &file) => 0;
-lfs_unmount(&lfs) => 0;
-TEST
-
 echo "--- Truncate and write ---"
 scripts/test.py << TEST
 lfs_mount(&lfs, &cfg) => 0;