Compare commits


1 Commit

Author SHA1 Message Date
Christopher Haster
b24ddac95e Added revision sum as tracking for filesystem stability
The idea is pretty simple: The revision count for each metadata block
only really needs 3 distinct states. The additional bit-width of the
revision count is available for other information. In this case, we can
sum all revision counts in the filesystem to encode state that can be
updated for free during any metadata block update.

The obvious state to store first is whether the filesystem is in a bad state
that requires a deorphan step.

This allows us to sidestep the expensive O(n^2) operation in favor of the
O(n) sum in the much more common case.

This patch adds the following rule:
If the sum of all revision counts is odd, the filesystem was caught
mid-operation and must be deorphaned.
2017-10-10 18:27:56 -05:00
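
As a rough illustration of the rule described in the commit message (not the actual littlefs code; the helper name and array-of-counts input are made up for this sketch), summing the revision counts and checking the low bit is all that is needed to decide whether a deorphan pass must run:

``` c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// Sketch of the parity rule: every metadata block carries a revision count,
// and if the sum of all of them is odd, the filesystem was caught
// mid-operation and a deorphan pass is required. A real check would walk
// the filesystem rather than take a flat array.
static bool needs_deorphan(const uint32_t *revs, size_t count) {
    uint32_t sum = 0;
    for (size_t i = 0; i < count; i++) {
        sum += revs[i];        // overflow is harmless, parity survives
    }
    return (sum & 1) != 0;     // odd sum => deorphan before allocating
}
```

Since every metadata update already rewrites one revision count, the filesystem can choose that count's parity to set or clear this flag without any extra writes.
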
22 changed files with 887 additions and 2487 deletions

.gitignore (9 lines changed)

@@ -1,9 +0,0 @@
# Compilation output
*.o
*.d
*.a
# Testing things
blocks/
lfs
test.c

.travis.yml

@@ -1,223 +1,48 @@
# Environment variables
env:
global:
- CFLAGS=-Werror
# Common test script
script:
# make sure example can at least compile
- sed -n '/``` c/,/```/{/```/d; p;}' README.md > test.c &&
make all CFLAGS+="
# make sure example can at least compile
- sed -n '/``` c/,/```/{/```/d; p;}' README.md > test.c &&
CFLAGS='
-Duser_provided_block_device_read=NULL
-Duser_provided_block_device_prog=NULL
-Duser_provided_block_device_erase=NULL
-Duser_provided_block_device_sync=NULL
-include stdio.h"
-include stdio.h -Werror' make all size
# run tests
- make test QUIET=1
# run tests
- make test
# run tests with a few different configurations
- make test QUIET=1 CFLAGS+="-DLFS_READ_SIZE=1 -DLFS_PROG_SIZE=1"
- make test QUIET=1 CFLAGS+="-DLFS_READ_SIZE=512 -DLFS_PROG_SIZE=512"
- make test QUIET=1 CFLAGS+="-DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD=2048"
- make clean test QUIET=1 CFLAGS+="-DLFS_NO_INTRINSICS"
# compile and find the code size with the smallest configuration
- make clean size
OBJ="$(ls lfs*.o | tr '\n' ' ')"
CFLAGS+="-DLFS_NO{ASSERT,DEBUG,WARN,ERROR}"
| tee sizes
# update status if we succeeded, compare with master if possible
- |
if [ "$TRAVIS_TEST_RESULT" -eq 0 ]
then
CURR=$(tail -n1 sizes | awk '{print $1}')
PREV=$(curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \
| jq -re "select(.sha != \"$TRAVIS_COMMIT\")
| .statuses[] | select(.context == \"$STAGE/$NAME\").description
| capture(\"code size is (?<size>[0-9]+)\").size" \
|| echo 0)
STATUS="Passed, code size is ${CURR}B"
if [ "$PREV" -ne 0 ]
then
STATUS="$STATUS ($(python -c "print '%+.2f' % (100*($CURR-$PREV)/$PREV.0)")%)"
fi
fi
# CI matrix
jobs:
include:
# native testing
- stage: test
env:
- STAGE=test
- NAME=littlefs-x86
# cross-compile with ARM (thumb mode)
- stage: test
env:
- STAGE=test
- NAME=littlefs-arm
- CC="arm-linux-gnueabi-gcc --static -mthumb"
- EXEC="qemu-arm"
install:
- sudo apt-get install gcc-arm-linux-gnueabi qemu-user
- arm-linux-gnueabi-gcc --version
- qemu-arm -version
# cross-compile with PowerPC
- stage: test
env:
- STAGE=test
- NAME=littlefs-powerpc
- CC="powerpc-linux-gnu-gcc --static"
- EXEC="qemu-ppc"
install:
- sudo apt-get install gcc-powerpc-linux-gnu qemu-user
- powerpc-linux-gnu-gcc --version
- qemu-ppc -version
# cross-compile with MIPS
- stage: test
env:
- STAGE=test
- NAME=littlefs-mips
- CC="mips-linux-gnu-gcc --static"
- EXEC="qemu-mips"
install:
- sudo add-apt-repository -y "deb http://archive.ubuntu.com/ubuntu/ xenial main universe"
- sudo apt-get -qq update
- sudo apt-get install gcc-mips-linux-gnu qemu-user
- mips-linux-gnu-gcc --version
- qemu-mips -version
# run tests with a few different configurations
- CFLAGS="-DLFS_READ_SIZE=1 -DLFS_PROG_SIZE=1" make test
- CFLAGS="-DLFS_READ_SIZE=512 -DLFS_PROG_SIZE=512" make test
- CFLAGS="-DLFS_BLOCK_COUNT=1023" make test
- CFLAGS="-DLFS_LOOKAHEAD=2047" make test
# self-host with littlefs-fuse for fuzz test
- stage: test
env:
- STAGE=test
- NAME=littlefs-fuse
install:
- sudo apt-get install libfuse-dev
- git clone --depth 1 https://github.com/geky/littlefs-fuse
- fusermount -V
- gcc --version
before_script:
# setup disk for littlefs-fuse
- rm -rf littlefs-fuse/littlefs/*
- cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
- make -C littlefs-fuse
- mkdir mount
- sudo chmod a+rw /dev/loop0
- dd if=/dev/zero bs=512 count=2048 of=disk
- losetup /dev/loop0 disk
script:
# self-host test
- make -C littlefs-fuse
- littlefs-fuse/lfs --format /dev/loop0
- littlefs-fuse/lfs /dev/loop0 mount
- littlefs-fuse/lfs --format /dev/loop0
- littlefs-fuse/lfs /dev/loop0 mount
- ls mount
- mkdir mount/littlefs
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
- cd mount/littlefs
- ls
- make -B test_dirs
- ls mount
- mkdir mount/littlefs
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
- cd mount/littlefs
- ls
- make -B test_dirs test_files QUIET=1
# Automatically update releases
- stage: deploy
env:
- STAGE=deploy
- NAME=deploy
script:
# Update tag for version defined in lfs.h
- LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3)
- LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16)))
- LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >> 0)))
- LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR"
- echo "littlefs version $LFS_VERSION"
- |
curl -u $GEKY_BOT_RELEASES -X POST \
https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs \
-d "{
\"ref\": \"refs/tags/$LFS_VERSION\",
\"sha\": \"$TRAVIS_COMMIT\"
}"
- |
curl -f -u $GEKY_BOT_RELEASES -X PATCH \
https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/$LFS_VERSION \
-d "{
\"sha\": \"$TRAVIS_COMMIT\"
}"
# Create release notes from commits
- LFS_PREV_VERSION="v$LFS_VERSION_MAJOR.$(($LFS_VERSION_MINOR-1))"
- |
if [ $(git tag -l "$LFS_PREV_VERSION") ]
then
curl -u $GEKY_BOT_RELEASES -X POST \
https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \
-d "{
\"tag_name\": \"$LFS_VERSION\",
\"name\": \"$LFS_VERSION\"
}"
RELEASE=$(
curl -f https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases/tags/$LFS_VERSION
)
CHANGES=$(
git log --oneline $LFS_PREV_VERSION.. --grep='^Merge' --invert-grep
)
curl -f -u $GEKY_BOT_RELEASES -X PATCH \
https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases/$(
jq -r '.id' <<< "$RELEASE"
) \
-d "$(
jq -s '{
"body": ((.[0] // "" | sub("(?<=\n)#+ Changes.*"; ""; "mi"))
+ "### Changes\n\n" + .[1])
}' <(jq '.body' <<< "$RELEASE") <(jq -sR '.' <<< "$CHANGES")
)"
fi
# Manage statuses
before_install:
- |
curl -u $GEKY_BOT_STATUSES -X POST \
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-d "{
\"context\": \"$STAGE/$NAME\",
\"state\": \"pending\",
\"description\": \"${STATUS:-In progress}\",
\"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\"
}"
- fusermount -V
- gcc --version
after_failure:
- |
curl -u $GEKY_BOT_STATUSES -X POST \
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-d "{
\"context\": \"$STAGE/$NAME\",
\"state\": \"failure\",
\"description\": \"${STATUS:-Failed}\",
\"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\"
}"
install:
- sudo apt-get install libfuse-dev
- git clone --depth 1 https://github.com/geky/littlefs-fuse
after_success:
- |
curl -u $GEKY_BOT_STATUSES -X POST \
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-d "{
\"context\": \"$STAGE/$NAME\",
\"state\": \"success\",
\"description\": \"${STATUS:-Passed}\",
\"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\"
}"
before_script:
- rm -rf littlefs-fuse/littlefs/*
- cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
# Job control
stages:
- name: test
- name: deploy
if: branch = master AND type = push
- mkdir mount
- sudo chmod a+rw /dev/loop0
- dd if=/dev/zero bs=512 count=2048 of=disk
- losetup /dev/loop0 disk

DESIGN.md (461 lines changed)

@@ -1,6 +1,6 @@
## The design of the little filesystem
A little fail-safe filesystem designed for embedded systems.
The littlefs is a little fail-safe filesystem designed for embedded systems.
```
| | | .---._____
@@ -16,9 +16,9 @@ more about filesystem design by tackling the relative unsolved problem of
managing a robust filesystem resilient to power loss on devices
with limited RAM and ROM.
The embedded systems the littlefs is targeting are usually 32 bit
microcontrollers with around 32KB of RAM and 512KB of ROM. These are
often paired with SPI NOR flash chips with about 4MB of flash storage.
The embedded systems the littlefs is targeting are usually 32bit
microcontrollers with around 32Kbytes of RAM and 512Kbytes of ROM. These are
often paired with SPI NOR flash chips with about 4Mbytes of flash storage.
Flash itself is a very interesting piece of technology with quite a bit of
nuance. Unlike most other forms of storage, writing to flash requires two
@@ -27,23 +27,22 @@ cheap, and can be very granular. For NOR flash specifically, byte-level
programs are quite common. Erasing, however, requires an expensive operation
that forces the state of large blocks of memory to reset in a destructive
reaction that gives flash its name. The [Wikipedia entry](https://en.wikipedia.org/wiki/Flash_memory)
has more information if you are interested in how this works.
has more information if you are interesting in how this works.
This leaves us with an interesting set of limitations that can be simplified
to three strong requirements:
1. **Power-loss resilient** - This is the main goal of the littlefs and the
focus of this project.
Embedded systems are usually designed without a shutdown routine and a
notable lack of user interface for recovery, so filesystems targeting
embedded systems must be prepared to lose power at any given time.
1. **Fail-safe** - This is actually the main goal of the littlefs and the focus
of this project. Embedded systems are usually designed without a shutdown
routine and a notable lack of user interface for recovery, so filesystems
targeting embedded systems should be prepared to lose power an any given
time.
Despite this state of things, there are very few embedded filesystems that
handle power loss in a reasonable manner, and most can become corrupted if
the user is unlucky enough.
handle power loss in a reasonable manner, and can become corrupted if the
user is unlucky enough.
2. **Wear leveling** - Due to the destructive nature of flash, most flash
2. **Wear awareness** - Due to the destructive nature of flash, most flash
chips have a limited number of erase cycles, usually in the order of around
100,000 erases per block for NOR flash. Filesystems that don't take wear
into account can easily burn through blocks used to store frequently updated
@@ -53,8 +52,7 @@ to three strong requirements:
which stores a file allocation table (FAT) at a specific offset from the
beginning of disk. Every block allocation will update this table, and after
100,000 updates, the block will likely go bad, rendering the filesystem
unusable even if there are many more erase cycles available on the storage
as a whole.
unusable even if there are many more erase cycles available on the storage.
3. **Bounded RAM/ROM** - Even with the design difficulties presented by the
previous two limitations, we have already seen several flash filesystems
@@ -74,32 +72,33 @@ to three strong requirements:
## Existing designs?
There are of course, many different existing filesystem. Here is a very rough
There are of course, many different existing filesystem. Heres a very rough
summary of the general ideas behind some of them.
Most of the existing filesystems fall into the one big category of filesystem
designed in the early days of spinny magnet disks. While there is a vast amount
of interesting technology and ideas in this area, the nature of spinny magnet
disks encourage properties, such as grouping writes near each other, that don't
disks encourage properties such as grouping writes near each other, that don't
make as much sense on recent storage types. For instance, on flash, write
locality is not important and can actually increase wear.
locality is not as important and can actually increase wear destructively.
One of the most popular designs for flash filesystems is called the
[logging filesystem](https://en.wikipedia.org/wiki/Log-structured_file_system).
The flash filesystems [jffs](https://en.wikipedia.org/wiki/JFFS)
and [yaffs](https://en.wikipedia.org/wiki/YAFFS) are good examples. In a
logging filesystem, data is not stored in a data structure on disk, but instead
and [yaffs](https://en.wikipedia.org/wiki/YAFFS) are good examples. In
logging filesystem, data is not store in a data structure on disk, but instead
the changes to the files are stored on disk. This has several neat advantages,
such as the fact that the data is written in a cyclic log format and naturally
such as the fact that the data is written in a cyclic log format naturally
wear levels as a side effect. And, with a bit of error detection, the entire
filesystem can easily be designed to be resilient to power loss. The
journaling component of most modern day filesystems is actually a reduced
journalling component of most modern day filesystems is actually a reduced
form of a logging filesystem. However, logging filesystems have a difficulty
scaling as the size of storage increases. And most filesystems compensate by
caching large parts of the filesystem in RAM, a strategy that is inappropriate
caching large parts of the filesystem in RAM, a strategy that is unavailable
for embedded systems.
Another interesting filesystem design technique is that of [copy-on-write (COW)](https://en.wikipedia.org/wiki/Copy-on-write).
Another interesting filesystem design technique that the littlefs borrows the
most from, is the [copy-on-write (COW)](https://en.wikipedia.org/wiki/Copy-on-write).
A good example of this is the [btrfs](https://en.wikipedia.org/wiki/Btrfs)
filesystem. COW filesystems can easily recover from corrupted blocks and have
natural protection against power loss. However, if they are not designed with
@@ -109,14 +108,14 @@ where the COW data structures are synchronized.
## Metadata pairs
The core piece of technology that provides the backbone for the littlefs is
the concept of metadata pairs. The key idea here is that any metadata that
the concept of metadata pairs. The key idea here, is that any metadata that
needs to be updated atomically is stored on a pair of blocks tagged with
a revision count and checksum. Every update alternates between these two
pairs, so that at any time there is always a backup containing the previous
state of the metadata.
Consider a small example where each metadata pair has a revision count,
a number as data, and the XOR of the block as a quick checksum. If
a number as data, and the xor of the block as a quick checksum. If
we update the data to a value of 9, and then to a value of 5, here is
what the pair of blocks may look like after each update:
```
@@ -132,7 +131,7 @@ what the pair of blocks may look like after each update:
After each update, we can find the most up to date value of data by looking
at the revision count.
Now consider what the blocks may look like if we suddenly lose power while
Now consider what the blocks may look like if we suddenly loss power while
changing the value of data to 5:
```
block 1 block 2 block 1 block 2 block 1 block 2
@@ -151,19 +150,19 @@ check our checksum we notice that block 1 was corrupted. So we fall back to
block 2 and use the value 9.
Using this concept, the littlefs is able to update metadata blocks atomically.
There are a few other tweaks, such as using a 32 bit CRC and using sequence
There are a few other tweaks, such as using a 32bit crc and using sequence
arithmetic to handle revision count overflow, but the basic concept
is the same. These metadata pairs define the backbone of the littlefs, and the
rest of the filesystem is built on top of these atomic updates.
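
As a rough sketch of how a pair might be resolved, with illustrative field names and a stand-in CRC check rather than the real on-disk structures:

``` c
#include <stdbool.h>
#include <stdint.h>

// Illustrative metadata block: a revision count, some payload, and a
// checksum. Field names are made up for this sketch.
struct meta_block {
    uint32_t rev;    // incremented on every update
    uint32_t data;   // the "number as data" from the example above
    uint32_t crc;    // checksum over the block
};

// Pick which half of a metadata pair is valid: prefer the newer revision
// (compared with sequence arithmetic so overflow is handled), and fall
// back to the other block if the newer one fails its checksum.
static const struct meta_block *meta_pick(
        const struct meta_block *a, const struct meta_block *b,
        bool (*crc_ok)(const struct meta_block *)) {
    const struct meta_block *newer =
            ((int32_t)(a->rev - b->rev) > 0) ? a : b;
    const struct meta_block *older = (newer == a) ? b : a;
    return crc_ok(newer) ? newer : older;
}
```
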
## Non-meta data
## Files
Now, the metadata pairs do come with some drawbacks. Most notably, each pair
requires two blocks for each block of data. I'm sure users would be very
unhappy if their storage was suddenly cut in half! Instead of storing
everything in these metadata blocks, the littlefs uses a COW data structure
for files which is in turn pointed to by a metadata block. When
we update a file, we create copies of any blocks that are modified until
we update a file, we create a copies of any blocks that are modified until
the metadata blocks are updated with the new copy. Once the metadata block
points to the new copy, we deallocate the old blocks that are no longer in use.
@@ -186,7 +185,7 @@ Here is what updating a one-block file may look like:
update data in file update metadata pair
```
It doesn't matter if we lose power while writing new data to block 5,
It doesn't matter if we lose power while writing block 5 with the new data,
since the old data remains unmodified in block 4. This example also
highlights how the atomic updates of the metadata blocks provide a
synchronization barrier for the rest of the littlefs.
@@ -201,14 +200,14 @@ Now we could just leave files here, copying the entire file on write
provides the synchronization without the duplicated memory requirements
of the metadata blocks. However, we can do a bit better.
## CTZ skip-lists
## CTZ linked-lists
There are many different data structures for representing the actual
files in filesystems. Of these, the littlefs uses a rather unique [COW](https://upload.wikimedia.org/wikipedia/commons/0/0c/Cow_female_black_white.jpg)
data structure that allows the filesystem to reuse unmodified parts of the
file without additional metadata pairs.
First lets consider storing files in a simple linked-list. What happens when we
First lets consider storing files in a simple linked-list. What happens when
append a block? We have to change the last block in the linked-list to point
to this new block, which means we have to copy out the last block, and change
the second-to-last block, and then the third-to-last, and so on until we've
@@ -225,12 +224,12 @@ Exhibit A: A linked-list
To get around this, the littlefs, at its heart, stores files backwards. Each
block points to its predecessor, with the first block containing no pointers.
If you think about for a while, it starts to make a bit of sense. Appending
blocks just point to their predecessor and no other blocks need to be updated.
If we update a block in the middle, we will need to copy out the blocks that
follow, but can reuse the blocks before the modified block. Since most file
operations either reset the file each write or append to files, this design
avoids copying the file in the most common cases.
If you think about this, it makes a bit of sense. Appending blocks just point
to their predecessor and no other blocks need to be updated. If we update
a block in the middle, we will need to copy out the blocks that follow,
but can reuse the blocks before the modified block. Since most file operations
either reset the file each write or append to files, this design avoids
copying the file in the most common cases.
```
Exhibit B: A backwards linked-list
@@ -242,24 +241,24 @@ Exhibit B: A backwards linked-list
```
However, a backwards linked-list does come with a rather glaring problem.
Iterating over a file _in order_ has a runtime cost of O(n^2). Gah! A quadratic
runtime to just _read_ a file? That's awful. Keep in mind reading files is
Iterating over a file _in order_ has a runtime of O(n^2). Gah! A quadratic
runtime to just _read_ a file? That's awful. Keep in mind reading files are
usually the most common filesystem operation.
To avoid this problem, the littlefs uses a multilayered linked-list. For
every nth block where n is divisible by 2^x, the block contains a pointer
to block n-2^x. So each block contains anywhere from 1 to log2(n) pointers
that skip to various sections of the preceding list. If you're familiar with
data-structures, you may have recognized that this is a type of deterministic
skip-list.
every block that is divisible by a power of two, the block contains an
additional pointer that points back by that power of two. Another way of
thinking about this design is that there are actually many linked-lists
threaded together, with each linked-lists skipping an increasing number
of blocks. If you're familiar with data-structures, you may have also
recognized that this is a deterministic skip-list.
The name comes from the use of the
[count trailing zeros (CTZ)](https://en.wikipedia.org/wiki/Count_trailing_zeros)
instruction, which allows us to calculate the power-of-two factors efficiently.
For a given block n, the block contains ctz(n)+1 pointers.
To find the power of two factors efficiently, we can use the instruction
[count trailing zeros (CTZ)](https://en.wikipedia.org/wiki/Count_trailing_zeros),
which is where this linked-list's name comes from.
```
Exhibit C: A backwards CTZ skip-list
Exhibit C: A backwards CTZ linked-list
.--------. .--------. .--------. .--------. .--------. .--------.
| data 0 |<-| data 1 |<-| data 2 |<-| data 3 |<-| data 4 |<-| data 5 |
| |<-| |--| |<-| |--| | | |
@@ -267,9 +266,6 @@ Exhibit C: A backwards CTZ skip-list
'--------' '--------' '--------' '--------' '--------' '--------'
```
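
The pointer count per block follows directly from the CTZ rule above; a small sketch, using the GCC/Clang `__builtin_ctz` builtin as a stand-in for any ctz implementation:

``` c
#include <stdint.h>

// Number of back-pointers stored in block n of the skip-list: ctz(n) + 1,
// with block 0 holding none since it is the start of the file.
static uint32_t skiplist_pointer_count(uint32_t n) {
    if (n == 0) {
        return 0;
    }
    return (uint32_t)__builtin_ctz(n) + 1;  // e.g. n=4 -> 3 pointers (n-1, n-2, n-4)
}
```
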
The additional pointers allow us to navigate the data-structure on disk
much more efficiently than in a singly linked-list.
Taking exhibit C for example, here is the path from data block 5 to data
block 1. You can see how data block 3 was completely skipped:
```
@@ -289,132 +285,15 @@ The path to data block 0 is even more quick, requiring only two jumps:
'--------' '--------' '--------' '--------' '--------' '--------'
```
We can find the runtime complexity by looking at the path to any block from
the block containing the most pointers. Every step along the path divides
the search space for the block in half. This gives us a runtime of O(log n).
To get to the block with the most pointers, we can perform the same steps
backwards, which puts the runtime at O(2 log n) = O(log n). The interesting
part about this data structure is that this optimal path occurs naturally
if we greedily choose the pointer that covers the most distance without passing
our target block.
The CTZ linked-list has quite a few interesting properties. All of the pointers
in the block can be found by just knowing the index in the list of the current
block, and, with a bit of math, the amortized overhead for the linked-list is
only two pointers per block. Most importantly, the CTZ linked-list has a
worst case lookup runtime of O(logn), which brings the runtime of reading a
file down to O(n logn). Given that the constant runtime is divided by the
amount of data we can store in a block, this is pretty reasonable.
So now we have a representation of files that can be appended trivially with
a runtime of O(1), and can be read with a worst case runtime of O(n log n).
Given that the the runtime is also divided by the amount of data we can store
in a block, this is pretty reasonable.
Unfortunately, the CTZ skip-list comes with a few questions that aren't
straightforward to answer. What is the overhead? How do we handle more
pointers than we can store in a block? How do we store the skip-list in
a directory entry?
One way to find the overhead per block is to look at the data structure as
multiple layers of linked-lists. Each linked-list skips twice as many blocks
as the previous linked-list. Another way of looking at it is that each
linked-list uses half as much storage per block as the previous linked-list.
As we approach infinity, the number of pointers per block forms a geometric
series. Solving this geometric series gives us an average of only 2 pointers
per block.
![overhead_per_block](https://latex.codecogs.com/svg.latex?%5Clim_%7Bn%5Cto%5Cinfty%7D%5Cfrac%7B1%7D%7Bn%7D%5Csum_%7Bi%3D0%7D%5E%7Bn%7D%5Cleft%28%5Ctext%7Bctz%7D%28i%29&plus;1%5Cright%29%20%3D%20%5Csum_%7Bi%3D0%7D%5Cfrac%7B1%7D%7B2%5Ei%7D%20%3D%202)
Finding the maximum number of pointers in a block is a bit more complicated,
but since our file size is limited by the integer width we use to store the
size, we can solve for it. Setting the overhead of the maximum pointers equal
to the block size we get the following equation. Note that a smaller block size
results in more pointers, and a larger word width results in larger pointers.
![maximum overhead](https://latex.codecogs.com/svg.latex?B%20%3D%20%5Cfrac%7Bw%7D%7B8%7D%5Cleft%5Clceil%5Clog_2%5Cleft%28%5Cfrac%7B2%5Ew%7D%7BB-2%5Cfrac%7Bw%7D%7B8%7D%7D%5Cright%29%5Cright%5Crceil)
where:
B = block size in bytes
w = word width in bits
Solving the equation for B gives us the minimum block size for various word
widths:
32 bit CTZ skip-list = minimum block size of 104 bytes
64 bit CTZ skip-list = minimum block size of 448 bytes
Since littlefs uses a 32 bit word size, we are limited to a minimum block
size of 104 bytes. This is a perfectly reasonable minimum block size, with most
block sizes starting around 512 bytes. So we can avoid additional logic to
avoid overflowing our block's capacity in the CTZ skip-list.
So, how do we store the skip-list in a directory entry? A naive approach would
be to store a pointer to the head of the skip-list, the length of the file
in bytes, the index of the head block in the skip-list, and the offset in the
head block in bytes. However this is a lot of information, and we can observe
that a file size maps to only one block index + offset pair. So it should be
sufficient to store only the pointer and file size.
But there is one problem, calculating the block index + offset pair from a
file size doesn't have an obvious implementation.
We can start by just writing down an equation. The first idea that comes to
mind is to just use a for loop to sum together blocks until we reach our
file size. We can write this equation as a summation:
![summation1](https://latex.codecogs.com/svg.latex?N%20%3D%20%5Csum_i%5En%5Cleft%5BB-%5Cfrac%7Bw%7D%7B8%7D%5Cleft%28%5Ctext%7Bctz%7D%28i%29&plus;1%5Cright%29%5Cright%5D)
where:
B = block size in bytes
w = word width in bits
n = block index in skip-list
N = file size in bytes
And this works quite well, but is not trivial to calculate. This equation
requires O(n) to compute, which brings the entire runtime of reading a file
to O(n^2 log n). Fortunately, the additional O(n) does not need to touch disk,
so it is not completely unreasonable. But if we could solve this equation into
a form that is easily computable, we can avoid a big slowdown.
Unfortunately, the summation of the CTZ instruction presents a big challenge.
How would you even begin to reason about integrating a bitwise instruction?
Fortunately, there is a powerful tool I've found useful in these situations:
The [On-Line Encyclopedia of Integer Sequences (OEIS)](https://oeis.org/).
If we work out the first couple of values in our summation, we find that CTZ
maps to [A001511](https://oeis.org/A001511), and its partial summation maps
to [A005187](https://oeis.org/A005187), and surprisingly, both of these
sequences have relatively trivial equations! This leads us to a rather
unintuitive property:
![mindblown](https://latex.codecogs.com/svg.latex?%5Csum_i%5En%5Cleft%28%5Ctext%7Bctz%7D%28i%29&plus;1%5Cright%29%20%3D%202n-%5Ctext%7Bpopcount%7D%28n%29)
where:
ctz(x) = the number of trailing bits that are 0 in x
popcount(x) = the number of bits that are 1 in x
It's a bit bewildering that these two seemingly unrelated bitwise instructions
are related by this property. But if we start to dissect this equation we can
see that it does hold. As n approaches infinity, we do end up with an average
overhead of 2 pointers as we find earlier. And popcount seems to handle the
error from this average as it accumulates in the CTZ skip-list.
Now we can substitute into the original equation to get a trivial equation
for a file size:
![summation2](https://latex.codecogs.com/svg.latex?N%20%3D%20Bn%20-%20%5Cfrac%7Bw%7D%7B8%7D%5Cleft%282n-%5Ctext%7Bpopcount%7D%28n%29%5Cright%29)
Unfortunately, we're not quite done. The popcount function is non-injective,
so we can only find the file size from the block index, not the other way
around. However, we can solve for an n' block index that is greater than n
with an error bounded by the range of the popcount function. We can then
repeatedly substitute this n' into the original equation until the error
is smaller than the integer division. As it turns out, we only need to
perform this substitution once. Now we directly calculate our block index:
![formulaforn](https://latex.codecogs.com/svg.latex?n%20%3D%20%5Cleft%5Clfloor%5Cfrac%7BN-%5Cfrac%7Bw%7D%7B8%7D%5Cleft%28%5Ctext%7Bpopcount%7D%5Cleft%28%5Cfrac%7BN%7D%7BB-2%5Cfrac%7Bw%7D%7B8%7D%7D-1%5Cright%29&plus;2%5Cright%29%7D%7BB-2%5Cfrac%7Bw%7D%7B8%7D%7D%5Cright%5Crfloor)
Now that we have our block index n, we can just plug it back into the above
equation to find the offset. However, we do need to rearrange the equation
a bit to avoid integer overflow:
![formulaforoff](https://latex.codecogs.com/svg.latex?%5Cmathit%7Boff%7D%20%3D%20N%20-%20%5Cleft%28B-2%5Cfrac%7Bw%7D%7B8%7D%5Cright%29n%20-%20%5Cfrac%7Bw%7D%7B8%7D%5Ctext%7Bpopcount%7D%28n%29)
The solution involves quite a bit of math, but computers are very good at math.
Now we can solve for both the block index and offset from the file size in O(1).
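
A sketch of that O(1) calculation, following the equations above rather than littlefs's exact internal code; the function name is illustrative and `__builtin_popcount` is the GCC/Clang builtin:

``` c
#include <stdint.h>

// N is the file size in bytes, B the block size in bytes, w the word width
// in bits (32 for littlefs).
static void ctz_find(uint32_t N, uint32_t B, uint32_t w,
                     uint32_t *block, uint32_t *off) {
    uint32_t p = w / 8;      // bytes per pointer
    uint32_t b = B - 2*p;    // amortized data bytes per block

    if (N / b == 0) {        // still inside the first block
        *block = 0;
        *off = N;
        return;
    }

    // n = floor((N - (w/8)(popcount(N/(B - 2w/8) - 1) + 2)) / (B - 2w/8))
    uint32_t n = (N - p*((uint32_t)__builtin_popcount(N/b - 1) + 2)) / b;

    // off = N - (B - 2w/8)n - (w/8)popcount(n), arranged to avoid overflow
    *block = n;
    *off = N - b*n - p*(uint32_t)__builtin_popcount(n);
}
```
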
Here is what it might look like to update a file stored with a CTZ skip-list:
Here is what it might look like to update a file stored with a CTZ linked-list:
```
block 1 block 2
.---------.---------.
@@ -488,7 +367,7 @@ v
## Block allocation
So those two ideas provide the grounds for the filesystem. The metadata pairs
give us directories, and the CTZ skip-lists give us files. But this leaves
give us directories, and the CTZ linked-lists give us files. But this leaves
one big [elephant](https://upload.wikimedia.org/wikipedia/commons/3/37/African_Bush_Elephant.jpg)
of a question. How do we get those blocks in the first place?
@@ -501,17 +380,16 @@ scanned to find the most recent free list, but once the list was found the
state of all free blocks becomes known.
However, this approach had several issues:
- There was a lot of nuanced logic for adding blocks to the free list without
modifying the blocks, since the blocks remain active until the metadata is
updated.
- The free list had to support both additions and removals in FIFO order while
- The free list had to support both additions and removals in fifo order while
minimizing block erases.
- The free list had to handle the case where the file system completely ran
out of blocks and may no longer be able to add blocks to the free list.
- If we used a revision count to track the most recently updated free list,
metadata blocks that were left unmodified were ticking time bombs that would
cause the system to go haywire if the revision count overflowed.
cause the system to go haywire if the revision count overflowed
- Every single metadata block wasted space to store these free list references.
Actually, to simplify, this approach had one massive glaring issue: complexity.
@@ -541,7 +419,7 @@ would have an abhorrent runtime.
So the littlefs compromises. It doesn't store a bitmap the size of the storage,
but it does store a little bit-vector that contains a fixed set lookahead
for block allocations. During a block allocation, the lookahead vector is
checked for any free blocks. If there are none, the lookahead region jumps
checked for any free blocks, if there are none, the lookahead region jumps
forward and the entire filesystem is scanned for free blocks.
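
A minimal sketch of allocating out of the lookahead bit-vector, with refilling the window left to the caller; the names here are illustrative:

``` c
#include <stdbool.h>
#include <stdint.h>

// A set bit means "in use". If the window is exhausted, the caller would
// jump the lookahead region forward and rescan the filesystem to refill
// the vector; that part is elided.
static bool lookahead_alloc(uint32_t *lookahead, uint32_t nbits,
                            uint32_t window_start, uint32_t *block) {
    for (uint32_t i = 0; i < nbits; i++) {
        if (!(lookahead[i/32] & (1u << (i%32)))) {
            lookahead[i/32] |= 1u << (i%32);   // mark as allocated
            *block = window_start + i;         // absolute block number
            return true;
        }
    }
    return false;  // no free block in this window
}
```
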
Here's what it might look like to allocate 4 blocks on a decently busy
@@ -624,7 +502,7 @@ So, as a solution, the littlefs adopted a sort of threaded tree. Each
directory not only contains pointers to all of its children, but also a
pointer to the next directory. These pointers create a linked-list that
is threaded through all of the directories in the filesystem. Since we
only use this linked list to check for existence, the order doesn't actually
only use this linked list to check for existance, the order doesn't actually
matter. As an added plus, we can repurpose the pointer for the individual
directory linked-lists and avoid using any additional space.
@@ -775,17 +653,9 @@ deorphan step that simply iterates through every directory in the linked-list
and checks it against every directory entry in the filesystem to see if it
has a parent. The deorphan step occurs on the first block allocation after
boot, so orphans should never cause the littlefs to run out of storage
prematurely. Note that the deorphan step never needs to run in a read-only
filesystem.
prematurely.
## The move problem
Now we have a real problem. How do we move things between directories while
remaining power resilient? Even looking at the problem from a high level,
it seems impossible. We can update directory blocks atomically, but atomically
updating two independent directory blocks is not an atomic operation.
Here's the steps the filesystem may go through to move a directory:
And for my final trick, moving a directory:
```
.--------.
|root dir|-.
@@ -846,142 +716,25 @@ v
'--------'
```
We can leave any orphans up to the deorphan step to collect, but that doesn't
help the case where dir A has both dir B and the root dir as parents if we
lose power inconveniently.
Note that once again we don't care about the ordering of directories in the
linked-list, so we can simply leave directories in their old positions. This
does make the diagrams a bit hard to draw, but the littlefs doesn't really
care.
Initially, you might think this is fine. Dir A _might_ end up with two parents,
but the filesystem will still work as intended. But then this raises the
question of what do we do when the dir A wears out? For other directory blocks
we can update the parent pointer, but for a dir with two parents we would need
work out how to update both parents. And the check for multiple parents would
need to be carried out for every directory, even if the directory has never
been moved.
It also presents a bad user-experience, since the condition of ending up with
two parents is rare, it's unlikely user-level code will be prepared. Just think
about how a user would recover from a multi-parented directory. They can't just
remove one directory, since remove would report the directory as "not empty".
Other atomic filesystems simple COW the entire directory tree. But this
introduces a significant bit of complexity, which leads to code size, along
with a surprisingly expensive runtime cost during what most users assume is
a single pointer update.
Another option is to update the directory block we're moving from to point
to the destination with a sort of predicate that we have moved if the
destination exists. Unfortunately, the omnipresent concern of wear could
cause any of these directory entries to change blocks, and changing the
entry size before a move introduces complications if it spills out of
the current directory block.
So how do we go about moving a directory atomically?
We rely on the improbableness of power loss.
Power loss during a move is certainly possible, but it's actually relatively
rare. Unless a device is writing to a filesystem constantly, it's unlikely that
a power loss will occur during filesystem activity. We still need to handle
the condition, but runtime during a power loss takes a back seat to the runtime
during normal operations.
So what littlefs does is inelegantly simple. When littlefs moves a file, it
marks the file as "moving". This is stored as a single bit in the directory
entry and doesn't take up much space. Then littlefs moves the directory,
finishing with the complete remove of the "moving" directory entry.
```
.--------.
|root dir|-.
| pair 0 | |
.--------| |-'
| '--------'
| .-' '-.
| v v
| .--------. .--------.
'->| dir A |->| dir B |
| pair 0 | | pair 0 |
| | | |
'--------' '--------'
| update root directory to mark directory A as moving
v
.----------.
|root dir |-.
| pair 0 | |
.-------| moving A!|-'
| '----------'
| .-' '-.
| v v
| .--------. .--------.
'->| dir A |->| dir B |
| pair 0 | | pair 0 |
| | | |
'--------' '--------'
| update directory B to point to directory A
v
.----------.
|root dir |-.
| pair 0 | |
.-------| moving A!|-'
| '----------'
| .-----' '-.
| | v
| | .--------.
| | .->| dir B |
| | | | pair 0 |
| | | | |
| | | '--------'
| | .-------'
| v v |
| .--------. |
'->| dir A |-'
| pair 0 |
| |
'--------'
| update root to no longer contain directory A
v
.--------.
|root dir|-.
| pair 0 | |
.----| |-'
| '--------'
| |
| v
| .--------.
| .->| dir B |
| | | pair 0 |
| '--| |-.
| '--------' |
| | |
| v |
| .--------. |
'--->| dir A |-'
| pair 0 |
| |
'--------'
```
Now, if we run into a directory entry that has been marked as "moved", one
of two things is possible. Either the directory entry exists elsewhere in the
filesystem, or it doesn't. This is a O(n) operation, but only occurs in the
unlikely case we lost power during a move.
And we can easily fix the "moved" directory entry. Since we're already scanning
the filesystem during the deorphan step, we can also check for moved entries.
If we find one, we either remove the "moved" marking or remove the whole entry
if it exists elsewhere in the filesystem.
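
Resolving a "moved" entry during that scan can be sketched as follows, with placeholder types and helpers standing in for the real directory machinery:

``` c
#include <stdbool.h>

struct dir_entry { bool moved; /* name, type, pointers, ... */ };

static void resolve_moved(struct dir_entry *e,
                          bool (*exists_elsewhere)(const struct dir_entry *),
                          void (*remove_entry)(struct dir_entry *)) {
    if (!e->moved) {
        return;
    }
    if (exists_elsewhere(e)) {
        remove_entry(e);    // move completed: drop the stale source entry
    } else {
        e->moved = false;   // move was interrupted: just clear the mark
    }
}
```
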
It's also worth noting that once again we have an operation that isn't actually
atomic. After we add directory A to directory B, we could lose power, leaving
directory A as a part of both the root directory and directory B. However,
there isn't anything inherent to the littlefs that prevents a directory from
having multiple parents, so in this case, we just allow that to happen. Extra
care is taken to only remove a directory from the linked-list if there are
no parents left in the filesystem.
## Wear awareness
So now that we have all of the pieces of a filesystem, we can look at a more
subtle attribute of embedded storage: The wear down of flash blocks.
The first concern for the littlefs, is that perfectly valid blocks can suddenly
The first concern for the littlefs, is that prefectly valid blocks can suddenly
become unusable. As a nice side-effect of using a COW data-structure for files,
we can simply move on to a different block when a file write fails. All
modifications to files are performed in copies, so we will only replace the
@@ -1153,28 +906,23 @@ develops errors and needs to be moved.
## Wear leveling
The second concern for the littlefs is that blocks in the filesystem may wear
The second concern for the littlefs, is that blocks in the filesystem may wear
unevenly. In this situation, a filesystem may meet an early demise where
there are no more non-corrupted blocks that aren't in use. It's common to
have files that were written once and left unmodified, wasting the potential
erase cycles of the blocks it sits on.
there are no more non-corrupted blocks that aren't in use. It may be entirely
possible that files were written once and left unmodified, wasting the
potential erase cycles of the blocks it sits on.
Wear leveling is a term that describes distributing block writes evenly to
avoid the early termination of a flash part. There are typically two levels
of wear leveling:
1. Dynamic wear leveling - Wear is distributed evenly across all **dynamic**
blocks. Usually this is accomplished by simply choosing the unused block
with the lowest amount of wear. Note this does not solve the problem of
static data.
2. Static wear leveling - Wear is distributed evenly across all **dynamic**
and **static** blocks. Unmodified blocks may be evicted for new block
writes. This does handle the problem of static data but may lead to
wear amplification.
1. Dynamic wear leveling - Blocks are distributed evenly during blocks writes.
Note that the issue with write-once files still exists in this case.
2. Static wear leveling - Unmodified blocks are evicted for new block writes.
This provides the longest lifetime for a flash device.
In littlefs's case, it's possible to use the revision count on metadata pairs
to approximate the wear of a metadata block. And combined with the COW nature
of files, littlefs could provide your usual implementation of dynamic wear
leveling.
Now, it's possible to use the revision count on metadata pairs to approximate
the wear of a metadata block. And combined with the COW nature of files, the
littlefs could provide a form of dynamic wear leveling.
However, the littlefs does not. This is for a few reasons. Most notably, even
if the littlefs did implement dynamic wear leveling, this would still not
@@ -1185,20 +933,19 @@ As a flash device reaches the end of its life, the metadata blocks will
naturally be the first to go since they are updated most often. In this
situation, the littlefs is designed to simply move on to another set of
metadata blocks. This travelling means that at the end of a flash device's
life, the filesystem will have worn the device down nearly as evenly as the
usual dynamic wear leveling could. More aggressive wear leveling would come
with a code-size cost for marginal benefit.
life, the filesystem will have worn the device down as evenly as a dynamic
wear leveling filesystem could anyways. Simply put, if the lifetime of flash
is a serious concern, static wear leveling is the only valid solution.
One important takeaway to note, if your storage stack uses highly sensitive
storage such as NAND flash, static wear leveling is the only valid solution.
In most cases you are going to be better off using a full [flash translation layer (FTL)](https://en.wikipedia.org/wiki/Flash_translation_layer).
This is a very important takeaway to note. If your storage stack uses highly
sensitive storage such as NAND flash. In most cases you are going to be better
off just using a [flash translation layer (FTL)](https://en.wikipedia.org/wiki/Flash_translation_layer).
NAND flash already has many limitations that make it poorly suited for an
embedded system: low erase cycles, very large blocks, errors that can develop
even during reads, errors that can develop during writes of neighboring blocks.
Managing sensitive storage such as NAND flash is out of scope for the littlefs.
The littlefs does have some properties that may be beneficial on top of a FTL,
such as limiting the number of writes where possible, but if you have the
such as limiting the number of writes where possible. But if you have the
storage requirements that necessitate the need of NAND flash, you should have
the RAM to match and just use an FTL or flash filesystem.
@@ -1208,18 +955,18 @@ So, to summarize:
1. The littlefs is composed of directory blocks
2. Each directory is a linked-list of metadata pairs
3. These metadata pairs can be updated atomically by alternating which
3. These metadata pairs can be updated atomically by alternative which
metadata block is active
4. Directory blocks contain either references to other directories or files
5. Files are represented by copy-on-write CTZ skip-lists which support O(1)
append and O(n log n) reading
6. Blocks are allocated by scanning the filesystem for used blocks in a
fixed-size lookahead region that is stored in a bit-vector
7. To facilitate scanning the filesystem, all directories are part of a
5. Files are represented by copy-on-write CTZ linked-lists
6. The CTZ linked-lists support appending in O(1) and reading in O(n logn)
7. Blocks are allocated by scanning the filesystem for used blocks in a
fixed-size lookahead region is that stored in a bit-vector
8. To facilitate scanning the filesystem, all directories are part of a
linked-list that is threaded through the entire filesystem
8. If a block develops an error, the littlefs allocates a new block, and
9. If a block develops an error, the littlefs allocates a new block, and
moves the data and references of the old block to the new.
9. Any case where an atomic operation is not possible, mistakes are resolved
10. Any case where an atomic operation is not possible, it is taken care of
by a deorphan step that occurs on the first allocation after boot
That's the little filesystem. Thanks for reading!

Makefile

@@ -1,8 +1,8 @@
TARGET = lfs
CC ?= gcc
AR ?= ar
SIZE ?= size
CC = gcc
AR = ar
SIZE = size
SRC += $(wildcard *.c emubd/*.c)
OBJ := $(SRC:.c=.o)
@@ -11,18 +11,16 @@ ASM := $(SRC:.c=.s)
TEST := $(patsubst tests/%.sh,%,$(wildcard tests/test_*))
SHELL = /bin/bash -o pipefail
ifdef DEBUG
override CFLAGS += -O0 -g3
CFLAGS += -O0 -g3
else
override CFLAGS += -Os
CFLAGS += -Os
endif
ifdef WORD
override CFLAGS += -m$(WORD)
CFLAGS += -m$(WORD)
endif
override CFLAGS += -I.
override CFLAGS += -std=c99 -Wall -pedantic
CFLAGS += -I.
CFLAGS += -std=c99 -Wall -pedantic
all: $(TARGET)
@@ -33,14 +31,10 @@ size: $(OBJ)
$(SIZE) -t $^
.SUFFIXES:
test: test_format test_dirs test_files test_seek test_truncate test_parallel \
test: test_format test_dirs test_files test_seek test_parallel \
test_alloc test_paths test_orphan test_move test_corrupt
test_%: tests/test_%.sh
ifdef QUIET
@./$< | sed -n '/^[-=]/p'
else
./$<
endif
-include $(DEP)

README.md

@@ -11,17 +11,23 @@ A little fail-safe filesystem designed for embedded systems.
| | |
```
**Bounded RAM/ROM** - The littlefs is designed to work with a limited amount
of memory. Recursion is avoided and dynamic memory is limited to configurable
buffers that can be provided statically.
**Fail-safe** - The littlefs is designed to work consistently with random
power failures. During filesystem operations the storage on disk is always
kept in a valid state. The filesystem also has strong copy-on-write garuntees.
When updating a file, the original file will remain unmodified until the
file is closed, or sync is called.
**Power-loss resilient** - The littlefs is designed for systems that may have
random power failures. The littlefs has strong copy-on-write guarantees and
storage on disk is always kept in a valid state.
**Wear awareness** - While the littlefs does not implement static wear
leveling, the littlefs takes into account write errors reported by the
underlying block device and uses a limited form of dynamic wear leveling
to manage blocks that go bad during the lifetime of the filesystem.
**Wear leveling** - Since the most common form of embedded storage is erodible
flash memories, littlefs provides a form of dynamic wear leveling for systems
that can not fit a full flash translation layer.
**Bounded ram/rom** - The littlefs is designed to work in a
limited amount of memory, recursion is avoided, and dynamic memory is kept
to a minimum. The littlefs allocates two fixed-size buffers for general
operations, and one fixed-size buffer per file. If there is only ever one file
in use, all memory can be provided statically and the littlefs can be used
in a system without dynamic memory.
## Example
@@ -88,9 +94,9 @@ int main(void) {
## Usage
Detailed documentation (or at least as much detail as is currently available)
can be found in the comments in [lfs.h](lfs.h).
can be cound in the comments in [lfs.h](lfs.h).
As you may have noticed, littlefs takes in a configuration structure that
As you may have noticed, the littlefs takes in a configuration structure that
defines how the filesystem operates. The configuration struct provides the
filesystem with the block device operations and dimensions, tweakable
parameters that tradeoff memory usage for performance, and optional
@@ -98,16 +104,14 @@ static buffers if the user wants to avoid dynamic memory.
The state of the littlefs is stored in the `lfs_t` type which is left up
to the user to allocate, allowing multiple filesystems to be in use
simultaneously. With the `lfs_t` and configuration struct, a user can
simultaneously. With the `lfs_t` and configuration struct, a user can either
format a block device or mount the filesystem.
Once mounted, the littlefs provides a full set of POSIX-like file and
Once mounted, the littlefs provides a full set of posix-like file and
directory functions, with the deviation that the allocation of filesystem
structures must be provided by the user.
All POSIX operations, such as remove and rename, are atomic, even in event
of power-loss. Additionally, no file updates are actually committed to the
filesystem until sync or close is called on the file.
structures must be provided by the user. An important addition is that
no file updates will actually be written to disk until a sync or close
is called.
## Other notes
@@ -115,50 +119,27 @@ All littlefs have the potential to return a negative error code. The errors
can be either one of those found in the `enum lfs_error` in [lfs.h](lfs.h),
or an error returned by the user's block device operations.
In the configuration struct, the `prog` and `erase` function provided by the
user may return a `LFS_ERR_CORRUPT` error if the implementation already can
detect corrupt blocks. However, the wear leveling does not depend on the return
code of these functions, instead all data is read back and checked for
integrity.
It should also be noted that the littlefs does not do anything to insure
that the data written to disk is machine portable. It should be fine as
long as the machines involved share endianness and don't have really
strange padding requirements. If the question does come up, the littlefs
metadata should be stored on disk in little-endian format.
If your storage caches writes, make sure that the provided `sync` function
flushes all the data to memory and ensures that the next read fetches the data
from memory, otherwise data integrity can not be guaranteed. If the `write`
function does not perform caching, and therefore each `read` or `write` call
hits the memory, the `sync` function can simply return 0.
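
For example, a block device with no write cache could provide a sync that just reports success; this is sketched against the `lfs_config` callback style, so treat the exact signature as illustrative:

``` c
#include "lfs.h"

// A no-op sync for block devices that do not cache writes: every prog/read
// already hits the underlying storage, so there is nothing to flush.
static int bd_sync(const struct lfs_config *c) {
    (void)c;
    return 0;
}
```
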
## Design
## Reference material
[DESIGN.md](DESIGN.md) - DESIGN.md contains a fully detailed dive into how
littlefs actually works. I would encourage you to read it since the
solutions and tradeoffs at work here are quite interesting.
[SPEC.md](SPEC.md) - SPEC.md contains the on-disk specification of littlefs
with all the nitty-gritty details. Can be useful for developing tooling.
the littlefs was developed with the goal of learning more about filesystem
design by tackling the relative unsolved problem of managing a robust
filesystem resilient to power loss on devices with limited RAM and ROM.
More detail on the solutions and tradeoffs incorporated into this filesystem
can be found in [DESIGN.md](DESIGN.md). The specification for the layout
of the filesystem on disk can be found in [SPEC.md](SPEC.md).
## Testing
The littlefs comes with a test suite designed to run on a PC using the
The littlefs comes with a test suite designed to run on a pc using the
[emulated block device](emubd/lfs_emubd.h) found in the emubd directory.
The tests assume a Linux environment and can be started with make:
The tests assume a linux environment and can be started with make:
``` bash
make test
```
## Related projects
[Mbed OS](https://github.com/ARMmbed/mbed-os/tree/master/features/filesystem/littlefs) -
The easiest way to get started with littlefs is to jump into [Mbed](https://os.mbed.com/),
which already has block device drivers for most forms of embedded storage. The
littlefs is available in Mbed OS as the [LittleFileSystem](https://os.mbed.com/docs/latest/reference/littlefilesystem.html)
class.
[littlefs-fuse](https://github.com/geky/littlefs-fuse) - A [FUSE](https://github.com/libfuse/libfuse)
wrapper for littlefs. The project allows you to mount littlefs directly on a
Linux machine. Can be useful for debugging littlefs if you have an SD card
handy.
[littlefs-js](https://github.com/geky/littlefs-js) - A javascript wrapper for
littlefs. I'm not sure why you would want this, but it is handy for demos.
You can see it in action [here](http://littlefs.geky.net/demo.html).

SPEC.md (45 lines changed)

@@ -46,7 +46,7 @@ Here's the layout of metadata blocks on disk:
| 0x04 | 32 bits | dir size |
| 0x08 | 64 bits | tail pointer |
| 0x10 | size-16 bytes | dir entries |
| 0x00+s | 32 bits | CRC |
| 0x00+s | 32 bits | crc |
**Revision count** - Incremented every update, only the uncorrupted
metadata-block with the most recent revision count contains the valid metadata.
@@ -75,7 +75,7 @@ Here's an example of a simple directory stored on disk:
(32 bits) revision count = 10 (0x0000000a)
(32 bits) dir size = 154 bytes, end of dir (0x0000009a)
(64 bits) tail pointer = 37, 36 (0x00000025, 0x00000024)
(32 bits) CRC = 0xc86e3106
(32 bits) crc = 0xc86e3106
00000000: 0a 00 00 00 9a 00 00 00 25 00 00 00 24 00 00 00 ........%...$...
00000010: 22 08 00 03 05 00 00 00 04 00 00 00 74 65 61 22 "...........tea"
@@ -121,29 +121,24 @@ Here's the layout of entries on disk:
**Entry type** - Type of the entry, currently this is limited to the following:
- 0x11 - file entry
- 0x22 - directory entry
- 0x2e - superblock entry
- 0xe2 - superblock entry
Additionally, the type is broken into two 4 bit nibbles, with the upper nibble
Additionally, the type is broken into two 4 bit nibbles, with the lower nibble
specifying the type's data structure used when scanning the filesystem. The
lower nibble clarifies the type further when multiple entries share the same
upper nibble clarifies the type further when multiple entries share the same
data structure.
The highest bit is reserved for marking the entry as "moved". If an entry
is marked as "moved", the entry may also exist somewhere else in the
filesystem. If the entry exists elsewhere, this entry must be treated as
though it does not exist.
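
A small sketch of working with that type byte, assuming only the "moved" bit convention described here; which nibble encodes what is left out since it differs between revisions of the spec:

``` c
#include <stdbool.h>
#include <stdint.h>

// The top bit marks an entry as "moved".
static inline bool entry_is_moved(uint8_t type) {
    return (type & 0x80) != 0;
}

// Strip the "moved" mark to recover the underlying entry type.
static inline uint8_t entry_base_type(uint8_t type) {
    return type & 0x7f;
}
```
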
**Entry length** - Length in bytes of the entry-specific data. This does
not include the entry type size, attributes, or name. The full size in bytes
of the entry is 4 + entry length + attribute length + name length.
**Attribute length** - Length of system-specific attributes in bytes. Since
attributes are system specific, there is not much guarantee on the values in
attributes are system specific, there is not much garuntee on the values in
this section, and systems are expected to work even when it is empty. See the
[attributes](#entry-attributes) section for more details.
**Name length** - Length of the entry name. Entry names are stored as UTF8,
although most systems will probably only support ASCII. Entry names can not
**Name length** - Length of the entry name. Entry names are stored as utf8,
although most systems will probably only support ascii. Entry names can not
contain '/' and can not be '.' or '..' as these are a part of the syntax of
filesystem paths.
@@ -180,7 +175,7 @@ Here's the layout of the superblock entry:
| offset | size | description |
|--------|------------------------|----------------------------------------|
| 0x00 | 8 bits | entry type (0x2e for superblock entry) |
| 0x00 | 8 bits | entry type (0xe2 for superblock entry) |
| 0x01 | 8 bits | entry length (20 bytes) |
| 0x02 | 8 bits | attribute length |
| 0x03 | 8 bits | name length (8 bytes) |
@@ -213,7 +208,7 @@ Here's an example of a complete superblock:
(32 bits) revision count = 3 (0x00000003)
(32 bits) dir size = 52 bytes, end of dir (0x00000034)
(64 bits) tail pointer = 3, 2 (0x00000003, 0x00000002)
(8 bits) entry type = superblock (0x2e)
(8 bits) entry type = superblock (0xe2)
(8 bits) entry length = 20 bytes (0x14)
(8 bits) attribute length = 0 bytes (0x00)
(8 bits) name length = 8 bytes (0x08)
@@ -222,10 +217,10 @@ Here's an example of a complete superblock:
(32 bits) block count = 1024 blocks (0x00000400)
(32 bits) version = 1.1 (0x00010001)
(8 bytes) magic string = littlefs
(32 bits) CRC = 0xc50b74fa
(32 bits) crc = 0xc50b74fa
00000000: 03 00 00 00 34 00 00 00 03 00 00 00 02 00 00 00 ....4...........
00000010: 2e 14 00 08 03 00 00 00 02 00 00 00 00 02 00 00 ................
00000010: e2 14 00 08 03 00 00 00 02 00 00 00 00 02 00 00 ................
00000020: 00 04 00 00 01 00 01 00 6c 69 74 74 6c 65 66 73 ........littlefs
00000030: fa 74 0b c5 .t..
```
@@ -267,19 +262,15 @@ Here's an example of a directory entry:
Files are stored in entries with a pointer to the head of the file and the
size of the file. This is enough information to determine the state of the
CTZ skip-list that is being referenced.
CTZ linked-list that is being referenced.
How files are actually stored on disk is a bit complicated. The full
explanation of CTZ skip-lists can be found in [DESIGN.md](DESIGN.md#ctz-skip-lists).
explanation of CTZ linked-lists can be found in [DESIGN.md](DESIGN.md#ctz-linked-lists).
A terribly quick summary: For every nth block where n is divisible by 2^x,
the block contains a pointer to block n-2^x. These pointers are stored in
increasing order of x in each block of the file preceding the data in the
block.
The maximum number of pointers in a block is bounded by the maximum file size
divided by the block size. With 32 bits for file size, this results in a
minimum block size of 104 bytes.
the block contains a pointer that points x blocks towards the beginning of the
file. These pointers are stored in order of x in each block of the file
immediately before the data in the block.
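One consequence worth spelling out: block n is divisible by 2^x for every x up to the number of trailing zeros in n, so the number of pointers it stores is that count plus one, which is exactly what lfs_ctz (defined later in lfs_util.h) computes. A small sketch of that relationship, with block 0 (the start of the list) assumed to carry no pointers:

``` c
#include <stdint.h>

// Simplified from lfs_util.h: count of trailing binary zeros.
static inline uint32_t lfs_ctz(uint32_t a) {
    return __builtin_ctz(a);
}

// Sketch: block n stores pointers to blocks n-1, n-2, n-4, ... for
// every power of 2 that divides n, i.e. lfs_ctz(n)+1 pointers.
// Block 0 is assumed to store none.
static uint32_t ctz_pointer_count(uint32_t n) {
    return (n == 0) ? 0 : lfs_ctz(n) + 1;
}
```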
Here's the layout of a file entry:
@@ -295,7 +286,7 @@ Here's the layout of a file entry:
| 0xc+a | name length bytes | directory name |
**File head** - Pointer to the block that is the head of the file's CTZ
skip-list.
linked-list.
**File size** - Size of file in bytes.


@@ -1,19 +1,8 @@
/*
* Block device emulated on standard files
*
* Copyright (c) 2017 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* Copyright (c) 2017 Christopher Haster
* Distributed under the Apache 2.0 license
*/
#include "emubd/lfs_emubd.h"
@@ -138,8 +127,8 @@ int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
snprintf(emu->child, LFS_NAME_MAX, "%x", block);
FILE *f = fopen(emu->path, "r+b");
if (!f) {
return (errno == EACCES) ? 0 : -errno;
if (!f && errno != ENOENT) {
return -errno;
}
// Check that file was erased
@@ -189,14 +178,14 @@ int lfs_emubd_erase(const struct lfs_config *cfg, lfs_block_t block) {
return -errno;
}
if (!err && S_ISREG(st.st_mode) && (S_IWUSR & st.st_mode)) {
err = unlink(emu->path);
if (!err && S_ISREG(st.st_mode)) {
int err = unlink(emu->path);
if (err) {
return -errno;
}
}
if (err || (S_ISREG(st.st_mode) && (S_IWUSR & st.st_mode))) {
if (err || S_ISREG(st.st_mode)) {
FILE *f = fopen(emu->path, "w");
if (!f) {
return -errno;


@@ -1,19 +1,8 @@
/*
* Block device emulated on standard files
*
* Copyright (c) 2017 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* Copyright (c) 2017 Christopher Haster
* Distributed under the Apache 2.0 license
*/
#ifndef LFS_EMUBD_H
#define LFS_EMUBD_H

1633 lfs.c
File diff suppressed because it is too large

178 lfs.h

@@ -1,19 +1,8 @@
/*
* The little filesystem
*
* Copyright (c) 2017 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* Copyright (c) 2017 Christopher Haster
* Distributed under the Apache 2.0 license
*/
#ifndef LFS_H
#define LFS_H
@@ -22,23 +11,6 @@
#include <stdbool.h>
/// Version info ///
// Software library version
// Major (top-nibble), incremented on backwards incompatible changes
// Minor (bottom-nibble), incremented on feature additions
#define LFS_VERSION 0x00010004
#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))
// Version of On-disk data structures
// Major (top-nibble), incremented on backwards incompatible changes
// Minor (bottom-nibble), incremented on feature additions
#define LFS_DISK_VERSION 0x00010002
#define LFS_DISK_VERSION_MAJOR (0xffff & (LFS_DISK_VERSION >> 16))
#define LFS_DISK_VERSION_MINOR (0xffff & (LFS_DISK_VERSION >> 0))
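The major/minor split is what lets a driver decide whether it can safely mount a given disk; a minimal sketch of such a check against a superblock's version field, using the macros above (the acceptance rule itself is an assumption, not littlefs's documented policy):

``` c
#include <stdbool.h>
#include <stdint.h>

// Sketch: accept media whose major version matches this driver and
// whose minor version is not newer than what the driver was built for.
// The rule is an assumption for illustration.
static bool disk_version_compatible(uint32_t disk_version) {
    uint16_t major = 0xffff & (disk_version >> 16);
    uint16_t minor = 0xffff & (disk_version >> 0);
    return major == LFS_DISK_VERSION_MAJOR
        && minor <= LFS_DISK_VERSION_MINOR;
}
```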
/// Definitions ///
// Type definitions
@@ -50,78 +22,48 @@ typedef int32_t lfs_soff_t;
typedef uint32_t lfs_block_t;
// Maximum inline file size in bytes. Large inline files require a larger
// read and prog cache, but if a file can be inline it does not need its own
// data block. LFS_ATTRS_MAX + LFS_INLINE_MAX must be <= 0xffff. Stored in
// superblock and must be respected by other littlefs drivers.
#ifndef LFS_INLINE_MAX
#define LFS_INLINE_MAX 0x3ff
#endif
// Maximum size of all attributes per file in bytes, may be redefined but
// a smaller LFS_ATTRS_MAX has no benefit. LFS_ATTRS_MAX + LFS_INLINE_MAX
// must be <= 0xffff. Stored in superblock and must be respected by other
// littlefs drivers.
#ifndef LFS_ATTRS_MAX
#define LFS_ATTRS_MAX 0x3f
#endif
// Max name size in bytes, may be redefined to reduce the size of the
// info struct. Stored in superblock and must be respected by other
// littlefs drivers.
// Max name size in bytes
#ifndef LFS_NAME_MAX
#define LFS_NAME_MAX 0xff
#define LFS_NAME_MAX 255
#endif
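Because of the #ifndef guard, this limit can be shrunk at compile time for constrained targets; a tiny sketch (the value 64 is only an example):

``` c
// Sketch: override the default before lfs.h is included, or pass
// -DLFS_NAME_MAX=64 on the compiler command line instead.
#define LFS_NAME_MAX 64   // example value, not a recommendation
#include "lfs.h"
```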
// Possible error codes, these are negative to allow
// valid positive return values
enum lfs_error {
LFS_ERR_OK = 0, // No error
LFS_ERR_IO = -5, // Error during device operation
LFS_ERR_CORRUPT = -52, // Corrupted
LFS_ERR_NOENT = -2, // No directory entry
LFS_ERR_EXIST = -17, // Entry already exists
LFS_ERR_NOTDIR = -20, // Entry is not a dir
LFS_ERR_ISDIR = -21, // Entry is a dir
LFS_ERR_NOTEMPTY = -39, // Dir is not empty
LFS_ERR_BADF = -9, // Bad file number
LFS_ERR_INVAL = -22, // Invalid parameter
LFS_ERR_NOSPC = -28, // No space left on device
LFS_ERR_NOMEM = -12, // No more memory available
LFS_ERR_NAMETOOLONG = -36, // File name too long
LFS_ERR_OK = 0, // No error
LFS_ERR_IO = -5, // Error during device operation
LFS_ERR_CORRUPT = -52, // Corrupted
LFS_ERR_NOENT = -2, // No directory entry
LFS_ERR_EXISTS = -17, // Entry already exists
LFS_ERR_NOTDIR = -20, // Entry is not a dir
LFS_ERR_ISDIR = -21, // Entry is a dir
LFS_ERR_INVAL = -22, // Invalid parameter
LFS_ERR_NOSPC = -28, // No space left on device
LFS_ERR_NOMEM = -12, // No more memory available
};
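All of these codes are negative so they can share a return channel with valid sizes and offsets; a short sketch of the resulting error-handling style, built only from calls that appear in the test scripts further down (the helper itself is not part of the littlefs API):

``` c
#include "lfs.h"

// Sketch: open a directory, creating it if it does not exist yet.
static int ensure_dir(lfs_t *lfs, const char *path) {
    lfs_dir_t dir;
    int err = lfs_dir_open(lfs, &dir, path);
    if (err == LFS_ERR_NOENT) {
        return lfs_mkdir(lfs, path);   // not there yet, create it
    }
    if (err < 0) {
        return err;                    // propagate any other failure
    }
    return lfs_dir_close(lfs, &dir);   // already there, just close it
}
```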
// File types
enum lfs_type {
// file type
LFS_TYPE_REG = 0x01,
LFS_TYPE_DIR = 0x02,
LFS_TYPE_SUPERBLOCK = 0x0e,
// on disk structure
LFS_STRUCT_CTZ = 0x10,
LFS_STRUCT_DIR = 0x20,
LFS_STRUCT_INLINE = 0x30,
LFS_STRUCT_MOVED = 0x80,
LFS_TYPE_REG = 0x11,
LFS_TYPE_DIR = 0x22,
LFS_TYPE_SUPERBLOCK = 0x2e,
};
// File open flags
enum lfs_open_flags {
// open flags
LFS_O_RDONLY = 1, // Open a file as read only
LFS_O_WRONLY = 2, // Open a file as write only
LFS_O_RDWR = 3, // Open a file as read and write
LFS_O_CREAT = 0x0100, // Create a file if it does not exist
LFS_O_EXCL = 0x0200, // Fail if a file already exists
LFS_O_TRUNC = 0x0400, // Truncate the existing file to zero size
LFS_O_APPEND = 0x0800, // Move to end of file on every write
LFS_O_RDONLY = 1, // Open a file as read only
LFS_O_WRONLY = 2, // Open a file as write only
LFS_O_RDWR = 3, // Open a file as read and write
LFS_O_CREAT = 0x0100, // Create a file if it does not exist
LFS_O_EXCL = 0x0200, // Fail if a file already exists
LFS_O_TRUNC = 0x0400, // Truncate the existing file to zero size
LFS_O_APPEND = 0x0800, // Move to end of file on every write
// internally used flags
LFS_F_DIRTY = 0x010000, // File does not match storage
LFS_F_WRITING = 0x020000, // File has been written since last flush
LFS_F_READING = 0x040000, // File has been read since last flush
LFS_F_ERRED = 0x080000, // An error occurred during write
LFS_F_INLINE = 0x100000, // Currently inlined in directory entry
LFS_F_DIRTY = 0x10000, // File does not match storage
LFS_F_WRITING = 0x20000, // File has been written since last flush
LFS_F_READING = 0x40000, // File has been read since last flush
};
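The low bits select the access mode and the higher bits are modifiers, so they are meant to be or'ed together as the tests below do; for example, a sketch of appending a record to a log file:

``` c
#include "lfs.h"

// Sketch: open a file (creating it if needed) in append mode, write
// one record, and close it. Mirrors flag combinations used in the
// test scripts below; the helper is illustrative, not littlefs API.
static int append_record(lfs_t *lfs, const char *path,
        const void *buf, lfs_size_t len) {
    lfs_file_t file;
    int err = lfs_file_open(lfs, &file, path,
            LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND);
    if (err < 0) {
        return err;
    }
    lfs_ssize_t res = lfs_file_write(lfs, &file, buf, len);
    err = lfs_file_close(lfs, &file);
    return (res < 0) ? (int)res : err;
}
```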
// File seek flags
@@ -145,14 +87,14 @@ struct lfs_config {
// Program a region in a block. The block must have previously
// been erased. Negative error codes are propagated to the user.
// May return LFS_ERR_CORRUPT if the block should be considered bad.
// The prog function must return LFS_ERR_CORRUPT if the block should
// be considered bad.
int (*prog)(const struct lfs_config *c, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size);
// Erase a block. A block must be erased before being programmed.
// The state of an erased block is undefined. Negative error codes
// are propagated to the user.
// May return LFS_ERR_CORRUPT if the block should be considered bad.
int (*erase)(const struct lfs_config *c, lfs_block_t block);
// Sync the state of the underlying block device. Negative error codes
@@ -167,13 +109,11 @@ struct lfs_config {
// Minimum size of a block program. This determines the size of program
// buffers. This may be larger than the physical program size to improve
// performance by caching more of the block device.
// Must be a multiple of the read size.
lfs_size_t prog_size;
// Size of an erasable block. This does not impact ram consumption and
// may be larger than the physical erase size. However, this should be
// kept small as each file currently takes up an entire block.
// Must be a multiple of the program size.
// kept small as each file currently takes up an entire block .
lfs_size_t block_size;
// Number of erasable blocks on the device.
@@ -198,25 +138,6 @@ struct lfs_config {
// Optional, statically allocated buffer for files. Must be program sized.
// If enabled, only one file may be opened at a time.
void *file_buffer;
// Optional upper limit on inlined files in bytes. Large inline files
// require a larger read and prog cache, but if a file can be inlined it
// does not need its own data block. Must be smaller than the read size
// and prog size. Defaults to min(LFS_INLINE_MAX, read_size) when zero.
// Stored in superblock and must be respected by other littlefs drivers.
lfs_size_t inline_size;
// Optional upper limit on attributes per file in bytes. No downside for
// larger attributes size but must be less than LFS_ATTRS_MAX. Defaults to
// LFS_ATTRS_MAX when zero.Stored in superblock and must be respected by
// other littlefs drivers.
lfs_size_t attrs_size;
// Optional upper limit on length of file names in bytes. No downside for
// larger names except the size of the info struct which is controlled by
// the LFS_NAME_MAX define. Defaults to LFS_NAME_MAX when zero. Stored in
// superblock and must be respected by other littlefs drivers.
lfs_size_t name_size;
};
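Putting the pieces above together, a configuration for the file-backed emulated block device might be initialized roughly as follows. The read/sync callbacks and the read_size/lookahead fields are not visible in this diff, so their names are assumptions patterned after the prog/erase counterparts that are; the geometry matches the 512-byte blocks and 1024-block device used in the examples and tests.

``` c
#include "lfs.h"
#include "emubd/lfs_emubd.h"

// Sketch of a lfs_config wired to the emulated block device.
// lfs_emubd_read/lfs_emubd_sync, read_size, and lookahead are assumed
// names (only the prog/erase counterparts appear in this diff), and
// the context hookup for emubd is omitted for brevity.
static const struct lfs_config cfg = {
    .read  = lfs_emubd_read,   // assumed
    .prog  = lfs_emubd_prog,
    .erase = lfs_emubd_erase,
    .sync  = lfs_emubd_sync,   // assumed

    .read_size   = 16,         // assumed field name; default LFS_READ_SIZE
    .prog_size   = 16,
    .block_size  = 512,
    .block_count = 1024,
    .lookahead   = 128,        // assumed field name
};
```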
@@ -236,7 +157,6 @@ struct lfs_info {
/// littlefs data structures ///
typedef struct lfs_entry {
lfs_off_t off;
lfs_size_t size;
struct lfs_disk_entry {
uint8_t type;
@@ -268,7 +188,6 @@ typedef struct lfs_file {
lfs_size_t size;
uint32_t flags;
lfs_size_t inline_size;
lfs_off_t pos;
lfs_block_t block;
lfs_off_t off;
@@ -276,7 +195,6 @@ typedef struct lfs_file {
} lfs_file_t;
typedef struct lfs_dir {
struct lfs_dir *next;
lfs_block_t pair[2];
lfs_off_t off;
@@ -291,24 +209,26 @@ typedef struct lfs_dir {
} lfs_dir_t;
typedef struct lfs_superblock {
lfs_off_t off;
struct lfs_disk_superblock {
uint8_t type;
uint8_t elen;
uint8_t alen;
uint8_t nlen;
lfs_block_t root[2];
lfs_size_t block_size;
lfs_size_t block_count;
uint32_t block_size;
uint32_t block_count;
uint32_t version;
lfs_size_t inline_size;
lfs_size_t attrs_size;
lfs_size_t name_size;
char magic[8];
} d;
} lfs_superblock_t;
typedef struct lfs_free {
lfs_size_t lookahead;
lfs_block_t begin;
lfs_block_t size;
lfs_block_t end;
lfs_block_t off;
lfs_block_t ack;
uint32_t *buffer;
} lfs_free_t;
@@ -318,17 +238,13 @@ typedef struct lfs {
lfs_block_t root[2];
lfs_file_t *files;
lfs_dir_t *dirs;
lfs_cache_t rcache;
lfs_cache_t pcache;
lfs_free_t free;
bool deorphaned;
lfs_size_t inline_size;
lfs_size_t attrs_size;
lfs_size_t name_size;
uint8_t unstable;
uint8_t sum;
} lfs_t;
@@ -433,11 +349,6 @@ lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
lfs_soff_t off, int whence);
// Truncates the size of the file to the specified size
//
// Returns a negative error code on failure.
int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size);
// Return the position of the file
//
// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_CUR)
@@ -524,5 +435,8 @@ int lfs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
// Returns a negative error code on failure.
int lfs_deorphan(lfs_t *lfs);
// TODO doc
int lfs_deduplicate(lfs_t *lfs);
#endif
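As a usage sketch, lfs_traverse hands every block currently in use to a callback, which is how the tests below count allocated blocks; something along these lines (the callback mirrors test_count from the test template further down):

``` c
#include "lfs.h"

// Callback: bump a counter for each in-use block reported by
// lfs_traverse. Mirrors the test_count() helper in the test template.
static int count_block(void *p, lfs_block_t b) {
    (void)b;
    *(unsigned *)p += 1;
    return 0;  // returning 0 keeps the traversal going
}

static int count_used_blocks(lfs_t *lfs, unsigned *count) {
    *count = 0;
    return lfs_traverse(lfs, count_block, count);
}
```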


@@ -1,27 +1,12 @@
/*
* lfs util functions
*
* Copyright (c) 2017 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* Copyright (c) 2017 Christopher Haster
* Distributed under the Apache 2.0 license
*/
#include "lfs_util.h"
// Only compile if user does not provide custom config
#ifndef LFS_CONFIG
// Software CRC implementation with small lookup table
void lfs_crc(uint32_t *restrict crc, const void *buffer, size_t size) {
static const uint32_t rtable[16] = {
0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
@@ -38,5 +23,3 @@ void lfs_crc(uint32_t *restrict crc, const void *buffer, size_t size) {
}
}
#endif


@@ -1,90 +1,19 @@
/*
* lfs utility functions
*
* Copyright (c) 2017 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* Copyright (c) 2017 Christopher Haster
* Distributed under the Apache 2.0 license
*/
#ifndef LFS_UTIL_H
#define LFS_UTIL_H
// Users can override lfs_util.h with their own configuration by defining
// LFS_CONFIG as a header file to include (-DLFS_CONFIG=lfs_config.h).
//
// If LFS_CONFIG is used, none of the default utils will be emitted and must be
// provided by the config file. To start I would suggest copying lfs_util.h and
// modifying as needed.
#ifdef LFS_CONFIG
#define LFS_STRINGIZE(x) LFS_STRINGIZE2(x)
#define LFS_STRINGIZE2(x) #x
#include LFS_STRINGIZE(LFS_CONFIG)
#else
// System includes
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#ifndef LFS_NO_MALLOC
#include <stdlib.h>
#endif
#ifndef LFS_NO_ASSERT
#include <assert.h>
#endif
#if !defined(LFS_NO_DEBUG) || !defined(LFS_NO_WARN) || !defined(LFS_NO_ERROR)
#include <stdint.h>
#include <stdio.h>
#endif
// Macros, may be replaced by system specific wrappers. Arguments to these
// macros must not have side-effects as the macros can be removed for a smaller
// code footprint
// Logging functions
#ifndef LFS_NO_DEBUG
#define LFS_DEBUG(fmt, ...) \
printf("lfs debug:%d: " fmt "\n", __LINE__, __VA_ARGS__)
#else
#define LFS_DEBUG(fmt, ...)
#endif
#ifndef LFS_NO_WARN
#define LFS_WARN(fmt, ...) \
printf("lfs warn:%d: " fmt "\n", __LINE__, __VA_ARGS__)
#else
#define LFS_WARN(fmt, ...)
#endif
#ifndef LFS_NO_ERROR
#define LFS_ERROR(fmt, ...) \
printf("lfs error:%d: " fmt "\n", __LINE__, __VA_ARGS__)
#else
#define LFS_ERROR(fmt, ...)
#endif
// Runtime assertions
#ifndef LFS_NO_ASSERT
#define LFS_ASSERT(test) assert(test)
#else
#define LFS_ASSERT(test)
#endif
// Builtin functions, these may be replaced by more efficient
// toolchain-specific implementations. LFS_NO_INTRINSICS falls back to a more
// expensive basic C implementation for debugging purposes
// Min/max functions for unsigned 32-bit numbers
// Builtin functions, these may be replaced by more
// efficient implementations in the system
static inline uint32_t lfs_max(uint32_t a, uint32_t b) {
return (a > b) ? a : b;
}
@@ -93,93 +22,27 @@ static inline uint32_t lfs_min(uint32_t a, uint32_t b) {
return (a < b) ? a : b;
}
// Find the exponent of the smallest power of 2 greater than or equal to a
static inline uint32_t lfs_npw2(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
return 32 - __builtin_clz(a-1);
#else
uint32_t r = 0;
uint32_t s;
a -= 1;
s = (a > 0xffff) << 4; a >>= s; r |= s;
s = (a > 0xff ) << 3; a >>= s; r |= s;
s = (a > 0xf ) << 2; a >>= s; r |= s;
s = (a > 0x3 ) << 1; a >>= s; r |= s;
return (r | (a >> 1)) + 1;
#endif
}
// Count the number of trailing binary zeros in a
// lfs_ctz(0) may be undefined
static inline uint32_t lfs_ctz(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && defined(__GNUC__)
return __builtin_ctz(a);
#else
return lfs_npw2((a & -a) + 1) - 1;
#endif
}
// Count the number of binary ones in a
static inline uint32_t lfs_popc(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
return __builtin_popcount(a);
#else
a = a - ((a >> 1) & 0x55555555);
a = (a & 0x33333333) + ((a >> 2) & 0x33333333);
return (((a + (a >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
#endif
static inline uint32_t lfs_npw2(uint32_t a) {
return 32 - __builtin_clz(a-1);
}
// Find the sequence comparison of a and b, this is the distance
// between a and b ignoring overflow
static inline int lfs_scmp(uint32_t a, uint32_t b) {
return (int)(unsigned)(a - b);
}
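A few worked values make these helpers easy to spot-check; each follows directly from the definitions above:

``` c
#include <assert.h>

// Spot checks (assumes the lfs_util.h definitions above):
//   lfs_npw2(1000) == 10   since 2^10 = 1024 is the first power of 2 >= 1000
//   lfs_ctz(12)    == 2    since 12 = 0b1100 has two trailing zeros
//   lfs_popc(0xf1) == 5    since 0b11110001 has five set bits
//   lfs_scmp(5, 0xfffffffe) > 0   since 5 is 7 "ahead of" 0xfffffffe mod 2^32
static inline void lfs_util_spot_checks(void) {
    assert(lfs_npw2(1000) == 10);
    assert(lfs_ctz(12) == 2);
    assert(lfs_popc(0xf1) == 5);
    assert(lfs_scmp(5, 0xfffffffe) > 0);
}
```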
// Convert from 32-bit little-endian to native order
static inline uint32_t lfs_fromle32(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && ( \
(defined( BYTE_ORDER ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \
(defined(__BYTE_ORDER ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \
(defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
return a;
#elif !defined(LFS_NO_INTRINSICS) && ( \
(defined( BYTE_ORDER ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \
(defined(__BYTE_ORDER ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \
(defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
return __builtin_bswap32(a);
#else
return (((uint8_t*)&a)[0] << 0) |
(((uint8_t*)&a)[1] << 8) |
(((uint8_t*)&a)[2] << 16) |
(((uint8_t*)&a)[3] << 24);
#endif
}
// Convert to 32-bit little-endian from native order
static inline uint32_t lfs_tole32(uint32_t a) {
return lfs_fromle32(a);
}
// Calculate CRC-32 with polynomial = 0x04c11db7
// CRC-32 with polynomial = 0x04c11db7
void lfs_crc(uint32_t *crc, const void *buffer, size_t size);
// Allocate memory, only used if buffers are not provided to littlefs
static inline void *lfs_malloc(size_t size) {
#ifndef LFS_NO_MALLOC
return malloc(size);
#else
return NULL;
#endif
}
// Deallocate memory, only used if buffers are not provided to littlefs
static inline void lfs_free(void *p) {
#ifndef LFS_NO_MALLOC
free(p);
#endif
}
// Logging functions, these may be replaced by system-specific
// logging functions
#define LFS_DEBUG(fmt, ...) printf("lfs debug: " fmt "\n", __VA_ARGS__)
#define LFS_WARN(fmt, ...) printf("lfs warn: " fmt "\n", __VA_ARGS__)
#define LFS_ERROR(fmt, ...) printf("lfs error: " fmt "\n", __VA_ARGS__)
#endif
#endif


@@ -7,11 +7,11 @@
// test stuff
static void test_log(const char *s, uintmax_t v) {{
void test_log(const char *s, uintmax_t v) {{
printf("%s: %jd\n", s, v);
}}
static void test_assert(const char *file, unsigned line,
void test_assert(const char *file, unsigned line,
const char *s, uintmax_t v, uintmax_t e) {{
static const char *last[6] = {{0, 0}};
if (v != e || !(last[0] == s || last[1] == s ||
@@ -37,8 +37,7 @@ static void test_assert(const char *file, unsigned line,
// utility functions for traversals
static int __attribute__((used)) test_count(void *p, lfs_block_t b) {{
(void)b;
int test_count(void *p, lfs_block_t b) {{
unsigned *u = (unsigned*)p;
*u += 1;
return 0;
@@ -59,7 +58,7 @@ lfs_size_t size;
lfs_size_t wsize;
lfs_size_t rsize;
uintmax_t test;
uintmax_t res;
#ifndef LFS_READ_SIZE
#define LFS_READ_SIZE 16
@@ -97,7 +96,7 @@ const struct lfs_config cfg = {{
// Entry point
int main(void) {{
int main() {{
lfs_emubd_create(&cfg, "blocks");
{tests}


@@ -14,34 +14,22 @@ def generate(test):
match = re.match('(?: *\n)*( *)(.*)=>(.*);', line, re.DOTALL | re.MULTILINE)
if match:
tab, test, expect = match.groups()
lines.append(tab+'test = {test};'.format(test=test.strip()))
lines.append(tab+'test_assert("{name}", test, {expect});'.format(
lines.append(tab+'res = {test};'.format(test=test.strip()))
lines.append(tab+'test_assert("{name}", res, {expect});'.format(
name = re.match('\w*', test.strip()).group(),
expect = expect.strip()))
else:
lines.append(line)
# Create test file
with open('test.c', 'w') as file:
file.write(template.format(tests='\n'.join(lines)))
# Remove build artifacts to force rebuild
try:
os.remove('test.o')
os.remove('lfs')
except OSError:
pass
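To make the `=>` shorthand concrete, a test line such as `lfs_mount(&lfs, &cfg) => 0;` is expanded by generate() into ordinary C along these lines (reconstructed from the format strings above; the variable is spelled res on one side of this diff and test on the other):

``` c
// Expansion of:  lfs_mount(&lfs, &cfg) => 0;
res = lfs_mount(&lfs, &cfg);
test_assert("lfs_mount", res, 0);
```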
def compile():
subprocess.check_call([
os.environ.get('MAKE', 'make'),
'--no-print-directory', '-s'])
os.environ['CFLAGS'] = os.environ.get('CFLAGS', '') + ' -Werror'
subprocess.check_call(['make', '--no-print-directory', '-s'], env=os.environ)
def execute():
if 'EXEC' in os.environ:
subprocess.check_call([os.environ['EXEC'], "./lfs"])
else:
subprocess.check_call(["./lfs"])
subprocess.check_call(["./lfs"])
def main(test=None):
if test and not test.startswith('-'):


@@ -121,7 +121,6 @@ tests/test.py << TEST
size = strlen("exhaustion");
memcpy(buffer, "exhaustion", size);
lfs_file_write(&lfs, &file[0], buffer, size) => size;
lfs_file_sync(&lfs, &file[0]) => 0;
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
@@ -143,7 +142,6 @@ tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file[0], "exhaustion", LFS_O_RDONLY);
size = strlen("exhaustion");
lfs_file_size(&lfs, &file[0]) => size;
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "exhaustion", size) => 0;
lfs_file_close(&lfs, &file[0]) => 0;
@@ -168,7 +166,6 @@ tests/test.py << TEST
size = strlen("exhaustion");
memcpy(buffer, "exhaustion", size);
lfs_file_write(&lfs, &file[0], buffer, size) => size;
lfs_file_sync(&lfs, &file[0]) => 0;
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
@@ -190,7 +187,6 @@ tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file[0], "exhaustion", LFS_O_RDONLY);
size = strlen("exhaustion");
lfs_file_size(&lfs, &file[0]) => size;
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "exhaustion", size) => 0;
lfs_file_close(&lfs, &file[0]) => 0;
@@ -200,14 +196,14 @@ TEST
echo "--- Dir exhaustion test ---"
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "exhaustion", &info) => 0;
lfs_size_t fullsize = info.size;
lfs_remove(&lfs, "exhaustion") => 0;
lfs_file_open(&lfs, &file[0], "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < (cfg.block_count-6)*(cfg.block_size-8);
i += size) {
for (lfs_size_t i = 0; i < fullsize - 2*512; i += size) {
lfs_file_write(&lfs, &file[0], buffer, size) => size;
}
lfs_file_close(&lfs, &file[0]) => 0;
@@ -218,11 +214,7 @@ tests/test.py << TEST
lfs_file_open(&lfs, &file[0], "exhaustion", LFS_O_WRONLY | LFS_O_APPEND);
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < (cfg.block_size-8);
i += size) {
lfs_file_write(&lfs, &file[0], buffer, size) => size;
}
lfs_file_write(&lfs, &file[0], buffer, size) => size;
lfs_file_close(&lfs, &file[0]) => 0;
lfs_mkdir(&lfs, "exhaustiondir") => LFS_ERR_NOSPC;
@@ -232,14 +224,14 @@ TEST
echo "--- Chained dir exhaustion test ---"
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "exhaustion") => 0;
lfs_stat(&lfs, "exhaustion", &info) => 0;
lfs_size_t fullsize = info.size;
lfs_remove(&lfs, "exhaustion") => 0;
lfs_file_open(&lfs, &file[0], "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < (cfg.block_count-24)*(cfg.block_size-8);
i += size) {
for (lfs_size_t i = 0; i < fullsize - 19*512; i += size) {
lfs_file_write(&lfs, &file[0], buffer, size) => size;
}
lfs_file_close(&lfs, &file[0]) => 0;
@@ -255,9 +247,7 @@ tests/test.py << TEST
lfs_file_open(&lfs, &file[0], "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < (cfg.block_count-26)*(cfg.block_size-8);
i += size) {
for (lfs_size_t i = 0; i < fullsize - 20*512; i += size) {
lfs_file_write(&lfs, &file[0], buffer, size) => size;
}
lfs_file_close(&lfs, &file[0]) => 0;
@@ -266,46 +256,6 @@ tests/test.py << TEST
lfs_mkdir(&lfs, "exhaustiondir2") => LFS_ERR_NOSPC;
TEST
echo "--- Split dir test ---"
rm -rf blocks
tests/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
// create one block hole for half a directory
lfs_file_open(&lfs, &file[0], "bump", LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
memcpy(&buffer[i], "hi", 2);
}
lfs_file_write(&lfs, &file[0], buffer, cfg.block_size) => cfg.block_size;
lfs_file_close(&lfs, &file[0]) => 0;
lfs_file_open(&lfs, &file[0], "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < (cfg.block_count-6)*(cfg.block_size-8);
i += size) {
lfs_file_write(&lfs, &file[0], buffer, size) => size;
}
lfs_file_close(&lfs, &file[0]) => 0;
// open hole
lfs_remove(&lfs, "bump") => 0;
lfs_mkdir(&lfs, "splitdir") => 0;
lfs_file_open(&lfs, &file[0], "splitdir/bump",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
memcpy(&buffer[i], "hi", 2);
}
lfs_file_write(&lfs, &file[0], buffer, cfg.block_size) => LFS_ERR_NOSPC;
lfs_file_close(&lfs, &file[0]) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Results ---"
tests/stats.py


@@ -73,7 +73,7 @@ lfs_mktree
lfs_chktree
echo "--- Block corruption ---"
for i in {2..33}
for i in {0..33}
do
rm -rf blocks
mkdir blocks
@@ -82,17 +82,6 @@ do
lfs_chktree
done
echo "--- Block persistance ---"
for i in {2..33}
do
rm -rf blocks
mkdir blocks
lfs_mktree
chmod a-w blocks/$(printf '%x' $i) || true
lfs_mktree
lfs_chktree
done
echo "--- Big region corruption ---"
rm -rf blocks
mkdir blocks


@@ -56,7 +56,7 @@ TEST
echo "--- Directory failures ---"
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "potato") => LFS_ERR_EXIST;
lfs_mkdir(&lfs, "potato") => LFS_ERR_EXISTS;
lfs_dir_open(&lfs, &dir[0], "tomato") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir[0], "burito") => LFS_ERR_NOTDIR;
lfs_file_open(&lfs, &file[0], "tomato", LFS_O_RDONLY) => LFS_ERR_NOENT;
@@ -118,7 +118,6 @@ tests/test.py << TEST
sprintf((char*)buffer, "test%d", i);
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, (char*)buffer) => 0;
info.type => LFS_TYPE_DIR;
}
lfs_dir_read(&lfs, &dir[0], &info) => 0;
lfs_unmount(&lfs) => 0;
@@ -127,7 +126,7 @@ TEST
echo "--- Directory remove ---"
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "potato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "potato") => LFS_ERR_INVAL;
lfs_remove(&lfs, "potato/sweet") => 0;
lfs_remove(&lfs, "potato/baked") => 0;
lfs_remove(&lfs, "potato/fried") => 0;
@@ -221,7 +220,7 @@ tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "warmpotato") => 0;
lfs_mkdir(&lfs, "warmpotato/mushy") => 0;
lfs_rename(&lfs, "hotpotato", "warmpotato") => LFS_ERR_NOTEMPTY;
lfs_rename(&lfs, "hotpotato", "warmpotato") => LFS_ERR_INVAL;
lfs_remove(&lfs, "warmpotato/mushy") => 0;
lfs_rename(&lfs, "hotpotato", "warmpotato") => 0;
@@ -256,7 +255,7 @@ tests/test.py << TEST
lfs_rename(&lfs, "warmpotato/baked", "coldpotato/baked") => 0;
lfs_rename(&lfs, "warmpotato/sweet", "coldpotato/sweet") => 0;
lfs_rename(&lfs, "warmpotato/fried", "coldpotato/fried") => 0;
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "coldpotato") => LFS_ERR_INVAL;
lfs_remove(&lfs, "warmpotato") => 0;
lfs_unmount(&lfs) => 0;
TEST
@@ -283,53 +282,10 @@ tests/test.py << TEST
lfs_unmount(&lfs) => 0;
TEST
echo "--- Recursive remove ---"
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOTEMPTY;
lfs_dir_open(&lfs, &dir[0], "coldpotato") => 0;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
while (true) {
int err = lfs_dir_read(&lfs, &dir[0], &info);
err >= 0 => 1;
if (err == 0) {
break;
}
strcpy((char*)buffer, "coldpotato/");
strcat((char*)buffer, info.name);
lfs_remove(&lfs, (char*)buffer) => 0;
}
lfs_remove(&lfs, "coldpotato") => 0;
TEST
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir[0], "/") => 0;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, "burito") => 0;
info.type => LFS_TYPE_REG;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, "cactus") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir[0], &info) => 0;
lfs_dir_close(&lfs, &dir[0]) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Multi-block remove ---"
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "cactus") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "cactus") => LFS_ERR_INVAL;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf((char*)buffer, "cactus/test%d", i);
@@ -351,71 +307,9 @@ tests/test.py << TEST
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, "burito") => 0;
info.type => LFS_TYPE_REG;
lfs_dir_read(&lfs, &dir[0], &info) => 0;
lfs_dir_close(&lfs, &dir[0]) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Multi-block directory with files ---"
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "prickly-pear") => 0;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf((char*)buffer, "prickly-pear/test%d", i);
lfs_file_open(&lfs, &file[0], (char*)buffer,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
size = 6;
memcpy(wbuffer, "Hello", size);
lfs_file_write(&lfs, &file[0], wbuffer, size) => size;
lfs_file_close(&lfs, &file[0]) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir[0], "prickly-pear") => 0;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, ".") => 0;
strcmp(info.name, "coldpotato") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf((char*)buffer, "test%d", i);
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, (char*)buffer) => 0;
info.type => LFS_TYPE_REG;
info.size => 6;
}
lfs_dir_read(&lfs, &dir[0], &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Multi-block remove with files ---"
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOTEMPTY;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf((char*)buffer, "prickly-pear/test%d", i);
lfs_remove(&lfs, (char*)buffer) => 0;
}
lfs_remove(&lfs, "prickly-pear") => 0;
lfs_unmount(&lfs) => 0;
TEST
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir[0], "/") => 0;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, "burito") => 0;
info.type => LFS_TYPE_REG;
lfs_dir_read(&lfs, &dir[0], &info) => 0;
lfs_dir_close(&lfs, &dir[0]) => 0;
lfs_unmount(&lfs) => 0;


@@ -34,8 +34,7 @@ tests/test.py << TEST
lfs_size_t chunk = 31;
srand(0);
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file[0], "$2",
${3:-LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC}) => 0;
lfs_file_open(&lfs, &file[0], "$2", LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (lfs_size_t i = 0; i < size; i += chunk) {
chunk = (chunk < size - i) ? chunk : size - i;
for (lfs_size_t b = 0; b < chunk; b++) {
@@ -54,10 +53,7 @@ tests/test.py << TEST
lfs_size_t chunk = 29;
srand(0);
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "$2", &info) => 0;
info.type => LFS_TYPE_REG;
info.size => size;
lfs_file_open(&lfs, &file[0], "$2", ${3:-LFS_O_RDONLY}) => 0;
lfs_file_open(&lfs, &file[0], "$2", LFS_O_RDONLY) => 0;
for (lfs_size_t i = 0; i < size; i += chunk) {
chunk = (chunk < size - i) ? chunk : size - i;
lfs_file_read(&lfs, &file[0], buffer, chunk) => chunk;
@@ -82,27 +78,10 @@ echo "--- Large file test ---"
w_test $LARGESIZE largeavacado
r_test $LARGESIZE largeavacado
echo "--- Zero file test ---"
w_test 0 noavacado
r_test 0 noavacado
echo "--- Truncate small test ---"
w_test $SMALLSIZE mediumavacado
r_test $SMALLSIZE mediumavacado
w_test $MEDIUMSIZE mediumavacado
r_test $MEDIUMSIZE mediumavacado
echo "--- Truncate zero test ---"
w_test $SMALLSIZE noavacado
r_test $SMALLSIZE noavacado
w_test 0 noavacado
r_test 0 noavacado
echo "--- Non-overlap check ---"
r_test $SMALLSIZE smallavacado
r_test $MEDIUMSIZE mediumavacado
r_test $LARGESIZE largeavacado
r_test 0 noavacado
echo "--- Dir check ---"
tests/test.py << TEST
@@ -126,33 +105,10 @@ tests/test.py << TEST
strcmp(info.name, "largeavacado") => 0;
info.type => LFS_TYPE_REG;
info.size => $LARGESIZE;
lfs_dir_read(&lfs, &dir[0], &info) => 1;
strcmp(info.name, "noavacado") => 0;
info.type => LFS_TYPE_REG;
info.size => 0;
lfs_dir_read(&lfs, &dir[0], &info) => 0;
lfs_dir_close(&lfs, &dir[0]) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Many file test ---"
tests/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
tests/test.py << TEST
// Create 300 files of 6 bytes
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "directory") => 0;
for (unsigned i = 0; i < 300; i++) {
snprintf((char*)buffer, sizeof(buffer), "file_%03d", i);
lfs_file_open(&lfs, &file[0], (char*)buffer, LFS_O_WRONLY | LFS_O_CREAT) => 0;
size = 6;
memcpy(wbuffer, "Hello", size);
lfs_file_write(&lfs, &file[0], wbuffer, size) => size;
lfs_file_close(&lfs, &file[0]) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
echo "--- Results ---"
tests/stats.py


@@ -30,10 +30,20 @@ echo "--- Invalid mount ---"
tests/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
rm -f blocks/0 blocks/1
rm blocks/0 blocks/1
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
TEST
echo "--- Valid corrupt mount ---"
tests/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
rm blocks/0
tests/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Results ---"
tests/stats.py


@@ -108,10 +108,6 @@ tests/test.py << TEST
lfs_stat(&lfs, "/", &info) => 0;
info.type => LFS_TYPE_DIR;
strcmp(info.name, "/") => 0;
lfs_mkdir(&lfs, "/") => LFS_ERR_EXIST;
lfs_file_open(&lfs, &file[0], "/", LFS_O_WRONLY | LFS_O_CREAT)
=> LFS_ERR_ISDIR;
lfs_unmount(&lfs) => 0;
TEST


@@ -133,14 +133,6 @@ tests/test.py << TEST
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file[0], 0, LFS_SEEK_CUR) => size;
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file[0], size, LFS_SEEK_CUR) => 3*size;
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file[0], pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
@@ -182,14 +174,6 @@ tests/test.py << TEST
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file[0], 0, LFS_SEEK_CUR) => size;
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file[0], size, LFS_SEEK_CUR) => 3*size;
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file[0], pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;


@@ -1,158 +0,0 @@
#!/bin/bash
set -eu
SMALLSIZE=32
MEDIUMSIZE=2048
LARGESIZE=8192
echo "=== Truncate tests ==="
rm -rf blocks
tests/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
truncate_test() {
STARTSIZES="$1"
STARTSEEKS="$2"
HOTSIZES="$3"
COLDSIZES="$4"
tests/test.py << TEST
static const lfs_off_t startsizes[] = {$STARTSIZES};
static const lfs_off_t startseeks[] = {$STARTSEEKS};
static const lfs_off_t hotsizes[] = {$HOTSIZES};
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < sizeof(startsizes)/sizeof(startsizes[0]); i++) {
sprintf((char*)buffer, "hairyhead%d", i);
lfs_file_open(&lfs, &file[0], (const char*)buffer,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
for (int j = 0; j < startsizes[i]; j += size) {
lfs_file_write(&lfs, &file[0], buffer, size) => size;
}
lfs_file_size(&lfs, &file[0]) => startsizes[i];
if (startseeks[i] != startsizes[i]) {
lfs_file_seek(&lfs, &file[0],
startseeks[i], LFS_SEEK_SET) => startseeks[i];
}
lfs_file_truncate(&lfs, &file[0], hotsizes[i]) => 0;
lfs_file_size(&lfs, &file[0]) => hotsizes[i];
lfs_file_close(&lfs, &file[0]) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
tests/test.py << TEST
static const lfs_off_t startsizes[] = {$STARTSIZES};
static const lfs_off_t hotsizes[] = {$HOTSIZES};
static const lfs_off_t coldsizes[] = {$COLDSIZES};
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < sizeof(startsizes)/sizeof(startsizes[0]); i++) {
sprintf((char*)buffer, "hairyhead%d", i);
lfs_file_open(&lfs, &file[0], (const char*)buffer, LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file[0]) => hotsizes[i];
size = strlen("hair");
int j = 0;
for (; j < startsizes[i] && j < hotsizes[i]; j += size) {
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
for (; j < hotsizes[i]; j += size) {
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "\0\0\0\0", size) => 0;
}
lfs_file_truncate(&lfs, &file[0], coldsizes[i]) => 0;
lfs_file_size(&lfs, &file[0]) => coldsizes[i];
lfs_file_close(&lfs, &file[0]) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
tests/test.py << TEST
static const lfs_off_t startsizes[] = {$STARTSIZES};
static const lfs_off_t hotsizes[] = {$HOTSIZES};
static const lfs_off_t coldsizes[] = {$COLDSIZES};
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < sizeof(startsizes)/sizeof(startsizes[0]); i++) {
sprintf((char*)buffer, "hairyhead%d", i);
lfs_file_open(&lfs, &file[0], (const char*)buffer, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file[0]) => coldsizes[i];
size = strlen("hair");
int j = 0;
for (; j < startsizes[i] && j < hotsizes[i] && j < coldsizes[i];
j += size) {
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
for (; j < coldsizes[i]; j += size) {
lfs_file_read(&lfs, &file[0], buffer, size) => size;
memcmp(buffer, "\0\0\0\0", size) => 0;
}
lfs_file_close(&lfs, &file[0]) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
}
echo "--- Cold shrinking truncate ---"
truncate_test \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE"
echo "--- Cold expanding truncate ---"
truncate_test \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE"
echo "--- Warm shrinking truncate ---"
truncate_test \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
" 0, 0, 0, 0, 0"
echo "--- Warm expanding truncate ---"
truncate_test \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE"
echo "--- Mid-file shrinking truncate ---"
truncate_test \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
" $LARGESIZE, $LARGESIZE, $LARGESIZE, $LARGESIZE, $LARGESIZE" \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
" 0, 0, 0, 0, 0"
echo "--- Mid-file expanding truncate ---"
truncate_test \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
" 0, 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE"
echo "--- Results ---"
tests/stats.py