Mirror of https://github.com/eledio-devices/thirdparty-littlefs.git
Synced 2025-11-01 16:14:13 +01:00

Compare commits: test-revam ... test-revam (8 commits)

| Author | SHA1 | Date |
|---|---|---|
| | b9e403d55c | |
| | d58aaf88dc | |
| | 71c844be53 | |
| | 75cd51b39e | |
| | fc354801fa | |
| | 557ec332fe | |
| | 5e839df234 | |
| | 47ab0426b1 | |

.travis.yml (633 lines changed)
@@ -1,70 +1,49 @@
# environment variables
# Environment variables
env:
global:
- CFLAGS=-Werror
- MAKEFLAGS=-j

# cache installation dirs
cache:
pip: true
directories:
- $HOME/.cache/apt

# common installation
_: &install-common
# need toml, also pip3 isn't installed by default?
- sudo apt-get install python3 python3-pip
- sudo pip3 install toml
# setup a ram-backed disk to speed up reentrant tests
- mkdir disks
- sudo mount -t tmpfs -o size=100m tmpfs disks
- export TFLAGS="$TFLAGS --disk=disks/disk"

# test cases
_: &test-example
# Common test script
script:
# make sure example can at least compile
- sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c &&
- sed -n '/``` c/,/```/{/```/d; p;}' README.md > test.c &&
make all CFLAGS+="
-Duser_provided_block_device_read=NULL
-Duser_provided_block_device_prog=NULL
-Duser_provided_block_device_erase=NULL
-Duser_provided_block_device_sync=NULL
-include stdio.h"
# default tests
_: &test-default
# normal+reentrant tests
- make test TFLAGS+="-nrk"
# common real-life geometries
_: &test-nor
# NOR flash: read/prog = 1 block = 4KiB
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
_: &test-emmc
# eMMC: read/prog = 512 block = 512
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
_: &test-nand
# NAND flash: read/prog = 4KiB block = 32KiB
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
# other extreme geometries that are useful for testing various corner cases
_: &test-no-intrinsics
- make test TFLAGS+="-nrk -DLFS_NO_INTRINSICS"
_: &test-no-inline
- make test TFLAGS+="-nrk -DLFS_INLINE_MAX=0"
_: &test-byte-writes
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
_: &test-block-cycles
- make test TFLAGS+="-nrk -DLFS_BLOCK_CYCLES=1"
_: &test-odd-block-count
- make test TFLAGS+="-nrk -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
_: &test-odd-block-size
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"

# report size
_: &report-size
# run tests
- make test QUIET=1

# run tests with a few different configurations
|
||||
- make test QUIET=1 CFLAGS+="-DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=4"
|
||||
- make test QUIET=1 CFLAGS+="-DLFS_READ_SIZE=512 -DLFS_CACHE_SIZE=512 -DLFS_BLOCK_CYCLES=16"
|
||||
- make test QUIET=1 CFLAGS+="-DLFS_READ_SIZE=8 -DLFS_CACHE_SIZE=16 -DLFS_BLOCK_CYCLES=2"
|
||||
- make test QUIET=1 CFLAGS+="-DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
|
||||
|
||||
- make clean test QUIET=1 CFLAGS+="-DLFS_INLINE_MAX=0"
|
||||
- make clean test QUIET=1 CFLAGS+="-DLFS_EMUBD_ERASE_VALUE=0xff"
|
||||
- make clean test QUIET=1 CFLAGS+="-DLFS_NO_INTRINSICS"
|
||||
|
||||
# additional configurations that don't support all tests (this should be
|
||||
# fixed but at the moment it is what it is)
|
||||
- make test_files QUIET=1
|
||||
CFLAGS+="-DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
|
||||
- make test_files QUIET=1
|
||||
CFLAGS+="-DLFS_READ_SIZE=\(2*1024\) -DLFS_BLOCK_SIZE=\(64*1024\)"
|
||||
- make test_files QUIET=1
|
||||
CFLAGS+="-DLFS_READ_SIZE=\(8*1024\) -DLFS_BLOCK_SIZE=\(64*1024\)"
|
||||
- make test_files QUIET=1
|
||||
CFLAGS+="-DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
|
||||
|
||||
# compile and find the code size with the smallest configuration
|
||||
- make -j1 clean size
|
||||
OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')"
|
||||
- make clean size
|
||||
OBJ="$(ls lfs*.o | tr '\n' ' ')"
|
||||
CFLAGS+="-DLFS_NO_ASSERT -DLFS_NO_DEBUG -DLFS_NO_WARN -DLFS_NO_ERROR"
|
||||
| tee sizes
|
||||
|
||||
# update status if we succeeded, compare with master if possible
|
||||
- |
|
||||
if [ "$TRAVIS_TEST_RESULT" -eq 0 ]
|
||||
@@ -72,7 +51,7 @@ _: &report-size
|
||||
CURR=$(tail -n1 sizes | awk '{print $1}')
|
||||
PREV=$(curl -u "$GEKY_BOT_STATUSES" https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \
|
||||
| jq -re "select(.sha != \"$TRAVIS_COMMIT\")
|
||||
| .statuses[] | select(.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\").description
|
||||
| .statuses[] | select(.context == \"$STAGE/$NAME\").description
|
||||
| capture(\"code size is (?<size>[0-9]+)\").size" \
|
||||
|| echo 0)
|
||||
|
||||
@@ -83,347 +62,257 @@ _: &report-size
|
||||
fi
|
||||
fi
|
||||
|
||||
# stage control
|
||||
stages:
|
||||
- name: test
|
||||
- name: deploy
|
||||
if: branch = master AND type = push
|
||||
|
||||
# job control
|
||||
# CI matrix
|
||||
jobs:
|
||||
# native testing
|
||||
- &x86
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-x86
|
||||
install: *install-common
|
||||
script: [*test-example, *report-size]
|
||||
- {<<: *x86, script: [*test-default, *report-size]}
|
||||
- {<<: *x86, script: [*test-nor, *report-size]}
|
||||
- {<<: *x86, script: [*test-emmc, *report-size]}
|
||||
- {<<: *x86, script: [*test-nand, *report-size]}
|
||||
- {<<: *x86, script: [*test-no-intrinsics, *report-size]}
|
||||
- {<<: *x86, script: [*test-no-inline, *report-size]}
|
||||
- {<<: *x86, script: [*test-byte-writes, *report-size]}
|
||||
- {<<: *x86, script: [*test-block-cycles, *report-size]}
|
||||
- {<<: *x86, script: [*test-odd-block-count, *report-size]}
|
||||
- {<<: *x86, script: [*test-odd-block-size, *report-size]}
|
||||
include:
|
||||
# native testing
|
||||
- stage: test
|
||||
env:
|
||||
- STAGE=test
|
||||
- NAME=littlefs-x86
|
||||
|
||||
# cross-compile with ARM (thumb mode)
|
||||
- &arm
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-arm
|
||||
- CC="arm-linux-gnueabi-gcc --static -mthumb"
|
||||
- TFLAGS="$TFLAGS --exec=qemu-arm"
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install
|
||||
gcc-arm-linux-gnueabi
|
||||
libc6-dev-armel-cross
|
||||
qemu-user
|
||||
- arm-linux-gnueabi-gcc --version
|
||||
- qemu-arm -version
|
||||
script: [*test-example, *report-size]
|
||||
- {<<: *arm, script: [*test-default, *report-size]}
|
||||
- {<<: *arm, script: [*test-nor, *report-size]}
|
||||
- {<<: *arm, script: [*test-emmc, *report-size]}
|
||||
- {<<: *arm, script: [*test-nand, *report-size]}
|
||||
- {<<: *arm, script: [*test-no-intrinsics, *report-size]}
|
||||
- {<<: *arm, script: [*test-no-inline, *report-size]}
|
||||
# it just takes way too long to run byte-level writes in qemu,
|
||||
# note this is still tested in the native tests
|
||||
#- {<<: *arm, script: [*test-byte-writes, *report-size]}
|
||||
- {<<: *arm, script: [*test-block-cycles, *report-size]}
|
||||
- {<<: *arm, script: [*test-odd-block-count, *report-size]}
|
||||
- {<<: *arm, script: [*test-odd-block-size, *report-size]}
|
||||
# cross-compile with ARM (thumb mode)
|
||||
- stage: test
|
||||
env:
|
||||
- STAGE=test
|
||||
- NAME=littlefs-arm
|
||||
- CC="arm-linux-gnueabi-gcc --static -mthumb"
|
||||
- EXEC="qemu-arm"
|
||||
install:
|
||||
- sudo apt-get install
|
||||
gcc-arm-linux-gnueabi
|
||||
libc6-dev-armel-cross
|
||||
qemu-user
|
||||
- arm-linux-gnueabi-gcc --version
|
||||
- qemu-arm -version
|
||||
|
||||
# cross-compile with MIPS
|
||||
- &mips
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-mips
|
||||
- CC="mips-linux-gnu-gcc --static"
|
||||
- TFLAGS="$TFLAGS --exec=qemu-mips"
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install
|
||||
gcc-mips-linux-gnu
|
||||
libc6-dev-mips-cross
|
||||
qemu-user
|
||||
- mips-linux-gnu-gcc --version
|
||||
- qemu-mips -version
|
||||
script: [*test-example, *report-size]
|
||||
- {<<: *mips, script: [*test-default, *report-size]}
|
||||
- {<<: *mips, script: [*test-nor, *report-size]}
|
||||
- {<<: *mips, script: [*test-emmc, *report-size]}
|
||||
- {<<: *mips, script: [*test-nand, *report-size]}
|
||||
- {<<: *mips, script: [*test-no-intrinsics, *report-size]}
|
||||
- {<<: *mips, script: [*test-no-inline, *report-size]}
|
||||
# it just takes way too long to run byte-level writes in qemu,
|
||||
# note this is still tested in the native tests
|
||||
#- {<<: *mips, script: [*test-byte-writes, *report-size]}
|
||||
- {<<: *mips, script: [*test-block-cycles, *report-size]}
|
||||
- {<<: *mips, script: [*test-odd-block-count, *report-size]}
|
||||
- {<<: *mips, script: [*test-odd-block-size, *report-size]}
|
||||
# cross-compile with PowerPC
|
||||
- stage: test
|
||||
env:
|
||||
- STAGE=test
|
||||
- NAME=littlefs-powerpc
|
||||
- CC="powerpc-linux-gnu-gcc --static"
|
||||
- EXEC="qemu-ppc"
|
||||
install:
|
||||
- sudo apt-get install
|
||||
gcc-powerpc-linux-gnu
|
||||
libc6-dev-powerpc-cross
|
||||
qemu-user
|
||||
- powerpc-linux-gnu-gcc --version
|
||||
- qemu-ppc -version
|
||||
|
||||
# cross-compile with PowerPC
|
||||
- &powerpc
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-powerpc
|
||||
- CC="powerpc-linux-gnu-gcc --static"
|
||||
- TFLAGS="$TFLAGS --exec=qemu-ppc"
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install
|
||||
gcc-powerpc-linux-gnu
|
||||
libc6-dev-powerpc-cross
|
||||
qemu-user
|
||||
- powerpc-linux-gnu-gcc --version
|
||||
- qemu-ppc -version
|
||||
script: [*test-example, *report-size]
|
||||
- {<<: *powerpc, script: [*test-default, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-nor, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-emmc, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-nand, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-no-intrinsics, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-no-inline, *report-size]}
|
||||
# it just takes way too long to run byte-level writes in qemu,
|
||||
# note this is still tested in the native tests
|
||||
#- {<<: *powerpc, script: [*test-byte-writes, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-block-cycles, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-odd-block-count, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-odd-block-size, *report-size]}
|
||||
# cross-compile with MIPS
|
||||
- stage: test
|
||||
env:
|
||||
- STAGE=test
|
||||
- NAME=littlefs-mips
|
||||
- CC="mips-linux-gnu-gcc --static"
|
||||
- EXEC="qemu-mips"
|
||||
install:
|
||||
- sudo apt-get install
|
||||
gcc-mips-linux-gnu
|
||||
libc6-dev-mips-cross
|
||||
qemu-user
|
||||
- mips-linux-gnu-gcc --version
|
||||
- qemu-mips -version
|
||||
|
||||
# test under valgrind, checking for memory errors
|
||||
- &valgrind
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-valgrind
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install valgrind
|
||||
- valgrind --version
|
||||
script:
|
||||
- make test TFLAGS+="-k --valgrind"
|
||||
# self-host with littlefs-fuse for fuzz test
|
||||
- stage: test
|
||||
env:
|
||||
- STAGE=test
|
||||
- NAME=littlefs-fuse
|
||||
if: branch !~ -prefix$
|
||||
install:
|
||||
- sudo apt-get install libfuse-dev
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2
|
||||
- fusermount -V
|
||||
- gcc --version
|
||||
before_script:
|
||||
# setup disk for littlefs-fuse
|
||||
- rm -rf littlefs-fuse/littlefs/*
|
||||
- cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
|
||||
|
||||
# self-host with littlefs-fuse for fuzz test
|
||||
- stage: test
|
||||
env:
|
||||
- NAME=littlefs-fuse
|
||||
if: branch !~ -prefix$
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install libfuse-dev
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2
|
||||
- fusermount -V
|
||||
- gcc --version
|
||||
- mkdir mount
|
||||
- sudo chmod a+rw /dev/loop0
|
||||
- dd if=/dev/zero bs=512 count=4096 of=disk
|
||||
- losetup /dev/loop0 disk
|
||||
script:
|
||||
# self-host test
|
||||
- make -C littlefs-fuse
|
||||
|
||||
# setup disk for littlefs-fuse
|
||||
- rm -rf littlefs-fuse/littlefs/*
|
||||
- cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
|
||||
- littlefs-fuse/lfs --format /dev/loop0
|
||||
- littlefs-fuse/lfs /dev/loop0 mount
|
||||
|
||||
- mkdir mount
|
||||
- sudo chmod a+rw /dev/loop0
|
||||
- dd if=/dev/zero bs=512 count=128K of=disk
|
||||
- losetup /dev/loop0 disk
|
||||
script:
|
||||
# self-host test
|
||||
- make -C littlefs-fuse
|
||||
- ls mount
|
||||
- mkdir mount/littlefs
|
||||
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test_dirs test_files QUIET=1
|
||||
|
||||
- littlefs-fuse/lfs --format /dev/loop0
|
||||
- littlefs-fuse/lfs /dev/loop0 mount
|
||||
# self-host with littlefs-fuse for fuzz test
|
||||
- stage: test
|
||||
env:
|
||||
- STAGE=test
|
||||
- NAME=littlefs-migration
|
||||
if: branch !~ -prefix$
|
||||
install:
|
||||
- sudo apt-get install libfuse-dev
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 v2
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v1 v1
|
||||
- fusermount -V
|
||||
- gcc --version
|
||||
before_script:
|
||||
# setup disk for littlefs-fuse
|
||||
- rm -rf v2/littlefs/*
|
||||
- cp -r $(git ls-tree --name-only HEAD) v2/littlefs
|
||||
|
||||
- ls mount
|
||||
- mkdir mount/littlefs
|
||||
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test
|
||||
- mkdir mount
|
||||
- sudo chmod a+rw /dev/loop0
|
||||
- dd if=/dev/zero bs=512 count=4096 of=disk
|
||||
- losetup /dev/loop0 disk
|
||||
script:
|
||||
# compile v1 and v2
|
||||
- make -C v1
|
||||
- make -C v2
|
||||
|
||||
# test migration using littlefs-fuse
|
||||
- stage: test
|
||||
env:
|
||||
- NAME=littlefs-migration
|
||||
if: branch !~ -prefix$
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install libfuse-dev
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 v2
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v1 v1
|
||||
- fusermount -V
|
||||
- gcc --version
|
||||
# run self-host test with v1
|
||||
- v1/lfs --format /dev/loop0
|
||||
- v1/lfs /dev/loop0 mount
|
||||
|
||||
# setup disk for littlefs-fuse
|
||||
- rm -rf v2/littlefs/*
|
||||
- cp -r $(git ls-tree --name-only HEAD) v2/littlefs
|
||||
- ls mount
|
||||
- mkdir mount/littlefs
|
||||
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test_dirs test_files QUIET=1
|
||||
|
||||
- mkdir mount
|
||||
- sudo chmod a+rw /dev/loop0
|
||||
- dd if=/dev/zero bs=512 count=128K of=disk
|
||||
- losetup /dev/loop0 disk
|
||||
script:
|
||||
# compile v1 and v2
|
||||
- make -C v1
|
||||
- make -C v2
|
||||
# attempt to migrate
|
||||
- cd ../..
|
||||
- fusermount -u mount
|
||||
|
||||
# run self-host test with v1
|
||||
- v1/lfs --format /dev/loop0
|
||||
- v1/lfs /dev/loop0 mount
|
||||
- v2/lfs --migrate /dev/loop0
|
||||
- v2/lfs /dev/loop0 mount
|
||||
|
||||
- ls mount
|
||||
- mkdir mount/littlefs
|
||||
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test
|
||||
# run self-host test with v2 right where we left off
|
||||
- ls mount
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test_dirs test_files QUIET=1
|
||||
|
||||
# attempt to migrate
|
||||
- cd ../..
|
||||
- fusermount -u mount
|
||||
# Automatically create releases
|
||||
- stage: deploy
|
||||
env:
|
||||
- STAGE=deploy
|
||||
- NAME=deploy
|
||||
script:
|
||||
- |
|
||||
bash << 'SCRIPT'
|
||||
set -ev
|
||||
# Find version defined in lfs.h
|
||||
LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3)
|
||||
LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16)))
|
||||
LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >> 0)))
|
||||
# Grab latest patch from repo tags, default to 0, needs finagling
|
||||
# to get past github's pagination api
|
||||
PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.
|
||||
PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \
|
||||
| sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \
|
||||
|| echo $PREV_URL)
|
||||
LFS_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \
|
||||
| jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g")
|
||||
.captures[].string | tonumber) | max + 1' \
|
||||
|| echo 0)
|
||||
# We have our new version
|
||||
LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH"
|
||||
echo "VERSION $LFS_VERSION"
|
||||
# Check that we're the most recent commit
|
||||
CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \
|
||||
| jq -re '.sha')
|
||||
[ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0
|
||||
# Create major branch
|
||||
git branch v$LFS_VERSION_MAJOR HEAD
|
||||
# Create major prefix branch
|
||||
git config user.name "geky bot"
|
||||
git config user.email "bot@geky.net"
|
||||
git fetch https://github.com/$TRAVIS_REPO_SLUG.git \
|
||||
--depth=50 v$LFS_VERSION_MAJOR-prefix || true
|
||||
./scripts/prefix.py lfs$LFS_VERSION_MAJOR
|
||||
git branch v$LFS_VERSION_MAJOR-prefix $( \
|
||||
git commit-tree $(git write-tree) \
|
||||
$(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
|
||||
-p HEAD \
|
||||
-m "Generated v$LFS_VERSION_MAJOR prefixes")
|
||||
git reset --hard
|
||||
# Update major version branches (vN and vN-prefix)
|
||||
git push --atomic https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \
|
||||
v$LFS_VERSION_MAJOR \
|
||||
v$LFS_VERSION_MAJOR-prefix
|
||||
# Build release notes
|
||||
PREV=$(git tag --sort=-v:refname -l "v*" | head -1)
|
||||
if [ ! -z "$PREV" ]
|
||||
then
|
||||
echo "PREV $PREV"
|
||||
CHANGES=$(git log --oneline $PREV.. --grep='^Merge' --invert-grep)
|
||||
printf "CHANGES\n%s\n\n" "$CHANGES"
|
||||
fi
|
||||
case ${GEKY_BOT_DRAFT:-minor} in
|
||||
true) DRAFT=true ;;
|
||||
minor) DRAFT=$(jq -R 'endswith(".0")' <<< "$LFS_VERSION") ;;
|
||||
false) DRAFT=false ;;
|
||||
esac
|
||||
# Create the release and patch version tag (vN.N.N)
|
||||
curl -f -u "$GEKY_BOT_RELEASES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \
|
||||
-d "{
|
||||
\"tag_name\": \"$LFS_VERSION\",
|
||||
\"name\": \"${LFS_VERSION%.0}\",
|
||||
\"target_commitish\": \"$TRAVIS_COMMIT\",
|
||||
\"draft\": $DRAFT,
|
||||
\"body\": $(jq -sR '.' <<< "$CHANGES")
|
||||
}" #"
|
||||
SCRIPT
|
||||
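The version arithmetic at the top of the deploy script above assumes LFS_VERSION packs the major number into the upper 16 bits and the minor number into the lower 16 bits of one 32-bit value. A minimal C sketch of that decoding, using a hypothetical LFS_VERSION constant (the real value is whatever lfs.h defines at the tagged commit):

``` c
#include <stdint.h>
#include <stdio.h>

// Hypothetical packed version, standing in for "#define LFS_VERSION ..." in lfs.h.
#define LFS_VERSION 0x00020002

int main(void) {
    // Same arithmetic as the deploy script:
    //   LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16)))
    //   LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >> 0)))
    uint32_t major = 0xffff & ((uint32_t)LFS_VERSION >> 16);
    uint32_t minor = 0xffff & ((uint32_t)LFS_VERSION >> 0);

    // The patch number is then looked up from existing vMAJOR.MINOR.* tags.
    printf("v%u.%u.<patch>\n", (unsigned)major, (unsigned)minor);
    return 0;
}
```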
|
||||
- v2/lfs --migrate /dev/loop0
|
||||
- v2/lfs /dev/loop0 mount
|
||||
|
||||
# run self-host test with v2 right where we left off
|
||||
- ls mount
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test
|
||||
|
||||
# automatically create releases
|
||||
- stage: deploy
|
||||
env:
|
||||
- NAME=deploy
|
||||
script:
|
||||
- |
|
||||
bash << 'SCRIPT'
|
||||
set -ev
|
||||
# Find version defined in lfs.h
|
||||
LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3)
|
||||
LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16)))
|
||||
LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >> 0)))
|
||||
# Grab latest patch from repo tags, default to 0, needs finagling
|
||||
# to get past github's pagination api
|
||||
PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.
|
||||
PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \
|
||||
| sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \
|
||||
|| echo $PREV_URL)
|
||||
LFS_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \
|
||||
| jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g")
|
||||
.captures[].string | tonumber) | max + 1' \
|
||||
|| echo 0)
|
||||
# We have our new version
|
||||
LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH"
|
||||
echo "VERSION $LFS_VERSION"
|
||||
# Check that we're the most recent commit
|
||||
CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \
|
||||
| jq -re '.sha')
|
||||
[ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0
|
||||
# Create major branch
|
||||
git branch v$LFS_VERSION_MAJOR HEAD
|
||||
# Create major prefix branch
|
||||
git config user.name "geky bot"
|
||||
git config user.email "bot@geky.net"
|
||||
git fetch https://github.com/$TRAVIS_REPO_SLUG.git \
|
||||
--depth=50 v$LFS_VERSION_MAJOR-prefix || true
|
||||
./scripts/prefix.py lfs$LFS_VERSION_MAJOR
|
||||
git branch v$LFS_VERSION_MAJOR-prefix $( \
|
||||
git commit-tree $(git write-tree) \
|
||||
$(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
|
||||
-p HEAD \
|
||||
-m "Generated v$LFS_VERSION_MAJOR prefixes")
|
||||
git reset --hard
|
||||
# Update major version branches (vN and vN-prefix)
|
||||
git push --atomic https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \
|
||||
v$LFS_VERSION_MAJOR \
|
||||
v$LFS_VERSION_MAJOR-prefix
|
||||
# Build release notes
|
||||
PREV=$(git tag --sort=-v:refname -l "v*" | head -1)
|
||||
if [ ! -z "$PREV" ]
|
||||
then
|
||||
echo "PREV $PREV"
|
||||
CHANGES=$(git log --oneline $PREV.. --grep='^Merge' --invert-grep)
|
||||
printf "CHANGES\n%s\n\n" "$CHANGES"
|
||||
fi
|
||||
case ${GEKY_BOT_DRAFT:-minor} in
|
||||
true) DRAFT=true ;;
|
||||
minor) DRAFT=$(jq -R 'endswith(".0")' <<< "$LFS_VERSION") ;;
|
||||
false) DRAFT=false ;;
|
||||
esac
|
||||
# Create the release and patch version tag (vN.N.N)
|
||||
curl -f -u "$GEKY_BOT_RELEASES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \
|
||||
-d "{
|
||||
\"tag_name\": \"$LFS_VERSION\",
|
||||
\"name\": \"${LFS_VERSION%.0}\",
|
||||
\"target_commitish\": \"$TRAVIS_COMMIT\",
|
||||
\"draft\": $DRAFT,
|
||||
\"body\": $(jq -sR '.' <<< "$CHANGES")
|
||||
}" #"
|
||||
SCRIPT
|
||||
|
||||
# manage statuses
|
||||
# Manage statuses
|
||||
before_install:
|
||||
- |
|
||||
# don't clobber other (not us) failures
|
||||
if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
| jq -e ".statuses[] | select(
|
||||
.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
|
||||
.state == \"failure\" and
|
||||
(.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
|
||||
then
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
|
||||
\"state\": \"pending\",
|
||||
\"description\": \"${STATUS:-In progress}\",
|
||||
\"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
|
||||
}"
|
||||
fi
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"$STAGE/$NAME\",
|
||||
\"state\": \"pending\",
|
||||
\"description\": \"${STATUS:-In progress}\",
|
||||
\"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\"
|
||||
}"
|
||||
|
||||
after_failure:
|
||||
- |
|
||||
# don't clobber other (not us) failures
|
||||
if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
| jq -e ".statuses[] | select(
|
||||
.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
|
||||
.state == \"failure\" and
|
||||
(.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
|
||||
then
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
|
||||
\"state\": \"failure\",
|
||||
\"description\": \"${STATUS:-Failed}\",
|
||||
\"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
|
||||
}"
|
||||
fi
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"$STAGE/$NAME\",
|
||||
\"state\": \"failure\",
|
||||
\"description\": \"${STATUS:-Failed}\",
|
||||
\"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\"
|
||||
}"
|
||||
|
||||
after_success:
|
||||
- |
|
||||
# don't clobber other (not us) failures
|
||||
# only update if we were last job to mark in progress,
|
||||
# this isn't perfect but is probably good enough
|
||||
if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
| jq -e ".statuses[] | select(
|
||||
.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
|
||||
(.state == \"failure\" or .state == \"pending\") and
|
||||
(.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
|
||||
then
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
|
||||
\"state\": \"success\",
|
||||
\"description\": \"${STATUS:-Passed}\",
|
||||
\"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
|
||||
}"
|
||||
fi
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"$STAGE/$NAME\",
|
||||
\"state\": \"success\",
|
||||
\"description\": \"${STATUS:-Passed}\",
|
||||
\"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\"
|
||||
}"
|
||||
|
||||
# Job control
|
||||
stages:
|
||||
- name: test
|
||||
- name: deploy
|
||||
if: branch = master AND type = push
|
||||
|
||||
lfs.c (111 lines changed)
@@ -29,8 +29,8 @@ static int lfs_bd_read(lfs_t *lfs,
|
||||
lfs_block_t block, lfs_off_t off,
|
||||
void *buffer, lfs_size_t size) {
|
||||
uint8_t *data = buffer;
|
||||
if (block >= lfs->cfg->block_count ||
|
||||
off+size > lfs->cfg->block_size) {
|
||||
LFS_ASSERT(block != LFS_BLOCK_NULL);
|
||||
if (off+size > lfs->cfg->block_size) {
|
||||
return LFS_ERR_CORRUPT;
|
||||
}
|
||||
|
||||
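The hunk above changes how lfs_bd_read guards against out-of-range reads: instead of rejecting any block >= block_count, the new code asserts the block is not LFS_BLOCK_NULL and only soft-fails when the read would run past the end of a block. A standalone sketch of the original guard pattern, using hypothetical names (bd_cfg, bd_read_raw) rather than the real lfs_t internals:

``` c
#include <stdint.h>
#include <string.h>

enum { ERR_OK = 0, ERR_CORRUPT = -84 };   // -84 mirrors LFS_ERR_CORRUPT in lfs.h

struct bd_cfg {
    uint32_t block_size;
    uint32_t block_count;
};

// Hypothetical raw backend; a real block device would read from flash here.
static int bd_read_raw(const struct bd_cfg *cfg, uint32_t block,
        uint32_t off, void *buffer, uint32_t size) {
    (void)cfg; (void)block; (void)off;
    memset(buffer, 0xff, size);           // pretend the block is erased
    return ERR_OK;
}

// Same shape as the check in lfs_bd_read: reject reads that fall outside the
// device geometry before touching the backend, returning a soft error instead
// of reading out of bounds.
static int bd_read_checked(const struct bd_cfg *cfg, uint32_t block,
        uint32_t off, void *buffer, uint32_t size) {
    if (block >= cfg->block_count || off + size > cfg->block_size) {
        return ERR_CORRUPT;
    }
    return bd_read_raw(cfg, block, off, buffer, size);
}

int main(void) {
    struct bd_cfg cfg = {.block_size = 4096, .block_count = 1024};
    uint8_t buf[16];
    int ok  = bd_read_checked(&cfg, 2048, 0, buf, sizeof(buf));   // in range
    int bad = bd_read_checked(&cfg, 4096, 0, buf, sizeof(buf));   // block out of range
    return (ok == ERR_OK && bad == ERR_CORRUPT) ? 0 : 1;
}
```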
@@ -173,7 +173,7 @@ static int lfs_bd_prog(lfs_t *lfs,
|
||||
lfs_block_t block, lfs_off_t off,
|
||||
const void *buffer, lfs_size_t size) {
|
||||
const uint8_t *data = buffer;
|
||||
LFS_ASSERT(block == LFS_BLOCK_INLINE || block < lfs->cfg->block_count);
|
||||
LFS_ASSERT(block != LFS_BLOCK_NULL);
|
||||
LFS_ASSERT(off + size <= lfs->cfg->block_size);
|
||||
|
||||
while (size > 0) {
|
||||
@@ -269,7 +269,7 @@ typedef int32_t lfs_stag_t;
|
||||
((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0))
|
||||
|
||||
#define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
|
||||
((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))
|
||||
((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(type2, id2, size2))
|
||||
|
||||
static inline bool lfs_tag_isvalid(lfs_tag_t tag) {
|
||||
return !(tag & 0x80000000);
|
||||
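The macro fix above matters because the old LFS_MKTAG_IF_ELSE body referenced type/id/size, names that are not in its parameter list, so the true branch silently picked up whatever identifiers happened to be in scope at the call site. A compilable sketch of the corrected macro, with a simplified stand-in for LFS_MKTAG (the real macro in lfs.c packs the fields into a 32-bit tag; the exact layout here is an assumption):

``` c
#include <stdint.h>
#include <stdio.h>

typedef uint32_t lfs_tag_t;

// Simplified stand-in: type, id, and size packed into one 32-bit tag.
#define LFS_MKTAG(type, id, size) \
    (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size))

// Fixed form from the diff: the true branch now uses the *1 parameters.
#define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
    ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))

int main(void) {
    int removed = 1;
    // Picks the first tag when removed is true, the second otherwise.
    lfs_tag_t tag = LFS_MKTAG_IF_ELSE(removed,
            0x4ff, 0, 0,      // e.g. a "delete"-style tag (illustrative values)
            0x401, 3, 8);     // e.g. a "create"-style tag (illustrative values)
    printf("tag %08x\n", (unsigned)tag);
    return 0;
}
```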
@@ -714,7 +714,7 @@ static int lfs_dir_traverse(lfs_t *lfs,
|
||||
uint16_t fromid = lfs_tag_size(tag);
|
||||
uint16_t toid = lfs_tag_id(tag);
|
||||
int err = lfs_dir_traverse(lfs,
|
||||
buffer, 0, 0xffffffff, NULL, 0,
|
||||
buffer, 0, LFS_BLOCK_NULL, NULL, 0,
|
||||
LFS_MKTAG(0x600, 0x3ff, 0),
|
||||
LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0),
|
||||
fromid, fromid+1, toid-fromid+diff,
|
||||
@@ -748,12 +748,6 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
// scanning the entire directory
|
||||
lfs_stag_t besttag = -1;
|
||||
|
||||
// if either block address is invalid we return LFS_ERR_CORRUPT here,
|
||||
// otherwise later writes to the pair could fail
|
||||
if (pair[0] >= lfs->cfg->block_count || pair[1] >= lfs->cfg->block_count) {
|
||||
return LFS_ERR_CORRUPT;
|
||||
}
|
||||
|
||||
// find the block with the most recent revision
|
||||
uint32_t revs[2] = {0, 0};
|
||||
int r = 0;
|
||||
@@ -780,7 +774,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
// now scan tags to fetch the actual dir and find possible match
|
||||
for (int i = 0; i < 2; i++) {
|
||||
lfs_off_t off = 0;
|
||||
lfs_tag_t ptag = 0xffffffff;
|
||||
lfs_tag_t ptag = LFS_BLOCK_NULL;
|
||||
|
||||
uint16_t tempcount = 0;
|
||||
lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
|
||||
@@ -788,7 +782,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
lfs_stag_t tempbesttag = besttag;
|
||||
|
||||
dir->rev = lfs_tole32(dir->rev);
|
||||
uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
|
||||
uint32_t crc = lfs_crc(LFS_BLOCK_NULL, &dir->rev, sizeof(dir->rev));
|
||||
dir->rev = lfs_fromle32(dir->rev);
|
||||
|
||||
while (true) {
|
||||
@@ -859,7 +853,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
dir->split = tempsplit;
|
||||
|
||||
// reset crc
|
||||
crc = 0xffffffff;
|
||||
crc = LFS_BLOCK_NULL;
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -1237,14 +1231,14 @@ static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
|
||||
}
|
||||
|
||||
static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
|
||||
const lfs_off_t off1 = commit->off;
|
||||
const uint32_t crc1 = commit->crc;
|
||||
// align to program units
|
||||
const lfs_off_t end = lfs_alignup(off1 + 2*sizeof(uint32_t),
|
||||
const lfs_off_t off1 = commit->off + sizeof(lfs_tag_t);
|
||||
const lfs_off_t end = lfs_alignup(off1 + sizeof(uint32_t),
|
||||
lfs->cfg->prog_size);
|
||||
uint32_t ncrc = commit->crc;
|
||||
|
||||
// create crc tags to fill up remainder of commit, note that
|
||||
// padding is not crced, which lets fetches skip padding but
|
||||
// padding is not crcd, which lets fetches skip padding but
|
||||
// makes committing a bit more complicated
|
||||
while (commit->off < end) {
|
||||
lfs_off_t off = commit->off + sizeof(lfs_tag_t);
|
||||
@@ -1254,7 +1248,7 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
|
||||
}
|
||||
|
||||
// read erased state from next program unit
|
||||
lfs_tag_t tag = 0xffffffff;
|
||||
lfs_tag_t tag = LFS_BLOCK_NULL;
|
||||
int err = lfs_bd_read(lfs,
|
||||
NULL, &lfs->rcache, sizeof(tag),
|
||||
commit->block, noff, &tag, sizeof(tag));
|
||||
@@ -1278,9 +1272,10 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
|
||||
return err;
|
||||
}
|
||||
|
||||
ncrc = commit->crc;
|
||||
commit->off += sizeof(tag)+lfs_tag_size(tag);
|
||||
commit->ptag = tag ^ ((lfs_tag_t)reset << 31);
|
||||
commit->crc = 0xffffffff; // reset crc for next "commit"
|
||||
commit->crc = LFS_BLOCK_NULL; // reset crc for next "commit"
|
||||
}
|
||||
|
||||
// flush buffers
|
||||
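As a reminder of why the commit code keeps resetting its crc field to all-ones: the checksum is accumulated incrementally across lfs_crc calls, starting from a 0xffffffff seed, which happens to be the same bit pattern as LFS_BLOCK_NULL — hence the mechanical substitution throughout this diff. A generic rolling CRC-32 sketch with that seed-in/seed-out shape; it is not necessarily bit-identical to lfs_crc:

``` c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

// Generic reflected CRC-32 (polynomial 0xedb88320). lfs_crc has the same
// shape (running crc in, updated crc out) but its table/layout may differ.
static uint32_t crc32_update(uint32_t crc, const void *buffer, size_t size) {
    const uint8_t *data = buffer;
    for (size_t i = 0; i < size; i++) {
        crc ^= data[i];
        for (int b = 0; b < 8; b++) {
            crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
        }
    }
    return crc;
}

int main(void) {
    // Accumulate over several pieces, the way a metadata commit is checksummed:
    uint32_t crc = 0xffffffff;              // seed, reset for each "commit"
    crc = crc32_update(crc, "rev", 3);      // e.g. the revision count first
    crc = crc32_update(crc, "tags", 4);     // then each appended tag
    printf("crc %08x\n", (unsigned)crc);
    return 0;
}
```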
@@ -1291,16 +1286,10 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
|
||||
|
||||
// successful commit, check checksums to make sure
|
||||
lfs_off_t off = commit->begin;
|
||||
lfs_off_t noff = off1 + sizeof(uint32_t);
|
||||
lfs_off_t noff = off1;
|
||||
while (off < end) {
|
||||
uint32_t crc = 0xffffffff;
|
||||
uint32_t crc = LFS_BLOCK_NULL;
|
||||
for (lfs_off_t i = off; i < noff+sizeof(uint32_t); i++) {
|
||||
// check against written crc, may catch blocks that
|
||||
// become readonly and match our commit size exactly
|
||||
if (i == off1 && crc != crc1) {
|
||||
return LFS_ERR_CORRUPT;
|
||||
}
|
||||
|
||||
// leave it up to caching to make this efficient
|
||||
uint8_t dat;
|
||||
err = lfs_bd_read(lfs,
|
||||
@@ -1310,6 +1299,12 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
|
||||
return err;
|
||||
}
|
||||
|
||||
// check against written crc to detect if block is readonly
|
||||
// (we may pick up old commits)
|
||||
if (i == noff && crc != ncrc) {
|
||||
return LFS_ERR_CORRUPT;
|
||||
}
|
||||
|
||||
crc = lfs_crc(crc, &dat, 1);
|
||||
}
|
||||
|
||||
@@ -1356,7 +1351,7 @@ static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
|
||||
|
||||
// set defaults
|
||||
dir->off = sizeof(dir->rev);
|
||||
dir->etag = 0xffffffff;
|
||||
dir->etag = LFS_BLOCK_NULL;
|
||||
dir->count = 0;
|
||||
dir->tail[0] = LFS_BLOCK_NULL;
|
||||
dir->tail[1] = LFS_BLOCK_NULL;
|
||||
@@ -1450,7 +1445,7 @@ static int lfs_dir_compact(lfs_t *lfs,
|
||||
// find size
|
||||
lfs_size_t size = 0;
|
||||
int err = lfs_dir_traverse(lfs,
|
||||
source, 0, 0xffffffff, attrs, attrcount,
|
||||
source, 0, LFS_BLOCK_NULL, attrs, attrcount,
|
||||
LFS_MKTAG(0x400, 0x3ff, 0),
|
||||
LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
|
||||
begin, end, -begin,
|
||||
@@ -1545,8 +1540,8 @@ static int lfs_dir_compact(lfs_t *lfs,
|
||||
struct lfs_commit commit = {
|
||||
.block = dir->pair[1],
|
||||
.off = 0,
|
||||
.ptag = 0xffffffff,
|
||||
.crc = 0xffffffff,
|
||||
.ptag = LFS_BLOCK_NULL,
|
||||
.crc = LFS_BLOCK_NULL,
|
||||
|
||||
.begin = 0,
|
||||
.end = lfs->cfg->block_size - 8,
|
||||
@@ -1575,7 +1570,7 @@ static int lfs_dir_compact(lfs_t *lfs,
|
||||
|
||||
// traverse the directory, this time writing out all unique tags
|
||||
err = lfs_dir_traverse(lfs,
|
||||
source, 0, 0xffffffff, attrs, attrcount,
|
||||
source, 0, LFS_BLOCK_NULL, attrs, attrcount,
|
||||
LFS_MKTAG(0x400, 0x3ff, 0),
|
||||
LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
|
||||
begin, end, -begin,
|
||||
@@ -1752,7 +1747,7 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
|
||||
.block = dir->pair[0],
|
||||
.off = dir->off,
|
||||
.ptag = dir->etag,
|
||||
.crc = 0xffffffff,
|
||||
.crc = LFS_BLOCK_NULL,
|
||||
|
||||
.begin = dir->off,
|
||||
.end = lfs->cfg->block_size - 8,
|
||||
@@ -2203,6 +2198,7 @@ static int lfs_ctz_find(lfs_t *lfs,
|
||||
return err;
|
||||
}
|
||||
|
||||
LFS_ASSERT(head >= 2 && head <= lfs->cfg->block_count);
|
||||
current -= 1 << skip;
|
||||
}
|
||||
|
||||
@@ -2222,6 +2218,7 @@ static int lfs_ctz_extend(lfs_t *lfs,
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
LFS_ASSERT(nblock >= 2 && nblock <= lfs->cfg->block_count);
|
||||
|
||||
{
|
||||
err = lfs_bd_erase(lfs, nblock);
|
||||
@@ -2294,6 +2291,8 @@ static int lfs_ctz_extend(lfs_t *lfs,
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
LFS_ASSERT(nhead >= 2 && nhead <= lfs->cfg->block_count);
|
||||
}
|
||||
|
||||
*block = nblock;
|
||||
@@ -2709,12 +2708,6 @@ int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
|
||||
LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file);
|
||||
LFS_ASSERT(file->flags & LFS_F_OPENED);
|
||||
|
||||
if (file->flags & LFS_F_ERRED) {
|
||||
// it's not safe to do anything if our file errored
|
||||
LFS_TRACE("lfs_file_sync -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int err = lfs_file_flush(lfs, file);
|
||||
if (err) {
|
||||
file->flags |= LFS_F_ERRED;
|
||||
@@ -2723,6 +2716,7 @@ int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
|
||||
}
|
||||
|
||||
if ((file->flags & LFS_F_DIRTY) &&
|
||||
!(file->flags & LFS_F_ERRED) &&
|
||||
!lfs_pair_isnull(file->m.pair)) {
|
||||
// update dir entry
|
||||
uint16_t type;
|
||||
@@ -3451,7 +3445,7 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
|
||||
LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);
|
||||
|
||||
// check that the block size is large enough to fit ctz pointers
|
||||
LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
|
||||
LFS_ASSERT(4*lfs_npw2(LFS_BLOCK_NULL / (lfs->cfg->block_size-2*4))
|
||||
<= lfs->cfg->block_size);
|
||||
|
||||
// block_cycles = 0 is no longer supported.
|
||||
@@ -3664,15 +3658,7 @@ int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
|
||||
|
||||
// scan directory blocks for superblock and any global updates
|
||||
lfs_mdir_t dir = {.tail = {0, 1}};
|
||||
lfs_block_t cycle = 0;
|
||||
while (!lfs_pair_isnull(dir.tail)) {
|
||||
if (cycle >= lfs->cfg->block_count/2) {
|
||||
// loop detected
|
||||
err = LFS_ERR_CORRUPT;
|
||||
goto cleanup;
|
||||
}
|
||||
cycle += 1;
|
||||
|
||||
// fetch next block in tail list
|
||||
lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail,
|
||||
LFS_MKTAG(0x7ff, 0x3ff, 0),
|
||||
@@ -3814,14 +3800,7 @@ int lfs_fs_traverseraw(lfs_t *lfs,
|
||||
}
|
||||
#endif
|
||||
|
||||
lfs_block_t cycle = 0;
|
||||
while (!lfs_pair_isnull(dir.tail)) {
|
||||
if (cycle >= lfs->cfg->block_count/2) {
|
||||
// loop detected
|
||||
return LFS_ERR_CORRUPT;
|
||||
}
|
||||
cycle += 1;
|
||||
|
||||
for (int i = 0; i < 2; i++) {
|
||||
int err = cb(data, dir.tail[i]);
|
||||
if (err) {
|
||||
@@ -3905,14 +3884,7 @@ static int lfs_fs_pred(lfs_t *lfs,
|
||||
// iterate over all directory entries
|
||||
pdir->tail[0] = 0;
|
||||
pdir->tail[1] = 1;
|
||||
lfs_block_t cycle = 0;
|
||||
while (!lfs_pair_isnull(pdir->tail)) {
|
||||
if (cycle >= lfs->cfg->block_count/2) {
|
||||
// loop detected
|
||||
return LFS_ERR_CORRUPT;
|
||||
}
|
||||
cycle += 1;
|
||||
|
||||
if (lfs_pair_cmp(pdir->tail, pair) == 0) {
|
||||
return 0;
|
||||
}
|
||||
@@ -3955,14 +3927,9 @@ static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
|
||||
// use fetchmatch with callback to find pairs
|
||||
parent->tail[0] = 0;
|
||||
parent->tail[1] = 1;
|
||||
lfs_block_t cycle = 0;
|
||||
int i = 0;
|
||||
while (!lfs_pair_isnull(parent->tail)) {
|
||||
if (cycle >= lfs->cfg->block_count/2) {
|
||||
// loop detected
|
||||
return LFS_ERR_CORRUPT;
|
||||
}
|
||||
cycle += 1;
|
||||
|
||||
i += 1;
|
||||
lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
|
||||
LFS_MKTAG(0x7ff, 0, 0x3ff),
|
||||
LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8),
|
||||
@@ -4405,7 +4372,7 @@ static int lfs1_dir_fetch(lfs_t *lfs,
|
||||
continue;
|
||||
}
|
||||
|
||||
uint32_t crc = 0xffffffff;
|
||||
uint32_t crc = LFS_BLOCK_NULL;
|
||||
lfs1_dir_tole32(&test);
|
||||
lfs1_crc(&crc, &test, sizeof(test));
|
||||
lfs1_dir_fromle32(&test);
|
||||
@@ -4841,7 +4808,7 @@ int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
|
||||
dir2.pair[1] = dir1.pair[1];
|
||||
dir2.rev = dir1.d.rev;
|
||||
dir2.off = sizeof(dir2.rev);
|
||||
dir2.etag = 0xffffffff;
|
||||
dir2.etag = LFS_BLOCK_NULL;
|
||||
dir2.count = 0;
|
||||
dir2.tail[0] = lfs->lfs1->root[0];
|
||||
dir2.tail[1] = lfs->lfs1->root[1];
|
||||
|
||||
@@ -166,8 +166,8 @@ def mkassert(type, comp, lh, rh, size=None):
|
||||
'type': type.lower(), 'TYPE': type.upper(),
|
||||
'comp': comp.lower(), 'COMP': comp.upper(),
|
||||
'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
|
||||
'lh': lh.strip(' '),
|
||||
'rh': rh.strip(' '),
|
||||
'lh': lh.strip(),
|
||||
'rh': rh.strip(),
|
||||
'size': size,
|
||||
}
|
||||
if size:
|
||||
|
||||
@@ -318,14 +318,6 @@ def main(args):
|
||||
|
||||
# find most recent pair
|
||||
mdir = MetadataPair(blocks)
|
||||
print("mdir {%s} rev %d%s%s" % (
|
||||
', '.join('%#x' % b
|
||||
for b in [args.block1, args.block2]
|
||||
if b is not None),
|
||||
mdir.rev,
|
||||
' (was %s)' % ', '.join('%d' % m.rev for m in mdir.pair[1:])
|
||||
if len(mdir.pair) > 1 else '',
|
||||
' (corrupted)' if not mdir else ''))
|
||||
if args.all:
|
||||
mdir.dump_all(truncate=not args.no_truncate)
|
||||
elif args.log:
|
||||
|
||||
@@ -18,22 +18,26 @@ def dumpentries(args, mdir, f):
|
||||
name = mdir[Tag('name', id_, 0)]
|
||||
struct_ = mdir[Tag('struct', id_, 0)]
|
||||
|
||||
desc = "id %d %s %s" % (
|
||||
f.write("id %d %s %s" % (
|
||||
id_, name.typerepr(),
|
||||
json.dumps(name.data.decode('utf8')))
|
||||
json.dumps(name.data.decode('utf8'))))
|
||||
if struct_.is_('dirstruct'):
|
||||
desc += " dir {%#x, %#x}" % struct.unpack(
|
||||
'<II', struct_.data[:8].ljust(8, b'\xff'))
|
||||
f.write(" dir {%#x, %#x}" % struct.unpack(
|
||||
'<II', struct_.data[:8].ljust(8, b'\xff')))
|
||||
if struct_.is_('ctzstruct'):
|
||||
desc += " ctz {%#x} size %d" % struct.unpack(
|
||||
'<II', struct_.data[:8].ljust(8, b'\xff'))
|
||||
f.write(" ctz {%#x} size %d" % struct.unpack(
|
||||
'<II', struct_.data[:8].ljust(8, b'\xff')))
|
||||
if struct_.is_('inlinestruct'):
|
||||
desc += " inline size %d" % struct_.size
|
||||
f.write(" inline size %d" % struct_.size)
|
||||
f.write("\n")
|
||||
|
||||
data = None
|
||||
if struct_.is_('inlinestruct'):
|
||||
data = struct_.data
|
||||
elif struct_.is_('ctzstruct'):
|
||||
if args.data and struct_.is_('inlinestruct'):
|
||||
for i in range(0, len(struct_.data), 16):
|
||||
f.write(" %08x: %-47s %-16s\n" % (
|
||||
i, ' '.join('%02x' % c for c in struct_.data[i:i+16]),
|
||||
''.join(c if c >= ' ' and c <= '~' else '.'
|
||||
for c in map(chr, struct_.data[i:i+16]))))
|
||||
elif args.data and struct_.is_('ctzstruct'):
|
||||
block, size = struct.unpack(
|
||||
'<II', struct_.data[:8].ljust(8, b'\xff'))
|
||||
data = []
|
||||
@@ -47,50 +51,28 @@ def dumpentries(args, mdir, f):
|
||||
data.append(dat[4*(ctz(i)+1) if i != 0 else 0:])
|
||||
block, = struct.unpack('<I', dat[:4].ljust(4, b'\xff'))
|
||||
i -= 1
|
||||
|
||||
data = bytes(it.islice(
|
||||
it.chain.from_iterable(reversed(data)), size))
|
||||
|
||||
f.write("%-45s%s\n" % (desc,
|
||||
"%-23s %-8s" % (
|
||||
' '.join('%02x' % c for c in data[:8]),
|
||||
''.join(c if c >= ' ' and c <= '~' else '.'
|
||||
for c in map(chr, data[:8])))
|
||||
if not args.no_truncate and len(desc) < 45
|
||||
and data is not None else ""))
|
||||
|
||||
if name.is_('superblock') and struct_.is_('inlinestruct'):
|
||||
f.write(
|
||||
" block_size %d\n"
|
||||
" block_count %d\n"
|
||||
" name_max %d\n"
|
||||
" file_max %d\n"
|
||||
" attr_max %d\n" % struct.unpack(
|
||||
'<IIIII', struct_.data[4:4+20].ljust(20, b'\xff')))
|
||||
|
||||
for tag in mdir.tags:
|
||||
if tag.id==id_ and tag.is_('userattr'):
|
||||
desc = "%s size %d" % (tag.typerepr(), tag.size)
|
||||
f.write(" %-43s%s\n" % (desc,
|
||||
"%-23s %-8s" % (
|
||||
' '.join('%02x' % c for c in tag.data[:8]),
|
||||
''.join(c if c >= ' ' and c <= '~' else '.'
|
||||
for c in map(chr, tag.data[:8])))
|
||||
if not args.no_truncate and len(desc) < 43 else ""))
|
||||
|
||||
if args.no_truncate:
|
||||
for i in range(0, len(tag.data), 16):
|
||||
f.write(" %08x: %-47s %-16s\n" % (
|
||||
i, ' '.join('%02x' % c for c in tag.data[i:i+16]),
|
||||
''.join(c if c >= ' ' and c <= '~' else '.'
|
||||
for c in map(chr, tag.data[i:i+16]))))
|
||||
|
||||
if args.no_truncate and data is not None:
|
||||
for i in range(0, len(data), 16):
|
||||
for i in range(0, min(len(data), 256)
|
||||
if not args.no_truncate else len(data), 16):
|
||||
f.write(" %08x: %-47s %-16s\n" % (
|
||||
i, ' '.join('%02x' % c for c in data[i:i+16]),
|
||||
''.join(c if c >= ' ' and c <= '~' else '.'
|
||||
for c in map(chr, data[i:i+16]))))
|
||||
|
||||
for tag in mdir.tags:
|
||||
if tag.id==id_ and tag.is_('userattr'):
|
||||
f.write("id %d %s size %d\n" % (
|
||||
id_, tag.typerepr(), tag.size))
|
||||
|
||||
if args.data:
|
||||
for i in range(0, len(tag.data), 16):
|
||||
f.write(" %-47s %-16s\n" % (
|
||||
' '.join('%02x' % c for c in tag.data[i:i+16]),
|
||||
''.join(c if c >= ' ' and c <= '~' else '.'
|
||||
for c in map(chr, tag.data[i:i+16]))))
|
||||
|
||||
def main(args):
|
||||
with open(args.disk, 'rb') as f:
|
||||
dirs = []
|
||||
@@ -179,51 +161,61 @@ def main(args):
|
||||
dir[0].path = path.replace('//', '/')
|
||||
|
||||
# dump tree
|
||||
version = ('?', '?')
|
||||
if superblock:
|
||||
version = tuple(reversed(
|
||||
struct.unpack('<HH', superblock[1].data[0:4].ljust(4, b'\xff'))))
|
||||
print("%-47s%s" % ("littlefs v%s.%s" % version,
|
||||
"data (truncated, if it fits)"
|
||||
if not any([args.no_truncate, args.tags, args.log, args.all]) else ""))
|
||||
if not args.superblock and not args.gstate and not args.mdirs:
|
||||
args.superblock = True
|
||||
args.gstate = True
|
||||
args.mdirs = True
|
||||
|
||||
if gstate:
|
||||
if args.superblock and superblock:
|
||||
print("superblock %s v%d.%d" % (
|
||||
json.dumps(superblock[0].data.decode('utf8')),
|
||||
struct.unpack('<H', superblock[1].data[2:2+2])[0],
|
||||
struct.unpack('<H', superblock[1].data[0:0+2])[0]))
|
||||
print(
|
||||
" block_size %d\n"
|
||||
" block_count %d\n"
|
||||
" name_max %d\n"
|
||||
" file_max %d\n"
|
||||
" attr_max %d" % struct.unpack(
|
||||
'<IIIII', superblock[1].data[4:4+20].ljust(20, b'\xff')))
|
||||
|
||||
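The unpack formats above imply the on-disk layout of the inline superblock entry as this script reads it: a little-endian version word (minor in the low 16 bits, major in the high 16 bits) followed by five little-endian 32-bit fields. A C sketch of that layout, with field order taken from the print statements here; the authoritative definition is struct lfs_superblock in lfs.h:

``` c
#include <stdint.h>
#include <stdio.h>

// Inline superblock entry as readtree.py decodes it (all fields little-endian),
// mirroring struct.unpack('<HH', data[0:4]) and struct.unpack('<IIIII', data[4:24]).
struct superblock_inline {
    uint16_t version_minor;   // data[0:2]
    uint16_t version_major;   // data[2:4]
    uint32_t block_size;      // data[4:8]
    uint32_t block_count;     // data[8:12]
    uint32_t name_max;        // data[12:16]
    uint32_t file_max;        // data[16:20]
    uint32_t attr_max;        // data[20:24]
};

int main(void) {
    // 24 bytes of payload, matching the data[0:24] slice used by the script.
    printf("%zu\n", sizeof(struct superblock_inline));
    return 0;
}
```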
if args.gstate and gstate:
|
||||
print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
|
||||
tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
|
||||
blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
|
||||
if tag.size or not tag.isvalid:
|
||||
print(" orphans >=%d" % max(tag.size, 1))
|
||||
if tag.size:
|
||||
print(" orphans %d" % tag.size)
|
||||
if tag.type:
|
||||
print(" move dir {%#x, %#x} id %d" % (
|
||||
blocks[0], blocks[1], tag.id))
|
||||
|
||||
for i, dir in enumerate(dirs):
|
||||
print("dir %s" % (json.dumps(dir[0].path)
|
||||
if hasattr(dir[0], 'path') else '(orphan)'))
|
||||
if args.mdirs:
|
||||
for i, dir in enumerate(dirs):
|
||||
print("dir %s" % (json.dumps(dir[0].path)
|
||||
if hasattr(dir[0], 'path') else '(orphan)'))
|
||||
|
||||
for j, mdir in enumerate(dir):
|
||||
print("mdir {%#x, %#x} rev %d%s" % (
|
||||
mdir.blocks[0], mdir.blocks[1], mdir.rev,
|
||||
' (corrupted)' if not mdir else ''))
|
||||
for j, mdir in enumerate(dir):
|
||||
print("mdir {%#x, %#x} rev %d%s" % (
|
||||
mdir.blocks[0], mdir.blocks[1], mdir.rev,
|
||||
' (corrupted)' if not mdir else ''))
|
||||
|
||||
f = io.StringIO()
|
||||
if args.tags:
|
||||
mdir.dump_tags(f, truncate=not args.no_truncate)
|
||||
elif args.log:
|
||||
mdir.dump_log(f, truncate=not args.no_truncate)
|
||||
elif args.all:
|
||||
mdir.dump_all(f, truncate=not args.no_truncate)
|
||||
else:
|
||||
dumpentries(args, mdir, f)
|
||||
f = io.StringIO()
|
||||
if args.tags:
|
||||
mdir.dump_tags(f, truncate=not args.no_truncate)
|
||||
elif args.log:
|
||||
mdir.dump_log(f, truncate=not args.no_truncate)
|
||||
elif args.all:
|
||||
mdir.dump_all(f, truncate=not args.no_truncate)
|
||||
else:
|
||||
dumpentries(args, mdir, f)
|
||||
|
||||
lines = list(filter(None, f.getvalue().split('\n')))
|
||||
for k, line in enumerate(lines):
|
||||
print("%s %s" % (
|
||||
' ' if i == len(dirs)-1 and j == len(dir)-1 else
|
||||
'v' if k == len(lines)-1 else
|
||||
'.' if j == len(dir)-1 else
|
||||
'|',
|
||||
line))
|
||||
lines = list(filter(None, f.getvalue().split('\n')))
|
||||
for k, line in enumerate(lines):
|
||||
print("%s %s" % (
|
||||
' ' if j == len(dir)-1 else
|
||||
'v' if k == len(lines)-1 else
|
||||
'|',
|
||||
line))
|
||||
|
||||
if cycle:
|
||||
print("*** cycle detected! -> {%#x, %#x} ***" % (cycle[0], cycle[1]))
|
||||
@@ -250,12 +242,20 @@ if __name__ == "__main__":
|
||||
parser.add_argument('block2', nargs='?', default=1,
|
||||
type=lambda x: int(x, 0),
|
||||
help="Optional second block address for finding the root.")
|
||||
parser.add_argument('-s', '--superblock', action='store_true',
|
||||
help="Show contents of the superblock.")
|
||||
parser.add_argument('-g', '--gstate', action='store_true',
|
||||
help="Show contents of global-state.")
|
||||
parser.add_argument('-m', '--mdirs', action='store_true',
|
||||
help="Show contents of metadata-pairs/directories.")
|
||||
parser.add_argument('-t', '--tags', action='store_true',
|
||||
help="Show metadata tags instead of reconstructing entries.")
|
||||
parser.add_argument('-l', '--log', action='store_true',
|
||||
help="Show tags in log.")
|
||||
parser.add_argument('-a', '--all', action='store_true',
|
||||
help="Show all tags in log, included tags in corrupted commits.")
|
||||
parser.add_argument('-d', '--data', action='store_true',
|
||||
help="Also show the raw contents of files/attrs/tags.")
|
||||
parser.add_argument('-T', '--no-truncate', action='store_true',
|
||||
help="Show the full contents of files/attrs/tags.")
|
||||
help="Don't truncate large amounts of data.")
|
||||
sys.exit(main(parser.parse_args()))
|
||||
|
||||
@@ -184,8 +184,7 @@ class TestCase:
|
||||
elif self.if_ is not None:
|
||||
if_ = self.if_
|
||||
while True:
|
||||
for k, v in sorted(self.defines.items(),
|
||||
key=lambda x: len(x[0]), reverse=True):
|
||||
for k, v in self.defines.items():
|
||||
if k in if_:
|
||||
if_ = if_.replace(k, '(%s)' % v)
|
||||
break
|
||||
@@ -200,25 +199,22 @@ class TestCase:
|
||||
return True
|
||||
|
||||
def test(self, exec=[], persist=False, cycles=None,
|
||||
gdb=False, failure=None, disk=None, **args):
|
||||
gdb=False, failure=None, **args):
|
||||
# build command
|
||||
cmd = exec + ['./%s.test' % self.suite.path,
|
||||
repr(self.caseno), repr(self.permno)]
|
||||
|
||||
# persist disk or keep in RAM for speed?
|
||||
if persist:
|
||||
if not disk:
|
||||
disk = self.suite.path + '.disk'
|
||||
if persist != 'noerase':
|
||||
try:
|
||||
with open(disk, 'w') as f:
|
||||
f.truncate(0)
|
||||
os.remove(self.suite.path + '.disk')
|
||||
if args.get('verbose', False):
|
||||
print('truncate --size=0', disk)
|
||||
print('rm', self.suite.path + '.disk')
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
cmd.append(disk)
|
||||
cmd.append(self.suite.path + '.disk')
|
||||
|
||||
# simulate power-loss after n cycles?
|
||||
if cycles:
|
||||
@@ -299,17 +295,11 @@ class ValgrindTestCase(TestCase):
|
||||
return not self.leaky and super().shouldtest(**args)
|
||||
|
||||
def test(self, exec=[], **args):
|
||||
verbose = args.get('verbose', False)
|
||||
uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1)
|
||||
exec = [
|
||||
exec = exec + [
|
||||
'valgrind',
|
||||
'--leak-check=full',
|
||||
] + (['--undef-value-errors=no'] if uninit else []) + [
|
||||
] + (['--track-origins=yes'] if not uninit else []) + [
|
||||
'--error-exitcode=4',
|
||||
'--error-limit=no',
|
||||
] + (['--num-callers=1'] if not verbose else []) + [
|
||||
'-q'] + exec
|
||||
'-q']
|
||||
return super().test(exec=exec, **args)
|
||||
|
||||
class ReentrantTestCase(TestCase):
|
||||
@@ -320,7 +310,7 @@ class ReentrantTestCase(TestCase):
|
||||
def shouldtest(self, **args):
|
||||
return self.reentrant and super().shouldtest(**args)
|
||||
|
||||
def test(self, persist=False, gdb=False, failure=None, **args):
|
||||
def test(self, exec=[], persist=False, gdb=False, failure=None, **args):
|
||||
for cycles in it.count(1):
|
||||
# clear disk first?
|
||||
if cycles == 1 and persist != 'noerase':
|
||||
@@ -386,11 +376,10 @@ class TestSuite:
|
||||
# code lineno?
|
||||
if 'code' in case:
|
||||
case['code_lineno'] = code_linenos.pop()
|
||||
# merge conditions if necessary
|
||||
if 'if' in config and 'if' in case:
|
||||
case['if'] = '(%s) && (%s)' % (config['if'], case['if'])
|
||||
elif 'if' in config:
|
||||
case['if'] = config['if']
|
||||
# give our case's config a copy of our "global" config
|
||||
for k, v in config.items():
|
||||
if k not in case:
|
||||
case[k] = v
|
||||
# initialize test case
|
||||
self.cases.append(TestCase(case, filter=filter,
|
||||
suite=self, caseno=i+1, lineno=lineno, **args))
|
||||
@@ -713,6 +702,8 @@ def main(**args):
|
||||
stdout = perm.result.stdout[:-1]
|
||||
else:
|
||||
stdout = perm.result.stdout
|
||||
if (not args.get('verbose', False) and len(stdout) > 5):
|
||||
sys.stdout.write('...\n')
|
||||
for line in stdout[-5:]:
|
||||
sys.stdout.write(line)
|
||||
if perm.result.assert_:
|
||||
@@ -773,6 +764,4 @@ if __name__ == "__main__":
|
||||
help="Run non-leaky tests under valgrind to check for memory leaks.")
|
||||
parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '),
|
||||
help="Run tests with another executable prefixed on the command line.")
|
||||
parser.add_argument('-d', '--disk',
|
||||
help="Specify a file to use for persistent/reentrant tests.")
|
||||
sys.exit(main(**vars(parser.parse_args())))
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
# allocator tests
|
||||
# note for these to work there are a number of constraints on the device geometry
|
||||
if = 'LFS_BLOCK_CYCLES == -1'
|
||||
# note for these to work there are many constraints on the device geometry
|
||||
|
||||
[[case]] # parallel allocation test
|
||||
define.FILES = 3
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / FILES)'
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
lfs_file_t files[FILES];
|
||||
@@ -47,7 +46,7 @@ code = '''
|
||||
|
||||
[[case]] # serial allocation test
|
||||
define.FILES = 3
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / FILES)'
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
|
||||
@@ -86,7 +85,7 @@ code = '''
|
||||
|
||||
[[case]] # parallel allocation reuse test
|
||||
define.FILES = 3
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / FILES)'
|
||||
define.CYCLES = [1, 10]
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
@@ -141,7 +140,7 @@ code = '''
|
||||
|
||||
[[case]] # serial allocation reuse test
|
||||
define.FILES = 3
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / FILES)'
|
||||
define.CYCLES = [1, 10]
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
# bad blocks with block cycles should be tested in test_relocations
|
||||
if = 'LFS_BLOCK_CYCLES == -1'
|
||||
|
||||
[[case]] # single bad blocks
|
||||
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
|
||||
define.LFS_ERASE_CYCLES = 0xffffffff
|
||||
|
||||
@@ -155,7 +155,7 @@ code = '''
|
||||
'''
|
||||
|
||||
[[case]] # reentrant many directory creation/rename/removal
|
||||
define.N = [5, 11]
|
||||
define.N = [5, 10] # TODO changed from 20, should we be able to do more?
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
# still pass with other inline sizes but wouldn't be testing anything.
|
||||
|
||||
define.LFS_CACHE_SIZE = 512
|
||||
if = 'LFS_CACHE_SIZE % LFS_PROG_SIZE == 0 && LFS_CACHE_SIZE == 512'
|
||||
if = 'LFS_CACHE_SIZE == 512'
|
||||
|
||||
[[case]] # entry grow test
|
||||
code = '''
|
||||
|
||||
@@ -1,288 +0,0 @@
|
||||
# Tests for recovering from conditions which shouldn't normally
|
||||
# happen during normal operation of littlefs
|
||||
|
||||
# invalid pointer tests (outside of block_count)
|
||||
|
||||
[[case]] # invalid tail-pointer test
|
||||
define.TAIL_TYPE = ['LFS_TYPE_HARDTAIL', 'LFS_TYPE_SOFTTAIL']
|
||||
define.INVALSET = [0x3, 0x1, 0x2]
|
||||
in = "lfs.c"
|
||||
code = '''
|
||||
// create littlefs
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
|
||||
// change tail-pointer to invalid pointers
|
||||
lfs_init(&lfs, &cfg) => 0;
|
||||
lfs_mdir_t mdir;
|
||||
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
|
||||
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
|
||||
{LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
|
||||
(lfs_block_t[2]){
|
||||
(INVALSET & 0x1) ? 0xcccccccc : 0,
|
||||
(INVALSET & 0x2) ? 0xcccccccc : 0}})) => 0;
|
||||
lfs_deinit(&lfs) => 0;
|
||||
|
||||
// test that mount fails gracefully
|
||||
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
|
||||
'''
|
||||
|
||||
[[case]] # invalid dir pointer test
define.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
    // create littlefs
    lfs_format(&lfs, &cfg) => 0;
    // make a dir
    lfs_mount(&lfs, &cfg) => 0;
    lfs_mkdir(&lfs, "dir_here") => 0;
    lfs_unmount(&lfs) => 0;

    // change the dir pointer to be invalid
    lfs_init(&lfs, &cfg) => 0;
    lfs_mdir_t mdir;
    lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
    // make sure id 1 == our directory
    lfs_dir_get(&lfs, &mdir,
            LFS_MKTAG(0x700, 0x3ff, 0),
            LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("dir_here")), buffer)
            => LFS_MKTAG(LFS_TYPE_DIR, 1, strlen("dir_here"));
    assert(memcmp((char*)buffer, "dir_here", strlen("dir_here")) == 0);
    // change dir pointer
    lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, 8),
                (lfs_block_t[2]){
                    (INVALSET & 0x1) ? 0xcccccccc : 0,
                    (INVALSET & 0x2) ? 0xcccccccc : 0}})) => 0;
    lfs_deinit(&lfs) => 0;

    // test that accessing our bad dir fails, note there are a number
    // of ways to access the dir, some fail, but some don't
    lfs_mount(&lfs, &cfg) => 0;
    lfs_stat(&lfs, "dir_here", &info) => 0;
    assert(strcmp(info.name, "dir_here") == 0);
    assert(info.type == LFS_TYPE_DIR);

    lfs_dir_open(&lfs, &dir, "dir_here") => LFS_ERR_CORRUPT;
    lfs_stat(&lfs, "dir_here/file_here", &info) => LFS_ERR_CORRUPT;
    lfs_dir_open(&lfs, &dir, "dir_here/dir_here") => LFS_ERR_CORRUPT;
    lfs_file_open(&lfs, &file, "dir_here/file_here",
            LFS_O_RDONLY) => LFS_ERR_CORRUPT;
    lfs_file_open(&lfs, &file, "dir_here/file_here",
            LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_CORRUPT;
    lfs_unmount(&lfs) => 0;
'''

[[case]] # invalid file pointer test
in = "lfs.c"
define.SIZE = [10, 1000, 100000] # faked file size
code = '''
    // create littlefs
    lfs_format(&lfs, &cfg) => 0;
    // make a file
    lfs_mount(&lfs, &cfg) => 0;
    lfs_file_open(&lfs, &file, "file_here",
            LFS_O_WRONLY | LFS_O_CREAT) => 0;
    lfs_file_close(&lfs, &file) => 0;
    lfs_unmount(&lfs) => 0;

    // change the file pointer to be invalid
    lfs_init(&lfs, &cfg) => 0;
    lfs_mdir_t mdir;
    lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
    // make sure id 1 == our file
    lfs_dir_get(&lfs, &mdir,
            LFS_MKTAG(0x700, 0x3ff, 0),
            LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
            => LFS_MKTAG(LFS_TYPE_REG, 1, strlen("file_here"));
    assert(memcmp((char*)buffer, "file_here", strlen("file_here")) == 0);
    // change file pointer
    lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz)),
                &(struct lfs_ctz){0xcccccccc, lfs_tole32(SIZE)}})) => 0;
    lfs_deinit(&lfs) => 0;

    // test that accessing our bad file fails, note there are a number
    // of ways to access the file, some fail, but some don't
    lfs_mount(&lfs, &cfg) => 0;
    lfs_stat(&lfs, "file_here", &info) => 0;
    assert(strcmp(info.name, "file_here") == 0);
    assert(info.type == LFS_TYPE_REG);
    assert(info.size == SIZE);

    lfs_file_open(&lfs, &file, "file_here", LFS_O_RDONLY) => 0;
    lfs_file_read(&lfs, &file, buffer, SIZE) => LFS_ERR_CORRUPT;
    lfs_file_close(&lfs, &file) => 0;

    // any allocs that traverse the CTZ skip-list must unfortunately fail
    if (SIZE > 2*LFS_BLOCK_SIZE) {
        lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
    }
    lfs_unmount(&lfs) => 0;
'''

[[case]] # invalid pointer in CTZ skip-list test
define.SIZE = ['2*LFS_BLOCK_SIZE', '3*LFS_BLOCK_SIZE', '4*LFS_BLOCK_SIZE']
in = "lfs.c"
code = '''
    // create littlefs
    lfs_format(&lfs, &cfg) => 0;
    // make a file
    lfs_mount(&lfs, &cfg) => 0;
    lfs_file_open(&lfs, &file, "file_here",
            LFS_O_WRONLY | LFS_O_CREAT) => 0;
    for (int i = 0; i < SIZE; i++) {
        char c = 'c';
        lfs_file_write(&lfs, &file, &c, 1) => 1;
    }
    lfs_file_close(&lfs, &file) => 0;
    lfs_unmount(&lfs) => 0;
    // change pointer in CTZ skip-list to be invalid
    lfs_init(&lfs, &cfg) => 0;
    lfs_mdir_t mdir;
    lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
    // make sure id 1 == our file and get our CTZ structure
    lfs_dir_get(&lfs, &mdir,
            LFS_MKTAG(0x700, 0x3ff, 0),
            LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
            => LFS_MKTAG(LFS_TYPE_REG, 1, strlen("file_here"));
    assert(memcmp((char*)buffer, "file_here", strlen("file_here")) == 0);
    struct lfs_ctz ctz;
    lfs_dir_get(&lfs, &mdir,
            LFS_MKTAG(0x700, 0x3ff, 0),
            LFS_MKTAG(LFS_TYPE_STRUCT, 1, sizeof(struct lfs_ctz)), &ctz)
            => LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz));
    lfs_ctz_fromle32(&ctz);
    // rewrite block to contain bad pointer
    uint8_t bbuffer[LFS_BLOCK_SIZE];
    cfg.read(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
    uint32_t bad = lfs_tole32(0xcccccccc);
    memcpy(&bbuffer[0], &bad, sizeof(bad));
    memcpy(&bbuffer[4], &bad, sizeof(bad));
    cfg.erase(&cfg, ctz.head) => 0;
    cfg.prog(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
    lfs_deinit(&lfs) => 0;

    // test that accessing our bad file fails, note there are a number
    // of ways to access the file, some fail, but some don't
    lfs_mount(&lfs, &cfg) => 0;
    lfs_stat(&lfs, "file_here", &info) => 0;
    assert(strcmp(info.name, "file_here") == 0);
    assert(info.type == LFS_TYPE_REG);
    assert(info.size == SIZE);

    lfs_file_open(&lfs, &file, "file_here", LFS_O_RDONLY) => 0;
    lfs_file_read(&lfs, &file, buffer, SIZE) => LFS_ERR_CORRUPT;
    lfs_file_close(&lfs, &file) => 0;

    // any allocs that traverse the CTZ skip-list must unfortunately fail
    if (SIZE > 2*LFS_BLOCK_SIZE) {
        lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
    }
    lfs_unmount(&lfs) => 0;
'''

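# Background for the case above: littlefs stores file data as a backwards CTZ
# (count-trailing-zeros) skip-list. Assuming the layout described in DESIGN.md,
# block index n of a file starts with roughly ctz(n)+1 back-pointers, e.g.
# (illustrative only):
#
#     // number of back-pointers stored at the start of CTZ block `index`
#     uint32_t npointers = (index == 0) ? 0 : __builtin_ctz(index) + 1;
#
# so overwriting the first words of the head block with 0xcccccccc corrupts at
# least one pointer that lfs_file_read later has to follow.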

[[case]] # invalid gstate pointer
define.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
    // create littlefs
    lfs_format(&lfs, &cfg) => 0;

    // create an invalid gstate
    lfs_init(&lfs, &cfg) => 0;
    lfs_mdir_t mdir;
    lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
    lfs_fs_prepmove(&lfs, 1, (lfs_block_t [2]){
            (INVALSET & 0x1) ? 0xcccccccc : 0,
            (INVALSET & 0x2) ? 0xcccccccc : 0});
    lfs_dir_commit(&lfs, &mdir, NULL, 0) => 0;
    lfs_deinit(&lfs) => 0;

    // test that mount fails gracefully
    // mount may not fail, but our first alloc should fail when
    // we try to fix the gstate
    lfs_mount(&lfs, &cfg) => 0;
    lfs_mkdir(&lfs, "should_fail") => LFS_ERR_CORRUPT;
    lfs_unmount(&lfs) => 0;
'''

# cycle detection/recovery tests

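# The cases below splice the metadata-pair threaded list back onto itself and
# expect lfs_mount to notice the loop instead of walking it forever. A classic
# way to catch such a loop (a sketch of the general technique, bounds checks
# omitted; not necessarily the exact check lfs_mount performs) is a
# tortoise/hare walk over the tail pointers:
#
#     // hypothetical helpers: tail() follows one LFS_TYPE_*TAIL pointer,
#     // pair_eq() compares block pairs, at_end() checks for a null tail
#     slow = root;
#     fast = tail(root);
#     while (!at_end(fast) && !pair_eq(slow, fast)) {
#         slow = tail(slow);
#         fast = tail(tail(fast));
#     }
#     // meeting before the end of the list means the thread loops
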
[[case]] # metadata-pair threaded-list loop test
in = "lfs.c"
code = '''
    // create littlefs
    lfs_format(&lfs, &cfg) => 0;

    // change tail-pointer to point to ourself
    lfs_init(&lfs, &cfg) => 0;
    lfs_mdir_t mdir;
    lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
    lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
                (lfs_block_t[2]){0, 1}})) => 0;
    lfs_deinit(&lfs) => 0;

    // test that mount fails gracefully
    lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''

[[case]] # metadata-pair threaded-list 2-length loop test
in = "lfs.c"
code = '''
    // create littlefs with child dir
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
    lfs_mkdir(&lfs, "child") => 0;
    lfs_unmount(&lfs) => 0;

    // find child
    lfs_init(&lfs, &cfg) => 0;
    lfs_mdir_t mdir;
    lfs_block_t pair[2];
    lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
    lfs_dir_get(&lfs, &mdir,
            LFS_MKTAG(0x7ff, 0x3ff, 0),
            LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
            => LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
    lfs_pair_fromle32(pair);
    // change tail-pointer to point to root
    lfs_dir_fetch(&lfs, &mdir, pair) => 0;
    lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
                (lfs_block_t[2]){0, 1}})) => 0;
    lfs_deinit(&lfs) => 0;

    // test that mount fails gracefully
    lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''

[[case]] # metadata-pair threaded-list 1-length child loop test
in = "lfs.c"
code = '''
    // create littlefs with child dir
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
    lfs_mkdir(&lfs, "child") => 0;
    lfs_unmount(&lfs) => 0;

    // find child
    lfs_init(&lfs, &cfg) => 0;
    lfs_mdir_t mdir;
    lfs_block_t pair[2];
    lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
    lfs_dir_get(&lfs, &mdir,
            LFS_MKTAG(0x7ff, 0x3ff, 0),
            LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
            => LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
    lfs_pair_fromle32(pair);
    // change tail-pointer to point to ourself
    lfs_dir_fetch(&lfs, &mdir, pair) => 0;
    lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8), pair})) => 0;
    lfs_deinit(&lfs) => 0;

    // test that mount fails gracefully
    lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''
@@ -33,9 +33,6 @@ code = '''
            lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
            assert(res == 1 || res == LFS_ERR_NOSPC);
            if (res == LFS_ERR_NOSPC) {
                err = lfs_file_close(&lfs, &file);
                assert(err == 0 || err == LFS_ERR_NOSPC);
                lfs_unmount(&lfs) => 0;
                goto exhausted;
            }
        }
@@ -43,7 +40,6 @@ code = '''
        err = lfs_file_close(&lfs, &file);
        assert(err == 0 || err == LFS_ERR_NOSPC);
        if (err == LFS_ERR_NOSPC) {
            lfs_unmount(&lfs) => 0;
            goto exhausted;
        }
    }
@@ -115,9 +111,6 @@ code = '''
            lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
            assert(res == 1 || res == LFS_ERR_NOSPC);
            if (res == LFS_ERR_NOSPC) {
                err = lfs_file_close(&lfs, &file);
                assert(err == 0 || err == LFS_ERR_NOSPC);
                lfs_unmount(&lfs) => 0;
                goto exhausted;
            }
        }
@@ -125,7 +118,6 @@ code = '''
        err = lfs_file_close(&lfs, &file);
        assert(err == 0 || err == LFS_ERR_NOSPC);
        if (err == LFS_ERR_NOSPC) {
            lfs_unmount(&lfs) => 0;
            goto exhausted;
        }
    }
@@ -206,9 +198,6 @@ code = '''
            lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
            assert(res == 1 || res == LFS_ERR_NOSPC);
            if (res == LFS_ERR_NOSPC) {
                err = lfs_file_close(&lfs, &file);
                assert(err == 0 || err == LFS_ERR_NOSPC);
                lfs_unmount(&lfs) => 0;
                goto exhausted;
            }
        }
@@ -216,7 +205,6 @@ code = '''
        err = lfs_file_close(&lfs, &file);
        assert(err == 0 || err == LFS_ERR_NOSPC);
        if (err == LFS_ERR_NOSPC) {
            lfs_unmount(&lfs) => 0;
            goto exhausted;
        }
    }
@@ -295,9 +283,6 @@ code = '''
            lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
            assert(res == 1 || res == LFS_ERR_NOSPC);
            if (res == LFS_ERR_NOSPC) {
                err = lfs_file_close(&lfs, &file);
                assert(err == 0 || err == LFS_ERR_NOSPC);
                lfs_unmount(&lfs) => 0;
                goto exhausted;
            }
        }
@@ -305,7 +290,6 @@ code = '''
        err = lfs_file_close(&lfs, &file);
        assert(err == 0 || err == LFS_ERR_NOSPC);
        if (err == LFS_ERR_NOSPC) {
            lfs_unmount(&lfs) => 0;
            goto exhausted;
        }
    }
@@ -354,9 +338,9 @@ exhausted:
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_BLOCK_CYCLES = [5, 4, 3, 2, 1]
#define.LFS_BLOCK_CYCLES = [4, 2]
define.CYCLES = 100
define.FILES = 10
if = 'LFS_BLOCK_CYCLES < CYCLES/10'
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
@@ -380,9 +364,6 @@ code = '''
            lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
            assert(res == 1 || res == LFS_ERR_NOSPC);
            if (res == LFS_ERR_NOSPC) {
                err = lfs_file_close(&lfs, &file);
                assert(err == 0 || err == LFS_ERR_NOSPC);
                lfs_unmount(&lfs) => 0;
                goto exhausted;
            }
        }
@@ -390,7 +371,6 @@ code = '''
        err = lfs_file_close(&lfs, &file);
        assert(err == 0 || err == LFS_ERR_NOSPC);
        if (err == LFS_ERR_NOSPC) {
            lfs_unmount(&lfs) => 0;
            goto exhausted;
        }
    }

@@ -27,55 +27,41 @@ code = '''
'''

[[case]] # expanding superblock
define.LFS_BLOCK_CYCLES = [32, 33, 1]
define.BLOCK_CYCLES = [32, 33, 1]
define.N = [10, 100, 1000]
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
    for (int i = 0; i < N; i++) {
        lfs_file_open(&lfs, &file, "dummy",
                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
        lfs_file_close(&lfs, &file) => 0;
        lfs_mkdir(&lfs, "dummy") => 0;
        lfs_stat(&lfs, "dummy", &info) => 0;
        assert(strcmp(info.name, "dummy") == 0);
        assert(info.type == LFS_TYPE_REG);
        lfs_remove(&lfs, "dummy") => 0;
    }
    lfs_unmount(&lfs) => 0;

    // one last check after power-cycle
    lfs_mount(&lfs, &cfg) => 0;
    lfs_file_open(&lfs, &file, "dummy",
            LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
    lfs_file_close(&lfs, &file) => 0;
    lfs_mkdir(&lfs, "dummy") => 0;
    lfs_stat(&lfs, "dummy", &info) => 0;
    assert(strcmp(info.name, "dummy") == 0);
    assert(info.type == LFS_TYPE_REG);
    lfs_unmount(&lfs) => 0;
'''

[[case]] # expanding superblock with power cycle
define.LFS_BLOCK_CYCLES = [32, 33, 1]
define.BLOCK_CYCLES = [32, 33, 1]
define.N = [10, 100, 1000]
code = '''
    lfs_format(&lfs, &cfg) => 0;
    for (int i = 0; i < N; i++) {
        lfs_mount(&lfs, &cfg) => 0;
        // remove lingering dummy?
        err = lfs_stat(&lfs, "dummy", &info);
        err = lfs_remove(&lfs, "dummy");
        assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
        if (!err) {
            assert(strcmp(info.name, "dummy") == 0);
            assert(info.type == LFS_TYPE_REG);
            lfs_remove(&lfs, "dummy") => 0;
        }

        lfs_file_open(&lfs, &file, "dummy",
                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
        lfs_file_close(&lfs, &file) => 0;
        lfs_mkdir(&lfs, "dummy") => 0;
        lfs_stat(&lfs, "dummy", &info) => 0;
        assert(strcmp(info.name, "dummy") == 0);
        assert(info.type == LFS_TYPE_REG);
        lfs_unmount(&lfs) => 0;
    }

@@ -83,12 +69,11 @@ code = '''
    lfs_mount(&lfs, &cfg) => 0;
    lfs_stat(&lfs, "dummy", &info) => 0;
    assert(strcmp(info.name, "dummy") == 0);
    assert(info.type == LFS_TYPE_REG);
    lfs_unmount(&lfs) => 0;
'''

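# Context for the expanding-superblock cases: when the root metadata pair hits
# its block_cycles wear threshold, littlefs grows the superblock chain rather
# than endlessly rewriting blocks {0, 1} (see DESIGN.md; this summary is a
# paraphrase, not normative). Creating and removing the "dummy" entry N times
# with small BLOCK_CYCLES values is what forces those expansions during the
# test.
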
[[case]] # reentrant expanding superblock
define.LFS_BLOCK_CYCLES = [2, 1]
define.BLOCK_CYCLES = [2, 1]
define.N = 24
reentrant = true
code = '''
@@ -100,20 +85,12 @@ code = '''

    for (int i = 0; i < N; i++) {
        // remove lingering dummy?
        err = lfs_stat(&lfs, "dummy", &info);
        err = lfs_remove(&lfs, "dummy");
        assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
        if (!err) {
            assert(strcmp(info.name, "dummy") == 0);
            assert(info.type == LFS_TYPE_REG);
            lfs_remove(&lfs, "dummy") => 0;
        }

        lfs_file_open(&lfs, &file, "dummy",
                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
        lfs_file_close(&lfs, &file) => 0;
        lfs_mkdir(&lfs, "dummy") => 0;
        lfs_stat(&lfs, "dummy", &info) => 0;
        assert(strcmp(info.name, "dummy") == 0);
        assert(info.type == LFS_TYPE_REG);
    }

    lfs_unmount(&lfs) => 0;
@@ -122,6 +99,5 @@ code = '''
    lfs_mount(&lfs, &cfg) => 0;
    lfs_stat(&lfs, "dummy", &info) => 0;
    assert(strcmp(info.name, "dummy") == 0);
    assert(info.type == LFS_TYPE_REG);
    lfs_unmount(&lfs) => 0;
'''
@@ -148,7 +148,6 @@ code = '''

[[case]] # move file corrupt source and dest
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
@@ -240,7 +239,6 @@ code = '''

[[case]] # move file after corrupt
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
@@ -595,7 +593,6 @@ code = '''

[[case]] # move dir corrupt source and dest
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
@@ -695,7 +692,6 @@ code = '''

[[case]] # move dir after corrupt
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;

@@ -1,85 +0,0 @@

#open(1, "5file5.xxxxxxxxxxxx", 0x503) -> 0
# write(1, , 2007)[^ 1499 us] -> 2007
# write(1, , 2007)[^ 1411 us] -> 2007
# write(1, , 2007)[^ 1390 us] -> 2007
# write(1, , 2007)[^ 1401 us] -> 2007
# close(1) -> 0
# open(1, "1file1.xxxx", 0x503) -> 0
# mount
# open(0, "5file5.xxxxxxxxxxxx", 0x3) -> 0
# open(1, "5file5.xxxxxxxxxxxx", 0x503) -> 0
# close(1) -> 0
# open(1, "1file1.xxxx", 0x2) -> 0
# write(0, , 63) -> 63
#a.out: lfs.c:2169: lfs_ctz_find: Assertion `head >= 2 && head <= lfs->cfg->block_count' failed.
# close(0)Aborted

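# The unnamed case below recreates the logged sequence in test form: fill
# "5file5.xxxxxxxxxxxx" in 2007-byte chunks, open "1file1.xxxx" without
# closing it, remount, then keep writing through both handles, which is where
# the original run tripped the lfs_ctz_find assertion above.
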
[[case]]
define.FILESIZE5 = '4*CHUNKSIZE5'
define.FILESIZE1 = '4*CHUNKSIZE1'
define.CHUNKSIZE5 = 2007
define.CHUNKSIZE1 = 63
code = '''
    lfs_file_t files[2];
    uint8_t chunk5[CHUNKSIZE5];
    memset(chunk5, 'a', CHUNKSIZE5);
    uint8_t chunk1[CHUNKSIZE1];
    memset(chunk1, 'b', CHUNKSIZE1);

    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
    lfs_file_open(&lfs, &files[1], "5file5.xxxxxxxxxxxx",
            LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
    for (int i = 0; i < FILESIZE5/CHUNKSIZE5; i++) {
        lfs_file_write(&lfs, &files[1], chunk5, CHUNKSIZE5) => CHUNKSIZE5;
    }
    lfs_file_close(&lfs, &files[1]) => 0;
    lfs_file_open(&lfs, &files[1], "1file1.xxxx",
            LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
    // these should not change the result
    // lfs_file_close(&lfs, &files[1]) => 0;
    // lfs_unmount(&lfs) => 0;

    lfs_mount(&lfs, &cfg) => 0;
    lfs_file_open(&lfs, &files[0], "5file5.xxxxxxxxxxxx",
            LFS_O_RDWR) => 0;

    lfs_file_open(&lfs, &files[1], "5file5.xxxxxxxxxxxx",
            LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
    lfs_file_close(&lfs, &files[1]) => 0;

    lfs_file_open(&lfs, &files[1], "1file1.xxxx",
            LFS_O_WRONLY) => 0;
    for (int i = 0; i < FILESIZE1/CHUNKSIZE1; i++) {
        lfs_file_write(&lfs, &files[1], chunk1, CHUNKSIZE1) => CHUNKSIZE1;
    }
    lfs_file_close(&lfs, &files[1]) => 0;

    memset(chunk5, 'c', CHUNKSIZE5);
    for (int i = 0; i < FILESIZE5/CHUNKSIZE5; i++) {
        lfs_file_write(&lfs, &files[0], chunk5, CHUNKSIZE5) => CHUNKSIZE5;
    }
    lfs_file_close(&lfs, &files[0]) => 0;
    lfs_unmount(&lfs) => 0;

    // check results
    lfs_mount(&lfs, &cfg) => 0;
    lfs_file_open(&lfs, &files[0], "5file5.xxxxxxxxxxxx",
            LFS_O_RDONLY) => 0;
    for (int i = 0; i < FILESIZE5/CHUNKSIZE5; i++) {
        uint8_t rchunk[CHUNKSIZE5];
        lfs_file_read(&lfs, &files[0], rchunk, CHUNKSIZE5) => CHUNKSIZE5;
        assert(memcmp(rchunk, chunk5, CHUNKSIZE5) == 0);
    }
    lfs_file_close(&lfs, &files[0]) => 0;
    lfs_file_open(&lfs, &files[0], "1file1.xxxx",
            LFS_O_RDONLY) => 0;
    for (int i = 0; i < FILESIZE1/CHUNKSIZE1; i++) {
        uint8_t rchunk[CHUNKSIZE1];
        lfs_file_read(&lfs, &files[0], rchunk, CHUNKSIZE1) => CHUNKSIZE1;
        assert(memcmp(rchunk, chunk1, CHUNKSIZE1) == 0);
    }
    lfs_file_close(&lfs, &files[0]) => 0;
    lfs_unmount(&lfs) => 0;
'''
@@ -1,6 +1,5 @@
[[case]] # orphan test
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;

@@ -247,14 +247,14 @@ code = '''
    lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;

    memset(path, 'w', LFS_NAME_MAX+1);
    path[LFS_NAME_MAX+1] = '\0';
    path[LFS_NAME_MAX+2] = '\0';
    lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
    lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT)
            => LFS_ERR_NAMETOOLONG;

    memcpy(path, "coffee/", strlen("coffee/"));
    memset(path+strlen("coffee/"), 'w', LFS_NAME_MAX+1);
    path[strlen("coffee/")+LFS_NAME_MAX+1] = '\0';
    path[strlen("coffee/")+LFS_NAME_MAX+2] = '\0';
    lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
    lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT)
            => LFS_ERR_NAMETOOLONG;
@@ -270,6 +270,7 @@ code = '''
    lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
    lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;

    lfs_mount(&lfs, &cfg) => 0;
    memset(path, 'w', LFS_NAME_MAX);
    path[LFS_NAME_MAX] = '\0';
    lfs_mkdir(&lfs, path) => 0;

@@ -100,7 +100,7 @@ code = '''
    lfs_file_open(&lfs, &file, "sequence",
            LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;

    size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2);
    size = lfs.cfg->cache_size;
    lfs_size_t qsize = size / 4;
    uint8_t *wb = buffer;
    uint8_t *rb = buffer + size;
