Mirror of https://github.com/eledio-devices/thirdparty-littlefs.git, synced 2025-11-02 08:48:29 +01:00
Compare commits: 17 commits (test-revam...test-revam)
| SHA1 |
|---|
| b392c49e36 |
| 17efa7b3b9 |
| 0990296619 |
| d04b077506 |
| c7987a3162 |
| dcae185a00 |
| f4b17b379c |
| 9f546f154f |
| b69cf890e6 |
| 02c84ac5f4 |
| 6530cb3a61 |
| fe957de892 |
| 6a550844f4 |
| f9c2fd93f2 |
| 44d7112794 |
| 77e3078b9f |
| 517d3414c5 |
.travis.yml — 633 changed lines

@@ -1,49 +1,70 @@
|
||||
# Environment variables
|
||||
# environment variables
|
||||
env:
|
||||
global:
|
||||
- CFLAGS=-Werror
|
||||
- MAKEFLAGS=-j
|
||||
|
||||
# Common test script
|
||||
script:
|
||||
# cache installation dirs
|
||||
cache:
|
||||
pip: true
|
||||
directories:
|
||||
- $HOME/.cache/apt
|
||||
|
||||
# common installation
|
||||
_: &install-common
|
||||
# need toml, also pip3 isn't installed by default?
|
||||
- sudo apt-get install python3 python3-pip
|
||||
- sudo pip3 install toml
|
||||
# setup a ram-backed disk to speed up reentrant tests
|
||||
- mkdir disks
|
||||
- sudo mount -t tmpfs -o size=100m tmpfs disks
|
||||
- export TFLAGS="$TFLAGS --disk=disks/disk"
|
||||
|
||||
# test cases
|
||||
_: &test-example
|
||||
# make sure example can at least compile
|
||||
- sed -n '/``` c/,/```/{/```/d; p;}' README.md > test.c &&
|
||||
- sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c &&
|
||||
make all CFLAGS+="
|
||||
-Duser_provided_block_device_read=NULL
|
||||
-Duser_provided_block_device_prog=NULL
|
||||
-Duser_provided_block_device_erase=NULL
|
||||
-Duser_provided_block_device_sync=NULL
|
||||
-include stdio.h"
|
||||
# default tests
|
||||
_: &test-default
|
||||
# normal+reentrant tests
|
||||
- make test TFLAGS+="-nrk"
|
||||
# common real-life geometries
|
||||
_: &test-nor
|
||||
# NOR flash: read/prog = 1 block = 4KiB
|
||||
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
|
||||
_: &test-emmc
|
||||
# eMMC: read/prog = 512 block = 512
|
||||
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
|
||||
_: &test-nand
|
||||
# NAND flash: read/prog = 4KiB block = 32KiB
|
||||
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
|
||||
# other extreme geometries that are useful for testing various corner cases
|
||||
_: &test-no-intrinsics
|
||||
- make test TFLAGS+="-nrk -DLFS_NO_INTRINSICS"
|
||||
_: &test-no-inline
|
||||
- make test TFLAGS+="-nrk -DLFS_INLINE_MAX=0"
|
||||
_: &test-byte-writes
|
||||
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
|
||||
_: &test-block-cycles
|
||||
- make test TFLAGS+="-nrk -DLFS_BLOCK_CYCLES=1"
|
||||
_: &test-odd-block-count
|
||||
- make test TFLAGS+="-nrk -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
|
||||
_: &test-odd-block-size
|
||||
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
|
||||
|
||||
# run tests
|
||||
- make test QUIET=1
|
||||
|
||||
# run tests with a few different configurations
|
||||
- make test QUIET=1 CFLAGS+="-DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=4"
|
||||
- make test QUIET=1 CFLAGS+="-DLFS_READ_SIZE=512 -DLFS_CACHE_SIZE=512 -DLFS_BLOCK_CYCLES=16"
|
||||
- make test QUIET=1 CFLAGS+="-DLFS_READ_SIZE=8 -DLFS_CACHE_SIZE=16 -DLFS_BLOCK_CYCLES=2"
|
||||
- make test QUIET=1 CFLAGS+="-DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
|
||||
|
||||
- make clean test QUIET=1 CFLAGS+="-DLFS_INLINE_MAX=0"
|
||||
- make clean test QUIET=1 CFLAGS+="-DLFS_EMUBD_ERASE_VALUE=0xff"
|
||||
- make clean test QUIET=1 CFLAGS+="-DLFS_NO_INTRINSICS"
|
||||
|
||||
# additional configurations that don't support all tests (this should be
|
||||
# fixed but at the moment it is what it is)
|
||||
- make test_files QUIET=1
|
||||
CFLAGS+="-DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
|
||||
- make test_files QUIET=1
|
||||
CFLAGS+="-DLFS_READ_SIZE=\(2*1024\) -DLFS_BLOCK_SIZE=\(64*1024\)"
|
||||
- make test_files QUIET=1
|
||||
CFLAGS+="-DLFS_READ_SIZE=\(8*1024\) -DLFS_BLOCK_SIZE=\(64*1024\)"
|
||||
- make test_files QUIET=1
|
||||
CFLAGS+="-DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
|
||||
|
||||
# report size
|
||||
_: &report-size
|
||||
# compile and find the code size with the smallest configuration
|
||||
- make clean size
|
||||
OBJ="$(ls lfs*.o | tr '\n' ' ')"
|
||||
- make -j1 clean size
|
||||
OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')"
|
||||
CFLAGS+="-DLFS_NO_ASSERT -DLFS_NO_DEBUG -DLFS_NO_WARN -DLFS_NO_ERROR"
|
||||
| tee sizes
|
||||
|
||||
# update status if we succeeded, compare with master if possible
|
||||
- |
|
||||
if [ "$TRAVIS_TEST_RESULT" -eq 0 ]
|
||||
@@ -51,7 +72,7 @@ script:
|
||||
CURR=$(tail -n1 sizes | awk '{print $1}')
|
||||
PREV=$(curl -u "$GEKY_BOT_STATUSES" https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \
|
||||
| jq -re "select(.sha != \"$TRAVIS_COMMIT\")
|
||||
| .statuses[] | select(.context == \"$STAGE/$NAME\").description
|
||||
| .statuses[] | select(.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\").description
|
||||
| capture(\"code size is (?<size>[0-9]+)\").size" \
|
||||
|| echo 0)
|
||||
|
||||
@@ -62,257 +83,347 @@ script:
|
||||
fi
|
||||
fi
|
||||
|
||||
# CI matrix
|
||||
# stage control
|
||||
stages:
|
||||
- name: test
|
||||
- name: deploy
|
||||
if: branch = master AND type = push
|
||||
|
||||
# job control
|
||||
jobs:
|
||||
include:
|
||||
# native testing
|
||||
- stage: test
|
||||
env:
|
||||
- STAGE=test
|
||||
- NAME=littlefs-x86
|
||||
# native testing
|
||||
- &x86
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-x86
|
||||
install: *install-common
|
||||
script: [*test-example, *report-size]
|
||||
- {<<: *x86, script: [*test-default, *report-size]}
|
||||
- {<<: *x86, script: [*test-nor, *report-size]}
|
||||
- {<<: *x86, script: [*test-emmc, *report-size]}
|
||||
- {<<: *x86, script: [*test-nand, *report-size]}
|
||||
- {<<: *x86, script: [*test-no-intrinsics, *report-size]}
|
||||
- {<<: *x86, script: [*test-no-inline, *report-size]}
|
||||
- {<<: *x86, script: [*test-byte-writes, *report-size]}
|
||||
- {<<: *x86, script: [*test-block-cycles, *report-size]}
|
||||
- {<<: *x86, script: [*test-odd-block-count, *report-size]}
|
||||
- {<<: *x86, script: [*test-odd-block-size, *report-size]}
|
||||
|
||||
# cross-compile with ARM (thumb mode)
|
||||
- stage: test
|
||||
env:
|
||||
- STAGE=test
|
||||
- NAME=littlefs-arm
|
||||
- CC="arm-linux-gnueabi-gcc --static -mthumb"
|
||||
- EXEC="qemu-arm"
|
||||
install:
|
||||
- sudo apt-get install
|
||||
gcc-arm-linux-gnueabi
|
||||
libc6-dev-armel-cross
|
||||
qemu-user
|
||||
- arm-linux-gnueabi-gcc --version
|
||||
- qemu-arm -version
|
||||
# cross-compile with ARM (thumb mode)
|
||||
- &arm
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-arm
|
||||
- CC="arm-linux-gnueabi-gcc --static -mthumb"
|
||||
- TFLAGS="$TFLAGS --exec=qemu-arm"
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install
|
||||
gcc-arm-linux-gnueabi
|
||||
libc6-dev-armel-cross
|
||||
qemu-user
|
||||
- arm-linux-gnueabi-gcc --version
|
||||
- qemu-arm -version
|
||||
script: [*test-example, *report-size]
|
||||
- {<<: *arm, script: [*test-default, *report-size]}
|
||||
- {<<: *arm, script: [*test-nor, *report-size]}
|
||||
- {<<: *arm, script: [*test-emmc, *report-size]}
|
||||
- {<<: *arm, script: [*test-nand, *report-size]}
|
||||
- {<<: *arm, script: [*test-no-intrinsics, *report-size]}
|
||||
- {<<: *arm, script: [*test-no-inline, *report-size]}
|
||||
# it just takes way too long to run byte-level writes in qemu,
|
||||
# note this is still tested in the native tests
|
||||
#- {<<: *arm, script: [*test-byte-writes, *report-size]}
|
||||
- {<<: *arm, script: [*test-block-cycles, *report-size]}
|
||||
- {<<: *arm, script: [*test-odd-block-count, *report-size]}
|
||||
- {<<: *arm, script: [*test-odd-block-size, *report-size]}
|
||||
|
||||
# cross-compile with PowerPC
|
||||
- stage: test
|
||||
env:
|
||||
- STAGE=test
|
||||
- NAME=littlefs-powerpc
|
||||
- CC="powerpc-linux-gnu-gcc --static"
|
||||
- EXEC="qemu-ppc"
|
||||
install:
|
||||
- sudo apt-get install
|
||||
gcc-powerpc-linux-gnu
|
||||
libc6-dev-powerpc-cross
|
||||
qemu-user
|
||||
- powerpc-linux-gnu-gcc --version
|
||||
- qemu-ppc -version
|
||||
# cross-compile with MIPS
|
||||
- &mips
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-mips
|
||||
- CC="mips-linux-gnu-gcc --static"
|
||||
- TFLAGS="$TFLAGS --exec=qemu-mips"
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install
|
||||
gcc-mips-linux-gnu
|
||||
libc6-dev-mips-cross
|
||||
qemu-user
|
||||
- mips-linux-gnu-gcc --version
|
||||
- qemu-mips -version
|
||||
script: [*test-example, *report-size]
|
||||
- {<<: *mips, script: [*test-default, *report-size]}
|
||||
- {<<: *mips, script: [*test-nor, *report-size]}
|
||||
- {<<: *mips, script: [*test-emmc, *report-size]}
|
||||
- {<<: *mips, script: [*test-nand, *report-size]}
|
||||
- {<<: *mips, script: [*test-no-intrinsics, *report-size]}
|
||||
- {<<: *mips, script: [*test-no-inline, *report-size]}
|
||||
# it just takes way too long to run byte-level writes in qemu,
|
||||
# note this is still tested in the native tests
|
||||
#- {<<: *mips, script: [*test-byte-writes, *report-size]}
|
||||
- {<<: *mips, script: [*test-block-cycles, *report-size]}
|
||||
- {<<: *mips, script: [*test-odd-block-count, *report-size]}
|
||||
- {<<: *mips, script: [*test-odd-block-size, *report-size]}
|
||||
|
||||
# cross-compile with MIPS
|
||||
- stage: test
|
||||
env:
|
||||
- STAGE=test
|
||||
- NAME=littlefs-mips
|
||||
- CC="mips-linux-gnu-gcc --static"
|
||||
- EXEC="qemu-mips"
|
||||
install:
|
||||
- sudo apt-get install
|
||||
gcc-mips-linux-gnu
|
||||
libc6-dev-mips-cross
|
||||
qemu-user
|
||||
- mips-linux-gnu-gcc --version
|
||||
- qemu-mips -version
|
||||
# cross-compile with PowerPC
|
||||
- &powerpc
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-powerpc
|
||||
- CC="powerpc-linux-gnu-gcc --static"
|
||||
- TFLAGS="$TFLAGS --exec=qemu-ppc"
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install
|
||||
gcc-powerpc-linux-gnu
|
||||
libc6-dev-powerpc-cross
|
||||
qemu-user
|
||||
- powerpc-linux-gnu-gcc --version
|
||||
- qemu-ppc -version
|
||||
script: [*test-example, *report-size]
|
||||
- {<<: *powerpc, script: [*test-default, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-nor, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-emmc, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-nand, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-no-intrinsics, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-no-inline, *report-size]}
|
||||
# it just takes way too long to run byte-level writes in qemu,
|
||||
# note this is still tested in the native tests
|
||||
#- {<<: *powerpc, script: [*test-byte-writes, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-block-cycles, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-odd-block-count, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-odd-block-size, *report-size]}
|
||||
|
||||
# self-host with littlefs-fuse for fuzz test
|
||||
- stage: test
|
||||
env:
|
||||
- STAGE=test
|
||||
- NAME=littlefs-fuse
|
||||
if: branch !~ -prefix$
|
||||
install:
|
||||
- sudo apt-get install libfuse-dev
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2
|
||||
- fusermount -V
|
||||
- gcc --version
|
||||
before_script:
|
||||
# setup disk for littlefs-fuse
|
||||
- rm -rf littlefs-fuse/littlefs/*
|
||||
- cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
|
||||
# test under valgrind, checking for memory errors
|
||||
- &valgrind
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-valgrind
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install valgrind
|
||||
- valgrind --version
|
||||
script:
|
||||
- make test TFLAGS+="-k --valgrind"
|
||||
|
||||
- mkdir mount
|
||||
- sudo chmod a+rw /dev/loop0
|
||||
- dd if=/dev/zero bs=512 count=4096 of=disk
|
||||
- losetup /dev/loop0 disk
|
||||
script:
|
||||
# self-host test
|
||||
- make -C littlefs-fuse
|
||||
# self-host with littlefs-fuse for fuzz test
|
||||
- stage: test
|
||||
env:
|
||||
- NAME=littlefs-fuse
|
||||
if: branch !~ -prefix$
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install libfuse-dev
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2
|
||||
- fusermount -V
|
||||
- gcc --version
|
||||
|
||||
- littlefs-fuse/lfs --format /dev/loop0
|
||||
- littlefs-fuse/lfs /dev/loop0 mount
|
||||
# setup disk for littlefs-fuse
|
||||
- rm -rf littlefs-fuse/littlefs/*
|
||||
- cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
|
||||
|
||||
- ls mount
|
||||
- mkdir mount/littlefs
|
||||
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test_dirs test_files QUIET=1
|
||||
- mkdir mount
|
||||
- sudo chmod a+rw /dev/loop0
|
||||
- dd if=/dev/zero bs=512 count=128K of=disk
|
||||
- losetup /dev/loop0 disk
|
||||
script:
|
||||
# self-host test
|
||||
- make -C littlefs-fuse
|
||||
|
||||
# self-host with littlefs-fuse for fuzz test
|
||||
- stage: test
|
||||
env:
|
||||
- STAGE=test
|
||||
- NAME=littlefs-migration
|
||||
if: branch !~ -prefix$
|
||||
install:
|
||||
- sudo apt-get install libfuse-dev
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 v2
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v1 v1
|
||||
- fusermount -V
|
||||
- gcc --version
|
||||
before_script:
|
||||
# setup disk for littlefs-fuse
|
||||
- rm -rf v2/littlefs/*
|
||||
- cp -r $(git ls-tree --name-only HEAD) v2/littlefs
|
||||
- littlefs-fuse/lfs --format /dev/loop0
|
||||
- littlefs-fuse/lfs /dev/loop0 mount
|
||||
|
||||
- mkdir mount
|
||||
- sudo chmod a+rw /dev/loop0
|
||||
- dd if=/dev/zero bs=512 count=4096 of=disk
|
||||
- losetup /dev/loop0 disk
|
||||
script:
|
||||
# compile v1 and v2
|
||||
- make -C v1
|
||||
- make -C v2
|
||||
- ls mount
|
||||
- mkdir mount/littlefs
|
||||
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test
|
||||
|
||||
# run self-host test with v1
|
||||
- v1/lfs --format /dev/loop0
|
||||
- v1/lfs /dev/loop0 mount
|
||||
# test migration using littlefs-fuse
|
||||
- stage: test
|
||||
env:
|
||||
- NAME=littlefs-migration
|
||||
if: branch !~ -prefix$
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install libfuse-dev
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 v2
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v1 v1
|
||||
- fusermount -V
|
||||
- gcc --version
|
||||
|
||||
- ls mount
|
||||
- mkdir mount/littlefs
|
||||
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test_dirs test_files QUIET=1
|
||||
# setup disk for littlefs-fuse
|
||||
- rm -rf v2/littlefs/*
|
||||
- cp -r $(git ls-tree --name-only HEAD) v2/littlefs
|
||||
|
||||
# attempt to migrate
|
||||
- cd ../..
|
||||
- fusermount -u mount
|
||||
- mkdir mount
|
||||
- sudo chmod a+rw /dev/loop0
|
||||
- dd if=/dev/zero bs=512 count=128K of=disk
|
||||
- losetup /dev/loop0 disk
|
||||
script:
|
||||
# compile v1 and v2
|
||||
- make -C v1
|
||||
- make -C v2
|
||||
|
||||
- v2/lfs --migrate /dev/loop0
|
||||
- v2/lfs /dev/loop0 mount
|
||||
# run self-host test with v1
|
||||
- v1/lfs --format /dev/loop0
|
||||
- v1/lfs /dev/loop0 mount
|
||||
|
||||
# run self-host test with v2 right where we left off
|
||||
- ls mount
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test_dirs test_files QUIET=1
|
||||
- ls mount
|
||||
- mkdir mount/littlefs
|
||||
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test
|
||||
|
||||
# Automatically create releases
|
||||
- stage: deploy
|
||||
env:
|
||||
- STAGE=deploy
|
||||
- NAME=deploy
|
||||
script:
|
||||
- |
|
||||
bash << 'SCRIPT'
|
||||
set -ev
|
||||
# Find version defined in lfs.h
|
||||
LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3)
|
||||
LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16)))
|
||||
LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >> 0)))
|
||||
# Grab latest patch from repo tags, default to 0, needs finagling
|
||||
# to get past github's pagination api
|
||||
PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.
|
||||
PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \
|
||||
| sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \
|
||||
|| echo $PREV_URL)
|
||||
LFS_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \
|
||||
| jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g")
|
||||
.captures[].string | tonumber) | max + 1' \
|
||||
|| echo 0)
|
||||
# We have our new version
|
||||
LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH"
|
||||
echo "VERSION $LFS_VERSION"
|
||||
# Check that we're the most recent commit
|
||||
CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \
|
||||
| jq -re '.sha')
|
||||
[ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0
|
||||
# Create major branch
|
||||
git branch v$LFS_VERSION_MAJOR HEAD
|
||||
# Create major prefix branch
|
||||
git config user.name "geky bot"
|
||||
git config user.email "bot@geky.net"
|
||||
git fetch https://github.com/$TRAVIS_REPO_SLUG.git \
|
||||
--depth=50 v$LFS_VERSION_MAJOR-prefix || true
|
||||
./scripts/prefix.py lfs$LFS_VERSION_MAJOR
|
||||
git branch v$LFS_VERSION_MAJOR-prefix $( \
|
||||
git commit-tree $(git write-tree) \
|
||||
$(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
|
||||
-p HEAD \
|
||||
-m "Generated v$LFS_VERSION_MAJOR prefixes")
|
||||
git reset --hard
|
||||
# Update major version branches (vN and vN-prefix)
|
||||
git push --atomic https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \
|
||||
v$LFS_VERSION_MAJOR \
|
||||
v$LFS_VERSION_MAJOR-prefix
|
||||
# Build release notes
|
||||
PREV=$(git tag --sort=-v:refname -l "v*" | head -1)
|
||||
if [ ! -z "$PREV" ]
|
||||
then
|
||||
echo "PREV $PREV"
|
||||
CHANGES=$(git log --oneline $PREV.. --grep='^Merge' --invert-grep)
|
||||
printf "CHANGES\n%s\n\n" "$CHANGES"
|
||||
fi
|
||||
case ${GEKY_BOT_DRAFT:-minor} in
|
||||
true) DRAFT=true ;;
|
||||
minor) DRAFT=$(jq -R 'endswith(".0")' <<< "$LFS_VERSION") ;;
|
||||
false) DRAFT=false ;;
|
||||
esac
|
||||
# Create the release and patch version tag (vN.N.N)
|
||||
curl -f -u "$GEKY_BOT_RELEASES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \
|
||||
-d "{
|
||||
\"tag_name\": \"$LFS_VERSION\",
|
||||
\"name\": \"${LFS_VERSION%.0}\",
|
||||
\"target_commitish\": \"$TRAVIS_COMMIT\",
|
||||
\"draft\": $DRAFT,
|
||||
\"body\": $(jq -sR '.' <<< "$CHANGES")
|
||||
}" #"
|
||||
SCRIPT
|
||||
# attempt to migrate
|
||||
- cd ../..
|
||||
- fusermount -u mount
|
||||
|
||||
# Manage statuses
|
||||
- v2/lfs --migrate /dev/loop0
|
||||
- v2/lfs /dev/loop0 mount
|
||||
|
||||
# run self-host test with v2 right where we left off
|
||||
- ls mount
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test
|
||||
|
||||
# automatically create releases
|
||||
- stage: deploy
|
||||
env:
|
||||
- NAME=deploy
|
||||
script:
|
||||
- |
|
||||
bash << 'SCRIPT'
|
||||
set -ev
|
||||
# Find version defined in lfs.h
|
||||
LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3)
|
||||
LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16)))
|
||||
LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >> 0)))
|
||||
# Grab latest patch from repo tags, default to 0, needs finagling
|
||||
# to get past github's pagination api
|
||||
PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.
|
||||
PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \
|
||||
| sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \
|
||||
|| echo $PREV_URL)
|
||||
LFS_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \
|
||||
| jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g")
|
||||
.captures[].string | tonumber) | max + 1' \
|
||||
|| echo 0)
|
||||
# We have our new version
|
||||
LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH"
|
||||
echo "VERSION $LFS_VERSION"
|
||||
# Check that we're the most recent commit
|
||||
CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \
|
||||
| jq -re '.sha')
|
||||
[ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0
|
||||
# Create major branch
|
||||
git branch v$LFS_VERSION_MAJOR HEAD
|
||||
# Create major prefix branch
|
||||
git config user.name "geky bot"
|
||||
git config user.email "bot@geky.net"
|
||||
git fetch https://github.com/$TRAVIS_REPO_SLUG.git \
|
||||
--depth=50 v$LFS_VERSION_MAJOR-prefix || true
|
||||
./scripts/prefix.py lfs$LFS_VERSION_MAJOR
|
||||
git branch v$LFS_VERSION_MAJOR-prefix $( \
|
||||
git commit-tree $(git write-tree) \
|
||||
$(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
|
||||
-p HEAD \
|
||||
-m "Generated v$LFS_VERSION_MAJOR prefixes")
|
||||
git reset --hard
|
||||
# Update major version branches (vN and vN-prefix)
|
||||
git push --atomic https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \
|
||||
v$LFS_VERSION_MAJOR \
|
||||
v$LFS_VERSION_MAJOR-prefix
|
||||
# Build release notes
|
||||
PREV=$(git tag --sort=-v:refname -l "v*" | head -1)
|
||||
if [ ! -z "$PREV" ]
|
||||
then
|
||||
echo "PREV $PREV"
|
||||
CHANGES=$(git log --oneline $PREV.. --grep='^Merge' --invert-grep)
|
||||
printf "CHANGES\n%s\n\n" "$CHANGES"
|
||||
fi
|
||||
case ${GEKY_BOT_DRAFT:-minor} in
|
||||
true) DRAFT=true ;;
|
||||
minor) DRAFT=$(jq -R 'endswith(".0")' <<< "$LFS_VERSION") ;;
|
||||
false) DRAFT=false ;;
|
||||
esac
|
||||
# Create the release and patch version tag (vN.N.N)
|
||||
curl -f -u "$GEKY_BOT_RELEASES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \
|
||||
-d "{
|
||||
\"tag_name\": \"$LFS_VERSION\",
|
||||
\"name\": \"${LFS_VERSION%.0}\",
|
||||
\"target_commitish\": \"$TRAVIS_COMMIT\",
|
||||
\"draft\": $DRAFT,
|
||||
\"body\": $(jq -sR '.' <<< "$CHANGES")
|
||||
}" #"
|
||||
SCRIPT
|
||||
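The deploy script above recovers the release number from LFS_VERSION, which packs the major version in the upper 16 bits and the minor version in the lower 16 bits of a 32-bit value; `$((0xffff & ($LFS_VERSION >> 16)))` and `$((0xffff & ($LFS_VERSION >> 0)))` unpack them. A small C sketch of the same arithmetic, with the sample value assumed only for illustration:

```c
// Unpacking a packed major.minor version, mirroring the shell arithmetic above.
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t lfs_version = 0x00020002;               // assumed sample value
    uint32_t major = 0xffff & (lfs_version >> 16);   // upper 16 bits
    uint32_t minor = 0xffff & (lfs_version >> 0);    // lower 16 bits
    printf("v%" PRIu32 ".%" PRIu32 "\n", major, minor); // prints v2.2
    return 0;
}
```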
|
||||
# manage statuses
|
||||
before_install:
|
||||
- |
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"$STAGE/$NAME\",
|
||||
\"state\": \"pending\",
|
||||
\"description\": \"${STATUS:-In progress}\",
|
||||
\"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\"
|
||||
}"
|
||||
# don't clobber other (not us) failures
|
||||
if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
| jq -e ".statuses[] | select(
|
||||
.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
|
||||
.state == \"failure\" and
|
||||
(.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
|
||||
then
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
|
||||
\"state\": \"pending\",
|
||||
\"description\": \"${STATUS:-In progress}\",
|
||||
\"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
|
||||
}"
|
||||
fi
|
||||
|
||||
after_failure:
|
||||
- |
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"$STAGE/$NAME\",
|
||||
\"state\": \"failure\",
|
||||
\"description\": \"${STATUS:-Failed}\",
|
||||
\"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\"
|
||||
}"
|
||||
# don't clobber other (not us) failures
|
||||
if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
| jq -e ".statuses[] | select(
|
||||
.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
|
||||
.state == \"failure\" and
|
||||
(.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
|
||||
then
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
|
||||
\"state\": \"failure\",
|
||||
\"description\": \"${STATUS:-Failed}\",
|
||||
\"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
|
||||
}"
|
||||
fi
|
||||
|
||||
after_success:
|
||||
- |
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"$STAGE/$NAME\",
|
||||
\"state\": \"success\",
|
||||
\"description\": \"${STATUS:-Passed}\",
|
||||
\"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\"
|
||||
}"
|
||||
|
||||
# Job control
|
||||
stages:
|
||||
- name: test
|
||||
- name: deploy
|
||||
if: branch = master AND type = push
|
||||
# don't clobber other (not us) failures
|
||||
# only update if we were last job to mark in progress,
|
||||
# this isn't perfect but is probably good enough
|
||||
if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
| jq -e ".statuses[] | select(
|
||||
.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
|
||||
(.state == \"failure\" or .state == \"pending\") and
|
||||
(.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
|
||||
then
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
|
||||
\"state\": \"success\",
|
||||
\"description\": \"${STATUS:-Passed}\",
|
||||
\"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
|
||||
}"
|
||||
fi
|
||||
|
||||
lfs.c — 92 changed lines

@@ -269,7 +269,7 @@ typedef int32_t lfs_stag_t;
     ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0))

 #define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
-    ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(type2, id2, size2))
+    ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))

 static inline bool lfs_tag_isvalid(lfs_tag_t tag) {
     return !(tag & 0x80000000);
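The hunk above is the LFS_MKTAG_IF_ELSE fix: the old expansion used `type`, `id`, and `size`, which are not parameters of this macro, so the true branch never saw the first tag's arguments. A minimal sketch of the corrected macro follows; the LFS_MKTAG bit layout shown is the usual littlefs v2 encoding and is included only for illustration.

```c
// Sketch of the fixed LFS_MKTAG_IF_ELSE; tag layout assumed from littlefs v2.
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t lfs_tag_t;

// type in bits 31..20, id in bits 19..10, size in bits 9..0
#define LFS_MKTAG(type, id, size) \
    (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size))

// fixed form: the condition now selects between two independent tags
#define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
    ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))

int main(void) {
    lfs_tag_t a = LFS_MKTAG_IF_ELSE(1, 0x601, 0, 8, 0x200, 1, 4);
    lfs_tag_t b = LFS_MKTAG_IF_ELSE(0, 0x601, 0, 8, 0x200, 1, 4);
    printf("true: %08" PRIx32 ", false: %08" PRIx32 "\n", a, b);
    return 0;
}
```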
@@ -418,6 +418,7 @@ int lfs_fs_traverseraw(lfs_t *lfs,
|
||||
int (*cb)(void *data, lfs_block_t block), void *data,
|
||||
bool includeorphans);
|
||||
static int lfs_fs_forceconsistency(lfs_t *lfs);
|
||||
static int lfs_fs_deorphan(lfs_t *lfs);
|
||||
static int lfs_deinit(lfs_t *lfs);
|
||||
#ifdef LFS_MIGRATE
|
||||
static int lfs1_traverse(lfs_t *lfs,
|
||||
@@ -714,7 +715,7 @@ static int lfs_dir_traverse(lfs_t *lfs,
|
||||
uint16_t fromid = lfs_tag_size(tag);
|
||||
uint16_t toid = lfs_tag_id(tag);
|
||||
int err = lfs_dir_traverse(lfs,
|
||||
buffer, 0, LFS_BLOCK_NULL, NULL, 0,
|
||||
buffer, 0, 0xffffffff, NULL, 0,
|
||||
LFS_MKTAG(0x600, 0x3ff, 0),
|
||||
LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0),
|
||||
fromid, fromid+1, toid-fromid+diff,
|
||||
@@ -774,7 +775,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
// now scan tags to fetch the actual dir and find possible match
|
||||
for (int i = 0; i < 2; i++) {
|
||||
lfs_off_t off = 0;
|
||||
lfs_tag_t ptag = LFS_BLOCK_NULL;
|
||||
lfs_tag_t ptag = 0xffffffff;
|
||||
|
||||
uint16_t tempcount = 0;
|
||||
lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
|
||||
@@ -782,7 +783,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
lfs_stag_t tempbesttag = besttag;
|
||||
|
||||
dir->rev = lfs_tole32(dir->rev);
|
||||
uint32_t crc = lfs_crc(LFS_BLOCK_NULL, &dir->rev, sizeof(dir->rev));
|
||||
uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
|
||||
dir->rev = lfs_fromle32(dir->rev);
|
||||
|
||||
while (true) {
|
||||
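Several hunks in this file replace LFS_BLOCK_NULL with a literal 0xffffffff when seeding CRCs, tags, and etags. The two constants share the same value (LFS_BLOCK_NULL is the all-ones block-address sentinel), so the change clarifies intent rather than behavior. A self-contained sketch, using a generic bitwise CRC-32 rather than littlefs's own lfs_crc, shows the two seeds are interchangeable:

```c
// Seeding a CRC with LFS_BLOCK_NULL vs 0xffffffff gives identical results;
// the routine below is a generic reflected CRC-32, not littlefs's lfs_crc.
#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

typedef uint32_t lfs_block_t;
#define LFS_BLOCK_NULL ((lfs_block_t)-1)  // all-ones block-address sentinel

static uint32_t crc32_update(uint32_t crc, const void *buf, size_t size) {
    const uint8_t *p = buf;
    for (size_t i = 0; i < size; i++) {
        crc ^= p[i];
        for (int b = 0; b < 8; b++) {
            crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
        }
    }
    return crc;
}

int main(void) {
    uint32_t rev = 42;
    uint32_t a = crc32_update(LFS_BLOCK_NULL, &rev, sizeof(rev));
    uint32_t b = crc32_update(0xffffffff, &rev, sizeof(rev));
    printf("%08" PRIx32 " %08" PRIx32 "\n", a, b);  // identical
    return 0;
}
```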
@@ -796,6 +797,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
if (err == LFS_ERR_CORRUPT) {
|
||||
// can't continue?
|
||||
dir->erased = false;
|
||||
dir->first = false;
|
||||
break;
|
||||
}
|
||||
return err;
|
||||
@@ -808,9 +810,11 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
if (!lfs_tag_isvalid(tag)) {
|
||||
dir->erased = (lfs_tag_type1(ptag) == LFS_TYPE_CRC &&
|
||||
dir->off % lfs->cfg->prog_size == 0);
|
||||
dir->first = false;
|
||||
break;
|
||||
} else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
|
||||
dir->erased = false;
|
||||
dir->first = false;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -825,6 +829,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
if (err) {
|
||||
if (err == LFS_ERR_CORRUPT) {
|
||||
dir->erased = false;
|
||||
dir->first = false;
|
||||
break;
|
||||
}
|
||||
return err;
|
||||
@@ -833,6 +838,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
|
||||
if (crc != dcrc) {
|
||||
dir->erased = false;
|
||||
dir->first = false;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -853,7 +859,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
dir->split = tempsplit;
|
||||
|
||||
// reset crc
|
||||
crc = LFS_BLOCK_NULL;
|
||||
crc = 0xffffffff;
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -866,6 +872,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
if (err) {
|
||||
if (err == LFS_ERR_CORRUPT) {
|
||||
dir->erased = false;
|
||||
dir->first = false;
|
||||
break;
|
||||
}
|
||||
return err;
|
||||
@@ -899,6 +906,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
if (err) {
|
||||
if (err == LFS_ERR_CORRUPT) {
|
||||
dir->erased = false;
|
||||
dir->first = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -912,6 +920,7 @@ static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
|
||||
if (res < 0) {
|
||||
if (res == LFS_ERR_CORRUPT) {
|
||||
dir->erased = false;
|
||||
dir->first = false;
|
||||
break;
|
||||
}
|
||||
return res;
|
||||
@@ -1231,14 +1240,14 @@ static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
|
||||
}
|
||||
|
||||
static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
|
||||
const lfs_off_t off1 = commit->off;
|
||||
const uint32_t crc1 = commit->crc;
|
||||
// align to program units
|
||||
const lfs_off_t off1 = commit->off + sizeof(lfs_tag_t);
|
||||
const lfs_off_t end = lfs_alignup(off1 + sizeof(uint32_t),
|
||||
const lfs_off_t end = lfs_alignup(off1 + 2*sizeof(uint32_t),
|
||||
lfs->cfg->prog_size);
|
||||
uint32_t ncrc = commit->crc;
|
||||
|
||||
// create crc tags to fill up remainder of commit, note that
|
||||
// padding is not crcd, which lets fetches skip padding but
|
||||
// padding is not crced, which lets fetches skip padding but
|
||||
// makes committing a bit more complicated
|
||||
while (commit->off < end) {
|
||||
lfs_off_t off = commit->off + sizeof(lfs_tag_t);
|
||||
@@ -1248,7 +1257,7 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
|
||||
}
|
||||
|
||||
// read erased state from next program unit
|
||||
lfs_tag_t tag = LFS_BLOCK_NULL;
|
||||
lfs_tag_t tag = 0xffffffff;
|
||||
int err = lfs_bd_read(lfs,
|
||||
NULL, &lfs->rcache, sizeof(tag),
|
||||
commit->block, noff, &tag, sizeof(tag));
|
||||
@@ -1272,10 +1281,9 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
|
||||
return err;
|
||||
}
|
||||
|
||||
ncrc = commit->crc;
|
||||
commit->off += sizeof(tag)+lfs_tag_size(tag);
|
||||
commit->ptag = tag ^ ((lfs_tag_t)reset << 31);
|
||||
commit->crc = LFS_BLOCK_NULL; // reset crc for next "commit"
|
||||
commit->crc = 0xffffffff; // reset crc for next "commit"
|
||||
}
|
||||
|
||||
// flush buffers
|
||||
@@ -1286,10 +1294,16 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
|
||||
|
||||
// successful commit, check checksums to make sure
|
||||
lfs_off_t off = commit->begin;
|
||||
lfs_off_t noff = off1;
|
||||
lfs_off_t noff = off1 + sizeof(uint32_t);
|
||||
while (off < end) {
|
||||
uint32_t crc = LFS_BLOCK_NULL;
|
||||
uint32_t crc = 0xffffffff;
|
||||
for (lfs_off_t i = off; i < noff+sizeof(uint32_t); i++) {
|
||||
// check against written crc, may catch blocks that
|
||||
// become readonly and match our commit size exactly
|
||||
if (i == off1 && crc != crc1) {
|
||||
return LFS_ERR_CORRUPT;
|
||||
}
|
||||
|
||||
// leave it up to caching to make this efficient
|
||||
uint8_t dat;
|
||||
err = lfs_bd_read(lfs,
|
||||
@@ -1299,12 +1313,6 @@ static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
|
||||
return err;
|
||||
}
|
||||
|
||||
// check against written crc to detect if block is readonly
|
||||
// (we may pick up old commits)
|
||||
if (i == noff && crc != ncrc) {
|
||||
return LFS_ERR_CORRUPT;
|
||||
}
|
||||
|
||||
crc = lfs_crc(crc, &dat, 1);
|
||||
}
|
||||
|
||||
@@ -1351,11 +1359,12 @@ static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
|
||||
|
||||
// set defaults
|
||||
dir->off = sizeof(dir->rev);
|
||||
dir->etag = LFS_BLOCK_NULL;
|
||||
dir->etag = 0xffffffff;
|
||||
dir->count = 0;
|
||||
dir->tail[0] = LFS_BLOCK_NULL;
|
||||
dir->tail[1] = LFS_BLOCK_NULL;
|
||||
dir->erased = false;
|
||||
dir->first = true;
|
||||
dir->split = false;
|
||||
|
||||
// don't write out yet, let caller take care of that
|
||||
@@ -1445,7 +1454,7 @@ static int lfs_dir_compact(lfs_t *lfs,
|
||||
// find size
|
||||
lfs_size_t size = 0;
|
||||
int err = lfs_dir_traverse(lfs,
|
||||
source, 0, LFS_BLOCK_NULL, attrs, attrcount,
|
||||
source, 0, 0xffffffff, attrs, attrcount,
|
||||
LFS_MKTAG(0x400, 0x3ff, 0),
|
||||
LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
|
||||
begin, end, -begin,
|
||||
@@ -1492,7 +1501,7 @@ static int lfs_dir_compact(lfs_t *lfs,
|
||||
// 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
|
||||
// one metadata block in the pair, effectively making this useless
|
||||
if (lfs->cfg->block_cycles > 0 &&
|
||||
(dir->rev % ((lfs->cfg->block_cycles+1)|1) == 0)) {
|
||||
(dir->rev % ((lfs->cfg->block_cycles+4)/*|1*/) == 0)) { // TODO what
|
||||
if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
|
||||
// oh no! we're writing too much to the superblock,
|
||||
// should we expand?
|
||||
@@ -1540,8 +1549,8 @@ static int lfs_dir_compact(lfs_t *lfs,
|
||||
struct lfs_commit commit = {
|
||||
.block = dir->pair[1],
|
||||
.off = 0,
|
||||
.ptag = LFS_BLOCK_NULL,
|
||||
.crc = LFS_BLOCK_NULL,
|
||||
.ptag = 0xffffffff,
|
||||
.crc = 0xffffffff,
|
||||
|
||||
.begin = 0,
|
||||
.end = lfs->cfg->block_size - 8,
|
||||
@@ -1570,7 +1579,7 @@ static int lfs_dir_compact(lfs_t *lfs,
|
||||
|
||||
// traverse the directory, this time writing out all unique tags
|
||||
err = lfs_dir_traverse(lfs,
|
||||
source, 0, LFS_BLOCK_NULL, attrs, attrcount,
|
||||
source, 0, 0xffffffff, attrs, attrcount,
|
||||
LFS_MKTAG(0x400, 0x3ff, 0),
|
||||
LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
|
||||
begin, end, -begin,
|
||||
@@ -1673,6 +1682,11 @@ relocate:
|
||||
}
|
||||
|
||||
if (relocated) {
|
||||
if (!dir->first) {
|
||||
// TODO something funky!
|
||||
dir->pair[0] = dir->pair[0];
|
||||
dir->pair[1] = oldpair[1];
|
||||
}
|
||||
// update references if we relocated
|
||||
LFS_DEBUG("Relocating %"PRIx32" %"PRIx32" -> %"PRIx32" %"PRIx32,
|
||||
oldpair[0], oldpair[1], dir->pair[0], dir->pair[1]);
|
||||
@@ -1747,7 +1761,7 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
|
||||
.block = dir->pair[0],
|
||||
.off = dir->off,
|
||||
.ptag = dir->etag,
|
||||
.crc = LFS_BLOCK_NULL,
|
||||
.crc = 0xffffffff,
|
||||
|
||||
.begin = dir->off,
|
||||
.end = lfs->cfg->block_size - 8,
|
||||
@@ -2708,6 +2722,12 @@ int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
|
||||
LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file);
|
||||
LFS_ASSERT(file->flags & LFS_F_OPENED);
|
||||
|
||||
if (file->flags & LFS_F_ERRED) {
|
||||
// it's not safe to do anything if our file errored
|
||||
LFS_TRACE("lfs_file_sync -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int err = lfs_file_flush(lfs, file);
|
||||
if (err) {
|
||||
file->flags |= LFS_F_ERRED;
|
||||
@@ -2716,7 +2736,6 @@ int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
|
||||
}
|
||||
|
||||
if ((file->flags & LFS_F_DIRTY) &&
|
||||
!(file->flags & LFS_F_ERRED) &&
|
||||
!lfs_pair_isnull(file->m.pair)) {
|
||||
// update dir entry
|
||||
uint16_t type;
|
||||
@@ -3176,7 +3195,7 @@ int lfs_remove(lfs_t *lfs, const char *path) {
|
||||
}
|
||||
|
||||
lfs->mlist = dir.next;
|
||||
if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
|
||||
if (lfs_tag_type3(tag) == LFS_TYPE_DIR/* && lfs_tag_size(lfs->gstate.tag) > 0*/) {
|
||||
// fix orphan
|
||||
lfs_fs_preporphans(lfs, -1);
|
||||
|
||||
@@ -3445,7 +3464,7 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
|
||||
LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);
|
||||
|
||||
// check that the block size is large enough to fit ctz pointers
|
||||
LFS_ASSERT(4*lfs_npw2(LFS_BLOCK_NULL / (lfs->cfg->block_size-2*4))
|
||||
LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
|
||||
<= lfs->cfg->block_size);
|
||||
|
||||
// block_cycles = 0 is no longer supported.
|
||||
@@ -3927,9 +3946,7 @@ static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
|
||||
// use fetchmatch with callback to find pairs
|
||||
parent->tail[0] = 0;
|
||||
parent->tail[1] = 1;
|
||||
int i = 0;
|
||||
while (!lfs_pair_isnull(parent->tail)) {
|
||||
i += 1;
|
||||
lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
|
||||
LFS_MKTAG(0x7ff, 0, 0x3ff),
|
||||
LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8),
|
||||
@@ -4007,6 +4024,12 @@ static int lfs_fs_relocate(lfs_t *lfs,
|
||||
lfs_fs_preporphans(lfs, -1);
|
||||
}
|
||||
|
||||
#if 0
|
||||
int err = lfs_fs_deorphan(lfs);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
#else
|
||||
// find pred
|
||||
int err = lfs_fs_pred(lfs, oldpair, &parent);
|
||||
if (err && err != LFS_ERR_NOENT) {
|
||||
@@ -4037,6 +4060,7 @@ static int lfs_fs_relocate(lfs_t *lfs,
|
||||
return err;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -4372,7 +4396,7 @@ static int lfs1_dir_fetch(lfs_t *lfs,
|
||||
continue;
|
||||
}
|
||||
|
||||
uint32_t crc = LFS_BLOCK_NULL;
|
||||
uint32_t crc = 0xffffffff;
|
||||
lfs1_dir_tole32(&test);
|
||||
lfs1_crc(&crc, &test, sizeof(test));
|
||||
lfs1_dir_fromle32(&test);
|
||||
@@ -4808,7 +4832,7 @@ int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
|
||||
dir2.pair[1] = dir1.pair[1];
|
||||
dir2.rev = dir1.d.rev;
|
||||
dir2.off = sizeof(dir2.rev);
|
||||
dir2.etag = LFS_BLOCK_NULL;
|
||||
dir2.etag = 0xffffffff;
|
||||
dir2.count = 0;
|
||||
dir2.tail[0] = lfs->lfs1->root[0];
|
||||
dir2.tail[1] = lfs->lfs1->root[1];
|
||||
|
||||
lfs.h — 1 changed line

@@ -311,6 +311,7 @@ typedef struct lfs_mdir {
     uint16_t count;
     bool erased;
     bool split;
+    bool first;
     lfs_block_t tail[2];
 } lfs_mdir_t;

@@ -184,7 +184,8 @@ class TestCase:
|
||||
elif self.if_ is not None:
|
||||
if_ = self.if_
|
||||
while True:
|
||||
for k, v in self.defines.items():
|
||||
for k, v in sorted(self.defines.items(),
|
||||
key=lambda x: len(x[0]), reverse=True):
|
||||
if k in if_:
|
||||
if_ = if_.replace(k, '(%s)' % v)
|
||||
break
|
||||
@@ -199,22 +200,25 @@ class TestCase:
|
||||
return True
|
||||
|
||||
def test(self, exec=[], persist=False, cycles=None,
|
||||
gdb=False, failure=None, **args):
|
||||
gdb=False, failure=None, disk=None, **args):
|
||||
# build command
|
||||
cmd = exec + ['./%s.test' % self.suite.path,
|
||||
repr(self.caseno), repr(self.permno)]
|
||||
|
||||
# persist disk or keep in RAM for speed?
|
||||
if persist:
|
||||
if not disk:
|
||||
disk = self.suite.path + '.disk'
|
||||
if persist != 'noerase':
|
||||
try:
|
||||
os.remove(self.suite.path + '.disk')
|
||||
with open(disk, 'w') as f:
|
||||
f.truncate(0)
|
||||
if args.get('verbose', False):
|
||||
print('rm', self.suite.path + '.disk')
|
||||
print('truncate --size=0', disk)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
cmd.append(self.suite.path + '.disk')
|
||||
cmd.append(disk)
|
||||
|
||||
# simulate power-loss after n cycles?
|
||||
if cycles:
|
||||
@@ -295,11 +299,17 @@ class ValgrindTestCase(TestCase):
|
||||
return not self.leaky and super().shouldtest(**args)
|
||||
|
||||
def test(self, exec=[], **args):
|
||||
exec = exec + [
|
||||
verbose = args.get('verbose', False)
|
||||
uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1)
|
||||
exec = [
|
||||
'valgrind',
|
||||
'--leak-check=full',
|
||||
] + (['--undef-value-errors=no'] if uninit else []) + [
|
||||
] + (['--track-origins=yes'] if not uninit else []) + [
|
||||
'--error-exitcode=4',
|
||||
'-q']
|
||||
'--error-limit=no',
|
||||
] + (['--num-callers=1'] if not verbose else []) + [
|
||||
'-q'] + exec
|
||||
return super().test(exec=exec, **args)
|
||||
|
||||
class ReentrantTestCase(TestCase):
|
||||
@@ -310,7 +320,7 @@ class ReentrantTestCase(TestCase):
|
||||
def shouldtest(self, **args):
|
||||
return self.reentrant and super().shouldtest(**args)
|
||||
|
||||
def test(self, exec=[], persist=False, gdb=False, failure=None, **args):
|
||||
def test(self, persist=False, gdb=False, failure=None, **args):
|
||||
for cycles in it.count(1):
|
||||
# clear disk first?
|
||||
if cycles == 1 and persist != 'noerase':
|
||||
@@ -376,10 +386,11 @@ class TestSuite:
|
||||
# code lineno?
|
||||
if 'code' in case:
|
||||
case['code_lineno'] = code_linenos.pop()
|
||||
# give our case's config a copy of our "global" config
|
||||
for k, v in config.items():
|
||||
if k not in case:
|
||||
case[k] = v
|
||||
# merge conditions if necessary
|
||||
if 'if' in config and 'if' in case:
|
||||
case['if'] = '(%s) && (%s)' % (config['if'], case['if'])
|
||||
elif 'if' in config:
|
||||
case['if'] = config['if']
|
||||
# initialize test case
|
||||
self.cases.append(TestCase(case, filter=filter,
|
||||
suite=self, caseno=i+1, lineno=lineno, **args))
|
||||
@@ -702,8 +713,6 @@ def main(**args):
|
||||
stdout = perm.result.stdout[:-1]
|
||||
else:
|
||||
stdout = perm.result.stdout
|
||||
if (not args.get('verbose', False) and len(stdout) > 5):
|
||||
sys.stdout.write('...\n')
|
||||
for line in stdout[-5:]:
|
||||
sys.stdout.write(line)
|
||||
if perm.result.assert_:
|
||||
@@ -764,4 +773,6 @@ if __name__ == "__main__":
|
||||
help="Run non-leaky tests under valgrind to check for memory leaks.")
|
||||
parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '),
|
||||
help="Run tests with another executable prefixed on the command line.")
|
||||
parser.add_argument('-d', '--disk',
|
||||
help="Specify a file to use for persistent/reentrant tests.")
|
||||
sys.exit(main(**vars(parser.parse_args())))
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
# allocator tests
|
||||
# note for these to work there are many constraints on the device geometry
|
||||
# note for these to work there are a number of constraints on the device geometry
|
||||
if = 'LFS_BLOCK_CYCLES == -1'
|
||||
|
||||
[[case]] # parallel allocation test
|
||||
define.FILES = 3
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / FILES)'
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
lfs_file_t files[FILES];
|
||||
@@ -46,7 +47,7 @@ code = '''
|
||||
|
||||
[[case]] # serial allocation test
|
||||
define.FILES = 3
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / FILES)'
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
|
||||
@@ -85,7 +86,7 @@ code = '''
|
||||
|
||||
[[case]] # parallel allocation reuse test
|
||||
define.FILES = 3
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / FILES)'
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
|
||||
define.CYCLES = [1, 10]
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
@@ -140,7 +141,7 @@ code = '''
|
||||
|
||||
[[case]] # serial allocation reuse test
|
||||
define.FILES = 3
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / FILES)'
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
|
||||
define.CYCLES = [1, 10]
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
# bad blocks with block cycles should be tested in test_relocations
|
||||
if = 'LFS_BLOCK_CYCLES == -1'
|
||||
|
||||
[[case]] # single bad blocks
|
||||
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
|
||||
define.LFS_ERASE_CYCLES = 0xffffffff
|
||||
|
||||
@@ -155,7 +155,7 @@ code = '''
|
||||
'''
|
||||
|
||||
[[case]] # reentrant many directory creation/rename/removal
|
||||
define.N = [5, 10] # TODO changed from 20, should we be able to do more?
|
||||
define.N = [5, 11]
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
# still pass with other inline sizes but wouldn't be testing anything.
|
||||
|
||||
define.LFS_CACHE_SIZE = 512
|
||||
if = 'LFS_CACHE_SIZE == 512'
|
||||
if = 'LFS_CACHE_SIZE % LFS_PROG_SIZE == 0 && LFS_CACHE_SIZE == 512'
|
||||
|
||||
[[case]] # entry grow test
|
||||
code = '''
|
||||
|
||||
@@ -33,6 +33,9 @@ code = '''
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
|
||||
assert(res == 1 || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
@@ -40,6 +43,7 @@ code = '''
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
if (err == LFS_ERR_NOSPC) {
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
@@ -111,6 +115,9 @@ code = '''
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
|
||||
assert(res == 1 || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
@@ -118,6 +125,7 @@ code = '''
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
if (err == LFS_ERR_NOSPC) {
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
@@ -198,6 +206,9 @@ code = '''
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
|
||||
assert(res == 1 || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
@@ -205,6 +216,7 @@ code = '''
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
if (err == LFS_ERR_NOSPC) {
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
@@ -283,6 +295,9 @@ code = '''
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
|
||||
assert(res == 1 || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
@@ -290,6 +305,7 @@ code = '''
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
if (err == LFS_ERR_NOSPC) {
|
||||
lfs_unmount(&lfs) => 0;
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
@@ -334,112 +350,117 @@ exhausted:
|
||||
LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]);
|
||||
'''
|
||||
|
||||
[[case]] # test that we wear blocks roughly evenly
|
||||
define.LFS_ERASE_CYCLES = 0xffffffff
|
||||
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
|
||||
define.LFS_BLOCK_CYCLES = [5, 4, 3, 2, 1]
|
||||
#define.LFS_BLOCK_CYCLES = [4, 2]
|
||||
define.CYCLES = 100
|
||||
define.FILES = 10
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "roadrunner") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
uint32_t cycle = 0;
|
||||
while (cycle < CYCLES) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// chose name, roughly random seed, and random 2^n size
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
srand(cycle * i);
|
||||
size = 1 << 4; //((rand() % 10)+2);
|
||||
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
char c = 'a' + (rand() % 26);
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
|
||||
assert(res == 1 || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
|
||||
err = lfs_file_close(&lfs, &file);
|
||||
assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
if (err == LFS_ERR_NOSPC) {
|
||||
goto exhausted;
|
||||
}
|
||||
}
|
||||
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
srand(cycle * i);
|
||||
size = 1 << 4; //((rand() % 10)+2);
|
||||
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
char c = 'a' + (rand() % 26);
|
||||
char r;
|
||||
lfs_file_read(&lfs, &file, &r, 1) => 1;
|
||||
assert(r == c);
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
cycle += 1;
|
||||
}
|
||||
|
||||
exhausted:
|
||||
// should still be readable
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
LFS_WARN("completed %d cycles", cycle);
|
||||
|
||||
// check the wear on our block device
|
||||
lfs_testbd_wear_t minwear = -1;
|
||||
lfs_testbd_wear_t totalwear = 0;
|
||||
lfs_testbd_wear_t maxwear = 0;
|
||||
// skip 0 and 1 as superblock movement is intentionally avoided
|
||||
for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
|
||||
lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
|
||||
printf("%08x: wear %d\n", b, wear);
|
||||
assert(wear >= 0);
|
||||
if (wear < minwear) {
|
||||
minwear = wear;
|
||||
}
|
||||
if (wear > maxwear) {
|
||||
maxwear = wear;
|
||||
}
|
||||
totalwear += wear;
|
||||
}
|
||||
lfs_testbd_wear_t avgwear = totalwear / LFS_BLOCK_COUNT;
|
||||
LFS_WARN("max wear: %d cycles", maxwear);
|
||||
LFS_WARN("avg wear: %d cycles", totalwear / LFS_BLOCK_COUNT);
|
||||
LFS_WARN("min wear: %d cycles", minwear);
|
||||
|
||||
// find standard deviation^2
|
||||
lfs_testbd_wear_t dev2 = 0;
|
||||
for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
|
||||
lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
|
||||
assert(wear >= 0);
|
||||
lfs_testbd_swear_t diff = wear - avgwear;
|
||||
dev2 += diff*diff;
|
||||
}
|
||||
dev2 /= totalwear;
|
||||
LFS_WARN("std dev^2: %d", dev2);
|
||||
assert(dev2 < 8);
|
||||
'''
|
||||
# TODO fixme
|
||||
#[[case]] # test that we wear blocks roughly evenly
|
||||
#define.LFS_ERASE_CYCLES = 0xffffffff
|
||||
#define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
|
||||
#define.LFS_BLOCK_CYCLES = [5, 4, 3, 2, 1]
|
||||
#define.CYCLES = 100
|
||||
#define.FILES = 10
|
||||
#if = 'LFS_BLOCK_CYCLES < CYCLES/10'
|
||||
#code = '''
|
||||
# lfs_format(&lfs, &cfg) => 0;
|
||||
# lfs_mount(&lfs, &cfg) => 0;
|
||||
# lfs_mkdir(&lfs, "roadrunner") => 0;
|
||||
# lfs_unmount(&lfs) => 0;
|
||||
#
|
||||
# uint32_t cycle = 0;
|
||||
# while (cycle < CYCLES) {
|
||||
# lfs_mount(&lfs, &cfg) => 0;
|
||||
# for (uint32_t i = 0; i < FILES; i++) {
|
||||
# // chose name, roughly random seed, and random 2^n size
|
||||
# sprintf(path, "roadrunner/test%d", i);
|
||||
# srand(cycle * i);
|
||||
# size = 1 << 4; //((rand() % 10)+2);
|
||||
#
|
||||
# lfs_file_open(&lfs, &file, path,
|
||||
# LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
#
|
||||
# for (lfs_size_t j = 0; j < size; j++) {
|
||||
# char c = 'a' + (rand() % 26);
|
||||
# lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
|
||||
# assert(res == 1 || res == LFS_ERR_NOSPC);
|
||||
# if (res == LFS_ERR_NOSPC) {
|
||||
# err = lfs_file_close(&lfs, &file);
|
||||
# assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
# lfs_unmount(&lfs) => 0;
|
||||
# goto exhausted;
|
||||
# }
|
||||
# }
|
||||
#
|
||||
# err = lfs_file_close(&lfs, &file);
|
||||
# assert(err == 0 || err == LFS_ERR_NOSPC);
|
||||
# if (err == LFS_ERR_NOSPC) {
|
||||
# lfs_unmount(&lfs) => 0;
|
||||
# goto exhausted;
|
||||
# }
|
||||
# }
|
||||
#
|
||||
# for (uint32_t i = 0; i < FILES; i++) {
|
||||
# // check for errors
|
||||
# sprintf(path, "roadrunner/test%d", i);
|
||||
# srand(cycle * i);
|
||||
# size = 1 << 4; //((rand() % 10)+2);
|
||||
#
|
||||
# lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
# for (lfs_size_t j = 0; j < size; j++) {
|
||||
# char c = 'a' + (rand() % 26);
|
||||
# char r;
|
||||
# lfs_file_read(&lfs, &file, &r, 1) => 1;
|
||||
# assert(r == c);
|
||||
# }
|
||||
#
|
||||
# lfs_file_close(&lfs, &file) => 0;
|
||||
# }
|
||||
# lfs_unmount(&lfs) => 0;
|
||||
#
|
||||
# cycle += 1;
|
||||
# }
|
||||
#
|
||||
#exhausted:
|
||||
# // should still be readable
|
||||
# lfs_mount(&lfs, &cfg) => 0;
|
||||
# for (uint32_t i = 0; i < FILES; i++) {
|
||||
# // check for errors
|
||||
# sprintf(path, "roadrunner/test%d", i);
|
||||
# lfs_stat(&lfs, path, &info) => 0;
|
||||
# }
|
||||
# lfs_unmount(&lfs) => 0;
|
||||
#
|
||||
# LFS_WARN("completed %d cycles", cycle);
|
||||
#
|
||||
# // check the wear on our block device
|
||||
# lfs_testbd_wear_t minwear = -1;
|
||||
# lfs_testbd_wear_t totalwear = 0;
|
||||
# lfs_testbd_wear_t maxwear = 0;
|
||||
# // skip 0 and 1 as superblock movement is intentionally avoided
|
||||
# for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
|
||||
# lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
|
||||
# printf("%08x: wear %d\n", b, wear);
|
||||
# assert(wear >= 0);
|
||||
# if (wear < minwear) {
|
||||
# minwear = wear;
|
||||
# }
|
||||
# if (wear > maxwear) {
|
||||
# maxwear = wear;
|
||||
# }
|
||||
# totalwear += wear;
|
||||
# }
|
||||
# lfs_testbd_wear_t avgwear = totalwear / LFS_BLOCK_COUNT;
|
||||
# LFS_WARN("max wear: %d cycles", maxwear);
|
||||
# LFS_WARN("avg wear: %d cycles", totalwear / LFS_BLOCK_COUNT);
|
||||
# LFS_WARN("min wear: %d cycles", minwear);
|
||||
#
|
||||
# // find standard deviation^2
|
||||
# lfs_testbd_wear_t dev2 = 0;
|
||||
# for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
|
||||
# lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
|
||||
# assert(wear >= 0);
|
||||
# lfs_testbd_swear_t diff = wear - avgwear;
|
||||
# dev2 += diff*diff;
|
||||
# }
|
||||
# dev2 /= totalwear;
|
||||
# LFS_WARN("std dev^2: %d", dev2);
|
||||
# assert(dev2 < 8);
|
||||
#'''
|
||||
|
||||
|
||||
@@ -148,6 +148,7 @@ code = '''
|
||||
|
||||
[[case]] # move file corrupt source and dest
|
||||
in = "lfs.c"
|
||||
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
@@ -239,6 +240,7 @@ code = '''
|
||||
|
||||
[[case]] # move file after corrupt
|
||||
in = "lfs.c"
|
||||
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
@@ -593,6 +595,7 @@ code = '''
|
||||
|
||||
[[case]] # move dir corrupt source and dest
|
||||
in = "lfs.c"
|
||||
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
@@ -692,6 +695,7 @@ code = '''
|
||||
|
||||
[[case]] # move dir after corrupt
|
||||
in = "lfs.c"
|
||||
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
|
||||
@@ -1,5 +1,6 @@
[[case]] # orphan test
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
@@ -58,7 +59,7 @@ code = '''
[[case]] # reentrant testing for orphans, basically just spam mkdir/remove
reentrant = true
# TODO fix this case, caused by non-DAG trees
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
#if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
define = [
    {FILES=6, DEPTH=1, CYCLES=20},
    {FILES=26, DEPTH=1, CYCLES=20},

@@ -247,14 +247,14 @@ code = '''
    lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;

    memset(path, 'w', LFS_NAME_MAX+1);
    path[LFS_NAME_MAX+2] = '\0';
    path[LFS_NAME_MAX+1] = '\0';
    lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
    lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT)
            => LFS_ERR_NAMETOOLONG;

    memcpy(path, "coffee/", strlen("coffee/"));
    memset(path+strlen("coffee/"), 'w', LFS_NAME_MAX+1);
    path[strlen("coffee/")+LFS_NAME_MAX+2] = '\0';
    path[strlen("coffee/")+LFS_NAME_MAX+1] = '\0';
    lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
    lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT)
            => LFS_ERR_NAMETOOLONG;
@@ -270,7 +270,6 @@ code = '''
    lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
    lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;

    lfs_mount(&lfs, &cfg) => 0;
    memset(path, 'w', LFS_NAME_MAX);
    path[LFS_NAME_MAX] = '\0';
    lfs_mkdir(&lfs, path) => 0;

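The first hunk above only changes which index receives the terminating NUL. A small standalone sketch of the arithmetic behind that change; the local path buffer and the LFS_NAME_MAX value (255 is the littlefs default) are assumptions for illustration, not part of the test suite:

    #include <assert.h>
    #include <string.h>

    #ifndef LFS_NAME_MAX
    #define LFS_NAME_MAX 255    // littlefs default; configurable in lfs.h
    #endif

    int main(void) {
        char path[LFS_NAME_MAX+2];
        // memset fills indices 0..LFS_NAME_MAX, i.e. LFS_NAME_MAX+1 bytes
        memset(path, 'w', LFS_NAME_MAX+1);
        // so the terminator belongs at index LFS_NAME_MAX+1; the old index
        // (LFS_NAME_MAX+2) skipped a byte and left it uninitialized
        path[LFS_NAME_MAX+1] = '\0';
        // the name is exactly one character over the limit, which is what
        // makes lfs_mkdir()/lfs_file_open() return LFS_ERR_NAMETOOLONG
        assert(strlen(path) == LFS_NAME_MAX+1);
        return 0;
    }
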
@@ -31,7 +31,7 @@ code = '''
    for (int i = 0; i < COUNT; i++) {
        sprintf(path, "test%03d_loooooooooooooooooong_name", i);
        lfs_dir_read(&lfs, &dir, &info) => 1;
        strcmp(info.name, path) => 0;
        assert(strcmp(info.name, path) == 0);
    }
    lfs_dir_read(&lfs, &dir, &info) => 0;
    lfs_dir_close(&lfs, &dir) => 0;
@@ -54,7 +54,7 @@ code = '''
    for (int i = 0; i < COUNT; i++) {
        sprintf(path, "test%03d_loooooooooooooooooong_name", i);
        lfs_dir_read(&lfs, &dir, &info) => 1;
        strcmp(info.name, path) => 0;
        assert(strcmp(info.name, path) == 0);
    }
    lfs_dir_read(&lfs, &dir, &info) => 0;
    lfs_dir_close(&lfs, &dir) => 0;
@@ -97,7 +97,7 @@ code = '''
    for (int i = 0; i < COUNT; i++) {
        sprintf(path, "test%03d_loooooooooooooooooong_name", i);
        lfs_dir_read(&lfs, &dir, &info) => 1;
        strcmp(info.name, path) => 0;
        assert(strcmp(info.name, path) == 0);
        info.size => 0;

        sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
@@ -113,7 +113,7 @@ code = '''
    for (int i = 0; i < COUNT; i++) {
        sprintf(path, "test%03d_loooooooooooooooooong_name", i);
        lfs_dir_read(&lfs, &dir, &info) => 1;
        strcmp(info.name, path) => 0;
        assert(strcmp(info.name, path) == 0);
        info.size => 2;

        sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
@@ -129,7 +129,7 @@ code = '''
    for (int i = 0; i < COUNT; i++) {
        sprintf(path, "test%03d_loooooooooooooooooong_name", i);
        lfs_dir_read(&lfs, &dir, &info) => 1;
        strcmp(info.name, path) => 0;
        assert(strcmp(info.name, path) == 0);
        info.size => 2;
    }
    lfs_dir_read(&lfs, &dir, &info) => 0;
@@ -143,12 +143,90 @@ code = '''
    lfs_unmount(&lfs) => 0;
'''

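Each of the hunks above swaps a bare `strcmp(...) => 0` for an explicit assert. A minimal sketch of what both spellings check, assuming the runner's `=>` is shorthand for comparing an expression against an expected value; CHECK_EQ is a hypothetical stand-in, not part of the suite:

    #include <assert.h>
    #include <string.h>

    // hypothetical stand-in for the test scripts' "=>" sugar
    #define CHECK_EQ(expr, expected) assert((expr) == (expected))

    int main(void) {
        const char *name = "test000_loooooooooooooooooong_name";
        // old style in the hunks above: strcmp(info.name, path) => 0;
        CHECK_EQ(strcmp(name, "test000_loooooooooooooooooong_name"), 0);
        // new style: a plain assert, which reads the same without the sugar
        assert(strcmp(name, "test000_loooooooooooooooooong_name") == 0);
        return 0;
    }
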
[[case]] # non-DAG tree test
define.LFS_BLOCK_CYCLES = [8, 1]
define.N = [10, 100, 1000]
code = '''
    lfs_format(&lfs, &cfg) => 0;
    lfs_mount(&lfs, &cfg) => 0;
    // first create directories
    lfs_mkdir(&lfs, "child_1") => 0;
    lfs_mkdir(&lfs, "child_2") => 0;
    // then move the second child under the first,
    // this creates a cycle since the second child should have been
    // inserted before the first
    lfs_rename(&lfs, "child_2", "child_1/child_2") => 0;
    // now try to force second child to relocate
    lfs_file_open(&lfs, &file, "child_1/child_2/grandchild",
            LFS_O_WRONLY | LFS_O_CREAT) => 0;
    size = 0;
    for (int i = 0; i < N; i++) {
        lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
        sprintf((char*)buffer, "%d", i);
        size = strlen((char*)buffer);
        lfs_file_write(&lfs, &file, buffer, size) => size;
        lfs_file_sync(&lfs, &file) => 0;
    }
    lfs_file_close(&lfs, &file) => 0;
    lfs_unmount(&lfs);

    // check that nothing broke
    lfs_mount(&lfs, &cfg) => 0;
    lfs_dir_open(&lfs, &dir, "/") => 0;
    lfs_dir_read(&lfs, &dir, &info) => 1;
    assert(info.type == LFS_TYPE_DIR);
    assert(strcmp(info.name, ".") == 0);
    lfs_dir_read(&lfs, &dir, &info) => 1;
    assert(info.type == LFS_TYPE_DIR);
    assert(strcmp(info.name, "..") == 0);
    lfs_dir_read(&lfs, &dir, &info) => 1;
    assert(info.type == LFS_TYPE_DIR);
    assert(strcmp(info.name, "child_1") == 0);
    lfs_dir_read(&lfs, &dir, &info) => 0;
    lfs_dir_close(&lfs, &dir) => 0;

    lfs_dir_open(&lfs, &dir, "/child_1") => 0;
    lfs_dir_read(&lfs, &dir, &info) => 1;
    assert(info.type == LFS_TYPE_DIR);
    assert(strcmp(info.name, ".") == 0);
    lfs_dir_read(&lfs, &dir, &info) => 1;
    assert(info.type == LFS_TYPE_DIR);
    assert(strcmp(info.name, "..") == 0);
    lfs_dir_read(&lfs, &dir, &info) => 1;
    assert(info.type == LFS_TYPE_DIR);
    assert(strcmp(info.name, "child_2") == 0);
    lfs_dir_read(&lfs, &dir, &info) => 0;
    lfs_dir_close(&lfs, &dir) => 0;

    lfs_dir_open(&lfs, &dir, "/child_1/child_2") => 0;
    lfs_dir_read(&lfs, &dir, &info) => 1;
    assert(info.type == LFS_TYPE_DIR);
    assert(strcmp(info.name, ".") == 0);
    lfs_dir_read(&lfs, &dir, &info) => 1;
    assert(info.type == LFS_TYPE_DIR);
    assert(strcmp(info.name, "..") == 0);
    lfs_dir_read(&lfs, &dir, &info) => 1;
    assert(info.type == LFS_TYPE_REG);
    assert(strcmp(info.name, "grandchild") == 0);
    assert(info.size == size);
    lfs_dir_read(&lfs, &dir, &info) => 0;
    lfs_dir_close(&lfs, &dir) => 0;

    lfs_file_open(&lfs, &file, "child_1/child_2/grandchild",
            LFS_O_RDONLY) => 0;
    uint8_t rbuffer[1024];
    lfs_file_read(&lfs, &file, rbuffer, sizeof(rbuffer)) => size;
    assert(memcmp(rbuffer, buffer, size) == 0);
    lfs_file_close(&lfs, &file) => 0;
    lfs_unmount(&lfs) => 0;
'''

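The verification half of the new non-DAG case above walks each directory entry by entry with the public lfs_dir_* API. A small sketch of that pattern factored into a helper; expect_entry() is illustrative only (not part of littlefs or the suite) and assumes lfs.h is on the include path:

    #include <assert.h>
    #include <string.h>
    #include "lfs.h"

    // read the next entry and check its type and name;
    // lfs_dir_read() returns a positive value (1 in the case above)
    // when an entry was read, 0 at the end of the directory
    static void expect_entry(lfs_t *lfs, lfs_dir_t *dir,
            const char *name, uint8_t type) {
        struct lfs_info info;
        assert(lfs_dir_read(lfs, dir, &info) == 1);
        assert(info.type == type);
        assert(strcmp(info.name, name) == 0);
    }

    // usage mirroring the checks above (in the suite's "=>" dialect):
    //     lfs_dir_open(&lfs, &dir, "/child_1") => 0;
    //     expect_entry(&lfs, &dir, ".", LFS_TYPE_DIR);
    //     expect_entry(&lfs, &dir, "..", LFS_TYPE_DIR);
    //     expect_entry(&lfs, &dir, "child_2", LFS_TYPE_DIR);
    //     lfs_dir_read(&lfs, &dir, &info) => 0;   // end of directory
    //     lfs_dir_close(&lfs, &dir) => 0;
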
[[case]] # reentrant testing for relocations, this is the same as the
# orphan testing, except here we also set block_cycles so that
# almost every tree operation needs a relocation
reentrant = true
# TODO fix this case, caused by non-DAG trees
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
#if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
define = [
    {FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
    {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
@@ -210,7 +288,7 @@ code = '''
[[case]] # reentrant testing for relocations, but now with random renames!
reentrant = true
# TODO fix this case, caused by non-DAG trees
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
#if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
define = [
    {FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
    {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},

@@ -100,7 +100,7 @@ code = '''
    lfs_file_open(&lfs, &file, "sequence",
            LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;

    size = lfs.cfg->cache_size;
    size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2);
    lfs_size_t qsize = size / 4;
    uint8_t *wb = buffer;
    uint8_t *rb = buffer + size;
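
The one-line change above clamps size so that the two halves of the scratch buffer, wb and rb, can never run past the end of buffer when the configured cache size is larger than half the buffer. A small sketch of the bound being protected; the buffer and cache sizes here are assumptions for illustration:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint8_t buffer[1024];            // stand-in for the suite's scratch buffer
        uint32_t cache_size = 4096;      // e.g. a large LFS_CACHE_SIZE configuration
        // what lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2) guards against:
        uint32_t size = cache_size < sizeof(buffer)/2
                ? cache_size : sizeof(buffer)/2;
        uint8_t *wb = buffer;            // write half
        uint8_t *rb = buffer + size;     // read half
        // both halves must fit in the buffer, which only holds once size
        // is clamped to sizeof(buffer)/2
        assert(rb + size <= buffer + sizeof(buffer));
        (void)wb;
        return 0;
    }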