Compare commits


1 Commit

Author: Christopher Haster
SHA1: f3e4f78fe4
Message: A number of small QOL changes related to lfs_util.h
- Removed stdlib includes from lfs.h; these should all go through
  lfs_util.h to let users override these definitions if stdlib is
  unavailable on their system.

- Changed the name of the LFS_CONFIG macro to LFS_UTIL to avoid
  confusion with the lfs_config struct. This also hints that LFS_UTIL
  is related to lfs_util.h.

  LFS_UTIL allows the user to override lfs_util.h so they can provide
  their own system-level dependencies such as malloc, tracing, builtins,
  stdint definitions, string.h, and others.

- Moved error code definitions to lfs_util.h. This lets users override
  the error codes to replace them with their own error codes and avoid
  a translation layer in some situations. Note the error codes must
  still be in the range of a negative int.
Date: 2019-11-30 14:40:05 -06:00
64 changed files with 5113 additions and 14741 deletions
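For context, a user override enabled by this change might look roughly like the sketch below. The header name, the `-D` flag spelling, and the replacement definitions are illustrative assumptions; a real override has to supply everything lfs.c expects from lfs_util.h (allocator, tracing, builtins, stdint/string definitions, and optionally the error codes, which must remain negative ints).

``` c
// my_lfs_util.h -- hypothetical user-provided replacement for lfs_util.h,
// selected with something like: cc -DLFS_UTIL='"my_lfs_util.h"' ...
// (abbreviated sketch; a real override must cover every definition lfs.c uses)
#ifndef MY_LFS_UTIL_H
#define MY_LFS_UTIL_H

#include <stdint.h>
#include <stddef.h>
#include <string.h>

// route littlefs's allocations to a custom allocator instead of stdlib
void *my_rtos_alloc(size_t size);
void my_rtos_free(void *p);
static inline void *lfs_malloc(size_t size) { return my_rtos_alloc(size); }
static inline void lfs_free(void *p) { my_rtos_free(p); }

// silence logging/tracing on a system without stdio
#define LFS_TRACE(...)
#define LFS_DEBUG(...)
#define LFS_WARN(...)
#define LFS_ERROR(...)

// error codes may also be redefined here, as long as they stay negative ints
// (e.g. #define LFS_ERR_NOMEM MY_SYSTEM_ENOMEM)

#endif
```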


@@ -1,26 +0,0 @@
name: post-release
on:
release:
branches: [master]
types: [released]
jobs:
post-release:
runs-on: ubuntu-20.04
steps:
# trigger post-release in dependency repo, this indirection allows the
# dependency repo to be updated often without affecting this repo. At
# the time of this comment, the dependency repo is responsible for
# creating PRs for other dependent repos post-release.
- name: trigger-post-release
continue-on-error: true
run: |
curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
"$GITHUB_API_URL/repos/${{secrets.POST_RELEASE_REPO}}/dispatches" \
-d "$(jq -n '{
event_type: "post-release",
client_payload: {
repo: env.GITHUB_REPOSITORY,
version: "${{github.event.release.tag_name}}"}}' \
| tee /dev/stderr)"


@@ -1,196 +0,0 @@
name: release
on:
workflow_run:
workflows: [test]
branches: [master]
types: [completed]
jobs:
release:
runs-on: ubuntu-20.04
# need to manually check for a couple things
# - tests passed?
# - we are the most recent commit on master?
if: ${{github.event.workflow_run.conclusion == 'success' &&
github.event.workflow_run.head_sha == github.sha}}
steps:
- uses: actions/checkout@v2
with:
ref: ${{github.event.workflow_run.head_sha}}
# need workflow access since we push branches
# containing workflows
token: ${{secrets.BOT_TOKEN}}
# need all tags
fetch-depth: 0
# try to get results from tests
- uses: dawidd6/action-download-artifact@v2
continue-on-error: true
with:
workflow: ${{github.event.workflow_run.name}}
run_id: ${{github.event.workflow_run.id}}
name: results
path: results
- name: find-version
run: |
# rip version from lfs.h
LFS_VERSION="$(grep -o '^#define LFS_VERSION .*$' lfs.h \
| awk '{print $3}')"
LFS_VERSION_MAJOR="$((0xffff & ($LFS_VERSION >> 16)))"
LFS_VERSION_MINOR="$((0xffff & ($LFS_VERSION >> 0)))"
# find a new patch version based on what we find in our tags
LFS_VERSION_PATCH="$( \
( git describe --tags --abbrev=0 \
--match="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.*" \
|| echo 'v0.0.-1' ) \
| awk -F '.' '{print $3+1}')"
# found new version
LFS_VERSION="v$LFS_VERSION_MAJOR`
`.$LFS_VERSION_MINOR`
`.$LFS_VERSION_PATCH"
echo "LFS_VERSION=$LFS_VERSION"
echo "LFS_VERSION=$LFS_VERSION" >> $GITHUB_ENV
echo "LFS_VERSION_MAJOR=$LFS_VERSION_MAJOR" >> $GITHUB_ENV
echo "LFS_VERSION_MINOR=$LFS_VERSION_MINOR" >> $GITHUB_ENV
echo "LFS_VERSION_PATCH=$LFS_VERSION_PATCH" >> $GITHUB_ENV
# try to find previous version?
- name: find-prev-version
continue-on-error: true
run: |
LFS_PREV_VERSION="$(git describe --tags --abbrev=0 --match 'v*')"
echo "LFS_PREV_VERSION=$LFS_PREV_VERSION"
echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" >> $GITHUB_ENV
# try to find results from tests
- name: collect-results
run: |
# previous results to compare against?
[ -n "$LFS_PREV_VERSION" ] && curl -sS \
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/`
`status/$LFS_PREV_VERSION?per_page=100" \
| jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
>> prev-results.json \
|| true
# build table for GitHub
echo "<table>" >> results.txt
echo "<thead>" >> results.txt
echo "<tr>" >> results.txt
echo "<th align=left>Configuration</th>" >> results.txt
for r in Code Stack Structs Coverage
do
echo "<th align=right>$r</th>" >> results.txt
done
echo "</tr>" >> results.txt
echo "</thead>" >> results.txt
echo "<tbody>" >> results.txt
for c in "" readonly threadsafe migrate error-asserts
do
echo "<tr>" >> results.txt
c_or_default=${c:-default}
echo "<td align=left>${c_or_default^}</td>" >> results.txt
for r in code stack structs
do
# per-config results
echo "<td align=right>" >> results.txt
[ -e results/thumb${c:+-$c}.csv ] && ( \
export PREV="$(jq -re '
select(.context == "'"results (thumb${c:+, $c}) / $r"'").description
| capture("(?<result>[0-9∞]+)").result' \
prev-results.json || echo 0)"
./scripts/summary.py results/thumb${c:+-$c}.csv -f $r -Y | awk '
NR==2 {printf "%s B",$2}
NR==2 && ENVIRON["PREV"]+0 != 0 {
printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
NR==2 {printf "\n"}' \
| sed -e 's/ /\&nbsp;/g' \
>> results.txt)
echo "</td>" >> results.txt
done
# coverage results
if [ -z $c ]
then
echo "<td rowspan=0 align=right>" >> results.txt
[ -e results/coverage.csv ] && ( \
export PREV="$(jq -re '
select(.context == "results / coverage").description
| capture("(?<result>[0-9\\.]+)").result' \
prev-results.json || echo 0)"
./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
NR==2 {printf "%.1f%% of %d lines",$4,$3}
NR==2 && ENVIRON["PREV"]+0 != 0 {
printf " (%+.1f%%)",$4-ENVIRON["PREV"]}
NR==2 {printf "\n"}' \
| sed -e 's/ /\&nbsp;/g' \
>> results.txt)
echo "</td>" >> results.txt
fi
echo "</tr>" >> results.txt
done
echo "</tbody>" >> results.txt
echo "</table>" >> results.txt
cat results.txt
# find changes from history
- name: collect-changes
run: |
[ -n "$LFS_PREV_VERSION" ] || exit 0
# use explicit link to github commit so that release notes can
# be copied elsewhere
git log "$LFS_PREV_VERSION.." \
--grep='^Merge' --invert-grep \
--format="format:[\`%h\`](`
`https://github.com/$GITHUB_REPOSITORY/commit/%h) %s" \
> changes.txt
echo "CHANGES:"
cat changes.txt
# create and update major branches (vN and vN-prefix)
- name: create-major-branches
run: |
# create major branch
git branch "v$LFS_VERSION_MAJOR" HEAD
# create major prefix branch
git config user.name ${{secrets.BOT_USER}}
git config user.email ${{secrets.BOT_EMAIL}}
git fetch "https://github.com/$GITHUB_REPOSITORY.git" \
"v$LFS_VERSION_MAJOR-prefix" || true
./scripts/prefix.py "lfs$LFS_VERSION_MAJOR"
git branch "v$LFS_VERSION_MAJOR-prefix" $( \
git commit-tree $(git write-tree) \
$(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
-p HEAD \
-m "Generated v$LFS_VERSION_MAJOR prefixes")
git reset --hard
# push!
git push --atomic origin \
"v$LFS_VERSION_MAJOR" \
"v$LFS_VERSION_MAJOR-prefix"
# build release notes
- name: create-release
run: |
# create release and patch version tag (vN.N.N)
# only draft if not a patch release
[ -e results.txt ] && export RESULTS="$(cat results.txt)"
[ -e changes.txt ] && export CHANGES="$(cat changes.txt)"
curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \
-d "$(jq -n '{
tag_name: env.LFS_VERSION,
name: env.LFS_VERSION | rtrimstr(".0"),
target_commitish: "${{github.event.workflow_run.head_sha}}",
draft: env.LFS_VERSION | endswith(".0"),
body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \
| tee /dev/stderr)"


@@ -1,55 +0,0 @@
name: status
on:
workflow_run:
workflows: [test]
types: [completed]
jobs:
status:
runs-on: ubuntu-20.04
steps:
# custom statuses?
- uses: dawidd6/action-download-artifact@v2
continue-on-error: true
with:
workflow: ${{github.event.workflow_run.name}}
run_id: ${{github.event.workflow_run.id}}
name: status
path: status
- name: update-status
continue-on-error: true
run: |
ls status
for s in $(shopt -s nullglob ; echo status/*.json)
do
# parse requested status
export STATE="$(jq -er '.state' $s)"
export CONTEXT="$(jq -er '.context' $s)"
export DESCRIPTION="$(jq -er '.description' $s)"
# help lookup URL for job/steps because GitHub makes
# it VERY HARD to link to specific jobs
export TARGET_URL="$(
jq -er '.target_url // empty' $s || (
export TARGET_JOB="$(jq -er '.target_job' $s)"
export TARGET_STEP="$(jq -er '.target_step // ""' $s)"
curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/`
`${{github.event.workflow_run.id}}/jobs" \
| jq -er '.jobs[]
| select(.name == env.TARGET_JOB)
| .html_url
+ "?check_suite_focus=true"
+ ((.steps[]
| select(.name == env.TARGET_STEP)
| "#step:\(.number):0") // "")'))"
# update status
curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/`
`${{github.event.workflow_run.head_sha}}" \
-d "$(jq -n '{
state: env.STATE,
context: env.CONTEXT,
description: env.DESCRIPTION,
target_url: env.TARGET_URL}' \
| tee /dev/stderr)"
done


@@ -1,472 +0,0 @@
name: test
on: [push, pull_request]
env:
CFLAGS: -Werror
MAKEFLAGS: -j
jobs:
# run tests
test:
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
arch: [x86_64, thumb, mips, powerpc]
steps:
- uses: actions/checkout@v2
- name: install
run: |
# need a few additional tools
#
# note this includes gcc-10, which is required for -fcallgraph-info=su
sudo apt-get update -qq
sudo apt-get install -qq gcc-10 python3 python3-pip lcov
sudo pip3 install toml
echo "CC=gcc-10" >> $GITHUB_ENV
gcc-10 --version
lcov --version
python3 --version
# need newer lcov version for gcc-10
#sudo apt-get remove lcov
#wget https://launchpad.net/ubuntu/+archive/primary/+files/lcov_1.15-1_all.deb
#sudo apt install ./lcov_1.15-1_all.deb
#lcov --version
#which lcov
#ls -lha /usr/bin/lcov
wget https://github.com/linux-test-project/lcov/releases/download/v1.15/lcov-1.15.tar.gz
tar xf lcov-1.15.tar.gz
sudo make -C lcov-1.15 install
# setup a ram-backed disk to speed up reentrant tests
mkdir disks
sudo mount -t tmpfs -o size=100m tmpfs disks
TESTFLAGS="$TESTFLAGS --disk=disks/disk"
# collect coverage
mkdir -p coverage
TESTFLAGS="$TESTFLAGS --coverage=`
`coverage/${{github.job}}-${{matrix.arch}}.info"
echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV
# cross-compile with ARM Thumb (32-bit, little-endian)
- name: install-thumb
if: ${{matrix.arch == 'thumb'}}
run: |
sudo apt-get install -qq \
gcc-10-arm-linux-gnueabi \
libc6-dev-armel-cross \
qemu-user
echo "CC=arm-linux-gnueabi-gcc-10 -mthumb --static" >> $GITHUB_ENV
echo "EXEC=qemu-arm" >> $GITHUB_ENV
arm-linux-gnueabi-gcc-10 --version
qemu-arm -version
# cross-compile with MIPS (32-bit, big-endian)
- name: install-mips
if: ${{matrix.arch == 'mips'}}
run: |
sudo apt-get install -qq \
gcc-10-mips-linux-gnu \
libc6-dev-mips-cross \
qemu-user
echo "CC=mips-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
echo "EXEC=qemu-mips" >> $GITHUB_ENV
mips-linux-gnu-gcc-10 --version
qemu-mips -version
# cross-compile with PowerPC (32-bit, big-endian)
- name: install-powerpc
if: ${{matrix.arch == 'powerpc'}}
run: |
sudo apt-get install -qq \
gcc-10-powerpc-linux-gnu \
libc6-dev-powerpc-cross \
qemu-user
echo "CC=powerpc-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
echo "EXEC=qemu-ppc" >> $GITHUB_ENV
powerpc-linux-gnu-gcc-10 --version
qemu-ppc -version
# make sure example can at least compile
- name: test-example
run: |
sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
make all CFLAGS+=" \
-Duser_provided_block_device_read=NULL \
-Duser_provided_block_device_prog=NULL \
-Duser_provided_block_device_erase=NULL \
-Duser_provided_block_device_sync=NULL \
-include stdio.h"
rm test.c
# test configurations
# normal+reentrant tests
- name: test-default
run: |
make clean
make test TESTFLAGS+="-nrk"
# NOR flash: read/prog = 1 block = 4KiB
- name: test-nor
run: |
make clean
make test TESTFLAGS+="-nrk \
-DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
# SD/eMMC: read/prog = 512 block = 512
- name: test-emmc
run: |
make clean
make test TESTFLAGS+="-nrk \
-DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
# NAND flash: read/prog = 4KiB block = 32KiB
- name: test-nand
run: |
make clean
make test TESTFLAGS+="-nrk \
-DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
# other extreme geometries that are useful for various corner cases
- name: test-no-intrinsics
run: |
make clean
make test TESTFLAGS+="-nrk \
-DLFS_NO_INTRINSICS"
- name: test-byte-writes
# it just takes too long to test byte-level writes when in qemu,
# should be plenty covered by the other configurations
if: ${{matrix.arch == 'x86_64'}}
run: |
make clean
make test TESTFLAGS+="-nrk \
-DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
- name: test-block-cycles
run: |
make clean
make test TESTFLAGS+="-nrk \
-DLFS_BLOCK_CYCLES=1"
- name: test-odd-block-count
run: |
make clean
make test TESTFLAGS+="-nrk \
-DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
- name: test-odd-block-size
run: |
make clean
make test TESTFLAGS+="-nrk \
-DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
# upload coverage for later coverage
- name: upload-coverage
uses: actions/upload-artifact@v2
with:
name: coverage
path: coverage
retention-days: 1
# update results
- name: results
run: |
mkdir -p results
make clean
make lfs.csv \
CFLAGS+=" \
-DLFS_NO_ASSERT \
-DLFS_NO_DEBUG \
-DLFS_NO_WARN \
-DLFS_NO_ERROR"
cp lfs.csv results/${{matrix.arch}}.csv
./scripts/summary.py results/${{matrix.arch}}.csv
- name: results-readonly
run: |
mkdir -p results
make clean
make lfs.csv \
CFLAGS+=" \
-DLFS_NO_ASSERT \
-DLFS_NO_DEBUG \
-DLFS_NO_WARN \
-DLFS_NO_ERROR \
-DLFS_READONLY"
cp lfs.csv results/${{matrix.arch}}-readonly.csv
./scripts/summary.py results/${{matrix.arch}}-readonly.csv
- name: results-threadsafe
run: |
mkdir -p results
make clean
make lfs.csv \
CFLAGS+=" \
-DLFS_NO_ASSERT \
-DLFS_NO_DEBUG \
-DLFS_NO_WARN \
-DLFS_NO_ERROR \
-DLFS_THREADSAFE"
cp lfs.csv results/${{matrix.arch}}-threadsafe.csv
./scripts/summary.py results/${{matrix.arch}}-threadsafe.csv
- name: results-migrate
run: |
mkdir -p results
make clean
make lfs.csv \
CFLAGS+=" \
-DLFS_NO_ASSERT \
-DLFS_NO_DEBUG \
-DLFS_NO_WARN \
-DLFS_NO_ERROR \
-DLFS_MIGRATE"
cp lfs.csv results/${{matrix.arch}}-migrate.csv
./scripts/summary.py results/${{matrix.arch}}-migrate.csv
- name: results-error-asserts
run: |
mkdir -p results
make clean
make lfs.csv \
CFLAGS+=" \
-DLFS_NO_DEBUG \
-DLFS_NO_WARN \
-DLFS_NO_ERROR \
-D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'"
cp lfs.csv results/${{matrix.arch}}-error-asserts.csv
./scripts/summary.py results/${{matrix.arch}}-error-asserts.csv
- name: upload-results
uses: actions/upload-artifact@v2
with:
name: results
path: results
# create statuses with results
- name: collect-status
run: |
mkdir -p status
for f in $(shopt -s nullglob ; echo results/*.csv)
do
export STEP="results$(
echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p')"
for r in code stack structs
do
export CONTEXT="results (${{matrix.arch}}$(
echo $f | sed -n 's/[^-]*-\(.*\).csv/, \1/p')) / $r"
export PREV="$(curl -sS \
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
| jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
| select(.context == env.CONTEXT).description
| capture("(?<result>[0-9∞]+)").result' \
|| echo 0)"
export DESCRIPTION="$(./scripts/summary.py $f -f $r -Y | awk '
NR==2 {printf "%s B",$2}
NR==2 && ENVIRON["PREV"]+0 != 0 {
printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
jq -n '{
state: "success",
context: env.CONTEXT,
description: env.DESCRIPTION,
target_job: "${{github.job}} (${{matrix.arch}})",
target_step: env.STEP}' \
| tee status/$r-${{matrix.arch}}$(
echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p').json
done
done
- name: upload-status
uses: actions/upload-artifact@v2
with:
name: status
path: status
retention-days: 1
# run under Valgrind to check for memory errors
valgrind:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
- name: install
run: |
# need toml, also pip3 isn't installed by default?
sudo apt-get update -qq
sudo apt-get install -qq python3 python3-pip
sudo pip3 install toml
- name: install-valgrind
run: |
sudo apt-get update -qq
sudo apt-get install -qq valgrind
valgrind --version
# normal tests, we don't need to test all geometries
- name: test-valgrind
run: make test TESTFLAGS+="-k --valgrind"
# self-host with littlefs-fuse for a fuzz-like test
fuse:
runs-on: ubuntu-20.04
if: ${{!endsWith(github.ref, '-prefix')}}
steps:
- uses: actions/checkout@v2
- name: install
run: |
# need toml, also pip3 isn't installed by default?
sudo apt-get update -qq
sudo apt-get install -qq python3 python3-pip libfuse-dev
sudo pip3 install toml
fusermount -V
gcc --version
- uses: actions/checkout@v2
with:
repository: littlefs-project/littlefs-fuse
ref: v2
path: littlefs-fuse
- name: setup
run: |
# copy our new version into littlefs-fuse
rm -rf littlefs-fuse/littlefs/*
cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
# setup disk for littlefs-fuse
mkdir mount
LOOP=$(sudo losetup -f)
sudo chmod a+rw $LOOP
dd if=/dev/zero bs=512 count=128K of=disk
losetup $LOOP disk
echo "LOOP=$LOOP" >> $GITHUB_ENV
- name: test
run: |
# self-host test
make -C littlefs-fuse
littlefs-fuse/lfs --format $LOOP
littlefs-fuse/lfs $LOOP mount
ls mount
mkdir mount/littlefs
cp -r $(git ls-tree --name-only HEAD) mount/littlefs
cd mount/littlefs
stat .
ls -flh
make -B test
# test migration using littlefs-fuse
migrate:
runs-on: ubuntu-20.04
if: ${{!endsWith(github.ref, '-prefix')}}
steps:
- uses: actions/checkout@v2
- name: install
run: |
# need toml, also pip3 isn't installed by default?
sudo apt-get update -qq
sudo apt-get install -qq python3 python3-pip libfuse-dev
sudo pip3 install toml
fusermount -V
gcc --version
- uses: actions/checkout@v2
with:
repository: littlefs-project/littlefs-fuse
ref: v2
path: v2
- uses: actions/checkout@v2
with:
repository: littlefs-project/littlefs-fuse
ref: v1
path: v1
- name: setup
run: |
# copy our new version into littlefs-fuse
rm -rf v2/littlefs/*
cp -r $(git ls-tree --name-only HEAD) v2/littlefs
# setup disk for littlefs-fuse
mkdir mount
LOOP=$(sudo losetup -f)
sudo chmod a+rw $LOOP
dd if=/dev/zero bs=512 count=128K of=disk
losetup $LOOP disk
echo "LOOP=$LOOP" >> $GITHUB_ENV
- name: test
run: |
# compile v1 and v2
make -C v1
make -C v2
# run self-host test with v1
v1/lfs --format $LOOP
v1/lfs $LOOP mount
ls mount
mkdir mount/littlefs
cp -r $(git ls-tree --name-only HEAD) mount/littlefs
cd mount/littlefs
stat .
ls -flh
make -B test
# attempt to migrate
cd ../..
fusermount -u mount
v2/lfs --migrate $LOOP
v2/lfs $LOOP mount
# run self-host test with v2 right where we left off
ls mount
cd mount/littlefs
stat .
ls -flh
make -B test
# collect coverage info
coverage:
runs-on: ubuntu-20.04
needs: [test]
steps:
- uses: actions/checkout@v2
- name: install
run: |
sudo apt-get update -qq
sudo apt-get install -qq python3 python3-pip lcov
sudo pip3 install toml
# yes we continue-on-error nearly every step, continue-on-error
# at job level apparently still marks a job as failed, which isn't
# what we want
- uses: actions/download-artifact@v2
continue-on-error: true
with:
name: coverage
path: coverage
- name: results-coverage
continue-on-error: true
run: |
mkdir -p results
lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \
-o results/coverage.info
./scripts/coverage.py results/coverage.info -o results/coverage.csv
- name: upload-results
uses: actions/upload-artifact@v2
with:
name: results
path: results
- name: collect-status
run: |
mkdir -p status
[ -e results/coverage.csv ] || exit 0
export STEP="results-coverage"
export CONTEXT="results / coverage"
export PREV="$(curl -sS \
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
| jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
| select(.context == env.CONTEXT).description
| capture("(?<result>[0-9\\.]+)").result' \
|| echo 0)"
export DESCRIPTION="$(
./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
NR==2 {printf "%.1f%% of %d lines",$4,$3}
NR==2 && ENVIRON["PREV"]+0 != 0 {
printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
jq -n '{
state: "success",
context: env.CONTEXT,
description: env.DESCRIPTION,
target_job: "${{github.job}}",
target_step: env.STEP}' \
| tee status/coverage.json
- name: upload-status
uses: actions/upload-artifact@v2
with:
name: status
path: status
retention-days: 1

.gitignore

@@ -2,13 +2,8 @@
*.o
*.d
*.a
*.ci
*.csv
# Testing things
blocks/
lfs
test.c
tests/*.toml.*
scripts/__pycache__
.gdb_history

.travis.yml

@@ -0,0 +1,321 @@
# Environment variables
env:
global:
- CFLAGS=-Werror
# Common test script
script:
# make sure example can at least compile
- sed -n '/``` c/,/```/{/```/d; p;}' README.md > test.c &&
make all CFLAGS+="
-Duser_provided_block_device_read=NULL
-Duser_provided_block_device_prog=NULL
-Duser_provided_block_device_erase=NULL
-Duser_provided_block_device_sync=NULL
-include stdio.h"
# run tests
- make test QUIET=1
# run tests with a few different configurations
- make test QUIET=1 CFLAGS+="-DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=4"
- make test QUIET=1 CFLAGS+="-DLFS_READ_SIZE=512 -DLFS_CACHE_SIZE=512 -DLFS_BLOCK_CYCLES=16"
- make test QUIET=1 CFLAGS+="-DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
- make clean test QUIET=1 CFLAGS+="-DLFS_INLINE_MAX=0"
- make clean test QUIET=1 CFLAGS+="-DLFS_EMUBD_ERASE_VALUE=0xff"
- make clean test QUIET=1 CFLAGS+="-DLFS_NO_INTRINSICS"
# additional configurations that don't support all tests (this should be
# fixed but at the moment it is what it is)
- make test_files QUIET=1
CFLAGS+="-DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
- make test_files QUIET=1
CFLAGS+="-DLFS_READ_SIZE=\(2*1024\) -DLFS_BLOCK_SIZE=\(64*1024\)"
- make test_files QUIET=1
CFLAGS+="-DLFS_READ_SIZE=\(8*1024\) -DLFS_BLOCK_SIZE=\(64*1024\)"
- make test_files QUIET=1
CFLAGS+="-DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
# compile and find the code size with the smallest configuration
- make clean size
OBJ="$(ls lfs*.o | tr '\n' ' ')"
CFLAGS+="-DLFS_NO_ASSERT -DLFS_NO_DEBUG -DLFS_NO_WARN -DLFS_NO_ERROR"
| tee sizes
# update status if we succeeded, compare with master if possible
- |
if [ "$TRAVIS_TEST_RESULT" -eq 0 ]
then
CURR=$(tail -n1 sizes | awk '{print $1}')
PREV=$(curl -u "$GEKY_BOT_STATUSES" https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \
| jq -re "select(.sha != \"$TRAVIS_COMMIT\")
| .statuses[] | select(.context == \"$STAGE/$NAME\").description
| capture(\"code size is (?<size>[0-9]+)\").size" \
|| echo 0)
STATUS="Passed, code size is ${CURR}B"
if [ "$PREV" -ne 0 ]
then
STATUS="$STATUS ($(python -c "print '%+.2f' % (100*($CURR-$PREV)/$PREV.0)")%)"
fi
fi
# CI matrix
jobs:
include:
# native testing
- stage: test
env:
- STAGE=test
- NAME=littlefs-x86
# cross-compile with ARM (thumb mode)
- stage: test
env:
- STAGE=test
- NAME=littlefs-arm
- CC="arm-linux-gnueabi-gcc --static -mthumb"
- EXEC="qemu-arm"
install:
- sudo apt-get install
gcc-arm-linux-gnueabi
libc6-dev-armel-cross
qemu-user
- arm-linux-gnueabi-gcc --version
- qemu-arm -version
# cross-compile with PowerPC
- stage: test
env:
- STAGE=test
- NAME=littlefs-powerpc
- CC="powerpc-linux-gnu-gcc --static"
- EXEC="qemu-ppc"
install:
- sudo apt-get install
gcc-powerpc-linux-gnu
libc6-dev-powerpc-cross
qemu-user
- powerpc-linux-gnu-gcc --version
- qemu-ppc -version
# cross-compile with MIPS
- stage: test
env:
- STAGE=test
- NAME=littlefs-mips
- CC="mips-linux-gnu-gcc --static"
- EXEC="qemu-mips"
install:
- sudo apt-get install
gcc-mips-linux-gnu
libc6-dev-mips-cross
qemu-user
- mips-linux-gnu-gcc --version
- qemu-mips -version
# self-host with littlefs-fuse for fuzz test
- stage: test
env:
- STAGE=test
- NAME=littlefs-fuse
if: branch !~ -prefix$
install:
- sudo apt-get install libfuse-dev
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2
- fusermount -V
- gcc --version
before_script:
# setup disk for littlefs-fuse
- rm -rf littlefs-fuse/littlefs/*
- cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
- mkdir mount
- sudo chmod a+rw /dev/loop0
- dd if=/dev/zero bs=512 count=4096 of=disk
- losetup /dev/loop0 disk
script:
# self-host test
- make -C littlefs-fuse
- littlefs-fuse/lfs --format /dev/loop0
- littlefs-fuse/lfs /dev/loop0 mount
- ls mount
- mkdir mount/littlefs
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
- cd mount/littlefs
- stat .
- ls -flh
- make -B test_dirs test_files QUIET=1
# self-host with littlefs-fuse for fuzz test
- stage: test
env:
- STAGE=test
- NAME=littlefs-migration
if: branch !~ -prefix$
install:
- sudo apt-get install libfuse-dev
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 v2
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v1 v1
- fusermount -V
- gcc --version
before_script:
# setup disk for littlefs-fuse
- rm -rf v2/littlefs/*
- cp -r $(git ls-tree --name-only HEAD) v2/littlefs
- mkdir mount
- sudo chmod a+rw /dev/loop0
- dd if=/dev/zero bs=512 count=4096 of=disk
- losetup /dev/loop0 disk
script:
# compile v1 and v2
- make -C v1
- make -C v2
# run self-host test with v1
- v1/lfs --format /dev/loop0
- v1/lfs /dev/loop0 mount
- ls mount
- mkdir mount/littlefs
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
- cd mount/littlefs
- stat .
- ls -flh
- make -B test_dirs test_files QUIET=1
# attempt to migrate
- cd ../..
- fusermount -u mount
- v2/lfs --migrate /dev/loop0
- v2/lfs /dev/loop0 mount
# run self-host test with v2 right where we left off
- ls mount
- cd mount/littlefs
- stat .
- ls -flh
- make -B test_dirs test_files QUIET=1
# Automatically create releases
- stage: deploy
env:
- STAGE=deploy
- NAME=deploy
script:
- |
bash << 'SCRIPT'
set -ev
# Find version defined in lfs.h
LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3)
LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16)))
LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >> 0)))
# Grab latests patch from repo tags, default to 0, needs finagling
# to get past github's pagination api
PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.
PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \
| sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \
|| echo $PREV_URL)
LFS_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \
| jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g")
.captures[].string | tonumber) | max + 1' \
|| echo 0)
# We have our new version
LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH"
echo "VERSION $LFS_VERSION"
# Check that we're the most recent commit
CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \
https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \
| jq -re '.sha')
[ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0
# Create major branch
git branch v$LFS_VERSION_MAJOR HEAD
# Create major prefix branch
git config user.name "geky bot"
git config user.email "bot@geky.net"
git fetch https://github.com/$TRAVIS_REPO_SLUG.git \
--depth=50 v$LFS_VERSION_MAJOR-prefix || true
./scripts/prefix.py lfs$LFS_VERSION_MAJOR
git branch v$LFS_VERSION_MAJOR-prefix $( \
git commit-tree $(git write-tree) \
$(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
-p HEAD \
-m "Generated v$LFS_VERSION_MAJOR prefixes")
git reset --hard
# Update major version branches (vN and vN-prefix)
git push https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \
v$LFS_VERSION_MAJOR \
v$LFS_VERSION_MAJOR-prefix
# Create patch version tag (vN.N.N)
curl -f -u "$GEKY_BOT_RELEASES" -X POST \
https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs \
-d "{
\"ref\": \"refs/tags/$LFS_VERSION\",
\"sha\": \"$TRAVIS_COMMIT\"
}"
# Create minor release?
[[ "$LFS_VERSION" == *.0 ]] || exit 0
# Build release notes
PREV=$(git tag --sort=-v:refname -l "v*.0" | head -1)
if [ ! -z "$PREV" ]
then
echo "PREV $PREV"
CHANGES=$'### Changes\n\n'$( \
git log --oneline $PREV.. --grep='^Merge' --invert-grep)
printf "CHANGES\n%s\n\n" "$CHANGES"
fi
# Create the release
curl -f -u "$GEKY_BOT_RELEASES" -X POST \
https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \
-d "{
\"tag_name\": \"$LFS_VERSION\",
\"name\": \"${LFS_VERSION%.0}\",
\"draft\": true,
\"body\": $(jq -sR '.' <<< "$CHANGES")
}" #"
SCRIPT
# Manage statuses
before_install:
- |
curl -u "$GEKY_BOT_STATUSES" -X POST \
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-d "{
\"context\": \"$STAGE/$NAME\",
\"state\": \"pending\",
\"description\": \"${STATUS:-In progress}\",
\"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\"
}"
after_failure:
- |
curl -u "$GEKY_BOT_STATUSES" -X POST \
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-d "{
\"context\": \"$STAGE/$NAME\",
\"state\": \"failure\",
\"description\": \"${STATUS:-Failed}\",
\"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\"
}"
after_success:
- |
curl -u "$GEKY_BOT_STATUSES" -X POST \
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-d "{
\"context\": \"$STAGE/$NAME\",
\"state\": \"success\",
\"description\": \"${STATUS:-Passed}\",
\"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\"
}"
# Job control
stages:
- name: test
- name: deploy
if: branch = master AND type = push


@@ -254,7 +254,7 @@ have weaknesses that limit their usefulness. But if we merge the two they can
mutually solve each other's limitations.
This is the idea behind littlefs. At the sub-block level, littlefs is built
out of small, two block logs that provide atomic updates to metadata anywhere
out of small, two blocks logs that provide atomic updates to metadata anywhere
on the filesystem. At the super-block level, littlefs is a CObW tree of blocks
that can be evicted on demand.
@@ -676,7 +676,7 @@ block, this cost is fairly reasonable.
---
This is a new data structure, so we still have several questions. What is the
storage overhead? Can the number of pointers exceed the size of a block? How do
storage overage? Can the number of pointers exceed the size of a block? How do
we store a CTZ skip-list in our metadata pairs?
To find the storage overhead, we can look at the data structure as multiple
@@ -742,8 +742,8 @@ where:
2. popcount(![x]) = the number of bits that are 1 in ![x]
Initial tests of this surprising property seem to hold. As ![n] approaches
infinity, we end up with an average overhead of 2 pointers, which matches our
assumption from earlier. During iteration, the popcount function seems to
infinity, we end up with an average overhead of 2 pointers, which matches what
our assumption from earlier. During iteration, the popcount function seems to
handle deviations from this average. Of course, just to make sure I wrote a
quick script that verified this property for all 32-bit integers.
@@ -767,7 +767,7 @@ overflow, but we can avoid this by rearranging the equation a bit:
![off = N - (B-2w/8)n - (w/8)popcount(n)][ctz-formula7]
Our solution requires quite a bit of math, but computers are very good at math.
Our solution requires quite a bit of math, but computer are very good at math.
Now we can find both our block index and offset from a size in _O(1)_, letting
us store CTZ skip-lists with only a pointer and size.
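(For a concrete feel for that O(1) lookup, here is a small sketch of the index/offset calculation, assuming 32-bit (4-byte) pointers; it mirrors the formula above rather than quoting littlefs's exact routine.)

``` c
#include <stdint.h>

// count set bits; a real build would use a compiler builtin where available
static uint32_t popc(uint32_t x) {
    uint32_t count = 0;
    for (; x; x &= x - 1) {
        count++;
    }
    return count;
}

// Map a byte offset into a CTZ skip-list to a block index, and rewrite
// *off to the offset within that block. b is the bytes of data that fit
// in a block after reserving space for two 4-byte pointers.
static uint32_t ctz_index(uint32_t block_size, uint32_t *off) {
    uint32_t size = *off;
    uint32_t b = block_size - 2*4;
    uint32_t i = size / b;
    if (i == 0) {
        return 0;  // still in the first block, no pointers to skip
    }
    // correct for the extra pointers stored in earlier blocks
    i = (size - 4*(popc(i-1) + 2)) / b;
    *off = size - b*i - 4*popc(i);
    return i;
}
```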
@@ -850,7 +850,7 @@ nearly every write to the filesystem.
Normally, block allocation involves some sort of free list or bitmap stored on
the filesystem that is updated with free blocks. However, with power
resilience, keeping these structures consistent becomes difficult. It doesn't
resilience, keeping these structure consistent becomes difficult. It doesn't
help that any mistake in updating these structures can result in lost blocks
that are impossible to recover.
@@ -894,9 +894,9 @@ high-risk error conditions.
---
Our block allocator needs to find free blocks efficiently. You could traverse
through every block on storage and check each one against our filesystem tree;
however, the runtime would be abhorrent. We need to somehow collect multiple
blocks per traversal.
through every block on storage and check each one against our filesystem tree,
however the runtime would be abhorrent. We need to somehow collect multiple
blocks each traversal.
Looking at existing designs, some larger filesystems that use a similar "drop
it on the floor" strategy store a bitmap of the entire storage in [RAM]. This
@@ -920,8 +920,8 @@ a brute force traversal. Instead of a bitmap the size of storage, we keep track
of a small, fixed-size bitmap called the lookahead buffer. During block
allocation, we take blocks from the lookahead buffer. If the lookahead buffer
is empty, we scan the filesystem for more free blocks, populating our lookahead
buffer. In each scan we use an increasing offset, circling the storage as
blocks are allocated.
buffer. Each scan we use an increasing offset, circling the storage as blocks
are allocated.
Here's what it might look like to allocate 4 blocks on a decently busy
filesystem with a 32 bit lookahead and a total of 128 blocks (512 KiB
@@ -950,7 +950,7 @@ alloc = 112 lookahead: ffff8000
```
This lookahead approach has a runtime complexity of _O(n&sup2;)_ to completely
scan storage; however, bitmaps are surprisingly compact, and in practice only
scan storage, however, bitmaps are surprisingly compact, and in practice only
one or two passes are usually needed to find free blocks. Additionally, the
performance of the allocator can be optimized by adjusting the block size or
size of the lookahead buffer, trading either write granularity or RAM for
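(A loose sketch of the lookahead scheme described above, with a fixed 32-block window and a hypothetical mark_in_use traversal helper standing in for the filesystem scan; it is meant to show the shape of the algorithm, not littlefs's actual allocator.)

``` c
#include <stdint.h>

#define BLOCK_COUNT 128   // total blocks on disk (matches the example above)
#define LOOKAHEAD   32    // blocks covered by the lookahead bitmap

// assumed helper: traverse the filesystem tree and set the bit for every
// in-use block that falls inside [start, start+count) of the window
extern void mark_in_use(uint32_t start, uint32_t count, uint32_t *bitmap);

static uint32_t lookahead;     // bit i set = block window_start+i is in use
static uint32_t window_start;  // first block covered by the window
                               // (assume an initial scan populated these)

static int alloc_block(uint32_t *block) {
    // at most enough window advances to circle the whole device once
    for (uint32_t scanned = 0; scanned <= BLOCK_COUNT; scanned += LOOKAHEAD) {
        // hand out any free bit in the current window
        for (uint32_t i = 0; i < LOOKAHEAD; i++) {
            if (!(lookahead & (1u << i))) {
                lookahead |= 1u << i;
                *block = (window_start + i) % BLOCK_COUNT;
                return 0;
            }
        }
        // window exhausted: slide it forward and rescan the filesystem
        window_start = (window_start + LOOKAHEAD) % BLOCK_COUNT;
        lookahead = 0;
        mark_in_use(window_start, LOOKAHEAD, &lookahead);
    }
    return -1; // no free blocks anywhere, storage is full
}
```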
@@ -1173,9 +1173,9 @@ We may find that the new block is also bad, but hopefully after repeating this
cycle we'll eventually find a new block where a write succeeds. If we don't,
that means that all blocks in our storage are bad, and we've reached the end of
our device's usable life. At this point, littlefs will return an "out of space"
error. This is technically true, as there are no more good blocks, but as an
added benefit it also matches the error condition expected by users of
dynamically sized data.
error, which is technically true, there are no more good blocks, but as an
added benefit also matches the error condition expected by users of dynamically
sized data.
---
@@ -1187,7 +1187,7 @@ original data even after it has been corrupted. One such mechanism for this is
ECC is an extension to the idea of a checksum. Where a checksum such as CRC can
detect that an error has occurred in the data, ECC can detect and actually
correct some amount of errors. However, there is a limit to how many errors ECC
can detect: the [Hamming bound][wikipedia-hamming-bound]. As the number of
can detect, call the [Hamming bound][wikipedia-hamming-bound]. As the number of
errors approaches the Hamming bound, we may still be able to detect errors, but
can no longer fix the data. If we've reached this point the block is
unrecoverable.
@@ -1202,7 +1202,7 @@ chip itself.
In littlefs, ECC is entirely optional. Read errors can instead be prevented
proactively by wear leveling. But it's important to note that ECC can be used
at the block device level to modestly extend the life of a device. littlefs
respects any errors reported by the block device, allowing a block device to
respects any errors reported by the block device, allow a block device to
provide additional aggressive error detection.
---
@@ -1231,7 +1231,7 @@ Generally, wear leveling algorithms fall into one of two categories:
we need to consider all blocks, including blocks that already contain data.
As a tradeoff for code size and complexity, littlefs (currently) only provides
dynamic wear leveling. This is a best effort solution. Wear is not distributed
dynamic wear leveling. This is a best efforts solution. Wear is not distributed
perfectly, but it is distributed among the free blocks and greatly extends the
life of a device.
@@ -1378,7 +1378,7 @@ We can make several improvements. First, instead of giving each file its own
metadata pair, we can store multiple files in a single metadata pair. One way
to do this is to directly associate a directory with a metadata pair (or a
linked list of metadata pairs). This makes it easy for multiple files to share
the directory's metadata pair for logging and reduces the collective storage
the directory's metadata pair for logging and reduce the collective storage
overhead.
The strict binding of metadata pairs and directories also gives users
@@ -1816,12 +1816,12 @@ while manipulating the directory tree (foreshadowing!).
## The move problem
We have one last challenge: the move problem. Phrasing the problem is simple:
We have one last challenge. The move problem. Phrasing the problem is simple:
How do you atomically move a file between two directories?
In littlefs we can atomically commit to directories, but we can't create
an atomic commit that spans multiple directories. The filesystem must go
an atomic commit that span multiple directories. The filesystem must go
through a minimum of two distinct states to complete a move.
To make matters worse, file moves are a common form of synchronization for
@@ -1831,13 +1831,13 @@ atomic moves right.
So what can we do?
- We definitely can't just let power-loss result in duplicated or lost files.
This could easily break users' code and would only reveal itself in extreme
This could easily break user's code and would only reveal itself in extreme
cases. We were only able to be lazy about the threaded linked-list because
it isn't user facing and we can handle the corner cases internally.
- Some filesystems propagate COW operations up the tree until a common parent
is found. Unfortunately this interacts poorly with our threaded tree and
brings back the issue of upward propagation of wear.
- Some filesystems propagate COW operations up the tree until finding a common
parent. Unfortunately this interacts poorly with our threaded tree and brings
back the issue of upward propagation of wear.
- In a previous version of littlefs we tried to solve this problem by going
back and forth between the source and destination, marking and unmarking the
@@ -1852,7 +1852,7 @@ introduction of a mechanism called "global state".
---
Global state is a small set of state that can be updated from _any_ metadata
pair. Combining global state with metadata pairs' ability to update multiple
pair. Combining global state with metadata pair's ability to update multiple
entries in one commit gives us a powerful tool for crafting complex atomic
operations.
@@ -1910,7 +1910,7 @@ the filesystem is mounted.
You may have noticed that global state is very expensive. We keep a copy in
RAM and a delta in an unbounded number of metadata pairs. Even if we reset
the global state to its initial value, we can't easily clean up the deltas on
the global state to its initial value we can't easily clean up the deltas on
disk. For this reason, it's very important that we keep the size of global
state bounded and extremely small. But, even with a strict budget, global
state is incredibly valuable.

Makefile

@@ -1,173 +1,86 @@
ifdef BUILDDIR
# make sure BUILDDIR ends with a slash
override BUILDDIR := $(BUILDDIR)/
# bit of a hack, but we want to make sure BUILDDIR directory structure
# is correct before any commands
$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
$(BUILDDIR) \
$(BUILDDIR)bd \
$(BUILDDIR)tests))
endif
# overridable target/src/tools/flags/etc
TARGET = lfs.a
ifneq ($(wildcard test.c main.c),)
TARGET ?= $(BUILDDIR)lfs
else
TARGET ?= $(BUILDDIR)lfs.a
override TARGET = lfs
endif
CC ?= gcc
AR ?= ar
SIZE ?= size
CC ?= gcc
AR ?= ar
SIZE ?= size
CTAGS ?= ctags
NM ?= nm
OBJDUMP ?= objdump
LCOV ?= lcov
SRC += $(wildcard *.c emubd/*.c)
OBJ := $(SRC:.c=.o)
DEP := $(SRC:.c=.d)
ASM := $(SRC:.c=.s)
SRC ?= $(wildcard *.c)
OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
DEP := $(SRC:%.c=$(BUILDDIR)%.d)
ASM := $(SRC:%.c=$(BUILDDIR)%.s)
CGI := $(SRC:%.c=$(BUILDDIR)%.ci)
TEST := $(patsubst tests/%.sh,%,$(wildcard tests/test_*))
SHELL = /bin/bash -o pipefail
ifdef DEBUG
override CFLAGS += -O0
override CFLAGS += -O0 -g3
else
override CFLAGS += -Os
endif
ifdef WORD
override CFLAGS += -m$(WORD)
endif
ifdef TRACE
override CFLAGS += -DLFS_YES_TRACE
endif
override CFLAGS += -g3
override CFLAGS += -I.
override CFLAGS += -std=c99 -Wall -pedantic
override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef
ifdef VERBOSE
override TESTFLAGS += -v
override CALLSFLAGS += -v
override CODEFLAGS += -v
override DATAFLAGS += -v
override STACKFLAGS += -v
override STRUCTSFLAGS += -v
override COVERAGEFLAGS += -v
endif
ifdef EXEC
override TESTFLAGS += --exec="$(EXEC)"
endif
ifdef COVERAGE
override TESTFLAGS += --coverage
endif
ifdef BUILDDIR
override TESTFLAGS += --build-dir="$(BUILDDIR:/=)"
override CALLSFLAGS += --build-dir="$(BUILDDIR:/=)"
override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
override DATAFLAGS += --build-dir="$(BUILDDIR:/=)"
override STACKFLAGS += --build-dir="$(BUILDDIR:/=)"
override STRUCTSFLAGS += --build-dir="$(BUILDDIR:/=)"
override COVERAGEFLAGS += --build-dir="$(BUILDDIR:/=)"
endif
ifneq ($(NM),nm)
override CODEFLAGS += --nm-tool="$(NM)"
override DATAFLAGS += --nm-tool="$(NM)"
endif
ifneq ($(OBJDUMP),objdump)
override STRUCTSFLAGS += --objdump-tool="$(OBJDUMP)"
endif
override CFLAGS += -Wextra -Wshadow -Wjump-misses-init
# Remove missing-field-initializers because of GCC bug
override CFLAGS += -Wno-missing-field-initializers
# commands
.PHONY: all build
all build: $(TARGET)
all: $(TARGET)
.PHONY: asm
asm: $(ASM)
.PHONY: size
size: $(OBJ)
$(SIZE) -t $^
.PHONY: tags
tags:
$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC)
.PHONY: calls
calls: $(CGI)
./scripts/calls.py $^ $(CALLSFLAGS)
.PHONY: test
test:
./scripts/test.py $(TESTFLAGS)
.SECONDEXPANSION:
test%: tests/test$$(firstword $$(subst \#, ,%)).toml
./scripts/test.py $@ $(TESTFLAGS)
.PHONY: code
code: $(OBJ)
./scripts/code.py $^ -S $(CODEFLAGS)
.PHONY: data
data: $(OBJ)
./scripts/data.py $^ -S $(DATAFLAGS)
.PHONY: stack
stack: $(CGI)
./scripts/stack.py $^ -S $(STACKFLAGS)
.PHONY: structs
structs: $(OBJ)
./scripts/structs.py $^ -S $(STRUCTSFLAGS)
.PHONY: coverage
coverage:
./scripts/coverage.py $(BUILDDIR)tests/*.toml.info -s $(COVERAGEFLAGS)
.PHONY: summary
summary: $(BUILDDIR)lfs.csv
./scripts/summary.py -Y $^ $(SUMMARYFLAGS)
# rules
-include $(DEP)
.SUFFIXES:
test: \
test_format \
test_dirs \
test_files \
test_seek \
test_truncate \
test_entries \
test_interspersed \
test_alloc \
test_paths \
test_attrs \
test_move \
test_orphan \
test_corrupt
@rm test.c
test_%: tests/test_%.sh
$(BUILDDIR)lfs: $(OBJ)
ifdef QUIET
@./$< | sed -nu '/^[-=]/p'
else
./$<
endif
-include $(DEP)
lfs: $(OBJ)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
$(BUILDDIR)lfs.a: $(OBJ)
%.a: $(OBJ)
$(AR) rcs $@ $^
$(BUILDDIR)lfs.csv: $(OBJ) $(CGI)
./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o $@
./scripts/data.py $(OBJ) -q -m $@ $(DATAFLAGS) -o $@
./scripts/stack.py $(CGI) -q -m $@ $(STACKFLAGS) -o $@
./scripts/structs.py $(OBJ) -q -m $@ $(STRUCTSFLAGS) -o $@
$(if $(COVERAGE),\
./scripts/coverage.py $(BUILDDIR)tests/*.toml.info \
-q -m $@ $(COVERAGEFLAGS) -o $@)
$(BUILDDIR)%.o: %.c
%.o: %.c
$(CC) -c -MMD $(CFLAGS) $< -o $@
$(BUILDDIR)%.s: %.c
%.s: %.c
$(CC) -S $(CFLAGS) $< -o $@
# gcc depends on the output file for intermediate file names, so
# we can't omit to .o output. We also need to serialize with the
# normal .o rule because otherwise we can end up with multiprocess
# problems with two instances of gcc modifying the same .o
$(BUILDDIR)%.ci: %.c | $(BUILDDIR)%.o
$(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $|
# clean everything
.PHONY: clean
clean:
rm -f $(BUILDDIR)lfs
rm -f $(BUILDDIR)lfs.a
rm -f $(BUILDDIR)lfs.csv
rm -f $(TARGET)
rm -f $(OBJ)
rm -f $(CGI)
rm -f $(DEP)
rm -f $(ASM)
rm -f $(BUILDDIR)tests/*.toml.*


@@ -53,7 +53,6 @@ const struct lfs_config cfg = {
.block_count = 128,
.cache_size = 16,
.lookahead_size = 16,
.block_cycles = 500,
};
// entry point
@@ -110,14 +109,11 @@ directory functions, with the deviation that the allocation of filesystem
structures must be provided by the user.
All POSIX operations, such as remove and rename, are atomic, even in event
of power-loss. Additionally, file updates are not actually committed to
of power-loss. Additionally, no file updates are not actually committed to
the filesystem until sync or close is called on the file.
## Other notes
Littlefs is written in C, and specifically should compile with any compiler
that conforms to the `C99` standard.
All littlefs calls have the potential to return a negative error code. The
errors can be either one of those found in the `enum lfs_error` in
[lfs.h](lfs.h), or an error returned by the user's block device operations.
@@ -192,7 +188,7 @@ More details on how littlefs works can be found in [DESIGN.md](DESIGN.md) and
## Testing
The littlefs comes with a test suite designed to run on a PC using the
[emulated block device](bd/lfs_testbd.h) found in the `bd` directory.
[emulated block device](emubd/lfs_emubd.h) found in the emubd directory.
The tests assume a Linux environment and can be started with make:
``` bash
@@ -221,11 +217,6 @@ License Identifiers that are here available: http://spdx.org/licenses/
- [littlefs-js] - A javascript wrapper for littlefs. I'm not sure why you would
want this, but it is handy for demos. You can see it in action
[here][littlefs-js-demo].
- [littlefs-python] - A Python wrapper for littlefs. The project allows you
to create images of the filesystem on your PC. Check if littlefs will fit
your needs, create images for a later download to the target memory or
inspect the content of a binary image of the target memory.
- [mklfs] - A command line tool built by the [Lua RTOS] guys for making
littlefs images from a host PC. Supports Windows, Mac OS, and Linux.
@@ -255,4 +246,3 @@ License Identifiers that are here available: http://spdx.org/licenses/
[LittleFileSystem]: https://os.mbed.com/docs/mbed-os/v5.12/apis/littlefilesystem.html
[SPIFFS]: https://github.com/pellepl/spiffs
[Dhara]: https://github.com/dlbeer/dhara
[littlefs-python]: https://pypi.org/project/littlefs-python/

SPEC.md

@@ -289,8 +289,8 @@ Layout of the name tag:
```
tag data
[-- 32 --][--- variable length ---]
[1| 3| 8 | 10 | 10 ][--- (size * 8) ---]
^ ^ ^ ^ ^- size ^- file name
[1| 3| 8 | 10 | 10 ][--- (size) ---]
^ ^ ^ ^ ^- size ^- file name
| | | '------ id
| | '----------- file type
| '-------------- type1 (0x0)
@@ -470,8 +470,8 @@ Layout of the inline-struct tag:
```
tag data
[-- 32 --][--- variable length ---]
[1|- 11 -| 10 | 10 ][--- (size * 8) ---]
^ ^ ^ ^- size ^- inline data
[1|- 11 -| 10 | 10 ][--- (size) ---]
^ ^ ^ ^- size ^- inline data
| | '------ id
| '------------ type (0x201)
'----------------- valid bit
@@ -556,8 +556,8 @@ Layout of the user-attr tag:
```
tag data
[-- 32 --][--- variable length ---]
[1| 3| 8 | 10 | 10 ][--- (size * 8) ---]
^ ^ ^ ^ ^- size ^- attr data
[1| 3| 8 | 10 | 10 ][--- (size) ---]
^ ^ ^ ^ ^- size ^- attr data
| | | '------ id
| | '----------- attr type
| '-------------- type1 (0x3)
@@ -764,9 +764,9 @@ Layout of the CRC tag:
```
tag data
[-- 32 --][-- 32 --|--- variable length ---]
[1| 3| 8 | 10 | 10 ][-- 32 --|--- (size * 8 - 32) ---]
^ ^ ^ ^ ^ ^- crc ^- padding
| | | | '- size
[1| 3| 8 | 10 | 10 ][-- 32 --|--- (size) ---]
^ ^ ^ ^ ^ ^- crc ^- padding
| | | | '- size (12)
| | | '------ id (0x3ff)
| | '----------- valid state
| '-------------- type1 (0x5)
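(All of these layouts pack into one 32-bit tag word. Below is a small sketch of the packing, following the bit layout above; littlefs uses an equivalent LFS_MKTAG macro internally. The valid bit and the on-disk xor-with-previous-tag encoding are left out of this sketch.)

``` c
#include <stdint.h>

// Pack a metadata tag: [1 valid | 11 type (3 type1 + 8 chunk) | 10 id | 10 size]
static inline uint32_t mktag(uint32_t type, uint32_t id, uint32_t size) {
    return (type << 20) | (id << 10) | size;
}

// example: an inline-struct tag (type 0x201) for file id 2 carrying
// 13 bytes of inline data -> 0x2010080d
// uint32_t tag = mktag(0x201, 2, 13);
```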


@@ -1,205 +0,0 @@
/*
* Block device emulated in a file
*
* Copyright (c) 2017, Arm Limited. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "bd/lfs_filebd.h"
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
const struct lfs_filebd_config *bdcfg) {
LFS_FILEBD_TRACE("lfs_filebd_createcfg(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
"\"%s\", "
"%p {.erase_value=%"PRId32"})",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
path, (void*)bdcfg, bdcfg->erase_value);
lfs_filebd_t *bd = cfg->context;
bd->cfg = bdcfg;
// open file
bd->fd = open(path, O_RDWR | O_CREAT, 0666);
if (bd->fd < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", err);
return err;
}
LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", 0);
return 0;
}
int lfs_filebd_create(const struct lfs_config *cfg, const char *path) {
LFS_FILEBD_TRACE("lfs_filebd_create(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
"\"%s\")",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
path);
static const struct lfs_filebd_config defaults = {.erase_value=-1};
int err = lfs_filebd_createcfg(cfg, path, &defaults);
LFS_FILEBD_TRACE("lfs_filebd_create -> %d", err);
return err;
}
int lfs_filebd_destroy(const struct lfs_config *cfg) {
LFS_FILEBD_TRACE("lfs_filebd_destroy(%p)", (void*)cfg);
lfs_filebd_t *bd = cfg->context;
int err = close(bd->fd);
if (err < 0) {
err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_destroy -> %d", err);
return err;
}
LFS_FILEBD_TRACE("lfs_filebd_destroy -> %d", 0);
return 0;
}
int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size) {
LFS_FILEBD_TRACE("lfs_filebd_read(%p, "
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
(void*)cfg, block, off, buffer, size);
lfs_filebd_t *bd = cfg->context;
// check if read is valid
LFS_ASSERT(off % cfg->read_size == 0);
LFS_ASSERT(size % cfg->read_size == 0);
LFS_ASSERT(block < cfg->block_count);
// zero for reproducability (in case file is truncated)
if (bd->cfg->erase_value != -1) {
memset(buffer, bd->cfg->erase_value, size);
}
// read
off_t res1 = lseek(bd->fd,
(off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
if (res1 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err);
return err;
}
ssize_t res2 = read(bd->fd, buffer, size);
if (res2 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err);
return err;
}
LFS_FILEBD_TRACE("lfs_filebd_read -> %d", 0);
return 0;
}
int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size) {
LFS_FILEBD_TRACE("lfs_filebd_prog(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
(void*)cfg, block, off, buffer, size);
lfs_filebd_t *bd = cfg->context;
// check if write is valid
LFS_ASSERT(off % cfg->prog_size == 0);
LFS_ASSERT(size % cfg->prog_size == 0);
LFS_ASSERT(block < cfg->block_count);
// check that data was erased? only needed for testing
if (bd->cfg->erase_value != -1) {
off_t res1 = lseek(bd->fd,
(off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
if (res1 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
return err;
}
for (lfs_off_t i = 0; i < size; i++) {
uint8_t c;
ssize_t res2 = read(bd->fd, &c, 1);
if (res2 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
return err;
}
LFS_ASSERT(c == bd->cfg->erase_value);
}
}
// program data
off_t res1 = lseek(bd->fd,
(off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
if (res1 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
return err;
}
ssize_t res2 = write(bd->fd, buffer, size);
if (res2 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
return err;
}
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", 0);
return 0;
}
int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) {
LFS_FILEBD_TRACE("lfs_filebd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
lfs_filebd_t *bd = cfg->context;
// check if erase is valid
LFS_ASSERT(block < cfg->block_count);
// erase, only needed for testing
if (bd->cfg->erase_value != -1) {
off_t res1 = lseek(bd->fd, (off_t)block*cfg->block_size, SEEK_SET);
if (res1 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", err);
return err;
}
for (lfs_off_t i = 0; i < cfg->block_size; i++) {
ssize_t res2 = write(bd->fd, &(uint8_t){bd->cfg->erase_value}, 1);
if (res2 < 0) {
int err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", err);
return err;
}
}
}
LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", 0);
return 0;
}
int lfs_filebd_sync(const struct lfs_config *cfg) {
LFS_FILEBD_TRACE("lfs_filebd_sync(%p)", (void*)cfg);
// file sync
lfs_filebd_t *bd = cfg->context;
int err = fsync(bd->fd);
if (err) {
err = -errno;
LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", 0);
return err;
}
LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", 0);
return 0;
}


@@ -1,73 +0,0 @@
/*
* Block device emulated in a file
*
* Copyright (c) 2017, Arm Limited. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef LFS_FILEBD_H
#define LFS_FILEBD_H
#include "lfs.h"
#include "lfs_util.h"
#ifdef __cplusplus
extern "C"
{
#endif
// Block device specific tracing
#ifdef LFS_FILEBD_YES_TRACE
#define LFS_FILEBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
#else
#define LFS_FILEBD_TRACE(...)
#endif
// filebd config (optional)
struct lfs_filebd_config {
// 8-bit erase value to use for simulating erases. -1 does not simulate
// erases, which can speed up testing by avoiding all the extra block-device
// operations to store the erase value.
int32_t erase_value;
};
// filebd state
typedef struct lfs_filebd {
int fd;
const struct lfs_filebd_config *cfg;
} lfs_filebd_t;
// Create a file block device using the geometry in lfs_config
int lfs_filebd_create(const struct lfs_config *cfg, const char *path);
int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
const struct lfs_filebd_config *bdcfg);
// Clean up memory associated with block device
int lfs_filebd_destroy(const struct lfs_config *cfg);
// Read a block
int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size);
// Program a block
//
// The block must have previously been erased.
int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size);
// Erase a block
//
// A block must be erased before being programmed. The
// state of an erased block is undefined.
int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block);
// Sync the block device
int lfs_filebd_sync(const struct lfs_config *cfg);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif


@@ -1,140 +0,0 @@
/*
* Block device emulated in RAM
*
* Copyright (c) 2017, Arm Limited. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "bd/lfs_rambd.h"
int lfs_rambd_createcfg(const struct lfs_config *cfg,
const struct lfs_rambd_config *bdcfg) {
LFS_RAMBD_TRACE("lfs_rambd_createcfg(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
"%p {.erase_value=%"PRId32", .buffer=%p})",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
(void*)bdcfg, bdcfg->erase_value, bdcfg->buffer);
lfs_rambd_t *bd = cfg->context;
bd->cfg = bdcfg;
// allocate buffer?
if (bd->cfg->buffer) {
bd->buffer = bd->cfg->buffer;
} else {
bd->buffer = lfs_malloc(cfg->block_size * cfg->block_count);
if (!bd->buffer) {
LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
}
}
// zero for reproducability?
if (bd->cfg->erase_value != -1) {
memset(bd->buffer, bd->cfg->erase_value,
cfg->block_size * cfg->block_count);
}
LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", 0);
return 0;
}
int lfs_rambd_create(const struct lfs_config *cfg) {
LFS_RAMBD_TRACE("lfs_rambd_create(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
".block_size=%"PRIu32", .block_count=%"PRIu32"})",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count);
static const struct lfs_rambd_config defaults = {.erase_value=-1};
int err = lfs_rambd_createcfg(cfg, &defaults);
LFS_RAMBD_TRACE("lfs_rambd_create -> %d", err);
return err;
}
int lfs_rambd_destroy(const struct lfs_config *cfg) {
LFS_RAMBD_TRACE("lfs_rambd_destroy(%p)", (void*)cfg);
// clean up memory
lfs_rambd_t *bd = cfg->context;
if (!bd->cfg->buffer) {
lfs_free(bd->buffer);
}
LFS_RAMBD_TRACE("lfs_rambd_destroy -> %d", 0);
return 0;
}
int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size) {
LFS_RAMBD_TRACE("lfs_rambd_read(%p, "
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
(void*)cfg, block, off, buffer, size);
lfs_rambd_t *bd = cfg->context;
// check if read is valid
LFS_ASSERT(off % cfg->read_size == 0);
LFS_ASSERT(size % cfg->read_size == 0);
LFS_ASSERT(block < cfg->block_count);
// read data
memcpy(buffer, &bd->buffer[block*cfg->block_size + off], size);
LFS_RAMBD_TRACE("lfs_rambd_read -> %d", 0);
return 0;
}
int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size) {
LFS_RAMBD_TRACE("lfs_rambd_prog(%p, "
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
(void*)cfg, block, off, buffer, size);
lfs_rambd_t *bd = cfg->context;
// check if write is valid
LFS_ASSERT(off % cfg->prog_size == 0);
LFS_ASSERT(size % cfg->prog_size == 0);
LFS_ASSERT(block < cfg->block_count);
// check that data was erased? only needed for testing
if (bd->cfg->erase_value != -1) {
for (lfs_off_t i = 0; i < size; i++) {
LFS_ASSERT(bd->buffer[block*cfg->block_size + off + i] ==
bd->cfg->erase_value);
}
}
// program data
memcpy(&bd->buffer[block*cfg->block_size + off], buffer, size);
LFS_RAMBD_TRACE("lfs_rambd_prog -> %d", 0);
return 0;
}
int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block) {
LFS_RAMBD_TRACE("lfs_rambd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
lfs_rambd_t *bd = cfg->context;
// check if erase is valid
LFS_ASSERT(block < cfg->block_count);
// erase, only needed for testing
if (bd->cfg->erase_value != -1) {
memset(&bd->buffer[block*cfg->block_size],
bd->cfg->erase_value, cfg->block_size);
}
LFS_RAMBD_TRACE("lfs_rambd_erase -> %d", 0);
return 0;
}
int lfs_rambd_sync(const struct lfs_config *cfg) {
LFS_RAMBD_TRACE("lfs_rambd_sync(%p)", (void*)cfg);
// sync does nothing because we aren't backed by anything real
(void)cfg;
LFS_RAMBD_TRACE("lfs_rambd_sync -> %d", 0);
return 0;
}

View File

@@ -1,75 +0,0 @@
/*
* Block device emulated in RAM
*
* Copyright (c) 2017, Arm Limited. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef LFS_RAMBD_H
#define LFS_RAMBD_H
#include "lfs.h"
#include "lfs_util.h"
#ifdef __cplusplus
extern "C"
{
#endif
// Block device specific tracing
#ifdef LFS_RAMBD_YES_TRACE
#define LFS_RAMBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
#else
#define LFS_RAMBD_TRACE(...)
#endif
// rambd config (optional)
struct lfs_rambd_config {
    // 8-bit erase value to simulate erasing with. -1 indicates that no erase
    // occurs, which is still a valid block device.
int32_t erase_value;
// Optional statically allocated buffer for the block device.
void *buffer;
};
// rambd state
typedef struct lfs_rambd {
uint8_t *buffer;
const struct lfs_rambd_config *cfg;
} lfs_rambd_t;
// Create a RAM block device using the geometry in lfs_config
int lfs_rambd_create(const struct lfs_config *cfg);
int lfs_rambd_createcfg(const struct lfs_config *cfg,
const struct lfs_rambd_config *bdcfg);
// Clean up memory associated with block device
int lfs_rambd_destroy(const struct lfs_config *cfg);
// Read a block
int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size);
// Program a block
//
// The block must have previously been erased.
int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size);
// Erase a block
//
// A block must be erased before being programmed. The
// state of an erased block is undefined.
int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block);
// Sync the block device
int lfs_rambd_sync(const struct lfs_config *cfg);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif
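For comparison, a rambd can be backed by a statically allocated buffer through lfs_rambd_createcfg, so lfs_malloc is never needed. A small sketch under assumed geometry follows; the erase_value of 0xff mimics NOR flash and is not required:

#include "lfs.h"
#include "bd/lfs_rambd.h"

#define EX_BLOCK_SIZE  512
#define EX_BLOCK_COUNT 64

static uint8_t ram_image[EX_BLOCK_SIZE * EX_BLOCK_COUNT];

int example_rambd(void) {
    lfs_rambd_t bd;
    struct lfs_rambd_config bdcfg = {
        .erase_value = 0xff,   // simulate NOR-style erased state
        .buffer = ram_image,   // statically allocated backing store
    };
    struct lfs_config cfg = {
        .context = &bd,
        .read  = lfs_rambd_read,
        .prog  = lfs_rambd_prog,
        .erase = lfs_rambd_erase,
        .sync  = lfs_rambd_sync,
        .read_size   = 16,
        .prog_size   = 16,
        .block_size  = EX_BLOCK_SIZE,
        .block_count = EX_BLOCK_COUNT,
    };

    int err = lfs_rambd_createcfg(&cfg, &bdcfg);
    if (err) {
        return err;
    }
    // ... drive littlefs or the raw bd API against cfg ...
    return lfs_rambd_destroy(&cfg);
}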

View File

@@ -1,302 +0,0 @@
/*
* Testing block device, wraps filebd and rambd while providing a bunch
* of hooks for testing littlefs in various conditions.
*
* Copyright (c) 2017, Arm Limited. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "bd/lfs_testbd.h"
#include <stdlib.h>
int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path,
const struct lfs_testbd_config *bdcfg) {
LFS_TESTBD_TRACE("lfs_testbd_createcfg(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
"\"%s\", "
"%p {.erase_value=%"PRId32", .erase_cycles=%"PRIu32", "
".badblock_behavior=%"PRIu8", .power_cycles=%"PRIu32", "
".buffer=%p, .wear_buffer=%p})",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
path, (void*)bdcfg, bdcfg->erase_value, bdcfg->erase_cycles,
bdcfg->badblock_behavior, bdcfg->power_cycles,
bdcfg->buffer, bdcfg->wear_buffer);
lfs_testbd_t *bd = cfg->context;
bd->cfg = bdcfg;
// setup testing things
bd->persist = path;
bd->power_cycles = bd->cfg->power_cycles;
if (bd->cfg->erase_cycles) {
if (bd->cfg->wear_buffer) {
bd->wear = bd->cfg->wear_buffer;
} else {
bd->wear = lfs_malloc(sizeof(lfs_testbd_wear_t)*cfg->block_count);
if (!bd->wear) {
LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", LFS_ERR_NOMEM);
return LFS_ERR_NOMEM;
}
}
memset(bd->wear, 0, sizeof(lfs_testbd_wear_t) * cfg->block_count);
}
// create underlying block device
if (bd->persist) {
bd->u.file.cfg = (struct lfs_filebd_config){
.erase_value = bd->cfg->erase_value,
};
int err = lfs_filebd_createcfg(cfg, path, &bd->u.file.cfg);
LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", err);
return err;
} else {
bd->u.ram.cfg = (struct lfs_rambd_config){
.erase_value = bd->cfg->erase_value,
.buffer = bd->cfg->buffer,
};
int err = lfs_rambd_createcfg(cfg, &bd->u.ram.cfg);
LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", err);
return err;
}
}
int lfs_testbd_create(const struct lfs_config *cfg, const char *path) {
LFS_TESTBD_TRACE("lfs_testbd_create(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
"\"%s\")",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
path);
static const struct lfs_testbd_config defaults = {.erase_value=-1};
int err = lfs_testbd_createcfg(cfg, path, &defaults);
LFS_TESTBD_TRACE("lfs_testbd_create -> %d", err);
return err;
}
int lfs_testbd_destroy(const struct lfs_config *cfg) {
LFS_TESTBD_TRACE("lfs_testbd_destroy(%p)", (void*)cfg);
lfs_testbd_t *bd = cfg->context;
if (bd->cfg->erase_cycles && !bd->cfg->wear_buffer) {
lfs_free(bd->wear);
}
if (bd->persist) {
int err = lfs_filebd_destroy(cfg);
LFS_TESTBD_TRACE("lfs_testbd_destroy -> %d", err);
return err;
} else {
int err = lfs_rambd_destroy(cfg);
LFS_TESTBD_TRACE("lfs_testbd_destroy -> %d", err);
return err;
}
}
/// Internal mapping to block devices ///
static int lfs_testbd_rawread(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size) {
lfs_testbd_t *bd = cfg->context;
if (bd->persist) {
return lfs_filebd_read(cfg, block, off, buffer, size);
} else {
return lfs_rambd_read(cfg, block, off, buffer, size);
}
}
static int lfs_testbd_rawprog(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size) {
lfs_testbd_t *bd = cfg->context;
if (bd->persist) {
return lfs_filebd_prog(cfg, block, off, buffer, size);
} else {
return lfs_rambd_prog(cfg, block, off, buffer, size);
}
}
static int lfs_testbd_rawerase(const struct lfs_config *cfg,
lfs_block_t block) {
lfs_testbd_t *bd = cfg->context;
if (bd->persist) {
return lfs_filebd_erase(cfg, block);
} else {
return lfs_rambd_erase(cfg, block);
}
}
static int lfs_testbd_rawsync(const struct lfs_config *cfg) {
lfs_testbd_t *bd = cfg->context;
if (bd->persist) {
return lfs_filebd_sync(cfg);
} else {
return lfs_rambd_sync(cfg);
}
}
/// block device API ///
int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size) {
LFS_TESTBD_TRACE("lfs_testbd_read(%p, "
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
(void*)cfg, block, off, buffer, size);
lfs_testbd_t *bd = cfg->context;
// check if read is valid
LFS_ASSERT(off % cfg->read_size == 0);
LFS_ASSERT(size % cfg->read_size == 0);
LFS_ASSERT(block < cfg->block_count);
// block bad?
if (bd->cfg->erase_cycles && bd->wear[block] >= bd->cfg->erase_cycles &&
bd->cfg->badblock_behavior == LFS_TESTBD_BADBLOCK_READERROR) {
LFS_TESTBD_TRACE("lfs_testbd_read -> %d", LFS_ERR_CORRUPT);
return LFS_ERR_CORRUPT;
}
// read
int err = lfs_testbd_rawread(cfg, block, off, buffer, size);
LFS_TESTBD_TRACE("lfs_testbd_read -> %d", err);
return err;
}
int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size) {
LFS_TESTBD_TRACE("lfs_testbd_prog(%p, "
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
(void*)cfg, block, off, buffer, size);
lfs_testbd_t *bd = cfg->context;
// check if write is valid
LFS_ASSERT(off % cfg->prog_size == 0);
LFS_ASSERT(size % cfg->prog_size == 0);
LFS_ASSERT(block < cfg->block_count);
// block bad?
if (bd->cfg->erase_cycles && bd->wear[block] >= bd->cfg->erase_cycles) {
if (bd->cfg->badblock_behavior ==
LFS_TESTBD_BADBLOCK_PROGERROR) {
LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", LFS_ERR_CORRUPT);
return LFS_ERR_CORRUPT;
} else if (bd->cfg->badblock_behavior ==
LFS_TESTBD_BADBLOCK_PROGNOOP ||
bd->cfg->badblock_behavior ==
LFS_TESTBD_BADBLOCK_ERASENOOP) {
LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0);
return 0;
}
}
// prog
int err = lfs_testbd_rawprog(cfg, block, off, buffer, size);
if (err) {
LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", err);
return err;
}
// lose power?
if (bd->power_cycles > 0) {
bd->power_cycles -= 1;
if (bd->power_cycles == 0) {
// sync to make sure we persist the last changes
LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0);
// simulate power loss
exit(33);
}
}
LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0);
return 0;
}
int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) {
LFS_TESTBD_TRACE("lfs_testbd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
lfs_testbd_t *bd = cfg->context;
// check if erase is valid
LFS_ASSERT(block < cfg->block_count);
// block bad?
if (bd->cfg->erase_cycles) {
if (bd->wear[block] >= bd->cfg->erase_cycles) {
if (bd->cfg->badblock_behavior ==
LFS_TESTBD_BADBLOCK_ERASEERROR) {
LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", LFS_ERR_CORRUPT);
return LFS_ERR_CORRUPT;
} else if (bd->cfg->badblock_behavior ==
LFS_TESTBD_BADBLOCK_ERASENOOP) {
LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", 0);
return 0;
}
} else {
// mark wear
bd->wear[block] += 1;
}
}
// erase
int err = lfs_testbd_rawerase(cfg, block);
if (err) {
LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", err);
return err;
}
// lose power?
if (bd->power_cycles > 0) {
bd->power_cycles -= 1;
if (bd->power_cycles == 0) {
// sync to make sure we persist the last changes
LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0);
// simulate power loss
exit(33);
}
}
LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0);
return 0;
}
int lfs_testbd_sync(const struct lfs_config *cfg) {
LFS_TESTBD_TRACE("lfs_testbd_sync(%p)", (void*)cfg);
int err = lfs_testbd_rawsync(cfg);
LFS_TESTBD_TRACE("lfs_testbd_sync -> %d", err);
return err;
}
/// simulated wear operations ///
lfs_testbd_swear_t lfs_testbd_getwear(const struct lfs_config *cfg,
lfs_block_t block) {
LFS_TESTBD_TRACE("lfs_testbd_getwear(%p, %"PRIu32")", (void*)cfg, block);
lfs_testbd_t *bd = cfg->context;
// check if block is valid
LFS_ASSERT(bd->cfg->erase_cycles);
LFS_ASSERT(block < cfg->block_count);
LFS_TESTBD_TRACE("lfs_testbd_getwear -> %"PRIu32, bd->wear[block]);
return bd->wear[block];
}
int lfs_testbd_setwear(const struct lfs_config *cfg,
lfs_block_t block, lfs_testbd_wear_t wear) {
LFS_TESTBD_TRACE("lfs_testbd_setwear(%p, %"PRIu32")", (void*)cfg, block);
lfs_testbd_t *bd = cfg->context;
// check if block is valid
LFS_ASSERT(bd->cfg->erase_cycles);
LFS_ASSERT(block < cfg->block_count);
bd->wear[block] = wear;
LFS_TESTBD_TRACE("lfs_testbd_setwear -> %d", 0);
return 0;
}

View File

@@ -1,141 +0,0 @@
/*
* Testing block device, wraps filebd and rambd while providing a bunch
* of hooks for testing littlefs in various conditions.
*
* Copyright (c) 2017, Arm Limited. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef LFS_TESTBD_H
#define LFS_TESTBD_H
#include "lfs.h"
#include "lfs_util.h"
#include "bd/lfs_rambd.h"
#include "bd/lfs_filebd.h"
#ifdef __cplusplus
extern "C"
{
#endif
// Block device specific tracing
#ifdef LFS_TESTBD_YES_TRACE
#define LFS_TESTBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
#else
#define LFS_TESTBD_TRACE(...)
#endif
// Mode determining how "bad blocks" behave during testing. This simulates
// some real-world circumstances such as progs not sticking (prog-noop),
// a readonly disk (erase-noop), and ECC failures (read-error).
//
// Note that read-noop is not allowed. Read _must_ return a consistent (but
// possibly arbitrary) value on every read.
enum lfs_testbd_badblock_behavior {
LFS_TESTBD_BADBLOCK_PROGERROR,
LFS_TESTBD_BADBLOCK_ERASEERROR,
LFS_TESTBD_BADBLOCK_READERROR,
LFS_TESTBD_BADBLOCK_PROGNOOP,
LFS_TESTBD_BADBLOCK_ERASENOOP,
};
// Type for measuring wear
typedef uint32_t lfs_testbd_wear_t;
typedef int32_t lfs_testbd_swear_t;
// testbd config, this is required for testing
struct lfs_testbd_config {
// 8-bit erase value to use for simulating erases. -1 does not simulate
// erases, which can speed up testing by avoiding all the extra block-device
// operations to store the erase value.
int32_t erase_value;
// Number of erase cycles before a block becomes "bad". The exact behavior
    // of bad blocks is controlled by badblock_behavior.
uint32_t erase_cycles;
// The mode determining how bad blocks fail
uint8_t badblock_behavior;
// Number of write operations (erase/prog) before forcefully killing
// the program with exit. Simulates power-loss. 0 disables.
uint32_t power_cycles;
// Optional buffer for RAM block device.
void *buffer;
// Optional buffer for wear
void *wear_buffer;
};
// testbd state
typedef struct lfs_testbd {
union {
struct {
lfs_filebd_t bd;
struct lfs_filebd_config cfg;
} file;
struct {
lfs_rambd_t bd;
struct lfs_rambd_config cfg;
} ram;
} u;
bool persist;
uint32_t power_cycles;
lfs_testbd_wear_t *wear;
const struct lfs_testbd_config *cfg;
} lfs_testbd_t;
/// Block device API ///
// Create a test block device using the geometry in lfs_config
//
// Note that filebd is used if a path is provided; if path is NULL,
// testbd will use rambd, which can be much faster.
int lfs_testbd_create(const struct lfs_config *cfg, const char *path);
int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path,
const struct lfs_testbd_config *bdcfg);
// Clean up memory associated with block device
int lfs_testbd_destroy(const struct lfs_config *cfg);
// Read a block
int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size);
// Program a block
//
// The block must have previously been erased.
int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size);
// Erase a block
//
// A block must be erased before being programmed. The
// state of an erased block is undefined.
int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block);
// Sync the block device
int lfs_testbd_sync(const struct lfs_config *cfg);
/// Additional extended API for driving test features ///
// Get simulated wear on a given block
lfs_testbd_swear_t lfs_testbd_getwear(const struct lfs_config *cfg,
lfs_block_t block);
// Manually set simulated wear on a given block
int lfs_testbd_setwear(const struct lfs_config *cfg,
lfs_block_t block, lfs_testbd_wear_t wear);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif
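To illustrate how these knobs combine, here is a hypothetical test setup (all values are arbitrary) that wears blocks out after 100 erases, fails progs on bad blocks, and kills the process after 1000 write operations to emulate power loss; passing NULL as the path selects the faster rambd backend:

#include "lfs.h"
#include "bd/lfs_testbd.h"

int example_testbd(void) {
    lfs_testbd_t bd;
    struct lfs_testbd_config bdcfg = {
        .erase_value       = 0xff,
        .erase_cycles      = 100,
        .badblock_behavior = LFS_TESTBD_BADBLOCK_PROGERROR,
        .power_cycles      = 1000,  // process exits with code 33 on "power loss"
    };
    struct lfs_config cfg = {
        .context = &bd,
        .read  = lfs_testbd_read,
        .prog  = lfs_testbd_prog,
        .erase = lfs_testbd_erase,
        .sync  = lfs_testbd_sync,
        .read_size   = 16,
        .prog_size   = 16,
        .block_size  = 512,
        .block_count = 1024,
    };

    int err = lfs_testbd_createcfg(&cfg, NULL, &bdcfg);
    if (err) {
        return err;
    }
    // pretend block 3 is already near end-of-life
    lfs_testbd_setwear(&cfg, 3, 99);
    // ... drive littlefs against cfg, then inspect simulated wear ...
    lfs_testbd_swear_t wear = lfs_testbd_getwear(&cfg, 3);
    (void)wear;
    return lfs_testbd_destroy(&cfg);
}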

403
emubd/lfs_emubd.c Normal file
View File

@@ -0,0 +1,403 @@
/*
* Block device emulated on standard files
*
* Copyright (c) 2017, Arm Limited. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "emubd/lfs_emubd.h"
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <limits.h>
#include <sys/stat.h>
#include <unistd.h>
#include <assert.h>
#include <stdbool.h>
#include <inttypes.h>
// Emulated block device utils
static inline void lfs_emubd_tole32(lfs_emubd_t *emu) {
emu->cfg.read_size = lfs_tole32(emu->cfg.read_size);
emu->cfg.prog_size = lfs_tole32(emu->cfg.prog_size);
emu->cfg.block_size = lfs_tole32(emu->cfg.block_size);
emu->cfg.block_count = lfs_tole32(emu->cfg.block_count);
emu->stats.read_count = lfs_tole32(emu->stats.read_count);
emu->stats.prog_count = lfs_tole32(emu->stats.prog_count);
emu->stats.erase_count = lfs_tole32(emu->stats.erase_count);
for (unsigned i = 0; i < sizeof(emu->history.blocks) /
sizeof(emu->history.blocks[0]); i++) {
emu->history.blocks[i] = lfs_tole32(emu->history.blocks[i]);
}
}
static inline void lfs_emubd_fromle32(lfs_emubd_t *emu) {
emu->cfg.read_size = lfs_fromle32(emu->cfg.read_size);
emu->cfg.prog_size = lfs_fromle32(emu->cfg.prog_size);
emu->cfg.block_size = lfs_fromle32(emu->cfg.block_size);
emu->cfg.block_count = lfs_fromle32(emu->cfg.block_count);
emu->stats.read_count = lfs_fromle32(emu->stats.read_count);
emu->stats.prog_count = lfs_fromle32(emu->stats.prog_count);
emu->stats.erase_count = lfs_fromle32(emu->stats.erase_count);
for (unsigned i = 0; i < sizeof(emu->history.blocks) /
sizeof(emu->history.blocks[0]); i++) {
emu->history.blocks[i] = lfs_fromle32(emu->history.blocks[i]);
}
}
// Block device emulated on existing filesystem
int lfs_emubd_create(const struct lfs_config *cfg, const char *path) {
LFS_TRACE("lfs_emubd_create(%p {.context=%p, "
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
".block_size=%"PRIu32", .block_count=%"PRIu32"}, \"%s\")",
(void*)cfg, cfg->context,
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
path);
lfs_emubd_t *emu = cfg->context;
emu->cfg.read_size = cfg->read_size;
emu->cfg.prog_size = cfg->prog_size;
emu->cfg.block_size = cfg->block_size;
emu->cfg.block_count = cfg->block_count;
// Allocate buffer for creating children files
size_t pathlen = strlen(path);
emu->path = malloc(pathlen + 1 + LFS_NAME_MAX + 1);
if (!emu->path) {
int err = -ENOMEM;
LFS_TRACE("lfs_emubd_create -> %"PRId32, err);
return err;
}
strcpy(emu->path, path);
emu->path[pathlen] = '/';
emu->child = &emu->path[pathlen+1];
memset(emu->child, '\0', LFS_NAME_MAX+1);
// Create directory if it doesn't exist
int err = mkdir(path, 0777);
if (err && errno != EEXIST) {
err = -errno;
LFS_TRACE("lfs_emubd_create -> %"PRId32, err);
return err;
}
// Load stats to continue incrementing
snprintf(emu->child, LFS_NAME_MAX, ".stats");
FILE *f = fopen(emu->path, "r");
if (!f) {
memset(&emu->stats, LFS_EMUBD_ERASE_VALUE, sizeof(emu->stats));
} else {
size_t res = fread(&emu->stats, sizeof(emu->stats), 1, f);
lfs_emubd_fromle32(emu);
if (res < 1) {
err = -errno;
LFS_TRACE("lfs_emubd_create -> %"PRId32, err);
return err;
}
err = fclose(f);
if (err) {
err = -errno;
LFS_TRACE("lfs_emubd_create -> %"PRId32, err);
return err;
}
}
// Load history
snprintf(emu->child, LFS_NAME_MAX, ".history");
f = fopen(emu->path, "r");
if (!f) {
memset(&emu->history, 0, sizeof(emu->history));
} else {
size_t res = fread(&emu->history, sizeof(emu->history), 1, f);
lfs_emubd_fromle32(emu);
if (res < 1) {
err = -errno;
LFS_TRACE("lfs_emubd_create -> %"PRId32, err);
return err;
}
err = fclose(f);
if (err) {
err = -errno;
LFS_TRACE("lfs_emubd_create -> %"PRId32, err);
return err;
}
}
LFS_TRACE("lfs_emubd_create -> %"PRId32, 0);
return 0;
}
void lfs_emubd_destroy(const struct lfs_config *cfg) {
LFS_TRACE("lfs_emubd_destroy(%p)", (void*)cfg);
lfs_emubd_sync(cfg);
lfs_emubd_t *emu = cfg->context;
free(emu->path);
LFS_TRACE("lfs_emubd_destroy -> %s", "void");
}
int lfs_emubd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size) {
LFS_TRACE("lfs_emubd_read(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
(void*)cfg, block, off, buffer, size);
lfs_emubd_t *emu = cfg->context;
uint8_t *data = buffer;
// Check if read is valid
assert(off % cfg->read_size == 0);
assert(size % cfg->read_size == 0);
assert(block < cfg->block_count);
// Zero out buffer for debugging
memset(data, 0, size);
// Read data
snprintf(emu->child, LFS_NAME_MAX, "%" PRIx32, block);
FILE *f = fopen(emu->path, "rb");
if (!f && errno != ENOENT) {
int err = -errno;
LFS_TRACE("lfs_emubd_read -> %d", err);
return err;
}
if (f) {
int err = fseek(f, off, SEEK_SET);
if (err) {
err = -errno;
LFS_TRACE("lfs_emubd_read -> %d", err);
return err;
}
size_t res = fread(data, 1, size, f);
if (res < size && !feof(f)) {
err = -errno;
LFS_TRACE("lfs_emubd_read -> %d", err);
return err;
}
err = fclose(f);
if (err) {
err = -errno;
LFS_TRACE("lfs_emubd_read -> %d", err);
return err;
}
}
emu->stats.read_count += size;
LFS_TRACE("lfs_emubd_read -> %d", 0);
return 0;
}
int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size) {
LFS_TRACE("lfs_emubd_prog(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
(void*)cfg, block, off, buffer, size);
lfs_emubd_t *emu = cfg->context;
const uint8_t *data = buffer;
// Check if write is valid
assert(off % cfg->prog_size == 0);
assert(size % cfg->prog_size == 0);
assert(block < cfg->block_count);
// Program data
snprintf(emu->child, LFS_NAME_MAX, "%" PRIx32, block);
FILE *f = fopen(emu->path, "r+b");
if (!f) {
int err = (errno == EACCES) ? 0 : -errno;
LFS_TRACE("lfs_emubd_prog -> %d", err);
return err;
}
// Check that file was erased
assert(f);
int err = fseek(f, off, SEEK_SET);
if (err) {
err = -errno;
LFS_TRACE("lfs_emubd_prog -> %d", err);
return err;
}
size_t res = fwrite(data, 1, size, f);
if (res < size) {
err = -errno;
LFS_TRACE("lfs_emubd_prog -> %d", err);
return err;
}
err = fseek(f, off, SEEK_SET);
if (err) {
err = -errno;
LFS_TRACE("lfs_emubd_prog -> %d", err);
return err;
}
uint8_t dat;
res = fread(&dat, 1, 1, f);
if (res < 1) {
err = -errno;
LFS_TRACE("lfs_emubd_prog -> %d", err);
return err;
}
err = fclose(f);
if (err) {
err = -errno;
LFS_TRACE("lfs_emubd_prog -> %d", err);
return err;
}
// update history and stats
if (block != emu->history.blocks[0]) {
memmove(&emu->history.blocks[1], &emu->history.blocks[0],
sizeof(emu->history) - sizeof(emu->history.blocks[0]));
emu->history.blocks[0] = block;
}
emu->stats.prog_count += size;
LFS_TRACE("lfs_emubd_prog -> %d", 0);
return 0;
}
int lfs_emubd_erase(const struct lfs_config *cfg, lfs_block_t block) {
LFS_TRACE("lfs_emubd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
lfs_emubd_t *emu = cfg->context;
// Check if erase is valid
assert(block < cfg->block_count);
// Erase the block
snprintf(emu->child, LFS_NAME_MAX, "%" PRIx32, block);
struct stat st;
int err = stat(emu->path, &st);
if (err && errno != ENOENT) {
err = -errno;
LFS_TRACE("lfs_emubd_erase -> %d", err);
return err;
}
if (!err && S_ISREG(st.st_mode) && (S_IWUSR & st.st_mode)) {
err = unlink(emu->path);
if (err) {
err = -errno;
LFS_TRACE("lfs_emubd_erase -> %d", err);
return err;
}
}
if (err || (S_ISREG(st.st_mode) && (S_IWUSR & st.st_mode))) {
FILE *f = fopen(emu->path, "w");
if (!f) {
err = -errno;
LFS_TRACE("lfs_emubd_erase -> %d", err);
return err;
}
err = fclose(f);
if (err) {
err = -errno;
LFS_TRACE("lfs_emubd_erase -> %d", err);
return err;
}
}
emu->stats.erase_count += cfg->block_size;
LFS_TRACE("lfs_emubd_erase -> %d", 0);
return 0;
}
int lfs_emubd_sync(const struct lfs_config *cfg) {
LFS_TRACE("lfs_emubd_sync(%p)", (void*)cfg);
lfs_emubd_t *emu = cfg->context;
// Just write out info/stats for later lookup
snprintf(emu->child, LFS_NAME_MAX, ".config");
FILE *f = fopen(emu->path, "w");
if (!f) {
int err = -errno;
LFS_TRACE("lfs_emubd_sync -> %d", err);
return err;
}
lfs_emubd_tole32(emu);
size_t res = fwrite(&emu->cfg, sizeof(emu->cfg), 1, f);
lfs_emubd_fromle32(emu);
if (res < 1) {
int err = -errno;
LFS_TRACE("lfs_emubd_sync -> %d", err);
return err;
}
int err = fclose(f);
if (err) {
err = -errno;
LFS_TRACE("lfs_emubd_sync -> %d", err);
return err;
}
snprintf(emu->child, LFS_NAME_MAX, ".stats");
f = fopen(emu->path, "w");
if (!f) {
err = -errno;
LFS_TRACE("lfs_emubd_sync -> %d", err);
return err;
}
lfs_emubd_tole32(emu);
res = fwrite(&emu->stats, sizeof(emu->stats), 1, f);
lfs_emubd_fromle32(emu);
if (res < 1) {
err = -errno;
LFS_TRACE("lfs_emubd_sync -> %d", err);
return err;
}
err = fclose(f);
if (err) {
err = -errno;
LFS_TRACE("lfs_emubd_sync -> %d", err);
return err;
}
snprintf(emu->child, LFS_NAME_MAX, ".history");
f = fopen(emu->path, "w");
if (!f) {
err = -errno;
LFS_TRACE("lfs_emubd_sync -> %d", err);
return err;
}
lfs_emubd_tole32(emu);
res = fwrite(&emu->history, sizeof(emu->history), 1, f);
lfs_emubd_fromle32(emu);
if (res < 1) {
err = -errno;
LFS_TRACE("lfs_emubd_sync -> %d", err);
return err;
}
err = fclose(f);
if (err) {
err = -errno;
LFS_TRACE("lfs_emubd_sync -> %d", err);
return err;
}
LFS_TRACE("lfs_emubd_sync -> %d", 0);
return 0;
}

79
emubd/lfs_emubd.h Normal file
View File

@@ -0,0 +1,79 @@
/*
* Block device emulated on standard files
*
* Copyright (c) 2017, Arm Limited. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef LFS_EMUBD_H
#define LFS_EMUBD_H
#include "lfs.h"
#include "lfs_util.h"
#ifdef __cplusplus
extern "C"
{
#endif
// Config options
#ifndef LFS_EMUBD_ERASE_VALUE
#define LFS_EMUBD_ERASE_VALUE 0x00
#endif
// The emu bd state
typedef struct lfs_emubd {
char *path;
char *child;
struct {
uint64_t read_count;
uint64_t prog_count;
uint64_t erase_count;
} stats;
struct {
lfs_block_t blocks[4];
} history;
struct {
uint32_t read_size;
uint32_t prog_size;
uint32_t block_size;
uint32_t block_count;
} cfg;
} lfs_emubd_t;
// Create a block device using path for the directory to store blocks
int lfs_emubd_create(const struct lfs_config *cfg, const char *path);
// Clean up memory associated with emu block device
void lfs_emubd_destroy(const struct lfs_config *cfg);
// Read a block
int lfs_emubd_read(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size);
// Program a block
//
// The block must have previously been erased.
int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size);
// Erase a block
//
// A block must be erased before being programmed. The
// state of an erased block is undefined.
int lfs_emubd_erase(const struct lfs_config *cfg, lfs_block_t block);
// Sync the block device
int lfs_emubd_sync(const struct lfs_config *cfg);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif
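As a rough usage sketch (the "blocks" directory name is just an example), emubd stores each block as a file under the given directory and accumulates read/prog/erase counters that can be inspected after a run:

#include "lfs.h"
#include "emubd/lfs_emubd.h"
#include <stdio.h>

int example_emubd(void) {
    lfs_emubd_t emu;
    struct lfs_config cfg = {
        .context = &emu,
        .read  = lfs_emubd_read,
        .prog  = lfs_emubd_prog,
        .erase = lfs_emubd_erase,
        .sync  = lfs_emubd_sync,
        .read_size   = 16,
        .prog_size   = 16,
        .block_size  = 512,
        .block_count = 1024,
    };

    int err = lfs_emubd_create(&cfg, "blocks");
    if (err) {
        return err;
    }
    // ... run littlefs or the raw bd API against cfg ...
    printf("reads=%llu progs=%llu erases=%llu\n",
            (unsigned long long)emu.stats.read_count,
            (unsigned long long)emu.stats.prog_count,
            (unsigned long long)emu.stats.erase_count);
    lfs_emubd_destroy(&cfg);
    return 0;
}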

3036
lfs.c

File diff suppressed because it is too large

80
lfs.h
View File

@@ -7,8 +7,6 @@
#ifndef LFS_H
#define LFS_H
#include <stdint.h>
#include <stdbool.h>
#include "lfs_util.h"
#ifdef __cplusplus
@@ -22,7 +20,7 @@ extern "C"
// Software library version
// Major (top-nibble), incremented on backwards incompatible changes
// Minor (bottom-nibble), incremented on feature additions
#define LFS_VERSION 0x00020004
#define LFS_VERSION 0x00020001
#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))
@@ -67,25 +65,6 @@ typedef uint32_t lfs_block_t;
#define LFS_ATTR_MAX 1022
#endif
// Possible error codes; these are negative to allow
// valid positive return values
enum lfs_error {
LFS_ERR_OK = 0, // No error
LFS_ERR_IO = -5, // Error during device operation
LFS_ERR_CORRUPT = -84, // Corrupted
LFS_ERR_NOENT = -2, // No directory entry
LFS_ERR_EXIST = -17, // Entry already exists
LFS_ERR_NOTDIR = -20, // Entry is not a dir
LFS_ERR_ISDIR = -21, // Entry is a dir
LFS_ERR_NOTEMPTY = -39, // Dir is not empty
LFS_ERR_BADF = -9, // Bad file number
LFS_ERR_FBIG = -27, // File too large
LFS_ERR_INVAL = -22, // Invalid parameter
LFS_ERR_NOSPC = -28, // No space left on device
LFS_ERR_NOMEM = -12, // No more memory available
LFS_ERR_NOATTR = -61, // No data/attr available
LFS_ERR_NAMETOOLONG = -36, // File name too long
};
// File types
enum lfs_type {
@@ -124,25 +103,20 @@ enum lfs_type {
enum lfs_open_flags {
// open flags
LFS_O_RDONLY = 1, // Open a file as read only
#ifndef LFS_READONLY
LFS_O_WRONLY = 2, // Open a file as write only
LFS_O_RDWR = 3, // Open a file as read and write
LFS_O_CREAT = 0x0100, // Create a file if it does not exist
LFS_O_EXCL = 0x0200, // Fail if a file already exists
LFS_O_TRUNC = 0x0400, // Truncate the existing file to zero size
LFS_O_APPEND = 0x0800, // Move to end of file on every write
#endif
// internally used flags
#ifndef LFS_READONLY
LFS_F_DIRTY = 0x010000, // File does not match storage
LFS_F_WRITING = 0x020000, // File has been written since last flush
#endif
LFS_F_READING = 0x040000, // File has been read since last flush
#ifndef LFS_READONLY
LFS_F_ERRED = 0x080000, // An error occurred during write
#endif
    LFS_F_ERRED = 0x080000, // An error occurred during write
LFS_F_INLINE = 0x100000, // Currently inlined in directory entry
LFS_F_OPENED = 0x200000, // File has been opened
};
// File seek flags
@@ -180,16 +154,6 @@ struct lfs_config {
    // are propagated to the user.
int (*sync)(const struct lfs_config *c);
#ifdef LFS_THREADSAFE
// Lock the underlying block device. Negative error codes
    // are propagated to the user.
int (*lock)(const struct lfs_config *c);
// Unlock the underlying block device. Negative error codes
    // are propagated to the user.
int (*unlock)(const struct lfs_config *c);
#endif
// Minimum size of a block read. All read operations will be a
// multiple of this value.
lfs_size_t read_size;
@@ -207,7 +171,7 @@ struct lfs_config {
// Number of erasable blocks on the device.
lfs_size_t block_count;
// Number of erase cycles before littlefs evicts metadata logs and moves
// Number of erase cycles before littlefs evicts metadata logs and moves
// the metadata to another block. Suggested values are in the
// range 100-1000, with large values having better performance at the cost
// of less consistent wear distribution.
@@ -256,12 +220,6 @@ struct lfs_config {
// larger attributes size but must be <= LFS_ATTR_MAX. Defaults to
// LFS_ATTR_MAX when zero.
lfs_size_t attr_max;
// Optional upper limit on total space given to metadata pairs in bytes. On
// devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB)
// can help bound the metadata compaction time. Must be <= block_size.
// Defaults to block_size when zero.
lfs_size_t metadata_max;
};
// File info structure
@@ -377,11 +335,6 @@ typedef struct lfs_superblock {
lfs_size_t attr_max;
} lfs_superblock_t;
typedef struct lfs_gstate {
uint32_t tag;
lfs_block_t pair[2];
} lfs_gstate_t;
// The littlefs filesystem type
typedef struct lfs {
lfs_cache_t rcache;
@@ -396,9 +349,10 @@ typedef struct lfs {
} *mlist;
uint32_t seed;
lfs_gstate_t gstate;
lfs_gstate_t gdisk;
lfs_gstate_t gdelta;
struct lfs_gstate {
uint32_t tag;
lfs_block_t pair[2];
} gstate, gpending, gdelta;
struct lfs_free {
lfs_block_t off;
@@ -421,7 +375,6 @@ typedef struct lfs {
/// Filesystem functions ///
#ifndef LFS_READONLY
// Format a block device with the littlefs
//
// Requires a littlefs object and config struct. This clobbers the littlefs
@@ -430,7 +383,6 @@ typedef struct lfs {
//
// Returns a negative error code on failure.
int lfs_format(lfs_t *lfs, const struct lfs_config *config);
#endif
// Mounts a littlefs
//
@@ -450,15 +402,12 @@ int lfs_unmount(lfs_t *lfs);
/// General operations ///
#ifndef LFS_READONLY
// Removes a file or directory
//
// If removing a directory, the directory must be empty.
// Returns a negative error code on failure.
int lfs_remove(lfs_t *lfs, const char *path);
#endif
#ifndef LFS_READONLY
// Rename or move a file or directory
//
// If the destination exists, it must match the source in type.
@@ -466,7 +415,6 @@ int lfs_remove(lfs_t *lfs, const char *path);
//
// Returns a negative error code on failure.
int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath);
#endif
// Find info about a file or directory
//
@@ -489,7 +437,6 @@ int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info);
lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
uint8_t type, void *buffer, lfs_size_t size);
#ifndef LFS_READONLY
// Set custom attributes
//
// Custom attributes are uniquely identified by an 8-bit type and limited
@@ -499,16 +446,13 @@ lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
// Returns a negative error code on failure.
int lfs_setattr(lfs_t *lfs, const char *path,
uint8_t type, const void *buffer, lfs_size_t size);
#endif
#ifndef LFS_READONLY
// Removes a custom attribute
//
// If an attribute is not found, nothing happens.
//
// Returns a negative error code on failure.
int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type);
#endif
/// File operations ///
@@ -557,7 +501,6 @@ int lfs_file_sync(lfs_t *lfs, lfs_file_t *file);
lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
void *buffer, lfs_size_t size);
#ifndef LFS_READONLY
// Write data to file
//
// Takes a buffer and size indicating the data to write. The file will not
@@ -566,7 +509,6 @@ lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
// Returns the number of bytes written, or a negative error code on failure.
lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
const void *buffer, lfs_size_t size);
#endif
// Change the position of the file
//
@@ -575,12 +517,10 @@ lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
lfs_soff_t off, int whence);
#ifndef LFS_READONLY
// Truncates the size of the file to the specified size
//
// Returns a negative error code on failure.
int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size);
#endif
// Return the position of the file
//
@@ -603,12 +543,10 @@ lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file);
/// Directory operations ///
#ifndef LFS_READONLY
// Create a directory
//
// Returns a negative error code on failure.
int lfs_mkdir(lfs_t *lfs, const char *path);
#endif
// Open a directory
//
@@ -670,7 +608,6 @@ lfs_ssize_t lfs_fs_size(lfs_t *lfs);
// Returns a negative error code on failure.
int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
#ifndef LFS_READONLY
#ifdef LFS_MIGRATE
// Attempts to migrate a previous version of littlefs
//
@@ -685,7 +622,6 @@ int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
// Returns a negative error code on failure.
int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg);
#endif
#endif
#ifdef __cplusplus

View File

@@ -7,7 +7,7 @@
#include "lfs_util.h"
// Only compile if user does not provide custom config
#ifndef LFS_CONFIG
#ifndef LFS_UTIL
// Software CRC implementation with small lookup table

View File

@@ -3,20 +3,21 @@
*
* Copyright (c) 2017, Arm Limited. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Can be overridden by users with their own configuration by defining
* LFS_UTIL as a header file (-DLFS_UTIL=my_lfs_util.h)
*
 * If LFS_UTIL is defined, none of the default definitions will be emitted;
 * they must instead be provided by the user's header file. To start, I would
 * suggest copying lfs_util.h and modifying as needed.
*/
#ifndef LFS_UTIL_H
#define LFS_UTIL_H
// Users can override lfs_util.h with their own configuration by defining
// LFS_CONFIG as a header file to include (-DLFS_CONFIG=lfs_config.h).
//
// If LFS_CONFIG is used, none of the default utils will be emitted and must be
// provided by the config file. To start, I would suggest copying lfs_util.h
// and modifying as needed.
#ifdef LFS_CONFIG
#ifdef LFS_UTIL
#define LFS_STRINGIZE(x) LFS_STRINGIZE2(x)
#define LFS_STRINGIZE2(x) #x
#include LFS_STRINGIZE(LFS_CONFIG)
#include LFS_STRINGIZE(LFS_UTIL)
#else
// System includes
@@ -44,59 +45,67 @@ extern "C"
#endif
// Possible error codes; these are negative to allow valid positive
// return values. They may be redefined to system error codes as long as
// they are negative.
enum lfs_error {
LFS_ERR_OK = 0, // No error
LFS_ERR_IO = -5, // Error during device operation
LFS_ERR_CORRUPT = -84, // Corrupted
LFS_ERR_NOENT = -2, // No directory entry
LFS_ERR_EXIST = -17, // Entry already exists
LFS_ERR_NOTDIR = -20, // Entry is not a dir
LFS_ERR_ISDIR = -21, // Entry is a dir
LFS_ERR_NOTEMPTY = -39, // Dir is not empty
LFS_ERR_BADF = -9, // Bad file number
LFS_ERR_FBIG = -27, // File too large
LFS_ERR_INVAL = -22, // Invalid parameter
LFS_ERR_NOSPC = -28, // No space left on device
LFS_ERR_NOMEM = -12, // No more memory available
LFS_ERR_NOATTR = -61, // No data/attr available
LFS_ERR_NAMETOOLONG = -36, // File name too long
};
// Macros, may be replaced by system specific wrappers. Arguments to these
// macros must not have side-effects as the macros can be removed for a smaller
// code footprint
// Logging functions
#ifndef LFS_TRACE
#ifdef LFS_YES_TRACE
#define LFS_TRACE_(fmt, ...) \
printf("%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
#define LFS_TRACE(fmt, ...) \
printf("lfs_trace:%d: " fmt "\n", __LINE__, __VA_ARGS__)
#else
#define LFS_TRACE(...)
#endif
#define LFS_TRACE(fmt, ...)
#endif
#ifndef LFS_DEBUG
#ifndef LFS_NO_DEBUG
#define LFS_DEBUG_(fmt, ...) \
printf("%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
#define LFS_DEBUG(...) LFS_DEBUG_(__VA_ARGS__, "")
#define LFS_DEBUG(fmt, ...) \
printf("lfs_debug:%d: " fmt "\n", __LINE__, __VA_ARGS__)
#else
#define LFS_DEBUG(...)
#endif
#define LFS_DEBUG(fmt, ...)
#endif
#ifndef LFS_WARN
#ifndef LFS_NO_WARN
#define LFS_WARN_(fmt, ...) \
printf("%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
#define LFS_WARN(...) LFS_WARN_(__VA_ARGS__, "")
#define LFS_WARN(fmt, ...) \
printf("lfs_warn:%d: " fmt "\n", __LINE__, __VA_ARGS__)
#else
#define LFS_WARN(...)
#endif
#define LFS_WARN(fmt, ...)
#endif
#ifndef LFS_ERROR
#ifndef LFS_NO_ERROR
#define LFS_ERROR_(fmt, ...) \
printf("%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
#define LFS_ERROR(...) LFS_ERROR_(__VA_ARGS__, "")
#define LFS_ERROR(fmt, ...) \
printf("lfs_error:%d: " fmt "\n", __LINE__, __VA_ARGS__)
#else
#define LFS_ERROR(...)
#endif
#define LFS_ERROR(fmt, ...)
#endif
// Runtime assertions
#ifndef LFS_ASSERT
#ifndef LFS_NO_ASSERT
#define LFS_ASSERT(test) assert(test)
#else
#define LFS_ASSERT(test)
#endif
#endif
// Builtin functions, these may be replaced by more efficient
@@ -121,7 +130,7 @@ static inline uint32_t lfs_alignup(uint32_t a, uint32_t alignment) {
return lfs_aligndown(a + alignment-1, alignment);
}
// Find the smallest power of 2 greater than or equal to a
// Find the next smallest power of 2 less than or equal to a
static inline uint32_t lfs_npw2(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
return 32 - __builtin_clz(a-1);
@@ -160,8 +169,8 @@ static inline uint32_t lfs_popc(uint32_t a) {
// Find the sequence comparison of a and b, this is the distance
// between a and b ignoring overflow
static inline int lfs_scmp(uint32_t a, uint32_t b) {
return (int)(unsigned)(a - b);
static inline int32_t lfs_scmp(uint32_t a, uint32_t b) {
return (int32_t)(uint32_t)(a - b);
}
// Convert between 32-bit little-endian and native order
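Since LFS_UTIL swaps out this entire header, an override must supply everything lfs_util.h normally defines. As a hedged illustration, the fragment below sketches only the logging and assert portion of a hypothetical my_lfs_util.h (my_log and my_panic are made-up platform hooks), selected with -DLFS_UTIL=my_lfs_util.h; the error codes, builtins, and allocator wrappers would follow in the same file:

// Fragment of a hypothetical my_lfs_util.h override (not a complete header).
#ifndef MY_LFS_UTIL_H
#define MY_LFS_UTIL_H

#include <stdint.h>
#include <stdbool.h>
#include <string.h>

// route littlefs logging into an assumed platform logger instead of printf
void my_log(const char *fmt, ...);
#define LFS_TRACE(fmt, ...)
#define LFS_DEBUG(fmt, ...) my_log("lfs debug: " fmt, __VA_ARGS__)
#define LFS_WARN(fmt, ...)  my_log("lfs warn: "  fmt, __VA_ARGS__)
#define LFS_ERROR(fmt, ...) my_log("lfs error: " fmt, __VA_ARGS__)

// map asserts onto an assumed platform fault handler
void my_panic(const char *file, int line);
#define LFS_ASSERT(test) ((test) ? (void)0 : my_panic(__FILE__, __LINE__))

// error codes may be remapped to system codes as long as they stay negative,
// e.g. LFS_ERR_NOMEM could be defined to the platform's -ENOMEM
// ... remaining lfs_util.h definitions (builtins, malloc, crc32, etc.) ...

#endif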

View File

@@ -1,284 +0,0 @@
#!/usr/bin/env python3
#
# Script to find code size at the function level. Basically just a bit wrapper
# around nm with some extra conveniences for comparing builds. Heavily inspired
# by Linux's Bloat-O-Meter.
#
import os
import glob
import itertools as it
import subprocess as sp
import shlex
import re
import csv
import collections as co
import sys
OBJ_PATHS = ['*.o']
def collect(paths, **args):
results = co.defaultdict(lambda: 0)
pattern = re.compile(
'^(?P<size>[0-9a-fA-F]+)' +
' (?P<type>[%s])' % re.escape(args['type']) +
' (?P<func>.+?)$')
for path in paths:
# note nm-tool may contain extra args
cmd = args['nm_tool'] + ['--size-sort', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace')
for line in proc.stdout:
m = pattern.match(line)
if m:
results[(path, m.group('func'))] += int(m.group('size'), 16)
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
sys.exit(-1)
flat_results = []
for (file, func), size in results.items():
# map to source files
if args.get('build_dir'):
file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
# replace .o with .c, different scripts report .o/.c, we need to
# choose one if we want to deduplicate csv files
file = re.sub('\.o$', '.c', file)
# discard internal functions
if not args.get('everything'):
if func.startswith('__'):
continue
# discard .8449 suffixes created by optimizer
func = re.sub('\.[0-9]+', '', func)
flat_results.append((file, func, size))
return flat_results
def main(**args):
def openio(path, mode='r'):
if path == '-':
if 'r' in mode:
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
else:
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
else:
return open(path, mode)
# find sizes
if not args.get('use', None):
# find .o files
paths = []
for path in args['obj_paths']:
if os.path.isdir(path):
path = path + '/*.o'
for path in glob.glob(path):
paths.append(path)
if not paths:
print('no .obj files found in %r?' % args['obj_paths'])
sys.exit(-1)
results = collect(paths, **args)
else:
with openio(args['use']) as f:
r = csv.DictReader(f)
results = [
( result['file'],
result['name'],
int(result['code_size']))
for result in r
if result.get('code_size') not in {None, ''}]
total = 0
for _, _, size in results:
total += size
# find previous results?
if args.get('diff'):
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
prev_results = [
( result['file'],
result['name'],
int(result['code_size']))
for result in r
if result.get('code_size') not in {None, ''}]
except FileNotFoundError:
prev_results = []
prev_total = 0
for _, _, size in prev_results:
prev_total += size
# write results to CSV
if args.get('output'):
merged_results = co.defaultdict(lambda: {})
other_fields = []
# merge?
if args.get('merge'):
try:
with openio(args['merge']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
func = result.pop('name', '')
result.pop('code_size', None)
merged_results[(file, func)] = result
other_fields = result.keys()
except FileNotFoundError:
pass
for file, func, size in results:
merged_results[(file, func)]['code_size'] = size
with openio(args['output'], 'w') as f:
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'code_size'])
w.writeheader()
for (file, func), result in sorted(merged_results.items()):
w.writerow({'file': file, 'name': func, **result})
# print results
def dedup_entries(results, by='name'):
entries = co.defaultdict(lambda: 0)
for file, func, size in results:
entry = (file if by == 'file' else func)
entries[entry] += size
return entries
def diff_entries(olds, news):
diff = co.defaultdict(lambda: (0, 0, 0, 0))
for name, new in news.items():
diff[name] = (0, new, new, 1.0)
for name, old in olds.items():
_, new, _, _ = diff[name]
diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
return diff
def sorted_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1], x))
else:
return sorted(entries)
def sorted_diff_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1][1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1][1], x))
else:
return sorted(entries, key=lambda x: (-x[1][3], x))
def print_header(by=''):
if not args.get('diff'):
print('%-36s %7s' % (by, 'size'))
else:
print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
def print_entry(name, size):
print("%-36s %7d" % (name, size))
def print_diff_entry(name, old, new, diff, ratio):
print("%-36s %7s %7s %+7d%s" % (name,
old or "-",
new or "-",
diff,
' (%+.1f%%)' % (100*ratio) if ratio else ''))
def print_entries(by='name'):
entries = dedup_entries(results, by=by)
if not args.get('diff'):
print_header(by=by)
for name, size in sorted_entries(entries.items()):
print_entry(name, size)
else:
prev_entries = dedup_entries(prev_results, by=by)
diff = diff_entries(prev_entries, entries)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for old, _, _, _ in diff.values() if not old),
sum(1 for _, new, _, _ in diff.values() if not new)))
for name, (old, new, diff, ratio) in sorted_diff_entries(
diff.items()):
if ratio or args.get('all'):
print_diff_entry(name, old, new, diff, ratio)
def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total)
else:
ratio = (0.0 if not prev_total and not total
else 1.0 if not prev_total
else (total-prev_total)/prev_total)
print_diff_entry('TOTAL',
prev_total, total,
total-prev_total,
ratio)
if args.get('quiet'):
pass
elif args.get('summary'):
print_header()
print_totals()
elif args.get('files'):
print_entries(by='file')
print_totals()
else:
print_entries(by='name')
print_totals()
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Find code size at the function level.")
parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
help="Description of where to find *.o files. May be a directory \
or a list of paths. Defaults to %r." % OBJ_PATHS)
parser.add_argument('-v', '--verbose', action='store_true',
help="Output commands that run behind the scenes.")
parser.add_argument('-q', '--quiet', action='store_true',
help="Don't show anything, useful with -o.")
parser.add_argument('-o', '--output',
help="Specify CSV file to store results.")
parser.add_argument('-u', '--use',
help="Don't compile and find code sizes, instead use this CSV file.")
parser.add_argument('-d', '--diff',
help="Specify CSV file to diff code size against.")
parser.add_argument('-m', '--merge',
help="Merge with an existing CSV file when writing to output.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all functions, not just the ones that changed.")
parser.add_argument('-A', '--everything', action='store_true',
help="Include builtin and libc specific symbols.")
parser.add_argument('-s', '--size-sort', action='store_true',
help="Sort by size.")
parser.add_argument('-S', '--reverse-size-sort', action='store_true',
help="Sort by size, but backwards.")
parser.add_argument('-F', '--files', action='store_true',
help="Show file-level code sizes. Note this does not include padding! "
"So sizes may differ from other tools.")
parser.add_argument('-Y', '--summary', action='store_true',
help="Only show the total code size.")
parser.add_argument('--type', default='tTrRdD',
help="Type of symbols to report, this uses the same single-character "
"type-names emitted by nm. Defaults to %(default)r.")
parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
help="Path to the nm tool to use.")
parser.add_argument('--build-dir',
help="Specify the relative build directory. Used to map object files \
to the correct source files.")
sys.exit(main(**vars(parser.parse_args())))

44
scripts/corrupt.py Executable file
View File

@@ -0,0 +1,44 @@
#!/usr/bin/env python2
import struct
import sys
import os
import argparse
def corrupt(block):
with open(block, 'r+b') as file:
# skip rev
file.read(4)
# go to last commit
tag = 0xffffffff
while True:
try:
ntag, = struct.unpack('>I', file.read(4))
except struct.error:
break
tag ^= ntag
size = (tag & 0x3ff) if (tag & 0x3ff) != 0x3ff else 0
file.seek(size, os.SEEK_CUR)
# lob off last 3 bytes
file.seek(-(size + 3), os.SEEK_CUR)
file.truncate()
def main(args):
if args.n or not args.blocks:
with open('blocks/.history', 'rb') as file:
for i in range(int(args.n or 1)):
last, = struct.unpack('<I', file.read(4))
args.blocks.append('blocks/%x' % last)
for block in args.blocks:
print 'corrupting %s' % block
corrupt(block)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-n')
parser.add_argument('blocks', nargs='*')
main(parser.parse_args())

View File

@@ -1,323 +0,0 @@
#!/usr/bin/env python3
#
# Parse and report coverage info from .info files generated by lcov
#
import os
import glob
import csv
import re
import collections as co
import bisect as b
import sys
INFO_PATHS = ['tests/*.toml.info']
def collect(paths, **args):
file = None
funcs = []
lines = co.defaultdict(lambda: 0)
pattern = re.compile(
'^(?P<file>SF:/?(?P<file_name>.*))$'
'|^(?P<func>FN:(?P<func_lineno>[0-9]*),(?P<func_name>.*))$'
'|^(?P<line>DA:(?P<line_lineno>[0-9]*),(?P<line_hits>[0-9]*))$')
for path in paths:
with open(path) as f:
for line in f:
m = pattern.match(line)
if m and m.group('file'):
file = m.group('file_name')
elif m and file and m.group('func'):
funcs.append((file, int(m.group('func_lineno')),
m.group('func_name')))
elif m and file and m.group('line'):
lines[(file, int(m.group('line_lineno')))] += (
int(m.group('line_hits')))
# map line numbers to functions
funcs.sort()
def func_from_lineno(file, lineno):
i = b.bisect(funcs, (file, lineno))
if i and funcs[i-1][0] == file:
return funcs[i-1][2]
else:
return None
# reduce to function info
reduced_funcs = co.defaultdict(lambda: (0, 0))
for (file, line_lineno), line_hits in lines.items():
func = func_from_lineno(file, line_lineno)
if not func:
continue
hits, count = reduced_funcs[(file, func)]
reduced_funcs[(file, func)] = (hits + (line_hits > 0), count + 1)
results = []
for (file, func), (hits, count) in reduced_funcs.items():
# discard internal/testing functions (test_* injected with
# internal testing)
if not args.get('everything'):
if func.startswith('__') or func.startswith('test_'):
continue
# discard .8449 suffixes created by optimizer
func = re.sub('\.[0-9]+', '', func)
results.append((file, func, hits, count))
return results
def main(**args):
def openio(path, mode='r'):
if path == '-':
if 'r' in mode:
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
else:
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
else:
return open(path, mode)
# find coverage
if not args.get('use'):
# find *.info files
paths = []
for path in args['info_paths']:
if os.path.isdir(path):
path = path + '/*.gcov'
for path in glob.glob(path):
paths.append(path)
if not paths:
print('no .info files found in %r?' % args['info_paths'])
sys.exit(-1)
results = collect(paths, **args)
else:
with openio(args['use']) as f:
r = csv.DictReader(f)
results = [
( result['file'],
result['name'],
int(result['coverage_hits']),
int(result['coverage_count']))
for result in r
if result.get('coverage_hits') not in {None, ''}
if result.get('coverage_count') not in {None, ''}]
total_hits, total_count = 0, 0
for _, _, hits, count in results:
total_hits += hits
total_count += count
# find previous results?
if args.get('diff'):
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
prev_results = [
( result['file'],
result['name'],
int(result['coverage_hits']),
int(result['coverage_count']))
for result in r
if result.get('coverage_hits') not in {None, ''}
if result.get('coverage_count') not in {None, ''}]
except FileNotFoundError:
prev_results = []
prev_total_hits, prev_total_count = 0, 0
for _, _, hits, count in prev_results:
prev_total_hits += hits
prev_total_count += count
# write results to CSV
if args.get('output'):
merged_results = co.defaultdict(lambda: {})
other_fields = []
# merge?
if args.get('merge'):
try:
with openio(args['merge']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
func = result.pop('name', '')
result.pop('coverage_hits', None)
result.pop('coverage_count', None)
merged_results[(file, func)] = result
other_fields = result.keys()
except FileNotFoundError:
pass
for file, func, hits, count in results:
merged_results[(file, func)]['coverage_hits'] = hits
merged_results[(file, func)]['coverage_count'] = count
with openio(args['output'], 'w') as f:
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'coverage_hits', 'coverage_count'])
w.writeheader()
for (file, func), result in sorted(merged_results.items()):
w.writerow({'file': file, 'name': func, **result})
# print results
def dedup_entries(results, by='name'):
entries = co.defaultdict(lambda: (0, 0))
for file, func, hits, count in results:
entry = (file if by == 'file' else func)
entry_hits, entry_count = entries[entry]
entries[entry] = (entry_hits + hits, entry_count + count)
return entries
def diff_entries(olds, news):
diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0))
for name, (new_hits, new_count) in news.items():
diff[name] = (
0, 0,
new_hits, new_count,
new_hits, new_count,
(new_hits/new_count if new_count else 1.0) - 1.0)
for name, (old_hits, old_count) in olds.items():
_, _, new_hits, new_count, _, _, _ = diff[name]
diff[name] = (
old_hits, old_count,
new_hits, new_count,
new_hits-old_hits, new_count-old_count,
((new_hits/new_count if new_count else 1.0)
- (old_hits/old_count if old_count else 1.0)))
return diff
def sorted_entries(entries):
if args.get('coverage_sort'):
return sorted(entries, key=lambda x: (-(x[1][0]/x[1][1] if x[1][1] else -1), x))
elif args.get('reverse_coverage_sort'):
return sorted(entries, key=lambda x: (+(x[1][0]/x[1][1] if x[1][1] else -1), x))
else:
return sorted(entries)
def sorted_diff_entries(entries):
if args.get('coverage_sort'):
return sorted(entries, key=lambda x: (-(x[1][2]/x[1][3] if x[1][3] else -1), x))
elif args.get('reverse_coverage_sort'):
return sorted(entries, key=lambda x: (+(x[1][2]/x[1][3] if x[1][3] else -1), x))
else:
return sorted(entries, key=lambda x: (-x[1][6], x))
def print_header(by=''):
if not args.get('diff'):
print('%-36s %19s' % (by, 'hits/line'))
else:
print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))
def print_entry(name, hits, count):
print("%-36s %11s %7s" % (name,
'%d/%d' % (hits, count)
if count else '-',
'%.1f%%' % (100*hits/count)
if count else '-'))
def print_diff_entry(name,
old_hits, old_count,
new_hits, new_count,
diff_hits, diff_count,
ratio):
print("%-36s %11s %7s %11s %7s %11s%s" % (name,
'%d/%d' % (old_hits, old_count)
if old_count else '-',
'%.1f%%' % (100*old_hits/old_count)
if old_count else '-',
'%d/%d' % (new_hits, new_count)
if new_count else '-',
'%.1f%%' % (100*new_hits/new_count)
if new_count else '-',
'%+d/%+d' % (diff_hits, diff_count),
' (%+.1f%%)' % (100*ratio) if ratio else ''))
def print_entries(by='name'):
entries = dedup_entries(results, by=by)
if not args.get('diff'):
print_header(by=by)
for name, (hits, count) in sorted_entries(entries.items()):
print_entry(name, hits, count)
else:
prev_entries = dedup_entries(prev_results, by=by)
diff = diff_entries(prev_entries, entries)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for _, old, _, _, _, _, _ in diff.values() if not old),
sum(1 for _, _, _, new, _, _, _ in diff.values() if not new)))
for name, (
old_hits, old_count,
new_hits, new_count,
diff_hits, diff_count, ratio) in sorted_diff_entries(
diff.items()):
if ratio or args.get('all'):
print_diff_entry(name,
old_hits, old_count,
new_hits, new_count,
diff_hits, diff_count,
ratio)
def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total_hits, total_count)
else:
ratio = ((total_hits/total_count
if total_count else 1.0)
- (prev_total_hits/prev_total_count
if prev_total_count else 1.0))
print_diff_entry('TOTAL',
prev_total_hits, prev_total_count,
total_hits, total_count,
total_hits-prev_total_hits, total_count-prev_total_count,
ratio)
if args.get('quiet'):
pass
elif args.get('summary'):
print_header()
print_totals()
elif args.get('files'):
print_entries(by='file')
print_totals()
else:
print_entries(by='name')
print_totals()
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Parse and report coverage info from .info files \
generated by lcov")
parser.add_argument('info_paths', nargs='*', default=INFO_PATHS,
help="Description of where to find *.info files. May be a directory \
or list of paths. *.info files will be merged to show the total \
coverage. Defaults to %r." % INFO_PATHS)
parser.add_argument('-v', '--verbose', action='store_true',
help="Output commands that run behind the scenes.")
parser.add_argument('-o', '--output',
help="Specify CSV file to store results.")
parser.add_argument('-u', '--use',
help="Don't do any work, instead use this CSV file.")
parser.add_argument('-d', '--diff',
help="Specify CSV file to diff code size against.")
parser.add_argument('-m', '--merge',
help="Merge with an existing CSV file when writing to output.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all functions, not just the ones that changed.")
parser.add_argument('-A', '--everything', action='store_true',
help="Include builtin and libc specific symbols.")
parser.add_argument('-s', '--coverage-sort', action='store_true',
help="Sort by coverage.")
parser.add_argument('-S', '--reverse-coverage-sort', action='store_true',
help="Sort by coverage, but backwards.")
parser.add_argument('-F', '--files', action='store_true',
help="Show file-level coverage.")
parser.add_argument('-Y', '--summary', action='store_true',
help="Only show the total coverage.")
parser.add_argument('-q', '--quiet', action='store_true',
help="Don't show anything, useful with -o.")
parser.add_argument('--build-dir',
help="Specify the relative build directory. Used to map object files \
to the correct source files.")
sys.exit(main(**vars(parser.parse_args())))
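As a quick illustration of the diff output above, the TOTAL ratio is simply the change in coverage fraction; a minimal sketch with hypothetical hit counts:

    old_hits, old_count = 75, 100    # hypothetical: 75.0% coverage before
    new_hits, new_count = 90, 100    # hypothetical: 90.0% coverage after
    ratio = ((new_hits/new_count if new_count else 1.0)
        - (old_hits/old_count if old_count else 1.0))
    print('%+.1f%%' % (100*ratio))   # -> +15.0%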

View File

@@ -1,283 +0,0 @@
#!/usr/bin/env python3
#
# Script to find data size at the function level. Basically just a thin wrapper
# around nm with some extra conveniences for comparing builds. Heavily inspired
# by Linux's Bloat-O-Meter.
#
import os
import glob
import itertools as it
import subprocess as sp
import shlex
import re
import csv
import collections as co
OBJ_PATHS = ['*.o']
def collect(paths, **args):
results = co.defaultdict(lambda: 0)
pattern = re.compile(
'^(?P<size>[0-9a-fA-F]+)' +
' (?P<type>[%s])' % re.escape(args['type']) +
' (?P<func>.+?)$')
for path in paths:
# note nm-tool may contain extra args
cmd = args['nm_tool'] + ['--size-sort', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace')
for line in proc.stdout:
m = pattern.match(line)
if m:
results[(path, m.group('func'))] += int(m.group('size'), 16)
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
sys.exit(-1)
flat_results = []
for (file, func), size in results.items():
# map to source files
if args.get('build_dir'):
file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
# replace .o with .c, different scripts report .o/.c, we need to
# choose one if we want to deduplicate csv files
file = re.sub('\.o$', '.c', file)
# discard internal functions
if not args.get('everything'):
if func.startswith('__'):
continue
# discard .8449 suffixes created by optimizer
func = re.sub('\.[0-9]+', '', func)
flat_results.append((file, func, size))
return flat_results
def main(**args):
def openio(path, mode='r'):
if path == '-':
if 'r' in mode:
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
else:
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
else:
return open(path, mode)
# find sizes
if not args.get('use', None):
# find .o files
paths = []
for path in args['obj_paths']:
if os.path.isdir(path):
path = path + '/*.o'
for path in glob.glob(path):
paths.append(path)
if not paths:
print('no .o files found in %r?' % args['obj_paths'])
sys.exit(-1)
results = collect(paths, **args)
else:
with openio(args['use']) as f:
r = csv.DictReader(f)
results = [
( result['file'],
result['name'],
int(result['data_size']))
for result in r
if result.get('data_size') not in {None, ''}]
total = 0
for _, _, size in results:
total += size
# find previous results?
if args.get('diff'):
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
prev_results = [
( result['file'],
result['name'],
int(result['data_size']))
for result in r
if result.get('data_size') not in {None, ''}]
except FileNotFoundError:
prev_results = []
prev_total = 0
for _, _, size in prev_results:
prev_total += size
# write results to CSV
if args.get('output'):
merged_results = co.defaultdict(lambda: {})
other_fields = []
# merge?
if args.get('merge'):
try:
with openio(args['merge']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
func = result.pop('name', '')
result.pop('data_size', None)
merged_results[(file, func)] = result
other_fields = result.keys()
except FileNotFoundError:
pass
for file, func, size in results:
merged_results[(file, func)]['data_size'] = size
with openio(args['output'], 'w') as f:
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size'])
w.writeheader()
for (file, func), result in sorted(merged_results.items()):
w.writerow({'file': file, 'name': func, **result})
# print results
def dedup_entries(results, by='name'):
entries = co.defaultdict(lambda: 0)
for file, func, size in results:
entry = (file if by == 'file' else func)
entries[entry] += size
return entries
def diff_entries(olds, news):
diff = co.defaultdict(lambda: (0, 0, 0, 0))
for name, new in news.items():
diff[name] = (0, new, new, 1.0)
for name, old in olds.items():
_, new, _, _ = diff[name]
diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
return diff
def sorted_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1], x))
else:
return sorted(entries)
def sorted_diff_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1][1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1][1], x))
else:
return sorted(entries, key=lambda x: (-x[1][3], x))
def print_header(by=''):
if not args.get('diff'):
print('%-36s %7s' % (by, 'size'))
else:
print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
def print_entry(name, size):
print("%-36s %7d" % (name, size))
def print_diff_entry(name, old, new, diff, ratio):
print("%-36s %7s %7s %+7d%s" % (name,
old or "-",
new or "-",
diff,
' (%+.1f%%)' % (100*ratio) if ratio else ''))
def print_entries(by='name'):
entries = dedup_entries(results, by=by)
if not args.get('diff'):
print_header(by=by)
for name, size in sorted_entries(entries.items()):
print_entry(name, size)
else:
prev_entries = dedup_entries(prev_results, by=by)
diff = diff_entries(prev_entries, entries)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for old, _, _, _ in diff.values() if not old),
sum(1 for _, new, _, _ in diff.values() if not new)))
for name, (old, new, diff, ratio) in sorted_diff_entries(
diff.items()):
if ratio or args.get('all'):
print_diff_entry(name, old, new, diff, ratio)
def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total)
else:
ratio = (0.0 if not prev_total and not total
else 1.0 if not prev_total
else (total-prev_total)/prev_total)
print_diff_entry('TOTAL',
prev_total, total,
total-prev_total,
ratio)
if args.get('quiet'):
pass
elif args.get('summary'):
print_header()
print_totals()
elif args.get('files'):
print_entries(by='file')
print_totals()
else:
print_entries(by='name')
print_totals()
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Find data size at the function level.")
parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
help="Description of where to find *.o files. May be a directory \
or a list of paths. Defaults to %r." % OBJ_PATHS)
parser.add_argument('-v', '--verbose', action='store_true',
help="Output commands that run behind the scenes.")
parser.add_argument('-q', '--quiet', action='store_true',
help="Don't show anything, useful with -o.")
parser.add_argument('-o', '--output',
help="Specify CSV file to store results.")
parser.add_argument('-u', '--use',
help="Don't compile and find data sizes, instead use this CSV file.")
parser.add_argument('-d', '--diff',
help="Specify CSV file to diff data size against.")
parser.add_argument('-m', '--merge',
help="Merge with an existing CSV file when writing to output.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all functions, not just the ones that changed.")
parser.add_argument('-A', '--everything', action='store_true',
help="Include builtin and libc specific symbols.")
parser.add_argument('-s', '--size-sort', action='store_true',
help="Sort by size.")
parser.add_argument('-S', '--reverse-size-sort', action='store_true',
help="Sort by size, but backwards.")
parser.add_argument('-F', '--files', action='store_true',
help="Show file-level data sizes. Note this does not include padding! "
"So sizes may differ from other tools.")
parser.add_argument('-Y', '--summary', action='store_true',
help="Only show the total data size.")
parser.add_argument('--type', default='dDbB',
help="Type of symbols to report, this uses the same single-character "
"type-names emitted by nm. Defaults to %(default)r.")
parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
help="Path to the nm tool to use.")
parser.add_argument('--build-dir',
help="Specify the relative build directory. Used to map object files \
to the correct source files.")
sys.exit(main(**vars(parser.parse_args())))
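For reference, a minimal sketch of what collect() above extracts from a single nm --size-sort line; the symbol name and size here are hypothetical:

    import re
    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)'
        ' (?P<type>[dDbB])'        # default --type is 'dDbB'
        ' (?P<func>.+?)$')
    m = pattern.match('00000204 d lfs_crc_table')        # hypothetical nm output
    print(m.group('func'), int(m.group('size'), 16))     # -> lfs_crc_table 516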

112
scripts/debug.py Executable file
View File

@@ -0,0 +1,112 @@
#!/usr/bin/env python2
import struct
import binascii
TYPES = {
(0x700, 0x400): 'splice',
(0x7ff, 0x401): 'create',
(0x7ff, 0x4ff): 'delete',
(0x700, 0x000): 'name',
(0x7ff, 0x001): 'name reg',
(0x7ff, 0x002): 'name dir',
(0x7ff, 0x0ff): 'name superblock',
(0x700, 0x200): 'struct',
(0x7ff, 0x200): 'struct dir',
(0x7ff, 0x202): 'struct ctz',
(0x7ff, 0x201): 'struct inline',
(0x700, 0x300): 'userattr',
(0x700, 0x600): 'tail',
(0x7ff, 0x600): 'tail soft',
(0x7ff, 0x601): 'tail hard',
(0x700, 0x700): 'gstate',
(0x7ff, 0x7ff): 'gstate move',
(0x700, 0x500): 'crc',
}
def typeof(type):
for prefix in range(12):
mask = 0x7ff & ~((1 << prefix)-1)
if (mask, type & mask) in TYPES:
return TYPES[mask, type & mask] + (
' %0*x' % (prefix/4, type & ((1 << prefix)-1))
if prefix else '')
else:
return '%02x' % type
def main(*blocks):
# find most recent block
file = None
rev = None
crc = None
versions = []
for block in blocks:
try:
nfile = open(block, 'rb')
ndata = nfile.read(4)
ncrc = binascii.crc32(ndata)
nrev, = struct.unpack('<I', ndata)
assert rev != nrev
if not file or ((rev - nrev) & 0x80000000):
file = nfile
rev = nrev
crc = ncrc
versions.append((nrev, '%s (rev %d)' % (block, nrev)))
except (IOError, struct.error):
pass
if not file:
print 'Bad metadata pair {%s}' % ', '.join(blocks)
return 1
print "--- %s ---" % ', '.join(v for _,v in sorted(versions, reverse=True))
# go through each tag, print useful information
print "%-4s %-8s %-14s %3s %4s %s" % (
'off', 'tag', 'type', 'id', 'len', 'dump')
tag = 0xffffffff
off = 4
while True:
try:
data = file.read(4)
crc = binascii.crc32(data, crc)
ntag, = struct.unpack('>I', data)
except struct.error:
break
tag ^= ntag
off += 4
type = (tag & 0x7ff00000) >> 20
id = (tag & 0x000ffc00) >> 10
size = (tag & 0x000003ff) >> 0
iscrc = (type & 0x700) == 0x500
data = file.read(size if size != 0x3ff else 0)
if iscrc:
crc = binascii.crc32(data[:4], crc)
else:
crc = binascii.crc32(data, crc)
print '%04x: %08x %-15s %3s %4s %-23s %-8s' % (
off, tag,
typeof(type) + (' bad!' if iscrc and ~crc else ''),
hex(id)[2:] if id != 0x3ff else '.',
size if size != 0x3ff else 'x',
' '.join('%02x' % ord(c) for c in data[:8]),
''.join(c if c >= ' ' and c <= '~' else '.' for c in data[:8]))
off += size if size != 0x3ff else 0
if iscrc:
crc = 0
tag ^= (type & 1) << 31
return 0
if __name__ == "__main__":
import sys
sys.exit(main(*sys.argv[1:]))
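A minimal sketch of the tag decoding in the loop above, using a hypothetical tag value:

    tag = (0x002 << 20) | (1 << 10) | 5   # hypothetical 'name dir' tag, id 1, len 5
    type = (tag & 0x7ff00000) >> 20       # -> 0x002, reported by typeof() as 'name dir'
    id   = (tag & 0x000ffc00) >> 10       # -> 1
    size = (tag & 0x000003ff) >> 0        # -> 5 (0x3ff means no attached data)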

View File

@@ -1,383 +0,0 @@
#!/usr/bin/env python3
import re
import sys
PATTERN = ['LFS_ASSERT', 'assert']
PREFIX = 'LFS'
MAXWIDTH = 16
ASSERT = "__{PREFIX}_ASSERT_{TYPE}_{COMP}"
FAIL = """
__attribute__((unused))
static void __{prefix}_assert_fail_{type}(
const char *file, int line, const char *comp,
{ctype} lh, size_t lsize,
{ctype} rh, size_t rsize) {{
printf("%s:%d:assert: assert failed with ", file, line);
__{prefix}_assert_print_{type}(lh, lsize);
printf(", expected %s ", comp);
__{prefix}_assert_print_{type}(rh, rsize);
printf("\\n");
fflush(NULL);
raise(SIGABRT);
}}
"""
COMP = {
'==': 'eq',
'!=': 'ne',
'<=': 'le',
'>=': 'ge',
'<': 'lt',
'>': 'gt',
}
TYPE = {
'int': {
'ctype': 'intmax_t',
'fail': FAIL,
'print': """
__attribute__((unused))
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
(void)size;
printf("%"PRIiMAX, v);
}}
""",
'assert': """
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
do {{
__typeof__(lh) _lh = lh;
__typeof__(lh) _rh = (__typeof__(lh))rh;
if (!(_lh {op} _rh)) {{
__{prefix}_assert_fail_{type}(file, line, "{comp}",
(intmax_t)_lh, 0, (intmax_t)_rh, 0);
}}
}} while (0)
"""
},
'bool': {
'ctype': 'bool',
'fail': FAIL,
'print': """
__attribute__((unused))
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
(void)size;
printf("%s", v ? "true" : "false");
}}
""",
'assert': """
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
do {{
bool _lh = !!(lh);
bool _rh = !!(rh);
if (!(_lh {op} _rh)) {{
__{prefix}_assert_fail_{type}(file, line, "{comp}",
_lh, 0, _rh, 0);
}}
}} while (0)
"""
},
'mem': {
'ctype': 'const void *',
'fail': FAIL,
'print': """
__attribute__((unused))
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
const uint8_t *s = v;
printf("\\\"");
for (size_t i = 0; i < size && i < {maxwidth}; i++) {{
if (s[i] >= ' ' && s[i] <= '~') {{
printf("%c", s[i]);
}} else {{
printf("\\\\x%02x", s[i]);
}}
}}
if (size > {maxwidth}) {{
printf("...");
}}
printf("\\\"");
}}
""",
'assert': """
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh, size)
do {{
const void *_lh = lh;
const void *_rh = rh;
if (!(memcmp(_lh, _rh, size) {op} 0)) {{
__{prefix}_assert_fail_{type}(file, line, "{comp}",
_lh, size, _rh, size);
}}
}} while (0)
"""
},
'str': {
'ctype': 'const char *',
'fail': FAIL,
'print': """
__attribute__((unused))
static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
__{prefix}_assert_print_mem(v, size);
}}
""",
'assert': """
#define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
do {{
const char *_lh = lh;
const char *_rh = rh;
if (!(strcmp(_lh, _rh) {op} 0)) {{
__{prefix}_assert_fail_{type}(file, line, "{comp}",
_lh, strlen(_lh), _rh, strlen(_rh));
}}
}} while (0)
"""
}
}
def mkdecls(outf, maxwidth=16):
outf.write("#include <stdio.h>\n")
outf.write("#include <stdbool.h>\n")
outf.write("#include <stdint.h>\n")
outf.write("#include <inttypes.h>\n")
outf.write("#include <signal.h>\n")
for type, desc in sorted(TYPE.items()):
format = {
'type': type.lower(), 'TYPE': type.upper(),
'ctype': desc['ctype'],
'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
'maxwidth': maxwidth,
}
outf.write(re.sub('\s+', ' ',
desc['print'].strip().format(**format))+'\n')
outf.write(re.sub('\s+', ' ',
desc['fail'].strip().format(**format))+'\n')
for op, comp in sorted(COMP.items()):
format.update({
'comp': comp.lower(), 'COMP': comp.upper(),
'op': op,
})
outf.write(re.sub('\s+', ' ',
desc['assert'].strip().format(**format))+'\n')
def mkassert(type, comp, lh, rh, size=None):
format = {
'type': type.lower(), 'TYPE': type.upper(),
'comp': comp.lower(), 'COMP': comp.upper(),
'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
'lh': lh.strip(' '),
'rh': rh.strip(' '),
'size': size,
}
if size:
return ((ASSERT + '(__FILE__, __LINE__, {lh}, {rh}, {size})')
.format(**format))
else:
return ((ASSERT + '(__FILE__, __LINE__, {lh}, {rh})')
.format(**format))
# simple recursive descent parser
LEX = {
'ws': [r'(?:\s|\n|#.*?\n|//.*?\n|/\*.*?\*/)+'],
'assert': PATTERN,
'string': [r'"(?:\\.|[^"])*"', r"'(?:\\.|[^'])\'"],
'arrow': ['=>'],
'paren': ['\(', '\)'],
'op': ['strcmp', 'memcmp', '->'],
'comp': ['==', '!=', '<=', '>=', '<', '>'],
'logic': ['\&\&', '\|\|'],
'sep': [':', ';', '\{', '\}', ','],
}
class ParseFailure(Exception):
def __init__(self, expected, found):
self.expected = expected
self.found = found
def __str__(self):
return "expected %r, found %s..." % (
self.expected, repr(self.found)[:70])
class Parse:
def __init__(self, inf, lexemes):
p = '|'.join('(?P<%s>%s)' % (n, '|'.join(l))
for n, l in lexemes.items())
p = re.compile(p, re.DOTALL)
data = inf.read()
tokens = []
while True:
m = p.search(data)
if m:
if m.start() > 0:
tokens.append((None, data[:m.start()]))
tokens.append((m.lastgroup, m.group()))
data = data[m.end():]
else:
tokens.append((None, data))
break
self.tokens = tokens
self.off = 0
def lookahead(self, *pattern):
if self.off < len(self.tokens):
token = self.tokens[self.off]
if token[0] in pattern or token[1] in pattern:
self.m = token[1]
return self.m
self.m = None
return self.m
def accept(self, *patterns):
m = self.lookahead(*patterns)
if m is not None:
self.off += 1
return m
def expect(self, *patterns):
m = self.accept(*patterns)
if not m:
raise ParseFailure(patterns, self.tokens[self.off:])
return m
def push(self):
return self.off
def pop(self, state):
self.off = state
def passert(p):
def pastr(p):
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
p.expect('strcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
lh = pexpr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
rh = pexpr(p) ; p.accept('ws')
p.expect(')') ; p.accept('ws')
comp = p.expect('comp') ; p.accept('ws')
p.expect('0') ; p.accept('ws')
p.expect(')')
return mkassert('str', COMP[comp], lh, rh)
def pamem(p):
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
p.expect('memcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
lh = pexpr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
rh = pexpr(p) ; p.accept('ws')
p.expect(',') ; p.accept('ws')
size = pexpr(p) ; p.accept('ws')
p.expect(')') ; p.accept('ws')
comp = p.expect('comp') ; p.accept('ws')
p.expect('0') ; p.accept('ws')
p.expect(')')
return mkassert('mem', COMP[comp], lh, rh, size)
def paint(p):
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
lh = pexpr(p) ; p.accept('ws')
comp = p.expect('comp') ; p.accept('ws')
rh = pexpr(p) ; p.accept('ws')
p.expect(')')
return mkassert('int', COMP[comp], lh, rh)
def pabool(p):
p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
lh = pexprs(p) ; p.accept('ws')
p.expect(')')
return mkassert('bool', 'eq', lh, 'true')
def pa(p):
return p.expect('assert')
state = p.push()
lastf = None
for pa in [pastr, pamem, paint, pabool, pa]:
try:
return pa(p)
except ParseFailure as f:
p.pop(state)
lastf = f
else:
raise lastf
def pexpr(p):
res = []
while True:
if p.accept('('):
res.append(p.m)
while True:
res.append(pexprs(p))
if p.accept('sep'):
res.append(p.m)
else:
break
res.append(p.expect(')'))
elif p.lookahead('assert'):
res.append(passert(p))
elif p.accept('assert', 'ws', 'string', 'op', None):
res.append(p.m)
else:
return ''.join(res)
def pexprs(p):
res = []
while True:
res.append(pexpr(p))
if p.accept('comp', 'logic', ','):
res.append(p.m)
else:
return ''.join(res)
def pstmt(p):
ws = p.accept('ws') or ''
lh = pexprs(p)
if p.accept('=>'):
rh = pexprs(p)
return ws + mkassert('int', 'eq', lh, rh)
else:
return ws + lh
def main(args):
inf = open(args.input, 'r') if args.input else sys.stdin
outf = open(args.output, 'w') if args.output else sys.stdout
lexemes = LEX.copy()
if args.pattern:
lexemes['assert'] = args.pattern
p = Parse(inf, lexemes)
# write extra verbose asserts
mkdecls(outf, maxwidth=args.maxwidth)
if args.input:
outf.write("#line %d \"%s\"\n" % (1, args.input))
# parse and write out stmt at a time
try:
while True:
outf.write(pstmt(p))
if p.accept('sep'):
outf.write(p.m)
else:
break
except ParseFailure as f:
pass
for i in range(p.off, len(p.tokens)):
outf.write(p.tokens[i][1])
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Cpp step that increases assert verbosity")
parser.add_argument('input', nargs='?',
help="Input C file after cpp.")
parser.add_argument('-o', '--output', required=True,
help="Output C file.")
parser.add_argument('-p', '--pattern', action='append',
help="Patterns to search for starting an assert statement.")
parser.add_argument('--maxwidth', default=MAXWIDTH, type=int,
help="Maximum number of characters to display for strcmp and memcmp.")
main(parser.parse_args())
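As a hedged example of the rewrite this script performs, mkassert() above expands a plain comparison into a verbose assert macro call (the names a, b, buffer, and size are hypothetical):

    print(mkassert('int', 'eq', 'a', 'b'))
    # -> __LFS_ASSERT_INT_EQ(__FILE__, __LINE__, a, b)
    print(mkassert('mem', 'eq', 'buffer', '"hello"', 'size'))
    # -> __LFS_ASSERT_MEM_EQ(__FILE__, __LINE__, buffer, "hello", size)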

View File

@@ -1,26 +0,0 @@
#!/usr/bin/env python3
import subprocess as sp
def main(args):
with open(args.disk, 'rb') as f:
f.seek(args.block * args.block_size)
block = (f.read(args.block_size)
.ljust(args.block_size, b'\xff'))
# what did you expect?
print("%-8s %-s" % ('off', 'data'))
return sp.run(['xxd', '-g1', '-'], input=block).returncode
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Hex dump a specific block in a disk.")
parser.add_argument('disk',
help="File representing the block device.")
parser.add_argument('block_size', type=lambda x: int(x, 0),
help="Size of a block in bytes.")
parser.add_argument('block', type=lambda x: int(x, 0),
help="Address of block to dump.")
sys.exit(main(parser.parse_args()))

View File

@@ -1,367 +0,0 @@
#!/usr/bin/env python3
import struct
import binascii
import sys
import itertools as it
TAG_TYPES = {
'splice': (0x700, 0x400),
'create': (0x7ff, 0x401),
'delete': (0x7ff, 0x4ff),
'name': (0x700, 0x000),
'reg': (0x7ff, 0x001),
'dir': (0x7ff, 0x002),
'superblock': (0x7ff, 0x0ff),
'struct': (0x700, 0x200),
'dirstruct': (0x7ff, 0x200),
'ctzstruct': (0x7ff, 0x202),
'inlinestruct': (0x7ff, 0x201),
'userattr': (0x700, 0x300),
'tail': (0x700, 0x600),
'softtail': (0x7ff, 0x600),
'hardtail': (0x7ff, 0x601),
'gstate': (0x700, 0x700),
'movestate': (0x7ff, 0x7ff),
'crc': (0x700, 0x500),
}
class Tag:
def __init__(self, *args):
if len(args) == 1:
self.tag = args[0]
elif len(args) == 3:
if isinstance(args[0], str):
type = TAG_TYPES[args[0]][1]
else:
type = args[0]
if isinstance(args[1], str):
id = int(args[1], 0) if args[1] not in 'x.' else 0x3ff
else:
id = args[1]
if isinstance(args[2], str):
                size = int(args[2], 0) if args[2] not in 'x.' else 0x3ff
else:
size = args[2]
self.tag = (type << 20) | (id << 10) | size
else:
assert False
@property
def isvalid(self):
return not bool(self.tag & 0x80000000)
@property
def isattr(self):
return not bool(self.tag & 0x40000000)
@property
def iscompactable(self):
return bool(self.tag & 0x20000000)
@property
def isunique(self):
return not bool(self.tag & 0x10000000)
@property
def type(self):
return (self.tag & 0x7ff00000) >> 20
@property
def type1(self):
return (self.tag & 0x70000000) >> 20
@property
def type3(self):
return (self.tag & 0x7ff00000) >> 20
@property
def id(self):
return (self.tag & 0x000ffc00) >> 10
@property
def size(self):
return (self.tag & 0x000003ff) >> 0
@property
def dsize(self):
return 4 + (self.size if self.size != 0x3ff else 0)
@property
def chunk(self):
return self.type & 0xff
@property
def schunk(self):
return struct.unpack('b', struct.pack('B', self.chunk))[0]
def is_(self, type):
return (self.type & TAG_TYPES[type][0]) == TAG_TYPES[type][1]
def mkmask(self):
return Tag(
0x700 if self.isunique else 0x7ff,
0x3ff if self.isattr else 0,
0)
def chid(self, nid):
ntag = Tag(self.type, nid, self.size)
if hasattr(self, 'off'): ntag.off = self.off
if hasattr(self, 'data'): ntag.data = self.data
if hasattr(self, 'crc'): ntag.crc = self.crc
return ntag
def typerepr(self):
if self.is_('crc') and getattr(self, 'crc', 0xffffffff) != 0xffffffff:
return 'crc (bad)'
reverse_types = {v: k for k, v in TAG_TYPES.items()}
for prefix in range(12):
mask = 0x7ff & ~((1 << prefix)-1)
if (mask, self.type & mask) in reverse_types:
type = reverse_types[mask, self.type & mask]
if prefix > 0:
return '%s %#0*x' % (
type, prefix//4, self.type & ((1 << prefix)-1))
else:
return type
else:
return '%02x' % self.type
def idrepr(self):
return repr(self.id) if self.id != 0x3ff else '.'
def sizerepr(self):
return repr(self.size) if self.size != 0x3ff else 'x'
def __repr__(self):
return 'Tag(%r, %d, %d)' % (self.typerepr(), self.id, self.size)
def __lt__(self, other):
return (self.id, self.type) < (other.id, other.type)
def __bool__(self):
return self.isvalid
def __int__(self):
return self.tag
def __index__(self):
return self.tag
class MetadataPair:
def __init__(self, blocks):
if len(blocks) > 1:
self.pair = [MetadataPair([block]) for block in blocks]
self.pair = sorted(self.pair, reverse=True)
self.data = self.pair[0].data
self.rev = self.pair[0].rev
self.tags = self.pair[0].tags
self.ids = self.pair[0].ids
self.log = self.pair[0].log
self.all_ = self.pair[0].all_
return
self.pair = [self]
self.data = blocks[0]
block = self.data
self.rev, = struct.unpack('<I', block[0:4])
crc = binascii.crc32(block[0:4])
# parse tags
corrupt = False
tag = Tag(0xffffffff)
off = 4
self.log = []
self.all_ = []
while len(block) - off >= 4:
ntag, = struct.unpack('>I', block[off:off+4])
tag = Tag(int(tag) ^ ntag)
tag.off = off + 4
tag.data = block[off+4:off+tag.dsize]
if tag.is_('crc'):
crc = binascii.crc32(block[off:off+4+4], crc)
else:
crc = binascii.crc32(block[off:off+tag.dsize], crc)
tag.crc = crc
off += tag.dsize
self.all_.append(tag)
if tag.is_('crc'):
# is valid commit?
if crc != 0xffffffff:
corrupt = True
if not corrupt:
self.log = self.all_.copy()
# reset tag parsing
crc = 0
tag = Tag(int(tag) ^ ((tag.type & 1) << 31))
# find active ids
self.ids = list(it.takewhile(
lambda id: Tag('name', id, 0) in self,
it.count()))
# find most recent tags
self.tags = []
for tag in self.log:
if tag.is_('crc') or tag.is_('splice'):
continue
elif tag.id == 0x3ff:
if tag in self and self[tag] is tag:
self.tags.append(tag)
else:
                # id could have changed, I know this is messy and slow
# but it works
for id in self.ids:
ntag = tag.chid(id)
if ntag in self and self[ntag] is tag:
self.tags.append(ntag)
self.tags = sorted(self.tags)
def __bool__(self):
return bool(self.log)
def __lt__(self, other):
# corrupt blocks don't count
if not self or not other:
return bool(other)
# use sequence arithmetic to avoid overflow
return not ((other.rev - self.rev) & 0x80000000)
def __contains__(self, args):
try:
self[args]
return True
except KeyError:
return False
def __getitem__(self, args):
if isinstance(args, tuple):
gmask, gtag = args
else:
gmask, gtag = args.mkmask(), args
gdiff = 0
for tag in reversed(self.log):
if (gmask.id != 0 and tag.is_('splice') and
tag.id <= gtag.id - gdiff):
if tag.is_('create') and tag.id == gtag.id - gdiff:
# creation point
break
gdiff += tag.schunk
if ((int(gmask) & int(tag)) ==
(int(gmask) & int(gtag.chid(gtag.id - gdiff)))):
if tag.size == 0x3ff:
# deleted
break
return tag
raise KeyError(gmask, gtag)
def _dump_tags(self, tags, f=sys.stdout, truncate=True):
f.write("%-8s %-8s %-13s %4s %4s" % (
'off', 'tag', 'type', 'id', 'len'))
if truncate:
f.write(' data (truncated)')
f.write('\n')
for tag in tags:
f.write("%08x: %08x %-13s %4s %4s" % (
tag.off, tag,
tag.typerepr(), tag.idrepr(), tag.sizerepr()))
if truncate:
f.write(" %-23s %-8s\n" % (
' '.join('%02x' % c for c in tag.data[:8]),
''.join(c if c >= ' ' and c <= '~' else '.'
for c in map(chr, tag.data[:8]))))
else:
f.write("\n")
for i in range(0, len(tag.data), 16):
f.write(" %08x: %-47s %-16s\n" % (
tag.off+i,
' '.join('%02x' % c for c in tag.data[i:i+16]),
''.join(c if c >= ' ' and c <= '~' else '.'
for c in map(chr, tag.data[i:i+16]))))
def dump_tags(self, f=sys.stdout, truncate=True):
self._dump_tags(self.tags, f=f, truncate=truncate)
def dump_log(self, f=sys.stdout, truncate=True):
self._dump_tags(self.log, f=f, truncate=truncate)
def dump_all(self, f=sys.stdout, truncate=True):
self._dump_tags(self.all_, f=f, truncate=truncate)
def main(args):
blocks = []
with open(args.disk, 'rb') as f:
for block in [args.block1, args.block2]:
if block is None:
continue
f.seek(block * args.block_size)
blocks.append(f.read(args.block_size)
.ljust(args.block_size, b'\xff'))
# find most recent pair
mdir = MetadataPair(blocks)
try:
mdir.tail = mdir[Tag('tail', 0, 0)]
if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
mdir.tail = None
except KeyError:
mdir.tail = None
print("mdir {%s} rev %d%s%s%s" % (
', '.join('%#x' % b
for b in [args.block1, args.block2]
if b is not None),
mdir.rev,
' (was %s)' % ', '.join('%d' % m.rev for m in mdir.pair[1:])
if len(mdir.pair) > 1 else '',
' (corrupted!)' if not mdir else '',
' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data)
if mdir.tail else ''))
if args.all:
mdir.dump_all(truncate=not args.no_truncate)
elif args.log:
mdir.dump_log(truncate=not args.no_truncate)
else:
mdir.dump_tags(truncate=not args.no_truncate)
return 0 if mdir else 1
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Dump useful info about metadata pairs in littlefs.")
parser.add_argument('disk',
help="File representing the block device.")
parser.add_argument('block_size', type=lambda x: int(x, 0),
help="Size of a block in bytes.")
parser.add_argument('block1', type=lambda x: int(x, 0),
help="First block address for finding the metadata pair.")
parser.add_argument('block2', nargs='?', type=lambda x: int(x, 0),
help="Second block address for finding the metadata pair.")
parser.add_argument('-l', '--log', action='store_true',
help="Show tags in log.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all tags in log, included tags in corrupted commits.")
parser.add_argument('-T', '--no-truncate', action='store_true',
help="Don't truncate large amounts of data.")
sys.exit(main(parser.parse_args()))
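A small sketch of the Tag helper defined above; the id and length here are hypothetical:

    tag = Tag('dirstruct', 2, 8)   # hypothetical id 2, 8-byte payload
    print(tag.typerepr(), tag.idrepr(), tag.sizerepr())   # -> dirstruct 2 8
    print(tag.is_('struct'), hex(int(tag)))               # -> True 0x20000808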

View File

@@ -1,183 +0,0 @@
#!/usr/bin/env python3
import struct
import sys
import json
import io
import itertools as it
from readmdir import Tag, MetadataPair
def main(args):
superblock = None
gstate = b'\0\0\0\0\0\0\0\0\0\0\0\0'
dirs = []
mdirs = []
corrupted = []
cycle = False
with open(args.disk, 'rb') as f:
tail = (args.block1, args.block2)
hard = False
while True:
for m in it.chain((m for d in dirs for m in d), mdirs):
if set(m.blocks) == set(tail):
# cycle detected
cycle = m.blocks
if cycle:
break
# load mdir
data = []
blocks = {}
for block in tail:
f.seek(block * args.block_size)
data.append(f.read(args.block_size)
.ljust(args.block_size, b'\xff'))
blocks[id(data[-1])] = block
mdir = MetadataPair(data)
mdir.blocks = tuple(blocks[id(p.data)] for p in mdir.pair)
            # fetch some key metadata as we scan
try:
mdir.tail = mdir[Tag('tail', 0, 0)]
if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
mdir.tail = None
except KeyError:
mdir.tail = None
# have superblock?
try:
nsuperblock = mdir[
Tag(0x7ff, 0x3ff, 0), Tag('superblock', 0, 0)]
superblock = nsuperblock, mdir[Tag('inlinestruct', 0, 0)]
except KeyError:
pass
# have gstate?
try:
ngstate = mdir[Tag('movestate', 0, 0)]
gstate = bytes((a or 0) ^ (b or 0)
for a,b in it.zip_longest(gstate, ngstate.data))
except KeyError:
pass
# corrupted?
if not mdir:
corrupted.append(mdir)
# add to directories
mdirs.append(mdir)
if mdir.tail is None or not mdir.tail.is_('hardtail'):
dirs.append(mdirs)
mdirs = []
if mdir.tail is None:
break
tail = struct.unpack('<II', mdir.tail.data)
hard = mdir.tail.is_('hardtail')
# find paths
dirtable = {}
for dir in dirs:
dirtable[frozenset(dir[0].blocks)] = dir
pending = [("/", dirs[0])]
while pending:
path, dir = pending.pop(0)
for mdir in dir:
for tag in mdir.tags:
if tag.is_('dir'):
try:
npath = tag.data.decode('utf8')
dirstruct = mdir[Tag('dirstruct', tag.id, 0)]
nblocks = struct.unpack('<II', dirstruct.data)
nmdir = dirtable[frozenset(nblocks)]
pending.append(((path + '/' + npath), nmdir))
except KeyError:
pass
dir[0].path = path.replace('//', '/')
# print littlefs + version info
version = ('?', '?')
if superblock:
version = tuple(reversed(
struct.unpack('<HH', superblock[1].data[0:4].ljust(4, b'\xff'))))
print("%-47s%s" % ("littlefs v%s.%s" % version,
"data (truncated, if it fits)"
if not any([args.no_truncate, args.log, args.all]) else ""))
# print gstate
print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
if tag.size or not tag.isvalid:
print(" orphans >=%d" % max(tag.size, 1))
if tag.type:
print(" move dir {%#x, %#x} id %d" % (
blocks[0], blocks[1], tag.id))
# print mdir info
for i, dir in enumerate(dirs):
print("dir %s" % (json.dumps(dir[0].path)
if hasattr(dir[0], 'path') else '(orphan)'))
for j, mdir in enumerate(dir):
print("mdir {%#x, %#x} rev %d (was %d)%s%s" % (
mdir.blocks[0], mdir.blocks[1], mdir.rev, mdir.pair[1].rev,
' (corrupted!)' if not mdir else '',
' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data)
if mdir.tail else ''))
f = io.StringIO()
if args.log:
mdir.dump_log(f, truncate=not args.no_truncate)
elif args.all:
mdir.dump_all(f, truncate=not args.no_truncate)
else:
mdir.dump_tags(f, truncate=not args.no_truncate)
lines = list(filter(None, f.getvalue().split('\n')))
for k, line in enumerate(lines):
print("%s %s" % (
' ' if j == len(dir)-1 else
'v' if k == len(lines)-1 else
'|',
line))
errcode = 0
for mdir in corrupted:
errcode = errcode or 1
print("*** corrupted mdir {%#x, %#x}! ***" % (
mdir.blocks[0], mdir.blocks[1]))
if cycle:
errcode = errcode or 2
print("*** cycle detected {%#x, %#x}! ***" % (
cycle[0], cycle[1]))
return errcode
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Dump semantic info about the metadata tree in littlefs")
parser.add_argument('disk',
help="File representing the block device.")
parser.add_argument('block_size', type=lambda x: int(x, 0),
help="Size of a block in bytes.")
parser.add_argument('block1', nargs='?', default=0,
type=lambda x: int(x, 0),
help="Optional first block address for finding the superblock.")
parser.add_argument('block2', nargs='?', default=1,
type=lambda x: int(x, 0),
help="Optional second block address for finding the superblock.")
parser.add_argument('-l', '--log', action='store_true',
help="Show tags in log.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all tags in log, included tags in corrupted commits.")
parser.add_argument('-T', '--no-truncate', action='store_true',
help="Show the full contents of files/attrs/tags.")
sys.exit(main(parser.parse_args()))
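A minimal sketch of the gstate xor-accumulation used above, with hypothetical byte strings:

    import itertools as it
    gstate = b'\x00\x01'         # hypothetical accumulated gstate
    ngstate = b'\x01\x01\x02'    # hypothetical movestate data from one mdir
    gstate = bytes((a or 0) ^ (b or 0)
        for a, b in it.zip_longest(gstate, ngstate))
    print(gstate.hex())   # -> 010002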

28
scripts/results.py Executable file
View File

@@ -0,0 +1,28 @@
#!/usr/bin/env python2
import struct
import sys
import time
import os
import re
def main():
with open('blocks/.config') as file:
read_size, prog_size, block_size, block_count = (
struct.unpack('<LLLL', file.read()))
real_size = sum(
os.path.getsize(os.path.join('blocks', f))
for f in os.listdir('blocks') if re.match('\d+', f))
with open('blocks/.stats') as file:
read_count, prog_count, erase_count = (
struct.unpack('<QQQ', file.read()))
runtime = time.time() - os.stat('blocks').st_ctime
print 'results: %dB %dB %dB %.3fs' % (
read_count, prog_count, erase_count, runtime)
if __name__ == "__main__":
main(*sys.argv[1:])
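For reference, a hedged sketch of the blocks/.config layout this script expects: four little-endian uint32s, unpacked the same way as above (geometry values hypothetical):

    import struct
    config = struct.pack('<LLLL', 16, 16, 512, 1024)   # hypothetical geometry
    read_size, prog_size, block_size, block_count = struct.unpack('<LLLL', config)
    print(block_size * block_count)   # -> 524288 bytes of simulated storage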

View File

@@ -1,430 +0,0 @@
#!/usr/bin/env python3
#
# Script to find stack usage at the function level. Will detect recursion and
# report as infinite stack usage.
#
import os
import glob
import itertools as it
import re
import csv
import collections as co
import math as m
CI_PATHS = ['*.ci']
def collect(paths, **args):
# parse the vcg format
k_pattern = re.compile('([a-z]+)\s*:', re.DOTALL)
v_pattern = re.compile('(?:"(.*?)"|([a-z]+))', re.DOTALL)
def parse_vcg(rest):
def parse_vcg(rest):
node = []
while True:
rest = rest.lstrip()
m = k_pattern.match(rest)
if not m:
return (node, rest)
k, rest = m.group(1), rest[m.end(0):]
rest = rest.lstrip()
if rest.startswith('{'):
v, rest = parse_vcg(rest[1:])
assert rest[0] == '}', "unexpected %r" % rest[0:1]
rest = rest[1:]
node.append((k, v))
else:
m = v_pattern.match(rest)
assert m, "unexpected %r" % rest[0:1]
v, rest = m.group(1) or m.group(2), rest[m.end(0):]
node.append((k, v))
node, rest = parse_vcg(rest)
assert rest == '', "unexpected %r" % rest[0:1]
return node
# collect into functions
results = co.defaultdict(lambda: (None, None, 0, set()))
f_pattern = re.compile(
r'([^\\]*)\\n([^:]*)[^\\]*\\n([0-9]+) bytes \((.*)\)')
for path in paths:
with open(path) as f:
vcg = parse_vcg(f.read())
for k, graph in vcg:
if k != 'graph':
continue
for k, info in graph:
if k == 'node':
info = dict(info)
m = f_pattern.match(info['label'])
if m:
function, file, size, type = m.groups()
if not args.get('quiet') and type != 'static':
print('warning: found non-static stack for %s (%s)'
% (function, type))
_, _, _, targets = results[info['title']]
results[info['title']] = (
file, function, int(size), targets)
elif k == 'edge':
info = dict(info)
_, _, _, targets = results[info['sourcename']]
targets.add(info['targetname'])
else:
continue
if not args.get('everything'):
for source, (s_file, s_function, _, _) in list(results.items()):
# discard internal functions
if s_file.startswith('<') or s_file.startswith('/usr/include'):
del results[source]
# find maximum stack size recursively, this requires also detecting cycles
# (in case of recursion)
def find_limit(source, seen=None):
seen = seen or set()
if source not in results:
return 0
_, _, frame, targets = results[source]
limit = 0
for target in targets:
if target in seen:
# found a cycle
return float('inf')
limit_ = find_limit(target, seen | {target})
limit = max(limit, limit_)
return frame + limit
def find_deps(targets):
deps = set()
for target in targets:
if target in results:
t_file, t_function, _, _ = results[target]
deps.add((t_file, t_function))
return deps
# flatten into a list
flat_results = []
for source, (s_file, s_function, frame, targets) in results.items():
limit = find_limit(source)
deps = find_deps(targets)
flat_results.append((s_file, s_function, frame, limit, deps))
return flat_results
def main(**args):
def openio(path, mode='r'):
if path == '-':
if 'r' in mode:
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
else:
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
else:
return open(path, mode)
# find sizes
if not args.get('use', None):
# find .ci files
paths = []
for path in args['ci_paths']:
if os.path.isdir(path):
path = path + '/*.ci'
for path in glob.glob(path):
paths.append(path)
if not paths:
print('no .ci files found in %r?' % args['ci_paths'])
sys.exit(-1)
results = collect(paths, **args)
else:
with openio(args['use']) as f:
r = csv.DictReader(f)
results = [
( result['file'],
result['name'],
int(result['stack_frame']),
float(result['stack_limit']), # note limit can be inf
set())
for result in r
if result.get('stack_frame') not in {None, ''}
if result.get('stack_limit') not in {None, ''}]
total_frame = 0
total_limit = 0
for _, _, frame, limit, _ in results:
total_frame += frame
total_limit = max(total_limit, limit)
# find previous results?
if args.get('diff'):
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
prev_results = [
( result['file'],
result['name'],
int(result['stack_frame']),
float(result['stack_limit']),
set())
for result in r
if result.get('stack_frame') not in {None, ''}
if result.get('stack_limit') not in {None, ''}]
except FileNotFoundError:
prev_results = []
prev_total_frame = 0
prev_total_limit = 0
for _, _, frame, limit, _ in prev_results:
prev_total_frame += frame
prev_total_limit = max(prev_total_limit, limit)
# write results to CSV
if args.get('output'):
merged_results = co.defaultdict(lambda: {})
other_fields = []
# merge?
if args.get('merge'):
try:
with openio(args['merge']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
func = result.pop('name', '')
result.pop('stack_frame', None)
result.pop('stack_limit', None)
merged_results[(file, func)] = result
other_fields = result.keys()
except FileNotFoundError:
pass
for file, func, frame, limit, _ in results:
merged_results[(file, func)]['stack_frame'] = frame
merged_results[(file, func)]['stack_limit'] = limit
with openio(args['output'], 'w') as f:
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'stack_frame', 'stack_limit'])
w.writeheader()
for (file, func), result in sorted(merged_results.items()):
w.writerow({'file': file, 'name': func, **result})
# print results
def dedup_entries(results, by='name'):
entries = co.defaultdict(lambda: (0, 0, set()))
for file, func, frame, limit, deps in results:
entry = (file if by == 'file' else func)
entry_frame, entry_limit, entry_deps = entries[entry]
entries[entry] = (
entry_frame + frame,
max(entry_limit, limit),
entry_deps | {file if by == 'file' else func
for file, func in deps})
return entries
def diff_entries(olds, news):
diff = co.defaultdict(lambda: (None, None, None, None, 0, 0, 0, set()))
for name, (new_frame, new_limit, deps) in news.items():
diff[name] = (
None, None,
new_frame, new_limit,
new_frame, new_limit,
1.0,
deps)
for name, (old_frame, old_limit, _) in olds.items():
_, _, new_frame, new_limit, _, _, _, deps = diff[name]
diff[name] = (
old_frame, old_limit,
new_frame, new_limit,
(new_frame or 0) - (old_frame or 0),
0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
else (new_limit or 0) - (old_limit or 0),
0.0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
else +float('inf') if m.isinf(new_limit or 0)
else -float('inf') if m.isinf(old_limit or 0)
else +0.0 if not old_limit and not new_limit
else +1.0 if not old_limit
else ((new_limit or 0) - (old_limit or 0))/(old_limit or 0),
deps)
return diff
def sorted_entries(entries):
if args.get('limit_sort'):
return sorted(entries, key=lambda x: (-x[1][1], x))
elif args.get('reverse_limit_sort'):
return sorted(entries, key=lambda x: (+x[1][1], x))
elif args.get('frame_sort'):
return sorted(entries, key=lambda x: (-x[1][0], x))
elif args.get('reverse_frame_sort'):
return sorted(entries, key=lambda x: (+x[1][0], x))
else:
return sorted(entries)
def sorted_diff_entries(entries):
if args.get('limit_sort'):
return sorted(entries, key=lambda x: (-(x[1][3] or 0), x))
elif args.get('reverse_limit_sort'):
return sorted(entries, key=lambda x: (+(x[1][3] or 0), x))
elif args.get('frame_sort'):
return sorted(entries, key=lambda x: (-(x[1][2] or 0), x))
elif args.get('reverse_frame_sort'):
return sorted(entries, key=lambda x: (+(x[1][2] or 0), x))
else:
return sorted(entries, key=lambda x: (-x[1][6], x))
def print_header(by=''):
if not args.get('diff'):
print('%-36s %7s %7s' % (by, 'frame', 'limit'))
else:
print('%-36s %15s %15s %15s' % (by, 'old', 'new', 'diff'))
def print_entry(name, frame, limit):
print("%-36s %7d %7s" % (name,
frame, '' if m.isinf(limit) else int(limit)))
def print_diff_entry(name,
old_frame, old_limit,
new_frame, new_limit,
diff_frame, diff_limit,
ratio):
print('%-36s %7s %7s %7s %7s %+7d %7s%s' % (name,
old_frame if old_frame is not None else "-",
('' if m.isinf(old_limit) else int(old_limit))
if old_limit is not None else "-",
new_frame if new_frame is not None else "-",
('' if m.isinf(new_limit) else int(new_limit))
if new_limit is not None else "-",
diff_frame,
('+∞' if diff_limit > 0 and m.isinf(diff_limit)
else '-∞' if diff_limit < 0 and m.isinf(diff_limit)
else '%+d' % diff_limit),
'' if not ratio
else ' (+∞%)' if ratio > 0 and m.isinf(ratio)
else ' (-∞%)' if ratio < 0 and m.isinf(ratio)
else ' (%+.1f%%)' % (100*ratio)))
def print_entries(by='name'):
# build optional tree of dependencies
def print_deps(entries, depth, print,
filter=lambda _: True,
prefixes=('', '', '', '')):
entries = entries if isinstance(entries, list) else list(entries)
filtered_entries = [(name, entry)
for name, entry in entries
if filter(name)]
for i, (name, entry) in enumerate(filtered_entries):
last = (i == len(filtered_entries)-1)
print(prefixes[0+last] + name, entry)
if depth > 0:
deps = entry[-1]
print_deps(entries, depth-1, print,
lambda name: name in deps,
( prefixes[2+last] + "|-> ",
prefixes[2+last] + "'-> ",
prefixes[2+last] + "| ",
prefixes[2+last] + " "))
entries = dedup_entries(results, by=by)
if not args.get('diff'):
print_header(by=by)
print_deps(
sorted_entries(entries.items()),
args.get('depth') or 0,
lambda name, entry: print_entry(name, *entry[:-1]))
else:
prev_entries = dedup_entries(prev_results, by=by)
diff = diff_entries(prev_entries, entries)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for _, old, _, _, _, _, _, _ in diff.values() if old is None),
sum(1 for _, _, _, new, _, _, _, _ in diff.values() if new is None)))
print_deps(
filter(
lambda x: x[1][6] or args.get('all'),
sorted_diff_entries(diff.items())),
args.get('depth') or 0,
lambda name, entry: print_diff_entry(name, *entry[:-1]))
def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total_frame, total_limit)
else:
diff_frame = total_frame - prev_total_frame
diff_limit = (
0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
else (total_limit or 0) - (prev_total_limit or 0))
ratio = (
0.0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
else +float('inf') if m.isinf(total_limit or 0)
else -float('inf') if m.isinf(prev_total_limit or 0)
else 0.0 if not prev_total_limit and not total_limit
else 1.0 if not prev_total_limit
else ((total_limit or 0) - (prev_total_limit or 0))/(prev_total_limit or 0))
print_diff_entry('TOTAL',
prev_total_frame, prev_total_limit,
total_frame, total_limit,
diff_frame, diff_limit,
ratio)
if args.get('quiet'):
pass
elif args.get('summary'):
print_header()
print_totals()
elif args.get('files'):
print_entries(by='file')
print_totals()
else:
print_entries(by='name')
print_totals()
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Find stack usage at the function level.")
parser.add_argument('ci_paths', nargs='*', default=CI_PATHS,
help="Description of where to find *.ci files. May be a directory \
or a list of paths. Defaults to %r." % CI_PATHS)
parser.add_argument('-v', '--verbose', action='store_true',
help="Output commands that run behind the scenes.")
parser.add_argument('-q', '--quiet', action='store_true',
help="Don't show anything, useful with -o.")
parser.add_argument('-o', '--output',
help="Specify CSV file to store results.")
parser.add_argument('-u', '--use',
help="Don't parse callgraph files, instead use this CSV file.")
parser.add_argument('-d', '--diff',
help="Specify CSV file to diff against.")
parser.add_argument('-m', '--merge',
help="Merge with an existing CSV file when writing to output.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all functions, not just the ones that changed.")
parser.add_argument('-A', '--everything', action='store_true',
help="Include builtin and libc specific symbols.")
parser.add_argument('-s', '--limit-sort', action='store_true',
help="Sort by stack limit.")
parser.add_argument('-S', '--reverse-limit-sort', action='store_true',
help="Sort by stack limit, but backwards.")
parser.add_argument('--frame-sort', action='store_true',
help="Sort by stack frame size.")
parser.add_argument('--reverse-frame-sort', action='store_true',
help="Sort by stack frame size, but backwards.")
parser.add_argument('-L', '--depth', default=0, type=lambda x: int(x, 0),
nargs='?', const=float('inf'),
help="Depth of dependencies to show.")
parser.add_argument('-F', '--files', action='store_true',
help="Show file-level calls.")
parser.add_argument('-Y', '--summary', action='store_true',
help="Only show the total stack size.")
parser.add_argument('--build-dir',
help="Specify the relative build directory. Used to map object files \
to the correct source files.")
sys.exit(main(**vars(parser.parse_args())))
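A minimal sketch of the cycle handling in find_limit() above; the frame sizes and call edges here are hypothetical:

    frames = {'a': 16, 'b': 32, 'c': 8}
    calls = {'a': {'b'}, 'b': {'c'}, 'c': {'b'}}   # b and c recurse

    def find_limit(source, seen=None):
        seen = seen or set()
        if source not in frames:
            return 0
        limit = 0
        for target in calls.get(source, set()):
            if target in seen:
                return float('inf')   # cycle detected, stack is unbounded
            limit = max(limit, find_limit(target, seen | {target}))
        return frames[source] + limit

    print(find_limit('a'))   # -> inf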

View File

@@ -1,331 +0,0 @@
#!/usr/bin/env python3
#
# Script to find struct sizes.
#
import os
import glob
import itertools as it
import subprocess as sp
import shlex
import re
import csv
import collections as co
OBJ_PATHS = ['*.o']
def collect(paths, **args):
decl_pattern = re.compile(
'^\s+(?P<no>[0-9]+)'
'\s+(?P<dir>[0-9]+)'
'\s+.*'
'\s+(?P<file>[^\s]+)$')
struct_pattern = re.compile(
'^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
'|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
'|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
'|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
results = co.defaultdict(lambda: 0)
for path in paths:
# find decl, we want to filter by structs in .h files
decls = {}
# note objdump-tool may contain extra args
cmd = args['objdump_tool'] + ['--dwarf=rawline', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace')
for line in proc.stdout:
# find file numbers
m = decl_pattern.match(line)
if m:
decls[int(m.group('no'))] = m.group('file')
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
sys.exit(-1)
# collect structs as we parse dwarf info
found = False
name = None
decl = None
size = None
# note objdump-tool may contain extra args
cmd = args['objdump_tool'] + ['--dwarf=info', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace')
for line in proc.stdout:
# state machine here to find structs
m = struct_pattern.match(line)
if m:
if m.group('tag'):
if (name is not None
and decl is not None
and size is not None):
decl = decls.get(decl, '?')
results[(decl, name)] = size
found = (m.group('tag') == 'structure_type')
name = None
decl = None
size = None
elif found and m.group('name'):
name = m.group('name')
elif found and name and m.group('decl'):
decl = int(m.group('decl'))
elif found and name and m.group('size'):
size = int(m.group('size'))
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
sys.exit(-1)
flat_results = []
for (file, struct), size in results.items():
# map to source files
if args.get('build_dir'):
file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
# only include structs declared in header files in the current
        # directory, ignore internal-only structs (these are represented
# in other measurements)
if not args.get('everything'):
if not file.endswith('.h'):
continue
# replace .o with .c, different scripts report .o/.c, we need to
# choose one if we want to deduplicate csv files
file = re.sub('\.o$', '.c', file)
flat_results.append((file, struct, size))
return flat_results
def main(**args):
def openio(path, mode='r'):
if path == '-':
if 'r' in mode:
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
else:
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
else:
return open(path, mode)
# find sizes
if not args.get('use', None):
# find .o files
paths = []
for path in args['obj_paths']:
if os.path.isdir(path):
path = path + '/*.o'
for path in glob.glob(path):
paths.append(path)
if not paths:
print('no .o files found in %r?' % args['obj_paths'])
sys.exit(-1)
results = collect(paths, **args)
else:
with openio(args['use']) as f:
r = csv.DictReader(f)
results = [
( result['file'],
result['name'],
int(result['struct_size']))
for result in r
if result.get('struct_size') not in {None, ''}]
total = 0
for _, _, size in results:
total += size
# find previous results?
if args.get('diff'):
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
prev_results = [
( result['file'],
result['name'],
int(result['struct_size']))
for result in r
if result.get('struct_size') not in {None, ''}]
except FileNotFoundError:
prev_results = []
prev_total = 0
for _, _, size in prev_results:
prev_total += size
# write results to CSV
if args.get('output'):
merged_results = co.defaultdict(lambda: {})
other_fields = []
# merge?
if args.get('merge'):
try:
with openio(args['merge']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
struct = result.pop('name', '')
result.pop('struct_size', None)
merged_results[(file, struct)] = result
other_fields = result.keys()
except FileNotFoundError:
pass
for file, struct, size in results:
merged_results[(file, struct)]['struct_size'] = size
with openio(args['output'], 'w') as f:
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size'])
w.writeheader()
for (file, struct), result in sorted(merged_results.items()):
w.writerow({'file': file, 'name': struct, **result})
# print results
def dedup_entries(results, by='name'):
entries = co.defaultdict(lambda: 0)
for file, struct, size in results:
entry = (file if by == 'file' else struct)
entries[entry] += size
return entries
def diff_entries(olds, news):
diff = co.defaultdict(lambda: (0, 0, 0, 0))
for name, new in news.items():
diff[name] = (0, new, new, 1.0)
for name, old in olds.items():
_, new, _, _ = diff[name]
diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
return diff
def sorted_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1], x))
else:
return sorted(entries)
def sorted_diff_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1][1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1][1], x))
else:
return sorted(entries, key=lambda x: (-x[1][3], x))
def print_header(by=''):
if not args.get('diff'):
print('%-36s %7s' % (by, 'size'))
else:
print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
def print_entry(name, size):
print("%-36s %7d" % (name, size))
def print_diff_entry(name, old, new, diff, ratio):
print("%-36s %7s %7s %+7d%s" % (name,
old or "-",
new or "-",
diff,
' (%+.1f%%)' % (100*ratio) if ratio else ''))
def print_entries(by='name'):
entries = dedup_entries(results, by=by)
if not args.get('diff'):
print_header(by=by)
for name, size in sorted_entries(entries.items()):
print_entry(name, size)
else:
prev_entries = dedup_entries(prev_results, by=by)
diff = diff_entries(prev_entries, entries)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for old, _, _, _ in diff.values() if not old),
sum(1 for _, new, _, _ in diff.values() if not new)))
for name, (old, new, diff, ratio) in sorted_diff_entries(
diff.items()):
if ratio or args.get('all'):
print_diff_entry(name, old, new, diff, ratio)
def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total)
else:
ratio = (0.0 if not prev_total and not total
else 1.0 if not prev_total
else (total-prev_total)/prev_total)
print_diff_entry('TOTAL',
prev_total, total,
total-prev_total,
ratio)
if args.get('quiet'):
pass
elif args.get('summary'):
print_header()
print_totals()
elif args.get('files'):
print_entries(by='file')
print_totals()
else:
print_entries(by='name')
print_totals()
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Find struct sizes.")
parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
help="Description of where to find *.o files. May be a directory \
or a list of paths. Defaults to %r." % OBJ_PATHS)
parser.add_argument('-v', '--verbose', action='store_true',
help="Output commands that run behind the scenes.")
parser.add_argument('-q', '--quiet', action='store_true',
help="Don't show anything, useful with -o.")
parser.add_argument('-o', '--output',
help="Specify CSV file to store results.")
parser.add_argument('-u', '--use',
help="Don't compile and find struct sizes, instead use this CSV file.")
parser.add_argument('-d', '--diff',
help="Specify CSV file to diff struct size against.")
parser.add_argument('-m', '--merge',
help="Merge with an existing CSV file when writing to output.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all functions, not just the ones that changed.")
parser.add_argument('-A', '--everything', action='store_true',
help="Include builtin and libc specific symbols.")
parser.add_argument('-s', '--size-sort', action='store_true',
help="Sort by size.")
parser.add_argument('-S', '--reverse-size-sort', action='store_true',
help="Sort by size, but backwards.")
parser.add_argument('-F', '--files', action='store_true',
help="Show file-level struct sizes.")
parser.add_argument('-Y', '--summary', action='store_true',
help="Only show the total struct size.")
parser.add_argument('--objdump-tool', default=['objdump'], type=lambda x: x.split(),
help="Path to the objdump tool to use.")
parser.add_argument('--build-dir',
help="Specify the relative build directory. Used to map object files \
to the correct source files.")
sys.exit(main(**vars(parser.parse_args())))
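A small sketch of the DWARF lines struct_pattern above is matching; the input line is hypothetical objdump output:

    import re
    struct_pattern = re.compile(
        r'^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
        r'|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
        r'|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
        r'|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
    m = struct_pattern.match(' <1><2a>: Abbrev Number: 5 (DW_TAG_structure_type)')
    print(m.group('tag'))   # -> structure_type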

View File

@@ -1,279 +0,0 @@
#!/usr/bin/env python3
#
# Script to summarize the outputs of other scripts. Operates on CSV files.
#
import functools as ft
import collections as co
import os
import csv
import re
import math as m
# displayable fields
Field = co.namedtuple('Field', 'name,parse,acc,key,fmt,repr,null,ratio')
FIELDS = [
# name, parse, accumulate, fmt, print, null
Field('code',
lambda r: int(r['code_size']),
sum,
lambda r: r,
'%7s',
lambda r: r,
'-',
lambda old, new: (new-old)/old),
Field('data',
lambda r: int(r['data_size']),
sum,
lambda r: r,
'%7s',
lambda r: r,
'-',
lambda old, new: (new-old)/old),
Field('stack',
lambda r: float(r['stack_limit']),
max,
lambda r: r,
'%7s',
lambda r: '' if m.isinf(r) else int(r),
'-',
lambda old, new: (new-old)/old),
Field('structs',
lambda r: int(r['struct_size']),
sum,
lambda r: r,
'%8s',
lambda r: r,
'-',
lambda old, new: (new-old)/old),
Field('coverage',
lambda r: (int(r['coverage_hits']), int(r['coverage_count'])),
lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs),
lambda r: r[0]/r[1],
'%19s',
lambda r: '%11s %7s' % ('%d/%d' % (r[0], r[1]), '%.1f%%' % (100*r[0]/r[1])),
'%11s %7s' % ('-', '-'),
lambda old, new: ((new[0]/new[1]) - (old[0]/old[1])))
]
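# A rough map of how each Field is used below: parse pulls the raw value out
# of a CSV row, acc merges duplicate entries (sum for sizes, max for stack),
# key orders entries when sorting, fmt/repr render the value, null is the
# placeholder for missing results, and ratio computes the relative change
# shown in diff mode.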
def main(**args):
def openio(path, mode='r'):
if path == '-':
if 'r' in mode:
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
else:
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
else:
return open(path, mode)
# find results
results = co.defaultdict(lambda: {})
for path in args.get('csv_paths', '-'):
try:
with openio(path) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
name = result.pop('name', '')
prev = results[(file, name)]
for field in FIELDS:
try:
r = field.parse(result)
if field.name in prev:
results[(file, name)][field.name] = field.acc(
[prev[field.name], r])
else:
results[(file, name)][field.name] = r
except (KeyError, ValueError):
pass
except FileNotFoundError:
pass
# find fields
if args.get('all_fields'):
fields = FIELDS
elif args.get('fields') is not None:
fields_dict = {field.name: field for field in FIELDS}
fields = [fields_dict[f] for f in args['fields']]
else:
fields = []
for field in FIELDS:
if any(field.name in result for result in results.values()):
fields.append(field)
# find total for every field
total = {}
for result in results.values():
for field in fields:
if field.name in result and field.name in total:
total[field.name] = field.acc(
[total[field.name], result[field.name]])
elif field.name in result:
total[field.name] = result[field.name]
# find previous results?
if args.get('diff'):
prev_results = co.defaultdict(lambda: {})
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
name = result.pop('name', '')
prev = prev_results[(file, name)]
for field in FIELDS:
try:
r = field.parse(result)
if field.name in prev:
prev_results[(file, name)][field.name] = field.acc(
[prev[field.name], r])
else:
prev_results[(file, name)][field.name] = r
except (KeyError, ValueError):
pass
except FileNotFoundError:
pass
prev_total = {}
for result in prev_results.values():
for field in fields:
if field.name in result and field.name in prev_total:
prev_total[field.name] = field.acc(
[prev_total[field.name], result[field.name]])
elif field.name in result:
prev_total[field.name] = result[field.name]
# print results
def dedup_entries(results, by='name'):
entries = co.defaultdict(lambda: {})
for (file, func), result in results.items():
entry = (file if by == 'file' else func)
prev = entries[entry]
for field in fields:
if field.name in result and field.name in prev:
entries[entry][field.name] = field.acc(
[prev[field.name], result[field.name]])
elif field.name in result:
entries[entry][field.name] = result[field.name]
return entries
def sorted_entries(entries):
if args.get('sort') is not None:
field = {field.name: field for field in FIELDS}[args['sort']]
return sorted(entries, key=lambda x: (
-(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
elif args.get('reverse_sort') is not None:
field = {field.name: field for field in FIELDS}[args['reverse_sort']]
return sorted(entries, key=lambda x: (
+(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
else:
return sorted(entries)
def print_header(by=''):
if not args.get('diff'):
print('%-36s' % by, end='')
for field in fields:
print((' '+field.fmt) % field.name, end='')
print()
else:
print('%-36s' % by, end='')
for field in fields:
print((' '+field.fmt) % field.name, end='')
print(' %-9s' % '', end='')
print()
def print_entry(name, result):
print('%-36s' % name, end='')
for field in fields:
r = result.get(field.name)
if r is not None:
print((' '+field.fmt) % field.repr(r), end='')
else:
print((' '+field.fmt) % '-', end='')
print()
def print_diff_entry(name, old, new):
print('%-36s' % name, end='')
for field in fields:
n = new.get(field.name)
if n is not None:
print((' '+field.fmt) % field.repr(n), end='')
else:
print((' '+field.fmt) % '-', end='')
o = old.get(field.name)
ratio = (
0.0 if m.isinf(o or 0) and m.isinf(n or 0)
else +float('inf') if m.isinf(n or 0)
else -float('inf') if m.isinf(o or 0)
else 0.0 if not o and not n
else +1.0 if not o
else -1.0 if not n
else field.ratio(o, n))
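# (the fallbacks above mean an entry present on only one side prints as
# ±100%, and a stack limit going to or from unbounded prints as ±∞%)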
print(' %-9s' % (
'' if not ratio
else '(+∞%)' if ratio > 0 and m.isinf(ratio)
else '(-∞%)' if ratio < 0 and m.isinf(ratio)
else '(%+.1f%%)' % (100*ratio)), end='')
print()
def print_entries(by='name'):
entries = dedup_entries(results, by=by)
if not args.get('diff'):
print_header(by=by)
for name, result in sorted_entries(entries.items()):
print_entry(name, result)
else:
prev_entries = dedup_entries(prev_results, by=by)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for name in entries if name not in prev_entries),
sum(1 for name in prev_entries if name not in entries)))
for name, result in sorted_entries(entries.items()):
if args.get('all') or result != prev_entries.get(name, {}):
print_diff_entry(name, prev_entries.get(name, {}), result)
def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total)
else:
print_diff_entry('TOTAL', prev_total, total)
if args.get('summary'):
print_header()
print_totals()
elif args.get('files'):
print_entries(by='file')
print_totals()
else:
print_entries(by='name')
print_totals()
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Summarize measurements")
parser.add_argument('csv_paths', nargs='*', default='-',
help="Description of where to find *.csv files. May be a directory \
or list of paths. *.csv files will be merged to show the total \
coverage.")
parser.add_argument('-d', '--diff',
help="Specify CSV file to diff against.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all objects, not just the ones that changed.")
parser.add_argument('-e', '--all-fields', action='store_true',
help="Show all fields, even those with no results.")
parser.add_argument('-f', '--fields', type=lambda x: re.split(r'\s*,\s*', x),
help="Comma separated list of fields to print, by default all fields \
that are found in the CSV files are printed.")
parser.add_argument('-s', '--sort',
help="Sort by this field.")
parser.add_argument('-S', '--reverse-sort',
help="Sort by this field, but backwards.")
parser.add_argument('-F', '--files', action='store_true',
help="Show file-level calls.")
parser.add_argument('-Y', '--summary', action='store_true',
help="Only show the totals.")
sys.exit(main(**vars(parser.parse_args())))

96
scripts/template.fmt Normal file
View File

@@ -0,0 +1,96 @@
/// AUTOGENERATED TEST ///
#include "lfs.h"
#include "emubd/lfs_emubd.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
// test stuff
static void test_assert(const char *file, unsigned line,
const char *s, uintmax_t v, uintmax_t e) {{
if (v != e) {{
fprintf(stderr, "\033[97m%s:%u: \033[91m"
"assert failed with %jd, expected %jd\033[0m\n"
" %s\n\n", file, line, v, e, s);
exit(-2);
}}
}}
#define test_assert(v, e) \
test_assert(__FILE__, __LINE__, #v " => " #e, v, e)
// implicit variable for asserts
uintmax_t test;
// utility functions for traversals
static int __attribute__((used)) test_count(void *p, lfs_block_t b) {{
(void)b;
unsigned *u = (unsigned*)p;
*u += 1;
return 0;
}}
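// note: test_count has the shape of an lfs_fs_traverse callback and simply
// counts how many blocks a traversal visits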
// lfs declarations
lfs_t lfs;
lfs_emubd_t bd;
// other declarations for convenience
lfs_file_t file;
lfs_dir_t dir;
struct lfs_info info;
uint8_t buffer[1024];
char path[1024];
// test configuration options
#ifndef LFS_READ_SIZE
#define LFS_READ_SIZE 16
#endif
#ifndef LFS_PROG_SIZE
#define LFS_PROG_SIZE LFS_READ_SIZE
#endif
#ifndef LFS_BLOCK_SIZE
#define LFS_BLOCK_SIZE 512
#endif
#ifndef LFS_BLOCK_COUNT
#define LFS_BLOCK_COUNT 1024
#endif
#ifndef LFS_BLOCK_CYCLES
#define LFS_BLOCK_CYCLES 1024
#endif
#ifndef LFS_CACHE_SIZE
#define LFS_CACHE_SIZE (64 % LFS_PROG_SIZE == 0 ? 64 : LFS_PROG_SIZE)
#endif
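// the default cache is 64 bytes, falling back to LFS_PROG_SIZE when 64 is not
// a multiple of the program size (the cache size must be a multiple of the
// prog size)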
#ifndef LFS_LOOKAHEAD_SIZE
#define LFS_LOOKAHEAD_SIZE 16
#endif
const struct lfs_config cfg = {{
.context = &bd,
.read = &lfs_emubd_read,
.prog = &lfs_emubd_prog,
.erase = &lfs_emubd_erase,
.sync = &lfs_emubd_sync,
.read_size = LFS_READ_SIZE,
.prog_size = LFS_PROG_SIZE,
.block_size = LFS_BLOCK_SIZE,
.block_count = LFS_BLOCK_COUNT,
.block_cycles = LFS_BLOCK_CYCLES,
.cache_size = LFS_CACHE_SIZE,
.lookahead_size = LFS_LOOKAHEAD_SIZE,
}};
// Entry point
int main(void) {{
lfs_emubd_create(&cfg, "blocks");
{tests}
lfs_emubd_destroy(&cfg);
}}

View File

@@ -1,860 +1,81 @@
#!/usr/bin/env python3
#!/usr/bin/env python2
# This script manages littlefs tests, which are configured with
# .toml files stored in the tests directory.
#
import toml
import glob
import re
import os
import io
import itertools as it
import collections.abc as abc
import subprocess as sp
import base64
import sys
import copy
import shlex
import pty
import errno
import signal
import subprocess
import os
TEST_PATHS = 'tests'
RULES = """
# add block devices to sources
TESTSRC ?= $(SRC) $(wildcard bd/*.c)
define FLATTEN
%(path)s%%$(subst /,.,$(target)): $(target)
./scripts/explode_asserts.py $$< -o $$@
endef
$(foreach target,$(TESTSRC),$(eval $(FLATTEN)))
def generate(test):
with open("scripts/template.fmt") as file:
template = file.read()
-include %(path)s*.d
.SECONDARY:
haslines = 'TEST_LINE' in os.environ and 'TEST_FILE' in os.environ
%(path)s.test: %(path)s.test.o \\
$(foreach t,$(subst /,.,$(TESTSRC:.c=.o)),%(path)s.$t)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
# needed in case builddir is different
%(path)s%%.o: %(path)s%%.c
$(CC) -c -MMD $(CFLAGS) $< -o $@
"""
COVERAGE_RULES = """
%(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage
# delete lingering coverage
%(path)s.test: | %(path)s.info.clean
.PHONY: %(path)s.info.clean
%(path)s.info.clean:
rm -f %(path)s*.gcda
# accumulate coverage info
.PHONY: %(path)s.info
%(path)s.info:
$(strip $(LCOV) -c \\
$(addprefix -d ,$(wildcard %(path)s*.gcda)) \\
--rc 'geninfo_adjust_src_path=$(shell pwd)' \\
-o $@)
$(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@
ifdef COVERAGETARGET
$(strip $(LCOV) -a $@ \\
$(addprefix -a ,$(wildcard $(COVERAGETARGET))) \\
-o $(COVERAGETARGET))
endif
"""
GLOBALS = """
//////////////// AUTOGENERATED TEST ////////////////
#include "lfs.h"
#include "bd/lfs_testbd.h"
#include <stdio.h>
extern const char *lfs_testbd_path;
extern uint32_t lfs_testbd_cycles;
"""
DEFINES = {
'LFS_READ_SIZE': 16,
'LFS_PROG_SIZE': 'LFS_READ_SIZE',
'LFS_BLOCK_SIZE': 512,
'LFS_BLOCK_COUNT': 1024,
'LFS_BLOCK_CYCLES': -1,
'LFS_CACHE_SIZE': '(64 % LFS_PROG_SIZE == 0 ? 64 : LFS_PROG_SIZE)',
'LFS_LOOKAHEAD_SIZE': 16,
'LFS_ERASE_VALUE': 0xff,
'LFS_ERASE_CYCLES': 0,
'LFS_BADBLOCK_BEHAVIOR': 'LFS_TESTBD_BADBLOCK_PROGERROR',
}
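# these are the default defines applied to every test permutation; suite- and
# case-level defines in the .toml files take precedence over them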
PROLOGUE = """
// prologue
__attribute__((unused)) lfs_t lfs;
__attribute__((unused)) lfs_testbd_t bd;
__attribute__((unused)) lfs_file_t file;
__attribute__((unused)) lfs_dir_t dir;
__attribute__((unused)) struct lfs_info info;
__attribute__((unused)) char path[1024];
__attribute__((unused)) uint8_t buffer[1024];
__attribute__((unused)) lfs_size_t size;
__attribute__((unused)) int err;
__attribute__((unused)) const struct lfs_config cfg = {
.context = &bd,
.read = lfs_testbd_read,
.prog = lfs_testbd_prog,
.erase = lfs_testbd_erase,
.sync = lfs_testbd_sync,
.read_size = LFS_READ_SIZE,
.prog_size = LFS_PROG_SIZE,
.block_size = LFS_BLOCK_SIZE,
.block_count = LFS_BLOCK_COUNT,
.block_cycles = LFS_BLOCK_CYCLES,
.cache_size = LFS_CACHE_SIZE,
.lookahead_size = LFS_LOOKAHEAD_SIZE,
};
__attribute__((unused)) const struct lfs_testbd_config bdcfg = {
.erase_value = LFS_ERASE_VALUE,
.erase_cycles = LFS_ERASE_CYCLES,
.badblock_behavior = LFS_BADBLOCK_BEHAVIOR,
.power_cycles = lfs_testbd_cycles,
};
lfs_testbd_createcfg(&cfg, lfs_testbd_path, &bdcfg) => 0;
"""
EPILOGUE = """
// epilogue
lfs_testbd_destroy(&cfg) => 0;
"""
PASS = '\033[32m✓\033[0m'
FAIL = '\033[31m✗\033[0m'
class TestFailure(Exception):
def __init__(self, case, returncode=None, stdout=None, assert_=None):
self.case = case
self.returncode = returncode
self.stdout = stdout
self.assert_ = assert_
class TestCase:
def __init__(self, config, filter=filter,
suite=None, caseno=None, lineno=None, **_):
self.config = config
self.filter = filter
self.suite = suite
self.caseno = caseno
self.lineno = lineno
self.code = config['code']
self.code_lineno = config['code_lineno']
self.defines = config.get('define', {})
self.if_ = config.get('if', None)
self.in_ = config.get('in', None)
self.result = None
def __str__(self):
if hasattr(self, 'permno'):
if any(k not in self.case.defines for k in self.defines):
return '%s#%d#%d (%s)' % (
self.suite.name, self.caseno, self.permno, ', '.join(
'%s=%s' % (k, v) for k, v in self.defines.items()
if k not in self.case.defines))
else:
return '%s#%d#%d' % (
self.suite.name, self.caseno, self.permno)
lines = []
for offset, line in enumerate(
re.split('(?<=(?:.;| [{}]))\n', test.read())):
match = re.match('((?: *\n)*)( *)(.*)=>(.*);',
line, re.DOTALL | re.MULTILINE)
if match:
preface, tab, test, expect = match.groups()
lines.extend(['']*preface.count('\n'))
lines.append(tab+'test_assert({test}, {expect});'.format(
test=test.strip(), expect=expect.strip()))
else:
return '%s#%d' % (
self.suite.name, self.caseno)
lines.append(line)
def permute(self, class_=None, defines={}, permno=None, **_):
ncase = (class_ or type(self))(self.config)
for k, v in self.__dict__.items():
setattr(ncase, k, v)
ncase.case = self
ncase.perms = [ncase]
ncase.permno = permno
ncase.defines = defines
return ncase
# Create test file
with open('test.c', 'w') as file:
if 'TEST_LINE' in os.environ and 'TEST_FILE' in os.environ:
lines.insert(0, '#line %d "%s"' % (
int(os.environ['TEST_LINE']) + 1,
os.environ['TEST_FILE']))
lines.append('#line %d "test.c"' % (
template[:template.find('{tests}')].count('\n')
+ len(lines) + 2))
def build(self, f, **_):
# prologue
for k, v in sorted(self.defines.items()):
if k not in self.suite.defines:
f.write('#define %s %s\n' % (k, v))
file.write(template.format(tests='\n'.join(lines)))
f.write('void test_case%d(%s) {' % (self.caseno, ','.join(
'\n'+8*' '+'__attribute__((unused)) intmax_t %s' % k
for k in sorted(self.perms[0].defines)
if k not in self.defines)))
f.write(PROLOGUE)
f.write('\n')
f.write(4*' '+'// test case %d\n' % self.caseno)
f.write(4*' '+'#line %d "%s"\n' % (self.code_lineno, self.suite.path))
# test case goes here
f.write(self.code)
# epilogue
f.write(EPILOGUE)
f.write('}\n')
for k, v in sorted(self.defines.items()):
if k not in self.suite.defines:
f.write('#undef %s\n' % k)
def shouldtest(self, **args):
if (self.filter is not None and
len(self.filter) >= 1 and
self.filter[0] != self.caseno):
return False
elif (self.filter is not None and
len(self.filter) >= 2 and
self.filter[1] != self.permno):
return False
elif args.get('no_internal') and self.in_ is not None:
return False
elif self.if_ is not None:
if_ = self.if_
while True:
for k, v in sorted(self.defines.items(),
key=lambda x: len(x[0]), reverse=True):
if k in if_:
if_ = if_.replace(k, '(%s)' % v)
break
else:
break
if_ = (
re.sub('(\&\&|\?)', ' and ',
re.sub('(\|\||:)', ' or ',
re.sub('!(?!=)', ' not ', if_))))
return eval(if_)
else:
return True
def test(self, exec=[], persist=False, cycles=None,
gdb=False, failure=None, disk=None, **args):
# build command
cmd = exec + ['./%s.test' % self.suite.path,
repr(self.caseno), repr(self.permno)]
# persist disk or keep in RAM for speed?
if persist:
if not disk:
disk = self.suite.path + '.disk'
if persist != 'noerase':
try:
with open(disk, 'w') as f:
f.truncate(0)
if args.get('verbose'):
print('truncate --size=0', disk)
except FileNotFoundError:
pass
cmd.append(disk)
# simulate power-loss after n cycles?
if cycles:
cmd.append(str(cycles))
# failed? drop into debugger?
if gdb and failure:
ncmd = ['gdb']
if gdb == 'assert':
ncmd.extend(['-ex', 'r'])
if failure.assert_:
ncmd.extend(['-ex', 'up 2'])
elif gdb == 'main':
ncmd.extend([
'-ex', 'b %s:%d' % (self.suite.path, self.code_lineno),
'-ex', 'r'])
ncmd.extend(['--args'] + cmd)
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in ncmd))
signal.signal(signal.SIGINT, signal.SIG_IGN)
sys.exit(sp.call(ncmd))
# run test case!
mpty, spty = pty.openpty()
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd, stdout=spty, stderr=spty)
os.close(spty)
mpty = os.fdopen(mpty, 'r', 1)
stdout = []
assert_ = None
try:
while True:
try:
line = mpty.readline()
except OSError as e:
if e.errno == errno.EIO:
break
raise
if not line:
break
stdout.append(line)
if args.get('verbose'):
sys.stdout.write(line)
# intercept asserts
m = re.match(
'^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
.format('(?:\033\[[\d;]*.| )*', 'assert'),
line)
if m and assert_ is None:
try:
with open(m.group(1)) as f:
lineno = int(m.group(2))
line = (next(it.islice(f, lineno-1, None))
.strip('\n'))
assert_ = {
'path': m.group(1),
'line': line,
'lineno': lineno,
'message': m.group(3)}
except:
pass
except KeyboardInterrupt:
raise TestFailure(self, 1, stdout, None)
proc.wait()
# did we pass?
if proc.returncode != 0:
raise TestFailure(self, proc.returncode, stdout, assert_)
else:
return PASS
class ValgrindTestCase(TestCase):
def __init__(self, config, **args):
self.leaky = config.get('leaky', False)
super().__init__(config, **args)
def shouldtest(self, **args):
return not self.leaky and super().shouldtest(**args)
def test(self, exec=[], **args):
verbose = args.get('verbose')
uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1)
exec = [
'valgrind',
'--leak-check=full',
] + (['--undef-value-errors=no'] if uninit else []) + [
] + (['--track-origins=yes'] if not uninit else []) + [
'--error-exitcode=4',
'--error-limit=no',
] + (['--num-callers=1'] if not verbose else []) + [
'-q'] + exec
return super().test(exec=exec, **args)
class ReentrantTestCase(TestCase):
def __init__(self, config, **args):
self.reentrant = config.get('reentrant', False)
super().__init__(config, **args)
def shouldtest(self, **args):
return self.reentrant and super().shouldtest(**args)
def test(self, persist=False, gdb=False, failure=None, **args):
for cycles in it.count(1):
# clear disk first?
if cycles == 1 and persist != 'noerase':
persist = 'erase'
else:
persist = 'noerase'
# exact cycle we should drop into debugger?
if gdb and failure and failure.cycleno == cycles:
return super().test(gdb=gdb, persist=persist, cycles=cycles,
failure=failure, **args)
# run tests, but kill the program after prog/erase has
# been hit n cycles. We exit with a special return code if the
# program has not finished, since this isn't a test failure.
try:
return super().test(persist=persist, cycles=cycles, **args)
except TestFailure as nfailure:
if nfailure.returncode == 33:
continue
else:
nfailure.cycleno = cycles
raise
class TestSuite:
def __init__(self, path, classes=[TestCase], defines={},
filter=None, **args):
self.name = os.path.basename(path)
if self.name.endswith('.toml'):
self.name = self.name[:-len('.toml')]
if args.get('build_dir'):
self.toml = path
self.path = args['build_dir'] + '/' + path
else:
self.toml = path
self.path = path
self.classes = classes
self.defines = defines.copy()
self.filter = filter
with open(self.toml) as f:
# load tests
config = toml.load(f)
# find line numbers
f.seek(0)
linenos = []
code_linenos = []
for i, line in enumerate(f):
if re.match(r'\[\[\s*case\s*\]\]', line):
linenos.append(i+1)
if re.match(r'code\s*=\s*(\'\'\'|""")', line):
code_linenos.append(i+2)
code_linenos.reverse()
# grab global config
for k, v in config.get('define', {}).items():
if k not in self.defines:
self.defines[k] = v
self.code = config.get('code', None)
if self.code is not None:
self.code_lineno = code_linenos.pop()
# create initial test cases
self.cases = []
for i, (case, lineno) in enumerate(zip(config['case'], linenos)):
# code lineno?
if 'code' in case:
case['code_lineno'] = code_linenos.pop()
# merge conditions if necessary
if 'if' in config and 'if' in case:
case['if'] = '(%s) && (%s)' % (config['if'], case['if'])
elif 'if' in config:
case['if'] = config['if']
# initialize test case
self.cases.append(TestCase(case, filter=filter,
suite=self, caseno=i+1, lineno=lineno, **args))
def __str__(self):
return self.name
def __lt__(self, other):
return self.name < other.name
def permute(self, **args):
for case in self.cases:
# let's find all parameterized definitions, in one of [args.D,
# suite.defines, case.defines, DEFINES]. Note that each of these
# can be either a dict of defines, or a list of dicts, expressing
# an initial set of permutations.
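# As a sketch with made-up values: defines = {'N': [1, 2], 'M': 4} expands
# below into two permutations, {'N': 1, 'M': 4} and {'N': 2, 'M': 4}.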
pending = [{}]
for inits in [self.defines, case.defines, DEFINES]:
if not isinstance(inits, list):
inits = [inits]
npending = []
for init, pinit in it.product(inits, pending):
ninit = pinit.copy()
for k, v in init.items():
if k not in ninit:
try:
ninit[k] = eval(v)
except:
ninit[k] = v
npending.append(ninit)
pending = npending
# expand permutations
pending = list(reversed(pending))
expanded = []
while pending:
perm = pending.pop()
for k, v in sorted(perm.items()):
if not isinstance(v, str) and isinstance(v, abc.Iterable):
for nv in reversed(v):
nperm = perm.copy()
nperm[k] = nv
pending.append(nperm)
break
else:
expanded.append(perm)
# generate permutations
case.perms = []
for i, (class_, defines) in enumerate(
it.product(self.classes, expanded)):
case.perms.append(case.permute(
class_, defines, permno=i+1, **args))
# also track non-unique defines
case.defines = {}
for k, v in case.perms[0].defines.items():
if all(perm.defines[k] == v for perm in case.perms):
case.defines[k] = v
# track all perms and non-unique defines
self.perms = []
for case in self.cases:
self.perms.extend(case.perms)
self.defines = {}
for k, v in self.perms[0].defines.items():
if all(perm.defines.get(k, None) == v for perm in self.perms):
self.defines[k] = v
return self.perms
def build(self, **args):
# build test files
tf = open(self.path + '.test.tc', 'w')
tf.write(GLOBALS)
if self.code is not None:
tf.write('#line %d "%s"\n' % (self.code_lineno, self.path))
tf.write(self.code)
tfs = {None: tf}
for case in self.cases:
if case.in_ not in tfs:
tfs[case.in_] = open(self.path+'.'+
re.sub('(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w')
tfs[case.in_].write('#line 1 "%s"\n' % case.in_)
with open(case.in_) as f:
for line in f:
tfs[case.in_].write(line)
tfs[case.in_].write('\n')
tfs[case.in_].write(GLOBALS)
tfs[case.in_].write('\n')
case.build(tfs[case.in_], **args)
tf.write('\n')
tf.write('const char *lfs_testbd_path;\n')
tf.write('uint32_t lfs_testbd_cycles;\n')
tf.write('int main(int argc, char **argv) {\n')
tf.write(4*' '+'int case_ = (argc > 1) ? atoi(argv[1]) : 0;\n')
tf.write(4*' '+'int perm = (argc > 2) ? atoi(argv[2]) : 0;\n')
tf.write(4*' '+'lfs_testbd_path = (argc > 3) ? argv[3] : NULL;\n')
tf.write(4*' '+'lfs_testbd_cycles = (argc > 4) ? atoi(argv[4]) : 0;\n')
for perm in self.perms:
# test declaration
tf.write(4*' '+'extern void test_case%d(%s);\n' % (
perm.caseno, ', '.join(
'intmax_t %s' % k for k in sorted(perm.defines)
if k not in perm.case.defines)))
# test call
tf.write(4*' '+
'if (argc < 3 || (case_ == %d && perm == %d)) {'
' test_case%d(%s); '
'}\n' % (perm.caseno, perm.permno, perm.caseno, ', '.join(
str(v) for k, v in sorted(perm.defines.items())
if k not in perm.case.defines)))
tf.write('}\n')
for tf in tfs.values():
tf.close()
# write makefiles
with open(self.path + '.mk', 'w') as mk:
mk.write(RULES.replace(4*' ', '\t') % dict(path=self.path))
mk.write('\n')
# add coverage hooks?
if args.get('coverage'):
mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict(
path=self.path))
mk.write('\n')
# add truly global defines globally
for k, v in sorted(self.defines.items()):
mk.write('%s.test: override CFLAGS += -D%s=%r\n'
% (self.path, k, v))
for path in tfs:
if path is None:
mk.write('%s: %s | %s\n' % (
self.path+'.test.c',
self.toml,
self.path+'.test.tc'))
else:
mk.write('%s: %s %s | %s\n' % (
self.path+'.'+path.replace('/', '.'),
self.toml,
path,
self.path+'.'+re.sub('(\.c)?$', '.tc',
path.replace('/', '.'))))
mk.write('\t./scripts/explode_asserts.py $| -o $@\n')
self.makefile = self.path + '.mk'
self.target = self.path + '.test'
return self.makefile, self.target
def test(self, **args):
# run test suite!
if not args.get('verbose', True):
sys.stdout.write(self.name + ' ')
sys.stdout.flush()
for perm in self.perms:
if not perm.shouldtest(**args):
continue
try:
result = perm.test(**args)
except TestFailure as failure:
perm.result = failure
if not args.get('verbose', True):
sys.stdout.write(FAIL)
sys.stdout.flush()
if not args.get('keep_going'):
if not args.get('verbose', True):
sys.stdout.write('\n')
raise
else:
perm.result = PASS
if not args.get('verbose', True):
sys.stdout.write(PASS)
sys.stdout.flush()
if not args.get('verbose', True):
sys.stdout.write('\n')
def main(**args):
# figure out explicit defines
defines = {}
for define in args['D']:
k, v, *_ = define.split('=', 2) + ['']
defines[k] = v
# and what class of TestCase to run
classes = []
if args.get('normal'):
classes.append(TestCase)
if args.get('reentrant'):
classes.append(ReentrantTestCase)
if args.get('valgrind'):
classes.append(ValgrindTestCase)
if not classes:
classes = [TestCase]
suites = []
for testpath in args['test_paths']:
# optionally specified test case/perm
testpath, *filter = testpath.split('#')
filter = [int(f) for f in filter]
# figure out the suite's toml file
if os.path.isdir(testpath):
testpath = testpath + '/*.toml'
elif os.path.isfile(testpath):
testpath = testpath
elif testpath.endswith('.toml'):
testpath = TEST_PATHS + '/' + testpath
else:
testpath = TEST_PATHS + '/' + testpath + '.toml'
# find tests
for path in glob.glob(testpath):
suites.append(TestSuite(path, classes, defines, filter, **args))
# sort for reproducibility
suites = sorted(suites)
# generate permutations
for suite in suites:
suite.permute(**args)
# build tests in parallel
print('====== building ======')
makefiles = []
targets = []
for suite in suites:
makefile, target = suite.build(**args)
makefiles.append(makefile)
targets.append(target)
cmd = (['make', '-f', 'Makefile'] +
list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
[target for target in targets])
mpty, spty = pty.openpty()
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd, stdout=spty, stderr=spty)
os.close(spty)
mpty = os.fdopen(mpty, 'r', 1)
stdout = []
while True:
try:
line = mpty.readline()
except OSError as e:
if e.errno == errno.EIO:
break
raise
if not line:
break
stdout.append(line)
if args.get('verbose'):
sys.stdout.write(line)
# intercept warnings
m = re.match(
'^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
.format('(?:\033\[[\d;]*.| )*', 'warning'),
line)
if m and not args.get('verbose'):
try:
with open(m.group(1)) as f:
lineno = int(m.group(2))
line = next(it.islice(f, lineno-1, None)).strip('\n')
sys.stdout.write(
"\033[01m{path}:{lineno}:\033[01;35mwarning:\033[m "
"{message}\n{line}\n\n".format(
path=m.group(1), line=line, lineno=lineno,
message=m.group(3)))
except:
pass
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in stdout:
sys.stdout.write(line)
sys.exit(-1)
print('built %d test suites, %d test cases, %d permutations' % (
len(suites),
sum(len(suite.cases) for suite in suites),
sum(len(suite.perms) for suite in suites)))
total = 0
for suite in suites:
for perm in suite.perms:
total += perm.shouldtest(**args)
if total != sum(len(suite.perms) for suite in suites):
print('filtered down to %d permutations' % total)
# only requested to build?
if args.get('build'):
return 0
print('====== testing ======')
# Remove build artifacts to force rebuild
try:
for suite in suites:
suite.test(**args)
except TestFailure:
os.remove('test.o')
os.remove('lfs')
except OSError:
pass
print('====== results ======')
passed = 0
failed = 0
for suite in suites:
for perm in suite.perms:
if perm.result == PASS:
passed += 1
elif isinstance(perm.result, TestFailure):
sys.stdout.write(
"\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m "
"{perm} failed\n".format(
perm=perm, path=perm.suite.path, lineno=perm.lineno,
returncode=perm.result.returncode or 0))
if perm.result.stdout:
if perm.result.assert_:
stdout = perm.result.stdout[:-1]
else:
stdout = perm.result.stdout
for line in stdout[-5:]:
sys.stdout.write(line)
if perm.result.assert_:
sys.stdout.write(
"\033[01m{path}:{lineno}:\033[01;31massert:\033[m "
"{message}\n{line}\n".format(
**perm.result.assert_))
sys.stdout.write('\n')
failed += 1
def compile():
subprocess.check_call([
os.environ.get('MAKE', 'make'),
'--no-print-directory', '-s'])
if args.get('coverage'):
# collect coverage info
# why -j1? lcov doesn't work in parallel because of gcov limitations
cmd = (['make', '-j1', '-f', 'Makefile'] +
list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
(['COVERAGETARGET=%s' % args['coverage']]
if isinstance(args['coverage'], str) else []) +
[suite.path + '.info' for suite in suites
if any(perm.result == PASS for perm in suite.perms)])
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE if not args.get('verbose') else None,
stderr=sp.STDOUT if not args.get('verbose') else None,
universal_newlines=True)
stdout = []
for line in proc.stdout:
stdout.append(line)
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in stdout:
sys.stdout.write(line)
sys.exit(-1)
def execute():
if 'EXEC' in os.environ:
subprocess.check_call([os.environ['EXEC'], "./lfs"])
else:
subprocess.check_call(["./lfs"])
if args.get('gdb'):
failure = None
for suite in suites:
for perm in suite.perms:
if isinstance(perm.result, TestFailure):
failure = perm.result
if failure is not None:
print('======= gdb ======')
# drop into gdb
failure.case.test(failure=failure, **args)
sys.exit(0)
def main(test=None):
try:
if test and not test.startswith('-'):
with open(test) as file:
generate(file)
else:
generate(sys.stdin)
print('tests passed %d/%d (%.1f%%)' % (passed, total,
100*(passed/total if total else 1.0)))
print('tests failed %d/%d (%.1f%%)' % (failed, total,
100*(failed/total if total else 1.0)))
return 1 if failed > 0 else 0
compile()
if test == '-s':
sys.exit(1)
execute()
except subprocess.CalledProcessError:
# Python stack trace is counterproductive, just exit
sys.exit(2)
except KeyboardInterrupt:
# Python stack trace is counterproductive, just exit
sys.exit(3)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Run parameterized tests in various configurations.")
parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS],
help="Description of test(s) to run. By default, this is all tests \
found in the \"{0}\" directory. Here, you can specify a different \
directory of tests, a specific file, a suite by name, and even \
specific test cases and permutations. For example \
\"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TEST_PATHS))
parser.add_argument('-D', action='append', default=[],
help="Overriding parameter definitions.")
parser.add_argument('-v', '--verbose', action='store_true',
help="Output everything that is happening.")
parser.add_argument('-k', '--keep-going', action='store_true',
help="Run all tests instead of stopping on first error. Useful for CI.")
parser.add_argument('-p', '--persist', choices=['erase', 'noerase'],
nargs='?', const='erase',
help="Store disk image in a file.")
parser.add_argument('-b', '--build', action='store_true',
help="Only build the tests, do not execute.")
parser.add_argument('-g', '--gdb', choices=['init', 'main', 'assert'],
nargs='?', const='assert',
help="Drop into gdb on test failure.")
parser.add_argument('--no-internal', action='store_true',
help="Don't run tests that require internal knowledge.")
parser.add_argument('-n', '--normal', action='store_true',
help="Run tests normally.")
parser.add_argument('-r', '--reentrant', action='store_true',
help="Run reentrant tests with simulated power-loss.")
parser.add_argument('--valgrind', action='store_true',
help="Run non-leaky tests under valgrind to check for memory leaks.")
parser.add_argument('--exec', default=[], type=lambda e: e.split(),
help="Run tests with another executable prefixed on the command line.")
parser.add_argument('--disk',
help="Specify a file to use for persistent/reentrant tests.")
parser.add_argument('--coverage', type=lambda x: x if x else True,
nargs='?', const='',
help="Collect coverage information during testing. This uses lcov/gcov \
to accumulate coverage information into *.info files. May also be \
a path to a *.info file to accumulate coverage info into.")
parser.add_argument('--build-dir',
help="Build relative to the specified directory instead of the \
current directory.")
sys.exit(main(**vars(parser.parse_args())))
main(*sys.argv[1:])

443
tests/test_alloc.toml → tests/test_alloc.sh Normal file → Executable file
View File

@@ -1,201 +1,127 @@
# allocator tests
# note: for these to work there are a number of constraints on the device geometry
if = 'LFS_BLOCK_CYCLES == -1'
[[case]] # parallel allocation test
define.FILES = 3
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
code = '''
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
lfs_file_t files[FILES];
#!/bin/bash
set -euE
export TEST_FILE=$0
trap 'export TEST_LINE=$LINENO' DEBUG
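# TEST_FILE/TEST_LINE let the generated test.c point assertion failures back
# at this script via #line directives (see scripts/test.py)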
echo "=== Allocator tests ==="
rm -rf blocks
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "breakfast") => 0;
lfs_unmount(&lfs) => 0;
TEST
SIZE=15000
lfs_mkdir() {
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
for (int n = 0; n < FILES; n++) {
sprintf(path, "breakfast/%s", names[n]);
lfs_mkdir(&lfs, "$1") => 0;
lfs_unmount(&lfs) => 0;
TEST
}
lfs_remove() {
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "$1/eggs") => 0;
lfs_remove(&lfs, "$1/bacon") => 0;
lfs_remove(&lfs, "$1/pancakes") => 0;
lfs_remove(&lfs, "$1") => 0;
lfs_unmount(&lfs) => 0;
TEST
}
lfs_alloc_singleproc() {
scripts/test.py << TEST
const char *names[] = {"bacon", "eggs", "pancakes"};
lfs_file_t files[sizeof(names)/sizeof(names[0])];
lfs_mount(&lfs, &cfg) => 0;
for (unsigned n = 0; n < sizeof(names)/sizeof(names[0]); n++) {
sprintf(path, "$1/%s", names[n]);
lfs_file_open(&lfs, &files[n], path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
}
for (int n = 0; n < FILES; n++) {
size = strlen(names[n]);
for (lfs_size_t i = 0; i < SIZE; i += size) {
for (unsigned n = 0; n < sizeof(names)/sizeof(names[0]); n++) {
lfs_size_t size = strlen(names[n]);
for (int i = 0; i < $SIZE; i++) {
lfs_file_write(&lfs, &files[n], names[n], size) => size;
}
}
for (int n = 0; n < FILES; n++) {
for (unsigned n = 0; n < sizeof(names)/sizeof(names[0]); n++) {
lfs_file_close(&lfs, &files[n]) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
}
lfs_alloc_multiproc() {
for name in bacon eggs pancakes
do
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
for (int n = 0; n < FILES; n++) {
sprintf(path, "breakfast/%s", names[n]);
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
size = strlen(names[n]);
for (lfs_size_t i = 0; i < SIZE; i += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, names[n], size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "$1/$name",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
lfs_size_t size = strlen("$name");
memcpy(buffer, "$name", size);
for (int i = 0; i < $SIZE; i++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
TEST
done
}
[[case]] # serial allocation test
define.FILES = 3
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
code = '''
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
lfs_format(&lfs, &cfg) => 0;
lfs_verify() {
for name in bacon eggs pancakes
do
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "breakfast") => 0;
lfs_file_open(&lfs, &file, "$1/$name", LFS_O_RDONLY) => 0;
lfs_size_t size = strlen("$name");
for (int i = 0; i < $SIZE; i++) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "$name", size) => 0;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
done
}
for (int n = 0; n < FILES; n++) {
lfs_mount(&lfs, &cfg) => 0;
sprintf(path, "breakfast/%s", names[n]);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen(names[n]);
memcpy(buffer, names[n], size);
for (int i = 0; i < SIZE; i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
}
echo "--- Single-process allocation test ---"
lfs_mkdir singleproc
lfs_alloc_singleproc singleproc
lfs_verify singleproc
lfs_mount(&lfs, &cfg) => 0;
for (int n = 0; n < FILES; n++) {
sprintf(path, "breakfast/%s", names[n]);
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
size = strlen(names[n]);
for (int i = 0; i < SIZE; i += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, names[n], size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
'''
echo "--- Multi-process allocation test ---"
lfs_mkdir multiproc
lfs_alloc_multiproc multiproc
lfs_verify multiproc
lfs_verify singleproc
[[case]] # parallel allocation reuse test
define.FILES = 3
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
define.CYCLES = [1, 10]
code = '''
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
lfs_file_t files[FILES];
echo "--- Single-process reuse test ---"
lfs_remove singleproc
lfs_mkdir singleprocreuse
lfs_alloc_singleproc singleprocreuse
lfs_verify singleprocreuse
lfs_verify multiproc
lfs_format(&lfs, &cfg) => 0;
echo "--- Multi-process reuse test ---"
lfs_remove multiproc
lfs_mkdir multiprocreuse
lfs_alloc_singleproc multiprocreuse
lfs_verify multiprocreuse
lfs_verify singleprocreuse
for (int c = 0; c < CYCLES; c++) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "breakfast") => 0;
lfs_unmount(&lfs) => 0;
echo "--- Cleanup ---"
lfs_remove multiprocreuse
lfs_remove singleprocreuse
lfs_mount(&lfs, &cfg) => 0;
for (int n = 0; n < FILES; n++) {
sprintf(path, "breakfast/%s", names[n]);
lfs_file_open(&lfs, &files[n], path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
}
for (int n = 0; n < FILES; n++) {
size = strlen(names[n]);
for (int i = 0; i < SIZE; i += size) {
lfs_file_write(&lfs, &files[n], names[n], size) => size;
}
}
for (int n = 0; n < FILES; n++) {
lfs_file_close(&lfs, &files[n]) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int n = 0; n < FILES; n++) {
sprintf(path, "breakfast/%s", names[n]);
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
size = strlen(names[n]);
for (int i = 0; i < SIZE; i += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, names[n], size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int n = 0; n < FILES; n++) {
sprintf(path, "breakfast/%s", names[n]);
lfs_remove(&lfs, path) => 0;
}
lfs_remove(&lfs, "breakfast") => 0;
lfs_unmount(&lfs) => 0;
}
'''
[[case]] # serial allocation reuse test
define.FILES = 3
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
define.CYCLES = [1, 10]
code = '''
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
lfs_format(&lfs, &cfg) => 0;
for (int c = 0; c < CYCLES; c++) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "breakfast") => 0;
lfs_unmount(&lfs) => 0;
for (int n = 0; n < FILES; n++) {
lfs_mount(&lfs, &cfg) => 0;
sprintf(path, "breakfast/%s", names[n]);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen(names[n]);
memcpy(buffer, names[n], size);
for (int i = 0; i < SIZE; i += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
}
lfs_mount(&lfs, &cfg) => 0;
for (int n = 0; n < FILES; n++) {
sprintf(path, "breakfast/%s", names[n]);
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
size = strlen(names[n]);
for (int i = 0; i < SIZE; i += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, names[n], size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int n = 0; n < FILES; n++) {
sprintf(path, "breakfast/%s", names[n]);
lfs_remove(&lfs, path) => 0;
}
lfs_remove(&lfs, "breakfast") => 0;
lfs_unmount(&lfs) => 0;
}
'''
[[case]] # exhaustion test
code = '''
lfs_format(&lfs, &cfg) => 0;
echo "--- Exhaustion test ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
size = strlen("exhaustion");
lfs_size_t size = strlen("exhaustion");
memcpy(buffer, "exhaustion", size);
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_sync(&lfs, &file) => 0;
@@ -215,27 +141,27 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
size = strlen("exhaustion");
lfs_size_t size = strlen("exhaustion");
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "exhaustion", size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
TEST
[[case]] # exhaustion wraparound test
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / 3)'
code = '''
lfs_format(&lfs, &cfg) => 0;
echo "--- Exhaustion wraparound test ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "exhaustion") => 0;
lfs_file_open(&lfs, &file, "padding", LFS_O_WRONLY | LFS_O_CREAT);
size = strlen("buffering");
lfs_size_t size = strlen("buffering");
memcpy(buffer, "buffering", size);
for (int i = 0; i < SIZE; i += size) {
for (int i = 0; i < $SIZE; i++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_close(&lfs, &file) => 0;
@@ -262,29 +188,30 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
size = strlen("exhaustion");
lfs_size_t size = strlen("exhaustion");
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "exhaustion", size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_remove(&lfs, "exhaustion") => 0;
lfs_unmount(&lfs) => 0;
'''
TEST
[[case]] # dir exhaustion test
code = '''
lfs_format(&lfs, &cfg) => 0;
echo "--- Dir exhaustion test ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
// find out max file size
lfs_mkdir(&lfs, "exhaustiondir") => 0;
size = strlen("blahblahblahblah");
lfs_size_t size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
int count = 0;
int err;
while (true) {
err = lfs_file_write(&lfs, &file, buffer, size);
if (err < 0) {
@@ -321,102 +248,10 @@ code = '''
lfs_remove(&lfs, "exhaustion") => 0;
lfs_unmount(&lfs) => 0;
'''
TEST
[[case]] # what if we have a bad block during an allocation scan?
in = "lfs.c"
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_BADBLOCK_BEHAVIOR = 'LFS_TESTBD_BADBLOCK_READERROR'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
// first fill to exhaustion to find available space
lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "waka");
size = strlen("waka");
lfs_size_t filesize = 0;
while (true) {
lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
assert(res == (lfs_ssize_t)size || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
break;
}
filesize += size;
}
lfs_file_close(&lfs, &file) => 0;
// now fill all but a couple of blocks of the filesystem with data
filesize -= 3*LFS_BLOCK_SIZE;
lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "waka");
size = strlen("waka");
for (lfs_size_t i = 0; i < filesize/size; i++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_close(&lfs, &file) => 0;
// also save head of file so we can error during lookahead scan
lfs_block_t fileblock = file.ctz.head;
lfs_unmount(&lfs) => 0;
// remount to force an alloc scan
lfs_mount(&lfs, &cfg) => 0;
// but mark the head of our file as a "bad block", this will force our
// scan to bail early
lfs_testbd_setwear(&cfg, fileblock, 0xffffffff) => 0;
lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "chomp");
size = strlen("chomp");
while (true) {
lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
assert(res == (lfs_ssize_t)size || res == LFS_ERR_CORRUPT);
if (res == LFS_ERR_CORRUPT) {
break;
}
}
lfs_file_close(&lfs, &file) => 0;
// now reverse the "bad block" and try to write the file again until we
// run out of space
lfs_testbd_setwear(&cfg, fileblock, 0) => 0;
lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "chomp");
size = strlen("chomp");
while (true) {
lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
assert(res == (lfs_ssize_t)size || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
break;
}
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// check that the disk isn't hurt
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "pacman", LFS_O_RDONLY) => 0;
strcpy((char*)buffer, "waka");
size = strlen("waka");
for (lfs_size_t i = 0; i < filesize/size; i++) {
uint8_t rbuffer[4];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(memcmp(rbuffer, buffer, size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
# Below, I don't like these tests. They're fragile and depend _heavily_
# on the geometry of the block device. But they are valuable. Eventually they
# should be removed and replaced with generalized tests.
[[case]] # chained dir exhaustion test
define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
code = '''
lfs_format(&lfs, &cfg) => 0;
echo "--- Chained dir exhaustion test ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
// find out max file size
@@ -425,10 +260,11 @@ code = '''
sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
lfs_mkdir(&lfs, path) => 0;
}
size = strlen("blahblahblahblah");
lfs_size_t size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
int count = 0;
int err;
while (true) {
err = lfs_file_write(&lfs, &file, buffer, size);
if (err < 0) {
@@ -480,14 +316,13 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
TEST
[[case]] # split dir test
define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
code = '''
echo "--- Split dir test ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
// create one block hole for half a directory
@@ -499,7 +334,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
size = strlen("blahblahblahblah");
lfs_size_t size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < (cfg.block_count-4)*(cfg.block_size-8);
@@ -525,20 +360,18 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
TEST
[[case]] # outdated lookahead test
define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
code = '''
echo "--- Outdated lookahead test ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
// fill completely with two files
lfs_file_open(&lfs, &file, "exhaustion1",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
size = strlen("blahblahblahblah");
lfs_size_t size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
@@ -588,22 +421,18 @@ code = '''
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_close(&lfs, &file) => 0;
TEST
lfs_unmount(&lfs) => 0;
'''
[[case]] # outdated lookahead and split dir test
define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
code = '''
echo "--- Outdated lookahead and split dir test ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
// fill completely with two files
lfs_file_open(&lfs, &file, "exhaustion1",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
size = strlen("blahblahblahblah");
lfs_size_t size = strlen("blahblahblahblah");
memcpy(buffer, "blahblahblahblah", size);
for (lfs_size_t i = 0;
i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
@@ -650,4 +479,6 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
TEST
scripts/results.py

84
tests/test_attrs.toml → tests/test_attrs.sh Normal file → Executable file
View File

@@ -1,13 +1,25 @@
[[case]] # set/get attribute
code = '''
#!/bin/bash
set -eu
export TEST_FILE=$0
trap 'export TEST_LINE=$LINENO' DEBUG
echo "=== Attr tests ==="
rm -rf blocks
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_open(&lfs, &file, "hello/hello",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello"))
=> strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
TEST
echo "--- Set/get attribute ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
memset(buffer, 0, sizeof(buffer));
lfs_setattr(&lfs, "hello", 'A', "aaaa", 4) => 0;
@@ -59,7 +71,8 @@ code = '''
lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
memset(buffer, 0, sizeof(buffer));
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
@@ -74,18 +87,10 @@ code = '''
memcmp(buffer, "hello", strlen("hello")) => 0;
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
'''
[[case]] # set/get root attribute
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
TEST
echo "--- Set/get root attribute ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
memset(buffer, 0, sizeof(buffer));
lfs_setattr(&lfs, "/", 'A', "aaaa", 4) => 0;
@@ -136,7 +141,8 @@ code = '''
lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 9;
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
memset(buffer, 0, sizeof(buffer));
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
@@ -151,18 +157,10 @@ code = '''
memcmp(buffer, "hello", strlen("hello")) => 0;
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
'''
[[case]] # set/get file attribute
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
TEST
echo "--- Set/get file attribute ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
memset(buffer, 0, sizeof(buffer));
struct lfs_attr attrs1[] = {
@@ -237,17 +235,18 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
memset(buffer, 0, sizeof(buffer));
struct lfs_attr attrs3[] = {
struct lfs_attr attrs2[] = {
{'A', buffer, 4},
{'B', buffer+4, 9},
{'C', buffer+13, 5},
};
struct lfs_file_config cfg3 = {.attrs=attrs3, .attr_count=3};
struct lfs_file_config cfg2 = {.attrs=attrs2, .attr_count=3};
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg3) => 0;
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg2) => 0;
lfs_file_close(&lfs, &file) => 0;
memcmp(buffer, "aaaa", 4) => 0;
memcmp(buffer+4, "fffffffff", 9) => 0;
@@ -258,22 +257,11 @@ code = '''
memcmp(buffer, "hello", strlen("hello")) => 0;
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
'''
TEST
[[case]] # deferred file attributes
code = '''
lfs_format(&lfs, &cfg) => 0;
echo "--- Deferred file attributes ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_setattr(&lfs, "hello/hello", 'B', "fffffffff", 9) => 0;
lfs_setattr(&lfs, "hello/hello", 'C', "ccccc", 5) => 0;
memset(buffer, 0, sizeof(buffer));
struct lfs_attr attrs1[] = {
{'B', "gggg", 4},
@@ -301,4 +289,6 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
TEST
scripts/results.py

View File

@@ -1,241 +0,0 @@
# bad blocks with block cycles should be tested in test_relocations
if = 'LFS_BLOCK_CYCLES == -1'
[[case]] # single bad blocks
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
define.LFS_BADBLOCK_BEHAVIOR = [
'LFS_TESTBD_BADBLOCK_PROGERROR',
'LFS_TESTBD_BADBLOCK_ERASEERROR',
'LFS_TESTBD_BADBLOCK_READERROR',
'LFS_TESTBD_BADBLOCK_PROGNOOP',
'LFS_TESTBD_BADBLOCK_ERASENOOP',
]
define.NAMEMULT = 64
define.FILEMULT = 1
code = '''
for (lfs_block_t badblock = 2; badblock < LFS_BLOCK_COUNT; badblock++) {
lfs_testbd_setwear(&cfg, badblock-1, 0) => 0;
lfs_testbd_setwear(&cfg, badblock, 0xffffffff) => 0;
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[NAMEMULT] = '\0';
lfs_mkdir(&lfs, (char*)buffer) => 0;
buffer[NAMEMULT] = '/';
for (int j = 0; j < NAMEMULT; j++) {
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
lfs_file_open(&lfs, &file, (char*)buffer,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[NAMEMULT] = '\0';
lfs_stat(&lfs, (char*)buffer, &info) => 0;
info.type => LFS_TYPE_DIR;
buffer[NAMEMULT] = '/';
for (int j = 0; j < NAMEMULT; j++) {
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(buffer, rbuffer, size) => 0;
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
}
'''
[[case]] # region corruption (causes cascading failures)
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
define.LFS_BADBLOCK_BEHAVIOR = [
'LFS_TESTBD_BADBLOCK_PROGERROR',
'LFS_TESTBD_BADBLOCK_ERASEERROR',
'LFS_TESTBD_BADBLOCK_READERROR',
'LFS_TESTBD_BADBLOCK_PROGNOOP',
'LFS_TESTBD_BADBLOCK_ERASENOOP',
]
define.NAMEMULT = 64
define.FILEMULT = 1
code = '''
for (lfs_block_t i = 0; i < (LFS_BLOCK_COUNT-2)/2; i++) {
lfs_testbd_setwear(&cfg, i+2, 0xffffffff) => 0;
}
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[NAMEMULT] = '\0';
lfs_mkdir(&lfs, (char*)buffer) => 0;
buffer[NAMEMULT] = '/';
for (int j = 0; j < NAMEMULT; j++) {
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
lfs_file_open(&lfs, &file, (char*)buffer,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[NAMEMULT] = '\0';
lfs_stat(&lfs, (char*)buffer, &info) => 0;
info.type => LFS_TYPE_DIR;
buffer[NAMEMULT] = '/';
for (int j = 0; j < NAMEMULT; j++) {
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(buffer, rbuffer, size) => 0;
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # alternating corruption (causes cascading failures)
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
define.LFS_BADBLOCK_BEHAVIOR = [
'LFS_TESTBD_BADBLOCK_PROGERROR',
'LFS_TESTBD_BADBLOCK_ERASEERROR',
'LFS_TESTBD_BADBLOCK_READERROR',
'LFS_TESTBD_BADBLOCK_PROGNOOP',
'LFS_TESTBD_BADBLOCK_ERASENOOP',
]
define.NAMEMULT = 64
define.FILEMULT = 1
code = '''
for (lfs_block_t i = 0; i < (LFS_BLOCK_COUNT-2)/2; i++) {
lfs_testbd_setwear(&cfg, (2*i) + 2, 0xffffffff) => 0;
}
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[NAMEMULT] = '\0';
lfs_mkdir(&lfs, (char*)buffer) => 0;
buffer[NAMEMULT] = '/';
for (int j = 0; j < NAMEMULT; j++) {
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
lfs_file_open(&lfs, &file, (char*)buffer,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[NAMEMULT] = '\0';
lfs_stat(&lfs, (char*)buffer, &info) => 0;
info.type => LFS_TYPE_DIR;
buffer[NAMEMULT] = '/';
for (int j = 0; j < NAMEMULT; j++) {
buffer[j+NAMEMULT+1] = '0'+i;
}
buffer[2*NAMEMULT+1] = '\0';
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
size = NAMEMULT;
for (int j = 0; j < i*FILEMULT; j++) {
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(buffer, rbuffer, size) => 0;
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
'''
# other corner cases
[[case]] # bad superblocks (corrupt 1 or 0)
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_ERASE_VALUE = [0x00, 0xff, -1]
define.LFS_BADBLOCK_BEHAVIOR = [
'LFS_TESTBD_BADBLOCK_PROGERROR',
'LFS_TESTBD_BADBLOCK_ERASEERROR',
'LFS_TESTBD_BADBLOCK_READERROR',
'LFS_TESTBD_BADBLOCK_PROGNOOP',
'LFS_TESTBD_BADBLOCK_ERASENOOP',
]
code = '''
lfs_testbd_setwear(&cfg, 0, 0xffffffff) => 0;
lfs_testbd_setwear(&cfg, 1, 0xffffffff) => 0;
lfs_format(&lfs, &cfg) => LFS_ERR_NOSPC;
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''
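# Note: each list-valued define above (LFS_ERASE_VALUE, LFS_BADBLOCK_BEHAVIOR)
# selects a separate permutation of the same [[case]], so with 3 erase values
# and 5 bad-block behaviors a case runs 15 times. A rough sketch of that
# expansion, assuming the runner takes the cartesian product of list-valued
# defines (run_case is a hypothetical stand-in for invoking the runner):
#
#   for erase_value in 0x00 0xff -1; do
#       for behavior in PROGERROR ERASEERROR READERROR PROGNOOP ERASENOOP; do
#           run_case "LFS_ERASE_VALUE=$erase_value" \
#               "LFS_BADBLOCK_BEHAVIOR=LFS_TESTBD_BADBLOCK_$behavior"
#       done
#   done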

120
tests/test_corrupt.sh Executable file
View File

@@ -0,0 +1,120 @@
#!/bin/bash
set -eu
export TEST_FILE=$0
trap 'export TEST_LINE=$LINENO' DEBUG
echo "=== Corrupt tests ==="
NAMEMULT=64
FILEMULT=1
lfs_mktree() {
scripts/test.py ${1:-} << TEST
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < $NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[$NAMEMULT] = '\0';
lfs_mkdir(&lfs, (char*)buffer) => 0;
buffer[$NAMEMULT] = '/';
for (int j = 0; j < $NAMEMULT; j++) {
buffer[j+$NAMEMULT+1] = '0'+i;
}
buffer[2*$NAMEMULT+1] = '\0';
lfs_file_open(&lfs, &file, (char*)buffer,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_size_t size = $NAMEMULT;
for (int j = 0; j < i*$FILEMULT; j++) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
}
lfs_chktree() {
scripts/test.py ${1:-} << TEST
lfs_mount(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < $NAMEMULT; j++) {
buffer[j] = '0'+i;
}
buffer[$NAMEMULT] = '\0';
lfs_stat(&lfs, (char*)buffer, &info) => 0;
info.type => LFS_TYPE_DIR;
buffer[$NAMEMULT] = '/';
for (int j = 0; j < $NAMEMULT; j++) {
buffer[j+$NAMEMULT+1] = '0'+i;
}
buffer[2*$NAMEMULT+1] = '\0';
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
lfs_size_t size = $NAMEMULT;
for (int j = 0; j < i*$FILEMULT; j++) {
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(buffer, rbuffer, size) => 0;
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
}
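# lfs_mktree formats the filesystem and builds a small tree of directories and
# files; lfs_chktree remounts the filesystem and walks the same tree,
# verifying that every entry survived whatever corruption the test injected.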
echo "--- Sanity check ---"
rm -rf blocks
lfs_mktree
lfs_chktree
BLOCKS="$(ls blocks | grep -vw '[01]')"
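# Assuming the file-per-block emulated block device used by these tests, each
# block is backed by a file under blocks/ (blocks 0 and 1, the superblocks,
# are excluded above). Symlinking a block file to /dev/zero makes reads return
# zeros and discards writes, which is how the loops below inject corruption
# into individual blocks.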
echo "--- Block corruption ---"
for b in $BLOCKS
do
rm -rf blocks
mkdir blocks
ln -s /dev/zero blocks/$b
lfs_mktree
lfs_chktree
done
echo "--- Block persistance ---"
for b in $BLOCKS
do
rm -rf blocks
mkdir blocks
lfs_mktree
chmod a-w blocks/$b || true
lfs_mktree
lfs_chktree
done
echo "--- Big region corruption ---"
rm -rf blocks
mkdir blocks
for i in {2..512}
do
ln -s /dev/zero blocks/$(printf '%x' $i)
done
lfs_mktree
lfs_chktree
echo "--- Alternating corruption ---"
rm -rf blocks
mkdir blocks
for i in {2..1024..2}
do
ln -s /dev/zero blocks/$(printf '%x' $i)
done
lfs_mktree
lfs_chktree
scripts/results.py

489
tests/test_dirs.sh Executable file
View File

@@ -0,0 +1,489 @@
#!/bin/bash
set -eu
export TEST_FILE=$0
trap 'export TEST_LINE=$LINENO' DEBUG
echo "=== Directory tests ==="
LARGESIZE=128
rm -rf blocks
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
echo "--- Root directory ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Directory creation ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "potato") => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- File creation ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "burito", LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Directory iteration ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "burito") => 0;
info.type => LFS_TYPE_REG;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "potato") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Directory failures ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "potato") => LFS_ERR_EXIST;
lfs_dir_open(&lfs, &dir, "tomato") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "burito") => LFS_ERR_NOTDIR;
lfs_file_open(&lfs, &file, "tomato", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "potato", LFS_O_RDONLY) => LFS_ERR_ISDIR;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Nested directories ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "potato/baked") => 0;
lfs_mkdir(&lfs, "potato/sweet") => 0;
lfs_mkdir(&lfs, "potato/fried") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "potato") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "baked") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "fried") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "sweet") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Multi-block directory ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "cactus") => 0;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf(path, "cactus/test%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "cactus") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf(path, "test%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
info.type => LFS_TYPE_DIR;
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Directory remove ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "potato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "potato/sweet") => 0;
lfs_remove(&lfs, "potato/baked") => 0;
lfs_remove(&lfs, "potato/fried") => 0;
lfs_dir_open(&lfs, &dir, "potato") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_remove(&lfs, "potato") => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "burito") => 0;
info.type => LFS_TYPE_REG;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "cactus") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "burito") => 0;
info.type => LFS_TYPE_REG;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "cactus") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Directory rename ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "coldpotato") => 0;
lfs_mkdir(&lfs, "coldpotato/baked") => 0;
lfs_mkdir(&lfs, "coldpotato/sweet") => 0;
lfs_mkdir(&lfs, "coldpotato/fried") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_rename(&lfs, "coldpotato", "hotpotato") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "hotpotato") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "baked") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "fried") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "sweet") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "warmpotato") => 0;
lfs_mkdir(&lfs, "warmpotato/mushy") => 0;
lfs_rename(&lfs, "hotpotato", "warmpotato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "warmpotato/mushy") => 0;
lfs_rename(&lfs, "hotpotato", "warmpotato") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "warmpotato") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "baked") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "fried") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "sweet") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "coldpotato") => 0;
lfs_rename(&lfs, "warmpotato/baked", "coldpotato/baked") => 0;
lfs_rename(&lfs, "warmpotato/sweet", "coldpotato/sweet") => 0;
lfs_rename(&lfs, "warmpotato/fried", "coldpotato/fried") => 0;
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "warmpotato") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "coldpotato") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "baked") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "fried") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "sweet") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Recursive remove ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOTEMPTY;
lfs_dir_open(&lfs, &dir, "coldpotato") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
while (true) {
int err = lfs_dir_read(&lfs, &dir, &info);
err >= 0 => 1;
if (err == 0) {
break;
}
strcpy(path, "coldpotato/");
strcat(path, info.name);
lfs_remove(&lfs, path) => 0;
}
lfs_remove(&lfs, "coldpotato") => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "burito") => 0;
info.type => LFS_TYPE_REG;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "cactus") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Multi-block rename ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < $LARGESIZE; i++) {
char oldpath[1024];
char newpath[1024];
sprintf(oldpath, "cactus/test%03d", i);
sprintf(newpath, "cactus/tedd%03d", i);
lfs_rename(&lfs, oldpath, newpath) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "cactus") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf(path, "tedd%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
info.type => LFS_TYPE_DIR;
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Multi-block remove ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "cactus") => LFS_ERR_NOTEMPTY;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf(path, "cactus/tedd%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_remove(&lfs, "cactus") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "burito") => 0;
info.type => LFS_TYPE_REG;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Multi-block directory with files ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "prickly-pear") => 0;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf(path, "prickly-pear/test%03d", i);
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_size_t size = 6;
memcpy(buffer, "Hello", size);
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "prickly-pear") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf(path, "test%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
info.type => LFS_TYPE_REG;
info.size => 6;
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Multi-block rename with files ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < $LARGESIZE; i++) {
char oldpath[1024];
char newpath[1024];
sprintf(oldpath, "prickly-pear/test%03d", i);
sprintf(newpath, "prickly-pear/tedd%03d", i);
lfs_rename(&lfs, oldpath, newpath) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "prickly-pear") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf(path, "tedd%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
info.type => LFS_TYPE_REG;
info.size => 6;
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Multi-block remove with files ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOTEMPTY;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf(path, "prickly-pear/tedd%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_remove(&lfs, "prickly-pear") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "burito") => 0;
info.type => LFS_TYPE_REG;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/results.py

View File

@@ -1,838 +0,0 @@
[[case]] # root
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # many directory creation
define.N = 'range(0, 100, 3)'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "dir%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "dir%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # many directory removal
define.N = 'range(3, 100, 11)'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # many directory rename
define.N = 'range(3, 100, 11)'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "test%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "test%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
char oldpath[128];
char newpath[128];
sprintf(oldpath, "test%03d", i);
sprintf(newpath, "tedd%03d", i);
lfs_rename(&lfs, oldpath, newpath) => 0;
}
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "tedd%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
'''
[[case]] # reentrant many directory creation/rename/removal
define.N = [5, 11]
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
}
for (int i = 0; i < N; i++) {
sprintf(path, "hi%03d", i);
err = lfs_mkdir(&lfs, path);
assert(err == 0 || err == LFS_ERR_EXIST);
}
for (int i = 0; i < N; i++) {
sprintf(path, "hello%03d", i);
err = lfs_remove(&lfs, path);
assert(err == 0 || err == LFS_ERR_NOENT);
}
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "hi%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
for (int i = 0; i < N; i++) {
char oldpath[128];
char newpath[128];
sprintf(oldpath, "hi%03d", i);
sprintf(newpath, "hello%03d", i);
// YES this can overwrite an existing newpath
lfs_rename(&lfs, oldpath, newpath) => 0;
}
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "hello%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "hello%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
'''
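# Note: reentrant cases are re-entered mid-run (e.g. after simulated power
# loss), which is why the body above tolerates partially completed work
# (LFS_ERR_EXIST on mkdir, LFS_ERR_NOENT on remove) and converges back to the
# same directory layout on every pass.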
[[case]] # file creation
define.N = 'range(3, 100, 11)'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "file%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "file%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
'''
[[case]] # file removal
define.N = 'range(0, 100, 3)'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # file rename
define.N = 'range(0, 100, 3)'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "test%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "test%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
char oldpath[128];
char newpath[128];
sprintf(oldpath, "test%03d", i);
sprintf(newpath, "tedd%03d", i);
lfs_rename(&lfs, oldpath, newpath) => 0;
}
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "tedd%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
'''
[[case]] # reentrant file creation/rename/removal
define.N = [5, 25]
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
}
for (int i = 0; i < N; i++) {
sprintf(path, "hi%03d", i);
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_close(&lfs, &file) => 0;
}
for (int i = 0; i < N; i++) {
sprintf(path, "hello%03d", i);
err = lfs_remove(&lfs, path);
assert(err == 0 || err == LFS_ERR_NOENT);
}
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "hi%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
for (int i = 0; i < N; i++) {
char oldpath[128];
char newpath[128];
sprintf(oldpath, "hi%03d", i);
sprintf(newpath, "hello%03d", i);
// YES this can overwrite an existing newpath
lfs_rename(&lfs, oldpath, newpath) => 0;
}
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "hello%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "hello%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # nested directories
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "potato") => 0;
lfs_file_open(&lfs, &file, "burito",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "potato/baked") => 0;
lfs_mkdir(&lfs, "potato/sweet") => 0;
lfs_mkdir(&lfs, "potato/fried") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "potato") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "baked") == 0);
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "fried") == 0);
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "sweet") == 0);
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
// try removing?
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "potato") => LFS_ERR_NOTEMPTY;
lfs_unmount(&lfs) => 0;
// try renaming?
lfs_mount(&lfs, &cfg) => 0;
lfs_rename(&lfs, "potato", "coldpotato") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_rename(&lfs, "coldpotato", "warmpotato") => 0;
lfs_rename(&lfs, "warmpotato", "hotpotato") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "potato") => LFS_ERR_NOENT;
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOENT;
lfs_remove(&lfs, "warmpotato") => LFS_ERR_NOENT;
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_unmount(&lfs) => 0;
// try cross-directory renaming
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "coldpotato") => 0;
lfs_rename(&lfs, "hotpotato/baked", "coldpotato/baked") => 0;
lfs_rename(&lfs, "coldpotato", "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_rename(&lfs, "hotpotato/fried", "coldpotato/fried") => 0;
lfs_rename(&lfs, "coldpotato", "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_rename(&lfs, "hotpotato/sweet", "coldpotato/sweet") => 0;
lfs_rename(&lfs, "coldpotato", "hotpotato") => 0;
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOENT;
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "hotpotato") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "baked") == 0);
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "fried") == 0);
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "sweet") == 0);
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
// final remove
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "hotpotato/baked") => 0;
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "hotpotato/fried") => 0;
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "hotpotato/sweet") => 0;
lfs_remove(&lfs, "hotpotato") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "burito") == 0);
info.type => LFS_TYPE_REG;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # recursive remove
define.N = [10, 100]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "prickly-pear") => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "prickly-pear/cactus%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_dir_open(&lfs, &dir, "prickly-pear") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "cactus%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, path) == 0);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOTEMPTY;
lfs_dir_open(&lfs, &dir, "prickly-pear") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
for (int i = 0; i < N; i++) {
sprintf(path, "cactus%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, path) == 0);
sprintf(path, "prickly-pear/%s", info.name);
lfs_remove(&lfs, path) => 0;
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_remove(&lfs, "prickly-pear") => 0;
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOENT;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOENT;
lfs_unmount(&lfs) => 0;
'''
[[case]] # other error cases
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "potato") => 0;
lfs_file_open(&lfs, &file, "burito",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "potato") => LFS_ERR_EXIST;
lfs_mkdir(&lfs, "burito") => LFS_ERR_EXIST;
lfs_file_open(&lfs, &file, "burito",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => LFS_ERR_EXIST;
lfs_file_open(&lfs, &file, "potato",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => LFS_ERR_EXIST;
lfs_dir_open(&lfs, &dir, "tomato") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "burito") => LFS_ERR_NOTDIR;
lfs_file_open(&lfs, &file, "tomato", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "potato", LFS_O_RDONLY) => LFS_ERR_ISDIR;
lfs_file_open(&lfs, &file, "tomato", LFS_O_WRONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "potato", LFS_O_WRONLY) => LFS_ERR_ISDIR;
lfs_file_open(&lfs, &file, "potato",
LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_ISDIR;
lfs_mkdir(&lfs, "/") => LFS_ERR_EXIST;
lfs_file_open(&lfs, &file, "/",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => LFS_ERR_EXIST;
lfs_file_open(&lfs, &file, "/", LFS_O_RDONLY) => LFS_ERR_ISDIR;
lfs_file_open(&lfs, &file, "/", LFS_O_WRONLY) => LFS_ERR_ISDIR;
lfs_file_open(&lfs, &file, "/",
LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_ISDIR;
// check that errors did not corrupt directory
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
assert(strcmp(info.name, "burito") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "potato") == 0);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
// or on disk
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, ".") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "..") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_REG);
assert(strcmp(info.name, "burito") == 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
assert(strcmp(info.name, "potato") == 0);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # directory seek
define.COUNT = [4, 128, 132]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
for (int i = 0; i < COUNT; i++) {
sprintf(path, "hello/kitty%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
for (int j = 2; j < COUNT; j++) {
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "hello") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_soff_t pos;
for (int i = 0; i < j; i++) {
sprintf(path, "kitty%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
pos = lfs_dir_tell(&lfs, &dir);
assert(pos >= 0);
}
lfs_dir_seek(&lfs, &dir, pos) => 0;
sprintf(path, "kitty%03d", j);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_rewind(&lfs, &dir) => 0;
sprintf(path, "kitty%03d", 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_seek(&lfs, &dir, pos) => 0;
sprintf(path, "kitty%03d", j);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
}
'''
[[case]] # root seek
define.COUNT = [4, 128, 132]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < COUNT; i++) {
sprintf(path, "hi%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
for (int j = 2; j < COUNT; j++) {
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_soff_t pos;
for (int i = 0; i < j; i++) {
sprintf(path, "hi%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
pos = lfs_dir_tell(&lfs, &dir);
assert(pos >= 0);
}
lfs_dir_seek(&lfs, &dir, pos) => 0;
sprintf(path, "hi%03d", j);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_rewind(&lfs, &dir) => 0;
sprintf(path, "hi%03d", 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_seek(&lfs, &dir, pos) => 0;
sprintf(path, "hi%03d", j);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
}
'''

251
tests/test_entries.sh Executable file
View File

@@ -0,0 +1,251 @@
#!/bin/bash
set -eu
export TEST_FILE=$0
trap 'export TEST_LINE=$LINENO' DEBUG
echo "=== Entry tests ==="
# Note: These tests are intended for a 512 byte inline size. At different
# inline sizes they should still pass, but won't be testing anything.
rm -rf blocks
function read_file {
cat << TEST
size = $2;
lfs_file_open(&lfs, &file, "$1", LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
TEST
}
function write_file {
cat << TEST
size = $2;
lfs_file_open(&lfs, &file, "$1",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
TEST
}
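# For example, $(write_file "hi0" 20) expands to the following snippet before
# it is piped to scripts/test.py:
#
#   size = 20;
#   lfs_file_open(&lfs, &file, "hi0",
#           LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
#   memset(wbuffer, 'c', size);
#   lfs_file_write(&lfs, &file, wbuffer, size) => size;
#   lfs_file_close(&lfs, &file) => 0;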
echo "--- Entry grow test ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_size_t size;
lfs_mount(&lfs, &cfg) => 0;
$(write_file "hi0" 20)
$(write_file "hi1" 20)
$(write_file "hi2" 20)
$(write_file "hi3" 20)
$(read_file "hi1" 20)
$(write_file "hi1" 200)
$(read_file "hi0" 20)
$(read_file "hi1" 200)
$(read_file "hi2" 20)
$(read_file "hi3" 20)
lfs_unmount(&lfs) => 0;
TEST
echo "--- Entry shrink test ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_size_t size;
lfs_mount(&lfs, &cfg) => 0;
$(write_file "hi0" 20)
$(write_file "hi1" 200)
$(write_file "hi2" 20)
$(write_file "hi3" 20)
$(read_file "hi1" 200)
$(write_file "hi1" 20)
$(read_file "hi0" 20)
$(read_file "hi1" 20)
$(read_file "hi2" 20)
$(read_file "hi3" 20)
lfs_unmount(&lfs) => 0;
TEST
echo "--- Entry spill test ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_size_t size;
lfs_mount(&lfs, &cfg) => 0;
$(write_file "hi0" 200)
$(write_file "hi1" 200)
$(write_file "hi2" 200)
$(write_file "hi3" 200)
$(read_file "hi0" 200)
$(read_file "hi1" 200)
$(read_file "hi2" 200)
$(read_file "hi3" 200)
lfs_unmount(&lfs) => 0;
TEST
echo "--- Entry push spill test ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_size_t size;
lfs_mount(&lfs, &cfg) => 0;
$(write_file "hi0" 200)
$(write_file "hi1" 20)
$(write_file "hi2" 200)
$(write_file "hi3" 200)
$(read_file "hi1" 20)
$(write_file "hi1" 200)
$(read_file "hi0" 200)
$(read_file "hi1" 200)
$(read_file "hi2" 200)
$(read_file "hi3" 200)
lfs_unmount(&lfs) => 0;
TEST
echo "--- Entry push spill two test ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_size_t size;
lfs_mount(&lfs, &cfg) => 0;
$(write_file "hi0" 200)
$(write_file "hi1" 20)
$(write_file "hi2" 200)
$(write_file "hi3" 200)
$(write_file "hi4" 200)
$(read_file "hi1" 20)
$(write_file "hi1" 200)
$(read_file "hi0" 200)
$(read_file "hi1" 200)
$(read_file "hi2" 200)
$(read_file "hi3" 200)
$(read_file "hi4" 200)
lfs_unmount(&lfs) => 0;
TEST
echo "--- Entry drop test ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_size_t size;
lfs_mount(&lfs, &cfg) => 0;
$(write_file "hi0" 200)
$(write_file "hi1" 200)
$(write_file "hi2" 200)
$(write_file "hi3" 200)
lfs_remove(&lfs, "hi1") => 0;
lfs_stat(&lfs, "hi1", &info) => LFS_ERR_NOENT;
$(read_file "hi0" 200)
$(read_file "hi2" 200)
$(read_file "hi3" 200)
lfs_remove(&lfs, "hi2") => 0;
lfs_stat(&lfs, "hi2", &info) => LFS_ERR_NOENT;
$(read_file "hi0" 200)
$(read_file "hi3" 200)
lfs_remove(&lfs, "hi3") => 0;
lfs_stat(&lfs, "hi3", &info) => LFS_ERR_NOENT;
$(read_file "hi0" 200)
lfs_remove(&lfs, "hi0") => 0;
lfs_stat(&lfs, "hi0", &info) => LFS_ERR_NOENT;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Create too big ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
memset(path, 'm', 200);
path[200] = '\0';
lfs_size_t size = 400;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
uint8_t wbuffer[1024];
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
size = 400;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Resize too big ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
memset(path, 'm', 200);
path[200] = '\0';
lfs_size_t size = 40;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
uint8_t wbuffer[1024];
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
size = 40;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
size = 400;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
size = 400;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/results.py

View File

@@ -1,611 +0,0 @@
# These tests are for some specific corner cases with neighboring inline files.
# Note that these tests are intended for 512 byte inline sizes. They should
# still pass with other inline sizes but wouldn't be testing anything.
define.LFS_CACHE_SIZE = 512
if = 'LFS_CACHE_SIZE % LFS_PROG_SIZE == 0 && LFS_CACHE_SIZE == 512'
[[case]] # entry grow test
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
// write hi0 20
sprintf(path, "hi0"); size = 20;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi1 20
sprintf(path, "hi1"); size = 20;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi2 20
sprintf(path, "hi2"); size = 20;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi3 20
sprintf(path, "hi3"); size = 20;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// read hi1 20
sprintf(path, "hi1"); size = 20;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// write hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// read hi0 20
sprintf(path, "hi0"); size = 20;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi2 20
sprintf(path, "hi2"); size = 20;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi3 20
sprintf(path, "hi3"); size = 20;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # entry shrink test
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
// write hi0 20
sprintf(path, "hi0"); size = 20;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi2 20
sprintf(path, "hi2"); size = 20;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi3 20
sprintf(path, "hi3"); size = 20;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// read hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// write hi1 20
sprintf(path, "hi1"); size = 20;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// read hi0 20
sprintf(path, "hi0"); size = 20;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi1 20
sprintf(path, "hi1"); size = 20;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi2 20
sprintf(path, "hi2"); size = 20;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi3 20
sprintf(path, "hi3"); size = 20;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # entry spill test
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
// write hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi2 200
sprintf(path, "hi2"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi3 200
sprintf(path, "hi3"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// read hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi2 200
sprintf(path, "hi2"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi3 200
sprintf(path, "hi3"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # entry push spill test
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
// write hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi1 20
sprintf(path, "hi1"); size = 20;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi2 200
sprintf(path, "hi2"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi3 200
sprintf(path, "hi3"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// read hi1 20
sprintf(path, "hi1"); size = 20;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// write hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// read hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi2 200
sprintf(path, "hi2"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi3 200
sprintf(path, "hi3"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # entry push spill two test
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
// write hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi1 20
sprintf(path, "hi1"); size = 20;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi2 200
sprintf(path, "hi2"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi3 200
sprintf(path, "hi3"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi4 200
sprintf(path, "hi4"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// read hi1 20
sprintf(path, "hi1"); size = 20;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// write hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// read hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi2 200
sprintf(path, "hi2"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi3 200
sprintf(path, "hi3"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi4 200
sprintf(path, "hi4"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # entry drop test
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
// write hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi2 200
sprintf(path, "hi2"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi3 200
sprintf(path, "hi3"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_remove(&lfs, "hi1") => 0;
lfs_stat(&lfs, "hi1", &info) => LFS_ERR_NOENT;
// read hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi2 200
sprintf(path, "hi2"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi3 200
sprintf(path, "hi3"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_remove(&lfs, "hi2") => 0;
lfs_stat(&lfs, "hi2", &info) => LFS_ERR_NOENT;
// read hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi3 200
sprintf(path, "hi3"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_remove(&lfs, "hi3") => 0;
lfs_stat(&lfs, "hi3", &info) => LFS_ERR_NOENT;
// read hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_remove(&lfs, "hi0") => 0;
lfs_stat(&lfs, "hi0", &info) => LFS_ERR_NOENT;
lfs_unmount(&lfs) => 0;
'''
[[case]] # create too big
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
memset(path, 'm', 200);
path[200] = '\0';
size = 400;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
uint8_t wbuffer[1024];
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
size = 400;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # resize too big
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
memset(path, 'm', 200);
path[200] = '\0';
size = 40;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
uint8_t wbuffer[1024];
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
size = 40;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
size = 400;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
size = 400;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''


@@ -1,288 +0,0 @@
# Tests for recovering from conditions which shouldn't normally
# happen during normal operation of littlefs
# invalid pointer tests (outside of block_count)
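# Roughly, each case below formats the filesystem, re-opens it at a low level
# with lfs_init, fetches the root metadata pair {0, 1}, commits a tag whose
# payload is an out-of-range block address (0xcccccccc), and then checks that
# the next mount or access fails gracefully (typically with LFS_ERR_CORRUPT)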
[[case]] # invalid tail-pointer test
define.TAIL_TYPE = ['LFS_TYPE_HARDTAIL', 'LFS_TYPE_SOFTTAIL']
define.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
// change tail-pointer to invalid pointers
lfs_init(&lfs, &cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
(lfs_block_t[2]){
(INVALSET & 0x1) ? 0xcccccccc : 0,
(INVALSET & 0x2) ? 0xcccccccc : 0}})) => 0;
lfs_deinit(&lfs) => 0;
// test that mount fails gracefully
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''
[[case]] # invalid dir pointer test
define.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
// make a dir
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "dir_here") => 0;
lfs_unmount(&lfs) => 0;
// change the dir pointer to be invalid
lfs_init(&lfs, &cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our directory
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("dir_here")), buffer)
=> LFS_MKTAG(LFS_TYPE_DIR, 1, strlen("dir_here"));
assert(memcmp((char*)buffer, "dir_here", strlen("dir_here")) == 0);
// change dir pointer
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, 8),
(lfs_block_t[2]){
(INVALSET & 0x1) ? 0xcccccccc : 0,
(INVALSET & 0x2) ? 0xcccccccc : 0}})) => 0;
lfs_deinit(&lfs) => 0;
    // test that accessing our bad dir fails, note there are a number
    // of ways to access the dir, some fail, but some don't
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "dir_here", &info) => 0;
assert(strcmp(info.name, "dir_here") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_open(&lfs, &dir, "dir_here") => LFS_ERR_CORRUPT;
lfs_stat(&lfs, "dir_here/file_here", &info) => LFS_ERR_CORRUPT;
lfs_dir_open(&lfs, &dir, "dir_here/dir_here") => LFS_ERR_CORRUPT;
lfs_file_open(&lfs, &file, "dir_here/file_here",
LFS_O_RDONLY) => LFS_ERR_CORRUPT;
lfs_file_open(&lfs, &file, "dir_here/file_here",
LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_CORRUPT;
lfs_unmount(&lfs) => 0;
'''
[[case]] # invalid file pointer test
in = "lfs.c"
define.SIZE = [10, 1000, 100000] # faked file size
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
// make a file
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "file_here",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// change the file pointer to be invalid
lfs_init(&lfs, &cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our file
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
=> LFS_MKTAG(LFS_TYPE_REG, 1, strlen("file_here"));
assert(memcmp((char*)buffer, "file_here", strlen("file_here")) == 0);
// change file pointer
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz)),
&(struct lfs_ctz){0xcccccccc, lfs_tole32(SIZE)}})) => 0;
lfs_deinit(&lfs) => 0;
    // test that accessing our bad file fails, note there are a number
    // of ways to access the file, some fail, but some don't
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "file_here", &info) => 0;
assert(strcmp(info.name, "file_here") == 0);
assert(info.type == LFS_TYPE_REG);
assert(info.size == SIZE);
lfs_file_open(&lfs, &file, "file_here", LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, buffer, SIZE) => LFS_ERR_CORRUPT;
lfs_file_close(&lfs, &file) => 0;
    // any allocs that traverse the CTZ skip-list must unfortunately fail
if (SIZE > 2*LFS_BLOCK_SIZE) {
lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # invalid pointer in CTZ skip-list test
define.SIZE = ['2*LFS_BLOCK_SIZE', '3*LFS_BLOCK_SIZE', '4*LFS_BLOCK_SIZE']
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
// make a file
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "file_here",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (int i = 0; i < SIZE; i++) {
char c = 'c';
lfs_file_write(&lfs, &file, &c, 1) => 1;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// change pointer in CTZ skip-list to be invalid
lfs_init(&lfs, &cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our file and get our CTZ structure
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
=> LFS_MKTAG(LFS_TYPE_REG, 1, strlen("file_here"));
assert(memcmp((char*)buffer, "file_here", strlen("file_here")) == 0);
struct lfs_ctz ctz;
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_STRUCT, 1, sizeof(struct lfs_ctz)), &ctz)
=> LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz));
lfs_ctz_fromle32(&ctz);
// rewrite block to contain bad pointer
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
uint32_t bad = lfs_tole32(0xcccccccc);
memcpy(&bbuffer[0], &bad, sizeof(bad));
memcpy(&bbuffer[4], &bad, sizeof(bad));
cfg.erase(&cfg, ctz.head) => 0;
cfg.prog(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_deinit(&lfs) => 0;
    // test that accessing our bad file fails, note there are a number
    // of ways to access the file, some fail, but some don't
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "file_here", &info) => 0;
assert(strcmp(info.name, "file_here") == 0);
assert(info.type == LFS_TYPE_REG);
assert(info.size == SIZE);
lfs_file_open(&lfs, &file, "file_here", LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, buffer, SIZE) => LFS_ERR_CORRUPT;
lfs_file_close(&lfs, &file) => 0;
    // any allocs that traverse the CTZ skip-list must unfortunately fail
if (SIZE > 2*LFS_BLOCK_SIZE) {
lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # invalid gstate pointer
define.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
// create an invalid gstate
lfs_init(&lfs, &cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_fs_prepmove(&lfs, 1, (lfs_block_t [2]){
(INVALSET & 0x1) ? 0xcccccccc : 0,
(INVALSET & 0x2) ? 0xcccccccc : 0});
lfs_dir_commit(&lfs, &mdir, NULL, 0) => 0;
lfs_deinit(&lfs) => 0;
// test that mount fails gracefully
// mount may not fail, but our first alloc should fail when
// we try to fix the gstate
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "should_fail") => LFS_ERR_CORRUPT;
lfs_unmount(&lfs) => 0;
'''
# cycle detection/recovery tests
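# Roughly, these cases commit an LFS_TYPE_HARDTAIL tag that points back at an
# earlier metadata pair (e.g. the root {0, 1}), forming a loop in the threaded
# list, and then check that mount detects the cycle and fails with
# LFS_ERR_CORRUPT instead of iterating forever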
[[case]] # metadata-pair threaded-list loop test
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
// change tail-pointer to point to ourself
lfs_init(&lfs, &cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
(lfs_block_t[2]){0, 1}})) => 0;
lfs_deinit(&lfs) => 0;
// test that mount fails gracefully
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''
[[case]] # metadata-pair threaded-list 2-length loop test
in = "lfs.c"
code = '''
// create littlefs with child dir
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;
// find child
lfs_init(&lfs, &cfg) => 0;
lfs_mdir_t mdir;
lfs_block_t pair[2];
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x7ff, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
=> LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
lfs_pair_fromle32(pair);
// change tail-pointer to point to root
lfs_dir_fetch(&lfs, &mdir, pair) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
(lfs_block_t[2]){0, 1}})) => 0;
lfs_deinit(&lfs) => 0;
// test that mount fails gracefully
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''
[[case]] # metadata-pair threaded-list 1-length child loop test
in = "lfs.c"
code = '''
// create littlefs with child dir
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;
// find child
lfs_init(&lfs, &cfg) => 0;
lfs_mdir_t mdir;
lfs_block_t pair[2];
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x7ff, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
=> LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
lfs_pair_fromle32(pair);
// change tail-pointer to point to ourself
lfs_dir_fetch(&lfs, &mdir, pair) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8), pair})) => 0;
lfs_deinit(&lfs) => 0;
// test that mount fails gracefully
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''


@@ -1,465 +0,0 @@
[[case]] # test running a filesystem to exhaustion
define.LFS_ERASE_CYCLES = 10
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
define.LFS_BADBLOCK_BEHAVIOR = [
'LFS_TESTBD_BADBLOCK_PROGERROR',
'LFS_TESTBD_BADBLOCK_ERASEERROR',
'LFS_TESTBD_BADBLOCK_READERROR',
'LFS_TESTBD_BADBLOCK_PROGNOOP',
'LFS_TESTBD_BADBLOCK_ERASENOOP',
]
define.FILES = 10
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "roadrunner") => 0;
lfs_unmount(&lfs) => 0;
uint32_t cycle = 0;
while (true) {
lfs_mount(&lfs, &cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
            // choose name, roughly random seed, and random 2^n size
sprintf(path, "roadrunner/test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
sprintf(path, "roadrunner/test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
cycle += 1;
}
exhausted:
// should still be readable
lfs_mount(&lfs, &cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
sprintf(path, "roadrunner/test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
lfs_unmount(&lfs) => 0;
LFS_WARN("completed %d cycles", cycle);
'''
[[case]] # test running a filesystem to exhaustion
# which also requires expanding superblocks
define.LFS_ERASE_CYCLES = 10
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
define.LFS_BADBLOCK_BEHAVIOR = [
'LFS_TESTBD_BADBLOCK_PROGERROR',
'LFS_TESTBD_BADBLOCK_ERASEERROR',
'LFS_TESTBD_BADBLOCK_READERROR',
'LFS_TESTBD_BADBLOCK_PROGNOOP',
'LFS_TESTBD_BADBLOCK_ERASENOOP',
]
define.FILES = 10
code = '''
lfs_format(&lfs, &cfg) => 0;
uint32_t cycle = 0;
while (true) {
lfs_mount(&lfs, &cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
            // choose name, roughly random seed, and random 2^n size
sprintf(path, "test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
sprintf(path, "test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
cycle += 1;
}
exhausted:
// should still be readable
lfs_mount(&lfs, &cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
sprintf(path, "test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
lfs_unmount(&lfs) => 0;
LFS_WARN("completed %d cycles", cycle);
'''
# These are a sort of high-level litmus test for wear-leveling. One definition
# of wear-leveling is that increasing a block device's space translates directly
# into increasing the block device's lifetime. This is something we can actually
# check for.
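# As a rough illustration of the check at the end of these cases (the numbers
# are made up, not from a real run): if the run limited to LFS_BLOCK_COUNT/2
# usable blocks exhausts after 30 cycles, ideal wear-leveling should let the
# run with all LFS_BLOCK_COUNT blocks reach roughly 60, which is what
# run_cycles[1]*110/100 > 2*run_cycles[0] asserts (2x lifetime within ~10%).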
[[case]] # wear-level test running a filesystem to exhaustion
define.LFS_ERASE_CYCLES = 20
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
define.FILES = 10
code = '''
uint32_t run_cycles[2];
const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT};
for (int run = 0; run < 2; run++) {
for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
lfs_testbd_setwear(&cfg, b,
(b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
}
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "roadrunner") => 0;
lfs_unmount(&lfs) => 0;
uint32_t cycle = 0;
while (true) {
lfs_mount(&lfs, &cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
                // choose name, roughly random seed, and random 2^n size
sprintf(path, "roadrunner/test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
sprintf(path, "roadrunner/test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
cycle += 1;
}
exhausted:
// should still be readable
lfs_mount(&lfs, &cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
sprintf(path, "roadrunner/test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
lfs_unmount(&lfs) => 0;
run_cycles[run] = cycle;
LFS_WARN("completed %d blocks %d cycles",
run_block_count[run], run_cycles[run]);
}
// check we increased the lifetime by 2x with ~10% error
LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]);
'''
[[case]] # wear-level test + expanding superblock
define.LFS_ERASE_CYCLES = 20
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2'
define.FILES = 10
code = '''
uint32_t run_cycles[2];
const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT};
for (int run = 0; run < 2; run++) {
for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
lfs_testbd_setwear(&cfg, b,
(b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
}
lfs_format(&lfs, &cfg) => 0;
uint32_t cycle = 0;
while (true) {
lfs_mount(&lfs, &cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
                // choose name, roughly random seed, and random 2^n size
sprintf(path, "test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
sprintf(path, "test%d", i);
srand(cycle * i);
size = 1 << ((rand() % 10)+2);
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
cycle += 1;
}
exhausted:
// should still be readable
lfs_mount(&lfs, &cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
sprintf(path, "test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
lfs_unmount(&lfs) => 0;
run_cycles[run] = cycle;
LFS_WARN("completed %d blocks %d cycles",
run_block_count[run], run_cycles[run]);
}
// check we increased the lifetime by 2x with ~10% error
LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]);
'''
[[case]] # test that we wear blocks roughly evenly
define.LFS_ERASE_CYCLES = 0xffffffff
define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster
define.LFS_BLOCK_CYCLES = [5, 4, 3, 2, 1]
define.CYCLES = 100
define.FILES = 10
if = 'LFS_BLOCK_CYCLES < CYCLES/10'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "roadrunner") => 0;
lfs_unmount(&lfs) => 0;
uint32_t cycle = 0;
while (cycle < CYCLES) {
lfs_mount(&lfs, &cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
            // choose name, roughly random seed, and random 2^n size
sprintf(path, "roadrunner/test%d", i);
srand(cycle * i);
size = 1 << 4; //((rand() % 10)+2);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
sprintf(path, "roadrunner/test%d", i);
srand(cycle * i);
size = 1 << 4; //((rand() % 10)+2);
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (rand() % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
cycle += 1;
}
exhausted:
// should still be readable
lfs_mount(&lfs, &cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
sprintf(path, "roadrunner/test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
lfs_unmount(&lfs) => 0;
LFS_WARN("completed %d cycles", cycle);
// check the wear on our block device
lfs_testbd_wear_t minwear = -1;
lfs_testbd_wear_t totalwear = 0;
lfs_testbd_wear_t maxwear = 0;
    // skip blocks 0 and 1 as superblock movement is intentionally avoided
for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
printf("%08x: wear %d\n", b, wear);
assert(wear >= 0);
if (wear < minwear) {
minwear = wear;
}
if (wear > maxwear) {
maxwear = wear;
}
totalwear += wear;
}
lfs_testbd_wear_t avgwear = totalwear / LFS_BLOCK_COUNT;
LFS_WARN("max wear: %d cycles", maxwear);
LFS_WARN("avg wear: %d cycles", totalwear / LFS_BLOCK_COUNT);
LFS_WARN("min wear: %d cycles", minwear);
// find standard deviation^2
lfs_testbd_wear_t dev2 = 0;
for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
assert(wear >= 0);
lfs_testbd_swear_t diff = wear - avgwear;
dev2 += diff*diff;
}
dev2 /= totalwear;
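    // note: dividing by totalwear (rather than the block count) makes dev2 a
    // spread metric relative to total wear, not a true variance; the threshold
    // below applies to this relative metric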
LFS_WARN("std dev^2: %d", dev2);
assert(dev2 < 8);
'''

tests/test_files.sh Executable file

@@ -0,0 +1,221 @@
#!/bin/bash
set -eu
export TEST_FILE=$0
trap 'export TEST_LINE=$LINENO' DEBUG
echo "=== File tests ==="
SMALLSIZE=32
MEDIUMSIZE=8192
LARGESIZE=262144
rm -rf blocks
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
echo "--- Simple file test ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_size_t size = strlen("Hello World!\n");
uint8_t wbuffer[1024];
memcpy(wbuffer, "Hello World!\n", size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "hello", LFS_O_RDONLY) => 0;
size = strlen("Hello World!\n");
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
w_test() {
scripts/test.py ${4:-} << TEST
lfs_size_t size = $1;
lfs_size_t chunk = 31;
srand(0);
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "$2",
${3:-LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC}) => 0;
for (lfs_size_t i = 0; i < size; i += chunk) {
chunk = (chunk < size - i) ? chunk : size - i;
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
}
r_test() {
scripts/test.py << TEST
lfs_size_t size = $1;
lfs_size_t chunk = 29;
srand(0);
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "$2", &info) => 0;
info.type => LFS_TYPE_REG;
info.size => size;
lfs_file_open(&lfs, &file, "$2", ${3:-LFS_O_RDONLY}) => 0;
for (lfs_size_t i = 0; i < size; i += chunk) {
chunk = (chunk < size - i) ? chunk : size - i;
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk && i+b < size; b++) {
buffer[b] => rand() & 0xff;
}
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
}
echo "--- Small file test ---"
w_test $SMALLSIZE smallavacado
r_test $SMALLSIZE smallavacado
echo "--- Medium file test ---"
w_test $MEDIUMSIZE mediumavacado
r_test $MEDIUMSIZE mediumavacado
echo "--- Large file test ---"
w_test $LARGESIZE largeavacado
r_test $LARGESIZE largeavacado
echo "--- Zero file test ---"
w_test 0 noavacado
r_test 0 noavacado
echo "--- Truncate small test ---"
w_test $SMALLSIZE mediumavacado
r_test $SMALLSIZE mediumavacado
w_test $MEDIUMSIZE mediumavacado
r_test $MEDIUMSIZE mediumavacado
echo "--- Truncate zero test ---"
w_test $SMALLSIZE noavacado
r_test $SMALLSIZE noavacado
w_test 0 noavacado
r_test 0 noavacado
echo "--- Non-overlap check ---"
r_test $SMALLSIZE smallavacado
r_test $MEDIUMSIZE mediumavacado
r_test $LARGESIZE largeavacado
r_test 0 noavacado
echo "--- Dir check ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hello") => 0;
info.type => LFS_TYPE_REG;
info.size => strlen("Hello World!\n");
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "largeavacado") => 0;
info.type => LFS_TYPE_REG;
info.size => $LARGESIZE;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "mediumavacado") => 0;
info.type => LFS_TYPE_REG;
info.size => $MEDIUMSIZE;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "noavacado") => 0;
info.type => LFS_TYPE_REG;
info.size => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "smallavacado") => 0;
info.type => LFS_TYPE_REG;
info.size => $SMALLSIZE;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Many files test ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
scripts/test.py << TEST
// Create 300 files of 7 bytes
lfs_mount(&lfs, &cfg) => 0;
for (unsigned i = 0; i < 300; i++) {
sprintf(path, "file_%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_RDWR | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_size_t size = 7;
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
snprintf((char*)wbuffer, size, "Hi %03d", i);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_rewind(&lfs, &file) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(wbuffer, rbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
echo "--- Many files with flush test ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
scripts/test.py << TEST
// Create 300 files of 7 bytes
lfs_mount(&lfs, &cfg) => 0;
for (unsigned i = 0; i < 300; i++) {
sprintf(path, "file_%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_size_t size = 7;
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
snprintf((char*)wbuffer, size, "Hi %03d", i);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(wbuffer, rbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
echo "--- Many files with power cycle test ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
scripts/test.py << TEST
// Create 300 files of 7 bytes
lfs_mount(&lfs, &cfg) => 0;
for (unsigned i = 0; i < 300; i++) {
sprintf(path, "file_%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_size_t size = 7;
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];
snprintf((char*)wbuffer, size, "Hi %03d", i);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(wbuffer, rbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
scripts/results.py


@@ -1,486 +0,0 @@
[[case]] # simple file test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "hello",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
size = strlen("Hello World!")+1;
strcpy((char*)buffer, "Hello World!");
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "hello", LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(strcmp((char*)buffer, "Hello World!") == 0);
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # larger files
define.SIZE = [32, 8192, 262144, 0, 7, 8193]
define.CHUNKSIZE = [31, 16, 33, 1, 1023]
code = '''
lfs_format(&lfs, &cfg) => 0;
// write
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
srand(1);
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE;
srand(1);
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # rewriting files
define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
define.CHUNKSIZE = [31, 16, 1]
code = '''
lfs_format(&lfs, &cfg) => 0;
// write
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
srand(1);
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1;
srand(1);
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// rewrite
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY) => 0;
srand(2);
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => lfs_max(SIZE1, SIZE2);
srand(2);
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
}
}
if (SIZE1 > SIZE2) {
srand(1);
for (lfs_size_t b = 0; b < SIZE2; b++) {
rand();
}
for (lfs_size_t i = SIZE2; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
}
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # appending files
define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
define.CHUNKSIZE = [31, 16, 1]
code = '''
lfs_format(&lfs, &cfg) => 0;
// write
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
srand(1);
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1;
srand(1);
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// append
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_APPEND) => 0;
srand(2);
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1 + SIZE2;
srand(1);
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
}
}
srand(2);
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # truncating files
define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
define.CHUNKSIZE = [31, 16, 1]
code = '''
lfs_format(&lfs, &cfg) => 0;
// write
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
srand(1);
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1;
srand(1);
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// truncate
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_TRUNC) => 0;
srand(2);
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// read
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE2;
srand(2);
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant file writing
define.SIZE = [32, 0, 7, 2049]
define.CHUNKSIZE = [31, 16, 65]
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
}
err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
assert(err == LFS_ERR_NOENT || err == 0);
if (err == 0) {
// can only be 0 (new file) or full size
size = lfs_file_size(&lfs, &file);
assert(size == 0 || size == SIZE);
lfs_file_close(&lfs, &file) => 0;
}
// write
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_CREAT) => 0;
srand(1);
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
// read
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE;
srand(1);
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant file writing with syncs
define = [
# append (O(n))
{MODE='LFS_O_APPEND', SIZE=[32, 0, 7, 2049], CHUNKSIZE=[31, 16, 65]},
# truncate (O(n^2))
{MODE='LFS_O_TRUNC', SIZE=[32, 0, 7, 200], CHUNKSIZE=[31, 16, 65]},
# rewrite (O(n^2))
{MODE=0, SIZE=[32, 0, 7, 200], CHUNKSIZE=[31, 16, 65]},
]
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
}
err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
assert(err == LFS_ERR_NOENT || err == 0);
if (err == 0) {
        // with syncs we could be any size, but the data must at least be valid
size = lfs_file_size(&lfs, &file);
assert(size <= SIZE);
srand(1);
for (lfs_size_t i = 0; i < size; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, size-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
}
}
lfs_file_close(&lfs, &file) => 0;
}
// write
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | MODE) => 0;
size = lfs_file_size(&lfs, &file);
assert(size <= SIZE);
srand(1);
lfs_size_t skip = (MODE == LFS_O_APPEND) ? size : 0;
for (lfs_size_t b = 0; b < skip; b++) {
rand();
}
for (lfs_size_t i = skip; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = rand() & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
lfs_file_sync(&lfs, &file) => 0;
}
lfs_file_close(&lfs, &file) => 0;
// read
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE;
srand(1);
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (rand() & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # many files
define.N = 300
code = '''
lfs_format(&lfs, &cfg) => 0;
// create N files of 7 bytes
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "file_%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
char wbuffer[1024];
size = 7;
snprintf(wbuffer, size, "Hi %03d", i);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
char rbuffer[1024];
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(strcmp(rbuffer, wbuffer) == 0);
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # many files with power cycle
define.N = 300
code = '''
lfs_format(&lfs, &cfg) => 0;
// create N files of 7 bytes
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "file_%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
char wbuffer[1024];
size = 7;
snprintf(wbuffer, size, "Hi %03d", i);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
char rbuffer[1024];
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(strcmp(rbuffer, wbuffer) == 0);
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # many files with power loss
define.N = 300
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
}
// create N files of 7 bytes
for (int i = 0; i < N; i++) {
sprintf(path, "file_%03d", i);
err = lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT);
char wbuffer[1024];
size = 7;
snprintf(wbuffer, size, "Hi %03d", i);
if ((lfs_size_t)lfs_file_size(&lfs, &file) != size) {
lfs_file_write(&lfs, &file, wbuffer, size) => size;
}
lfs_file_close(&lfs, &file) => 0;
char rbuffer[1024];
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(strcmp(rbuffer, wbuffer) == 0);
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
'''

tests/test_format.sh Executable file

@@ -0,0 +1,51 @@
#!/bin/bash
set -eu
export TEST_FILE=$0
trap 'export TEST_LINE=$LINENO' DEBUG
echo "=== Formatting tests ==="
rm -rf blocks
echo "--- Basic formatting ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
echo "--- Basic mounting ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Invalid superblocks ---"
ln -f -s /dev/zero blocks/0
ln -f -s /dev/zero blocks/1
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => LFS_ERR_NOSPC;
TEST
rm blocks/0 blocks/1
echo "--- Invalid mount ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
TEST
echo "--- Expanding superblock ---"
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < 100; i++) {
lfs_mkdir(&lfs, "dummy") => 0;
lfs_remove(&lfs, "dummy") => 0;
}
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "dummy") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/results.py

tests/test_interspersed.sh Executable file

@@ -0,0 +1,190 @@
#!/bin/bash
set -eu
export TEST_FILE=$0
trap 'export TEST_LINE=$LINENO' DEBUG
echo "=== Interspersed tests ==="
rm -rf blocks
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
echo "--- Interspersed file test ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_t files[4];
lfs_file_open(&lfs, &files[0], "a", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_open(&lfs, &files[1], "b", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_open(&lfs, &files[2], "c", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_open(&lfs, &files[3], "d", LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (int i = 0; i < 10; i++) {
lfs_file_write(&lfs, &files[0], (const void*)"a", 1) => 1;
lfs_file_write(&lfs, &files[1], (const void*)"b", 1) => 1;
lfs_file_write(&lfs, &files[2], (const void*)"c", 1) => 1;
lfs_file_write(&lfs, &files[3], (const void*)"d", 1) => 1;
}
lfs_file_close(&lfs, &files[0]);
lfs_file_close(&lfs, &files[1]);
lfs_file_close(&lfs, &files[2]);
lfs_file_close(&lfs, &files[3]);
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "a") => 0;
info.type => LFS_TYPE_REG;
info.size => 10;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "b") => 0;
info.type => LFS_TYPE_REG;
info.size => 10;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "c") => 0;
info.type => LFS_TYPE_REG;
info.size => 10;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "d") => 0;
info.type => LFS_TYPE_REG;
info.size => 10;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &files[0], "a", LFS_O_RDONLY) => 0;
lfs_file_open(&lfs, &files[1], "b", LFS_O_RDONLY) => 0;
lfs_file_open(&lfs, &files[2], "c", LFS_O_RDONLY) => 0;
lfs_file_open(&lfs, &files[3], "d", LFS_O_RDONLY) => 0;
for (int i = 0; i < 10; i++) {
lfs_file_read(&lfs, &files[0], buffer, 1) => 1;
buffer[0] => 'a';
lfs_file_read(&lfs, &files[1], buffer, 1) => 1;
buffer[0] => 'b';
lfs_file_read(&lfs, &files[2], buffer, 1) => 1;
buffer[0] => 'c';
lfs_file_read(&lfs, &files[3], buffer, 1) => 1;
buffer[0] => 'd';
}
lfs_file_close(&lfs, &files[0]);
lfs_file_close(&lfs, &files[1]);
lfs_file_close(&lfs, &files[2]);
lfs_file_close(&lfs, &files[3]);
lfs_unmount(&lfs) => 0;
TEST
echo "--- Interspersed remove file test ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_t files[4];
lfs_file_open(&lfs, &files[0], "e", LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (int i = 0; i < 5; i++) {
lfs_file_write(&lfs, &files[0], (const void*)"e", 1) => 1;
}
lfs_remove(&lfs, "a") => 0;
lfs_remove(&lfs, "b") => 0;
lfs_remove(&lfs, "c") => 0;
lfs_remove(&lfs, "d") => 0;
for (int i = 0; i < 5; i++) {
lfs_file_write(&lfs, &files[0], (const void*)"e", 1) => 1;
}
lfs_file_close(&lfs, &files[0]);
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "e") => 0;
info.type => LFS_TYPE_REG;
info.size => 10;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &files[0], "e", LFS_O_RDONLY) => 0;
for (int i = 0; i < 10; i++) {
lfs_file_read(&lfs, &files[0], buffer, 1) => 1;
buffer[0] => 'e';
}
lfs_file_close(&lfs, &files[0]);
lfs_unmount(&lfs) => 0;
TEST
echo "--- Remove inconveniently test ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_t files[4];
lfs_file_open(&lfs, &files[0], "e", LFS_O_WRONLY | LFS_O_TRUNC) => 0;
lfs_file_open(&lfs, &files[1], "f", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_open(&lfs, &files[2], "g", LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (int i = 0; i < 5; i++) {
lfs_file_write(&lfs, &files[0], (const void*)"e", 1) => 1;
lfs_file_write(&lfs, &files[1], (const void*)"f", 1) => 1;
lfs_file_write(&lfs, &files[2], (const void*)"g", 1) => 1;
}
lfs_remove(&lfs, "f") => 0;
for (int i = 0; i < 5; i++) {
lfs_file_write(&lfs, &files[0], (const void*)"e", 1) => 1;
lfs_file_write(&lfs, &files[1], (const void*)"f", 1) => 1;
lfs_file_write(&lfs, &files[2], (const void*)"g", 1) => 1;
}
lfs_file_close(&lfs, &files[0]);
lfs_file_close(&lfs, &files[1]);
lfs_file_close(&lfs, &files[2]);
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
info.type => LFS_TYPE_DIR;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "e") => 0;
info.type => LFS_TYPE_REG;
info.size => 10;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "g") => 0;
info.type => LFS_TYPE_REG;
info.size => 10;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &files[0], "e", LFS_O_RDONLY) => 0;
lfs_file_open(&lfs, &files[1], "g", LFS_O_RDONLY) => 0;
for (int i = 0; i < 10; i++) {
lfs_file_read(&lfs, &files[0], buffer, 1) => 1;
buffer[0] => 'e';
lfs_file_read(&lfs, &files[1], buffer, 1) => 1;
buffer[0] => 'g';
}
lfs_file_close(&lfs, &files[0]);
lfs_file_close(&lfs, &files[1]);
lfs_unmount(&lfs) => 0;
TEST
scripts/results.py


@@ -1,244 +0,0 @@
[[case]] # interspersed file test
define.SIZE = [10, 100]
define.FILES = [4, 10, 26]
code = '''
lfs_file_t files[FILES];
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int j = 0; j < FILES; j++) {
sprintf(path, "%c", alphas[j]);
lfs_file_open(&lfs, &files[j], path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
}
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < FILES; j++) {
lfs_file_write(&lfs, &files[j], &alphas[j], 1) => 1;
}
}
for (int j = 0; j < FILES; j++) {
lfs_file_close(&lfs, &files[j]);
}
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
for (int j = 0; j < FILES; j++) {
sprintf(path, "%c", alphas[j]);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_REG);
assert(info.size == SIZE);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
for (int j = 0; j < FILES; j++) {
sprintf(path, "%c", alphas[j]);
lfs_file_open(&lfs, &files[j], path, LFS_O_RDONLY) => 0;
}
for (int i = 0; i < 10; i++) {
for (int j = 0; j < FILES; j++) {
lfs_file_read(&lfs, &files[j], buffer, 1) => 1;
assert(buffer[0] == alphas[j]);
}
}
for (int j = 0; j < FILES; j++) {
lfs_file_close(&lfs, &files[j]);
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # interspersed remove file test
define.SIZE = [10, 100]
define.FILES = [4, 10, 26]
code = '''
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int j = 0; j < FILES; j++) {
sprintf(path, "%c", alphas[j]);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
for (int i = 0; i < SIZE; i++) {
lfs_file_write(&lfs, &file, &alphas[j], 1) => 1;
}
lfs_file_close(&lfs, &file);
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "zzz", LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (int j = 0; j < FILES; j++) {
lfs_file_write(&lfs, &file, (const void*)"~", 1) => 1;
lfs_file_sync(&lfs, &file) => 0;
sprintf(path, "%c", alphas[j]);
lfs_remove(&lfs, path) => 0;
}
lfs_file_close(&lfs, &file);
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "zzz") == 0);
assert(info.type == LFS_TYPE_REG);
assert(info.size == FILES);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &file, "zzz", LFS_O_RDONLY) => 0;
for (int i = 0; i < FILES; i++) {
lfs_file_read(&lfs, &file, buffer, 1) => 1;
assert(buffer[0] == '~');
}
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;
'''
[[case]] # remove inconveniently test
define.SIZE = [10, 100]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_t files[3];
lfs_file_open(&lfs, &files[0], "e", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_open(&lfs, &files[1], "f", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_open(&lfs, &files[2], "g", LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (int i = 0; i < SIZE/2; i++) {
lfs_file_write(&lfs, &files[0], (const void*)"e", 1) => 1;
lfs_file_write(&lfs, &files[1], (const void*)"f", 1) => 1;
lfs_file_write(&lfs, &files[2], (const void*)"g", 1) => 1;
}
lfs_remove(&lfs, "f") => 0;
for (int i = 0; i < SIZE/2; i++) {
lfs_file_write(&lfs, &files[0], (const void*)"e", 1) => 1;
lfs_file_write(&lfs, &files[1], (const void*)"f", 1) => 1;
lfs_file_write(&lfs, &files[2], (const void*)"g", 1) => 1;
}
lfs_file_close(&lfs, &files[0]);
lfs_file_close(&lfs, &files[1]);
lfs_file_close(&lfs, &files[2]);
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "e") == 0);
assert(info.type == LFS_TYPE_REG);
assert(info.size == SIZE);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "g") == 0);
assert(info.type == LFS_TYPE_REG);
assert(info.size == SIZE);
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_file_open(&lfs, &files[0], "e", LFS_O_RDONLY) => 0;
lfs_file_open(&lfs, &files[1], "g", LFS_O_RDONLY) => 0;
for (int i = 0; i < SIZE; i++) {
lfs_file_read(&lfs, &files[0], buffer, 1) => 1;
assert(buffer[0] == 'e');
lfs_file_read(&lfs, &files[1], buffer, 1) => 1;
assert(buffer[0] == 'g');
}
lfs_file_close(&lfs, &files[0]);
lfs_file_close(&lfs, &files[1]);
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant interspersed file test
define.SIZE = [10, 100]
define.FILES = [4, 10, 26]
reentrant = true
code = '''
lfs_file_t files[FILES];
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
err = lfs_mount(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
}
for (int j = 0; j < FILES; j++) {
sprintf(path, "%c", alphas[j]);
lfs_file_open(&lfs, &files[j], path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
}
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < FILES; j++) {
size = lfs_file_size(&lfs, &files[j]);
assert((int)size >= 0);
if ((int)size <= i) {
lfs_file_write(&lfs, &files[j], &alphas[j], 1) => 1;
lfs_file_sync(&lfs, &files[j]) => 0;
}
}
}
for (int j = 0; j < FILES; j++) {
lfs_file_close(&lfs, &files[j]);
}
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, "..") == 0);
assert(info.type == LFS_TYPE_DIR);
for (int j = 0; j < FILES; j++) {
sprintf(path, "%c", alphas[j]);
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, path) == 0);
assert(info.type == LFS_TYPE_REG);
assert(info.size == SIZE);
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
for (int j = 0; j < FILES; j++) {
sprintf(path, "%c", alphas[j]);
lfs_file_open(&lfs, &files[j], path, LFS_O_RDONLY) => 0;
}
for (int i = 0; i < 10; i++) {
for (int j = 0; j < FILES; j++) {
lfs_file_read(&lfs, &files[j], buffer, 1) => 1;
assert(buffer[0] == alphas[j]);
}
}
for (int j = 0; j < FILES; j++) {
lfs_file_close(&lfs, &files[j]);
}
lfs_unmount(&lfs) => 0;
'''

333 tests/test_move.sh Executable file

@@ -0,0 +1,333 @@
#!/bin/bash
set -eu
export TEST_FILE=$0
trap 'export TEST_LINE=$LINENO' DEBUG
echo "=== Move tests ==="
rm -rf blocks
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
lfs_mkdir(&lfs, "d") => 0;
lfs_mkdir(&lfs, "a/hi") => 0;
lfs_mkdir(&lfs, "a/hi/hola") => 0;
lfs_mkdir(&lfs, "a/hi/bonjour") => 0;
lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hola\n", 5) => 5;
lfs_file_write(&lfs, &file, "bonjour\n", 8) => 8;
lfs_file_write(&lfs, &file, "ohayo\n", 6) => 6;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Move file ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_rename(&lfs, "a/hello", "b/hello") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hi") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_dir_open(&lfs, &dir, "b") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hello") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Move file corrupt source ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_rename(&lfs, "b/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
TEST
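# corrupt the most recent commit, leaving the rename only partially applied on disk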
scripts/corrupt.py -n 1
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "b") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hello") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Move file corrupt source and dest ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_rename(&lfs, "c/hello", "d/hello") => 0;
lfs_unmount(&lfs) => 0;
TEST
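# corrupt the two most recent commits, losing both halves of the rename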
scripts/corrupt.py -n 2
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hello") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_dir_open(&lfs, &dir, "d") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Move file after corrupt ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_rename(&lfs, "c/hello", "d/hello") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_dir_open(&lfs, &dir, "d") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hello") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Move dir ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_rename(&lfs, "a/hi", "b/hi") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_dir_open(&lfs, &dir, "b") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hi") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Move dir corrupt source ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_rename(&lfs, "b/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/corrupt.py -n 1
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "b") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hi") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Move dir corrupt source and dest ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_rename(&lfs, "c/hi", "d/hi") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/corrupt.py -n 2
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hi") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_dir_open(&lfs, &dir, "d") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hello") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Move dir after corrupt ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_rename(&lfs, "c/hi", "d/hi") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_dir_open(&lfs, &dir, "d") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hello") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hi") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Move check ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "a/hi") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "b/hi") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "c/hi") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "d/hi") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "bonjour") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hola") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "ohayo") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_dir_open(&lfs, &dir, "a/hello") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "b/hello") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "c/hello") => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "d/hello", LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, buffer, 5) => 5;
memcmp(buffer, "hola\n", 5) => 0;
lfs_file_read(&lfs, &file, buffer, 8) => 8;
memcmp(buffer, "bonjour\n", 8) => 0;
lfs_file_read(&lfs, &file, buffer, 6) => 6;
memcmp(buffer, "ohayo\n", 6) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Move state stealing ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_remove(&lfs, "b") => 0;
lfs_remove(&lfs, "c") => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "a/hi") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "b") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "c") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "d/hi") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "bonjour") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "hola") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "ohayo") => 0;
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_dir_open(&lfs, &dir, "a/hello") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "b") => LFS_ERR_NOENT;
lfs_dir_open(&lfs, &dir, "c") => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "d/hello", LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, buffer, 5) => 5;
memcmp(buffer, "hola\n", 5) => 0;
lfs_file_read(&lfs, &file, buffer, 8) => 8;
memcmp(buffer, "bonjour\n", 8) => 0;
lfs_file_read(&lfs, &file, buffer, 6) => 6;
memcmp(buffer, "ohayo\n", 6) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/results.py

File diff suppressed because it is too large

46 tests/test_orphan.sh Executable file

@@ -0,0 +1,46 @@
#!/bin/bash
set -eu
export TEST_FILE=$0
trap 'export TEST_LINE=$LINENO' DEBUG
echo "=== Orphan tests ==="
rm -rf blocks
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
echo "--- Orphan test ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "parent") => 0;
lfs_mkdir(&lfs, "parent/orphan") => 0;
lfs_mkdir(&lfs, "parent/child") => 0;
lfs_remove(&lfs, "parent/orphan") => 0;
TEST
# corrupt most recent commit, this should be the update to the previous
# linked-list entry and should orphan the child
scripts/corrupt.py
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_ssize_t before = lfs_fs_size(&lfs);
before => 8;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_ssize_t orphaned = lfs_fs_size(&lfs);
orphaned => 8;
lfs_mkdir(&lfs, "parent/otherchild") => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_ssize_t deorphaned = lfs_fs_size(&lfs);
deorphaned => 8;
lfs_unmount(&lfs) => 0;
TEST
scripts/results.py


@@ -1,120 +0,0 @@
[[case]] # orphan test
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "parent") => 0;
lfs_mkdir(&lfs, "parent/orphan") => 0;
lfs_mkdir(&lfs, "parent/child") => 0;
lfs_remove(&lfs, "parent/orphan") => 0;
lfs_unmount(&lfs) => 0;
// corrupt the child's most recent commit, this should be the update
// to the linked-list entry, which should orphan the orphan. Note this
// makes a lot of assumptions about the remove operation.
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "parent/child") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
int off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "parent/child", &info) => 0;
lfs_fs_size(&lfs) => 8;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "parent/child", &info) => 0;
lfs_fs_size(&lfs) => 8;
// this mkdir should both create a dir and deorphan, so size
// should be unchanged
lfs_mkdir(&lfs, "parent/otherchild") => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "parent/child", &info) => 0;
lfs_stat(&lfs, "parent/otherchild", &info) => 0;
lfs_fs_size(&lfs) => 8;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "parent/child", &info) => 0;
lfs_stat(&lfs, "parent/otherchild", &info) => 0;
lfs_fs_size(&lfs) => 8;
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant testing for orphans, basically just spam mkdir/remove
reentrant = true
# TODO fix this case, caused by non-DAG trees
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
define = [
{FILES=6, DEPTH=1, CYCLES=20},
{FILES=26, DEPTH=1, CYCLES=20},
{FILES=3, DEPTH=3, CYCLES=20},
]
code = '''
err = lfs_mount(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
}
srand(1);
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
for (int i = 0; i < CYCLES; i++) {
// create random path
char full_path[256];
for (int d = 0; d < DEPTH; d++) {
sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
}
// if it does not exist, we create it, else we destroy
int res = lfs_stat(&lfs, full_path, &info);
if (res == LFS_ERR_NOENT) {
// create each directory in turn, ignore if dir already exists
for (int d = 0; d < DEPTH; d++) {
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_mkdir(&lfs, path);
assert(!err || err == LFS_ERR_EXIST);
}
for (int d = 0; d < DEPTH; d++) {
strcpy(path, full_path);
path[2*d+2] = '\0';
lfs_stat(&lfs, path, &info) => 0;
assert(strcmp(info.name, &path[2*d+1]) == 0);
assert(info.type == LFS_TYPE_DIR);
}
} else {
// is valid dir?
assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
assert(info.type == LFS_TYPE_DIR);
// try to delete path in reverse order, ignore if dir is not empty
for (int d = DEPTH-1; d >= 0; d--) {
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_remove(&lfs, path);
assert(!err || err == LFS_ERR_NOTEMPTY);
}
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
}
}
lfs_unmount(&lfs) => 0;
'''

202 tests/test_paths.sh Executable file

@@ -0,0 +1,202 @@
#!/bin/bash
set -eu
export TEST_FILE=$0
trap 'export TEST_LINE=$LINENO' DEBUG
echo "=== Path tests ==="
rm -rf blocks
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "coffee") => 0;
lfs_mkdir(&lfs, "soda") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
lfs_mkdir(&lfs, "soda/hotsoda") => 0;
lfs_mkdir(&lfs, "soda/warmsoda") => 0;
lfs_mkdir(&lfs, "soda/coldsoda") => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Root path tests ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "tea/hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_stat(&lfs, "/tea/hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_mkdir(&lfs, "/milk1") => 0;
lfs_stat(&lfs, "/milk1", &info) => 0;
strcmp(info.name, "milk1") => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Redundant slash path tests ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "/tea/hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_stat(&lfs, "//tea//hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_stat(&lfs, "///tea///hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_mkdir(&lfs, "///milk2") => 0;
lfs_stat(&lfs, "///milk2", &info) => 0;
strcmp(info.name, "milk2") => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Dot path tests ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "./tea/hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_stat(&lfs, "/./tea/hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_stat(&lfs, "/././tea/hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_stat(&lfs, "/./tea/./hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_mkdir(&lfs, "/./milk3") => 0;
lfs_stat(&lfs, "/./milk3", &info) => 0;
strcmp(info.name, "milk3") => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Dot dot path tests ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "coffee/../tea/hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_stat(&lfs, "tea/coldtea/../hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_stat(&lfs, "coffee/coldcoffee/../../tea/hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_stat(&lfs, "coffee/../soda/../tea/hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_mkdir(&lfs, "coffee/../milk4") => 0;
lfs_stat(&lfs, "coffee/../milk4", &info) => 0;
strcmp(info.name, "milk4") => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Trailing dot path tests ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "tea/hottea/", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_stat(&lfs, "tea/hottea/.", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_stat(&lfs, "tea/hottea/./.", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_stat(&lfs, "tea/hottea/..", &info) => 0;
strcmp(info.name, "tea") => 0;
lfs_stat(&lfs, "tea/hottea/../.", &info) => 0;
strcmp(info.name, "tea") => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Root dot dot path tests ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "coffee/../../../../../../tea/hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_mkdir(&lfs, "coffee/../../../../../../milk5") => 0;
lfs_stat(&lfs, "coffee/../../../../../../milk5", &info) => 0;
strcmp(info.name, "milk5") => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Root tests ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "/", &info) => 0;
info.type => LFS_TYPE_DIR;
strcmp(info.name, "/") => 0;
lfs_mkdir(&lfs, "/") => LFS_ERR_EXIST;
lfs_file_open(&lfs, &file, "/", LFS_O_WRONLY | LFS_O_CREAT)
=> LFS_ERR_ISDIR;
// more corner cases
lfs_remove(&lfs, "") => LFS_ERR_INVAL;
lfs_remove(&lfs, ".") => LFS_ERR_INVAL;
lfs_remove(&lfs, "..") => LFS_ERR_INVAL;
lfs_remove(&lfs, "/") => LFS_ERR_INVAL;
lfs_remove(&lfs, "//") => LFS_ERR_INVAL;
lfs_remove(&lfs, "./") => LFS_ERR_INVAL;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Sketchy path tests ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "dirt/ground") => LFS_ERR_NOENT;
lfs_mkdir(&lfs, "dirt/ground/earth") => LFS_ERR_NOENT;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Superblock conflict test ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "littlefs") => 0;
lfs_remove(&lfs, "littlefs") => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Max path test ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
memset(path, 'w', LFS_NAME_MAX+1);
path[LFS_NAME_MAX+1] = '\0';
lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_NAMETOOLONG;
memcpy(path, "coffee/", strlen("coffee/"));
memset(path+strlen("coffee/"), 'w', LFS_NAME_MAX+1);
path[strlen("coffee/")+LFS_NAME_MAX+2] = '\0';
lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_NAMETOOLONG;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Really big path test ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
memset(path, 'w', LFS_NAME_MAX);
path[LFS_NAME_MAX] = '\0';
lfs_mkdir(&lfs, path) => 0;
lfs_remove(&lfs, path) => 0;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_remove(&lfs, path) => 0;
memcpy(path, "coffee/", strlen("coffee/"));
memset(path+strlen("coffee/"), 'w', LFS_NAME_MAX);
path[strlen("coffee/")+LFS_NAME_MAX+1] = '\0';
lfs_mkdir(&lfs, path) => 0;
lfs_remove(&lfs, path) => 0;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_remove(&lfs, path) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/results.py


@@ -1,293 +0,0 @@
[[case]] # simple path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
lfs_stat(&lfs, "tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "/tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_mkdir(&lfs, "/milk") => 0;
lfs_stat(&lfs, "/milk", &info) => 0;
assert(strcmp(info.name, "milk") == 0);
lfs_stat(&lfs, "milk", &info) => 0;
assert(strcmp(info.name, "milk") == 0);
lfs_unmount(&lfs) => 0;
'''
[[case]] # redundant slashes
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
lfs_stat(&lfs, "/tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "//tea//hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "///tea///hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_mkdir(&lfs, "////milk") => 0;
lfs_stat(&lfs, "////milk", &info) => 0;
assert(strcmp(info.name, "milk") == 0);
lfs_stat(&lfs, "milk", &info) => 0;
assert(strcmp(info.name, "milk") == 0);
lfs_unmount(&lfs) => 0;
'''
[[case]] # dot path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
lfs_stat(&lfs, "./tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "/./tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "/././tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "/./tea/./hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_mkdir(&lfs, "/./milk") => 0;
lfs_stat(&lfs, "/./milk", &info) => 0;
assert(strcmp(info.name, "milk") == 0);
lfs_stat(&lfs, "milk", &info) => 0;
assert(strcmp(info.name, "milk") == 0);
lfs_unmount(&lfs) => 0;
'''
[[case]] # dot dot path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
lfs_mkdir(&lfs, "coffee") => 0;
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
lfs_stat(&lfs, "coffee/../tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "tea/coldtea/../hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "coffee/coldcoffee/../../tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "coffee/../coffee/../tea/hottea", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_mkdir(&lfs, "coffee/../milk") => 0;
lfs_stat(&lfs, "coffee/../milk", &info) => 0;
strcmp(info.name, "milk") => 0;
lfs_stat(&lfs, "milk", &info) => 0;
strcmp(info.name, "milk") => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # trailing dot path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
lfs_stat(&lfs, "tea/hottea/", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "tea/hottea/.", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "tea/hottea/./.", &info) => 0;
assert(strcmp(info.name, "hottea") == 0);
lfs_stat(&lfs, "tea/hottea/..", &info) => 0;
assert(strcmp(info.name, "tea") == 0);
lfs_stat(&lfs, "tea/hottea/../.", &info) => 0;
assert(strcmp(info.name, "tea") == 0);
lfs_unmount(&lfs) => 0;
'''
[[case]] # leading dot path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, ".milk") => 0;
lfs_stat(&lfs, ".milk", &info) => 0;
strcmp(info.name, ".milk") => 0;
lfs_stat(&lfs, "tea/.././.milk", &info) => 0;
strcmp(info.name, ".milk") => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # root dot dot path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
lfs_mkdir(&lfs, "tea/coldtea") => 0;
lfs_mkdir(&lfs, "coffee") => 0;
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
lfs_stat(&lfs, "coffee/../../../../../../tea/hottea", &info) => 0;
strcmp(info.name, "hottea") => 0;
lfs_mkdir(&lfs, "coffee/../../../../../../milk") => 0;
lfs_stat(&lfs, "coffee/../../../../../../milk", &info) => 0;
strcmp(info.name, "milk") => 0;
lfs_stat(&lfs, "milk", &info) => 0;
strcmp(info.name, "milk") => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # invalid path tests
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "dirt", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "dirt/ground", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "dirt/ground/earth", &info) => LFS_ERR_NOENT;
lfs_remove(&lfs, "dirt") => LFS_ERR_NOENT;
lfs_remove(&lfs, "dirt/ground") => LFS_ERR_NOENT;
lfs_remove(&lfs, "dirt/ground/earth") => LFS_ERR_NOENT;
lfs_mkdir(&lfs, "dirt/ground") => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "dirt/ground", LFS_O_WRONLY | LFS_O_CREAT)
=> LFS_ERR_NOENT;
lfs_mkdir(&lfs, "dirt/ground/earth") => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "dirt/ground/earth", LFS_O_WRONLY | LFS_O_CREAT)
=> LFS_ERR_NOENT;
lfs_unmount(&lfs) => 0;
'''
[[case]] # root operations
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "/", &info) => 0;
assert(strcmp(info.name, "/") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_mkdir(&lfs, "/") => LFS_ERR_EXIST;
lfs_file_open(&lfs, &file, "/", LFS_O_WRONLY | LFS_O_CREAT)
=> LFS_ERR_ISDIR;
lfs_remove(&lfs, "/") => LFS_ERR_INVAL;
lfs_unmount(&lfs) => 0;
'''
[[case]] # root representations
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "/", &info) => 0;
assert(strcmp(info.name, "/") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_stat(&lfs, "", &info) => 0;
assert(strcmp(info.name, "/") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_stat(&lfs, ".", &info) => 0;
assert(strcmp(info.name, "/") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_stat(&lfs, "..", &info) => 0;
assert(strcmp(info.name, "/") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_stat(&lfs, "//", &info) => 0;
assert(strcmp(info.name, "/") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_stat(&lfs, "./", &info) => 0;
assert(strcmp(info.name, "/") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_unmount(&lfs) => 0;
'''
[[case]] # superblock conflict test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "littlefs", &info) => LFS_ERR_NOENT;
lfs_remove(&lfs, "littlefs") => LFS_ERR_NOENT;
lfs_mkdir(&lfs, "littlefs") => 0;
lfs_stat(&lfs, "littlefs", &info) => 0;
assert(strcmp(info.name, "littlefs") == 0);
assert(info.type == LFS_TYPE_DIR);
lfs_remove(&lfs, "littlefs") => 0;
lfs_stat(&lfs, "littlefs", &info) => LFS_ERR_NOENT;
lfs_unmount(&lfs) => 0;
'''
[[case]] # max path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "coffee") => 0;
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
memset(path, 'w', LFS_NAME_MAX+1);
path[LFS_NAME_MAX+1] = '\0';
lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT)
=> LFS_ERR_NAMETOOLONG;
memcpy(path, "coffee/", strlen("coffee/"));
memset(path+strlen("coffee/"), 'w', LFS_NAME_MAX+1);
path[strlen("coffee/")+LFS_NAME_MAX+1] = '\0';
lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG;
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT)
=> LFS_ERR_NAMETOOLONG;
lfs_unmount(&lfs) => 0;
'''
[[case]] # really big path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "coffee") => 0;
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
lfs_mkdir(&lfs, "coffee/coldcoffee") => 0;
memset(path, 'w', LFS_NAME_MAX);
path[LFS_NAME_MAX] = '\0';
lfs_mkdir(&lfs, path) => 0;
lfs_remove(&lfs, path) => 0;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_remove(&lfs, path) => 0;
memcpy(path, "coffee/", strlen("coffee/"));
memset(path+strlen("coffee/"), 'w', LFS_NAME_MAX);
path[strlen("coffee/")+LFS_NAME_MAX] = '\0';
lfs_mkdir(&lfs, path) => 0;
lfs_remove(&lfs, path) => 0;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_remove(&lfs, path) => 0;
lfs_unmount(&lfs) => 0;
'''


@@ -1,305 +0,0 @@
# specific corner cases worth explicitly testing for
[[case]] # dangling split dir test
define.ITERATIONS = 20
define.COUNT = 10
define.LFS_BLOCK_CYCLES = [8, 1]
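# small block_cycles values force frequent metadata relocation while the child dir fills and splits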
code = '''
lfs_format(&lfs, &cfg) => 0;
// fill up filesystem so only ~16 blocks are left
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
memset(buffer, 0, 512);
while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
lfs_file_write(&lfs, &file, buffer, 512) => 512;
}
lfs_file_close(&lfs, &file) => 0;
// make a child dir to use in bounded space
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int j = 0; j < ITERATIONS; j++) {
for (int i = 0; i < COUNT; i++) {
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_dir_open(&lfs, &dir, "child") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
for (int i = 0; i < COUNT; i++) {
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
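// keep the last iteration's files so the remount below can re-verify them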
if (j == ITERATIONS-1) {
break;
}
for (int i = 0; i < COUNT; i++) {
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_remove(&lfs, path) => 0;
}
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "child") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
for (int i = 0; i < COUNT; i++) {
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
for (int i = 0; i < COUNT; i++) {
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_remove(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # outdated head test
define.ITERATIONS = 20
define.COUNT = 10
define.LFS_BLOCK_CYCLES = [8, 1]
code = '''
lfs_format(&lfs, &cfg) => 0;
// fill up filesystem so only ~16 blocks are left
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
memset(buffer, 0, 512);
while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
lfs_file_write(&lfs, &file, buffer, 512) => 512;
}
lfs_file_close(&lfs, &file) => 0;
// make a child dir to use in bounded space
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int j = 0; j < ITERATIONS; j++) {
for (int i = 0; i < COUNT; i++) {
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
lfs_file_close(&lfs, &file) => 0;
}
lfs_dir_open(&lfs, &dir, "child") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
for (int i = 0; i < COUNT; i++) {
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
info.size => 0;
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hi", 2) => 2;
lfs_file_close(&lfs, &file) => 0;
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_rewind(&lfs, &dir) => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
for (int i = 0; i < COUNT; i++) {
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
info.size => 2;
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY) => 0;
lfs_file_write(&lfs, &file, "hi", 2) => 2;
lfs_file_close(&lfs, &file) => 0;
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_rewind(&lfs, &dir) => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
for (int i = 0; i < COUNT; i++) {
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
info.size => 2;
}
lfs_dir_read(&lfs, &dir, &info) => 0;
lfs_dir_close(&lfs, &dir) => 0;
for (int i = 0; i < COUNT; i++) {
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
lfs_remove(&lfs, path) => 0;
}
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant testing for relocations, this is the same as the
# orphan testing, except here we also set block_cycles so that
# almost every tree operation needs a relocation
reentrant = true
# TODO fix this case, caused by non-DAG trees
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
define = [
{FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=3, DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1},
]
code = '''
err = lfs_mount(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
}
srand(1);
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
for (int i = 0; i < CYCLES; i++) {
// create random path
char full_path[256];
for (int d = 0; d < DEPTH; d++) {
sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
}
// if it does not exist, we create it, else we destroy
int res = lfs_stat(&lfs, full_path, &info);
if (res == LFS_ERR_NOENT) {
// create each directory in turn, ignore if dir already exists
for (int d = 0; d < DEPTH; d++) {
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_mkdir(&lfs, path);
assert(!err || err == LFS_ERR_EXIST);
}
for (int d = 0; d < DEPTH; d++) {
strcpy(path, full_path);
path[2*d+2] = '\0';
lfs_stat(&lfs, path, &info) => 0;
assert(strcmp(info.name, &path[2*d+1]) == 0);
assert(info.type == LFS_TYPE_DIR);
}
} else {
// is valid dir?
assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
assert(info.type == LFS_TYPE_DIR);
// try to delete path in reverse order, ignore if dir is not empty
for (int d = DEPTH-1; d >= 0; d--) {
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_remove(&lfs, path);
assert(!err || err == LFS_ERR_NOTEMPTY);
}
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
}
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant testing for relocations, but now with random renames!
reentrant = true
# TODO fix this case, caused by non-DAG trees
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
define = [
{FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=3, DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1},
]
code = '''
err = lfs_mount(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
}
srand(1);
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
for (int i = 0; i < CYCLES; i++) {
// create random path
char full_path[256];
for (int d = 0; d < DEPTH; d++) {
sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]);
}
// if it does not exist, we create it, else we destroy
int res = lfs_stat(&lfs, full_path, &info);
assert(!res || res == LFS_ERR_NOENT);
if (res == LFS_ERR_NOENT) {
// create each directory in turn, ignore if dir already exists
for (int d = 0; d < DEPTH; d++) {
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_mkdir(&lfs, path);
assert(!err || err == LFS_ERR_EXIST);
}
for (int d = 0; d < DEPTH; d++) {
strcpy(path, full_path);
path[2*d+2] = '\0';
lfs_stat(&lfs, path, &info) => 0;
assert(strcmp(info.name, &path[2*d+1]) == 0);
assert(info.type == LFS_TYPE_DIR);
}
} else {
assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
assert(info.type == LFS_TYPE_DIR);
// create new random path
char new_path[256];
for (int d = 0; d < DEPTH; d++) {
sprintf(&new_path[2*d], "/%c", alpha[rand() % FILES]);
}
// if new path does not exist, rename, otherwise destroy
res = lfs_stat(&lfs, new_path, &info);
assert(!res || res == LFS_ERR_NOENT);
if (res == LFS_ERR_NOENT) {
// stop once some dir is renamed
for (int d = 0; d < DEPTH; d++) {
strcpy(&path[2*d], &full_path[2*d]);
path[2*d+2] = '\0';
strcpy(&path[128+2*d], &new_path[2*d]);
path[128+2*d+2] = '\0';
err = lfs_rename(&lfs, path, path+128);
assert(!err || err == LFS_ERR_NOTEMPTY);
if (!err) {
strcpy(path, path+128);
}
}
for (int d = 0; d < DEPTH; d++) {
strcpy(path, new_path);
path[2*d+2] = '\0';
lfs_stat(&lfs, path, &info) => 0;
assert(strcmp(info.name, &path[2*d+1]) == 0);
assert(info.type == LFS_TYPE_DIR);
}
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
} else {
// try to delete path in reverse order,
// ignore if dir is not empty
for (int d = DEPTH-1; d >= 0; d--) {
strcpy(path, full_path);
path[2*d+2] = '\0';
err = lfs_remove(&lfs, path);
assert(!err || err == LFS_ERR_NOTEMPTY);
}
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
}
}
}
lfs_unmount(&lfs) => 0;
'''

431 tests/test_seek.sh Executable file

@@ -0,0 +1,431 @@
#!/bin/bash
set -eu
export TEST_FILE=$0
trap 'export TEST_LINE=$LINENO' DEBUG
echo "=== Seek tests ==="
SMALLSIZE=4
MEDIUMSIZE=128
LARGESIZE=132
rm -rf blocks
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
for (int i = 0; i < $LARGESIZE; i++) {
sprintf(path, "hello/kitty%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
lfs_size_t size = strlen("kittycatcat");
memcpy(buffer, "kittycatcat", size);
for (int j = 0; j < $LARGESIZE; j++) {
lfs_file_write(&lfs, &file, buffer, size);
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
echo "--- Simple dir seek ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "hello") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_soff_t pos;
int i;
for (i = 0; i < $SMALLSIZE; i++) {
sprintf(path, "kitty%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
pos = lfs_dir_tell(&lfs, &dir);
}
pos >= 0 => 1;
lfs_dir_seek(&lfs, &dir, pos) => 0;
sprintf(path, "kitty%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
lfs_dir_rewind(&lfs, &dir) => 0;
sprintf(path, "kitty%03d", 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
lfs_dir_seek(&lfs, &dir, pos) => 0;
sprintf(path, "kitty%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Large dir seek ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "hello") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_soff_t pos;
int i;
for (i = 0; i < $MEDIUMSIZE; i++) {
sprintf(path, "kitty%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
pos = lfs_dir_tell(&lfs, &dir);
}
pos >= 0 => 1;
lfs_dir_seek(&lfs, &dir, pos) => 0;
sprintf(path, "kitty%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
lfs_dir_rewind(&lfs, &dir) => 0;
sprintf(path, "kitty%03d", 0);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, ".") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, "..") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
lfs_dir_seek(&lfs, &dir, pos) => 0;
sprintf(path, "kitty%03d", i);
lfs_dir_read(&lfs, &dir, &info) => 1;
strcmp(info.name, path) => 0;
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Simple file seek ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "hello/kitty042", LFS_O_RDONLY) => 0;
lfs_soff_t pos;
lfs_size_t size = strlen("kittycatcat");
for (int i = 0; i < $SMALLSIZE; i++) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
pos = lfs_file_tell(&lfs, &file);
}
pos >= 0 => 1;
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_rewind(&lfs, &file) => 0;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, size, LFS_SEEK_CUR) => 3*size;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_CUR) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_END) >= 0 => 1;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
size = lfs_file_size(&lfs, &file);
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Large file seek ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "hello/kitty042", LFS_O_RDONLY) => 0;
lfs_soff_t pos;
lfs_size_t size = strlen("kittycatcat");
for (int i = 0; i < $MEDIUMSIZE; i++) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
pos = lfs_file_tell(&lfs, &file);
}
pos >= 0 => 1;
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_rewind(&lfs, &file) => 0;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, size, LFS_SEEK_CUR) => 3*size;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_CUR) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_END) >= 0 => 1;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
size = lfs_file_size(&lfs, &file);
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Simple file seek and write ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "hello/kitty042", LFS_O_RDWR) => 0;
lfs_soff_t pos;
lfs_size_t size = strlen("kittycatcat");
for (int i = 0; i < $SMALLSIZE; i++) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
pos = lfs_file_tell(&lfs, &file);
}
pos >= 0 => 1;
memcpy(buffer, "doggodogdog", size);
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "doggodogdog", size) => 0;
lfs_file_rewind(&lfs, &file) => 0;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "doggodogdog", size) => 0;
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_END) >= 0 => 1;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
size = lfs_file_size(&lfs, &file);
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Large file seek and write ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "hello/kitty042", LFS_O_RDWR) => 0;
lfs_soff_t pos;
lfs_size_t size = strlen("kittycatcat");
for (int i = 0; i < $MEDIUMSIZE; i++) {
lfs_file_read(&lfs, &file, buffer, size) => size;
if (i != $SMALLSIZE) {
memcmp(buffer, "kittycatcat", size) => 0;
}
pos = lfs_file_tell(&lfs, &file);
}
pos >= 0 => 1;
memcpy(buffer, "doggodogdog", size);
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "doggodogdog", size) => 0;
lfs_file_rewind(&lfs, &file) => 0;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "doggodogdog", size) => 0;
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_END) >= 0 => 1;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
size = lfs_file_size(&lfs, &file);
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Boundary seek and write ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "hello/kitty042", LFS_O_RDWR) => 0;
lfs_size_t size = strlen("hedgehoghog");
const lfs_soff_t offsets[] = {512, 1020, 513, 1021, 511, 1019};
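// offsets land just before, at, and just after the 512 and 1024 byte marks so writes straddle those boundaries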
for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
lfs_soff_t off = offsets[i];
memcpy(buffer, "hedgehoghog", size);
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hedgehoghog", size) => 0;
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_sync(&lfs, &file) => 0;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Out-of-bounds seek ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "hello/kitty042", LFS_O_RDWR) => 0;
lfs_size_t size = strlen("kittycatcat");
lfs_file_size(&lfs, &file) => $LARGESIZE*size;
lfs_file_seek(&lfs, &file, ($LARGESIZE+$SMALLSIZE)*size,
LFS_SEEK_SET) => ($LARGESIZE+$SMALLSIZE)*size;
lfs_file_read(&lfs, &file, buffer, size) => 0;
memcpy(buffer, "porcupineee", size);
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_seek(&lfs, &file, ($LARGESIZE+$SMALLSIZE)*size,
LFS_SEEK_SET) => ($LARGESIZE+$SMALLSIZE)*size;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "porcupineee", size) => 0;
lfs_file_seek(&lfs, &file, $LARGESIZE*size,
LFS_SEEK_SET) => $LARGESIZE*size;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "\0\0\0\0\0\0\0\0\0\0\0", size) => 0;
lfs_file_seek(&lfs, &file, -(($LARGESIZE+$SMALLSIZE)*size),
LFS_SEEK_CUR) => LFS_ERR_INVAL;
lfs_file_tell(&lfs, &file) => ($LARGESIZE+1)*size;
lfs_file_seek(&lfs, &file, -(($LARGESIZE+2*$SMALLSIZE)*size),
LFS_SEEK_END) => LFS_ERR_INVAL;
lfs_file_tell(&lfs, &file) => ($LARGESIZE+1)*size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Inline write and seek ---"
for SIZE in $SMALLSIZE $MEDIUMSIZE $LARGESIZE
do
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "hello/tinykitty$SIZE",
LFS_O_RDWR | LFS_O_CREAT) => 0;
int j = 0;
int k = 0;
memcpy(buffer, "abcdefghijklmnopqrstuvwxyz", 26);
for (unsigned i = 0; i < $SIZE; i++) {
lfs_file_write(&lfs, &file, &buffer[j++ % 26], 1) => 1;
lfs_file_tell(&lfs, &file) => i+1;
lfs_file_size(&lfs, &file) => i+1;
}
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
lfs_file_tell(&lfs, &file) => 0;
lfs_file_size(&lfs, &file) => $SIZE;
for (unsigned i = 0; i < $SIZE; i++) {
uint8_t c;
lfs_file_read(&lfs, &file, &c, 1) => 1;
c => buffer[k++ % 26];
}
lfs_file_sync(&lfs, &file) => 0;
lfs_file_tell(&lfs, &file) => $SIZE;
lfs_file_size(&lfs, &file) => $SIZE;
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
for (unsigned i = 0; i < $SIZE; i++) {
lfs_file_write(&lfs, &file, &buffer[j++ % 26], 1) => 1;
lfs_file_tell(&lfs, &file) => i+1;
lfs_file_size(&lfs, &file) => $SIZE;
lfs_file_sync(&lfs, &file) => 0;
lfs_file_tell(&lfs, &file) => i+1;
lfs_file_size(&lfs, &file) => $SIZE;
if (i < $SIZE-2) {
uint8_t c[3];
lfs_file_seek(&lfs, &file, -1, LFS_SEEK_CUR) => i;
lfs_file_read(&lfs, &file, &c, 3) => 3;
lfs_file_tell(&lfs, &file) => i+3;
lfs_file_size(&lfs, &file) => $SIZE;
lfs_file_seek(&lfs, &file, i+1, LFS_SEEK_SET) => i+1;
lfs_file_tell(&lfs, &file) => i+1;
lfs_file_size(&lfs, &file) => $SIZE;
}
}
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
lfs_file_tell(&lfs, &file) => 0;
lfs_file_size(&lfs, &file) => $SIZE;
for (unsigned i = 0; i < $SIZE; i++) {
uint8_t c;
lfs_file_read(&lfs, &file, &c, 1) => 1;
c => buffer[k++ % 26];
}
lfs_file_sync(&lfs, &file) => 0;
lfs_file_tell(&lfs, &file) => $SIZE;
lfs_file_size(&lfs, &file) => $SIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
done
scripts/results.py


@@ -1,380 +0,0 @@
[[case]] # simple file seek
define = [
{COUNT=132, SKIP=4},
{COUNT=132, SKIP=128},
{COUNT=200, SKIP=10},
{COUNT=200, SKIP=100},
{COUNT=4, SKIP=1},
{COUNT=4, SKIP=2},
]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen("kittycatcat");
memcpy(buffer, "kittycatcat", size);
for (int j = 0; j < COUNT; j++) {
lfs_file_write(&lfs, &file, buffer, size);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY) => 0;
lfs_soff_t pos = -1;
size = strlen("kittycatcat");
for (int i = 0; i < SKIP; i++) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
pos = lfs_file_tell(&lfs, &file);
}
assert(pos >= 0);
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_rewind(&lfs, &file) => 0;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, size, LFS_SEEK_CUR) => 3*size;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_CUR) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_END) >= 0 => 1;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
size = lfs_file_size(&lfs, &file);
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # simple file seek and write
define = [
{COUNT=132, SKIP=4},
{COUNT=132, SKIP=128},
{COUNT=200, SKIP=10},
{COUNT=200, SKIP=100},
{COUNT=4, SKIP=1},
{COUNT=4, SKIP=2},
]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen("kittycatcat");
memcpy(buffer, "kittycatcat", size);
for (int j = 0; j < COUNT; j++) {
lfs_file_write(&lfs, &file, buffer, size);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
lfs_soff_t pos = -1;
size = strlen("kittycatcat");
for (int i = 0; i < SKIP; i++) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
pos = lfs_file_tell(&lfs, &file);
}
assert(pos >= 0);
memcpy(buffer, "doggodogdog", size);
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "doggodogdog", size) => 0;
lfs_file_rewind(&lfs, &file) => 0;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "doggodogdog", size) => 0;
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_END) >= 0 => 1;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
size = lfs_file_size(&lfs, &file);
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # boundary seek and writes
define.COUNT = 132
define.OFFSETS = '"{512, 1020, 513, 1021, 511, 1019, 1441}"'
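# note: these offsets presumably land just before, on, and just after
# cache/block boundaries in the default test geometry; the exact boundary
# values are an assumption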
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen("kittycatcat");
memcpy(buffer, "kittycatcat", size);
for (int j = 0; j < COUNT; j++) {
lfs_file_write(&lfs, &file, buffer, size);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
size = strlen("hedgehoghog");
const lfs_soff_t offsets[] = OFFSETS;
for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
lfs_soff_t off = offsets[i];
memcpy(buffer, "hedgehoghog", size);
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hedgehoghog", size) => 0;
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hedgehoghog", size) => 0;
lfs_file_sync(&lfs, &file) => 0;
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "kittycatcat", size) => 0;
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hedgehoghog", size) => 0;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # out of bounds seek
define = [
{COUNT=132, SKIP=4},
{COUNT=132, SKIP=128},
{COUNT=200, SKIP=10},
{COUNT=200, SKIP=100},
{COUNT=4, SKIP=2},
{COUNT=4, SKIP=3},
]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen("kittycatcat");
memcpy(buffer, "kittycatcat", size);
for (int j = 0; j < COUNT; j++) {
lfs_file_write(&lfs, &file, buffer, size);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
size = strlen("kittycatcat");
lfs_file_size(&lfs, &file) => COUNT*size;
lfs_file_seek(&lfs, &file, (COUNT+SKIP)*size,
LFS_SEEK_SET) => (COUNT+SKIP)*size;
lfs_file_read(&lfs, &file, buffer, size) => 0;
memcpy(buffer, "porcupineee", size);
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_seek(&lfs, &file, (COUNT+SKIP)*size,
LFS_SEEK_SET) => (COUNT+SKIP)*size;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "porcupineee", size) => 0;
lfs_file_seek(&lfs, &file, COUNT*size,
LFS_SEEK_SET) => COUNT*size;
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "\0\0\0\0\0\0\0\0\0\0\0", size) => 0;
lfs_file_seek(&lfs, &file, -((COUNT+SKIP)*size),
LFS_SEEK_CUR) => LFS_ERR_INVAL;
lfs_file_tell(&lfs, &file) => (COUNT+1)*size;
lfs_file_seek(&lfs, &file, -((COUNT+2*SKIP)*size),
LFS_SEEK_END) => LFS_ERR_INVAL;
lfs_file_tell(&lfs, &file) => (COUNT+1)*size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # inline write and seek
define.SIZE = [2, 4, 128, 132]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "tinykitty",
LFS_O_RDWR | LFS_O_CREAT) => 0;
int j = 0;
int k = 0;
memcpy(buffer, "abcdefghijklmnopqrstuvwxyz", 26);
for (unsigned i = 0; i < SIZE; i++) {
lfs_file_write(&lfs, &file, &buffer[j++ % 26], 1) => 1;
lfs_file_tell(&lfs, &file) => i+1;
lfs_file_size(&lfs, &file) => i+1;
}
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
lfs_file_tell(&lfs, &file) => 0;
lfs_file_size(&lfs, &file) => SIZE;
for (unsigned i = 0; i < SIZE; i++) {
uint8_t c;
lfs_file_read(&lfs, &file, &c, 1) => 1;
c => buffer[k++ % 26];
}
lfs_file_sync(&lfs, &file) => 0;
lfs_file_tell(&lfs, &file) => SIZE;
lfs_file_size(&lfs, &file) => SIZE;
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
for (unsigned i = 0; i < SIZE; i++) {
lfs_file_write(&lfs, &file, &buffer[j++ % 26], 1) => 1;
lfs_file_tell(&lfs, &file) => i+1;
lfs_file_size(&lfs, &file) => SIZE;
lfs_file_sync(&lfs, &file) => 0;
lfs_file_tell(&lfs, &file) => i+1;
lfs_file_size(&lfs, &file) => SIZE;
if (i < SIZE-2) {
uint8_t c[3];
lfs_file_seek(&lfs, &file, -1, LFS_SEEK_CUR) => i;
lfs_file_read(&lfs, &file, &c, 3) => 3;
lfs_file_tell(&lfs, &file) => i+3;
lfs_file_size(&lfs, &file) => SIZE;
lfs_file_seek(&lfs, &file, i+1, LFS_SEEK_SET) => i+1;
lfs_file_tell(&lfs, &file) => i+1;
lfs_file_size(&lfs, &file) => SIZE;
}
}
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
lfs_file_tell(&lfs, &file) => 0;
lfs_file_size(&lfs, &file) => SIZE;
for (unsigned i = 0; i < SIZE; i++) {
uint8_t c;
lfs_file_read(&lfs, &file, &c, 1) => 1;
c => buffer[k++ % 26];
}
lfs_file_sync(&lfs, &file) => 0;
lfs_file_tell(&lfs, &file) => SIZE;
lfs_file_size(&lfs, &file) => SIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # file seek and write with power-loss
# must be power-of-2 for quadratic probing to be exhaustive
define.COUNT = [4, 64, 128]
reentrant = true
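# reentrant cases are presumably re-run by the test runner with simulated
# power-loss injected, so the code must tolerate starting from any
# intermediate on-disk state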
code = '''
err = lfs_mount(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
}
err = lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY);
assert(!err || err == LFS_ERR_NOENT);
if (!err) {
if (lfs_file_size(&lfs, &file) != 0) {
lfs_file_size(&lfs, &file) => 11*COUNT;
for (int j = 0; j < COUNT; j++) {
memset(buffer, 0, 11+1);
lfs_file_read(&lfs, &file, buffer, 11) => 11;
assert(memcmp(buffer, "kittycatcat", 11) == 0 ||
memcmp(buffer, "doggodogdog", 11) == 0);
}
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_file_open(&lfs, &file, "kitty", LFS_O_WRONLY | LFS_O_CREAT) => 0;
if (lfs_file_size(&lfs, &file) == 0) {
for (int j = 0; j < COUNT; j++) {
strcpy((char*)buffer, "kittycatcat");
size = strlen((char*)buffer);
lfs_file_write(&lfs, &file, buffer, size) => size;
}
}
lfs_file_close(&lfs, &file) => 0;
strcpy((char*)buffer, "doggodogdog");
size = strlen((char*)buffer);
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => COUNT*size;
// seek and write using quadratic probing to touch all
// 11-byte words in the file
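// note: off = (5*off + 1) % COUNT has full period when COUNT is a power
// of 2 (odd increment, multiplier = 1 mod 4), so every offset is visited
// exactly once before the sequence repeats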
lfs_off_t off = 0;
for (int j = 0; j < COUNT; j++) {
off = (5*off + 1) % COUNT;
lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, "kittycatcat", size) == 0 ||
memcmp(buffer, "doggodogdog", size) == 0);
if (memcmp(buffer, "doggodogdog", size) != 0) {
lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
strcpy((char*)buffer, "doggodogdog");
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, "doggodogdog", size) == 0);
lfs_file_sync(&lfs, &file) => 0;
lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, "doggodogdog", size) == 0);
}
}
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => COUNT*size;
for (int j = 0; j < COUNT; j++) {
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(memcmp(buffer, "doggodogdog", size) == 0);
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''

View File

@@ -1,127 +0,0 @@
[[case]] # simple formatting test
code = '''
lfs_format(&lfs, &cfg) => 0;
'''
[[case]] # mount/unmount
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant format
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # invalid mount
code = '''
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''
[[case]] # expanding superblock
define.LFS_BLOCK_CYCLES = [32, 33, 1]
define.N = [10, 100, 1000]
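# small LFS_BLOCK_CYCLES values presumably force frequent metadata-pair
# relocation, which is what triggers superblock expansion here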
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
lfs_remove(&lfs, "dummy") => 0;
}
lfs_unmount(&lfs) => 0;
// one last check after power-cycle
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
lfs_unmount(&lfs) => 0;
'''
[[case]] # expanding superblock with power cycle
define.LFS_BLOCK_CYCLES = [32, 33, 1]
define.N = [10, 100, 1000]
code = '''
lfs_format(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
lfs_mount(&lfs, &cfg) => 0;
// remove lingering dummy?
err = lfs_stat(&lfs, "dummy", &info);
assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
if (!err) {
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
lfs_remove(&lfs, "dummy") => 0;
}
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
lfs_unmount(&lfs) => 0;
}
// one last check after power-cycle
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
lfs_unmount(&lfs) => 0;
'''
[[case]] # reentrant expanding superblock
define.LFS_BLOCK_CYCLES = [2, 1]
define.N = 24
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
}
for (int i = 0; i < N; i++) {
// remove lingering dummy?
err = lfs_stat(&lfs, "dummy", &info);
assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
if (!err) {
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
lfs_remove(&lfs, "dummy") => 0;
}
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
}
lfs_unmount(&lfs) => 0;
// one last check after power-cycle
lfs_mount(&lfs, &cfg) => 0;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
lfs_unmount(&lfs) => 0;
'''

tests/test_truncate.sh Executable file
View File

@@ -0,0 +1,304 @@
#!/bin/bash
set -eu
export TEST_FILE=$0
trap 'export TEST_LINE=$LINENO' DEBUG
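# TEST_FILE/TEST_LINE are exported for the test harness (scripts/test.py),
# presumably so failing assertions can be traced back to this script and line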
echo "=== Truncate tests ==="
SMALLSIZE=32
MEDIUMSIZE=2048
LARGESIZE=8192
rm -rf blocks
scripts/test.py << TEST
lfs_format(&lfs, &cfg) => 0;
TEST
echo "--- Simple truncate ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "hair");
lfs_size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < $LARGESIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_size(&lfs, &file) => $LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => $LARGESIZE;
lfs_file_truncate(&lfs, &file, $MEDIUMSIZE) => 0;
lfs_file_size(&lfs, &file) => $MEDIUMSIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => $MEDIUMSIZE;
lfs_size_t size = strlen("hair");
for (lfs_off_t j = 0; j < $MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Truncate and read ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldyread",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "hair");
lfs_size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < $LARGESIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_size(&lfs, &file) => $LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => $LARGESIZE;
lfs_file_truncate(&lfs, &file, $MEDIUMSIZE) => 0;
lfs_file_size(&lfs, &file) => $MEDIUMSIZE;
lfs_size_t size = strlen("hair");
for (lfs_off_t j = 0; j < $MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => $MEDIUMSIZE;
lfs_size_t size = strlen("hair");
for (lfs_off_t j = 0; j < $MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
echo "--- Truncate and write ---"
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldywrite",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "hair");
lfs_size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < $LARGESIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_size(&lfs, &file) => $LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => $LARGESIZE;
lfs_file_truncate(&lfs, &file, $MEDIUMSIZE) => 0;
lfs_file_size(&lfs, &file) => $MEDIUMSIZE;
strcpy((char*)buffer, "bald");
lfs_size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < $MEDIUMSIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_size(&lfs, &file) => $MEDIUMSIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => $MEDIUMSIZE;
lfs_size_t size = strlen("bald");
for (lfs_off_t j = 0; j < $MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "bald", size) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
TEST
# More aggressive general truncation tests
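# truncate_test takes four comma-separated size lists: each file i is written
# out to STARTSIZES[i], seeked to STARTSEEKS[i], and truncated to HOTSIZES[i]
# while still open ("hot"); after a remount it is verified and truncated again
# to COLDSIZES[i] ("cold"), then verified once more on a third mount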
truncate_test() {
STARTSIZES="$1"
STARTSEEKS="$2"
HOTSIZES="$3"
COLDSIZES="$4"
scripts/test.py << TEST
static const lfs_off_t startsizes[] = {$STARTSIZES};
static const lfs_off_t startseeks[] = {$STARTSEEKS};
static const lfs_off_t hotsizes[] = {$HOTSIZES};
lfs_mount(&lfs, &cfg) => 0;
for (unsigned i = 0; i < sizeof(startsizes)/sizeof(startsizes[0]); i++) {
sprintf(path, "hairyhead%d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
strcpy((char*)buffer, "hair");
lfs_size_t size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < startsizes[i]; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_size(&lfs, &file) => startsizes[i];
if (startseeks[i] != startsizes[i]) {
lfs_file_seek(&lfs, &file,
startseeks[i], LFS_SEEK_SET) => startseeks[i];
}
lfs_file_truncate(&lfs, &file, hotsizes[i]) => 0;
lfs_file_size(&lfs, &file) => hotsizes[i];
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
static const lfs_off_t startsizes[] = {$STARTSIZES};
static const lfs_off_t hotsizes[] = {$HOTSIZES};
static const lfs_off_t coldsizes[] = {$COLDSIZES};
lfs_mount(&lfs, &cfg) => 0;
for (unsigned i = 0; i < sizeof(startsizes)/sizeof(startsizes[0]); i++) {
sprintf(path, "hairyhead%d", i);
lfs_file_open(&lfs, &file, path, LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => hotsizes[i];
lfs_size_t size = strlen("hair");
lfs_off_t j = 0;
for (; j < startsizes[i] && j < hotsizes[i]; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
for (; j < hotsizes[i]; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "\0\0\0\0", size) => 0;
}
lfs_file_truncate(&lfs, &file, coldsizes[i]) => 0;
lfs_file_size(&lfs, &file) => coldsizes[i];
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
scripts/test.py << TEST
static const lfs_off_t startsizes[] = {$STARTSIZES};
static const lfs_off_t hotsizes[] = {$HOTSIZES};
static const lfs_off_t coldsizes[] = {$COLDSIZES};
lfs_mount(&lfs, &cfg) => 0;
for (unsigned i = 0; i < sizeof(startsizes)/sizeof(startsizes[0]); i++) {
sprintf(path, "hairyhead%d", i);
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => coldsizes[i];
lfs_size_t size = strlen("hair");
lfs_off_t j = 0;
for (; j < startsizes[i] && j < hotsizes[i] && j < coldsizes[i];
j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
for (; j < coldsizes[i]; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "\0\0\0\0", size) => 0;
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
TEST
}
echo "--- Cold shrinking truncate ---"
truncate_test \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE"
echo "--- Cold expanding truncate ---"
truncate_test \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE"
echo "--- Warm shrinking truncate ---"
truncate_test \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
" 0, 0, 0, 0, 0"
echo "--- Warm expanding truncate ---"
truncate_test \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE"
echo "--- Mid-file shrinking truncate ---"
truncate_test \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
" $LARGESIZE, $LARGESIZE, $LARGESIZE, $LARGESIZE, $LARGESIZE" \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
" 0, 0, 0, 0, 0"
echo "--- Mid-file expanding truncate ---"
truncate_test \
" 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE, 2*$LARGESIZE" \
" 0, 0, $SMALLSIZE, $MEDIUMSIZE, $LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE" \
"2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE, 2*$LARGESIZE"
scripts/results.py

View File

@@ -1,439 +0,0 @@
[[case]] # simple truncate
define.MEDIUMSIZE = [32, 2048]
define.LARGESIZE = 8192
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
size = strlen("hair");
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # truncate and read
define.MEDIUMSIZE = [32, 2048]
define.LARGESIZE = 8192
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldyread",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
size = strlen("hair");
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
size = strlen("hair");
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # write, truncate, and read
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "sequence",
LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2);
lfs_size_t qsize = size / 4;
uint8_t *wb = buffer;
uint8_t *rb = buffer + size;
for (lfs_off_t j = 0; j < size; ++j) {
wb[j] = j;
}
/* Spread sequence over size */
lfs_file_write(&lfs, &file, wb, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_tell(&lfs, &file) => size;
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
lfs_file_tell(&lfs, &file) => 0;
/* Chop off the last quarter */
lfs_size_t trunc = size - qsize;
lfs_file_truncate(&lfs, &file, trunc) => 0;
lfs_file_tell(&lfs, &file) => 0;
lfs_file_size(&lfs, &file) => trunc;
/* Read should produce first 3/4 */
lfs_file_read(&lfs, &file, rb, size) => trunc;
memcmp(rb, wb, trunc) => 0;
/* Move to 1/4 */
lfs_file_size(&lfs, &file) => trunc;
lfs_file_seek(&lfs, &file, qsize, LFS_SEEK_SET) => qsize;
lfs_file_tell(&lfs, &file) => qsize;
/* Chop to 1/2 */
trunc -= qsize;
lfs_file_truncate(&lfs, &file, trunc) => 0;
lfs_file_tell(&lfs, &file) => qsize;
lfs_file_size(&lfs, &file) => trunc;
/* Read should produce second quarter */
lfs_file_read(&lfs, &file, rb, size) => trunc - qsize;
memcmp(rb, wb + qsize, trunc - qsize) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # truncate and write
define.MEDIUMSIZE = [32, 2048]
define.LARGESIZE = 8192
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldywrite",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
strcpy((char*)buffer, "bald");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
size = strlen("bald");
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "bald", size) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # truncate write under powerloss
define.SMALLSIZE = [4, 512]
define.MEDIUMSIZE = [32, 1024]
define.LARGESIZE = 2048
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
}
err = lfs_file_open(&lfs, &file, "baldy", LFS_O_RDONLY);
assert(!err || err == LFS_ERR_NOENT);
if (!err) {
size = lfs_file_size(&lfs, &file);
assert(size == 0 ||
size == LARGESIZE ||
size == MEDIUMSIZE ||
size == SMALLSIZE);
for (lfs_off_t j = 0; j < size; j += 4) {
lfs_file_read(&lfs, &file, buffer, 4) => 4;
assert(memcmp(buffer, "hair", 4) == 0 ||
memcmp(buffer, "bald", 4) == 0 ||
memcmp(buffer, "comb", 4) == 0);
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_file_open(&lfs, &file, "baldy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
lfs_file_size(&lfs, &file) => 0;
strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "baldy", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
strcpy((char*)buffer, "bald");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "baldy", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
lfs_file_truncate(&lfs, &file, SMALLSIZE) => 0;
lfs_file_size(&lfs, &file) => SMALLSIZE;
strcpy((char*)buffer, "comb");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < SMALLSIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_size(&lfs, &file) => SMALLSIZE;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
[[case]] # more aggressive general truncation tests
define.CONFIG = 'range(6)'
define.SMALLSIZE = 32
define.MEDIUMSIZE = 2048
define.LARGESIZE = 8192
code = '''
#define COUNT 5
const struct {
lfs_off_t startsizes[COUNT];
lfs_off_t startseeks[COUNT];
lfs_off_t hotsizes[COUNT];
lfs_off_t coldsizes[COUNT];
} configs[] = {
// cold shrinking
{{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE}},
// cold expanding
{{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE}},
// warm shrinking truncate
{{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
{ 0, 0, 0, 0, 0}},
// warm expanding truncate
{{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE}},
// mid-file shrinking truncate
{{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
{ LARGESIZE, LARGESIZE, LARGESIZE, LARGESIZE, LARGESIZE},
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
{ 0, 0, 0, 0, 0}},
// mid-file expanding truncate
{{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
{ 0, 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE},
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE}},
};
const lfs_off_t *startsizes = configs[CONFIG].startsizes;
const lfs_off_t *startseeks = configs[CONFIG].startseeks;
const lfs_off_t *hotsizes = configs[CONFIG].hotsizes;
const lfs_off_t *coldsizes = configs[CONFIG].coldsizes;
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (unsigned i = 0; i < COUNT; i++) {
sprintf(path, "hairyhead%d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < startsizes[i]; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
}
lfs_file_size(&lfs, &file) => startsizes[i];
if (startseeks[i] != startsizes[i]) {
lfs_file_seek(&lfs, &file,
startseeks[i], LFS_SEEK_SET) => startseeks[i];
}
lfs_file_truncate(&lfs, &file, hotsizes[i]) => 0;
lfs_file_size(&lfs, &file) => hotsizes[i];
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (unsigned i = 0; i < COUNT; i++) {
sprintf(path, "hairyhead%d", i);
lfs_file_open(&lfs, &file, path, LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => hotsizes[i];
size = strlen("hair");
lfs_off_t j = 0;
for (; j < startsizes[i] && j < hotsizes[i]; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
for (; j < hotsizes[i]; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "\0\0\0\0", size) => 0;
}
lfs_file_truncate(&lfs, &file, coldsizes[i]) => 0;
lfs_file_size(&lfs, &file) => coldsizes[i];
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
for (unsigned i = 0; i < COUNT; i++) {
sprintf(path, "hairyhead%d", i);
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => coldsizes[i];
size = strlen("hair");
lfs_off_t j = 0;
for (; j < startsizes[i] && j < hotsizes[i] && j < coldsizes[i];
j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
for (; j < coldsizes[i]; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "\0\0\0\0", size) => 0;
}
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
'''
[[case]] # noop truncate
define.MEDIUMSIZE = [32, 2048]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop",
LFS_O_RDWR | LFS_O_CREAT) => 0;
strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;
// this truncate should do nothing
lfs_file_truncate(&lfs, &file, j+size) => 0;
}
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
// should do nothing again
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// still there after reboot?
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''