Mirror of https://github.com/eledio-devices/thirdparty-littlefs.git (synced 2025-11-01 16:14:13 +01:00)

Compare commits: more-scrip...config-imp (11 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 5f885f0af1 | |
| | fe42d102a5 | |
| | 499083765c | |
| | aa46bb68ca | |
| | 190eb833a2 | |
| | 30ed816feb | |
| | a7cdd563f6 | |
| | a549413077 | |
| | 3f6f88778a | |
| | c44427f9ec | |
| | ef9ba2d912 | |
26 .github/workflows/post-release.yml (vendored)
@@ -1,26 +0,0 @@
name: post-release
on:
  release:
    branches: [master]
    types: [released]

jobs:
  post-release:
    runs-on: ubuntu-20.04
    steps:
      # trigger post-release in dependency repo, this indirection allows the
      # dependency repo to be updated often without affecting this repo. At
      # the time of this comment, the dependency repo is responsible for
      # creating PRs for other dependent repos post-release.
      - name: trigger-post-release
        continue-on-error: true
        run: |
          curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
            "$GITHUB_API_URL/repos/${{secrets.POST_RELEASE_REPO}}/dispatches" \
            -d "$(jq -n '{
              event_type: "post-release",
              client_payload: {
                repo: env.GITHUB_REPOSITORY,
                version: "${{github.event.release.tag_name}}"}}' \
              | tee /dev/stderr)"
196 .github/workflows/release.yml (vendored)
@@ -1,196 +0,0 @@
|
||||
name: release
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: [test]
|
||||
branches: [master]
|
||||
types: [completed]
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
# need to manually check for a couple things
|
||||
# - tests passed?
|
||||
# - we are the most recent commit on master?
|
||||
if: ${{github.event.workflow_run.conclusion == 'success' &&
|
||||
github.event.workflow_run.head_sha == github.sha}}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{github.event.workflow_run.head_sha}}
|
||||
# need workflow access since we push branches
|
||||
# containing workflows
|
||||
token: ${{secrets.BOT_TOKEN}}
|
||||
# need all tags
|
||||
fetch-depth: 0
|
||||
|
||||
# try to get results from tests
|
||||
- uses: dawidd6/action-download-artifact@v2
|
||||
continue-on-error: true
|
||||
with:
|
||||
workflow: ${{github.event.workflow_run.name}}
|
||||
run_id: ${{github.event.workflow_run.id}}
|
||||
name: results
|
||||
path: results
|
||||
|
||||
- name: find-version
|
||||
run: |
|
||||
# rip version from lfs.h
|
||||
LFS_VERSION="$(grep -o '^#define LFS_VERSION .*$' lfs.h \
|
||||
| awk '{print $3}')"
|
||||
LFS_VERSION_MAJOR="$((0xffff & ($LFS_VERSION >> 16)))"
|
||||
LFS_VERSION_MINOR="$((0xffff & ($LFS_VERSION >> 0)))"
|
||||
|
||||
# find a new patch version based on what we find in our tags
|
||||
LFS_VERSION_PATCH="$( \
|
||||
( git describe --tags --abbrev=0 \
|
||||
--match="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.*" \
|
||||
|| echo 'v0.0.-1' ) \
|
||||
| awk -F '.' '{print $3+1}')"
|
||||
|
||||
# found new version
|
||||
LFS_VERSION="v$LFS_VERSION_MAJOR`
|
||||
`.$LFS_VERSION_MINOR`
|
||||
`.$LFS_VERSION_PATCH"
|
||||
echo "LFS_VERSION=$LFS_VERSION"
|
||||
echo "LFS_VERSION=$LFS_VERSION" >> $GITHUB_ENV
|
||||
echo "LFS_VERSION_MAJOR=$LFS_VERSION_MAJOR" >> $GITHUB_ENV
|
||||
echo "LFS_VERSION_MINOR=$LFS_VERSION_MINOR" >> $GITHUB_ENV
|
||||
echo "LFS_VERSION_PATCH=$LFS_VERSION_PATCH" >> $GITHUB_ENV
|
||||
|
||||
# try to find previous version?
|
||||
- name: find-prev-version
|
||||
continue-on-error: true
|
||||
run: |
|
||||
LFS_PREV_VERSION="$(git describe --tags --abbrev=0 --match 'v*')"
|
||||
echo "LFS_PREV_VERSION=$LFS_PREV_VERSION"
|
||||
echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" >> $GITHUB_ENV
|
||||
|
||||
# try to find results from tests
|
||||
- name: collect-results
|
||||
run: |
|
||||
# previous results to compare against?
|
||||
[ -n "$LFS_PREV_VERSION" ] && curl -sS \
|
||||
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/`
|
||||
`status/$LFS_PREV_VERSION?per_page=100" \
|
||||
| jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
|
||||
>> prev-results.json \
|
||||
|| true
|
||||
|
||||
# build table for GitHub
|
||||
echo "<table>" >> results.txt
|
||||
echo "<thead>" >> results.txt
|
||||
echo "<tr>" >> results.txt
|
||||
echo "<th align=left>Configuration</th>" >> results.txt
|
||||
for r in Code Stack Structs Coverage
|
||||
do
|
||||
echo "<th align=right>$r</th>" >> results.txt
|
||||
done
|
||||
echo "</tr>" >> results.txt
|
||||
echo "</thead>" >> results.txt
|
||||
|
||||
echo "<tbody>" >> results.txt
|
||||
for c in "" readonly threadsafe migrate error-asserts
|
||||
do
|
||||
echo "<tr>" >> results.txt
|
||||
c_or_default=${c:-default}
|
||||
echo "<td align=left>${c_or_default^}</td>" >> results.txt
|
||||
for r in code stack structs
|
||||
do
|
||||
# per-config results
|
||||
echo "<td align=right>" >> results.txt
|
||||
[ -e results/thumb${c:+-$c}.csv ] && ( \
|
||||
export PREV="$(jq -re '
|
||||
select(.context == "'"results (thumb${c:+, $c}) / $r"'").description
|
||||
| capture("(?<result>[0-9∞]+)").result' \
|
||||
prev-results.json || echo 0)"
|
||||
./scripts/summary.py results/thumb${c:+-$c}.csv -f $r -Y | awk '
|
||||
NR==2 {printf "%s B",$2}
|
||||
NR==2 && ENVIRON["PREV"]+0 != 0 {
|
||||
printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
|
||||
NR==2 {printf "\n"}' \
|
||||
| sed -e 's/ /\ /g' \
|
||||
>> results.txt)
|
||||
echo "</td>" >> results.txt
|
||||
done
|
||||
# coverage results
|
||||
if [ -z $c ]
|
||||
then
|
||||
echo "<td rowspan=0 align=right>" >> results.txt
|
||||
[ -e results/coverage.csv ] && ( \
|
||||
export PREV="$(jq -re '
|
||||
select(.context == "results / coverage").description
|
||||
| capture("(?<result>[0-9\\.]+)").result' \
|
||||
prev-results.json || echo 0)"
|
||||
./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
|
||||
NR==2 {printf "%.1f%% of %d lines",$4,$3}
|
||||
NR==2 && ENVIRON["PREV"]+0 != 0 {
|
||||
printf " (%+.1f%%)",$4-ENVIRON["PREV"]}
|
||||
NR==2 {printf "\n"}' \
|
||||
| sed -e 's/ /\ /g' \
|
||||
>> results.txt)
|
||||
echo "</td>" >> results.txt
|
||||
fi
|
||||
echo "</tr>" >> results.txt
|
||||
done
|
||||
echo "</tbody>" >> results.txt
|
||||
echo "</table>" >> results.txt
|
||||
|
||||
cat results.txt
|
||||
|
||||
# find changes from history
|
||||
- name: collect-changes
|
||||
run: |
|
||||
[ -n "$LFS_PREV_VERSION" ] || exit 0
|
||||
# use explicit link to github commit so that release notes can
|
||||
# be copied elsewhere
|
||||
git log "$LFS_PREV_VERSION.." \
|
||||
--grep='^Merge' --invert-grep \
|
||||
--format="format:[\`%h\`](`
|
||||
`https://github.com/$GITHUB_REPOSITORY/commit/%h) %s" \
|
||||
> changes.txt
|
||||
echo "CHANGES:"
|
||||
cat changes.txt
|
||||
|
||||
# create and update major branches (vN and vN-prefix)
|
||||
- name: create-major-branches
|
||||
run: |
|
||||
# create major branch
|
||||
git branch "v$LFS_VERSION_MAJOR" HEAD
|
||||
|
||||
# create major prefix branch
|
||||
git config user.name ${{secrets.BOT_USER}}
|
||||
git config user.email ${{secrets.BOT_EMAIL}}
|
||||
git fetch "https://github.com/$GITHUB_REPOSITORY.git" \
|
||||
"v$LFS_VERSION_MAJOR-prefix" || true
|
||||
./scripts/prefix.py "lfs$LFS_VERSION_MAJOR"
|
||||
git branch "v$LFS_VERSION_MAJOR-prefix" $( \
|
||||
git commit-tree $(git write-tree) \
|
||||
$(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
|
||||
-p HEAD \
|
||||
-m "Generated v$LFS_VERSION_MAJOR prefixes")
|
||||
git reset --hard
|
||||
|
||||
# push!
|
||||
git push --atomic origin \
|
||||
"v$LFS_VERSION_MAJOR" \
|
||||
"v$LFS_VERSION_MAJOR-prefix"
|
||||
|
||||
# build release notes
|
||||
- name: create-release
|
||||
run: |
|
||||
# create release and patch version tag (vN.N.N)
|
||||
# only draft if not a patch release
|
||||
[ -e results.txt ] && export RESULTS="$(cat results.txt)"
|
||||
[ -e changes.txt ] && export CHANGES="$(cat changes.txt)"
|
||||
curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
|
||||
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \
|
||||
-d "$(jq -n '{
|
||||
tag_name: env.LFS_VERSION,
|
||||
name: env.LFS_VERSION | rtrimstr(".0"),
|
||||
target_commitish: "${{github.event.workflow_run.head_sha}}",
|
||||
draft: env.LFS_VERSION | endswith(".0"),
|
||||
body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \
|
||||
| tee /dev/stderr)"
|
||||
|
||||
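The release workflow above (and the .travis.yml deploy stage later in this diff) derives the release tag by ripping `LFS_VERSION` out of lfs.h and splitting it with shell arithmetic: the major version lives in the upper 16 bits and the minor version in the lower 16 bits. The same decoding, shown as a small self-contained C sketch for clarity (the 0x00020004 value is only an illustrative placeholder, not taken from this diff):

``` c
#include <stdio.h>

// Placeholder version word: major in the upper 16 bits, minor in the
// lower 16 bits, mirroring the workflow's $((0xffff & (LFS_VERSION >> 16)))
// and $((0xffff & (LFS_VERSION >> 0))) shell arithmetic.
#define LFS_VERSION 0x00020004

int main(void) {
    unsigned major = 0xffff & (LFS_VERSION >> 16);
    unsigned minor = 0xffff & (LFS_VERSION >> 0);
    printf("v%u.%u\n", major, minor); // prints "v2.4" for this placeholder
    return 0;
}
```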
55 .github/workflows/status.yml (vendored)
@@ -1,55 +0,0 @@
name: status
on:
  workflow_run:
    workflows: [test]
    types: [completed]

jobs:
  status:
    runs-on: ubuntu-20.04
    steps:
      # custom statuses?
      - uses: dawidd6/action-download-artifact@v2
        continue-on-error: true
        with:
          workflow: ${{github.event.workflow_run.name}}
          run_id: ${{github.event.workflow_run.id}}
          name: status
          path: status
      - name: update-status
        continue-on-error: true
        run: |
          ls status
          for s in $(shopt -s nullglob ; echo status/*.json)
          do
            # parse requested status
            export STATE="$(jq -er '.state' $s)"
            export CONTEXT="$(jq -er '.context' $s)"
            export DESCRIPTION="$(jq -er '.description' $s)"
            # help lookup URL for job/steps because GitHub makes
            # it VERY HARD to link to specific jobs
            export TARGET_URL="$(
              jq -er '.target_url // empty' $s || (
                export TARGET_JOB="$(jq -er '.target_job' $s)"
                export TARGET_STEP="$(jq -er '.target_step // ""' $s)"
                curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \
                  "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/`
                    `${{github.event.workflow_run.id}}/jobs" \
                  | jq -er '.jobs[]
                    | select(.name == env.TARGET_JOB)
                    | .html_url
                      + "?check_suite_focus=true"
                      + ((.steps[]
                        | select(.name == env.TARGET_STEP)
                        | "#step:\(.number):0") // "")'))"
            # update status
            curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/`
                `${{github.event.workflow_run.head_sha}}" \
              -d "$(jq -n '{
                state: env.STATE,
                context: env.CONTEXT,
                description: env.DESCRIPTION,
                target_url: env.TARGET_URL}' \
              | tee /dev/stderr)"
          done
472 .github/workflows/test.yml (vendored)
@@ -1,472 +0,0 @@
|
||||
name: test
|
||||
on: [push, pull_request]
|
||||
|
||||
env:
|
||||
CFLAGS: -Werror
|
||||
MAKEFLAGS: -j
|
||||
|
||||
jobs:
|
||||
# run tests
|
||||
test:
|
||||
runs-on: ubuntu-20.04
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
arch: [x86_64, thumb, mips, powerpc]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install
|
||||
run: |
|
||||
# need a few additional tools
|
||||
#
|
||||
# note this includes gcc-10, which is required for -fcallgraph-info=su
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq gcc-10 python3 python3-pip lcov
|
||||
sudo pip3 install toml
|
||||
echo "CC=gcc-10" >> $GITHUB_ENV
|
||||
gcc-10 --version
|
||||
lcov --version
|
||||
python3 --version
|
||||
|
||||
# need newer lcov version for gcc-10
|
||||
#sudo apt-get remove lcov
|
||||
#wget https://launchpad.net/ubuntu/+archive/primary/+files/lcov_1.15-1_all.deb
|
||||
#sudo apt install ./lcov_1.15-1_all.deb
|
||||
#lcov --version
|
||||
#which lcov
|
||||
#ls -lha /usr/bin/lcov
|
||||
wget https://github.com/linux-test-project/lcov/releases/download/v1.15/lcov-1.15.tar.gz
|
||||
tar xf lcov-1.15.tar.gz
|
||||
sudo make -C lcov-1.15 install
|
||||
|
||||
# setup a ram-backed disk to speed up reentrant tests
|
||||
mkdir disks
|
||||
sudo mount -t tmpfs -o size=100m tmpfs disks
|
||||
TESTFLAGS="$TESTFLAGS --disk=disks/disk"
|
||||
|
||||
# collect coverage
|
||||
mkdir -p coverage
|
||||
TESTFLAGS="$TESTFLAGS --coverage=`
|
||||
`coverage/${{github.job}}-${{matrix.arch}}.info"
|
||||
|
||||
echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV
|
||||
|
||||
# cross-compile with ARM Thumb (32-bit, little-endian)
|
||||
- name: install-thumb
|
||||
if: ${{matrix.arch == 'thumb'}}
|
||||
run: |
|
||||
sudo apt-get install -qq \
|
||||
gcc-10-arm-linux-gnueabi \
|
||||
libc6-dev-armel-cross \
|
||||
qemu-user
|
||||
echo "CC=arm-linux-gnueabi-gcc-10 -mthumb --static" >> $GITHUB_ENV
|
||||
echo "EXEC=qemu-arm" >> $GITHUB_ENV
|
||||
arm-linux-gnueabi-gcc-10 --version
|
||||
qemu-arm -version
|
||||
# cross-compile with MIPS (32-bit, big-endian)
|
||||
- name: install-mips
|
||||
if: ${{matrix.arch == 'mips'}}
|
||||
run: |
|
||||
sudo apt-get install -qq \
|
||||
gcc-10-mips-linux-gnu \
|
||||
libc6-dev-mips-cross \
|
||||
qemu-user
|
||||
echo "CC=mips-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
|
||||
echo "EXEC=qemu-mips" >> $GITHUB_ENV
|
||||
mips-linux-gnu-gcc-10 --version
|
||||
qemu-mips -version
|
||||
# cross-compile with PowerPC (32-bit, big-endian)
|
||||
- name: install-powerpc
|
||||
if: ${{matrix.arch == 'powerpc'}}
|
||||
run: |
|
||||
sudo apt-get install -qq \
|
||||
gcc-10-powerpc-linux-gnu \
|
||||
libc6-dev-powerpc-cross \
|
||||
qemu-user
|
||||
echo "CC=powerpc-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
|
||||
echo "EXEC=qemu-ppc" >> $GITHUB_ENV
|
||||
powerpc-linux-gnu-gcc-10 --version
|
||||
qemu-ppc -version
|
||||
|
||||
# make sure example can at least compile
|
||||
- name: test-example
|
||||
run: |
|
||||
sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
|
||||
make all CFLAGS+=" \
|
||||
-Duser_provided_block_device_read=NULL \
|
||||
-Duser_provided_block_device_prog=NULL \
|
||||
-Duser_provided_block_device_erase=NULL \
|
||||
-Duser_provided_block_device_sync=NULL \
|
||||
-include stdio.h"
|
||||
rm test.c
|
||||
|
||||
# test configurations
|
||||
# normal+reentrant tests
|
||||
- name: test-default
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk"
|
||||
# NOR flash: read/prog = 1 block = 4KiB
|
||||
- name: test-nor
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
|
||||
# SD/eMMC: read/prog = 512 block = 512
|
||||
- name: test-emmc
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
|
||||
# NAND flash: read/prog = 4KiB block = 32KiB
|
||||
- name: test-nand
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
|
||||
# other extreme geometries that are useful for various corner cases
|
||||
- name: test-no-intrinsics
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_NO_INTRINSICS"
|
||||
- name: test-byte-writes
|
||||
# it just takes too long to test byte-level writes when in qemu,
|
||||
# should be plenty covered by the other configurations
|
||||
if: ${{matrix.arch == 'x86_64'}}
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
|
||||
- name: test-block-cycles
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_BLOCK_CYCLES=1"
|
||||
- name: test-odd-block-count
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
|
||||
- name: test-odd-block-size
|
||||
run: |
|
||||
make clean
|
||||
make test TESTFLAGS+="-nrk \
|
||||
-DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
|
||||
|
||||
# upload coverage for later coverage
|
||||
- name: upload-coverage
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: coverage
|
||||
path: coverage
|
||||
retention-days: 1
|
||||
|
||||
# update results
|
||||
- name: results
|
||||
run: |
|
||||
mkdir -p results
|
||||
make clean
|
||||
make lfs.csv \
|
||||
CFLAGS+=" \
|
||||
-DLFS_NO_ASSERT \
|
||||
-DLFS_NO_DEBUG \
|
||||
-DLFS_NO_WARN \
|
||||
-DLFS_NO_ERROR"
|
||||
cp lfs.csv results/${{matrix.arch}}.csv
|
||||
./scripts/summary.py results/${{matrix.arch}}.csv
|
||||
- name: results-readonly
|
||||
run: |
|
||||
mkdir -p results
|
||||
make clean
|
||||
make lfs.csv \
|
||||
CFLAGS+=" \
|
||||
-DLFS_NO_ASSERT \
|
||||
-DLFS_NO_DEBUG \
|
||||
-DLFS_NO_WARN \
|
||||
-DLFS_NO_ERROR \
|
||||
-DLFS_READONLY"
|
||||
cp lfs.csv results/${{matrix.arch}}-readonly.csv
|
||||
./scripts/summary.py results/${{matrix.arch}}-readonly.csv
|
||||
- name: results-threadsafe
|
||||
run: |
|
||||
mkdir -p results
|
||||
make clean
|
||||
make lfs.csv \
|
||||
CFLAGS+=" \
|
||||
-DLFS_NO_ASSERT \
|
||||
-DLFS_NO_DEBUG \
|
||||
-DLFS_NO_WARN \
|
||||
-DLFS_NO_ERROR \
|
||||
-DLFS_THREADSAFE"
|
||||
cp lfs.csv results/${{matrix.arch}}-threadsafe.csv
|
||||
./scripts/summary.py results/${{matrix.arch}}-threadsafe.csv
|
||||
- name: results-migrate
|
||||
run: |
|
||||
mkdir -p results
|
||||
make clean
|
||||
make lfs.csv \
|
||||
CFLAGS+=" \
|
||||
-DLFS_NO_ASSERT \
|
||||
-DLFS_NO_DEBUG \
|
||||
-DLFS_NO_WARN \
|
||||
-DLFS_NO_ERROR \
|
||||
-DLFS_MIGRATE"
|
||||
cp lfs.csv results/${{matrix.arch}}-migrate.csv
|
||||
./scripts/summary.py results/${{matrix.arch}}-migrate.csv
|
||||
- name: results-error-asserts
|
||||
run: |
|
||||
mkdir -p results
|
||||
make clean
|
||||
make lfs.csv \
|
||||
CFLAGS+=" \
|
||||
-DLFS_NO_DEBUG \
|
||||
-DLFS_NO_WARN \
|
||||
-DLFS_NO_ERROR \
|
||||
-D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'"
|
||||
cp lfs.csv results/${{matrix.arch}}-error-asserts.csv
|
||||
./scripts/summary.py results/${{matrix.arch}}-error-asserts.csv
|
||||
- name: upload-results
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: results
|
||||
path: results
|
||||
|
||||
# create statuses with results
|
||||
- name: collect-status
|
||||
run: |
|
||||
mkdir -p status
|
||||
for f in $(shopt -s nullglob ; echo results/*.csv)
|
||||
do
|
||||
export STEP="results$(
|
||||
echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p')"
|
||||
for r in code stack structs
|
||||
do
|
||||
export CONTEXT="results (${{matrix.arch}}$(
|
||||
echo $f | sed -n 's/[^-]*-\(.*\).csv/, \1/p')) / $r"
|
||||
export PREV="$(curl -sS \
|
||||
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
|
||||
| jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
|
||||
| select(.context == env.CONTEXT).description
|
||||
| capture("(?<result>[0-9∞]+)").result' \
|
||||
|| echo 0)"
|
||||
export DESCRIPTION="$(./scripts/summary.py $f -f $r -Y | awk '
|
||||
NR==2 {printf "%s B",$2}
|
||||
NR==2 && ENVIRON["PREV"]+0 != 0 {
|
||||
printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
|
||||
jq -n '{
|
||||
state: "success",
|
||||
context: env.CONTEXT,
|
||||
description: env.DESCRIPTION,
|
||||
target_job: "${{github.job}} (${{matrix.arch}})",
|
||||
target_step: env.STEP}' \
|
||||
| tee status/$r-${{matrix.arch}}$(
|
||||
echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p').json
|
||||
done
|
||||
done
|
||||
- name: upload-status
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: status
|
||||
path: status
|
||||
retention-days: 1
|
||||
|
||||
# run under Valgrind to check for memory errors
|
||||
valgrind:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install
|
||||
run: |
|
||||
# need toml, also pip3 isn't installed by default?
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq python3 python3-pip
|
||||
sudo pip3 install toml
|
||||
- name: install-valgrind
|
||||
run: |
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq valgrind
|
||||
valgrind --version
|
||||
# normal tests, we don't need to test all geometries
|
||||
- name: test-valgrind
|
||||
run: make test TESTFLAGS+="-k --valgrind"
|
||||
|
||||
# self-host with littlefs-fuse for a fuzz-like test
|
||||
fuse:
|
||||
runs-on: ubuntu-20.04
|
||||
if: ${{!endsWith(github.ref, '-prefix')}}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install
|
||||
run: |
|
||||
# need toml, also pip3 isn't installed by default?
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq python3 python3-pip libfuse-dev
|
||||
sudo pip3 install toml
|
||||
fusermount -V
|
||||
gcc --version
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: littlefs-project/littlefs-fuse
|
||||
ref: v2
|
||||
path: littlefs-fuse
|
||||
- name: setup
|
||||
run: |
|
||||
# copy our new version into littlefs-fuse
|
||||
rm -rf littlefs-fuse/littlefs/*
|
||||
cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
|
||||
|
||||
# setup disk for littlefs-fuse
|
||||
mkdir mount
|
||||
LOOP=$(sudo losetup -f)
|
||||
sudo chmod a+rw $LOOP
|
||||
dd if=/dev/zero bs=512 count=128K of=disk
|
||||
losetup $LOOP disk
|
||||
echo "LOOP=$LOOP" >> $GITHUB_ENV
|
||||
- name: test
|
||||
run: |
|
||||
# self-host test
|
||||
make -C littlefs-fuse
|
||||
|
||||
littlefs-fuse/lfs --format $LOOP
|
||||
littlefs-fuse/lfs $LOOP mount
|
||||
|
||||
ls mount
|
||||
mkdir mount/littlefs
|
||||
cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
cd mount/littlefs
|
||||
stat .
|
||||
ls -flh
|
||||
make -B test
|
||||
|
||||
# test migration using littlefs-fuse
|
||||
migrate:
|
||||
runs-on: ubuntu-20.04
|
||||
if: ${{!endsWith(github.ref, '-prefix')}}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install
|
||||
run: |
|
||||
# need toml, also pip3 isn't installed by default?
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq python3 python3-pip libfuse-dev
|
||||
sudo pip3 install toml
|
||||
fusermount -V
|
||||
gcc --version
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: littlefs-project/littlefs-fuse
|
||||
ref: v2
|
||||
path: v2
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: littlefs-project/littlefs-fuse
|
||||
ref: v1
|
||||
path: v1
|
||||
- name: setup
|
||||
run: |
|
||||
# copy our new version into littlefs-fuse
|
||||
rm -rf v2/littlefs/*
|
||||
cp -r $(git ls-tree --name-only HEAD) v2/littlefs
|
||||
|
||||
# setup disk for littlefs-fuse
|
||||
mkdir mount
|
||||
LOOP=$(sudo losetup -f)
|
||||
sudo chmod a+rw $LOOP
|
||||
dd if=/dev/zero bs=512 count=128K of=disk
|
||||
losetup $LOOP disk
|
||||
echo "LOOP=$LOOP" >> $GITHUB_ENV
|
||||
- name: test
|
||||
run: |
|
||||
# compile v1 and v2
|
||||
make -C v1
|
||||
make -C v2
|
||||
|
||||
# run self-host test with v1
|
||||
v1/lfs --format $LOOP
|
||||
v1/lfs $LOOP mount
|
||||
|
||||
ls mount
|
||||
mkdir mount/littlefs
|
||||
cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
cd mount/littlefs
|
||||
stat .
|
||||
ls -flh
|
||||
make -B test
|
||||
|
||||
# attempt to migrate
|
||||
cd ../..
|
||||
fusermount -u mount
|
||||
|
||||
v2/lfs --migrate $LOOP
|
||||
v2/lfs $LOOP mount
|
||||
|
||||
# run self-host test with v2 right where we left off
|
||||
ls mount
|
||||
cd mount/littlefs
|
||||
stat .
|
||||
ls -flh
|
||||
make -B test
|
||||
|
||||
# collect coverage info
|
||||
coverage:
|
||||
runs-on: ubuntu-20.04
|
||||
needs: [test]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: install
|
||||
run: |
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -qq python3 python3-pip lcov
|
||||
sudo pip3 install toml
|
||||
# yes we continue-on-error nearly every step, continue-on-error
|
||||
# at job level apparently still marks a job as failed, which isn't
|
||||
# what we want
|
||||
- uses: actions/download-artifact@v2
|
||||
continue-on-error: true
|
||||
with:
|
||||
name: coverage
|
||||
path: coverage
|
||||
- name: results-coverage
|
||||
continue-on-error: true
|
||||
run: |
|
||||
mkdir -p results
|
||||
lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \
|
||||
-o results/coverage.info
|
||||
./scripts/coverage.py results/coverage.info -o results/coverage.csv
|
||||
- name: upload-results
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: results
|
||||
path: results
|
||||
- name: collect-status
|
||||
run: |
|
||||
mkdir -p status
|
||||
[ -e results/coverage.csv ] || exit 0
|
||||
export STEP="results-coverage"
|
||||
export CONTEXT="results / coverage"
|
||||
export PREV="$(curl -sS \
|
||||
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
|
||||
| jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
|
||||
| select(.context == env.CONTEXT).description
|
||||
| capture("(?<result>[0-9\\.]+)").result' \
|
||||
|| echo 0)"
|
||||
export DESCRIPTION="$(
|
||||
./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
|
||||
NR==2 {printf "%.1f%% of %d lines",$4,$3}
|
||||
NR==2 && ENVIRON["PREV"]+0 != 0 {
|
||||
printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
|
||||
jq -n '{
|
||||
state: "success",
|
||||
context: env.CONTEXT,
|
||||
description: env.DESCRIPTION,
|
||||
target_job: "${{github.job}}",
|
||||
target_step: env.STEP}' \
|
||||
| tee status/coverage.json
|
||||
- name: upload-status
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: status
|
||||
path: status
|
||||
retention-days: 1
|
||||
2 .gitignore (vendored)
@@ -2,8 +2,6 @@
*.o
*.d
*.a
*.ci
*.csv

# Testing things
blocks/
460 .travis.yml (Normal file)
@@ -0,0 +1,460 @@
|
||||
# environment variables
|
||||
env:
|
||||
global:
|
||||
- CFLAGS=-Werror
|
||||
- MAKEFLAGS=-j
|
||||
|
||||
# cache installation dirs
|
||||
cache:
|
||||
pip: true
|
||||
directories:
|
||||
- $HOME/.cache/apt
|
||||
|
||||
# common installation
|
||||
_: &install-common
|
||||
# need toml, also pip3 isn't installed by default?
|
||||
- sudo apt-get install python3 python3-pip
|
||||
- sudo pip3 install toml
|
||||
# setup a ram-backed disk to speed up reentrant tests
|
||||
- mkdir disks
|
||||
- sudo mount -t tmpfs -o size=100m tmpfs disks
|
||||
- export TFLAGS="$TFLAGS --disk=disks/disk"
|
||||
|
||||
# test cases
|
||||
_: &test-example
|
||||
# make sure example can at least compile
|
||||
- sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c &&
|
||||
make all CFLAGS+="
|
||||
-Duser_provided_block_device_read=NULL
|
||||
-Duser_provided_block_device_prog=NULL
|
||||
-Duser_provided_block_device_erase=NULL
|
||||
-Duser_provided_block_device_sync=NULL
|
||||
-include stdio.h"
|
||||
# default tests
|
||||
_: &test-default
|
||||
# normal+reentrant tests
|
||||
- make test TFLAGS+="-nrk"
|
||||
# common real-life geometries
|
||||
_: &test-nor
|
||||
# NOR flash: read/prog = 1 block = 4KiB
|
||||
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
|
||||
_: &test-emmc
|
||||
# eMMC: read/prog = 512 block = 512
|
||||
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
|
||||
_: &test-nand
|
||||
# NAND flash: read/prog = 4KiB block = 32KiB
|
||||
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
|
||||
# other extreme geometries that are useful for testing various corner cases
|
||||
_: &test-no-intrinsics
|
||||
- make test TFLAGS+="-nrk -DLFS_NO_INTRINSICS"
|
||||
_: &test-no-inline
|
||||
- make test TFLAGS+="-nrk -DLFS_INLINE_MAX=0"
|
||||
_: &test-byte-writes
|
||||
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_BUFFER_SIZE=1"
|
||||
_: &test-block-cycles
|
||||
- make test TFLAGS+="-nrk -DLFS_BLOCK_CYCLES=1"
|
||||
_: &test-odd-block-count
|
||||
- make test TFLAGS+="-nrk -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
|
||||
_: &test-odd-block-size
|
||||
- make test TFLAGS+="-nrk -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
|
||||
|
||||
# report size
|
||||
_: &report-size
|
||||
# compile and find the code size with the smallest configuration
|
||||
- make -j1 clean size
|
||||
OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')"
|
||||
CFLAGS+="-DLFS_NO_ASSERT -DLFS_NO_DEBUG -DLFS_NO_WARN -DLFS_NO_ERROR"
|
||||
| tee sizes
|
||||
# update status if we succeeded, compare with master if possible
|
||||
- |
|
||||
if [ "$TRAVIS_TEST_RESULT" -eq 0 ]
|
||||
then
|
||||
CURR=$(tail -n1 sizes | awk '{print $1}')
|
||||
PREV=$(curl -u "$GEKY_BOT_STATUSES" https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \
|
||||
| jq -re "select(.sha != \"$TRAVIS_COMMIT\")
|
||||
| .statuses[] | select(.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\").description
|
||||
| capture(\"code size is (?<size>[0-9]+)\").size" \
|
||||
|| echo 0)
|
||||
|
||||
STATUS="Passed, code size is ${CURR}B"
|
||||
if [ "$PREV" -ne 0 ]
|
||||
then
|
||||
STATUS="$STATUS ($(python -c "print '%+.2f' % (100*($CURR-$PREV)/$PREV.0)")%)"
|
||||
fi
|
||||
fi
|
||||
|
||||
# stage control
|
||||
stages:
|
||||
- name: test
|
||||
- name: deploy
|
||||
if: branch = master AND type = push
|
||||
|
||||
# job control
|
||||
jobs:
|
||||
# native testing
|
||||
- &x86
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-x86
|
||||
install: *install-common
|
||||
script: [*test-example, *report-size]
|
||||
- {<<: *x86, script: [*test-default, *report-size]}
|
||||
- {<<: *x86, script: [*test-nor, *report-size]}
|
||||
- {<<: *x86, script: [*test-emmc, *report-size]}
|
||||
- {<<: *x86, script: [*test-nand, *report-size]}
|
||||
- {<<: *x86, script: [*test-no-intrinsics, *report-size]}
|
||||
- {<<: *x86, script: [*test-no-inline, *report-size]}
|
||||
- {<<: *x86, script: [*test-byte-writes, *report-size]}
|
||||
- {<<: *x86, script: [*test-block-cycles, *report-size]}
|
||||
- {<<: *x86, script: [*test-odd-block-count, *report-size]}
|
||||
- {<<: *x86, script: [*test-odd-block-size, *report-size]}
|
||||
|
||||
# cross-compile with ARM (thumb mode)
|
||||
- &arm
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-arm
|
||||
- CC="arm-linux-gnueabi-gcc --static -mthumb"
|
||||
- TFLAGS="$TFLAGS --exec=qemu-arm"
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install
|
||||
gcc-arm-linux-gnueabi
|
||||
libc6-dev-armel-cross
|
||||
qemu-user
|
||||
- arm-linux-gnueabi-gcc --version
|
||||
- qemu-arm -version
|
||||
script: [*test-example, *report-size]
|
||||
- {<<: *arm, script: [*test-default, *report-size]}
|
||||
- {<<: *arm, script: [*test-nor, *report-size]}
|
||||
- {<<: *arm, script: [*test-emmc, *report-size]}
|
||||
- {<<: *arm, script: [*test-nand, *report-size]}
|
||||
- {<<: *arm, script: [*test-no-intrinsics, *report-size]}
|
||||
- {<<: *arm, script: [*test-no-inline, *report-size]}
|
||||
# it just takes way to long to run byte-level writes in qemu,
|
||||
# note this is still tested in the native tests
|
||||
#- {<<: *arm, script: [*test-byte-writes, *report-size]}
|
||||
- {<<: *arm, script: [*test-block-cycles, *report-size]}
|
||||
- {<<: *arm, script: [*test-odd-block-count, *report-size]}
|
||||
- {<<: *arm, script: [*test-odd-block-size, *report-size]}
|
||||
|
||||
# cross-compile with MIPS
|
||||
- &mips
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-mips
|
||||
- CC="mips-linux-gnu-gcc --static"
|
||||
- TFLAGS="$TFLAGS --exec=qemu-mips"
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install
|
||||
gcc-mips-linux-gnu
|
||||
libc6-dev-mips-cross
|
||||
qemu-user
|
||||
- mips-linux-gnu-gcc --version
|
||||
- qemu-mips -version
|
||||
script: [*test-example, *report-size]
|
||||
- {<<: *mips, script: [*test-default, *report-size]}
|
||||
- {<<: *mips, script: [*test-nor, *report-size]}
|
||||
- {<<: *mips, script: [*test-emmc, *report-size]}
|
||||
- {<<: *mips, script: [*test-nand, *report-size]}
|
||||
- {<<: *mips, script: [*test-no-intrinsics, *report-size]}
|
||||
- {<<: *mips, script: [*test-no-inline, *report-size]}
|
||||
# it just takes way to long to run byte-level writes in qemu,
|
||||
# note this is still tested in the native tests
|
||||
#- {<<: *mips, script: [*test-byte-writes, *report-size]}
|
||||
- {<<: *mips, script: [*test-block-cycles, *report-size]}
|
||||
- {<<: *mips, script: [*test-odd-block-count, *report-size]}
|
||||
- {<<: *mips, script: [*test-odd-block-size, *report-size]}
|
||||
|
||||
# cross-compile with PowerPC
|
||||
- &powerpc
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-powerpc
|
||||
- CC="powerpc-linux-gnu-gcc --static"
|
||||
- TFLAGS="$TFLAGS --exec=qemu-ppc"
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install
|
||||
gcc-powerpc-linux-gnu
|
||||
libc6-dev-powerpc-cross
|
||||
qemu-user
|
||||
- powerpc-linux-gnu-gcc --version
|
||||
- qemu-ppc -version
|
||||
script: [*test-example, *report-size]
|
||||
- {<<: *powerpc, script: [*test-default, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-nor, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-emmc, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-nand, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-no-intrinsics, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-no-inline, *report-size]}
|
||||
# it just takes way to long to run byte-level writes in qemu,
|
||||
# note this is still tested in the native tests
|
||||
#- {<<: *powerpc, script: [*test-byte-writes, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-block-cycles, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-odd-block-count, *report-size]}
|
||||
- {<<: *powerpc, script: [*test-odd-block-size, *report-size]}
|
||||
|
||||
# test under valgrind, checking for memory errors
|
||||
- &valgrind
|
||||
stage: test
|
||||
env:
|
||||
- NAME=littlefs-valgrind
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install valgrind
|
||||
- valgrind --version
|
||||
script:
|
||||
- make test TFLAGS+="-k --valgrind"
|
||||
|
||||
# test minimal compilation using static configs
|
||||
- stage: test
|
||||
env:
|
||||
- NAME=littlefs-minimal
|
||||
- CC="arm-linux-gnueabi-gcc --static -mthumb"
|
||||
- CFLAGS="-Werror
|
||||
-DLFS_BD_READ
|
||||
-DLFS_BD_PROG
|
||||
-DLFS_BD_ERASE
|
||||
-DLFS_BD_SYNC
|
||||
-DLFS_READ_SIZE=16
|
||||
-DLFS_PROG_SIZE=16
|
||||
-DLFS_BLOCK_SIZE=512
|
||||
-DLFS_BLOCK_COUNT=1024
|
||||
-DLFS_BLOCK_CYCLES=1024
|
||||
-DLFS_BUFFER_SIZE=64
|
||||
-DLFS_LOOKAHEAD_SIZE=16
|
||||
-DLFS_NAME_LIMIT=0
|
||||
-DLFS_FILE_LIMIT=0
|
||||
-DLFS_ATTR_LIMIT=0
|
||||
-DLFS_NO_ASSERT -DLFS_NO_DEBUG -DLFS_NO_WARN -DLFS_NO_ERROR"
|
||||
if: branch !~ -prefix$
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install
|
||||
gcc-arm-linux-gnueabi
|
||||
libc6-dev-armel-cross
|
||||
- arm-linux-gnueabi-gcc --version
|
||||
# report-size will compile littlefs and report the size
|
||||
script: [*report-size]
|
||||
|
||||
# self-host with littlefs-fuse for fuzz test
|
||||
- stage: test
|
||||
env:
|
||||
- NAME=littlefs-fuse
|
||||
if: branch !~ -prefix$
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install libfuse-dev
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2
|
||||
- fusermount -V
|
||||
- gcc --version
|
||||
|
||||
# setup disk for littlefs-fuse
|
||||
- rm -rf littlefs-fuse/littlefs/*
|
||||
- cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
|
||||
|
||||
- mkdir mount
|
||||
- sudo chmod a+rw /dev/loop0
|
||||
- dd if=/dev/zero bs=512 count=128K of=disk
|
||||
- losetup /dev/loop0 disk
|
||||
script:
|
||||
# self-host test
|
||||
- make -C littlefs-fuse
|
||||
|
||||
- littlefs-fuse/lfs --format /dev/loop0
|
||||
- littlefs-fuse/lfs /dev/loop0 mount
|
||||
|
||||
- ls mount
|
||||
- mkdir mount/littlefs
|
||||
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test
|
||||
|
||||
# test migration using littlefs-fuse
|
||||
- stage: test
|
||||
env:
|
||||
- NAME=littlefs-migration
|
||||
if: branch !~ -prefix$
|
||||
install:
|
||||
- *install-common
|
||||
- sudo apt-get install libfuse-dev
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 v2
|
||||
- git clone --depth 1 https://github.com/geky/littlefs-fuse -b v1 v1
|
||||
- fusermount -V
|
||||
- gcc --version
|
||||
|
||||
# setup disk for littlefs-fuse
|
||||
- rm -rf v2/littlefs/*
|
||||
- cp -r $(git ls-tree --name-only HEAD) v2/littlefs
|
||||
|
||||
- mkdir mount
|
||||
- sudo chmod a+rw /dev/loop0
|
||||
- dd if=/dev/zero bs=512 count=128K of=disk
|
||||
- losetup /dev/loop0 disk
|
||||
script:
|
||||
# compile v1 and v2
|
||||
- make -C v1
|
||||
- make -C v2
|
||||
|
||||
# run self-host test with v1
|
||||
- v1/lfs --format /dev/loop0
|
||||
- v1/lfs /dev/loop0 mount
|
||||
|
||||
- ls mount
|
||||
- mkdir mount/littlefs
|
||||
- cp -r $(git ls-tree --name-only HEAD) mount/littlefs
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test
|
||||
|
||||
# attempt to migrate
|
||||
- cd ../..
|
||||
- fusermount -u mount
|
||||
|
||||
- v2/lfs --migrate /dev/loop0
|
||||
- v2/lfs /dev/loop0 mount
|
||||
|
||||
# run self-host test with v2 right where we left off
|
||||
- ls mount
|
||||
- cd mount/littlefs
|
||||
- stat .
|
||||
- ls -flh
|
||||
- make -B test
|
||||
|
||||
# automatically create releases
|
||||
- stage: deploy
|
||||
env:
|
||||
- NAME=deploy
|
||||
script:
|
||||
- |
|
||||
bash << 'SCRIPT'
|
||||
set -ev
|
||||
# Find version defined in lfs.h
|
||||
LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3)
|
||||
LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16)))
|
||||
LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >> 0)))
|
||||
# Grab latests patch from repo tags, default to 0, needs finagling
|
||||
# to get past github's pagination api
|
||||
PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.
|
||||
PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \
|
||||
| sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \
|
||||
|| echo $PREV_URL)
|
||||
LFS_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \
|
||||
| jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g")
|
||||
.captures[].string | tonumber) | max + 1' \
|
||||
|| echo 0)
|
||||
# We have our new version
|
||||
LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH"
|
||||
echo "VERSION $LFS_VERSION"
|
||||
# Check that we're the most recent commit
|
||||
CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \
|
||||
| jq -re '.sha')
|
||||
[ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0
|
||||
# Create major branch
|
||||
git branch v$LFS_VERSION_MAJOR HEAD
|
||||
# Create major prefix branch
|
||||
git config user.name "geky bot"
|
||||
git config user.email "bot@geky.net"
|
||||
git fetch https://github.com/$TRAVIS_REPO_SLUG.git \
|
||||
--depth=50 v$LFS_VERSION_MAJOR-prefix || true
|
||||
./scripts/prefix.py lfs$LFS_VERSION_MAJOR
|
||||
git branch v$LFS_VERSION_MAJOR-prefix $( \
|
||||
git commit-tree $(git write-tree) \
|
||||
$(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
|
||||
-p HEAD \
|
||||
-m "Generated v$LFS_VERSION_MAJOR prefixes")
|
||||
git reset --hard
|
||||
# Update major version branches (vN and vN-prefix)
|
||||
git push --atomic https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \
|
||||
v$LFS_VERSION_MAJOR \
|
||||
v$LFS_VERSION_MAJOR-prefix
|
||||
# Build release notes
|
||||
PREV=$(git tag --sort=-v:refname -l "v*" | head -1)
|
||||
if [ ! -z "$PREV" ]
|
||||
then
|
||||
echo "PREV $PREV"
|
||||
CHANGES=$(git log --oneline $PREV.. --grep='^Merge' --invert-grep)
|
||||
printf "CHANGES\n%s\n\n" "$CHANGES"
|
||||
fi
|
||||
case ${GEKY_BOT_DRAFT:-minor} in
|
||||
true) DRAFT=true ;;
|
||||
minor) DRAFT=$(jq -R 'endswith(".0")' <<< "$LFS_VERSION") ;;
|
||||
false) DRAFT=false ;;
|
||||
esac
|
||||
# Create the release and patch version tag (vN.N.N)
|
||||
curl -f -u "$GEKY_BOT_RELEASES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \
|
||||
-d "{
|
||||
\"tag_name\": \"$LFS_VERSION\",
|
||||
\"name\": \"${LFS_VERSION%.0}\",
|
||||
\"target_commitish\": \"$TRAVIS_COMMIT\",
|
||||
\"draft\": $DRAFT,
|
||||
\"body\": $(jq -sR '.' <<< "$CHANGES")
|
||||
}" #"
|
||||
SCRIPT
|
||||
|
||||
# manage statuses
|
||||
before_install:
|
||||
- |
|
||||
# don't clobber other (not us) failures
|
||||
if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
| jq -e ".statuses[] | select(
|
||||
.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
|
||||
.state == \"failure\" and
|
||||
(.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
|
||||
then
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
|
||||
\"state\": \"pending\",
|
||||
\"description\": \"${STATUS:-In progress}\",
|
||||
\"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
|
||||
}"
|
||||
fi
|
||||
|
||||
after_failure:
|
||||
- |
|
||||
# don't clobber other (not us) failures
|
||||
if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
| jq -e ".statuses[] | select(
|
||||
.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
|
||||
.state == \"failure\" and
|
||||
(.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
|
||||
then
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
|
||||
\"state\": \"failure\",
|
||||
\"description\": \"${STATUS:-Failed}\",
|
||||
\"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
|
||||
}"
|
||||
fi
|
||||
|
||||
after_success:
|
||||
- |
|
||||
# don't clobber other (not us) failures
|
||||
# only update if we were last job to mark in progress,
|
||||
# this isn't perfect but is probably good enough
|
||||
if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
| jq -e ".statuses[] | select(
|
||||
.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
|
||||
(.state == \"failure\" or .state == \"pending\") and
|
||||
(.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
|
||||
then
|
||||
curl -u "$GEKY_BOT_STATUSES" -X POST \
|
||||
https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
|
||||
-d "{
|
||||
\"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
|
||||
\"state\": \"success\",
|
||||
\"description\": \"${STATUS:-Passed}\",
|
||||
\"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
|
||||
}"
|
||||
fi
|
||||
144 Makefile
@@ -1,173 +1,67 @@
|
||||
ifdef BUILDDIR
|
||||
# make sure BUILDDIR ends with a slash
|
||||
override BUILDDIR := $(BUILDDIR)/
|
||||
# bit of a hack, but we want to make sure BUILDDIR directory structure
|
||||
# is correct before any commands
|
||||
$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
|
||||
$(BUILDDIR) \
|
||||
$(BUILDDIR)bd \
|
||||
$(BUILDDIR)tests))
|
||||
endif
|
||||
|
||||
# overridable target/src/tools/flags/etc
|
||||
TARGET = lfs.a
|
||||
ifneq ($(wildcard test.c main.c),)
|
||||
TARGET ?= $(BUILDDIR)lfs
|
||||
else
|
||||
TARGET ?= $(BUILDDIR)lfs.a
|
||||
override TARGET = lfs
|
||||
endif
|
||||
|
||||
|
||||
CC ?= gcc
|
||||
AR ?= ar
|
||||
SIZE ?= size
|
||||
CTAGS ?= ctags
|
||||
NM ?= nm
|
||||
OBJDUMP ?= objdump
|
||||
LCOV ?= lcov
|
||||
|
||||
SRC ?= $(wildcard *.c)
|
||||
OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
|
||||
DEP := $(SRC:%.c=$(BUILDDIR)%.d)
|
||||
ASM := $(SRC:%.c=$(BUILDDIR)%.s)
|
||||
CGI := $(SRC:%.c=$(BUILDDIR)%.ci)
|
||||
SRC += $(wildcard *.c bd/*.c)
|
||||
OBJ := $(SRC:.c=.o)
|
||||
DEP := $(SRC:.c=.d)
|
||||
ASM := $(SRC:.c=.s)
|
||||
|
||||
ifdef DEBUG
|
||||
override CFLAGS += -O0
|
||||
override CFLAGS += -O0 -g3
|
||||
else
|
||||
override CFLAGS += -Os
|
||||
endif
|
||||
ifdef WORD
|
||||
override CFLAGS += -m$(WORD)
|
||||
endif
|
||||
ifdef TRACE
|
||||
override CFLAGS += -DLFS_YES_TRACE
|
||||
endif
|
||||
override CFLAGS += -g3
|
||||
override CFLAGS += -I.
|
||||
override CFLAGS += -std=c99 -Wall -pedantic
|
||||
override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef
|
||||
|
||||
ifdef VERBOSE
|
||||
override TESTFLAGS += -v
|
||||
override CALLSFLAGS += -v
|
||||
override CODEFLAGS += -v
|
||||
override DATAFLAGS += -v
|
||||
override STACKFLAGS += -v
|
||||
override STRUCTSFLAGS += -v
|
||||
override COVERAGEFLAGS += -v
|
||||
endif
|
||||
ifdef EXEC
|
||||
override TESTFLAGS += --exec="$(EXEC)"
|
||||
endif
|
||||
ifdef COVERAGE
|
||||
override TESTFLAGS += --coverage
|
||||
endif
|
||||
ifdef BUILDDIR
|
||||
override TESTFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
override CALLSFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
override DATAFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
override STACKFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
override STRUCTSFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
override COVERAGEFLAGS += --build-dir="$(BUILDDIR:/=)"
|
||||
endif
|
||||
ifneq ($(NM),nm)
|
||||
override CODEFLAGS += --nm-tool="$(NM)"
|
||||
override DATAFLAGS += --nm-tool="$(NM)"
|
||||
endif
|
||||
ifneq ($(OBJDUMP),objdump)
|
||||
override STRUCTSFLAGS += --objdump-tool="$(OBJDUMP)"
|
||||
override TFLAGS += -v
|
||||
endif
|
||||
|
||||
|
||||
# commands
|
||||
.PHONY: all build
|
||||
all build: $(TARGET)
|
||||
|
||||
.PHONY: asm
|
||||
asm: $(ASM)
|
||||
|
||||
.PHONY: size
|
||||
size: $(OBJ)
|
||||
$(SIZE) -t $^
|
||||
|
||||
.PHONY: tags
|
||||
tags:
|
||||
$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC)
|
||||
|
||||
.PHONY: calls
|
||||
calls: $(CGI)
|
||||
./scripts/calls.py $^ $(CALLSFLAGS)
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
./scripts/test.py $(TESTFLAGS)
|
||||
./scripts/test.py $(TFLAGS)
|
||||
.SECONDEXPANSION:
|
||||
test%: tests/test$$(firstword $$(subst \#, ,%)).toml
|
||||
./scripts/test.py $@ $(TESTFLAGS)
|
||||
./scripts/test.py $@ $(TFLAGS)
|
||||
|
||||
.PHONY: code
|
||||
code: $(OBJ)
|
||||
./scripts/code.py $^ -S $(CODEFLAGS)
|
||||
|
||||
.PHONY: data
|
||||
data: $(OBJ)
|
||||
./scripts/data.py $^ -S $(DATAFLAGS)
|
||||
|
||||
.PHONY: stack
|
||||
stack: $(CGI)
|
||||
./scripts/stack.py $^ -S $(STACKFLAGS)
|
||||
|
||||
.PHONY: structs
|
||||
structs: $(OBJ)
|
||||
./scripts/structs.py $^ -S $(STRUCTSFLAGS)
|
||||
|
||||
.PHONY: coverage
|
||||
coverage:
|
||||
./scripts/coverage.py $(BUILDDIR)tests/*.toml.info -s $(COVERAGEFLAGS)
|
||||
|
||||
.PHONY: summary
|
||||
summary: $(BUILDDIR)lfs.csv
|
||||
./scripts/summary.py -Y $^ $(SUMMARYFLAGS)
|
||||
|
||||
|
||||
# rules
|
||||
-include $(DEP)
|
||||
.SUFFIXES:
|
||||
|
||||
$(BUILDDIR)lfs: $(OBJ)
|
||||
lfs: $(OBJ)
|
||||
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
|
||||
|
||||
$(BUILDDIR)lfs.a: $(OBJ)
|
||||
%.a: $(OBJ)
|
||||
$(AR) rcs $@ $^
|
||||
|
||||
$(BUILDDIR)lfs.csv: $(OBJ) $(CGI)
|
||||
./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o $@
|
||||
./scripts/data.py $(OBJ) -q -m $@ $(DATAFLAGS) -o $@
|
||||
./scripts/stack.py $(CGI) -q -m $@ $(STACKFLAGS) -o $@
|
||||
./scripts/structs.py $(OBJ) -q -m $@ $(STRUCTSFLAGS) -o $@
|
||||
$(if $(COVERAGE),\
|
||||
./scripts/coverage.py $(BUILDDIR)tests/*.toml.info \
|
||||
-q -m $@ $(COVERAGEFLAGS) -o $@)
|
||||
|
||||
$(BUILDDIR)%.o: %.c
|
||||
%.o: %.c
|
||||
$(CC) -c -MMD $(CFLAGS) $< -o $@
|
||||
|
||||
$(BUILDDIR)%.s: %.c
|
||||
%.s: %.c
|
||||
$(CC) -S $(CFLAGS) $< -o $@
|
||||
|
||||
# gcc depends on the output file for intermediate file names, so
|
||||
# we can't omit to .o output. We also need to serialize with the
|
||||
# normal .o rule because otherwise we can end up with multiprocess
|
||||
# problems with two instances of gcc modifying the same .o
|
||||
$(BUILDDIR)%.ci: %.c | $(BUILDDIR)%.o
|
||||
$(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $|
|
||||
|
||||
# clean everything
|
||||
.PHONY: clean
|
||||
clean:
|
||||
rm -f $(BUILDDIR)lfs
|
||||
rm -f $(BUILDDIR)lfs.a
|
||||
rm -f $(BUILDDIR)lfs.csv
|
||||
rm -f $(TARGET)
|
||||
rm -f $(OBJ)
|
||||
rm -f $(CGI)
|
||||
rm -f $(DEP)
|
||||
rm -f $(ASM)
|
||||
rm -f $(BUILDDIR)tests/*.toml.*
|
||||
rm -f tests/*.toml.*
|
||||
|
||||
10 README.md
@@ -39,7 +39,7 @@ lfs_t lfs;
lfs_file_t file;

// configuration of the filesystem is provided by this struct
const struct lfs_config cfg = {
const struct lfs_cfg cfg = {
    // block device operations
    .read = user_provided_block_device_read,
    .prog = user_provided_block_device_prog,
@@ -192,7 +192,7 @@ More details on how littlefs works can be found in [DESIGN.md](DESIGN.md) and
## Testing

The littlefs comes with a test suite designed to run on a PC using the
[emulated block device](bd/lfs_testbd.h) found in the `bd` directory.
[emulated block device](emubd/lfs_emubd.h) found in the emubd directory.
The tests assume a Linux environment and can be started with make:

``` bash
@@ -222,11 +222,6 @@ License Identifiers that are here available: http://spdx.org/licenses/
want this, but it is handy for demos. You can see it in action
[here][littlefs-js-demo].

- [littlefs-python] - A Python wrapper for littlefs. The project allows you
  to create images of the filesystem on your PC. Check if littlefs will fit
  your needs, create images for a later download to the target memory or
  inspect the content of a binary image of the target memory.

- [mklfs] - A command line tool built by the [Lua RTOS] guys for making
  littlefs images from a host PC. Supports Windows, Mac OS, and Linux.

@@ -255,4 +250,3 @@ License Identifiers that are here available: http://spdx.org/licenses/
[LittleFileSystem]: https://os.mbed.com/docs/mbed-os/v5.12/apis/littlefilesystem.html
[SPIFFS]: https://github.com/pellepl/spiffs
[Dhara]: https://github.com/dlbeer/dhara
[littlefs-python]: https://pypi.org/project/littlefs-python/
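The README hunk above shows this branch renaming the usage example's configuration struct from `lfs_config` to `lfs_cfg`, with the same user-provided block device callbacks. For orientation, here is a minimal sketch of the upstream-style configuration and mount sequence; the geometry values are placeholders and the exact field set of the renamed `lfs_cfg` struct is not shown in this diff, so treat the field names as assumptions carried over from upstream:

``` c
#include "lfs.h"

// Upstream-style configuration (this branch renames the struct to lfs_cfg).
// Geometry values are placeholders for a hypothetical 4KiB-block device.
const struct lfs_config cfg = {
    // block device operations (user-provided callbacks)
    .read  = user_provided_block_device_read,
    .prog  = user_provided_block_device_prog,
    .erase = user_provided_block_device_erase,
    .sync  = user_provided_block_device_sync,

    // block device geometry
    .read_size = 16,
    .prog_size = 16,
    .block_size = 4096,
    .block_count = 128,
    .cache_size = 16,
    .lookahead_size = 16,
    .block_cycles = 500,
};

lfs_t lfs;

int mount_or_format(void) {
    // mount, reformatting on first boot or if the filesystem is corrupted
    int err = lfs_mount(&lfs, &cfg);
    if (err) {
        lfs_format(&lfs, &cfg);
        err = lfs_mount(&lfs, &cfg);
    }
    return err;
}
```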
@@ -10,21 +10,16 @@
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
|
||||
int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
|
||||
const struct lfs_filebd_config *bdcfg) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_createcfg(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
|
||||
int lfs_filebd_createcfg(lfs_filebd_t *bd, const char *path,
|
||||
const struct lfs_filebd_cfg *cfg) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_createcfg(%p, \"%s\", %p {"
|
||||
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
|
||||
"\"%s\", "
|
||||
"%p {.erase_value=%"PRId32"})",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
|
||||
path, (void*)bdcfg, bdcfg->erase_value);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
bd->cfg = bdcfg;
|
||||
".erase_size=%"PRIu32", .erase_count=%"PRIu32", "
|
||||
".erase_value=%"PRId32"})",
|
||||
(void*)bd, path, (void*)cfg,
|
||||
cfg->read_size, cfg->prog_size, cfg->erase_size, cfg->erase_count,
|
||||
cfg->erase_value);
|
||||
bd->cfg = cfg;
|
||||
|
||||
// open file
|
||||
bd->fd = open(path, O_RDWR | O_CREAT, 0666);
|
||||
@@ -38,26 +33,8 @@ int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_create(const struct lfs_config *cfg, const char *path) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_create(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
|
||||
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
|
||||
"\"%s\")",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
|
||||
path);
|
||||
static const struct lfs_filebd_config defaults = {.erase_value=-1};
|
||||
int err = lfs_filebd_createcfg(cfg, path, &defaults);
|
||||
LFS_FILEBD_TRACE("lfs_filebd_create -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
int lfs_filebd_destroy(const struct lfs_config *cfg) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_destroy(%p)", (void*)cfg);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
int lfs_filebd_destroy(lfs_filebd_t *bd) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_destroy(%p)", (void*)bd);
|
||||
int err = close(bd->fd);
|
||||
if (err < 0) {
|
||||
err = -errno;
|
||||
@@ -68,17 +45,16 @@ int lfs_filebd_destroy(const struct lfs_config *cfg) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
int lfs_filebd_read(lfs_filebd_t *bd, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_read(%p, "
|
||||
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
(void*)bd, block, off, buffer, size);
|
||||
|
||||
// check if read is valid
|
||||
LFS_ASSERT(off % cfg->read_size == 0);
|
||||
LFS_ASSERT(size % cfg->read_size == 0);
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
LFS_ASSERT(off % bd->cfg->read_size == 0);
|
||||
LFS_ASSERT(size % bd->cfg->read_size == 0);
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
// zero for reproducability (in case file is truncated)
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
@@ -87,7 +63,7 @@ int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
|
||||
// read
|
||||
off_t res1 = lseek(bd->fd,
|
||||
(off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
|
||||
(off_t)block*bd->cfg->erase_size + (off_t)off, SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err);
|
||||
@@ -105,21 +81,21 @@ int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
int lfs_filebd_prog(lfs_filebd_t *bd, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog(%p, "
|
||||
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)bd, block, off, buffer, size);
|
||||
|
||||
// check if write is valid
|
||||
LFS_ASSERT(off % cfg->prog_size == 0);
|
||||
LFS_ASSERT(size % cfg->prog_size == 0);
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
LFS_ASSERT(off % bd->cfg->prog_size == 0);
|
||||
LFS_ASSERT(size % bd->cfg->prog_size == 0);
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
// check that data was erased? only needed for testing
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
off_t res1 = lseek(bd->fd,
|
||||
(off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
|
||||
(off_t)block*bd->cfg->erase_size + (off_t)off, SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
|
||||
@@ -141,7 +117,7 @@ int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
|
||||
// program data
|
||||
off_t res1 = lseek(bd->fd,
|
||||
(off_t)block*cfg->block_size + (off_t)off, SEEK_SET);
|
||||
(off_t)block*bd->cfg->erase_size + (off_t)off, SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
|
||||
@@ -159,23 +135,22 @@ int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
int lfs_filebd_erase(lfs_filebd_t *bd, lfs_block_t block) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_erase(%p, 0x%"PRIx32")", (void*)bd, block);
|
||||
|
||||
// check if erase is valid
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
// erase, only needed for testing
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
off_t res1 = lseek(bd->fd, (off_t)block*cfg->block_size, SEEK_SET);
|
||||
off_t res1 = lseek(bd->fd, (off_t)block*bd->cfg->erase_size, SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i++) {
|
||||
for (lfs_off_t i = 0; i < bd->cfg->erase_size; i++) {
|
||||
ssize_t res2 = write(bd->fd, &(uint8_t){bd->cfg->erase_value}, 1);
|
||||
if (res2 < 0) {
|
||||
int err = -errno;
|
||||
@@ -189,10 +164,10 @@ int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_sync(const struct lfs_config *cfg) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_sync(%p)", (void*)cfg);
|
||||
int lfs_filebd_sync(lfs_filebd_t *bd) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_sync(%p)", (void*)bd);
|
||||
|
||||
// file sync
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
int err = fsync(bd->fd);
|
||||
if (err) {
|
||||
err = -errno;
|
||||
|
||||
@@ -8,11 +8,9 @@
#define LFS_FILEBD_H

#include "lfs.h"
#include "lfs_util.h"

#ifdef __cplusplus
extern "C"
{
extern "C" {
#endif


@@ -24,7 +22,21 @@ extern "C"
#endif

// filebd config (optional)
struct lfs_filebd_config {
struct lfs_filebd_cfg {
// Minimum size of block read. All read operations must be a
// multiple of this value.
lfs_size_t read_size;

// Minimum size of block program. All program operations must be a
// multiple of this value.
lfs_size_t prog_size;

// Size of an erasable block.
lfs_size_t erase_size;

// Number of erasable blocks on the device.
lfs_size_t erase_count;

// 8-bit erase value to use for simulating erases. -1 does not simulate
// erases, which can speed up testing by avoiding all the extra block-device
// operations to store the erase value.
@@ -34,40 +46,39 @@ struct lfs_filebd_config {
// filebd state
typedef struct lfs_filebd {
int fd;
const struct lfs_filebd_config *cfg;
const struct lfs_filebd_cfg *cfg;
} lfs_filebd_t;


// Create a file block device using the geometry in lfs_config
int lfs_filebd_create(const struct lfs_config *cfg, const char *path);
int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
const struct lfs_filebd_config *bdcfg);
// Create a file block device using the geometry in lfs_filebd_cfg
int lfs_filebd_createcfg(lfs_filebd_t *bd, const char *path,
const struct lfs_filebd_cfg *cfg);

// Clean up memory associated with block device
int lfs_filebd_destroy(const struct lfs_config *cfg);
int lfs_filebd_destroy(lfs_filebd_t *bd);

// Read a block
int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
int lfs_filebd_read(lfs_filebd_t *bd, lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size);

// Program a block
//
// The block must have previously been erased.
int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
int lfs_filebd_prog(lfs_filebd_t *bd, lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size);

// Erase a block
//
// A block must be erased before being programmed. The
// state of an erased block is undefined.
int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block);
int lfs_filebd_erase(lfs_filebd_t *bd, lfs_block_t block);

// Sync the block device
int lfs_filebd_sync(const struct lfs_config *cfg);
int lfs_filebd_sync(lfs_filebd_t *bd);


#ifdef __cplusplus
} /* extern "C" */
}
#endif

#endif

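For reference, a minimal sketch of driving the reworked filebd API declared above; the geometry values and backing file name are illustrative only, not taken from this change:

static const struct lfs_filebd_cfg filebd_cfg = {
    .read_size   = 16,
    .prog_size   = 16,
    .erase_size  = 4096,
    .erase_count = 256,
    .erase_value = -1,  // don't simulate erase values
};

static lfs_filebd_t filebd;

int filebd_example(void) {
    // bind the block-device state to a backing file and config
    int err = lfs_filebd_createcfg(&filebd, "disk.img", &filebd_cfg);
    if (err) {
        return err;
    }

    // read the first read_size bytes of block 0
    uint8_t buf[16];
    err = lfs_filebd_read(&filebd, 0, 0, buf, sizeof(buf));
    if (err) {
        return err;
    }

    return lfs_filebd_destroy(&filebd);
}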
@@ -6,26 +6,22 @@
|
||||
*/
|
||||
#include "bd/lfs_rambd.h"
|
||||
|
||||
int lfs_rambd_createcfg(const struct lfs_config *cfg,
|
||||
const struct lfs_rambd_config *bdcfg) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_createcfg(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
|
||||
int lfs_rambd_createcfg(lfs_rambd_t *bd,
|
||||
const struct lfs_rambd_cfg *cfg) {
|
||||
LFS_RAMBD_TRACE("lfs_filebd_createcfg(%p, %p {"
|
||||
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
|
||||
"%p {.erase_value=%"PRId32", .buffer=%p})",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
|
||||
(void*)bdcfg, bdcfg->erase_value, bdcfg->buffer);
|
||||
lfs_rambd_t *bd = cfg->context;
|
||||
bd->cfg = bdcfg;
|
||||
".erase_size=%"PRIu32", .erase_count=%"PRIu32", "
|
||||
".erase_value=%"PRId32", .buffer=%p})",
|
||||
(void*)bd, (void*)cfg,
|
||||
cfg->read_size, cfg->prog_size, cfg->erase_size, cfg->erase_count,
|
||||
cfg->erase_value, cfg->buffer);
|
||||
bd->cfg = cfg;
|
||||
|
||||
// allocate buffer?
|
||||
if (bd->cfg->buffer) {
|
||||
bd->buffer = bd->cfg->buffer;
|
||||
} else {
|
||||
bd->buffer = lfs_malloc(cfg->block_size * cfg->block_count);
|
||||
bd->buffer = lfs_malloc(bd->cfg->erase_size * bd->cfg->erase_count);
|
||||
if (!bd->buffer) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", LFS_ERR_NOMEM);
|
||||
return LFS_ERR_NOMEM;
|
||||
@@ -35,32 +31,16 @@ int lfs_rambd_createcfg(const struct lfs_config *cfg,
|
||||
// zero for reproducability?
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
memset(bd->buffer, bd->cfg->erase_value,
|
||||
cfg->block_size * cfg->block_count);
|
||||
bd->cfg->erase_size * bd->cfg->erase_count);
|
||||
}
|
||||
|
||||
LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_rambd_create(const struct lfs_config *cfg) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_create(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
|
||||
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".block_size=%"PRIu32", .block_count=%"PRIu32"})",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count);
|
||||
static const struct lfs_rambd_config defaults = {.erase_value=-1};
|
||||
int err = lfs_rambd_createcfg(cfg, &defaults);
|
||||
LFS_RAMBD_TRACE("lfs_rambd_create -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
int lfs_rambd_destroy(const struct lfs_config *cfg) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_destroy(%p)", (void*)cfg);
|
||||
int lfs_rambd_destroy(lfs_rambd_t *bd) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_destroy(%p)", (void*)bd);
|
||||
// clean up memory
|
||||
lfs_rambd_t *bd = cfg->context;
|
||||
if (!bd->cfg->buffer) {
|
||||
lfs_free(bd->buffer);
|
||||
}
|
||||
@@ -68,73 +48,70 @@ int lfs_rambd_destroy(const struct lfs_config *cfg) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
int lfs_rambd_read(lfs_rambd_t *bd, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_read(%p, "
|
||||
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_rambd_t *bd = cfg->context;
|
||||
(void*)bd, block, off, buffer, size);
|
||||
|
||||
// check if read is valid
|
||||
LFS_ASSERT(off % cfg->read_size == 0);
|
||||
LFS_ASSERT(size % cfg->read_size == 0);
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
LFS_ASSERT(off % bd->cfg->read_size == 0);
|
||||
LFS_ASSERT(size % bd->cfg->read_size == 0);
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
// read data
|
||||
memcpy(buffer, &bd->buffer[block*cfg->block_size + off], size);
|
||||
memcpy(buffer, &bd->buffer[block*bd->cfg->erase_size + off], size);
|
||||
|
||||
LFS_RAMBD_TRACE("lfs_rambd_read -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
int lfs_rambd_prog(lfs_rambd_t *bd, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_prog(%p, "
|
||||
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_rambd_t *bd = cfg->context;
|
||||
(void*)bd, block, off, buffer, size);
|
||||
|
||||
// check if write is valid
|
||||
LFS_ASSERT(off % cfg->prog_size == 0);
|
||||
LFS_ASSERT(size % cfg->prog_size == 0);
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
LFS_ASSERT(off % bd->cfg->prog_size == 0);
|
||||
LFS_ASSERT(size % bd->cfg->prog_size == 0);
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
// check that data was erased? only needed for testing
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
for (lfs_off_t i = 0; i < size; i++) {
|
||||
LFS_ASSERT(bd->buffer[block*cfg->block_size + off + i] ==
|
||||
LFS_ASSERT(bd->buffer[block*bd->cfg->erase_size + off + i] ==
|
||||
bd->cfg->erase_value);
|
||||
}
|
||||
}
|
||||
|
||||
// program data
|
||||
memcpy(&bd->buffer[block*cfg->block_size + off], buffer, size);
|
||||
memcpy(&bd->buffer[block*bd->cfg->erase_size + off], buffer, size);
|
||||
|
||||
LFS_RAMBD_TRACE("lfs_rambd_prog -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
|
||||
lfs_rambd_t *bd = cfg->context;
|
||||
int lfs_rambd_erase(lfs_rambd_t *bd, lfs_block_t block) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_erase(%p, 0x%"PRIx32")", (void*)bd, block);
|
||||
|
||||
// check if erase is valid
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
// erase, only needed for testing
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
memset(&bd->buffer[block*cfg->block_size],
|
||||
bd->cfg->erase_value, cfg->block_size);
|
||||
memset(&bd->buffer[block*bd->cfg->erase_size],
|
||||
bd->cfg->erase_value, bd->cfg->erase_size);
|
||||
}
|
||||
|
||||
LFS_RAMBD_TRACE("lfs_rambd_erase -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_rambd_sync(const struct lfs_config *cfg) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_sync(%p)", (void*)cfg);
|
||||
int lfs_rambd_sync(lfs_rambd_t *bd) {
|
||||
LFS_RAMBD_TRACE("lfs_rambd_sync(%p)", (void*)bd);
|
||||
// sync does nothing because we aren't backed by anything real
|
||||
(void)cfg;
|
||||
(void)bd;
|
||||
LFS_RAMBD_TRACE("lfs_rambd_sync -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -8,11 +8,9 @@
|
||||
#define LFS_RAMBD_H
|
||||
|
||||
#include "lfs.h"
|
||||
#include "lfs_util.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
@@ -24,9 +22,24 @@ extern "C"
|
||||
#endif
|
||||
|
||||
// rambd config (optional)
|
||||
struct lfs_rambd_config {
|
||||
// 8-bit erase value to simulate erasing with. -1 indicates no erase
|
||||
// occurs, which is still a valid block device
|
||||
struct lfs_rambd_cfg {
|
||||
// Minimum size of block read. All read operations must be a
|
||||
// multiple of this value.
|
||||
lfs_size_t read_size;
|
||||
|
||||
// Minimum size of block program. All program operations must be a
|
||||
// multiple of this value.
|
||||
lfs_size_t prog_size;
|
||||
|
||||
// Size of an erasable block.
|
||||
lfs_size_t erase_size;
|
||||
|
||||
// Number of erasable blocks on the device.
|
||||
lfs_size_t erase_count;
|
||||
|
||||
// 8-bit erase value to use for simulating erases. -1 does not simulate
|
||||
// erases, which can speed up testing by avoiding all the extra block-device
|
||||
// operations to store the erase value.
|
||||
int32_t erase_value;
|
||||
|
||||
// Optional statically allocated buffer for the block device.
|
||||
@@ -36,40 +49,39 @@ struct lfs_rambd_config {
|
||||
// rambd state
|
||||
typedef struct lfs_rambd {
|
||||
uint8_t *buffer;
|
||||
const struct lfs_rambd_config *cfg;
|
||||
const struct lfs_rambd_cfg *cfg;
|
||||
} lfs_rambd_t;
|
||||
|
||||
|
||||
// Create a RAM block device using the geometry in lfs_config
|
||||
int lfs_rambd_create(const struct lfs_config *cfg);
|
||||
int lfs_rambd_createcfg(const struct lfs_config *cfg,
|
||||
const struct lfs_rambd_config *bdcfg);
|
||||
// Create a RAM block device using the geometry in lfs_cfg
|
||||
int lfs_rambd_createcfg(lfs_rambd_t *bd,
|
||||
const struct lfs_rambd_cfg *cfg);
|
||||
|
||||
// Clean up memory associated with block device
|
||||
int lfs_rambd_destroy(const struct lfs_config *cfg);
|
||||
int lfs_rambd_destroy(lfs_rambd_t *bd);
|
||||
|
||||
// Read a block
|
||||
int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
int lfs_rambd_read(lfs_rambd_t *bd, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size);
|
||||
|
||||
// Program a block
|
||||
//
|
||||
// The block must have previously been erased.
|
||||
int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
int lfs_rambd_prog(lfs_rambd_t *bd, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size);
|
||||
|
||||
// Erase a block
|
||||
//
|
||||
// A block must be erased before being programmed. The
|
||||
// state of an erased block is undefined.
|
||||
int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block);
|
||||
int lfs_rambd_erase(lfs_rambd_t *bd, lfs_block_t block);
|
||||
|
||||
// Sync the block device
|
||||
int lfs_rambd_sync(const struct lfs_config *cfg);
|
||||
int lfs_rambd_sync(lfs_rambd_t *bd);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
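For reference, a minimal sketch of creating a RAM block device with the reworked lfs_rambd_cfg and lfs_rambd_createcfg from the hunk above; the sizes and static buffer are illustrative:

static uint8_t rambd_buffer[4096 * 64];  // erase_size * erase_count

static const struct lfs_rambd_cfg rambd_cfg = {
    .read_size   = 16,
    .prog_size   = 16,
    .erase_size  = 4096,
    .erase_count = 64,
    .erase_value = -1,            // skip erase simulation
    .buffer      = rambd_buffer,  // statically allocated storage
};

static lfs_rambd_t rambd;

int rambd_example(void) {
    return lfs_rambd_createcfg(&rambd, &rambd_cfg);
}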
157 bd/lfs_testbd.c
@@ -10,25 +10,20 @@
|
||||
#include <stdlib.h>
|
||||
|
||||
|
||||
int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path,
|
||||
const struct lfs_testbd_config *bdcfg) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_createcfg(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
|
||||
int lfs_testbd_createcfg(lfs_testbd_t *bd, const char *path,
|
||||
const struct lfs_testbd_cfg *cfg) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_createcfg(%p, \"%s\", %p {"
|
||||
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
|
||||
"\"%s\", "
|
||||
"%p {.erase_value=%"PRId32", .erase_cycles=%"PRIu32", "
|
||||
".erase_size=%"PRIu32", .erase_count=%"PRIu32", "
|
||||
".erase_value=%"PRId32", .erase_cycles=%"PRIu32", "
|
||||
".badblock_behavior=%"PRIu8", .power_cycles=%"PRIu32", "
|
||||
".buffer=%p, .wear_buffer=%p})",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
|
||||
path, (void*)bdcfg, bdcfg->erase_value, bdcfg->erase_cycles,
|
||||
bdcfg->badblock_behavior, bdcfg->power_cycles,
|
||||
bdcfg->buffer, bdcfg->wear_buffer);
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
bd->cfg = bdcfg;
|
||||
(void*)bd, path, (void*)cfg,
|
||||
cfg->read_size, cfg->prog_size, cfg->erase_size, cfg->erase_count,
|
||||
cfg->erase_value, cfg->erase_cycles,
|
||||
cfg->badblock_behavior, cfg->power_cycles,
|
||||
cfg->buffer, cfg->wear_buffer);
|
||||
bd->cfg = cfg;
|
||||
|
||||
// setup testing things
|
||||
bd->persist = path;
|
||||
@@ -38,122 +33,94 @@ int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path,
|
||||
if (bd->cfg->wear_buffer) {
|
||||
bd->wear = bd->cfg->wear_buffer;
|
||||
} else {
|
||||
bd->wear = lfs_malloc(sizeof(lfs_testbd_wear_t)*cfg->block_count);
|
||||
bd->wear = lfs_malloc(sizeof(lfs_testbd_wear_t)*cfg->erase_count);
|
||||
if (!bd->wear) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", LFS_ERR_NOMEM);
|
||||
return LFS_ERR_NOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
memset(bd->wear, 0, sizeof(lfs_testbd_wear_t) * cfg->block_count);
|
||||
memset(bd->wear, 0, sizeof(lfs_testbd_wear_t) * bd->cfg->erase_count);
|
||||
}
|
||||
|
||||
// create underlying block device
|
||||
if (bd->persist) {
|
||||
bd->u.file.cfg = (struct lfs_filebd_config){
|
||||
.erase_value = bd->cfg->erase_value,
|
||||
};
|
||||
int err = lfs_filebd_createcfg(cfg, path, &bd->u.file.cfg);
|
||||
int err = lfs_filebd_createcfg(&bd->impl.filebd, path,
|
||||
bd->cfg->filebd_cfg);
|
||||
LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", err);
|
||||
return err;
|
||||
} else {
|
||||
bd->u.ram.cfg = (struct lfs_rambd_config){
|
||||
.erase_value = bd->cfg->erase_value,
|
||||
.buffer = bd->cfg->buffer,
|
||||
};
|
||||
int err = lfs_rambd_createcfg(cfg, &bd->u.ram.cfg);
|
||||
int err = lfs_rambd_createcfg(&bd->impl.rambd,
|
||||
bd->cfg->rambd_cfg);
|
||||
LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
int lfs_testbd_create(const struct lfs_config *cfg, const char *path) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_create(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p, "
|
||||
".read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".block_size=%"PRIu32", .block_count=%"PRIu32"}, "
|
||||
"\"%s\")",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
|
||||
path);
|
||||
static const struct lfs_testbd_config defaults = {.erase_value=-1};
|
||||
int err = lfs_testbd_createcfg(cfg, path, &defaults);
|
||||
LFS_TESTBD_TRACE("lfs_testbd_create -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
int lfs_testbd_destroy(const struct lfs_config *cfg) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_destroy(%p)", (void*)cfg);
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
int lfs_testbd_destroy(lfs_testbd_t *bd) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_destroy(%p)", (void*)bd);
|
||||
if (bd->cfg->erase_cycles && !bd->cfg->wear_buffer) {
|
||||
lfs_free(bd->wear);
|
||||
}
|
||||
|
||||
if (bd->persist) {
|
||||
int err = lfs_filebd_destroy(cfg);
|
||||
int err = lfs_filebd_destroy(&bd->impl.filebd);
|
||||
LFS_TESTBD_TRACE("lfs_testbd_destroy -> %d", err);
|
||||
return err;
|
||||
} else {
|
||||
int err = lfs_rambd_destroy(cfg);
|
||||
int err = lfs_rambd_destroy(&bd->impl.rambd);
|
||||
LFS_TESTBD_TRACE("lfs_testbd_destroy -> %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
/// Internal mapping to block devices ///
|
||||
static int lfs_testbd_rawread(const struct lfs_config *cfg, lfs_block_t block,
|
||||
static int lfs_testbd_rawread(lfs_testbd_t *bd, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size) {
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
if (bd->persist) {
|
||||
return lfs_filebd_read(cfg, block, off, buffer, size);
|
||||
return lfs_filebd_read(&bd->impl.filebd, block, off, buffer, size);
|
||||
} else {
|
||||
return lfs_rambd_read(cfg, block, off, buffer, size);
|
||||
return lfs_rambd_read(&bd->impl.rambd, block, off, buffer, size);
|
||||
}
|
||||
}
|
||||
|
||||
static int lfs_testbd_rawprog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
static int lfs_testbd_rawprog(lfs_testbd_t *bd, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size) {
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
if (bd->persist) {
|
||||
return lfs_filebd_prog(cfg, block, off, buffer, size);
|
||||
return lfs_filebd_prog(&bd->impl.filebd, block, off, buffer, size);
|
||||
} else {
|
||||
return lfs_rambd_prog(cfg, block, off, buffer, size);
|
||||
return lfs_rambd_prog(&bd->impl.rambd, block, off, buffer, size);
|
||||
}
|
||||
}
|
||||
|
||||
static int lfs_testbd_rawerase(const struct lfs_config *cfg,
|
||||
static int lfs_testbd_rawerase(lfs_testbd_t *bd,
|
||||
lfs_block_t block) {
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
if (bd->persist) {
|
||||
return lfs_filebd_erase(cfg, block);
|
||||
return lfs_filebd_erase(&bd->impl.filebd, block);
|
||||
} else {
|
||||
return lfs_rambd_erase(cfg, block);
|
||||
return lfs_rambd_erase(&bd->impl.rambd, block);
|
||||
}
|
||||
}
|
||||
|
||||
static int lfs_testbd_rawsync(const struct lfs_config *cfg) {
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
static int lfs_testbd_rawsync(lfs_testbd_t *bd) {
|
||||
if (bd->persist) {
|
||||
return lfs_filebd_sync(cfg);
|
||||
return lfs_filebd_sync(&bd->impl.filebd);
|
||||
} else {
|
||||
return lfs_rambd_sync(cfg);
|
||||
return lfs_rambd_sync(&bd->impl.rambd);
|
||||
}
|
||||
}
|
||||
|
||||
/// block device API ///
|
||||
int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
int lfs_testbd_read(lfs_testbd_t *bd, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_read(%p, "
|
||||
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
(void*)bd, block, off, buffer, size);
|
||||
|
||||
// check if read is valid
|
||||
LFS_ASSERT(off % cfg->read_size == 0);
|
||||
LFS_ASSERT(size % cfg->read_size == 0);
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
LFS_ASSERT(off % bd->cfg->read_size == 0);
|
||||
LFS_ASSERT(size % bd->cfg->read_size == 0);
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
// block bad?
|
||||
if (bd->cfg->erase_cycles && bd->wear[block] >= bd->cfg->erase_cycles &&
|
||||
@@ -163,22 +130,21 @@ int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
}
|
||||
|
||||
// read
|
||||
int err = lfs_testbd_rawread(cfg, block, off, buffer, size);
|
||||
int err = lfs_testbd_rawread(bd, block, off, buffer, size);
|
||||
LFS_TESTBD_TRACE("lfs_testbd_read -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
int lfs_testbd_prog(lfs_testbd_t *bd, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_prog(%p, "
|
||||
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
(void*)bd, block, off, buffer, size);
|
||||
|
||||
// check if write is valid
|
||||
LFS_ASSERT(off % cfg->prog_size == 0);
|
||||
LFS_ASSERT(size % cfg->prog_size == 0);
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
LFS_ASSERT(off % bd->cfg->prog_size == 0);
|
||||
LFS_ASSERT(size % bd->cfg->prog_size == 0);
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
// block bad?
|
||||
if (bd->cfg->erase_cycles && bd->wear[block] >= bd->cfg->erase_cycles) {
|
||||
@@ -196,7 +162,7 @@ int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
}
|
||||
|
||||
// prog
|
||||
int err = lfs_testbd_rawprog(cfg, block, off, buffer, size);
|
||||
int err = lfs_testbd_rawprog(bd, block, off, buffer, size);
|
||||
if (err) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", err);
|
||||
return err;
|
||||
@@ -207,7 +173,7 @@ int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
bd->power_cycles -= 1;
|
||||
if (bd->power_cycles == 0) {
|
||||
// sync to make sure we persist the last changes
|
||||
LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0);
|
||||
assert(lfs_testbd_rawsync(bd) == 0);
|
||||
// simulate power loss
|
||||
exit(33);
|
||||
}
|
||||
@@ -217,12 +183,11 @@ int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_erase(%p, 0x%"PRIx32")", (void*)cfg, block);
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
int lfs_testbd_erase(lfs_testbd_t *bd, lfs_block_t block) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_erase(%p, 0x%"PRIx32")", (void*)bd, block);
|
||||
|
||||
// check if erase is valid
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
// block bad?
|
||||
if (bd->cfg->erase_cycles) {
|
||||
@@ -243,7 +208,7 @@ int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) {
|
||||
}
|
||||
|
||||
// erase
|
||||
int err = lfs_testbd_rawerase(cfg, block);
|
||||
int err = lfs_testbd_rawerase(bd, block);
|
||||
if (err) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", err);
|
||||
return err;
|
||||
@@ -254,7 +219,7 @@ int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) {
|
||||
bd->power_cycles -= 1;
|
||||
if (bd->power_cycles == 0) {
|
||||
// sync to make sure we persist the last changes
|
||||
LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0);
|
||||
assert(lfs_testbd_rawsync(bd) == 0);
|
||||
// simulate power loss
|
||||
exit(33);
|
||||
}
|
||||
@@ -264,36 +229,34 @@ int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_testbd_sync(const struct lfs_config *cfg) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_sync(%p)", (void*)cfg);
|
||||
int err = lfs_testbd_rawsync(cfg);
|
||||
int lfs_testbd_sync(lfs_testbd_t *bd) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_sync(%p)", (void*)bd);
|
||||
int err = lfs_testbd_rawsync(bd);
|
||||
LFS_TESTBD_TRACE("lfs_testbd_sync -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
/// simulated wear operations ///
|
||||
lfs_testbd_swear_t lfs_testbd_getwear(const struct lfs_config *cfg,
|
||||
lfs_testbd_swear_t lfs_testbd_getwear(lfs_testbd_t *bd,
|
||||
lfs_block_t block) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_getwear(%p, %"PRIu32")", (void*)cfg, block);
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
LFS_TESTBD_TRACE("lfs_testbd_getwear(%p, %"PRIu32")", (void*)bd, block);
|
||||
|
||||
// check if block is valid
|
||||
LFS_ASSERT(bd->cfg->erase_cycles);
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
LFS_TESTBD_TRACE("lfs_testbd_getwear -> %"PRIu32, bd->wear[block]);
|
||||
return bd->wear[block];
|
||||
}
|
||||
|
||||
int lfs_testbd_setwear(const struct lfs_config *cfg,
|
||||
int lfs_testbd_setwear(lfs_testbd_t *bd,
|
||||
lfs_block_t block, lfs_testbd_wear_t wear) {
|
||||
LFS_TESTBD_TRACE("lfs_testbd_setwear(%p, %"PRIu32")", (void*)cfg, block);
|
||||
lfs_testbd_t *bd = cfg->context;
|
||||
LFS_TESTBD_TRACE("lfs_testbd_setwear(%p, %"PRIu32")", (void*)bd, block);
|
||||
|
||||
// check if block is valid
|
||||
LFS_ASSERT(bd->cfg->erase_cycles);
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
bd->wear[block] = wear;
|
||||
|
||||
|
||||
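For reference, a sketch of how a test might drive the simulated-wear functions defined above, once a testbd has been created with a nonzero erase_cycles; the block index and threshold are illustrative:

// returns 0 if wear tracking round-trips as expected
static int wear_example(lfs_testbd_t *bd, lfs_testbd_wear_t cycles) {
    // mark block 0 as worn out so the configured bad-block behavior kicks in
    int err = lfs_testbd_setwear(bd, 0, cycles);
    if (err) {
        return err;
    }

    // read the simulated wear back
    lfs_testbd_swear_t wear = lfs_testbd_getwear(bd, 0);
    return (wear == (lfs_testbd_swear_t)cycles) ? 0 : -1;
}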
@@ -9,13 +9,11 @@
|
||||
#define LFS_TESTBD_H
|
||||
|
||||
#include "lfs.h"
|
||||
#include "lfs_util.h"
|
||||
#include "bd/lfs_rambd.h"
|
||||
#include "bd/lfs_filebd.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
@@ -45,7 +43,26 @@ typedef uint32_t lfs_testbd_wear_t;
|
||||
typedef int32_t lfs_testbd_swear_t;
|
||||
|
||||
// testbd config, this is required for testing
|
||||
struct lfs_testbd_config {
|
||||
struct lfs_testbd_cfg {
|
||||
// Block device specific configuration, see the related config structs.
|
||||
// May be NULL if the underlying implementation goes unused.
|
||||
const struct lfs_rambd_cfg *rambd_cfg;
|
||||
const struct lfs_filebd_cfg *filebd_cfg;
|
||||
|
||||
// Minimum size of block read. All read operations must be a
|
||||
// multiple of this value.
|
||||
lfs_size_t read_size;
|
||||
|
||||
// Minimum size of block program. All program operations must be a
|
||||
// multiple of this value.
|
||||
lfs_size_t prog_size;
|
||||
|
||||
// Size of an erasable block.
|
||||
lfs_size_t erase_size;
|
||||
|
||||
// Number of erasable blocks on the device.
|
||||
lfs_size_t erase_count;
|
||||
|
||||
// 8-bit erase value to use for simulating erases. -1 does not simulate
|
||||
// erases, which can speed up testing by avoiding all the extra block-device
|
||||
// operations to store the erase value.
|
||||
@@ -62,9 +79,6 @@ struct lfs_testbd_config {
|
||||
// the program with exit. Simulates power-loss. 0 disables.
|
||||
uint32_t power_cycles;
|
||||
|
||||
// Optional buffer for RAM block device.
|
||||
void *buffer;
|
||||
|
||||
// Optional buffer for wear
|
||||
void *wear_buffer;
|
||||
};
|
||||
@@ -72,70 +86,63 @@ struct lfs_testbd_config {
|
||||
// testbd state
|
||||
typedef struct lfs_testbd {
|
||||
union {
|
||||
struct {
|
||||
lfs_filebd_t bd;
|
||||
struct lfs_filebd_config cfg;
|
||||
} file;
|
||||
struct {
|
||||
lfs_rambd_t bd;
|
||||
struct lfs_rambd_config cfg;
|
||||
} ram;
|
||||
} u;
|
||||
lfs_filebd_t filebd;
|
||||
lfs_rambd_t rambd;
|
||||
} impl;
|
||||
|
||||
bool persist;
|
||||
uint32_t power_cycles;
|
||||
lfs_testbd_wear_t *wear;
|
||||
|
||||
const struct lfs_testbd_config *cfg;
|
||||
const struct lfs_testbd_cfg *cfg;
|
||||
} lfs_testbd_t;
|
||||
|
||||
|
||||
/// Block device API ///
|
||||
|
||||
// Create a test block device using the geometry in lfs_config
|
||||
// Create a test block device using the geometry in lfs_cfg
|
||||
//
|
||||
// Note that filebd is used if a path is provided, if path is NULL
|
||||
// testbd will use rambd which can be much faster.
|
||||
int lfs_testbd_create(const struct lfs_config *cfg, const char *path);
|
||||
int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path,
|
||||
const struct lfs_testbd_config *bdcfg);
|
||||
int lfs_testbd_createcfg(lfs_testbd_t *bd, const char *path,
|
||||
const struct lfs_testbd_cfg *cfg);
|
||||
|
||||
// Clean up memory associated with block device
|
||||
int lfs_testbd_destroy(const struct lfs_config *cfg);
|
||||
int lfs_testbd_destroy(lfs_testbd_t *bd);
|
||||
|
||||
// Read a block
|
||||
int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
int lfs_testbd_read(lfs_testbd_t *bd, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size);
|
||||
|
||||
// Program a block
|
||||
//
|
||||
// The block must have previously been erased.
|
||||
int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
int lfs_testbd_prog(lfs_testbd_t *bd, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size);
|
||||
|
||||
// Erase a block
|
||||
//
|
||||
// A block must be erased before being programmed. The
|
||||
// state of an erased block is undefined.
|
||||
int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block);
|
||||
int lfs_testbd_erase(lfs_testbd_t *bd, lfs_block_t block);
|
||||
|
||||
// Sync the block device
|
||||
int lfs_testbd_sync(const struct lfs_config *cfg);
|
||||
int lfs_testbd_sync(lfs_testbd_t *bd);
|
||||
|
||||
|
||||
/// Additional extended API for driving test features ///
|
||||
|
||||
// Get simulated wear on a given block
|
||||
lfs_testbd_swear_t lfs_testbd_getwear(const struct lfs_config *cfg,
|
||||
lfs_testbd_swear_t lfs_testbd_getwear(lfs_testbd_t *bd,
|
||||
lfs_block_t block);
|
||||
|
||||
// Manually set simulated wear on a given block
|
||||
int lfs_testbd_setwear(const struct lfs_config *cfg,
|
||||
int lfs_testbd_setwear(lfs_testbd_t *bd,
|
||||
lfs_block_t block, lfs_testbd_wear_t wear);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
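For reference, a sketch of a RAM-backed test block device built from the reworked testbd and rambd configs above; all sizes are illustrative and fields left out default to zero:

static const struct lfs_rambd_cfg testbd_rambd_cfg = {
    .read_size   = 16,
    .prog_size   = 16,
    .erase_size  = 4096,
    .erase_count = 64,
    .erase_value = 0xff,
};

static const struct lfs_testbd_cfg testbd_cfg = {
    .rambd_cfg    = &testbd_rambd_cfg,
    .filebd_cfg   = NULL,    // unused, no path -> RAM-backed
    .read_size    = 16,
    .prog_size    = 16,
    .erase_size   = 4096,
    .erase_count  = 64,
    .erase_value  = 0xff,
    .erase_cycles = 100,     // enable wear simulation
    .power_cycles = 0,       // no simulated power loss
};

static lfs_testbd_t testbd;

int testbd_example(void) {
    // a NULL path selects the rambd backend
    return lfs_testbd_createcfg(&testbd, NULL, &testbd_cfg);
}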
406 lfs.h
@@ -7,13 +7,10 @@
|
||||
#ifndef LFS_H
|
||||
#define LFS_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include "lfs_util.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
@@ -22,7 +19,7 @@ extern "C"
|
||||
// Software library version
|
||||
// Major (top-nibble), incremented on backwards incompatible changes
|
||||
// Minor (bottom-nibble), incremented on feature additions
|
||||
#define LFS_VERSION 0x00020004
|
||||
#define LFS_VERSION 0x00020002
|
||||
#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
|
||||
#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))
|
||||
|
||||
@@ -49,8 +46,13 @@ typedef uint32_t lfs_block_t;
|
||||
// info struct. Limited to <= 1022. Stored in superblock and must be
|
||||
// respected by other littlefs drivers.
|
||||
#ifndef LFS_NAME_MAX
|
||||
#if defined(LFS_NAME_LIMIT) && \
|
||||
LFS_NAME_LIMIT > 0 && LFS_NAME_MAX <= 1022
|
||||
#define LFS_NAME_MAX LFS_NAME_LIMIT
|
||||
#else
|
||||
#define LFS_NAME_MAX 255
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// Maximum size of a file in bytes, may be redefined to limit to support other
|
||||
// drivers. Limited on disk to <= 4294967296. However, above 2147483647 the
|
||||
@@ -58,34 +60,24 @@ typedef uint32_t lfs_block_t;
|
||||
// incorrect values due to using signed integers. Stored in superblock and
|
||||
// must be respected by other littlefs drivers.
|
||||
#ifndef LFS_FILE_MAX
|
||||
#if defined(LFS_FILE_LIMIT) && \
|
||||
LFS_FILE_LIMIT > 0 && LFS_FILE_LIMIT <= 4294967296
|
||||
#define LFS_FILE_MAX LFS_FILE_LIMIT
|
||||
#else
|
||||
#define LFS_FILE_MAX 2147483647
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// Maximum size of custom attributes in bytes, may be redefined, but there is
|
||||
// no real benefit to using a smaller LFS_ATTR_MAX. Limited to <= 1022.
|
||||
#ifndef LFS_ATTR_MAX
|
||||
#if defined(LFS_ATTR_LIMIT) && \
|
||||
LFS_ATTR_LIMIT > 0 && LFS_ATTR_LIMIT <= 1022
|
||||
#define LFS_ATTR_MAX LFS_FILE_LIMIT
|
||||
#else
|
||||
#define LFS_ATTR_MAX 1022
|
||||
#endif
|
||||
|
||||
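For reference, a sketch of how the new LFS_*_LIMIT overrides above might be supplied at compile time; the values are illustrative:

// e.g. in CFLAGS: -DLFS_NAME_LIMIT=64 -DLFS_FILE_LIMIT=0x7fffffff -DLFS_ATTR_LIMIT=64
// or, equivalently, before including lfs.h:
#define LFS_NAME_LIMIT 64          // becomes LFS_NAME_MAX
#define LFS_FILE_LIMIT 2147483647  // becomes LFS_FILE_MAX
#define LFS_ATTR_LIMIT 64          // becomes LFS_ATTR_MAX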
// Possible error codes, these are negative to allow
|
||||
// valid positive return values
|
||||
enum lfs_error {
|
||||
LFS_ERR_OK = 0, // No error
|
||||
LFS_ERR_IO = -5, // Error during device operation
|
||||
LFS_ERR_CORRUPT = -84, // Corrupted
|
||||
LFS_ERR_NOENT = -2, // No directory entry
|
||||
LFS_ERR_EXIST = -17, // Entry already exists
|
||||
LFS_ERR_NOTDIR = -20, // Entry is not a dir
|
||||
LFS_ERR_ISDIR = -21, // Entry is a dir
|
||||
LFS_ERR_NOTEMPTY = -39, // Dir is not empty
|
||||
LFS_ERR_BADF = -9, // Bad file number
|
||||
LFS_ERR_FBIG = -27, // File too large
|
||||
LFS_ERR_INVAL = -22, // Invalid parameter
|
||||
LFS_ERR_NOSPC = -28, // No space left on device
|
||||
LFS_ERR_NOMEM = -12, // No more memory available
|
||||
LFS_ERR_NOATTR = -61, // No data/attr available
|
||||
LFS_ERR_NAMETOOLONG = -36, // File name too long
|
||||
};
|
||||
#endif
|
||||
|
||||
// File types
|
||||
enum lfs_type {
|
||||
@@ -124,25 +116,20 @@ enum lfs_type {
|
||||
enum lfs_open_flags {
|
||||
// open flags
|
||||
LFS_O_RDONLY = 1, // Open a file as read only
|
||||
#ifndef LFS_READONLY
|
||||
LFS_O_WRONLY = 2, // Open a file as write only
|
||||
LFS_O_RDWR = 3, // Open a file as read and write
|
||||
LFS_O_CREAT = 0x0100, // Create a file if it does not exist
|
||||
LFS_O_EXCL = 0x0200, // Fail if a file already exists
|
||||
LFS_O_TRUNC = 0x0400, // Truncate the existing file to zero size
|
||||
LFS_O_APPEND = 0x0800, // Move to end of file on every write
|
||||
#endif
|
||||
|
||||
// internally used flags
|
||||
#ifndef LFS_READONLY
|
||||
LFS_F_DIRTY = 0x010000, // File does not match storage
|
||||
LFS_F_WRITING = 0x020000, // File has been written since last flush
|
||||
#endif
|
||||
LFS_F_READING = 0x040000, // File has been read since last flush
|
||||
#ifndef LFS_READONLY
|
||||
LFS_F_ERRED = 0x080000, // An error occurred during write
|
||||
#endif
|
||||
LFS_F_ERRED = 0x080000, // An error occured during write
|
||||
LFS_F_INLINE = 0x100000, // Currently inlined in directory entry
|
||||
LFS_F_OPENED = 0x200000, // File has been opened
|
||||
};
|
||||
|
||||
// File seek flags
|
||||
@@ -154,58 +141,128 @@ enum lfs_whence_flags {


// Configuration provided during initialization of the littlefs
struct lfs_config {

// If every config option is provided at compile time, littlefs switches
// to "LFS_STATICCFG" mode. The dynamic lfs_cfg struct is not included in
// the lfs_t struct, and *cfg functions are no longer available.
#if defined(LFS_BD_READ) && \
defined(LFS_BD_PROG) && \
defined(LFS_BD_ERASE) && \
defined(LFS_BD_SYNC) && \
defined(LFS_READ_SIZE) && \
defined(LFS_PROG_SIZE) && \
defined(LFS_BLOCK_SIZE) && \
defined(LFS_BLOCK_COUNT) && \
defined(LFS_BLOCK_CYCLES) && \
defined(LFS_BUFFER_SIZE) && \
defined(LFS_LOOKAHEAD_SIZE) && \
defined(LFS_READ_BUFFER) && \
defined(LFS_PROG_BUFFER) && \
defined(LFS_LOOKAHEAD_BUFFER) && \
defined(LFS_NAME_LIMIT) && \
defined(LFS_FILE_LIMIT) && \
defined(LFS_ATTR_LIMIT)
#define LFS_STATICCFG
#endif

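For reference, a sketch of a build configuration that would satisfy the check above and switch littlefs into LFS_STATICCFG mode; every value and buffer name is illustrative, and the exact meaning of each macro is an assumption based on the surrounding hunks:

#define LFS_BD_READ             // dispatch to the extern lfs_bd_read()
#define LFS_BD_PROG             // dispatch to the extern lfs_bd_prog()
#define LFS_BD_ERASE            // dispatch to the extern lfs_bd_erase()
#define LFS_BD_SYNC             // dispatch to the extern lfs_bd_sync()
#define LFS_READ_SIZE        16
#define LFS_PROG_SIZE        16
#define LFS_BLOCK_SIZE       4096
#define LFS_BLOCK_COUNT      128
#define LFS_BLOCK_CYCLES     500
#define LFS_BUFFER_SIZE      16
#define LFS_LOOKAHEAD_SIZE   16
#define LFS_READ_BUFFER      lfs_read_buf       // hypothetical static buffers
#define LFS_PROG_BUFFER      lfs_prog_buf
#define LFS_LOOKAHEAD_BUFFER lfs_lookahead_buf
#define LFS_NAME_LIMIT       255
#define LFS_FILE_LIMIT       2147483647
#define LFS_ATTR_LIMIT       1022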
// Dynamic config struct
#ifndef LFS_STATICCFG
struct lfs_cfg {
#endif
// Opaque user provided context that can be used to pass
// information to the block device operations
void *context;
#if !(defined(LFS_BD_READ) && \
defined(LFS_BD_PROG) && \
defined(LFS_BD_ERASE) && \
defined(LFS_BD_SYNC))
void *bd_ctx;
#endif

// Read a region in a block. Negative error codes are propogated
// to the user.
int (*read)(const struct lfs_config *c, lfs_block_t block,
#ifndef LFS_BD_READ
int (*bd_read)(void *ctx, lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size);
#define LFS_CFG_BD_READ(cfg, block, off, buffer, size) \
(cfg)->bd_read((cfg)->bd_ctx, block, off, buffer, size)
#else
#define LFS_CFG_BD_READ(cfg, block, off, buffer, size) \
lfs_bd_read(block, off, buffer, size)
#endif
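For reference, a sketch of a read callback matching the new bd_read signature above, where the opaque ctx carries a user block-device struct; the struct and its fields are hypothetical:

typedef struct my_bd {
    uint8_t *mem;           // backing memory
    lfs_size_t block_size;  // bytes per block
} my_bd_t;

static int my_bd_read(void *ctx, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    my_bd_t *bd = ctx;
    // memcpy comes from <string.h>, pulled in via lfs_util.h
    memcpy(buffer, &bd->mem[block*bd->block_size + off], size);
    return 0;
}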
|
||||
// Program a region in a block. The block must have previously
|
||||
// been erased. Negative error codes are propogated to the user.
|
||||
// May return LFS_ERR_CORRUPT if the block should be considered bad.
|
||||
int (*prog)(const struct lfs_config *c, lfs_block_t block,
|
||||
#ifndef LFS_BD_PROG
|
||||
int (*bd_prog)(void *ctx, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size);
|
||||
#define LFS_CFG_BD_PROG(cfg, block, off, buffer, size) \
|
||||
(cfg)->bd_prog((cfg)->bd_ctx, block, off, buffer, size)
|
||||
#else
|
||||
#define LFS_CFG_BD_PROG(cfg, block, off, buffer, size) \
|
||||
lfs_bd_prog(block, off, buffer, size)
|
||||
#endif
|
||||
|
||||
// Erase a block. A block must be erased before being programmed.
|
||||
// The state of an erased block is undefined. Negative error codes
|
||||
// are propogated to the user.
|
||||
// May return LFS_ERR_CORRUPT if the block should be considered bad.
|
||||
int (*erase)(const struct lfs_config *c, lfs_block_t block);
|
||||
#ifndef LFS_BD_ERASE
|
||||
int (*bd_erase)(void *ctx, lfs_block_t block);
|
||||
#define LFS_CFG_BD_ERASE(cfg, block) \
|
||||
(cfg)->bd_erase((cfg)->bd_ctx, block)
|
||||
#else
|
||||
#define LFS_CFG_BD_ERASE(cfg, block) \
|
||||
lfs_bd_erase(block)
|
||||
#endif
|
||||
|
||||
// Sync the state of the underlying block device. Negative error codes
|
||||
// are propogated to the user.
|
||||
int (*sync)(const struct lfs_config *c);
|
||||
|
||||
#ifdef LFS_THREADSAFE
|
||||
// Lock the underlying block device. Negative error codes
|
||||
// are propogated to the user.
|
||||
int (*lock)(const struct lfs_config *c);
|
||||
|
||||
// Unlock the underlying block device. Negative error codes
|
||||
// are propogated to the user.
|
||||
int (*unlock)(const struct lfs_config *c);
|
||||
#ifndef LFS_BD_SYNC
|
||||
int (*bd_sync)(void *ctx);
|
||||
#define LFS_CFG_BD_SYNC(cfg) \
|
||||
(cfg)->bd_sync((cfg)->bd_ctx)
|
||||
#else
|
||||
#define LFS_CFG_BD_SYNC(cfg) \
|
||||
lfs_bd_sync()
|
||||
#endif
|
||||
|
||||
// Minimum size of a block read. All read operations will be a
|
||||
// multiple of this value.
|
||||
#ifndef LFS_READ_SIZE
|
||||
lfs_size_t read_size;
|
||||
#define LFS_CFG_READ_SIZE(cfg) (cfg)->read_size
|
||||
#else
|
||||
#define LFS_CFG_READ_SIZE(cfg) LFS_READ_SIZE
|
||||
#endif
|
||||
|
||||
// Minimum size of a block program. All program operations will be a
|
||||
// multiple of this value.
|
||||
#ifndef LFS_PROG_SIZE
|
||||
lfs_size_t prog_size;
|
||||
#define LFS_CFG_PROG_SIZE(cfg) (cfg)->prog_size
|
||||
#else
|
||||
#define LFS_CFG_PROG_SIZE(cfg) LFS_PROG_SIZE
|
||||
#endif
|
||||
|
||||
// Size of an erasable block. This does not impact ram consumption and
|
||||
// may be larger than the physical erase size. However, non-inlined files
|
||||
// take up at minimum one block. Must be a multiple of the read
|
||||
// and program sizes.
|
||||
#ifndef LFS_BLOCK_SIZE
|
||||
lfs_size_t block_size;
|
||||
#define LFS_CFG_BLOCK_SIZE(cfg) (cfg)->block_size
|
||||
#else
|
||||
#define LFS_CFG_BLOCK_SIZE(cfg) LFS_BLOCK_SIZE
|
||||
#endif
|
||||
|
||||
// Number of erasable blocks on the device.
|
||||
#ifndef LFS_BLOCK_COUNT
|
||||
lfs_size_t block_count;
|
||||
#define LFS_CFG_BLOCK_COUNT(cfg) (cfg)->block_count
|
||||
#else
|
||||
#define LFS_CFG_BLOCK_COUNT(cfg) LFS_BLOCK_COUNT
|
||||
#endif
|
||||
|
||||
// Number of erase cycles before littlefs evicts metadata logs and moves
|
||||
// the metadata to another block. Suggested values are in the
|
||||
@@ -213,56 +270,168 @@ struct lfs_config {
|
||||
// of less consistent wear distribution.
|
||||
//
|
||||
// Set to -1 to disable block-level wear-leveling.
|
||||
#ifndef LFS_BLOCK_CYCLES
|
||||
int32_t block_cycles;
|
||||
#define LFS_CFG_BLOCK_CYCLES(cfg) (cfg)->block_cycles
|
||||
#else
|
||||
#define LFS_CFG_BLOCK_CYCLES(cfg) LFS_BLOCK_CYCLES
|
||||
#endif
|
||||
|
||||
// Size of block caches. Each cache buffers a portion of a block in RAM.
|
||||
// The littlefs needs a read cache, a program cache, and one additional
|
||||
// cache per file. Larger caches can improve performance by storing more
|
||||
// Size of internal buffers used to cache slices of blocks in RAM.
|
||||
// The littlefs needs a read buffer, a program buffer, and one additional
|
||||
// buffer per file. Larger buffers can improve performance by storing more
|
||||
// data and reducing the number of disk accesses. Must be a multiple of
|
||||
// the read and program sizes, and a factor of the block size.
|
||||
lfs_size_t cache_size;
|
||||
#ifndef LFS_BUFFER_SIZE
|
||||
lfs_size_t buffer_size;
|
||||
#define LFS_CFG_BUFFER_SIZE(cfg) (cfg)->buffer_size
|
||||
#else
|
||||
#define LFS_CFG_BUFFER_SIZE(cfg) LFS_BUFFER_SIZE
|
||||
#endif
|
||||
|
||||
// Size of the lookahead buffer in bytes. A larger lookahead buffer
|
||||
// increases the number of blocks found during an allocation pass. The
|
||||
// lookahead buffer is stored as a compact bitmap, so each byte of RAM
|
||||
// can track 8 blocks. Must be a multiple of 8.
|
||||
#ifndef LFS_LOOKAHEAD_SIZE
|
||||
lfs_size_t lookahead_size;
|
||||
#define LFS_CFG_LOOKAHEAD_SIZE(cfg) (cfg)->lookahead_size
|
||||
#else
|
||||
#define LFS_CFG_LOOKAHEAD_SIZE(cfg) LFS_LOOKAHEAD_SIZE
|
||||
#endif
|
||||
|
||||
// Optional statically allocated read buffer. Must be cache_size.
|
||||
// By default lfs_malloc is used to allocate this buffer.
|
||||
#ifndef LFS_READ_BUFFER
|
||||
void *read_buffer;
|
||||
#define LFS_CFG_READ_BUFFER(cfg) (cfg)->read_buffer
|
||||
#else
|
||||
#define LFS_CFG_READ_BUFFER(cfg) LFS_READ_BUFFER
|
||||
#endif
|
||||
|
||||
// Optional statically allocated program buffer. Must be cache_size.
|
||||
// By default lfs_malloc is used to allocate this buffer.
|
||||
#ifndef LFS_PROG_BUFFER
|
||||
void *prog_buffer;
|
||||
#define LFS_CFG_PROG_BUFFER(cfg) (cfg)->prog_buffer
|
||||
#else
|
||||
#define LFS_CFG_PROG_BUFFER(cfg) LFS_PROG_BUFFER
|
||||
#endif
|
||||
|
||||
// Optional statically allocated lookahead buffer. Must be lookahead_size
|
||||
// and aligned to a 32-bit boundary. By default lfs_malloc is used to
|
||||
// allocate this buffer.
|
||||
#ifndef LFS_LOOKAHEAD_BUFFER
|
||||
void *lookahead_buffer;
|
||||
#define LFS_CFG_LOOKAHEAD_BUFFER(cfg) (cfg)->lookahead_buffer
|
||||
#else
|
||||
#define LFS_CFG_LOOKAHEAD_BUFFER(cfg) LFS_LOOKAHEAD_BUFFER
|
||||
#endif
|
||||
|
||||
// Optional upper limit on length of file names in bytes. No downside for
|
||||
// larger names except the size of the info struct which is controlled by
|
||||
// the LFS_NAME_MAX define. Defaults to LFS_NAME_MAX when zero. Stored in
|
||||
// superblock and must be respected by other littlefs drivers.
|
||||
lfs_size_t name_max;
|
||||
#ifndef LFS_NAME_LIMIT
|
||||
lfs_size_t name_limit;
|
||||
#define LFS_CFG_NAME_LIMIT(cfg) (cfg)->name_limit
|
||||
#else
|
||||
#define LFS_CFG_NAME_LIMIT(cfg) LFS_NAME_LIMIT
|
||||
#endif
|
||||
|
||||
// Optional upper limit on files in bytes. No downside for larger files
|
||||
// but must be <= LFS_FILE_MAX. Defaults to LFS_FILE_MAX when zero. Stored
|
||||
// in superblock and must be respected by other littlefs drivers.
|
||||
lfs_size_t file_max;
|
||||
#ifndef LFS_FILE_LIMIT
|
||||
lfs_size_t file_limit;
|
||||
#define LFS_CFG_FILE_LIMIT(cfg) (cfg)->file_limit
|
||||
#else
|
||||
#define LFS_CFG_FILE_LIMIT(cfg) LFS_FILE_LIMIT
|
||||
#endif
|
||||
|
||||
// Optional upper limit on custom attributes in bytes. No downside for
|
||||
// larger attributes size but must be <= LFS_ATTR_MAX. Defaults to
|
||||
// LFS_ATTR_MAX when zero.
|
||||
lfs_size_t attr_max;
|
||||
|
||||
// Optional upper limit on total space given to metadata pairs in bytes. On
|
||||
// devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB)
|
||||
// can help bound the metadata compaction time. Must be <= block_size.
|
||||
// Defaults to block_size when zero.
|
||||
lfs_size_t metadata_max;
|
||||
#ifndef LFS_ATTR_LIMIT
|
||||
lfs_size_t attr_limit;
|
||||
#define LFS_CFG_ATTR_LIMIT(cfg) (cfg)->attr_limit
|
||||
#else
|
||||
#define LFS_CFG_ATTR_LIMIT(cfg) LFS_ATTR_LIMIT
|
||||
#endif
|
||||
#ifndef LFS_STATICCFG
|
||||
};
|
||||
#endif
|
||||
|
||||
// Configurable callbacks are a bit special, when LFS_BD_* is defined,
// LFS_CFG_* instead expands into a call to an extern lfs_bd_*, which
// must be defined by the user. This preserves type-safety of the
// callbacks.
#ifdef LFS_BD_READ
extern int lfs_bd_read(lfs_block_t block,
lfs_off_t off, void *buffer, lfs_size_t size);
#endif
#ifdef LFS_BD_PROG
extern int lfs_bd_prog(lfs_block_t block,
lfs_off_t off, const void *buffer, lfs_size_t size);
#endif
#ifdef LFS_BD_ERASE
extern int lfs_bd_erase(lfs_block_t block);
#endif
#ifdef LFS_BD_SYNC
extern int lfs_bd_sync(void);
#endif

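For reference, a sketch of the user-supplied extern when LFS_BD_READ is defined; my_flash_read and MY_BLOCK_SIZE are hypothetical stand-ins for a real driver:

int lfs_bd_read(lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    // the build's single, statically-known block device
    return my_flash_read((block*MY_BLOCK_SIZE) + off, buffer, size);
}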
// If every config option is provided at compile time, littlefs switches
|
||||
// to "LFS_FILE_STATICCFG" mode. The dynamic lfs_file_cfg struct is not
|
||||
// included in the lfs_file_t struct, and *cfg functions are no longer
|
||||
// available.
|
||||
#if defined(LFS_FILE_BUFFER) && \
|
||||
defined(LFS_FILE_ATTRS) && \
|
||||
defined(LFS_FILE_ATTR_COUNT)
|
||||
#define LFS_STATICCFG
|
||||
#endif
|
||||
|
||||
#ifndef LFS_FILE_STATICCFG
|
||||
// Optional configuration provided during lfs_file_opencfg
|
||||
struct lfs_file_cfg {
|
||||
#endif
|
||||
// Optional statically allocated file buffer. Must be cache_size.
|
||||
// By default lfs_malloc is used to allocate this buffer.
|
||||
#ifndef LFS_FILE_BUFFER
|
||||
void *buffer;
|
||||
#define LFS_FILE_CFG_BUFFER(cfg) (cfg)->buffer
|
||||
#else
|
||||
#define LFS_FILE_CFG_BUFFER(cfg) LFS_FILE_BUFFER
|
||||
#endif
|
||||
|
||||
// Optional list of custom attributes related to the file. If the file
|
||||
// is opened with read access, these attributes will be read from disk
|
||||
// during the open call. If the file is opened with write access, the
|
||||
// attributes will be written to disk every file sync or close. This
|
||||
// write occurs atomically with update to the file's contents.
|
||||
//
|
||||
// Custom attributes are uniquely identified by an 8-bit type and limited
|
||||
// to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller
|
||||
// than the buffer, it will be padded with zeros. If the stored attribute
|
||||
// is larger, then it will be silently truncated. If the attribute is not
|
||||
// found, it will be created implicitly.
|
||||
#ifndef LFS_FILE_ATTRS
|
||||
struct lfs_attr *attrs;
|
||||
#define LFS_FILE_CFG_ATTRS(cfg) (cfg)->attrs
|
||||
#else
|
||||
#define LFS_FILE_CFG_ATTRS(cfg) LFS_FILE_ATTRS
|
||||
#endif
|
||||
|
||||
// Number of custom attributes in the list
|
||||
#ifndef LFS_FILE_ATTR_COUNT
|
||||
lfs_size_t attr_count;
|
||||
#define LFS_FILE_CFG_ATTR_COUNT(cfg) (cfg)->attr_count
|
||||
#else
|
||||
#define LFS_FILE_CFG_ATTR_COUNT(cfg) LFS_FILE_ATTR_COUNT
|
||||
#endif
|
||||
#ifndef LFS_FILE_STATICCFG
|
||||
};
|
||||
#endif
|
||||
|
||||
// File info structure
|
||||
struct lfs_info {
|
||||
@@ -293,29 +462,6 @@ struct lfs_attr {
|
||||
lfs_size_t size;
|
||||
};
|
||||
|
||||
// Optional configuration provided during lfs_file_opencfg
|
||||
struct lfs_file_config {
|
||||
// Optional statically allocated file buffer. Must be cache_size.
|
||||
// By default lfs_malloc is used to allocate this buffer.
|
||||
void *buffer;
|
||||
|
||||
// Optional list of custom attributes related to the file. If the file
|
||||
// is opened with read access, these attributes will be read from disk
|
||||
// during the open call. If the file is opened with write access, the
|
||||
// attributes will be written to disk every file sync or close. This
|
||||
// write occurs atomically with update to the file's contents.
|
||||
//
|
||||
// Custom attributes are uniquely identified by an 8-bit type and limited
|
||||
// to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller
|
||||
// than the buffer, it will be padded with zeros. If the stored attribute
|
||||
// is larger, then it will be silently truncated. If the attribute is not
|
||||
// found, it will be created implicitly.
|
||||
struct lfs_attr *attrs;
|
||||
|
||||
// Number of custom attributes in the list
|
||||
lfs_size_t attr_count;
|
||||
};
|
||||
|
||||
|
||||
/// internal littlefs data structures ///
|
||||
typedef struct lfs_cache {
|
||||
@@ -365,16 +511,18 @@ typedef struct lfs_file {
|
||||
lfs_off_t off;
|
||||
lfs_cache_t cache;
|
||||
|
||||
const struct lfs_file_config *cfg;
|
||||
#ifndef LFS_FILE_STATICCFG
|
||||
const struct lfs_file_cfg *cfg;
|
||||
#endif
|
||||
} lfs_file_t;
|
||||
|
||||
typedef struct lfs_superblock {
|
||||
uint32_t version;
|
||||
lfs_size_t block_size;
|
||||
lfs_size_t block_count;
|
||||
lfs_size_t name_max;
|
||||
lfs_size_t file_max;
|
||||
lfs_size_t attr_max;
|
||||
lfs_size_t name_limit;
|
||||
lfs_size_t file_limit;
|
||||
lfs_size_t attr_limit;
|
||||
} lfs_superblock_t;
|
||||
|
||||
typedef struct lfs_gstate {
|
||||
@@ -408,10 +556,12 @@ typedef struct lfs {
|
||||
uint32_t *buffer;
|
||||
} free;
|
||||
|
||||
const struct lfs_config *cfg;
|
||||
lfs_size_t name_max;
|
||||
lfs_size_t file_max;
|
||||
lfs_size_t attr_max;
|
||||
#ifndef LFS_STATICCFG
|
||||
const struct lfs_cfg *cfg;
|
||||
#endif
|
||||
lfs_size_t name_limit;
|
||||
lfs_size_t file_limit;
|
||||
lfs_size_t attr_limit;
|
||||
|
||||
#ifdef LFS_MIGRATE
|
||||
struct lfs1 *lfs1;
|
||||
@@ -421,18 +571,38 @@ typedef struct lfs {
|
||||
|
||||
/// Filesystem functions ///
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Format a block device with the littlefs
|
||||
#if defined(LFS_STATICCFG)
|
||||
// Format a block device with littlefs
|
||||
//
|
||||
// Requires a littlefs object. This clobbers the littlefs object, and does
|
||||
// not leave the filesystem mounted.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_format(lfs_t *lfs);
|
||||
#endif
|
||||
|
||||
#if !defined(LFS_STATICCFG)
|
||||
// Format a block device with littlefs with per-filesystem configuration
|
||||
//
|
||||
// Requires a littlefs object and config struct. This clobbers the littlefs
|
||||
// object, and does not leave the filesystem mounted. The config struct must
|
||||
// be zeroed for defaults and backwards compatibility.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_format(lfs_t *lfs, const struct lfs_config *config);
|
||||
int lfs_formatcfg(lfs_t *lfs, const struct lfs_cfg *config);
|
||||
#endif
|
||||
|
||||
// Mounts a littlefs
|
||||
#if defined(LFS_STATICCFG)
|
||||
// Mounts littlefs
|
||||
//
|
||||
// Requires a littlefs object and static configuration.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_mount(lfs_t *lfs);
|
||||
#endif
|
||||
|
||||
#if !defined(LFS_STATICCFG)
|
||||
// Mounts a littlefs with per-filesystem configuration
|
||||
//
|
||||
// Requires a littlefs object and config struct. Multiple filesystems
|
||||
// may be mounted simultaneously with multiple littlefs objects. Both
|
||||
@@ -440,7 +610,8 @@ int lfs_format(lfs_t *lfs, const struct lfs_config *config);
|
||||
// be zeroed for defaults and backwards compatibility.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_mount(lfs_t *lfs, const struct lfs_config *config);
|
||||
int lfs_mountcfg(lfs_t *lfs, const struct lfs_cfg *config);
|
||||
#endif
|
||||
|
||||
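For reference, a sketch of the dynamic-config path using the renamed lfs_cfg struct and *cfg entry points declared above; the bd callbacks and geometry are hypothetical placeholders:

static const struct lfs_cfg cfg = {
    .bd_ctx         = &my_bd,      // hypothetical user block device
    .bd_read        = my_bd_read,
    .bd_prog        = my_bd_prog,
    .bd_erase       = my_bd_erase,
    .bd_sync        = my_bd_sync,
    .read_size      = 16,
    .prog_size      = 16,
    .block_size     = 4096,
    .block_count    = 128,
    .block_cycles   = 500,
    .buffer_size    = 16,
    .lookahead_size = 16,
};

static lfs_t lfs;

int mount_example(void) {
    // mount, reformatting on first use
    int err = lfs_mountcfg(&lfs, &cfg);
    if (err) {
        err = lfs_formatcfg(&lfs, &cfg);
        if (!err) {
            err = lfs_mountcfg(&lfs, &cfg);
        }
    }
    return err;
}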
// Unmounts a littlefs
|
||||
//
|
||||
@@ -450,15 +621,12 @@ int lfs_unmount(lfs_t *lfs);
|
||||
|
||||
/// General operations ///
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Removes a file or directory
|
||||
//
|
||||
// If removing a directory, the directory must be empty.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_remove(lfs_t *lfs, const char *path);
|
||||
#endif
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Rename or move a file or directory
|
||||
//
|
||||
// If the destination exists, it must match the source in type.
|
||||
@@ -466,7 +634,6 @@ int lfs_remove(lfs_t *lfs, const char *path);
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath);
|
||||
#endif
|
||||
|
||||
// Find info about a file or directory
|
||||
//
|
||||
@@ -489,7 +656,6 @@ int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info);
|
||||
lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
|
||||
uint8_t type, void *buffer, lfs_size_t size);
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Set custom attributes
|
||||
//
|
||||
// Custom attributes are uniquely identified by an 8-bit type and limited
|
||||
@@ -499,16 +665,13 @@ lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_setattr(lfs_t *lfs, const char *path,
|
||||
uint8_t type, const void *buffer, lfs_size_t size);
|
||||
#endif
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Removes a custom attribute
|
||||
//
|
||||
// If an attribute is not found, nothing happens.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type);
|
||||
#endif
|
||||
|
||||
|
||||
/// File operations ///
|
||||
@@ -522,7 +685,8 @@ int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type);
|
||||
int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
|
||||
const char *path, int flags);
|
||||
|
||||
// Open a file with extra configuration
|
||||
#if !defined(LFS_FILE_STATICCFG)
|
||||
// Open a file with per-file configuration
|
||||
//
|
||||
// The mode that the file is opened in is determined by the flags, which
|
||||
// are values from the enum lfs_open_flags that are bitwise-ored together.
|
||||
@@ -534,7 +698,8 @@ int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
|
||||
const char *path, int flags,
|
||||
const struct lfs_file_config *config);
|
||||
const struct lfs_file_cfg *config);
|
||||
#endif
|
||||
|
||||
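For reference, a sketch of per-file configuration with the renamed lfs_file_cfg struct and the lfs_file_opencfg declaration above; the buffer size, attribute type, and path are illustrative:

static uint8_t file_buffer[16];  // must match the configured buffer size
static uint32_t file_version = 1;

static struct lfs_attr file_attrs[] = {
    {.type = 0x74, .buffer = &file_version, .size = sizeof(file_version)},
};

static const struct lfs_file_cfg file_cfg = {
    .buffer     = file_buffer,
    .attrs      = file_attrs,
    .attr_count = 1,
};

int opencfg_example(lfs_t *lfs, lfs_file_t *file) {
    return lfs_file_opencfg(lfs, file, "hello.txt",
            LFS_O_RDWR | LFS_O_CREAT, &file_cfg);
}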
// Close a file
|
||||
//
|
||||
@@ -557,7 +722,6 @@ int lfs_file_sync(lfs_t *lfs, lfs_file_t *file);
|
||||
lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
|
||||
void *buffer, lfs_size_t size);
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Write data to file
|
||||
//
|
||||
// Takes a buffer and size indicating the data to write. The file will not
|
||||
@@ -566,7 +730,6 @@ lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
|
||||
// Returns the number of bytes written, or a negative error code on failure.
|
||||
lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
|
||||
const void *buffer, lfs_size_t size);
|
||||
#endif
|
||||
|
||||
// Change the position of the file
|
||||
//
|
||||
@@ -575,12 +738,10 @@ lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
|
||||
lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
|
||||
lfs_soff_t off, int whence);
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Truncates the size of the file to the specified size
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size);
|
||||
#endif
|
||||
|
||||
// Return the position of the file
|
||||
//
|
||||
@@ -603,12 +764,10 @@ lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file);
|
||||
|
||||
/// Directory operations ///
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Create a directory
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_mkdir(lfs_t *lfs, const char *path);
|
||||
#endif
|
||||
|
||||
// Open a directory
|
||||
//
|
||||
@@ -670,26 +829,39 @@ lfs_ssize_t lfs_fs_size(lfs_t *lfs);
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
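
A common use of the traversal callback is measuring how many blocks the filesystem currently references; a hedged sketch (not part of this diff), assuming a mounted handle:

    #include "lfs.h"

    // illustrative only: count referenced blocks via the traverse callback
    static int count_block(void *p, lfs_block_t block) {
        (void)block;
        *(lfs_size_t *)p += 1;
        return 0;  // a nonzero return would abort the traversal with that value
    }

    lfs_ssize_t blocks_in_use(lfs_t *lfs) {
        lfs_size_t count = 0;
        int err = lfs_fs_traverse(lfs, count_block, &count);
        return err ? (lfs_ssize_t)err : (lfs_ssize_t)count;
    }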

#ifndef LFS_READONLY
#ifdef LFS_MIGRATE
#if defined(LFS_MIGRATE) && defined(LFS_STATICCFG)
// Attempts to migrate a previous version of littlefs
//
// Behaves similarly to the lfs_format function. Attempts to mount
// the previous version of littlefs and update the filesystem so it can be
// mounted with the current version of littlefs.
//
// Requires a littlefs object. This clobbers the littlefs object, and does
// not leave the filesystem mounted.
//
// Returns a negative error code on failure.
int lfs_migrate(lfs_t *lfs, const struct lfs_cfg *cfg);
#endif

#if defined(LFS_MIGRATE) && !defined(LFS_STATICCFG)
// Attempts to migrate a previous version of littlefs with per-filesystem
// configuration
//
// Behaves similarly to the lfs_format function. Attempts to mount
// the previous version of littlefs and update the filesystem so it can be
// mounted with the current version of littlefs.
//
// Requires a littlefs object and config struct. This clobbers the littlefs
// object, and does not leave the filesystem mounted. The config struct must
// be zeroed for defaults and backwards compatibility.
//
// Returns a negative error code on failure.
int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg);
#endif
int lfs_migratecfg(lfs_t *lfs, const struct lfs_cfg *cfg);
#endif
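
As a usage sketch only (the lfs_migratecfg/lfs_mountcfg names and the lfs_cfg struct are specific to this branch, and LFS_MIGRATE must be defined): migration is typically attempted once at boot when a normal mount fails, reusing the same configuration.

    #include "lfs.h"

    // illustrative only: mount, falling back to migrating an older image
    int mount_or_migrate(lfs_t *lfs, const struct lfs_cfg *cfg) {
        int err = lfs_mountcfg(lfs, cfg);
        if (err) {
            // mount failed, try upgrading a previous littlefs version in place
            err = lfs_migratecfg(lfs, cfg);
            if (err) {
                return err;
            }
            err = lfs_mountcfg(lfs, cfg);
        }
        return err;
    }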


#ifdef __cplusplus
} /* extern "C" */
}
#endif

#endif

@@ -7,7 +7,7 @@
#include "lfs_util.h"

// Only compile if user does not provide custom config
#ifndef LFS_CONFIG
#ifndef LFS_UTIL


// Software CRC implementation with small lookup table

58  lfs_util.h
@@ -3,20 +3,21 @@
*
* Copyright (c) 2017, Arm Limited. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Can be overridden by users with their own configuration by defining
* LFS_UTIL as a header file (-DLFS_UTIL=my_lfs_util.h)
*
* If LFS_UTIL is defined, none of the default definitions will be
* emitted and must be provided by the user's header file. To start, I would
* suggest copying lfs_util.h and modifying as needed.
*/
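
A hedged sketch of what such an override might look like (the file name, build flag, and my_log hook are hypothetical, and everything the default lfs_util.h normally defines still has to be provided):

    // selected at build time with something like: cc -DLFS_UTIL=my_lfs_util.h ...
    #ifndef MY_LFS_UTIL_H
    #define MY_LFS_UTIL_H

    #include <stdint.h>
    #include <stdbool.h>
    #include <string.h>
    #include <stdlib.h>

    // hypothetical project logger replacing the default printf-based macros
    void my_log(const char *fmt, ...);
    #define LFS_WARN(...)  my_log(__VA_ARGS__)
    #define LFS_ERROR(...) my_log(__VA_ARGS__)

    // ...plus the error codes, remaining macros, and builtins that the
    // default lfs_util.h would otherwise emit...

    #endif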
#ifndef LFS_UTIL_H
#define LFS_UTIL_H

// Users can override lfs_util.h with their own configuration by defining
// LFS_CONFIG as a header file to include (-DLFS_CONFIG=lfs_config.h).
//
// If LFS_CONFIG is used, none of the default utils will be emitted and must be
// provided by the config file. To start, I would suggest copying lfs_util.h
// and modifying as needed.
#ifdef LFS_CONFIG
#ifdef LFS_UTIL
#define LFS_STRINGIZE(x) LFS_STRINGIZE2(x)
#define LFS_STRINGIZE2(x) #x
#include LFS_STRINGIZE(LFS_CONFIG)
#include LFS_STRINGIZE(LFS_UTIL)
#else

// System includes
@@ -39,17 +40,37 @@
#endif

#ifdef __cplusplus
extern "C"
{
extern "C" {
#endif


// Possible error codes, these are negative to allow valid positive
// return values. May be redefined to system-specific error codes as long
// as they fit in a negative integer.
enum lfs_error {
    LFS_ERR_OK          = 0,    // No error
    LFS_ERR_IO          = -5,   // Error during device operation
    LFS_ERR_CORRUPT     = -84,  // Corrupted
    LFS_ERR_NOENT       = -2,   // No directory entry
    LFS_ERR_EXIST       = -17,  // Entry already exists
    LFS_ERR_NOTDIR      = -20,  // Entry is not a dir
    LFS_ERR_ISDIR       = -21,  // Entry is a dir
    LFS_ERR_NOTEMPTY    = -39,  // Dir is not empty
    LFS_ERR_BADF        = -9,   // Bad file number
    LFS_ERR_FBIG        = -27,  // File too large
    LFS_ERR_INVAL       = -22,  // Invalid parameter
    LFS_ERR_NOSPC       = -28,  // No space left on device
    LFS_ERR_NOMEM       = -12,  // No more memory available
    LFS_ERR_NOATTR      = -61,  // No data/attr available
    LFS_ERR_NAMETOOLONG = -36,  // File name too long
};
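
Because the codes are negative and all other return values are valid results, callers can branch on specific values; a small sketch (assuming a mounted handle and the lfs_stat declared in lfs.h):

    // illustrative only: treat "not found" differently from real failures
    struct lfs_info info;
    int err = lfs_stat(&lfs, "config.bin", &info);
    if (err == LFS_ERR_NOENT) {
        // the file simply does not exist yet
    } else if (err < 0) {
        // a real failure, e.g. LFS_ERR_IO or LFS_ERR_CORRUPT
    } else {
        // success, info.type and info.size are valid
    }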


// Macros, may be replaced by system specific wrappers. Arguments to these
// macros must not have side-effects as the macros can be removed for a smaller
// code footprint

// Logging functions
#ifndef LFS_TRACE
#ifdef LFS_YES_TRACE
#define LFS_TRACE_(fmt, ...) \
    printf("%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
@@ -57,9 +78,7 @@ extern "C"
#else
#define LFS_TRACE(...)
#endif
#endif
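
Since each logging macro is wrapped in an #ifndef, a port can substitute its own before this header is seen; a sketch with a hypothetical my_trace logger (the arguments must stay free of side effects because the macro may compile to nothing):

    // in a header included before lfs_util.h, or via an equivalent -D flag
    #define LFS_TRACE(...) my_trace(__VA_ARGS__)  // my_trace is hypothetical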

#ifndef LFS_DEBUG
#ifndef LFS_NO_DEBUG
#define LFS_DEBUG_(fmt, ...) \
    printf("%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
@@ -67,9 +86,7 @@ extern "C"
#else
#define LFS_DEBUG(...)
#endif
#endif

#ifndef LFS_WARN
#ifndef LFS_NO_WARN
#define LFS_WARN_(fmt, ...) \
    printf("%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
@@ -77,9 +94,7 @@ extern "C"
#else
#define LFS_WARN(...)
#endif
#endif

#ifndef LFS_ERROR
#ifndef LFS_NO_ERROR
#define LFS_ERROR_(fmt, ...) \
    printf("%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
@@ -87,16 +102,13 @@ extern "C"
#else
#define LFS_ERROR(...)
#endif
#endif

// Runtime assertions
#ifndef LFS_ASSERT
#ifndef LFS_NO_ASSERT
#define LFS_ASSERT(test) assert(test)
#else
#define LFS_ASSERT(test)
#endif
#endif


// Builtin functions, these may be replaced by more efficient
@@ -160,8 +172,8 @@ static inline uint32_t lfs_popc(uint32_t a) {

// Find the sequence comparison of a and b, this is the distance
// between a and b ignoring overflow
static inline int lfs_scmp(uint32_t a, uint32_t b) {
    return (int)(unsigned)(a - b);
static inline int32_t lfs_scmp(uint32_t a, uint32_t b) {
    return (int32_t)(uint32_t)(a - b);
}

// Convert between 32-bit little-endian and native order
@@ -237,7 +249,7 @@ static inline void lfs_free(void *p) {


#ifdef __cplusplus
} /* extern "C" */
}
#endif

#endif

284  scripts/code.py
@@ -1,284 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find code size at the function level. Basically just a bit wrapper
|
||||
# around nm with some extra conveniences for comparing builds. Heavily inspired
|
||||
# by Linux's Bloat-O-Meter.
|
||||
#
|
||||
|
||||
import os
|
||||
import glob
|
||||
import itertools as it
|
||||
import subprocess as sp
|
||||
import shlex
|
||||
import re
|
||||
import csv
|
||||
import collections as co
|
||||
|
||||
|
||||
OBJ_PATHS = ['*.o']
|
||||
|
||||
def collect(paths, **args):
|
||||
results = co.defaultdict(lambda: 0)
|
||||
pattern = re.compile(
|
||||
'^(?P<size>[0-9a-fA-F]+)' +
|
||||
' (?P<type>[%s])' % re.escape(args['type']) +
|
||||
' (?P<func>.+?)$')
|
||||
for path in paths:
|
||||
# note nm-tool may contain extra args
|
||||
cmd = args['nm_tool'] + ['--size-sort', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace')
|
||||
for line in proc.stdout:
|
||||
m = pattern.match(line)
|
||||
if m:
|
||||
results[(path, m.group('func'))] += int(m.group('size'), 16)
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
|
||||
flat_results = []
|
||||
for (file, func), size in results.items():
|
||||
# map to source files
|
||||
if args.get('build_dir'):
|
||||
file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
|
||||
# replace .o with .c, different scripts report .o/.c, we need to
|
||||
# choose one if we want to deduplicate csv files
|
||||
file = re.sub('\.o$', '.c', file)
|
||||
# discard internal functions
|
||||
if not args.get('everything'):
|
||||
if func.startswith('__'):
|
||||
continue
|
||||
# discard .8449 suffixes created by optimizer
|
||||
func = re.sub('\.[0-9]+', '', func)
|
||||
|
||||
flat_results.append((file, func, size))
|
||||
|
||||
return flat_results
|
||||
|
||||
def main(**args):
|
||||
def openio(path, mode='r'):
|
||||
if path == '-':
|
||||
if 'r' in mode:
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
|
||||
else:
|
||||
return open(path, mode)
|
||||
|
||||
# find sizes
|
||||
if not args.get('use', None):
|
||||
# find .o files
|
||||
paths = []
|
||||
for path in args['obj_paths']:
|
||||
if os.path.isdir(path):
|
||||
path = path + '/*.o'
|
||||
|
||||
for path in glob.glob(path):
|
||||
paths.append(path)
|
||||
|
||||
if not paths:
|
||||
print('no .obj files found in %r?' % args['obj_paths'])
|
||||
sys.exit(-1)
|
||||
|
||||
results = collect(paths, **args)
|
||||
else:
|
||||
with openio(args['use']) as f:
|
||||
r = csv.DictReader(f)
|
||||
results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['code_size']))
|
||||
for result in r
|
||||
if result.get('code_size') not in {None, ''}]
|
||||
|
||||
total = 0
|
||||
for _, _, size in results:
|
||||
total += size
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
r = csv.DictReader(f)
|
||||
prev_results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['code_size']))
|
||||
for result in r
|
||||
if result.get('code_size') not in {None, ''}]
|
||||
except FileNotFoundError:
|
||||
prev_results = []
|
||||
|
||||
prev_total = 0
|
||||
for _, _, size in prev_results:
|
||||
prev_total += size
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
merged_results = co.defaultdict(lambda: {})
|
||||
other_fields = []
|
||||
|
||||
# merge?
|
||||
if args.get('merge'):
|
||||
try:
|
||||
with openio(args['merge']) as f:
|
||||
r = csv.DictReader(f)
|
||||
for result in r:
|
||||
file = result.pop('file', '')
|
||||
func = result.pop('name', '')
|
||||
result.pop('code_size', None)
|
||||
merged_results[(file, func)] = result
|
||||
other_fields = result.keys()
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
for file, func, size in results:
|
||||
merged_results[(file, func)]['code_size'] = size
|
||||
|
||||
with openio(args['output'], 'w') as f:
|
||||
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'code_size'])
|
||||
w.writeheader()
|
||||
for (file, func), result in sorted(merged_results.items()):
|
||||
w.writerow({'file': file, 'name': func, **result})
|
||||
|
||||
# print results
|
||||
def dedup_entries(results, by='name'):
|
||||
entries = co.defaultdict(lambda: 0)
|
||||
for file, func, size in results:
|
||||
entry = (file if by == 'file' else func)
|
||||
entries[entry] += size
|
||||
return entries
|
||||
|
||||
def diff_entries(olds, news):
|
||||
diff = co.defaultdict(lambda: (0, 0, 0, 0))
|
||||
for name, new in news.items():
|
||||
diff[name] = (0, new, new, 1.0)
|
||||
for name, old in olds.items():
|
||||
_, new, _, _ = diff[name]
|
||||
diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
|
||||
return diff
|
||||
|
||||
def sorted_entries(entries):
|
||||
if args.get('size_sort'):
|
||||
return sorted(entries, key=lambda x: (-x[1], x))
|
||||
elif args.get('reverse_size_sort'):
|
||||
return sorted(entries, key=lambda x: (+x[1], x))
|
||||
else:
|
||||
return sorted(entries)
|
||||
|
||||
def sorted_diff_entries(entries):
|
||||
if args.get('size_sort'):
|
||||
return sorted(entries, key=lambda x: (-x[1][1], x))
|
||||
elif args.get('reverse_size_sort'):
|
||||
return sorted(entries, key=lambda x: (+x[1][1], x))
|
||||
else:
|
||||
return sorted(entries, key=lambda x: (-x[1][3], x))
|
||||
|
||||
def print_header(by=''):
|
||||
if not args.get('diff'):
|
||||
print('%-36s %7s' % (by, 'size'))
|
||||
else:
|
||||
print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
|
||||
|
||||
def print_entry(name, size):
|
||||
print("%-36s %7d" % (name, size))
|
||||
|
||||
def print_diff_entry(name, old, new, diff, ratio):
|
||||
print("%-36s %7s %7s %+7d%s" % (name,
|
||||
old or "-",
|
||||
new or "-",
|
||||
diff,
|
||||
' (%+.1f%%)' % (100*ratio) if ratio else ''))
|
||||
|
||||
def print_entries(by='name'):
|
||||
entries = dedup_entries(results, by=by)
|
||||
|
||||
if not args.get('diff'):
|
||||
print_header(by=by)
|
||||
for name, size in sorted_entries(entries.items()):
|
||||
print_entry(name, size)
|
||||
else:
|
||||
prev_entries = dedup_entries(prev_results, by=by)
|
||||
diff = diff_entries(prev_entries, entries)
|
||||
print_header(by='%s (%d added, %d removed)' % (by,
|
||||
sum(1 for old, _, _, _ in diff.values() if not old),
|
||||
sum(1 for _, new, _, _ in diff.values() if not new)))
|
||||
for name, (old, new, diff, ratio) in sorted_diff_entries(
|
||||
diff.items()):
|
||||
if ratio or args.get('all'):
|
||||
print_diff_entry(name, old, new, diff, ratio)
|
||||
|
||||
def print_totals():
|
||||
if not args.get('diff'):
|
||||
print_entry('TOTAL', total)
|
||||
else:
|
||||
ratio = (0.0 if not prev_total and not total
|
||||
else 1.0 if not prev_total
|
||||
else (total-prev_total)/prev_total)
|
||||
print_diff_entry('TOTAL',
|
||||
prev_total, total,
|
||||
total-prev_total,
|
||||
ratio)
|
||||
|
||||
if args.get('quiet'):
|
||||
pass
|
||||
elif args.get('summary'):
|
||||
print_header()
|
||||
print_totals()
|
||||
elif args.get('files'):
|
||||
print_entries(by='file')
|
||||
print_totals()
|
||||
else:
|
||||
print_entries(by='name')
|
||||
print_totals()
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Find code size at the function level.")
|
||||
parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
|
||||
help="Description of where to find *.o files. May be a directory \
|
||||
or a list of paths. Defaults to %r." % OBJ_PATHS)
|
||||
parser.add_argument('-v', '--verbose', action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument('-q', '--quiet', action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument('-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument('-u', '--use',
|
||||
help="Don't compile and find code sizes, instead use this CSV file.")
|
||||
parser.add_argument('-d', '--diff',
|
||||
help="Specify CSV file to diff code size against.")
|
||||
parser.add_argument('-m', '--merge',
|
||||
help="Merge with an existing CSV file when writing to output.")
|
||||
parser.add_argument('-a', '--all', action='store_true',
|
||||
help="Show all functions, not just the ones that changed.")
|
||||
parser.add_argument('-A', '--everything', action='store_true',
|
||||
help="Include builtin and libc specific symbols.")
|
||||
parser.add_argument('-s', '--size-sort', action='store_true',
|
||||
help="Sort by size.")
|
||||
parser.add_argument('-S', '--reverse-size-sort', action='store_true',
|
||||
help="Sort by size, but backwards.")
|
||||
parser.add_argument('-F', '--files', action='store_true',
|
||||
help="Show file-level code sizes. Note this does not include padding! "
|
||||
"So sizes may differ from other tools.")
|
||||
parser.add_argument('-Y', '--summary', action='store_true',
|
||||
help="Only show the total code size.")
|
||||
parser.add_argument('--type', default='tTrRdD',
|
||||
help="Type of symbols to report, this uses the same single-character "
|
||||
"type-names emitted by nm. Defaults to %(default)r.")
|
||||
parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
|
||||
help="Path to the nm tool to use.")
|
||||
parser.add_argument('--build-dir',
|
||||
help="Specify the relative build directory. Used to map object files \
|
||||
to the correct source files.")
|
||||
sys.exit(main(**vars(parser.parse_args())))
|
||||
@@ -1,323 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Parse and report coverage info from .info files generated by lcov
|
||||
#
|
||||
import os
|
||||
import glob
|
||||
import csv
|
||||
import re
|
||||
import collections as co
|
||||
import bisect as b
|
||||
|
||||
|
||||
INFO_PATHS = ['tests/*.toml.info']
|
||||
|
||||
def collect(paths, **args):
|
||||
file = None
|
||||
funcs = []
|
||||
lines = co.defaultdict(lambda: 0)
|
||||
pattern = re.compile(
|
||||
'^(?P<file>SF:/?(?P<file_name>.*))$'
|
||||
'|^(?P<func>FN:(?P<func_lineno>[0-9]*),(?P<func_name>.*))$'
|
||||
'|^(?P<line>DA:(?P<line_lineno>[0-9]*),(?P<line_hits>[0-9]*))$')
|
||||
for path in paths:
|
||||
with open(path) as f:
|
||||
for line in f:
|
||||
m = pattern.match(line)
|
||||
if m and m.group('file'):
|
||||
file = m.group('file_name')
|
||||
elif m and file and m.group('func'):
|
||||
funcs.append((file, int(m.group('func_lineno')),
|
||||
m.group('func_name')))
|
||||
elif m and file and m.group('line'):
|
||||
lines[(file, int(m.group('line_lineno')))] += (
|
||||
int(m.group('line_hits')))
|
||||
|
||||
# map line numbers to functions
|
||||
funcs.sort()
|
||||
def func_from_lineno(file, lineno):
|
||||
i = b.bisect(funcs, (file, lineno))
|
||||
if i and funcs[i-1][0] == file:
|
||||
return funcs[i-1][2]
|
||||
else:
|
||||
return None
|
||||
|
||||
# reduce to function info
|
||||
reduced_funcs = co.defaultdict(lambda: (0, 0))
|
||||
for (file, line_lineno), line_hits in lines.items():
|
||||
func = func_from_lineno(file, line_lineno)
|
||||
if not func:
|
||||
continue
|
||||
hits, count = reduced_funcs[(file, func)]
|
||||
reduced_funcs[(file, func)] = (hits + (line_hits > 0), count + 1)
|
||||
|
||||
results = []
|
||||
for (file, func), (hits, count) in reduced_funcs.items():
|
||||
# discard internal/testing functions (test_* injected with
|
||||
# internal testing)
|
||||
if not args.get('everything'):
|
||||
if func.startswith('__') or func.startswith('test_'):
|
||||
continue
|
||||
# discard .8449 suffixes created by optimizer
|
||||
func = re.sub('\.[0-9]+', '', func)
|
||||
results.append((file, func, hits, count))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def main(**args):
|
||||
def openio(path, mode='r'):
|
||||
if path == '-':
|
||||
if 'r' in mode:
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
|
||||
else:
|
||||
return open(path, mode)
|
||||
|
||||
# find coverage
|
||||
if not args.get('use'):
|
||||
# find *.info files
|
||||
paths = []
|
||||
for path in args['info_paths']:
|
||||
if os.path.isdir(path):
|
||||
path = path + '/*.gcov'
|
||||
|
||||
for path in glob.glob(path):
|
||||
paths.append(path)
|
||||
|
||||
if not paths:
|
||||
print('no .info files found in %r?' % args['info_paths'])
|
||||
sys.exit(-1)
|
||||
|
||||
results = collect(paths, **args)
|
||||
else:
|
||||
with openio(args['use']) as f:
|
||||
r = csv.DictReader(f)
|
||||
results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['coverage_hits']),
|
||||
int(result['coverage_count']))
|
||||
for result in r
|
||||
if result.get('coverage_hits') not in {None, ''}
|
||||
if result.get('coverage_count') not in {None, ''}]
|
||||
|
||||
total_hits, total_count = 0, 0
|
||||
for _, _, hits, count in results:
|
||||
total_hits += hits
|
||||
total_count += count
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
r = csv.DictReader(f)
|
||||
prev_results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['coverage_hits']),
|
||||
int(result['coverage_count']))
|
||||
for result in r
|
||||
if result.get('coverage_hits') not in {None, ''}
|
||||
if result.get('coverage_count') not in {None, ''}]
|
||||
except FileNotFoundError:
|
||||
prev_results = []
|
||||
|
||||
prev_total_hits, prev_total_count = 0, 0
|
||||
for _, _, hits, count in prev_results:
|
||||
prev_total_hits += hits
|
||||
prev_total_count += count
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
merged_results = co.defaultdict(lambda: {})
|
||||
other_fields = []
|
||||
|
||||
# merge?
|
||||
if args.get('merge'):
|
||||
try:
|
||||
with openio(args['merge']) as f:
|
||||
r = csv.DictReader(f)
|
||||
for result in r:
|
||||
file = result.pop('file', '')
|
||||
func = result.pop('name', '')
|
||||
result.pop('coverage_hits', None)
|
||||
result.pop('coverage_count', None)
|
||||
merged_results[(file, func)] = result
|
||||
other_fields = result.keys()
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
for file, func, hits, count in results:
|
||||
merged_results[(file, func)]['coverage_hits'] = hits
|
||||
merged_results[(file, func)]['coverage_count'] = count
|
||||
|
||||
with openio(args['output'], 'w') as f:
|
||||
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'coverage_hits', 'coverage_count'])
|
||||
w.writeheader()
|
||||
for (file, func), result in sorted(merged_results.items()):
|
||||
w.writerow({'file': file, 'name': func, **result})
|
||||
|
||||
# print results
|
||||
def dedup_entries(results, by='name'):
|
||||
entries = co.defaultdict(lambda: (0, 0))
|
||||
for file, func, hits, count in results:
|
||||
entry = (file if by == 'file' else func)
|
||||
entry_hits, entry_count = entries[entry]
|
||||
entries[entry] = (entry_hits + hits, entry_count + count)
|
||||
return entries
|
||||
|
||||
def diff_entries(olds, news):
|
||||
diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0))
|
||||
for name, (new_hits, new_count) in news.items():
|
||||
diff[name] = (
|
||||
0, 0,
|
||||
new_hits, new_count,
|
||||
new_hits, new_count,
|
||||
(new_hits/new_count if new_count else 1.0) - 1.0)
|
||||
for name, (old_hits, old_count) in olds.items():
|
||||
_, _, new_hits, new_count, _, _, _ = diff[name]
|
||||
diff[name] = (
|
||||
old_hits, old_count,
|
||||
new_hits, new_count,
|
||||
new_hits-old_hits, new_count-old_count,
|
||||
((new_hits/new_count if new_count else 1.0)
|
||||
- (old_hits/old_count if old_count else 1.0)))
|
||||
return diff
|
||||
|
||||
def sorted_entries(entries):
|
||||
if args.get('coverage_sort'):
|
||||
return sorted(entries, key=lambda x: (-(x[1][0]/x[1][1] if x[1][1] else -1), x))
|
||||
elif args.get('reverse_coverage_sort'):
|
||||
return sorted(entries, key=lambda x: (+(x[1][0]/x[1][1] if x[1][1] else -1), x))
|
||||
else:
|
||||
return sorted(entries)
|
||||
|
||||
def sorted_diff_entries(entries):
|
||||
if args.get('coverage_sort'):
|
||||
return sorted(entries, key=lambda x: (-(x[1][2]/x[1][3] if x[1][3] else -1), x))
|
||||
elif args.get('reverse_coverage_sort'):
|
||||
return sorted(entries, key=lambda x: (+(x[1][2]/x[1][3] if x[1][3] else -1), x))
|
||||
else:
|
||||
return sorted(entries, key=lambda x: (-x[1][6], x))
|
||||
|
||||
def print_header(by=''):
|
||||
if not args.get('diff'):
|
||||
print('%-36s %19s' % (by, 'hits/line'))
|
||||
else:
|
||||
print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))
|
||||
|
||||
def print_entry(name, hits, count):
|
||||
print("%-36s %11s %7s" % (name,
|
||||
'%d/%d' % (hits, count)
|
||||
if count else '-',
|
||||
'%.1f%%' % (100*hits/count)
|
||||
if count else '-'))
|
||||
|
||||
def print_diff_entry(name,
|
||||
old_hits, old_count,
|
||||
new_hits, new_count,
|
||||
diff_hits, diff_count,
|
||||
ratio):
|
||||
print("%-36s %11s %7s %11s %7s %11s%s" % (name,
|
||||
'%d/%d' % (old_hits, old_count)
|
||||
if old_count else '-',
|
||||
'%.1f%%' % (100*old_hits/old_count)
|
||||
if old_count else '-',
|
||||
'%d/%d' % (new_hits, new_count)
|
||||
if new_count else '-',
|
||||
'%.1f%%' % (100*new_hits/new_count)
|
||||
if new_count else '-',
|
||||
'%+d/%+d' % (diff_hits, diff_count),
|
||||
' (%+.1f%%)' % (100*ratio) if ratio else ''))
|
||||
|
||||
def print_entries(by='name'):
|
||||
entries = dedup_entries(results, by=by)
|
||||
|
||||
if not args.get('diff'):
|
||||
print_header(by=by)
|
||||
for name, (hits, count) in sorted_entries(entries.items()):
|
||||
print_entry(name, hits, count)
|
||||
else:
|
||||
prev_entries = dedup_entries(prev_results, by=by)
|
||||
diff = diff_entries(prev_entries, entries)
|
||||
print_header(by='%s (%d added, %d removed)' % (by,
|
||||
sum(1 for _, old, _, _, _, _, _ in diff.values() if not old),
|
||||
sum(1 for _, _, _, new, _, _, _ in diff.values() if not new)))
|
||||
for name, (
|
||||
old_hits, old_count,
|
||||
new_hits, new_count,
|
||||
diff_hits, diff_count, ratio) in sorted_diff_entries(
|
||||
diff.items()):
|
||||
if ratio or args.get('all'):
|
||||
print_diff_entry(name,
|
||||
old_hits, old_count,
|
||||
new_hits, new_count,
|
||||
diff_hits, diff_count,
|
||||
ratio)
|
||||
|
||||
def print_totals():
|
||||
if not args.get('diff'):
|
||||
print_entry('TOTAL', total_hits, total_count)
|
||||
else:
|
||||
ratio = ((total_hits/total_count
|
||||
if total_count else 1.0)
|
||||
- (prev_total_hits/prev_total_count
|
||||
if prev_total_count else 1.0))
|
||||
print_diff_entry('TOTAL',
|
||||
prev_total_hits, prev_total_count,
|
||||
total_hits, total_count,
|
||||
total_hits-prev_total_hits, total_count-prev_total_count,
|
||||
ratio)
|
||||
|
||||
if args.get('quiet'):
|
||||
pass
|
||||
elif args.get('summary'):
|
||||
print_header()
|
||||
print_totals()
|
||||
elif args.get('files'):
|
||||
print_entries(by='file')
|
||||
print_totals()
|
||||
else:
|
||||
print_entries(by='name')
|
||||
print_totals()
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Parse and report coverage info from .info files \
|
||||
generated by lcov")
|
||||
parser.add_argument('info_paths', nargs='*', default=INFO_PATHS,
|
||||
help="Description of where to find *.info files. May be a directory \
|
||||
or list of paths. *.info files will be merged to show the total \
|
||||
coverage. Defaults to %r." % INFO_PATHS)
|
||||
parser.add_argument('-v', '--verbose', action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument('-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument('-u', '--use',
|
||||
help="Don't do any work, instead use this CSV file.")
|
||||
parser.add_argument('-d', '--diff',
|
||||
help="Specify CSV file to diff code size against.")
|
||||
parser.add_argument('-m', '--merge',
|
||||
help="Merge with an existing CSV file when writing to output.")
|
||||
parser.add_argument('-a', '--all', action='store_true',
|
||||
help="Show all functions, not just the ones that changed.")
|
||||
parser.add_argument('-A', '--everything', action='store_true',
|
||||
help="Include builtin and libc specific symbols.")
|
||||
parser.add_argument('-s', '--coverage-sort', action='store_true',
|
||||
help="Sort by coverage.")
|
||||
parser.add_argument('-S', '--reverse-coverage-sort', action='store_true',
|
||||
help="Sort by coverage, but backwards.")
|
||||
parser.add_argument('-F', '--files', action='store_true',
|
||||
help="Show file-level coverage.")
|
||||
parser.add_argument('-Y', '--summary', action='store_true',
|
||||
help="Only show the total coverage.")
|
||||
parser.add_argument('-q', '--quiet', action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument('--build-dir',
|
||||
help="Specify the relative build directory. Used to map object files \
|
||||
to the correct source files.")
|
||||
sys.exit(main(**vars(parser.parse_args())))
|
||||
283  scripts/data.py
@@ -1,283 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find data size at the function level. Basically just a bit wrapper
|
||||
# around nm with some extra conveniences for comparing builds. Heavily inspired
|
||||
# by Linux's Bloat-O-Meter.
|
||||
#
|
||||
|
||||
import os
|
||||
import glob
|
||||
import itertools as it
|
||||
import subprocess as sp
|
||||
import shlex
|
||||
import re
|
||||
import csv
|
||||
import collections as co
|
||||
|
||||
|
||||
OBJ_PATHS = ['*.o']
|
||||
|
||||
def collect(paths, **args):
|
||||
results = co.defaultdict(lambda: 0)
|
||||
pattern = re.compile(
|
||||
'^(?P<size>[0-9a-fA-F]+)' +
|
||||
' (?P<type>[%s])' % re.escape(args['type']) +
|
||||
' (?P<func>.+?)$')
|
||||
for path in paths:
|
||||
# note nm-tool may contain extra args
|
||||
cmd = args['nm_tool'] + ['--size-sort', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace')
|
||||
for line in proc.stdout:
|
||||
m = pattern.match(line)
|
||||
if m:
|
||||
results[(path, m.group('func'))] += int(m.group('size'), 16)
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
|
||||
flat_results = []
|
||||
for (file, func), size in results.items():
|
||||
# map to source files
|
||||
if args.get('build_dir'):
|
||||
file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
|
||||
# replace .o with .c, different scripts report .o/.c, we need to
|
||||
# choose one if we want to deduplicate csv files
|
||||
file = re.sub('\.o$', '.c', file)
|
||||
# discard internal functions
|
||||
if not args.get('everything'):
|
||||
if func.startswith('__'):
|
||||
continue
|
||||
# discard .8449 suffixes created by optimizer
|
||||
func = re.sub('\.[0-9]+', '', func)
|
||||
flat_results.append((file, func, size))
|
||||
|
||||
return flat_results
|
||||
|
||||
def main(**args):
|
||||
def openio(path, mode='r'):
|
||||
if path == '-':
|
||||
if 'r' in mode:
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
|
||||
else:
|
||||
return open(path, mode)
|
||||
|
||||
# find sizes
|
||||
if not args.get('use', None):
|
||||
# find .o files
|
||||
paths = []
|
||||
for path in args['obj_paths']:
|
||||
if os.path.isdir(path):
|
||||
path = path + '/*.o'
|
||||
|
||||
for path in glob.glob(path):
|
||||
paths.append(path)
|
||||
|
||||
if not paths:
|
||||
print('no .obj files found in %r?' % args['obj_paths'])
|
||||
sys.exit(-1)
|
||||
|
||||
results = collect(paths, **args)
|
||||
else:
|
||||
with openio(args['use']) as f:
|
||||
r = csv.DictReader(f)
|
||||
results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['data_size']))
|
||||
for result in r
|
||||
if result.get('data_size') not in {None, ''}]
|
||||
|
||||
total = 0
|
||||
for _, _, size in results:
|
||||
total += size
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
r = csv.DictReader(f)
|
||||
prev_results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['data_size']))
|
||||
for result in r
|
||||
if result.get('data_size') not in {None, ''}]
|
||||
except FileNotFoundError:
|
||||
prev_results = []
|
||||
|
||||
prev_total = 0
|
||||
for _, _, size in prev_results:
|
||||
prev_total += size
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
merged_results = co.defaultdict(lambda: {})
|
||||
other_fields = []
|
||||
|
||||
# merge?
|
||||
if args.get('merge'):
|
||||
try:
|
||||
with openio(args['merge']) as f:
|
||||
r = csv.DictReader(f)
|
||||
for result in r:
|
||||
file = result.pop('file', '')
|
||||
func = result.pop('name', '')
|
||||
result.pop('data_size', None)
|
||||
merged_results[(file, func)] = result
|
||||
other_fields = result.keys()
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
for file, func, size in results:
|
||||
merged_results[(file, func)]['data_size'] = size
|
||||
|
||||
with openio(args['output'], 'w') as f:
|
||||
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size'])
|
||||
w.writeheader()
|
||||
for (file, func), result in sorted(merged_results.items()):
|
||||
w.writerow({'file': file, 'name': func, **result})
|
||||
|
||||
# print results
|
||||
def dedup_entries(results, by='name'):
|
||||
entries = co.defaultdict(lambda: 0)
|
||||
for file, func, size in results:
|
||||
entry = (file if by == 'file' else func)
|
||||
entries[entry] += size
|
||||
return entries
|
||||
|
||||
def diff_entries(olds, news):
|
||||
diff = co.defaultdict(lambda: (0, 0, 0, 0))
|
||||
for name, new in news.items():
|
||||
diff[name] = (0, new, new, 1.0)
|
||||
for name, old in olds.items():
|
||||
_, new, _, _ = diff[name]
|
||||
diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
|
||||
return diff
|
||||
|
||||
def sorted_entries(entries):
|
||||
if args.get('size_sort'):
|
||||
return sorted(entries, key=lambda x: (-x[1], x))
|
||||
elif args.get('reverse_size_sort'):
|
||||
return sorted(entries, key=lambda x: (+x[1], x))
|
||||
else:
|
||||
return sorted(entries)
|
||||
|
||||
def sorted_diff_entries(entries):
|
||||
if args.get('size_sort'):
|
||||
return sorted(entries, key=lambda x: (-x[1][1], x))
|
||||
elif args.get('reverse_size_sort'):
|
||||
return sorted(entries, key=lambda x: (+x[1][1], x))
|
||||
else:
|
||||
return sorted(entries, key=lambda x: (-x[1][3], x))
|
||||
|
||||
def print_header(by=''):
|
||||
if not args.get('diff'):
|
||||
print('%-36s %7s' % (by, 'size'))
|
||||
else:
|
||||
print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
|
||||
|
||||
def print_entry(name, size):
|
||||
print("%-36s %7d" % (name, size))
|
||||
|
||||
def print_diff_entry(name, old, new, diff, ratio):
|
||||
print("%-36s %7s %7s %+7d%s" % (name,
|
||||
old or "-",
|
||||
new or "-",
|
||||
diff,
|
||||
' (%+.1f%%)' % (100*ratio) if ratio else ''))
|
||||
|
||||
def print_entries(by='name'):
|
||||
entries = dedup_entries(results, by=by)
|
||||
|
||||
if not args.get('diff'):
|
||||
print_header(by=by)
|
||||
for name, size in sorted_entries(entries.items()):
|
||||
print_entry(name, size)
|
||||
else:
|
||||
prev_entries = dedup_entries(prev_results, by=by)
|
||||
diff = diff_entries(prev_entries, entries)
|
||||
print_header(by='%s (%d added, %d removed)' % (by,
|
||||
sum(1 for old, _, _, _ in diff.values() if not old),
|
||||
sum(1 for _, new, _, _ in diff.values() if not new)))
|
||||
for name, (old, new, diff, ratio) in sorted_diff_entries(
|
||||
diff.items()):
|
||||
if ratio or args.get('all'):
|
||||
print_diff_entry(name, old, new, diff, ratio)
|
||||
|
||||
def print_totals():
|
||||
if not args.get('diff'):
|
||||
print_entry('TOTAL', total)
|
||||
else:
|
||||
ratio = (0.0 if not prev_total and not total
|
||||
else 1.0 if not prev_total
|
||||
else (total-prev_total)/prev_total)
|
||||
print_diff_entry('TOTAL',
|
||||
prev_total, total,
|
||||
total-prev_total,
|
||||
ratio)
|
||||
|
||||
if args.get('quiet'):
|
||||
pass
|
||||
elif args.get('summary'):
|
||||
print_header()
|
||||
print_totals()
|
||||
elif args.get('files'):
|
||||
print_entries(by='file')
|
||||
print_totals()
|
||||
else:
|
||||
print_entries(by='name')
|
||||
print_totals()
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Find data size at the function level.")
|
||||
parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
|
||||
help="Description of where to find *.o files. May be a directory \
|
||||
or a list of paths. Defaults to %r." % OBJ_PATHS)
|
||||
parser.add_argument('-v', '--verbose', action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument('-q', '--quiet', action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument('-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument('-u', '--use',
|
||||
help="Don't compile and find data sizes, instead use this CSV file.")
|
||||
parser.add_argument('-d', '--diff',
|
||||
help="Specify CSV file to diff data size against.")
|
||||
parser.add_argument('-m', '--merge',
|
||||
help="Merge with an existing CSV file when writing to output.")
|
||||
parser.add_argument('-a', '--all', action='store_true',
|
||||
help="Show all functions, not just the ones that changed.")
|
||||
parser.add_argument('-A', '--everything', action='store_true',
|
||||
help="Include builtin and libc specific symbols.")
|
||||
parser.add_argument('-s', '--size-sort', action='store_true',
|
||||
help="Sort by size.")
|
||||
parser.add_argument('-S', '--reverse-size-sort', action='store_true',
|
||||
help="Sort by size, but backwards.")
|
||||
parser.add_argument('-F', '--files', action='store_true',
|
||||
help="Show file-level data sizes. Note this does not include padding! "
|
||||
"So sizes may differ from other tools.")
|
||||
parser.add_argument('-Y', '--summary', action='store_true',
|
||||
help="Only show the total data size.")
|
||||
parser.add_argument('--type', default='dDbB',
|
||||
help="Type of symbols to report, this uses the same single-character "
|
||||
"type-names emitted by nm. Defaults to %(default)r.")
|
||||
parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
|
||||
help="Path to the nm tool to use.")
|
||||
parser.add_argument('--build-dir',
|
||||
help="Specify the relative build directory. Used to map object files \
|
||||
to the correct source files.")
|
||||
sys.exit(main(**vars(parser.parse_args())))
|
||||
@@ -106,7 +106,7 @@ def main(args):
|
||||
struct.unpack('<HH', superblock[1].data[0:4].ljust(4, b'\xff'))))
|
||||
print("%-47s%s" % ("littlefs v%s.%s" % version,
|
||||
"data (truncated, if it fits)"
|
||||
if not any([args.no_truncate, args.log, args.all]) else ""))
|
||||
if not any([args.no_truncate, args.tags, args.log, args.all]) else ""))
|
||||
|
||||
# print gstate
|
||||
print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
|
||||
|
||||
430  scripts/stack.py
@@ -1,430 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find stack usage at the function level. Will detect recursion and
|
||||
# report as infinite stack usage.
|
||||
#
|
||||
|
||||
import os
|
||||
import glob
|
||||
import itertools as it
|
||||
import re
|
||||
import csv
|
||||
import collections as co
|
||||
import math as m
|
||||
|
||||
|
||||
CI_PATHS = ['*.ci']
|
||||
|
||||
def collect(paths, **args):
|
||||
# parse the vcg format
|
||||
k_pattern = re.compile('([a-z]+)\s*:', re.DOTALL)
|
||||
v_pattern = re.compile('(?:"(.*?)"|([a-z]+))', re.DOTALL)
|
||||
def parse_vcg(rest):
|
||||
def parse_vcg(rest):
|
||||
node = []
|
||||
while True:
|
||||
rest = rest.lstrip()
|
||||
m = k_pattern.match(rest)
|
||||
if not m:
|
||||
return (node, rest)
|
||||
k, rest = m.group(1), rest[m.end(0):]
|
||||
|
||||
rest = rest.lstrip()
|
||||
if rest.startswith('{'):
|
||||
v, rest = parse_vcg(rest[1:])
|
||||
assert rest[0] == '}', "unexpected %r" % rest[0:1]
|
||||
rest = rest[1:]
|
||||
node.append((k, v))
|
||||
else:
|
||||
m = v_pattern.match(rest)
|
||||
assert m, "unexpected %r" % rest[0:1]
|
||||
v, rest = m.group(1) or m.group(2), rest[m.end(0):]
|
||||
node.append((k, v))
|
||||
|
||||
node, rest = parse_vcg(rest)
|
||||
assert rest == '', "unexpected %r" % rest[0:1]
|
||||
return node
|
||||
|
||||
# collect into functions
|
||||
results = co.defaultdict(lambda: (None, None, 0, set()))
|
||||
f_pattern = re.compile(
|
||||
r'([^\\]*)\\n([^:]*)[^\\]*\\n([0-9]+) bytes \((.*)\)')
|
||||
for path in paths:
|
||||
with open(path) as f:
|
||||
vcg = parse_vcg(f.read())
|
||||
for k, graph in vcg:
|
||||
if k != 'graph':
|
||||
continue
|
||||
for k, info in graph:
|
||||
if k == 'node':
|
||||
info = dict(info)
|
||||
m = f_pattern.match(info['label'])
|
||||
if m:
|
||||
function, file, size, type = m.groups()
|
||||
if not args.get('quiet') and type != 'static':
|
||||
print('warning: found non-static stack for %s (%s)'
|
||||
% (function, type))
|
||||
_, _, _, targets = results[info['title']]
|
||||
results[info['title']] = (
|
||||
file, function, int(size), targets)
|
||||
elif k == 'edge':
|
||||
info = dict(info)
|
||||
_, _, _, targets = results[info['sourcename']]
|
||||
targets.add(info['targetname'])
|
||||
else:
|
||||
continue
|
||||
|
||||
if not args.get('everything'):
|
||||
for source, (s_file, s_function, _, _) in list(results.items()):
|
||||
# discard internal functions
|
||||
if s_file.startswith('<') or s_file.startswith('/usr/include'):
|
||||
del results[source]
|
||||
|
||||
# find maximum stack size recursively, this requires also detecting cycles
|
||||
# (in case of recursion)
|
||||
def find_limit(source, seen=None):
|
||||
seen = seen or set()
|
||||
if source not in results:
|
||||
return 0
|
||||
_, _, frame, targets = results[source]
|
||||
|
||||
limit = 0
|
||||
for target in targets:
|
||||
if target in seen:
|
||||
# found a cycle
|
||||
return float('inf')
|
||||
limit_ = find_limit(target, seen | {target})
|
||||
limit = max(limit, limit_)
|
||||
|
||||
return frame + limit
|
||||
|
||||
def find_deps(targets):
|
||||
deps = set()
|
||||
for target in targets:
|
||||
if target in results:
|
||||
t_file, t_function, _, _ = results[target]
|
||||
deps.add((t_file, t_function))
|
||||
return deps
|
||||
|
||||
# flatten into a list
|
||||
flat_results = []
|
||||
for source, (s_file, s_function, frame, targets) in results.items():
|
||||
limit = find_limit(source)
|
||||
deps = find_deps(targets)
|
||||
flat_results.append((s_file, s_function, frame, limit, deps))
|
||||
|
||||
return flat_results
|
||||
|
||||
def main(**args):
|
||||
def openio(path, mode='r'):
|
||||
if path == '-':
|
||||
if 'r' in mode:
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
|
||||
else:
|
||||
return open(path, mode)
|
||||
|
||||
# find sizes
|
||||
if not args.get('use', None):
|
||||
# find .ci files
|
||||
paths = []
|
||||
for path in args['ci_paths']:
|
||||
if os.path.isdir(path):
|
||||
path = path + '/*.ci'
|
||||
|
||||
for path in glob.glob(path):
|
||||
paths.append(path)
|
||||
|
||||
if not paths:
|
||||
print('no .ci files found in %r?' % args['ci_paths'])
|
||||
sys.exit(-1)
|
||||
|
||||
results = collect(paths, **args)
|
||||
else:
|
||||
with openio(args['use']) as f:
|
||||
r = csv.DictReader(f)
|
||||
results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['stack_frame']),
|
||||
float(result['stack_limit']), # note limit can be inf
|
||||
set())
|
||||
for result in r
|
||||
if result.get('stack_frame') not in {None, ''}
|
||||
if result.get('stack_limit') not in {None, ''}]
|
||||
|
||||
total_frame = 0
|
||||
total_limit = 0
|
||||
for _, _, frame, limit, _ in results:
|
||||
total_frame += frame
|
||||
total_limit = max(total_limit, limit)
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
r = csv.DictReader(f)
|
||||
prev_results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['stack_frame']),
|
||||
float(result['stack_limit']),
|
||||
set())
|
||||
for result in r
|
||||
if result.get('stack_frame') not in {None, ''}
|
||||
if result.get('stack_limit') not in {None, ''}]
|
||||
except FileNotFoundError:
|
||||
prev_results = []
|
||||
|
||||
prev_total_frame = 0
|
||||
prev_total_limit = 0
|
||||
for _, _, frame, limit, _ in prev_results:
|
||||
prev_total_frame += frame
|
||||
prev_total_limit = max(prev_total_limit, limit)
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
merged_results = co.defaultdict(lambda: {})
|
||||
other_fields = []
|
||||
|
||||
# merge?
|
||||
if args.get('merge'):
|
||||
try:
|
||||
with openio(args['merge']) as f:
|
||||
r = csv.DictReader(f)
|
||||
for result in r:
|
||||
file = result.pop('file', '')
|
||||
func = result.pop('name', '')
|
||||
result.pop('stack_frame', None)
|
||||
result.pop('stack_limit', None)
|
||||
merged_results[(file, func)] = result
|
||||
other_fields = result.keys()
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
for file, func, frame, limit, _ in results:
|
||||
merged_results[(file, func)]['stack_frame'] = frame
|
||||
merged_results[(file, func)]['stack_limit'] = limit
|
||||
|
||||
with openio(args['output'], 'w') as f:
|
||||
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'stack_frame', 'stack_limit'])
|
||||
w.writeheader()
|
||||
for (file, func), result in sorted(merged_results.items()):
|
||||
w.writerow({'file': file, 'name': func, **result})
|
||||
|
||||
# print results
|
||||
def dedup_entries(results, by='name'):
|
||||
entries = co.defaultdict(lambda: (0, 0, set()))
|
||||
for file, func, frame, limit, deps in results:
|
||||
entry = (file if by == 'file' else func)
|
||||
entry_frame, entry_limit, entry_deps = entries[entry]
|
||||
entries[entry] = (
|
||||
entry_frame + frame,
|
||||
max(entry_limit, limit),
|
||||
entry_deps | {file if by == 'file' else func
|
||||
for file, func in deps})
|
||||
return entries
|
||||
|
||||
def diff_entries(olds, news):
|
||||
diff = co.defaultdict(lambda: (None, None, None, None, 0, 0, 0, set()))
|
||||
for name, (new_frame, new_limit, deps) in news.items():
|
||||
diff[name] = (
|
||||
None, None,
|
||||
new_frame, new_limit,
|
||||
new_frame, new_limit,
|
||||
1.0,
|
||||
deps)
|
||||
for name, (old_frame, old_limit, _) in olds.items():
|
||||
_, _, new_frame, new_limit, _, _, _, deps = diff[name]
|
||||
diff[name] = (
|
||||
old_frame, old_limit,
|
||||
new_frame, new_limit,
|
||||
(new_frame or 0) - (old_frame or 0),
|
||||
0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
|
||||
else (new_limit or 0) - (old_limit or 0),
|
||||
0.0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
|
||||
else +float('inf') if m.isinf(new_limit or 0)
|
||||
else -float('inf') if m.isinf(old_limit or 0)
|
||||
else +0.0 if not old_limit and not new_limit
|
||||
else +1.0 if not old_limit
|
||||
else ((new_limit or 0) - (old_limit or 0))/(old_limit or 0),
|
||||
deps)
|
||||
return diff
|
||||
|
||||
def sorted_entries(entries):
|
||||
if args.get('limit_sort'):
|
||||
return sorted(entries, key=lambda x: (-x[1][1], x))
|
||||
elif args.get('reverse_limit_sort'):
|
||||
return sorted(entries, key=lambda x: (+x[1][1], x))
|
||||
elif args.get('frame_sort'):
|
||||
return sorted(entries, key=lambda x: (-x[1][0], x))
|
||||
elif args.get('reverse_frame_sort'):
|
||||
return sorted(entries, key=lambda x: (+x[1][0], x))
|
||||
else:
|
||||
return sorted(entries)
|
||||
|
||||
def sorted_diff_entries(entries):
|
||||
if args.get('limit_sort'):
|
||||
return sorted(entries, key=lambda x: (-(x[1][3] or 0), x))
|
||||
elif args.get('reverse_limit_sort'):
|
||||
return sorted(entries, key=lambda x: (+(x[1][3] or 0), x))
|
||||
elif args.get('frame_sort'):
|
||||
return sorted(entries, key=lambda x: (-(x[1][2] or 0), x))
|
||||
elif args.get('reverse_frame_sort'):
|
||||
return sorted(entries, key=lambda x: (+(x[1][2] or 0), x))
|
||||
else:
|
||||
return sorted(entries, key=lambda x: (-x[1][6], x))
|
||||
|
||||
def print_header(by=''):
|
||||
if not args.get('diff'):
|
||||
print('%-36s %7s %7s' % (by, 'frame', 'limit'))
|
||||
else:
|
||||
print('%-36s %15s %15s %15s' % (by, 'old', 'new', 'diff'))
|
||||
|
||||
def print_entry(name, frame, limit):
|
||||
print("%-36s %7d %7s" % (name,
|
||||
frame, '∞' if m.isinf(limit) else int(limit)))
|
||||
|
||||
def print_diff_entry(name,
|
||||
old_frame, old_limit,
|
||||
new_frame, new_limit,
|
||||
diff_frame, diff_limit,
|
||||
ratio):
|
||||
print('%-36s %7s %7s %7s %7s %+7d %7s%s' % (name,
|
||||
old_frame if old_frame is not None else "-",
|
||||
('∞' if m.isinf(old_limit) else int(old_limit))
|
||||
if old_limit is not None else "-",
|
||||
new_frame if new_frame is not None else "-",
|
||||
('∞' if m.isinf(new_limit) else int(new_limit))
|
||||
if new_limit is not None else "-",
|
||||
diff_frame,
|
||||
('+∞' if diff_limit > 0 and m.isinf(diff_limit)
|
||||
else '-∞' if diff_limit < 0 and m.isinf(diff_limit)
|
||||
else '%+d' % diff_limit),
|
||||
'' if not ratio
|
||||
else ' (+∞%)' if ratio > 0 and m.isinf(ratio)
|
||||
else ' (-∞%)' if ratio < 0 and m.isinf(ratio)
|
||||
else ' (%+.1f%%)' % (100*ratio)))
|
||||
|
||||
def print_entries(by='name'):
|
||||
# build optional tree of dependencies
|
||||
def print_deps(entries, depth, print,
|
||||
filter=lambda _: True,
|
||||
prefixes=('', '', '', '')):
|
||||
entries = entries if isinstance(entries, list) else list(entries)
|
||||
filtered_entries = [(name, entry)
|
||||
for name, entry in entries
|
||||
if filter(name)]
|
||||
for i, (name, entry) in enumerate(filtered_entries):
|
||||
last = (i == len(filtered_entries)-1)
|
||||
print(prefixes[0+last] + name, entry)
|
||||
|
||||
if depth > 0:
|
||||
deps = entry[-1]
|
||||
print_deps(entries, depth-1, print,
|
||||
lambda name: name in deps,
|
||||
( prefixes[2+last] + "|-> ",
|
||||
prefixes[2+last] + "'-> ",
|
||||
prefixes[2+last] + "| ",
|
||||
prefixes[2+last] + " "))
|
||||
|
||||
entries = dedup_entries(results, by=by)
|
||||
|
||||
if not args.get('diff'):
|
||||
print_header(by=by)
|
||||
print_deps(
|
||||
sorted_entries(entries.items()),
|
||||
args.get('depth') or 0,
|
||||
lambda name, entry: print_entry(name, *entry[:-1]))
|
||||
else:
|
||||
prev_entries = dedup_entries(prev_results, by=by)
|
||||
diff = diff_entries(prev_entries, entries)
|
||||
|
||||
print_header(by='%s (%d added, %d removed)' % (by,
|
||||
sum(1 for _, old, _, _, _, _, _, _ in diff.values() if old is None),
|
||||
sum(1 for _, _, _, new, _, _, _, _ in diff.values() if new is None)))
|
||||
print_deps(
|
||||
filter(
|
||||
lambda x: x[1][6] or args.get('all'),
|
||||
sorted_diff_entries(diff.items())),
|
||||
args.get('depth') or 0,
|
||||
lambda name, entry: print_diff_entry(name, *entry[:-1]))
|
||||
|
||||
def print_totals():
|
||||
if not args.get('diff'):
|
||||
print_entry('TOTAL', total_frame, total_limit)
|
||||
else:
|
||||
diff_frame = total_frame - prev_total_frame
|
||||
diff_limit = (
|
||||
0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
|
||||
else (total_limit or 0) - (prev_total_limit or 0))
|
||||
ratio = (
|
||||
0.0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
|
||||
else +float('inf') if m.isinf(total_limit or 0)
|
||||
else -float('inf') if m.isinf(prev_total_limit or 0)
|
||||
else 0.0 if not prev_total_limit and not total_limit
|
||||
else 1.0 if not prev_total_limit
|
||||
else ((total_limit or 0) - (prev_total_limit or 0))/(prev_total_limit or 0))
|
||||
print_diff_entry('TOTAL',
|
||||
prev_total_frame, prev_total_limit,
|
||||
total_frame, total_limit,
|
||||
diff_frame, diff_limit,
|
||||
ratio)
|
||||
|
||||
if args.get('quiet'):
|
||||
pass
|
||||
elif args.get('summary'):
|
||||
print_header()
|
||||
print_totals()
|
||||
elif args.get('files'):
|
||||
print_entries(by='file')
|
||||
print_totals()
|
||||
else:
|
||||
print_entries(by='name')
|
||||
print_totals()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Find stack usage at the function level.")
|
||||
parser.add_argument('ci_paths', nargs='*', default=CI_PATHS,
|
||||
help="Description of where to find *.ci files. May be a directory \
|
||||
or a list of paths. Defaults to %r." % CI_PATHS)
|
||||
parser.add_argument('-v', '--verbose', action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument('-q', '--quiet', action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument('-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument('-u', '--use',
|
||||
help="Don't parse callgraph files, instead use this CSV file.")
|
||||
parser.add_argument('-d', '--diff',
|
||||
help="Specify CSV file to diff against.")
|
||||
parser.add_argument('-m', '--merge',
|
||||
help="Merge with an existing CSV file when writing to output.")
|
||||
parser.add_argument('-a', '--all', action='store_true',
|
||||
help="Show all functions, not just the ones that changed.")
|
||||
parser.add_argument('-A', '--everything', action='store_true',
|
||||
help="Include builtin and libc specific symbols.")
|
||||
parser.add_argument('-s', '--limit-sort', action='store_true',
|
||||
help="Sort by stack limit.")
|
||||
parser.add_argument('-S', '--reverse-limit-sort', action='store_true',
|
||||
help="Sort by stack limit, but backwards.")
|
||||
parser.add_argument('--frame-sort', action='store_true',
|
||||
help="Sort by stack frame size.")
|
||||
parser.add_argument('--reverse-frame-sort', action='store_true',
|
||||
help="Sort by stack frame size, but backwards.")
|
||||
parser.add_argument('-L', '--depth', default=0, type=lambda x: int(x, 0),
|
||||
nargs='?', const=float('inf'),
|
||||
help="Depth of dependencies to show.")
|
||||
parser.add_argument('-F', '--files', action='store_true',
|
||||
help="Show file-level calls.")
|
||||
parser.add_argument('-Y', '--summary', action='store_true',
|
||||
help="Only show the total stack size.")
|
||||
parser.add_argument('--build-dir',
|
||||
help="Specify the relative build directory. Used to map object files \
|
||||
to the correct source files.")
|
||||
sys.exit(main(**vars(parser.parse_args())))
|
||||
@@ -1,331 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find struct sizes.
|
||||
#
|
||||
|
||||
import os
|
||||
import glob
|
||||
import itertools as it
|
||||
import subprocess as sp
|
||||
import shlex
|
||||
import re
|
||||
import csv
|
||||
import collections as co
|
||||
|
||||
|
||||
OBJ_PATHS = ['*.o']
|
||||
|
||||
def collect(paths, **args):
|
||||
decl_pattern = re.compile(
|
||||
'^\s+(?P<no>[0-9]+)'
|
||||
'\s+(?P<dir>[0-9]+)'
|
||||
'\s+.*'
|
||||
'\s+(?P<file>[^\s]+)$')
|
||||
struct_pattern = re.compile(
|
||||
'^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
|
||||
'|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
|
||||
'|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
|
||||
'|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
|
||||
|
||||
results = co.defaultdict(lambda: 0)
|
||||
for path in paths:
|
||||
# find decl, we want to filter by structs in .h files
|
||||
decls = {}
|
||||
# note objdump-tool may contain extra args
|
||||
cmd = args['objdump_tool'] + ['--dwarf=rawline', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace')
|
||||
for line in proc.stdout:
|
||||
# find file numbers
|
||||
m = decl_pattern.match(line)
|
||||
if m:
|
||||
decls[int(m.group('no'))] = m.group('file')
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
|
||||
# collect structs as we parse dwarf info
|
||||
found = False
|
||||
name = None
|
||||
decl = None
|
||||
size = None
|
||||
|
||||
# note objdump-tool may contain extra args
|
||||
cmd = args['objdump_tool'] + ['--dwarf=info', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace')
|
||||
for line in proc.stdout:
|
||||
# state machine here to find structs
|
||||
m = struct_pattern.match(line)
|
||||
if m:
|
||||
if m.group('tag'):
|
||||
if (name is not None
|
||||
and decl is not None
|
||||
and size is not None):
|
||||
decl = decls.get(decl, '?')
|
||||
results[(decl, name)] = size
|
||||
found = (m.group('tag') == 'structure_type')
|
||||
name = None
|
||||
decl = None
|
||||
size = None
|
||||
elif found and m.group('name'):
|
||||
name = m.group('name')
|
||||
elif found and name and m.group('decl'):
|
||||
decl = int(m.group('decl'))
|
||||
elif found and name and m.group('size'):
|
||||
size = int(m.group('size'))
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
|
||||
flat_results = []
|
||||
for (file, struct), size in results.items():
|
||||
# map to source files
|
||||
if args.get('build_dir'):
|
||||
file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
|
||||
# only include structs declared in header files in the current
# directory, ignore internal-only structs (these are represented
# in other measurements)
|
||||
if not args.get('everything'):
|
||||
if not file.endswith('.h'):
|
||||
continue
|
||||
# replace .o with .c, different scripts report .o/.c, we need to
|
||||
# choose one if we want to deduplicate csv files
|
||||
file = re.sub(r'\.o$', '.c', file)
|
||||
|
||||
flat_results.append((file, struct, size))
|
||||
|
||||
return flat_results
|
||||
|
||||
|
||||
def main(**args):
|
||||
def openio(path, mode='r'):
|
||||
if path == '-':
|
||||
if 'r' in mode:
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
|
||||
else:
|
||||
return open(path, mode)
|
||||
|
||||
# find sizes
|
||||
if not args.get('use', None):
|
||||
# find .o files
|
||||
paths = []
|
||||
for path in args['obj_paths']:
|
||||
if os.path.isdir(path):
|
||||
path = path + '/*.o'
|
||||
|
||||
for path in glob.glob(path):
|
||||
paths.append(path)
|
||||
|
||||
if not paths:
|
||||
print('no .o files found in %r?' % args['obj_paths'])
|
||||
sys.exit(-1)
|
||||
|
||||
results = collect(paths, **args)
|
||||
else:
|
||||
with openio(args['use']) as f:
|
||||
r = csv.DictReader(f)
|
||||
results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['struct_size']))
|
||||
for result in r
|
||||
if result.get('struct_size') not in {None, ''}]
|
||||
|
||||
total = 0
|
||||
for _, _, size in results:
|
||||
total += size
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
r = csv.DictReader(f)
|
||||
prev_results = [
|
||||
( result['file'],
|
||||
result['name'],
|
||||
int(result['struct_size']))
|
||||
for result in r
|
||||
if result.get('struct_size') not in {None, ''}]
|
||||
except FileNotFoundError:
|
||||
prev_results = []
|
||||
|
||||
prev_total = 0
|
||||
for _, _, size in prev_results:
|
||||
prev_total += size
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
merged_results = co.defaultdict(lambda: {})
|
||||
other_fields = []
|
||||
|
||||
# merge?
|
||||
if args.get('merge'):
|
||||
try:
|
||||
with openio(args['merge']) as f:
|
||||
r = csv.DictReader(f)
|
||||
for result in r:
|
||||
file = result.pop('file', '')
|
||||
struct = result.pop('name', '')
|
||||
result.pop('struct_size', None)
|
||||
merged_results[(file, struct)] = result
|
||||
other_fields = result.keys()
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
for file, struct, size in results:
|
||||
merged_results[(file, struct)]['struct_size'] = size
|
||||
|
||||
with openio(args['output'], 'w') as f:
|
||||
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size'])
|
||||
w.writeheader()
|
||||
for (file, struct), result in sorted(merged_results.items()):
|
||||
w.writerow({'file': file, 'name': struct, **result})
|
||||
|
||||
# print results
|
||||
def dedup_entries(results, by='name'):
|
||||
entries = co.defaultdict(lambda: 0)
|
||||
for file, struct, size in results:
|
||||
entry = (file if by == 'file' else struct)
|
||||
entries[entry] += size
|
||||
return entries
|
||||
|
||||
def diff_entries(olds, news):
|
||||
diff = co.defaultdict(lambda: (0, 0, 0, 0))
|
||||
for name, new in news.items():
|
||||
diff[name] = (0, new, new, 1.0)
|
||||
for name, old in olds.items():
|
||||
_, new, _, _ = diff[name]
|
||||
diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
|
||||
return diff
|
||||
|
||||
def sorted_entries(entries):
|
||||
if args.get('size_sort'):
|
||||
return sorted(entries, key=lambda x: (-x[1], x))
|
||||
elif args.get('reverse_size_sort'):
|
||||
return sorted(entries, key=lambda x: (+x[1], x))
|
||||
else:
|
||||
return sorted(entries)
|
||||
|
||||
def sorted_diff_entries(entries):
|
||||
if args.get('size_sort'):
|
||||
return sorted(entries, key=lambda x: (-x[1][1], x))
|
||||
elif args.get('reverse_size_sort'):
|
||||
return sorted(entries, key=lambda x: (+x[1][1], x))
|
||||
else:
|
||||
return sorted(entries, key=lambda x: (-x[1][3], x))
|
||||
|
||||
def print_header(by=''):
|
||||
if not args.get('diff'):
|
||||
print('%-36s %7s' % (by, 'size'))
|
||||
else:
|
||||
print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
|
||||
|
||||
def print_entry(name, size):
|
||||
print("%-36s %7d" % (name, size))
|
||||
|
||||
def print_diff_entry(name, old, new, diff, ratio):
|
||||
print("%-36s %7s %7s %+7d%s" % (name,
|
||||
old or "-",
|
||||
new or "-",
|
||||
diff,
|
||||
' (%+.1f%%)' % (100*ratio) if ratio else ''))
|
||||
|
||||
def print_entries(by='name'):
|
||||
entries = dedup_entries(results, by=by)
|
||||
|
||||
if not args.get('diff'):
|
||||
print_header(by=by)
|
||||
for name, size in sorted_entries(entries.items()):
|
||||
print_entry(name, size)
|
||||
else:
|
||||
prev_entries = dedup_entries(prev_results, by=by)
|
||||
diff = diff_entries(prev_entries, entries)
|
||||
print_header(by='%s (%d added, %d removed)' % (by,
|
||||
sum(1 for old, _, _, _ in diff.values() if not old),
|
||||
sum(1 for _, new, _, _ in diff.values() if not new)))
|
||||
for name, (old, new, diff, ratio) in sorted_diff_entries(
|
||||
diff.items()):
|
||||
if ratio or args.get('all'):
|
||||
print_diff_entry(name, old, new, diff, ratio)
|
||||
|
||||
def print_totals():
|
||||
if not args.get('diff'):
|
||||
print_entry('TOTAL', total)
|
||||
else:
|
||||
ratio = (0.0 if not prev_total and not total
|
||||
else 1.0 if not prev_total
|
||||
else (total-prev_total)/prev_total)
|
||||
print_diff_entry('TOTAL',
|
||||
prev_total, total,
|
||||
total-prev_total,
|
||||
ratio)
|
||||
|
||||
if args.get('quiet'):
|
||||
pass
|
||||
elif args.get('summary'):
|
||||
print_header()
|
||||
print_totals()
|
||||
elif args.get('files'):
|
||||
print_entries(by='file')
|
||||
print_totals()
|
||||
else:
|
||||
print_entries(by='name')
|
||||
print_totals()
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Find struct sizes.")
|
||||
parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
|
||||
help="Description of where to find *.o files. May be a directory \
|
||||
or a list of paths. Defaults to %r." % OBJ_PATHS)
|
||||
parser.add_argument('-v', '--verbose', action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument('-q', '--quiet', action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument('-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument('-u', '--use',
|
||||
help="Don't compile and find struct sizes, instead use this CSV file.")
|
||||
parser.add_argument('-d', '--diff',
|
||||
help="Specify CSV file to diff struct size against.")
|
||||
parser.add_argument('-m', '--merge',
|
||||
help="Merge with an existing CSV file when writing to output.")
|
||||
parser.add_argument('-a', '--all', action='store_true',
|
||||
help="Show all functions, not just the ones that changed.")
|
||||
parser.add_argument('-A', '--everything', action='store_true',
|
||||
help="Include builtin and libc specific symbols.")
|
||||
parser.add_argument('-s', '--size-sort', action='store_true',
|
||||
help="Sort by size.")
|
||||
parser.add_argument('-S', '--reverse-size-sort', action='store_true',
|
||||
help="Sort by size, but backwards.")
|
||||
parser.add_argument('-F', '--files', action='store_true',
|
||||
help="Show file-level struct sizes.")
|
||||
parser.add_argument('-Y', '--summary', action='store_true',
|
||||
help="Only show the total struct size.")
|
||||
parser.add_argument('--objdump-tool', default=['objdump'], type=lambda x: x.split(),
|
||||
help="Path to the objdump tool to use.")
|
||||
parser.add_argument('--build-dir',
|
||||
help="Specify the relative build directory. Used to map object files \
|
||||
to the correct source files.")
|
||||
sys.exit(main(**vars(parser.parse_args())))
|
||||
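A minimal sketch of the struct_pattern state machine from the deleted struct-size script above, run over illustrative lines (the sample lines only resemble objdump --dwarf=info output and are not captured from a real dump; the DW_AT_decl_file branch is omitted for brevity):

import re

# same alternation as the script's struct_pattern, minus the decl-file branch
struct_pattern = re.compile(
    r'^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
    r'|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
    r'|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')

# made-up lines shaped like DWARF info output
lines = [
    ' <1><2d>: Abbrev Number: 2 (DW_TAG_structure_type)',
    '    <2e>   DW_AT_name : lfs_config',
    '    <33>   DW_AT_byte_size : 120',
]
for line in lines:
    m = struct_pattern.match(line)
    print(m.groupdict() if m else None)
# tag, name and size arrive on separate lines, which is why the collector
# keeps found/name/decl/size state between matches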
@@ -1,279 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to summarize the outputs of other scripts. Operates on CSV files.
|
||||
#
|
||||
|
||||
import functools as ft
|
||||
import collections as co
|
||||
import os
|
||||
import csv
|
||||
import re
|
||||
import math as m
|
||||
|
||||
# displayable fields
|
||||
Field = co.namedtuple('Field', 'name,parse,acc,key,fmt,repr,null,ratio')
|
||||
FIELDS = [
|
||||
# name, parse, accumulate, key, fmt, repr, null, ratio
|
||||
Field('code',
|
||||
lambda r: int(r['code_size']),
|
||||
sum,
|
||||
lambda r: r,
|
||||
'%7s',
|
||||
lambda r: r,
|
||||
'-',
|
||||
lambda old, new: (new-old)/old),
|
||||
Field('data',
|
||||
lambda r: int(r['data_size']),
|
||||
sum,
|
||||
lambda r: r,
|
||||
'%7s',
|
||||
lambda r: r,
|
||||
'-',
|
||||
lambda old, new: (new-old)/old),
|
||||
Field('stack',
|
||||
lambda r: float(r['stack_limit']),
|
||||
max,
|
||||
lambda r: r,
|
||||
'%7s',
|
||||
lambda r: '∞' if m.isinf(r) else int(r),
|
||||
'-',
|
||||
lambda old, new: (new-old)/old),
|
||||
Field('structs',
|
||||
lambda r: int(r['struct_size']),
|
||||
sum,
|
||||
lambda r: r,
|
||||
'%8s',
|
||||
lambda r: r,
|
||||
'-',
|
||||
lambda old, new: (new-old)/old),
|
||||
Field('coverage',
|
||||
lambda r: (int(r['coverage_hits']), int(r['coverage_count'])),
|
||||
lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs),
|
||||
lambda r: r[0]/r[1],
|
||||
'%19s',
|
||||
lambda r: '%11s %7s' % ('%d/%d' % (r[0], r[1]), '%.1f%%' % (100*r[0]/r[1])),
|
||||
'%11s %7s' % ('-', '-'),
|
||||
lambda old, new: ((new[0]/new[1]) - (old[0]/old[1])))
|
||||
]
|
||||
|
||||
|
||||
def main(**args):
|
||||
def openio(path, mode='r'):
|
||||
if path == '-':
|
||||
if 'r' in mode:
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
|
||||
else:
|
||||
return open(path, mode)
|
||||
|
||||
# find results
|
||||
results = co.defaultdict(lambda: {})
|
||||
for path in args.get('csv_paths', '-'):
|
||||
try:
|
||||
with openio(path) as f:
|
||||
r = csv.DictReader(f)
|
||||
for result in r:
|
||||
file = result.pop('file', '')
|
||||
name = result.pop('name', '')
|
||||
prev = results[(file, name)]
|
||||
for field in FIELDS:
|
||||
try:
|
||||
r = field.parse(result)
|
||||
if field.name in prev:
|
||||
results[(file, name)][field.name] = field.acc(
|
||||
[prev[field.name], r])
|
||||
else:
|
||||
results[(file, name)][field.name] = r
|
||||
except (KeyError, ValueError):
|
||||
pass
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
# find fields
|
||||
if args.get('all_fields'):
|
||||
fields = FIELDS
|
||||
elif args.get('fields') is not None:
|
||||
fields_dict = {field.name: field for field in FIELDS}
|
||||
fields = [fields_dict[f] for f in args['fields']]
|
||||
else:
|
||||
fields = []
|
||||
for field in FIELDS:
|
||||
if any(field.name in result for result in results.values()):
|
||||
fields.append(field)
|
||||
|
||||
# find total for every field
|
||||
total = {}
|
||||
for result in results.values():
|
||||
for field in fields:
|
||||
if field.name in result and field.name in total:
|
||||
total[field.name] = field.acc(
|
||||
[total[field.name], result[field.name]])
|
||||
elif field.name in result:
|
||||
total[field.name] = result[field.name]
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
prev_results = co.defaultdict(lambda: {})
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
r = csv.DictReader(f)
|
||||
for result in r:
|
||||
file = result.pop('file', '')
|
||||
name = result.pop('name', '')
|
||||
prev = prev_results[(file, name)]
|
||||
for field in FIELDS:
|
||||
try:
|
||||
r = field.parse(result)
|
||||
if field.name in prev:
|
||||
prev_results[(file, name)][field.name] = field.acc(
|
||||
[prev[field.name], r])
|
||||
else:
|
||||
prev_results[(file, name)][field.name] = r
|
||||
except (KeyError, ValueError):
|
||||
pass
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
prev_total = {}
|
||||
for result in prev_results.values():
|
||||
for field in fields:
|
||||
if field.name in result and field.name in prev_total:
|
||||
prev_total[field.name] = field.acc(
|
||||
[prev_total[field.name], result[field.name]])
|
||||
elif field.name in result:
|
||||
prev_total[field.name] = result[field.name]
|
||||
|
||||
# print results
|
||||
def dedup_entries(results, by='name'):
|
||||
entries = co.defaultdict(lambda: {})
|
||||
for (file, func), result in results.items():
|
||||
entry = (file if by == 'file' else func)
|
||||
prev = entries[entry]
|
||||
for field in fields:
|
||||
if field.name in result and field.name in prev:
|
||||
entries[entry][field.name] = field.acc(
|
||||
[prev[field.name], result[field.name]])
|
||||
elif field.name in result:
|
||||
entries[entry][field.name] = result[field.name]
|
||||
return entries
|
||||
|
||||
def sorted_entries(entries):
|
||||
if args.get('sort') is not None:
|
||||
field = {field.name: field for field in FIELDS}[args['sort']]
|
||||
return sorted(entries, key=lambda x: (
|
||||
-(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
|
||||
elif args.get('reverse_sort') is not None:
|
||||
field = {field.name: field for field in FIELDS}[args['reverse_sort']]
|
||||
return sorted(entries, key=lambda x: (
|
||||
+(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
|
||||
else:
|
||||
return sorted(entries)
|
||||
|
||||
def print_header(by=''):
|
||||
if not args.get('diff'):
|
||||
print('%-36s' % by, end='')
|
||||
for field in fields:
|
||||
print((' '+field.fmt) % field.name, end='')
|
||||
print()
|
||||
else:
|
||||
print('%-36s' % by, end='')
|
||||
for field in fields:
|
||||
print((' '+field.fmt) % field.name, end='')
|
||||
print(' %-9s' % '', end='')
|
||||
print()
|
||||
|
||||
def print_entry(name, result):
|
||||
print('%-36s' % name, end='')
|
||||
for field in fields:
|
||||
r = result.get(field.name)
|
||||
if r is not None:
|
||||
print((' '+field.fmt) % field.repr(r), end='')
|
||||
else:
|
||||
print((' '+field.fmt) % '-', end='')
|
||||
print()
|
||||
|
||||
def print_diff_entry(name, old, new):
|
||||
print('%-36s' % name, end='')
|
||||
for field in fields:
|
||||
n = new.get(field.name)
|
||||
if n is not None:
|
||||
print((' '+field.fmt) % field.repr(n), end='')
|
||||
else:
|
||||
print((' '+field.fmt) % '-', end='')
|
||||
o = old.get(field.name)
|
||||
ratio = (
|
||||
0.0 if m.isinf(o or 0) and m.isinf(n or 0)
|
||||
else +float('inf') if m.isinf(n or 0)
|
||||
else -float('inf') if m.isinf(o or 0)
|
||||
else 0.0 if not o and not n
|
||||
else +1.0 if not o
|
||||
else -1.0 if not n
|
||||
else field.ratio(o, n))
|
||||
print(' %-9s' % (
|
||||
'' if not ratio
|
||||
else '(+∞%)' if ratio > 0 and m.isinf(ratio)
|
||||
else '(-∞%)' if ratio < 0 and m.isinf(ratio)
|
||||
else '(%+.1f%%)' % (100*ratio)), end='')
|
||||
print()
|
||||
|
||||
def print_entries(by='name'):
|
||||
entries = dedup_entries(results, by=by)
|
||||
|
||||
if not args.get('diff'):
|
||||
print_header(by=by)
|
||||
for name, result in sorted_entries(entries.items()):
|
||||
print_entry(name, result)
|
||||
else:
|
||||
prev_entries = dedup_entries(prev_results, by=by)
|
||||
print_header(by='%s (%d added, %d removed)' % (by,
|
||||
sum(1 for name in entries if name not in prev_entries),
|
||||
sum(1 for name in prev_entries if name not in entries)))
|
||||
for name, result in sorted_entries(entries.items()):
|
||||
if args.get('all') or result != prev_entries.get(name, {}):
|
||||
print_diff_entry(name, prev_entries.get(name, {}), result)
|
||||
|
||||
def print_totals():
|
||||
if not args.get('diff'):
|
||||
print_entry('TOTAL', total)
|
||||
else:
|
||||
print_diff_entry('TOTAL', prev_total, total)
|
||||
|
||||
if args.get('summary'):
|
||||
print_header()
|
||||
print_totals()
|
||||
elif args.get('files'):
|
||||
print_entries(by='file')
|
||||
print_totals()
|
||||
else:
|
||||
print_entries(by='name')
|
||||
print_totals()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Summarize measurements")
|
||||
parser.add_argument('csv_paths', nargs='*', default='-',
|
||||
help="Description of where to find *.csv files. May be a directory \
|
||||
or list of paths. *.csv files will be merged to show the total \
|
||||
coverage.")
|
||||
parser.add_argument('-d', '--diff',
|
||||
help="Specify CSV file to diff against.")
|
||||
parser.add_argument('-a', '--all', action='store_true',
|
||||
help="Show all objects, not just the ones that changed.")
|
||||
parser.add_argument('-e', '--all-fields', action='store_true',
|
||||
help="Show all fields, even those with no results.")
|
||||
parser.add_argument('-f', '--fields', type=lambda x: re.split(r'\s*,\s*', x),
|
||||
help="Comma separated list of fields to print, by default all fields \
|
||||
that are found in the CSV files are printed.")
|
||||
parser.add_argument('-s', '--sort',
|
||||
help="Sort by this field.")
|
||||
parser.add_argument('-S', '--reverse-sort',
|
||||
help="Sort by this field, but backwards.")
|
||||
parser.add_argument('-F', '--files', action='store_true',
|
||||
help="Show file-level calls.")
|
||||
parser.add_argument('-Y', '--summary', action='store_true',
|
||||
help="Only show the totals.")
|
||||
sys.exit(main(**vars(parser.parse_args())))
|
||||
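Each Field row above bundles a parser, an accumulator, a sort key, and formatting for one column family of the merged CSVs. A minimal sketch of how the 'coverage' field folds hits/count pairs across rows (the row values are made up for illustration):

import functools as ft

# two hypothetical rows, as csv.DictReader would yield them
rows = [{'coverage_hits': '3', 'coverage_count': '4'},
        {'coverage_hits': '1', 'coverage_count': '6'}]

parse = lambda r: (int(r['coverage_hits']), int(r['coverage_count']))
acc = lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs)

hits, count = acc([parse(r) for r in rows])
print('%d/%d %.1f%%' % (hits, count, 100*hits/count))  # -> 4/10 40.0%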
307
scripts/test.py
@@ -20,58 +20,57 @@ import pty
|
||||
import errno
|
||||
import signal
|
||||
|
||||
TEST_PATHS = 'tests'
|
||||
TESTDIR = 'tests'
|
||||
RULES = """
|
||||
# add block devices to sources
|
||||
TESTSRC ?= $(SRC) $(wildcard bd/*.c)
|
||||
|
||||
define FLATTEN
|
||||
%(path)s%%$(subst /,.,$(target)): $(target)
|
||||
tests/%$(subst /,.,$(target)): $(target)
|
||||
./scripts/explode_asserts.py $$< -o $$@
|
||||
endef
|
||||
$(foreach target,$(TESTSRC),$(eval $(FLATTEN)))
|
||||
$(foreach target,$(SRC),$(eval $(FLATTEN)))
|
||||
|
||||
-include tests/*.d
|
||||
|
||||
-include %(path)s*.d
|
||||
.SECONDARY:
|
||||
|
||||
%(path)s.test: %(path)s.test.o \\
|
||||
$(foreach t,$(subst /,.,$(TESTSRC:.c=.o)),%(path)s.$t)
|
||||
%.test: %.test.o $(foreach f,$(subst /,.,$(SRC:.c=.o)),%.$f)
|
||||
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
|
||||
|
||||
# needed in case builddir is different
|
||||
%(path)s%%.o: %(path)s%%.c
|
||||
$(CC) -c -MMD $(CFLAGS) $< -o $@
|
||||
"""
|
||||
COVERAGE_RULES = """
|
||||
%(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage
|
||||
BEFORE_MAIN = """
|
||||
const char *lfs_testbd_path;
|
||||
uint32_t lfs_testbd_cycles;
|
||||
|
||||
# delete lingering coverage
|
||||
%(path)s.test: | %(path)s.info.clean
|
||||
.PHONY: %(path)s.info.clean
|
||||
%(path)s.info.clean:
|
||||
rm -f %(path)s*.gcda
|
||||
int lfs_testbd_readctx(void *ctx, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size) {
|
||||
return lfs_testbd_read((lfs_testbd_t*)ctx, block, off, buffer, size);
|
||||
}
|
||||
|
||||
# accumulate coverage info
|
||||
.PHONY: %(path)s.info
|
||||
%(path)s.info:
|
||||
$(strip $(LCOV) -c \\
|
||||
$(addprefix -d ,$(wildcard %(path)s*.gcda)) \\
|
||||
--rc 'geninfo_adjust_src_path=$(shell pwd)' \\
|
||||
-o $@)
|
||||
$(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@
|
||||
ifdef COVERAGETARGET
|
||||
$(strip $(LCOV) -a $@ \\
|
||||
$(addprefix -a ,$(wildcard $(COVERAGETARGET))) \\
|
||||
-o $(COVERAGETARGET))
|
||||
endif
|
||||
int lfs_testbd_progctx(void *ctx, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size) {
|
||||
return lfs_testbd_prog((lfs_testbd_t*)ctx, block, off, buffer, size);
|
||||
}
|
||||
|
||||
int lfs_testbd_erasectx(void *ctx, lfs_block_t block) {
|
||||
return lfs_testbd_erase((lfs_testbd_t*)ctx, block);
|
||||
}
|
||||
|
||||
int lfs_testbd_syncctx(void *ctx) {
|
||||
return lfs_testbd_sync((lfs_testbd_t*)ctx);
|
||||
}
|
||||
"""
|
||||
GLOBALS = """
|
||||
BEFORE_TESTS = """
|
||||
//////////////// AUTOGENERATED TEST ////////////////
|
||||
#include "lfs.h"
|
||||
#include "bd/lfs_testbd.h"
|
||||
#include <stdio.h>
|
||||
|
||||
extern const char *lfs_testbd_path;
|
||||
extern uint32_t lfs_testbd_cycles;
|
||||
|
||||
extern int lfs_testbd_readctx(void *ctx, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size);
|
||||
extern int lfs_testbd_progctx(void *ctx, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size);
|
||||
extern int lfs_testbd_erasectx(void *ctx, lfs_block_t block);
|
||||
extern int lfs_testbd_syncctx(void *ctx);
|
||||
"""
|
||||
DEFINES = {
|
||||
'LFS_READ_SIZE': 16,
|
||||
@@ -79,12 +78,23 @@ DEFINES = {
|
||||
'LFS_BLOCK_SIZE': 512,
|
||||
'LFS_BLOCK_COUNT': 1024,
|
||||
'LFS_BLOCK_CYCLES': -1,
|
||||
'LFS_CACHE_SIZE': '(64 % LFS_PROG_SIZE == 0 ? 64 : LFS_PROG_SIZE)',
|
||||
'LFS_BUFFER_SIZE': '(64 % LFS_PROG_SIZE == 0 ? 64 : LFS_PROG_SIZE)',
|
||||
'LFS_LOOKAHEAD_SIZE': 16,
|
||||
'LFS_ERASE_VALUE': 0xff,
|
||||
'LFS_ERASE_CYCLES': 0,
|
||||
'LFS_BADBLOCK_BEHAVIOR': 'LFS_TESTBD_BADBLOCK_PROGERROR',
|
||||
}
|
||||
CFG = {
|
||||
'read_size': 'LFS_READ_SIZE',
|
||||
'prog_size': 'LFS_PROG_SIZE',
|
||||
'read_size': 'LFS_READ_SIZE',
|
||||
'prog_size': 'LFS_PROG_SIZE',
|
||||
'block_size': 'LFS_BLOCK_SIZE',
|
||||
'block_count': 'LFS_BLOCK_COUNT',
|
||||
'block_cycles': 'LFS_BLOCK_CYCLES',
|
||||
'buffer_size': 'LFS_BUFFER_SIZE',
|
||||
'lookahead_size': 'LFS_LOOKAHEAD_SIZE',
|
||||
}
|
||||
PROLOGUE = """
|
||||
// prologue
|
||||
__attribute__((unused)) lfs_t lfs;
|
||||
@@ -97,33 +107,45 @@ PROLOGUE = """
|
||||
__attribute__((unused)) lfs_size_t size;
|
||||
__attribute__((unused)) int err;
|
||||
|
||||
__attribute__((unused)) const struct lfs_config cfg = {
|
||||
.context = &bd,
|
||||
.read = lfs_testbd_read,
|
||||
.prog = lfs_testbd_prog,
|
||||
.erase = lfs_testbd_erase,
|
||||
.sync = lfs_testbd_sync,
|
||||
.read_size = LFS_READ_SIZE,
|
||||
.prog_size = LFS_PROG_SIZE,
|
||||
.block_size = LFS_BLOCK_SIZE,
|
||||
.block_count = LFS_BLOCK_COUNT,
|
||||
.block_cycles = LFS_BLOCK_CYCLES,
|
||||
.cache_size = LFS_CACHE_SIZE,
|
||||
.lookahead_size = LFS_LOOKAHEAD_SIZE,
|
||||
__attribute__((unused)) const struct lfs_cfg cfg = {
|
||||
.bd_ctx = &bd,
|
||||
.bd_read = lfs_testbd_readctx,
|
||||
.bd_prog = lfs_testbd_progctx,
|
||||
.bd_erase = lfs_testbd_erasectx,
|
||||
.bd_sync = lfs_testbd_syncctx,
|
||||
%(cfg)s
|
||||
};
|
||||
|
||||
__attribute__((unused)) const struct lfs_testbd_config bdcfg = {
|
||||
__attribute__((unused)) const struct lfs_testbd_cfg bdcfg = {
|
||||
.rambd_cfg = &(const struct lfs_rambd_cfg){
|
||||
.read_size = LFS_READ_SIZE,
|
||||
.prog_size = LFS_PROG_SIZE,
|
||||
.erase_size = LFS_BLOCK_SIZE,
|
||||
.erase_count = LFS_BLOCK_COUNT,
|
||||
.erase_value = LFS_ERASE_VALUE,
|
||||
},
|
||||
.filebd_cfg = &(const struct lfs_filebd_cfg){
|
||||
.read_size = LFS_READ_SIZE,
|
||||
.prog_size = LFS_PROG_SIZE,
|
||||
.erase_size = LFS_BLOCK_SIZE,
|
||||
.erase_count = LFS_BLOCK_COUNT,
|
||||
.erase_value = LFS_ERASE_VALUE,
|
||||
},
|
||||
.read_size = LFS_READ_SIZE,
|
||||
.prog_size = LFS_PROG_SIZE,
|
||||
.erase_size = LFS_BLOCK_SIZE,
|
||||
.erase_count = LFS_BLOCK_COUNT,
|
||||
.erase_value = LFS_ERASE_VALUE,
|
||||
.erase_cycles = LFS_ERASE_CYCLES,
|
||||
.badblock_behavior = LFS_BADBLOCK_BEHAVIOR,
|
||||
.power_cycles = lfs_testbd_cycles,
|
||||
};
|
||||
|
||||
lfs_testbd_createcfg(&cfg, lfs_testbd_path, &bdcfg) => 0;
|
||||
lfs_testbd_createcfg(&bd, lfs_testbd_path, &bdcfg) => 0;
|
||||
"""
|
||||
EPILOGUE = """
|
||||
// epilogue
|
||||
lfs_testbd_destroy(&cfg) => 0;
|
||||
lfs_testbd_destroy(&bd) => 0;
|
||||
"""
|
||||
PASS = '\033[32m✓\033[0m'
|
||||
FAIL = '\033[31m✗\033[0m'
|
||||
@@ -150,8 +172,6 @@ class TestCase:
|
||||
self.if_ = config.get('if', None)
|
||||
self.in_ = config.get('in', None)
|
||||
|
||||
self.result = None
|
||||
|
||||
def __str__(self):
|
||||
if hasattr(self, 'permno'):
|
||||
if any(k not in self.case.defines for k in self.defines):
|
||||
@@ -187,7 +207,11 @@ class TestCase:
|
||||
for k in sorted(self.perms[0].defines)
|
||||
if k not in self.defines)))
|
||||
|
||||
f.write(PROLOGUE)
|
||||
f.write(PROLOGUE % dict(
|
||||
cfg='\n'.join(
|
||||
8*' '+'.%s = %s,\n' % (k, d)
|
||||
for k, d in sorted(CFG.items())
|
||||
if d not in self.suite.defines)))
|
||||
f.write('\n')
|
||||
f.write(4*' '+'// test case %d\n' % self.caseno)
|
||||
f.write(4*' '+'#line %d "%s"\n' % (self.code_lineno, self.suite.path))
|
||||
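This hunk fills the prologue's %(cfg)s slot from the CFG mapping, emitting one designated-initializer line for every entry whose backing define is not already in the suite's defines. A minimal sketch of that expansion, with a hypothetical suite_defines standing in for self.suite.defines:

# hypothetical subset of CFG and a suite that already defines LFS_PROG_SIZE
CFG = {'read_size': 'LFS_READ_SIZE', 'prog_size': 'LFS_PROG_SIZE'}
suite_defines = {'LFS_PROG_SIZE'}

cfg = '\n'.join(
    8*' ' + '.%s = %s,\n' % (k, d)
    for k, d in sorted(CFG.items())
    if d not in suite_defines)
print(cfg)  # emits only "        .read_size = LFS_READ_SIZE," (plus trailing newlines)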
@@ -196,6 +220,7 @@ class TestCase:
|
||||
f.write(self.code)
|
||||
|
||||
# epilogue
|
||||
f.write(4*' '+'#line %d "%s"\n' % (f.lineno+1, f.path))
|
||||
f.write(EPILOGUE)
|
||||
f.write('}\n')
|
||||
|
||||
@@ -212,7 +237,7 @@ class TestCase:
|
||||
len(self.filter) >= 2 and
|
||||
self.filter[1] != self.permno):
|
||||
return False
|
||||
elif args.get('no_internal') and self.in_ is not None:
|
||||
elif args.get('no_internal', False) and self.in_ is not None:
|
||||
return False
|
||||
elif self.if_ is not None:
|
||||
if_ = self.if_
|
||||
@@ -246,7 +271,7 @@ class TestCase:
|
||||
try:
|
||||
with open(disk, 'w') as f:
|
||||
f.truncate(0)
|
||||
if args.get('verbose'):
|
||||
if args.get('verbose', False):
|
||||
print('truncate --size=0', disk)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
@@ -270,14 +295,14 @@ class TestCase:
|
||||
'-ex', 'r'])
|
||||
ncmd.extend(['--args'] + cmd)
|
||||
|
||||
if args.get('verbose'):
|
||||
if args.get('verbose', False):
|
||||
print(' '.join(shlex.quote(c) for c in ncmd))
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
sys.exit(sp.call(ncmd))
|
||||
|
||||
# run test case!
|
||||
mpty, spty = pty.openpty()
|
||||
if args.get('verbose'):
|
||||
if args.get('verbose', False):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd, stdout=spty, stderr=spty)
|
||||
os.close(spty)
|
||||
@@ -292,10 +317,8 @@ class TestCase:
|
||||
if e.errno == errno.EIO:
|
||||
break
|
||||
raise
|
||||
if not line:
|
||||
break
|
||||
stdout.append(line)
|
||||
if args.get('verbose'):
|
||||
if args.get('verbose', False):
|
||||
sys.stdout.write(line)
|
||||
# intercept asserts
|
||||
m = re.match(
|
||||
@@ -334,7 +357,7 @@ class ValgrindTestCase(TestCase):
|
||||
return not self.leaky and super().shouldtest(**args)
|
||||
|
||||
def test(self, exec=[], **args):
|
||||
verbose = args.get('verbose')
|
||||
verbose = args.get('verbose', False)
|
||||
uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1)
|
||||
exec = [
|
||||
'valgrind',
|
||||
@@ -386,17 +409,12 @@ class TestSuite:
|
||||
self.name = os.path.basename(path)
|
||||
if self.name.endswith('.toml'):
|
||||
self.name = self.name[:-len('.toml')]
|
||||
if args.get('build_dir'):
|
||||
self.toml = path
|
||||
self.path = args['build_dir'] + '/' + path
|
||||
else:
|
||||
self.toml = path
|
||||
self.path = path
|
||||
self.classes = classes
|
||||
self.defines = defines.copy()
|
||||
self.filter = filter
|
||||
|
||||
with open(self.toml) as f:
|
||||
with open(path) as f:
|
||||
# load tests
|
||||
config = toml.load(f)
|
||||
|
||||
@@ -506,31 +524,44 @@ class TestSuite:
|
||||
return self.perms
|
||||
|
||||
def build(self, **args):
|
||||
# intercept writes to keep track of linenos
|
||||
def lineno_open(path, flags):
|
||||
f = open(path, flags)
|
||||
f.path = path
|
||||
f.lineno = 1
|
||||
write = f.write
|
||||
|
||||
def lineno_write(s):
|
||||
f.lineno += s.count('\n')
|
||||
write(s)
|
||||
f.write = lineno_write
|
||||
return f
|
||||
|
||||
# build test files
|
||||
tf = open(self.path + '.test.tc', 'w')
|
||||
tf.write(GLOBALS)
|
||||
tf = lineno_open(self.path + '.test.tc', 'w')
|
||||
tf.write(BEFORE_TESTS)
|
||||
if self.code is not None:
|
||||
tf.write('#line %d "%s"\n' % (self.code_lineno, self.path))
|
||||
tf.write(self.code)
|
||||
tf.write('#line %d "%s"\n' % (tf.lineno+1, tf.path))
|
||||
|
||||
tfs = {None: tf}
|
||||
for case in self.cases:
|
||||
if case.in_ not in tfs:
|
||||
tfs[case.in_] = open(self.path+'.'+
|
||||
re.sub('(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w')
|
||||
tfs[case.in_] = lineno_open(self.path+'.'+
|
||||
case.in_.replace('/', '.')[:-2]+'.tc', 'w')
|
||||
tfs[case.in_].write('#line 1 "%s"\n' % case.in_)
|
||||
with open(case.in_) as f:
|
||||
for line in f:
|
||||
tfs[case.in_].write(line)
|
||||
tfs[case.in_].write('\n')
|
||||
tfs[case.in_].write(GLOBALS)
|
||||
tfs[case.in_].write(BEFORE_TESTS)
|
||||
|
||||
tfs[case.in_].write('\n')
|
||||
case.build(tfs[case.in_], **args)
|
||||
|
||||
tf.write(BEFORE_MAIN)
|
||||
tf.write('\n')
|
||||
tf.write('const char *lfs_testbd_path;\n')
|
||||
tf.write('uint32_t lfs_testbd_cycles;\n')
|
||||
tf.write('int main(int argc, char **argv) {\n')
|
||||
tf.write(4*' '+'int case_ = (argc > 1) ? atoi(argv[1]) : 0;\n')
|
||||
tf.write(4*' '+'int perm = (argc > 2) ? atoi(argv[2]) : 0;\n')
|
||||
@@ -556,33 +587,25 @@ class TestSuite:
|
||||
|
||||
# write makefiles
|
||||
with open(self.path + '.mk', 'w') as mk:
|
||||
mk.write(RULES.replace(4*' ', '\t') % dict(path=self.path))
|
||||
mk.write('\n')
|
||||
|
||||
# add coverage hooks?
|
||||
if args.get('coverage'):
|
||||
mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict(
|
||||
path=self.path))
|
||||
mk.write(RULES.replace(4*' ', '\t'))
|
||||
mk.write('\n')
|
||||
|
||||
# add truly global defines globally
|
||||
for k, v in sorted(self.defines.items()):
|
||||
mk.write('%s.test: override CFLAGS += -D%s=%r\n'
|
||||
% (self.path, k, v))
|
||||
mk.write('%s: override CFLAGS += -D%s=%r\n' % (
|
||||
self.path+'.test', k, v))
|
||||
|
||||
for path in tfs:
|
||||
if path is None:
|
||||
mk.write('%s: %s | %s\n' % (
|
||||
self.path+'.test.c',
|
||||
self.toml,
|
||||
self.path,
|
||||
self.path+'.test.tc'))
|
||||
else:
|
||||
mk.write('%s: %s %s | %s\n' % (
|
||||
self.path+'.'+path.replace('/', '.'),
|
||||
self.toml,
|
||||
path,
|
||||
self.path+'.'+re.sub('(\.c)?$', '.tc',
|
||||
path.replace('/', '.'))))
|
||||
self.path, path,
|
||||
self.path+'.'+path.replace('/', '.')[:-2]+'.tc'))
|
||||
mk.write('\t./scripts/explode_asserts.py $| -o $@\n')
|
||||
|
||||
self.makefile = self.path + '.mk'
|
||||
@@ -605,7 +628,7 @@ class TestSuite:
|
||||
if not args.get('verbose', True):
|
||||
sys.stdout.write(FAIL)
|
||||
sys.stdout.flush()
|
||||
if not args.get('keep_going'):
|
||||
if not args.get('keep_going', False):
|
||||
if not args.get('verbose', True):
|
||||
sys.stdout.write('\n')
|
||||
raise
|
||||
@@ -627,30 +650,30 @@ def main(**args):
|
||||
|
||||
# and what class of TestCase to run
|
||||
classes = []
|
||||
if args.get('normal'):
|
||||
if args.get('normal', False):
|
||||
classes.append(TestCase)
|
||||
if args.get('reentrant'):
|
||||
if args.get('reentrant', False):
|
||||
classes.append(ReentrantTestCase)
|
||||
if args.get('valgrind'):
|
||||
if args.get('valgrind', False):
|
||||
classes.append(ValgrindTestCase)
|
||||
if not classes:
|
||||
classes = [TestCase]
|
||||
|
||||
suites = []
|
||||
for testpath in args['test_paths']:
|
||||
for testpath in args['testpaths']:
|
||||
# optionally specified test case/perm
|
||||
testpath, *filter = testpath.split('#')
|
||||
filter = [int(f) for f in filter]
|
||||
|
||||
# figure out the suite's toml file
|
||||
if os.path.isdir(testpath):
|
||||
testpath = testpath + '/*.toml'
|
||||
testpath = testpath + '/test_*.toml'
|
||||
elif os.path.isfile(testpath):
|
||||
testpath = testpath
|
||||
elif testpath.endswith('.toml'):
|
||||
testpath = TEST_PATHS + '/' + testpath
|
||||
testpath = TESTDIR + '/' + testpath
|
||||
else:
|
||||
testpath = TEST_PATHS + '/' + testpath + '.toml'
|
||||
testpath = TESTDIR + '/' + testpath + '.toml'
|
||||
|
||||
# find tests
|
||||
for path in glob.glob(testpath):
|
||||
@@ -676,7 +699,7 @@ def main(**args):
|
||||
list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
|
||||
[target for target in targets])
|
||||
mpty, spty = pty.openpty()
|
||||
if args.get('verbose'):
|
||||
if args.get('verbose', False):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd, stdout=spty, stderr=spty)
|
||||
os.close(spty)
|
||||
@@ -689,17 +712,15 @@ def main(**args):
|
||||
if e.errno == errno.EIO:
|
||||
break
|
||||
raise
|
||||
if not line:
|
||||
break
|
||||
stdout.append(line)
|
||||
if args.get('verbose'):
|
||||
if args.get('verbose', False):
|
||||
sys.stdout.write(line)
|
||||
# intercept warnings
|
||||
m = re.match(
|
||||
'^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
|
||||
.format('(?:\033\[[\d;]*.| )*', 'warning'),
|
||||
line)
|
||||
if m and not args.get('verbose'):
|
||||
if m and not args.get('verbose', False):
|
||||
try:
|
||||
with open(m.group(1)) as f:
|
||||
lineno = int(m.group(2))
|
||||
@@ -712,26 +733,27 @@ def main(**args):
|
||||
except:
|
||||
pass
|
||||
proc.wait()
|
||||
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
if not args.get('verbose', False):
|
||||
for line in stdout:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
sys.exit(-3)
|
||||
|
||||
print('built %d test suites, %d test cases, %d permutations' % (
|
||||
len(suites),
|
||||
sum(len(suite.cases) for suite in suites),
|
||||
sum(len(suite.perms) for suite in suites)))
|
||||
|
||||
total = 0
|
||||
filtered = 0
|
||||
for suite in suites:
|
||||
for perm in suite.perms:
|
||||
total += perm.shouldtest(**args)
|
||||
if total != sum(len(suite.perms) for suite in suites):
|
||||
print('filtered down to %d permutations' % total)
|
||||
filtered += perm.shouldtest(**args)
|
||||
if filtered != sum(len(suite.perms) for suite in suites):
|
||||
print('filtered down to %d permutations' % filtered)
|
||||
|
||||
# only requested to build?
|
||||
if args.get('build'):
|
||||
if args.get('build', False):
|
||||
return 0
|
||||
|
||||
print('====== testing ======')
|
||||
@@ -746,12 +768,15 @@ def main(**args):
|
||||
failed = 0
|
||||
for suite in suites:
|
||||
for perm in suite.perms:
|
||||
if not hasattr(perm, 'result'):
|
||||
continue
|
||||
|
||||
if perm.result == PASS:
|
||||
passed += 1
|
||||
elif isinstance(perm.result, TestFailure):
|
||||
else:
|
||||
sys.stdout.write(
|
||||
"\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m "
|
||||
"{perm} failed\n".format(
|
||||
"{perm} failed with {returncode}\n".format(
|
||||
perm=perm, path=perm.suite.path, lineno=perm.lineno,
|
||||
returncode=perm.result.returncode or 0))
|
||||
if perm.result.stdout:
|
||||
@@ -769,36 +794,11 @@ def main(**args):
|
||||
sys.stdout.write('\n')
|
||||
failed += 1
|
||||
|
||||
if args.get('coverage'):
|
||||
# collect coverage info
|
||||
# why -j1? lcov doesn't work in parallel because of gcov limitations
|
||||
cmd = (['make', '-j1', '-f', 'Makefile'] +
|
||||
list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
|
||||
(['COVERAGETARGET=%s' % args['coverage']]
|
||||
if isinstance(args['coverage'], str) else []) +
|
||||
[suite.path + '.info' for suite in suites
|
||||
if any(perm.result == PASS for perm in suite.perms)])
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE if not args.get('verbose') else None,
|
||||
stderr=sp.STDOUT if not args.get('verbose') else None,
|
||||
universal_newlines=True)
|
||||
stdout = []
|
||||
for line in proc.stdout:
|
||||
stdout.append(line)
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in stdout:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
|
||||
if args.get('gdb'):
|
||||
if args.get('gdb', False):
|
||||
failure = None
|
||||
for suite in suites:
|
||||
for perm in suite.perms:
|
||||
if isinstance(perm.result, TestFailure):
|
||||
if getattr(perm, 'result', PASS) != PASS:
|
||||
failure = perm.result
|
||||
if failure is not None:
|
||||
print('======= gdb ======')
|
||||
@@ -806,22 +806,20 @@ def main(**args):
|
||||
failure.case.test(failure=failure, **args)
|
||||
sys.exit(0)
|
||||
|
||||
print('tests passed %d/%d (%.1f%%)' % (passed, total,
|
||||
100*(passed/total if total else 1.0)))
|
||||
print('tests failed %d/%d (%.1f%%)' % (failed, total,
|
||||
100*(failed/total if total else 1.0)))
|
||||
print('tests passed: %d' % passed)
|
||||
print('tests failed: %d' % failed)
|
||||
return 1 if failed > 0 else 0
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Run parameterized tests in various configurations.")
|
||||
parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS],
|
||||
parser.add_argument('testpaths', nargs='*', default=[TESTDIR],
|
||||
help="Description of test(s) to run. By default, this is all tests \
|
||||
found in the \"{0}\" directory. Here, you can specify a different \
|
||||
directory of tests, a specific file, a suite by name, and even \
|
||||
specific test cases and permutations. For example \
|
||||
\"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TEST_PATHS))
|
||||
directory of tests, a specific file, a suite by name, and even a \
|
||||
specific test case by adding brackets. For example \
|
||||
\"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TESTDIR))
|
||||
parser.add_argument('-D', action='append', default=[],
|
||||
help="Overriding parameter definitions.")
|
||||
parser.add_argument('-v', '--verbose', action='store_true',
|
||||
@@ -842,19 +840,10 @@ if __name__ == "__main__":
|
||||
help="Run tests normally.")
|
||||
parser.add_argument('-r', '--reentrant', action='store_true',
|
||||
help="Run reentrant tests with simulated power-loss.")
|
||||
parser.add_argument('--valgrind', action='store_true',
|
||||
parser.add_argument('-V', '--valgrind', action='store_true',
|
||||
help="Run non-leaky tests under valgrind to check for memory leaks.")
|
||||
parser.add_argument('--exec', default=[], type=lambda e: e.split(),
|
||||
parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '),
|
||||
help="Run tests with another executable prefixed on the command line.")
|
||||
parser.add_argument('--disk',
|
||||
parser.add_argument('-d', '--disk',
|
||||
help="Specify a file to use for persistent/reentrant tests.")
|
||||
parser.add_argument('--coverage', type=lambda x: x if x else True,
|
||||
nargs='?', const='',
|
||||
help="Collect coverage information during testing. This uses lcov/gcov \
|
||||
to accumulate coverage information into *.info files. May also be \
a path to a *.info file to accumulate coverage info into.")
|
||||
parser.add_argument('--build-dir',
|
||||
help="Build relative to the specified directory instead of the \
|
||||
current directory.")
|
||||
|
||||
sys.exit(main(**vars(parser.parse_args())))
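The reworked -e/--exec option above tokenizes its argument with a plain split(' '), so a quoted prefix becomes the leading argv entries of each test invocation. A minimal sketch (the test binary path and case/permutation arguments are illustrative, not taken from this diff):

# how a quoted --exec prefix is split and prepended to a test command
exec_prefix = 'valgrind --leak-check=full'.split(' ')
cmd = exec_prefix + ['./tests/test_dirs.test', '1', '1']
print(cmd)
# -> ['valgrind', '--leak-check=full', './tests/test_dirs.test', '1', '1']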
|
||||
|
||||
@@ -9,12 +9,12 @@ code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
lfs_file_t files[FILES];
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &files[n], path,
|
||||
@@ -31,7 +31,7 @@ code = '''
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
@@ -51,13 +51,13 @@ define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)'
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
@@ -70,7 +70,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
@@ -92,14 +92,14 @@ code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
lfs_file_t files[FILES];
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
|
||||
for (int c = 0; c < CYCLES; c++) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &files[n], path,
|
||||
@@ -116,7 +116,7 @@ code = '''
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
@@ -129,7 +129,7 @@ code = '''
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
@@ -146,15 +146,15 @@ define.CYCLES = [1, 10]
|
||||
code = '''
|
||||
const char *names[FILES] = {"bacon", "eggs", "pancakes"};
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
|
||||
for (int c = 0; c < CYCLES; c++) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
@@ -167,7 +167,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
@@ -180,7 +180,7 @@ code = '''
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
@@ -192,8 +192,8 @@ code = '''
|
||||
|
||||
[[case]] # exhaustion test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
size = strlen("exhaustion");
|
||||
memcpy(buffer, "exhaustion", size);
|
||||
@@ -216,7 +216,7 @@ code = '''
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
|
||||
size = strlen("exhaustion");
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
@@ -229,8 +229,8 @@ code = '''
|
||||
[[case]] # exhaustion wraparound test
|
||||
define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / 3)'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "padding", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
size = strlen("buffering");
|
||||
@@ -263,7 +263,7 @@ code = '''
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
|
||||
size = strlen("exhaustion");
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
@@ -276,8 +276,8 @@ code = '''
|
||||
|
||||
[[case]] # dir exhaustion test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
|
||||
// find out max file size
|
||||
lfs_mkdir(&lfs, "exhaustiondir") => 0;
|
||||
@@ -328,8 +328,8 @@ in = "lfs.c"
|
||||
define.LFS_ERASE_CYCLES = 0xffffffff
|
||||
define.LFS_BADBLOCK_BEHAVIOR = 'LFS_TESTBD_BADBLOCK_READERROR'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
// first fill to exhaustion to find available space
|
||||
lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
strcpy((char*)buffer, "waka");
|
||||
@@ -358,11 +358,11 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// remount to force an alloc scan
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
|
||||
// but mark the head of our file as a "bad block", this is force our
|
||||
// scan to bail early
|
||||
lfs_testbd_setwear(&cfg, fileblock, 0xffffffff) => 0;
|
||||
lfs_testbd_setwear(&bd, fileblock, 0xffffffff) => 0;
|
||||
lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
strcpy((char*)buffer, "chomp");
|
||||
size = strlen("chomp");
|
||||
@@ -377,7 +377,7 @@ code = '''
|
||||
|
||||
// now reverse the "bad block" and try to write the file again until we
|
||||
// run out of space
|
||||
lfs_testbd_setwear(&cfg, fileblock, 0) => 0;
|
||||
lfs_testbd_setwear(&bd, fileblock, 0) => 0;
|
||||
lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
strcpy((char*)buffer, "chomp");
|
||||
size = strlen("chomp");
|
||||
@@ -393,7 +393,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// check that the disk isn't hurt
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "pacman", LFS_O_RDONLY) => 0;
|
||||
strcpy((char*)buffer, "waka");
|
||||
size = strlen("waka");
|
||||
@@ -416,8 +416,8 @@ define.LFS_BLOCK_SIZE = 512
|
||||
define.LFS_BLOCK_COUNT = 1024
|
||||
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
|
||||
// find out max file size
|
||||
lfs_mkdir(&lfs, "exhaustiondir") => 0;
|
||||
@@ -487,22 +487,22 @@ define.LFS_BLOCK_SIZE = 512
|
||||
define.LFS_BLOCK_COUNT = 1024
|
||||
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
|
||||
// create one block hole for half a directory
|
||||
lfs_file_open(&lfs, &file, "bump", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
|
||||
for (lfs_size_t i = 0; i < LFS_BLOCK_SIZE; i += 2) {
|
||||
memcpy(&buffer[i], "hi", 2);
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, cfg.block_size) => cfg.block_size;
|
||||
lfs_file_write(&lfs, &file, buffer, LFS_BLOCK_SIZE) => LFS_BLOCK_SIZE;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < (cfg.block_count-4)*(cfg.block_size-8);
|
||||
i < (LFS_BLOCK_COUNT-4)*(LFS_BLOCK_SIZE-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
@@ -510,7 +510,7 @@ code = '''
|
||||
|
||||
// remount to force reset of lookahead
|
||||
lfs_unmount(&lfs) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
|
||||
// open hole
|
||||
lfs_remove(&lfs, "bump") => 0;
|
||||
@@ -518,10 +518,10 @@ code = '''
|
||||
lfs_mkdir(&lfs, "splitdir") => 0;
|
||||
lfs_file_open(&lfs, &file, "splitdir/bump",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
for (lfs_size_t i = 0; i < cfg.block_size; i += 2) {
|
||||
for (lfs_size_t i = 0; i < LFS_BLOCK_SIZE; i += 2) {
|
||||
memcpy(&buffer[i], "hi", 2);
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, 2*cfg.block_size) => LFS_ERR_NOSPC;
|
||||
lfs_file_write(&lfs, &file, buffer, 2*LFS_BLOCK_SIZE) => LFS_ERR_NOSPC;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
@@ -532,8 +532,8 @@ define.LFS_BLOCK_SIZE = 512
|
||||
define.LFS_BLOCK_COUNT = 1024
|
||||
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
|
||||
// fill completely with two files
|
||||
lfs_file_open(&lfs, &file, "exhaustion1",
|
||||
@@ -541,7 +541,7 @@ code = '''
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
|
||||
i < ((LFS_BLOCK_COUNT-2)/2)*(LFS_BLOCK_SIZE-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
@@ -552,7 +552,7 @@ code = '''
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
|
||||
i < ((LFS_BLOCK_COUNT-2+1)/2)*(LFS_BLOCK_SIZE-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
@@ -560,7 +560,7 @@ code = '''
|
||||
|
||||
// remount to force reset of lookahead
|
||||
lfs_unmount(&lfs) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
|
||||
// rewrite one file
|
||||
lfs_file_open(&lfs, &file, "exhaustion1",
|
||||
@@ -569,7 +569,7 @@ code = '''
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
|
||||
i < ((LFS_BLOCK_COUNT-2)/2)*(LFS_BLOCK_SIZE-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
@@ -583,7 +583,7 @@ code = '''
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
|
||||
i < ((LFS_BLOCK_COUNT-2+1)/2)*(LFS_BLOCK_SIZE-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
@@ -597,8 +597,8 @@ define.LFS_BLOCK_SIZE = 512
|
||||
define.LFS_BLOCK_COUNT = 1024
|
||||
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
|
||||
// fill completely with two files
|
||||
lfs_file_open(&lfs, &file, "exhaustion1",
|
||||
@@ -606,7 +606,7 @@ code = '''
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2)/2)*(cfg.block_size-8);
|
||||
i < ((LFS_BLOCK_COUNT-2)/2)*(LFS_BLOCK_SIZE-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
@@ -617,7 +617,7 @@ code = '''
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2+1)/2)*(cfg.block_size-8);
|
||||
i < ((LFS_BLOCK_COUNT-2+1)/2)*(LFS_BLOCK_SIZE-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
@@ -625,7 +625,7 @@ code = '''
|
||||
|
||||
// remount to force reset of lookahead
|
||||
lfs_unmount(&lfs) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
|
||||
// rewrite one file with a hole of one block
|
||||
lfs_file_open(&lfs, &file, "exhaustion1",
|
||||
@@ -634,7 +634,7 @@ code = '''
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg.block_count-2)/2 - 1)*(cfg.block_size-8);
|
||||
i < ((LFS_BLOCK_COUNT-2)/2 - 1)*(LFS_BLOCK_SIZE-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
|
||||
@@ -1,14 +1,14 @@
[[case]] # set/get attribute
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
memset(buffer, 0, sizeof(buffer));
lfs_setattr(&lfs, "hello", 'A', "aaaa", 4) => 0;
lfs_setattr(&lfs, "hello", 'B', "bbbbbb", 6) => 0;
@@ -60,7 +60,7 @@ code = '''

lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
memset(buffer, 0, sizeof(buffer));
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
lfs_getattr(&lfs, "hello", 'B', buffer+4, 9) => 9;
@@ -78,15 +78,15 @@ code = '''

[[case]] # set/get root attribute
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
memset(buffer, 0, sizeof(buffer));
lfs_setattr(&lfs, "/", 'A', "aaaa", 4) => 0;
lfs_setattr(&lfs, "/", 'B', "bbbbbb", 6) => 0;
@@ -137,7 +137,7 @@ code = '''
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
memset(buffer, 0, sizeof(buffer));
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
lfs_getattr(&lfs, "/", 'B', buffer+4, 9) => 9;
@@ -155,22 +155,22 @@ code = '''

[[case]] # set/get file attribute
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
memset(buffer, 0, sizeof(buffer));
struct lfs_attr attrs1[] = {
{'A', buffer, 4},
{'B', buffer+4, 6},
{'C', buffer+10, 5},
};
struct lfs_file_config cfg1 = {.attrs=attrs1, .attr_count=3};
struct lfs_file_cfg cfg1 = {.attrs=attrs1, .attr_count=3};

lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
memcpy(buffer, "aaaa", 4);
@@ -228,7 +228,7 @@ code = '''
{'B', buffer+4, 9},
{'C', buffer+13, 5},
};
struct lfs_file_config cfg2 = {.attrs=attrs2, .attr_count=3};
struct lfs_file_cfg cfg2 = {.attrs=attrs2, .attr_count=3};
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDWR, &cfg2) => 0;
memcpy(buffer+4, "fffffffff", 9);
lfs_file_close(&lfs, &file) => 0;
@@ -238,14 +238,14 @@ code = '''

lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
memset(buffer, 0, sizeof(buffer));
struct lfs_attr attrs3[] = {
{'A', buffer, 4},
{'B', buffer+4, 9},
{'C', buffer+13, 5},
};
struct lfs_file_config cfg3 = {.attrs=attrs3, .attr_count=3};
struct lfs_file_cfg cfg3 = {.attrs=attrs3, .attr_count=3};

lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg3) => 0;
lfs_file_close(&lfs, &file) => 0;
@@ -262,15 +262,15 @@ code = '''

[[case]] # deferred file attributes
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
lfs_file_close(&lfs, &file);
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_setattr(&lfs, "hello/hello", 'B', "fffffffff", 9) => 0;
lfs_setattr(&lfs, "hello/hello", 'C', "ccccc", 5) => 0;

@@ -280,7 +280,7 @@ code = '''
{'C', "", 0},
{'D', "hhhh", 4},
};
struct lfs_file_config cfg1 = {.attrs=attrs1, .attr_count=3};
struct lfs_file_cfg cfg1 = {.attrs=attrs1, .attr_count=3};

lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;


@@ -16,12 +16,12 @@ define.NAMEMULT = 64
define.FILEMULT = 1
code = '''
for (lfs_block_t badblock = 2; badblock < LFS_BLOCK_COUNT; badblock++) {
lfs_testbd_setwear(&cfg, badblock-1, 0) => 0;
lfs_testbd_setwear(&cfg, badblock, 0xffffffff) => 0;
lfs_testbd_setwear(&bd, badblock-1, 0) => 0;
lfs_testbd_setwear(&bd, badblock, 0xffffffff) => 0;

lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
@@ -46,7 +46,7 @@ code = '''
}
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
@@ -90,12 +90,12 @@ define.NAMEMULT = 64
define.FILEMULT = 1
code = '''
for (lfs_block_t i = 0; i < (LFS_BLOCK_COUNT-2)/2; i++) {
lfs_testbd_setwear(&cfg, i+2, 0xffffffff) => 0;
lfs_testbd_setwear(&bd, i+2, 0xffffffff) => 0;
}

lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
@@ -120,7 +120,7 @@ code = '''
}
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
@@ -163,12 +163,12 @@ define.NAMEMULT = 64
define.FILEMULT = 1
code = '''
for (lfs_block_t i = 0; i < (LFS_BLOCK_COUNT-2)/2; i++) {
lfs_testbd_setwear(&cfg, (2*i) + 2, 0xffffffff) => 0;
lfs_testbd_setwear(&bd, (2*i) + 2, 0xffffffff) => 0;
}

lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
@@ -193,7 +193,7 @@ code = '''
}
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 1; i < 10; i++) {
for (int j = 0; j < NAMEMULT; j++) {
buffer[j] = '0'+i;
@@ -233,9 +233,9 @@ define.LFS_BADBLOCK_BEHAVIOR = [
'LFS_TESTBD_BADBLOCK_ERASENOOP',
]
code = '''
lfs_testbd_setwear(&cfg, 0, 0xffffffff) => 0;
lfs_testbd_setwear(&cfg, 1, 0xffffffff) => 0;
lfs_testbd_setwear(&bd, 0, 0xffffffff) => 0;
lfs_testbd_setwear(&bd, 1, 0xffffffff) => 0;

lfs_format(&lfs, &cfg) => LFS_ERR_NOSPC;
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
lfs_formatcfg(&lfs, &cfg) => LFS_ERR_NOSPC;
lfs_mountcfg(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''

@@ -1,7 +1,7 @@
[[case]] # root
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -17,16 +17,16 @@ code = '''
[[case]] # many directory creation
define.N = 'range(0, 100, 3)'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "dir%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -48,16 +48,16 @@ code = '''
[[case]] # many directory removal
define.N = 'range(3, 100, 11)'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -75,14 +75,14 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_unmount(&lfs);

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -98,16 +98,16 @@ code = '''
[[case]] # many directory rename
define.N = 'range(3, 100, 11)'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "test%03d", i);
lfs_mkdir(&lfs, path) => 0;
}
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -125,7 +125,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
char oldpath[128];
char newpath[128];
@@ -135,7 +135,7 @@ code = '''
}
lfs_unmount(&lfs);

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -158,10 +158,10 @@ code = '''
define.N = [5, 11]
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
err = lfs_mountcfg(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
}

for (int i = 0; i < N; i++) {
@@ -237,9 +237,9 @@ code = '''
[[case]] # file creation
define.N = 'range(3, 100, 11)'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "file%03d", i);
lfs_file_open(&lfs, &file, path,
@@ -248,7 +248,7 @@ code = '''
}
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -270,9 +270,9 @@ code = '''
[[case]] # file removal
define.N = 'range(0, 100, 3)'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_file_open(&lfs, &file, path,
@@ -281,7 +281,7 @@ code = '''
}
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -299,14 +299,14 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "removeme%03d", i);
lfs_remove(&lfs, path) => 0;
}
lfs_unmount(&lfs);

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -322,9 +322,9 @@ code = '''
[[case]] # file rename
define.N = 'range(0, 100, 3)'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "test%03d", i);
lfs_file_open(&lfs, &file, path,
@@ -333,7 +333,7 @@ code = '''
}
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -351,7 +351,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
char oldpath[128];
char newpath[128];
@@ -361,7 +361,7 @@ code = '''
}
lfs_unmount(&lfs);

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -384,10 +384,10 @@ code = '''
define.N = [5, 25]
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
err = lfs_mountcfg(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
}

for (int i = 0; i < N; i++) {
@@ -462,21 +462,21 @@ code = '''

[[case]] # nested directories
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "potato") => 0;
lfs_file_open(&lfs, &file, "burito",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "potato/baked") => 0;
lfs_mkdir(&lfs, "potato/sweet") => 0;
lfs_mkdir(&lfs, "potato/fried") => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "potato") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -498,21 +498,21 @@ code = '''
lfs_unmount(&lfs) => 0;

// try removing?
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_remove(&lfs, "potato") => LFS_ERR_NOTEMPTY;
lfs_unmount(&lfs) => 0;

// try renaming?
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_rename(&lfs, "potato", "coldpotato") => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_rename(&lfs, "coldpotato", "warmpotato") => 0;
lfs_rename(&lfs, "warmpotato", "hotpotato") => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_remove(&lfs, "potato") => LFS_ERR_NOENT;
lfs_remove(&lfs, "coldpotato") => LFS_ERR_NOENT;
lfs_remove(&lfs, "warmpotato") => LFS_ERR_NOENT;
@@ -520,7 +520,7 @@ code = '''
lfs_unmount(&lfs) => 0;

// try cross-directory renaming
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "coldpotato") => 0;
lfs_rename(&lfs, "hotpotato/baked", "coldpotato/baked") => 0;
lfs_rename(&lfs, "coldpotato", "hotpotato") => LFS_ERR_NOTEMPTY;
@@ -536,7 +536,7 @@ code = '''
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "hotpotato") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -558,7 +558,7 @@ code = '''
lfs_unmount(&lfs) => 0;

// final remove
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
lfs_remove(&lfs, "hotpotato/baked") => 0;
lfs_remove(&lfs, "hotpotato") => LFS_ERR_NOTEMPTY;
@@ -568,7 +568,7 @@ code = '''
lfs_remove(&lfs, "hotpotato") => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -587,8 +587,8 @@ code = '''
[[case]] # recursive remove
define.N = [10, 100]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "prickly-pear") => 0;
for (int i = 0; i < N; i++) {
sprintf(path, "prickly-pear/cactus%03d", i);
@@ -611,7 +611,7 @@ code = '''
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs);

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOTEMPTY;

lfs_dir_open(&lfs, &dir, "prickly-pear") => 0;
@@ -636,22 +636,22 @@ code = '''
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOENT;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_remove(&lfs, "prickly-pear") => LFS_ERR_NOENT;
lfs_unmount(&lfs) => 0;
'''

[[case]] # other error cases
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "potato") => 0;
lfs_file_open(&lfs, &file, "burito",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

lfs_mkdir(&lfs, "potato") => LFS_ERR_EXIST;
lfs_mkdir(&lfs, "burito") => LFS_ERR_EXIST;
@@ -696,7 +696,7 @@ code = '''
lfs_unmount(&lfs) => 0;

// or on disk
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(info.type == LFS_TYPE_DIR);
@@ -718,8 +718,8 @@ code = '''
[[case]] # directory seek
define.COUNT = [4, 128, 132]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "hello") => 0;
for (int i = 0; i < COUNT; i++) {
sprintf(path, "hello/kitty%03d", i);
@@ -728,7 +728,7 @@ code = '''
lfs_unmount(&lfs) => 0;

for (int j = 2; j < COUNT; j++) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "hello") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -779,8 +779,8 @@ code = '''
[[case]] # root seek
define.COUNT = [4, 128, 132]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 0; i < COUNT; i++) {
sprintf(path, "hi%03d", i);
lfs_mkdir(&lfs, path) => 0;
@@ -788,7 +788,7 @@ code = '''
lfs_unmount(&lfs) => 0;

for (int j = 2; j < COUNT; j++) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "/") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);

@@ -10,8 +10,8 @@ code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];

lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

// write hi0 20
sprintf(path, "hi0"); size = 20;
@@ -99,8 +99,8 @@ code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];

lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

// write hi0 20
sprintf(path, "hi0"); size = 20;
@@ -188,8 +188,8 @@ code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];

lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

// write hi0 200
sprintf(path, "hi0"); size = 200;
@@ -261,8 +261,8 @@ code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];

lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

// write hi0 200
sprintf(path, "hi0"); size = 200;
@@ -350,8 +350,8 @@ code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];

lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

// write hi0 200
sprintf(path, "hi0"); size = 200;
@@ -454,8 +454,8 @@ code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];

lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

// write hi0 200
sprintf(path, "hi0"); size = 200;
@@ -549,9 +549,9 @@ code = '''

[[case]] # create too big
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
memset(path, 'm', 200);
path[200] = '\0';

@@ -574,9 +574,9 @@ code = '''

[[case]] # resize too big
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
memset(path, 'm', 200);
path[200] = '\0';


@@ -9,10 +9,11 @@ define.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

// change tail-pointer to invalid pointers
lfs_init(&lfs, &cfg) => 0;
lfs.cfg = &cfg;
lfs_initcommon(&lfs) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
@@ -23,7 +24,7 @@ code = '''
lfs_deinit(&lfs) => 0;

// test that mount fails gracefully
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
lfs_mountcfg(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''

[[case]] # invalid dir pointer test
@@ -31,14 +32,15 @@ define.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
// make a dir
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "dir_here") => 0;
lfs_unmount(&lfs) => 0;

// change the dir pointer to be invalid
lfs_init(&lfs, &cfg) => 0;
lfs.cfg = &cfg;
lfs_initcommon(&lfs) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our directory
@@ -57,7 +59,7 @@ code = '''

// test that accessing our bad dir fails, note there's a number
// of ways to access the dir, some can fail, but some don't
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_stat(&lfs, "dir_here", &info) => 0;
assert(strcmp(info.name, "dir_here") == 0);
assert(info.type == LFS_TYPE_DIR);
@@ -77,16 +79,17 @@ in = "lfs.c"
define.SIZE = [10, 1000, 100000] # faked file size
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
// make a file
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "file_here",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

// change the file pointer to be invalid
lfs_init(&lfs, &cfg) => 0;
lfs.cfg = &cfg;
lfs_initcommon(&lfs) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our file
@@ -103,7 +106,7 @@ code = '''

// test that accessing our bad file fails, note there's a number
// of ways to access the dir, some can fail, but some don't
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_stat(&lfs, "file_here", &info) => 0;
assert(strcmp(info.name, "file_here") == 0);
assert(info.type == LFS_TYPE_REG);
@@ -125,9 +128,9 @@ define.SIZE = ['2*LFS_BLOCK_SIZE', '3*LFS_BLOCK_SIZE', '4*LFS_BLOCK_SIZE']
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
// make a file
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "file_here",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (int i = 0; i < SIZE; i++) {
@@ -137,7 +140,8 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// change pointer in CTZ skip-list to be invalid
lfs_init(&lfs, &cfg) => 0;
lfs.cfg = &cfg;
lfs_initcommon(&lfs) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our file and get our CTZ structure
@@ -154,17 +158,17 @@ code = '''
lfs_ctz_fromle32(&ctz);
// rewrite block to contain bad pointer
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_read(&bd, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
uint32_t bad = lfs_tole32(0xcccccccc);
memcpy(&bbuffer[0], &bad, sizeof(bad));
memcpy(&bbuffer[4], &bad, sizeof(bad));
cfg.erase(&cfg, ctz.head) => 0;
cfg.prog(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_erase(&bd, ctz.head) => 0;
lfs_testbd_prog(&bd, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_deinit(&lfs) => 0;

// test that accessing our bad file fails, note there's a number
// of ways to access the dir, some can fail, but some don't
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_stat(&lfs, "file_here", &info) => 0;
assert(strcmp(info.name, "file_here") == 0);
assert(info.type == LFS_TYPE_REG);
@@ -187,10 +191,11 @@ define.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

// create an invalid gstate
lfs_init(&lfs, &cfg) => 0;
lfs.cfg = &cfg;
lfs_initcommon(&lfs) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_fs_prepmove(&lfs, 1, (lfs_block_t [2]){
@@ -202,7 +207,7 @@ code = '''
// test that mount fails gracefully
// mount may not fail, but our first alloc should fail when
// we try to fix the gstate
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "should_fail") => LFS_ERR_CORRUPT;
lfs_unmount(&lfs) => 0;
'''
@@ -213,10 +218,11 @@ code = '''
in = "lfs.c"
code = '''
// create littlefs
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;

// change tail-pointer to point to ourself
lfs_init(&lfs, &cfg) => 0;
lfs.cfg = &cfg;
lfs_initcommon(&lfs) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
@@ -225,20 +231,21 @@ code = '''
lfs_deinit(&lfs) => 0;

// test that mount fails gracefully
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
lfs_mountcfg(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''

[[case]] # metadata-pair threaded-list 2-length loop test
in = "lfs.c"
code = '''
// create littlefs with child dir
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;

// find child
lfs_init(&lfs, &cfg) => 0;
lfs.cfg = &cfg;
lfs_initcommon(&lfs) => 0;
lfs_mdir_t mdir;
lfs_block_t pair[2];
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
@@ -255,20 +262,21 @@ code = '''
lfs_deinit(&lfs) => 0;

// test that mount fails gracefully
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
lfs_mountcfg(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''

[[case]] # metadata-pair threaded-list 1-length child loop test
in = "lfs.c"
code = '''
// create littlefs with child dir
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;

// find child
lfs_init(&lfs, &cfg) => 0;
lfs.cfg = &cfg;
lfs_initcommon(&lfs) => 0;
lfs_mdir_t mdir;
lfs_block_t pair[2];
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
@@ -284,5 +292,5 @@ code = '''
lfs_deinit(&lfs) => 0;

// test that mount fails gracefully
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
lfs_mountcfg(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''

@@ -11,14 +11,14 @@ define.LFS_BADBLOCK_BEHAVIOR = [
|
||||
]
|
||||
define.FILES = 10
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "roadrunner") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
uint32_t cycle = 0;
|
||||
while (true) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// chose name, roughly random seed, and random 2^n size
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
@@ -71,7 +71,7 @@ code = '''
|
||||
|
||||
exhausted:
|
||||
// should still be readable
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
@@ -96,11 +96,11 @@ define.LFS_BADBLOCK_BEHAVIOR = [
|
||||
]
|
||||
define.FILES = 10
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
|
||||
uint32_t cycle = 0;
|
||||
while (true) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// chose name, roughly random seed, and random 2^n size
|
||||
sprintf(path, "test%d", i);
|
||||
@@ -153,7 +153,7 @@ code = '''
|
||||
|
||||
exhausted:
|
||||
// should still be readable
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "test%d", i);
|
||||
@@ -180,18 +180,18 @@ code = '''
|
||||
|
||||
for (int run = 0; run < 2; run++) {
|
||||
for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
|
||||
lfs_testbd_setwear(&cfg, b,
|
||||
lfs_testbd_setwear(&bd, b,
|
||||
(b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
|
||||
}
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "roadrunner") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
uint32_t cycle = 0;
|
||||
while (true) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// chose name, roughly random seed, and random 2^n size
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
@@ -244,7 +244,7 @@ code = '''
|
||||
|
||||
exhausted:
|
||||
// should still be readable
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
@@ -272,15 +272,15 @@ code = '''
|
||||
|
||||
for (int run = 0; run < 2; run++) {
|
||||
for (lfs_block_t b = 0; b < LFS_BLOCK_COUNT; b++) {
|
||||
lfs_testbd_setwear(&cfg, b,
|
||||
lfs_testbd_setwear(&bd, b,
|
||||
(b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0;
|
||||
}
|
||||
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
|
||||
uint32_t cycle = 0;
|
||||
while (true) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// chose name, roughly random seed, and random 2^n size
|
||||
sprintf(path, "test%d", i);
|
||||
@@ -333,7 +333,7 @@ code = '''
|
||||
|
||||
exhausted:
|
||||
// should still be readable
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "test%d", i);
|
||||
@@ -358,14 +358,14 @@ define.CYCLES = 100
|
||||
define.FILES = 10
|
||||
if = 'LFS_BLOCK_CYCLES < CYCLES/10'
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "roadrunner") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
uint32_t cycle = 0;
|
||||
while (cycle < CYCLES) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// chose name, roughly random seed, and random 2^n size
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
@@ -418,7 +418,7 @@ code = '''
|
||||
|
||||
exhausted:
|
||||
// should still be readable
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (uint32_t i = 0; i < FILES; i++) {
|
||||
// check for errors
|
||||
sprintf(path, "roadrunner/test%d", i);
|
||||
@@ -434,7 +434,7 @@ exhausted:
|
||||
lfs_testbd_wear_t maxwear = 0;
|
||||
// skip 0 and 1 as superblock movement is intentionally avoided
|
||||
for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
|
||||
lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
|
||||
lfs_testbd_wear_t wear = lfs_testbd_getwear(&bd, b);
|
||||
printf("%08x: wear %d\n", b, wear);
|
||||
assert(wear >= 0);
|
||||
if (wear < minwear) {
|
||||
@@ -453,7 +453,7 @@ exhausted:
|
||||
// find standard deviation^2
|
||||
lfs_testbd_wear_t dev2 = 0;
|
||||
for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) {
|
||||
lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b);
|
||||
lfs_testbd_wear_t wear = lfs_testbd_getwear(&bd, b);
|
||||
assert(wear >= 0);
|
||||
lfs_testbd_swear_t diff = wear - avgwear;
|
||||
dev2 += diff*diff;
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
|
||||
[[case]] # simple file test
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "hello",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
size = strlen("Hello World!")+1;
|
||||
@@ -11,7 +11,7 @@ code = '''
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "hello", LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(strcmp((char*)buffer, "Hello World!") == 0);
|
||||
@@ -23,10 +23,10 @@ code = '''
|
||||
define.SIZE = [32, 8192, 262144, 0, 7, 8193]
|
||||
define.CHUNKSIZE = [31, 16, 33, 1, 1023]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
|
||||
// write
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
srand(1);
|
||||
@@ -41,7 +41,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
srand(1);
|
||||
@@ -62,10 +62,10 @@ define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
|
||||
define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
|
||||
define.CHUNKSIZE = [31, 16, 1]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
|
||||
// write
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
srand(1);
|
||||
@@ -80,7 +80,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE1;
|
||||
srand(1);
|
||||
@@ -96,7 +96,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// rewrite
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY) => 0;
|
||||
srand(2);
|
||||
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
|
||||
@@ -110,7 +110,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => lfs_max(SIZE1, SIZE2);
|
||||
srand(2);
|
||||
@@ -144,10 +144,10 @@ define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
|
||||
define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
|
||||
define.CHUNKSIZE = [31, 16, 1]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
|
||||
// write
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
srand(1);
|
||||
@@ -162,7 +162,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE1;
|
||||
srand(1);
|
||||
@@ -178,7 +178,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// append
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_APPEND) => 0;
|
||||
srand(2);
|
||||
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
|
||||
@@ -192,7 +192,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE1 + SIZE2;
|
||||
srand(1);
|
||||
@@ -221,10 +221,10 @@ define.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
|
||||
define.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
|
||||
define.CHUNKSIZE = [31, 16, 1]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
|
||||
// write
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
srand(1);
|
||||
@@ -239,7 +239,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE1;
|
||||
srand(1);
|
||||
@@ -255,7 +255,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// truncate
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_TRUNC) => 0;
|
||||
srand(2);
|
||||
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
|
||||
@@ -269,7 +269,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// read
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE2;
|
||||
srand(2);
|
||||
@@ -290,10 +290,10 @@ define.SIZE = [32, 0, 7, 2049]
|
||||
define.CHUNKSIZE = [31, 16, 65]
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
err = lfs_mountcfg(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
}
|
||||
|
||||
err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
|
||||
@@ -344,10 +344,10 @@ define = [
|
||||
]
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
err = lfs_mountcfg(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
}
|
||||
|
||||
err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
|
||||
@@ -406,9 +406,9 @@ code = '''
|
||||
[[case]] # many files
|
||||
define.N = 300
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
// create N files of 7 bytes
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "file_%03d", i);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
@@ -431,9 +431,9 @@ code = '''
|
||||
[[case]] # many files with power cycle
|
||||
define.N = 300
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
// create N files of 7 bytes
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(path, "file_%03d", i);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
@@ -446,7 +446,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
char rbuffer[1024];
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(strcmp(rbuffer, wbuffer) == 0);
|
||||
@@ -459,10 +459,10 @@ code = '''
|
||||
define.N = 300
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
err = lfs_mountcfg(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
}
|
||||
// create N files of 7 bytes
|
||||
for (int i = 0; i < N; i++) {
|
||||
|
||||
@@ -5,8 +5,8 @@ define.FILES = [4, 10, 26]
|
||||
code = '''
|
||||
lfs_file_t files[FILES];
|
||||
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_file_open(&lfs, &files[j], path,
|
||||
@@ -64,8 +64,8 @@ define.SIZE = [10, 100]
|
||||
define.FILES = [4, 10, 26]
|
||||
code = '''
|
||||
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
@@ -77,7 +77,7 @@ code = '''
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "zzz", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_write(&lfs, &file, (const void*)"~", 1) => 1;
|
||||
@@ -115,8 +115,8 @@ code = '''
|
||||
[[case]] # remove inconveniently test
|
||||
define.SIZE = [10, 100]
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_file_t files[3];
|
||||
lfs_file_open(&lfs, &files[0], "e", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_open(&lfs, &files[1], "f", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
@@ -180,10 +180,10 @@ code = '''
|
||||
lfs_file_t files[FILES];
|
||||
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
err = lfs_mountcfg(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
}
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[[case]] # move file
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "a") => 0;
|
||||
lfs_mkdir(&lfs, "b") => 0;
|
||||
lfs_mkdir(&lfs, "c") => 0;
|
||||
@@ -13,11 +13,11 @@ code = '''
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "a") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
@@ -57,8 +57,8 @@ code = '''
|
||||
|
||||
[[case]] # noop move, yes this is legal
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "hi") => 0;
|
||||
lfs_rename(&lfs, "hi", "hi") => 0;
|
||||
lfs_mkdir(&lfs, "hi/hi") => 0;
|
||||
@@ -74,8 +74,8 @@ code = '''
|
||||
[[case]] # move file corrupt source
|
||||
in = "lfs.c"
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "a") => 0;
|
||||
lfs_mkdir(&lfs, "b") => 0;
|
||||
lfs_mkdir(&lfs, "c") => 0;
|
||||
@@ -87,28 +87,28 @@ code = '''
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// corrupt the source
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "a") => 0;
|
||||
lfs_block_t block = dir.m.pair[0];
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
uint8_t bbuffer[LFS_BLOCK_SIZE];
|
||||
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
lfs_testbd_read(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
int off = LFS_BLOCK_SIZE-1;
|
||||
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
|
||||
off -= 1;
|
||||
}
|
||||
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
|
||||
cfg.erase(&cfg, block) => 0;
|
||||
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
cfg.sync(&cfg) => 0;
|
||||
lfs_testbd_erase(&bd, block) => 0;
|
||||
lfs_testbd_prog(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
lfs_testbd_sync(&bd) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "a") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
@@ -150,8 +150,8 @@ code = '''
|
||||
in = "lfs.c"
|
||||
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "a") => 0;
|
||||
lfs_mkdir(&lfs, "b") => 0;
|
||||
lfs_mkdir(&lfs, "c") => 0;
|
||||
@@ -163,44 +163,44 @@ code = '''
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// corrupt the source
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "a") => 0;
|
||||
lfs_block_t block = dir.m.pair[0];
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
uint8_t bbuffer[LFS_BLOCK_SIZE];
|
||||
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
lfs_testbd_read(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
int off = LFS_BLOCK_SIZE-1;
|
||||
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
|
||||
off -= 1;
|
||||
}
|
||||
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
|
||||
cfg.erase(&cfg, block) => 0;
|
||||
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
cfg.sync(&cfg) => 0;
|
||||
lfs_testbd_erase(&bd, block) => 0;
|
||||
lfs_testbd_prog(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
lfs_testbd_sync(&bd) => 0;
|
||||
|
||||
// corrupt the destination
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "c") => 0;
|
||||
block = dir.m.pair[0];
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
lfs_testbd_read(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
off = LFS_BLOCK_SIZE-1;
|
||||
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
|
||||
off -= 1;
|
||||
}
|
||||
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
|
||||
cfg.erase(&cfg, block) => 0;
|
||||
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
cfg.sync(&cfg) => 0;
|
||||
lfs_testbd_erase(&bd, block) => 0;
|
||||
lfs_testbd_prog(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
lfs_testbd_sync(&bd) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "a") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
@@ -242,8 +242,8 @@ code = '''
|
||||
in = "lfs.c"
|
||||
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "a") => 0;
|
||||
lfs_mkdir(&lfs, "b") => 0;
|
||||
lfs_mkdir(&lfs, "c") => 0;
|
||||
@@ -255,49 +255,49 @@ code = '''
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// corrupt the source
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "a") => 0;
|
||||
lfs_block_t block = dir.m.pair[0];
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
uint8_t bbuffer[LFS_BLOCK_SIZE];
|
||||
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
lfs_testbd_read(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
int off = LFS_BLOCK_SIZE-1;
|
||||
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
|
||||
off -= 1;
|
||||
}
|
||||
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
|
||||
cfg.erase(&cfg, block) => 0;
|
||||
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
cfg.sync(&cfg) => 0;
|
||||
lfs_testbd_erase(&bd, block) => 0;
|
||||
lfs_testbd_prog(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
lfs_testbd_sync(&bd) => 0;
|
||||
|
||||
// corrupt the destination
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "c") => 0;
|
||||
block = dir.m.pair[0];
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
lfs_testbd_read(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
off = LFS_BLOCK_SIZE-1;
|
||||
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
|
||||
off -= 1;
|
||||
}
|
||||
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
|
||||
cfg.erase(&cfg, block) => 0;
|
||||
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
cfg.sync(&cfg) => 0;
|
||||
lfs_testbd_erase(&bd, block) => 0;
|
||||
lfs_testbd_prog(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
|
||||
lfs_testbd_sync(&bd) => 0;
|
||||
|
||||
// continue move
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_rename(&lfs, "a/hello", "c/hello") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "a") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
@@ -338,10 +338,10 @@ code = '''
|
||||
[[case]] # simple reentrant move file
|
||||
reentrant = true
|
||||
code = '''
|
||||
err = lfs_mount(&lfs, &cfg);
|
||||
err = lfs_mountcfg(&lfs, &cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
}
|
||||
err = lfs_mkdir(&lfs, "a");
|
||||
assert(!err || err == LFS_ERR_EXIST);
|
||||
@@ -354,7 +354,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
while (true) {
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
// there should never exist _2_ hello files
|
||||
int count = 0;
|
||||
if (lfs_stat(&lfs, "a/hello", &info) == 0) {
|
||||
@@ -384,7 +384,7 @@ code = '''
|
||||
assert(count <= 1);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
if (lfs_stat(&lfs, "a/hello", &info) == 0 && info.size > 0) {
|
||||
lfs_rename(&lfs, "a/hello", "b/hello") => 0;
|
||||
} else if (lfs_stat(&lfs, "b/hello", &info) == 0) {
|
||||
@@ -407,7 +407,7 @@ code = '''
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "a") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
@@ -447,8 +447,8 @@ code = '''
|
||||
|
||||
[[case]] # move dir
|
||||
code = '''
|
||||
lfs_format(&lfs, &cfg) => 0;
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_formatcfg(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_mkdir(&lfs, "a") => 0;
|
||||
lfs_mkdir(&lfs, "b") => 0;
|
||||
lfs_mkdir(&lfs, "c") => 0;
|
||||
@@ -459,11 +459,11 @@ code = '''
|
||||
lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg) => 0;
|
||||
lfs_mountcfg(&lfs, &cfg) => 0;
|
||||
lfs_dir_open(&lfs, &dir, "a") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
@@ -513,8 +513,8 @@ code = '''
[[case]] # move dir corrupt source
in = "lfs.c"
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
@@ -525,28 +525,28 @@ code = '''
lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;

// corrupt the source
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_read(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
int off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
lfs_testbd_erase(&bd, block) => 0;
lfs_testbd_prog(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_sync(&bd) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
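The corrupt-source sequence above is the same idiom every corruption case in this diff uses: read the metadata block back, scan from the end past erased bytes to find the last programmed byte, clobber the last few bytes (which lands on the most recent commit's checksum), then rewrite the block through the test block device. A sketch of that idiom factored into a helper; the helper name and the lfs_testbd_t handle type are assumptions, only the calls visible in the diff are taken as given:

// Sketch only, not part of the diff: the corruption idiom as a helper.
static void corrupt_last_commit(lfs_testbd_t *bd, lfs_block_t block) {
    uint8_t bbuffer[LFS_BLOCK_SIZE];
    lfs_testbd_read(bd, block, 0, bbuffer, LFS_BLOCK_SIZE);

    // scan backwards past erased bytes to the end of the last commit
    int off = LFS_BLOCK_SIZE-1;
    while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
        off -= 1;
    }

    // clobber the trailing bytes of the most recent commit (its checksum)
    memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);

    // write the damaged image back out
    lfs_testbd_erase(bd, block);
    lfs_testbd_prog(bd, block, 0, bbuffer, LFS_BLOCK_SIZE);
    lfs_testbd_sync(bd);
}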
@@ -597,8 +597,8 @@ code = '''
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
@@ -609,44 +609,44 @@ code = '''
lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;

// corrupt the source
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_read(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
int off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
lfs_testbd_erase(&bd, block) => 0;
lfs_testbd_prog(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_sync(&bd) => 0;

// corrupt the destination
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_read(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
lfs_testbd_erase(&bd, block) => 0;
lfs_testbd_prog(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_sync(&bd) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -697,8 +697,8 @@ code = '''
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
@@ -709,49 +709,49 @@ code = '''
lfs_mkdir(&lfs, "a/hi/ohayo") => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;

// corrupt the source
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_read(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
int off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
lfs_testbd_erase(&bd, block) => 0;
lfs_testbd_prog(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_sync(&bd) => 0;

// corrupt the destination
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "c") => 0;
block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_read(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
lfs_testbd_erase(&bd, block) => 0;
lfs_testbd_prog(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_sync(&bd) => 0;

// continue move
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_rename(&lfs, "a/hi", "c/hi") => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -801,10 +801,10 @@ code = '''
[[case]] # simple reentrant move dir
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
err = lfs_mountcfg(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
}
err = lfs_mkdir(&lfs, "a");
assert(!err || err == LFS_ERR_EXIST);
@@ -817,7 +817,7 @@ code = '''
lfs_unmount(&lfs) => 0;

while (true) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
// there should never exist _2_ hi directories
int count = 0;
if (lfs_stat(&lfs, "a/hi", &info) == 0) {
@@ -843,7 +843,7 @@ code = '''
assert(count <= 1);
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
if (lfs_stat(&lfs, "a/hi", &info) == 0) {
lfs_rename(&lfs, "a/hi", "b/hi") => 0;
} else if (lfs_stat(&lfs, "b/hi", &info) == 0) {
@@ -868,7 +868,7 @@ code = '''
lfs_unmount(&lfs) => 0;
}

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "a") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
assert(strcmp(info.name, ".") == 0);
@@ -917,8 +917,8 @@ code = '''

[[case]] # move state stealing
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "a") => 0;
lfs_mkdir(&lfs, "b") => 0;
lfs_mkdir(&lfs, "c") => 0;
@@ -930,17 +930,17 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_rename(&lfs, "a/hello", "b/hello") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_rename(&lfs, "b/hello", "c/hello") => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_rename(&lfs, "c/hello", "d/hello") => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "a/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "b/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
lfs_file_open(&lfs, &file, "c/hello", LFS_O_RDONLY) => LFS_ERR_NOENT;
@@ -954,12 +954,12 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_remove(&lfs, "b") => 0;
lfs_remove(&lfs, "c") => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_stat(&lfs, "a", &info) => 0;
lfs_stat(&lfs, "b", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "c", &info) => LFS_ERR_NOENT;
@@ -981,8 +981,8 @@ code = '''
# Other specific corner cases
[[case]] # create + delete in same commit with neighbors
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

// littlefs keeps files sorted, so we know the order these will be in
lfs_file_open(&lfs, &file, "/1.move_me",
@@ -1127,8 +1127,8 @@ code = '''
# Other specific corner cases
[[case]] # create + delete + delete in same commit with neighbors
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

// littlefs keeps files sorted, so we know the order these will be in
lfs_file_open(&lfs, &file, "/1.move_me",
@@ -1283,8 +1283,8 @@ code = '''

[[case]] # create + delete in different dirs with neighbors
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

// littlefs keeps files sorted, so we know the order these will be in
lfs_mkdir(&lfs, "/dir.1") => 0;
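These corner cases lean on littlefs keeping directory entries sorted, so a test can pin the entry it is about to move between two known neighbors purely by name. A sketch of that setup with hypothetical neighbor names (the actual neighbor files are elided by the hunk context and are not taken from the diff):

// Sketch with hypothetical names: because entries are kept sorted,
// "/1.move_me" always sits between these two neighbors in the metadata log.
lfs_file_open(&lfs, &file, "/0.before", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "/1.move_me", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_file_open(&lfs, &file, "/2.after", LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;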
@@ -1523,8 +1523,8 @@ in = "lfs.c"
define.RELOCATIONS = 'range(0x3+1)'
define.LFS_ERASE_CYCLES = 0xffffffff
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

lfs_mkdir(&lfs, "/parent") => 0;
lfs_mkdir(&lfs, "/parent/child") => 0;
@@ -1569,14 +1569,14 @@ code = '''
// force specific directories to relocate
if (RELOCATIONS & 0x1) {
lfs_dir_open(&lfs, &dir, "/parent");
lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_testbd_setwear(&bd, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&bd, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}
if (RELOCATIONS & 0x2) {
lfs_dir_open(&lfs, &dir, "/parent/child");
lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_testbd_setwear(&bd, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&bd, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}

@@ -1660,8 +1660,8 @@ in = "lfs.c"
define.RELOCATIONS = 'range(0x7+1)'
define.LFS_ERASE_CYCLES = 0xffffffff
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

lfs_mkdir(&lfs, "/parent") => 0;
lfs_mkdir(&lfs, "/parent/child") => 0;
@@ -1707,20 +1707,20 @@ code = '''
// force specific directories to relocate
if (RELOCATIONS & 0x1) {
lfs_dir_open(&lfs, &dir, "/parent");
lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_testbd_setwear(&bd, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&bd, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}
if (RELOCATIONS & 0x2) {
lfs_dir_open(&lfs, &dir, "/parent/sibling");
lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_testbd_setwear(&bd, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&bd, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}
if (RELOCATIONS & 0x4) {
lfs_dir_open(&lfs, &dir, "/parent/child");
lfs_testbd_setwear(&cfg, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&cfg, dir.m.pair[1], 0xffffffff) => 0;
lfs_testbd_setwear(&bd, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&bd, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;
}

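On the old side these relocation cases passed the config struct to lfs_testbd_setwear; on the new side they pass the block-device handle instead. Marking both blocks of a directory's metadata pair with the maximum wear value presumably makes the next commit to that pair trip the wear check, which is what forces littlefs to relocate the directory. A sketch of the idiom, assuming the lfs_testbd_t handle used elsewhere in these tests:

// Sketch, not part of the diff: force a directory's metadata pair to
// relocate by marking both of its blocks as fully worn.
lfs_dir_open(&lfs, &dir, "/parent");
lfs_testbd_setwear(&bd, dir.m.pair[0], 0xffffffff) => 0;
lfs_testbd_setwear(&bd, dir.m.pair[1], 0xffffffff) => 0;
lfs_dir_close(&lfs, &dir) => 0;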
@@ -2,8 +2,8 @@
in = "lfs.c"
if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "parent") => 0;
lfs_mkdir(&lfs, "parent/orphan") => 0;
lfs_mkdir(&lfs, "parent/child") => 0;
@@ -13,29 +13,29 @@ code = '''
// corrupt the child's most recent commit, this should be the update
// to the linked-list entry, which should orphan the orphan. Note this
// makes a lot of assumptions about the remove operation.
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "parent/child") => 0;
lfs_block_t block = dir.m.pair[0];
lfs_dir_close(&lfs, &dir) => 0;
lfs_unmount(&lfs) => 0;
uint8_t bbuffer[LFS_BLOCK_SIZE];
cfg.read(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_read(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
int off = LFS_BLOCK_SIZE-1;
while (off >= 0 && bbuffer[off] == LFS_ERASE_VALUE) {
off -= 1;
}
memset(&bbuffer[off-3], LFS_BLOCK_SIZE, 3);
cfg.erase(&cfg, block) => 0;
cfg.prog(&cfg, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
cfg.sync(&cfg) => 0;
lfs_testbd_erase(&bd, block) => 0;
lfs_testbd_prog(&bd, block, 0, bbuffer, LFS_BLOCK_SIZE) => 0;
lfs_testbd_sync(&bd) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "parent/child", &info) => 0;
lfs_fs_size(&lfs) => 8;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "parent/child", &info) => 0;
lfs_fs_size(&lfs) => 8;
@@ -48,7 +48,7 @@ code = '''
lfs_fs_size(&lfs) => 8;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "parent/child", &info) => 0;
lfs_stat(&lfs, "parent/otherchild", &info) => 0;
@@ -59,17 +59,17 @@ code = '''
[[case]] # reentrant testing for orphans, basically just spam mkdir/remove
reentrant = true
# TODO fix this case, caused by non-DAG trees
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
if = '!(DEPTH == 3 && LFS_BUFFER_SIZE != 64)'
define = [
{FILES=6, DEPTH=1, CYCLES=20},
{FILES=26, DEPTH=1, CYCLES=20},
{FILES=3, DEPTH=3, CYCLES=20},
]
code = '''
err = lfs_mount(&lfs, &cfg);
err = lfs_mountcfg(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
}

srand(1);

@@ -1,8 +1,8 @@

[[case]] # simple path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
@@ -23,8 +23,8 @@ code = '''

[[case]] # redundant slashes
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
@@ -47,8 +47,8 @@ code = '''

[[case]] # dot path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
@@ -73,8 +73,8 @@ code = '''

[[case]] # dot dot path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
@@ -103,8 +103,8 @@ code = '''

[[case]] # trailing dot path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
@@ -125,8 +125,8 @@ code = '''

[[case]] # leading dot path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, ".milk") => 0;
lfs_stat(&lfs, ".milk", &info) => 0;
strcmp(info.name, ".milk") => 0;
@@ -137,8 +137,8 @@ code = '''

[[case]] # root dot dot path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "tea") => 0;
lfs_mkdir(&lfs, "tea/hottea") => 0;
lfs_mkdir(&lfs, "tea/warmtea") => 0;
@@ -161,8 +161,8 @@ code = '''

[[case]] # invalid path tests
code = '''
lfs_format(&lfs, &cfg);
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg);
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_stat(&lfs, "dirt", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "dirt/ground", &info) => LFS_ERR_NOENT;
lfs_stat(&lfs, "dirt/ground/earth", &info) => LFS_ERR_NOENT;
@@ -182,8 +182,8 @@ code = '''

[[case]] # root operations
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_stat(&lfs, "/", &info) => 0;
assert(strcmp(info.name, "/") == 0);
assert(info.type == LFS_TYPE_DIR);
@@ -198,8 +198,8 @@ code = '''

[[case]] # root representations
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_stat(&lfs, "/", &info) => 0;
assert(strcmp(info.name, "/") == 0);
assert(info.type == LFS_TYPE_DIR);
@@ -223,8 +223,8 @@ code = '''

[[case]] # superblock conflict test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_stat(&lfs, "littlefs", &info) => LFS_ERR_NOENT;
lfs_remove(&lfs, "littlefs") => LFS_ERR_NOENT;

@@ -239,8 +239,8 @@ code = '''

[[case]] # max path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "coffee") => 0;
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;
@@ -263,8 +263,8 @@ code = '''

[[case]] # really big path test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_mkdir(&lfs, "coffee") => 0;
lfs_mkdir(&lfs, "coffee/hotcoffee") => 0;
lfs_mkdir(&lfs, "coffee/warmcoffee") => 0;

@@ -4,9 +4,9 @@ define.ITERATIONS = 20
define.COUNT = 10
define.LFS_BLOCK_CYCLES = [8, 1]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
// fill up filesystem so only ~16 blocks are left
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
memset(buffer, 0, 512);
while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
@@ -17,7 +17,7 @@ code = '''
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int j = 0; j < ITERATIONS; j++) {
for (int i = 0; i < COUNT; i++) {
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
@@ -47,7 +47,7 @@ code = '''
}
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_dir_open(&lfs, &dir, "child") => 0;
lfs_dir_read(&lfs, &dir, &info) => 1;
lfs_dir_read(&lfs, &dir, &info) => 1;
@@ -70,9 +70,9 @@ define.ITERATIONS = 20
define.COUNT = 10
define.LFS_BLOCK_CYCLES = [8, 1]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
// fill up filesystem so only ~16 blocks are left
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
memset(buffer, 0, 512);
while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
@@ -83,7 +83,7 @@ code = '''
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int j = 0; j < ITERATIONS; j++) {
for (int i = 0; i < COUNT; i++) {
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
@@ -148,17 +148,17 @@ code = '''
# almost every tree operation needs a relocation
reentrant = true
# TODO fix this case, caused by non-DAG trees
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
if = '!(DEPTH == 3 && LFS_BUFFER_SIZE != 64)'
define = [
{FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=3, DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1},
]
code = '''
err = lfs_mount(&lfs, &cfg);
err = lfs_mountcfg(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
}

srand(1);
@@ -210,17 +210,17 @@ code = '''
[[case]] # reentrant testing for relocations, but now with random renames!
reentrant = true
# TODO fix this case, caused by non-DAG trees
if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)'
if = '!(DEPTH == 3 && LFS_BUFFER_SIZE != 64)'
define = [
{FILES=6, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1},
{FILES=3, DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1},
]
code = '''
err = lfs_mount(&lfs, &cfg);
err = lfs_mountcfg(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
}

srand(1);

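The relocation cases above first pack the filesystem until only about 16 blocks remain, so that nearly every later tree operation has to relocate. The body of that fill loop is elided by the hunk context; a plausible sketch, assuming the padding file is simply extended in 512-byte writes (matching the memset above) until the free-space condition is met:

// Sketch of the elided fill loop; the write size is an assumption.
lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
memset(buffer, 0, 512);
while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
    lfs_file_write(&lfs, &file, buffer, 512) => 512;
}
lfs_file_close(&lfs, &file) => 0;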
@@ -9,8 +9,8 @@ define = [
{COUNT=4, SKIP=2},
]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen("kittycatcat");
@@ -21,7 +21,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY) => 0;

lfs_soff_t pos = -1;
@@ -78,8 +78,8 @@ define = [
{COUNT=4, SKIP=2},
]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen("kittycatcat");
@@ -90,7 +90,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;

lfs_soff_t pos = -1;
@@ -133,8 +133,8 @@ code = '''
define.COUNT = 132
define.OFFSETS = '"{512, 1020, 513, 1021, 511, 1019, 1441}"'
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen("kittycatcat");
@@ -145,7 +145,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;

size = strlen("hedgehoghog");
@@ -193,8 +193,8 @@ define = [
{COUNT=4, SKIP=3},
]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
size = strlen("kittycatcat");
@@ -204,7 +204,7 @@ code = '''
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;

size = strlen("kittycatcat");
@@ -241,8 +241,8 @@ code = '''
[[case]] # inline write and seek
define.SIZE = [2, 4, 128, 132]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "tinykitty",
LFS_O_RDWR | LFS_O_CREAT) => 0;
int j = 0;
@@ -310,10 +310,10 @@ code = '''
define.COUNT = [4, 64, 128]
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
err = lfs_mountcfg(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
}
err = lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY);
assert(!err || err == LFS_ERR_NOENT);

@@ -1,37 +1,37 @@
[[case]] # simple formatting test
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
'''

[[case]] # mount/unmount
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_unmount(&lfs) => 0;
'''

[[case]] # reentrant format
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
err = lfs_mountcfg(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
}
lfs_unmount(&lfs) => 0;
'''

[[case]] # invalid mount
code = '''
lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT;
lfs_mountcfg(&lfs, &cfg) => LFS_ERR_CORRUPT;
'''

[[case]] # expanding superblock
define.LFS_BLOCK_CYCLES = [32, 33, 1]
define.N = [10, 100, 1000]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
@@ -44,7 +44,7 @@ code = '''
lfs_unmount(&lfs) => 0;

// one last check after power-cycle
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "dummy",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_file_close(&lfs, &file) => 0;
@@ -58,9 +58,9 @@ code = '''
define.LFS_BLOCK_CYCLES = [32, 33, 1]
define.N = [10, 100, 1000]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
for (int i = 0; i < N; i++) {
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
// remove lingering dummy?
err = lfs_stat(&lfs, "dummy", &info);
assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
@@ -80,7 +80,7 @@ code = '''
}

// one last check after power-cycle
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);
@@ -92,10 +92,10 @@ define.LFS_BLOCK_CYCLES = [2, 1]
define.N = 24
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
err = lfs_mountcfg(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
}

for (int i = 0; i < N; i++) {
@@ -119,7 +119,7 @@ code = '''
lfs_unmount(&lfs) => 0;

// one last check after power-cycle
lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_stat(&lfs, "dummy", &info) => 0;
assert(strcmp(info.name, "dummy") == 0);
assert(info.type == LFS_TYPE_REG);

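The expanding-superblock cases create and delete a dummy file N times with a very low LFS_BLOCK_CYCLES, so the superblock pair wears quickly and is forced to expand. The loop body is mostly elided by the hunk context; a plausible sketch of one iteration, stated as an assumption rather than the exact test code:

// Sketch of the elided loop body: create, verify, then remove the dummy
// file so the next iteration starts from a clean directory.
for (int i = 0; i < N; i++) {
    lfs_file_open(&lfs, &file, "dummy",
            LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
    lfs_file_close(&lfs, &file) => 0;
    lfs_stat(&lfs, "dummy", &info) => 0;
    assert(info.type == LFS_TYPE_REG);
    lfs_remove(&lfs, "dummy") => 0;
}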
@@ -2,8 +2,8 @@
define.MEDIUMSIZE = [32, 2048]
define.LARGESIZE = 8192
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop",
LFS_O_WRONLY | LFS_O_CREAT) => 0;

@@ -17,7 +17,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;

@@ -27,7 +27,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;

@@ -46,8 +46,8 @@ code = '''
define.MEDIUMSIZE = [32, 2048]
define.LARGESIZE = 8192
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldyread",
LFS_O_WRONLY | LFS_O_CREAT) => 0;

@@ -61,7 +61,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;

@@ -78,7 +78,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;

@@ -95,12 +95,12 @@ code = '''

[[case]] # write, truncate, and read
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "sequence",
LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;

size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2);
size = lfs_min(LFS_BUFFER_SIZE, sizeof(buffer)/2);
lfs_size_t qsize = size / 4;
uint8_t *wb = buffer;
uint8_t *rb = buffer + size;
@@ -149,8 +149,8 @@ code = '''
define.MEDIUMSIZE = [32, 2048]
define.LARGESIZE = 8192
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldywrite",
LFS_O_WRONLY | LFS_O_CREAT) => 0;

@@ -164,7 +164,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => LARGESIZE;

@@ -181,7 +181,7 @@ code = '''
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;

@@ -202,10 +202,10 @@ define.MEDIUMSIZE = [32, 1024]
define.LARGESIZE = 2048
reentrant = true
code = '''
err = lfs_mount(&lfs, &cfg);
err = lfs_mountcfg(&lfs, &cfg);
if (err) {
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;
}
err = lfs_file_open(&lfs, &file, "baldy", LFS_O_RDONLY);
assert(!err || err == LFS_ERR_NOENT);

@@ -312,8 +312,8 @@ code = '''
const lfs_off_t *hotsizes = configs[CONFIG].hotsizes;
const lfs_off_t *coldsizes = configs[CONFIG].coldsizes;

lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_formatcfg(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

for (unsigned i = 0; i < COUNT; i++) {
sprintf(path, "hairyhead%d", i);
@@ -340,7 +340,7 @@ code = '''

lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

for (unsigned i = 0; i < COUNT; i++) {
sprintf(path, "hairyhead%d", i);
@@ -367,7 +367,7 @@ code = '''

lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, &cfg) => 0;
lfs_mountcfg(&lfs, &cfg) => 0;

for (unsigned i = 0; i < COUNT; i++) {
sprintf(path, "hairyhead%d", i);
@@ -392,48 +392,3 @@ code = '''

lfs_unmount(&lfs) => 0;
'''

[[case]] # noop truncate
define.MEDIUMSIZE = [32, 2048]
code = '''
lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop",
LFS_O_RDWR | LFS_O_CREAT) => 0;

strcpy((char*)buffer, "hair");
size = strlen((char*)buffer);
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_write(&lfs, &file, buffer, size) => size;

// this truncate should do nothing
lfs_file_truncate(&lfs, &file, j+size) => 0;
}
lfs_file_size(&lfs, &file) => MEDIUMSIZE;

lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
// should do nothing again
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;

for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;

lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

// still there after reboot?
lfs_mount(&lfs, &cfg) => 0;
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
lfs_file_read(&lfs, &file, buffer, size) => size;
memcmp(buffer, "hair", size) => 0;
}
lfs_file_read(&lfs, &file, buffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
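One semantic change worth calling out from the truncate hunks above: the test's write-chunk size used to be read from the runtime configuration (lfs.cfg->cache_size) and is now taken from the LFS_BUFFER_SIZE define, consistent with the LFS_CACHE_SIZE to LFS_BUFFER_SIZE renames elsewhere in this compare. The two forms side by side, both taken directly from the diff:

// Old vs new sizing of the test's write chunk:
size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2);   // old side
size = lfs_min(LFS_BUFFER_SIZE, sizeof(buffer)/2);        // new side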