Mirror of https://github.com/eledio-devices/thirdparty-littlefs.git, synced 2025-11-01 16:14:13 +01:00

Compare commits: v2.4.0...omit-isope (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | 640f7efae7 | |
26  .github/workflows/post-release.yml (vendored)
@@ -1,26 +0,0 @@
-name: post-release
-on:
-  release:
-    branches: [master]
-    types: [released]
-
-jobs:
-  post-release:
-    runs-on: ubuntu-18.04
-    steps:
-      # trigger post-release in dependency repo, this indirection allows the
-      # dependency repo to be updated often without affecting this repo. At
-      # the time of this comment, the dependency repo is responsible for
-      # creating PRs for other dependent repos post-release.
-      - name: trigger-post-release
-        continue-on-error: true
-        run: |
-          curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
-            "$GITHUB_API_URL/repos/${{secrets.POST_RELEASE_REPO}}/dispatches" \
-            -d "$(jq -n '{
-              event_type: "post-release",
-              client_payload: {
-                repo: env.GITHUB_REPOSITORY,
-                version: "${{github.event.release.tag_name}}"}}' \
-            | tee /dev/stderr)"
-
215  .github/workflows/release.yml (vendored)
@@ -1,215 +0,0 @@
-name: release
-on:
-  workflow_run:
-    workflows: [test]
-    branches: [master]
-    types: [completed]
-
-jobs:
-  release:
-    runs-on: ubuntu-18.04
-
-    # need to manually check for a couple things
-    # - tests passed?
-    # - we are the most recent commit on master?
-    if: ${{github.event.workflow_run.conclusion == 'success' &&
-      github.event.workflow_run.head_sha == github.sha}}
-
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          ref: ${{github.event.workflow_run.head_sha}}
-          # need workflow access since we push branches
-          # containing workflows
-          token: ${{secrets.BOT_TOKEN}}
-          # need all tags
-          fetch-depth: 0
-
-      # try to get results from tests
-      - uses: dawidd6/action-download-artifact@v2
-        continue-on-error: true
-        with:
-          workflow: ${{github.event.workflow_run.name}}
-          run_id: ${{github.event.workflow_run.id}}
-          name: results
-          path: results
-
-      - name: find-version
-        run: |
-          # rip version from lfs.h
-          LFS_VERSION="$(grep -o '^#define LFS_VERSION .*$' lfs.h \
-            | awk '{print $3}')"
-          LFS_VERSION_MAJOR="$((0xffff & ($LFS_VERSION >> 16)))"
-          LFS_VERSION_MINOR="$((0xffff & ($LFS_VERSION >> 0)))"
-
-          # find a new patch version based on what we find in our tags
-          LFS_VERSION_PATCH="$( \
-            ( git describe --tags --abbrev=0 \
-                --match="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.*" \
-              || echo 'v0.0.-1' ) \
-            | awk -F '.' '{print $3+1}')"
-
-          # found new version
-          LFS_VERSION="v$LFS_VERSION_MAJOR`
-            `.$LFS_VERSION_MINOR`
-            `.$LFS_VERSION_PATCH"
-          echo "LFS_VERSION=$LFS_VERSION"
-          echo "LFS_VERSION=$LFS_VERSION" >> $GITHUB_ENV
-          echo "LFS_VERSION_MAJOR=$LFS_VERSION_MAJOR" >> $GITHUB_ENV
-          echo "LFS_VERSION_MINOR=$LFS_VERSION_MINOR" >> $GITHUB_ENV
-          echo "LFS_VERSION_PATCH=$LFS_VERSION_PATCH" >> $GITHUB_ENV
-
-      # try to find previous version?
-      - name: find-prev-version
-        continue-on-error: true
-        run: |
-          LFS_PREV_VERSION="$(git describe --tags --abbrev=0 --match 'v*')"
-          echo "LFS_PREV_VERSION=$LFS_PREV_VERSION"
-          echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" >> $GITHUB_ENV
-
-      # try to find results from tests
-      - name: collect-results
-        run: |
-          # previous results to compare against?
-          [ -n "$LFS_PREV_VERSION" ] && curl -sS \
-            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/`
-            `status/$LFS_PREV_VERSION" \
-            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
-            >> prev-results.json \
-            || true
-
-          # unfortunately these each have their own format
-          [ -e results/code-thumb.csv ] && ( \
-            export PREV="$(jq -re '
-              select(.context == "results / code").description
-              | capture("Code size is (?<result>[0-9]+)").result' \
-              prev-results.json || echo 0)"
-            ./scripts/code.py -u results/code-thumb.csv -s | awk '
-              NR==2 {printf "Code size,%d B",$2}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
-              NR==2 {printf "\n"}' \
-            >> results.csv)
-          [ -e results/code-thumb-readonly.csv ] && ( \
-            export PREV="$(jq -re '
-              select(.context == "results / code (readonly)").description
-              | capture("Code size is (?<result>[0-9]+)").result' \
-              prev-results.json || echo 0)"
-            ./scripts/code.py -u results/code-thumb-readonly.csv -s | awk '
-              NR==2 {printf "Code size<br/>(readonly),%d B",$2}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
-              NR==2 {printf "\n"}' \
-            >> results.csv)
-          [ -e results/code-thumb-threadsafe.csv ] && ( \
-            export PREV="$(jq -re '
-              select(.context == "results / code (threadsafe)").description
-              | capture("Code size is (?<result>[0-9]+)").result' \
-              prev-results.json || echo 0)"
-            ./scripts/code.py -u results/code-thumb-threadsafe.csv -s | awk '
-              NR==2 {printf "Code size<br/>(threadsafe),%d B",$2}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
-              NR==2 {printf "\n"}' \
-            >> results.csv)
-          [ -e results/code-thumb-migrate.csv ] && ( \
-            export PREV="$(jq -re '
-              select(.context == "results / code (migrate)").description
-              | capture("Code size is (?<result>[0-9]+)").result' \
-              prev-results.json || echo 0)"
-            ./scripts/code.py -u results/code-thumb-migrate.csv -s | awk '
-              NR==2 {printf "Code size<br/>(migrate),%d B",$2}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
-              NR==2 {printf "\n"}' \
-            >> results.csv)
-          [ -e results/code-thumb-error-asserts.csv ] && ( \
-            export PREV="$(jq -re '
-              select(.context == "results / code (error-asserts)").description
-              | capture("Code size is (?<result>[0-9]+)").result' \
-              prev-results.json || echo 0)"
-            ./scripts/code.py -u results/code-thumb-error-asserts.csv -s | awk '
-              NR==2 {printf "Code size<br/>(error-asserts),%d B",$2}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
-              NR==2 {printf "\n"}' \
-            >> results.csv)
-          [ -e results/coverage.csv ] && ( \
-            export PREV="$(jq -re '
-              select(.context == "results / coverage").description
-              | capture("Coverage is (?<result>[0-9\\.]+)").result' \
-              prev-results.json || echo 0)"
-            ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' '
-              NR==2 {printf "Coverage,%.1f%% of %d lines",$4,$3}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",$4-ENVIRON["PREV"]}
-              NR==2 {printf "\n"}' \
-            >> results.csv)
-
-          # transpose to GitHub table
-          [ -e results.csv ] || exit 0
-          awk -F ',' '
-            {label[NR]=$1; value[NR]=$2}
-            END {
-              for (r=1; r<=NR; r++) {printf "| %s ",label[r]}; printf "|\n";
-              for (r=1; r<=NR; r++) {printf "|:--"}; printf "|\n";
-              for (r=1; r<=NR; r++) {printf "| %s ",value[r]}; printf "|\n"}' \
-            results.csv > results.txt
-          echo "RESULTS:"
-          cat results.txt
-
-      # find changes from history
-      - name: collect-changes
-        run: |
-          [ -n "$LFS_PREV_VERSION" ] || exit 0
-          # use explicit link to github commit so that release notes can
-          # be copied elsewhere
-          git log "$LFS_PREV_VERSION.." \
-            --grep='^Merge' --invert-grep \
-            --format="format:[\`%h\`](`
-            `https://github.com/$GITHUB_REPOSITORY/commit/%h) %s" \
-            > changes.txt
-          echo "CHANGES:"
-          cat changes.txt
-
-      # create and update major branches (vN and vN-prefix)
-      - name: create-major-branches
-        run: |
-          # create major branch
-          git branch "v$LFS_VERSION_MAJOR" HEAD
-
-          # create major prefix branch
-          git config user.name ${{secrets.BOT_USER}}
-          git config user.email ${{secrets.BOT_EMAIL}}
-          git fetch "https://github.com/$GITHUB_REPOSITORY.git" \
-            "v$LFS_VERSION_MAJOR-prefix" || true
-          ./scripts/prefix.py "lfs$LFS_VERSION_MAJOR"
-          git branch "v$LFS_VERSION_MAJOR-prefix" $( \
-            git commit-tree $(git write-tree) \
-              $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
-              -p HEAD \
-              -m "Generated v$LFS_VERSION_MAJOR prefixes")
-          git reset --hard
-
-          # push!
-          git push --atomic origin \
-            "v$LFS_VERSION_MAJOR" \
-            "v$LFS_VERSION_MAJOR-prefix"
-
-      # build release notes
-      - name: create-release
-        run: |
-          # create release and patch version tag (vN.N.N)
-          # only draft if not a patch release
-          [ -e results.txt ] && export RESULTS="$(cat results.txt)"
-          [ -e changes.txt ] && export CHANGES="$(cat changes.txt)"
-          curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
-            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \
-            -d "$(jq -n '{
-              tag_name: env.LFS_VERSION,
-              name: env.LFS_VERSION | rtrimstr(".0"),
-              target_commitish: "${{github.event.workflow_run.head_sha}}",
-              draft: env.LFS_VERSION | endswith(".0"),
-              body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \
-            | tee /dev/stderr)"
-
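Aside: the find-version step in the deleted workflow above relies on littlefs's packed version constant. LFS_VERSION keeps the major version in the upper 16 bits and the minor version in the lower 16 bits, which is exactly what the shell arithmetic unpacks. A minimal C sketch of the same decoding, using 0x00020004 as an assumed example value rather than anything read from this branch's lfs.h:

```c
/* Decode a packed littlefs version constant the same way the
 * find-version step does with shell arithmetic. The value below is a
 * hypothetical example, not taken from lfs.h. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t lfs_version = 0x00020004; /* assumed example LFS_VERSION */
    unsigned major = 0xffff & (lfs_version >> 16); /* -> 2 */
    unsigned minor = 0xffff & (lfs_version >> 0);  /* -> 4 */
    printf("v%u.%u\n", major, minor);              /* prints "v2.4" */
    return 0;
}
```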
55  .github/workflows/status.yml (vendored)
@@ -1,55 +0,0 @@
-name: status
-on:
-  workflow_run:
-    workflows: [test]
-    types: [completed]
-
-jobs:
-  status:
-    runs-on: ubuntu-18.04
-    steps:
-      # custom statuses?
-      - uses: dawidd6/action-download-artifact@v2
-        continue-on-error: true
-        with:
-          workflow: ${{github.event.workflow_run.name}}
-          run_id: ${{github.event.workflow_run.id}}
-          name: status
-          path: status
-      - name: update-status
-        continue-on-error: true
-        run: |
-          ls status
-          for s in $(shopt -s nullglob ; echo status/*.json)
-          do
-            # parse requested status
-            export STATE="$(jq -er '.state' $s)"
-            export CONTEXT="$(jq -er '.context' $s)"
-            export DESCRIPTION="$(jq -er '.description' $s)"
-            # help lookup URL for job/steps because GitHub makes
-            # it VERY HARD to link to specific jobs
-            export TARGET_URL="$(
-              jq -er '.target_url // empty' $s || (
-                export TARGET_JOB="$(jq -er '.target_job' $s)"
-                export TARGET_STEP="$(jq -er '.target_step // ""' $s)"
-                curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \
-                  "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/`
-                  `${{github.event.workflow_run.id}}/jobs" \
-                  | jq -er '.jobs[]
-                    | select(.name == env.TARGET_JOB)
-                    | .html_url
-                      + "?check_suite_focus=true"
-                      + ((.steps[]
-                        | select(.name == env.TARGET_STEP)
-                        | "#step:\(.number):0") // "")'))"
-            # update status
-            curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
-              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/`
-              `${{github.event.workflow_run.head_sha}}" \
-              -d "$(jq -n '{
-                state: env.STATE,
-                context: env.CONTEXT,
-                description: env.DESCRIPTION,
-                target_url: env.TARGET_URL}' \
-              | tee /dev/stderr)"
-          done
446  .github/workflows/test.yml (vendored)
@@ -1,446 +0,0 @@
-name: test
-on: [push, pull_request]
-
-env:
-  CFLAGS: -Werror
-  MAKEFLAGS: -j
-
-jobs:
-  # run tests
-  test:
-    runs-on: ubuntu-18.04
-    strategy:
-      fail-fast: false
-      matrix:
-        arch: [x86_64, thumb, mips, powerpc]
-
-    steps:
-      - uses: actions/checkout@v2
-      - name: install
-        run: |
-          # need toml, also pip3 isn't installed by default?
-          sudo apt-get update -qq
-          sudo apt-get install -qq python3 python3-pip lcov
-          sudo pip3 install toml
-          gcc --version
-
-          # setup a ram-backed disk to speed up reentrant tests
-          mkdir disks
-          sudo mount -t tmpfs -o size=100m tmpfs disks
-          TESTFLAGS="$TESTFLAGS --disk=disks/disk"
-
-          # collect coverage
-          mkdir -p coverage
-          TESTFLAGS="$TESTFLAGS --coverage=`
-            `coverage/${{github.job}}-${{matrix.arch}}.info"
-
-          echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV
-
-      # cross-compile with ARM Thumb (32-bit, little-endian)
-      - name: install-thumb
-        if: ${{matrix.arch == 'thumb'}}
-        run: |
-          sudo apt-get install -qq \
-            gcc-arm-linux-gnueabi \
-            libc6-dev-armel-cross \
-            qemu-user
-          echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV
-          echo "EXEC=qemu-arm" >> $GITHUB_ENV
-          arm-linux-gnueabi-gcc --version
-          qemu-arm -version
-      # cross-compile with MIPS (32-bit, big-endian)
-      - name: install-mips
-        if: ${{matrix.arch == 'mips'}}
-        run: |
-          sudo apt-get install -qq \
-            gcc-mips-linux-gnu \
-            libc6-dev-mips-cross \
-            qemu-user
-          echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV
-          echo "EXEC=qemu-mips" >> $GITHUB_ENV
-          mips-linux-gnu-gcc --version
-          qemu-mips -version
-      # cross-compile with PowerPC (32-bit, big-endian)
-      - name: install-powerpc
-        if: ${{matrix.arch == 'powerpc'}}
-        run: |
-          sudo apt-get install -qq \
-            gcc-powerpc-linux-gnu \
-            libc6-dev-powerpc-cross \
-            qemu-user
-          echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV
-          echo "EXEC=qemu-ppc" >> $GITHUB_ENV
-          powerpc-linux-gnu-gcc --version
-          qemu-ppc -version
-
-      # make sure example can at least compile
-      - name: test-example
-        run: |
-          sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
-          make all CFLAGS+=" \
-            -Duser_provided_block_device_read=NULL \
-            -Duser_provided_block_device_prog=NULL \
-            -Duser_provided_block_device_erase=NULL \
-            -Duser_provided_block_device_sync=NULL \
-            -include stdio.h"
-          rm test.c
-
-      # test configurations
-      # normal+reentrant tests
-      - name: test-default
-        run: |
-          make clean
-          make test TESTFLAGS+="-nrk"
-      # NOR flash: read/prog = 1 block = 4KiB
-      - name: test-nor
-        run: |
-          make clean
-          make test TESTFLAGS+="-nrk \
-            -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
-      # SD/eMMC: read/prog = 512 block = 512
-      - name: test-emmc
-        run: |
-          make clean
-          make test TESTFLAGS+="-nrk \
-            -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
-      # NAND flash: read/prog = 4KiB block = 32KiB
-      - name: test-nand
-        run: |
-          make clean
-          make test TESTFLAGS+="-nrk \
-            -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
-      # other extreme geometries that are useful for various corner cases
-      - name: test-no-intrinsics
-        run: |
-          make clean
-          make test TESTFLAGS+="-nrk \
-            -DLFS_NO_INTRINSICS"
-      - name: test-byte-writes
-        # it just takes too long to test byte-level writes when in qemu,
-        # should be plenty covered by the other configurations
-        if: ${{matrix.arch == 'x86_64'}}
-        run: |
-          make clean
-          make test TESTFLAGS+="-nrk \
-            -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
-      - name: test-block-cycles
-        run: |
-          make clean
-          make test TESTFLAGS+="-nrk \
-            -DLFS_BLOCK_CYCLES=1"
-      - name: test-odd-block-count
-        run: |
-          make clean
-          make test TESTFLAGS+="-nrk \
-            -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
-      - name: test-odd-block-size
-        run: |
-          make clean
-          make test TESTFLAGS+="-nrk \
-            -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
-
-      # upload coverage for later coverage
-      - name: upload-coverage
-        uses: actions/upload-artifact@v2
-        with:
-          name: coverage
-          path: coverage
-          retention-days: 1
-
-      # update results
-      - name: results-code
-        run: |
-          mkdir -p results
-          make clean
-          make code \
-            CFLAGS+=" \
-              -DLFS_NO_ASSERT \
-              -DLFS_NO_DEBUG \
-              -DLFS_NO_WARN \
-              -DLFS_NO_ERROR" \
-            CODEFLAGS+="-o results/code-${{matrix.arch}}.csv"
-      - name: results-code-readonly
-        run: |
-          mkdir -p results
-          make clean
-          make code \
-            CFLAGS+=" \
-              -DLFS_NO_ASSERT \
-              -DLFS_NO_DEBUG \
-              -DLFS_NO_WARN \
-              -DLFS_NO_ERROR \
-              -DLFS_READONLY" \
-            CODEFLAGS+="-o results/code-${{matrix.arch}}-readonly.csv"
-      - name: results-code-threadsafe
-        run: |
-          mkdir -p results
-          make clean
-          make code \
-            CFLAGS+=" \
-              -DLFS_NO_ASSERT \
-              -DLFS_NO_DEBUG \
-              -DLFS_NO_WARN \
-              -DLFS_NO_ERROR \
-              -DLFS_THREADSAFE" \
-            CODEFLAGS+="-o results/code-${{matrix.arch}}-threadsafe.csv"
-      - name: results-code-migrate
-        run: |
-          mkdir -p results
-          make clean
-          make code \
-            CFLAGS+=" \
-              -DLFS_NO_ASSERT \
-              -DLFS_NO_DEBUG \
-              -DLFS_NO_WARN \
-              -DLFS_NO_ERROR \
-              -DLFS_MIGRATE" \
-            CODEFLAGS+="-o results/code-${{matrix.arch}}-migrate.csv"
-      - name: results-code-error-asserts
-        run: |
-          mkdir -p results
-          make clean
-          make code \
-            CFLAGS+=" \
-              -DLFS_NO_DEBUG \
-              -DLFS_NO_WARN \
-              -DLFS_NO_ERROR \
-              -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'" \
-            CODEFLAGS+="-o results/code-${{matrix.arch}}-error-asserts.csv"
-      - name: upload-results
-        uses: actions/upload-artifact@v2
-        with:
-          name: results
-          path: results
-      # limit reporting to Thumb, otherwise there would be too many numbers
-      # flying around for the results to be easily readable
-      - name: collect-status
-        if: ${{matrix.arch == 'thumb'}}
-        run: |
-          mkdir -p status
-          for f in $(shopt -s nullglob ; echo results/code*.csv)
-          do
-            export STEP="results-code$(
-              echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p')"
-            export CONTEXT="results / code$(
-              echo $f | sed -n 's/.*code-.*-\(.*\).csv/ (\1)/p')"
-            export PREV="$(curl -sS \
-              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
-              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
-                | select(.context == env.CONTEXT).description
-                | capture("Code size is (?<result>[0-9]+)").result' \
-              || echo 0)"
-            export DESCRIPTION="$(./scripts/code.py -u $f -s | awk '
-              NR==2 {printf "Code size is %d B",$2}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
-            jq -n '{
-              state: "success",
-              context: env.CONTEXT,
-              description: env.DESCRIPTION,
-              target_job: "${{github.job}} (${{matrix.arch}})",
-              target_step: env.STEP}' \
-              | tee status/code$(
-                echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p').json
-          done
-      - name: upload-status
-        if: ${{matrix.arch == 'thumb'}}
-        uses: actions/upload-artifact@v2
-        with:
-          name: status
-          path: status
-          retention-days: 1
-
-  # run under Valgrind to check for memory errors
-  valgrind:
-    runs-on: ubuntu-18.04
-    steps:
-      - uses: actions/checkout@v2
-      - name: install
-        run: |
-          # need toml, also pip3 isn't installed by default?
-          sudo apt-get update -qq
-          sudo apt-get install -qq python3 python3-pip
-          sudo pip3 install toml
-      - name: install-valgrind
-        run: |
-          sudo apt-get update -qq
-          sudo apt-get install -qq valgrind
-          valgrind --version
-      # normal tests, we don't need to test all geometries
-      - name: test-valgrind
-        run: make test TESTFLAGS+="-k --valgrind"
-
-  # self-host with littlefs-fuse for a fuzz-like test
-  fuse:
-    runs-on: ubuntu-18.04
-    if: ${{!endsWith(github.ref, '-prefix')}}
-    steps:
-      - uses: actions/checkout@v2
-      - name: install
-        run: |
-          # need toml, also pip3 isn't installed by default?
-          sudo apt-get update -qq
-          sudo apt-get install -qq python3 python3-pip libfuse-dev
-          sudo pip3 install toml
-          fusermount -V
-          gcc --version
-      - uses: actions/checkout@v2
-        with:
-          repository: littlefs-project/littlefs-fuse
-          ref: v2
-          path: littlefs-fuse
-      - name: setup
-        run: |
-          # copy our new version into littlefs-fuse
-          rm -rf littlefs-fuse/littlefs/*
-          cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
-
-          # setup disk for littlefs-fuse
-          mkdir mount
-          sudo chmod a+rw /dev/loop0
-          dd if=/dev/zero bs=512 count=128K of=disk
-          losetup /dev/loop0 disk
-      - name: test
-        run: |
-          # self-host test
-          make -C littlefs-fuse
-
-          littlefs-fuse/lfs --format /dev/loop0
-          littlefs-fuse/lfs /dev/loop0 mount
-
-          ls mount
-          mkdir mount/littlefs
-          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
-          cd mount/littlefs
-          stat .
-          ls -flh
-          make -B test
-
-  # test migration using littlefs-fuse
-  migrate:
-    runs-on: ubuntu-18.04
-    if: ${{!endsWith(github.ref, '-prefix')}}
-    steps:
-      - uses: actions/checkout@v2
-      - name: install
-        run: |
-          # need toml, also pip3 isn't installed by default?
-          sudo apt-get update -qq
-          sudo apt-get install -qq python3 python3-pip libfuse-dev
-          sudo pip3 install toml
-          fusermount -V
-          gcc --version
-      - uses: actions/checkout@v2
-        with:
-          repository: littlefs-project/littlefs-fuse
-          ref: v2
-          path: v2
-      - uses: actions/checkout@v2
-        with:
-          repository: littlefs-project/littlefs-fuse
-          ref: v1
-          path: v1
-      - name: setup
-        run: |
-          # copy our new version into littlefs-fuse
-          rm -rf v2/littlefs/*
-          cp -r $(git ls-tree --name-only HEAD) v2/littlefs
-
-          # setup disk for littlefs-fuse
-          mkdir mount
-          sudo chmod a+rw /dev/loop0
-          dd if=/dev/zero bs=512 count=128K of=disk
-          losetup /dev/loop0 disk
-      - name: test
-        run: |
-          # compile v1 and v2
-          make -C v1
-          make -C v2
-
-          # run self-host test with v1
-          v1/lfs --format /dev/loop0
-          v1/lfs /dev/loop0 mount
-
-          ls mount
-          mkdir mount/littlefs
-          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
-          cd mount/littlefs
-          stat .
-          ls -flh
-          make -B test
-
-          # attempt to migrate
-          cd ../..
-          fusermount -u mount
-
-          v2/lfs --migrate /dev/loop0
-          v2/lfs /dev/loop0 mount
-
-          # run self-host test with v2 right where we left off
-          ls mount
-          cd mount/littlefs
-          stat .
-          ls -flh
-          make -B test
-
-  # collect coverage info
-  coverage:
-    runs-on: ubuntu-18.04
-    needs: [test]
-    steps:
-      - uses: actions/checkout@v2
-      - name: install
-        run: |
-          sudo apt-get update -qq
-          sudo apt-get install -qq python3 python3-pip lcov
-          sudo pip3 install toml
-      # yes we continue-on-error nearly every step, continue-on-error
-      # at job level apparently still marks a job as failed, which isn't
-      # what we want
-      - uses: actions/download-artifact@v2
-        continue-on-error: true
-        with:
-          name: coverage
-          path: coverage
-      - name: results-coverage
-        continue-on-error: true
-        run: |
-          mkdir -p results
-          lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \
-            -o results/coverage.info
-          ./scripts/coverage.py results/coverage.info -o results/coverage.csv
-      - name: upload-results
-        uses: actions/upload-artifact@v2
-        with:
-          name: results
-          path: results
-      - name: collect-status
-        run: |
-          mkdir -p status
-          [ -e results/coverage.csv ] || exit 0
-          export STEP="results-coverage"
-          export CONTEXT="results / coverage"
-          export PREV="$(curl -sS \
-            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
-            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
-              | select(.context == env.CONTEXT).description
-              | capture("Coverage is (?<result>[0-9\\.]+)").result' \
-            || echo 0)"
-          export DESCRIPTION="$(
-            ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' '
-              NR==2 {printf "Coverage is %.1f%% of %d lines",$4,$3}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
-          jq -n '{
-            state: "success",
-            context: env.CONTEXT,
-            description: env.DESCRIPTION,
-            target_job: "${{github.job}}",
-            target_step: env.STEP}' \
-            | tee status/coverage.json
-      - name: upload-status
-        uses: actions/upload-artifact@v2
-        with:
-          name: status
-          path: status
-          retention-days: 1
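Aside: the test-example step above rips the C example out of README.md and compiles it with the user-provided block-device hooks defined to NULL, so the example only has to build, not run. The README example configures littlefs along these lines; the geometry numbers here are illustrative, not taken from this diff:

```c
#include "lfs.h"

// configuration of the filesystem is provided by this struct; with the
// workflow's -Duser_provided_block_device_read=NULL etc., these hooks
// all collapse to NULL, which is enough to satisfy the compiler
const struct lfs_config cfg = {
    // block device operations
    .read  = user_provided_block_device_read,
    .prog  = user_provided_block_device_prog,
    .erase = user_provided_block_device_erase,
    .sync  = user_provided_block_device_sync,

    // block device geometry (illustrative values)
    .read_size = 16,
    .prog_size = 16,
    .block_size = 4096,
    .block_count = 128,
    .cache_size = 16,
    .lookahead_size = 16,
    .block_cycles = 500,
};
```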
461  .travis.yml (new file)
@@ -0,0 +1,461 @@
+# environment variables
+env:
+  global:
+    - CFLAGS=-Werror
+    - MAKEFLAGS=-j
+
+# cache installation dirs
+cache:
+  pip: true
+  directories:
+    - $HOME/.cache/apt
+
+# common installation
+_: &install-common
+  # need toml, also pip3 isn't installed by default?
+  - sudo apt-get install python3 python3-pip
+  - sudo pip3 install toml
+  # setup a ram-backed disk to speed up reentrant tests
+  - mkdir disks
+  - sudo mount -t tmpfs -o size=100m tmpfs disks
+  - export TFLAGS="$TFLAGS --disk=disks/disk"
+
+# test cases
+_: &test-example
+  # make sure example can at least compile
+  - sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c &&
+    make all CFLAGS+="
+      -Duser_provided_block_device_read=NULL
+      -Duser_provided_block_device_prog=NULL
+      -Duser_provided_block_device_erase=NULL
+      -Duser_provided_block_device_sync=NULL
+      -include stdio.h"
+# default tests
+_: &test-default
+  # normal+reentrant tests
+  - make test TFLAGS+="-nrk"
+# common real-life geometries
+_: &test-nor
+  # NOR flash: read/prog = 1 block = 4KiB
+  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
+_: &test-emmc
+  # eMMC: read/prog = 512 block = 512
+  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
+_: &test-nand
+  # NAND flash: read/prog = 4KiB block = 32KiB
+  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
+# other extreme geometries that are useful for testing various corner cases
+_: &test-no-intrinsics
+  - make test TFLAGS+="-nrk -DLFS_NO_INTRINSICS"
+_: &test-no-inline
+  - make test TFLAGS+="-nrk -DLFS_INLINE_MAX=0"
+_: &test-byte-writes
+  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
+_: &test-block-cycles
+  - make test TFLAGS+="-nrk -DLFS_BLOCK_CYCLES=1"
+_: &test-odd-block-count
+  - make test TFLAGS+="-nrk -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
+_: &test-odd-block-size
+  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
+
+# report size
+_: &report-size
+  # compile and find the code size with the smallest configuration
+  - make -j1 clean size
+      OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')"
+      CFLAGS+="-DLFS_NO_ASSERT -DLFS_NO_DEBUG -DLFS_NO_WARN -DLFS_NO_ERROR"
+      | tee sizes
+  # update status if we succeeded, compare with master if possible
+  - |
+    if [ "$TRAVIS_TEST_RESULT" -eq 0 ]
+    then
+        CURR=$(tail -n1 sizes | awk '{print $1}')
+        PREV=$(curl -u "$GEKY_BOT_STATUSES" https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \
+            | jq -re "select(.sha != \"$TRAVIS_COMMIT\")
+                | .statuses[] | select(.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\").description
+                | capture(\"code size is (?<size>[0-9]+)\").size" \
+            || echo 0)
+
+        STATUS="Passed, code size is ${CURR}B"
+        if [ "$PREV" -ne 0 ]
+        then
+            STATUS="$STATUS ($(python -c "print '%+.2f' % (100*($CURR-$PREV)/$PREV.0)")%)"
+        fi
+    fi
+
+# stage control
+stages:
+  - name: test
+  - name: deploy
+    if: branch = master AND type = push
+
+# job control
+jobs:
+  # native testing
+  - &x86
+    stage: test
+    env:
+      - NAME=littlefs-x86
+    install: *install-common
+    script: [*test-example, *report-size]
+  - {<<: *x86, script: [*test-default, *report-size]}
+  - {<<: *x86, script: [*test-nor, *report-size]}
+  - {<<: *x86, script: [*test-emmc, *report-size]}
+  - {<<: *x86, script: [*test-nand, *report-size]}
+  - {<<: *x86, script: [*test-no-intrinsics, *report-size]}
+  - {<<: *x86, script: [*test-no-inline, *report-size]}
+  - {<<: *x86, script: [*test-byte-writes, *report-size]}
+  - {<<: *x86, script: [*test-block-cycles, *report-size]}
+  - {<<: *x86, script: [*test-odd-block-count, *report-size]}
+  - {<<: *x86, script: [*test-odd-block-size, *report-size]}
+
+  # cross-compile with ARM (thumb mode)
+  - &arm
+    stage: test
+    env:
+      - NAME=littlefs-arm
+      - CC="arm-linux-gnueabi-gcc --static -mthumb"
+      - TFLAGS="$TFLAGS --exec=qemu-arm"
+    install:
+      - *install-common
+      - sudo apt-get install
+          gcc-arm-linux-gnueabi
+          libc6-dev-armel-cross
+          qemu-user
+      - arm-linux-gnueabi-gcc --version
+      - qemu-arm -version
+    script: [*test-example, *report-size]
+  - {<<: *arm, script: [*test-default, *report-size]}
+  - {<<: *arm, script: [*test-nor, *report-size]}
+  - {<<: *arm, script: [*test-emmc, *report-size]}
+  - {<<: *arm, script: [*test-nand, *report-size]}
+  - {<<: *arm, script: [*test-no-intrinsics, *report-size]}
+  - {<<: *arm, script: [*test-no-inline, *report-size]}
+  # it just takes way to long to run byte-level writes in qemu,
+  # note this is still tested in the native tests
+  #- {<<: *arm, script: [*test-byte-writes, *report-size]}
+  - {<<: *arm, script: [*test-block-cycles, *report-size]}
+  - {<<: *arm, script: [*test-odd-block-count, *report-size]}
+  - {<<: *arm, script: [*test-odd-block-size, *report-size]}
+
+  # cross-compile with MIPS
+  - &mips
+    stage: test
+    env:
+      - NAME=littlefs-mips
+      - CC="mips-linux-gnu-gcc --static"
+      - TFLAGS="$TFLAGS --exec=qemu-mips"
+    install:
+      - *install-common
+      - sudo apt-get install
+          gcc-mips-linux-gnu
+          libc6-dev-mips-cross
+          qemu-user
+      - mips-linux-gnu-gcc --version
+      - qemu-mips -version
+    script: [*test-example, *report-size]
+  - {<<: *mips, script: [*test-default, *report-size]}
+  - {<<: *mips, script: [*test-nor, *report-size]}
+  - {<<: *mips, script: [*test-emmc, *report-size]}
+  - {<<: *mips, script: [*test-nand, *report-size]}
+  - {<<: *mips, script: [*test-no-intrinsics, *report-size]}
+  - {<<: *mips, script: [*test-no-inline, *report-size]}
+  # it just takes way to long to run byte-level writes in qemu,
+  # note this is still tested in the native tests
+  #- {<<: *mips, script: [*test-byte-writes, *report-size]}
+  - {<<: *mips, script: [*test-block-cycles, *report-size]}
+  - {<<: *mips, script: [*test-odd-block-count, *report-size]}
+  - {<<: *mips, script: [*test-odd-block-size, *report-size]}
+
+  # cross-compile with PowerPC
+  - &powerpc
+    stage: test
+    env:
+      - NAME=littlefs-powerpc
+      - CC="powerpc-linux-gnu-gcc --static"
+      - TFLAGS="$TFLAGS --exec=qemu-ppc"
+    install:
+      - *install-common
+      - sudo apt-get install
+          gcc-powerpc-linux-gnu
+          libc6-dev-powerpc-cross
+          qemu-user
+      - powerpc-linux-gnu-gcc --version
+      - qemu-ppc -version
+    script: [*test-example, *report-size]
+  - {<<: *powerpc, script: [*test-default, *report-size]}
+  - {<<: *powerpc, script: [*test-nor, *report-size]}
+  - {<<: *powerpc, script: [*test-emmc, *report-size]}
+  - {<<: *powerpc, script: [*test-nand, *report-size]}
+  - {<<: *powerpc, script: [*test-no-intrinsics, *report-size]}
+  - {<<: *powerpc, script: [*test-no-inline, *report-size]}
+  # it just takes way to long to run byte-level writes in qemu,
+  # note this is still tested in the native tests
+  #- {<<: *powerpc, script: [*test-byte-writes, *report-size]}
+  - {<<: *powerpc, script: [*test-block-cycles, *report-size]}
+  - {<<: *powerpc, script: [*test-odd-block-count, *report-size]}
+  - {<<: *powerpc, script: [*test-odd-block-size, *report-size]}
+
+  # test under valgrind, checking for memory errors
+  - &valgrind
+    stage: test
+    env:
+      - NAME=littlefs-valgrind
+    install:
+      - *install-common
+      - sudo apt-get install valgrind
+      - valgrind --version
+    script:
+      - make test TFLAGS+="-k --valgrind"
+
+  # test compilation in read-only mode
+  - stage: test
+    env:
+      - NAME=littlefs-readonly
+      - CC="arm-linux-gnueabi-gcc --static -mthumb"
+      - CFLAGS="-Werror -DLFS_READONLY"
+    if: branch !~ -prefix$
+    install:
+      - *install-common
+      - sudo apt-get install
+          gcc-arm-linux-gnueabi
+          libc6-dev-armel-cross
+      - arm-linux-gnueabi-gcc --version
+    # report-size will compile littlefs and report the size
+    script: [*report-size]
+
+  # test compilation in thread-safe mode
+  - stage: test
+    env:
+      - NAME=littlefs-threadsafe
+      - CC="arm-linux-gnueabi-gcc --static -mthumb"
+      - CFLAGS="-Werror -DLFS_THREADSAFE"
+    if: branch !~ -prefix$
+    install:
+      - *install-common
+      - sudo apt-get install
+          gcc-arm-linux-gnueabi
+          libc6-dev-armel-cross
+      - arm-linux-gnueabi-gcc --version
+    # report-size will compile littlefs and report the size
+    script: [*report-size]
+
+  # self-host with littlefs-fuse for fuzz test
+  - stage: test
+    env:
+      - NAME=littlefs-fuse
+    if: branch !~ -prefix$
+    install:
+      - *install-common
+      - sudo apt-get install libfuse-dev
+      - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2
+      - fusermount -V
+      - gcc --version
+
+      # setup disk for littlefs-fuse
+      - rm -rf littlefs-fuse/littlefs/*
+      - cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
+
+      - mkdir mount
+      - sudo chmod a+rw /dev/loop0
+      - dd if=/dev/zero bs=512 count=128K of=disk
+      - losetup /dev/loop0 disk
+    script:
+      # self-host test
+      - make -C littlefs-fuse
+
+      - littlefs-fuse/lfs --format /dev/loop0
+      - littlefs-fuse/lfs /dev/loop0 mount
+
+      - ls mount
+      - mkdir mount/littlefs
+      - cp -r $(git ls-tree --name-only HEAD) mount/littlefs
+      - cd mount/littlefs
+      - stat .
+      - ls -flh
+      - make -B test
+
+  # test migration using littlefs-fuse
+  - stage: test
+    env:
+      - NAME=littlefs-migration
+    if: branch !~ -prefix$
+    install:
+      - *install-common
+      - sudo apt-get install libfuse-dev
+      - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 v2
+      - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v1 v1
+      - fusermount -V
+      - gcc --version
+
+      # setup disk for littlefs-fuse
+      - rm -rf v2/littlefs/*
+      - cp -r $(git ls-tree --name-only HEAD) v2/littlefs
+
+      - mkdir mount
+      - sudo chmod a+rw /dev/loop0
+      - dd if=/dev/zero bs=512 count=128K of=disk
+      - losetup /dev/loop0 disk
+    script:
+      # compile v1 and v2
+      - make -C v1
+      - make -C v2
+
+      # run self-host test with v1
+      - v1/lfs --format /dev/loop0
+      - v1/lfs /dev/loop0 mount
+
+      - ls mount
+      - mkdir mount/littlefs
+      - cp -r $(git ls-tree --name-only HEAD) mount/littlefs
+      - cd mount/littlefs
+      - stat .
+      - ls -flh
+      - make -B test
+
+      # attempt to migrate
+      - cd ../..
+      - fusermount -u mount
+
+      - v2/lfs --migrate /dev/loop0
+      - v2/lfs /dev/loop0 mount
+
+      # run self-host test with v2 right where we left off
+      - ls mount
+      - cd mount/littlefs
+      - stat .
+      - ls -flh
+      - make -B test
+
+  # automatically create releases
+  - stage: deploy
+    env:
+      - NAME=deploy
+    script:
+      - |
+        bash << 'SCRIPT'
+        set -ev
+        # Find version defined in lfs.h
+        LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3)
+        LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16)))
+        LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >> 0)))
+        # Grab latests patch from repo tags, default to 0, needs finagling
+        # to get past github's pagination api
+        PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.
+        PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \
+            | sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \
+            || echo $PREV_URL)
+        LFS_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \
+            | jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g")
+                .captures[].string | tonumber) | max + 1' \
+            || echo 0)
+        # We have our new version
+        LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH"
+        echo "VERSION $LFS_VERSION"
+        # Check that we're the most recent commit
+        CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \
+            https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \
+            | jq -re '.sha')
+        [ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0
+        # Create major branch
+        git branch v$LFS_VERSION_MAJOR HEAD
+        # Create major prefix branch
+        git config user.name "geky bot"
+        git config user.email "bot@geky.net"
+        git fetch https://github.com/$TRAVIS_REPO_SLUG.git \
+            --depth=50 v$LFS_VERSION_MAJOR-prefix || true
+        ./scripts/prefix.py lfs$LFS_VERSION_MAJOR
+        git branch v$LFS_VERSION_MAJOR-prefix $( \
+            git commit-tree $(git write-tree) \
+                $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
+                -p HEAD \
+                -m "Generated v$LFS_VERSION_MAJOR prefixes")
+        git reset --hard
+        # Update major version branches (vN and vN-prefix)
+        git push --atomic https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \
+            v$LFS_VERSION_MAJOR \
+            v$LFS_VERSION_MAJOR-prefix
+        # Build release notes
+        PREV=$(git tag --sort=-v:refname -l "v*" | head -1)
+        if [ ! -z "$PREV" ]
+        then
+            echo "PREV $PREV"
+            CHANGES=$(git log --oneline $PREV.. --grep='^Merge' --invert-grep)
+            printf "CHANGES\n%s\n\n" "$CHANGES"
+        fi
+        case ${GEKY_BOT_DRAFT:-minor} in
+            true)  DRAFT=true ;;
+            minor) DRAFT=$(jq -R 'endswith(".0")' <<< "$LFS_VERSION") ;;
+            false) DRAFT=false ;;
+        esac
+        # Create the release and patch version tag (vN.N.N)
+        curl -f -u "$GEKY_BOT_RELEASES" -X POST \
+            https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \
+            -d "{
+                \"tag_name\": \"$LFS_VERSION\",
+                \"name\": \"${LFS_VERSION%.0}\",
+                \"target_commitish\": \"$TRAVIS_COMMIT\",
+                \"draft\": $DRAFT,
+                \"body\": $(jq -sR '.' <<< "$CHANGES")
+            }" #"
+        SCRIPT
+
+# manage statuses
+before_install:
+  - |
+    # don't clobber other (not us) failures
+    if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
+        | jq -e ".statuses[] | select(
+            .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
+            .state == \"failure\" and
+            (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
+    then
+        curl -u "$GEKY_BOT_STATUSES" -X POST \
+            https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
+            -d "{
+                \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
+                \"state\": \"pending\",
+                \"description\": \"${STATUS:-In progress}\",
+                \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
+            }"
+    fi
+
+after_failure:
+  - |
+    # don't clobber other (not us) failures
+    if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
+        | jq -e ".statuses[] | select(
+            .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
+            .state == \"failure\" and
+            (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
+    then
+        curl -u "$GEKY_BOT_STATUSES" -X POST \
+            https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
+            -d "{
+                \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
+                \"state\": \"failure\",
+                \"description\": \"${STATUS:-Failed}\",
+                \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
+            }"
+    fi
+
+after_success:
+  - |
+    # don't clobber other (not us) failures
+    # only update if we were last job to mark in progress,
+    # this isn't perfect but is probably good enough
+    if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
+        | jq -e ".statuses[] | select(
+            .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
+            (.state == \"failure\" or .state == \"pending\") and
+            (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
+    then
+        curl -u "$GEKY_BOT_STATUSES" -X POST \
+            https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
+            -d "{
+                \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
+                \"state\": \"success\",
+                \"description\": \"${STATUS:-Passed}\",
+                \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
+            }"
+    fi
83  Makefile
@@ -1,39 +1,25 @@
-ifdef BUILDDIR
-# make sure BUILDDIR ends with a slash
-override BUILDDIR := $(BUILDDIR)/
-# bit of a hack, but we want to make sure BUILDDIR directory structure
-# is correct before any commands
-$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
-	$(BUILDDIR) \
-	$(BUILDDIR)bd \
-	$(BUILDDIR)tests))
-endif
-
-# overridable target/src/tools/flags/etc
+TARGET = lfs.a
 ifneq ($(wildcard test.c main.c),)
-TARGET ?= $(BUILDDIR)lfs
-else
-TARGET ?= $(BUILDDIR)lfs.a
+override TARGET = lfs
 endif
 
 CC ?= gcc
 AR ?= ar
 SIZE ?= size
-CTAGS ?= ctags
-NM ?= nm
-LCOV ?= lcov
 
-SRC ?= $(wildcard *.c)
-OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
-DEP := $(SRC:%.c=$(BUILDDIR)%.d)
-ASM := $(SRC:%.c=$(BUILDDIR)%.s)
+SRC += $(wildcard *.c bd/*.c)
+OBJ := $(SRC:.c=.o)
+DEP := $(SRC:.c=.d)
+ASM := $(SRC:.c=.s)
 
 ifdef DEBUG
 override CFLAGS += -O0 -g3
 else
 override CFLAGS += -Os
 endif
+ifdef WORD
+override CFLAGS += -m$(WORD)
+endif
 ifdef TRACE
 override CFLAGS += -DLFS_YES_TRACE
 endif
@@ -42,73 +28,40 @@ override CFLAGS += -std=c99 -Wall -pedantic
 override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef
 
 ifdef VERBOSE
-override TESTFLAGS += -v
-override CODEFLAGS += -v
-override COVERAGEFLAGS += -v
-endif
-ifdef EXEC
-override TESTFLAGS += --exec="$(EXEC)"
-endif
-ifdef BUILDDIR
-override TESTFLAGS += --build-dir="$(BUILDDIR:/=)"
-override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
-endif
-ifneq ($(NM),nm)
-override CODEFLAGS += --nm-tool="$(NM)"
+override TFLAGS += -v
 endif
 
-# commands
-.PHONY: all build
-all build: $(TARGET)
+all: $(TARGET)
 
-.PHONY: asm
 asm: $(ASM)
 
-.PHONY: size
 size: $(OBJ)
 	$(SIZE) -t $^
 
-.PHONY: tags
-tags:
-	$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC)
-
-.PHONY: code
-code: $(OBJ)
-	./scripts/code.py $^ $(CODEFLAGS)
-
-.PHONY: test
 test:
-	./scripts/test.py $(TESTFLAGS)
+	./scripts/test.py $(TFLAGS)
 .SECONDEXPANSION:
 test%: tests/test$$(firstword $$(subst \#, ,%)).toml
-	./scripts/test.py $@ $(TESTFLAGS)
+	./scripts/test.py $@ $(TFLAGS)
 
-.PHONY: coverage
-coverage:
-	./scripts/coverage.py $(BUILDDIR)tests/*.toml.info $(COVERAGEFLAGS)
-
-# rules
 -include $(DEP)
-.SUFFIXES:
 
-$(BUILDDIR)lfs: $(OBJ)
+lfs: $(OBJ)
 	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
 
-$(BUILDDIR)%.a: $(OBJ)
+%.a: $(OBJ)
 	$(AR) rcs $@ $^
 
-$(BUILDDIR)%.o: %.c
+%.o: %.c
 	$(CC) -c -MMD $(CFLAGS) $< -o $@
 
-$(BUILDDIR)%.s: %.c
+%.s: %.c
 	$(CC) -S $(CFLAGS) $< -o $@
 
-# clean everything
-.PHONY: clean
 clean:
 	rm -f $(TARGET)
 	rm -f $(OBJ)
 	rm -f $(DEP)
 	rm -f $(ASM)
-	rm -f $(BUILDDIR)tests/*.toml.*
+	rm -f tests/*.toml.*
132  lfs.c
@@ -467,7 +467,7 @@ static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file);
|
|||||||
static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
|
static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
|
||||||
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);
|
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);
|
||||||
|
|
||||||
static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
|
static void lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
|
||||||
static void lfs_fs_prepmove(lfs_t *lfs,
|
static void lfs_fs_prepmove(lfs_t *lfs,
|
||||||
uint16_t id, const lfs_block_t pair[2]);
|
uint16_t id, const lfs_block_t pair[2]);
|
||||||
static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
|
static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
|
||||||
@@ -1591,8 +1591,7 @@ static int lfs_dir_compact(lfs_t *lfs,
|
|||||||
// for metadata updates.
|
// for metadata updates.
|
||||||
if (end - begin < 0xff &&
|
if (end - begin < 0xff &&
|
||||||
size <= lfs_min(lfs->cfg->block_size - 36,
|
size <= lfs_min(lfs->cfg->block_size - 36,
|
||||||
lfs_alignup((lfs->cfg->metadata_max ?
|
lfs_alignup(lfs->cfg->block_size/2,
|
||||||
lfs->cfg->metadata_max : lfs->cfg->block_size)/2,
|
|
||||||
lfs->cfg->prog_size))) {
|
lfs->cfg->prog_size))) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@@ -1677,8 +1676,7 @@ static int lfs_dir_compact(lfs_t *lfs,
|
|||||||
.crc = 0xffffffff,
|
.crc = 0xffffffff,
|
||||||
|
|
||||||
.begin = 0,
|
.begin = 0,
|
||||||
.end = (lfs->cfg->metadata_max ?
|
.end = lfs->cfg->block_size - 8,
|
||||||
lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// erase block to write to
|
// erase block to write to
|
||||||
@@ -1888,8 +1886,7 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
|
|||||||
.crc = 0xffffffff,
|
.crc = 0xffffffff,
|
||||||
|
|
||||||
.begin = dir->off,
|
.begin = dir->off,
|
||||||
.end = (lfs->cfg->metadata_max ?
|
.end = lfs->cfg->block_size - 8,
|
||||||
lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// traverse attrs that need to be written out
|
// traverse attrs that need to be written out
|
||||||
@@ -2066,10 +2063,7 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
|
|||||||
// current block end of list?
|
// current block end of list?
|
||||||
if (cwd.m.split) {
|
if (cwd.m.split) {
|
||||||
// update tails, this creates a desync
|
// update tails, this creates a desync
|
||||||
err = lfs_fs_preporphans(lfs, +1);
|
lfs_fs_preporphans(lfs, +1);
|
||||||
if (err) {
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
// it's possible our predecessor has to be relocated, and if
|
// it's possible our predecessor has to be relocated, and if
|
||||||
// our parent is our predecessor's predecessor, this could have
|
// our parent is our predecessor's predecessor, this could have
|
||||||
@@ -2089,10 +2083,7 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
lfs->mlist = cwd.next;
|
lfs->mlist = cwd.next;
|
||||||
err = lfs_fs_preporphans(lfs, -1);
|
lfs_fs_preporphans(lfs, -1);
|
||||||
if (err) {
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// now insert into our parent block
|
// now insert into our parent block
|
||||||
@@ -2977,9 +2968,7 @@ static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
|
|||||||
if ((file->flags & LFS_F_INLINE) &&
|
if ((file->flags & LFS_F_INLINE) &&
|
||||||
lfs_max(file->pos+nsize, file->ctz.size) >
|
lfs_max(file->pos+nsize, file->ctz.size) >
|
||||||
lfs_min(0x3fe, lfs_min(
|
lfs_min(0x3fe, lfs_min(
|
||||||
lfs->cfg->cache_size,
|
lfs->cfg->cache_size, lfs->cfg->block_size/8))) {
|
||||||
(lfs->cfg->metadata_max ?
|
|
||||||
lfs->cfg->metadata_max : lfs->cfg->block_size) / 8))) {
|
|
||||||
// inline file doesn't fit anymore
|
// inline file doesn't fit anymore
|
||||||
int err = lfs_file_outline(lfs, file);
|
int err = lfs_file_outline(lfs, file);
|
||||||
if (err) {
|
if (err) {
|
||||||
@@ -3061,26 +3050,6 @@ relocate:

static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
        lfs_soff_t off, int whence) {
-    // find new pos
-    lfs_off_t npos = file->pos;
-    if (whence == LFS_SEEK_SET) {
-        npos = off;
-    } else if (whence == LFS_SEEK_CUR) {
-        npos = file->pos + off;
-    } else if (whence == LFS_SEEK_END) {
-        npos = lfs_file_rawsize(lfs, file) + off;
-    }
-
-    if (npos > lfs->file_max) {
-        // file position out of range
-        return LFS_ERR_INVAL;
-    }
-
-    if (file->pos == npos) {
-        // noop - position has not changed
-        return npos;
-    }
-
#ifndef LFS_READONLY
    // write out everything beforehand, may be noop if rdonly
    int err = lfs_file_flush(lfs, file);
@@ -3089,6 +3058,21 @@ static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
    }
#endif

+    // find new pos
+    lfs_off_t npos = file->pos;
+    if (whence == LFS_SEEK_SET) {
+        npos = off;
+    } else if (whence == LFS_SEEK_CUR) {
+        npos = file->pos + off;
+    } else if (whence == LFS_SEEK_END) {
+        npos = file->ctz.size + off;
+    }
+
+    if (npos > lfs->file_max) {
+        // file position out of range
+        return LFS_ERR_INVAL;
+    }
+
    // update pos
    file->pos = npos;
    return npos;
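Note the two seek hunks are a move, not a deletion: v2.4.0 resolves the target position before the flush (via lfs_file_rawsize, which accounts for buffered writes) so a no-op seek can return early without touching storage, while the pre-2.4 code resolves it after the flush, when file->ctz.size is already current. The whence semantics themselves are unchanged; a rough usage sketch against the public API (a mounted lfs_t and open lfs_file_t are assumed, error checks elided):

    lfs_file_seek(&lfs, &file, 16, LFS_SEEK_SET);                  // absolute
    lfs_file_seek(&lfs, &file, -4, LFS_SEEK_CUR);                  // relative
    lfs_soff_t size = lfs_file_seek(&lfs, &file, 0, LFS_SEEK_END); // end = file size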
@@ -3119,22 +3103,21 @@ static int lfs_file_rawtruncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
            return err;
        }

-        // need to set pos/block/off consistently so seeking back to
-        // the old position does not get confused
-        file->pos = size;
        file->ctz.head = file->block;
        file->ctz.size = size;
        file->flags |= LFS_F_DIRTY | LFS_F_READING;
    } else if (size > oldsize) {
        // flush+seek if not already at end
-        lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END);
-        if (res < 0) {
-            return (int)res;
+        if (file->pos != oldsize) {
+            lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END);
+            if (res < 0) {
+                return (int)res;
+            }
        }

        // fill with zeros
        while (file->pos < size) {
-            res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1);
+            lfs_ssize_t res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return (int)res;
            }
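The truncate hunk keeps the same grow strategy on both sides: extending a file is a seek to the old end followed by writing zeros up to the new size; the v2.4.0 side only adds the pos bookkeeping and makes the seek unconditional. A small sketch of the user-visible behavior (mounted filesystem assumed, checks elided):

    lfs_file_open(&lfs, &file, "log", LFS_O_RDWR | LFS_O_CREAT);
    lfs_file_truncate(&lfs, &file, 512);   // grow: zero-fills up to 512 bytes
    lfs_file_truncate(&lfs, &file, 64);    // shrink: drops data past 64 bytes
    lfs_file_close(&lfs, &file);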
@@ -3225,10 +3208,7 @@ static int lfs_rawremove(lfs_t *lfs, const char *path) {
    }

    // mark fs as orphaned
-    err = lfs_fs_preporphans(lfs, +1);
-    if (err) {
-        return err;
-    }
+    lfs_fs_preporphans(lfs, +1);

    // I know it's crazy but yes, dir can be changed by our parent's
    // commit (if predecessor is child)
@@ -3248,10 +3228,7 @@ static int lfs_rawremove(lfs_t *lfs, const char *path) {
        lfs->mlist = dir.next;
        if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
            // fix orphan
-            err = lfs_fs_preporphans(lfs, -1);
-            if (err) {
-                return err;
-            }
+            lfs_fs_preporphans(lfs, -1);

            err = lfs_fs_pred(lfs, dir.m.pair, &cwd);
            if (err) {
@@ -3337,10 +3314,7 @@ static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) {
    }

    // mark fs as orphaned
-    err = lfs_fs_preporphans(lfs, +1);
-    if (err) {
-        return err;
-    }
+    lfs_fs_preporphans(lfs, +1);

    // I know it's crazy but yes, dir can be changed by our parent's
    // commit (if predecessor is child)
@@ -3383,10 +3357,7 @@ static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) {
        lfs->mlist = prevdir.next;
        if (prevtag != LFS_ERR_NOENT && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
            // fix orphan
-            err = lfs_fs_preporphans(lfs, -1);
-            if (err) {
-                return err;
-            }
+            lfs_fs_preporphans(lfs, -1);

            err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd);
            if (err) {
@@ -3567,8 +3538,6 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
        lfs->attr_max = LFS_ATTR_MAX;
    }

-    LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size);
-
    // setup default state
    lfs->root[0] = LFS_BLOCK_NULL;
    lfs->root[1] = LFS_BLOCK_NULL;
@@ -3649,16 +3618,16 @@ static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
        goto cleanup;
    }

-    // force compaction to prevent accidentally mounting any
-    // older version of littlefs that may live on disk
-    root.erased = false;
-    err = lfs_dir_commit(lfs, &root, NULL, 0);
+    // sanity check that fetch works
+    err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
    if (err) {
        goto cleanup;
    }

-    // sanity check that fetch works
-    err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
+    // force compaction to prevent accidentally mounting any
+    // older version of littlefs that may live on disk
+    root.erased = false;
+    err = lfs_dir_commit(lfs, &root, NULL, 0);
    if (err) {
        goto cleanup;
    }
@@ -3862,7 +3831,7 @@ int lfs_fs_rawtraverse(lfs_t *lfs,
            if (err) {
                return err;
            }
        } else if (includeorphans &&
                lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) {
            for (int i = 0; i < 2; i++) {
                err = cb(data, (&ctz.head)[i]);
@@ -4019,10 +3988,7 @@ static int lfs_fs_relocate(lfs_t *lfs,

    if (tag != LFS_ERR_NOENT) {
        // update disk, this creates a desync
-        int err = lfs_fs_preporphans(lfs, +1);
-        if (err) {
-            return err;
-        }
+        lfs_fs_preporphans(lfs, +1);

        // fix pending move in this pair? this looks like an optimization but
        // is in fact _required_ since relocating may outdate the move.
@@ -4039,7 +4005,7 @@ static int lfs_fs_relocate(lfs_t *lfs,
        }

        lfs_pair_tole32(newpair);
-        err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS(
+        int err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS(
                {LFS_MKTAG_IF(moveid != 0x3ff,
                    LFS_TYPE_DELETE, moveid, 0), NULL},
                {tag, newpair}));
@@ -4049,10 +4015,7 @@ static int lfs_fs_relocate(lfs_t *lfs,
        }

        // next step, clean up orphans
-        err = lfs_fs_preporphans(lfs, -1);
-        if (err) {
-            return err;
-        }
+        lfs_fs_preporphans(lfs, -1);
    }

    // find pred
@@ -4091,13 +4054,11 @@ static int lfs_fs_relocate(lfs_t *lfs,
#endif

#ifndef LFS_READONLY
-static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
+static void lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
    LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0 || orphans >= 0);
    lfs->gstate.tag += orphans;
    lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
            ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));
-
-    return 0;
}
#endif

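This signature change is what drives the earlier call-site hunks: once lfs_fs_preporphans only mutates in-RAM gstate it cannot fail, so the `return 0;` and every caller's error check become dead weight. A rough sketch of the counter semantics, assuming (as the real lfs_tag_size does) that the low 10 bits of the tag hold the size field used as the orphan count:

    uint32_t tag = 0;
    tag += 2;                               // two orphans created
    tag -= 1;                               // one fixed
    bool hasorphans = (tag & 0x3ff) > 0;    // mirrors lfs_gstate_hasorphans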
@@ -4214,7 +4175,8 @@ static int lfs_fs_deorphan(lfs_t *lfs) {
    }

    // mark orphans as fixed
-    return lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
+    lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
+    return 0;
}
#endif

@@ -4763,7 +4725,7 @@ static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) {

        lfs1_entry_tole32(&entry1.d);
        err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
-                {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
+                {LFS_MKTAG(LFS_TYPE_CREATE, id, 0)},
                {LFS_MKTAG_IF_ELSE(isdir,
                    LFS_TYPE_DIR, id, entry1.d.nlen,
                    LFS_TYPE_REG, id, entry1.d.nlen),
@@ -4868,7 +4830,7 @@ static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) {

        lfs_superblock_tole32(&superblock);
        err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
-                {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
+                {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0)},
                {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                    &superblock}));
10
lfs.h
@@ -22,7 +22,7 @@ extern "C"
// Software library version
// Major (top-nibble), incremented on backwards incompatible changes
// Minor (bottom-nibble), incremented on feature additions
-#define LFS_VERSION 0x00020004
+#define LFS_VERSION 0x00020003
#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))

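The version bump itself is mechanical; the packed encoding decodes as plain arithmetic on the macros above:

    // 0x00020004 -> v2.4 on the left of this compare, 0x00020003 -> v2.3
    _Static_assert((0xffff & (0x00020004 >> 16)) == 2, "major");
    _Static_assert((0xffff & (0x00020004 >> 0)) == 4, "minor");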
@@ -207,7 +207,7 @@ struct lfs_config {
    // Number of erasable blocks on the device.
    lfs_size_t block_count;

    // Number of erase cycles before littlefs evicts metadata logs and moves
    // the metadata to another block. Suggested values are in the
    // range 100-1000, with large values having better performance at the cost
    // of less consistent wear distribution.
@@ -256,12 +256,6 @@ struct lfs_config {
    // larger attributes size but must be <= LFS_ATTR_MAX. Defaults to
    // LFS_ATTR_MAX when zero.
    lfs_size_t attr_max;
-
-    // Optional upper limit on total space given to metadata pairs in bytes. On
-    // devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB)
-    // can help bound the metadata compaction time. Must be <= block_size.
-    // Defaults to block_size when zero.
-    lfs_size_t metadata_max;
};

// File info structure
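For reference, the removed field is opt-in; a configuration sketch showing how it would be set on a large-block device (values are illustrative, the other required fields are elided):

    const struct lfs_config cfg = {
        // ...read/prog/erase/sync callbacks and cache/lookahead sizes elided...
        .block_size = 128*1024,   // large-block geometry from the comment above
        .metadata_max = 4096,     // bound metadata pairs to 4KiB per block;
                                  // 0 falls back to the full block_size
    };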
10
lfs_util.h
@@ -49,7 +49,6 @@ extern "C"
// code footprint

// Logging functions
-#ifndef LFS_TRACE
#ifdef LFS_YES_TRACE
#define LFS_TRACE_(fmt, ...) \
    printf("%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
@@ -57,9 +56,7 @@ extern "C"
#else
#define LFS_TRACE(...)
#endif
-#endif

-#ifndef LFS_DEBUG
#ifndef LFS_NO_DEBUG
#define LFS_DEBUG_(fmt, ...) \
    printf("%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
@@ -67,9 +64,7 @@ extern "C"
#else
#define LFS_DEBUG(...)
#endif
-#endif

-#ifndef LFS_WARN
#ifndef LFS_NO_WARN
#define LFS_WARN_(fmt, ...) \
    printf("%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
@@ -77,9 +72,7 @@ extern "C"
#else
#define LFS_WARN(...)
#endif
-#endif

-#ifndef LFS_ERROR
#ifndef LFS_NO_ERROR
#define LFS_ERROR_(fmt, ...) \
    printf("%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
@@ -87,16 +80,13 @@ extern "C"
#else
#define LFS_ERROR(...)
#endif
-#endif

// Runtime assertions
-#ifndef LFS_ASSERT
#ifndef LFS_NO_ASSERT
#define LFS_ASSERT(test) assert(test)
#else
#define LFS_ASSERT(test)
#endif
-#endif


// Builtin functions, these may be replaced by more efficient
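The guards being removed here are what let a port override each logging macro before lfs_util.h defines its default. For example (my_log and my_assert are hypothetical user functions, not part of littlefs):

    // defined before lfs_util.h is included, e.g. in CFLAGS or a wrapper header
    #define LFS_TRACE(...) my_log("lfs trace", __VA_ARGS__)
    #define LFS_ASSERT(test) my_assert(test)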
214
scripts/code.py
@@ -1,214 +0,0 @@
-#!/usr/bin/env python3
-#
-# Script to find code size at the function level. Basically just a bit wrapper
-# around nm with some extra conveniences for comparing builds. Heavily inspired
-# by Linux's Bloat-O-Meter.
-#
-
-import os
-import glob
-import itertools as it
-import subprocess as sp
-import shlex
-import re
-import csv
-import collections as co
-
-
-OBJ_PATHS = ['*.o', 'bd/*.o']
-
-def collect(paths, **args):
-    results = co.defaultdict(lambda: 0)
-    pattern = re.compile(
-        '^(?P<size>[0-9a-fA-F]+)' +
-        ' (?P<type>[%s])' % re.escape(args['type']) +
-        ' (?P<func>.+?)$')
-    for path in paths:
-        # note nm-tool may contain extra args
-        cmd = args['nm_tool'] + ['--size-sort', path]
-        if args.get('verbose'):
-            print(' '.join(shlex.quote(c) for c in cmd))
-        proc = sp.Popen(cmd,
-            stdout=sp.PIPE,
-            stderr=sp.PIPE if not args.get('verbose') else None,
-            universal_newlines=True)
-        for line in proc.stdout:
-            m = pattern.match(line)
-            if m:
-                results[(path, m.group('func'))] += int(m.group('size'), 16)
-        proc.wait()
-        if proc.returncode != 0:
-            if not args.get('verbose'):
-                for line in proc.stderr:
-                    sys.stdout.write(line)
-            sys.exit(-1)
-
-    flat_results = []
-    for (file, func), size in results.items():
-        # map to source files
-        if args.get('build_dir'):
-            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
-        # discard internal functions
-        if func.startswith('__'):
-            continue
-        # discard .8449 suffixes created by optimizer
-        func = re.sub('\.[0-9]+', '', func)
-        flat_results.append((file, func, size))
-
-    return flat_results
-
-def main(**args):
-    # find sizes
-    if not args.get('use', None):
-        # find .o files
-        paths = []
-        for path in args['obj_paths']:
-            if os.path.isdir(path):
-                path = path + '/*.o'
-
-            for path in glob.glob(path):
-                paths.append(path)
-
-        if not paths:
-            print('no .obj files found in %r?' % args['obj_paths'])
-            sys.exit(-1)
-
-        results = collect(paths, **args)
-    else:
-        with open(args['use']) as f:
-            r = csv.DictReader(f)
-            results = [
-                (   result['file'],
-                    result['function'],
-                    int(result['size']))
-                for result in r]
-
-    total = 0
-    for _, _, size in results:
-        total += size
-
-    # find previous results?
-    if args.get('diff'):
-        with open(args['diff']) as f:
-            r = csv.DictReader(f)
-            prev_results = [
-                (   result['file'],
-                    result['function'],
-                    int(result['size']))
-                for result in r]
-
-        prev_total = 0
-        for _, _, size in prev_results:
-            prev_total += size
-
-    # write results to CSV
-    if args.get('output'):
-        with open(args['output'], 'w') as f:
-            w = csv.writer(f)
-            w.writerow(['file', 'function', 'size'])
-            for file, func, size in sorted(results):
-                w.writerow((file, func, size))
-
-    # print results
-    def dedup_entries(results, by='function'):
-        entries = co.defaultdict(lambda: 0)
-        for file, func, size in results:
-            entry = (file if by == 'file' else func)
-            entries[entry] += size
-        return entries
-
-    def diff_entries(olds, news):
-        diff = co.defaultdict(lambda: (0, 0, 0, 0))
-        for name, new in news.items():
-            diff[name] = (0, new, new, 1.0)
-        for name, old in olds.items():
-            _, new, _, _ = diff[name]
-            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
-        return diff
-
-    def print_header(by=''):
-        if not args.get('diff'):
-            print('%-36s %7s' % (by, 'size'))
-        else:
-            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
-
-    def print_entries(by='function'):
-        entries = dedup_entries(results, by=by)
-
-        if not args.get('diff'):
-            print_header(by=by)
-            for name, size in sorted(entries.items()):
-                print("%-36s %7d" % (name, size))
-        else:
-            prev_entries = dedup_entries(prev_results, by=by)
-            diff = diff_entries(prev_entries, entries)
-            print_header(by='%s (%d added, %d removed)' % (by,
-                sum(1 for old, _, _, _ in diff.values() if not old),
-                sum(1 for _, new, _, _ in diff.values() if not new)))
-            for name, (old, new, diff, ratio) in sorted(diff.items(),
-                    key=lambda x: (-x[1][3], x)):
-                if ratio or args.get('all'):
-                    print("%-36s %7s %7s %+7d%s" % (name,
-                        old or "-",
-                        new or "-",
-                        diff,
-                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))
-
-    def print_totals():
-        if not args.get('diff'):
-            print("%-36s %7d" % ('TOTAL', total))
-        else:
-            ratio = (total-prev_total)/prev_total if prev_total else 1.0
-            print("%-36s %7s %7s %+7d%s" % (
-                'TOTAL',
-                prev_total if prev_total else '-',
-                total if total else '-',
-                total-prev_total,
-                ' (%+.1f%%)' % (100*ratio) if ratio else ''))
-
-    if args.get('quiet'):
-        pass
-    elif args.get('summary'):
-        print_header()
-        print_totals()
-    elif args.get('files'):
-        print_entries(by='file')
-        print_totals()
-    else:
-        print_entries(by='function')
-        print_totals()
-
-if __name__ == "__main__":
-    import argparse
-    import sys
-    parser = argparse.ArgumentParser(
-        description="Find code size at the function level.")
-    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
-        help="Description of where to find *.o files. May be a directory \
-            or a list of paths. Defaults to %r." % OBJ_PATHS)
-    parser.add_argument('-v', '--verbose', action='store_true',
-        help="Output commands that run behind the scenes.")
-    parser.add_argument('-o', '--output',
-        help="Specify CSV file to store results.")
-    parser.add_argument('-u', '--use',
-        help="Don't compile and find code sizes, instead use this CSV file.")
-    parser.add_argument('-d', '--diff',
-        help="Specify CSV file to diff code size against.")
-    parser.add_argument('-a', '--all', action='store_true',
-        help="Show all functions, not just the ones that changed.")
-    parser.add_argument('--files', action='store_true',
-        help="Show file-level code sizes. Note this does not include padding! "
-            "So sizes may differ from other tools.")
-    parser.add_argument('-s', '--summary', action='store_true',
-        help="Only show the total code size.")
-    parser.add_argument('-q', '--quiet', action='store_true',
-        help="Don't show anything, useful with -o.")
-    parser.add_argument('--type', default='tTrRdDbB',
-        help="Type of symbols to report, this uses the same single-character "
-            "type-names emitted by nm. Defaults to %(default)r.")
-    parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
-        help="Path to the nm tool to use.")
-    parser.add_argument('--build-dir',
-        help="Specify the relative build directory. Used to map object files \
-            to the correct source files.")
-    sys.exit(main(**vars(parser.parse_args())))
254
scripts/coverage.py
@@ -1,254 +0,0 @@
-#!/usr/bin/env python3
-#
-# Parse and report coverage info from .info files generated by lcov
-#
-import os
-import glob
-import csv
-import re
-import collections as co
-import bisect as b
-
-
-INFO_PATHS = ['tests/*.toml.info']
-
-def collect(paths, **args):
-    file = None
-    funcs = []
-    lines = co.defaultdict(lambda: 0)
-    pattern = re.compile(
-        '^(?P<file>SF:/?(?P<file_name>.*))$'
-        '|^(?P<func>FN:(?P<func_lineno>[0-9]*),(?P<func_name>.*))$'
-        '|^(?P<line>DA:(?P<line_lineno>[0-9]*),(?P<line_hits>[0-9]*))$')
-    for path in paths:
-        with open(path) as f:
-            for line in f:
-                m = pattern.match(line)
-                if m and m.group('file'):
-                    file = m.group('file_name')
-                elif m and file and m.group('func'):
-                    funcs.append((file, int(m.group('func_lineno')),
-                        m.group('func_name')))
-                elif m and file and m.group('line'):
-                    lines[(file, int(m.group('line_lineno')))] += (
-                        int(m.group('line_hits')))
-
-    # map line numbers to functions
-    funcs.sort()
-    def func_from_lineno(file, lineno):
-        i = b.bisect(funcs, (file, lineno))
-        if i and funcs[i-1][0] == file:
-            return funcs[i-1][2]
-        else:
-            return None
-
-    # reduce to function info
-    reduced_funcs = co.defaultdict(lambda: (0, 0))
-    for (file, line_lineno), line_hits in lines.items():
-        func = func_from_lineno(file, line_lineno)
-        if not func:
-            continue
-        hits, count = reduced_funcs[(file, func)]
-        reduced_funcs[(file, func)] = (hits + (line_hits > 0), count + 1)
-
-    results = []
-    for (file, func), (hits, count) in reduced_funcs.items():
-        # discard internal/testing functions (test_* injected with
-        # internal testing)
-        if func.startswith('__') or func.startswith('test_'):
-            continue
-        # discard .8449 suffixes created by optimizer
-        func = re.sub('\.[0-9]+', '', func)
-        results.append((file, func, hits, count))
-
-    return results
-
-
-def main(**args):
-    # find coverage
-    if not args.get('use'):
-        # find *.info files
-        paths = []
-        for path in args['info_paths']:
-            if os.path.isdir(path):
-                path = path + '/*.gcov'
-
-            for path in glob.glob(path):
-                paths.append(path)
-
-        if not paths:
-            print('no .info files found in %r?' % args['info_paths'])
-            sys.exit(-1)
-
-        results = collect(paths, **args)
-    else:
-        with open(args['use']) as f:
-            r = csv.DictReader(f)
-            results = [
-                (   result['file'],
-                    result['function'],
-                    int(result['hits']),
-                    int(result['count']))
-                for result in r]
-
-    total_hits, total_count = 0, 0
-    for _, _, hits, count in results:
-        total_hits += hits
-        total_count += count
-
-    # find previous results?
-    if args.get('diff'):
-        with open(args['diff']) as f:
-            r = csv.DictReader(f)
-            prev_results = [
-                (   result['file'],
-                    result['function'],
-                    int(result['hits']),
-                    int(result['count']))
-                for result in r]
-
-        prev_total_hits, prev_total_count = 0, 0
-        for _, _, hits, count in prev_results:
-            prev_total_hits += hits
-            prev_total_count += count
-
-    # write results to CSV
-    if args.get('output'):
-        with open(args['output'], 'w') as f:
-            w = csv.writer(f)
-            w.writerow(['file', 'function', 'hits', 'count'])
-            for file, func, hits, count in sorted(results):
-                w.writerow((file, func, hits, count))
-
-    # print results
-    def dedup_entries(results, by='function'):
-        entries = co.defaultdict(lambda: (0, 0))
-        for file, func, hits, count in results:
-            entry = (file if by == 'file' else func)
-            entry_hits, entry_count = entries[entry]
-            entries[entry] = (entry_hits + hits, entry_count + count)
-        return entries
-
-    def diff_entries(olds, news):
-        diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0))
-        for name, (new_hits, new_count) in news.items():
-            diff[name] = (
-                0, 0,
-                new_hits, new_count,
-                new_hits, new_count,
-                (new_hits/new_count if new_count else 1.0) - 1.0)
-        for name, (old_hits, old_count) in olds.items():
-            _, _, new_hits, new_count, _, _, _ = diff[name]
-            diff[name] = (
-                old_hits, old_count,
-                new_hits, new_count,
-                new_hits-old_hits, new_count-old_count,
-                ((new_hits/new_count if new_count else 1.0)
-                    - (old_hits/old_count if old_count else 1.0)))
-        return diff
-
-    def print_header(by=''):
-        if not args.get('diff'):
-            print('%-36s %19s' % (by, 'hits/line'))
-        else:
-            print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))
-
-    def print_entries(by='function'):
-        entries = dedup_entries(results, by=by)
-
-        if not args.get('diff'):
-            print_header(by=by)
-            for name, (hits, count) in sorted(entries.items()):
-                print("%-36s %11s %7s" % (name,
-                    '%d/%d' % (hits, count)
-                        if count else '-',
-                    '%.1f%%' % (100*hits/count)
-                        if count else '-'))
-        else:
-            prev_entries = dedup_entries(prev_results, by=by)
-            diff = diff_entries(prev_entries, entries)
-            print_header(by='%s (%d added, %d removed)' % (by,
-                sum(1 for _, old, _, _, _, _, _ in diff.values() if not old),
-                sum(1 for _, _, _, new, _, _, _ in diff.values() if not new)))
-            for name, (
-                    old_hits, old_count,
-                    new_hits, new_count,
-                    diff_hits, diff_count, ratio) in sorted(diff.items(),
-                        key=lambda x: (-x[1][6], x)):
-                if ratio or args.get('all'):
-                    print("%-36s %11s %7s %11s %7s %11s%s" % (name,
-                        '%d/%d' % (old_hits, old_count)
-                            if old_count else '-',
-                        '%.1f%%' % (100*old_hits/old_count)
-                            if old_count else '-',
-                        '%d/%d' % (new_hits, new_count)
-                            if new_count else '-',
-                        '%.1f%%' % (100*new_hits/new_count)
-                            if new_count else '-',
-                        '%+d/%+d' % (diff_hits, diff_count),
-                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))
-
-    def print_totals():
-        if not args.get('diff'):
-            print("%-36s %11s %7s" % ('TOTAL',
-                '%d/%d' % (total_hits, total_count)
-                    if total_count else '-',
-                '%.1f%%' % (100*total_hits/total_count)
-                    if total_count else '-'))
-        else:
-            ratio = ((total_hits/total_count
-                    if total_count else 1.0)
-                - (prev_total_hits/prev_total_count
-                    if prev_total_count else 1.0))
-            print("%-36s %11s %7s %11s %7s %11s%s" % ('TOTAL',
-                '%d/%d' % (prev_total_hits, prev_total_count)
-                    if prev_total_count else '-',
-                '%.1f%%' % (100*prev_total_hits/prev_total_count)
-                    if prev_total_count else '-',
-                '%d/%d' % (total_hits, total_count)
-                    if total_count else '-',
-                '%.1f%%' % (100*total_hits/total_count)
-                    if total_count else '-',
-                '%+d/%+d' % (total_hits-prev_total_hits,
-                    total_count-prev_total_count),
-                ' (%+.1f%%)' % (100*ratio) if ratio else ''))
-
-    if args.get('quiet'):
-        pass
-    elif args.get('summary'):
-        print_header()
-        print_totals()
-    elif args.get('files'):
-        print_entries(by='file')
-        print_totals()
-    else:
-        print_entries(by='function')
-        print_totals()
-
-if __name__ == "__main__":
-    import argparse
-    import sys
-    parser = argparse.ArgumentParser(
-        description="Parse and report coverage info from .info files \
-            generated by lcov")
-    parser.add_argument('info_paths', nargs='*', default=INFO_PATHS,
-        help="Description of where to find *.info files. May be a directory \
-            or list of paths. *.info files will be merged to show the total \
-            coverage. Defaults to %r." % INFO_PATHS)
-    parser.add_argument('-v', '--verbose', action='store_true',
-        help="Output commands that run behind the scenes.")
-    parser.add_argument('-o', '--output',
-        help="Specify CSV file to store results.")
-    parser.add_argument('-u', '--use',
-        help="Don't do any work, instead use this CSV file.")
-    parser.add_argument('-d', '--diff',
-        help="Specify CSV file to diff code size against.")
-    parser.add_argument('-a', '--all', action='store_true',
-        help="Show all functions, not just the ones that changed.")
-    parser.add_argument('--files', action='store_true',
-        help="Show file-level coverage.")
-    parser.add_argument('-s', '--summary', action='store_true',
-        help="Only show the total coverage.")
-    parser.add_argument('-q', '--quiet', action='store_true',
-        help="Don't show anything, useful with -o.")
-    sys.exit(main(**vars(parser.parse_args())))
191
scripts/test.py
@@ -20,50 +20,19 @@ import pty
import errno
import signal

-TEST_PATHS = 'tests'
+TESTDIR = 'tests'
RULES = """
-# add block devices to sources
-TESTSRC ?= $(SRC) $(wildcard bd/*.c)
-
define FLATTEN
-%(path)s%%$(subst /,.,$(target)): $(target)
+tests/%$(subst /,.,$(target)): $(target)
    ./scripts/explode_asserts.py $$< -o $$@
endef
-$(foreach target,$(TESTSRC),$(eval $(FLATTEN)))
+$(foreach target,$(SRC),$(eval $(FLATTEN)))

--include %(path)s*.d
+-include tests/*.d
+
.SECONDARY:
-
-%(path)s.test: %(path)s.test.o \\
-        $(foreach t,$(subst /,.,$(TESTSRC:.c=.o)),%(path)s.$t)
+%.test: %.test.o $(foreach f,$(subst /,.,$(SRC:.c=.o)),%.$f)
    $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
-
-# needed in case builddir is different
-%(path)s%%.o: %(path)s%%.c
-    $(CC) -c -MMD $(CFLAGS) $< -o $@
"""
-COVERAGE_RULES = """
-%(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage
-
-# delete lingering coverage
-%(path)s.test: | %(path)s.info.clean
-.PHONY: %(path)s.info.clean
-%(path)s.info.clean:
-    rm -f %(path)s*.gcda
-
-# accumulate coverage info
-.PHONY: %(path)s.info
-%(path)s.info:
-    $(strip $(LCOV) -c \\
-        $(addprefix -d ,$(wildcard %(path)s*.gcda)) \\
-        --rc 'geninfo_adjust_src_path=$(shell pwd)' \\
-        -o $@)
-    $(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@
-ifdef COVERAGETARGET
-    $(strip $(LCOV) -a $@ \\
-        $(addprefix -a ,$(wildcard $(COVERAGETARGET))) \\
-        -o $(COVERAGETARGET))
-endif
-"""
GLOBALS = """
//////////////// AUTOGENERATED TEST ////////////////
@@ -150,8 +119,6 @@ class TestCase:
        self.if_ = config.get('if', None)
        self.in_ = config.get('in', None)

-        self.result = None
-
    def __str__(self):
        if hasattr(self, 'permno'):
            if any(k not in self.case.defines for k in self.defines):
@@ -212,7 +179,7 @@ class TestCase:
                len(self.filter) >= 2 and
                self.filter[1] != self.permno):
            return False
-        elif args.get('no_internal') and self.in_ is not None:
+        elif args.get('no_internal', False) and self.in_ is not None:
            return False
        elif self.if_ is not None:
            if_ = self.if_
@@ -246,7 +213,7 @@ class TestCase:
        try:
            with open(disk, 'w') as f:
                f.truncate(0)
-                if args.get('verbose'):
+                if args.get('verbose', False):
                    print('truncate --size=0', disk)
        except FileNotFoundError:
            pass
@@ -270,14 +237,14 @@ class TestCase:
                    '-ex', 'r'])
            ncmd.extend(['--args'] + cmd)

-            if args.get('verbose'):
+            if args.get('verbose', False):
                print(' '.join(shlex.quote(c) for c in ncmd))
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            sys.exit(sp.call(ncmd))

        # run test case!
        mpty, spty = pty.openpty()
-        if args.get('verbose'):
+        if args.get('verbose', False):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd, stdout=spty, stderr=spty)
        os.close(spty)
@@ -293,7 +260,7 @@ class TestCase:
                    break
                raise
            stdout.append(line)
-            if args.get('verbose'):
+            if args.get('verbose', False):
                sys.stdout.write(line)
            # intercept asserts
            m = re.match(
@@ -332,7 +299,7 @@ class ValgrindTestCase(TestCase):
        return not self.leaky and super().shouldtest(**args)

    def test(self, exec=[], **args):
-        verbose = args.get('verbose')
+        verbose = args.get('verbose', False)
        uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1)
        exec = [
            'valgrind',
@@ -384,17 +351,12 @@ class TestSuite:
        self.name = os.path.basename(path)
        if self.name.endswith('.toml'):
            self.name = self.name[:-len('.toml')]
-        if args.get('build_dir'):
-            self.toml = path
-            self.path = args['build_dir'] + '/' + path
-        else:
-            self.toml = path
-            self.path = path
+        self.path = path
        self.classes = classes
        self.defines = defines.copy()
        self.filter = filter

-        with open(self.toml) as f:
+        with open(path) as f:
            # load tests
            config = toml.load(f)

@@ -505,7 +467,7 @@ class TestSuite:

    def build(self, **args):
        # build test files
-        tf = open(self.path + '.test.tc', 'w')
+        tf = open(self.path + '.test.c.t', 'w')
        tf.write(GLOBALS)
        if self.code is not None:
            tf.write('#line %d "%s"\n' % (self.code_lineno, self.path))
@@ -515,7 +477,7 @@ class TestSuite:
        for case in self.cases:
            if case.in_ not in tfs:
                tfs[case.in_] = open(self.path+'.'+
-                    re.sub('(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w')
+                    case.in_.replace('/', '.')+'.t', 'w')
                tfs[case.in_].write('#line 1 "%s"\n' % case.in_)
                with open(case.in_) as f:
                    for line in f:
@@ -554,33 +516,25 @@ class TestSuite:

        # write makefiles
        with open(self.path + '.mk', 'w') as mk:
-            mk.write(RULES.replace(4*' ', '\t') % dict(path=self.path))
+            mk.write(RULES.replace(4*' ', '\t'))
            mk.write('\n')

-            # add coverage hooks?
-            if args.get('coverage'):
-                mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict(
-                    path=self.path))
-                mk.write('\n')
-
            # add truely global defines globally
            for k, v in sorted(self.defines.items()):
-                mk.write('%s.test: override CFLAGS += -D%s=%r\n'
-                    % (self.path, k, v))
+                mk.write('%s: override CFLAGS += -D%s=%r\n' % (
+                    self.path+'.test', k, v))

            for path in tfs:
                if path is None:
                    mk.write('%s: %s | %s\n' % (
                        self.path+'.test.c',
-                        self.toml,
-                        self.path+'.test.tc'))
+                        self.path,
+                        self.path+'.test.c.t'))
                else:
                    mk.write('%s: %s %s | %s\n' % (
                        self.path+'.'+path.replace('/', '.'),
-                        self.toml,
-                        path,
-                        self.path+'.'+re.sub('(\.c)?$', '.tc',
-                            path.replace('/', '.'))))
+                        self.path, path,
+                        self.path+'.'+path.replace('/', '.')+'.t'))
                mk.write('\t./scripts/explode_asserts.py $| -o $@\n')

        self.makefile = self.path + '.mk'
@@ -603,7 +557,7 @@ class TestSuite:
            if not args.get('verbose', True):
                sys.stdout.write(FAIL)
                sys.stdout.flush()
-            if not args.get('keep_going'):
+            if not args.get('keep_going', False):
                if not args.get('verbose', True):
                    sys.stdout.write('\n')
                raise
@@ -625,30 +579,30 @@ def main(**args):

    # and what class of TestCase to run
    classes = []
-    if args.get('normal'):
+    if args.get('normal', False):
        classes.append(TestCase)
-    if args.get('reentrant'):
+    if args.get('reentrant', False):
        classes.append(ReentrantTestCase)
-    if args.get('valgrind'):
+    if args.get('valgrind', False):
        classes.append(ValgrindTestCase)
    if not classes:
        classes = [TestCase]

    suites = []
-    for testpath in args['test_paths']:
+    for testpath in args['testpaths']:
        # optionally specified test case/perm
        testpath, *filter = testpath.split('#')
        filter = [int(f) for f in filter]

        # figure out the suite's toml file
        if os.path.isdir(testpath):
-            testpath = testpath + '/*.toml'
+            testpath = testpath + '/test_*.toml'
        elif os.path.isfile(testpath):
            testpath = testpath
        elif testpath.endswith('.toml'):
-            testpath = TEST_PATHS + '/' + testpath
+            testpath = TESTDIR + '/' + testpath
        else:
-            testpath = TEST_PATHS + '/' + testpath + '.toml'
+            testpath = TESTDIR + '/' + testpath + '.toml'

        # find tests
        for path in glob.glob(testpath):
@@ -674,7 +628,7 @@ def main(**args):
        list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
        [target for target in targets])
    mpty, spty = pty.openpty()
-    if args.get('verbose'):
+    if args.get('verbose', False):
        print(' '.join(shlex.quote(c) for c in cmd))
    proc = sp.Popen(cmd, stdout=spty, stderr=spty)
    os.close(spty)
@@ -688,14 +642,14 @@ def main(**args):
                break
            raise
        stdout.append(line)
-        if args.get('verbose'):
+        if args.get('verbose', False):
            sys.stdout.write(line)
        # intercept warnings
        m = re.match(
            '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
            .format('(?:\033\[[\d;]*.| )*', 'warning'),
            line)
-        if m and not args.get('verbose'):
+        if m and not args.get('verbose', False):
            try:
                with open(m.group(1)) as f:
                    lineno = int(m.group(2))
@@ -708,26 +662,27 @@ def main(**args):
        except:
            pass
    proc.wait()

    if proc.returncode != 0:
-        if not args.get('verbose'):
+        if not args.get('verbose', False):
            for line in stdout:
                sys.stdout.write(line)
-        sys.exit(-1)
+        sys.exit(-3)

    print('built %d test suites, %d test cases, %d permutations' % (
        len(suites),
        sum(len(suite.cases) for suite in suites),
        sum(len(suite.perms) for suite in suites)))

-    total = 0
+    filtered = 0
    for suite in suites:
        for perm in suite.perms:
-            total += perm.shouldtest(**args)
-    if total != sum(len(suite.perms) for suite in suites):
-        print('filtered down to %d permutations' % total)
+            filtered += perm.shouldtest(**args)
+    if filtered != sum(len(suite.perms) for suite in suites):
+        print('filtered down to %d permutations' % filtered)

    # only requested to build?
-    if args.get('build'):
+    if args.get('build', False):
        return 0

    print('====== testing ======')
@@ -742,12 +697,15 @@ def main(**args):
    failed = 0
    for suite in suites:
        for perm in suite.perms:
+            if not hasattr(perm, 'result'):
+                continue
+
            if perm.result == PASS:
                passed += 1
-            elif isinstance(perm.result, TestFailure):
+            else:
                sys.stdout.write(
                    "\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m "
-                    "{perm} failed\n".format(
+                    "{perm} failed with {returncode}\n".format(
                        perm=perm, path=perm.suite.path, lineno=perm.lineno,
                        returncode=perm.result.returncode or 0))
                if perm.result.stdout:
@@ -765,33 +723,11 @@ def main(**args):
                    sys.stdout.write('\n')
            failed += 1

-    if args.get('coverage'):
-        # collect coverage info
-        # why -j1? lcov doesn't work in parallel because of gcov limitations
-        cmd = (['make', '-j1', '-f', 'Makefile'] +
-            list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
-            (['COVERAGETARGET=%s' % args['coverage']]
-                if isinstance(args['coverage'], str) else []) +
-            [suite.path + '.info' for suite in suites
-                if any(perm.result == PASS for perm in suite.perms)])
-        if args.get('verbose'):
-            print(' '.join(shlex.quote(c) for c in cmd))
-        proc = sp.Popen(cmd,
-            stdout=sp.PIPE if not args.get('verbose') else None,
-            stderr=sp.STDOUT if not args.get('verbose') else None,
-            universal_newlines=True)
-        proc.wait()
-        if proc.returncode != 0:
-            if not args.get('verbose'):
-                for line in proc.stdout:
-                    sys.stdout.write(line)
-            sys.exit(-1)
-
-    if args.get('gdb'):
+    if args.get('gdb', False):
        failure = None
        for suite in suites:
            for perm in suite.perms:
-                if isinstance(perm.result, TestFailure):
+                if getattr(perm, 'result', PASS) != PASS:
                    failure = perm.result
        if failure is not None:
            print('======= gdb ======')
@@ -799,22 +735,20 @@ def main(**args):
            failure.case.test(failure=failure, **args)
            sys.exit(0)

-    print('tests passed %d/%d (%.2f%%)' % (passed, total,
-        100*(passed/total if total else 1.0)))
-    print('tests failed %d/%d (%.2f%%)' % (failed, total,
-        100*(failed/total if total else 1.0)))
+    print('tests passed: %d' % passed)
+    print('tests failed: %d' % failed)
    return 1 if failed > 0 else 0

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(
        description="Run parameterized tests in various configurations.")
-    parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS],
+    parser.add_argument('testpaths', nargs='*', default=[TESTDIR],
        help="Description of test(s) to run. By default, this is all tests \
            found in the \"{0}\" directory. Here, you can specify a different \
-            directory of tests, a specific file, a suite by name, and even \
-            specific test cases and permutations. For example \
-            \"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TEST_PATHS))
+            directory of tests, a specific file, a suite by name, and even a \
+            specific test case by adding brackets. For example \
+            \"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TESTDIR))
    parser.add_argument('-D', action='append', default=[],
        help="Overriding parameter definitions.")
    parser.add_argument('-v', '--verbose', action='store_true',
@@ -835,19 +769,10 @@ if __name__ == "__main__":
        help="Run tests normally.")
    parser.add_argument('-r', '--reentrant', action='store_true',
        help="Run reentrant tests with simulated power-loss.")
-    parser.add_argument('--valgrind', action='store_true',
+    parser.add_argument('-V', '--valgrind', action='store_true',
        help="Run non-leaky tests under valgrind to check for memory leaks.")
-    parser.add_argument('--exec', default=[], type=lambda e: e.split(),
+    parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '),
        help="Run tests with another executable prefixed on the command line.")
-    parser.add_argument('--disk',
+    parser.add_argument('-d', '--disk',
        help="Specify a file to use for persistent/reentrant tests.")
-    parser.add_argument('--coverage', type=lambda x: x if x else True,
-        nargs='?', const='',
-        help="Collect coverage information during testing. This uses lcov/gcov \
-            to accumulate coverage information into *.info files. May also \
-            a path to a *.info file to accumulate coverage info into.")
-    parser.add_argument('--build-dir',
-        help="Build relative to the specified directory instead of the \
-            current directory.")

    sys.exit(main(**vars(parser.parse_args())))
tests/test_truncate.toml
@@ -392,48 +392,3 @@ code = '''

    lfs_unmount(&lfs) => 0;
'''
-
-[[case]] # noop truncate
-define.MEDIUMSIZE = [32, 2048]
-code = '''
-    lfs_format(&lfs, &cfg) => 0;
-    lfs_mount(&lfs, &cfg) => 0;
-    lfs_file_open(&lfs, &file, "baldynoop",
-            LFS_O_RDWR | LFS_O_CREAT) => 0;
-
-    strcpy((char*)buffer, "hair");
-    size = strlen((char*)buffer);
-    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
-        lfs_file_write(&lfs, &file, buffer, size) => size;
-
-        // this truncate should do nothing
-        lfs_file_truncate(&lfs, &file, j+size) => 0;
-    }
-    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
-
-    lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
-    // should do nothing again
-    lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
-    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
-
-    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
-        lfs_file_read(&lfs, &file, buffer, size) => size;
-        memcmp(buffer, "hair", size) => 0;
-    }
-    lfs_file_read(&lfs, &file, buffer, size) => 0;
-
-    lfs_file_close(&lfs, &file) => 0;
-    lfs_unmount(&lfs) => 0;
-
-    // still there after reboot?
-    lfs_mount(&lfs, &cfg) => 0;
-    lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
-    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
-    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
-        lfs_file_read(&lfs, &file, buffer, size) => size;
-        memcmp(buffer, "hair", size) => 0;
-    }
-    lfs_file_read(&lfs, &file, buffer, size) => 0;
-    lfs_file_close(&lfs, &file) => 0;
-    lfs_unmount(&lfs) => 0;
-'''