mirror of
				https://github.com/eledio-devices/thirdparty-littlefs.git
				synced 2025-10-31 16:14:16 +01:00 
			
		
		
		
	Compare commits
	
		
			37 Commits
		
	
	
		
			v2.3.0
			...
			tim-nordel
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
|  | e3ace9ce98 | ||
|  | 40bebef368 | ||
|  | 2612ef130b | ||
|  | a3ea209503 | ||
|  | 45d02e5750 | ||
|  | 5c87b6864f | ||
|  | 8c1931ac55 | ||
|  | 1863dc7883 | ||
|  | 3d4e4f2085 | ||
|  | a2c744c8f8 | ||
|  | c0cc0a417e | ||
|  | bca64d76cf | ||
|  | cab1d6cca6 | ||
|  | c9eed1f181 | ||
|  | e7e4b352bd | ||
|  | 9449ef4be4 | ||
|  | cfe779fc08 | ||
|  | 0db6466984 | ||
|  | 21488d9e06 | ||
|  | 10a08833c6 | ||
|  | 47d6b2fcf3 | ||
|  | 745d98cde0 | ||
|  | 3216b07c3b | ||
|  | 6592719d28 | ||
|  | c9110617b3 | ||
|  | 104d65113d | ||
|  | 6d3e4ac33e | ||
|  | 9d6546071b | ||
|  | b84fb6bcc5 | ||
|  | 887f3660ed | ||
|  | eeeceb9e30 | ||
|  | b2235e956d | ||
|  | 6bb4043154 | ||
|  | 2b804537b0 | ||
|  | d804c2d3b7 | ||
|  | 37f4de2976 | ||
|  | 6b16dafb4d | 
							
								
								
									
										26
									
								
								.github/workflows/post-release.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										26
									
								
								.github/workflows/post-release.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,26 @@ | ||||
| name: post-release | ||||
| on: | ||||
|   release: | ||||
|     branches: [master] | ||||
|     types: [released] | ||||
|  | ||||
| jobs: | ||||
|   post-release: | ||||
|     runs-on: ubuntu-18.04 | ||||
|     steps: | ||||
|       # trigger post-release in dependency repo, this indirection allows the | ||||
|       # dependency repo to be updated often without affecting this repo. At | ||||
|       # the time of this comment, the dependency repo is responsible for | ||||
|       # creating PRs for other dependent repos post-release. | ||||
|       - name: trigger-post-release | ||||
|         continue-on-error: true | ||||
|         run: | | ||||
|           curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \ | ||||
|             "$GITHUB_API_URL/repos/${{secrets.POST_RELEASE_REPO}}/dispatches" \ | ||||
|             -d "$(jq -n '{ | ||||
|               event_type: "post-release", | ||||
|               client_payload: { | ||||
|                 repo: env.GITHUB_REPOSITORY, | ||||
|                 version: "${{github.event.release.tag_name}}"}}' \ | ||||
|               | tee /dev/stderr)" | ||||
|  | ||||
							
								
								
									
										215
									
								
								.github/workflows/release.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										215
									
								
								.github/workflows/release.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,215 @@ | ||||
| name: release | ||||
| on: | ||||
|   workflow_run: | ||||
|     workflows: [test] | ||||
|     branches: [master] | ||||
|     types: [completed] | ||||
|  | ||||
| jobs: | ||||
|   release: | ||||
|     runs-on: ubuntu-18.04 | ||||
|  | ||||
|     # need to manually check for a couple things | ||||
|     # - tests passed? | ||||
|     # - we are the most recent commit on master? | ||||
|     if: ${{github.event.workflow_run.conclusion == 'success' && | ||||
|       github.event.workflow_run.head_sha == github.sha}} | ||||
|  | ||||
|     steps: | ||||
|       - uses: actions/checkout@v2 | ||||
|         with: | ||||
|           ref: ${{github.event.workflow_run.head_sha}} | ||||
|           # need workflow access since we push branches | ||||
|           # containing workflows | ||||
|           token: ${{secrets.BOT_TOKEN}} | ||||
|           # need all tags | ||||
|           fetch-depth: 0 | ||||
|  | ||||
|       # try to get results from tests | ||||
|       - uses: dawidd6/action-download-artifact@v2 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           workflow: ${{github.event.workflow_run.name}} | ||||
|           run_id: ${{github.event.workflow_run.id}} | ||||
|           name: results | ||||
|           path: results | ||||
|  | ||||
|       - name: find-version | ||||
|         run: | | ||||
|           # rip version from lfs.h | ||||
|           LFS_VERSION="$(grep -o '^#define LFS_VERSION .*$' lfs.h \ | ||||
|             | awk '{print $3}')" | ||||
|           LFS_VERSION_MAJOR="$((0xffff & ($LFS_VERSION >> 16)))" | ||||
|           LFS_VERSION_MINOR="$((0xffff & ($LFS_VERSION >>  0)))" | ||||
|  | ||||
|           # find a new patch version based on what we find in our tags | ||||
|           LFS_VERSION_PATCH="$( \ | ||||
|             ( git describe --tags --abbrev=0 \ | ||||
|                 --match="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.*" \ | ||||
|               || echo 'v0.0.-1' ) \ | ||||
|             | awk -F '.' '{print $3+1}')" | ||||
|  | ||||
|           # found new version | ||||
|           LFS_VERSION="v$LFS_VERSION_MAJOR` | ||||
|             `.$LFS_VERSION_MINOR` | ||||
|             `.$LFS_VERSION_PATCH" | ||||
|           echo "LFS_VERSION=$LFS_VERSION" | ||||
|           echo "LFS_VERSION=$LFS_VERSION" >> $GITHUB_ENV | ||||
|           echo "LFS_VERSION_MAJOR=$LFS_VERSION_MAJOR" >> $GITHUB_ENV | ||||
|           echo "LFS_VERSION_MINOR=$LFS_VERSION_MINOR" >> $GITHUB_ENV | ||||
|           echo "LFS_VERSION_PATCH=$LFS_VERSION_PATCH" >> $GITHUB_ENV | ||||
|  | ||||
|       # try to find previous version? | ||||
|       - name: find-prev-version | ||||
|         continue-on-error: true | ||||
|         run: | | ||||
|           LFS_PREV_VERSION="$(git describe --tags --abbrev=0 --match 'v*')" | ||||
|           echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" | ||||
|           echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" >> $GITHUB_ENV | ||||
|  | ||||
|       # try to find results from tests | ||||
|       - name: collect-results | ||||
|         run: | | ||||
|           # previous results to compare against? | ||||
|           [ -n "$LFS_PREV_VERSION" ] && curl -sS \ | ||||
|             "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/` | ||||
|               `status/$LFS_PREV_VERSION" \ | ||||
|             | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \ | ||||
|             >> prev-results.json \ | ||||
|             || true | ||||
|  | ||||
|           # unfortunately these each have their own format | ||||
|           [ -e results/code-thumb.csv ] && ( \ | ||||
|             export PREV="$(jq -re ' | ||||
|                   select(.context == "results / code").description | ||||
|                   | capture("Code size is (?<result>[0-9]+)").result' \ | ||||
|                 prev-results.json || echo 0)" | ||||
|             ./scripts/code.py -u results/code-thumb.csv -s | awk ' | ||||
|               NR==2 {printf "Code size,%d B",$2} | ||||
|               NR==2 && ENVIRON["PREV"]+0 != 0 { | ||||
|                 printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]} | ||||
|               NR==2 {printf "\n"}' \ | ||||
|             >> results.csv) | ||||
|           [ -e results/code-thumb-readonly.csv ] && ( \ | ||||
|             export PREV="$(jq -re ' | ||||
|                   select(.context == "results / code (readonly)").description | ||||
|                   | capture("Code size is (?<result>[0-9]+)").result' \ | ||||
|                 prev-results.json || echo 0)" | ||||
|             ./scripts/code.py -u results/code-thumb-readonly.csv -s | awk ' | ||||
|               NR==2 {printf "Code size<br/>(readonly),%d B",$2} | ||||
|               NR==2 && ENVIRON["PREV"]+0 != 0 { | ||||
|                 printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]} | ||||
|               NR==2 {printf "\n"}' \ | ||||
|             >> results.csv) | ||||
|           [ -e results/code-thumb-threadsafe.csv ] && ( \ | ||||
|             export PREV="$(jq -re ' | ||||
|                   select(.context == "results / code (threadsafe)").description | ||||
|                   | capture("Code size is (?<result>[0-9]+)").result' \ | ||||
|                 prev-results.json || echo 0)" | ||||
|             ./scripts/code.py -u results/code-thumb-threadsafe.csv -s | awk ' | ||||
|               NR==2 {printf "Code size<br/>(threadsafe),%d B",$2} | ||||
|               NR==2 && ENVIRON["PREV"]+0 != 0 { | ||||
|                 printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]} | ||||
|               NR==2 {printf "\n"}' \ | ||||
|             >> results.csv) | ||||
|           [ -e results/code-thumb-migrate.csv ] && ( \ | ||||
|             export PREV="$(jq -re ' | ||||
|                   select(.context == "results / code (migrate)").description | ||||
|                   | capture("Code size is (?<result>[0-9]+)").result' \ | ||||
|                 prev-results.json || echo 0)" | ||||
|             ./scripts/code.py -u results/code-thumb-migrate.csv -s | awk ' | ||||
|               NR==2 {printf "Code size<br/>(migrate),%d B",$2} | ||||
|               NR==2 && ENVIRON["PREV"]+0 != 0 { | ||||
|                 printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]} | ||||
|               NR==2 {printf "\n"}' \ | ||||
|             >> results.csv) | ||||
|           [ -e results/code-thumb-error-asserts.csv ] && ( \ | ||||
|             export PREV="$(jq -re ' | ||||
|                   select(.context == "results / code (error-asserts)").description | ||||
|                   | capture("Code size is (?<result>[0-9]+)").result' \ | ||||
|                 prev-results.json || echo 0)" | ||||
|             ./scripts/code.py -u results/code-thumb-error-asserts.csv -s | awk ' | ||||
|               NR==2 {printf "Code size<br/>(error-asserts),%d B",$2} | ||||
|               NR==2 && ENVIRON["PREV"]+0 != 0 { | ||||
|                 printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]} | ||||
|               NR==2 {printf "\n"}' \ | ||||
|             >> results.csv) | ||||
|           [ -e results/coverage.csv ] && ( \ | ||||
|             export PREV="$(jq -re ' | ||||
|                   select(.context == "results / coverage").description | ||||
|                   | capture("Coverage is (?<result>[0-9\\.]+)").result' \ | ||||
|                 prev-results.json || echo 0)" | ||||
|             ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' ' | ||||
|               NR==2 {printf "Coverage,%.1f%% of %d lines",$4,$3} | ||||
|               NR==2 && ENVIRON["PREV"]+0 != 0 { | ||||
|                 printf " (%+.1f%%)",$4-ENVIRON["PREV"]} | ||||
|               NR==2 {printf "\n"}' \ | ||||
|             >> results.csv) | ||||
|  | ||||
|           # transpose to GitHub table | ||||
|           [ -e results.csv ] || exit 0 | ||||
|           awk -F ',' ' | ||||
|             {label[NR]=$1; value[NR]=$2} | ||||
|             END { | ||||
|               for (r=1; r<=NR; r++) {printf "| %s ",label[r]}; printf "|\n"; | ||||
|               for (r=1; r<=NR; r++) {printf "|:--"}; printf "|\n"; | ||||
|               for (r=1; r<=NR; r++) {printf "| %s ",value[r]}; printf "|\n"}' \ | ||||
|             results.csv > results.txt | ||||
|           echo "RESULTS:" | ||||
|           cat results.txt | ||||
|  | ||||
|       # find changes from history | ||||
|       - name: collect-changes | ||||
|         run: | | ||||
|           [ -n "$LFS_PREV_VERSION" ] || exit 0 | ||||
|           # use explicit link to github commit so that release notes can | ||||
|           # be copied elsewhere | ||||
|           git log "$LFS_PREV_VERSION.." \ | ||||
|             --grep='^Merge' --invert-grep \ | ||||
|             --format="format:[\`%h\`](` | ||||
|               `https://github.com/$GITHUB_REPOSITORY/commit/%h) %s" \ | ||||
|             > changes.txt | ||||
|           echo "CHANGES:" | ||||
|           cat changes.txt | ||||
|  | ||||
|       # create and update major branches (vN and vN-prefix) | ||||
|       - name: create-major-branches | ||||
|         run: | | ||||
|           # create major branch | ||||
|           git branch "v$LFS_VERSION_MAJOR" HEAD | ||||
|  | ||||
|           # create major prefix branch | ||||
|           git config user.name ${{secrets.BOT_USER}} | ||||
|           git config user.email ${{secrets.BOT_EMAIL}} | ||||
|           git fetch "https://github.com/$GITHUB_REPOSITORY.git" \ | ||||
|             "v$LFS_VERSION_MAJOR-prefix" || true | ||||
|           ./scripts/prefix.py "lfs$LFS_VERSION_MAJOR" | ||||
|           git branch "v$LFS_VERSION_MAJOR-prefix" $( \ | ||||
|             git commit-tree $(git write-tree) \ | ||||
|               $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \ | ||||
|               -p HEAD \ | ||||
|               -m "Generated v$LFS_VERSION_MAJOR prefixes") | ||||
|           git reset --hard | ||||
|  | ||||
|           # push! | ||||
|           git push --atomic origin \ | ||||
|             "v$LFS_VERSION_MAJOR" \ | ||||
|             "v$LFS_VERSION_MAJOR-prefix" | ||||
|  | ||||
|       # build release notes | ||||
|       - name: create-release | ||||
|         run: | | ||||
|           # create release and patch version tag (vN.N.N) | ||||
|           # only draft if not a patch release | ||||
|           [ -e results.txt ] && export RESULTS="$(cat results.txt)" | ||||
|           [ -e changes.txt ] && export CHANGES="$(cat changes.txt)" | ||||
|           curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \ | ||||
|             "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \ | ||||
|             -d "$(jq -n '{ | ||||
|               tag_name: env.LFS_VERSION, | ||||
|               name: env.LFS_VERSION | rtrimstr(".0"), | ||||
|               target_commitish: "${{github.event.workflow_run.head_sha}}", | ||||
|               draft: env.LFS_VERSION | endswith(".0"), | ||||
|               body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \ | ||||
|               | tee /dev/stderr)" | ||||
|  | ||||
							
								
								
									
										55
									
								
								.github/workflows/status.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										55
									
								
								.github/workflows/status.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,55 @@ | ||||
| name: status | ||||
| on: | ||||
|   workflow_run: | ||||
|     workflows: [test] | ||||
|     types: [completed] | ||||
|  | ||||
| jobs: | ||||
|   status: | ||||
|     runs-on: ubuntu-18.04 | ||||
|     steps: | ||||
|       # custom statuses? | ||||
|       - uses: dawidd6/action-download-artifact@v2 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           workflow: ${{github.event.workflow_run.name}} | ||||
|           run_id: ${{github.event.workflow_run.id}} | ||||
|           name: status | ||||
|           path: status | ||||
|       - name: update-status | ||||
|         continue-on-error: true | ||||
|         run: | | ||||
|           ls status | ||||
|           for s in $(shopt -s nullglob ; echo status/*.json) | ||||
|           do | ||||
|             # parse requested status | ||||
|             export STATE="$(jq -er '.state' $s)" | ||||
|             export CONTEXT="$(jq -er '.context' $s)" | ||||
|             export DESCRIPTION="$(jq -er '.description' $s)" | ||||
|             # help lookup URL for job/steps because GitHub makes | ||||
|             # it VERY HARD to link to specific jobs | ||||
|             export TARGET_URL="$( | ||||
|               jq -er '.target_url // empty' $s || ( | ||||
|                 export TARGET_JOB="$(jq -er '.target_job' $s)" | ||||
|                 export TARGET_STEP="$(jq -er '.target_step // ""' $s)" | ||||
|                 curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \ | ||||
|                   "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/` | ||||
|                     `${{github.event.workflow_run.id}}/jobs" \ | ||||
|                   | jq -er '.jobs[] | ||||
|                     | select(.name == env.TARGET_JOB) | ||||
|                     | .html_url | ||||
|                       + "?check_suite_focus=true" | ||||
|                       + ((.steps[] | ||||
|                         | select(.name == env.TARGET_STEP) | ||||
|                         | "#step:\(.number):0") // "")'))" | ||||
|             # update status | ||||
|             curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \ | ||||
|               "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/` | ||||
|                 `${{github.event.workflow_run.head_sha}}" \ | ||||
|               -d "$(jq -n '{ | ||||
|                 state: env.STATE, | ||||
|                 context: env.CONTEXT, | ||||
|                 description: env.DESCRIPTION, | ||||
|                 target_url: env.TARGET_URL}' \ | ||||
|                 | tee /dev/stderr)" | ||||
|           done | ||||
							
								
								
									
										446
									
								
								.github/workflows/test.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										446
									
								
								.github/workflows/test.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,446 @@ | ||||
| name: test | ||||
| on: [push, pull_request] | ||||
|  | ||||
| env: | ||||
|   CFLAGS: -Werror | ||||
|   MAKEFLAGS: -j | ||||
|  | ||||
| jobs: | ||||
|   # run tests | ||||
|   test: | ||||
|     runs-on: ubuntu-18.04 | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         arch: [x86_64, thumb, mips, powerpc] | ||||
|  | ||||
|     steps: | ||||
|       - uses: actions/checkout@v2 | ||||
|       - name: install | ||||
|         run: | | ||||
|           # need toml, also pip3 isn't installed by default? | ||||
|           sudo apt-get update -qq | ||||
|           sudo apt-get install -qq python3 python3-pip lcov | ||||
|           sudo pip3 install toml | ||||
|           gcc --version | ||||
|  | ||||
|           # setup a ram-backed disk to speed up reentrant tests | ||||
|           mkdir disks | ||||
|           sudo mount -t tmpfs -o size=100m tmpfs disks | ||||
|           TESTFLAGS="$TESTFLAGS --disk=disks/disk" | ||||
|  | ||||
|           # collect coverage | ||||
|           mkdir -p coverage | ||||
|           TESTFLAGS="$TESTFLAGS --coverage=` | ||||
|             `coverage/${{github.job}}-${{matrix.arch}}.info" | ||||
|  | ||||
|           echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV | ||||
|  | ||||
|       # cross-compile with ARM Thumb (32-bit, little-endian) | ||||
|       - name: install-thumb | ||||
|         if: ${{matrix.arch == 'thumb'}} | ||||
|         run: | | ||||
|           sudo apt-get install -qq \ | ||||
|             gcc-arm-linux-gnueabi \ | ||||
|             libc6-dev-armel-cross \ | ||||
|             qemu-user | ||||
|           echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV | ||||
|           echo "EXEC=qemu-arm" >> $GITHUB_ENV | ||||
|           arm-linux-gnueabi-gcc --version | ||||
|           qemu-arm -version | ||||
|       # cross-compile with MIPS (32-bit, big-endian) | ||||
|       - name: install-mips | ||||
|         if: ${{matrix.arch == 'mips'}} | ||||
|         run: | | ||||
|           sudo apt-get install -qq \ | ||||
|             gcc-mips-linux-gnu \ | ||||
|             libc6-dev-mips-cross \ | ||||
|             qemu-user | ||||
|           echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV | ||||
|           echo "EXEC=qemu-mips" >> $GITHUB_ENV | ||||
|           mips-linux-gnu-gcc --version | ||||
|           qemu-mips -version | ||||
|       # cross-compile with PowerPC (32-bit, big-endian) | ||||
|       - name: install-powerpc | ||||
|         if: ${{matrix.arch == 'powerpc'}} | ||||
|         run: | | ||||
|           sudo apt-get install -qq \ | ||||
|             gcc-powerpc-linux-gnu \ | ||||
|             libc6-dev-powerpc-cross \ | ||||
|             qemu-user | ||||
|           echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV | ||||
|           echo "EXEC=qemu-ppc" >> $GITHUB_ENV | ||||
|           powerpc-linux-gnu-gcc --version | ||||
|           qemu-ppc -version | ||||
|  | ||||
|       # make sure example can at least compile | ||||
|       - name: test-example | ||||
|         run: | | ||||
|           sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c | ||||
|           make all CFLAGS+=" \ | ||||
|             -Duser_provided_block_device_read=NULL \ | ||||
|             -Duser_provided_block_device_prog=NULL \ | ||||
|             -Duser_provided_block_device_erase=NULL \ | ||||
|             -Duser_provided_block_device_sync=NULL \ | ||||
|             -include stdio.h" | ||||
|           rm test.c | ||||
|  | ||||
|       # test configurations | ||||
|       # normal+reentrant tests | ||||
|       - name: test-default | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk" | ||||
|       # NOR flash: read/prog = 1 block = 4KiB | ||||
|       - name: test-nor | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" | ||||
|       # SD/eMMC: read/prog = 512 block = 512 | ||||
|       - name: test-emmc | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" | ||||
|       # NAND flash: read/prog = 4KiB block = 32KiB | ||||
|       - name: test-nand | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" | ||||
|       # other extreme geometries that are useful for various corner cases | ||||
|       - name: test-no-intrinsics | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_NO_INTRINSICS" | ||||
|       - name: test-byte-writes | ||||
|         # it just takes too long to test byte-level writes when in qemu, | ||||
|         # should be plenty covered by the other configurations | ||||
|         if: ${{matrix.arch == 'x86_64'}} | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" | ||||
|       - name: test-block-cycles | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_BLOCK_CYCLES=1" | ||||
|       - name: test-odd-block-count | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" | ||||
|       - name: test-odd-block-size | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" | ||||
|  | ||||
|       # upload coverage for later coverage | ||||
|       - name: upload-coverage | ||||
|         uses: actions/upload-artifact@v2 | ||||
|         with: | ||||
|           name: coverage | ||||
|           path: coverage | ||||
|           retention-days: 1 | ||||
|  | ||||
|       # update results | ||||
|       - name: results-code | ||||
|         run: | | ||||
|           mkdir -p results | ||||
|           make clean | ||||
|           make code \ | ||||
|             CFLAGS+=" \ | ||||
|               -DLFS_NO_ASSERT \ | ||||
|               -DLFS_NO_DEBUG \ | ||||
|               -DLFS_NO_WARN \ | ||||
|               -DLFS_NO_ERROR" \ | ||||
|             CODEFLAGS+="-o results/code-${{matrix.arch}}.csv" | ||||
|       - name: results-code-readonly | ||||
|         run: | | ||||
|           mkdir -p results | ||||
|           make clean | ||||
|           make code \ | ||||
|             CFLAGS+=" \ | ||||
|               -DLFS_NO_ASSERT \ | ||||
|               -DLFS_NO_DEBUG \ | ||||
|               -DLFS_NO_WARN \ | ||||
|               -DLFS_NO_ERROR \ | ||||
|               -DLFS_READONLY" \ | ||||
|             CODEFLAGS+="-o results/code-${{matrix.arch}}-readonly.csv" | ||||
|       - name: results-code-threadsafe | ||||
|         run: | | ||||
|           mkdir -p results | ||||
|           make clean | ||||
|           make code \ | ||||
|             CFLAGS+=" \ | ||||
|               -DLFS_NO_ASSERT \ | ||||
|               -DLFS_NO_DEBUG \ | ||||
|               -DLFS_NO_WARN \ | ||||
|               -DLFS_NO_ERROR \ | ||||
|               -DLFS_THREADSAFE" \ | ||||
|             CODEFLAGS+="-o results/code-${{matrix.arch}}-threadsafe.csv" | ||||
|       - name: results-code-migrate | ||||
|         run: | | ||||
|           mkdir -p results | ||||
|           make clean | ||||
|           make code \ | ||||
|             CFLAGS+=" \ | ||||
|               -DLFS_NO_ASSERT \ | ||||
|               -DLFS_NO_DEBUG \ | ||||
|               -DLFS_NO_WARN \ | ||||
|               -DLFS_NO_ERROR \ | ||||
|               -DLFS_MIGRATE" \ | ||||
|             CODEFLAGS+="-o results/code-${{matrix.arch}}-migrate.csv" | ||||
|       - name: results-code-error-asserts | ||||
|         run: | | ||||
|           mkdir -p results | ||||
|           make clean | ||||
|           make code \ | ||||
|             CFLAGS+=" \ | ||||
|               -DLFS_NO_DEBUG \ | ||||
|               -DLFS_NO_WARN \ | ||||
|               -DLFS_NO_ERROR \ | ||||
|               -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'" \ | ||||
|             CODEFLAGS+="-o results/code-${{matrix.arch}}-error-asserts.csv" | ||||
|       - name: upload-results | ||||
|         uses: actions/upload-artifact@v2 | ||||
|         with: | ||||
|           name: results | ||||
|           path: results | ||||
|       # limit reporting to Thumb, otherwise there would be too many numbers | ||||
|       # flying around for the results to be easily readable | ||||
|       - name: collect-status | ||||
|         if: ${{matrix.arch == 'thumb'}} | ||||
|         run: | | ||||
|           mkdir -p status | ||||
|           for f in $(shopt -s nullglob ; echo results/code*.csv) | ||||
|           do | ||||
|             export STEP="results-code$( | ||||
|               echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p')" | ||||
|             export CONTEXT="results / code$( | ||||
|               echo $f | sed -n 's/.*code-.*-\(.*\).csv/ (\1)/p')" | ||||
|             export PREV="$(curl -sS \ | ||||
|               "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \ | ||||
|               | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[] | ||||
|                 | select(.context == env.CONTEXT).description | ||||
|                 | capture("Code size is (?<result>[0-9]+)").result' \ | ||||
|               || echo 0)" | ||||
|             export DESCRIPTION="$(./scripts/code.py -u $f -s | awk ' | ||||
|               NR==2 {printf "Code size is %d B",$2} | ||||
|               NR==2 && ENVIRON["PREV"]+0 != 0 { | ||||
|                 printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')" | ||||
|             jq -n '{ | ||||
|               state: "success", | ||||
|               context: env.CONTEXT, | ||||
|               description: env.DESCRIPTION, | ||||
|               target_job: "${{github.job}} (${{matrix.arch}})", | ||||
|               target_step: env.STEP}' \ | ||||
|               | tee status/code$( | ||||
|                 echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p').json | ||||
|           done | ||||
|       - name: upload-status | ||||
|         if: ${{matrix.arch == 'thumb'}} | ||||
|         uses: actions/upload-artifact@v2 | ||||
|         with: | ||||
|           name: status | ||||
|           path: status | ||||
|           retention-days: 1 | ||||
|  | ||||
|   # run under Valgrind to check for memory errors | ||||
|   valgrind: | ||||
|     runs-on: ubuntu-18.04 | ||||
|     steps: | ||||
|       - uses: actions/checkout@v2 | ||||
|       - name: install | ||||
|         run: | | ||||
|           # need toml, also pip3 isn't installed by default? | ||||
|           sudo apt-get update -qq | ||||
|           sudo apt-get install -qq python3 python3-pip | ||||
|           sudo pip3 install toml | ||||
|       - name: install-valgrind | ||||
|         run: | | ||||
|           sudo apt-get update -qq | ||||
|           sudo apt-get install -qq valgrind | ||||
|           valgrind --version | ||||
|       # normal tests, we don't need to test all geometries | ||||
|       - name: test-valgrind | ||||
|         run: make test TESTFLAGS+="-k --valgrind" | ||||
|  | ||||
|   # self-host with littlefs-fuse for a fuzz-like test | ||||
|   fuse: | ||||
|     runs-on: ubuntu-18.04 | ||||
|     if: ${{!endsWith(github.ref, '-prefix')}} | ||||
|     steps: | ||||
|       - uses: actions/checkout@v2 | ||||
|       - name: install | ||||
|         run: | | ||||
|           # need toml, also pip3 isn't installed by default? | ||||
|           sudo apt-get update -qq | ||||
|           sudo apt-get install -qq python3 python3-pip libfuse-dev | ||||
|           sudo pip3 install toml | ||||
|           fusermount -V | ||||
|           gcc --version | ||||
|       - uses: actions/checkout@v2 | ||||
|         with: | ||||
|           repository: littlefs-project/littlefs-fuse | ||||
|           ref: v2 | ||||
|           path: littlefs-fuse | ||||
|       - name: setup | ||||
|         run: | | ||||
|           # copy our new version into littlefs-fuse | ||||
|           rm -rf littlefs-fuse/littlefs/* | ||||
|           cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs | ||||
|  | ||||
|           # setup disk for littlefs-fuse | ||||
|           mkdir mount | ||||
|           sudo chmod a+rw /dev/loop0 | ||||
|           dd if=/dev/zero bs=512 count=128K of=disk | ||||
|           losetup /dev/loop0 disk | ||||
|       - name: test | ||||
|         run: | | ||||
|           # self-host test | ||||
|           make -C littlefs-fuse | ||||
|  | ||||
|           littlefs-fuse/lfs --format /dev/loop0 | ||||
|           littlefs-fuse/lfs /dev/loop0 mount | ||||
|  | ||||
|           ls mount | ||||
|           mkdir mount/littlefs | ||||
|           cp -r $(git ls-tree --name-only HEAD) mount/littlefs | ||||
|           cd mount/littlefs | ||||
|           stat . | ||||
|           ls -flh | ||||
|           make -B test | ||||
|  | ||||
|   # test migration using littlefs-fuse | ||||
|   migrate: | ||||
|     runs-on: ubuntu-18.04 | ||||
|     if: ${{!endsWith(github.ref, '-prefix')}} | ||||
|     steps: | ||||
|       - uses: actions/checkout@v2 | ||||
|       - name: install | ||||
|         run: | | ||||
|           # need toml, also pip3 isn't installed by default? | ||||
|           sudo apt-get update -qq | ||||
|           sudo apt-get install -qq python3 python3-pip libfuse-dev | ||||
|           sudo pip3 install toml | ||||
|           fusermount -V | ||||
|           gcc --version | ||||
|       - uses: actions/checkout@v2 | ||||
|         with: | ||||
|           repository: littlefs-project/littlefs-fuse | ||||
|           ref: v2 | ||||
|           path: v2 | ||||
|       - uses: actions/checkout@v2 | ||||
|         with: | ||||
|           repository: littlefs-project/littlefs-fuse | ||||
|           ref: v1 | ||||
|           path: v1 | ||||
|       - name: setup | ||||
|         run: | | ||||
|           # copy our new version into littlefs-fuse | ||||
|           rm -rf v2/littlefs/* | ||||
|           cp -r $(git ls-tree --name-only HEAD) v2/littlefs | ||||
|  | ||||
|           # setup disk for littlefs-fuse | ||||
|           mkdir mount | ||||
|           sudo chmod a+rw /dev/loop0 | ||||
|           dd if=/dev/zero bs=512 count=128K of=disk | ||||
|           losetup /dev/loop0 disk | ||||
|       - name: test | ||||
|         run: | | ||||
|           # compile v1 and v2 | ||||
|           make -C v1 | ||||
|           make -C v2 | ||||
|  | ||||
|           # run self-host test with v1 | ||||
|           v1/lfs --format /dev/loop0 | ||||
|           v1/lfs /dev/loop0 mount | ||||
|  | ||||
|           ls mount | ||||
|           mkdir mount/littlefs | ||||
|           cp -r $(git ls-tree --name-only HEAD) mount/littlefs | ||||
|           cd mount/littlefs | ||||
|           stat . | ||||
|           ls -flh | ||||
|           make -B test | ||||
|  | ||||
|           # attempt to migrate | ||||
|           cd ../.. | ||||
|           fusermount -u mount | ||||
|  | ||||
|           v2/lfs --migrate /dev/loop0 | ||||
|           v2/lfs /dev/loop0 mount | ||||
|  | ||||
|           # run self-host test with v2 right where we left off | ||||
|           ls mount | ||||
|           cd mount/littlefs | ||||
|           stat . | ||||
|           ls -flh | ||||
|           make -B test | ||||
|  | ||||
|   # collect coverage info | ||||
|   coverage: | ||||
|     runs-on: ubuntu-18.04 | ||||
|     needs: [test] | ||||
|     steps: | ||||
|       - uses: actions/checkout@v2 | ||||
|       - name: install | ||||
|         run: | | ||||
|           sudo apt-get update -qq | ||||
|           sudo apt-get install -qq python3 python3-pip lcov | ||||
|           sudo pip3 install toml | ||||
|       # yes we continue-on-error nearly every step, continue-on-error | ||||
|       # at job level apparently still marks a job as failed, which isn't | ||||
|       # what we want | ||||
|       - uses: actions/download-artifact@v2 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           name: coverage | ||||
|           path: coverage | ||||
|       - name: results-coverage | ||||
|         continue-on-error: true | ||||
|         run: | | ||||
|           mkdir -p results | ||||
|           lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \ | ||||
|             -o results/coverage.info | ||||
|           ./scripts/coverage.py results/coverage.info -o results/coverage.csv | ||||
|       - name: upload-results | ||||
|         uses: actions/upload-artifact@v2 | ||||
|         with: | ||||
|           name: results | ||||
|           path: results | ||||
|       - name: collect-status | ||||
|         run: | | ||||
|           mkdir -p status | ||||
|           [ -e results/coverage.csv ] || exit 0 | ||||
|           export STEP="results-coverage" | ||||
|           export CONTEXT="results / coverage" | ||||
|           export PREV="$(curl -sS \ | ||||
|             "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \ | ||||
|             | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[] | ||||
|               | select(.context == env.CONTEXT).description | ||||
|               | capture("Coverage is (?<result>[0-9\\.]+)").result' \ | ||||
|             || echo 0)" | ||||
|           export DESCRIPTION="$( | ||||
|             ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' ' | ||||
|               NR==2 {printf "Coverage is %.1f%% of %d lines",$4,$3} | ||||
|               NR==2 && ENVIRON["PREV"]+0 != 0 { | ||||
|                 printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')" | ||||
|           jq -n '{ | ||||
|             state: "success", | ||||
|             context: env.CONTEXT, | ||||
|             description: env.DESCRIPTION, | ||||
|             target_job: "${{github.job}}", | ||||
|             target_step: env.STEP}' \ | ||||
|             | tee status/coverage.json | ||||
|       - name: upload-status | ||||
|         uses: actions/upload-artifact@v2 | ||||
|         with: | ||||
|           name: status | ||||
|           path: status | ||||
|           retention-days: 1 | ||||
							
								
								
									
										461
									
								
								.travis.yml
									
									
									
									
									
								
							
							
						
						
									
										461
									
								
								.travis.yml
									
									
									
									
									
								
							| @@ -1,461 +0,0 @@ | ||||
| # environment variables | ||||
| env: | ||||
|   global: | ||||
|     - CFLAGS=-Werror | ||||
|     - MAKEFLAGS=-j | ||||
|  | ||||
| # cache installation dirs | ||||
| cache: | ||||
|   pip: true | ||||
|   directories: | ||||
|     - $HOME/.cache/apt | ||||
|  | ||||
| # common installation | ||||
| _: &install-common | ||||
|   # need toml, also pip3 isn't installed by default? | ||||
|   - sudo apt-get install python3 python3-pip | ||||
|   - sudo pip3 install toml | ||||
|   # setup a ram-backed disk to speed up reentrant tests | ||||
|   - mkdir disks | ||||
|   - sudo mount -t tmpfs -o size=100m tmpfs disks | ||||
|   - export TFLAGS="$TFLAGS --disk=disks/disk" | ||||
|  | ||||
| # test cases | ||||
| _: &test-example | ||||
|   # make sure example can at least compile | ||||
|   - sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c && | ||||
|     make all CFLAGS+=" | ||||
|         -Duser_provided_block_device_read=NULL | ||||
|         -Duser_provided_block_device_prog=NULL | ||||
|         -Duser_provided_block_device_erase=NULL | ||||
|         -Duser_provided_block_device_sync=NULL | ||||
|         -include stdio.h" | ||||
| # default tests | ||||
| _: &test-default | ||||
|   # normal+reentrant tests | ||||
|   - make test TFLAGS+="-nrk" | ||||
| # common real-life geometries | ||||
| _: &test-nor | ||||
|   # NOR flash: read/prog = 1 block = 4KiB | ||||
|   - make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" | ||||
| _: &test-emmc | ||||
|   # eMMC: read/prog = 512 block = 512 | ||||
|   - make test TFLAGS+="-nrk -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" | ||||
| _: &test-nand | ||||
|   # NAND flash: read/prog = 4KiB block = 32KiB | ||||
|   - make test TFLAGS+="-nrk -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" | ||||
| # other extreme geometries that are useful for testing various corner cases | ||||
| _: &test-no-intrinsics | ||||
|   - make test TFLAGS+="-nrk -DLFS_NO_INTRINSICS" | ||||
| _: &test-no-inline | ||||
|   - make test TFLAGS+="-nrk -DLFS_INLINE_MAX=0" | ||||
| _: &test-byte-writes | ||||
|   - make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" | ||||
| _: &test-block-cycles | ||||
|   - make test TFLAGS+="-nrk -DLFS_BLOCK_CYCLES=1" | ||||
| _: &test-odd-block-count | ||||
|   - make test TFLAGS+="-nrk -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" | ||||
| _: &test-odd-block-size | ||||
|   - make test TFLAGS+="-nrk -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" | ||||
|  | ||||
| # report size  | ||||
| _: &report-size | ||||
|   # compile and find the code size with the smallest configuration | ||||
|   - make -j1 clean size | ||||
|         OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" | ||||
|         CFLAGS+="-DLFS_NO_ASSERT -DLFS_NO_DEBUG -DLFS_NO_WARN -DLFS_NO_ERROR" | ||||
|         | tee sizes | ||||
|   # update status if we succeeded, compare with master if possible | ||||
|   - | | ||||
|     if [ "$TRAVIS_TEST_RESULT" -eq 0 ] | ||||
|     then | ||||
|         CURR=$(tail -n1 sizes | awk '{print $1}') | ||||
|         PREV=$(curl -u "$GEKY_BOT_STATUSES" https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \ | ||||
|             | jq -re "select(.sha != \"$TRAVIS_COMMIT\") | ||||
|                 | .statuses[] | select(.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\").description | ||||
|                 | capture(\"code size is (?<size>[0-9]+)\").size" \ | ||||
|             || echo 0) | ||||
|    | ||||
|         STATUS="Passed, code size is ${CURR}B" | ||||
|         if [ "$PREV" -ne 0 ] | ||||
|         then | ||||
|             STATUS="$STATUS ($(python -c "print '%+.2f' % (100*($CURR-$PREV)/$PREV.0)")%)" | ||||
|         fi | ||||
|     fi | ||||
|  | ||||
| # stage control | ||||
| stages: | ||||
|   - name: test | ||||
|   - name: deploy | ||||
|     if: branch = master AND type = push | ||||
|  | ||||
| # job control | ||||
| jobs: | ||||
|   # native testing | ||||
|   - &x86 | ||||
|     stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-x86 | ||||
|     install: *install-common | ||||
|     script: [*test-example, *report-size] | ||||
|   - {<<: *x86, script: [*test-default,          *report-size]} | ||||
|   - {<<: *x86, script: [*test-nor,              *report-size]} | ||||
|   - {<<: *x86, script: [*test-emmc,             *report-size]} | ||||
|   - {<<: *x86, script: [*test-nand,             *report-size]} | ||||
|   - {<<: *x86, script: [*test-no-intrinsics,    *report-size]} | ||||
|   - {<<: *x86, script: [*test-no-inline,        *report-size]} | ||||
|   - {<<: *x86, script: [*test-byte-writes,      *report-size]} | ||||
|   - {<<: *x86, script: [*test-block-cycles,     *report-size]} | ||||
|   - {<<: *x86, script: [*test-odd-block-count,  *report-size]} | ||||
|   - {<<: *x86, script: [*test-odd-block-size,   *report-size]} | ||||
|  | ||||
|   # cross-compile with ARM (thumb mode) | ||||
|   - &arm | ||||
|     stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-arm | ||||
|       - CC="arm-linux-gnueabi-gcc --static -mthumb" | ||||
|       - TFLAGS="$TFLAGS --exec=qemu-arm" | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install | ||||
|             gcc-arm-linux-gnueabi | ||||
|             libc6-dev-armel-cross | ||||
|             qemu-user | ||||
|       - arm-linux-gnueabi-gcc --version | ||||
|       - qemu-arm -version | ||||
|     script: [*test-example, *report-size] | ||||
|   - {<<: *arm, script: [*test-default,          *report-size]} | ||||
|   - {<<: *arm, script: [*test-nor,              *report-size]} | ||||
|   - {<<: *arm, script: [*test-emmc,             *report-size]} | ||||
|   - {<<: *arm, script: [*test-nand,             *report-size]} | ||||
|   - {<<: *arm, script: [*test-no-intrinsics,    *report-size]} | ||||
|   - {<<: *arm, script: [*test-no-inline,        *report-size]} | ||||
|   # it just takes way to long to run byte-level writes in qemu, | ||||
|   # note this is still tested in the native tests | ||||
|   #- {<<: *arm, script: [*test-byte-writes,      *report-size]} | ||||
|   - {<<: *arm, script: [*test-block-cycles,     *report-size]} | ||||
|   - {<<: *arm, script: [*test-odd-block-count,  *report-size]} | ||||
|   - {<<: *arm, script: [*test-odd-block-size,   *report-size]} | ||||
|  | ||||
|   # cross-compile with MIPS | ||||
|   - &mips | ||||
|     stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-mips | ||||
|       - CC="mips-linux-gnu-gcc --static" | ||||
|       - TFLAGS="$TFLAGS --exec=qemu-mips" | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install | ||||
|             gcc-mips-linux-gnu | ||||
|             libc6-dev-mips-cross | ||||
|             qemu-user | ||||
|       - mips-linux-gnu-gcc --version | ||||
|       - qemu-mips -version | ||||
|     script: [*test-example, *report-size] | ||||
|   - {<<: *mips, script: [*test-default,          *report-size]} | ||||
|   - {<<: *mips, script: [*test-nor,              *report-size]} | ||||
|   - {<<: *mips, script: [*test-emmc,             *report-size]} | ||||
|   - {<<: *mips, script: [*test-nand,             *report-size]} | ||||
|   - {<<: *mips, script: [*test-no-intrinsics,    *report-size]} | ||||
|   - {<<: *mips, script: [*test-no-inline,        *report-size]} | ||||
|   # it just takes way to long to run byte-level writes in qemu, | ||||
|   # note this is still tested in the native tests | ||||
|   #- {<<: *mips, script: [*test-byte-writes,      *report-size]} | ||||
|   - {<<: *mips, script: [*test-block-cycles,     *report-size]} | ||||
|   - {<<: *mips, script: [*test-odd-block-count,  *report-size]} | ||||
|   - {<<: *mips, script: [*test-odd-block-size,   *report-size]} | ||||
|  | ||||
|   # cross-compile with PowerPC | ||||
|   - &powerpc | ||||
|     stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-powerpc | ||||
|       - CC="powerpc-linux-gnu-gcc --static" | ||||
|       - TFLAGS="$TFLAGS --exec=qemu-ppc" | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install | ||||
|             gcc-powerpc-linux-gnu | ||||
|             libc6-dev-powerpc-cross | ||||
|             qemu-user | ||||
|       - powerpc-linux-gnu-gcc --version | ||||
|       - qemu-ppc -version | ||||
|     script: [*test-example, *report-size] | ||||
|   - {<<: *powerpc, script: [*test-default,          *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-nor,              *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-emmc,             *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-nand,             *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-no-intrinsics,    *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-no-inline,        *report-size]} | ||||
|   # it just takes way to long to run byte-level writes in qemu, | ||||
|   # note this is still tested in the native tests | ||||
|   #- {<<: *powerpc, script: [*test-byte-writes,      *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-block-cycles,     *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-odd-block-count,  *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-odd-block-size,   *report-size]} | ||||
|  | ||||
|   # test under valgrind, checking for memory errors | ||||
|   - &valgrind | ||||
|     stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-valgrind | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install valgrind | ||||
|       - valgrind --version | ||||
|     script: | ||||
|       - make test TFLAGS+="-k --valgrind" | ||||
|  | ||||
|   # test compilation in read-only mode | ||||
|   - stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-readonly | ||||
|       - CC="arm-linux-gnueabi-gcc --static -mthumb" | ||||
|       - CFLAGS="-Werror -DLFS_READONLY" | ||||
|     if: branch !~ -prefix$ | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install | ||||
|             gcc-arm-linux-gnueabi | ||||
|             libc6-dev-armel-cross | ||||
|       - arm-linux-gnueabi-gcc --version | ||||
|     # report-size will compile littlefs and report the size | ||||
|     script: [*report-size] | ||||
|  | ||||
|   # test compilation in thread-safe mode | ||||
|   - stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-threadsafe | ||||
|       - CC="arm-linux-gnueabi-gcc --static -mthumb" | ||||
|       - CFLAGS="-Werror -DLFS_THREADSAFE" | ||||
|     if: branch !~ -prefix$ | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install | ||||
|             gcc-arm-linux-gnueabi | ||||
|             libc6-dev-armel-cross | ||||
|       - arm-linux-gnueabi-gcc --version | ||||
|     # report-size will compile littlefs and report the size | ||||
|     script: [*report-size] | ||||
|  | ||||
|   # self-host with littlefs-fuse for fuzz test | ||||
|   - stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-fuse | ||||
|     if: branch !~ -prefix$ | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install libfuse-dev | ||||
|       - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 | ||||
|       - fusermount -V | ||||
|       - gcc --version | ||||
|  | ||||
|       # setup disk for littlefs-fuse | ||||
|       - rm -rf littlefs-fuse/littlefs/* | ||||
|       - cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs | ||||
|  | ||||
|       - mkdir mount | ||||
|       - sudo chmod a+rw /dev/loop0 | ||||
|       - dd if=/dev/zero bs=512 count=128K of=disk | ||||
|       - losetup /dev/loop0 disk | ||||
|     script: | ||||
|       # self-host test | ||||
|       - make -C littlefs-fuse | ||||
|  | ||||
|       - littlefs-fuse/lfs --format /dev/loop0 | ||||
|       - littlefs-fuse/lfs /dev/loop0 mount | ||||
|  | ||||
|       - ls mount | ||||
|       - mkdir mount/littlefs | ||||
|       - cp -r $(git ls-tree --name-only HEAD) mount/littlefs | ||||
|       - cd mount/littlefs | ||||
|       - stat . | ||||
|       - ls -flh | ||||
|       - make -B test | ||||
|  | ||||
|   # test migration using littlefs-fuse | ||||
|   - stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-migration | ||||
|     if: branch !~ -prefix$ | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install libfuse-dev | ||||
|       - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 v2 | ||||
|       - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v1 v1 | ||||
|       - fusermount -V | ||||
|       - gcc --version | ||||
|  | ||||
|       # setup disk for littlefs-fuse | ||||
|       - rm -rf v2/littlefs/* | ||||
|       - cp -r $(git ls-tree --name-only HEAD) v2/littlefs | ||||
|  | ||||
|       - mkdir mount | ||||
|       - sudo chmod a+rw /dev/loop0 | ||||
|       - dd if=/dev/zero bs=512 count=128K of=disk | ||||
|       - losetup /dev/loop0 disk | ||||
|     script: | ||||
|       # compile v1 and v2 | ||||
|       - make -C v1 | ||||
|       - make -C v2 | ||||
|  | ||||
|       # run self-host test with v1 | ||||
|       - v1/lfs --format /dev/loop0 | ||||
|       - v1/lfs /dev/loop0 mount | ||||
|  | ||||
|       - ls mount | ||||
|       - mkdir mount/littlefs | ||||
|       - cp -r $(git ls-tree --name-only HEAD) mount/littlefs | ||||
|       - cd mount/littlefs | ||||
|       - stat . | ||||
|       - ls -flh | ||||
|       - make -B test | ||||
|  | ||||
|       # attempt to migrate | ||||
|       - cd ../.. | ||||
|       - fusermount -u mount | ||||
|  | ||||
|       - v2/lfs --migrate /dev/loop0 | ||||
|       - v2/lfs /dev/loop0 mount | ||||
|  | ||||
|       # run self-host test with v2 right where we left off | ||||
|       - ls mount | ||||
|       - cd mount/littlefs | ||||
|       - stat . | ||||
|       - ls -flh | ||||
|       - make -B test | ||||
|  | ||||
|   # automatically create releases | ||||
|   - stage: deploy | ||||
|     env: | ||||
|       - NAME=deploy | ||||
|     script: | ||||
|       - | | ||||
|         bash << 'SCRIPT' | ||||
|         set -ev | ||||
|         # Find version defined in lfs.h | ||||
|         LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3) | ||||
|         LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16))) | ||||
|         LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >>  0))) | ||||
|         # Grab latests patch from repo tags, default to 0, needs finagling | ||||
|         # to get past github's pagination api | ||||
|         PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR. | ||||
|         PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \ | ||||
|             | sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \ | ||||
|             || echo $PREV_URL) | ||||
|         LFS_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \ | ||||
|             | jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g") | ||||
|                 .captures[].string | tonumber) | max + 1' \ | ||||
|             || echo 0) | ||||
|         # We have our new version | ||||
|         LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH" | ||||
|         echo "VERSION $LFS_VERSION" | ||||
|         # Check that we're the most recent commit | ||||
|         CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \ | ||||
|             https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \ | ||||
|             | jq -re '.sha') | ||||
|         [ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0 | ||||
|         # Create major branch | ||||
|         git branch v$LFS_VERSION_MAJOR HEAD | ||||
|         # Create major prefix branch | ||||
|         git config user.name "geky bot" | ||||
|         git config user.email "bot@geky.net" | ||||
|         git fetch https://github.com/$TRAVIS_REPO_SLUG.git \ | ||||
|             --depth=50 v$LFS_VERSION_MAJOR-prefix || true | ||||
|         ./scripts/prefix.py lfs$LFS_VERSION_MAJOR | ||||
|         git branch v$LFS_VERSION_MAJOR-prefix $( \ | ||||
|             git commit-tree $(git write-tree) \ | ||||
|                 $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \ | ||||
|                 -p HEAD \ | ||||
|                 -m "Generated v$LFS_VERSION_MAJOR prefixes") | ||||
|         git reset --hard | ||||
|         # Update major version branches (vN and vN-prefix) | ||||
|         git push --atomic https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \ | ||||
|             v$LFS_VERSION_MAJOR \ | ||||
|             v$LFS_VERSION_MAJOR-prefix | ||||
|         # Build release notes | ||||
|         PREV=$(git tag --sort=-v:refname -l "v*" | head -1) | ||||
|         if [ ! -z "$PREV" ] | ||||
|         then | ||||
|             echo "PREV $PREV" | ||||
|             CHANGES=$(git log --oneline $PREV.. --grep='^Merge' --invert-grep) | ||||
|             printf "CHANGES\n%s\n\n" "$CHANGES" | ||||
|         fi | ||||
|         case ${GEKY_BOT_DRAFT:-minor} in | ||||
|             true)  DRAFT=true ;; | ||||
|             minor) DRAFT=$(jq -R 'endswith(".0")' <<< "$LFS_VERSION") ;; | ||||
|             false) DRAFT=false ;; | ||||
|         esac | ||||
|         # Create the release and patch version tag (vN.N.N) | ||||
|         curl -f -u "$GEKY_BOT_RELEASES" -X POST \ | ||||
|             https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \ | ||||
|             -d "{ | ||||
|                 \"tag_name\": \"$LFS_VERSION\", | ||||
|                 \"name\": \"${LFS_VERSION%.0}\", | ||||
|                 \"target_commitish\": \"$TRAVIS_COMMIT\", | ||||
|                 \"draft\": $DRAFT, | ||||
|                 \"body\": $(jq -sR '.' <<< "$CHANGES") | ||||
|             }" #" | ||||
|         SCRIPT | ||||
|  | ||||
| # manage statuses | ||||
| before_install: | ||||
|   - | | ||||
|     # don't clobber other (not us) failures | ||||
|     if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|         | jq -e ".statuses[] | select( | ||||
|             .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and | ||||
|             .state == \"failure\" and | ||||
|             (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))" | ||||
|     then | ||||
|         curl -u "$GEKY_BOT_STATUSES" -X POST \ | ||||
|             https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|             -d "{ | ||||
|                 \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\", | ||||
|                 \"state\": \"pending\", | ||||
|                 \"description\": \"${STATUS:-In progress}\", | ||||
|                 \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\" | ||||
|             }" | ||||
|     fi | ||||
|  | ||||
| after_failure: | ||||
|   - | | ||||
|     # don't clobber other (not us) failures | ||||
|     if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|         | jq -e ".statuses[] | select( | ||||
|             .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and | ||||
|             .state == \"failure\" and | ||||
|             (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))" | ||||
|     then | ||||
|         curl -u "$GEKY_BOT_STATUSES" -X POST \ | ||||
|             https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|             -d "{ | ||||
|                 \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\", | ||||
|                 \"state\": \"failure\", | ||||
|                 \"description\": \"${STATUS:-Failed}\", | ||||
|                 \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\" | ||||
|             }" | ||||
|     fi | ||||
|  | ||||
| after_success: | ||||
|   - | | ||||
|     # don't clobber other (not us) failures | ||||
|     # only update if we were last job to mark in progress, | ||||
|     # this isn't perfect but is probably good enough | ||||
|     if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|         | jq -e ".statuses[] | select( | ||||
|             .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and | ||||
|             (.state == \"failure\" or .state == \"pending\") and | ||||
|             (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))" | ||||
|     then | ||||
|         curl -u "$GEKY_BOT_STATUSES" -X POST \ | ||||
|             https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|             -d "{ | ||||
|                 \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\", | ||||
|                 \"state\": \"success\", | ||||
|                 \"description\": \"${STATUS:-Passed}\", | ||||
|                 \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\" | ||||
|             }" | ||||
|     fi | ||||
							
								
								
									
										85
									
								
								Makefile
									
									
									
									
									
								
							
							
						
						
									
										85
									
								
								Makefile
									
									
									
									
									
								
							| @@ -1,25 +1,39 @@ | ||||
| TARGET = lfs.a | ||||
| ifneq ($(wildcard test.c main.c),) | ||||
| override TARGET = lfs | ||||
| ifdef BUILDDIR | ||||
| # make sure BUILDDIR ends with a slash | ||||
| override BUILDDIR := $(BUILDDIR)/ | ||||
| # bit of a hack, but we want to make sure BUILDDIR directory structure | ||||
| # is correct before any commands | ||||
| $(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \ | ||||
| 	$(BUILDDIR) \ | ||||
| 	$(BUILDDIR)bd \ | ||||
| 	$(BUILDDIR)tests)) | ||||
| endif | ||||
|  | ||||
| # overridable target/src/tools/flags/etc | ||||
| ifneq ($(wildcard test.c main.c),) | ||||
| TARGET ?= $(BUILDDIR)lfs | ||||
| else | ||||
| TARGET ?= $(BUILDDIR)lfs.a | ||||
| endif | ||||
|  | ||||
|  | ||||
| CC ?= gcc | ||||
| AR ?= ar | ||||
| SIZE ?= size | ||||
| CTAGS ?= ctags | ||||
| NM ?= nm | ||||
| LCOV ?= lcov | ||||
|  | ||||
| SRC += $(wildcard *.c bd/*.c) | ||||
| OBJ := $(SRC:.c=.o) | ||||
| DEP := $(SRC:.c=.d) | ||||
| ASM := $(SRC:.c=.s) | ||||
| SRC ?= $(wildcard *.c) | ||||
| OBJ := $(SRC:%.c=$(BUILDDIR)%.o) | ||||
| DEP := $(SRC:%.c=$(BUILDDIR)%.d) | ||||
| ASM := $(SRC:%.c=$(BUILDDIR)%.s) | ||||
|  | ||||
| ifdef DEBUG | ||||
| override CFLAGS += -O0 -g3 | ||||
| else | ||||
| override CFLAGS += -Os | ||||
| endif | ||||
| ifdef WORD | ||||
| override CFLAGS += -m$(WORD) | ||||
| endif | ||||
| ifdef TRACE | ||||
| override CFLAGS += -DLFS_YES_TRACE | ||||
| endif | ||||
| @@ -28,40 +42,73 @@ override CFLAGS += -std=c99 -Wall -pedantic | ||||
| override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef | ||||
|  | ||||
| ifdef VERBOSE | ||||
| override TFLAGS += -v | ||||
| override TESTFLAGS += -v | ||||
| override CODEFLAGS += -v | ||||
| override COVERAGEFLAGS += -v | ||||
| endif | ||||
| ifdef EXEC | ||||
| override TESTFLAGS += --exec="$(EXEC)" | ||||
| endif | ||||
| ifdef BUILDDIR | ||||
| override TESTFLAGS += --build-dir="$(BUILDDIR:/=)" | ||||
| override CODEFLAGS += --build-dir="$(BUILDDIR:/=)" | ||||
| endif | ||||
| ifneq ($(NM),nm) | ||||
| override CODEFLAGS += --nm-tool="$(NM)" | ||||
| endif | ||||
|  | ||||
|  | ||||
| all: $(TARGET) | ||||
| # commands | ||||
| .PHONY: all build | ||||
| all build: $(TARGET) | ||||
|  | ||||
| .PHONY: asm | ||||
| asm: $(ASM) | ||||
|  | ||||
| .PHONY: size | ||||
| size: $(OBJ) | ||||
| 	$(SIZE) -t $^ | ||||
|  | ||||
| .PHONY: tags | ||||
| tags: | ||||
| 	$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC) | ||||
|  | ||||
| .PHONY: code | ||||
| code: $(OBJ) | ||||
| 	./scripts/code.py $^ $(CODEFLAGS) | ||||
|  | ||||
| .PHONY: test | ||||
| test: | ||||
| 	./scripts/test.py $(TFLAGS) | ||||
| 	./scripts/test.py $(TESTFLAGS) | ||||
| .SECONDEXPANSION: | ||||
| test%: tests/test$$(firstword $$(subst \#, ,%)).toml | ||||
| 	./scripts/test.py $@ $(TFLAGS) | ||||
| 	./scripts/test.py $@ $(TESTFLAGS) | ||||
|  | ||||
| .PHONY: coverage | ||||
| coverage: | ||||
| 	./scripts/coverage.py $(BUILDDIR)tests/*.toml.info $(COVERAGEFLAGS) | ||||
|  | ||||
| # rules | ||||
| -include $(DEP) | ||||
| .SUFFIXES: | ||||
|  | ||||
| lfs: $(OBJ) | ||||
| $(BUILDDIR)lfs: $(OBJ) | ||||
| 	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ | ||||
|  | ||||
| %.a: $(OBJ) | ||||
| $(BUILDDIR)%.a: $(OBJ) | ||||
| 	$(AR) rcs $@ $^ | ||||
|  | ||||
| %.o: %.c | ||||
| $(BUILDDIR)%.o: %.c | ||||
| 	$(CC) -c -MMD $(CFLAGS) $< -o $@ | ||||
|  | ||||
| %.s: %.c | ||||
| $(BUILDDIR)%.s: %.c | ||||
| 	$(CC) -S $(CFLAGS) $< -o $@ | ||||
|  | ||||
| # clean everything | ||||
| .PHONY: clean | ||||
| clean: | ||||
| 	rm -f $(TARGET) | ||||
| 	rm -f $(OBJ) | ||||
| 	rm -f $(DEP) | ||||
| 	rm -f $(ASM) | ||||
| 	rm -f tests/*.toml.* | ||||
| 	rm -f $(BUILDDIR)tests/*.toml.* | ||||
|   | ||||
							
								
								
									
										337
									
								
								lfs.c
									
									
									
									
									
								
							
							
						
						
									
										337
									
								
								lfs.c
									
									
									
									
									
								
							| @@ -425,7 +425,8 @@ static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) { | ||||
|     superblock->attr_max    = lfs_tole32(superblock->attr_max); | ||||
| } | ||||
|  | ||||
| static inline bool lfs_mlist_isopen(struct lfs_mlist *head, | ||||
| #ifndef LFS_NO_ASSERT | ||||
| static bool lfs_mlist_isopen(struct lfs_mlist *head, | ||||
|         struct lfs_mlist *node) { | ||||
|     for (struct lfs_mlist **p = &head; *p; p = &(*p)->next) { | ||||
|         if (*p == (struct lfs_mlist*)node) { | ||||
| @@ -435,8 +436,9 @@ static inline bool lfs_mlist_isopen(struct lfs_mlist *head, | ||||
|  | ||||
|     return false; | ||||
| } | ||||
| #endif | ||||
|  | ||||
| static inline void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) { | ||||
| static void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) { | ||||
|     for (struct lfs_mlist **p = &lfs->mlist; *p; p = &(*p)->next) { | ||||
|         if (*p == mlist) { | ||||
|             *p = (*p)->next; | ||||
| @@ -445,7 +447,7 @@ static inline void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) { | ||||
|     } | ||||
| } | ||||
|  | ||||
| static inline void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) { | ||||
| static void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) { | ||||
|     mlist->next = lfs->mlist; | ||||
|     lfs->mlist = mlist; | ||||
| } | ||||
| @@ -465,7 +467,7 @@ static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file); | ||||
| static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file); | ||||
| static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file); | ||||
|  | ||||
| static void lfs_fs_preporphans(lfs_t *lfs, int8_t orphans); | ||||
| static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans); | ||||
| static void lfs_fs_prepmove(lfs_t *lfs, | ||||
|         uint16_t id, const lfs_block_t pair[2]); | ||||
| static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2], | ||||
| @@ -1589,7 +1591,8 @@ static int lfs_dir_compact(lfs_t *lfs, | ||||
|         // for metadata updates. | ||||
|         if (end - begin < 0xff && | ||||
|                 size <= lfs_min(lfs->cfg->block_size - 36, | ||||
|                     lfs_alignup(lfs->cfg->block_size/2, | ||||
|                     lfs_alignup((lfs->cfg->metadata_max ? | ||||
|                             lfs->cfg->metadata_max : lfs->cfg->block_size)/2, | ||||
|                         lfs->cfg->prog_size))) { | ||||
|             break; | ||||
|         } | ||||
| @@ -1674,7 +1677,8 @@ static int lfs_dir_compact(lfs_t *lfs, | ||||
|                 .crc = 0xffffffff, | ||||
|  | ||||
|                 .begin = 0, | ||||
|                 .end = lfs->cfg->block_size - 8, | ||||
|                 .end = (lfs->cfg->metadata_max ? | ||||
|                     lfs->cfg->metadata_max : lfs->cfg->block_size) - 8, | ||||
|             }; | ||||
|  | ||||
|             // erase block to write to | ||||
| @@ -1884,7 +1888,8 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir, | ||||
|             .crc = 0xffffffff, | ||||
|  | ||||
|             .begin = dir->off, | ||||
|             .end = lfs->cfg->block_size - 8, | ||||
|             .end = (lfs->cfg->metadata_max ? | ||||
|                 lfs->cfg->metadata_max : lfs->cfg->block_size) - 8, | ||||
|         }; | ||||
|  | ||||
|         // traverse attrs that need to be written out | ||||
| @@ -2061,7 +2066,10 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) { | ||||
|     // current block end of list? | ||||
|     if (cwd.m.split) { | ||||
|         // update tails, this creates a desync | ||||
|         lfs_fs_preporphans(lfs, +1); | ||||
|         err = lfs_fs_preporphans(lfs, +1); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|  | ||||
|         // it's possible our predecessor has to be relocated, and if | ||||
|         // our parent is our predecessor's predecessor, this could have | ||||
| @@ -2081,7 +2089,10 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) { | ||||
|         } | ||||
|  | ||||
|         lfs->mlist = cwd.next; | ||||
|         lfs_fs_preporphans(lfs, -1); | ||||
|         err = lfs_fs_preporphans(lfs, -1); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // now insert into our parent block | ||||
| @@ -2966,7 +2977,9 @@ static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file, | ||||
|     if ((file->flags & LFS_F_INLINE) && | ||||
|             lfs_max(file->pos+nsize, file->ctz.size) > | ||||
|             lfs_min(0x3fe, lfs_min( | ||||
|                 lfs->cfg->cache_size, lfs->cfg->block_size/8))) { | ||||
|                 lfs->cfg->cache_size, | ||||
|                 (lfs->cfg->metadata_max ? | ||||
|                     lfs->cfg->metadata_max : lfs->cfg->block_size) / 8))) { | ||||
|         // inline file doesn't fit anymore | ||||
|         int err = lfs_file_outline(lfs, file); | ||||
|         if (err) { | ||||
| @@ -3048,14 +3061,6 @@ relocate: | ||||
|  | ||||
| static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file, | ||||
|         lfs_soff_t off, int whence) { | ||||
| #ifndef LFS_READONLY | ||||
|     // write out everything beforehand, may be noop if rdonly | ||||
|     int err = lfs_file_flush(lfs, file); | ||||
|     if (err) { | ||||
|         return err; | ||||
|     } | ||||
| #endif | ||||
|  | ||||
|     // find new pos | ||||
|     lfs_off_t npos = file->pos; | ||||
|     if (whence == LFS_SEEK_SET) { | ||||
| @@ -3063,7 +3068,7 @@ static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file, | ||||
|     } else if (whence == LFS_SEEK_CUR) { | ||||
|         npos = file->pos + off; | ||||
|     } else if (whence == LFS_SEEK_END) { | ||||
|         npos = file->ctz.size + off; | ||||
|         npos = lfs_file_rawsize(lfs, file) + off; | ||||
|     } | ||||
|  | ||||
|     if (npos > lfs->file_max) { | ||||
| @@ -3071,6 +3076,19 @@ static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file, | ||||
|         return LFS_ERR_INVAL; | ||||
|     } | ||||
|  | ||||
|     if (file->pos == npos) { | ||||
|         // noop - position has not changed | ||||
|         return npos; | ||||
|     } | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
|     // write out everything beforehand, may be noop if rdonly | ||||
|     int err = lfs_file_flush(lfs, file); | ||||
|     if (err) { | ||||
|         return err; | ||||
|     } | ||||
| #endif | ||||
|  | ||||
|     // update pos | ||||
|     file->pos = npos; | ||||
|     return npos; | ||||
| @@ -3101,21 +3119,22 @@ static int lfs_file_rawtruncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) { | ||||
|             return err; | ||||
|         } | ||||
|  | ||||
|         // need to set pos/block/off consistently so seeking back to | ||||
|         // the old position does not get confused | ||||
|         file->pos = size; | ||||
|         file->ctz.head = file->block; | ||||
|         file->ctz.size = size; | ||||
|         file->flags |= LFS_F_DIRTY | LFS_F_READING; | ||||
|     } else if (size > oldsize) { | ||||
|         // flush+seek if not already at end | ||||
|         if (file->pos != oldsize) { | ||||
|             lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END); | ||||
|             if (res < 0) { | ||||
|                 return (int)res; | ||||
|             } | ||||
|         lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END); | ||||
|         if (res < 0) { | ||||
|             return (int)res; | ||||
|         } | ||||
|  | ||||
|         // fill with zeros | ||||
|         while (file->pos < size) { | ||||
|             lfs_ssize_t res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1); | ||||
|             res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1); | ||||
|             if (res < 0) { | ||||
|                 return (int)res; | ||||
|             } | ||||
| @@ -3171,7 +3190,106 @@ static int lfs_rawstat(lfs_t *lfs, const char *path, struct lfs_info *info) { | ||||
| } | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| static int lfs_rawremove(lfs_t *lfs, const char *path) { | ||||
| typedef int (*lfs_dir_prep_helper_t)(lfs_t *, struct lfs_mlist *, lfs_gstate_t *); | ||||
|  | ||||
| static int lfs_dir_prep_remove_nonempty_folders(lfs_t *lfs, struct lfs_mlist *dir, | ||||
|         lfs_gstate_t *tmp_gstate) | ||||
| { | ||||
|     lfs_gstate_t split_gstate; | ||||
|     uint16_t id = 0; | ||||
|  | ||||
|     // Walk tags stored in this directory and check for any directory | ||||
|     // tags.  Removal of directories with a directory in them can lead | ||||
|     // to additional orphans in the filesystem, so we return | ||||
|     // LFS_ERR_NOTEMPTY in this case.  Otherwise, leave the loaded | ||||
|     // directory for the tail end of the directory split to leave a proper | ||||
|     // view of the filesystem after removal. | ||||
|     while (true) { | ||||
|         if (dir->m.count == id) { | ||||
|             if (!dir->m.split) { | ||||
|                 // We have iterated through the folder to the last | ||||
|                 // tag. | ||||
|                 break; | ||||
|             } | ||||
|  | ||||
|             // Before we fetch the next block, update our fetched gstate xor | ||||
|             lfs_dir_getgstate(lfs, &dir->m, &split_gstate); | ||||
|             lfs_gstate_xor(tmp_gstate, &split_gstate); | ||||
|  | ||||
|             int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail); | ||||
|             if (err) { | ||||
|                 return err; | ||||
|             } | ||||
|  | ||||
|             id = 0; | ||||
|         } | ||||
|  | ||||
|         lfs_stag_t tag = lfs_dir_get(lfs, &dir->m, LFS_MKTAG(0x780, 0x3ff, 0), | ||||
|                 LFS_MKTAG(LFS_TYPE_NAME, id, 0), NULL); | ||||
|  | ||||
|         if (tag < 0) { | ||||
|             return tag; | ||||
|         } | ||||
|  | ||||
|         if (lfs_tag_type3(tag) == LFS_TYPE_DIR) { | ||||
|             return LFS_ERR_NOTEMPTY; | ||||
|         } | ||||
|  | ||||
|         id += 1; | ||||
|     } | ||||
|  | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| static int lfs_dir_prep_removal(lfs_t *lfs, struct lfs_mlist *dir, | ||||
|         lfs_mdir_t *newcwd, uint16_t newid, lfs_block_t *pair, | ||||
|         lfs_gstate_t *tmp_gstate, lfs_dir_prep_helper_t helper) | ||||
| { | ||||
|     lfs_stag_t res = lfs_dir_get(lfs, newcwd, LFS_MKTAG(0x700, 0x3ff, 0), | ||||
|             LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), pair); | ||||
|     if (res < 0) { | ||||
|         return (int)res; | ||||
|     } | ||||
|     lfs_pair_fromle32(pair); | ||||
|  | ||||
|     memset(tmp_gstate, 0, sizeof(*tmp_gstate)); | ||||
|  | ||||
|     int err = lfs_dir_fetch(lfs, &dir->m, pair); | ||||
|     if (err) { | ||||
|         return err; | ||||
|     } | ||||
|  | ||||
|     if (dir->m.count > 0 || dir->m.split) { | ||||
|         // Normal POSIX behavior doesn't allow a non-empty | ||||
|         // folder to be removed, or renamed over, in this manner | ||||
|         if (NULL == helper) { | ||||
|             return LFS_ERR_NOTEMPTY; | ||||
|         } | ||||
|  | ||||
|         err = helper(lfs, dir, tmp_gstate); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // mark fs as orphaned | ||||
|     err = lfs_fs_preporphans(lfs, +1); | ||||
|     if (err) { | ||||
|         return err; | ||||
|     } | ||||
|  | ||||
|     // I know it's crazy but yes, dir can be changed by our parent's | ||||
|     // commit (if predecessor is child) | ||||
|     dir->type = 0; | ||||
|     dir->id = 0; | ||||
|     lfs->mlist = dir; | ||||
|  | ||||
|     return 0; | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| static int lfs_rawremove(lfs_t *lfs, const char *path, lfs_dir_prep_helper_t helper) { | ||||
|     // deorphan if we haven't yet, needed at most once after poweron | ||||
|     int err = lfs_fs_forceconsistency(lfs); | ||||
|     if (err) { | ||||
| @@ -3185,34 +3303,16 @@ static int lfs_rawremove(lfs_t *lfs, const char *path) { | ||||
|     } | ||||
|  | ||||
|     struct lfs_mlist dir; | ||||
|     lfs_block_t pair[2]; | ||||
|     lfs_gstate_t tmp_gstate; | ||||
|     dir.next = lfs->mlist; | ||||
|     if (lfs_tag_type3(tag) == LFS_TYPE_DIR) { | ||||
|         // must be empty before removal | ||||
|         lfs_block_t pair[2]; | ||||
|         lfs_stag_t res = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x700, 0x3ff, 0), | ||||
|                 LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair); | ||||
|         if (res < 0) { | ||||
|             return (int)res; | ||||
|         } | ||||
|         lfs_pair_fromle32(pair); | ||||
|  | ||||
|         err = lfs_dir_fetch(lfs, &dir.m, pair); | ||||
|         if (err) { | ||||
|         // must be empty before removal to prevent orphans | ||||
|         err = lfs_dir_prep_removal(lfs, &dir, &cwd, lfs_tag_id(tag), | ||||
|                 pair, &tmp_gstate, helper); | ||||
|         if (err < 0) { | ||||
|             return err; | ||||
|         } | ||||
|  | ||||
|         if (dir.m.count > 0 || dir.m.split) { | ||||
|             return LFS_ERR_NOTEMPTY; | ||||
|         } | ||||
|  | ||||
|         // mark fs as orphaned | ||||
|         lfs_fs_preporphans(lfs, +1); | ||||
|  | ||||
|         // I know it's crazy but yes, dir can be changed by our parent's | ||||
|         // commit (if predecessor is child) | ||||
|         dir.type = 0; | ||||
|         dir.id = 0; | ||||
|         lfs->mlist = &dir; | ||||
|     } | ||||
|  | ||||
|     // delete the entry | ||||
| @@ -3226,13 +3326,20 @@ static int lfs_rawremove(lfs_t *lfs, const char *path) { | ||||
|     lfs->mlist = dir.next; | ||||
|     if (lfs_tag_type3(tag) == LFS_TYPE_DIR) { | ||||
|         // fix orphan | ||||
|         lfs_fs_preporphans(lfs, -1); | ||||
|  | ||||
|         err = lfs_fs_pred(lfs, dir.m.pair, &cwd); | ||||
|         err = lfs_fs_preporphans(lfs, -1); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|  | ||||
|         err = lfs_fs_pred(lfs, pair, &cwd); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|  | ||||
|         // Merge in gstate from first block splits within the directory; | ||||
|         // lfs_dir_drop will pick up the last gstate entry. | ||||
|         lfs_gstate_xor(&lfs->gdelta, &tmp_gstate); | ||||
|  | ||||
|         err = lfs_dir_drop(lfs, &cwd, &dir.m); | ||||
|         if (err) { | ||||
|             return err; | ||||
| @@ -3244,7 +3351,8 @@ static int lfs_rawremove(lfs_t *lfs, const char *path) { | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) { | ||||
| static int lfs_rawrename(lfs_t *lfs, const char *oldpath, | ||||
|         const char *newpath, lfs_dir_prep_helper_t helper) { | ||||
|     // deorphan if we haven't yet, needed at most once after poweron | ||||
|     int err = lfs_fs_forceconsistency(lfs); | ||||
|     if (err) { | ||||
| @@ -3272,6 +3380,8 @@ static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) { | ||||
|     uint16_t newoldid = lfs_tag_id(oldtag); | ||||
|  | ||||
|     struct lfs_mlist prevdir; | ||||
|     lfs_block_t dir_pair[2]; | ||||
|     lfs_gstate_t tmp_gstate; | ||||
|     prevdir.next = lfs->mlist; | ||||
|     if (prevtag == LFS_ERR_NOENT) { | ||||
|         // check that name fits | ||||
| @@ -3292,33 +3402,12 @@ static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) { | ||||
|         // we're renaming to ourselves?? | ||||
|         return 0; | ||||
|     } else if (lfs_tag_type3(prevtag) == LFS_TYPE_DIR) { | ||||
|         // must be empty before removal | ||||
|         lfs_block_t prevpair[2]; | ||||
|         lfs_stag_t res = lfs_dir_get(lfs, &newcwd, LFS_MKTAG(0x700, 0x3ff, 0), | ||||
|                 LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair); | ||||
|         if (res < 0) { | ||||
|             return (int)res; | ||||
|         } | ||||
|         lfs_pair_fromle32(prevpair); | ||||
|  | ||||
|         // must be empty before removal | ||||
|         err = lfs_dir_fetch(lfs, &prevdir.m, prevpair); | ||||
|         // must be empty before removal to prevent orphans | ||||
|         err = lfs_dir_prep_removal(lfs, &prevdir, &newcwd, newid, | ||||
|                 dir_pair, &tmp_gstate, helper); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|  | ||||
|         if (prevdir.m.count > 0 || prevdir.m.split) { | ||||
|             return LFS_ERR_NOTEMPTY; | ||||
|         } | ||||
|  | ||||
|         // mark fs as orphaned | ||||
|         lfs_fs_preporphans(lfs, +1); | ||||
|  | ||||
|         // I know it's crazy but yes, dir can be changed by our parent's | ||||
|         // commit (if predecessor is child) | ||||
|         prevdir.type = 0; | ||||
|         prevdir.id = 0; | ||||
|         lfs->mlist = &prevdir; | ||||
|     } | ||||
|  | ||||
|     if (!samepair) { | ||||
| @@ -3355,13 +3444,20 @@ static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) { | ||||
|     lfs->mlist = prevdir.next; | ||||
|     if (prevtag != LFS_ERR_NOENT && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) { | ||||
|         // fix orphan | ||||
|         lfs_fs_preporphans(lfs, -1); | ||||
|  | ||||
|         err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd); | ||||
|         err = lfs_fs_preporphans(lfs, -1); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|  | ||||
|         err = lfs_fs_pred(lfs, dir_pair, &newcwd); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|  | ||||
|         // Merge in gstate from first split blocks in the directory; | ||||
|         // lfs_dir_drop will pick up the other gstate entries. | ||||
|         lfs_gstate_xor(&lfs->gdelta, &tmp_gstate); | ||||
|  | ||||
|         err = lfs_dir_drop(lfs, &newcwd, &prevdir.m); | ||||
|         if (err) { | ||||
|             return err; | ||||
| @@ -3536,6 +3632,8 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) { | ||||
|         lfs->attr_max = LFS_ATTR_MAX; | ||||
|     } | ||||
|  | ||||
|     LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size); | ||||
|  | ||||
|     // setup default state | ||||
|     lfs->root[0] = LFS_BLOCK_NULL; | ||||
|     lfs->root[1] = LFS_BLOCK_NULL; | ||||
| @@ -3616,12 +3714,6 @@ static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) { | ||||
|             goto cleanup; | ||||
|         } | ||||
|  | ||||
|         // sanity check that fetch works | ||||
|         err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1}); | ||||
|         if (err) { | ||||
|             goto cleanup; | ||||
|         } | ||||
|  | ||||
|         // force compaction to prevent accidentally mounting any | ||||
|         // older version of littlefs that may live on disk | ||||
|         root.erased = false; | ||||
| @@ -3629,6 +3721,12 @@ static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) { | ||||
|         if (err) { | ||||
|             goto cleanup; | ||||
|         } | ||||
|  | ||||
|         // sanity check that fetch works | ||||
|         err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1}); | ||||
|         if (err) { | ||||
|             goto cleanup; | ||||
|         } | ||||
|     } | ||||
|  | ||||
| cleanup: | ||||
| @@ -3829,7 +3927,7 @@ int lfs_fs_rawtraverse(lfs_t *lfs, | ||||
|                 if (err) { | ||||
|                     return err; | ||||
|                 } | ||||
|             } else if (includeorphans &&  | ||||
|             } else if (includeorphans && | ||||
|                     lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) { | ||||
|                 for (int i = 0; i < 2; i++) { | ||||
|                     err = cb(data, (&ctz.head)[i]); | ||||
| @@ -3986,7 +4084,10 @@ static int lfs_fs_relocate(lfs_t *lfs, | ||||
|  | ||||
|     if (tag != LFS_ERR_NOENT) { | ||||
|         // update disk, this creates a desync | ||||
|         lfs_fs_preporphans(lfs, +1); | ||||
|         int err = lfs_fs_preporphans(lfs, +1); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|  | ||||
|         // fix pending move in this pair? this looks like an optimization but | ||||
|         // is in fact _required_ since relocating may outdate the move. | ||||
| @@ -4003,7 +4104,7 @@ static int lfs_fs_relocate(lfs_t *lfs, | ||||
|         } | ||||
|  | ||||
|         lfs_pair_tole32(newpair); | ||||
|         int err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS( | ||||
|         err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS( | ||||
|                 {LFS_MKTAG_IF(moveid != 0x3ff, | ||||
|                     LFS_TYPE_DELETE, moveid, 0), NULL}, | ||||
|                 {tag, newpair})); | ||||
| @@ -4013,7 +4114,10 @@ static int lfs_fs_relocate(lfs_t *lfs, | ||||
|         } | ||||
|  | ||||
|         // next step, clean up orphans | ||||
|         lfs_fs_preporphans(lfs, -1); | ||||
|         err = lfs_fs_preporphans(lfs, -1); | ||||
|         if (err) { | ||||
|             return err; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // find pred | ||||
| @@ -4052,11 +4156,13 @@ static int lfs_fs_relocate(lfs_t *lfs, | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| static void lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) { | ||||
| static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) { | ||||
|     LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0 || orphans >= 0); | ||||
|     lfs->gstate.tag += orphans; | ||||
|     lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) | | ||||
|             ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31)); | ||||
|  | ||||
|     return 0; | ||||
| } | ||||
| #endif | ||||
|  | ||||
| @@ -4173,8 +4279,7 @@ static int lfs_fs_deorphan(lfs_t *lfs) { | ||||
|     } | ||||
|  | ||||
|     // mark orphans as fixed | ||||
|     lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate)); | ||||
|     return 0; | ||||
|     return lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate)); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| @@ -4723,7 +4828,7 @@ static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) { | ||||
|  | ||||
|                 lfs1_entry_tole32(&entry1.d); | ||||
|                 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS( | ||||
|                         {LFS_MKTAG(LFS_TYPE_CREATE, id, 0)}, | ||||
|                         {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL}, | ||||
|                         {LFS_MKTAG_IF_ELSE(isdir, | ||||
|                             LFS_TYPE_DIR, id, entry1.d.nlen, | ||||
|                             LFS_TYPE_REG, id, entry1.d.nlen), | ||||
| @@ -4828,7 +4933,7 @@ static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) { | ||||
|  | ||||
|         lfs_superblock_tole32(&superblock); | ||||
|         err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS( | ||||
|                 {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0)}, | ||||
|                 {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL}, | ||||
|                 {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"}, | ||||
|                 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)), | ||||
|                     &superblock})); | ||||
| @@ -4954,7 +5059,7 @@ int lfs_remove(lfs_t *lfs, const char *path) { | ||||
|     } | ||||
|     LFS_TRACE("lfs_remove(%p, \"%s\")", (void*)lfs, path); | ||||
|  | ||||
|     err = lfs_rawremove(lfs, path); | ||||
|     err = lfs_rawremove(lfs, path, NULL); | ||||
|  | ||||
|     LFS_TRACE("lfs_remove -> %d", err); | ||||
|     LFS_UNLOCK(lfs->cfg); | ||||
| @@ -4962,6 +5067,24 @@ int lfs_remove(lfs_t *lfs, const char *path) { | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| int lfs_removeall(lfs_t *lfs, const char *path) { | ||||
|     int err = LFS_LOCK(lfs->cfg); | ||||
|     if (err) { | ||||
|         return err; | ||||
|     } | ||||
|     LFS_TRACE("lfs_removeall(%p, \"%s\")", (void*)lfs, path); | ||||
|  | ||||
|     // Note: We pass in a helper pointer here so that this extra | ||||
|     //       logic can be dropped if it is never referenced | ||||
|     err = lfs_rawremove(lfs, path, lfs_dir_prep_remove_nonempty_folders); | ||||
|  | ||||
|     LFS_TRACE("lfs_removeall -> %d", err); | ||||
|     LFS_UNLOCK(lfs->cfg); | ||||
|     return err; | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) { | ||||
|     int err = LFS_LOCK(lfs->cfg); | ||||
| @@ -4970,7 +5093,25 @@ int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) { | ||||
|     } | ||||
|     LFS_TRACE("lfs_rename(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath); | ||||
|  | ||||
|     err = lfs_rawrename(lfs, oldpath, newpath); | ||||
|     err = lfs_rawrename(lfs, oldpath, newpath, NULL); | ||||
|  | ||||
|     LFS_TRACE("lfs_rename -> %d", err); | ||||
|     LFS_UNLOCK(lfs->cfg); | ||||
|     return err; | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| int lfs_rename_with_removeall(lfs_t *lfs, const char *oldpath, const char *newpath) { | ||||
|     int err = LFS_LOCK(lfs->cfg); | ||||
|     if (err) { | ||||
|         return err; | ||||
|     } | ||||
|     LFS_TRACE("lfs_rename_with_removeall(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath); | ||||
|  | ||||
|     // Note: We pass in a helper pointer here so that this extra | ||||
|     //       logic can be dropped if it is never referenced | ||||
|     err = lfs_rawrename(lfs, oldpath, newpath, lfs_dir_prep_remove_nonempty_folders); | ||||
|  | ||||
|     LFS_TRACE("lfs_rename_with_removeall -> %d", err); | ||||
|     LFS_UNLOCK(lfs->cfg); | ||||
|   | ||||
							
								
								
									
										34
									
								
								lfs.h
									
									
									
									
									
								
							
							
						
						
									
										34
									
								
								lfs.h
									
									
									
									
									
								
							| @@ -22,7 +22,7 @@ extern "C" | ||||
| // Software library version | ||||
| // Major (top-nibble), incremented on backwards incompatible changes | ||||
| // Minor (bottom-nibble), incremented on feature additions | ||||
| #define LFS_VERSION 0x00020003 | ||||
| #define LFS_VERSION 0x00020004 | ||||
| #define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16)) | ||||
| #define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >>  0)) | ||||
|  | ||||
| @@ -207,7 +207,7 @@ struct lfs_config { | ||||
|     // Number of erasable blocks on the device. | ||||
|     lfs_size_t block_count; | ||||
|  | ||||
|     // Number of erase cycles before littlefs evicts metadata logs and moves  | ||||
|     // Number of erase cycles before littlefs evicts metadata logs and moves | ||||
|     // the metadata to another block. Suggested values are in the | ||||
|     // range 100-1000, with large values having better performance at the cost | ||||
|     // of less consistent wear distribution. | ||||
| @@ -256,6 +256,12 @@ struct lfs_config { | ||||
|     // larger attributes size but must be <= LFS_ATTR_MAX. Defaults to | ||||
|     // LFS_ATTR_MAX when zero. | ||||
|     lfs_size_t attr_max; | ||||
|  | ||||
|     // Optional upper limit on total space given to metadata pairs in bytes. On | ||||
|     // devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB) | ||||
|     // can help bound the metadata compaction time. Must be <= block_size. | ||||
|     // Defaults to block_size when zero. | ||||
|     lfs_size_t metadata_max; | ||||
| }; | ||||
|  | ||||
| // File info structure | ||||
| @@ -452,6 +458,17 @@ int lfs_unmount(lfs_t *lfs); | ||||
| int lfs_remove(lfs_t *lfs, const char *path); | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Removes a file or directory | ||||
| // | ||||
| // If removing a directory, the directory must not contain | ||||
| // any subdirectories, but it may contain files. This is | ||||
| // non-POSIX behavior, and thus is a different call | ||||
| // than lfs_remove(...). | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_removeall(lfs_t *lfs, const char *path); | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Rename or move a file or directory | ||||
| // | ||||
| @@ -462,6 +479,19 @@ int lfs_remove(lfs_t *lfs, const char *path); | ||||
| int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath); | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Rename or move a file or directory | ||||
| // | ||||
| // If the destination exists, it must match the source in type. | ||||
| // If the destination is a directory, it must not contain | ||||
| // any subdirectories, but it may contain files. This is | ||||
| // non-POSIX behavior, and thus is a different call | ||||
| // than lfs_rename(...). | ||||
| // | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_rename_with_removeall(lfs_t *lfs, const char *oldpath, const char *newpath); | ||||
| #endif | ||||
|  | ||||
| // Find info about a file or directory | ||||
| // | ||||
| // Fills out the info structure, based on the specified file or directory. | ||||
|   | ||||
							
								
								
									
										10
									
								
								lfs_util.h
									
									
									
									
									
								
							
							
						
						
									
										10
									
								
								lfs_util.h
									
									
									
									
									
								
							| @@ -49,6 +49,7 @@ extern "C" | ||||
| // code footprint | ||||
|  | ||||
| // Logging functions | ||||
| #ifndef LFS_TRACE | ||||
| #ifdef LFS_YES_TRACE | ||||
| #define LFS_TRACE_(fmt, ...) \ | ||||
|     printf("%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| @@ -56,7 +57,9 @@ extern "C" | ||||
| #else | ||||
| #define LFS_TRACE(...) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_DEBUG | ||||
| #ifndef LFS_NO_DEBUG | ||||
| #define LFS_DEBUG_(fmt, ...) \ | ||||
|     printf("%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| @@ -64,7 +67,9 @@ extern "C" | ||||
| #else | ||||
| #define LFS_DEBUG(...) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_WARN | ||||
| #ifndef LFS_NO_WARN | ||||
| #define LFS_WARN_(fmt, ...) \ | ||||
|     printf("%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| @@ -72,7 +77,9 @@ extern "C" | ||||
| #else | ||||
| #define LFS_WARN(...) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_ERROR | ||||
| #ifndef LFS_NO_ERROR | ||||
| #define LFS_ERROR_(fmt, ...) \ | ||||
|     printf("%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| @@ -80,13 +87,16 @@ extern "C" | ||||
| #else | ||||
| #define LFS_ERROR(...) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| // Runtime assertions | ||||
| #ifndef LFS_ASSERT | ||||
| #ifndef LFS_NO_ASSERT | ||||
| #define LFS_ASSERT(test) assert(test) | ||||
| #else | ||||
| #define LFS_ASSERT(test) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
|  | ||||
| // Builtin functions, these may be replaced by more efficient | ||||
|   | ||||
							
								
								
									
										214
									
								
								scripts/code.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										214
									
								
								scripts/code.py
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,214 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Script to find code size at the function level. Basically just a bit wrapper | ||||
| # around nm with some extra conveniences for comparing builds. Heavily inspired | ||||
| # by Linux's Bloat-O-Meter. | ||||
| # | ||||
|  | ||||
| import os | ||||
| import glob | ||||
| import itertools as it | ||||
| import subprocess as sp | ||||
| import shlex | ||||
| import re | ||||
| import csv | ||||
| import collections as co | ||||
|  | ||||
|  | ||||
| OBJ_PATHS = ['*.o', 'bd/*.o'] | ||||
|  | ||||
def collect(paths, **args):
    """Collect symbol sizes from object files.

    Runs the configured nm tool over each path and accumulates symbol
    sizes keyed by (object file, function name).

    Args:
        paths: iterable of object-file paths to inspect.
        **args: options; uses 'type' (nm symbol-type characters to keep),
            'nm_tool' (argv prefix for the nm tool), 'verbose', and
            'build_dir' (prefix stripped from file names to map object
            files back to source files).

    Returns:
        List of (file, function, size) tuples.
    """
    results = co.defaultdict(lambda: 0)
    # nm --size-sort emits lines like "0000002a T lfs_mount"
    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)' +
        ' (?P<type>[%s])' % re.escape(args['type']) +
        ' (?P<func>.+?)$')
    for path in paths:
        # note nm-tool may contain extra args
        cmd = args['nm_tool'] + ['--size-sort', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True)
        for line in proc.stdout:
            m = pattern.match(line)
            if m:
                # sizes are hex; the same symbol may appear more than
                # once, so accumulate
                results[(path, m.group('func'))] += int(m.group('size'), 16)
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

    flat_results = []
    for (file, func), size in results.items():
        # map to source files
        if args.get('build_dir'):
            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # discard internal functions
        if func.startswith('__'):
            continue
        # discard .8449 suffixes created by optimizer (raw string so the
        # backslash escapes the regex dot, not a Python string escape)
        func = re.sub(r'\.[0-9]+', '', func)
        flat_results.append((file, func, size))

    return flat_results
|  | ||||
def main(**args):
    """Report code size at the function level.

    Sizes come either from running nm over .o files (default) or from a
    previously written CSV ('use'). Optionally diffs against another CSV
    ('diff'), writes results to CSV ('output'), and prints a report.
    """
    # find sizes
    if not args.get('use', None):
        # find .o files
        paths = []
        for obj_path in args['obj_paths']:
            if os.path.isdir(obj_path):
                obj_path = obj_path + '/*.o'

            for path in glob.glob(obj_path):
                paths.append(path)

        if not paths:
            # we glob for .o files, so report that (was ".obj")
            print('no .o files found in %r?' % args['obj_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        with open(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['function'],
                    int(result['size']))
                for result in r]

    total = 0
    for _, _, size in results:
        total += size

    # find previous results?
    if args.get('diff'):
        with open(args['diff']) as f:
            r = csv.DictReader(f)
            prev_results = [
                (   result['file'],
                    result['function'],
                    int(result['size']))
                for result in r]

        prev_total = 0
        for _, _, size in prev_results:
            prev_total += size

    # write results to CSV
    if args.get('output'):
        with open(args['output'], 'w') as f:
            w = csv.writer(f)
            w.writerow(['file', 'function', 'size'])
            for file, func, size in sorted(results):
                w.writerow((file, func, size))

    # print results
    def dedup_entries(results, by='function'):
        # fold sizes into one entry per function (or per file)
        entries = co.defaultdict(lambda: 0)
        for file, func, size in results:
            entry = (file if by == 'file' else func)
            entries[entry] += size
        return entries

    def diff_entries(olds, news):
        # map name -> (old, new, diff, ratio); entries present on only
        # one side show up as 0 on the other
        diff = co.defaultdict(lambda: (0, 0, 0, 0))
        for name, new in news.items():
            diff[name] = (0, new, new, 1.0)
        for name, old in olds.items():
            _, new, _, _ = diff[name]
            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
        return diff

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s' % (by, 'size'))
        else:
            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

    def print_entries(by='function'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, size in sorted(entries.items()):
                print("%-36s %7d" % (name, size))
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for old, _, _, _ in diff.values() if not old),
                sum(1 for _, new, _, _ in diff.values() if not new)))
            # 'delta' avoids shadowing the diff dict being iterated
            for name, (old, new, delta, ratio) in sorted(diff.items(),
                    key=lambda x: (-x[1][3], x)):
                if ratio or args.get('all'):
                    print("%-36s %7s %7s %+7d%s" % (name,
                        old or "-",
                        new or "-",
                        delta,
                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_totals():
        if not args.get('diff'):
            print("%-36s %7d" % ('TOTAL', total))
        else:
            ratio = (total-prev_total)/prev_total if prev_total else 1.0
            print("%-36s %7s %7s %+7d%s" % (
                'TOTAL',
                prev_total if prev_total else '-',
                total if total else '-',
                total-prev_total,
                ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='function')
        print_totals()
|  | ||||
if __name__ == "__main__":
    # Command-line entry point: parse arguments and exit with main()'s
    # return value.
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find code size at the function level.")
    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
        help="Description of where to find *.o files. May be a directory \
            or a list of paths. Defaults to %r." % OBJ_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't compile and find code sizes, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff code size against.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('--files', action='store_true',
        help="Show file-level code sizes. Note this does not include padding! "
            "So sizes may differ from other tools.")
    parser.add_argument('-s', '--summary', action='store_true',
        help="Only show the total code size.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('--type', default='tTrRdDbB',
        help="Type of symbols to report, this uses the same single-character "
            "type-names emitted by nm. Defaults to %(default)r.")
    # type=lambda splits "--nm-tool 'arm-none-eabi-nm -C'" into an argv list
    parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
        help="Path to the nm tool to use.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    # argparse maps --nm-tool/--build-dir to nm_tool/build_dir kwargs
    sys.exit(main(**vars(parser.parse_args())))
							
								
								
									
										254
									
								
								scripts/coverage.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										254
									
								
								scripts/coverage.py
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,254 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Parse and report coverage info from .info files generated by lcov | ||||
| # | ||||
| import os | ||||
| import glob | ||||
| import csv | ||||
| import re | ||||
| import collections as co | ||||
| import bisect as b | ||||
|  | ||||
|  | ||||
| INFO_PATHS = ['tests/*.toml.info'] | ||||
|  | ||||
def collect(paths, **args):
    """Collect per-function coverage from lcov .info files.

    Parses SF: (source file), FN: (function start line), and DA: (line
    hit-count) records, then attributes each instrumented line to the
    function that contains it.

    Args:
        paths: iterable of lcov .info file paths.
        **args: unused; accepted for symmetry with the other scripts.

    Returns:
        List of (file, function, hits, count) tuples, where count is the
        number of instrumented lines in the function and hits the number
        of those lines executed at least once.
    """
    file = None
    funcs = []
    lines = co.defaultdict(lambda: 0)
    pattern = re.compile(
        '^(?P<file>SF:/?(?P<file_name>.*))$'
        '|^(?P<func>FN:(?P<func_lineno>[0-9]*),(?P<func_name>.*))$'
        '|^(?P<line>DA:(?P<line_lineno>[0-9]*),(?P<line_hits>[0-9]*))$')
    for path in paths:
        with open(path) as f:
            for line in f:
                m = pattern.match(line)
                if m and m.group('file'):
                    file = m.group('file_name')
                elif m and file and m.group('func'):
                    funcs.append((file, int(m.group('func_lineno')),
                        m.group('func_name')))
                elif m and file and m.group('line'):
                    # accumulate hits across multiple .info files
                    lines[(file, int(m.group('line_lineno')))] += (
                        int(m.group('line_hits')))

    # map line numbers to functions
    funcs.sort()
    def func_from_lineno(file, lineno):
        # find the last function declared before lineno in this file
        i = b.bisect(funcs, (file, lineno))
        if i and funcs[i-1][0] == file:
            return funcs[i-1][2]
        else:
            return None

    # reduce to function info
    reduced_funcs = co.defaultdict(lambda: (0, 0))
    for (file, line_lineno), line_hits in lines.items():
        func = func_from_lineno(file, line_lineno)
        if not func:
            continue
        hits, count = reduced_funcs[(file, func)]
        reduced_funcs[(file, func)] = (hits + (line_hits > 0), count + 1)

    results = []
    for (file, func), (hits, count) in reduced_funcs.items():
        # discard internal/testing functions (test_* injected with
        # internal testing)
        if func.startswith('__') or func.startswith('test_'):
            continue
        # discard .8449 suffixes created by optimizer (raw string so the
        # backslash escapes the regex dot, not a Python string escape)
        func = re.sub(r'\.[0-9]+', '', func)
        results.append((file, func, hits, count))

    return results
|  | ||||
|  | ||||
def main(**args):
    """Report coverage at the function level.

    Coverage comes either from lcov .info files found on disk (default)
    or from a previously written CSV ('use'). Optionally diffs against
    another CSV ('diff'), writes results to CSV ('output'), and prints a
    report.
    """
    # NOTE: the module-level imports lack sys (only the __main__ guard
    # imports it), so bind it locally to keep this importable
    import sys

    # find coverage
    if not args.get('use'):
        # find *.info files
        paths = []
        for info_path in args['info_paths']:
            if os.path.isdir(info_path):
                # directories are searched for the lcov .info files that
                # collect() knows how to parse (was '*.gcov', which is a
                # different, unparseable format)
                info_path = info_path + '/*.info'

            for path in glob.glob(info_path):
                paths.append(path)

        if not paths:
            print('no .info files found in %r?' % args['info_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        with open(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['function'],
                    int(result['hits']),
                    int(result['count']))
                for result in r]

    total_hits, total_count = 0, 0
    for _, _, hits, count in results:
        total_hits += hits
        total_count += count

    # find previous results?
    if args.get('diff'):
        with open(args['diff']) as f:
            r = csv.DictReader(f)
            prev_results = [
                (   result['file'],
                    result['function'],
                    int(result['hits']),
                    int(result['count']))
                for result in r]

        prev_total_hits, prev_total_count = 0, 0
        for _, _, hits, count in prev_results:
            prev_total_hits += hits
            prev_total_count += count

    # write results to CSV
    if args.get('output'):
        with open(args['output'], 'w') as f:
            w = csv.writer(f)
            w.writerow(['file', 'function', 'hits', 'count'])
            for file, func, hits, count in sorted(results):
                w.writerow((file, func, hits, count))

    # print results
    def dedup_entries(results, by='function'):
        # fold hits/counts into one entry per function (or per file)
        entries = co.defaultdict(lambda: (0, 0))
        for file, func, hits, count in results:
            entry = (file if by == 'file' else func)
            entry_hits, entry_count = entries[entry]
            entries[entry] = (entry_hits + hits, entry_count + count)
        return entries

    def diff_entries(olds, news):
        # map name -> (old_hits, old_count, new_hits, new_count,
        # diff_hits, diff_count, ratio); entries present on only one
        # side show up as 0 on the other
        diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0))
        for name, (new_hits, new_count) in news.items():
            diff[name] = (
                0, 0,
                new_hits, new_count,
                new_hits, new_count,
                (new_hits/new_count if new_count else 1.0) - 1.0)
        for name, (old_hits, old_count) in olds.items():
            _, _, new_hits, new_count, _, _, _ = diff[name]
            diff[name] = (
                old_hits, old_count,
                new_hits, new_count,
                new_hits-old_hits, new_count-old_count,
                ((new_hits/new_count if new_count else 1.0)
                    - (old_hits/old_count if old_count else 1.0)))
        return diff

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %19s' % (by, 'hits/line'))
        else:
            print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))

    def print_entries(by='function'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, (hits, count) in sorted(entries.items()):
                print("%-36s %11s %7s" % (name,
                    '%d/%d' % (hits, count)
                        if count else '-',
                    '%.1f%%' % (100*hits/count)
                        if count else '-'))
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for _, old, _, _, _, _, _ in diff.values() if not old),
                sum(1 for _, _, _, new, _, _, _ in diff.values() if not new)))
            for name, (
                    old_hits, old_count,
                    new_hits, new_count,
                    diff_hits, diff_count, ratio) in sorted(diff.items(),
                        key=lambda x: (-x[1][6], x)):
                if ratio or args.get('all'):
                    print("%-36s %11s %7s %11s %7s %11s%s" % (name,
                        '%d/%d' % (old_hits, old_count)
                            if old_count else '-',
                        '%.1f%%' % (100*old_hits/old_count)
                            if old_count else '-',
                        '%d/%d' % (new_hits, new_count)
                            if new_count else '-',
                        '%.1f%%' % (100*new_hits/new_count)
                            if new_count else '-',
                        '%+d/%+d' % (diff_hits, diff_count),
                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_totals():
        if not args.get('diff'):
            print("%-36s %11s %7s" % ('TOTAL',
                '%d/%d' % (total_hits, total_count)
                    if total_count else '-',
                '%.1f%%' % (100*total_hits/total_count)
                    if total_count else '-'))
        else:
            ratio = ((total_hits/total_count
                    if total_count else 1.0)
                - (prev_total_hits/prev_total_count
                    if prev_total_count else 1.0))
            print("%-36s %11s %7s %11s %7s %11s%s" % ('TOTAL',
                '%d/%d' % (prev_total_hits, prev_total_count)
                    if prev_total_count else '-',
                '%.1f%%' % (100*prev_total_hits/prev_total_count)
                    if prev_total_count else '-',
                '%d/%d' % (total_hits, total_count)
                    if total_count else '-',
                '%.1f%%' % (100*total_hits/total_count)
                    if total_count else '-',
                '%+d/%+d' % (total_hits-prev_total_hits,
                    total_count-prev_total_count),
                ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='function')
        print_totals()
|  | ||||
if __name__ == "__main__":
    # Command-line entry point: parse arguments and exit with main()'s
    # return value.
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Parse and report coverage info from .info files \
            generated by lcov")
    parser.add_argument('info_paths', nargs='*', default=INFO_PATHS,
        help="Description of where to find *.info files. May be a directory \
            or list of paths. *.info files will be merged to show the total \
            coverage. Defaults to %r." % INFO_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't do any work, instead use this CSV file.")
    # help text previously said "code size" — copy-pasted from code.py
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff coverage against.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('--files', action='store_true',
        help="Show file-level coverage.")
    parser.add_argument('-s', '--summary', action='store_true',
        help="Only show the total coverage.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    sys.exit(main(**vars(parser.parse_args())))
							
								
								
									
										191
									
								
								scripts/test.py
									
									
									
									
									
								
							
							
						
						
									
										191
									
								
								scripts/test.py
									
									
									
									
									
								
							| @@ -20,19 +20,50 @@ import pty | ||||
| import errno | ||||
| import signal | ||||
|  | ||||
| TESTDIR = 'tests' | ||||
| TEST_PATHS = 'tests' | ||||
| RULES = """ | ||||
| # add block devices to sources | ||||
| TESTSRC ?= $(SRC) $(wildcard bd/*.c) | ||||
|  | ||||
| define FLATTEN | ||||
| tests/%$(subst /,.,$(target)): $(target) | ||||
| %(path)s%%$(subst /,.,$(target)): $(target) | ||||
|     ./scripts/explode_asserts.py $$< -o $$@ | ||||
| endef | ||||
| $(foreach target,$(SRC),$(eval $(FLATTEN))) | ||||
|  | ||||
| -include tests/*.d | ||||
| $(foreach target,$(TESTSRC),$(eval $(FLATTEN))) | ||||
|  | ||||
| -include %(path)s*.d | ||||
| .SECONDARY: | ||||
| %.test: %.test.o $(foreach f,$(subst /,.,$(SRC:.c=.o)),%.$f) | ||||
|  | ||||
| %(path)s.test: %(path)s.test.o \\ | ||||
|         $(foreach t,$(subst /,.,$(TESTSRC:.c=.o)),%(path)s.$t) | ||||
|     $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ | ||||
|  | ||||
| # needed in case builddir is different | ||||
| %(path)s%%.o: %(path)s%%.c | ||||
|     $(CC) -c -MMD $(CFLAGS) $< -o $@ | ||||
| """ | ||||
| COVERAGE_RULES = """ | ||||
| %(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage | ||||
|  | ||||
| # delete lingering coverage | ||||
| %(path)s.test: | %(path)s.info.clean | ||||
| .PHONY: %(path)s.info.clean | ||||
| %(path)s.info.clean: | ||||
|     rm -f %(path)s*.gcda | ||||
|  | ||||
| # accumulate coverage info | ||||
| .PHONY: %(path)s.info | ||||
| %(path)s.info: | ||||
|     $(strip $(LCOV) -c \\ | ||||
|         $(addprefix -d ,$(wildcard %(path)s*.gcda)) \\ | ||||
|         --rc 'geninfo_adjust_src_path=$(shell pwd)' \\ | ||||
|         -o $@) | ||||
|     $(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@ | ||||
| ifdef COVERAGETARGET | ||||
|     $(strip $(LCOV) -a $@ \\ | ||||
|         $(addprefix -a ,$(wildcard $(COVERAGETARGET))) \\ | ||||
|         -o $(COVERAGETARGET)) | ||||
| endif | ||||
| """ | ||||
| GLOBALS = """ | ||||
| //////////////// AUTOGENERATED TEST //////////////// | ||||
| @@ -119,6 +150,8 @@ class TestCase: | ||||
|         self.if_ = config.get('if', None) | ||||
|         self.in_ = config.get('in', None) | ||||
|  | ||||
|         self.result = None | ||||
|  | ||||
|     def __str__(self): | ||||
|         if hasattr(self, 'permno'): | ||||
|             if any(k not in self.case.defines for k in self.defines): | ||||
| @@ -179,7 +212,7 @@ class TestCase: | ||||
|                 len(self.filter) >= 2 and | ||||
|                 self.filter[1] != self.permno): | ||||
|             return False | ||||
|         elif args.get('no_internal', False) and self.in_ is not None: | ||||
|         elif args.get('no_internal') and self.in_ is not None: | ||||
|             return False | ||||
|         elif self.if_ is not None: | ||||
|             if_ = self.if_ | ||||
| @@ -213,7 +246,7 @@ class TestCase: | ||||
|                 try: | ||||
|                     with open(disk, 'w') as f: | ||||
|                         f.truncate(0) | ||||
|                     if args.get('verbose', False): | ||||
|                     if args.get('verbose'): | ||||
|                         print('truncate --size=0', disk) | ||||
|                 except FileNotFoundError: | ||||
|                     pass | ||||
| @@ -237,14 +270,14 @@ class TestCase: | ||||
|                     '-ex', 'r']) | ||||
|             ncmd.extend(['--args'] + cmd) | ||||
|  | ||||
|             if args.get('verbose', False): | ||||
|             if args.get('verbose'): | ||||
|                 print(' '.join(shlex.quote(c) for c in ncmd)) | ||||
|             signal.signal(signal.SIGINT, signal.SIG_IGN) | ||||
|             sys.exit(sp.call(ncmd)) | ||||
|  | ||||
|         # run test case! | ||||
|         mpty, spty = pty.openpty() | ||||
|         if args.get('verbose', False): | ||||
|         if args.get('verbose'): | ||||
|             print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|         proc = sp.Popen(cmd, stdout=spty, stderr=spty) | ||||
|         os.close(spty) | ||||
| @@ -260,7 +293,7 @@ class TestCase: | ||||
|                         break | ||||
|                     raise | ||||
|                 stdout.append(line) | ||||
|                 if args.get('verbose', False): | ||||
|                 if args.get('verbose'): | ||||
|                     sys.stdout.write(line) | ||||
|                 # intercept asserts | ||||
|                 m = re.match( | ||||
| @@ -299,7 +332,7 @@ class ValgrindTestCase(TestCase): | ||||
|         return not self.leaky and super().shouldtest(**args) | ||||
|  | ||||
|     def test(self, exec=[], **args): | ||||
|         verbose = args.get('verbose', False) | ||||
|         verbose = args.get('verbose') | ||||
|         uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1) | ||||
|         exec = [ | ||||
|             'valgrind', | ||||
| @@ -351,12 +384,17 @@ class TestSuite: | ||||
|         self.name = os.path.basename(path) | ||||
|         if self.name.endswith('.toml'): | ||||
|             self.name = self.name[:-len('.toml')] | ||||
|         self.path = path | ||||
|         if args.get('build_dir'): | ||||
|             self.toml = path | ||||
|             self.path = args['build_dir'] + '/' + path | ||||
|         else: | ||||
|             self.toml = path | ||||
|             self.path = path | ||||
|         self.classes = classes | ||||
|         self.defines = defines.copy() | ||||
|         self.filter = filter | ||||
|  | ||||
|         with open(path) as f: | ||||
|         with open(self.toml) as f: | ||||
|             # load tests | ||||
|             config = toml.load(f) | ||||
|  | ||||
| @@ -467,7 +505,7 @@ class TestSuite: | ||||
|  | ||||
|     def build(self, **args): | ||||
|         # build test files | ||||
|         tf = open(self.path + '.test.c.t', 'w') | ||||
|         tf = open(self.path + '.test.tc', 'w') | ||||
|         tf.write(GLOBALS) | ||||
|         if self.code is not None: | ||||
|             tf.write('#line %d "%s"\n' % (self.code_lineno, self.path)) | ||||
| @@ -477,7 +515,7 @@ class TestSuite: | ||||
|         for case in self.cases: | ||||
|             if case.in_ not in tfs: | ||||
|                 tfs[case.in_] = open(self.path+'.'+ | ||||
|                     case.in_.replace('/', '.')+'.t', 'w') | ||||
|                     re.sub('(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w') | ||||
|                 tfs[case.in_].write('#line 1 "%s"\n' % case.in_) | ||||
|                 with open(case.in_) as f: | ||||
|                     for line in f: | ||||
| @@ -516,25 +554,33 @@ class TestSuite: | ||||
|  | ||||
|         # write makefiles | ||||
|         with open(self.path + '.mk', 'w') as mk: | ||||
|             mk.write(RULES.replace(4*' ', '\t')) | ||||
|             mk.write(RULES.replace(4*' ', '\t') % dict(path=self.path)) | ||||
|             mk.write('\n') | ||||
|  | ||||
|             # add coverage hooks? | ||||
|             if args.get('coverage'): | ||||
|                 mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict( | ||||
|                     path=self.path)) | ||||
|                 mk.write('\n') | ||||
|  | ||||
|             # add truely global defines globally | ||||
|             for k, v in sorted(self.defines.items()): | ||||
|                 mk.write('%s: override CFLAGS += -D%s=%r\n' % ( | ||||
|                     self.path+'.test', k, v)) | ||||
|                 mk.write('%s.test: override CFLAGS += -D%s=%r\n' | ||||
|                     % (self.path, k, v)) | ||||
|  | ||||
|             for path in tfs: | ||||
|                 if path is None: | ||||
|                     mk.write('%s: %s | %s\n' % ( | ||||
|                         self.path+'.test.c', | ||||
|                         self.path, | ||||
|                         self.path+'.test.c.t')) | ||||
|                         self.toml, | ||||
|                         self.path+'.test.tc')) | ||||
|                 else: | ||||
|                     mk.write('%s: %s %s | %s\n' % ( | ||||
|                         self.path+'.'+path.replace('/', '.'), | ||||
|                         self.path, path, | ||||
|                         self.path+'.'+path.replace('/', '.')+'.t')) | ||||
|                         self.toml, | ||||
|                         path, | ||||
|                         self.path+'.'+re.sub('(\.c)?$', '.tc', | ||||
|                             path.replace('/', '.')))) | ||||
|                 mk.write('\t./scripts/explode_asserts.py $| -o $@\n') | ||||
|  | ||||
|         self.makefile = self.path + '.mk' | ||||
| @@ -557,7 +603,7 @@ class TestSuite: | ||||
|                 if not args.get('verbose', True): | ||||
|                     sys.stdout.write(FAIL) | ||||
|                     sys.stdout.flush() | ||||
|                 if not args.get('keep_going', False): | ||||
|                 if not args.get('keep_going'): | ||||
|                     if not args.get('verbose', True): | ||||
|                         sys.stdout.write('\n') | ||||
|                     raise | ||||
| @@ -579,30 +625,30 @@ def main(**args): | ||||
|  | ||||
|     # and what class of TestCase to run | ||||
|     classes = [] | ||||
|     if args.get('normal', False): | ||||
|     if args.get('normal'): | ||||
|         classes.append(TestCase) | ||||
|     if args.get('reentrant', False): | ||||
|     if args.get('reentrant'): | ||||
|         classes.append(ReentrantTestCase) | ||||
|     if args.get('valgrind', False): | ||||
|     if args.get('valgrind'): | ||||
|         classes.append(ValgrindTestCase) | ||||
|     if not classes: | ||||
|         classes = [TestCase] | ||||
|  | ||||
|     suites = [] | ||||
|     for testpath in args['testpaths']: | ||||
|     for testpath in args['test_paths']: | ||||
|         # optionally specified test case/perm | ||||
|         testpath, *filter = testpath.split('#') | ||||
|         filter = [int(f) for f in filter] | ||||
|  | ||||
|         # figure out the suite's toml file | ||||
|         if os.path.isdir(testpath): | ||||
|             testpath = testpath + '/test_*.toml' | ||||
|             testpath = testpath + '/*.toml' | ||||
|         elif os.path.isfile(testpath): | ||||
|             testpath = testpath | ||||
|         elif testpath.endswith('.toml'): | ||||
|             testpath = TESTDIR + '/' + testpath | ||||
|             testpath = TEST_PATHS + '/' + testpath | ||||
|         else: | ||||
|             testpath = TESTDIR + '/' + testpath + '.toml' | ||||
|             testpath = TEST_PATHS + '/' + testpath + '.toml' | ||||
|  | ||||
|         # find tests | ||||
|         for path in glob.glob(testpath): | ||||
| @@ -628,7 +674,7 @@ def main(**args): | ||||
|         list(it.chain.from_iterable(['-f', m] for m in makefiles)) + | ||||
|         [target for target in targets]) | ||||
|     mpty, spty = pty.openpty() | ||||
|     if args.get('verbose', False): | ||||
|     if args.get('verbose'): | ||||
|         print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|     proc = sp.Popen(cmd, stdout=spty, stderr=spty) | ||||
|     os.close(spty) | ||||
| @@ -642,14 +688,14 @@ def main(**args): | ||||
|                 break | ||||
|             raise | ||||
|         stdout.append(line) | ||||
|         if args.get('verbose', False): | ||||
|         if args.get('verbose'): | ||||
|             sys.stdout.write(line) | ||||
|         # intercept warnings | ||||
|         m = re.match( | ||||
|             '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$' | ||||
|             .format('(?:\033\[[\d;]*.| )*', 'warning'), | ||||
|             line) | ||||
|         if m and not args.get('verbose', False): | ||||
|         if m and not args.get('verbose'): | ||||
|             try: | ||||
|                 with open(m.group(1)) as f: | ||||
|                     lineno = int(m.group(2)) | ||||
| @@ -662,27 +708,26 @@ def main(**args): | ||||
|             except: | ||||
|                 pass | ||||
|     proc.wait() | ||||
|  | ||||
|     if proc.returncode != 0: | ||||
|         if not args.get('verbose', False): | ||||
|         if not args.get('verbose'): | ||||
|             for line in stdout: | ||||
|                 sys.stdout.write(line) | ||||
|         sys.exit(-3) | ||||
|         sys.exit(-1) | ||||
|  | ||||
|     print('built %d test suites, %d test cases, %d permutations' % ( | ||||
|         len(suites), | ||||
|         sum(len(suite.cases) for suite in suites), | ||||
|         sum(len(suite.perms) for suite in suites))) | ||||
|  | ||||
|     filtered = 0 | ||||
|     total = 0 | ||||
|     for suite in suites: | ||||
|         for perm in suite.perms: | ||||
|             filtered += perm.shouldtest(**args) | ||||
|     if filtered != sum(len(suite.perms) for suite in suites): | ||||
|         print('filtered down to %d permutations' % filtered) | ||||
|             total += perm.shouldtest(**args) | ||||
|     if total != sum(len(suite.perms) for suite in suites): | ||||
|         print('filtered down to %d permutations' % total) | ||||
|  | ||||
|     # only requested to build? | ||||
|     if args.get('build', False): | ||||
|     if args.get('build'): | ||||
|         return 0 | ||||
|  | ||||
|     print('====== testing ======') | ||||
| @@ -697,15 +742,12 @@ def main(**args): | ||||
|     failed = 0 | ||||
|     for suite in suites: | ||||
|         for perm in suite.perms: | ||||
|             if not hasattr(perm, 'result'): | ||||
|                 continue | ||||
|  | ||||
|             if perm.result == PASS: | ||||
|                 passed += 1 | ||||
|             else: | ||||
|             elif isinstance(perm.result, TestFailure): | ||||
|                 sys.stdout.write( | ||||
|                     "\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m " | ||||
|                     "{perm} failed with {returncode}\n".format( | ||||
|                     "{perm} failed\n".format( | ||||
|                         perm=perm, path=perm.suite.path, lineno=perm.lineno, | ||||
|                         returncode=perm.result.returncode or 0)) | ||||
|                 if perm.result.stdout: | ||||
| @@ -723,11 +765,33 @@ def main(**args): | ||||
|                 sys.stdout.write('\n') | ||||
|                 failed += 1 | ||||
|  | ||||
|     if args.get('gdb', False): | ||||
|     if args.get('coverage'): | ||||
|         # collect coverage info | ||||
|         # why -j1? lcov doesn't work in parallel because of gcov limitations | ||||
|         cmd = (['make', '-j1', '-f', 'Makefile'] + | ||||
|             list(it.chain.from_iterable(['-f', m] for m in makefiles)) + | ||||
|             (['COVERAGETARGET=%s' % args['coverage']] | ||||
|                 if isinstance(args['coverage'], str) else []) + | ||||
|             [suite.path + '.info' for suite in suites | ||||
|                 if any(perm.result == PASS for perm in suite.perms)]) | ||||
|         if args.get('verbose'): | ||||
|             print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|         proc = sp.Popen(cmd, | ||||
|             stdout=sp.PIPE if not args.get('verbose') else None, | ||||
|             stderr=sp.STDOUT if not args.get('verbose') else None, | ||||
|             universal_newlines=True) | ||||
|         proc.wait() | ||||
|         if proc.returncode != 0: | ||||
|             if not args.get('verbose'): | ||||
|                 for line in proc.stdout: | ||||
|                     sys.stdout.write(line) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|     if args.get('gdb'): | ||||
|         failure = None | ||||
|         for suite in suites: | ||||
|             for perm in suite.perms: | ||||
|                 if getattr(perm, 'result', PASS) != PASS: | ||||
|                 if isinstance(perm.result, TestFailure): | ||||
|                     failure = perm.result | ||||
|         if failure is not None: | ||||
|             print('======= gdb ======') | ||||
| @@ -735,20 +799,22 @@ def main(**args): | ||||
|             failure.case.test(failure=failure, **args) | ||||
|             sys.exit(0) | ||||
|  | ||||
|     print('tests passed: %d' % passed) | ||||
|     print('tests failed: %d' % failed) | ||||
|     print('tests passed %d/%d (%.2f%%)' % (passed, total, | ||||
|         100*(passed/total if total else 1.0))) | ||||
|     print('tests failed %d/%d (%.2f%%)' % (failed, total, | ||||
|         100*(failed/total if total else 1.0))) | ||||
|     return 1 if failed > 0 else 0 | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     import argparse | ||||
|     parser = argparse.ArgumentParser( | ||||
|         description="Run parameterized tests in various configurations.") | ||||
|     parser.add_argument('testpaths', nargs='*', default=[TESTDIR], | ||||
|     parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS], | ||||
|         help="Description of test(s) to run. By default, this is all tests \ | ||||
|             found in the \"{0}\" directory. Here, you can specify a different \ | ||||
|             directory of tests, a specific file, a suite by name, and even a \ | ||||
|             specific test case by adding brackets. For example \ | ||||
|             \"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TESTDIR)) | ||||
|             directory of tests, a specific file, a suite by name, and even \ | ||||
|             specific test cases and permutations. For example \ | ||||
|             \"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TEST_PATHS)) | ||||
|     parser.add_argument('-D', action='append', default=[], | ||||
|         help="Overriding parameter definitions.") | ||||
|     parser.add_argument('-v', '--verbose', action='store_true', | ||||
| @@ -769,10 +835,19 @@ if __name__ == "__main__": | ||||
|         help="Run tests normally.") | ||||
|     parser.add_argument('-r', '--reentrant', action='store_true', | ||||
|         help="Run reentrant tests with simulated power-loss.") | ||||
|     parser.add_argument('-V', '--valgrind', action='store_true', | ||||
|     parser.add_argument('--valgrind', action='store_true', | ||||
|         help="Run non-leaky tests under valgrind to check for memory leaks.") | ||||
|     parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '), | ||||
|     parser.add_argument('--exec', default=[], type=lambda e: e.split(), | ||||
|         help="Run tests with another executable prefixed on the command line.") | ||||
|     parser.add_argument('-d', '--disk', | ||||
|     parser.add_argument('--disk', | ||||
|         help="Specify a file to use for persistent/reentrant tests.") | ||||
|     parser.add_argument('--coverage', type=lambda x: x if x else True, | ||||
|         nargs='?', const='', | ||||
|         help="Collect coverage information during testing. This uses lcov/gcov \ | ||||
|             to accumulate coverage information into *.info files. May also \ | ||||
|             a path to a *.info file to accumulate coverage info into.") | ||||
|     parser.add_argument('--build-dir', | ||||
|         help="Build relative to the specified directory instead of the \ | ||||
|             current directory.") | ||||
|  | ||||
|     sys.exit(main(**vars(parser.parse_args()))) | ||||
|   | ||||
							
								
								
									
										305
									
								
								tests/test_relocations_with_removeall.toml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										305
									
								
								tests/test_relocations_with_removeall.toml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,305 @@ | ||||
| # specific corner cases worth explicitly testing for | ||||
| [[case]] # dangling split dir test | ||||
| define.ITERATIONS = 20 | ||||
| define.COUNT = 10 | ||||
| define.LFS_BLOCK_CYCLES = [8, 1] | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     // fill up filesystem so only ~16 blocks are left | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0; | ||||
|     memset(buffer, 0, 512); | ||||
|     while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) { | ||||
|         lfs_file_write(&lfs, &file, buffer, 512) => 512; | ||||
|     } | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     // make a child dir to use in bounded space | ||||
|     lfs_mkdir(&lfs, "child") => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     for (int j = 0; j < ITERATIONS; j++) { | ||||
|         for (int i = 0; i < COUNT; i++) { | ||||
|             sprintf(path, "child/test%03d_loooooooooooooooooong_name", i); | ||||
|             lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0; | ||||
|             lfs_file_close(&lfs, &file) => 0; | ||||
|         } | ||||
|  | ||||
|         lfs_dir_open(&lfs, &dir, "child") => 0; | ||||
|         lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|         lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|         for (int i = 0; i < COUNT; i++) { | ||||
|             sprintf(path, "test%03d_loooooooooooooooooong_name", i); | ||||
|             lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|             strcmp(info.name, path) => 0; | ||||
|         } | ||||
|         lfs_dir_read(&lfs, &dir, &info) => 0; | ||||
|         lfs_dir_close(&lfs, &dir) => 0; | ||||
|  | ||||
|         if (j == ITERATIONS-1) { | ||||
|             break; | ||||
|         } | ||||
|  | ||||
|         for (int i = 0; i < COUNT; i++) { | ||||
|             sprintf(path, "child/test%03d_loooooooooooooooooong_name", i); | ||||
|             lfs_removeall(&lfs, path) => 0; | ||||
|         } | ||||
|     } | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_dir_open(&lfs, &dir, "child") => 0; | ||||
|     lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|     lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|     for (int i = 0; i < COUNT; i++) { | ||||
|         sprintf(path, "test%03d_loooooooooooooooooong_name", i); | ||||
|         lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|         strcmp(info.name, path) => 0; | ||||
|     } | ||||
|     lfs_dir_read(&lfs, &dir, &info) => 0; | ||||
|     lfs_dir_close(&lfs, &dir) => 0; | ||||
|     for (int i = 0; i < COUNT; i++) { | ||||
|         sprintf(path, "child/test%03d_loooooooooooooooooong_name", i); | ||||
|         lfs_removeall(&lfs, path) => 0; | ||||
|     } | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|  | ||||
| [[case]] # outdated head test | ||||
| define.ITERATIONS = 20 | ||||
| define.COUNT = 10 | ||||
| define.LFS_BLOCK_CYCLES = [8, 1] | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     // fill up filesystem so only ~16 blocks are left | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0; | ||||
|     memset(buffer, 0, 512); | ||||
|     while (LFS_BLOCK_COUNT - lfs_fs_size(&lfs) > 16) { | ||||
|         lfs_file_write(&lfs, &file, buffer, 512) => 512; | ||||
|     } | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     // make a child dir to use in bounded space | ||||
|     lfs_mkdir(&lfs, "child") => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     for (int j = 0; j < ITERATIONS; j++) { | ||||
|         for (int i = 0; i < COUNT; i++) { | ||||
|             sprintf(path, "child/test%03d_loooooooooooooooooong_name", i); | ||||
|             lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0; | ||||
|             lfs_file_close(&lfs, &file) => 0; | ||||
|         } | ||||
|  | ||||
|         lfs_dir_open(&lfs, &dir, "child") => 0; | ||||
|         lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|         lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|         for (int i = 0; i < COUNT; i++) { | ||||
|             sprintf(path, "test%03d_loooooooooooooooooong_name", i); | ||||
|             lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|             strcmp(info.name, path) => 0; | ||||
|             info.size => 0; | ||||
|  | ||||
|             sprintf(path, "child/test%03d_loooooooooooooooooong_name", i); | ||||
|             lfs_file_open(&lfs, &file, path, LFS_O_WRONLY) => 0; | ||||
|             lfs_file_write(&lfs, &file, "hi", 2) => 2; | ||||
|             lfs_file_close(&lfs, &file) => 0; | ||||
|         } | ||||
|         lfs_dir_read(&lfs, &dir, &info) => 0; | ||||
|  | ||||
|         lfs_dir_rewind(&lfs, &dir) => 0; | ||||
|         lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|         lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|         for (int i = 0; i < COUNT; i++) { | ||||
|             sprintf(path, "test%03d_loooooooooooooooooong_name", i); | ||||
|             lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|             strcmp(info.name, path) => 0; | ||||
|             info.size => 2; | ||||
|  | ||||
|             sprintf(path, "child/test%03d_loooooooooooooooooong_name", i); | ||||
|             lfs_file_open(&lfs, &file, path, LFS_O_WRONLY) => 0; | ||||
|             lfs_file_write(&lfs, &file, "hi", 2) => 2; | ||||
|             lfs_file_close(&lfs, &file) => 0; | ||||
|         } | ||||
|         lfs_dir_read(&lfs, &dir, &info) => 0; | ||||
|  | ||||
|         lfs_dir_rewind(&lfs, &dir) => 0; | ||||
|         lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|         lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|         for (int i = 0; i < COUNT; i++) { | ||||
|             sprintf(path, "test%03d_loooooooooooooooooong_name", i); | ||||
|             lfs_dir_read(&lfs, &dir, &info) => 1; | ||||
|             strcmp(info.name, path) => 0; | ||||
|             info.size => 2; | ||||
|         } | ||||
|         lfs_dir_read(&lfs, &dir, &info) => 0; | ||||
|         lfs_dir_close(&lfs, &dir) => 0; | ||||
|  | ||||
|         for (int i = 0; i < COUNT; i++) { | ||||
|             sprintf(path, "child/test%03d_loooooooooooooooooong_name", i); | ||||
|             lfs_removeall(&lfs, path) => 0; | ||||
|         } | ||||
|     } | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|  | ||||
| [[case]] # reentrant testing for relocations, this is the same as the | ||||
|          # orphan testing, except here we also set block_cycles so that | ||||
|          # almost every tree operation needs a relocation | ||||
| reentrant = true | ||||
| # TODO fix this case, caused by non-DAG trees | ||||
| if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)' | ||||
| define = [ | ||||
|     {FILES=6,  DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=3,  DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1}, | ||||
| ] | ||||
| code = ''' | ||||
|     err = lfs_mount(&lfs, &cfg); | ||||
|     if (err) { | ||||
|         lfs_format(&lfs, &cfg) => 0; | ||||
|         lfs_mount(&lfs, &cfg) => 0; | ||||
|     } | ||||
|  | ||||
|     srand(1); | ||||
|     const char alpha[] = "abcdefghijklmnopqrstuvwxyz"; | ||||
|     for (int i = 0; i < CYCLES; i++) { | ||||
|         // create random path | ||||
|         char full_path[256]; | ||||
|         for (int d = 0; d < DEPTH; d++) { | ||||
|             sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]); | ||||
|         } | ||||
|  | ||||
|         // if it does not exist, we create it, else we destroy | ||||
|         int res = lfs_stat(&lfs, full_path, &info); | ||||
|         if (res == LFS_ERR_NOENT) { | ||||
|             // create each directory in turn, ignore if dir already exists | ||||
|             for (int d = 0; d < DEPTH; d++) { | ||||
|                 strcpy(path, full_path); | ||||
|                 path[2*d+2] = '\0'; | ||||
|                 err = lfs_mkdir(&lfs, path); | ||||
|                 assert(!err || err == LFS_ERR_EXIST); | ||||
|             } | ||||
|  | ||||
|             for (int d = 0; d < DEPTH; d++) { | ||||
|                 strcpy(path, full_path); | ||||
|                 path[2*d+2] = '\0'; | ||||
|                 lfs_stat(&lfs, path, &info) => 0; | ||||
|                 assert(strcmp(info.name, &path[2*d+1]) == 0); | ||||
|                 assert(info.type == LFS_TYPE_DIR); | ||||
|             } | ||||
|         } else { | ||||
|             // is valid dir? | ||||
|             assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0); | ||||
|             assert(info.type == LFS_TYPE_DIR); | ||||
|  | ||||
|             // try to delete path in reverse order, ignore if dir is not empty | ||||
|             for (int d = DEPTH-1; d >= 0; d--) { | ||||
|                 strcpy(path, full_path); | ||||
|                 path[2*d+2] = '\0'; | ||||
|                 err = lfs_removeall(&lfs, path); | ||||
|                 assert(!err || err == LFS_ERR_NOTEMPTY); | ||||
|             } | ||||
|  | ||||
|             lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT; | ||||
|         } | ||||
|     } | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|  | ||||
| [[case]] # reentrant testing for relocations, but now with random renames! | ||||
| reentrant = true | ||||
| # TODO fix this case, caused by non-DAG trees | ||||
| if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)' | ||||
| define = [ | ||||
|     {FILES=6,  DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=3,  DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1}, | ||||
| ] | ||||
| code = ''' | ||||
|     err = lfs_mount(&lfs, &cfg); | ||||
|     if (err) { | ||||
|         lfs_format(&lfs, &cfg) => 0; | ||||
|         lfs_mount(&lfs, &cfg) => 0; | ||||
|     } | ||||
|  | ||||
|     srand(1); | ||||
|     const char alpha[] = "abcdefghijklmnopqrstuvwxyz"; | ||||
|     for (int i = 0; i < CYCLES; i++) { | ||||
|         // create random path | ||||
|         char full_path[256]; | ||||
|         for (int d = 0; d < DEPTH; d++) { | ||||
|             sprintf(&full_path[2*d], "/%c", alpha[rand() % FILES]); | ||||
|         } | ||||
|  | ||||
|         // if it does not exist, we create it, else we destroy | ||||
|         int res = lfs_stat(&lfs, full_path, &info); | ||||
|         assert(!res || res == LFS_ERR_NOENT); | ||||
|         if (res == LFS_ERR_NOENT) { | ||||
|             // create each directory in turn, ignore if dir already exists | ||||
|             for (int d = 0; d < DEPTH; d++) { | ||||
|                 strcpy(path, full_path); | ||||
|                 path[2*d+2] = '\0'; | ||||
|                 err = lfs_mkdir(&lfs, path); | ||||
|                 assert(!err || err == LFS_ERR_EXIST); | ||||
|             } | ||||
|  | ||||
|             for (int d = 0; d < DEPTH; d++) { | ||||
|                 strcpy(path, full_path); | ||||
|                 path[2*d+2] = '\0'; | ||||
|                 lfs_stat(&lfs, path, &info) => 0; | ||||
|                 assert(strcmp(info.name, &path[2*d+1]) == 0); | ||||
|                 assert(info.type == LFS_TYPE_DIR); | ||||
|             } | ||||
|         } else { | ||||
|             assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0); | ||||
|             assert(info.type == LFS_TYPE_DIR); | ||||
|  | ||||
|             // create new random path | ||||
|             char new_path[256]; | ||||
|             for (int d = 0; d < DEPTH; d++) { | ||||
|                 sprintf(&new_path[2*d], "/%c", alpha[rand() % FILES]); | ||||
|             } | ||||
|  | ||||
|             // if new path does not exist, rename, otherwise destroy | ||||
|             res = lfs_stat(&lfs, new_path, &info); | ||||
|             assert(!res || res == LFS_ERR_NOENT); | ||||
|             if (res == LFS_ERR_NOENT) { | ||||
|                 // stop once some dir is renamed | ||||
|                 for (int d = 0; d < DEPTH; d++) { | ||||
|                     strcpy(&path[2*d], &full_path[2*d]); | ||||
|                     path[2*d+2] = '\0'; | ||||
|                     strcpy(&path[128+2*d], &new_path[2*d]); | ||||
|                     path[128+2*d+2] = '\0'; | ||||
|                     err = lfs_rename(&lfs, path, path+128); | ||||
|                     assert(!err || err == LFS_ERR_NOTEMPTY); | ||||
|                     if (!err) { | ||||
|                         strcpy(path, path+128); | ||||
|                     } | ||||
|                 } | ||||
|  | ||||
|                 for (int d = 0; d < DEPTH; d++) { | ||||
|                     strcpy(path, new_path); | ||||
|                     path[2*d+2] = '\0'; | ||||
|                     lfs_stat(&lfs, path, &info) => 0; | ||||
|                     assert(strcmp(info.name, &path[2*d+1]) == 0); | ||||
|                     assert(info.type == LFS_TYPE_DIR); | ||||
|                 } | ||||
|                  | ||||
|                 lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT; | ||||
|             } else { | ||||
|                 // try to delete path in reverse order, | ||||
|                 // ignore if dir is not empty | ||||
|                 for (int d = DEPTH-1; d >= 0; d--) { | ||||
|                     strcpy(path, full_path); | ||||
|                     path[2*d+2] = '\0'; | ||||
|                     err = lfs_removeall(&lfs, path); | ||||
|                     assert(!err || err == LFS_ERR_NOTEMPTY); | ||||
|                 } | ||||
|  | ||||
|                 lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT; | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
| @@ -392,3 +392,48 @@ code = ''' | ||||
|  | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|  | ||||
| [[case]] # noop truncate | ||||
| define.MEDIUMSIZE = [32, 2048] | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_file_open(&lfs, &file, "baldynoop", | ||||
|             LFS_O_RDWR | LFS_O_CREAT) => 0; | ||||
|  | ||||
|     strcpy((char*)buffer, "hair"); | ||||
|     size = strlen((char*)buffer); | ||||
|     for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) { | ||||
|         lfs_file_write(&lfs, &file, buffer, size) => size; | ||||
|  | ||||
|         // this truncate should do nothing | ||||
|         lfs_file_truncate(&lfs, &file, j+size) => 0; | ||||
|     } | ||||
|     lfs_file_size(&lfs, &file) => MEDIUMSIZE; | ||||
|  | ||||
|     lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0; | ||||
|     // should do nothing again | ||||
|     lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0; | ||||
|     lfs_file_size(&lfs, &file) => MEDIUMSIZE; | ||||
|  | ||||
|     for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) { | ||||
|         lfs_file_read(&lfs, &file, buffer, size) => size; | ||||
|         memcmp(buffer, "hair", size) => 0; | ||||
|     } | ||||
|     lfs_file_read(&lfs, &file, buffer, size) => 0; | ||||
|  | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     // still there after reboot? | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0; | ||||
|     lfs_file_size(&lfs, &file) => MEDIUMSIZE; | ||||
|     for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) { | ||||
|         lfs_file_read(&lfs, &file, buffer, size) => size; | ||||
|         memcmp(buffer, "hair", size) => 0; | ||||
|     } | ||||
|     lfs_file_read(&lfs, &file, buffer, size) => 0; | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|   | ||||
		Reference in New Issue
	
	Block a user