mirror of
				https://github.com/eledio-devices/thirdparty-littlefs.git
				synced 2025-10-31 16:14:16 +01:00 
			
		
		
		
	Compare commits
	
		
			174 Commits
		
	
	
		
			test-revam
			...
			master
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
|  | 40dba4a556 | ||
|  | 148e312ea3 | ||
|  | abbfe8e92e | ||
|  | c60c977c25 | ||
|  | 3ce64d1ac0 | ||
|  | 0ced3623d4 | ||
|  | 5451a6d503 | ||
|  | 1e038c81fc | ||
|  | f28ac3ea7d | ||
|  | a94fbda1cd | ||
|  | cc025653ed | ||
|  | bfb9bd2483 | ||
|  | f40b854ab5 | ||
|  | c2fa1bb7df | ||
|  | 3b62ec1c47 | ||
|  | b898977fd8 | ||
|  | cf274e6ec6 | ||
|  | 425dc810a5 | ||
|  | a6f01b7d6e | ||
|  | 9c7e232086 | ||
|  | c676bcee4c | ||
|  | 03f088b92c | ||
|  | e955b9f65d | ||
|  | 99f58139cb | ||
|  | 5801169348 | ||
|  | 2d6f4ead13 | ||
|  | 3d1b89b41a | ||
|  | 45cefb825d | ||
|  | bbb9e3873e | ||
|  | c6d3c48939 | ||
|  | 2db5dc80c2 | ||
|  | 1363c9f9d4 | ||
|  | 5bc682a0d4 | ||
|  | 8109f28266 | ||
|  | fedf646c79 | ||
|  | 84da4c0b1a | ||
|  | 554e4b1444 | ||
|  | fe8f3d4f18 | ||
|  | 316b019f41 | ||
|  | 8475c8064d | ||
|  | 563af5f364 | ||
|  | 3b495bab79 | ||
|  | e4adefd1d7 | ||
|  | 9d54603ce2 | ||
|  | 7ea2b515aa | ||
|  | 55b3c538d5 | ||
|  | eb8be9f351 | ||
|  | 50ad2adc96 | ||
|  | 0a2ff3b6ff | ||
|  | d7582efec8 | ||
|  | f4c7af76f8 | ||
|  | 20c58dcbaa | ||
|  | f5286abe7a | ||
|  | 2cdabe810d | ||
|  | b045436c23 | ||
|  | 1877c40aac | ||
|  | e29e7aeefa | ||
|  | e334983767 | ||
|  | 4977fa0c0e | ||
|  | fdda3b4aa2 | ||
|  | 487df12dde | ||
|  | 3efb8e44f3 | ||
|  | fb2c311bb4 | ||
|  | ead50807f1 | ||
|  | 2f7596811d | ||
|  | 1e423bae58 | ||
|  | 3bee4d9a19 | ||
|  | 1863dc7883 | ||
|  | 3d4e4f2085 | ||
|  | a2c744c8f8 | ||
|  | c0cc0a417e | ||
|  | bca64d76cf | ||
|  | cab1d6cca6 | ||
|  | c9eed1f181 | ||
|  | e7e4b352bd | ||
|  | 9449ef4be4 | ||
|  | cfe779fc08 | ||
|  | 0db6466984 | ||
|  | 21488d9e06 | ||
|  | 10a08833c6 | ||
|  | 47d6b2fcf3 | ||
|  | 745d98cde0 | ||
|  | 3216b07c3b | ||
|  | 6592719d28 | ||
|  | c9110617b3 | ||
|  | 104d65113d | ||
|  | 6d3e4ac33e | ||
|  | 9d6546071b | ||
|  | b84fb6bcc5 | ||
|  | 887f3660ed | ||
|  | eeeceb9e30 | ||
|  | b2235e956d | ||
|  | 6bb4043154 | ||
|  | 2b804537b0 | ||
|  | d804c2d3b7 | ||
|  | 37f4de2976 | ||
|  | 6b16dafb4d | ||
|  | 1a59954ec6 | ||
|  | 6a7012774d | ||
|  | 288a5cbc8d | ||
|  | 5783eea0de | ||
|  | 2bb523421e | ||
|  | 7388b2938a | ||
|  | ce425a56c3 | ||
|  | a99a93fb27 | ||
|  | 45afded784 | ||
|  | 00a9ba7826 | ||
|  | fc6988c7c3 | ||
|  | d0f055d321 | ||
|  | b9fa33f9bc | ||
|  | 2efebf8e9b | ||
|  | 754b4c3cda | ||
|  | 584eb26efc | ||
|  | 008ebc37df | ||
|  | 66272067ab | ||
|  | e273a82679 | ||
|  | 1dc6ae94b9 | ||
|  | 817ef02d24 | ||
|  | b8dcf10974 | ||
|  | 0aba71d0d6 | ||
|  | 0ea2871e24 | ||
|  | d04c1392c0 | ||
|  | f215027fd4 | ||
|  | 1ae4b36f2a | ||
|  | 480cdd9f81 | ||
|  | 6303558aee | ||
|  | 4bd653dd00 | ||
|  | 8e6826c4e2 | ||
|  | 10ac6b9cf0 | ||
|  | 87a2cb0e41 | ||
|  | 6d0ec5e851 | ||
|  | 4c9146ea53 | ||
|  | 5a9f38df01 | ||
|  | 1b033e9ab6 | ||
|  | a049f1318e | ||
|  | 7257681f5d | ||
|  | 2da340af69 | ||
|  | 02881e591b | ||
|  | 38024d5a17 | ||
|  | 4a9bac4418 | ||
|  | 6121495444 | ||
|  | 6372f515fe | ||
|  | 6622f3deee | ||
|  | 5137e4b0ba | ||
|  | ff84902970 | ||
|  | 01e42abd10 | ||
|  | f9dbec3d92 | ||
|  | f17d3d7eba | ||
|  | 5e5b5d8572 | ||
|  | d498b9fb31 | ||
|  | 4677421aba | ||
|  | 64f70f51b0 | ||
|  | cb26157880 | ||
|  | a7dfae4526 | ||
|  | 50fe8ae258 | ||
|  | 0990296619 | ||
|  | d04b077506 | ||
|  | c7987a3162 | ||
|  | dcae185a00 | ||
|  | f4b17b379c | ||
|  | 9f546f154f | ||
|  | b69cf890e6 | ||
|  | 02c84ac5f4 | ||
|  | 6530cb3a61 | ||
|  | fe957de892 | ||
|  | 6a550844f4 | ||
|  | f9c2fd93f2 | ||
|  | 44d7112794 | ||
|  | 77e3078b9f | ||
|  | 517d3414c5 | ||
|  | 4fb188369d | ||
|  | c8e9a64a21 | ||
|  | 626006af0c | ||
|  | 5a12c443b8 | 
							
								
								
									
										26
									
								
								.github/workflows/post-release.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										26
									
								
								.github/workflows/post-release.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,26 @@ | ||||
| name: post-release | ||||
| on: | ||||
|   release: | ||||
|     branches: [master] | ||||
|     types: [released] | ||||
|  | ||||
| jobs: | ||||
|   post-release: | ||||
|     runs-on: ubuntu-20.04 | ||||
|     steps: | ||||
|       # trigger post-release in dependency repo, this indirection allows the | ||||
|       # dependency repo to be updated often without affecting this repo. At | ||||
|       # the time of this comment, the dependency repo is responsible for | ||||
|       # creating PRs for other dependent repos post-release. | ||||
|       - name: trigger-post-release | ||||
|         continue-on-error: true | ||||
|         run: | | ||||
|           curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \ | ||||
|             "$GITHUB_API_URL/repos/${{secrets.POST_RELEASE_REPO}}/dispatches" \ | ||||
|             -d "$(jq -n '{ | ||||
|               event_type: "post-release", | ||||
|               client_payload: { | ||||
|                 repo: env.GITHUB_REPOSITORY, | ||||
|                 version: "${{github.event.release.tag_name}}"}}' \ | ||||
|               | tee /dev/stderr)" | ||||
|  | ||||
							
								
								
									
										196
									
								
								.github/workflows/release.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										196
									
								
								.github/workflows/release.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,196 @@ | ||||
| name: release | ||||
| on: | ||||
|   workflow_run: | ||||
|     workflows: [test] | ||||
|     branches: [master] | ||||
|     types: [completed] | ||||
|  | ||||
| jobs: | ||||
|   release: | ||||
|     runs-on: ubuntu-20.04 | ||||
|  | ||||
|     # need to manually check for a couple things | ||||
|     # - tests passed? | ||||
|     # - we are the most recent commit on master? | ||||
|     if: ${{github.event.workflow_run.conclusion == 'success' && | ||||
|       github.event.workflow_run.head_sha == github.sha}} | ||||
|  | ||||
|     steps: | ||||
|       - uses: actions/checkout@v2 | ||||
|         with: | ||||
|           ref: ${{github.event.workflow_run.head_sha}} | ||||
|           # need workflow access since we push branches | ||||
|           # containing workflows | ||||
|           token: ${{secrets.BOT_TOKEN}} | ||||
|           # need all tags | ||||
|           fetch-depth: 0 | ||||
|  | ||||
|       # try to get results from tests | ||||
|       - uses: dawidd6/action-download-artifact@v2 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           workflow: ${{github.event.workflow_run.name}} | ||||
|           run_id: ${{github.event.workflow_run.id}} | ||||
|           name: results | ||||
|           path: results | ||||
|  | ||||
|       - name: find-version | ||||
|         run: | | ||||
|           # rip version from lfs.h | ||||
|           LFS_VERSION="$(grep -o '^#define LFS_VERSION .*$' lfs.h \ | ||||
|             | awk '{print $3}')" | ||||
|           LFS_VERSION_MAJOR="$((0xffff & ($LFS_VERSION >> 16)))" | ||||
|           LFS_VERSION_MINOR="$((0xffff & ($LFS_VERSION >>  0)))" | ||||
|  | ||||
|           # find a new patch version based on what we find in our tags | ||||
|           LFS_VERSION_PATCH="$( \ | ||||
|             ( git describe --tags --abbrev=0 \ | ||||
|                 --match="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.*" \ | ||||
|               || echo 'v0.0.-1' ) \ | ||||
|             | awk -F '.' '{print $3+1}')" | ||||
|  | ||||
|           # found new version | ||||
|           LFS_VERSION="v$LFS_VERSION_MAJOR` | ||||
|             `.$LFS_VERSION_MINOR` | ||||
|             `.$LFS_VERSION_PATCH" | ||||
|           echo "LFS_VERSION=$LFS_VERSION" | ||||
|           echo "LFS_VERSION=$LFS_VERSION" >> $GITHUB_ENV | ||||
|           echo "LFS_VERSION_MAJOR=$LFS_VERSION_MAJOR" >> $GITHUB_ENV | ||||
|           echo "LFS_VERSION_MINOR=$LFS_VERSION_MINOR" >> $GITHUB_ENV | ||||
|           echo "LFS_VERSION_PATCH=$LFS_VERSION_PATCH" >> $GITHUB_ENV | ||||
|  | ||||
|       # try to find previous version? | ||||
|       - name: find-prev-version | ||||
|         continue-on-error: true | ||||
|         run: | | ||||
|           LFS_PREV_VERSION="$(git describe --tags --abbrev=0 --match 'v*')" | ||||
|           echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" | ||||
|           echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" >> $GITHUB_ENV | ||||
|  | ||||
|       # try to find results from tests | ||||
|       - name: collect-results | ||||
|         run: | | ||||
|           # previous results to compare against? | ||||
|           [ -n "$LFS_PREV_VERSION" ] && curl -sS \ | ||||
|             "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/` | ||||
|               `status/$LFS_PREV_VERSION?per_page=100" \ | ||||
|             | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \ | ||||
|             >> prev-results.json \ | ||||
|             || true | ||||
|  | ||||
|           # build table for GitHub | ||||
|           echo "<table>" >> results.txt | ||||
|           echo "<thead>" >> results.txt | ||||
|           echo "<tr>" >> results.txt | ||||
|           echo "<th align=left>Configuration</th>" >> results.txt | ||||
|           for r in Code Stack Structs Coverage | ||||
|           do | ||||
|             echo "<th align=right>$r</th>" >> results.txt | ||||
|           done | ||||
|           echo "</tr>" >> results.txt | ||||
|           echo "</thead>" >> results.txt | ||||
|  | ||||
|           echo "<tbody>" >> results.txt | ||||
|           for c in "" readonly threadsafe migrate error-asserts | ||||
|           do | ||||
|             echo "<tr>" >> results.txt | ||||
|             c_or_default=${c:-default} | ||||
|             echo "<td align=left>${c_or_default^}</td>" >> results.txt | ||||
|             for r in code stack structs | ||||
|             do | ||||
|               # per-config results | ||||
|               echo "<td align=right>" >> results.txt | ||||
|               [ -e results/thumb${c:+-$c}.csv ] && ( \ | ||||
|                 export PREV="$(jq -re ' | ||||
|                       select(.context == "'"results (thumb${c:+, $c}) / $r"'").description | ||||
|                       | capture("(?<result>[0-9∞]+)").result' \ | ||||
|                     prev-results.json || echo 0)" | ||||
|                 ./scripts/summary.py results/thumb${c:+-$c}.csv -f $r -Y | awk ' | ||||
|                   NR==2 {printf "%s B",$2} | ||||
|                   NR==2 && ENVIRON["PREV"]+0 != 0 { | ||||
|                     printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]} | ||||
|                   NR==2 {printf "\n"}' \ | ||||
|                 | sed -e 's/ /\ /g' \ | ||||
|                 >> results.txt) | ||||
|               echo "</td>" >> results.txt | ||||
|             done | ||||
|             # coverage results | ||||
|             if [ -z $c ] | ||||
|             then | ||||
|               echo "<td rowspan=0 align=right>" >> results.txt | ||||
|               [ -e results/coverage.csv ] && ( \ | ||||
|                 export PREV="$(jq -re ' | ||||
|                       select(.context == "results / coverage").description | ||||
|                       | capture("(?<result>[0-9\\.]+)").result' \ | ||||
|                     prev-results.json || echo 0)" | ||||
|                 ./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' ' | ||||
|                   NR==2 {printf "%.1f%% of %d lines",$4,$3} | ||||
|                   NR==2 && ENVIRON["PREV"]+0 != 0 { | ||||
|                     printf " (%+.1f%%)",$4-ENVIRON["PREV"]} | ||||
|                   NR==2 {printf "\n"}' \ | ||||
|                 | sed -e 's/ /\ /g' \ | ||||
|                 >> results.txt) | ||||
|               echo "</td>" >> results.txt | ||||
|             fi | ||||
|             echo "</tr>" >> results.txt | ||||
|           done | ||||
|           echo "</tbody>" >> results.txt | ||||
|           echo "</table>" >> results.txt | ||||
|  | ||||
|           cat results.txt | ||||
|  | ||||
|       # find changes from history | ||||
|       - name: collect-changes | ||||
|         run: | | ||||
|           [ -n "$LFS_PREV_VERSION" ] || exit 0 | ||||
|           # use explicit link to github commit so that release notes can | ||||
|           # be copied elsewhere | ||||
|           git log "$LFS_PREV_VERSION.." \ | ||||
|             --grep='^Merge' --invert-grep \ | ||||
|             --format="format:[\`%h\`](` | ||||
|               `https://github.com/$GITHUB_REPOSITORY/commit/%h) %s" \ | ||||
|             > changes.txt | ||||
|           echo "CHANGES:" | ||||
|           cat changes.txt | ||||
|  | ||||
|       # create and update major branches (vN and vN-prefix) | ||||
|       - name: create-major-branches | ||||
|         run: | | ||||
|           # create major branch | ||||
|           git branch "v$LFS_VERSION_MAJOR" HEAD | ||||
|  | ||||
|           # create major prefix branch | ||||
|           git config user.name ${{secrets.BOT_USER}} | ||||
|           git config user.email ${{secrets.BOT_EMAIL}} | ||||
|           git fetch "https://github.com/$GITHUB_REPOSITORY.git" \ | ||||
|             "v$LFS_VERSION_MAJOR-prefix" || true | ||||
|           ./scripts/prefix.py "lfs$LFS_VERSION_MAJOR" | ||||
|           git branch "v$LFS_VERSION_MAJOR-prefix" $( \ | ||||
|             git commit-tree $(git write-tree) \ | ||||
|               $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \ | ||||
|               -p HEAD \ | ||||
|               -m "Generated v$LFS_VERSION_MAJOR prefixes") | ||||
|           git reset --hard | ||||
|  | ||||
|           # push! | ||||
|           git push --atomic origin \ | ||||
|             "v$LFS_VERSION_MAJOR" \ | ||||
|             "v$LFS_VERSION_MAJOR-prefix" | ||||
|  | ||||
|       # build release notes | ||||
|       - name: create-release | ||||
|         run: | | ||||
|           # create release and patch version tag (vN.N.N) | ||||
|           # only draft if not a patch release | ||||
|           [ -e results.txt ] && export RESULTS="$(cat results.txt)" | ||||
|           [ -e changes.txt ] && export CHANGES="$(cat changes.txt)" | ||||
|           curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \ | ||||
|             "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \ | ||||
|             -d "$(jq -n '{ | ||||
|               tag_name: env.LFS_VERSION, | ||||
|               name: env.LFS_VERSION | rtrimstr(".0"), | ||||
|               target_commitish: "${{github.event.workflow_run.head_sha}}", | ||||
|               draft: env.LFS_VERSION | endswith(".0"), | ||||
|               body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \ | ||||
|               | tee /dev/stderr)" | ||||
|  | ||||
							
								
								
									
										55
									
								
								.github/workflows/status.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										55
									
								
								.github/workflows/status.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,55 @@ | ||||
| name: status | ||||
| on: | ||||
|   workflow_run: | ||||
|     workflows: [test] | ||||
|     types: [completed] | ||||
|  | ||||
| jobs: | ||||
|   status: | ||||
|     runs-on: ubuntu-20.04 | ||||
|     steps: | ||||
|       # custom statuses? | ||||
|       - uses: dawidd6/action-download-artifact@v2 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           workflow: ${{github.event.workflow_run.name}} | ||||
|           run_id: ${{github.event.workflow_run.id}} | ||||
|           name: status | ||||
|           path: status | ||||
|       - name: update-status | ||||
|         continue-on-error: true | ||||
|         run: | | ||||
|           ls status | ||||
|           for s in $(shopt -s nullglob ; echo status/*.json) | ||||
|           do | ||||
|             # parse requested status | ||||
|             export STATE="$(jq -er '.state' $s)" | ||||
|             export CONTEXT="$(jq -er '.context' $s)" | ||||
|             export DESCRIPTION="$(jq -er '.description' $s)" | ||||
|             # help lookup URL for job/steps because GitHub makes | ||||
|             # it VERY HARD to link to specific jobs | ||||
|             export TARGET_URL="$( | ||||
|               jq -er '.target_url // empty' $s || ( | ||||
|                 export TARGET_JOB="$(jq -er '.target_job' $s)" | ||||
|                 export TARGET_STEP="$(jq -er '.target_step // ""' $s)" | ||||
|                 curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \ | ||||
|                   "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/` | ||||
|                     `${{github.event.workflow_run.id}}/jobs" \ | ||||
|                   | jq -er '.jobs[] | ||||
|                     | select(.name == env.TARGET_JOB) | ||||
|                     | .html_url | ||||
|                       + "?check_suite_focus=true" | ||||
|                       + ((.steps[] | ||||
|                         | select(.name == env.TARGET_STEP) | ||||
|                         | "#step:\(.number):0") // "")'))" | ||||
|             # update status | ||||
|             curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \ | ||||
|               "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/` | ||||
|                 `${{github.event.workflow_run.head_sha}}" \ | ||||
|               -d "$(jq -n '{ | ||||
|                 state: env.STATE, | ||||
|                 context: env.CONTEXT, | ||||
|                 description: env.DESCRIPTION, | ||||
|                 target_url: env.TARGET_URL}' \ | ||||
|                 | tee /dev/stderr)" | ||||
|           done | ||||
							
								
								
									
										472
									
								
								.github/workflows/test.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										472
									
								
								.github/workflows/test.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,472 @@ | ||||
| name: test | ||||
| on: [push, pull_request] | ||||
|  | ||||
| env: | ||||
|   CFLAGS: -Werror | ||||
|   MAKEFLAGS: -j | ||||
|  | ||||
| jobs: | ||||
|   # run tests | ||||
|   test: | ||||
|     runs-on: ubuntu-20.04 | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         arch: [x86_64, thumb, mips, powerpc] | ||||
|  | ||||
|     steps: | ||||
|       - uses: actions/checkout@v2 | ||||
|       - name: install | ||||
|         run: | | ||||
|           # need a few additional tools | ||||
|           # | ||||
|           # note this includes gcc-10, which is required for -fcallgraph-info=su | ||||
|           sudo apt-get update -qq | ||||
|           sudo apt-get install -qq gcc-10 python3 python3-pip lcov | ||||
|           sudo pip3 install toml | ||||
|           echo "CC=gcc-10" >> $GITHUB_ENV | ||||
|           gcc-10 --version | ||||
|           lcov --version | ||||
|           python3 --version | ||||
|  | ||||
|           # need newer lcov version for gcc-10 | ||||
|           #sudo apt-get remove lcov | ||||
|           #wget https://launchpad.net/ubuntu/+archive/primary/+files/lcov_1.15-1_all.deb | ||||
|           #sudo apt install ./lcov_1.15-1_all.deb | ||||
|           #lcov --version | ||||
|           #which lcov | ||||
|           #ls -lha /usr/bin/lcov | ||||
|           wget https://github.com/linux-test-project/lcov/releases/download/v1.15/lcov-1.15.tar.gz | ||||
|           tar xf lcov-1.15.tar.gz | ||||
|           sudo make -C lcov-1.15 install | ||||
|  | ||||
|           # setup a ram-backed disk to speed up reentrant tests | ||||
|           mkdir disks | ||||
|           sudo mount -t tmpfs -o size=100m tmpfs disks | ||||
|           TESTFLAGS="$TESTFLAGS --disk=disks/disk" | ||||
|  | ||||
|           # collect coverage | ||||
|           mkdir -p coverage | ||||
|           TESTFLAGS="$TESTFLAGS --coverage=` | ||||
|             `coverage/${{github.job}}-${{matrix.arch}}.info" | ||||
|  | ||||
|           echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV | ||||
|  | ||||
|       # cross-compile with ARM Thumb (32-bit, little-endian) | ||||
|       - name: install-thumb | ||||
|         if: ${{matrix.arch == 'thumb'}} | ||||
|         run: | | ||||
|           sudo apt-get install -qq \ | ||||
|             gcc-10-arm-linux-gnueabi \ | ||||
|             libc6-dev-armel-cross \ | ||||
|             qemu-user | ||||
|           echo "CC=arm-linux-gnueabi-gcc-10 -mthumb --static" >> $GITHUB_ENV | ||||
|           echo "EXEC=qemu-arm" >> $GITHUB_ENV | ||||
|           arm-linux-gnueabi-gcc-10 --version | ||||
|           qemu-arm -version | ||||
|       # cross-compile with MIPS (32-bit, big-endian) | ||||
|       - name: install-mips | ||||
|         if: ${{matrix.arch == 'mips'}} | ||||
|         run: | | ||||
|           sudo apt-get install -qq \ | ||||
|             gcc-10-mips-linux-gnu \ | ||||
|             libc6-dev-mips-cross \ | ||||
|             qemu-user | ||||
|           echo "CC=mips-linux-gnu-gcc-10 --static" >> $GITHUB_ENV | ||||
|           echo "EXEC=qemu-mips" >> $GITHUB_ENV | ||||
|           mips-linux-gnu-gcc-10 --version | ||||
|           qemu-mips -version | ||||
|       # cross-compile with PowerPC (32-bit, big-endian) | ||||
|       - name: install-powerpc | ||||
|         if: ${{matrix.arch == 'powerpc'}} | ||||
|         run: | | ||||
|           sudo apt-get install -qq \ | ||||
|             gcc-10-powerpc-linux-gnu \ | ||||
|             libc6-dev-powerpc-cross \ | ||||
|             qemu-user | ||||
|           echo "CC=powerpc-linux-gnu-gcc-10 --static" >> $GITHUB_ENV | ||||
|           echo "EXEC=qemu-ppc" >> $GITHUB_ENV | ||||
|           powerpc-linux-gnu-gcc-10 --version | ||||
|           qemu-ppc -version | ||||
|  | ||||
|       # make sure example can at least compile | ||||
|       - name: test-example | ||||
|         run: | | ||||
|           sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c | ||||
|           make all CFLAGS+=" \ | ||||
|             -Duser_provided_block_device_read=NULL \ | ||||
|             -Duser_provided_block_device_prog=NULL \ | ||||
|             -Duser_provided_block_device_erase=NULL \ | ||||
|             -Duser_provided_block_device_sync=NULL \ | ||||
|             -include stdio.h" | ||||
|           rm test.c | ||||
|  | ||||
|       # test configurations | ||||
|       # normal+reentrant tests | ||||
|       - name: test-default | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk" | ||||
|       # NOR flash: read/prog = 1 block = 4KiB | ||||
|       - name: test-nor | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" | ||||
|       # SD/eMMC: read/prog = 512 block = 512 | ||||
|       - name: test-emmc | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" | ||||
|       # NAND flash: read/prog = 4KiB block = 32KiB | ||||
|       - name: test-nand | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" | ||||
|       # other extreme geometries that are useful for various corner cases | ||||
|       - name: test-no-intrinsics | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_NO_INTRINSICS" | ||||
|       - name: test-byte-writes | ||||
|         # it just takes too long to test byte-level writes when in qemu, | ||||
|         # should be plenty covered by the other configurations | ||||
|         if: ${{matrix.arch == 'x86_64'}} | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" | ||||
|       - name: test-block-cycles | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_BLOCK_CYCLES=1" | ||||
|       - name: test-odd-block-count | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" | ||||
|       - name: test-odd-block-size | ||||
|         run: | | ||||
|           make clean | ||||
|           make test TESTFLAGS+="-nrk \ | ||||
|             -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" | ||||
|  | ||||
|       # upload coverage for later coverage | ||||
|       - name: upload-coverage | ||||
|         uses: actions/upload-artifact@v2 | ||||
|         with: | ||||
|           name: coverage | ||||
|           path: coverage | ||||
|           retention-days: 1 | ||||
|  | ||||
|       # update results | ||||
|       - name: results | ||||
|         run: | | ||||
|           mkdir -p results | ||||
|           make clean | ||||
|           make lfs.csv \ | ||||
|             CFLAGS+=" \ | ||||
|               -DLFS_NO_ASSERT \ | ||||
|               -DLFS_NO_DEBUG \ | ||||
|               -DLFS_NO_WARN \ | ||||
|               -DLFS_NO_ERROR" | ||||
|           cp lfs.csv results/${{matrix.arch}}.csv | ||||
|           ./scripts/summary.py results/${{matrix.arch}}.csv | ||||
|       - name: results-readonly | ||||
|         run: | | ||||
|           mkdir -p results | ||||
|           make clean | ||||
|           make lfs.csv \ | ||||
|             CFLAGS+=" \ | ||||
|               -DLFS_NO_ASSERT \ | ||||
|               -DLFS_NO_DEBUG \ | ||||
|               -DLFS_NO_WARN \ | ||||
|               -DLFS_NO_ERROR \ | ||||
|               -DLFS_READONLY" | ||||
|           cp lfs.csv results/${{matrix.arch}}-readonly.csv | ||||
|           ./scripts/summary.py results/${{matrix.arch}}-readonly.csv | ||||
|       - name: results-threadsafe | ||||
|         run: | | ||||
|           mkdir -p results | ||||
|           make clean | ||||
|           make lfs.csv \ | ||||
|             CFLAGS+=" \ | ||||
|               -DLFS_NO_ASSERT \ | ||||
|               -DLFS_NO_DEBUG \ | ||||
|               -DLFS_NO_WARN \ | ||||
|               -DLFS_NO_ERROR \ | ||||
|               -DLFS_THREADSAFE" | ||||
|           cp lfs.csv results/${{matrix.arch}}-threadsafe.csv | ||||
|           ./scripts/summary.py results/${{matrix.arch}}-threadsafe.csv | ||||
|       - name: results-migrate | ||||
|         run: | | ||||
|           mkdir -p results | ||||
|           make clean | ||||
|           make lfs.csv \ | ||||
|             CFLAGS+=" \ | ||||
|               -DLFS_NO_ASSERT \ | ||||
|               -DLFS_NO_DEBUG \ | ||||
|               -DLFS_NO_WARN \ | ||||
|               -DLFS_NO_ERROR \ | ||||
|               -DLFS_MIGRATE" | ||||
|           cp lfs.csv results/${{matrix.arch}}-migrate.csv | ||||
|           ./scripts/summary.py results/${{matrix.arch}}-migrate.csv | ||||
|       - name: results-error-asserts | ||||
|         run: | | ||||
|           mkdir -p results | ||||
|           make clean | ||||
|           make lfs.csv \ | ||||
|             CFLAGS+=" \ | ||||
|               -DLFS_NO_DEBUG \ | ||||
|               -DLFS_NO_WARN \ | ||||
|               -DLFS_NO_ERROR \ | ||||
|               -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'" | ||||
|           cp lfs.csv results/${{matrix.arch}}-error-asserts.csv | ||||
|           ./scripts/summary.py results/${{matrix.arch}}-error-asserts.csv | ||||
|       - name: upload-results | ||||
|         uses: actions/upload-artifact@v2 | ||||
|         with: | ||||
|           name: results | ||||
|           path: results | ||||
|  | ||||
|       # create statuses with results | ||||
|       - name: collect-status | ||||
|         run: | | ||||
|           mkdir -p status | ||||
|           for f in $(shopt -s nullglob ; echo results/*.csv) | ||||
|           do | ||||
|             export STEP="results$( | ||||
|               echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p')" | ||||
|             for r in code stack structs | ||||
|             do | ||||
|               export CONTEXT="results (${{matrix.arch}}$( | ||||
|                 echo $f | sed -n 's/[^-]*-\(.*\).csv/, \1/p')) / $r" | ||||
|               export PREV="$(curl -sS \ | ||||
|                 "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \ | ||||
|                 | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[] | ||||
|                   | select(.context == env.CONTEXT).description | ||||
|                   | capture("(?<result>[0-9∞]+)").result' \ | ||||
|                 || echo 0)" | ||||
|               export DESCRIPTION="$(./scripts/summary.py $f -f $r -Y | awk ' | ||||
|                 NR==2 {printf "%s B",$2} | ||||
|                 NR==2 && ENVIRON["PREV"]+0 != 0 { | ||||
|                   printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')" | ||||
|               jq -n '{ | ||||
|                 state: "success", | ||||
|                 context: env.CONTEXT, | ||||
|                 description: env.DESCRIPTION, | ||||
|                 target_job: "${{github.job}} (${{matrix.arch}})", | ||||
|                 target_step: env.STEP}' \ | ||||
|                 | tee status/$r-${{matrix.arch}}$( | ||||
|                   echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p').json | ||||
|             done | ||||
|           done | ||||
|       - name: upload-status | ||||
|         uses: actions/upload-artifact@v2 | ||||
|         with: | ||||
|           name: status | ||||
|           path: status | ||||
|           retention-days: 1 | ||||
|  | ||||
|   # run under Valgrind to check for memory errors | ||||
|   valgrind: | ||||
|     runs-on: ubuntu-20.04 | ||||
|     steps: | ||||
|       - uses: actions/checkout@v2 | ||||
|       - name: install | ||||
|         run: | | ||||
|           # need toml, also pip3 isn't installed by default? | ||||
|           sudo apt-get update -qq | ||||
|           sudo apt-get install -qq python3 python3-pip | ||||
|           sudo pip3 install toml | ||||
|       - name: install-valgrind | ||||
|         run: | | ||||
|           sudo apt-get update -qq | ||||
|           sudo apt-get install -qq valgrind | ||||
|           valgrind --version | ||||
|       # normal tests, we don't need to test all geometries | ||||
|       - name: test-valgrind | ||||
|         run: make test TESTFLAGS+="-k --valgrind" | ||||
|  | ||||
|   # self-host with littlefs-fuse for a fuzz-like test | ||||
|   fuse: | ||||
|     runs-on: ubuntu-20.04 | ||||
|     if: ${{!endsWith(github.ref, '-prefix')}} | ||||
|     steps: | ||||
|       - uses: actions/checkout@v2 | ||||
|       - name: install | ||||
|         run: | | ||||
|           # need toml, also pip3 isn't installed by default? | ||||
|           sudo apt-get update -qq | ||||
|           sudo apt-get install -qq python3 python3-pip libfuse-dev | ||||
|           sudo pip3 install toml | ||||
|           fusermount -V | ||||
|           gcc --version | ||||
|       - uses: actions/checkout@v2 | ||||
|         with: | ||||
|           repository: littlefs-project/littlefs-fuse | ||||
|           ref: v2 | ||||
|           path: littlefs-fuse | ||||
|       - name: setup | ||||
|         run: | | ||||
|           # copy our new version into littlefs-fuse | ||||
|           rm -rf littlefs-fuse/littlefs/* | ||||
|           cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs | ||||
|  | ||||
|           # setup disk for littlefs-fuse | ||||
|           mkdir mount | ||||
|           LOOP=$(sudo losetup -f) | ||||
|           sudo chmod a+rw $LOOP | ||||
|           dd if=/dev/zero bs=512 count=128K of=disk | ||||
|           losetup $LOOP disk | ||||
|           echo "LOOP=$LOOP" >> $GITHUB_ENV | ||||
|       - name: test | ||||
|         run: | | ||||
|           # self-host test | ||||
|           make -C littlefs-fuse | ||||
|  | ||||
|           littlefs-fuse/lfs --format $LOOP | ||||
|           littlefs-fuse/lfs $LOOP mount | ||||
|  | ||||
|           ls mount | ||||
|           mkdir mount/littlefs | ||||
|           cp -r $(git ls-tree --name-only HEAD) mount/littlefs | ||||
|           cd mount/littlefs | ||||
|           stat . | ||||
|           ls -flh | ||||
|           make -B test | ||||
|  | ||||
|   # test migration using littlefs-fuse | ||||
|   migrate: | ||||
|     runs-on: ubuntu-20.04 | ||||
|     if: ${{!endsWith(github.ref, '-prefix')}} | ||||
|     steps: | ||||
|       - uses: actions/checkout@v2 | ||||
|       - name: install | ||||
|         run: | | ||||
|           # need toml, also pip3 isn't installed by default? | ||||
|           sudo apt-get update -qq | ||||
|           sudo apt-get install -qq python3 python3-pip libfuse-dev | ||||
|           sudo pip3 install toml | ||||
|           fusermount -V | ||||
|           gcc --version | ||||
|       - uses: actions/checkout@v2 | ||||
|         with: | ||||
|           repository: littlefs-project/littlefs-fuse | ||||
|           ref: v2 | ||||
|           path: v2 | ||||
|       - uses: actions/checkout@v2 | ||||
|         with: | ||||
|           repository: littlefs-project/littlefs-fuse | ||||
|           ref: v1 | ||||
|           path: v1 | ||||
|       - name: setup | ||||
|         run: | | ||||
|           # copy our new version into littlefs-fuse | ||||
|           rm -rf v2/littlefs/* | ||||
|           cp -r $(git ls-tree --name-only HEAD) v2/littlefs | ||||
|  | ||||
|           # setup disk for littlefs-fuse | ||||
|           mkdir mount | ||||
|           LOOP=$(sudo losetup -f) | ||||
|           sudo chmod a+rw $LOOP | ||||
|           dd if=/dev/zero bs=512 count=128K of=disk | ||||
|           losetup $LOOP disk | ||||
|           echo "LOOP=$LOOP" >> $GITHUB_ENV | ||||
|       - name: test | ||||
|         run: | | ||||
|           # compile v1 and v2 | ||||
|           make -C v1 | ||||
|           make -C v2 | ||||
|  | ||||
|           # run self-host test with v1 | ||||
|           v1/lfs --format $LOOP | ||||
|           v1/lfs $LOOP mount | ||||
|  | ||||
|           ls mount | ||||
|           mkdir mount/littlefs | ||||
|           cp -r $(git ls-tree --name-only HEAD) mount/littlefs | ||||
|           cd mount/littlefs | ||||
|           stat . | ||||
|           ls -flh | ||||
|           make -B test | ||||
|  | ||||
|           # attempt to migrate | ||||
|           cd ../.. | ||||
|           fusermount -u mount | ||||
|  | ||||
|           v2/lfs --migrate $LOOP | ||||
|           v2/lfs $LOOP mount | ||||
|  | ||||
|           # run self-host test with v2 right where we left off | ||||
|           ls mount | ||||
|           cd mount/littlefs | ||||
|           stat . | ||||
|           ls -flh | ||||
|           make -B test | ||||
|  | ||||
|   # collect coverage info | ||||
|   coverage: | ||||
|     runs-on: ubuntu-20.04 | ||||
|     needs: [test] | ||||
|     steps: | ||||
|       - uses: actions/checkout@v2 | ||||
|       - name: install | ||||
|         run: | | ||||
|           sudo apt-get update -qq | ||||
|           sudo apt-get install -qq python3 python3-pip lcov | ||||
|           sudo pip3 install toml | ||||
|       # yes we continue-on-error nearly every step, continue-on-error | ||||
|       # at job level apparently still marks a job as failed, which isn't | ||||
|       # what we want | ||||
|       - uses: actions/download-artifact@v2 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           name: coverage | ||||
|           path: coverage | ||||
|       - name: results-coverage | ||||
|         continue-on-error: true | ||||
|         run: | | ||||
|           mkdir -p results | ||||
|           lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \ | ||||
|             -o results/coverage.info | ||||
|           ./scripts/coverage.py results/coverage.info -o results/coverage.csv | ||||
|       - name: upload-results | ||||
|         uses: actions/upload-artifact@v2 | ||||
|         with: | ||||
|           name: results | ||||
|           path: results | ||||
|       - name: collect-status | ||||
|         run: | | ||||
|           mkdir -p status | ||||
|           [ -e results/coverage.csv ] || exit 0 | ||||
|           export STEP="results-coverage" | ||||
|           export CONTEXT="results / coverage" | ||||
|           export PREV="$(curl -sS \ | ||||
|             "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \ | ||||
|             | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[] | ||||
|               | select(.context == env.CONTEXT).description | ||||
|               | capture("(?<result>[0-9\\.]+)").result' \ | ||||
|             || echo 0)" | ||||
|           export DESCRIPTION="$( | ||||
|             ./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' ' | ||||
|               NR==2 {printf "%.1f%% of %d lines",$4,$3} | ||||
|               NR==2 && ENVIRON["PREV"]+0 != 0 { | ||||
|                 printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')" | ||||
|           jq -n '{ | ||||
|             state: "success", | ||||
|             context: env.CONTEXT, | ||||
|             description: env.DESCRIPTION, | ||||
|             target_job: "${{github.job}}", | ||||
|             target_step: env.STEP}' \ | ||||
|             | tee status/coverage.json | ||||
|       - name: upload-status | ||||
|         uses: actions/upload-artifact@v2 | ||||
|         with: | ||||
|           name: status | ||||
|           path: status | ||||
|           retention-days: 1 | ||||
							
								
								
									
										6
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										6
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							| @@ -2,9 +2,13 @@ | ||||
| *.o | ||||
| *.d | ||||
| *.a | ||||
| *.ci | ||||
| *.csv | ||||
|  | ||||
| # Testing things | ||||
| blocks/ | ||||
| lfs | ||||
| test.c | ||||
| tests_/*.toml.* | ||||
| tests/*.toml.* | ||||
| scripts/__pycache__ | ||||
| .gdb_history | ||||
|   | ||||
							
								
								
									
										318
									
								
								.travis.yml
									
									
									
									
									
								
							
							
						
						
									
										318
									
								
								.travis.yml
									
									
									
									
									
								
							| @@ -1,318 +0,0 @@ | ||||
| # Environment variables | ||||
| env: | ||||
|   global: | ||||
|     - CFLAGS=-Werror | ||||
|  | ||||
| # Common test script | ||||
| script: | ||||
|   # make sure example can at least compile | ||||
|   - sed -n '/``` c/,/```/{/```/d; p;}' README.md > test.c && | ||||
|     make all CFLAGS+=" | ||||
|         -Duser_provided_block_device_read=NULL | ||||
|         -Duser_provided_block_device_prog=NULL | ||||
|         -Duser_provided_block_device_erase=NULL | ||||
|         -Duser_provided_block_device_sync=NULL | ||||
|         -include stdio.h" | ||||
|  | ||||
|   # run tests | ||||
|   - make test QUIET=1 | ||||
|  | ||||
|   # run tests with a few different configurations | ||||
|   - make test QUIET=1 CFLAGS+="-DLFS_READ_SIZE=1      -DLFS_CACHE_SIZE=4" | ||||
|   - make test QUIET=1 CFLAGS+="-DLFS_READ_SIZE=512    -DLFS_CACHE_SIZE=512 -DLFS_BLOCK_CYCLES=16" | ||||
|   - make test QUIET=1 CFLAGS+="-DLFS_READ_SIZE=8      -DLFS_CACHE_SIZE=16  -DLFS_BLOCK_CYCLES=2" | ||||
|   - make test QUIET=1 CFLAGS+="-DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" | ||||
|  | ||||
|   - make clean test QUIET=1 CFLAGS+="-DLFS_INLINE_MAX=0" | ||||
|   - make clean test QUIET=1 CFLAGS+="-DLFS_EMUBD_ERASE_VALUE=0xff" | ||||
|   - make clean test QUIET=1 CFLAGS+="-DLFS_NO_INTRINSICS" | ||||
|  | ||||
|   # additional configurations that don't support all tests (this should be | ||||
|   # fixed but at the moment it is what it is) | ||||
|   - make test_files QUIET=1 | ||||
|         CFLAGS+="-DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" | ||||
|   - make test_files QUIET=1 | ||||
|         CFLAGS+="-DLFS_READ_SIZE=\(2*1024\) -DLFS_BLOCK_SIZE=\(64*1024\)" | ||||
|   - make test_files QUIET=1 | ||||
|         CFLAGS+="-DLFS_READ_SIZE=\(8*1024\) -DLFS_BLOCK_SIZE=\(64*1024\)" | ||||
|   - make test_files QUIET=1 | ||||
|         CFLAGS+="-DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" | ||||
|  | ||||
|   # compile and find the code size with the smallest configuration | ||||
|   - make clean size | ||||
|         OBJ="$(ls lfs*.o | tr '\n' ' ')" | ||||
|         CFLAGS+="-DLFS_NO_ASSERT -DLFS_NO_DEBUG -DLFS_NO_WARN -DLFS_NO_ERROR" | ||||
|         | tee sizes | ||||
|  | ||||
|   # update status if we succeeded, compare with master if possible | ||||
|   - | | ||||
|     if [ "$TRAVIS_TEST_RESULT" -eq 0 ] | ||||
|     then | ||||
|         CURR=$(tail -n1 sizes | awk '{print $1}') | ||||
|         PREV=$(curl -u "$GEKY_BOT_STATUSES" https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \ | ||||
|             | jq -re "select(.sha != \"$TRAVIS_COMMIT\") | ||||
|                 | .statuses[] | select(.context == \"$STAGE/$NAME\").description | ||||
|                 | capture(\"code size is (?<size>[0-9]+)\").size" \ | ||||
|             || echo 0) | ||||
|  | ||||
|         STATUS="Passed, code size is ${CURR}B" | ||||
|         if [ "$PREV" -ne 0 ] | ||||
|         then | ||||
|             STATUS="$STATUS ($(python -c "print '%+.2f' % (100*($CURR-$PREV)/$PREV.0)")%)" | ||||
|         fi | ||||
|     fi | ||||
|  | ||||
| # CI matrix | ||||
| jobs: | ||||
|   include: | ||||
|     # native testing | ||||
|     - stage: test | ||||
|       env: | ||||
|         - STAGE=test | ||||
|         - NAME=littlefs-x86 | ||||
|  | ||||
|     # cross-compile with ARM (thumb mode) | ||||
|     - stage: test | ||||
|       env: | ||||
|         - STAGE=test | ||||
|         - NAME=littlefs-arm | ||||
|         - CC="arm-linux-gnueabi-gcc --static -mthumb" | ||||
|         - EXEC="qemu-arm" | ||||
|       install: | ||||
|         - sudo apt-get install | ||||
|               gcc-arm-linux-gnueabi | ||||
|               libc6-dev-armel-cross | ||||
|               qemu-user | ||||
|         - arm-linux-gnueabi-gcc --version | ||||
|         - qemu-arm -version | ||||
|  | ||||
|     # cross-compile with PowerPC | ||||
|     - stage: test | ||||
|       env: | ||||
|         - STAGE=test | ||||
|         - NAME=littlefs-powerpc | ||||
|         - CC="powerpc-linux-gnu-gcc --static" | ||||
|         - EXEC="qemu-ppc" | ||||
|       install: | ||||
|         - sudo apt-get install | ||||
|               gcc-powerpc-linux-gnu | ||||
|               libc6-dev-powerpc-cross | ||||
|               qemu-user | ||||
|         - powerpc-linux-gnu-gcc --version | ||||
|         - qemu-ppc -version | ||||
|  | ||||
|     # cross-compile with MIPS | ||||
|     - stage: test | ||||
|       env: | ||||
|         - STAGE=test | ||||
|         - NAME=littlefs-mips | ||||
|         - CC="mips-linux-gnu-gcc --static" | ||||
|         - EXEC="qemu-mips" | ||||
|       install: | ||||
|         - sudo apt-get install | ||||
|               gcc-mips-linux-gnu | ||||
|               libc6-dev-mips-cross | ||||
|               qemu-user | ||||
|         - mips-linux-gnu-gcc --version | ||||
|         - qemu-mips -version | ||||
|  | ||||
|     # self-host with littlefs-fuse for fuzz test | ||||
|     - stage: test | ||||
|       env: | ||||
|         - STAGE=test | ||||
|         - NAME=littlefs-fuse | ||||
|       if: branch !~ -prefix$ | ||||
|       install: | ||||
|         - sudo apt-get install libfuse-dev | ||||
|         - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 | ||||
|         - fusermount -V | ||||
|         - gcc --version | ||||
|       before_script: | ||||
|         # setup disk for littlefs-fuse | ||||
|         - rm -rf littlefs-fuse/littlefs/* | ||||
|         - cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs | ||||
|  | ||||
|         - mkdir mount | ||||
|         - sudo chmod a+rw /dev/loop0 | ||||
|         - dd if=/dev/zero bs=512 count=4096 of=disk | ||||
|         - losetup /dev/loop0 disk | ||||
|       script: | ||||
|         # self-host test | ||||
|         - make -C littlefs-fuse | ||||
|  | ||||
|         - littlefs-fuse/lfs --format /dev/loop0 | ||||
|         - littlefs-fuse/lfs /dev/loop0 mount | ||||
|  | ||||
|         - ls mount | ||||
|         - mkdir mount/littlefs | ||||
|         - cp -r $(git ls-tree --name-only HEAD) mount/littlefs | ||||
|         - cd mount/littlefs | ||||
|         - stat . | ||||
|         - ls -flh | ||||
|         - make -B test_dirs test_files QUIET=1 | ||||
|  | ||||
|     # self-host with littlefs-fuse for fuzz test | ||||
|     - stage: test | ||||
|       env: | ||||
|         - STAGE=test | ||||
|         - NAME=littlefs-migration | ||||
|       if: branch !~ -prefix$ | ||||
|       install: | ||||
|         - sudo apt-get install libfuse-dev | ||||
|         - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 v2 | ||||
|         - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v1 v1 | ||||
|         - fusermount -V | ||||
|         - gcc --version | ||||
|       before_script: | ||||
|         # setup disk for littlefs-fuse | ||||
|         - rm -rf v2/littlefs/* | ||||
|         - cp -r $(git ls-tree --name-only HEAD) v2/littlefs | ||||
|  | ||||
|         - mkdir mount | ||||
|         - sudo chmod a+rw /dev/loop0 | ||||
|         - dd if=/dev/zero bs=512 count=4096 of=disk | ||||
|         - losetup /dev/loop0 disk | ||||
|       script: | ||||
|         # compile v1 and v2 | ||||
|         - make -C v1 | ||||
|         - make -C v2 | ||||
|  | ||||
|         # run self-host test with v1 | ||||
|         - v1/lfs --format /dev/loop0 | ||||
|         - v1/lfs /dev/loop0 mount | ||||
|  | ||||
|         - ls mount | ||||
|         - mkdir mount/littlefs | ||||
|         - cp -r $(git ls-tree --name-only HEAD) mount/littlefs | ||||
|         - cd mount/littlefs | ||||
|         - stat . | ||||
|         - ls -flh | ||||
|         - make -B test_dirs test_files QUIET=1 | ||||
|  | ||||
|         # attempt to migrate | ||||
|         - cd ../.. | ||||
|         - fusermount -u mount | ||||
|  | ||||
|         - v2/lfs --migrate /dev/loop0 | ||||
|         - v2/lfs /dev/loop0 mount | ||||
|  | ||||
|         # run self-host test with v2 right where we left off | ||||
|         - ls mount | ||||
|         - cd mount/littlefs | ||||
|         - stat . | ||||
|         - ls -flh | ||||
|         - make -B test_dirs test_files QUIET=1 | ||||
|  | ||||
|     # Automatically create releases | ||||
|     - stage: deploy | ||||
|       env: | ||||
|         - STAGE=deploy | ||||
|         - NAME=deploy | ||||
|       script: | ||||
|         - | | ||||
|           bash << 'SCRIPT' | ||||
|           set -ev | ||||
|           # Find version defined in lfs.h | ||||
|           LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3) | ||||
|           LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16))) | ||||
|           LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >>  0))) | ||||
|           # Grab latests patch from repo tags, default to 0, needs finagling | ||||
|           # to get past github's pagination api | ||||
|           PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR. | ||||
|           PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \ | ||||
|               | sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \ | ||||
|               || echo $PREV_URL) | ||||
|           LFS_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \ | ||||
|               | jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g") | ||||
|                   .captures[].string | tonumber) | max + 1' \ | ||||
|               || echo 0) | ||||
|           # We have our new version | ||||
|           LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH" | ||||
|           echo "VERSION $LFS_VERSION" | ||||
|           # Check that we're the most recent commit | ||||
|           CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \ | ||||
|               https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \ | ||||
|               | jq -re '.sha') | ||||
|           [ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0 | ||||
|           # Create major branch | ||||
|           git branch v$LFS_VERSION_MAJOR HEAD | ||||
|           # Create major prefix branch | ||||
|           git config user.name "geky bot" | ||||
|           git config user.email "bot@geky.net" | ||||
|           git fetch https://github.com/$TRAVIS_REPO_SLUG.git \ | ||||
|               --depth=50 v$LFS_VERSION_MAJOR-prefix || true | ||||
|           ./scripts/prefix.py lfs$LFS_VERSION_MAJOR | ||||
|           git branch v$LFS_VERSION_MAJOR-prefix $( \ | ||||
|               git commit-tree $(git write-tree) \ | ||||
|                   $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \ | ||||
|                   -p HEAD \ | ||||
|                   -m "Generated v$LFS_VERSION_MAJOR prefixes") | ||||
|           git reset --hard | ||||
|           # Update major version branches (vN and vN-prefix) | ||||
|           git push --atomic https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \ | ||||
|               v$LFS_VERSION_MAJOR \ | ||||
|               v$LFS_VERSION_MAJOR-prefix | ||||
|           # Build release notes | ||||
|           PREV=$(git tag --sort=-v:refname -l "v*" | head -1) | ||||
|           if [ ! -z "$PREV" ] | ||||
|           then | ||||
|               echo "PREV $PREV" | ||||
|               CHANGES=$(git log --oneline $PREV.. --grep='^Merge' --invert-grep) | ||||
|               printf "CHANGES\n%s\n\n" "$CHANGES" | ||||
|           fi | ||||
|           case ${GEKY_BOT_DRAFT:-minor} in | ||||
|               true)  DRAFT=true ;; | ||||
|               minor) DRAFT=$(jq -R 'endswith(".0")' <<< "$LFS_VERSION") ;; | ||||
|               false) DRAFT=false ;; | ||||
|           esac | ||||
|           # Create the release and patch version tag (vN.N.N) | ||||
|           curl -f -u "$GEKY_BOT_RELEASES" -X POST \ | ||||
|               https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \ | ||||
|               -d "{ | ||||
|                   \"tag_name\": \"$LFS_VERSION\", | ||||
|                   \"name\": \"${LFS_VERSION%.0}\", | ||||
|                   \"target_commitish\": \"$TRAVIS_COMMIT\", | ||||
|                   \"draft\": $DRAFT, | ||||
|                   \"body\": $(jq -sR '.' <<< "$CHANGES") | ||||
|               }" #" | ||||
|           SCRIPT | ||||
|  | ||||
| # Manage statuses | ||||
| before_install: | ||||
|   - | | ||||
|     curl -u "$GEKY_BOT_STATUSES" -X POST \ | ||||
|         https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|         -d "{ | ||||
|             \"context\": \"$STAGE/$NAME\", | ||||
|             \"state\": \"pending\", | ||||
|             \"description\": \"${STATUS:-In progress}\", | ||||
|             \"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\" | ||||
|         }" | ||||
|  | ||||
| after_failure: | ||||
|   - | | ||||
|     curl -u "$GEKY_BOT_STATUSES" -X POST \ | ||||
|         https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|         -d "{ | ||||
|             \"context\": \"$STAGE/$NAME\", | ||||
|             \"state\": \"failure\", | ||||
|             \"description\": \"${STATUS:-Failed}\", | ||||
|             \"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\" | ||||
|         }" | ||||
|  | ||||
| after_success: | ||||
|   - | | ||||
|     curl -u "$GEKY_BOT_STATUSES" -X POST \ | ||||
|         https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|         -d "{ | ||||
|             \"context\": \"$STAGE/$NAME\", | ||||
|             \"state\": \"success\", | ||||
|             \"description\": \"${STATUS:-Passed}\", | ||||
|             \"target_url\": \"https://travis-ci.org/$TRAVIS_REPO_SLUG/jobs/$TRAVIS_JOB_ID\" | ||||
|         }" | ||||
|  | ||||
| # Job control | ||||
| stages: | ||||
|     - name: test | ||||
|     - name: deploy | ||||
|       if: branch = master AND type = push | ||||
| @@ -1,3 +1,4 @@ | ||||
| Copyright (c) 2022, The littlefs authors.   | ||||
| Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  | ||||
| Redistribution and use in source and binary forms, with or without modification, | ||||
|   | ||||
							
								
								
									
										156
									
								
								Makefile
									
									
									
									
									
								
							
							
						
						
									
										156
									
								
								Makefile
									
									
									
									
									
								
							| @@ -1,69 +1,173 @@ | ||||
| TARGET = lfs.a | ||||
| ifneq ($(wildcard test.c main.c),) | ||||
| override TARGET = lfs | ||||
| ifdef BUILDDIR | ||||
| # make sure BUILDDIR ends with a slash | ||||
| override BUILDDIR := $(BUILDDIR)/ | ||||
| # bit of a hack, but we want to make sure BUILDDIR directory structure | ||||
| # is correct before any commands | ||||
| $(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \ | ||||
| 	$(BUILDDIR) \ | ||||
| 	$(BUILDDIR)bd \ | ||||
| 	$(BUILDDIR)tests)) | ||||
| endif | ||||
|  | ||||
| CC ?= gcc | ||||
| AR ?= ar | ||||
| SIZE ?= size | ||||
| # overridable target/src/tools/flags/etc | ||||
| ifneq ($(wildcard test.c main.c),) | ||||
| TARGET ?= $(BUILDDIR)lfs | ||||
| else | ||||
| TARGET ?= $(BUILDDIR)lfs.a | ||||
| endif | ||||
|  | ||||
| SRC += $(wildcard *.c bd/*.c) | ||||
| OBJ := $(SRC:.c=.o) | ||||
| DEP := $(SRC:.c=.d) | ||||
| ASM := $(SRC:.c=.s) | ||||
|  | ||||
| CC      ?= gcc | ||||
| AR      ?= ar | ||||
| SIZE    ?= size | ||||
| CTAGS   ?= ctags | ||||
| NM      ?= nm | ||||
| OBJDUMP ?= objdump | ||||
| LCOV    ?= lcov | ||||
|  | ||||
| SRC ?= $(wildcard *.c) | ||||
| OBJ := $(SRC:%.c=$(BUILDDIR)%.o) | ||||
| DEP := $(SRC:%.c=$(BUILDDIR)%.d) | ||||
| ASM := $(SRC:%.c=$(BUILDDIR)%.s) | ||||
| CGI := $(SRC:%.c=$(BUILDDIR)%.ci) | ||||
|  | ||||
| ifdef DEBUG | ||||
| override CFLAGS += -O0 -g3 | ||||
| override CFLAGS += -O0 | ||||
| else | ||||
| override CFLAGS += -Os | ||||
| endif | ||||
| ifdef WORD | ||||
| override CFLAGS += -m$(WORD) | ||||
| endif | ||||
| ifdef TRACE | ||||
| override CFLAGS += -DLFS_YES_TRACE | ||||
| endif | ||||
| override CFLAGS += -g3 | ||||
| override CFLAGS += -I. | ||||
| override CFLAGS += -std=c99 -Wall -pedantic | ||||
| override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef | ||||
| # Remove missing-field-initializers because of GCC bug | ||||
| override CFLAGS += -Wno-missing-field-initializers | ||||
|  | ||||
| ifdef VERBOSE | ||||
| override TFLAGS += -v | ||||
| override TESTFLAGS     += -v | ||||
| override CALLSFLAGS    += -v | ||||
| override CODEFLAGS     += -v | ||||
| override DATAFLAGS     += -v | ||||
| override STACKFLAGS    += -v | ||||
| override STRUCTSFLAGS  += -v | ||||
| override COVERAGEFLAGS += -v | ||||
| endif | ||||
| ifdef EXEC | ||||
| override TESTFLAGS += --exec="$(EXEC)" | ||||
| endif | ||||
| ifdef COVERAGE | ||||
| override TESTFLAGS += --coverage | ||||
| endif | ||||
| ifdef BUILDDIR | ||||
| override TESTFLAGS     += --build-dir="$(BUILDDIR:/=)" | ||||
| override CALLSFLAGS    += --build-dir="$(BUILDDIR:/=)" | ||||
| override CODEFLAGS     += --build-dir="$(BUILDDIR:/=)" | ||||
| override DATAFLAGS     += --build-dir="$(BUILDDIR:/=)" | ||||
| override STACKFLAGS    += --build-dir="$(BUILDDIR:/=)" | ||||
| override STRUCTSFLAGS  += --build-dir="$(BUILDDIR:/=)" | ||||
| override COVERAGEFLAGS += --build-dir="$(BUILDDIR:/=)" | ||||
| endif | ||||
| ifneq ($(NM),nm) | ||||
| override CODEFLAGS += --nm-tool="$(NM)" | ||||
| override DATAFLAGS += --nm-tool="$(NM)" | ||||
| endif | ||||
| ifneq ($(OBJDUMP),objdump) | ||||
| override STRUCTSFLAGS += --objdump-tool="$(OBJDUMP)" | ||||
| endif | ||||
|  | ||||
|  | ||||
| all: $(TARGET) | ||||
| # commands | ||||
| .PHONY: all build | ||||
| all build: $(TARGET) | ||||
|  | ||||
| .PHONY: asm | ||||
| asm: $(ASM) | ||||
|  | ||||
| .PHONY: size | ||||
| size: $(OBJ) | ||||
| 	$(SIZE) -t $^ | ||||
|  | ||||
| .PHONY: tags | ||||
| tags: | ||||
| 	$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC) | ||||
|  | ||||
| .PHONY: calls | ||||
| calls: $(CGI) | ||||
| 	./scripts/calls.py $^ $(CALLSFLAGS) | ||||
|  | ||||
| .PHONY: test | ||||
| test: | ||||
| 	./scripts/test.py $(TFLAGS) | ||||
| 	./scripts/test.py $(TESTFLAGS) | ||||
| .SECONDEXPANSION: | ||||
| test%: tests/test$$(firstword $$(subst \#, ,%)).toml | ||||
| 	./scripts/test.py $(TFLAGS) $@ | ||||
| 	./scripts/test.py $@ $(TESTFLAGS) | ||||
|  | ||||
| .PHONY: code | ||||
| code: $(OBJ) | ||||
| 	./scripts/code.py $^ -S $(CODEFLAGS) | ||||
|  | ||||
| .PHONY: data | ||||
| data: $(OBJ) | ||||
| 	./scripts/data.py $^ -S $(DATAFLAGS) | ||||
|  | ||||
| .PHONY: stack | ||||
| stack: $(CGI) | ||||
| 	./scripts/stack.py $^ -S $(STACKFLAGS) | ||||
|  | ||||
| .PHONY: structs | ||||
| structs: $(OBJ) | ||||
| 	./scripts/structs.py $^ -S $(STRUCTSFLAGS) | ||||
|  | ||||
| .PHONY: coverage | ||||
| coverage: | ||||
| 	./scripts/coverage.py $(BUILDDIR)tests/*.toml.info -s $(COVERAGEFLAGS) | ||||
|  | ||||
| .PHONY: summary | ||||
| summary: $(BUILDDIR)lfs.csv | ||||
| 	./scripts/summary.py -Y $^ $(SUMMARYFLAGS) | ||||
|  | ||||
|  | ||||
| # rules | ||||
| -include $(DEP) | ||||
| .SUFFIXES: | ||||
|  | ||||
| lfs: $(OBJ) | ||||
| $(BUILDDIR)lfs: $(OBJ) | ||||
| 	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ | ||||
|  | ||||
| %.a: $(OBJ) | ||||
| $(BUILDDIR)lfs.a: $(OBJ) | ||||
| 	$(AR) rcs $@ $^ | ||||
|  | ||||
| %.o: %.c | ||||
| $(BUILDDIR)lfs.csv: $(OBJ) $(CGI) | ||||
| 	./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o $@ | ||||
| 	./scripts/data.py $(OBJ) -q -m $@ $(DATAFLAGS) -o $@ | ||||
| 	./scripts/stack.py $(CGI) -q -m $@ $(STACKFLAGS) -o $@ | ||||
| 	./scripts/structs.py $(OBJ) -q -m $@ $(STRUCTSFLAGS) -o $@ | ||||
| 	$(if $(COVERAGE),\ | ||||
| 		./scripts/coverage.py $(BUILDDIR)tests/*.toml.info \ | ||||
| 			-q -m $@ $(COVERAGEFLAGS) -o $@) | ||||
|  | ||||
| $(BUILDDIR)%.o: %.c | ||||
| 	$(CC) -c -MMD $(CFLAGS) $< -o $@ | ||||
|  | ||||
| %.s: %.c | ||||
| $(BUILDDIR)%.s: %.c | ||||
| 	$(CC) -S $(CFLAGS) $< -o $@ | ||||
|  | ||||
| # gcc depends on the output file for intermediate file names, so | ||||
| # we can't omit to .o output. We also need to serialize with the | ||||
| # normal .o rule because otherwise we can end up with multiprocess | ||||
| # problems with two instances of gcc modifying the same .o | ||||
| $(BUILDDIR)%.ci: %.c | $(BUILDDIR)%.o | ||||
| 	$(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $| | ||||
|  | ||||
| # clean everything | ||||
| .PHONY: clean | ||||
| clean: | ||||
| 	rm -f $(TARGET) | ||||
| 	rm -f $(BUILDDIR)lfs | ||||
| 	rm -f $(BUILDDIR)lfs.a | ||||
| 	rm -f $(BUILDDIR)lfs.csv | ||||
| 	rm -f $(OBJ) | ||||
| 	rm -f $(CGI) | ||||
| 	rm -f $(DEP) | ||||
| 	rm -f $(ASM) | ||||
| 	rm -f tests/*.toml.* | ||||
| 	rm -f $(BUILDDIR)tests/*.toml.* | ||||
|   | ||||
							
								
								
									
										11
									
								
								README.md
									
									
									
									
									
								
							
							
						
						
									
										11
									
								
								README.md
									
									
									
									
									
								
							| @@ -115,6 +115,9 @@ the filesystem until sync or close is called on the file. | ||||
|  | ||||
| ## Other notes | ||||
|  | ||||
| Littlefs is written in C, and specifically should compile with any compiler | ||||
| that conforms to the `C99` standard. | ||||
|  | ||||
| All littlefs calls have the potential to return a negative error code. The | ||||
| errors can be either one of those found in the `enum lfs_error` in | ||||
| [lfs.h](lfs.h), or an error returned by the user's block device operations. | ||||
| @@ -189,7 +192,7 @@ More details on how littlefs works can be found in [DESIGN.md](DESIGN.md) and | ||||
| ## Testing | ||||
|  | ||||
| The littlefs comes with a test suite designed to run on a PC using the | ||||
| [emulated block device](emubd/lfs_emubd.h) found in the emubd directory. | ||||
| [emulated block device](bd/lfs_testbd.h) found in the `bd` directory. | ||||
| The tests assume a Linux environment and can be started with make: | ||||
|  | ||||
| ``` bash | ||||
| @@ -218,6 +221,11 @@ License Identifiers that are here available: http://spdx.org/licenses/ | ||||
| - [littlefs-js] - A javascript wrapper for littlefs. I'm not sure why you would | ||||
|   want this, but it is handy for demos.  You can see it in action | ||||
|   [here][littlefs-js-demo]. | ||||
|    | ||||
| - [littlefs-python] - A Python wrapper for littlefs. The project allows you | ||||
|   to create images of the filesystem on your PC. Check if littlefs will fit | ||||
|   your needs, create images for a later download to the target memory or | ||||
|   inspect the content of a binary image of the target memory. | ||||
|  | ||||
| - [mklfs] - A command line tool built by the [Lua RTOS] guys for making | ||||
|   littlefs images from a host PC. Supports Windows, Mac OS, and Linux. | ||||
| @@ -247,3 +255,4 @@ License Identifiers that are here available: http://spdx.org/licenses/ | ||||
| [LittleFileSystem]: https://os.mbed.com/docs/mbed-os/v5.12/apis/littlefilesystem.html | ||||
| [SPIFFS]: https://github.com/pellepl/spiffs | ||||
| [Dhara]: https://github.com/dlbeer/dhara | ||||
| [littlefs-python]: https://pypi.org/project/littlefs-python/ | ||||
|   | ||||
							
								
								
									
										32
									
								
								SPEC.md
									
									
									
									
									
								
							
							
						
						
									
										32
									
								
								SPEC.md
									
									
									
									
									
								
							| @@ -233,19 +233,19 @@ Metadata tag fields: | ||||
|    into a 3-bit abstract type and an 8-bit chunk field. Note that the value | ||||
|    `0x000` is invalid and not assigned a type. | ||||
|  | ||||
| 3. **Type1 (3-bits)** - Abstract type of the tag. Groups the tags into | ||||
|    8 categories that facilitate bitmasked lookups. | ||||
|     1. **Type1 (3-bits)** - Abstract type of the tag. Groups the tags into | ||||
|        8 categories that facilitate bitmasked lookups. | ||||
|  | ||||
| 4. **Chunk (8-bits)** - Chunk field used for various purposes by the different | ||||
|    abstract types.  type1+chunk+id form a unique identifier for each tag in the | ||||
|    metadata block. | ||||
|     2. **Chunk (8-bits)** - Chunk field used for various purposes by the different | ||||
|        abstract types.  type1+chunk+id form a unique identifier for each tag in the | ||||
|        metadata block. | ||||
|  | ||||
| 5. **Id (10-bits)** - File id associated with the tag. Each file in a metadata | ||||
| 3. **Id (10-bits)** - File id associated with the tag. Each file in a metadata | ||||
|    block gets a unique id which is used to associate tags with that file. The | ||||
|    special value `0x3ff` is used for any tags that are not associated with a | ||||
|    file, such as directory and global metadata. | ||||
|  | ||||
| 6. **Length (10-bits)** - Length of the data in bytes. The special value | ||||
| 4. **Length (10-bits)** - Length of the data in bytes. The special value | ||||
|    `0x3ff` indicates that this tag has been deleted. | ||||
|  | ||||
| ## Metadata types | ||||
| @@ -289,8 +289,8 @@ Layout of the name tag: | ||||
| ``` | ||||
|         tag                          data | ||||
| [--      32      --][---        variable length        ---] | ||||
| [1| 3| 8 | 10 | 10 ][---            (size)             ---] | ||||
|  ^  ^  ^    ^    ^- size               ^- file name | ||||
| [1| 3| 8 | 10 | 10 ][---          (size * 8)           ---] | ||||
|  ^  ^  ^    ^    ^- size                   ^- file name | ||||
|  |  |  |    '------ id | ||||
|  |  |  '----------- file type | ||||
|  |  '-------------- type1 (0x0) | ||||
| @@ -470,8 +470,8 @@ Layout of the inline-struct tag: | ||||
| ``` | ||||
|         tag                          data | ||||
| [--      32      --][---        variable length        ---] | ||||
| [1|- 11 -| 10 | 10 ][---            (size)             ---] | ||||
|  ^    ^     ^    ^- size               ^- inline data | ||||
| [1|- 11 -| 10 | 10 ][---           (size * 8)          ---] | ||||
|  ^    ^     ^    ^- size                    ^- inline data | ||||
|  |    |     '------ id | ||||
|  |    '------------ type (0x201) | ||||
|  '----------------- valid bit | ||||
| @@ -556,8 +556,8 @@ Layout of the user-attr tag: | ||||
| ``` | ||||
|         tag                          data | ||||
| [--      32      --][---        variable length        ---] | ||||
| [1| 3| 8 | 10 | 10 ][---            (size)             ---] | ||||
|  ^  ^  ^    ^    ^- size               ^- attr data | ||||
| [1| 3| 8 | 10 | 10 ][---           (size * 8)          ---] | ||||
|  ^  ^  ^    ^    ^- size                    ^- attr data | ||||
|  |  |  |    '------ id | ||||
|  |  |  '----------- attr type | ||||
|  |  '-------------- type1 (0x3) | ||||
| @@ -764,9 +764,9 @@ Layout of the CRC tag: | ||||
| ``` | ||||
|         tag                                    data | ||||
| [--      32      --][--      32      --|---        variable length        ---] | ||||
| [1| 3| 8 | 10 | 10 ][--      32      --|---            (size)             ---] | ||||
|  ^  ^  ^    ^    ^            ^- crc                      ^- padding | ||||
|  |  |  |    |    '- size (12) | ||||
| [1| 3| 8 | 10 | 10 ][--      32      --|---        (size * 8 - 32)        ---] | ||||
|  ^  ^  ^    ^    ^            ^- crc                             ^- padding | ||||
|  |  |  |    |    '- size | ||||
|  |  |  |    '------ id (0x3ff) | ||||
|  |  |  '----------- valid state | ||||
|  |  '-------------- type1 (0x5) | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * Block device emulated in a file | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
| @@ -10,9 +11,13 @@ | ||||
| #include <unistd.h> | ||||
| #include <errno.h> | ||||
|  | ||||
| #ifdef _WIN32 | ||||
| #include <windows.h> | ||||
| #endif | ||||
|  | ||||
| int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path, | ||||
|         const struct lfs_filebd_config *bdcfg) { | ||||
|     LFS_TRACE("lfs_filebd_createcfg(%p {.context=%p, " | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_createcfg(%p {.context=%p, " | ||||
|                 ".read=%p, .prog=%p, .erase=%p, .sync=%p, " | ||||
|                 ".read_size=%"PRIu32", .prog_size=%"PRIu32", " | ||||
|                 ".block_size=%"PRIu32", .block_count=%"PRIu32"}, " | ||||
| @@ -27,19 +32,24 @@ int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path, | ||||
|     bd->cfg = bdcfg; | ||||
|  | ||||
|     // open file | ||||
|     #ifdef _WIN32 | ||||
|     bd->fd = open(path, O_RDWR | O_CREAT | O_BINARY, 0666); | ||||
|     #else | ||||
|     bd->fd = open(path, O_RDWR | O_CREAT, 0666); | ||||
|     #endif | ||||
|  | ||||
|     if (bd->fd < 0) { | ||||
|         int err = -errno; | ||||
|         LFS_TRACE("lfs_filebd_createcfg -> %d", err); | ||||
|         LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", err); | ||||
|         return err; | ||||
|     } | ||||
|  | ||||
|     LFS_TRACE("lfs_filebd_createcfg -> %d", 0); | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int lfs_filebd_create(const struct lfs_config *cfg, const char *path) { | ||||
|     LFS_TRACE("lfs_filebd_create(%p {.context=%p, " | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_create(%p {.context=%p, " | ||||
|                 ".read=%p, .prog=%p, .erase=%p, .sync=%p, " | ||||
|                 ".read_size=%"PRIu32", .prog_size=%"PRIu32", " | ||||
|                 ".block_size=%"PRIu32", .block_count=%"PRIu32"}, " | ||||
| @@ -51,26 +61,27 @@ int lfs_filebd_create(const struct lfs_config *cfg, const char *path) { | ||||
|             path); | ||||
|     static const struct lfs_filebd_config defaults = {.erase_value=-1}; | ||||
|     int err = lfs_filebd_createcfg(cfg, path, &defaults); | ||||
|     LFS_TRACE("lfs_filebd_create -> %d", err); | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_create -> %d", err); | ||||
|     return err; | ||||
| } | ||||
|  | ||||
| int lfs_filebd_destroy(const struct lfs_config *cfg) { | ||||
|     LFS_TRACE("lfs_filebd_destroy(%p)", (void*)cfg); | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_destroy(%p)", (void*)cfg); | ||||
|     lfs_filebd_t *bd = cfg->context; | ||||
|     int err = close(bd->fd); | ||||
|     if (err < 0) { | ||||
|         err = -errno; | ||||
|         LFS_TRACE("lfs_filebd_destroy -> %d", err); | ||||
|         LFS_FILEBD_TRACE("lfs_filebd_destroy -> %d", err); | ||||
|         return err; | ||||
|     } | ||||
|     LFS_TRACE("lfs_filebd_destroy -> %d", 0); | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_destroy -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block, | ||||
|         lfs_off_t off, void *buffer, lfs_size_t size) { | ||||
|     LFS_TRACE("lfs_filebd_read(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_read(%p, " | ||||
|                 "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", | ||||
|             (void*)cfg, block, off, buffer, size); | ||||
|     lfs_filebd_t *bd = cfg->context; | ||||
|  | ||||
| @@ -79,7 +90,7 @@ int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block, | ||||
|     LFS_ASSERT(size % cfg->read_size == 0); | ||||
|     LFS_ASSERT(block < cfg->block_count); | ||||
|  | ||||
|     // zero for reproducability (in case file is truncated) | ||||
|     // zero for reproducibility (in case file is truncated) | ||||
|     if (bd->cfg->erase_value != -1) { | ||||
|         memset(buffer, bd->cfg->erase_value, size); | ||||
|     } | ||||
| @@ -89,24 +100,24 @@ int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block, | ||||
|             (off_t)block*cfg->block_size + (off_t)off, SEEK_SET); | ||||
|     if (res1 < 0) { | ||||
|         int err = -errno; | ||||
|         LFS_TRACE("lfs_filebd_read -> %d", err); | ||||
|         LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err); | ||||
|         return err; | ||||
|     } | ||||
|  | ||||
|     ssize_t res2 = read(bd->fd, buffer, size); | ||||
|     if (res2 < 0) { | ||||
|         int err = -errno; | ||||
|         LFS_TRACE("lfs_filebd_read -> %d", err); | ||||
|         LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err); | ||||
|         return err; | ||||
|     } | ||||
|  | ||||
|     LFS_TRACE("lfs_filebd_read -> %d", 0); | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_read -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block, | ||||
|         lfs_off_t off, const void *buffer, lfs_size_t size) { | ||||
|     LFS_TRACE("lfs_filebd_prog(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_prog(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", | ||||
|             (void*)cfg, block, off, buffer, size); | ||||
|     lfs_filebd_t *bd = cfg->context; | ||||
|  | ||||
| @@ -121,7 +132,7 @@ int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block, | ||||
|                 (off_t)block*cfg->block_size + (off_t)off, SEEK_SET); | ||||
|         if (res1 < 0) { | ||||
|             int err = -errno; | ||||
|             LFS_TRACE("lfs_filebd_prog -> %d", err); | ||||
|             LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err); | ||||
|             return err; | ||||
|         } | ||||
|  | ||||
| @@ -130,7 +141,7 @@ int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block, | ||||
|             ssize_t res2 = read(bd->fd, &c, 1); | ||||
|             if (res2 < 0) { | ||||
|                 int err = -errno; | ||||
|                 LFS_TRACE("lfs_filebd_prog -> %d", err); | ||||
|                 LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err); | ||||
|                 return err; | ||||
|             } | ||||
|  | ||||
| @@ -143,23 +154,23 @@ int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block, | ||||
|             (off_t)block*cfg->block_size + (off_t)off, SEEK_SET); | ||||
|     if (res1 < 0) { | ||||
|         int err = -errno; | ||||
|         LFS_TRACE("lfs_filebd_prog -> %d", err); | ||||
|         LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err); | ||||
|         return err; | ||||
|     } | ||||
|  | ||||
|     ssize_t res2 = write(bd->fd, buffer, size); | ||||
|     if (res2 < 0) { | ||||
|         int err = -errno; | ||||
|         LFS_TRACE("lfs_filebd_prog -> %d", err); | ||||
|         LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err); | ||||
|         return err; | ||||
|     } | ||||
|  | ||||
|     LFS_TRACE("lfs_filebd_prog -> %d", 0); | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) { | ||||
|     LFS_TRACE("lfs_filebd_erase(%p, 0x%"PRIx32")", (void*)cfg, block); | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_erase(%p, 0x%"PRIx32")", (void*)cfg, block); | ||||
|     lfs_filebd_t *bd = cfg->context; | ||||
|  | ||||
|     // check if erase is valid | ||||
| @@ -170,7 +181,7 @@ int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) { | ||||
|         off_t res1 = lseek(bd->fd, (off_t)block*cfg->block_size, SEEK_SET); | ||||
|         if (res1 < 0) { | ||||
|             int err = -errno; | ||||
|             LFS_TRACE("lfs_filebd_erase -> %d", err); | ||||
|             LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", err); | ||||
|             return err; | ||||
|         } | ||||
|  | ||||
| @@ -178,27 +189,31 @@ int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) { | ||||
|             ssize_t res2 = write(bd->fd, &(uint8_t){bd->cfg->erase_value}, 1); | ||||
|             if (res2 < 0) { | ||||
|                 int err = -errno; | ||||
|                 LFS_TRACE("lfs_filebd_erase -> %d", err); | ||||
|                 LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", err); | ||||
|                 return err; | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     LFS_TRACE("lfs_filebd_erase -> %d", 0); | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int lfs_filebd_sync(const struct lfs_config *cfg) { | ||||
|     LFS_TRACE("lfs_filebd_sync(%p)", (void*)cfg); | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_sync(%p)", (void*)cfg); | ||||
|     // file sync | ||||
|     lfs_filebd_t *bd = cfg->context; | ||||
|     #ifdef _WIN32 | ||||
|     int err = FlushFileBuffers((HANDLE) _get_osfhandle(fd)) ? 0 : -1; | ||||
|     #else | ||||
|     int err = fsync(bd->fd); | ||||
|     #endif | ||||
|     if (err) { | ||||
|         err = -errno; | ||||
|         LFS_TRACE("lfs_filebd_sync -> %d", 0); | ||||
|         LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", 0); | ||||
|         return err; | ||||
|     } | ||||
|  | ||||
|     LFS_TRACE("lfs_filebd_sync -> %d", 0); | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * Block device emulated in a file | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
| @@ -15,6 +16,14 @@ extern "C" | ||||
| { | ||||
| #endif | ||||
|  | ||||
|  | ||||
| // Block device specific tracing | ||||
| #ifdef LFS_FILEBD_YES_TRACE | ||||
| #define LFS_FILEBD_TRACE(...) LFS_TRACE(__VA_ARGS__) | ||||
| #else | ||||
| #define LFS_FILEBD_TRACE(...) | ||||
| #endif | ||||
|  | ||||
| // filebd config (optional) | ||||
| struct lfs_filebd_config { | ||||
|     // 8-bit erase value to use for simulating erases. -1 does not simulate | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * Block device emulated in RAM | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
| @@ -8,7 +9,7 @@ | ||||
|  | ||||
| int lfs_rambd_createcfg(const struct lfs_config *cfg, | ||||
|         const struct lfs_rambd_config *bdcfg) { | ||||
|     LFS_TRACE("lfs_rambd_createcfg(%p {.context=%p, " | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_createcfg(%p {.context=%p, " | ||||
|                 ".read=%p, .prog=%p, .erase=%p, .sync=%p, " | ||||
|                 ".read_size=%"PRIu32", .prog_size=%"PRIu32", " | ||||
|                 ".block_size=%"PRIu32", .block_count=%"PRIu32"}, " | ||||
| @@ -27,23 +28,25 @@ int lfs_rambd_createcfg(const struct lfs_config *cfg, | ||||
|     } else { | ||||
|         bd->buffer = lfs_malloc(cfg->block_size * cfg->block_count); | ||||
|         if (!bd->buffer) { | ||||
|             LFS_TRACE("lfs_rambd_createcfg -> %d", LFS_ERR_NOMEM); | ||||
|             LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", LFS_ERR_NOMEM); | ||||
|             return LFS_ERR_NOMEM; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // zero for reproducability? | ||||
|     // zero for reproducibility? | ||||
|     if (bd->cfg->erase_value != -1) { | ||||
|         memset(bd->buffer, bd->cfg->erase_value, | ||||
|                 cfg->block_size * cfg->block_count); | ||||
|     } else { | ||||
|         memset(bd->buffer, 0, cfg->block_size * cfg->block_count); | ||||
|     } | ||||
|  | ||||
|     LFS_TRACE("lfs_rambd_createcfg -> %d", 0); | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int lfs_rambd_create(const struct lfs_config *cfg) { | ||||
|     LFS_TRACE("lfs_rambd_create(%p {.context=%p, " | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_create(%p {.context=%p, " | ||||
|                 ".read=%p, .prog=%p, .erase=%p, .sync=%p, " | ||||
|                 ".read_size=%"PRIu32", .prog_size=%"PRIu32", " | ||||
|                 ".block_size=%"PRIu32", .block_count=%"PRIu32"})", | ||||
| @@ -53,24 +56,25 @@ int lfs_rambd_create(const struct lfs_config *cfg) { | ||||
|             cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count); | ||||
|     static const struct lfs_rambd_config defaults = {.erase_value=-1}; | ||||
|     int err = lfs_rambd_createcfg(cfg, &defaults); | ||||
|     LFS_TRACE("lfs_rambd_create -> %d", err); | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_create -> %d", err); | ||||
|     return err; | ||||
| } | ||||
|  | ||||
| int lfs_rambd_destroy(const struct lfs_config *cfg) { | ||||
|     LFS_TRACE("lfs_rambd_destroy(%p)", (void*)cfg); | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_destroy(%p)", (void*)cfg); | ||||
|     // clean up memory | ||||
|     lfs_rambd_t *bd = cfg->context; | ||||
|     if (!bd->cfg->buffer) { | ||||
|         lfs_free(bd->buffer); | ||||
|     } | ||||
|     LFS_TRACE("lfs_rambd_destroy -> %d", 0); | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_destroy -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block, | ||||
|         lfs_off_t off, void *buffer, lfs_size_t size) { | ||||
|     LFS_TRACE("lfs_rambd_read(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_read(%p, " | ||||
|                 "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", | ||||
|             (void*)cfg, block, off, buffer, size); | ||||
|     lfs_rambd_t *bd = cfg->context; | ||||
|  | ||||
| @@ -82,13 +86,14 @@ int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block, | ||||
|     // read data | ||||
|     memcpy(buffer, &bd->buffer[block*cfg->block_size + off], size); | ||||
|  | ||||
|     LFS_TRACE("lfs_rambd_read -> %d", 0); | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_read -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block, | ||||
|         lfs_off_t off, const void *buffer, lfs_size_t size) { | ||||
|     LFS_TRACE("lfs_rambd_prog(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_prog(%p, " | ||||
|                 "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", | ||||
|             (void*)cfg, block, off, buffer, size); | ||||
|     lfs_rambd_t *bd = cfg->context; | ||||
|  | ||||
| @@ -108,12 +113,12 @@ int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block, | ||||
|     // program data | ||||
|     memcpy(&bd->buffer[block*cfg->block_size + off], buffer, size); | ||||
|  | ||||
|     LFS_TRACE("lfs_rambd_prog -> %d", 0); | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_prog -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block) { | ||||
|     LFS_TRACE("lfs_rambd_erase(%p, 0x%"PRIx32")", (void*)cfg, block); | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_erase(%p, 0x%"PRIx32")", (void*)cfg, block); | ||||
|     lfs_rambd_t *bd = cfg->context; | ||||
|  | ||||
|     // check if erase is valid | ||||
| @@ -125,14 +130,14 @@ int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block) { | ||||
|                 bd->cfg->erase_value, cfg->block_size); | ||||
|     } | ||||
|  | ||||
|     LFS_TRACE("lfs_rambd_erase -> %d", 0); | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_erase -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int lfs_rambd_sync(const struct lfs_config *cfg) { | ||||
|     LFS_TRACE("lfs_rambd_sync(%p)", (void*)cfg); | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_sync(%p)", (void*)cfg); | ||||
|     // sync does nothing because we aren't backed by anything real | ||||
|     (void)cfg; | ||||
|     LFS_TRACE("lfs_rambd_sync -> %d", 0); | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_sync -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * Block device emulated in RAM | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
| @@ -15,6 +16,14 @@ extern "C" | ||||
| { | ||||
| #endif | ||||
|  | ||||
|  | ||||
| // Block device specific tracing | ||||
| #ifdef LFS_RAMBD_YES_TRACE | ||||
| #define LFS_RAMBD_TRACE(...) LFS_TRACE(__VA_ARGS__) | ||||
| #else | ||||
| #define LFS_RAMBD_TRACE(...) | ||||
| #endif | ||||
|  | ||||
| // rambd config (optional) | ||||
| struct lfs_rambd_config { | ||||
|     // 8-bit erase value to simulate erasing with. -1 indicates no erase | ||||
|   | ||||
| @@ -2,6 +2,7 @@ | ||||
|  * Testing block device, wraps filebd and rambd while providing a bunch | ||||
|  * of hooks for testing littlefs in various conditions. | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
| @@ -12,7 +13,7 @@ | ||||
|  | ||||
| int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path, | ||||
|         const struct lfs_testbd_config *bdcfg) { | ||||
|     LFS_TRACE("lfs_testbd_createcfg(%p {.context=%p, " | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_createcfg(%p {.context=%p, " | ||||
|                 ".read=%p, .prog=%p, .erase=%p, .sync=%p, " | ||||
|                 ".read_size=%"PRIu32", .prog_size=%"PRIu32", " | ||||
|                 ".block_size=%"PRIu32", .block_count=%"PRIu32"}, " | ||||
| @@ -38,23 +39,23 @@ int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path, | ||||
|         if (bd->cfg->wear_buffer) { | ||||
|             bd->wear = bd->cfg->wear_buffer; | ||||
|         } else { | ||||
|             bd->wear = lfs_malloc(sizeof(lfs_testbd_wear_t) * cfg->block_count); | ||||
|             bd->wear = lfs_malloc(sizeof(lfs_testbd_wear_t)*cfg->block_count); | ||||
|             if (!bd->wear) { | ||||
|                 LFS_TRACE("lfs_testbd_createcfg -> %d", LFS_ERR_NOMEM); | ||||
|                 LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", LFS_ERR_NOMEM); | ||||
|                 return LFS_ERR_NOMEM; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         memset(bd->wear, 0, sizeof(lfs_testbd_wear_t) * cfg->block_count); | ||||
|     } | ||||
|      | ||||
|  | ||||
|     // create underlying block device | ||||
|     if (bd->persist) { | ||||
|         bd->u.file.cfg = (struct lfs_filebd_config){ | ||||
|             .erase_value = bd->cfg->erase_value, | ||||
|         }; | ||||
|         int err = lfs_filebd_createcfg(cfg, path, &bd->u.file.cfg); | ||||
|         LFS_TRACE("lfs_testbd_createcfg -> %d", err); | ||||
|         LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", err); | ||||
|         return err; | ||||
|     } else { | ||||
|         bd->u.ram.cfg = (struct lfs_rambd_config){ | ||||
| @@ -62,13 +63,13 @@ int lfs_testbd_createcfg(const struct lfs_config *cfg, const char *path, | ||||
|             .buffer = bd->cfg->buffer, | ||||
|         }; | ||||
|         int err = lfs_rambd_createcfg(cfg, &bd->u.ram.cfg); | ||||
|         LFS_TRACE("lfs_testbd_createcfg -> %d", err); | ||||
|         LFS_TESTBD_TRACE("lfs_testbd_createcfg -> %d", err); | ||||
|         return err; | ||||
|     } | ||||
| } | ||||
|  | ||||
| int lfs_testbd_create(const struct lfs_config *cfg, const char *path) { | ||||
|     LFS_TRACE("lfs_testbd_create(%p {.context=%p, " | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_create(%p {.context=%p, " | ||||
|                 ".read=%p, .prog=%p, .erase=%p, .sync=%p, " | ||||
|                 ".read_size=%"PRIu32", .prog_size=%"PRIu32", " | ||||
|                 ".block_size=%"PRIu32", .block_count=%"PRIu32"}, " | ||||
| @@ -80,12 +81,12 @@ int lfs_testbd_create(const struct lfs_config *cfg, const char *path) { | ||||
|             path); | ||||
|     static const struct lfs_testbd_config defaults = {.erase_value=-1}; | ||||
|     int err = lfs_testbd_createcfg(cfg, path, &defaults); | ||||
|     LFS_TRACE("lfs_testbd_create -> %d", err); | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_create -> %d", err); | ||||
|     return err; | ||||
| } | ||||
|  | ||||
| int lfs_testbd_destroy(const struct lfs_config *cfg) { | ||||
|     LFS_TRACE("lfs_testbd_destroy(%p)", (void*)cfg); | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_destroy(%p)", (void*)cfg); | ||||
|     lfs_testbd_t *bd = cfg->context; | ||||
|     if (bd->cfg->erase_cycles && !bd->cfg->wear_buffer) { | ||||
|         lfs_free(bd->wear); | ||||
| @@ -93,11 +94,11 @@ int lfs_testbd_destroy(const struct lfs_config *cfg) { | ||||
|  | ||||
|     if (bd->persist) { | ||||
|         int err = lfs_filebd_destroy(cfg); | ||||
|         LFS_TRACE("lfs_testbd_destroy -> %d", err); | ||||
|         LFS_TESTBD_TRACE("lfs_testbd_destroy -> %d", err); | ||||
|         return err; | ||||
|     } else { | ||||
|         int err = lfs_rambd_destroy(cfg); | ||||
|         LFS_TRACE("lfs_testbd_destroy -> %d", err); | ||||
|         LFS_TESTBD_TRACE("lfs_testbd_destroy -> %d", err); | ||||
|         return err; | ||||
|     } | ||||
| } | ||||
| @@ -145,7 +146,8 @@ static int lfs_testbd_rawsync(const struct lfs_config *cfg) { | ||||
| /// block device API /// | ||||
| int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block, | ||||
|         lfs_off_t off, void *buffer, lfs_size_t size) { | ||||
|     LFS_TRACE("lfs_testbd_read(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_read(%p, " | ||||
|                 "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", | ||||
|             (void*)cfg, block, off, buffer, size); | ||||
|     lfs_testbd_t *bd = cfg->context; | ||||
|  | ||||
| @@ -155,22 +157,22 @@ int lfs_testbd_read(const struct lfs_config *cfg, lfs_block_t block, | ||||
|     LFS_ASSERT(block < cfg->block_count); | ||||
|  | ||||
|     // block bad? | ||||
|     if (bd->cfg->erase_cycles && | ||||
|             bd->cfg->badblock_behavior == LFS_TESTBD_BADBLOCK_NOREAD && | ||||
|             bd->wear[block] >= bd->cfg->erase_cycles) { | ||||
|         LFS_TRACE("lfs_testbd_read -> %d", LFS_ERR_CORRUPT); | ||||
|     if (bd->cfg->erase_cycles && bd->wear[block] >= bd->cfg->erase_cycles && | ||||
|             bd->cfg->badblock_behavior == LFS_TESTBD_BADBLOCK_READERROR) { | ||||
|         LFS_TESTBD_TRACE("lfs_testbd_read -> %d", LFS_ERR_CORRUPT); | ||||
|         return LFS_ERR_CORRUPT; | ||||
|     } | ||||
|  | ||||
|     // read | ||||
|     int err = lfs_testbd_rawread(cfg, block, off, buffer, size); | ||||
|     LFS_TRACE("lfs_testbd_read -> %d", err); | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_read -> %d", err); | ||||
|     return err; | ||||
| } | ||||
|  | ||||
| int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block, | ||||
|         lfs_off_t off, const void *buffer, lfs_size_t size) { | ||||
|     LFS_TRACE("lfs_testbd_prog(%p, 0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_prog(%p, " | ||||
|                 "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", | ||||
|             (void*)cfg, block, off, buffer, size); | ||||
|     lfs_testbd_t *bd = cfg->context; | ||||
|  | ||||
| @@ -180,17 +182,24 @@ int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block, | ||||
|     LFS_ASSERT(block < cfg->block_count); | ||||
|  | ||||
|     // block bad? | ||||
|     if (bd->cfg->erase_cycles && | ||||
|             bd->cfg->badblock_behavior == LFS_TESTBD_BADBLOCK_NOPROG && | ||||
|             bd->wear[block] >= bd->cfg->erase_cycles) { | ||||
|         LFS_TRACE("lfs_testbd_prog -> %d", LFS_ERR_CORRUPT); | ||||
|         return LFS_ERR_CORRUPT; | ||||
|     if (bd->cfg->erase_cycles && bd->wear[block] >= bd->cfg->erase_cycles) { | ||||
|         if (bd->cfg->badblock_behavior == | ||||
|                 LFS_TESTBD_BADBLOCK_PROGERROR) { | ||||
|             LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", LFS_ERR_CORRUPT); | ||||
|             return LFS_ERR_CORRUPT; | ||||
|         } else if (bd->cfg->badblock_behavior == | ||||
|                 LFS_TESTBD_BADBLOCK_PROGNOOP || | ||||
|                 bd->cfg->badblock_behavior == | ||||
|                 LFS_TESTBD_BADBLOCK_ERASENOOP) { | ||||
|             LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0); | ||||
|             return 0; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // prog | ||||
|     int err = lfs_testbd_rawprog(cfg, block, off, buffer, size); | ||||
|     if (err) { | ||||
|         LFS_TRACE("lfs_testbd_prog -> %d", err); | ||||
|         LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", err); | ||||
|         return err; | ||||
|     } | ||||
|  | ||||
| @@ -199,18 +208,18 @@ int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block, | ||||
|         bd->power_cycles -= 1; | ||||
|         if (bd->power_cycles == 0) { | ||||
|             // sync to make sure we persist the last changes | ||||
|             assert(lfs_testbd_rawsync(cfg) == 0); | ||||
|             LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0); | ||||
|             // simulate power loss | ||||
|             exit(33); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     LFS_TRACE("lfs_testbd_prog -> %d", 0); | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) { | ||||
|     LFS_TRACE("lfs_testbd_erase(%p, 0x%"PRIx32")", (void*)cfg, block); | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_erase(%p, 0x%"PRIx32")", (void*)cfg, block); | ||||
|     lfs_testbd_t *bd = cfg->context; | ||||
|  | ||||
|     // check if erase is valid | ||||
| @@ -219,9 +228,14 @@ int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) { | ||||
|     // block bad? | ||||
|     if (bd->cfg->erase_cycles) { | ||||
|         if (bd->wear[block] >= bd->cfg->erase_cycles) { | ||||
|             if (bd->cfg->badblock_behavior == LFS_TESTBD_BADBLOCK_NOERASE) { | ||||
|                 LFS_TRACE("lfs_testbd_erase -> %d", LFS_ERR_CORRUPT); | ||||
|             if (bd->cfg->badblock_behavior == | ||||
|                     LFS_TESTBD_BADBLOCK_ERASEERROR) { | ||||
|                 LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", LFS_ERR_CORRUPT); | ||||
|                 return LFS_ERR_CORRUPT; | ||||
|             } else if (bd->cfg->badblock_behavior == | ||||
|                     LFS_TESTBD_BADBLOCK_ERASENOOP) { | ||||
|                 LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", 0); | ||||
|                 return 0; | ||||
|             } | ||||
|         } else { | ||||
|             // mark wear | ||||
| @@ -232,7 +246,7 @@ int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) { | ||||
|     // erase | ||||
|     int err = lfs_testbd_rawerase(cfg, block); | ||||
|     if (err) { | ||||
|         LFS_TRACE("lfs_testbd_erase -> %d", err); | ||||
|         LFS_TESTBD_TRACE("lfs_testbd_erase -> %d", err); | ||||
|         return err; | ||||
|     } | ||||
|  | ||||
| @@ -241,20 +255,20 @@ int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) { | ||||
|         bd->power_cycles -= 1; | ||||
|         if (bd->power_cycles == 0) { | ||||
|             // sync to make sure we persist the last changes | ||||
|             assert(lfs_testbd_rawsync(cfg) == 0); | ||||
|             LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0); | ||||
|             // simulate power loss | ||||
|             exit(33); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     LFS_TRACE("lfs_testbd_prog -> %d", 0); | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_prog -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int lfs_testbd_sync(const struct lfs_config *cfg) { | ||||
|     LFS_TRACE("lfs_testbd_sync(%p)", (void*)cfg); | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_sync(%p)", (void*)cfg); | ||||
|     int err = lfs_testbd_rawsync(cfg); | ||||
|     LFS_TRACE("lfs_testbd_sync -> %d", err); | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_sync -> %d", err); | ||||
|     return err; | ||||
| } | ||||
|  | ||||
| @@ -262,20 +276,20 @@ int lfs_testbd_sync(const struct lfs_config *cfg) { | ||||
| /// simulated wear operations /// | ||||
| lfs_testbd_swear_t lfs_testbd_getwear(const struct lfs_config *cfg, | ||||
|         lfs_block_t block) { | ||||
|     LFS_TRACE("lfs_testbd_getwear(%p, %"PRIu32")", (void*)cfg, block); | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_getwear(%p, %"PRIu32")", (void*)cfg, block); | ||||
|     lfs_testbd_t *bd = cfg->context; | ||||
|  | ||||
|     // check if block is valid | ||||
|     LFS_ASSERT(bd->cfg->erase_cycles); | ||||
|     LFS_ASSERT(block < cfg->block_count); | ||||
|  | ||||
|     LFS_TRACE("lfs_testbd_getwear -> %"PRIu32, bd->wear[block]); | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_getwear -> %"PRIu32, bd->wear[block]); | ||||
|     return bd->wear[block]; | ||||
| } | ||||
|  | ||||
| int lfs_testbd_setwear(const struct lfs_config *cfg, | ||||
|         lfs_block_t block, lfs_testbd_wear_t wear) { | ||||
|     LFS_TRACE("lfs_testbd_setwear(%p, %"PRIu32")", (void*)cfg, block); | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_setwear(%p, %"PRIu32")", (void*)cfg, block); | ||||
|     lfs_testbd_t *bd = cfg->context; | ||||
|  | ||||
|     // check if block is valid | ||||
| @@ -284,6 +298,6 @@ int lfs_testbd_setwear(const struct lfs_config *cfg, | ||||
|  | ||||
|     bd->wear[block] = wear; | ||||
|  | ||||
|     LFS_TRACE("lfs_testbd_setwear -> %d", 0); | ||||
|     LFS_TESTBD_TRACE("lfs_testbd_setwear -> %d", 0); | ||||
|     return 0; | ||||
| } | ||||
|   | ||||
| @@ -2,6 +2,7 @@ | ||||
|  * Testing block device, wraps filebd and rambd while providing a bunch | ||||
|  * of hooks for testing littlefs in various conditions. | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
| @@ -19,14 +20,25 @@ extern "C" | ||||
| #endif | ||||
|  | ||||
|  | ||||
| // Mode determining how "bad blocks" behave during testing. This | ||||
| // simulates some real-world circumstances such as writes not | ||||
| // going through (noprog), erases not sticking (noerase), and ECC | ||||
| // failures (noread). | ||||
| // Block device specific tracing | ||||
| #ifdef LFS_TESTBD_YES_TRACE | ||||
| #define LFS_TESTBD_TRACE(...) LFS_TRACE(__VA_ARGS__) | ||||
| #else | ||||
| #define LFS_TESTBD_TRACE(...) | ||||
| #endif | ||||
|  | ||||
| // Mode determining how "bad blocks" behave during testing. This simulates | ||||
| // some real-world circumstances such as progs not sticking (prog-noop), | ||||
| // a readonly disk (erase-noop), and ECC failures (read-error). | ||||
| // | ||||
| // Not that read-noop is not allowed. Read _must_ return a consistent (but | ||||
| // may be arbitrary) value on every read. | ||||
| enum lfs_testbd_badblock_behavior { | ||||
|     LFS_TESTBD_BADBLOCK_NOPROG  = 0, | ||||
|     LFS_TESTBD_BADBLOCK_NOERASE = 1, | ||||
|     LFS_TESTBD_BADBLOCK_NOREAD  = 2, | ||||
|     LFS_TESTBD_BADBLOCK_PROGERROR, | ||||
|     LFS_TESTBD_BADBLOCK_ERASEERROR, | ||||
|     LFS_TESTBD_BADBLOCK_READERROR, | ||||
|     LFS_TESTBD_BADBLOCK_PROGNOOP, | ||||
|     LFS_TESTBD_BADBLOCK_ERASENOOP, | ||||
| }; | ||||
|  | ||||
| // Type for measuring wear | ||||
| @@ -82,7 +94,7 @@ typedef struct lfs_testbd { | ||||
| /// Block device API /// | ||||
|  | ||||
| // Create a test block device using the geometry in lfs_config | ||||
| //  | ||||
| // | ||||
| // Note that filebd is used if a path is provided, if path is NULL | ||||
| // testbd will use rambd which can be much faster. | ||||
| int lfs_testbd_create(const struct lfs_config *cfg, const char *path); | ||||
|   | ||||
							
								
								
									
										86
									
								
								lfs.h
									
									
									
									
									
								
							
							
						
						
									
										86
									
								
								lfs.h
									
									
									
									
									
								
							| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * The little filesystem | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
| @@ -9,6 +10,7 @@ | ||||
|  | ||||
| #include <stdint.h> | ||||
| #include <stdbool.h> | ||||
| #include "lfs_util.h" | ||||
|  | ||||
| #ifdef __cplusplus | ||||
| extern "C" | ||||
| @@ -21,7 +23,7 @@ extern "C" | ||||
| // Software library version | ||||
| // Major (top-nibble), incremented on backwards incompatible changes | ||||
| // Minor (bottom-nibble), incremented on feature additions | ||||
| #define LFS_VERSION 0x00020001 | ||||
| #define LFS_VERSION 0x00020005 | ||||
| #define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16)) | ||||
| #define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >>  0)) | ||||
|  | ||||
| @@ -123,20 +125,25 @@ enum lfs_type { | ||||
| enum lfs_open_flags { | ||||
|     // open flags | ||||
|     LFS_O_RDONLY = 1,         // Open a file as read only | ||||
| #ifndef LFS_READONLY | ||||
|     LFS_O_WRONLY = 2,         // Open a file as write only | ||||
|     LFS_O_RDWR   = 3,         // Open a file as read and write | ||||
|     LFS_O_CREAT  = 0x0100,    // Create a file if it does not exist | ||||
|     LFS_O_EXCL   = 0x0200,    // Fail if a file already exists | ||||
|     LFS_O_TRUNC  = 0x0400,    // Truncate the existing file to zero size | ||||
|     LFS_O_APPEND = 0x0800,    // Move to end of file on every write | ||||
| #endif | ||||
|  | ||||
|     // internally used flags | ||||
| #ifndef LFS_READONLY | ||||
|     LFS_F_DIRTY   = 0x010000, // File does not match storage | ||||
|     LFS_F_WRITING = 0x020000, // File has been written since last flush | ||||
| #endif | ||||
|     LFS_F_READING = 0x040000, // File has been read since last flush | ||||
|     LFS_F_ERRED   = 0x080000, // An error occured during write | ||||
| #ifndef LFS_READONLY | ||||
|     LFS_F_ERRED   = 0x080000, // An error occurred during write | ||||
| #endif | ||||
|     LFS_F_INLINE  = 0x100000, // Currently inlined in directory entry | ||||
|     LFS_F_OPENED  = 0x200000, // File has been opened | ||||
| }; | ||||
|  | ||||
| // File seek flags | ||||
| @@ -153,45 +160,55 @@ struct lfs_config { | ||||
|     // information to the block device operations | ||||
|     void *context; | ||||
|  | ||||
|     // Read a region in a block. Negative error codes are propogated | ||||
|     // Read a region in a block. Negative error codes are propagated | ||||
|     // to the user. | ||||
|     int (*read)(const struct lfs_config *c, lfs_block_t block, | ||||
|             lfs_off_t off, void *buffer, lfs_size_t size); | ||||
|  | ||||
|     // Program a region in a block. The block must have previously | ||||
|     // been erased. Negative error codes are propogated to the user. | ||||
|     // been erased. Negative error codes are propagated to the user. | ||||
|     // May return LFS_ERR_CORRUPT if the block should be considered bad. | ||||
|     int (*prog)(const struct lfs_config *c, lfs_block_t block, | ||||
|             lfs_off_t off, const void *buffer, lfs_size_t size); | ||||
|  | ||||
|     // Erase a block. A block must be erased before being programmed. | ||||
|     // The state of an erased block is undefined. Negative error codes | ||||
|     // are propogated to the user. | ||||
|     // are propagated to the user. | ||||
|     // May return LFS_ERR_CORRUPT if the block should be considered bad. | ||||
|     int (*erase)(const struct lfs_config *c, lfs_block_t block); | ||||
|  | ||||
|     // Sync the state of the underlying block device. Negative error codes | ||||
|     // are propogated to the user. | ||||
|     // are propagated to the user. | ||||
|     int (*sync)(const struct lfs_config *c); | ||||
|  | ||||
|     // Minimum size of a block read. All read operations will be a | ||||
| #ifdef LFS_THREADSAFE | ||||
|     // Lock the underlying block device. Negative error codes | ||||
|     // are propagated to the user. | ||||
|     int (*lock)(const struct lfs_config *c); | ||||
|  | ||||
|     // Unlock the underlying block device. Negative error codes | ||||
|     // are propagated to the user. | ||||
|     int (*unlock)(const struct lfs_config *c); | ||||
| #endif | ||||
|  | ||||
|     // Minimum size of a block read in bytes. All read operations will be a | ||||
|     // multiple of this value. | ||||
|     lfs_size_t read_size; | ||||
|  | ||||
|     // Minimum size of a block program. All program operations will be a | ||||
|     // multiple of this value. | ||||
|     // Minimum size of a block program in bytes. All program operations will be | ||||
|     // a multiple of this value. | ||||
|     lfs_size_t prog_size; | ||||
|  | ||||
|     // Size of an erasable block. This does not impact ram consumption and | ||||
|     // may be larger than the physical erase size. However, non-inlined files | ||||
|     // take up at minimum one block. Must be a multiple of the read | ||||
|     // and program sizes. | ||||
|     // Size of an erasable block in bytes. This does not impact ram consumption | ||||
|     // and may be larger than the physical erase size. However, non-inlined | ||||
|     // files take up at minimum one block. Must be a multiple of the read and | ||||
|     // program sizes. | ||||
|     lfs_size_t block_size; | ||||
|  | ||||
|     // Number of erasable blocks on the device. | ||||
|     lfs_size_t block_count; | ||||
|  | ||||
|     // Number of erase cycles before littlefs evicts metadata logs and moves  | ||||
|     // Number of erase cycles before littlefs evicts metadata logs and moves | ||||
|     // the metadata to another block. Suggested values are in the | ||||
|     // range 100-1000, with large values having better performance at the cost | ||||
|     // of less consistent wear distribution. | ||||
| @@ -199,11 +216,11 @@ struct lfs_config { | ||||
|     // Set to -1 to disable block-level wear-leveling. | ||||
|     int32_t block_cycles; | ||||
|  | ||||
|     // Size of block caches. Each cache buffers a portion of a block in RAM. | ||||
|     // The littlefs needs a read cache, a program cache, and one additional | ||||
|     // Size of block caches in bytes. Each cache buffers a portion of a block in | ||||
|     // RAM. The littlefs needs a read cache, a program cache, and one additional | ||||
|     // cache per file. Larger caches can improve performance by storing more | ||||
|     // data and reducing the number of disk accesses. Must be a multiple of | ||||
|     // the read and program sizes, and a factor of the block size. | ||||
|     // data and reducing the number of disk accesses. Must be a multiple of the | ||||
|     // read and program sizes, and a factor of the block size. | ||||
|     lfs_size_t cache_size; | ||||
|  | ||||
|     // Size of the lookahead buffer in bytes. A larger lookahead buffer | ||||
| @@ -240,6 +257,12 @@ struct lfs_config { | ||||
|     // larger attributes size but must be <= LFS_ATTR_MAX. Defaults to | ||||
|     // LFS_ATTR_MAX when zero. | ||||
|     lfs_size_t attr_max; | ||||
|  | ||||
|     // Optional upper limit on total space given to metadata pairs in bytes. On | ||||
|     // devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB) | ||||
|     // can help bound the metadata compaction time. Must be <= block_size. | ||||
|     // Defaults to block_size when zero. | ||||
|     lfs_size_t metadata_max; | ||||
| }; | ||||
|  | ||||
| // File info structure | ||||
| @@ -399,6 +422,7 @@ typedef struct lfs { | ||||
|  | ||||
| /// Filesystem functions /// | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Format a block device with the littlefs | ||||
| // | ||||
| // Requires a littlefs object and config struct. This clobbers the littlefs | ||||
| @@ -407,6 +431,7 @@ typedef struct lfs { | ||||
| // | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_format(lfs_t *lfs, const struct lfs_config *config); | ||||
| #endif | ||||
|  | ||||
| // Mounts a littlefs | ||||
| // | ||||
| @@ -426,12 +451,15 @@ int lfs_unmount(lfs_t *lfs); | ||||
|  | ||||
| /// General operations /// | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Removes a file or directory | ||||
| // | ||||
| // If removing a directory, the directory must be empty. | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_remove(lfs_t *lfs, const char *path); | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Rename or move a file or directory | ||||
| // | ||||
| // If the destination exists, it must match the source in type. | ||||
| @@ -439,6 +467,7 @@ int lfs_remove(lfs_t *lfs, const char *path); | ||||
| // | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath); | ||||
| #endif | ||||
|  | ||||
| // Find info about a file or directory | ||||
| // | ||||
| @@ -457,10 +486,11 @@ int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info); | ||||
| // Returns the size of the attribute, or a negative error code on failure. | ||||
| // Note, the returned size is the size of the attribute on disk, irrespective | ||||
| // of the size of the buffer. This can be used to dynamically allocate a buffer | ||||
| // or check for existance. | ||||
| // or check for existence. | ||||
| lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path, | ||||
|         uint8_t type, void *buffer, lfs_size_t size); | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Set custom attributes | ||||
| // | ||||
| // Custom attributes are uniquely identified by an 8-bit type and limited | ||||
| @@ -470,17 +500,21 @@ lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path, | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_setattr(lfs_t *lfs, const char *path, | ||||
|         uint8_t type, const void *buffer, lfs_size_t size); | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Removes a custom attribute | ||||
| // | ||||
| // If an attribute is not found, nothing happens. | ||||
| // | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type); | ||||
| #endif | ||||
|  | ||||
|  | ||||
| /// File operations /// | ||||
|  | ||||
| #ifndef LFS_NO_MALLOC | ||||
| // Open a file | ||||
| // | ||||
| // The mode that the file is opened in is determined by the flags, which | ||||
| @@ -490,6 +524,10 @@ int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type); | ||||
| int lfs_file_open(lfs_t *lfs, lfs_file_t *file, | ||||
|         const char *path, int flags); | ||||
|  | ||||
| // if LFS_NO_MALLOC is defined, lfs_file_open() will fail with LFS_ERR_NOMEM | ||||
| // thus use lfs_file_opencfg() with config.buffer set. | ||||
| #endif | ||||
|  | ||||
| // Open a file with extra configuration | ||||
| // | ||||
| // The mode that the file is opened in is determined by the flags, which | ||||
| @@ -525,6 +563,7 @@ int lfs_file_sync(lfs_t *lfs, lfs_file_t *file); | ||||
| lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file, | ||||
|         void *buffer, lfs_size_t size); | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Write data to file | ||||
| // | ||||
| // Takes a buffer and size indicating the data to write. The file will not | ||||
| @@ -533,6 +572,7 @@ lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file, | ||||
| // Returns the number of bytes written, or a negative error code on failure. | ||||
| lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file, | ||||
|         const void *buffer, lfs_size_t size); | ||||
| #endif | ||||
|  | ||||
| // Change the position of the file | ||||
| // | ||||
| @@ -541,10 +581,12 @@ lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file, | ||||
| lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file, | ||||
|         lfs_soff_t off, int whence); | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Truncates the size of the file to the specified size | ||||
| // | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size); | ||||
| #endif | ||||
|  | ||||
| // Return the position of the file | ||||
| // | ||||
| @@ -567,10 +609,12 @@ lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file); | ||||
|  | ||||
| /// Directory operations /// | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Create a directory | ||||
| // | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_mkdir(lfs_t *lfs, const char *path); | ||||
| #endif | ||||
|  | ||||
| // Open a directory | ||||
| // | ||||
| @@ -632,6 +676,7 @@ lfs_ssize_t lfs_fs_size(lfs_t *lfs); | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data); | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| #ifdef LFS_MIGRATE | ||||
| // Attempts to migrate a previous version of littlefs | ||||
| // | ||||
| @@ -646,6 +691,7 @@ int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data); | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg); | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
|  | ||||
| #ifdef __cplusplus | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * lfs util functions | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
|   | ||||
							
								
								
									
										41
									
								
								lfs_util.h
									
									
									
									
									
								
							
							
						
						
									
										41
									
								
								lfs_util.h
									
									
									
									
									
								
							| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * lfs utility functions | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
| @@ -49,40 +50,54 @@ extern "C" | ||||
| // code footprint | ||||
|  | ||||
| // Logging functions | ||||
| #ifndef LFS_TRACE | ||||
| #ifdef LFS_YES_TRACE | ||||
| #define LFS_TRACE(fmt, ...) \ | ||||
|     printf("%s:%d:trace: " fmt "\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| #define LFS_TRACE_(fmt, ...) \ | ||||
|     printf("%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| #define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "") | ||||
| #else | ||||
| #define LFS_TRACE(fmt, ...) | ||||
| #define LFS_TRACE(...) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_DEBUG | ||||
| #ifndef LFS_NO_DEBUG | ||||
| #define LFS_DEBUG(fmt, ...) \ | ||||
|     printf("%s:%d:debug: " fmt "\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| #define LFS_DEBUG_(fmt, ...) \ | ||||
|     printf("%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| #define LFS_DEBUG(...) LFS_DEBUG_(__VA_ARGS__, "") | ||||
| #else | ||||
| #define LFS_DEBUG(fmt, ...) | ||||
| #define LFS_DEBUG(...) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_WARN | ||||
| #ifndef LFS_NO_WARN | ||||
| #define LFS_WARN(fmt, ...) \ | ||||
|     printf("%s:%d:warn: " fmt "\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| #define LFS_WARN_(fmt, ...) \ | ||||
|     printf("%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| #define LFS_WARN(...) LFS_WARN_(__VA_ARGS__, "") | ||||
| #else | ||||
| #define LFS_WARN(fmt, ...) | ||||
| #define LFS_WARN(...) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_ERROR | ||||
| #ifndef LFS_NO_ERROR | ||||
| #define LFS_ERROR(fmt, ...) \ | ||||
|     printf("%s:%d:error: " fmt "\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| #define LFS_ERROR_(fmt, ...) \ | ||||
|     printf("%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| #define LFS_ERROR(...) LFS_ERROR_(__VA_ARGS__, "") | ||||
| #else | ||||
| #define LFS_ERROR(fmt, ...) | ||||
| #define LFS_ERROR(...) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| // Runtime assertions | ||||
| #ifndef LFS_ASSERT | ||||
| #ifndef LFS_NO_ASSERT | ||||
| #define LFS_ASSERT(test) assert(test) | ||||
| #else | ||||
| #define LFS_ASSERT(test) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
|  | ||||
| // Builtin functions, these may be replaced by more efficient | ||||
| @@ -107,7 +122,7 @@ static inline uint32_t lfs_alignup(uint32_t a, uint32_t alignment) { | ||||
|     return lfs_aligndown(a + alignment-1, alignment); | ||||
| } | ||||
|  | ||||
| // Find the next smallest power of 2 less than or equal to a | ||||
| // Find the smallest power of 2 greater than or equal to a | ||||
| static inline uint32_t lfs_npw2(uint32_t a) { | ||||
| #if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM)) | ||||
|     return 32 - __builtin_clz(a-1); | ||||
|   | ||||
							
								
								
									
										284
									
								
								scripts/code.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										284
									
								
								scripts/code.py
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,284 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Script to find code size at the function level. Basically just a bit wrapper | ||||
| # around nm with some extra conveniences for comparing builds. Heavily inspired | ||||
| # by Linux's Bloat-O-Meter. | ||||
| # | ||||
|  | ||||
| import os | ||||
| import glob | ||||
| import itertools as it | ||||
| import subprocess as sp | ||||
| import shlex | ||||
| import re | ||||
| import csv | ||||
| import collections as co | ||||
|  | ||||
|  | ||||
| OBJ_PATHS = ['*.o'] | ||||
|  | ||||
| def collect(paths, **args): | ||||
|     results = co.defaultdict(lambda: 0) | ||||
|     pattern = re.compile( | ||||
|         '^(?P<size>[0-9a-fA-F]+)' + | ||||
|         ' (?P<type>[%s])' % re.escape(args['type']) + | ||||
|         ' (?P<func>.+?)$') | ||||
|     for path in paths: | ||||
|         # note nm-tool may contain extra args | ||||
|         cmd = args['nm_tool'] + ['--size-sort', path] | ||||
|         if args.get('verbose'): | ||||
|             print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|         proc = sp.Popen(cmd, | ||||
|             stdout=sp.PIPE, | ||||
|             stderr=sp.PIPE if not args.get('verbose') else None, | ||||
|             universal_newlines=True, | ||||
|             errors='replace') | ||||
|         for line in proc.stdout: | ||||
|             m = pattern.match(line) | ||||
|             if m: | ||||
|                 results[(path, m.group('func'))] += int(m.group('size'), 16) | ||||
|         proc.wait() | ||||
|         if proc.returncode != 0: | ||||
|             if not args.get('verbose'): | ||||
|                 for line in proc.stderr: | ||||
|                     sys.stdout.write(line) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|     flat_results = [] | ||||
|     for (file, func), size in results.items(): | ||||
|         # map to source files | ||||
|         if args.get('build_dir'): | ||||
|             file = re.sub('%s/*' % re.escape(args['build_dir']), '', file) | ||||
|         # replace .o with .c, different scripts report .o/.c, we need to | ||||
|         # choose one if we want to deduplicate csv files | ||||
|         file = re.sub('\.o$', '.c', file) | ||||
|         # discard internal functions | ||||
|         if not args.get('everything'): | ||||
|             if func.startswith('__'): | ||||
|                 continue | ||||
|         # discard .8449 suffixes created by optimizer | ||||
|         func = re.sub('\.[0-9]+', '', func) | ||||
|  | ||||
|         flat_results.append((file, func, size)) | ||||
|  | ||||
|     return flat_results | ||||
|  | ||||
| def main(**args): | ||||
|     def openio(path, mode='r'): | ||||
|         if path == '-': | ||||
|             if 'r' in mode: | ||||
|                 return os.fdopen(os.dup(sys.stdin.fileno()), 'r') | ||||
|             else: | ||||
|                 return os.fdopen(os.dup(sys.stdout.fileno()), 'w') | ||||
|         else: | ||||
|             return open(path, mode) | ||||
|  | ||||
|     # find sizes | ||||
|     if not args.get('use', None): | ||||
|         # find .o files | ||||
|         paths = [] | ||||
|         for path in args['obj_paths']: | ||||
|             if os.path.isdir(path): | ||||
|                 path = path + '/*.o' | ||||
|  | ||||
|             for path in glob.glob(path): | ||||
|                 paths.append(path) | ||||
|  | ||||
|         if not paths: | ||||
|             print('no .obj files found in %r?' % args['obj_paths']) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|         results = collect(paths, **args) | ||||
|     else: | ||||
|         with openio(args['use']) as f: | ||||
|             r = csv.DictReader(f) | ||||
|             results = [ | ||||
|                 (   result['file'], | ||||
|                     result['name'], | ||||
|                     int(result['code_size'])) | ||||
|                 for result in r | ||||
|                 if result.get('code_size') not in {None, ''}] | ||||
|  | ||||
|     total = 0 | ||||
|     for _, _, size in results: | ||||
|         total += size | ||||
|  | ||||
|     # find previous results? | ||||
|     if args.get('diff'): | ||||
|         try: | ||||
|             with openio(args['diff']) as f: | ||||
|                 r = csv.DictReader(f) | ||||
|                 prev_results = [ | ||||
|                     (   result['file'], | ||||
|                         result['name'], | ||||
|                         int(result['code_size'])) | ||||
|                     for result in r | ||||
|                     if result.get('code_size') not in {None, ''}] | ||||
|         except FileNotFoundError: | ||||
|             prev_results = [] | ||||
|  | ||||
|         prev_total = 0 | ||||
|         for _, _, size in prev_results: | ||||
|             prev_total += size | ||||
|  | ||||
|     # write results to CSV | ||||
|     if args.get('output'): | ||||
|         merged_results = co.defaultdict(lambda: {}) | ||||
|         other_fields = [] | ||||
|  | ||||
|         # merge? | ||||
|         if args.get('merge'): | ||||
|             try: | ||||
|                 with openio(args['merge']) as f: | ||||
|                     r = csv.DictReader(f) | ||||
|                     for result in r: | ||||
|                         file = result.pop('file', '') | ||||
|                         func = result.pop('name', '') | ||||
|                         result.pop('code_size', None) | ||||
|                         merged_results[(file, func)] = result | ||||
|                         other_fields = result.keys() | ||||
|             except FileNotFoundError: | ||||
|                 pass | ||||
|  | ||||
|         for file, func, size in results: | ||||
|             merged_results[(file, func)]['code_size'] = size | ||||
|  | ||||
|         with openio(args['output'], 'w') as f: | ||||
|             w = csv.DictWriter(f, ['file', 'name', *other_fields, 'code_size']) | ||||
|             w.writeheader() | ||||
|             for (file, func), result in sorted(merged_results.items()): | ||||
|                 w.writerow({'file': file, 'name': func, **result}) | ||||
|  | ||||
|     # print results | ||||
|     def dedup_entries(results, by='name'): | ||||
|         entries = co.defaultdict(lambda: 0) | ||||
|         for file, func, size in results: | ||||
|             entry = (file if by == 'file' else func) | ||||
|             entries[entry] += size | ||||
|         return entries | ||||
|  | ||||
|     def diff_entries(olds, news): | ||||
|         diff = co.defaultdict(lambda: (0, 0, 0, 0)) | ||||
|         for name, new in news.items(): | ||||
|             diff[name] = (0, new, new, 1.0) | ||||
|         for name, old in olds.items(): | ||||
|             _, new, _, _ = diff[name] | ||||
|             diff[name] = (old, new, new-old, (new-old)/old if old else 1.0) | ||||
|         return diff | ||||
|  | ||||
|     def sorted_entries(entries): | ||||
|         if args.get('size_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1], x)) | ||||
|         elif args.get('reverse_size_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1], x)) | ||||
|         else: | ||||
|             return sorted(entries) | ||||
|  | ||||
|     def sorted_diff_entries(entries): | ||||
|         if args.get('size_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1][1], x)) | ||||
|         elif args.get('reverse_size_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1][1], x)) | ||||
|         else: | ||||
|             return sorted(entries, key=lambda x: (-x[1][3], x)) | ||||
|  | ||||
|     def print_header(by=''): | ||||
|         if not args.get('diff'): | ||||
|             print('%-36s %7s' % (by, 'size')) | ||||
|         else: | ||||
|             print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff')) | ||||
|  | ||||
|     def print_entry(name, size): | ||||
|         print("%-36s %7d" % (name, size)) | ||||
|  | ||||
|     def print_diff_entry(name, old, new, diff, ratio): | ||||
|         print("%-36s %7s %7s %+7d%s" % (name, | ||||
|             old or "-", | ||||
|             new or "-", | ||||
|             diff, | ||||
|             ' (%+.1f%%)' % (100*ratio) if ratio else '')) | ||||
|  | ||||
|     def print_entries(by='name'): | ||||
|         entries = dedup_entries(results, by=by) | ||||
|  | ||||
|         if not args.get('diff'): | ||||
|             print_header(by=by) | ||||
|             for name, size in sorted_entries(entries.items()): | ||||
|                 print_entry(name, size) | ||||
|         else: | ||||
|             prev_entries = dedup_entries(prev_results, by=by) | ||||
|             diff = diff_entries(prev_entries, entries) | ||||
|             print_header(by='%s (%d added, %d removed)' % (by, | ||||
|                 sum(1 for old, _, _, _ in diff.values() if not old), | ||||
|                 sum(1 for _, new, _, _ in diff.values() if not new))) | ||||
|             for name, (old, new, diff, ratio) in sorted_diff_entries( | ||||
|                     diff.items()): | ||||
|                 if ratio or args.get('all'): | ||||
|                     print_diff_entry(name, old, new, diff, ratio) | ||||
|  | ||||
|     def print_totals(): | ||||
|         if not args.get('diff'): | ||||
|             print_entry('TOTAL', total) | ||||
|         else: | ||||
|             ratio = (0.0 if not prev_total and not total | ||||
|                 else 1.0 if not prev_total | ||||
|                 else (total-prev_total)/prev_total) | ||||
|             print_diff_entry('TOTAL', | ||||
|                 prev_total, total, | ||||
|                 total-prev_total, | ||||
|                 ratio) | ||||
|  | ||||
|     if args.get('quiet'): | ||||
|         pass | ||||
|     elif args.get('summary'): | ||||
|         print_header() | ||||
|         print_totals() | ||||
|     elif args.get('files'): | ||||
|         print_entries(by='file') | ||||
|         print_totals() | ||||
|     else: | ||||
|         print_entries(by='name') | ||||
|         print_totals() | ||||
|  | ||||
if __name__ == "__main__":
    # Command-line entry point: declare the flags, parse argv, and forward
    # everything to main() as keyword arguments (flag names map to keys,
    # with dashes replaced by underscores, e.g. --size-sort -> size_sort).
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find code size at the function level.")
    # input selection
    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
        help="Description of where to find *.o files. May be a directory \
            or a list of paths. Defaults to %r." % OBJ_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    # CSV input/output options
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't compile and find code sizes, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff code size against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    # report filtering/sorting options
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--size-sort', action='store_true',
        help="Sort by size.")
    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
        help="Sort by size, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level code sizes. Note this does not include padding! "
            "So sizes may differ from other tools.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total code size.")
    # nm invocation options
    parser.add_argument('--type', default='tTrRdD',
        help="Type of symbols to report, this uses the same single-character "
            "type-names emitted by nm. Defaults to %(default)r.")
    parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
        help="Path to the nm tool to use.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
							
								
								
									
										323
									
								
								scripts/coverage.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										323
									
								
								scripts/coverage.py
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,323 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Parse and report coverage info from .info files generated by lcov | ||||
| # | ||||
| import os | ||||
| import glob | ||||
| import csv | ||||
| import re | ||||
| import collections as co | ||||
| import bisect as b | ||||
|  | ||||
|  | ||||
| INFO_PATHS = ['tests/*.toml.info'] | ||||
|  | ||||
def collect(paths, **args):
    # Parse lcov .info files into per-function coverage info.
    #
    # paths: list of .info file paths to parse; hits for the same line seen
    #     in multiple files are summed.
    # args: keyword options; only 'everything' is read here (include
    #     internal/testing functions when truthy).
    #
    # Returns a list of (file, func, hits, count) tuples, where count is the
    # number of instrumented lines attributed to func and hits is how many
    # of those were executed at least once.
    file = None
    funcs = []
    lines = co.defaultdict(lambda: 0)
    # lcov records: SF = source file, FN = function start line/name,
    # DA = per-line execution count
    pattern = re.compile(
        '^(?P<file>SF:/?(?P<file_name>.*))$'
        '|^(?P<func>FN:(?P<func_lineno>[0-9]*),(?P<func_name>.*))$'
        '|^(?P<line>DA:(?P<line_lineno>[0-9]*),(?P<line_hits>[0-9]*))$')
    for path in paths:
        with open(path) as f:
            for line in f:
                m = pattern.match(line)
                if m and m.group('file'):
                    file = m.group('file_name')
                elif m and file and m.group('func'):
                    funcs.append((file, int(m.group('func_lineno')),
                        m.group('func_name')))
                elif m and file and m.group('line'):
                    # sum hits across multiple .info files
                    lines[(file, int(m.group('line_lineno')))] += (
                        int(m.group('line_hits')))

    # map line numbers to functions
    funcs.sort()
    def func_from_lineno(file, lineno):
        # find the function whose start line most closely precedes lineno
        i = b.bisect(funcs, (file, lineno))
        if i and funcs[i-1][0] == file:
            return funcs[i-1][2]
        else:
            return None

    # reduce to function info
    reduced_funcs = co.defaultdict(lambda: (0, 0))
    for (file, line_lineno), line_hits in lines.items():
        func = func_from_lineno(file, line_lineno)
        if not func:
            continue
        hits, count = reduced_funcs[(file, func)]
        reduced_funcs[(file, func)] = (hits + (line_hits > 0), count + 1)

    results = []
    for (file, func), (hits, count) in reduced_funcs.items():
        # discard internal/testing functions (test_* injected with
        # internal testing)
        if not args.get('everything'):
            if func.startswith('__') or func.startswith('test_'):
                continue
        # discard .8449 suffixes created by optimizer
        # (raw string: '\.' is an invalid escape sequence otherwise)
        func = re.sub(r'\.[0-9]+', '', func)
        results.append((file, func, hits, count))

    return results
|  | ||||
|  | ||||
def main(**args):
    # Entry point: gather coverage results (from lcov .info files or a
    # previous CSV), optionally diff against and/or merge with other CSV
    # files, and print a coverage report.
    #
    # args mirrors the command-line flags declared at the bottom of the file.
    # imported locally so main() also works when this file is imported as a
    # module (the module-level import only happens under __main__)
    import sys

    def openio(path, mode='r'):
        # open path, treating '-' as stdin/stdout (duped so closing is safe)
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find coverage
    if not args.get('use'):
        # find *.info files
        paths = []
        for path in args['info_paths']:
            if os.path.isdir(path):
                # this script parses lcov .info files (see INFO_PATHS and the
                # error message below), so glob for *.info, not *.gcov
                path = path + '/*.info'

            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print('no .info files found in %r?' % args['info_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        # don't do any work, use the precomputed results in this CSV
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['name'],
                    int(result['coverage_hits']),
                    int(result['coverage_count']))
                for result in r
                if result.get('coverage_hits') not in {None, ''}
                if result.get('coverage_count') not in {None, ''}]

    total_hits, total_count = 0, 0
    for _, _, hits, count in results:
        total_hits += hits
        total_count += count

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    (   result['file'],
                        result['name'],
                        int(result['coverage_hits']),
                        int(result['coverage_count']))
                    for result in r
                    if result.get('coverage_hits') not in {None, ''}
                    if result.get('coverage_count') not in {None, ''}]
        except FileNotFoundError:
            # a missing baseline just means everything shows up as added
            prev_results = []

        prev_total_hits, prev_total_count = 0, 0
        for _, _, hits, count in prev_results:
            prev_total_hits += hits
            prev_total_count += count

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
        other_fields = []

        # merge? preserve unrelated columns from an existing CSV
        if args.get('merge'):
            try:
                with openio(args['merge']) as f:
                    r = csv.DictReader(f)
                    for result in r:
                        file = result.pop('file', '')
                        func = result.pop('name', '')
                        result.pop('coverage_hits', None)
                        result.pop('coverage_count', None)
                        merged_results[(file, func)] = result
                        other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, func, hits, count in results:
            merged_results[(file, func)]['coverage_hits'] = hits
            merged_results[(file, func)]['coverage_count'] = count

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'coverage_hits', 'coverage_count'])
            w.writeheader()
            for (file, func), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': func, **result})

    # print results
    def dedup_entries(results, by='name'):
        # sum hits/counts grouped by function name or by file
        entries = co.defaultdict(lambda: (0, 0))
        for file, func, hits, count in results:
            entry = (file if by == 'file' else func)
            entry_hits, entry_count = entries[entry]
            entries[entry] = (entry_hits + hits, entry_count + count)
        return entries

    def diff_entries(olds, news):
        # map each name to (old_hits, old_count, new_hits, new_count,
        # diff_hits, diff_count, change-in-coverage-ratio)
        diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0))
        for name, (new_hits, new_count) in news.items():
            diff[name] = (
                0, 0,
                new_hits, new_count,
                new_hits, new_count,
                (new_hits/new_count if new_count else 1.0) - 1.0)
        for name, (old_hits, old_count) in olds.items():
            _, _, new_hits, new_count, _, _, _ = diff[name]
            diff[name] = (
                old_hits, old_count,
                new_hits, new_count,
                new_hits-old_hits, new_count-old_count,
                ((new_hits/new_count if new_count else 1.0)
                    - (old_hits/old_count if old_count else 1.0)))
        return diff

    def sorted_entries(entries):
        # -s/-S sort by coverage percentage, default sorts by name
        if args.get('coverage_sort'):
            return sorted(entries, key=lambda x: (-(x[1][0]/x[1][1] if x[1][1] else -1), x))
        elif args.get('reverse_coverage_sort'):
            return sorted(entries, key=lambda x: (+(x[1][0]/x[1][1] if x[1][1] else -1), x))
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        # like sorted_entries, but the default sorts by coverage change
        if args.get('coverage_sort'):
            return sorted(entries, key=lambda x: (-(x[1][2]/x[1][3] if x[1][3] else -1), x))
        elif args.get('reverse_coverage_sort'):
            return sorted(entries, key=lambda x: (+(x[1][2]/x[1][3] if x[1][3] else -1), x))
        else:
            return sorted(entries, key=lambda x: (-x[1][6], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %19s' % (by, 'hits/line'))
        else:
            print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, hits, count):
        # '-' stands in for entries with no instrumented lines
        print("%-36s %11s %7s" % (name,
            '%d/%d' % (hits, count)
                if count else '-',
            '%.1f%%' % (100*hits/count)
                if count else '-'))

    def print_diff_entry(name,
            old_hits, old_count,
            new_hits, new_count,
            diff_hits, diff_count,
            ratio):
        print("%-36s %11s %7s %11s %7s %11s%s" % (name,
            '%d/%d' % (old_hits, old_count)
                if old_count else '-',
            '%.1f%%' % (100*old_hits/old_count)
                if old_count else '-',
            '%d/%d' % (new_hits, new_count)
                if new_count else '-',
            '%.1f%%' % (100*new_hits/new_count)
                if new_count else '-',
            '%+d/%+d' % (diff_hits, diff_count),
            ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, (hits, count) in sorted_entries(entries.items()):
                print_entry(name, hits, count)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for _, old, _, _, _, _, _ in diff.values() if not old),
                sum(1 for _, _, _, new, _, _, _ in diff.values() if not new)))
            for name, (
                    old_hits, old_count,
                    new_hits, new_count,
                    diff_hits, diff_count, ratio) in sorted_diff_entries(
                        diff.items()):
                # only show changed entries unless -a was given
                if ratio or args.get('all'):
                    print_diff_entry(name,
                        old_hits, old_count,
                        new_hits, new_count,
                        diff_hits, diff_count,
                        ratio)

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total_hits, total_count)
        else:
            ratio = ((total_hits/total_count
                    if total_count else 1.0)
                - (prev_total_hits/prev_total_count
                    if prev_total_count else 1.0))
            print_diff_entry('TOTAL',
                prev_total_hits, prev_total_count,
                total_hits, total_count,
                total_hits-prev_total_hits, total_count-prev_total_count,
                ratio)

    # -q suppresses output, -Y prints only totals, -F groups by file,
    # default groups by function name
    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()
|  | ||||
if __name__ == "__main__":
    # Command-line entry point: declare the flags, parse argv, and forward
    # everything to main() as keyword arguments.
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Parse and report coverage info from .info files \
            generated by lcov")
    parser.add_argument('info_paths', nargs='*', default=INFO_PATHS,
        help="Description of where to find *.info files. May be a directory \
            or list of paths. *.info files will be merged to show the total \
            coverage. Defaults to %r." % INFO_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    # CSV input/output options
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't do any work, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        # fixed copy-paste from code.py: this script diffs coverage
        help="Specify CSV file to diff coverage against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    # report filtering/sorting options
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--coverage-sort', action='store_true',
        help="Sort by coverage.")
    parser.add_argument('-S', '--reverse-coverage-sort', action='store_true',
        help="Sort by coverage, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level coverage.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total coverage.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
							
								
								
									
										283
									
								
								scripts/data.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										283
									
								
								scripts/data.py
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,283 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Script to find data size at the function level. Basically just a bit wrapper | ||||
| # around nm with some extra conveniences for comparing builds. Heavily inspired | ||||
| # by Linux's Bloat-O-Meter. | ||||
| # | ||||
|  | ||||
import collections as co
import csv
import glob
import itertools as it
import os
import re
import shlex
import subprocess as sp
import sys
|  | ||||
|  | ||||
| OBJ_PATHS = ['*.o'] | ||||
|  | ||||
def collect(paths, **args):
    # Run the nm tool over each object file and accumulate symbol sizes.
    #
    # paths: list of object file paths to inspect.
    # args: keyword options; reads 'nm_tool' (argv prefix for nm), 'type'
    #     (nm symbol-type characters to keep), 'verbose', 'build_dir', and
    #     'everything'.
    #
    # Returns a list of (file, func, size) tuples with sizes summed per
    # symbol, object paths mapped back to .c sources, and internal/optimizer
    # symbols filtered out.
    results = co.defaultdict(lambda: 0)
    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)' +
        ' (?P<type>[%s])' % re.escape(args['type']) +
        ' (?P<func>.+?)$')
    for path in paths:
        # note nm-tool may contain extra args
        cmd = args['nm_tool'] + ['--size-sort', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace')
        for line in proc.stdout:
            m = pattern.match(line)
            if m:
                # nm reports sizes in hex
                results[(path, m.group('func'))] += int(m.group('size'), 16)
        proc.wait()
        if proc.returncode != 0:
            # echo nm's stderr (captured above) before giving up
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

    flat_results = []
    for (file, func), size in results.items():
        # map to source files
        if args.get('build_dir'):
            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # replace .o with .c, different scripts report .o/.c, we need to
        # choose one if we want to deduplicate csv files
        # (raw strings: '\.' is an invalid escape sequence otherwise)
        file = re.sub(r'\.o$', '.c', file)
        # discard internal functions
        if not args.get('everything'):
            if func.startswith('__'):
                continue
        # discard .8449 suffixes created by optimizer
        func = re.sub(r'\.[0-9]+', '', func)
        flat_results.append((file, func, size))

    return flat_results
|  | ||||
def main(**args):
    # Entry point: gather data-section sizes (from nm or a previous CSV),
    # optionally diff against and/or merge with other CSV files, and print
    # a size report.
    #
    # args mirrors the command-line flags declared at the bottom of the file.
    def openio(path, mode='r'):
        # open path, treating '-' as stdin/stdout (duped so closing is safe)
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find sizes
    if not args.get('use', None):
        # find .o files
        paths = []
        for path in args['obj_paths']:
            if os.path.isdir(path):
                path = path + '/*.o'

            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            # message fixed to match what we actually search for (*.o files,
            # see OBJ_PATHS and the obj_paths help text)
            print('no .o files found in %r?' % args['obj_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        # don't compile, use the precomputed results in this CSV
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['name'],
                    int(result['data_size']))
                for result in r
                if result.get('data_size') not in {None, ''}]

    total = 0
    for _, _, size in results:
        total += size

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    (   result['file'],
                        result['name'],
                        int(result['data_size']))
                    for result in r
                    if result.get('data_size') not in {None, ''}]
        except FileNotFoundError:
            # a missing baseline just means everything shows up as added
            prev_results = []

        prev_total = 0
        for _, _, size in prev_results:
            prev_total += size

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
        other_fields = []

        # merge? preserve unrelated columns from an existing CSV
        if args.get('merge'):
            try:
                with openio(args['merge']) as f:
                    r = csv.DictReader(f)
                    for result in r:
                        file = result.pop('file', '')
                        func = result.pop('name', '')
                        result.pop('data_size', None)
                        merged_results[(file, func)] = result
                        other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, func, size in results:
            merged_results[(file, func)]['data_size'] = size

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size'])
            w.writeheader()
            for (file, func), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': func, **result})

    # print results
    def dedup_entries(results, by='name'):
        # sum sizes grouped by function name or by file
        entries = co.defaultdict(lambda: 0)
        for file, func, size in results:
            entry = (file if by == 'file' else func)
            entries[entry] += size
        return entries

    def diff_entries(olds, news):
        # map each name to (old_size, new_size, diff, relative change)
        diff = co.defaultdict(lambda: (0, 0, 0, 0))
        for name, new in news.items():
            diff[name] = (0, new, new, 1.0)
        for name, old in olds.items():
            _, new, _, _ = diff[name]
            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
        return diff

    def sorted_entries(entries):
        # -s/-S sort by size, default sorts by name
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1], x))
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        # like sorted_entries, but the default sorts by relative change
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        else:
            return sorted(entries, key=lambda x: (-x[1][3], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s' % (by, 'size'))
        else:
            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, size):
        print("%-36s %7d" % (name, size))

    def print_diff_entry(name, old, new, diff, ratio):
        # '-' stands in for entries that did not exist on that side
        print("%-36s %7s %7s %+7d%s" % (name,
            old or "-",
            new or "-",
            diff,
            ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, size in sorted_entries(entries.items()):
                print_entry(name, size)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for old, _, _, _ in diff.values() if not old),
                sum(1 for _, new, _, _ in diff.values() if not new)))
            for name, (old, new, diff, ratio) in sorted_diff_entries(
                    diff.items()):
                # only show changed entries unless -a was given
                if ratio or args.get('all'):
                    print_diff_entry(name, old, new, diff, ratio)

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total)
        else:
            ratio = (0.0 if not prev_total and not total
                else 1.0 if not prev_total
                else (total-prev_total)/prev_total)
            print_diff_entry('TOTAL',
                prev_total, total,
                total-prev_total,
                ratio)

    # -q suppresses output, -Y prints only totals, -F groups by file,
    # default groups by function name
    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()
|  | ||||
if __name__ == "__main__":
    # Command-line entry point: declare the flags, parse argv, and forward
    # everything to main() as keyword arguments (flag names map to keys,
    # with dashes replaced by underscores, e.g. --size-sort -> size_sort).
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find data size at the function level.")
    # input selection
    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
        help="Description of where to find *.o files. May be a directory \
            or a list of paths. Defaults to %r." % OBJ_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    # CSV input/output options
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't compile and find data sizes, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff data size against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    # report filtering/sorting options
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--size-sort', action='store_true',
        help="Sort by size.")
    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
        help="Sort by size, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level data sizes. Note this does not include padding! "
            "So sizes may differ from other tools.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total data size.")
    # nm invocation options
    parser.add_argument('--type', default='dDbB',
        help="Type of symbols to report, this uses the same single-character "
            "type-names emitted by nm. Defaults to %(default)r.")
    parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
        help="Path to the nm tool to use.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
| @@ -166,8 +166,8 @@ def mkassert(type, comp, lh, rh, size=None): | ||||
|         'type': type.lower(), 'TYPE': type.upper(), | ||||
|         'comp': comp.lower(), 'COMP': comp.upper(), | ||||
|         'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(), | ||||
|         'lh': lh.strip(), | ||||
|         'rh': rh.strip(), | ||||
|         'lh': lh.strip(' '), | ||||
|         'rh': rh.strip(' '), | ||||
|         'size': size, | ||||
|     } | ||||
|     if size: | ||||
|   | ||||
| @@ -2,6 +2,7 @@ | ||||
|  | ||||
| import struct | ||||
| import binascii | ||||
| import sys | ||||
| import itertools as it | ||||
|  | ||||
| TAG_TYPES = { | ||||
| @@ -232,8 +233,8 @@ class MetadataPair: | ||||
|  | ||||
|     def __lt__(self, other): | ||||
|         # corrupt blocks don't count | ||||
|         if not self and other: | ||||
|             return True | ||||
|         if not self or not other: | ||||
|             return bool(other) | ||||
|  | ||||
|         # use sequence arithmetic to avoid overflow | ||||
|         return not ((other.rev - self.rev) & 0x80000000) | ||||
| @@ -271,37 +272,39 @@ class MetadataPair: | ||||
|  | ||||
|         raise KeyError(gmask, gtag) | ||||
|  | ||||
|     def _dump_tags(self, tags, truncate=True): | ||||
|         sys.stdout.write("%-8s  %-8s  %-13s %4s %4s  %s\n" % ( | ||||
|             'off', 'tag', 'type', 'id', 'len', | ||||
|             'data (truncated)' if truncate else 12*' '+'data')) | ||||
|     def _dump_tags(self, tags, f=sys.stdout, truncate=True): | ||||
|         f.write("%-8s  %-8s  %-13s %4s %4s" % ( | ||||
|             'off', 'tag', 'type', 'id', 'len')) | ||||
|         if truncate: | ||||
|             f.write('  data (truncated)') | ||||
|         f.write('\n') | ||||
|  | ||||
|         for tag in tags: | ||||
|             sys.stdout.write("%08x: %08x  %-13s %4s %4s" % ( | ||||
|             f.write("%08x: %08x  %-13s %4s %4s" % ( | ||||
|                 tag.off, tag, | ||||
|                 tag.typerepr(), tag.idrepr(), tag.sizerepr())) | ||||
|             if truncate: | ||||
|                 sys.stdout.write("  %-23s  %-8s\n" % ( | ||||
|                 f.write("  %-23s  %-8s\n" % ( | ||||
|                     ' '.join('%02x' % c for c in tag.data[:8]), | ||||
|                     ''.join(c if c >= ' ' and c <= '~' else '.' | ||||
|                         for c in map(chr, tag.data[:8])))) | ||||
|             else: | ||||
|                 sys.stdout.write("\n") | ||||
|                 f.write("\n") | ||||
|                 for i in range(0, len(tag.data), 16): | ||||
|                     sys.stdout.write("%08x: %-47s  %-16s\n" % ( | ||||
|                     f.write("  %08x: %-47s  %-16s\n" % ( | ||||
|                         tag.off+i, | ||||
|                         ' '.join('%02x' % c for c in tag.data[i:i+16]), | ||||
|                         ''.join(c if c >= ' ' and c <= '~' else '.' | ||||
|                             for c in map(chr, tag.data[i:i+16])))) | ||||
|  | ||||
|     def dump_tags(self, truncate=True): | ||||
|         self._dump_tags(self.tags, truncate=truncate) | ||||
|     def dump_tags(self, f=sys.stdout, truncate=True): | ||||
|         self._dump_tags(self.tags, f=f, truncate=truncate) | ||||
|  | ||||
|     def dump_log(self, truncate=True): | ||||
|         self._dump_tags(self.log, truncate=truncate) | ||||
|     def dump_log(self, f=sys.stdout, truncate=True): | ||||
|         self._dump_tags(self.log, f=f, truncate=truncate) | ||||
|  | ||||
|     def dump_all(self, truncate=True): | ||||
|         self._dump_tags(self.all_, truncate=truncate) | ||||
|     def dump_all(self, f=sys.stdout, truncate=True): | ||||
|         self._dump_tags(self.all_, f=f, truncate=truncate) | ||||
|  | ||||
| def main(args): | ||||
|     blocks = [] | ||||
| @@ -315,6 +318,24 @@ def main(args): | ||||
|  | ||||
|     # find most recent pair | ||||
|     mdir = MetadataPair(blocks) | ||||
|  | ||||
|     try: | ||||
|         mdir.tail = mdir[Tag('tail', 0, 0)] | ||||
|         if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff': | ||||
|             mdir.tail = None | ||||
|     except KeyError: | ||||
|         mdir.tail = None | ||||
|  | ||||
|     print("mdir {%s} rev %d%s%s%s" % ( | ||||
|         ', '.join('%#x' % b | ||||
|             for b in [args.block1, args.block2] | ||||
|             if b is not None), | ||||
|         mdir.rev, | ||||
|         ' (was %s)' % ', '.join('%d' % m.rev for m in mdir.pair[1:]) | ||||
|         if len(mdir.pair) > 1 else '', | ||||
|         ' (corrupted!)' if not mdir else '', | ||||
|         ' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data) | ||||
|         if mdir.tail else '')) | ||||
|     if args.all: | ||||
|         mdir.dump_all(truncate=not args.no_truncate) | ||||
|     elif args.log: | ||||
| @@ -337,10 +358,10 @@ if __name__ == "__main__": | ||||
|         help="First block address for finding the metadata pair.") | ||||
|     parser.add_argument('block2', nargs='?', type=lambda x: int(x, 0), | ||||
|         help="Second block address for finding the metadata pair.") | ||||
|     parser.add_argument('-a', '--all', action='store_true', | ||||
|         help="Show all tags in log, included tags in corrupted commits.") | ||||
|     parser.add_argument('-l', '--log', action='store_true', | ||||
|         help="Show tags in log.") | ||||
|     parser.add_argument('-a', '--all', action='store_true', | ||||
|         help="Show all tags in log, included tags in corrupted commits.") | ||||
|     parser.add_argument('-T', '--no-truncate', action='store_true', | ||||
|         help="Don't truncate large amounts of data in tags.") | ||||
|         help="Don't truncate large amounts of data.") | ||||
|     sys.exit(main(parser.parse_args())) | ||||
|   | ||||
| @@ -7,120 +7,24 @@ import io | ||||
| import itertools as it | ||||
| from readmdir import Tag, MetadataPair | ||||
|  | ||||
| def popc(x): | ||||
|     return bin(x).count('1') | ||||
|  | ||||
| def ctz(x): | ||||
|     return len(bin(x)) - len(bin(x).rstrip('0')) | ||||
|  | ||||
| def dumptags(args, mdir, f): | ||||
|     if args.all: | ||||
|         tags = mdir.all_ | ||||
|     elif args.log: | ||||
|         tags = mdir.log | ||||
|     else: | ||||
|         tags = mdir.tags | ||||
|  | ||||
|     for k, tag in enumerate(tags): | ||||
|         f.write("tag %08x %s" % (tag, tag.typerepr())) | ||||
|         if tag.id != 0x3ff: | ||||
|             f.write(" id %d" % tag.id) | ||||
|         if tag.size != 0x3ff: | ||||
|             f.write(" size %d" % tag.size) | ||||
|         if tag.is_('name'): | ||||
|             f.write(" name %s" % | ||||
|                 json.dumps(tag.data.decode('utf8'))) | ||||
|         if tag.is_('dirstruct'): | ||||
|             f.write(" dir {%#x, %#x}" % struct.unpack( | ||||
|                 '<II', tag.data[:8].ljust(8, b'\xff'))) | ||||
|         if tag.is_('ctzstruct'): | ||||
|             f.write(" ctz {%#x} size %d" % struct.unpack( | ||||
|                 '<II', tag.data[:8].ljust(8, b'\xff'))) | ||||
|         if tag.is_('inlinestruct'): | ||||
|             f.write(" inline size %d" % tag.size) | ||||
|         if tag.is_('gstate'): | ||||
|             f.write(" 0x%s" % ''.join('%02x' % c for c in tag.data)) | ||||
|         if tag.is_('tail'): | ||||
|             f.write(" tail {%#x, %#x}" % struct.unpack( | ||||
|                 '<II', tag.data[:8].ljust(8, b'\xff'))) | ||||
|         f.write("\n") | ||||
|  | ||||
|         if args.data: | ||||
|             for i in range(0, len(tag.data), 16): | ||||
|                 f.write("  %-47s  %-16s\n" % ( | ||||
|                     ' '.join('%02x' % c for c in tag.data[i:i+16]), | ||||
|                     ''.join(c if c >= ' ' and c <= '~' else '.' | ||||
|                         for c in map(chr, tag.data[i:i+16])))) | ||||
|  | ||||
| def dumpentries(args, mdir, f): | ||||
|     for k, id_ in enumerate(mdir.ids): | ||||
|         name = mdir[Tag('name', id_, 0)] | ||||
|         struct_ = mdir[Tag('struct', id_, 0)] | ||||
|  | ||||
|         f.write("id %d %s %s" % ( | ||||
|             id_, name.typerepr(), | ||||
|             json.dumps(name.data.decode('utf8')))) | ||||
|         if struct_.is_('dirstruct'): | ||||
|             f.write(" dir {%#x, %#x}" % struct.unpack( | ||||
|                 '<II', struct_.data[:8].ljust(8, b'\xff'))) | ||||
|         if struct_.is_('ctzstruct'): | ||||
|             f.write(" ctz {%#x} size %d" % struct.unpack( | ||||
|                 '<II', struct_.data[:8].ljust(8, b'\xff'))) | ||||
|         if struct_.is_('inlinestruct'): | ||||
|             f.write(" inline size %d" % struct_.size) | ||||
|         f.write("\n") | ||||
|  | ||||
|         if args.data and struct_.is_('inlinestruct'): | ||||
|             for i in range(0, len(struct_.data), 16): | ||||
|                 f.write("  %-47s  %-16s\n" % ( | ||||
|                     ' '.join('%02x' % c for c in struct_.data[i:i+16]), | ||||
|                     ''.join(c if c >= ' ' and c <= '~' else '.' | ||||
|                         for c in map(chr, struct_.data[i:i+16])))) | ||||
|         elif args.data and struct_.is_('ctzstruct'): | ||||
|             block, size = struct.unpack( | ||||
|                 '<II', struct_.data[:8].ljust(8, b'\xff')) | ||||
|             data = [] | ||||
|             i = 0 if size == 0 else (size-1) // (args.block_size - 8) | ||||
|             if i != 0: | ||||
|                 i = ((size-1) - 4*popc(i-1)+2) // (args.block_size - 8) | ||||
|             with open(args.disk, 'rb') as f2: | ||||
|                 while i >= 0: | ||||
|                     f2.seek(block * args.block_size) | ||||
|                     dat = f2.read(args.block_size) | ||||
|                     data.append(dat[4*(ctz(i)+1) if i != 0 else 0:]) | ||||
|                     block, = struct.unpack('<I', dat[:4].ljust(4, b'\xff')) | ||||
|                     i -= 1 | ||||
|  | ||||
|             data = bytes(it.islice( | ||||
|                 it.chain.from_iterable(reversed(data)), size)) | ||||
|             for i in range(0, min(len(data), 256) | ||||
|                     if not args.no_truncate else len(data), 16): | ||||
|                 f.write("  %-47s  %-16s\n" % ( | ||||
|                     ' '.join('%02x' % c for c in data[i:i+16]), | ||||
|                     ''.join(c if c >= ' ' and c <= '~' else '.' | ||||
|                         for c in map(chr, data[i:i+16])))) | ||||
|  | ||||
|         for tag in mdir.tags: | ||||
|             if tag.id==id_ and tag.is_('userattr'): | ||||
|                 f.write("id %d %s size %d\n" % ( | ||||
|                     id_, tag.typerepr(), tag.size)) | ||||
|  | ||||
|                 if args.data: | ||||
|                     for i in range(0, len(tag.data), 16): | ||||
|                         f.write("  %-47s  %-16s\n" % ( | ||||
|                             ' '.join('%02x' % c for c in tag.data[i:i+16]), | ||||
|                             ''.join(c if c >= ' ' and c <= '~' else '.' | ||||
|                                 for c in map(chr, tag.data[i:i+16])))) | ||||
|  | ||||
| def main(args): | ||||
|     superblock = None | ||||
|     gstate = b'\0\0\0\0\0\0\0\0\0\0\0\0' | ||||
|     dirs = [] | ||||
|     mdirs = [] | ||||
|     corrupted = [] | ||||
|     cycle = False | ||||
|     with open(args.disk, 'rb') as f: | ||||
|         dirs = [] | ||||
|         superblock = None | ||||
|         gstate = b'' | ||||
|         mdirs = [] | ||||
|         tail = (args.block1, args.block2) | ||||
|         hard = False | ||||
|         while True: | ||||
|             for m in it.chain((m for d in dirs for m in d), mdirs): | ||||
|                 if set(m.blocks) == set(tail): | ||||
|                     # cycle detected | ||||
|                     cycle = m.blocks | ||||
|             if cycle: | ||||
|                 break | ||||
|  | ||||
|             # load mdir | ||||
|             data = [] | ||||
|             blocks = {} | ||||
| @@ -129,6 +33,7 @@ def main(args): | ||||
|                 data.append(f.read(args.block_size) | ||||
|                     .ljust(args.block_size, b'\xff')) | ||||
|                 blocks[id(data[-1])] = block | ||||
|  | ||||
|             mdir = MetadataPair(data) | ||||
|             mdir.blocks = tuple(blocks[id(p.data)] for p in mdir.pair) | ||||
|  | ||||
| @@ -156,6 +61,10 @@ def main(args): | ||||
|             except KeyError: | ||||
|                 pass | ||||
|  | ||||
|             # corrupted? | ||||
|             if not mdir: | ||||
|                 corrupted.append(mdir) | ||||
|  | ||||
|             # add to directories | ||||
|             mdirs.append(mdir) | ||||
|             if mdir.tail is None or not mdir.tail.is_('hardtail'): | ||||
| @@ -171,7 +80,7 @@ def main(args): | ||||
|     # find paths | ||||
|     dirtable = {} | ||||
|     for dir in dirs: | ||||
|         dirtable[tuple(sorted(dir[0].blocks))] = dir | ||||
|         dirtable[frozenset(dir[0].blocks)] = dir | ||||
|  | ||||
|     pending = [("/", dirs[0])] | ||||
|     while pending: | ||||
| @@ -183,67 +92,72 @@ def main(args): | ||||
|                         npath = tag.data.decode('utf8') | ||||
|                         dirstruct = mdir[Tag('dirstruct', tag.id, 0)] | ||||
|                         nblocks = struct.unpack('<II', dirstruct.data) | ||||
|                         nmdir = dirtable[tuple(sorted(nblocks))] | ||||
|                         nmdir = dirtable[frozenset(nblocks)] | ||||
|                         pending.append(((path + '/' + npath), nmdir)) | ||||
|                     except KeyError: | ||||
|                         pass | ||||
|  | ||||
|         dir[0].path = path.replace('//', '/') | ||||
|  | ||||
|     # dump tree | ||||
|     if not args.superblock and not args.gstate and not args.mdirs: | ||||
|         args.superblock = True | ||||
|         args.gstate = True | ||||
|         args.mdirs = True | ||||
|     # print littlefs + version info | ||||
|     version = ('?', '?') | ||||
|     if superblock: | ||||
|         version = tuple(reversed( | ||||
|             struct.unpack('<HH', superblock[1].data[0:4].ljust(4, b'\xff')))) | ||||
|     print("%-47s%s" % ("littlefs v%s.%s" % version, | ||||
|         "data (truncated, if it fits)" | ||||
|         if not any([args.no_truncate, args.log, args.all]) else "")) | ||||
|  | ||||
|     if args.superblock and superblock: | ||||
|         print("superblock %s v%d.%d" % ( | ||||
|             json.dumps(superblock[0].data.decode('utf8')), | ||||
|             struct.unpack('<H', superblock[1].data[2:2+2])[0], | ||||
|             struct.unpack('<H', superblock[1].data[0:0+2])[0])) | ||||
|         print( | ||||
|             "  block_size %d\n" | ||||
|             "  block_count %d\n" | ||||
|             "  name_max %d\n" | ||||
|             "  file_max %d\n" | ||||
|             "  attr_max %d" % struct.unpack( | ||||
|                 '<IIIII', superblock[1].data[4:4+20].ljust(20, b'\xff'))) | ||||
|     # print gstate | ||||
|     print("gstate 0x%s" % ''.join('%02x' % c for c in gstate)) | ||||
|     tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0]) | ||||
|     blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff')) | ||||
|     if tag.size or not tag.isvalid: | ||||
|         print("  orphans >=%d" % max(tag.size, 1)) | ||||
|     if tag.type: | ||||
|         print("  move dir {%#x, %#x} id %d" % ( | ||||
|             blocks[0], blocks[1], tag.id)) | ||||
|  | ||||
|     if args.gstate and gstate: | ||||
|         print("gstate 0x%s" % ''.join('%02x' % c for c in gstate)) | ||||
|         tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0]) | ||||
|         blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff')) | ||||
|         if tag.size: | ||||
|             print("  orphans %d" % tag.size) | ||||
|         if tag.type: | ||||
|             print("  move dir {%#x, %#x} id %d" % ( | ||||
|                 blocks[0], blocks[1], tag.id)) | ||||
|     # print mdir info | ||||
|     for i, dir in enumerate(dirs): | ||||
|         print("dir %s" % (json.dumps(dir[0].path) | ||||
|             if hasattr(dir[0], 'path') else '(orphan)')) | ||||
|  | ||||
|     if args.mdirs: | ||||
|         for i, dir in enumerate(dirs): | ||||
|             print("dir %s" % (json.dumps(dir[0].path) | ||||
|                 if hasattr(dir[0], 'path') else '(orphan)')) | ||||
|         for j, mdir in enumerate(dir): | ||||
|             print("mdir {%#x, %#x} rev %d (was %d)%s%s" % ( | ||||
|                 mdir.blocks[0], mdir.blocks[1], mdir.rev, mdir.pair[1].rev, | ||||
|                 ' (corrupted!)' if not mdir else '', | ||||
|                 ' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data) | ||||
|                 if mdir.tail else '')) | ||||
|  | ||||
|             for j, mdir in enumerate(dir): | ||||
|                 print("mdir {%#x, %#x} rev %d%s" % ( | ||||
|                     mdir.blocks[0], mdir.blocks[1], mdir.rev, | ||||
|                     ' (corrupted)' if not mdir else '')) | ||||
|             f = io.StringIO() | ||||
|             if args.log: | ||||
|                 mdir.dump_log(f, truncate=not args.no_truncate) | ||||
|             elif args.all: | ||||
|                 mdir.dump_all(f, truncate=not args.no_truncate) | ||||
|             else: | ||||
|                 mdir.dump_tags(f, truncate=not args.no_truncate) | ||||
|  | ||||
|                 f = io.StringIO() | ||||
|                 if args.tags or args.all or args.log: | ||||
|                     dumptags(args, mdir, f) | ||||
|                 else: | ||||
|                     dumpentries(args, mdir, f) | ||||
|             lines = list(filter(None, f.getvalue().split('\n'))) | ||||
|             for k, line in enumerate(lines): | ||||
|                 print("%s %s" % ( | ||||
|                     ' ' if j == len(dir)-1 else | ||||
|                     'v' if k == len(lines)-1 else | ||||
|                     '|', | ||||
|                     line)) | ||||
|  | ||||
|                 lines = list(filter(None, f.getvalue().split('\n'))) | ||||
|                 for k, line in enumerate(lines): | ||||
|                     print("%s %s" % ( | ||||
|                         ' ' if j == len(dir)-1 else | ||||
|                         'v' if k == len(lines)-1 else | ||||
|                         '|', | ||||
|                         line)) | ||||
|     errcode = 0 | ||||
|     for mdir in corrupted: | ||||
|         errcode = errcode or 1 | ||||
|         print("*** corrupted mdir {%#x, %#x}! ***" % ( | ||||
|             mdir.blocks[0], mdir.blocks[1])) | ||||
|  | ||||
|     return 0 if all(mdir for dir in dirs for mdir in dir) else 1 | ||||
|     if cycle: | ||||
|         errcode = errcode or 2 | ||||
|         print("*** cycle detected {%#x, %#x}! ***" % ( | ||||
|             cycle[0], cycle[1])) | ||||
|  | ||||
|     return errcode | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     import argparse | ||||
| @@ -256,24 +170,14 @@ if __name__ == "__main__": | ||||
|         help="Size of a block in bytes.") | ||||
|     parser.add_argument('block1', nargs='?', default=0, | ||||
|         type=lambda x: int(x, 0), | ||||
|         help="Optional first block address for finding the root.") | ||||
|         help="Optional first block address for finding the superblock.") | ||||
|     parser.add_argument('block2', nargs='?', default=1, | ||||
|         type=lambda x: int(x, 0), | ||||
|         help="Optional second block address for finding the root.") | ||||
|     parser.add_argument('-s', '--superblock', action='store_true', | ||||
|         help="Show contents of the superblock.") | ||||
|     parser.add_argument('-g', '--gstate', action='store_true', | ||||
|         help="Show contents of global-state.") | ||||
|     parser.add_argument('-m', '--mdirs', action='store_true', | ||||
|         help="Show contents of metadata-pairs/directories.") | ||||
|     parser.add_argument('-t', '--tags', action='store_true', | ||||
|         help="Show metadata tags instead of reconstructing entries.") | ||||
|     parser.add_argument('-a', '--all', action='store_true', | ||||
|         help="Show all tags in log, included tags in corrupted commits.") | ||||
|         help="Optional second block address for finding the superblock.") | ||||
|     parser.add_argument('-l', '--log', action='store_true', | ||||
|         help="Show tags in log.") | ||||
|     parser.add_argument('-d', '--data', action='store_true', | ||||
|         help="Also show the raw contents of files/attrs/tags.") | ||||
|     parser.add_argument('-a', '--all', action='store_true', | ||||
|         help="Show all tags in log, included tags in corrupted commits.") | ||||
|     parser.add_argument('-T', '--no-truncate', action='store_true', | ||||
|         help="Don't truncate large amounts of data in files.") | ||||
|         help="Show the full contents of files/attrs/tags.") | ||||
|     sys.exit(main(parser.parse_args())) | ||||
|   | ||||
							
								
								
									
										430
									
								
								scripts/stack.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										430
									
								
								scripts/stack.py
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,430 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Script to find stack usage at the function level. Will detect recursion and | ||||
| # report as infinite stack usage. | ||||
| # | ||||
|  | ||||
| import os | ||||
| import glob | ||||
| import itertools as it | ||||
| import re | ||||
| import csv | ||||
| import collections as co | ||||
| import math as m | ||||
|  | ||||
|  | ||||
| CI_PATHS = ['*.ci'] | ||||
|  | ||||
| def collect(paths, **args): | ||||
|     # parse the vcg format | ||||
|     k_pattern = re.compile('([a-z]+)\s*:', re.DOTALL) | ||||
|     v_pattern = re.compile('(?:"(.*?)"|([a-z]+))', re.DOTALL) | ||||
|     def parse_vcg(rest): | ||||
|         def parse_vcg(rest): | ||||
|             node = [] | ||||
|             while True: | ||||
|                 rest = rest.lstrip() | ||||
|                 m = k_pattern.match(rest) | ||||
|                 if not m: | ||||
|                     return (node, rest) | ||||
|                 k, rest = m.group(1), rest[m.end(0):] | ||||
|  | ||||
|                 rest = rest.lstrip() | ||||
|                 if rest.startswith('{'): | ||||
|                     v, rest = parse_vcg(rest[1:]) | ||||
|                     assert rest[0] == '}', "unexpected %r" % rest[0:1] | ||||
|                     rest = rest[1:] | ||||
|                     node.append((k, v)) | ||||
|                 else: | ||||
|                     m = v_pattern.match(rest) | ||||
|                     assert m, "unexpected %r" % rest[0:1] | ||||
|                     v, rest = m.group(1) or m.group(2), rest[m.end(0):] | ||||
|                     node.append((k, v)) | ||||
|  | ||||
|         node, rest = parse_vcg(rest) | ||||
|         assert rest == '', "unexpected %r" % rest[0:1] | ||||
|         return node | ||||
|  | ||||
|     # collect into functions | ||||
|     results = co.defaultdict(lambda: (None, None, 0, set())) | ||||
|     f_pattern = re.compile( | ||||
|         r'([^\\]*)\\n([^:]*)[^\\]*\\n([0-9]+) bytes \((.*)\)') | ||||
|     for path in paths: | ||||
|         with open(path) as f: | ||||
|             vcg = parse_vcg(f.read()) | ||||
|         for k, graph in vcg: | ||||
|             if k != 'graph': | ||||
|                 continue | ||||
|             for k, info in graph: | ||||
|                 if k == 'node': | ||||
|                     info = dict(info) | ||||
|                     m = f_pattern.match(info['label']) | ||||
|                     if m: | ||||
|                         function, file, size, type = m.groups() | ||||
|                         if not args.get('quiet') and type != 'static': | ||||
|                             print('warning: found non-static stack for %s (%s)' | ||||
|                                 % (function, type)) | ||||
|                         _, _, _, targets = results[info['title']] | ||||
|                         results[info['title']] = ( | ||||
|                             file, function, int(size), targets) | ||||
|                 elif k == 'edge': | ||||
|                     info = dict(info) | ||||
|                     _, _, _, targets = results[info['sourcename']] | ||||
|                     targets.add(info['targetname']) | ||||
|                 else: | ||||
|                     continue | ||||
|  | ||||
|     if not args.get('everything'): | ||||
|         for source, (s_file, s_function, _, _) in list(results.items()): | ||||
|             # discard internal functions | ||||
|             if s_file.startswith('<') or s_file.startswith('/usr/include'): | ||||
|                 del results[source] | ||||
|  | ||||
|     # find maximum stack size recursively, this requires also detecting cycles | ||||
|     # (in case of recursion) | ||||
|     def find_limit(source, seen=None): | ||||
|         seen = seen or set() | ||||
|         if source not in results: | ||||
|             return 0 | ||||
|         _, _, frame, targets = results[source] | ||||
|  | ||||
|         limit = 0 | ||||
|         for target in targets: | ||||
|             if target in seen: | ||||
|                 # found a cycle | ||||
|                 return float('inf') | ||||
|             limit_ = find_limit(target, seen | {target}) | ||||
|             limit = max(limit, limit_) | ||||
|  | ||||
|         return frame + limit | ||||
|  | ||||
|     def find_deps(targets): | ||||
|         deps = set() | ||||
|         for target in targets: | ||||
|             if target in results: | ||||
|                 t_file, t_function, _, _ = results[target] | ||||
|                 deps.add((t_file, t_function)) | ||||
|         return deps | ||||
|  | ||||
|     # flatten into a list | ||||
|     flat_results = [] | ||||
|     for source, (s_file, s_function, frame, targets) in results.items(): | ||||
|         limit = find_limit(source) | ||||
|         deps = find_deps(targets) | ||||
|         flat_results.append((s_file, s_function, frame, limit, deps)) | ||||
|  | ||||
|     return flat_results | ||||
|  | ||||
| def main(**args): | ||||
|     def openio(path, mode='r'): | ||||
|         if path == '-': | ||||
|             if 'r' in mode: | ||||
|                 return os.fdopen(os.dup(sys.stdin.fileno()), 'r') | ||||
|             else: | ||||
|                 return os.fdopen(os.dup(sys.stdout.fileno()), 'w') | ||||
|         else: | ||||
|             return open(path, mode) | ||||
|  | ||||
|     # find sizes | ||||
|     if not args.get('use', None): | ||||
|         # find .ci files | ||||
|         paths = [] | ||||
|         for path in args['ci_paths']: | ||||
|             if os.path.isdir(path): | ||||
|                 path = path + '/*.ci' | ||||
|  | ||||
|             for path in glob.glob(path): | ||||
|                 paths.append(path) | ||||
|  | ||||
|         if not paths: | ||||
|             print('no .ci files found in %r?' % args['ci_paths']) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|         results = collect(paths, **args) | ||||
|     else: | ||||
|         with openio(args['use']) as f: | ||||
|             r = csv.DictReader(f) | ||||
|             results = [ | ||||
|                 (   result['file'], | ||||
|                     result['name'], | ||||
|                     int(result['stack_frame']), | ||||
|                     float(result['stack_limit']), # note limit can be inf | ||||
|                     set()) | ||||
|                 for result in r | ||||
|                 if result.get('stack_frame') not in {None, ''} | ||||
|                 if result.get('stack_limit') not in {None, ''}] | ||||
|  | ||||
|     total_frame = 0 | ||||
|     total_limit = 0 | ||||
|     for _, _, frame, limit, _ in results: | ||||
|         total_frame += frame | ||||
|         total_limit = max(total_limit, limit) | ||||
|  | ||||
|     # find previous results? | ||||
|     if args.get('diff'): | ||||
|         try: | ||||
|             with openio(args['diff']) as f: | ||||
|                 r = csv.DictReader(f) | ||||
|                 prev_results = [ | ||||
|                     (   result['file'], | ||||
|                         result['name'], | ||||
|                         int(result['stack_frame']), | ||||
|                         float(result['stack_limit']), | ||||
|                         set()) | ||||
|                     for result in r | ||||
|                     if result.get('stack_frame') not in {None, ''} | ||||
|                     if result.get('stack_limit') not in {None, ''}] | ||||
|         except FileNotFoundError: | ||||
|             prev_results = [] | ||||
|  | ||||
|         prev_total_frame = 0 | ||||
|         prev_total_limit = 0 | ||||
|         for _, _, frame, limit, _ in prev_results: | ||||
|             prev_total_frame += frame | ||||
|             prev_total_limit = max(prev_total_limit, limit) | ||||
|  | ||||
|     # write results to CSV | ||||
|     if args.get('output'): | ||||
|         merged_results = co.defaultdict(lambda: {}) | ||||
|         other_fields = [] | ||||
|  | ||||
|         # merge? | ||||
|         if args.get('merge'): | ||||
|             try: | ||||
|                 with openio(args['merge']) as f: | ||||
|                     r = csv.DictReader(f) | ||||
|                     for result in r: | ||||
|                         file = result.pop('file', '') | ||||
|                         func = result.pop('name', '') | ||||
|                         result.pop('stack_frame', None) | ||||
|                         result.pop('stack_limit', None) | ||||
|                         merged_results[(file, func)] = result | ||||
|                         other_fields = result.keys() | ||||
|             except FileNotFoundError: | ||||
|                 pass | ||||
|  | ||||
|         for file, func, frame, limit, _ in results: | ||||
|             merged_results[(file, func)]['stack_frame'] = frame | ||||
|             merged_results[(file, func)]['stack_limit'] = limit | ||||
|  | ||||
|         with openio(args['output'], 'w') as f: | ||||
|             w = csv.DictWriter(f, ['file', 'name', *other_fields, 'stack_frame', 'stack_limit']) | ||||
|             w.writeheader() | ||||
|             for (file, func), result in sorted(merged_results.items()): | ||||
|                 w.writerow({'file': file, 'name': func, **result}) | ||||
|  | ||||
|     # print results | ||||
|     def dedup_entries(results, by='name'): | ||||
|         entries = co.defaultdict(lambda: (0, 0, set())) | ||||
|         for file, func, frame, limit, deps in results: | ||||
|             entry = (file if by == 'file' else func) | ||||
|             entry_frame, entry_limit, entry_deps = entries[entry] | ||||
|             entries[entry] = ( | ||||
|                 entry_frame + frame, | ||||
|                 max(entry_limit, limit), | ||||
|                 entry_deps | {file if by == 'file' else func | ||||
|                     for file, func in deps}) | ||||
|         return entries | ||||
|  | ||||
    def diff_entries(olds, news):
        # Compute per-entry deltas between two dedup'd result maps.
        # Each value is (old_frame, old_limit, new_frame, new_limit,
        # diff_frame, diff_limit, ratio, deps); old/new fields stay None
        # for added/removed entries so the printer renders them as '-'.
        diff = co.defaultdict(lambda: (None, None, None, None, 0, 0, 0, set()))
        for name, (new_frame, new_limit, deps) in news.items():
            # seed every new entry as "added": full delta, ratio 1.0
            diff[name] = (
                None, None,
                new_frame, new_limit,
                new_frame, new_limit,
                1.0,
                deps)
        for name, (old_frame, old_limit, _) in olds.items():
            _, _, new_frame, new_limit, _, _, _, deps = diff[name]
            diff[name] = (
                old_frame, old_limit,
                new_frame, new_limit,
                (new_frame or 0) - (old_frame or 0),
                # inf - inf is reported as 0 ("no change") rather than NaN
                0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
                    else (new_limit or 0) - (old_limit or 0),
                # ratio: 0.0 for inf->inf, +/-inf when only one side is
                # unbounded, +1.0 when the limit became nonzero, otherwise
                # the relative change against the old limit
                0.0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
                    else +float('inf') if m.isinf(new_limit or 0)
                    else -float('inf') if m.isinf(old_limit or 0)
                    else +0.0 if not old_limit and not new_limit
                    else +1.0 if not old_limit
                    else ((new_limit or 0) - (old_limit or 0))/(old_limit or 0),
                deps)
        return diff
|  | ||||
|     def sorted_entries(entries): | ||||
|         if args.get('limit_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1][1], x)) | ||||
|         elif args.get('reverse_limit_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1][1], x)) | ||||
|         elif args.get('frame_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1][0], x)) | ||||
|         elif args.get('reverse_frame_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1][0], x)) | ||||
|         else: | ||||
|             return sorted(entries) | ||||
|  | ||||
|     def sorted_diff_entries(entries): | ||||
|         if args.get('limit_sort'): | ||||
|             return sorted(entries, key=lambda x: (-(x[1][3] or 0), x)) | ||||
|         elif args.get('reverse_limit_sort'): | ||||
|             return sorted(entries, key=lambda x: (+(x[1][3] or 0), x)) | ||||
|         elif args.get('frame_sort'): | ||||
|             return sorted(entries, key=lambda x: (-(x[1][2] or 0), x)) | ||||
|         elif args.get('reverse_frame_sort'): | ||||
|             return sorted(entries, key=lambda x: (+(x[1][2] or 0), x)) | ||||
|         else: | ||||
|             return sorted(entries, key=lambda x: (-x[1][6], x)) | ||||
|  | ||||
|     def print_header(by=''): | ||||
|         if not args.get('diff'): | ||||
|             print('%-36s %7s %7s' % (by, 'frame', 'limit')) | ||||
|         else: | ||||
|             print('%-36s %15s %15s %15s' % (by, 'old', 'new', 'diff')) | ||||
|  | ||||
|     def print_entry(name, frame, limit): | ||||
|         print("%-36s %7d %7s" % (name, | ||||
|             frame, '∞' if m.isinf(limit) else int(limit))) | ||||
|  | ||||
    def print_diff_entry(name,
            old_frame, old_limit,
            new_frame, new_limit,
            diff_frame, diff_limit,
            ratio):
        # Render one diff row; None fields (added/removed entries) print
        # as '-', unbounded limits print as '∞', and the relative change
        # is appended as a percentage only when nonzero.
        print('%-36s %7s %7s %7s %7s %+7d %7s%s' % (name,
            old_frame if old_frame is not None else "-",
            ('∞' if m.isinf(old_limit) else int(old_limit))
                if old_limit is not None else "-",
            new_frame if new_frame is not None else "-",
            ('∞' if m.isinf(new_limit) else int(new_limit))
                if new_limit is not None else "-",
            diff_frame,
            # an infinite delta gets an explicit sign instead of %+d
            ('+∞' if diff_limit > 0 and m.isinf(diff_limit)
                else '-∞' if diff_limit < 0 and m.isinf(diff_limit)
                else '%+d' % diff_limit),
            '' if not ratio
                else ' (+∞%)' if ratio > 0 and m.isinf(ratio)
                else ' (-∞%)' if ratio < 0 and m.isinf(ratio)
                else ' (%+.1f%%)' % (100*ratio)))
|  | ||||
    def print_entries(by='name'):
        # Print one row per deduplicated entry; with -L/--depth also draw
        # an ASCII tree of each entry's dependencies beneath it.
        # build optional tree of dependencies
        def print_deps(entries, depth, print,
                filter=lambda _: True,
                prefixes=('', '', '', '')):
            # note: 'print' and 'filter' deliberately shadow the builtins;
            # 'print' is the row-rendering callback supplied by the caller.
            # prefixes[0]/[1] decorate this row (mid/last), [2]/[3] are the
            # continuation prefixes for child rows.
            entries = entries if isinstance(entries, list) else list(entries)
            filtered_entries = [(name, entry)
                for name, entry in entries
                if filter(name)]
            for i, (name, entry) in enumerate(filtered_entries):
                last = (i == len(filtered_entries)-1)
                print(prefixes[0+last] + name, entry)

                if depth > 0:
                    # deps is always the last element of an entry tuple
                    deps = entry[-1]
                    print_deps(entries, depth-1, print,
                        lambda name: name in deps,
                        (   prefixes[2+last] + "|-> ",
                            prefixes[2+last] + "'-> ",
                            prefixes[2+last] + "|   ",
                            prefixes[2+last] + "    "))

        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            print_deps(
                sorted_entries(entries.items()),
                args.get('depth') or 0,
                lambda name, entry: print_entry(name, *entry[:-1]))
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)

            # added/removed counts come from None-ness of old/new limits
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for _, old, _, _, _, _, _, _ in diff.values() if old is None),
                sum(1 for _, _, _, new, _, _, _, _ in diff.values() if new is None)))
            # only changed entries are shown unless -a/--all was given
            print_deps(
                filter(
                    lambda x: x[1][6] or args.get('all'),
                    sorted_diff_entries(diff.items())),
                args.get('depth') or 0,
                lambda name, entry: print_diff_entry(name, *entry[:-1]))
|  | ||||
    def print_totals():
        # Print the TOTAL row; in diff mode compute overall frame/limit
        # deltas, treating inf vs inf as "no change" to avoid NaNs.
        if not args.get('diff'):
            print_entry('TOTAL', total_frame, total_limit)
        else:
            diff_frame = total_frame - prev_total_frame
            diff_limit = (
                0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
                    else (total_limit or 0) - (prev_total_limit or 0))
            # ratio mirrors diff_entries: +/-inf when only one side is
            # unbounded, 1.0 when previously zero, else relative change
            ratio = (
                0.0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
                    else +float('inf') if m.isinf(total_limit or 0)
                    else -float('inf') if m.isinf(prev_total_limit or 0)
                    else 0.0 if not prev_total_limit and not total_limit
                    else 1.0 if not prev_total_limit
                    else ((total_limit or 0) - (prev_total_limit or 0))/(prev_total_limit or 0))
            print_diff_entry('TOTAL',
                prev_total_frame, prev_total_limit,
                total_frame, total_limit,
                diff_frame, diff_limit,
                ratio)
|  | ||||
    # -q suppresses all output; -Y prints only the totals row;
    # -F groups results by file instead of by function name
    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()
|  | ||||
|  | ||||
if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find stack usage at the function level.")
    parser.add_argument('ci_paths', nargs='*', default=CI_PATHS,
        help="Description of where to find *.ci files. May be a directory \
            or a list of paths. Defaults to %r." % CI_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't parse callgraph files, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--limit-sort', action='store_true',
        help="Sort by stack limit.")
    parser.add_argument('-S', '--reverse-limit-sort', action='store_true',
        help="Sort by stack limit, but backwards.")
    parser.add_argument('--frame-sort', action='store_true',
        help="Sort by stack frame size.")
    parser.add_argument('--reverse-frame-sort', action='store_true',
        help="Sort by stack frame size, but backwards.")
    # int(x, 0) auto-detects the base (0x.., 0o.., decimal);
    # a bare -L (no value) means unlimited depth via const=inf
    parser.add_argument('-L', '--depth', default=0, type=lambda x: int(x, 0),
        nargs='?', const=float('inf'),
        help="Depth of dependencies to show.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level calls.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total stack size.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
							
								
								
									
										331
									
								
								scripts/structs.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										331
									
								
								scripts/structs.py
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,331 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Script to find struct sizes. | ||||
| # | ||||
|  | ||||
import collections as co
import csv
import glob
import itertools as it
import os
import re
import shlex
import subprocess as sp
import sys
|  | ||||
|  | ||||
OBJ_PATHS = ['*.o']  # default glob for locating object files
|  | ||||
def collect(paths, **args):
    """Collect struct sizes from the DWARF debug info of object files.

    Returns a flat list of (file, struct_name, size) tuples. Requires
    objects built with debug info; invokes objdump twice per object,
    first for the raw line table, then for the DIE tree. Exits the
    process if objdump fails.
    """
    # raw strings so \s, \. etc. are regex escapes, not (invalid) string
    # escape sequences (a SyntaxWarning on modern Python)
    # raw line table entry: "<no> <dir> ... <file>"; maps the file
    # numbers referenced by DW_AT_decl_file back to file names
    decl_pattern = re.compile(
        r'^\s+(?P<no>[0-9]+)'
            r'\s+(?P<dir>[0-9]+)'
            r'\s+.*'
            r'\s+(?P<file>[^\s]+)$')
    # one alternative per DWARF attribute line we care about
    struct_pattern = re.compile(
        r'^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
            r'|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
            r'|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
            r'|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')

    results = co.defaultdict(lambda: 0)
    for path in paths:
        # find decl, we want to filter by structs in .h files
        decls = {}
        # note objdump-tool may contain extra args
        cmd = args['objdump_tool'] + ['--dwarf=rawline', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace')
        for line in proc.stdout:
            # find file numbers
            m = decl_pattern.match(line)
            if m:
                decls[int(m.group('no'))] = m.group('file')
        proc.wait()
        if proc.returncode != 0:
            # surface objdump's stderr before bailing out
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

        # collect structs as we parse dwarf info; state machine over the
        # DIE dump: each new DW_TAG flushes the struct accumulated so far
        found = False
        name = None
        decl = None
        size = None

        # note objdump-tool may contain extra args
        cmd = args['objdump_tool'] + ['--dwarf=info', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace')
        for line in proc.stdout:
            # state machine here to find structs
            m = struct_pattern.match(line)
            if m:
                if m.group('tag'):
                    if (name is not None
                            and decl is not None
                            and size is not None):
                        decl = decls.get(decl, '?')
                        results[(decl, name)] = size
                    found = (m.group('tag') == 'structure_type')
                    name = None
                    decl = None
                    size = None
                elif found and m.group('name'):
                    name = m.group('name')
                elif found and name and m.group('decl'):
                    decl = int(m.group('decl'))
                elif found and name and m.group('size'):
                    size = int(m.group('size'))
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

    flat_results = []
    for (file, struct), size in results.items():
        # map to source files
        if args.get('build_dir'):
            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # only include structs declared in header files in the current
        # directory, ignore internal-only structs (these are represented
        # in other measurements)
        if not args.get('everything'):
            if not file.endswith('.h'):
                continue
        # replace .o with .c, different scripts report .o/.c, we need to
        # choose one if we want to deduplicate csv files
        file = re.sub(r'\.o$', '.c', file)

        flat_results.append((file, struct, size))

    return flat_results
|  | ||||
|  | ||||
def main(**args):
    """Drive collection, optional CSV merge/output, and result printing."""
    # open path, or dup stdin/stdout when path is '-'
    def openio(path, mode='r'):
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find sizes
    if not args.get('use', None):
        # find .o files
        paths = []
        for path in args['obj_paths']:
            if os.path.isdir(path):
                path = path + '/*.o'

            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print('no .obj files found in %r?' % args['obj_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        # -u/--use: read previously collected sizes from a CSV instead,
        # skipping rows with no struct_size column
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['name'],
                    int(result['struct_size']))
                for result in r
                if result.get('struct_size') not in {None, ''}]

    total = 0
    for _, _, size in results:
        total += size

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    (   result['file'],
                        result['name'],
                        int(result['struct_size']))
                    for result in r
                    if result.get('struct_size') not in {None, ''}]
        except FileNotFoundError:
            # a missing diff file is treated as "everything was added"
            prev_results = []

        prev_total = 0
        for _, _, size in prev_results:
            prev_total += size

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
        other_fields = []

        # merge? preserve any extra columns from the existing CSV,
        # dropping only its struct_size values (ours replace them)
        if args.get('merge'):
            try:
                with openio(args['merge']) as f:
                    r = csv.DictReader(f)
                    for result in r:
                        file = result.pop('file', '')
                        struct = result.pop('name', '')
                        result.pop('struct_size', None)
                        merged_results[(file, struct)] = result
                        other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, struct, size in results:
            merged_results[(file, struct)]['struct_size'] = size

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size'])
            w.writeheader()
            for (file, struct), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': struct, **result})
|  | ||||
|     # print results | ||||
|     def dedup_entries(results, by='name'): | ||||
|         entries = co.defaultdict(lambda: 0) | ||||
|         for file, struct, size in results: | ||||
|             entry = (file if by == 'file' else struct) | ||||
|             entries[entry] += size | ||||
|         return entries | ||||
|  | ||||
|     def diff_entries(olds, news): | ||||
|         diff = co.defaultdict(lambda: (0, 0, 0, 0)) | ||||
|         for name, new in news.items(): | ||||
|             diff[name] = (0, new, new, 1.0) | ||||
|         for name, old in olds.items(): | ||||
|             _, new, _, _ = diff[name] | ||||
|             diff[name] = (old, new, new-old, (new-old)/old if old else 1.0) | ||||
|         return diff | ||||
|  | ||||
|     def sorted_entries(entries): | ||||
|         if args.get('size_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1], x)) | ||||
|         elif args.get('reverse_size_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1], x)) | ||||
|         else: | ||||
|             return sorted(entries) | ||||
|  | ||||
|     def sorted_diff_entries(entries): | ||||
|         if args.get('size_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1][1], x)) | ||||
|         elif args.get('reverse_size_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1][1], x)) | ||||
|         else: | ||||
|             return sorted(entries, key=lambda x: (-x[1][3], x)) | ||||
|  | ||||
|     def print_header(by=''): | ||||
|         if not args.get('diff'): | ||||
|             print('%-36s %7s' % (by, 'size')) | ||||
|         else: | ||||
|             print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff')) | ||||
|  | ||||
    def print_entry(name, size):
        # one result row: name column then total size
        print("%-36s %7d" % (name, size))
|  | ||||
    def print_diff_entry(name, old, new, diff, ratio):
        # one diff row; note 'old or "-"' also renders a size of 0 as '-'
        print("%-36s %7s %7s %+7d%s" % (name,
            old or "-",
            new or "-",
            diff,
            ' (%+.1f%%)' % (100*ratio) if ratio else ''))
|  | ||||
    def print_entries(by='name'):
        # Print per-entry rows; in diff mode only changed entries are
        # shown unless -a/--all was given.
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, size in sorted_entries(entries.items()):
                print_entry(name, size)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            # counts use truthiness, so sizes of 0 also count as
            # added/removed here
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for old, _, _, _ in diff.values() if not old),
                sum(1 for _, new, _, _ in diff.values() if not new)))
            # note the loop variable 'diff' shadows the dict above
            for name, (old, new, diff, ratio) in sorted_diff_entries(
                    diff.items()):
                if ratio or args.get('all'):
                    print_diff_entry(name, old, new, diff, ratio)
|  | ||||
    def print_totals():
        # Print the TOTAL row; in diff mode also the overall delta and
        # relative change against the previous CSV's total.
        if not args.get('diff'):
            print_entry('TOTAL', total)
        else:
            # ratio edge cases: both zero -> 0.0, previously zero -> 1.0
            ratio = (0.0 if not prev_total and not total
                else 1.0 if not prev_total
                else (total-prev_total)/prev_total)
            print_diff_entry('TOTAL',
                prev_total, total,
                total-prev_total,
                ratio)
|  | ||||
    # -q suppresses all output; -Y prints only the totals row;
    # -F groups results by file instead of by struct name
    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()
|  | ||||
if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find struct sizes.")
    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
        help="Description of where to find *.o files. May be a directory \
            or a list of paths. Defaults to %r." % OBJ_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't compile and find struct sizes, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff struct size against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--size-sort', action='store_true',
        help="Sort by size.")
    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
        help="Sort by size, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level struct sizes.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total struct size.")
    # split on whitespace so --objdump-tool may carry extra arguments
    parser.add_argument('--objdump-tool', default=['objdump'], type=lambda x: x.split(),
        help="Path to the objdump tool to use.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
							
								
								
									
										279
									
								
								scripts/summary.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										279
									
								
								scripts/summary.py
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,279 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Script to summarize the outputs of other scripts. Operates on CSV files. | ||||
| # | ||||
|  | ||||
| import functools as ft | ||||
| import collections as co | ||||
| import os | ||||
| import csv | ||||
| import re | ||||
| import math as m | ||||
|  | ||||
# displayable fields: each Field bundles how to parse, merge, sort,
# format, and diff one column sourced from the other scripts' CSVs:
#   parse - extract this field's value from a CSV row (may raise)
#   acc   - fold values together when merging rows
#   key   - project a value onto a sortable scalar
#   fmt   - printf-style column format
#   repr  - convert a value to its printable form
#   null  - placeholder string when the field is missing
#   ratio - relative change between an old and new value
Field = co.namedtuple('Field', 'name,parse,acc,key,fmt,repr,null,ratio')
FIELDS = [
    # name, parse, acc, key, fmt, repr, null, ratio
    Field('code',
        lambda r: int(r['code_size']),
        sum,
        lambda r: r,
        '%7s',
        lambda r: r,
        '-',
        lambda old, new: (new-old)/old),
    Field('data',
        lambda r: int(r['data_size']),
        sum,
        lambda r: r,
        '%7s',
        lambda r: r,
        '-',
        lambda old, new: (new-old)/old),
    Field('stack',
        lambda r: float(r['stack_limit']),
        max,  # stack limits don't add across rows; keep the worst case
        lambda r: r,
        '%7s',
        lambda r: '∞' if m.isinf(r) else int(r),
        '-',
        lambda old, new: (new-old)/old),
    Field('structs',
        lambda r: int(r['struct_size']),
        sum,
        lambda r: r,
        '%8s',
        lambda r: r,
        '-',
        lambda old, new: (new-old)/old),
    Field('coverage',
        # values are (hits, count) pairs, accumulated element-wise and
        # sorted/diffed by hit ratio
        lambda r: (int(r['coverage_hits']), int(r['coverage_count'])),
        lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs),
        lambda r: r[0]/r[1],
        '%19s',
        lambda r: '%11s %7s' % ('%d/%d' % (r[0], r[1]), '%.1f%%' % (100*r[0]/r[1])),
        '%11s %7s' % ('-', '-'),
        lambda old, new: ((new[0]/new[1]) - (old[0]/old[1])))
]
|  | ||||
|  | ||||
| def main(**args): | ||||
|     def openio(path, mode='r'): | ||||
|         if path == '-': | ||||
|             if 'r' in mode: | ||||
|                 return os.fdopen(os.dup(sys.stdin.fileno()), 'r') | ||||
|             else: | ||||
|                 return os.fdopen(os.dup(sys.stdout.fileno()), 'w') | ||||
|         else: | ||||
|             return open(path, mode) | ||||
|  | ||||
|     # find results | ||||
|     results = co.defaultdict(lambda: {}) | ||||
|     for path in args.get('csv_paths', '-'): | ||||
|         try: | ||||
|             with openio(path) as f: | ||||
|                 r = csv.DictReader(f) | ||||
|                 for result in r: | ||||
|                     file = result.pop('file', '') | ||||
|                     name = result.pop('name', '') | ||||
|                     prev = results[(file, name)] | ||||
|                     for field in FIELDS: | ||||
|                         try: | ||||
|                             r = field.parse(result) | ||||
|                             if field.name in prev: | ||||
|                                 results[(file, name)][field.name] = field.acc( | ||||
|                                     [prev[field.name], r]) | ||||
|                             else: | ||||
|                                 results[(file, name)][field.name] = r | ||||
|                         except (KeyError, ValueError): | ||||
|                             pass | ||||
|         except FileNotFoundError: | ||||
|             pass | ||||
|  | ||||
|     # find fields | ||||
|     if args.get('all_fields'): | ||||
|         fields = FIELDS | ||||
|     elif args.get('fields') is not None: | ||||
|         fields_dict = {field.name: field for field in FIELDS} | ||||
|         fields = [fields_dict[f] for f in args['fields']] | ||||
|     else: | ||||
|         fields = [] | ||||
|         for field in FIELDS: | ||||
|             if any(field.name in result for result in results.values()): | ||||
|                 fields.append(field) | ||||
|  | ||||
|     # find total for every field | ||||
|     total = {} | ||||
|     for result in results.values(): | ||||
|         for field in fields: | ||||
|             if field.name in result and field.name in total: | ||||
|                 total[field.name] = field.acc( | ||||
|                     [total[field.name], result[field.name]]) | ||||
|             elif field.name in result: | ||||
|                 total[field.name] = result[field.name] | ||||
|  | ||||
|     # find previous results? | ||||
|     if args.get('diff'): | ||||
|         prev_results = co.defaultdict(lambda: {}) | ||||
|         try: | ||||
|             with openio(args['diff']) as f: | ||||
|                 r = csv.DictReader(f) | ||||
|                 for result in r: | ||||
|                     file = result.pop('file', '') | ||||
|                     name = result.pop('name', '') | ||||
|                     prev = prev_results[(file, name)] | ||||
|                     for field in FIELDS: | ||||
|                         try: | ||||
|                             r = field.parse(result) | ||||
|                             if field.name in prev: | ||||
|                                 prev_results[(file, name)][field.name] = field.acc( | ||||
|                                     [prev[field.name], r]) | ||||
|                             else: | ||||
|                                 prev_results[(file, name)][field.name] = r | ||||
|                         except (KeyError, ValueError): | ||||
|                             pass | ||||
|         except FileNotFoundError: | ||||
|             pass | ||||
|  | ||||
|         prev_total = {} | ||||
|         for result in prev_results.values(): | ||||
|             for field in fields: | ||||
|                 if field.name in result and field.name in prev_total: | ||||
|                     prev_total[field.name] = field.acc( | ||||
|                         [prev_total[field.name], result[field.name]]) | ||||
|                 elif field.name in result: | ||||
|                     prev_total[field.name] = result[field.name] | ||||
|  | ||||
|     # print results | ||||
|     def dedup_entries(results, by='name'): | ||||
|         entries = co.defaultdict(lambda: {}) | ||||
|         for (file, func), result in results.items(): | ||||
|             entry = (file if by == 'file' else func) | ||||
|             prev = entries[entry] | ||||
|             for field in fields: | ||||
|                 if field.name in result and field.name in prev: | ||||
|                     entries[entry][field.name] = field.acc( | ||||
|                         [prev[field.name], result[field.name]]) | ||||
|                 elif field.name in result: | ||||
|                     entries[entry][field.name] = result[field.name] | ||||
|         return entries | ||||
|  | ||||
|     def sorted_entries(entries): | ||||
|         if args.get('sort') is not None: | ||||
|             field = {field.name: field for field in FIELDS}[args['sort']] | ||||
|             return sorted(entries, key=lambda x: ( | ||||
|                 -(field.key(x[1][field.name])) if field.name in x[1] else -1, x)) | ||||
|         elif args.get('reverse_sort') is not None: | ||||
|             field = {field.name: field for field in FIELDS}[args['reverse_sort']] | ||||
|             return sorted(entries, key=lambda x: ( | ||||
|                 +(field.key(x[1][field.name])) if field.name in x[1] else -1, x)) | ||||
|         else: | ||||
|             return sorted(entries) | ||||
|  | ||||
|     def print_header(by=''): | ||||
|         if not args.get('diff'): | ||||
|             print('%-36s' % by, end='') | ||||
|             for field in fields: | ||||
|                 print((' '+field.fmt) % field.name, end='') | ||||
|             print() | ||||
|         else: | ||||
|             print('%-36s' % by, end='') | ||||
|             for field in fields: | ||||
|                 print((' '+field.fmt) % field.name, end='') | ||||
|                 print(' %-9s' % '', end='') | ||||
|             print() | ||||
|  | ||||
|     def print_entry(name, result): | ||||
|         print('%-36s' % name, end='') | ||||
|         for field in fields: | ||||
|             r = result.get(field.name) | ||||
|             if r is not None: | ||||
|                 print((' '+field.fmt) % field.repr(r), end='') | ||||
|             else: | ||||
|                 print((' '+field.fmt) % '-', end='') | ||||
|         print() | ||||
|  | ||||
    def print_diff_entry(name, old, new):
        # Print one diff row: for each field, the new value (or '-' when
        # missing) followed by a '(+x.x%)' change ratio against the old value.
        print('%-36s' % name, end='')
        for field in fields:
            n = new.get(field.name)
            if n is not None:
                print((' '+field.fmt) % field.repr(n), end='')
            else:
                print((' '+field.fmt) % '-', end='')
            o = old.get(field.name)
            # ratio special cases, evaluated in priority order:
            # - both infinite        -> 0.0 (no change)
            # - only new infinite    -> +inf (grew without bound)
            # - only old infinite    -> -inf (shrank from unbounded)
            # - both zero/missing    -> 0.0 (no change)
            # - old zero/missing     -> +100% (appeared)
            # - new zero/missing     -> -100% (disappeared)
            # - otherwise            -> the field's own ratio
            # note: 'o or 0' / 'n or 0' coerce None/missing to 0
            ratio = (
                0.0 if m.isinf(o or 0) and m.isinf(n or 0)
                    else +float('inf') if m.isinf(n or 0)
                    else -float('inf') if m.isinf(o or 0)
                    else 0.0 if not o and not n
                    else +1.0 if not o
                    else -1.0 if not n
                    else field.ratio(o, n))
            # render the ratio: blank for no change, ∞ markers for unbounded
            # changes, otherwise a signed percentage
            print(' %-9s' % (
                '' if not ratio
                    else '(+∞%)' if ratio > 0 and m.isinf(ratio)
                    else '(-∞%)' if ratio < 0 and m.isinf(ratio)
                    else '(%+.1f%%)' % (100*ratio)), end='')
        print()
|  | ||||
|     def print_entries(by='name'): | ||||
|         entries = dedup_entries(results, by=by) | ||||
|  | ||||
|         if not args.get('diff'): | ||||
|             print_header(by=by) | ||||
|             for name, result in sorted_entries(entries.items()): | ||||
|                 print_entry(name, result) | ||||
|         else: | ||||
|             prev_entries = dedup_entries(prev_results, by=by) | ||||
|             print_header(by='%s (%d added, %d removed)' % (by, | ||||
|                 sum(1 for name in entries if name not in prev_entries), | ||||
|                 sum(1 for name in prev_entries if name not in entries))) | ||||
|             for name, result in sorted_entries(entries.items()): | ||||
|                 if args.get('all') or result != prev_entries.get(name, {}): | ||||
|                     print_diff_entry(name, prev_entries.get(name, {}), result) | ||||
|  | ||||
|     def print_totals(): | ||||
|         if not args.get('diff'): | ||||
|             print_entry('TOTAL', total) | ||||
|         else: | ||||
|             print_diff_entry('TOTAL', prev_total, total) | ||||
|  | ||||
|     if args.get('summary'): | ||||
|         print_header() | ||||
|         print_totals() | ||||
|     elif args.get('files'): | ||||
|         print_entries(by='file') | ||||
|         print_totals() | ||||
|     else: | ||||
|         print_entries(by='name') | ||||
|         print_totals() | ||||
|  | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     import argparse | ||||
|     import sys | ||||
|     parser = argparse.ArgumentParser( | ||||
|         description="Summarize measurements") | ||||
|     parser.add_argument('csv_paths', nargs='*', default='-', | ||||
|         help="Description of where to find *.csv files. May be a directory \ | ||||
|             or list of paths. *.csv files will be merged to show the total \ | ||||
|             coverage.") | ||||
|     parser.add_argument('-d', '--diff', | ||||
|         help="Specify CSV file to diff against.") | ||||
|     parser.add_argument('-a', '--all', action='store_true', | ||||
|         help="Show all objects, not just the ones that changed.") | ||||
|     parser.add_argument('-e', '--all-fields', action='store_true', | ||||
|         help="Show all fields, even those with no results.") | ||||
|     parser.add_argument('-f', '--fields', type=lambda x: re.split('\s*,\s*', x), | ||||
|         help="Comma separated list of fields to print, by default all fields \ | ||||
|             that are found in the CSV files are printed.") | ||||
|     parser.add_argument('-s', '--sort', | ||||
|         help="Sort by this field.") | ||||
|     parser.add_argument('-S', '--reverse-sort', | ||||
|         help="Sort by this field, but backwards.") | ||||
|     parser.add_argument('-F', '--files', action='store_true', | ||||
|         help="Show file-level calls.") | ||||
|     parser.add_argument('-Y', '--summary', action='store_true', | ||||
|         help="Only show the totals.") | ||||
|     sys.exit(main(**vars(parser.parse_args()))) | ||||
							
								
								
									
										322
									
								
								scripts/test.py
									
									
									
									
									
								
							
							
						
						
									
										322
									
								
								scripts/test.py
									
									
									
									
									
								
							| @@ -20,19 +20,50 @@ import pty | ||||
| import errno | ||||
| import signal | ||||
|  | ||||
| TESTDIR = 'tests' | ||||
| TEST_PATHS = 'tests' | ||||
| RULES = """ | ||||
| # add block devices to sources | ||||
| TESTSRC ?= $(SRC) $(wildcard bd/*.c) | ||||
|  | ||||
| define FLATTEN | ||||
| tests/%$(subst /,.,$(target)): $(target) | ||||
| %(path)s%%$(subst /,.,$(target)): $(target) | ||||
|     ./scripts/explode_asserts.py $$< -o $$@ | ||||
| endef | ||||
| $(foreach target,$(SRC),$(eval $(FLATTEN))) | ||||
|  | ||||
| -include tests/*.d | ||||
| $(foreach target,$(TESTSRC),$(eval $(FLATTEN))) | ||||
|  | ||||
| -include %(path)s*.d | ||||
| .SECONDARY: | ||||
| %.test: %.test.o $(foreach f,$(subst /,.,$(SRC:.c=.o)),%.$f) | ||||
|  | ||||
| %(path)s.test: %(path)s.test.o \\ | ||||
|         $(foreach t,$(subst /,.,$(TESTSRC:.c=.o)),%(path)s.$t) | ||||
|     $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ | ||||
|  | ||||
| # needed in case builddir is different | ||||
| %(path)s%%.o: %(path)s%%.c | ||||
|     $(CC) -c -MMD $(CFLAGS) $< -o $@ | ||||
| """ | ||||
| COVERAGE_RULES = """ | ||||
| %(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage | ||||
|  | ||||
| # delete lingering coverage | ||||
| %(path)s.test: | %(path)s.info.clean | ||||
| .PHONY: %(path)s.info.clean | ||||
| %(path)s.info.clean: | ||||
|     rm -f %(path)s*.gcda | ||||
|  | ||||
| # accumulate coverage info | ||||
| .PHONY: %(path)s.info | ||||
| %(path)s.info: | ||||
|     $(strip $(LCOV) -c \\ | ||||
|         $(addprefix -d ,$(wildcard %(path)s*.gcda)) \\ | ||||
|         --rc 'geninfo_adjust_src_path=$(shell pwd)' \\ | ||||
|         -o $@) | ||||
|     $(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@ | ||||
| ifdef COVERAGETARGET | ||||
|     $(strip $(LCOV) -a $@ \\ | ||||
|         $(addprefix -a ,$(wildcard $(COVERAGETARGET))) \\ | ||||
|         -o $(COVERAGETARGET)) | ||||
| endif | ||||
| """ | ||||
| GLOBALS = """ | ||||
| //////////////// AUTOGENERATED TEST //////////////// | ||||
| @@ -52,7 +83,7 @@ DEFINES = { | ||||
|     'LFS_LOOKAHEAD_SIZE': 16, | ||||
|     'LFS_ERASE_VALUE': 0xff, | ||||
|     'LFS_ERASE_CYCLES': 0, | ||||
|     'LFS_BADBLOCK_BEHAVIOR': 'LFS_TESTBD_BADBLOCK_NOPROG', | ||||
|     'LFS_BADBLOCK_BEHAVIOR': 'LFS_TESTBD_BADBLOCK_PROGERROR', | ||||
| } | ||||
| PROLOGUE = """ | ||||
|     // prologue | ||||
| @@ -119,6 +150,8 @@ class TestCase: | ||||
|         self.if_ = config.get('if', None) | ||||
|         self.in_ = config.get('in', None) | ||||
|  | ||||
|         self.result = None | ||||
|  | ||||
|     def __str__(self): | ||||
|         if hasattr(self, 'permno'): | ||||
|             if any(k not in self.case.defines for k in self.defines): | ||||
| @@ -179,30 +212,46 @@ class TestCase: | ||||
|                 len(self.filter) >= 2 and | ||||
|                 self.filter[1] != self.permno): | ||||
|             return False | ||||
|         elif args.get('no_internal', False) and self.in_ is not None: | ||||
|         elif args.get('no_internal') and self.in_ is not None: | ||||
|             return False | ||||
|         elif self.if_ is not None: | ||||
|             return eval(self.if_, None, self.defines.copy()) | ||||
|             if_ = self.if_ | ||||
|             while True: | ||||
|                 for k, v in sorted(self.defines.items(), | ||||
|                         key=lambda x: len(x[0]), reverse=True): | ||||
|                     if k in if_: | ||||
|                         if_ = if_.replace(k, '(%s)' % v) | ||||
|                         break | ||||
|                 else: | ||||
|                     break | ||||
|             if_ = ( | ||||
|                 re.sub('(\&\&|\?)', ' and ', | ||||
|                 re.sub('(\|\||:)', ' or ', | ||||
|                 re.sub('!(?!=)', ' not ', if_)))) | ||||
|             return eval(if_) | ||||
|         else: | ||||
|             return True | ||||
|  | ||||
|     def test(self, exec=[], persist=False, cycles=None, | ||||
|             gdb=False, failure=None, **args): | ||||
|             gdb=False, failure=None, disk=None, **args): | ||||
|         # build command | ||||
|         cmd = exec + ['./%s.test' % self.suite.path, | ||||
|             repr(self.caseno), repr(self.permno)] | ||||
|  | ||||
|         # persist disk or keep in RAM for speed? | ||||
|         if persist: | ||||
|             if not disk: | ||||
|                 disk = self.suite.path + '.disk' | ||||
|             if persist != 'noerase': | ||||
|                 try: | ||||
|                     os.remove(self.suite.path + '.disk') | ||||
|                     if args.get('verbose', False): | ||||
|                         print('rm', self.suite.path + '.disk') | ||||
|                     with open(disk, 'w') as f: | ||||
|                         f.truncate(0) | ||||
|                     if args.get('verbose'): | ||||
|                         print('truncate --size=0', disk) | ||||
|                 except FileNotFoundError: | ||||
|                     pass | ||||
|  | ||||
|             cmd.append(self.suite.path + '.disk') | ||||
|             cmd.append(disk) | ||||
|  | ||||
|         # simulate power-loss after n cycles? | ||||
|         if cycles: | ||||
| @@ -215,53 +264,59 @@ class TestCase: | ||||
|                 ncmd.extend(['-ex', 'r']) | ||||
|                 if failure.assert_: | ||||
|                     ncmd.extend(['-ex', 'up 2']) | ||||
|             elif gdb == 'start': | ||||
|             elif gdb == 'main': | ||||
|                 ncmd.extend([ | ||||
|                     '-ex', 'b %s:%d' % (self.suite.path, self.code_lineno), | ||||
|                     '-ex', 'r']) | ||||
|             ncmd.extend(['--args'] + cmd) | ||||
|  | ||||
|             if args.get('verbose', False): | ||||
|             if args.get('verbose'): | ||||
|                 print(' '.join(shlex.quote(c) for c in ncmd)) | ||||
|             signal.signal(signal.SIGINT, signal.SIG_IGN) | ||||
|             sys.exit(sp.call(ncmd)) | ||||
|  | ||||
|         # run test case! | ||||
|         mpty, spty = pty.openpty() | ||||
|         if args.get('verbose', False): | ||||
|         if args.get('verbose'): | ||||
|             print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|         proc = sp.Popen(cmd, stdout=spty, stderr=spty) | ||||
|         os.close(spty) | ||||
|         mpty = os.fdopen(mpty, 'r', 1) | ||||
|         stdout = [] | ||||
|         assert_ = None | ||||
|         while True: | ||||
|             try: | ||||
|                 line = mpty.readline() | ||||
|             except OSError as e: | ||||
|                 if e.errno == errno.EIO: | ||||
|                     break | ||||
|                 raise | ||||
|             stdout.append(line) | ||||
|             if args.get('verbose', False): | ||||
|                 sys.stdout.write(line) | ||||
|             # intercept asserts | ||||
|             m = re.match( | ||||
|                 '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$' | ||||
|                 .format('(?:\033\[[\d;]*.| )*', 'assert'), | ||||
|                 line) | ||||
|             if m and assert_ is None: | ||||
|         try: | ||||
|             while True: | ||||
|                 try: | ||||
|                     with open(m.group(1)) as f: | ||||
|                         lineno = int(m.group(2)) | ||||
|                         line = next(it.islice(f, lineno-1, None)).strip('\n') | ||||
|                     assert_ = { | ||||
|                         'path': m.group(1), | ||||
|                         'line': line, | ||||
|                         'lineno': lineno, | ||||
|                         'message': m.group(3)} | ||||
|                 except: | ||||
|                     pass | ||||
|                     line = mpty.readline() | ||||
|                 except OSError as e: | ||||
|                     if e.errno == errno.EIO: | ||||
|                         break | ||||
|                     raise | ||||
|                 if not line: | ||||
|                     break; | ||||
|                 stdout.append(line) | ||||
|                 if args.get('verbose'): | ||||
|                     sys.stdout.write(line) | ||||
|                 # intercept asserts | ||||
|                 m = re.match( | ||||
|                     '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$' | ||||
|                     .format('(?:\033\[[\d;]*.| )*', 'assert'), | ||||
|                     line) | ||||
|                 if m and assert_ is None: | ||||
|                     try: | ||||
|                         with open(m.group(1)) as f: | ||||
|                             lineno = int(m.group(2)) | ||||
|                             line = (next(it.islice(f, lineno-1, None)) | ||||
|                                 .strip('\n')) | ||||
|                         assert_ = { | ||||
|                             'path': m.group(1), | ||||
|                             'line': line, | ||||
|                             'lineno': lineno, | ||||
|                             'message': m.group(3)} | ||||
|                     except: | ||||
|                         pass | ||||
|         except KeyboardInterrupt: | ||||
|             raise TestFailure(self, 1, stdout, None) | ||||
|         proc.wait() | ||||
|  | ||||
|         # did we pass? | ||||
| @@ -279,11 +334,17 @@ class ValgrindTestCase(TestCase): | ||||
|         return not self.leaky and super().shouldtest(**args) | ||||
|  | ||||
|     def test(self, exec=[], **args): | ||||
|         exec = exec + [ | ||||
|         verbose = args.get('verbose') | ||||
|         uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1) | ||||
|         exec = [ | ||||
|             'valgrind', | ||||
|             '--leak-check=full', | ||||
|             ] + (['--undef-value-errors=no'] if uninit else []) + [ | ||||
|             ] + (['--track-origins=yes'] if not uninit else []) + [ | ||||
|             '--error-exitcode=4', | ||||
|             '-q'] | ||||
|             '--error-limit=no', | ||||
|             ] + (['--num-callers=1'] if not verbose else []) + [ | ||||
|             '-q'] + exec | ||||
|         return super().test(exec=exec, **args) | ||||
|  | ||||
| class ReentrantTestCase(TestCase): | ||||
| @@ -294,7 +355,7 @@ class ReentrantTestCase(TestCase): | ||||
|     def shouldtest(self, **args): | ||||
|         return self.reentrant and super().shouldtest(**args) | ||||
|  | ||||
|     def test(self, exec=[], persist=False, gdb=False, failure=None, **args): | ||||
|     def test(self, persist=False, gdb=False, failure=None, **args): | ||||
|         for cycles in it.count(1): | ||||
|             # clear disk first? | ||||
|             if cycles == 1 and persist != 'noerase': | ||||
| @@ -325,12 +386,17 @@ class TestSuite: | ||||
|         self.name = os.path.basename(path) | ||||
|         if self.name.endswith('.toml'): | ||||
|             self.name = self.name[:-len('.toml')] | ||||
|         self.path = path | ||||
|         if args.get('build_dir'): | ||||
|             self.toml = path | ||||
|             self.path = args['build_dir'] + '/' + path | ||||
|         else: | ||||
|             self.toml = path | ||||
|             self.path = path | ||||
|         self.classes = classes | ||||
|         self.defines = defines.copy() | ||||
|         self.filter = filter | ||||
|  | ||||
|         with open(path) as f: | ||||
|         with open(self.toml) as f: | ||||
|             # load tests | ||||
|             config = toml.load(f) | ||||
|  | ||||
| @@ -360,10 +426,11 @@ class TestSuite: | ||||
|             # code lineno? | ||||
|             if 'code' in case: | ||||
|                 case['code_lineno'] = code_linenos.pop() | ||||
|             # give our case's config a copy of our "global" config | ||||
|             for k, v in config.items(): | ||||
|                 if k not in case: | ||||
|                     case[k] = v | ||||
|             # merge conditions if necessary | ||||
|             if 'if' in config and 'if' in case: | ||||
|                 case['if'] = '(%s) && (%s)' % (config['if'], case['if']) | ||||
|             elif 'if' in config: | ||||
|                 case['if'] = config['if'] | ||||
|             # initialize test case | ||||
|             self.cases.append(TestCase(case, filter=filter, | ||||
|                 suite=self, caseno=i+1, lineno=lineno, **args)) | ||||
| @@ -440,7 +507,7 @@ class TestSuite: | ||||
|  | ||||
|     def build(self, **args): | ||||
|         # build test files | ||||
|         tf = open(self.path + '.test.c.t', 'w') | ||||
|         tf = open(self.path + '.test.tc', 'w') | ||||
|         tf.write(GLOBALS) | ||||
|         if self.code is not None: | ||||
|             tf.write('#line %d "%s"\n' % (self.code_lineno, self.path)) | ||||
| @@ -450,7 +517,7 @@ class TestSuite: | ||||
|         for case in self.cases: | ||||
|             if case.in_ not in tfs: | ||||
|                 tfs[case.in_] = open(self.path+'.'+ | ||||
|                     case.in_.replace('/', '.')+'.t', 'w') | ||||
|                     re.sub('(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w') | ||||
|                 tfs[case.in_].write('#line 1 "%s"\n' % case.in_) | ||||
|                 with open(case.in_) as f: | ||||
|                     for line in f: | ||||
| @@ -489,25 +556,33 @@ class TestSuite: | ||||
|  | ||||
|         # write makefiles | ||||
|         with open(self.path + '.mk', 'w') as mk: | ||||
|             mk.write(RULES.replace(4*' ', '\t')) | ||||
|             mk.write(RULES.replace(4*' ', '\t') % dict(path=self.path)) | ||||
|             mk.write('\n') | ||||
|  | ||||
|             # add truely global defines globally | ||||
|             # add coverage hooks? | ||||
|             if args.get('coverage'): | ||||
|                 mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict( | ||||
|                     path=self.path)) | ||||
|                 mk.write('\n') | ||||
|  | ||||
|             # add truly global defines globally | ||||
|             for k, v in sorted(self.defines.items()): | ||||
|                 mk.write('%s: override CFLAGS += -D%s=%r\n' % ( | ||||
|                     self.path+'.test', k, v)) | ||||
|                 mk.write('%s.test: override CFLAGS += -D%s=%r\n' | ||||
|                     % (self.path, k, v)) | ||||
|  | ||||
|             for path in tfs: | ||||
|                 if path is None: | ||||
|                     mk.write('%s: %s | %s\n' % ( | ||||
|                         self.path+'.test.c', | ||||
|                         self.path, | ||||
|                         self.path+'.test.c.t')) | ||||
|                         self.toml, | ||||
|                         self.path+'.test.tc')) | ||||
|                 else: | ||||
|                     mk.write('%s: %s %s | %s\n' % ( | ||||
|                         self.path+'.'+path.replace('/', '.'), | ||||
|                         self.path, path, | ||||
|                         self.path+'.'+path.replace('/', '.')+'.t')) | ||||
|                         self.toml, | ||||
|                         path, | ||||
|                         self.path+'.'+re.sub('(\.c)?$', '.tc', | ||||
|                             path.replace('/', '.')))) | ||||
|                 mk.write('\t./scripts/explode_asserts.py $| -o $@\n') | ||||
|  | ||||
|         self.makefile = self.path + '.mk' | ||||
| @@ -530,7 +605,7 @@ class TestSuite: | ||||
|                 if not args.get('verbose', True): | ||||
|                     sys.stdout.write(FAIL) | ||||
|                     sys.stdout.flush() | ||||
|                 if not args.get('keep_going', False): | ||||
|                 if not args.get('keep_going'): | ||||
|                     if not args.get('verbose', True): | ||||
|                         sys.stdout.write('\n') | ||||
|                     raise | ||||
| @@ -552,36 +627,36 @@ def main(**args): | ||||
|  | ||||
|     # and what class of TestCase to run | ||||
|     classes = [] | ||||
|     if args.get('normal', False): | ||||
|     if args.get('normal'): | ||||
|         classes.append(TestCase) | ||||
|     if args.get('reentrant', False): | ||||
|     if args.get('reentrant'): | ||||
|         classes.append(ReentrantTestCase) | ||||
|     if args.get('valgrind', False): | ||||
|     if args.get('valgrind'): | ||||
|         classes.append(ValgrindTestCase) | ||||
|     if not classes: | ||||
|         classes = [TestCase] | ||||
|  | ||||
|     suites = [] | ||||
|     for testpath in args['testpaths']: | ||||
|     for testpath in args['test_paths']: | ||||
|         # optionally specified test case/perm | ||||
|         testpath, *filter = testpath.split('#') | ||||
|         filter = [int(f) for f in filter] | ||||
|  | ||||
|         # figure out the suite's toml file | ||||
|         if os.path.isdir(testpath): | ||||
|             testpath = testpath + '/test_*.toml' | ||||
|             testpath = testpath + '/*.toml' | ||||
|         elif os.path.isfile(testpath): | ||||
|             testpath = testpath | ||||
|         elif testpath.endswith('.toml'): | ||||
|             testpath = TESTDIR + '/' + testpath | ||||
|             testpath = TEST_PATHS + '/' + testpath | ||||
|         else: | ||||
|             testpath = TESTDIR + '/' + testpath + '.toml' | ||||
|             testpath = TEST_PATHS + '/' + testpath + '.toml' | ||||
|  | ||||
|         # find tests | ||||
|         for path in glob.glob(testpath): | ||||
|             suites.append(TestSuite(path, classes, defines, filter, **args)) | ||||
|  | ||||
|     # sort for reproducability | ||||
|     # sort for reproducibility | ||||
|     suites = sorted(suites) | ||||
|  | ||||
|     # generate permutations | ||||
| @@ -601,7 +676,7 @@ def main(**args): | ||||
|         list(it.chain.from_iterable(['-f', m] for m in makefiles)) + | ||||
|         [target for target in targets]) | ||||
|     mpty, spty = pty.openpty() | ||||
|     if args.get('verbose', False): | ||||
|     if args.get('verbose'): | ||||
|         print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|     proc = sp.Popen(cmd, stdout=spty, stderr=spty) | ||||
|     os.close(spty) | ||||
| @@ -614,15 +689,17 @@ def main(**args): | ||||
|             if e.errno == errno.EIO: | ||||
|                 break | ||||
|             raise | ||||
|         if not line: | ||||
|             break; | ||||
|         stdout.append(line) | ||||
|         if args.get('verbose', False): | ||||
|         if args.get('verbose'): | ||||
|             sys.stdout.write(line) | ||||
|         # intercept warnings | ||||
|         m = re.match( | ||||
|             '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$' | ||||
|             .format('(?:\033\[[\d;]*.| )*', 'warning'), | ||||
|             line) | ||||
|         if m and not args.get('verbose', False): | ||||
|         if m and not args.get('verbose'): | ||||
|             try: | ||||
|                 with open(m.group(1)) as f: | ||||
|                     lineno = int(m.group(2)) | ||||
| @@ -635,24 +712,27 @@ def main(**args): | ||||
|             except: | ||||
|                 pass | ||||
|     proc.wait() | ||||
|  | ||||
|     if proc.returncode != 0: | ||||
|         if not args.get('verbose', False): | ||||
|         if not args.get('verbose'): | ||||
|             for line in stdout: | ||||
|                 sys.stdout.write(line) | ||||
|         sys.exit(-3) | ||||
|         sys.exit(-1) | ||||
|  | ||||
|     print('built %d test suites, %d test cases, %d permutations' % ( | ||||
|         len(suites), | ||||
|         sum(len(suite.cases) for suite in suites), | ||||
|         sum(len(suite.perms) for suite in suites))) | ||||
|  | ||||
|     filtered = 0 | ||||
|     total = 0 | ||||
|     for suite in suites: | ||||
|         for perm in suite.perms: | ||||
|             filtered += perm.shouldtest(**args) | ||||
|     if filtered != sum(len(suite.perms) for suite in suites): | ||||
|         print('filtered down to %d permutations' % filtered) | ||||
|             total += perm.shouldtest(**args) | ||||
|     if total != sum(len(suite.perms) for suite in suites): | ||||
|         print('filtered down to %d permutations' % total) | ||||
|  | ||||
|     # only requested to build? | ||||
|     if args.get('build'): | ||||
|         return 0 | ||||
|  | ||||
|     print('====== testing ======') | ||||
|     try: | ||||
| @@ -666,38 +746,59 @@ def main(**args): | ||||
|     failed = 0 | ||||
|     for suite in suites: | ||||
|         for perm in suite.perms: | ||||
|             if not hasattr(perm, 'result'): | ||||
|                 continue | ||||
|  | ||||
|             if perm.result == PASS: | ||||
|                 passed += 1 | ||||
|             else: | ||||
|             elif isinstance(perm.result, TestFailure): | ||||
|                 sys.stdout.write( | ||||
|                     "\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m " | ||||
|                     "{perm} failed with {returncode}\n".format( | ||||
|                     "{perm} failed\n".format( | ||||
|                         perm=perm, path=perm.suite.path, lineno=perm.lineno, | ||||
|                         returncode=perm.result.returncode or 0)) | ||||
|                 if perm.result.stdout: | ||||
|                     for line in (perm.result.stdout | ||||
|                             if not perm.result.assert_ | ||||
|                             else perm.result.stdout[:-1]): | ||||
|                     if perm.result.assert_: | ||||
|                         stdout = perm.result.stdout[:-1] | ||||
|                     else: | ||||
|                         stdout = perm.result.stdout | ||||
|                     for line in stdout[-5:]: | ||||
|                         sys.stdout.write(line) | ||||
|                 if perm.result.assert_: | ||||
|                     sys.stdout.write( | ||||
|                         "\033[01m{path}:{lineno}:\033[01;31massert:\033[m " | ||||
|                         "{message}\n{line}\n".format( | ||||
|                             **perm.result.assert_)) | ||||
|                 else: | ||||
|                     for line in perm.result.stdout: | ||||
|                         sys.stdout.write(line) | ||||
|                 sys.stdout.write('\n') | ||||
|                 failed += 1 | ||||
|  | ||||
|     if args.get('gdb', False): | ||||
|     if args.get('coverage'): | ||||
|         # collect coverage info | ||||
|         # why -j1? lcov doesn't work in parallel because of gcov limitations | ||||
|         cmd = (['make', '-j1', '-f', 'Makefile'] + | ||||
|             list(it.chain.from_iterable(['-f', m] for m in makefiles)) + | ||||
|             (['COVERAGETARGET=%s' % args['coverage']] | ||||
|                 if isinstance(args['coverage'], str) else []) + | ||||
|             [suite.path + '.info' for suite in suites | ||||
|                 if any(perm.result == PASS for perm in suite.perms)]) | ||||
|         if args.get('verbose'): | ||||
|             print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|         proc = sp.Popen(cmd, | ||||
|             stdout=sp.PIPE if not args.get('verbose') else None, | ||||
|             stderr=sp.STDOUT if not args.get('verbose') else None, | ||||
|             universal_newlines=True) | ||||
|         stdout = [] | ||||
|         for line in proc.stdout: | ||||
|             stdout.append(line) | ||||
|         proc.wait() | ||||
|         if proc.returncode != 0: | ||||
|             if not args.get('verbose'): | ||||
|                 for line in stdout: | ||||
|                     sys.stdout.write(line) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|     if args.get('gdb'): | ||||
|         failure = None | ||||
|         for suite in suites: | ||||
|             for perm in suite.perms: | ||||
|                 if getattr(perm, 'result', PASS) != PASS: | ||||
|                 if isinstance(perm.result, TestFailure): | ||||
|                     failure = perm.result | ||||
|         if failure is not None: | ||||
|             print('======= gdb ======') | ||||
| @@ -705,20 +806,22 @@ def main(**args): | ||||
|             failure.case.test(failure=failure, **args) | ||||
|             sys.exit(0) | ||||
|  | ||||
|     print('tests passed: %d' % passed) | ||||
|     print('tests failed: %d' % failed) | ||||
|     print('tests passed %d/%d (%.1f%%)' % (passed, total, | ||||
|         100*(passed/total if total else 1.0))) | ||||
|     print('tests failed %d/%d (%.1f%%)' % (failed, total, | ||||
|         100*(failed/total if total else 1.0))) | ||||
|     return 1 if failed > 0 else 0 | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     import argparse | ||||
|     parser = argparse.ArgumentParser( | ||||
|         description="Run parameterized tests in various configurations.") | ||||
|     parser.add_argument('testpaths', nargs='*', default=[TESTDIR], | ||||
|     parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS], | ||||
|         help="Description of test(s) to run. By default, this is all tests \ | ||||
|             found in the \"{0}\" directory. Here, you can specify a different \ | ||||
|             directory of tests, a specific file, a suite by name, and even a \ | ||||
|             specific test case by adding brackets. For example \ | ||||
|             \"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TESTDIR)) | ||||
|             directory of tests, a specific file, a suite by name, and even \ | ||||
|             specific test cases and permutations. For example \ | ||||
|             \"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TEST_PATHS)) | ||||
|     parser.add_argument('-D', action='append', default=[], | ||||
|         help="Overriding parameter definitions.") | ||||
|     parser.add_argument('-v', '--verbose', action='store_true', | ||||
| @@ -728,7 +831,9 @@ if __name__ == "__main__": | ||||
|     parser.add_argument('-p', '--persist', choices=['erase', 'noerase'], | ||||
|         nargs='?', const='erase', | ||||
|         help="Store disk image in a file.") | ||||
|     parser.add_argument('-g', '--gdb', choices=['init', 'start', 'assert'], | ||||
|     parser.add_argument('-b', '--build', action='store_true', | ||||
|         help="Only build the tests, do not execute.") | ||||
|     parser.add_argument('-g', '--gdb', choices=['init', 'main', 'assert'], | ||||
|         nargs='?', const='assert', | ||||
|         help="Drop into gdb on test failure.") | ||||
|     parser.add_argument('--no-internal', action='store_true', | ||||
| @@ -737,8 +842,19 @@ if __name__ == "__main__": | ||||
|         help="Run tests normally.") | ||||
|     parser.add_argument('-r', '--reentrant', action='store_true', | ||||
|         help="Run reentrant tests with simulated power-loss.") | ||||
|     parser.add_argument('-V', '--valgrind', action='store_true', | ||||
|     parser.add_argument('--valgrind', action='store_true', | ||||
|         help="Run non-leaky tests under valgrind to check for memory leaks.") | ||||
|     parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '), | ||||
|     parser.add_argument('--exec', default=[], type=lambda e: e.split(), | ||||
|         help="Run tests with another executable prefixed on the command line.") | ||||
|     parser.add_argument('--disk', | ||||
|         help="Specify a file to use for persistent/reentrant tests.") | ||||
|     parser.add_argument('--coverage', type=lambda x: x if x else True, | ||||
|         nargs='?', const='', | ||||
|         help="Collect coverage information during testing. This uses lcov/gcov \ | ||||
|             to accumulate coverage information into *.info files. May also \ | ||||
|             a path to a *.info file to accumulate coverage info into.") | ||||
|     parser.add_argument('--build-dir', | ||||
|         help="Build relative to the specified directory instead of the \ | ||||
|             current directory.") | ||||
|  | ||||
|     sys.exit(main(**vars(parser.parse_args()))) | ||||
|   | ||||
| @@ -1,9 +1,10 @@ | ||||
| # allocator tests | ||||
| # note for these to work there are many constraints on the device geometry | ||||
| # note for these to work there are a number constraints on the device geometry | ||||
| if = 'LFS_BLOCK_CYCLES == -1' | ||||
|  | ||||
| [[case]] # parallel allocation test | ||||
| define.FILES = 3 | ||||
| define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / FILES)' | ||||
| define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)' | ||||
| code = ''' | ||||
|     const char *names[FILES] = {"bacon", "eggs", "pancakes"}; | ||||
|     lfs_file_t files[FILES]; | ||||
| @@ -46,7 +47,7 @@ code = ''' | ||||
|  | ||||
| [[case]] # serial allocation test | ||||
| define.FILES = 3 | ||||
| define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / FILES)' | ||||
| define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)' | ||||
| code = ''' | ||||
|     const char *names[FILES] = {"bacon", "eggs", "pancakes"}; | ||||
|  | ||||
| @@ -85,7 +86,7 @@ code = ''' | ||||
|  | ||||
| [[case]] # parallel allocation reuse test | ||||
| define.FILES = 3 | ||||
| define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / FILES)' | ||||
| define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)' | ||||
| define.CYCLES = [1, 10] | ||||
| code = ''' | ||||
|     const char *names[FILES] = {"bacon", "eggs", "pancakes"}; | ||||
| @@ -140,7 +141,7 @@ code = ''' | ||||
|  | ||||
| [[case]] # serial allocation reuse test | ||||
| define.FILES = 3 | ||||
| define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-4)) / FILES)' | ||||
| define.SIZE = '(((LFS_BLOCK_SIZE-8)*(LFS_BLOCK_COUNT-6)) / FILES)' | ||||
| define.CYCLES = [1, 10] | ||||
| code = ''' | ||||
|     const char *names[FILES] = {"bacon", "eggs", "pancakes"}; | ||||
| @@ -322,6 +323,90 @@ code = ''' | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|  | ||||
| [[case]] # what if we have a bad block during an allocation scan? | ||||
| in = "lfs.c" | ||||
| define.LFS_ERASE_CYCLES = 0xffffffff | ||||
| define.LFS_BADBLOCK_BEHAVIOR = 'LFS_TESTBD_BADBLOCK_READERROR' | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     // first fill to exhaustion to find available space | ||||
|     lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0; | ||||
|     strcpy((char*)buffer, "waka"); | ||||
|     size = strlen("waka"); | ||||
|     lfs_size_t filesize = 0; | ||||
|     while (true) { | ||||
|         lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size); | ||||
|         assert(res == (lfs_ssize_t)size || res == LFS_ERR_NOSPC); | ||||
|         if (res == LFS_ERR_NOSPC) { | ||||
|             break; | ||||
|         } | ||||
|         filesize += size; | ||||
|     } | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     // now fill all but a couple of blocks of the filesystem with data | ||||
|     filesize -= 3*LFS_BLOCK_SIZE; | ||||
|     lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0; | ||||
|     strcpy((char*)buffer, "waka"); | ||||
|     size = strlen("waka"); | ||||
|     for (lfs_size_t i = 0; i < filesize/size; i++) { | ||||
|         lfs_file_write(&lfs, &file, buffer, size) => size; | ||||
|     } | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     // also save head of file so we can error during lookahead scan | ||||
|     lfs_block_t fileblock = file.ctz.head; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     // remount to force an alloc scan | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|  | ||||
|     // but mark the head of our file as a "bad block", this is force our | ||||
|     // scan to bail early | ||||
|     lfs_testbd_setwear(&cfg, fileblock, 0xffffffff) => 0; | ||||
|     lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0; | ||||
|     strcpy((char*)buffer, "chomp"); | ||||
|     size = strlen("chomp"); | ||||
|     while (true) { | ||||
|         lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size); | ||||
|         assert(res == (lfs_ssize_t)size || res == LFS_ERR_CORRUPT); | ||||
|         if (res == LFS_ERR_CORRUPT) { | ||||
|             break; | ||||
|         } | ||||
|     } | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|  | ||||
|     // now reverse the "bad block" and try to write the file again until we | ||||
|     // run out of space | ||||
|     lfs_testbd_setwear(&cfg, fileblock, 0) => 0; | ||||
|     lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0; | ||||
|     strcpy((char*)buffer, "chomp"); | ||||
|     size = strlen("chomp"); | ||||
|     while (true) { | ||||
|         lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size); | ||||
|         assert(res == (lfs_ssize_t)size || res == LFS_ERR_NOSPC); | ||||
|         if (res == LFS_ERR_NOSPC) { | ||||
|             break; | ||||
|         } | ||||
|     } | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|  | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     // check that the disk isn't hurt | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_file_open(&lfs, &file, "pacman", LFS_O_RDONLY) => 0; | ||||
|     strcpy((char*)buffer, "waka"); | ||||
|     size = strlen("waka"); | ||||
|     for (lfs_size_t i = 0; i < filesize/size; i++) { | ||||
|         uint8_t rbuffer[4]; | ||||
|         lfs_file_read(&lfs, &file, rbuffer, size) => size; | ||||
|         assert(memcmp(rbuffer, buffer, size) == 0); | ||||
|     } | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|  | ||||
|  | ||||
| # Below, I don't like these tests. They're fragile and depend _heavily_ | ||||
| # on the geometry of the block device. But they are valuable. Eventually they | ||||
| # should be removed and replaced with generalized tests. | ||||
| @@ -329,7 +414,7 @@ code = ''' | ||||
| [[case]] # chained dir exhaustion test | ||||
| define.LFS_BLOCK_SIZE = 512 | ||||
| define.LFS_BLOCK_COUNT = 1024 | ||||
| if = 'LFS_BLOCK_SIZE == 512 and LFS_BLOCK_COUNT == 1024' | ||||
| if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024' | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
| @@ -400,7 +485,7 @@ code = ''' | ||||
| [[case]] # split dir test | ||||
| define.LFS_BLOCK_SIZE = 512 | ||||
| define.LFS_BLOCK_COUNT = 1024 | ||||
| if = 'LFS_BLOCK_SIZE == 512 and LFS_BLOCK_COUNT == 1024' | ||||
| if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024' | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
| @@ -445,7 +530,7 @@ code = ''' | ||||
| [[case]] # outdated lookahead test | ||||
| define.LFS_BLOCK_SIZE = 512 | ||||
| define.LFS_BLOCK_COUNT = 1024 | ||||
| if = 'LFS_BLOCK_SIZE == 512 and LFS_BLOCK_COUNT == 1024' | ||||
| if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024' | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
| @@ -510,7 +595,7 @@ code = ''' | ||||
| [[case]] # outdated lookahead and split dir test | ||||
| define.LFS_BLOCK_SIZE = 512 | ||||
| define.LFS_BLOCK_COUNT = 1024 | ||||
| if = 'LFS_BLOCK_SIZE == 512 and LFS_BLOCK_COUNT == 1024' | ||||
| if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024' | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|   | ||||
| @@ -1,6 +1,17 @@ | ||||
| # bad blocks with block cycles should be tested in test_relocations | ||||
| if = 'LFS_BLOCK_CYCLES == -1' | ||||
|  | ||||
| [[case]] # single bad blocks | ||||
| define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster | ||||
| define.LFS_ERASE_CYCLES = 0xffffffff | ||||
| define.LFS_BADBLOCK_BEHAVIOR = 'LFS_TESTBD_BADBLOCK_NOPROG' | ||||
| define.LFS_ERASE_VALUE = [0x00, 0xff, -1] | ||||
| define.LFS_BADBLOCK_BEHAVIOR = [ | ||||
|     'LFS_TESTBD_BADBLOCK_PROGERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_ERASEERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_READERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_PROGNOOP', | ||||
|     'LFS_TESTBD_BADBLOCK_ERASENOOP', | ||||
| ] | ||||
| define.NAMEMULT = 64 | ||||
| define.FILEMULT = 1 | ||||
| code = ''' | ||||
| @@ -64,144 +75,16 @@ code = ''' | ||||
|     } | ||||
| ''' | ||||
|  | ||||
| [[case]] # single persistent blocks (can't erase) | ||||
| define.LFS_ERASE_CYCLES = 0xffffffff | ||||
| define.LFS_BADBLOCK_BEHAVIOR = 'LFS_TESTBD_BADBLOCK_NOERASE' | ||||
| define.NAMEMULT = 64 | ||||
| define.FILEMULT = 1 | ||||
| code = ''' | ||||
|     for (lfs_block_t badblock = 2; badblock < LFS_BLOCK_COUNT; badblock++) { | ||||
|         lfs_testbd_setwear(&cfg, badblock-1, 0) => 0; | ||||
|         lfs_testbd_setwear(&cfg, badblock, 0xffffffff) => 0; | ||||
|          | ||||
|         lfs_format(&lfs, &cfg) => 0; | ||||
|  | ||||
|         lfs_mount(&lfs, &cfg) => 0; | ||||
|         for (int i = 1; i < 10; i++) { | ||||
|             for (int j = 0; j < NAMEMULT; j++) { | ||||
|                 buffer[j] = '0'+i; | ||||
|             } | ||||
|             buffer[NAMEMULT] = '\0'; | ||||
|             lfs_mkdir(&lfs, (char*)buffer) => 0; | ||||
|  | ||||
|             buffer[NAMEMULT] = '/'; | ||||
|             for (int j = 0; j < NAMEMULT; j++) { | ||||
|                 buffer[j+NAMEMULT+1] = '0'+i; | ||||
|             } | ||||
|             buffer[2*NAMEMULT+1] = '\0'; | ||||
|             lfs_file_open(&lfs, &file, (char*)buffer, | ||||
|                     LFS_O_WRONLY | LFS_O_CREAT) => 0; | ||||
|              | ||||
|             size = NAMEMULT; | ||||
|             for (int j = 0; j < i*FILEMULT; j++) { | ||||
|                 lfs_file_write(&lfs, &file, buffer, size) => size; | ||||
|             } | ||||
|  | ||||
|             lfs_file_close(&lfs, &file) => 0; | ||||
|         } | ||||
|         lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|         lfs_mount(&lfs, &cfg) => 0; | ||||
|         for (int i = 1; i < 10; i++) { | ||||
|             for (int j = 0; j < NAMEMULT; j++) { | ||||
|                 buffer[j] = '0'+i; | ||||
|             } | ||||
|             buffer[NAMEMULT] = '\0'; | ||||
|             lfs_stat(&lfs, (char*)buffer, &info) => 0; | ||||
|             info.type => LFS_TYPE_DIR; | ||||
|  | ||||
|             buffer[NAMEMULT] = '/'; | ||||
|             for (int j = 0; j < NAMEMULT; j++) { | ||||
|                 buffer[j+NAMEMULT+1] = '0'+i; | ||||
|             } | ||||
|             buffer[2*NAMEMULT+1] = '\0'; | ||||
|             lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0; | ||||
|              | ||||
|             size = NAMEMULT; | ||||
|             for (int j = 0; j < i*FILEMULT; j++) { | ||||
|                 uint8_t rbuffer[1024]; | ||||
|                 lfs_file_read(&lfs, &file, rbuffer, size) => size; | ||||
|                 memcmp(buffer, rbuffer, size) => 0; | ||||
|             } | ||||
|  | ||||
|             lfs_file_close(&lfs, &file) => 0; | ||||
|         } | ||||
|         lfs_unmount(&lfs) => 0; | ||||
|     } | ||||
| ''' | ||||
|  | ||||
| [[case]] # single unreadable blocks (can't read) | ||||
| define.LFS_ERASE_CYCLES = 0xffffffff | ||||
| define.LFS_BADBLOCK_BEHAVIOR = 'LFS_TESTBD_BADBLOCK_NOREAD' | ||||
| define.NAMEMULT = 64 | ||||
| define.FILEMULT = 1 | ||||
| code = ''' | ||||
|     for (lfs_block_t badblock = 2; badblock < LFS_BLOCK_COUNT/2; badblock++) { | ||||
|         lfs_testbd_setwear(&cfg, badblock-1, 0) => 0; | ||||
|         lfs_testbd_setwear(&cfg, badblock, 0xffffffff) => 0; | ||||
|          | ||||
|         lfs_format(&lfs, &cfg) => 0; | ||||
|  | ||||
|         lfs_mount(&lfs, &cfg) => 0; | ||||
|         for (int i = 1; i < 10; i++) { | ||||
|             for (int j = 0; j < NAMEMULT; j++) { | ||||
|                 buffer[j] = '0'+i; | ||||
|             } | ||||
|             buffer[NAMEMULT] = '\0'; | ||||
|             lfs_mkdir(&lfs, (char*)buffer) => 0; | ||||
|  | ||||
|             buffer[NAMEMULT] = '/'; | ||||
|             for (int j = 0; j < NAMEMULT; j++) { | ||||
|                 buffer[j+NAMEMULT+1] = '0'+i; | ||||
|             } | ||||
|             buffer[2*NAMEMULT+1] = '\0'; | ||||
|             lfs_file_open(&lfs, &file, (char*)buffer, | ||||
|                     LFS_O_WRONLY | LFS_O_CREAT) => 0; | ||||
|              | ||||
|             size = NAMEMULT; | ||||
|             for (int j = 0; j < i*FILEMULT; j++) { | ||||
|                 lfs_file_write(&lfs, &file, buffer, size) => size; | ||||
|             } | ||||
|  | ||||
|             lfs_file_close(&lfs, &file) => 0; | ||||
|         } | ||||
|         lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|         lfs_mount(&lfs, &cfg) => 0; | ||||
|         for (int i = 1; i < 10; i++) { | ||||
|             for (int j = 0; j < NAMEMULT; j++) { | ||||
|                 buffer[j] = '0'+i; | ||||
|             } | ||||
|             buffer[NAMEMULT] = '\0'; | ||||
|             lfs_stat(&lfs, (char*)buffer, &info) => 0; | ||||
|             info.type => LFS_TYPE_DIR; | ||||
|  | ||||
|             buffer[NAMEMULT] = '/'; | ||||
|             for (int j = 0; j < NAMEMULT; j++) { | ||||
|                 buffer[j+NAMEMULT+1] = '0'+i; | ||||
|             } | ||||
|             buffer[2*NAMEMULT+1] = '\0'; | ||||
|             lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0; | ||||
|              | ||||
|             size = NAMEMULT; | ||||
|             for (int j = 0; j < i*FILEMULT; j++) { | ||||
|                 uint8_t rbuffer[1024]; | ||||
|                 lfs_file_read(&lfs, &file, rbuffer, size) => size; | ||||
|                 memcmp(buffer, rbuffer, size) => 0; | ||||
|             } | ||||
|  | ||||
|             lfs_file_close(&lfs, &file) => 0; | ||||
|         } | ||||
|         lfs_unmount(&lfs) => 0; | ||||
|     } | ||||
| ''' | ||||
|  | ||||
| [[case]] # region corruption (causes cascading failures) | ||||
| define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster | ||||
| define.LFS_ERASE_CYCLES = 0xffffffff | ||||
| define.LFS_ERASE_VALUE = [0x00, 0xff, -1] | ||||
| define.LFS_BADBLOCK_BEHAVIOR = [ | ||||
|     'LFS_TESTBD_BADBLOCK_NOPROG', | ||||
|     'LFS_TESTBD_BADBLOCK_NOERASE', | ||||
|     'LFS_TESTBD_BADBLOCK_NOREAD', | ||||
|     'LFS_TESTBD_BADBLOCK_PROGERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_ERASEERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_READERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_PROGNOOP', | ||||
|     'LFS_TESTBD_BADBLOCK_ERASENOOP', | ||||
| ] | ||||
| define.NAMEMULT = 64 | ||||
| define.FILEMULT = 1 | ||||
| @@ -266,11 +149,15 @@ code = ''' | ||||
| ''' | ||||
|  | ||||
| [[case]] # alternating corruption (causes cascading failures) | ||||
| define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster | ||||
| define.LFS_ERASE_CYCLES = 0xffffffff | ||||
| define.LFS_ERASE_VALUE = [0x00, 0xff, -1] | ||||
| define.LFS_BADBLOCK_BEHAVIOR = [ | ||||
|     'LFS_TESTBD_BADBLOCK_NOPROG', | ||||
|     'LFS_TESTBD_BADBLOCK_NOERASE', | ||||
|     'LFS_TESTBD_BADBLOCK_NOREAD', | ||||
|     'LFS_TESTBD_BADBLOCK_PROGERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_ERASEERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_READERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_PROGNOOP', | ||||
|     'LFS_TESTBD_BADBLOCK_ERASENOOP', | ||||
| ] | ||||
| define.NAMEMULT = 64 | ||||
| define.FILEMULT = 1 | ||||
| @@ -337,10 +224,13 @@ code = ''' | ||||
| # other corner cases | ||||
| [[case]] # bad superblocks (corrupt 1 or 0) | ||||
| define.LFS_ERASE_CYCLES = 0xffffffff | ||||
| define.LFS_ERASE_VALUE = [0x00, 0xff, -1] | ||||
| define.LFS_BADBLOCK_BEHAVIOR = [ | ||||
|     'LFS_TESTBD_BADBLOCK_NOPROG', | ||||
|     'LFS_TESTBD_BADBLOCK_NOERASE', | ||||
|     'LFS_TESTBD_BADBLOCK_NOREAD', | ||||
|     'LFS_TESTBD_BADBLOCK_PROGERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_ERASEERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_READERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_PROGNOOP', | ||||
|     'LFS_TESTBD_BADBLOCK_ERASENOOP', | ||||
| ] | ||||
| code = ''' | ||||
|     lfs_testbd_setwear(&cfg, 0, 0xffffffff) => 0; | ||||
|   | ||||
| @@ -155,7 +155,7 @@ code = ''' | ||||
| ''' | ||||
|  | ||||
| [[case]] # reentrant many directory creation/rename/removal | ||||
| define.N = [5, 25] | ||||
| define.N = [5, 11] | ||||
| reentrant = true | ||||
| code = ''' | ||||
|     err = lfs_mount(&lfs, &cfg); | ||||
|   | ||||
| @@ -3,7 +3,7 @@ | ||||
| # still pass with other inline sizes but wouldn't be testing anything. | ||||
|  | ||||
| define.LFS_CACHE_SIZE = 512 | ||||
| if = 'LFS_CACHE_SIZE == 512' | ||||
| if = 'LFS_CACHE_SIZE % LFS_PROG_SIZE == 0 && LFS_CACHE_SIZE == 512' | ||||
|  | ||||
| [[case]] # entry grow test | ||||
| code = ''' | ||||
|   | ||||
							
								
								
									
										288
									
								
								tests/test_evil.toml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										288
									
								
								tests/test_evil.toml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,288 @@ | ||||
| # Tests for recovering from conditions which shouldn't normally | ||||
| # happen during normal operation of littlefs | ||||
|  | ||||
| # invalid pointer tests (outside of block_count) | ||||
|  | ||||
| [[case]] # invalid tail-pointer test | ||||
| define.TAIL_TYPE = ['LFS_TYPE_HARDTAIL', 'LFS_TYPE_SOFTTAIL'] | ||||
| define.INVALSET = [0x3, 0x1, 0x2] | ||||
| in = "lfs.c" | ||||
| code = ''' | ||||
|     // create littlefs | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|  | ||||
|     // change tail-pointer to invalid pointers | ||||
|     lfs_init(&lfs, &cfg) => 0; | ||||
|     lfs_mdir_t mdir; | ||||
|     lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0; | ||||
|     lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS( | ||||
|             {LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8), | ||||
|                 (lfs_block_t[2]){ | ||||
|                     (INVALSET & 0x1) ? 0xcccccccc : 0, | ||||
|                     (INVALSET & 0x2) ? 0xcccccccc : 0}})) => 0; | ||||
|     lfs_deinit(&lfs) => 0; | ||||
|  | ||||
|     // test that mount fails gracefully | ||||
|     lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT; | ||||
| ''' | ||||
|  | ||||
| [[case]] # invalid dir pointer test | ||||
| define.INVALSET = [0x3, 0x1, 0x2] | ||||
| in = "lfs.c" | ||||
| code = ''' | ||||
|     // create littlefs | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     // make a dir | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_mkdir(&lfs, "dir_here") => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     // change the dir pointer to be invalid | ||||
|     lfs_init(&lfs, &cfg) => 0; | ||||
|     lfs_mdir_t mdir; | ||||
|     lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0; | ||||
|     // make sure id 1 == our directory | ||||
|     lfs_dir_get(&lfs, &mdir, | ||||
|             LFS_MKTAG(0x700, 0x3ff, 0), | ||||
|             LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("dir_here")), buffer) | ||||
|                 => LFS_MKTAG(LFS_TYPE_DIR, 1, strlen("dir_here")); | ||||
|     assert(memcmp((char*)buffer, "dir_here", strlen("dir_here")) == 0); | ||||
|     // change dir pointer | ||||
|     lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS( | ||||
|             {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, 8), | ||||
|                 (lfs_block_t[2]){ | ||||
|                     (INVALSET & 0x1) ? 0xcccccccc : 0, | ||||
|                     (INVALSET & 0x2) ? 0xcccccccc : 0}})) => 0; | ||||
|     lfs_deinit(&lfs) => 0; | ||||
|  | ||||
|     // test that accessing our bad dir fails, note there's a number | ||||
|     // of ways to access the dir, some can fail, but some don't | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_stat(&lfs, "dir_here", &info) => 0; | ||||
|     assert(strcmp(info.name, "dir_here") == 0); | ||||
|     assert(info.type == LFS_TYPE_DIR); | ||||
|  | ||||
|     lfs_dir_open(&lfs, &dir, "dir_here") => LFS_ERR_CORRUPT; | ||||
|     lfs_stat(&lfs, "dir_here/file_here", &info) => LFS_ERR_CORRUPT; | ||||
|     lfs_dir_open(&lfs, &dir, "dir_here/dir_here") => LFS_ERR_CORRUPT; | ||||
|     lfs_file_open(&lfs, &file, "dir_here/file_here", | ||||
|             LFS_O_RDONLY) => LFS_ERR_CORRUPT; | ||||
|     lfs_file_open(&lfs, &file, "dir_here/file_here", | ||||
|             LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_CORRUPT; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|  | ||||
| [[case]] # invalid file pointer test | ||||
| in = "lfs.c" | ||||
| define.SIZE = [10, 1000, 100000] # faked file size | ||||
| code = ''' | ||||
|     // create littlefs | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     // make a file | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_file_open(&lfs, &file, "file_here", | ||||
|             LFS_O_WRONLY | LFS_O_CREAT) => 0; | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     // change the file pointer to be invalid | ||||
|     lfs_init(&lfs, &cfg) => 0; | ||||
|     lfs_mdir_t mdir; | ||||
|     lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0; | ||||
|     // make sure id 1 == our file | ||||
|     lfs_dir_get(&lfs, &mdir, | ||||
|             LFS_MKTAG(0x700, 0x3ff, 0), | ||||
|             LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer) | ||||
|                 => LFS_MKTAG(LFS_TYPE_REG, 1, strlen("file_here")); | ||||
|     assert(memcmp((char*)buffer, "file_here", strlen("file_here")) == 0); | ||||
|     // change file pointer | ||||
|     lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS( | ||||
|             {LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz)), | ||||
|                 &(struct lfs_ctz){0xcccccccc, lfs_tole32(SIZE)}})) => 0; | ||||
|     lfs_deinit(&lfs) => 0; | ||||
|  | ||||
|     // test that accessing our bad file fails, note there's a number | ||||
|     // of ways to access the dir, some can fail, but some don't | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_stat(&lfs, "file_here", &info) => 0; | ||||
|     assert(strcmp(info.name, "file_here") == 0); | ||||
|     assert(info.type == LFS_TYPE_REG); | ||||
|     assert(info.size == SIZE); | ||||
|  | ||||
|     lfs_file_open(&lfs, &file, "file_here", LFS_O_RDONLY) => 0; | ||||
|     lfs_file_read(&lfs, &file, buffer, SIZE) => LFS_ERR_CORRUPT; | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|  | ||||
|     // any allocs that traverse CTZ must unfortunately must fail | ||||
|     if (SIZE > 2*LFS_BLOCK_SIZE) { | ||||
|         lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT; | ||||
|     } | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|  | ||||
| [[case]] # invalid pointer in CTZ skip-list test | ||||
| define.SIZE = ['2*LFS_BLOCK_SIZE', '3*LFS_BLOCK_SIZE', '4*LFS_BLOCK_SIZE'] | ||||
| in = "lfs.c" | ||||
| code = ''' | ||||
|     // create littlefs | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     // make a file | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_file_open(&lfs, &file, "file_here", | ||||
|             LFS_O_WRONLY | LFS_O_CREAT) => 0; | ||||
|     for (int i = 0; i < SIZE; i++) { | ||||
|         char c = 'c'; | ||||
|         lfs_file_write(&lfs, &file, &c, 1) => 1; | ||||
|     } | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|     // change pointer in CTZ skip-list to be invalid | ||||
|     lfs_init(&lfs, &cfg) => 0; | ||||
|     lfs_mdir_t mdir; | ||||
|     lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0; | ||||
|     // make sure id 1 == our file and get our CTZ structure | ||||
|     lfs_dir_get(&lfs, &mdir, | ||||
|             LFS_MKTAG(0x700, 0x3ff, 0), | ||||
|             LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer) | ||||
|                 => LFS_MKTAG(LFS_TYPE_REG, 1, strlen("file_here")); | ||||
|     assert(memcmp((char*)buffer, "file_here", strlen("file_here")) == 0); | ||||
|     struct lfs_ctz ctz; | ||||
|     lfs_dir_get(&lfs, &mdir, | ||||
|             LFS_MKTAG(0x700, 0x3ff, 0), | ||||
|             LFS_MKTAG(LFS_TYPE_STRUCT, 1, sizeof(struct lfs_ctz)), &ctz) | ||||
|                 => LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz)); | ||||
|     lfs_ctz_fromle32(&ctz); | ||||
|     // rewrite block to contain bad pointer | ||||
|     uint8_t bbuffer[LFS_BLOCK_SIZE]; | ||||
|     cfg.read(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0; | ||||
|     uint32_t bad = lfs_tole32(0xcccccccc); | ||||
|     memcpy(&bbuffer[0], &bad, sizeof(bad)); | ||||
|     memcpy(&bbuffer[4], &bad, sizeof(bad)); | ||||
|     cfg.erase(&cfg, ctz.head) => 0; | ||||
|     cfg.prog(&cfg, ctz.head, 0, bbuffer, LFS_BLOCK_SIZE) => 0; | ||||
|     lfs_deinit(&lfs) => 0; | ||||
|  | ||||
|     // test that accessing our bad file fails, note there's a number | ||||
|     // of ways to access the dir, some can fail, but some don't | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_stat(&lfs, "file_here", &info) => 0; | ||||
|     assert(strcmp(info.name, "file_here") == 0); | ||||
|     assert(info.type == LFS_TYPE_REG); | ||||
|     assert(info.size == SIZE); | ||||
|  | ||||
|     lfs_file_open(&lfs, &file, "file_here", LFS_O_RDONLY) => 0; | ||||
|     lfs_file_read(&lfs, &file, buffer, SIZE) => LFS_ERR_CORRUPT; | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|  | ||||
|     // any allocs that traverse CTZ must unfortunately must fail | ||||
|     if (SIZE > 2*LFS_BLOCK_SIZE) { | ||||
|         lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT; | ||||
|     } | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|  | ||||
|  | ||||
| [[case]] # invalid gstate pointer | ||||
| define.INVALSET = [0x3, 0x1, 0x2] | ||||
| in = "lfs.c" | ||||
| code = ''' | ||||
|     // create littlefs | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|  | ||||
|     // create an invalid gstate | ||||
|     lfs_init(&lfs, &cfg) => 0; | ||||
|     lfs_mdir_t mdir; | ||||
|     lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0; | ||||
|     lfs_fs_prepmove(&lfs, 1, (lfs_block_t [2]){ | ||||
|             (INVALSET & 0x1) ? 0xcccccccc : 0, | ||||
|             (INVALSET & 0x2) ? 0xcccccccc : 0}); | ||||
|     lfs_dir_commit(&lfs, &mdir, NULL, 0) => 0; | ||||
|     lfs_deinit(&lfs) => 0; | ||||
|  | ||||
|     // test that mount fails gracefully | ||||
|     // mount may not fail, but our first alloc should fail when | ||||
|     // we try to fix the gstate | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_mkdir(&lfs, "should_fail") => LFS_ERR_CORRUPT; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|  | ||||
| # cycle detection/recovery tests | ||||
|  | ||||
| [[case]] # metadata-pair threaded-list loop test | ||||
| in = "lfs.c" | ||||
| code = ''' | ||||
|     // create littlefs | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|  | ||||
|     // change tail-pointer to point to ourself | ||||
|     lfs_init(&lfs, &cfg) => 0; | ||||
|     lfs_mdir_t mdir; | ||||
|     lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0; | ||||
|     lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS( | ||||
|             {LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8), | ||||
|                 (lfs_block_t[2]){0, 1}})) => 0; | ||||
|     lfs_deinit(&lfs) => 0; | ||||
|  | ||||
|     // test that mount fails gracefully | ||||
|     lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT; | ||||
| ''' | ||||
|  | ||||
| [[case]] # metadata-pair threaded-list 2-length loop test | ||||
| in = "lfs.c" | ||||
| code = ''' | ||||
|     // create littlefs with child dir | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_mkdir(&lfs, "child") => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     // find child | ||||
|     lfs_init(&lfs, &cfg) => 0; | ||||
|     lfs_mdir_t mdir; | ||||
|     lfs_block_t pair[2]; | ||||
|     lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0; | ||||
|     lfs_dir_get(&lfs, &mdir, | ||||
|             LFS_MKTAG(0x7ff, 0x3ff, 0), | ||||
|             LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair) | ||||
|                 => LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)); | ||||
|     lfs_pair_fromle32(pair); | ||||
|     // change tail-pointer to point to root | ||||
|     lfs_dir_fetch(&lfs, &mdir, pair) => 0; | ||||
|     lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS( | ||||
|             {LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8), | ||||
|                 (lfs_block_t[2]){0, 1}})) => 0; | ||||
|     lfs_deinit(&lfs) => 0; | ||||
|  | ||||
|     // test that mount fails gracefully | ||||
|     lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT; | ||||
| ''' | ||||
|  | ||||
| [[case]] # metadata-pair threaded-list 1-length child loop test | ||||
| in = "lfs.c" | ||||
| code = ''' | ||||
|     // create littlefs with child dir | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_mkdir(&lfs, "child") => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     // find child | ||||
|     lfs_init(&lfs, &cfg) => 0; | ||||
|     lfs_mdir_t mdir; | ||||
|     lfs_block_t pair[2]; | ||||
|     lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0; | ||||
|     lfs_dir_get(&lfs, &mdir, | ||||
|             LFS_MKTAG(0x7ff, 0x3ff, 0), | ||||
|             LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair) | ||||
|                 => LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)); | ||||
|     lfs_pair_fromle32(pair); | ||||
|     // change tail-pointer to point to ourself | ||||
|     lfs_dir_fetch(&lfs, &mdir, pair) => 0; | ||||
|     lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS( | ||||
|             {LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8), pair})) => 0; | ||||
|     lfs_deinit(&lfs) => 0; | ||||
|  | ||||
|     // test that mount fails gracefully | ||||
|     lfs_mount(&lfs, &cfg) => LFS_ERR_CORRUPT; | ||||
| ''' | ||||
| @@ -1,11 +1,13 @@ | ||||
| [[case]] # test running a filesystem to exhaustion | ||||
| define.LFS_ERASE_CYCLES = 10 | ||||
| define.LFS_BLOCK_COUNT = 256 # small bd so it runs faster | ||||
| define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster | ||||
| define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2' | ||||
| define.LFS_BADBLOCK_BEHAVIOR = [ | ||||
|     'LFS_TESTBD_BADBLOCK_NOPROG', | ||||
|     'LFS_TESTBD_BADBLOCK_NOERASE', | ||||
|     'LFS_TESTBD_BADBLOCK_NOREAD', | ||||
|     'LFS_TESTBD_BADBLOCK_PROGERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_ERASEERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_READERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_PROGNOOP', | ||||
|     'LFS_TESTBD_BADBLOCK_ERASENOOP', | ||||
| ] | ||||
| define.FILES = 10 | ||||
| code = ''' | ||||
| @@ -31,6 +33,9 @@ code = ''' | ||||
|                 lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1); | ||||
|                 assert(res == 1 || res == LFS_ERR_NOSPC); | ||||
|                 if (res == LFS_ERR_NOSPC) { | ||||
|                     err = lfs_file_close(&lfs, &file); | ||||
|                     assert(err == 0 || err == LFS_ERR_NOSPC); | ||||
|                     lfs_unmount(&lfs) => 0; | ||||
|                     goto exhausted; | ||||
|                 } | ||||
|             } | ||||
| @@ -38,9 +43,10 @@ code = ''' | ||||
|             err = lfs_file_close(&lfs, &file); | ||||
|             assert(err == 0 || err == LFS_ERR_NOSPC); | ||||
|             if (err == LFS_ERR_NOSPC) { | ||||
|                 lfs_unmount(&lfs) => 0; | ||||
|                 goto exhausted; | ||||
|             } | ||||
|         }     | ||||
|         } | ||||
|  | ||||
|         for (uint32_t i = 0; i < FILES; i++) { | ||||
|             // check for errors | ||||
| @@ -57,7 +63,7 @@ code = ''' | ||||
|             } | ||||
|  | ||||
|             lfs_file_close(&lfs, &file) => 0; | ||||
|         }     | ||||
|         } | ||||
|         lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|         cycle += 1; | ||||
| @@ -70,7 +76,7 @@ exhausted: | ||||
|         // check for errors | ||||
|         sprintf(path, "roadrunner/test%d", i); | ||||
|         lfs_stat(&lfs, path, &info) => 0; | ||||
|     }     | ||||
|     } | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     LFS_WARN("completed %d cycles", cycle); | ||||
| @@ -79,12 +85,14 @@ exhausted: | ||||
| [[case]] # test running a filesystem to exhaustion | ||||
|          # which also requires expanding superblocks | ||||
| define.LFS_ERASE_CYCLES = 10 | ||||
| define.LFS_BLOCK_COUNT = 256 # small bd so it runs faster | ||||
| define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster | ||||
| define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2' | ||||
| define.LFS_BADBLOCK_BEHAVIOR = [ | ||||
|     'LFS_TESTBD_BADBLOCK_NOPROG', | ||||
|     'LFS_TESTBD_BADBLOCK_NOERASE', | ||||
|     'LFS_TESTBD_BADBLOCK_NOREAD', | ||||
|     'LFS_TESTBD_BADBLOCK_PROGERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_ERASEERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_READERROR', | ||||
|     'LFS_TESTBD_BADBLOCK_PROGNOOP', | ||||
|     'LFS_TESTBD_BADBLOCK_ERASENOOP', | ||||
| ] | ||||
| define.FILES = 10 | ||||
| code = ''' | ||||
| @@ -107,6 +115,9 @@ code = ''' | ||||
|                 lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1); | ||||
|                 assert(res == 1 || res == LFS_ERR_NOSPC); | ||||
|                 if (res == LFS_ERR_NOSPC) { | ||||
|                     err = lfs_file_close(&lfs, &file); | ||||
|                     assert(err == 0 || err == LFS_ERR_NOSPC); | ||||
|                     lfs_unmount(&lfs) => 0; | ||||
|                     goto exhausted; | ||||
|                 } | ||||
|             } | ||||
| @@ -114,9 +125,10 @@ code = ''' | ||||
|             err = lfs_file_close(&lfs, &file); | ||||
|             assert(err == 0 || err == LFS_ERR_NOSPC); | ||||
|             if (err == LFS_ERR_NOSPC) { | ||||
|                 lfs_unmount(&lfs) => 0; | ||||
|                 goto exhausted; | ||||
|             } | ||||
|         }     | ||||
|         } | ||||
|  | ||||
|         for (uint32_t i = 0; i < FILES; i++) { | ||||
|             // check for errors | ||||
| @@ -133,7 +145,7 @@ code = ''' | ||||
|             } | ||||
|  | ||||
|             lfs_file_close(&lfs, &file) => 0; | ||||
|         }     | ||||
|         } | ||||
|         lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|         cycle += 1; | ||||
| @@ -146,7 +158,7 @@ exhausted: | ||||
|         // check for errors | ||||
|         sprintf(path, "test%d", i); | ||||
|         lfs_stat(&lfs, path, &info) => 0; | ||||
|     }     | ||||
|     } | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     LFS_WARN("completed %d cycles", cycle); | ||||
| @@ -158,21 +170,11 @@ exhausted: | ||||
| # check for. | ||||
|  | ||||
| [[case]] # wear-level test running a filesystem to exhaustion | ||||
| define.LFS_ERASE_CYCLES = 10 | ||||
| define.LFS_BLOCK_COUNT = 256 # small bd so it runs faster | ||||
| define.LFS_ERASE_CYCLES = 20 | ||||
| define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster | ||||
| define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2' | ||||
| define.LFS_BADBLOCK_BEHAVIOR = [ | ||||
|     'LFS_TESTBD_BADBLOCK_NOPROG', | ||||
|     'LFS_TESTBD_BADBLOCK_NOERASE', | ||||
|     'LFS_TESTBD_BADBLOCK_NOREAD', | ||||
| ] | ||||
| define.FILES = 10 | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_mkdir(&lfs, "roadrunner") => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     uint32_t run_cycles[2]; | ||||
|     const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT}; | ||||
|  | ||||
| @@ -182,6 +184,11 @@ code = ''' | ||||
|                     (b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0; | ||||
|         } | ||||
|  | ||||
|         lfs_format(&lfs, &cfg) => 0; | ||||
|         lfs_mount(&lfs, &cfg) => 0; | ||||
|         lfs_mkdir(&lfs, "roadrunner") => 0; | ||||
|         lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|         uint32_t cycle = 0; | ||||
|         while (true) { | ||||
|             lfs_mount(&lfs, &cfg) => 0; | ||||
| @@ -199,6 +206,9 @@ code = ''' | ||||
|                     lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1); | ||||
|                     assert(res == 1 || res == LFS_ERR_NOSPC); | ||||
|                     if (res == LFS_ERR_NOSPC) { | ||||
|                         err = lfs_file_close(&lfs, &file); | ||||
|                         assert(err == 0 || err == LFS_ERR_NOSPC); | ||||
|                         lfs_unmount(&lfs) => 0; | ||||
|                         goto exhausted; | ||||
|                     } | ||||
|                 } | ||||
| @@ -206,9 +216,10 @@ code = ''' | ||||
|                 err = lfs_file_close(&lfs, &file); | ||||
|                 assert(err == 0 || err == LFS_ERR_NOSPC); | ||||
|                 if (err == LFS_ERR_NOSPC) { | ||||
|                     lfs_unmount(&lfs) => 0; | ||||
|                     goto exhausted; | ||||
|                 } | ||||
|             }     | ||||
|             } | ||||
|  | ||||
|             for (uint32_t i = 0; i < FILES; i++) { | ||||
|                 // check for errors | ||||
| @@ -225,7 +236,7 @@ code = ''' | ||||
|                 } | ||||
|  | ||||
|                 lfs_file_close(&lfs, &file) => 0; | ||||
|             }     | ||||
|             } | ||||
|             lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|             cycle += 1; | ||||
| @@ -238,7 +249,7 @@ exhausted: | ||||
|             // check for errors | ||||
|             sprintf(path, "roadrunner/test%d", i); | ||||
|             lfs_stat(&lfs, path, &info) => 0; | ||||
|         }     | ||||
|         } | ||||
|         lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|         run_cycles[run] = cycle; | ||||
| @@ -247,22 +258,15 @@ exhausted: | ||||
|     } | ||||
|  | ||||
|     // check we increased the lifetime by 2x with ~10% error | ||||
|     LFS_ASSERT(run_cycles[1] > 2*run_cycles[0]-run_cycles[0]/10); | ||||
|     LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]); | ||||
| ''' | ||||
|  | ||||
| [[case]] # wear-level test + expanding superblock | ||||
| define.LFS_ERASE_CYCLES = 10 | ||||
| define.LFS_BLOCK_COUNT = 256 # small bd so it runs faster | ||||
| define.LFS_ERASE_CYCLES = 20 | ||||
| define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster | ||||
| define.LFS_BLOCK_CYCLES = 'LFS_ERASE_CYCLES / 2' | ||||
| define.LFS_BADBLOCK_BEHAVIOR = [ | ||||
|     'LFS_TESTBD_BADBLOCK_NOPROG', | ||||
|     'LFS_TESTBD_BADBLOCK_NOERASE', | ||||
|     'LFS_TESTBD_BADBLOCK_NOREAD', | ||||
| ] | ||||
| define.FILES = 10 | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|  | ||||
|     uint32_t run_cycles[2]; | ||||
|     const uint32_t run_block_count[2] = {LFS_BLOCK_COUNT/2, LFS_BLOCK_COUNT}; | ||||
|  | ||||
| @@ -272,6 +276,8 @@ code = ''' | ||||
|                     (b < run_block_count[run]) ? 0 : LFS_ERASE_CYCLES) => 0; | ||||
|         } | ||||
|  | ||||
|         lfs_format(&lfs, &cfg) => 0; | ||||
|  | ||||
|         uint32_t cycle = 0; | ||||
|         while (true) { | ||||
|             lfs_mount(&lfs, &cfg) => 0; | ||||
| @@ -289,6 +295,9 @@ code = ''' | ||||
|                     lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1); | ||||
|                     assert(res == 1 || res == LFS_ERR_NOSPC); | ||||
|                     if (res == LFS_ERR_NOSPC) { | ||||
|                         err = lfs_file_close(&lfs, &file); | ||||
|                         assert(err == 0 || err == LFS_ERR_NOSPC); | ||||
|                         lfs_unmount(&lfs) => 0; | ||||
|                         goto exhausted; | ||||
|                     } | ||||
|                 } | ||||
| @@ -296,9 +305,10 @@ code = ''' | ||||
|                 err = lfs_file_close(&lfs, &file); | ||||
|                 assert(err == 0 || err == LFS_ERR_NOSPC); | ||||
|                 if (err == LFS_ERR_NOSPC) { | ||||
|                     lfs_unmount(&lfs) => 0; | ||||
|                     goto exhausted; | ||||
|                 } | ||||
|             }     | ||||
|             } | ||||
|  | ||||
|             for (uint32_t i = 0; i < FILES; i++) { | ||||
|                 // check for errors | ||||
| @@ -315,7 +325,7 @@ code = ''' | ||||
|                 } | ||||
|  | ||||
|                 lfs_file_close(&lfs, &file) => 0; | ||||
|             }     | ||||
|             } | ||||
|             lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|             cycle += 1; | ||||
| @@ -328,7 +338,7 @@ exhausted: | ||||
|             // check for errors | ||||
|             sprintf(path, "test%d", i); | ||||
|             lfs_stat(&lfs, path, &info) => 0; | ||||
|         }     | ||||
|         } | ||||
|         lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|         run_cycles[run] = cycle; | ||||
| @@ -337,5 +347,119 @@ exhausted: | ||||
|     } | ||||
|  | ||||
|     // check we increased the lifetime by 2x with ~10% error | ||||
|     LFS_ASSERT(run_cycles[1] > 2*run_cycles[0]-run_cycles[0]/10); | ||||
|     LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]); | ||||
| ''' | ||||
|  | ||||
| [[case]] # test that we wear blocks roughly evenly | ||||
| define.LFS_ERASE_CYCLES = 0xffffffff | ||||
| define.LFS_BLOCK_COUNT = 256 # small bd so test runs faster | ||||
| define.LFS_BLOCK_CYCLES = [5, 4, 3, 2, 1] | ||||
| define.CYCLES = 100 | ||||
| define.FILES = 10 | ||||
| if = 'LFS_BLOCK_CYCLES < CYCLES/10' | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_mkdir(&lfs, "roadrunner") => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     uint32_t cycle = 0; | ||||
|     while (cycle < CYCLES) { | ||||
|         lfs_mount(&lfs, &cfg) => 0; | ||||
|         for (uint32_t i = 0; i < FILES; i++) { | ||||
|             // chose name, roughly random seed, and random 2^n size | ||||
|             sprintf(path, "roadrunner/test%d", i); | ||||
|             srand(cycle * i); | ||||
|             size = 1 << 4; //((rand() % 10)+2); | ||||
|  | ||||
|             lfs_file_open(&lfs, &file, path, | ||||
|                     LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0; | ||||
|  | ||||
|             for (lfs_size_t j = 0; j < size; j++) { | ||||
|                 char c = 'a' + (rand() % 26); | ||||
|                 lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1); | ||||
|                 assert(res == 1 || res == LFS_ERR_NOSPC); | ||||
|                 if (res == LFS_ERR_NOSPC) { | ||||
|                     err = lfs_file_close(&lfs, &file); | ||||
|                     assert(err == 0 || err == LFS_ERR_NOSPC); | ||||
|                     lfs_unmount(&lfs) => 0; | ||||
|                     goto exhausted; | ||||
|                 } | ||||
|             } | ||||
|  | ||||
|             err = lfs_file_close(&lfs, &file); | ||||
|             assert(err == 0 || err == LFS_ERR_NOSPC); | ||||
|             if (err == LFS_ERR_NOSPC) { | ||||
|                 lfs_unmount(&lfs) => 0; | ||||
|                 goto exhausted; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         for (uint32_t i = 0; i < FILES; i++) { | ||||
|             // check for errors | ||||
|             sprintf(path, "roadrunner/test%d", i); | ||||
|             srand(cycle * i); | ||||
|             size = 1 << 4; //((rand() % 10)+2); | ||||
|  | ||||
|             lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0; | ||||
|             for (lfs_size_t j = 0; j < size; j++) { | ||||
|                 char c = 'a' + (rand() % 26); | ||||
|                 char r; | ||||
|                 lfs_file_read(&lfs, &file, &r, 1) => 1; | ||||
|                 assert(r == c); | ||||
|             } | ||||
|  | ||||
|             lfs_file_close(&lfs, &file) => 0; | ||||
|         } | ||||
|         lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|         cycle += 1; | ||||
|     } | ||||
|  | ||||
| exhausted: | ||||
|     // should still be readable | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     for (uint32_t i = 0; i < FILES; i++) { | ||||
|         // check for errors | ||||
|         sprintf(path, "roadrunner/test%d", i); | ||||
|         lfs_stat(&lfs, path, &info) => 0; | ||||
|     } | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     LFS_WARN("completed %d cycles", cycle); | ||||
|  | ||||
|     // check the wear on our block device | ||||
|     lfs_testbd_wear_t minwear = -1; | ||||
|     lfs_testbd_wear_t totalwear = 0; | ||||
|     lfs_testbd_wear_t maxwear = 0; | ||||
|     // skip 0 and 1 as superblock movement is intentionally avoided | ||||
|     for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) { | ||||
|         lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b); | ||||
|         printf("%08x: wear %d\n", b, wear); | ||||
|         assert(wear >= 0); | ||||
|         if (wear < minwear) { | ||||
|             minwear = wear; | ||||
|         } | ||||
|         if (wear > maxwear) { | ||||
|             maxwear = wear; | ||||
|         } | ||||
|         totalwear += wear; | ||||
|     } | ||||
|     lfs_testbd_wear_t avgwear = totalwear / LFS_BLOCK_COUNT; | ||||
|     LFS_WARN("max wear: %d cycles", maxwear); | ||||
|     LFS_WARN("avg wear: %d cycles", totalwear / LFS_BLOCK_COUNT); | ||||
|     LFS_WARN("min wear: %d cycles", minwear); | ||||
|  | ||||
|     // find standard deviation^2 | ||||
|     lfs_testbd_wear_t dev2 = 0; | ||||
|     for (lfs_block_t b = 2; b < LFS_BLOCK_COUNT; b++) { | ||||
|         lfs_testbd_wear_t wear = lfs_testbd_getwear(&cfg, b); | ||||
|         assert(wear >= 0); | ||||
|         lfs_testbd_swear_t diff = wear - avgwear; | ||||
|         dev2 += diff*diff; | ||||
|     } | ||||
|     dev2 /= totalwear; | ||||
|     LFS_WARN("std dev^2: %d", dev2); | ||||
|     assert(dev2 < 8); | ||||
| ''' | ||||
|  | ||||
|   | ||||
| @@ -148,6 +148,7 @@ code = ''' | ||||
|  | ||||
| [[case]] # move file corrupt source and dest | ||||
| in = "lfs.c" | ||||
| if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
| @@ -239,6 +240,7 @@ code = ''' | ||||
|  | ||||
| [[case]] # move file after corrupt | ||||
| in = "lfs.c" | ||||
| if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
| @@ -593,6 +595,7 @@ code = ''' | ||||
|  | ||||
| [[case]] # move dir corrupt source and dest | ||||
| in = "lfs.c" | ||||
| if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
| @@ -692,6 +695,7 @@ code = ''' | ||||
|  | ||||
| [[case]] # move dir after corrupt | ||||
| in = "lfs.c" | ||||
| if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|   | ||||
| @@ -1,5 +1,6 @@ | ||||
| [[case]] # orphan test | ||||
| in = "lfs.c" | ||||
| if = 'LFS_PROG_SIZE <= 0x3fe' # only works with one crc per commit | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
| @@ -57,10 +58,12 @@ code = ''' | ||||
|  | ||||
| [[case]] # reentrant testing for orphans, basically just spam mkdir/remove | ||||
| reentrant = true | ||||
| # TODO fix this case, caused by non-DAG trees | ||||
| if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)' | ||||
| define = [ | ||||
|     {FILES=6,  DEPTH=1, CYCLES=50}, | ||||
|     {FILES=26, DEPTH=1, CYCLES=50}, | ||||
|     {FILES=3,  DEPTH=3, CYCLES=50}, | ||||
|     {FILES=6,  DEPTH=1, CYCLES=20}, | ||||
|     {FILES=26, DEPTH=1, CYCLES=20}, | ||||
|     {FILES=3,  DEPTH=3, CYCLES=20}, | ||||
| ] | ||||
| code = ''' | ||||
|     err = lfs_mount(&lfs, &cfg); | ||||
|   | ||||
| @@ -247,14 +247,14 @@ code = ''' | ||||
|     lfs_mkdir(&lfs, "coffee/coldcoffee") => 0; | ||||
|  | ||||
|     memset(path, 'w', LFS_NAME_MAX+1); | ||||
|     path[LFS_NAME_MAX+2] = '\0'; | ||||
|     path[LFS_NAME_MAX+1] = '\0'; | ||||
|     lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG; | ||||
|     lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT) | ||||
|             => LFS_ERR_NAMETOOLONG; | ||||
|  | ||||
|     memcpy(path, "coffee/", strlen("coffee/")); | ||||
|     memset(path+strlen("coffee/"), 'w', LFS_NAME_MAX+1); | ||||
|     path[strlen("coffee/")+LFS_NAME_MAX+2] = '\0'; | ||||
|     path[strlen("coffee/")+LFS_NAME_MAX+1] = '\0'; | ||||
|     lfs_mkdir(&lfs, path) => LFS_ERR_NAMETOOLONG; | ||||
|     lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT) | ||||
|             => LFS_ERR_NAMETOOLONG; | ||||
| @@ -270,7 +270,6 @@ code = ''' | ||||
|     lfs_mkdir(&lfs, "coffee/warmcoffee") => 0; | ||||
|     lfs_mkdir(&lfs, "coffee/coldcoffee") => 0; | ||||
|  | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     memset(path, 'w', LFS_NAME_MAX); | ||||
|     path[LFS_NAME_MAX] = '\0'; | ||||
|     lfs_mkdir(&lfs, path) => 0; | ||||
|   | ||||
| @@ -147,10 +147,12 @@ code = ''' | ||||
|          # orphan testing, except here we also set block_cycles so that | ||||
|          # almost every tree operation needs a relocation | ||||
| reentrant = true | ||||
| # TODO fix this case, caused by non-DAG trees | ||||
| if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)' | ||||
| define = [ | ||||
|     {FILES=6,  DEPTH=1, CYCLES=50, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=26, DEPTH=1, CYCLES=50, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=3,  DEPTH=3, CYCLES=50, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=6,  DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=3,  DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1}, | ||||
| ] | ||||
| code = ''' | ||||
|     err = lfs_mount(&lfs, &cfg); | ||||
| @@ -207,10 +209,12 @@ code = ''' | ||||
|  | ||||
| [[case]] # reentrant testing for relocations, but now with random renames! | ||||
| reentrant = true | ||||
| # TODO fix this case, caused by non-DAG trees | ||||
| if = '!(DEPTH == 3 && LFS_CACHE_SIZE != 64)' | ||||
| define = [ | ||||
|     {FILES=6,  DEPTH=1, CYCLES=50, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=26, DEPTH=1, CYCLES=50, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=3,  DEPTH=3, CYCLES=50, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=6,  DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=26, DEPTH=1, CYCLES=20, LFS_BLOCK_CYCLES=1}, | ||||
|     {FILES=3,  DEPTH=3, CYCLES=20, LFS_BLOCK_CYCLES=1}, | ||||
| ] | ||||
| code = ''' | ||||
|     err = lfs_mount(&lfs, &cfg); | ||||
|   | ||||
| @@ -27,41 +27,55 @@ code = ''' | ||||
| ''' | ||||
| 
 | ||||
| [[case]] # expanding superblock | ||||
| define.BLOCK_CYCLES = [32, 33, 1] | ||||
| define.LFS_BLOCK_CYCLES = [32, 33, 1] | ||||
| define.N = [10, 100, 1000] | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     for (int i = 0; i < N; i++) { | ||||
|         lfs_mkdir(&lfs, "dummy") => 0; | ||||
|         lfs_file_open(&lfs, &file, "dummy", | ||||
|                 LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0; | ||||
|         lfs_file_close(&lfs, &file) => 0; | ||||
|         lfs_stat(&lfs, "dummy", &info) => 0; | ||||
|         assert(strcmp(info.name, "dummy") == 0); | ||||
|         assert(info.type == LFS_TYPE_REG); | ||||
|         lfs_remove(&lfs, "dummy") => 0; | ||||
|     } | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| 
 | ||||
|     // one last check after power-cycle | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_mkdir(&lfs, "dummy") => 0; | ||||
|     lfs_file_open(&lfs, &file, "dummy", | ||||
|             LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0; | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     lfs_stat(&lfs, "dummy", &info) => 0; | ||||
|     assert(strcmp(info.name, "dummy") == 0); | ||||
|     assert(info.type == LFS_TYPE_REG); | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
| 
 | ||||
| [[case]] # expanding superblock with power cycle | ||||
| define.BLOCK_CYCLES = [32, 33, 1] | ||||
| define.LFS_BLOCK_CYCLES = [32, 33, 1] | ||||
| define.N = [10, 100, 1000] | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     for (int i = 0; i < N; i++) { | ||||
|         lfs_mount(&lfs, &cfg) => 0; | ||||
|         // remove lingering dummy? | ||||
|         err = lfs_remove(&lfs, "dummy"); | ||||
|         err = lfs_stat(&lfs, "dummy", &info); | ||||
|         assert(err == 0 || (err == LFS_ERR_NOENT && i == 0)); | ||||
|          | ||||
|         lfs_mkdir(&lfs, "dummy") => 0; | ||||
|         if (!err) { | ||||
|             assert(strcmp(info.name, "dummy") == 0); | ||||
|             assert(info.type == LFS_TYPE_REG); | ||||
|             lfs_remove(&lfs, "dummy") => 0; | ||||
|         } | ||||
| 
 | ||||
|         lfs_file_open(&lfs, &file, "dummy", | ||||
|                 LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0; | ||||
|         lfs_file_close(&lfs, &file) => 0; | ||||
|         lfs_stat(&lfs, "dummy", &info) => 0; | ||||
|         assert(strcmp(info.name, "dummy") == 0); | ||||
|         assert(info.type == LFS_TYPE_REG); | ||||
|         lfs_unmount(&lfs) => 0; | ||||
|     } | ||||
| 
 | ||||
| @@ -69,11 +83,12 @@ code = ''' | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_stat(&lfs, "dummy", &info) => 0; | ||||
|     assert(strcmp(info.name, "dummy") == 0); | ||||
|     assert(info.type == LFS_TYPE_REG); | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
| 
 | ||||
| [[case]] # reentrant expanding superblock | ||||
| define.BLOCK_CYCLES = [2, 1] | ||||
| define.LFS_BLOCK_CYCLES = [2, 1] | ||||
| define.N = 24 | ||||
| reentrant = true | ||||
| code = ''' | ||||
| @@ -85,12 +100,20 @@ code = ''' | ||||
| 
 | ||||
|     for (int i = 0; i < N; i++) { | ||||
|         // remove lingering dummy? | ||||
|         err = lfs_remove(&lfs, "dummy"); | ||||
|         err = lfs_stat(&lfs, "dummy", &info); | ||||
|         assert(err == 0 || (err == LFS_ERR_NOENT && i == 0)); | ||||
|          | ||||
|         lfs_mkdir(&lfs, "dummy") => 0; | ||||
|         if (!err) { | ||||
|             assert(strcmp(info.name, "dummy") == 0); | ||||
|             assert(info.type == LFS_TYPE_REG); | ||||
|             lfs_remove(&lfs, "dummy") => 0; | ||||
|         } | ||||
| 
 | ||||
|         lfs_file_open(&lfs, &file, "dummy", | ||||
|                 LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0; | ||||
|         lfs_file_close(&lfs, &file) => 0; | ||||
|         lfs_stat(&lfs, "dummy", &info) => 0; | ||||
|         assert(strcmp(info.name, "dummy") == 0); | ||||
|         assert(info.type == LFS_TYPE_REG); | ||||
|     } | ||||
| 
 | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| @@ -99,5 +122,6 @@ code = ''' | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_stat(&lfs, "dummy", &info) => 0; | ||||
|     assert(strcmp(info.name, "dummy") == 0); | ||||
|     assert(info.type == LFS_TYPE_REG); | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
| @@ -100,7 +100,7 @@ code = ''' | ||||
|     lfs_file_open(&lfs, &file, "sequence", | ||||
|             LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0; | ||||
|  | ||||
|     size = lfs.cfg->cache_size; | ||||
|     size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2); | ||||
|     lfs_size_t qsize = size / 4; | ||||
|     uint8_t *wb = buffer; | ||||
|     uint8_t *rb = buffer + size; | ||||
| @@ -392,3 +392,48 @@ code = ''' | ||||
|  | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|  | ||||
| [[case]] # noop truncate | ||||
| define.MEDIUMSIZE = [32, 2048] | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_file_open(&lfs, &file, "baldynoop", | ||||
|             LFS_O_RDWR | LFS_O_CREAT) => 0; | ||||
|  | ||||
|     strcpy((char*)buffer, "hair"); | ||||
|     size = strlen((char*)buffer); | ||||
|     for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) { | ||||
|         lfs_file_write(&lfs, &file, buffer, size) => size; | ||||
|  | ||||
|         // this truncate should do nothing | ||||
|         lfs_file_truncate(&lfs, &file, j+size) => 0; | ||||
|     } | ||||
|     lfs_file_size(&lfs, &file) => MEDIUMSIZE; | ||||
|  | ||||
|     lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0; | ||||
|     // should do nothing again | ||||
|     lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0; | ||||
|     lfs_file_size(&lfs, &file) => MEDIUMSIZE; | ||||
|  | ||||
|     for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) { | ||||
|         lfs_file_read(&lfs, &file, buffer, size) => size; | ||||
|         memcmp(buffer, "hair", size) => 0; | ||||
|     } | ||||
|     lfs_file_read(&lfs, &file, buffer, size) => 0; | ||||
|  | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     // still there after reboot? | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0; | ||||
|     lfs_file_size(&lfs, &file) => MEDIUMSIZE; | ||||
|     for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) { | ||||
|         lfs_file_read(&lfs, &file, buffer, size) => size; | ||||
|         memcmp(buffer, "hair", size) => 0; | ||||
|     } | ||||
|     lfs_file_read(&lfs, &file, buffer, size) => 0; | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|   | ||||
		Reference in New Issue
	
	Block a user