Mirror of https://github.com/eledio-devices/thirdparty-littlefs.git (synced 2025-10-31 16:14:16 +01:00)
Compare commits (135 commits)
| SHA1 |
|---|
| 40dba4a556 |
| 148e312ea3 |
| abbfe8e92e |
| c60c977c25 |
| 3ce64d1ac0 |
| 0ced3623d4 |
| 5451a6d503 |
| 1e038c81fc |
| f28ac3ea7d |
| a94fbda1cd |
| cc025653ed |
| bfb9bd2483 |
| f40b854ab5 |
| c2fa1bb7df |
| 3b62ec1c47 |
| b898977fd8 |
| cf274e6ec6 |
| 425dc810a5 |
| a6f01b7d6e |
| 9c7e232086 |
| c676bcee4c |
| 03f088b92c |
| e955b9f65d |
| 99f58139cb |
| 5801169348 |
| 2d6f4ead13 |
| 3d1b89b41a |
| 45cefb825d |
| bbb9e3873e |
| c6d3c48939 |
| 2db5dc80c2 |
| 1363c9f9d4 |
| 5bc682a0d4 |
| 8109f28266 |
| fedf646c79 |
| 84da4c0b1a |
| 554e4b1444 |
| fe8f3d4f18 |
| 316b019f41 |
| 8475c8064d |
| 563af5f364 |
| 3b495bab79 |
| e4adefd1d7 |
| 9d54603ce2 |
| 7ea2b515aa |
| 55b3c538d5 |
| eb8be9f351 |
| 50ad2adc96 |
| 0a2ff3b6ff |
| d7582efec8 |
| f4c7af76f8 |
| 20c58dcbaa |
| f5286abe7a |
| 2cdabe810d |
| b045436c23 |
| 1877c40aac |
| e29e7aeefa |
| e334983767 |
| 4977fa0c0e |
| fdda3b4aa2 |
| 487df12dde |
| 3efb8e44f3 |
| fb2c311bb4 |
| ead50807f1 |
| 2f7596811d |
| 1e423bae58 |
| 3bee4d9a19 |
| 1863dc7883 |
| 3d4e4f2085 |
| a2c744c8f8 |
| c0cc0a417e |
| bca64d76cf |
| cab1d6cca6 |
| c9eed1f181 |
| e7e4b352bd |
| 9449ef4be4 |
| cfe779fc08 |
| 0db6466984 |
| 21488d9e06 |
| 10a08833c6 |
| 47d6b2fcf3 |
| 745d98cde0 |
| 3216b07c3b |
| 6592719d28 |
| c9110617b3 |
| 104d65113d |
| 6d3e4ac33e |
| 9d6546071b |
| b84fb6bcc5 |
| 887f3660ed |
| eeeceb9e30 |
| b2235e956d |
| 6bb4043154 |
| 2b804537b0 |
| d804c2d3b7 |
| 37f4de2976 |
| 6b16dafb4d |
| 1a59954ec6 |
| 6a7012774d |
| 288a5cbc8d |
| 5783eea0de |
| 2bb523421e |
| 7388b2938a |
| ce425a56c3 |
| a99a93fb27 |
| 45afded784 |
| 00a9ba7826 |
| fc6988c7c3 |
| d0f055d321 |
| b9fa33f9bc |
| 2efebf8e9b |
| 754b4c3cda |
| 584eb26efc |
| 008ebc37df |
| 66272067ab |
| e273a82679 |
| 1dc6ae94b9 |
| 817ef02d24 |
| b8dcf10974 |
| 0aba71d0d6 |
| 0ea2871e24 |
| d04c1392c0 |
| f215027fd4 |
| 1ae4b36f2a |
| 480cdd9f81 |
| 6303558aee |
| 4bd653dd00 |
| 8e6826c4e2 |
| 10ac6b9cf0 |
| 87a2cb0e41 |
| 6d0ec5e851 |
| 4c9146ea53 |
| 5a9f38df01 |
| 1b033e9ab6 |
| 64f70f51b0 |
.github/workflows/post-release.yml (new file, 26 lines, vendored)

```yaml
@@ -0,0 +1,26 @@
name: post-release
on:
  release:
    branches: [master]
    types: [released]

jobs:
  post-release:
    runs-on: ubuntu-20.04
    steps:
      # trigger post-release in dependency repo, this indirection allows the
      # dependency repo to be updated often without affecting this repo. At
      # the time of this comment, the dependency repo is responsible for
      # creating PRs for other dependent repos post-release.
      - name: trigger-post-release
        continue-on-error: true
        run: |
          curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
            "$GITHUB_API_URL/repos/${{secrets.POST_RELEASE_REPO}}/dispatches" \
            -d "$(jq -n '{
              event_type: "post-release",
              client_payload: {
                repo: env.GITHUB_REPOSITORY,
                version: "${{github.event.release.tag_name}}"}}' \
              | tee /dev/stderr)"
```
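The `jq -n` invocation above is what builds the `repository_dispatch` body; a minimal local sketch of the same construction, with stand-in values for the repository and tag (nothing here touches the real API):

```sh
# stand-in values; in the workflow these come from the Actions context
export GITHUB_REPOSITORY=littlefs-project/littlefs
jq -n --arg version v2.4.1 '{
  event_type: "post-release",
  client_payload: {
    repo: env.GITHUB_REPOSITORY,
    version: $version}}'
# prints the JSON that curl POSTs to the /dispatches endpoint;
# the dependency repo then matches on event_type to run its own workflow
```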
.github/workflows/release.yml (new file, 196 lines, vendored)

```yaml
@@ -0,0 +1,196 @@
name: release
on:
  workflow_run:
    workflows: [test]
    branches: [master]
    types: [completed]

jobs:
  release:
    runs-on: ubuntu-20.04

    # need to manually check for a couple things
    # - tests passed?
    # - we are the most recent commit on master?
    if: ${{github.event.workflow_run.conclusion == 'success' &&
      github.event.workflow_run.head_sha == github.sha}}

    steps:
      - uses: actions/checkout@v2
        with:
          ref: ${{github.event.workflow_run.head_sha}}
          # need workflow access since we push branches
          # containing workflows
          token: ${{secrets.BOT_TOKEN}}
          # need all tags
          fetch-depth: 0

      # try to get results from tests
      - uses: dawidd6/action-download-artifact@v2
        continue-on-error: true
        with:
          workflow: ${{github.event.workflow_run.name}}
          run_id: ${{github.event.workflow_run.id}}
          name: results
          path: results

      - name: find-version
        run: |
          # rip version from lfs.h
          LFS_VERSION="$(grep -o '^#define LFS_VERSION .*$' lfs.h \
            | awk '{print $3}')"
          LFS_VERSION_MAJOR="$((0xffff & ($LFS_VERSION >> 16)))"
          LFS_VERSION_MINOR="$((0xffff & ($LFS_VERSION >>  0)))"

          # find a new patch version based on what we find in our tags
          LFS_VERSION_PATCH="$( \
            ( git describe --tags --abbrev=0 \
                --match="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.*" \
              || echo 'v0.0.-1' ) \
            | awk -F '.' '{print $3+1}')"

          # found new version
          LFS_VERSION="v$LFS_VERSION_MAJOR`
            `.$LFS_VERSION_MINOR`
            `.$LFS_VERSION_PATCH"
          echo "LFS_VERSION=$LFS_VERSION"
          echo "LFS_VERSION=$LFS_VERSION" >> $GITHUB_ENV
          echo "LFS_VERSION_MAJOR=$LFS_VERSION_MAJOR" >> $GITHUB_ENV
          echo "LFS_VERSION_MINOR=$LFS_VERSION_MINOR" >> $GITHUB_ENV
          echo "LFS_VERSION_PATCH=$LFS_VERSION_PATCH" >> $GITHUB_ENV

      # try to find previous version?
      - name: find-prev-version
        continue-on-error: true
        run: |
          LFS_PREV_VERSION="$(git describe --tags --abbrev=0 --match 'v*')"
          echo "LFS_PREV_VERSION=$LFS_PREV_VERSION"
          echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" >> $GITHUB_ENV

      # try to find results from tests
      - name: collect-results
        run: |
          # previous results to compare against?
          [ -n "$LFS_PREV_VERSION" ] && curl -sS \
            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/`
              `status/$LFS_PREV_VERSION?per_page=100" \
            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
            >> prev-results.json \
            || true

          # build table for GitHub
          echo "<table>" >> results.txt
          echo "<thead>" >> results.txt
          echo "<tr>" >> results.txt
          echo "<th align=left>Configuration</th>" >> results.txt
          for r in Code Stack Structs Coverage
          do
            echo "<th align=right>$r</th>" >> results.txt
          done
          echo "</tr>" >> results.txt
          echo "</thead>" >> results.txt

          echo "<tbody>" >> results.txt
          for c in "" readonly threadsafe migrate error-asserts
          do
            echo "<tr>" >> results.txt
            c_or_default=${c:-default}
            echo "<td align=left>${c_or_default^}</td>" >> results.txt
            for r in code stack structs
            do
              # per-config results
              echo "<td align=right>" >> results.txt
              [ -e results/thumb${c:+-$c}.csv ] && ( \
                export PREV="$(jq -re '
                      select(.context == "'"results (thumb${c:+, $c}) / $r"'").description
                      | capture("(?<result>[0-9∞]+)").result' \
                    prev-results.json || echo 0)"
                ./scripts/summary.py results/thumb${c:+-$c}.csv -f $r -Y | awk '
                  NR==2 {printf "%s B",$2}
                  NR==2 && ENVIRON["PREV"]+0 != 0 {
                    printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
                  NR==2 {printf "\n"}' \
                | sed -e 's/ /\ /g' \
                >> results.txt)
              echo "</td>" >> results.txt
            done
            # coverage results
            if [ -z $c ]
            then
              echo "<td rowspan=0 align=right>" >> results.txt
              [ -e results/coverage.csv ] && ( \
                export PREV="$(jq -re '
                      select(.context == "results / coverage").description
                      | capture("(?<result>[0-9\\.]+)").result' \
                    prev-results.json || echo 0)"
                ./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
                  NR==2 {printf "%.1f%% of %d lines",$4,$3}
                  NR==2 && ENVIRON["PREV"]+0 != 0 {
                    printf " (%+.1f%%)",$4-ENVIRON["PREV"]}
                  NR==2 {printf "\n"}' \
                | sed -e 's/ /\ /g' \
                >> results.txt)
              echo "</td>" >> results.txt
            fi
            echo "</tr>" >> results.txt
          done
          echo "</tbody>" >> results.txt
          echo "</table>" >> results.txt

          cat results.txt

      # find changes from history
      - name: collect-changes
        run: |
          [ -n "$LFS_PREV_VERSION" ] || exit 0
          # use explicit link to github commit so that release notes can
          # be copied elsewhere
          git log "$LFS_PREV_VERSION.." \
            --grep='^Merge' --invert-grep \
            --format="format:[\`%h\`](`
              `https://github.com/$GITHUB_REPOSITORY/commit/%h) %s" \
            > changes.txt
          echo "CHANGES:"
          cat changes.txt

      # create and update major branches (vN and vN-prefix)
      - name: create-major-branches
        run: |
          # create major branch
          git branch "v$LFS_VERSION_MAJOR" HEAD

          # create major prefix branch
          git config user.name ${{secrets.BOT_USER}}
          git config user.email ${{secrets.BOT_EMAIL}}
          git fetch "https://github.com/$GITHUB_REPOSITORY.git" \
            "v$LFS_VERSION_MAJOR-prefix" || true
          ./scripts/prefix.py "lfs$LFS_VERSION_MAJOR"
          git branch "v$LFS_VERSION_MAJOR-prefix" $( \
            git commit-tree $(git write-tree) \
              $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
              -p HEAD \
              -m "Generated v$LFS_VERSION_MAJOR prefixes")
          git reset --hard

          # push!
          git push --atomic origin \
            "v$LFS_VERSION_MAJOR" \
            "v$LFS_VERSION_MAJOR-prefix"

      # build release notes
      - name: create-release
        run: |
          # create release and patch version tag (vN.N.N)
          # only draft if not a patch release
          [ -e results.txt ] && export RESULTS="$(cat results.txt)"
          [ -e changes.txt ] && export CHANGES="$(cat changes.txt)"
          curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \
            -d "$(jq -n '{
              tag_name: env.LFS_VERSION,
              name: env.LFS_VERSION | rtrimstr(".0"),
              target_commitish: "${{github.event.workflow_run.head_sha}}",
              draft: env.LFS_VERSION | endswith(".0"),
              body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \
              | tee /dev/stderr)"
```
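The bit math in find-version works because `LFS_VERSION` in lfs.h packs the version as major in the upper 16 bits and minor in the lower 16. A quick sketch with a stand-in packed value (0x00020004, i.e. v2.4):

```sh
# stand-in for the value grep'd out of lfs.h above
LFS_VERSION=0x00020004
echo "$((0xffff & ($LFS_VERSION >> 16)))"   # major -> 2
echo "$((0xffff & ($LFS_VERSION >>  0)))"   # minor -> 4
# the patch number is one past the latest matching tag; with no
# matching tag, the 'v0.0.-1' fallback makes awk print 0:
echo 'v0.0.-1' | awk -F '.' '{print $3+1}'  # -> 0
```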
.github/workflows/status.yml (new file, 55 lines, vendored)

```yaml
@@ -0,0 +1,55 @@
name: status
on:
  workflow_run:
    workflows: [test]
    types: [completed]

jobs:
  status:
    runs-on: ubuntu-20.04
    steps:
      # custom statuses?
      - uses: dawidd6/action-download-artifact@v2
        continue-on-error: true
        with:
          workflow: ${{github.event.workflow_run.name}}
          run_id: ${{github.event.workflow_run.id}}
          name: status
          path: status
      - name: update-status
        continue-on-error: true
        run: |
          ls status
          for s in $(shopt -s nullglob ; echo status/*.json)
          do
            # parse requested status
            export STATE="$(jq -er '.state' $s)"
            export CONTEXT="$(jq -er '.context' $s)"
            export DESCRIPTION="$(jq -er '.description' $s)"
            # help lookup URL for job/steps because GitHub makes
            # it VERY HARD to link to specific jobs
            export TARGET_URL="$(
              jq -er '.target_url // empty' $s || (
                export TARGET_JOB="$(jq -er '.target_job' $s)"
                export TARGET_STEP="$(jq -er '.target_step // ""' $s)"
                curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \
                  "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/`
                    `${{github.event.workflow_run.id}}/jobs" \
                  | jq -er '.jobs[]
                    | select(.name == env.TARGET_JOB)
                    | .html_url
                      + "?check_suite_focus=true"
                      + ((.steps[]
                        | select(.name == env.TARGET_STEP)
                        | "#step:\(.number):0") // "")'))"
            # update status
            curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/`
                `${{github.event.workflow_run.head_sha}}" \
              -d "$(jq -n '{
                state: env.STATE,
                context: env.CONTEXT,
                description: env.DESCRIPTION,
                target_url: env.TARGET_URL}' \
                | tee /dev/stderr)"
          done
```
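The TARGET_URL lookup exists because the jobs API is the only reliable way to deep-link a step: the jq walks `.jobs[]` for a name match and appends a `#step:N:0` anchor. A small sketch of that jq against canned data (the job and step names here are hypothetical):

```sh
# canned /actions/runs/<id>/jobs response, trimmed to what the jq reads
cat > jobs.json << 'EOF'
{"jobs": [{"name": "test (thumb)",
           "html_url": "https://github.com/owner/repo/runs/123",
           "steps": [{"name": "results", "number": 7}]}]}
EOF
TARGET_JOB='test (thumb)' TARGET_STEP='results' \
  jq -er '.jobs[]
    | select(.name == env.TARGET_JOB)
    | .html_url
      + "?check_suite_focus=true"
      + ((.steps[]
        | select(.name == env.TARGET_STEP)
        | "#step:\(.number):0") // "")' jobs.json
# -> https://github.com/owner/repo/runs/123?check_suite_focus=true#step:7:0
```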
.github/workflows/test.yml (new file, 472 lines, vendored)

```yaml
@@ -0,0 +1,472 @@
name: test
on: [push, pull_request]

env:
  CFLAGS: -Werror
  MAKEFLAGS: -j

jobs:
  # run tests
  test:
    runs-on: ubuntu-20.04
    strategy:
      fail-fast: false
      matrix:
        arch: [x86_64, thumb, mips, powerpc]

    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few additional tools
          #
          # note this includes gcc-10, which is required for -fcallgraph-info=su
          sudo apt-get update -qq
          sudo apt-get install -qq gcc-10 python3 python3-pip lcov
          sudo pip3 install toml
          echo "CC=gcc-10" >> $GITHUB_ENV
          gcc-10 --version
          lcov --version
          python3 --version

          # need newer lcov version for gcc-10
          #sudo apt-get remove lcov
          #wget https://launchpad.net/ubuntu/+archive/primary/+files/lcov_1.15-1_all.deb
          #sudo apt install ./lcov_1.15-1_all.deb
          #lcov --version
          #which lcov
          #ls -lha /usr/bin/lcov
          wget https://github.com/linux-test-project/lcov/releases/download/v1.15/lcov-1.15.tar.gz
          tar xf lcov-1.15.tar.gz
          sudo make -C lcov-1.15 install

          # setup a ram-backed disk to speed up reentrant tests
          mkdir disks
          sudo mount -t tmpfs -o size=100m tmpfs disks
          TESTFLAGS="$TESTFLAGS --disk=disks/disk"

          # collect coverage
          mkdir -p coverage
          TESTFLAGS="$TESTFLAGS --coverage=`
            `coverage/${{github.job}}-${{matrix.arch}}.info"

          echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV

      # cross-compile with ARM Thumb (32-bit, little-endian)
      - name: install-thumb
        if: ${{matrix.arch == 'thumb'}}
        run: |
          sudo apt-get install -qq \
            gcc-10-arm-linux-gnueabi \
            libc6-dev-armel-cross \
            qemu-user
          echo "CC=arm-linux-gnueabi-gcc-10 -mthumb --static" >> $GITHUB_ENV
          echo "EXEC=qemu-arm" >> $GITHUB_ENV
          arm-linux-gnueabi-gcc-10 --version
          qemu-arm -version
      # cross-compile with MIPS (32-bit, big-endian)
      - name: install-mips
        if: ${{matrix.arch == 'mips'}}
        run: |
          sudo apt-get install -qq \
            gcc-10-mips-linux-gnu \
            libc6-dev-mips-cross \
            qemu-user
          echo "CC=mips-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
          echo "EXEC=qemu-mips" >> $GITHUB_ENV
          mips-linux-gnu-gcc-10 --version
          qemu-mips -version
      # cross-compile with PowerPC (32-bit, big-endian)
      - name: install-powerpc
        if: ${{matrix.arch == 'powerpc'}}
        run: |
          sudo apt-get install -qq \
            gcc-10-powerpc-linux-gnu \
            libc6-dev-powerpc-cross \
            qemu-user
          echo "CC=powerpc-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
          echo "EXEC=qemu-ppc" >> $GITHUB_ENV
          powerpc-linux-gnu-gcc-10 --version
          qemu-ppc -version

      # make sure example can at least compile
      - name: test-example
        run: |
          sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
          make all CFLAGS+=" \
            -Duser_provided_block_device_read=NULL \
            -Duser_provided_block_device_prog=NULL \
            -Duser_provided_block_device_erase=NULL \
            -Duser_provided_block_device_sync=NULL \
            -include stdio.h"
          rm test.c

      # test configurations
      # normal+reentrant tests
      - name: test-default
        run: |
          make clean
          make test TESTFLAGS+="-nrk"
      # NOR flash: read/prog = 1 block = 4KiB
      - name: test-nor
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
      # SD/eMMC: read/prog = 512 block = 512
      - name: test-emmc
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
      # NAND flash: read/prog = 4KiB block = 32KiB
      - name: test-nand
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
      # other extreme geometries that are useful for various corner cases
      - name: test-no-intrinsics
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_NO_INTRINSICS"
      - name: test-byte-writes
        # it just takes too long to test byte-level writes when in qemu,
        # should be plenty covered by the other configurations
        if: ${{matrix.arch == 'x86_64'}}
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
      - name: test-block-cycles
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_BLOCK_CYCLES=1"
      - name: test-odd-block-count
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
      - name: test-odd-block-size
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"

      # upload coverage for later coverage
      - name: upload-coverage
        uses: actions/upload-artifact@v2
        with:
          name: coverage
          path: coverage
          retention-days: 1

      # update results
      - name: results
        run: |
          mkdir -p results
          make clean
          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR"
          cp lfs.csv results/${{matrix.arch}}.csv
          ./scripts/summary.py results/${{matrix.arch}}.csv
      - name: results-readonly
        run: |
          mkdir -p results
          make clean
          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_READONLY"
          cp lfs.csv results/${{matrix.arch}}-readonly.csv
          ./scripts/summary.py results/${{matrix.arch}}-readonly.csv
      - name: results-threadsafe
        run: |
          mkdir -p results
          make clean
          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_THREADSAFE"
          cp lfs.csv results/${{matrix.arch}}-threadsafe.csv
          ./scripts/summary.py results/${{matrix.arch}}-threadsafe.csv
      - name: results-migrate
        run: |
          mkdir -p results
          make clean
          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_MIGRATE"
          cp lfs.csv results/${{matrix.arch}}-migrate.csv
          ./scripts/summary.py results/${{matrix.arch}}-migrate.csv
      - name: results-error-asserts
        run: |
          mkdir -p results
          make clean
          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'"
          cp lfs.csv results/${{matrix.arch}}-error-asserts.csv
          ./scripts/summary.py results/${{matrix.arch}}-error-asserts.csv
      - name: upload-results
        uses: actions/upload-artifact@v2
        with:
          name: results
          path: results

      # create statuses with results
      - name: collect-status
        run: |
          mkdir -p status
          for f in $(shopt -s nullglob ; echo results/*.csv)
          do
            export STEP="results$(
              echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p')"
            for r in code stack structs
            do
              export CONTEXT="results (${{matrix.arch}}$(
                echo $f | sed -n 's/[^-]*-\(.*\).csv/, \1/p')) / $r"
              export PREV="$(curl -sS \
                "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
                | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                  | select(.context == env.CONTEXT).description
                  | capture("(?<result>[0-9∞]+)").result' \
                || echo 0)"
              export DESCRIPTION="$(./scripts/summary.py $f -f $r -Y | awk '
                NR==2 {printf "%s B",$2}
                NR==2 && ENVIRON["PREV"]+0 != 0 {
                  printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
              jq -n '{
                state: "success",
                context: env.CONTEXT,
                description: env.DESCRIPTION,
                target_job: "${{github.job}} (${{matrix.arch}})",
                target_step: env.STEP}' \
                | tee status/$r-${{matrix.arch}}$(
                  echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p').json
            done
          done
      - name: upload-status
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1

  # run under Valgrind to check for memory errors
  valgrind:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need toml, also pip3 isn't installed by default?
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip
          sudo pip3 install toml
      - name: install-valgrind
        run: |
          sudo apt-get update -qq
          sudo apt-get install -qq valgrind
          valgrind --version
      # normal tests, we don't need to test all geometries
      - name: test-valgrind
        run: make test TESTFLAGS+="-k --valgrind"

  # self-host with littlefs-fuse for a fuzz-like test
  fuse:
    runs-on: ubuntu-20.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need toml, also pip3 isn't installed by default?
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip libfuse-dev
          sudo pip3 install toml
          fusermount -V
          gcc --version
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: littlefs-fuse
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf littlefs-fuse/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs

          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # self-host test
          make -C littlefs-fuse

          littlefs-fuse/lfs --format $LOOP
          littlefs-fuse/lfs $LOOP mount

          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test

  # test migration using littlefs-fuse
  migrate:
    runs-on: ubuntu-20.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need toml, also pip3 isn't installed by default?
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip libfuse-dev
          sudo pip3 install toml
          fusermount -V
          gcc --version
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: v2
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v1
          path: v1
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf v2/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) v2/littlefs

          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # compile v1 and v2
          make -C v1
          make -C v2

          # run self-host test with v1
          v1/lfs --format $LOOP
          v1/lfs $LOOP mount

          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test

          # attempt to migrate
          cd ../..
          fusermount -u mount

          v2/lfs --migrate $LOOP
          v2/lfs $LOOP mount

          # run self-host test with v2 right where we left off
          ls mount
          cd mount/littlefs
          stat .
          ls -flh
          make -B test

  # collect coverage info
  coverage:
    runs-on: ubuntu-20.04
    needs: [test]
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip lcov
          sudo pip3 install toml
      # yes we continue-on-error nearly every step, continue-on-error
      # at job level apparently still marks a job as failed, which isn't
      # what we want
      - uses: actions/download-artifact@v2
        continue-on-error: true
        with:
          name: coverage
          path: coverage
      - name: results-coverage
        continue-on-error: true
        run: |
          mkdir -p results
          lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \
            -o results/coverage.info
          ./scripts/coverage.py results/coverage.info -o results/coverage.csv
      - name: upload-results
        uses: actions/upload-artifact@v2
        with:
          name: results
          path: results
      - name: collect-status
        run: |
          mkdir -p status
          [ -e results/coverage.csv ] || exit 0
          export STEP="results-coverage"
          export CONTEXT="results / coverage"
          export PREV="$(curl -sS \
            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
              | select(.context == env.CONTEXT).description
              | capture("(?<result>[0-9\\.]+)").result' \
            || echo 0)"
          export DESCRIPTION="$(
            ./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
              NR==2 {printf "%.1f%% of %d lines",$4,$3}
              NR==2 && ENVIRON["PREV"]+0 != 0 {
                printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
          jq -n '{
            state: "success",
            context: env.CONTEXT,
            description: env.DESCRIPTION,
            target_job: "${{github.job}}",
            target_step: env.STEP}' \
            | tee status/coverage.json
      - name: upload-status
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1
```
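Since every matrix job boils down to an exported CC/EXEC pair plus TESTFLAGS, the cross-compiled runs can be reproduced outside CI. A rough local equivalent of the thumb job, assuming an Ubuntu 20.04 host with the same package names as the install-thumb step:

```sh
# rough local reproduction of the "thumb" matrix job above (Ubuntu 20.04)
sudo apt-get install -qq gcc-10-arm-linux-gnueabi libc6-dev-armel-cross qemu-user
export CC="arm-linux-gnueabi-gcc-10 -mthumb --static"
export EXEC=qemu-arm

# ram-backed disk for the reentrant tests, as in the install step
mkdir -p disks
sudo mount -t tmpfs -o size=100m tmpfs disks

make clean
make test TESTFLAGS+="-nrk --disk=disks/disk"
```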
.gitignore (2 lines added, vendored)

```diff
@@ -2,6 +2,8 @@
 *.o
 *.d
 *.a
+*.ci
+*.csv
 
 # Testing things
 blocks/
```
							
								
								
									
										429
									
								
								.travis.yml
									
									
									
									
									
								
							
							
						
						
									
										429
									
								
								.travis.yml
									
									
									
									
									
								
							| @@ -1,429 +0,0 @@ | ||||
| # environment variables | ||||
| env: | ||||
|   global: | ||||
|     - CFLAGS=-Werror | ||||
|     - MAKEFLAGS=-j | ||||
|  | ||||
| # cache installation dirs | ||||
| cache: | ||||
|   pip: true | ||||
|   directories: | ||||
|     - $HOME/.cache/apt | ||||
|  | ||||
| # common installation | ||||
| _: &install-common | ||||
|   # need toml, also pip3 isn't installed by default? | ||||
|   - sudo apt-get install python3 python3-pip | ||||
|   - sudo pip3 install toml | ||||
|   # setup a ram-backed disk to speed up reentrant tests | ||||
|   - mkdir disks | ||||
|   - sudo mount -t tmpfs -o size=100m tmpfs disks | ||||
|   - export TFLAGS="$TFLAGS --disk=disks/disk" | ||||
|  | ||||
| # test cases | ||||
| _: &test-example | ||||
|   # make sure example can at least compile | ||||
|   - sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c && | ||||
|     make all CFLAGS+=" | ||||
|         -Duser_provided_block_device_read=NULL | ||||
|         -Duser_provided_block_device_prog=NULL | ||||
|         -Duser_provided_block_device_erase=NULL | ||||
|         -Duser_provided_block_device_sync=NULL | ||||
|         -include stdio.h" | ||||
| # default tests | ||||
| _: &test-default | ||||
|   # normal+reentrant tests | ||||
|   - make test TFLAGS+="-nrk" | ||||
| # common real-life geometries | ||||
| _: &test-nor | ||||
|   # NOR flash: read/prog = 1 block = 4KiB | ||||
|   - make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" | ||||
| _: &test-emmc | ||||
|   # eMMC: read/prog = 512 block = 512 | ||||
|   - make test TFLAGS+="-nrk -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" | ||||
| _: &test-nand | ||||
|   # NAND flash: read/prog = 4KiB block = 32KiB | ||||
|   - make test TFLAGS+="-nrk -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" | ||||
| # other extreme geometries that are useful for testing various corner cases | ||||
| _: &test-no-intrinsics | ||||
|   - make test TFLAGS+="-nrk -DLFS_NO_INTRINSICS" | ||||
| _: &test-no-inline | ||||
|   - make test TFLAGS+="-nrk -DLFS_INLINE_MAX=0" | ||||
| _: &test-byte-writes | ||||
|   - make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" | ||||
| _: &test-block-cycles | ||||
|   - make test TFLAGS+="-nrk -DLFS_BLOCK_CYCLES=1" | ||||
| _: &test-odd-block-count | ||||
|   - make test TFLAGS+="-nrk -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" | ||||
| _: &test-odd-block-size | ||||
|   - make test TFLAGS+="-nrk -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" | ||||
|  | ||||
| # report size  | ||||
| _: &report-size | ||||
|   # compile and find the code size with the smallest configuration | ||||
|   - make -j1 clean size | ||||
|         OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" | ||||
|         CFLAGS+="-DLFS_NO_ASSERT -DLFS_NO_DEBUG -DLFS_NO_WARN -DLFS_NO_ERROR" | ||||
|         | tee sizes | ||||
|   # update status if we succeeded, compare with master if possible | ||||
|   - | | ||||
|     if [ "$TRAVIS_TEST_RESULT" -eq 0 ] | ||||
|     then | ||||
|         CURR=$(tail -n1 sizes | awk '{print $1}') | ||||
|         PREV=$(curl -u "$GEKY_BOT_STATUSES" https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \ | ||||
|             | jq -re "select(.sha != \"$TRAVIS_COMMIT\") | ||||
|                 | .statuses[] | select(.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\").description | ||||
|                 | capture(\"code size is (?<size>[0-9]+)\").size" \ | ||||
|             || echo 0) | ||||
|    | ||||
|         STATUS="Passed, code size is ${CURR}B" | ||||
|         if [ "$PREV" -ne 0 ] | ||||
|         then | ||||
|             STATUS="$STATUS ($(python -c "print '%+.2f' % (100*($CURR-$PREV)/$PREV.0)")%)" | ||||
|         fi | ||||
|     fi | ||||
|  | ||||
| # stage control | ||||
| stages: | ||||
|   - name: test | ||||
|   - name: deploy | ||||
|     if: branch = master AND type = push | ||||
|  | ||||
| # job control | ||||
| jobs: | ||||
|   # native testing | ||||
|   - &x86 | ||||
|     stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-x86 | ||||
|     install: *install-common | ||||
|     script: [*test-example, *report-size] | ||||
|   - {<<: *x86, script: [*test-default,          *report-size]} | ||||
|   - {<<: *x86, script: [*test-nor,              *report-size]} | ||||
|   - {<<: *x86, script: [*test-emmc,             *report-size]} | ||||
|   - {<<: *x86, script: [*test-nand,             *report-size]} | ||||
|   - {<<: *x86, script: [*test-no-intrinsics,    *report-size]} | ||||
|   - {<<: *x86, script: [*test-no-inline,        *report-size]} | ||||
|   - {<<: *x86, script: [*test-byte-writes,      *report-size]} | ||||
|   - {<<: *x86, script: [*test-block-cycles,     *report-size]} | ||||
|   - {<<: *x86, script: [*test-odd-block-count,  *report-size]} | ||||
|   - {<<: *x86, script: [*test-odd-block-size,   *report-size]} | ||||
|  | ||||
|   # cross-compile with ARM (thumb mode) | ||||
|   - &arm | ||||
|     stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-arm | ||||
|       - CC="arm-linux-gnueabi-gcc --static -mthumb" | ||||
|       - TFLAGS="$TFLAGS --exec=qemu-arm" | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install | ||||
|             gcc-arm-linux-gnueabi | ||||
|             libc6-dev-armel-cross | ||||
|             qemu-user | ||||
|       - arm-linux-gnueabi-gcc --version | ||||
|       - qemu-arm -version | ||||
|     script: [*test-example, *report-size] | ||||
|   - {<<: *arm, script: [*test-default,          *report-size]} | ||||
|   - {<<: *arm, script: [*test-nor,              *report-size]} | ||||
|   - {<<: *arm, script: [*test-emmc,             *report-size]} | ||||
|   - {<<: *arm, script: [*test-nand,             *report-size]} | ||||
|   - {<<: *arm, script: [*test-no-intrinsics,    *report-size]} | ||||
|   - {<<: *arm, script: [*test-no-inline,        *report-size]} | ||||
|   # it just takes way to long to run byte-level writes in qemu, | ||||
|   # note this is still tested in the native tests | ||||
|   #- {<<: *arm, script: [*test-byte-writes,      *report-size]} | ||||
|   - {<<: *arm, script: [*test-block-cycles,     *report-size]} | ||||
|   - {<<: *arm, script: [*test-odd-block-count,  *report-size]} | ||||
|   - {<<: *arm, script: [*test-odd-block-size,   *report-size]} | ||||
|  | ||||
|   # cross-compile with MIPS | ||||
|   - &mips | ||||
|     stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-mips | ||||
|       - CC="mips-linux-gnu-gcc --static" | ||||
|       - TFLAGS="$TFLAGS --exec=qemu-mips" | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install | ||||
|             gcc-mips-linux-gnu | ||||
|             libc6-dev-mips-cross | ||||
|             qemu-user | ||||
|       - mips-linux-gnu-gcc --version | ||||
|       - qemu-mips -version | ||||
|     script: [*test-example, *report-size] | ||||
|   - {<<: *mips, script: [*test-default,          *report-size]} | ||||
|   - {<<: *mips, script: [*test-nor,              *report-size]} | ||||
|   - {<<: *mips, script: [*test-emmc,             *report-size]} | ||||
|   - {<<: *mips, script: [*test-nand,             *report-size]} | ||||
|   - {<<: *mips, script: [*test-no-intrinsics,    *report-size]} | ||||
|   - {<<: *mips, script: [*test-no-inline,        *report-size]} | ||||
|   # it just takes way to long to run byte-level writes in qemu, | ||||
|   # note this is still tested in the native tests | ||||
|   #- {<<: *mips, script: [*test-byte-writes,      *report-size]} | ||||
|   - {<<: *mips, script: [*test-block-cycles,     *report-size]} | ||||
|   - {<<: *mips, script: [*test-odd-block-count,  *report-size]} | ||||
|   - {<<: *mips, script: [*test-odd-block-size,   *report-size]} | ||||
|  | ||||
|   # cross-compile with PowerPC | ||||
|   - &powerpc | ||||
|     stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-powerpc | ||||
|       - CC="powerpc-linux-gnu-gcc --static" | ||||
|       - TFLAGS="$TFLAGS --exec=qemu-ppc" | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install | ||||
|             gcc-powerpc-linux-gnu | ||||
|             libc6-dev-powerpc-cross | ||||
|             qemu-user | ||||
|       - powerpc-linux-gnu-gcc --version | ||||
|       - qemu-ppc -version | ||||
|     script: [*test-example, *report-size] | ||||
|   - {<<: *powerpc, script: [*test-default,          *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-nor,              *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-emmc,             *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-nand,             *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-no-intrinsics,    *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-no-inline,        *report-size]} | ||||
|   # it just takes way to long to run byte-level writes in qemu, | ||||
|   # note this is still tested in the native tests | ||||
|   #- {<<: *powerpc, script: [*test-byte-writes,      *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-block-cycles,     *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-odd-block-count,  *report-size]} | ||||
|   - {<<: *powerpc, script: [*test-odd-block-size,   *report-size]} | ||||
|  | ||||
|   # test under valgrind, checking for memory errors | ||||
|   - &valgrind | ||||
|     stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-valgrind | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install valgrind | ||||
|       - valgrind --version | ||||
|     script: | ||||
|       - make test TFLAGS+="-k --valgrind" | ||||
|  | ||||
|   # self-host with littlefs-fuse for fuzz test | ||||
|   - stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-fuse | ||||
|     if: branch !~ -prefix$ | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install libfuse-dev | ||||
|       - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 | ||||
|       - fusermount -V | ||||
|       - gcc --version | ||||
|  | ||||
|       # setup disk for littlefs-fuse | ||||
|       - rm -rf littlefs-fuse/littlefs/* | ||||
|       - cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs | ||||
|  | ||||
|       - mkdir mount | ||||
|       - sudo chmod a+rw /dev/loop0 | ||||
|       - dd if=/dev/zero bs=512 count=128K of=disk | ||||
|       - losetup /dev/loop0 disk | ||||
|     script: | ||||
|       # self-host test | ||||
|       - make -C littlefs-fuse | ||||
|  | ||||
|       - littlefs-fuse/lfs --format /dev/loop0 | ||||
|       - littlefs-fuse/lfs /dev/loop0 mount | ||||
|  | ||||
|       - ls mount | ||||
|       - mkdir mount/littlefs | ||||
|       - cp -r $(git ls-tree --name-only HEAD) mount/littlefs | ||||
|       - cd mount/littlefs | ||||
|       - stat . | ||||
|       - ls -flh | ||||
|       - make -B test | ||||
|  | ||||
|   # test migration using littlefs-fuse | ||||
|   - stage: test | ||||
|     env: | ||||
|       - NAME=littlefs-migration | ||||
|     if: branch !~ -prefix$ | ||||
|     install: | ||||
|       - *install-common | ||||
|       - sudo apt-get install libfuse-dev | ||||
|       - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 v2 | ||||
|       - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v1 v1 | ||||
|       - fusermount -V | ||||
|       - gcc --version | ||||
|  | ||||
|       # setup disk for littlefs-fuse | ||||
|       - rm -rf v2/littlefs/* | ||||
|       - cp -r $(git ls-tree --name-only HEAD) v2/littlefs | ||||
|  | ||||
|       - mkdir mount | ||||
|       - sudo chmod a+rw /dev/loop0 | ||||
|       - dd if=/dev/zero bs=512 count=128K of=disk | ||||
|       - losetup /dev/loop0 disk | ||||
|     script: | ||||
|       # compile v1 and v2 | ||||
|       - make -C v1 | ||||
|       - make -C v2 | ||||
|  | ||||
|       # run self-host test with v1 | ||||
|       - v1/lfs --format /dev/loop0 | ||||
|       - v1/lfs /dev/loop0 mount | ||||
|  | ||||
|       - ls mount | ||||
|       - mkdir mount/littlefs | ||||
|       - cp -r $(git ls-tree --name-only HEAD) mount/littlefs | ||||
|       - cd mount/littlefs | ||||
|       - stat . | ||||
|       - ls -flh | ||||
|       - make -B test | ||||
|  | ||||
|       # attempt to migrate | ||||
|       - cd ../.. | ||||
|       - fusermount -u mount | ||||
|  | ||||
|       - v2/lfs --migrate /dev/loop0 | ||||
|       - v2/lfs /dev/loop0 mount | ||||
|  | ||||
|       # run self-host test with v2 right where we left off | ||||
|       - ls mount | ||||
|       - cd mount/littlefs | ||||
|       - stat . | ||||
|       - ls -flh | ||||
|       - make -B test | ||||
|  | ||||
|   # automatically create releases | ||||
|   - stage: deploy | ||||
|     env: | ||||
|       - NAME=deploy | ||||
|     script: | ||||
|       - | | ||||
|         bash << 'SCRIPT' | ||||
|         set -ev | ||||
|         # Find version defined in lfs.h | ||||
|         LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3) | ||||
|         LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16))) | ||||
|         LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >>  0))) | ||||
|         # Grab latests patch from repo tags, default to 0, needs finagling | ||||
|         # to get past github's pagination api | ||||
|         PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR. | ||||
|         PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \ | ||||
|             | sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \ | ||||
|             || echo $PREV_URL) | ||||
|         LFS_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \ | ||||
|             | jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g") | ||||
|                 .captures[].string | tonumber) | max + 1' \ | ||||
|             || echo 0) | ||||
|         # We have our new version | ||||
|         LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH" | ||||
|         echo "VERSION $LFS_VERSION" | ||||
|         # Check that we're the most recent commit | ||||
|         CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \ | ||||
|             https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \ | ||||
|             | jq -re '.sha') | ||||
|         [ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0 | ||||
|         # Create major branch | ||||
|         git branch v$LFS_VERSION_MAJOR HEAD | ||||
|         # Create major prefix branch | ||||
|         git config user.name "geky bot" | ||||
|         git config user.email "bot@geky.net" | ||||
|         git fetch https://github.com/$TRAVIS_REPO_SLUG.git \ | ||||
|             --depth=50 v$LFS_VERSION_MAJOR-prefix || true | ||||
|         ./scripts/prefix.py lfs$LFS_VERSION_MAJOR | ||||
|         git branch v$LFS_VERSION_MAJOR-prefix $( \ | ||||
|             git commit-tree $(git write-tree) \ | ||||
|                 $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \ | ||||
|                 -p HEAD \ | ||||
|                 -m "Generated v$LFS_VERSION_MAJOR prefixes") | ||||
|         git reset --hard | ||||
|         # Update major version branches (vN and vN-prefix) | ||||
|         git push --atomic https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \ | ||||
|             v$LFS_VERSION_MAJOR \ | ||||
|             v$LFS_VERSION_MAJOR-prefix | ||||
|         # Build release notes | ||||
|         PREV=$(git tag --sort=-v:refname -l "v*" | head -1) | ||||
|         if [ ! -z "$PREV" ] | ||||
|         then | ||||
|             echo "PREV $PREV" | ||||
|             CHANGES=$(git log --oneline $PREV.. --grep='^Merge' --invert-grep) | ||||
|             printf "CHANGES\n%s\n\n" "$CHANGES" | ||||
|         fi | ||||
|         case ${GEKY_BOT_DRAFT:-minor} in | ||||
|             true)  DRAFT=true ;; | ||||
|             minor) DRAFT=$(jq -R 'endswith(".0")' <<< "$LFS_VERSION") ;; | ||||
|             false) DRAFT=false ;; | ||||
|         esac | ||||
|         # Create the release and patch version tag (vN.N.N) | ||||
|         curl -f -u "$GEKY_BOT_RELEASES" -X POST \ | ||||
|             https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \ | ||||
|             -d "{ | ||||
|                 \"tag_name\": \"$LFS_VERSION\", | ||||
|                 \"name\": \"${LFS_VERSION%.0}\", | ||||
|                 \"target_commitish\": \"$TRAVIS_COMMIT\", | ||||
|                 \"draft\": $DRAFT, | ||||
|                 \"body\": $(jq -sR '.' <<< "$CHANGES") | ||||
|             }" #" | ||||
|         SCRIPT | ||||
|  | ||||
| # manage statuses | ||||
| before_install: | ||||
|   - | | ||||
|     # don't clobber other (not us) failures | ||||
|     if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|         | jq -e ".statuses[] | select( | ||||
|             .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and | ||||
|             .state == \"failure\" and | ||||
|             (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))" | ||||
|     then | ||||
|         curl -u "$GEKY_BOT_STATUSES" -X POST \ | ||||
|             https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|             -d "{ | ||||
|                 \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\", | ||||
|                 \"state\": \"pending\", | ||||
|                 \"description\": \"${STATUS:-In progress}\", | ||||
|                 \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\" | ||||
|             }" | ||||
|     fi | ||||
|  | ||||
| after_failure: | ||||
|   - | | ||||
|     # don't clobber other (not us) failures | ||||
|     if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|         | jq -e ".statuses[] | select( | ||||
|             .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and | ||||
|             .state == \"failure\" and | ||||
|             (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))" | ||||
|     then | ||||
|         curl -u "$GEKY_BOT_STATUSES" -X POST \ | ||||
|             https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|             -d "{ | ||||
|                 \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\", | ||||
|                 \"state\": \"failure\", | ||||
|                 \"description\": \"${STATUS:-Failed}\", | ||||
|                 \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\" | ||||
|             }" | ||||
|     fi | ||||
|  | ||||
| after_success: | ||||
|   - | | ||||
|     # don't clobber other (not our) failures | ||||
|     # only update if we were the last job to mark in progress; | ||||
|     # this isn't perfect, but it's probably good enough | ||||
|     if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|         | jq -e ".statuses[] | select( | ||||
|             .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and | ||||
|             (.state == \"failure\" or .state == \"pending\") and | ||||
|             (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))" | ||||
|     then | ||||
|         curl -u "$GEKY_BOT_STATUSES" -X POST \ | ||||
|             https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ | ||||
|             -d "{ | ||||
|                 \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\", | ||||
|                 \"state\": \"success\", | ||||
|                 \"description\": \"${STATUS:-Passed}\", | ||||
|                 \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\" | ||||
|             }" | ||||
|     fi | ||||
| @@ -1,3 +1,4 @@ | ||||
| Copyright (c) 2022, The littlefs authors.   | ||||
| Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  | ||||
| Redistribution and use in source and binary forms, with or without modification, | ||||
|   | ||||
							
								
								
									
156 Makefile
									
									
									
									
									
								
							| @@ -1,69 +1,173 @@ | ||||
| TARGET = lfs.a | ||||
| ifneq ($(wildcard test.c main.c),) | ||||
| override TARGET = lfs | ||||
| ifdef BUILDDIR | ||||
| # make sure BUILDDIR ends with a slash | ||||
| override BUILDDIR := $(BUILDDIR)/ | ||||
| # bit of a hack, but we want to make sure the BUILDDIR directory structure | ||||
| # is correct before any commands run | ||||
| $(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \ | ||||
| 	$(BUILDDIR) \ | ||||
| 	$(BUILDDIR)bd \ | ||||
| 	$(BUILDDIR)tests)) | ||||
| endif | ||||
|  | ||||
| CC ?= gcc | ||||
| AR ?= ar | ||||
| SIZE ?= size | ||||
| # overridable target/src/tools/flags/etc | ||||
| ifneq ($(wildcard test.c main.c),) | ||||
| TARGET ?= $(BUILDDIR)lfs | ||||
| else | ||||
| TARGET ?= $(BUILDDIR)lfs.a | ||||
| endif | ||||
|  | ||||
| SRC += $(wildcard *.c bd/*.c) | ||||
| OBJ := $(SRC:.c=.o) | ||||
| DEP := $(SRC:.c=.d) | ||||
| ASM := $(SRC:.c=.s) | ||||
|  | ||||
| CC      ?= gcc | ||||
| AR      ?= ar | ||||
| SIZE    ?= size | ||||
| CTAGS   ?= ctags | ||||
| NM      ?= nm | ||||
| OBJDUMP ?= objdump | ||||
| LCOV    ?= lcov | ||||
|  | ||||
| SRC ?= $(wildcard *.c) | ||||
| OBJ := $(SRC:%.c=$(BUILDDIR)%.o) | ||||
| DEP := $(SRC:%.c=$(BUILDDIR)%.d) | ||||
| ASM := $(SRC:%.c=$(BUILDDIR)%.s) | ||||
| CGI := $(SRC:%.c=$(BUILDDIR)%.ci) | ||||
|  | ||||
| ifdef DEBUG | ||||
| override CFLAGS += -O0 -g3 | ||||
| override CFLAGS += -O0 | ||||
| else | ||||
| override CFLAGS += -Os | ||||
| endif | ||||
| ifdef WORD | ||||
| override CFLAGS += -m$(WORD) | ||||
| endif | ||||
| ifdef TRACE | ||||
| override CFLAGS += -DLFS_YES_TRACE | ||||
| endif | ||||
| override CFLAGS += -g3 | ||||
| override CFLAGS += -I. | ||||
| override CFLAGS += -std=c99 -Wall -pedantic | ||||
| override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef | ||||
| # Remove missing-field-initializers because of GCC bug | ||||
| override CFLAGS += -Wno-missing-field-initializers | ||||
|  | ||||
| ifdef VERBOSE | ||||
| override TFLAGS += -v | ||||
| override TESTFLAGS     += -v | ||||
| override CALLSFLAGS    += -v | ||||
| override CODEFLAGS     += -v | ||||
| override DATAFLAGS     += -v | ||||
| override STACKFLAGS    += -v | ||||
| override STRUCTSFLAGS  += -v | ||||
| override COVERAGEFLAGS += -v | ||||
| endif | ||||
| ifdef EXEC | ||||
| override TESTFLAGS += --exec="$(EXEC)" | ||||
| endif | ||||
| ifdef COVERAGE | ||||
| override TESTFLAGS += --coverage | ||||
| endif | ||||
| ifdef BUILDDIR | ||||
| override TESTFLAGS     += --build-dir="$(BUILDDIR:/=)" | ||||
| override CALLSFLAGS    += --build-dir="$(BUILDDIR:/=)" | ||||
| override CODEFLAGS     += --build-dir="$(BUILDDIR:/=)" | ||||
| override DATAFLAGS     += --build-dir="$(BUILDDIR:/=)" | ||||
| override STACKFLAGS    += --build-dir="$(BUILDDIR:/=)" | ||||
| override STRUCTSFLAGS  += --build-dir="$(BUILDDIR:/=)" | ||||
| override COVERAGEFLAGS += --build-dir="$(BUILDDIR:/=)" | ||||
| endif | ||||
| ifneq ($(NM),nm) | ||||
| override CODEFLAGS += --nm-tool="$(NM)" | ||||
| override DATAFLAGS += --nm-tool="$(NM)" | ||||
| endif | ||||
| ifneq ($(OBJDUMP),objdump) | ||||
| override STRUCTSFLAGS += --objdump-tool="$(OBJDUMP)" | ||||
| endif | ||||
|  | ||||
|  | ||||
| all: $(TARGET) | ||||
| # commands | ||||
| .PHONY: all build | ||||
| all build: $(TARGET) | ||||
|  | ||||
| .PHONY: asm | ||||
| asm: $(ASM) | ||||
|  | ||||
| .PHONY: size | ||||
| size: $(OBJ) | ||||
| 	$(SIZE) -t $^ | ||||
|  | ||||
| .PHONY: tags | ||||
| tags: | ||||
| 	$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC) | ||||
|  | ||||
| .PHONY: calls | ||||
| calls: $(CGI) | ||||
| 	./scripts/calls.py $^ $(CALLSFLAGS) | ||||
|  | ||||
| .PHONY: test | ||||
| test: | ||||
| 	./scripts/test.py $(TFLAGS) | ||||
| 	./scripts/test.py $(TESTFLAGS) | ||||
| .SECONDEXPANSION: | ||||
| test%: tests/test$$(firstword $$(subst \#, ,%)).toml | ||||
| 	./scripts/test.py $@ $(TFLAGS) | ||||
| 	./scripts/test.py $@ $(TESTFLAGS) | ||||
|  | ||||
| .PHONY: code | ||||
| code: $(OBJ) | ||||
| 	./scripts/code.py $^ -S $(CODEFLAGS) | ||||
|  | ||||
| .PHONY: data | ||||
| data: $(OBJ) | ||||
| 	./scripts/data.py $^ -S $(DATAFLAGS) | ||||
|  | ||||
| .PHONY: stack | ||||
| stack: $(CGI) | ||||
| 	./scripts/stack.py $^ -S $(STACKFLAGS) | ||||
|  | ||||
| .PHONY: structs | ||||
| structs: $(OBJ) | ||||
| 	./scripts/structs.py $^ -S $(STRUCTSFLAGS) | ||||
|  | ||||
| .PHONY: coverage | ||||
| coverage: | ||||
| 	./scripts/coverage.py $(BUILDDIR)tests/*.toml.info -s $(COVERAGEFLAGS) | ||||
|  | ||||
| .PHONY: summary | ||||
| summary: $(BUILDDIR)lfs.csv | ||||
| 	./scripts/summary.py -Y $^ $(SUMMARYFLAGS) | ||||
|  | ||||
|  | ||||
| # rules | ||||
| -include $(DEP) | ||||
| .SUFFIXES: | ||||
|  | ||||
| lfs: $(OBJ) | ||||
| $(BUILDDIR)lfs: $(OBJ) | ||||
| 	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ | ||||
|  | ||||
| %.a: $(OBJ) | ||||
| $(BUILDDIR)lfs.a: $(OBJ) | ||||
| 	$(AR) rcs $@ $^ | ||||
|  | ||||
| %.o: %.c | ||||
| $(BUILDDIR)lfs.csv: $(OBJ) $(CGI) | ||||
| 	./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o $@ | ||||
| 	./scripts/data.py $(OBJ) -q -m $@ $(DATAFLAGS) -o $@ | ||||
| 	./scripts/stack.py $(CGI) -q -m $@ $(STACKFLAGS) -o $@ | ||||
| 	./scripts/structs.py $(OBJ) -q -m $@ $(STRUCTSFLAGS) -o $@ | ||||
| 	$(if $(COVERAGE),\ | ||||
| 		./scripts/coverage.py $(BUILDDIR)tests/*.toml.info \ | ||||
| 			-q -m $@ $(COVERAGEFLAGS) -o $@) | ||||
|  | ||||
| $(BUILDDIR)%.o: %.c | ||||
| 	$(CC) -c -MMD $(CFLAGS) $< -o $@ | ||||
|  | ||||
| %.s: %.c | ||||
| $(BUILDDIR)%.s: %.c | ||||
| 	$(CC) -S $(CFLAGS) $< -o $@ | ||||
|  | ||||
| # gcc depends on the output file for intermediate file names, so | ||||
| # we can't omit the .o output. We also need to serialize with the | ||||
| # normal .o rule because otherwise we can end up with multiprocess | ||||
| # problems with two instances of gcc modifying the same .o | ||||
| $(BUILDDIR)%.ci: %.c | $(BUILDDIR)%.o | ||||
| 	$(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $| | ||||
|  | ||||
| # clean everything | ||||
| .PHONY: clean | ||||
| clean: | ||||
| 	rm -f $(TARGET) | ||||
| 	rm -f $(BUILDDIR)lfs | ||||
| 	rm -f $(BUILDDIR)lfs.a | ||||
| 	rm -f $(BUILDDIR)lfs.csv | ||||
| 	rm -f $(OBJ) | ||||
| 	rm -f $(CGI) | ||||
| 	rm -f $(DEP) | ||||
| 	rm -f $(ASM) | ||||
| 	rm -f tests/*.toml.* | ||||
| 	rm -f $(BUILDDIR)tests/*.toml.* | ||||
|   | ||||
| @@ -192,7 +192,7 @@ More details on how littlefs works can be found in [DESIGN.md](DESIGN.md) and | ||||
| ## Testing | ||||
|  | ||||
| The littlefs comes with a test suite designed to run on a PC using the | ||||
| [emulated block device](emubd/lfs_emubd.h) found in the emubd directory. | ||||
| [emulated block device](bd/lfs_testbd.h) found in the `bd` directory. | ||||
| The tests assume a Linux environment and can be started with make: | ||||
|  | ||||
| ``` bash | ||||
| @@ -221,6 +221,11 @@ License Identifiers that are here available: http://spdx.org/licenses/ | ||||
| - [littlefs-js] - A javascript wrapper for littlefs. I'm not sure why you would | ||||
|   want this, but it is handy for demos.  You can see it in action | ||||
|   [here][littlefs-js-demo]. | ||||
|    | ||||
| - [littlefs-python] - A Python wrapper for littlefs. The project allows you | ||||
|   to create images of the filesystem on your PC: check if littlefs fits | ||||
|   your needs, create images for a later download to the target memory, or | ||||
|   inspect the contents of a binary image of the target memory. | ||||
|  | ||||
| - [mklfs] - A command line tool built by the [Lua RTOS] guys for making | ||||
|   littlefs images from a host PC. Supports Windows, Mac OS, and Linux. | ||||
| @@ -250,3 +255,4 @@ License Identifiers that are here available: http://spdx.org/licenses/ | ||||
| [LittleFileSystem]: https://os.mbed.com/docs/mbed-os/v5.12/apis/littlefilesystem.html | ||||
| [SPIFFS]: https://github.com/pellepl/spiffs | ||||
| [Dhara]: https://github.com/dlbeer/dhara | ||||
| [littlefs-python]: https://pypi.org/project/littlefs-python/ | ||||
|   | ||||
							
								
								
									
14 SPEC.md
									
									
									
									
									
								
							| @@ -233,19 +233,19 @@ Metadata tag fields: | ||||
|    into a 3-bit abstract type and an 8-bit chunk field. Note that the value | ||||
|    `0x000` is invalid and not assigned a type. | ||||
|  | ||||
| 3. **Type1 (3-bits)** - Abstract type of the tag. Groups the tags into | ||||
|    8 categories that facilitate bitmasked lookups. | ||||
|     1. **Type1 (3-bits)** - Abstract type of the tag. Groups the tags into | ||||
|        8 categories that facilitate bitmasked lookups. | ||||
|  | ||||
| 4. **Chunk (8-bits)** - Chunk field used for various purposes by the different | ||||
|    abstract types.  type1+chunk+id form a unique identifier for each tag in the | ||||
|    metadata block. | ||||
|     2. **Chunk (8-bits)** - Chunk field used for various purposes by the different | ||||
|        abstract types.  type1+chunk+id form a unique identifier for each tag in the | ||||
|        metadata block. | ||||
|  | ||||
| 5. **Id (10-bits)** - File id associated with the tag. Each file in a metadata | ||||
| 3. **Id (10-bits)** - File id associated with the tag. Each file in a metadata | ||||
|    block gets a unique id which is used to associate tags with that file. The | ||||
|    special value `0x3ff` is used for any tags that are not associated with a | ||||
|    file, such as directory and global metadata. | ||||
|  | ||||
| 6. **Length (10-bits)** - Length of the data in bytes. The special value | ||||
| 4. **Length (10-bits)** - Length of the data in bytes. The special value | ||||
|    `0x3ff` indicates that this tag has been deleted. | ||||
|  | ||||
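Read together, these fields (along with the leading valid bit) pack into a single 32-bit tag. A minimal sketch of unpacking them with shifts and masks, assuming the widths above; the helper names are illustrative, not part of the spec:

``` c
#include <stdint.h>

// Illustrative helpers unpacking the tag fields described above.
// Layout, MSB first: 1 valid bit, 3-bit type1, 8-bit chunk,
// 10-bit id, 10-bit length.
static inline uint8_t  tag_type1(uint32_t tag)  { return (tag >> 28) & 0x7;   }
static inline uint8_t  tag_chunk(uint32_t tag)  { return (tag >> 20) & 0xff;  }
static inline uint16_t tag_id(uint32_t tag)     { return (tag >> 10) & 0x3ff; }
static inline uint16_t tag_length(uint32_t tag) { return (tag >>  0) & 0x3ff; }
```
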
| ## Metadata types | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * Block device emulated in a file | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
| @@ -10,6 +11,10 @@ | ||||
| #include <unistd.h> | ||||
| #include <errno.h> | ||||
|  | ||||
| #ifdef _WIN32 | ||||
| #include <windows.h> | ||||
| #endif | ||||
|  | ||||
| int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path, | ||||
|         const struct lfs_filebd_config *bdcfg) { | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_createcfg(%p {.context=%p, " | ||||
| @@ -27,7 +32,12 @@ int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path, | ||||
|     bd->cfg = bdcfg; | ||||
|  | ||||
|     // open file | ||||
|     #ifdef _WIN32 | ||||
|     bd->fd = open(path, O_RDWR | O_CREAT | O_BINARY, 0666); | ||||
|     #else | ||||
|     bd->fd = open(path, O_RDWR | O_CREAT, 0666); | ||||
|     #endif | ||||
|  | ||||
|     if (bd->fd < 0) { | ||||
|         int err = -errno; | ||||
|         LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", err); | ||||
| @@ -80,7 +90,7 @@ int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block, | ||||
|     LFS_ASSERT(size % cfg->read_size == 0); | ||||
|     LFS_ASSERT(block < cfg->block_count); | ||||
|  | ||||
|     // zero for reproducability (in case file is truncated) | ||||
|     // zero for reproducibility (in case file is truncated) | ||||
|     if (bd->cfg->erase_value != -1) { | ||||
|         memset(buffer, bd->cfg->erase_value, size); | ||||
|     } | ||||
| @@ -193,7 +203,11 @@ int lfs_filebd_sync(const struct lfs_config *cfg) { | ||||
|     LFS_FILEBD_TRACE("lfs_filebd_sync(%p)", (void*)cfg); | ||||
|     // file sync | ||||
|     lfs_filebd_t *bd = cfg->context; | ||||
|     #ifdef _WIN32 | ||||
|     int err = FlushFileBuffers((HANDLE)_get_osfhandle(bd->fd)) ? 0 : -1; | ||||
|     #else | ||||
|     int err = fsync(bd->fd); | ||||
|     #endif | ||||
|     if (err) { | ||||
|         err = -errno; | ||||
|         LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", err); | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * Block device emulated in a file | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * Block device emulated in RAM | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
| @@ -32,10 +33,12 @@ int lfs_rambd_createcfg(const struct lfs_config *cfg, | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // zero for reproducability? | ||||
|     // zero for reproducibility? | ||||
|     if (bd->cfg->erase_value != -1) { | ||||
|         memset(bd->buffer, bd->cfg->erase_value, | ||||
|                 cfg->block_size * cfg->block_count); | ||||
|     } else { | ||||
|         memset(bd->buffer, 0, cfg->block_size * cfg->block_count); | ||||
|     } | ||||
|  | ||||
|     LFS_RAMBD_TRACE("lfs_rambd_createcfg -> %d", 0); | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * Block device emulated in RAM | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
|   | ||||
| @@ -2,6 +2,7 @@ | ||||
|  * Testing block device, wraps filebd and rambd while providing a bunch | ||||
|  * of hooks for testing littlefs in various conditions. | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
| @@ -207,7 +208,7 @@ int lfs_testbd_prog(const struct lfs_config *cfg, lfs_block_t block, | ||||
|         bd->power_cycles -= 1; | ||||
|         if (bd->power_cycles == 0) { | ||||
|             // sync to make sure we persist the last changes | ||||
|             assert(lfs_testbd_rawsync(cfg) == 0); | ||||
|             LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0); | ||||
|             // simulate power loss | ||||
|             exit(33); | ||||
|         } | ||||
| @@ -254,7 +255,7 @@ int lfs_testbd_erase(const struct lfs_config *cfg, lfs_block_t block) { | ||||
|         bd->power_cycles -= 1; | ||||
|         if (bd->power_cycles == 0) { | ||||
|             // sync to make sure we persist the last changes | ||||
|             assert(lfs_testbd_rawsync(cfg) == 0); | ||||
|             LFS_ASSERT(lfs_testbd_rawsync(cfg) == 0); | ||||
|             // simulate power loss | ||||
|             exit(33); | ||||
|         } | ||||
|   | ||||
| @@ -2,6 +2,7 @@ | ||||
|  * Testing block device, wraps filebd and rambd while providing a bunch | ||||
|  * of hooks for testing littlefs in various conditions. | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
|   | ||||
							
								
								
									
86 lfs.h
									
									
									
									
									
								
							| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * The little filesystem | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
| @@ -9,6 +10,7 @@ | ||||
|  | ||||
| #include <stdint.h> | ||||
| #include <stdbool.h> | ||||
| #include "lfs_util.h" | ||||
|  | ||||
| #ifdef __cplusplus | ||||
| extern "C" | ||||
| @@ -21,7 +23,7 @@ extern "C" | ||||
| // Software library version | ||||
| // Major (top-nibble), incremented on backwards incompatible changes | ||||
| // Minor (bottom-nibble), incremented on feature additions | ||||
| #define LFS_VERSION 0x00020002 | ||||
| #define LFS_VERSION 0x00020005 | ||||
| #define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16)) | ||||
| #define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >>  0)) | ||||
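As a worked reading of these macros: with `LFS_VERSION` at `0x00020005`, the major is 2 and the minor is 5. A small sanity-check sketch:

``` c
#include <assert.h>
#include "lfs.h"

// LFS_VERSION == 0x00020005 decodes to major 2 (disk format), minor 5.
static void check_version(void) {
    assert(LFS_VERSION_MAJOR == 2); // 0x00020005 >> 16
    assert(LFS_VERSION_MINOR == 5); // 0x00020005 & 0xffff
}
```
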
|  | ||||
| @@ -123,20 +125,25 @@ enum lfs_type { | ||||
| enum lfs_open_flags { | ||||
|     // open flags | ||||
|     LFS_O_RDONLY = 1,         // Open a file as read only | ||||
| #ifndef LFS_READONLY | ||||
|     LFS_O_WRONLY = 2,         // Open a file as write only | ||||
|     LFS_O_RDWR   = 3,         // Open a file as read and write | ||||
|     LFS_O_CREAT  = 0x0100,    // Create a file if it does not exist | ||||
|     LFS_O_EXCL   = 0x0200,    // Fail if a file already exists | ||||
|     LFS_O_TRUNC  = 0x0400,    // Truncate the existing file to zero size | ||||
|     LFS_O_APPEND = 0x0800,    // Move to end of file on every write | ||||
| #endif | ||||
|  | ||||
|     // internally used flags | ||||
| #ifndef LFS_READONLY | ||||
|     LFS_F_DIRTY   = 0x010000, // File does not match storage | ||||
|     LFS_F_WRITING = 0x020000, // File has been written since last flush | ||||
| #endif | ||||
|     LFS_F_READING = 0x040000, // File has been read since last flush | ||||
|     LFS_F_ERRED   = 0x080000, // An error occured during write | ||||
| #ifndef LFS_READONLY | ||||
|     LFS_F_ERRED   = 0x080000, // An error occurred during write | ||||
| #endif | ||||
|     LFS_F_INLINE  = 0x100000, // Currently inlined in directory entry | ||||
|     LFS_F_OPENED  = 0x200000, // File has been opened | ||||
| }; | ||||
|  | ||||
| // File seek flags | ||||
| @@ -153,45 +160,55 @@ struct lfs_config { | ||||
|     // information to the block device operations | ||||
|     void *context; | ||||
|  | ||||
|     // Read a region in a block. Negative error codes are propogated | ||||
|     // Read a region in a block. Negative error codes are propagated | ||||
|     // to the user. | ||||
|     int (*read)(const struct lfs_config *c, lfs_block_t block, | ||||
|             lfs_off_t off, void *buffer, lfs_size_t size); | ||||
|  | ||||
|     // Program a region in a block. The block must have previously | ||||
|     // been erased. Negative error codes are propogated to the user. | ||||
|     // been erased. Negative error codes are propagated to the user. | ||||
|     // May return LFS_ERR_CORRUPT if the block should be considered bad. | ||||
|     int (*prog)(const struct lfs_config *c, lfs_block_t block, | ||||
|             lfs_off_t off, const void *buffer, lfs_size_t size); | ||||
|  | ||||
|     // Erase a block. A block must be erased before being programmed. | ||||
|     // The state of an erased block is undefined. Negative error codes | ||||
|     // are propogated to the user. | ||||
|     // are propagated to the user. | ||||
|     // May return LFS_ERR_CORRUPT if the block should be considered bad. | ||||
|     int (*erase)(const struct lfs_config *c, lfs_block_t block); | ||||
|  | ||||
|     // Sync the state of the underlying block device. Negative error codes | ||||
|     // are propogated to the user. | ||||
|     // are propagated to the user. | ||||
|     int (*sync)(const struct lfs_config *c); | ||||
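The four callbacks above form the entire block device interface. A minimal RAM-backed sketch of conforming implementations; the geometry constants and names here are hypothetical, for illustration only:

``` c
#include <string.h>
#include "lfs.h"

#define BD_BLOCK_SIZE  4096  // hypothetical geometry
#define BD_BLOCK_COUNT 32

static uint8_t bd_ram[BD_BLOCK_SIZE * BD_BLOCK_COUNT];

static int bd_read(const struct lfs_config *c, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    (void)c;
    memcpy(buffer, &bd_ram[block*BD_BLOCK_SIZE + off], size);
    return 0;
}

static int bd_prog(const struct lfs_config *c, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size) {
    (void)c;
    memcpy(&bd_ram[block*BD_BLOCK_SIZE + off], buffer, size);
    return 0;
}

static int bd_erase(const struct lfs_config *c, lfs_block_t block) {
    (void)c;
    // erased state is undefined, so any fill works; 0xff mimics flash
    memset(&bd_ram[block*BD_BLOCK_SIZE], 0xff, BD_BLOCK_SIZE);
    return 0;
}

static int bd_sync(const struct lfs_config *c) {
    (void)c; // nothing to flush for RAM
    return 0;
}
```
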
|  | ||||
|     // Minimum size of a block read. All read operations will be a | ||||
| #ifdef LFS_THREADSAFE | ||||
|     // Lock the underlying block device. Negative error codes | ||||
|     // are propagated to the user. | ||||
|     int (*lock)(const struct lfs_config *c); | ||||
|  | ||||
|     // Unlock the underlying block device. Negative error codes | ||||
|     // are propagated to the user. | ||||
|     int (*unlock)(const struct lfs_config *c); | ||||
| #endif | ||||
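A hedged sketch of these hooks backed by a single pthread mutex; `bd_mutex` and the error mapping are assumptions, and the build must define `LFS_THREADSAFE` for the fields to exist:

``` c
#include <pthread.h>
#include "lfs.h"

// One global lock guarding the filesystem (sketch).
static pthread_mutex_t bd_mutex = PTHREAD_MUTEX_INITIALIZER;

static int bd_lock(const struct lfs_config *c) {
    (void)c;
    return pthread_mutex_lock(&bd_mutex) ? LFS_ERR_IO : 0;
}

static int bd_unlock(const struct lfs_config *c) {
    (void)c;
    return pthread_mutex_unlock(&bd_mutex) ? LFS_ERR_IO : 0;
}
```
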
|  | ||||
|     // Minimum size of a block read in bytes. All read operations will be a | ||||
|     // multiple of this value. | ||||
|     lfs_size_t read_size; | ||||
|  | ||||
|     // Minimum size of a block program. All program operations will be a | ||||
|     // multiple of this value. | ||||
|     // Minimum size of a block program in bytes. All program operations will be | ||||
|     // a multiple of this value. | ||||
|     lfs_size_t prog_size; | ||||
|  | ||||
|     // Size of an erasable block. This does not impact ram consumption and | ||||
|     // may be larger than the physical erase size. However, non-inlined files | ||||
|     // take up at minimum one block. Must be a multiple of the read | ||||
|     // and program sizes. | ||||
|     // Size of an erasable block in bytes. This does not impact ram consumption | ||||
|     // and may be larger than the physical erase size. However, non-inlined | ||||
|     // files take up at minimum one block. Must be a multiple of the read and | ||||
|     // program sizes. | ||||
|     lfs_size_t block_size; | ||||
|  | ||||
|     // Number of erasable blocks on the device. | ||||
|     lfs_size_t block_count; | ||||
|  | ||||
|     // Number of erase cycles before littlefs evicts metadata logs and moves  | ||||
|     // Number of erase cycles before littlefs evicts metadata logs and moves | ||||
|     // the metadata to another block. Suggested values are in the | ||||
|     // range 100-1000, with large values having better performance at the cost | ||||
|     // of less consistent wear distribution. | ||||
| @@ -199,11 +216,11 @@ struct lfs_config { | ||||
|     // Set to -1 to disable block-level wear-leveling. | ||||
|     int32_t block_cycles; | ||||
|  | ||||
|     // Size of block caches. Each cache buffers a portion of a block in RAM. | ||||
|     // The littlefs needs a read cache, a program cache, and one additional | ||||
|     // Size of block caches in bytes. Each cache buffers a portion of a block in | ||||
|     // RAM. The littlefs needs a read cache, a program cache, and one additional | ||||
|     // cache per file. Larger caches can improve performance by storing more | ||||
|     // data and reducing the number of disk accesses. Must be a multiple of | ||||
|     // the read and program sizes, and a factor of the block size. | ||||
|     // data and reducing the number of disk accesses. Must be a multiple of the | ||||
|     // read and program sizes, and a factor of the block size. | ||||
|     lfs_size_t cache_size; | ||||
|  | ||||
|     // Size of the lookahead buffer in bytes. A larger lookahead buffer | ||||
| @@ -240,6 +257,12 @@ struct lfs_config { | ||||
|     // larger attributes size but must be <= LFS_ATTR_MAX. Defaults to | ||||
|     // LFS_ATTR_MAX when zero. | ||||
|     lfs_size_t attr_max; | ||||
|  | ||||
|     // Optional upper limit on total space given to metadata pairs in bytes. On | ||||
|     // devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB) | ||||
|     // can help bound the metadata compaction time. Must be <= block_size. | ||||
|     // Defaults to block_size when zero. | ||||
|     lfs_size_t metadata_max; | ||||
| }; | ||||
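Pulling the fields above together, a hypothetical configuration for the RAM device sketched earlier; the values are chosen only to satisfy the documented constraints, not as tuned recommendations:

``` c
#include "lfs.h"

static const struct lfs_config cfg = {
    .read  = bd_read,
    .prog  = bd_prog,
    .erase = bd_erase,
    .sync  = bd_sync,

    .read_size      = 16,   // reads are multiples of 16 bytes
    .prog_size      = 16,   // programs are multiples of 16 bytes
    .block_size     = 4096, // multiple of read_size and prog_size
    .block_count    = 32,
    .block_cycles   = 500,  // within the suggested 100-1000 range
    .cache_size     = 64,   // multiple of 16 and a factor of 4096
    .lookahead_size = 16,
    .metadata_max   = 2048, // bounds compaction time; <= block_size
};
```
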
|  | ||||
| // File info structure | ||||
| @@ -399,6 +422,7 @@ typedef struct lfs { | ||||
|  | ||||
| /// Filesystem functions /// | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Format a block device with the littlefs | ||||
| // | ||||
| // Requires a littlefs object and config struct. This clobbers the littlefs | ||||
| @@ -407,6 +431,7 @@ typedef struct lfs { | ||||
| // | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_format(lfs_t *lfs, const struct lfs_config *config); | ||||
| #endif | ||||
|  | ||||
| // Mounts a littlefs | ||||
| // | ||||
| @@ -426,12 +451,15 @@ int lfs_unmount(lfs_t *lfs); | ||||
|  | ||||
| /// General operations /// | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Removes a file or directory | ||||
| // | ||||
| // If removing a directory, the directory must be empty. | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_remove(lfs_t *lfs, const char *path); | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Rename or move a file or directory | ||||
| // | ||||
| // If the destination exists, it must match the source in type. | ||||
| @@ -439,6 +467,7 @@ int lfs_remove(lfs_t *lfs, const char *path); | ||||
| // | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath); | ||||
| #endif | ||||
|  | ||||
| // Find info about a file or directory | ||||
| // | ||||
| @@ -457,10 +486,11 @@ int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info); | ||||
| // Returns the size of the attribute, or a negative error code on failure. | ||||
| // Note, the returned size is the size of the attribute on disk, irrespective | ||||
| // of the size of the buffer. This can be used to dynamically allocate a buffer | ||||
| // or check for existance. | ||||
| // or check for existence. | ||||
| lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path, | ||||
|         uint8_t type, void *buffer, lfs_size_t size); | ||||
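A hedged sketch of the allocate-then-read pattern this enables; the attribute type `0x42` and the helper are hypothetical:

``` c
#include <stdlib.h>
#include "lfs.h"

// Read custom attribute 0x42 of unknown size (sketch, lfs must be mounted).
static void *read_attr(lfs_t *lfs, const char *path, lfs_size_t *len) {
    lfs_ssize_t size = lfs_getattr(lfs, path, 0x42, NULL, 0);
    if (size < 0) {
        return NULL; // LFS_ERR_NOATTR if the attribute does not exist
    }
    void *buffer = malloc(size);
    if (buffer && lfs_getattr(lfs, path, 0x42, buffer, size) >= 0) {
        *len = size;
        return buffer;
    }
    free(buffer);
    return NULL;
}
```
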
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Set custom attributes | ||||
| // | ||||
| // Custom attributes are uniquely identified by an 8-bit type and limited | ||||
| @@ -470,17 +500,21 @@ lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path, | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_setattr(lfs_t *lfs, const char *path, | ||||
|         uint8_t type, const void *buffer, lfs_size_t size); | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Removes a custom attribute | ||||
| // | ||||
| // If an attribute is not found, nothing happens. | ||||
| // | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type); | ||||
| #endif | ||||
|  | ||||
|  | ||||
| /// File operations /// | ||||
|  | ||||
| #ifndef LFS_NO_MALLOC | ||||
| // Open a file | ||||
| // | ||||
| // The mode that the file is opened in is determined by the flags, which | ||||
| @@ -490,6 +524,10 @@ int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type); | ||||
| int lfs_file_open(lfs_t *lfs, lfs_file_t *file, | ||||
|         const char *path, int flags); | ||||
|  | ||||
| // If LFS_NO_MALLOC is defined, lfs_file_open() will fail with LFS_ERR_NOMEM; | ||||
| // use lfs_file_opencfg() with config.buffer set instead. | ||||
| #endif | ||||
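A minimal sketch of that static-buffer alternative; the buffer length must equal `cache_size` (64 here, matching the earlier sketch), and the helper is hypothetical:

``` c
#include "lfs.h"

// One cache_size-byte buffer per concurrently-open file.
static uint8_t file_buffer[64];
static const struct lfs_file_config file_cfg = {
    .buffer = file_buffer,
};

static int open_static(lfs_t *lfs, lfs_file_t *file) {
    return lfs_file_opencfg(lfs, file, "hello.txt",
            LFS_O_RDWR | LFS_O_CREAT, &file_cfg);
}
```
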
|  | ||||
| // Open a file with extra configuration | ||||
| // | ||||
| // The mode that the file is opened in is determined by the flags, which | ||||
| @@ -525,6 +563,7 @@ int lfs_file_sync(lfs_t *lfs, lfs_file_t *file); | ||||
| lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file, | ||||
|         void *buffer, lfs_size_t size); | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Write data to file | ||||
| // | ||||
| // Takes a buffer and size indicating the data to write. The file will not | ||||
| @@ -533,6 +572,7 @@ lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file, | ||||
| // Returns the number of bytes written, or a negative error code on failure. | ||||
| lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file, | ||||
|         const void *buffer, lfs_size_t size); | ||||
| #endif | ||||
|  | ||||
| // Change the position of the file | ||||
| // | ||||
| @@ -541,10 +581,12 @@ lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file, | ||||
| lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file, | ||||
|         lfs_soff_t off, int whence); | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Truncates the size of the file to the specified size | ||||
| // | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size); | ||||
| #endif | ||||
|  | ||||
| // Return the position of the file | ||||
| // | ||||
| @@ -567,10 +609,12 @@ lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file); | ||||
|  | ||||
| /// Directory operations /// | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| // Create a directory | ||||
| // | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_mkdir(lfs_t *lfs, const char *path); | ||||
| #endif | ||||
|  | ||||
| // Open a directory | ||||
| // | ||||
| @@ -632,6 +676,7 @@ lfs_ssize_t lfs_fs_size(lfs_t *lfs); | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data); | ||||
|  | ||||
| #ifndef LFS_READONLY | ||||
| #ifdef LFS_MIGRATE | ||||
| // Attempts to migrate a previous version of littlefs | ||||
| // | ||||
| @@ -646,6 +691,7 @@ int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data); | ||||
| // Returns a negative error code on failure. | ||||
| int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg); | ||||
| #endif | ||||
| #endif | ||||
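A hedged sketch of the usual migration flow this enables, assuming `-DLFS_MIGRATE` and that a mount failing with `LFS_ERR_INVAL` indicates a v1 filesystem:

``` c
#include "lfs.h"

static int mount_or_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_mount(lfs, cfg);
#ifdef LFS_MIGRATE
    if (err == LFS_ERR_INVAL) {
        // possibly a v1 filesystem; migrate in place, then retry the mount
        err = lfs_migrate(lfs, cfg);
        if (!err) {
            err = lfs_mount(lfs, cfg);
        }
    }
#endif
    return err;
}
```
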
|  | ||||
|  | ||||
| #ifdef __cplusplus | ||||
|   | ||||
| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * lfs util functions | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
|   | ||||
							
								
								
									
11 lfs_util.h
									
									
									
									
									
								
							| @@ -1,6 +1,7 @@ | ||||
| /* | ||||
|  * lfs utility functions | ||||
|  * | ||||
|  * Copyright (c) 2022, The littlefs authors. | ||||
|  * Copyright (c) 2017, Arm Limited. All rights reserved. | ||||
|  * SPDX-License-Identifier: BSD-3-Clause | ||||
|  */ | ||||
| @@ -49,6 +50,7 @@ extern "C" | ||||
| // code footprint | ||||
|  | ||||
| // Logging functions | ||||
| #ifndef LFS_TRACE | ||||
| #ifdef LFS_YES_TRACE | ||||
| #define LFS_TRACE_(fmt, ...) \ | ||||
|     printf("%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| @@ -56,7 +58,9 @@ extern "C" | ||||
| #else | ||||
| #define LFS_TRACE(...) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_DEBUG | ||||
| #ifndef LFS_NO_DEBUG | ||||
| #define LFS_DEBUG_(fmt, ...) \ | ||||
|     printf("%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| @@ -64,7 +68,9 @@ extern "C" | ||||
| #else | ||||
| #define LFS_DEBUG(...) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_WARN | ||||
| #ifndef LFS_NO_WARN | ||||
| #define LFS_WARN_(fmt, ...) \ | ||||
|     printf("%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| @@ -72,7 +78,9 @@ extern "C" | ||||
| #else | ||||
| #define LFS_WARN(...) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| #ifndef LFS_ERROR | ||||
| #ifndef LFS_NO_ERROR | ||||
| #define LFS_ERROR_(fmt, ...) \ | ||||
|     printf("%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) | ||||
| @@ -80,13 +88,16 @@ extern "C" | ||||
| #else | ||||
| #define LFS_ERROR(...) | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| // Runtime assertions | ||||
| #ifndef LFS_ASSERT | ||||
| #ifndef LFS_NO_ASSERT | ||||
| #define LFS_ASSERT(test) assert(test) | ||||
| #else | ||||
| #define LFS_ASSERT(test) | ||||
| #endif | ||||
| #endif | ||||
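Since every macro above is now guarded by `#ifndef`, a port can inject its own definitions before `lfs_util.h` is processed (for example via `CFLAGS` or a forced include). A sketch with hypothetical hooks:

``` c
// my_log and my_assert_fail are hypothetical user-provided handlers.
void my_log(const char *fmt, ...);
void my_assert_fail(const char *expr);

#define LFS_TRACE(...)   my_log(__VA_ARGS__)
#define LFS_WARN(...)    my_log(__VA_ARGS__)
#define LFS_ASSERT(test) ((test) ? (void)0 : my_assert_fail(#test))
```
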
|  | ||||
|  | ||||
| // Builtin functions, these may be replaced by more efficient | ||||
|   | ||||
							
								
								
									
284 scripts/code.py Executable file
									
								
							| @@ -0,0 +1,284 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Script to find code size at the function level. Basically just a bit wrapper | ||||
| # around nm with some extra conveniences for comparing builds. Heavily inspired | ||||
| # by Linux's Bloat-O-Meter. | ||||
| # | ||||
|  | ||||
| import os | ||||
| import sys | ||||
| import glob | ||||
| import itertools as it | ||||
| import subprocess as sp | ||||
| import shlex | ||||
| import re | ||||
| import csv | ||||
| import collections as co | ||||
|  | ||||
|  | ||||
| OBJ_PATHS = ['*.o'] | ||||
|  | ||||
| def collect(paths, **args): | ||||
|     results = co.defaultdict(lambda: 0) | ||||
|     pattern = re.compile( | ||||
|         '^(?P<size>[0-9a-fA-F]+)' + | ||||
|         ' (?P<type>[%s])' % re.escape(args['type']) + | ||||
|         ' (?P<func>.+?)$') | ||||
|     for path in paths: | ||||
|         # note nm-tool may contain extra args | ||||
|         cmd = args['nm_tool'] + ['--size-sort', path] | ||||
|         if args.get('verbose'): | ||||
|             print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|         proc = sp.Popen(cmd, | ||||
|             stdout=sp.PIPE, | ||||
|             stderr=sp.PIPE if not args.get('verbose') else None, | ||||
|             universal_newlines=True, | ||||
|             errors='replace') | ||||
|         for line in proc.stdout: | ||||
|             m = pattern.match(line) | ||||
|             if m: | ||||
|                 results[(path, m.group('func'))] += int(m.group('size'), 16) | ||||
|         proc.wait() | ||||
|         if proc.returncode != 0: | ||||
|             if not args.get('verbose'): | ||||
|                 for line in proc.stderr: | ||||
|                     sys.stdout.write(line) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|     flat_results = [] | ||||
|     for (file, func), size in results.items(): | ||||
|         # map to source files | ||||
|         if args.get('build_dir'): | ||||
|             file = re.sub('%s/*' % re.escape(args['build_dir']), '', file) | ||||
|         # replace .o with .c, different scripts report .o/.c, so we need | ||||
|         # to pick one if we want to deduplicate csv files | ||||
|         file = re.sub(r'\.o$', '.c', file) | ||||
|         # discard internal functions | ||||
|         if not args.get('everything'): | ||||
|             if func.startswith('__'): | ||||
|                 continue | ||||
|         # discard .8449 suffixes created by optimizer | ||||
|         func = re.sub(r'\.[0-9]+', '', func) | ||||
|  | ||||
|         flat_results.append((file, func, size)) | ||||
|  | ||||
|     return flat_results | ||||
|  | ||||
| def main(**args): | ||||
|     def openio(path, mode='r'): | ||||
|         if path == '-': | ||||
|             if 'r' in mode: | ||||
|                 return os.fdopen(os.dup(sys.stdin.fileno()), 'r') | ||||
|             else: | ||||
|                 return os.fdopen(os.dup(sys.stdout.fileno()), 'w') | ||||
|         else: | ||||
|             return open(path, mode) | ||||
|  | ||||
|     # find sizes | ||||
|     if not args.get('use', None): | ||||
|         # find .o files | ||||
|         paths = [] | ||||
|         for path in args['obj_paths']: | ||||
|             if os.path.isdir(path): | ||||
|                 path = path + '/*.o' | ||||
|  | ||||
|             for path in glob.glob(path): | ||||
|                 paths.append(path) | ||||
|  | ||||
|         if not paths: | ||||
|             print('no .o files found in %r?' % args['obj_paths']) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|         results = collect(paths, **args) | ||||
|     else: | ||||
|         with openio(args['use']) as f: | ||||
|             r = csv.DictReader(f) | ||||
|             results = [ | ||||
|                 (   result['file'], | ||||
|                     result['name'], | ||||
|                     int(result['code_size'])) | ||||
|                 for result in r | ||||
|                 if result.get('code_size') not in {None, ''}] | ||||
|  | ||||
|     total = 0 | ||||
|     for _, _, size in results: | ||||
|         total += size | ||||
|  | ||||
|     # find previous results? | ||||
|     if args.get('diff'): | ||||
|         try: | ||||
|             with openio(args['diff']) as f: | ||||
|                 r = csv.DictReader(f) | ||||
|                 prev_results = [ | ||||
|                     (   result['file'], | ||||
|                         result['name'], | ||||
|                         int(result['code_size'])) | ||||
|                     for result in r | ||||
|                     if result.get('code_size') not in {None, ''}] | ||||
|         except FileNotFoundError: | ||||
|             prev_results = [] | ||||
|  | ||||
|         prev_total = 0 | ||||
|         for _, _, size in prev_results: | ||||
|             prev_total += size | ||||
|  | ||||
|     # write results to CSV | ||||
|     if args.get('output'): | ||||
|         merged_results = co.defaultdict(lambda: {}) | ||||
|         other_fields = [] | ||||
|  | ||||
|         # merge? | ||||
|         if args.get('merge'): | ||||
|             try: | ||||
|                 with openio(args['merge']) as f: | ||||
|                     r = csv.DictReader(f) | ||||
|                     for result in r: | ||||
|                         file = result.pop('file', '') | ||||
|                         func = result.pop('name', '') | ||||
|                         result.pop('code_size', None) | ||||
|                         merged_results[(file, func)] = result | ||||
|                         other_fields = result.keys() | ||||
|             except FileNotFoundError: | ||||
|                 pass | ||||
|  | ||||
|         for file, func, size in results: | ||||
|             merged_results[(file, func)]['code_size'] = size | ||||
|  | ||||
|         with openio(args['output'], 'w') as f: | ||||
|             w = csv.DictWriter(f, ['file', 'name', *other_fields, 'code_size']) | ||||
|             w.writeheader() | ||||
|             for (file, func), result in sorted(merged_results.items()): | ||||
|                 w.writerow({'file': file, 'name': func, **result}) | ||||
|  | ||||
|     # print results | ||||
|     def dedup_entries(results, by='name'): | ||||
|         entries = co.defaultdict(lambda: 0) | ||||
|         for file, func, size in results: | ||||
|             entry = (file if by == 'file' else func) | ||||
|             entries[entry] += size | ||||
|         return entries | ||||
|  | ||||
|     def diff_entries(olds, news): | ||||
|         diff = co.defaultdict(lambda: (0, 0, 0, 0)) | ||||
|         for name, new in news.items(): | ||||
|             diff[name] = (0, new, new, 1.0) | ||||
|         for name, old in olds.items(): | ||||
|             _, new, _, _ = diff[name] | ||||
|             diff[name] = (old, new, new-old, (new-old)/old if old else 1.0) | ||||
|         return diff | ||||
|  | ||||
|     def sorted_entries(entries): | ||||
|         if args.get('size_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1], x)) | ||||
|         elif args.get('reverse_size_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1], x)) | ||||
|         else: | ||||
|             return sorted(entries) | ||||
|  | ||||
|     def sorted_diff_entries(entries): | ||||
|         if args.get('size_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1][1], x)) | ||||
|         elif args.get('reverse_size_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1][1], x)) | ||||
|         else: | ||||
|             return sorted(entries, key=lambda x: (-x[1][3], x)) | ||||
|  | ||||
|     def print_header(by=''): | ||||
|         if not args.get('diff'): | ||||
|             print('%-36s %7s' % (by, 'size')) | ||||
|         else: | ||||
|             print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff')) | ||||
|  | ||||
|     def print_entry(name, size): | ||||
|         print("%-36s %7d" % (name, size)) | ||||
|  | ||||
|     def print_diff_entry(name, old, new, diff, ratio): | ||||
|         print("%-36s %7s %7s %+7d%s" % (name, | ||||
|             old or "-", | ||||
|             new or "-", | ||||
|             diff, | ||||
|             ' (%+.1f%%)' % (100*ratio) if ratio else '')) | ||||
|  | ||||
|     def print_entries(by='name'): | ||||
|         entries = dedup_entries(results, by=by) | ||||
|  | ||||
|         if not args.get('diff'): | ||||
|             print_header(by=by) | ||||
|             for name, size in sorted_entries(entries.items()): | ||||
|                 print_entry(name, size) | ||||
|         else: | ||||
|             prev_entries = dedup_entries(prev_results, by=by) | ||||
|             diff = diff_entries(prev_entries, entries) | ||||
|             print_header(by='%s (%d added, %d removed)' % (by, | ||||
|                 sum(1 for old, _, _, _ in diff.values() if not old), | ||||
|                 sum(1 for _, new, _, _ in diff.values() if not new))) | ||||
|             for name, (old, new, diff, ratio) in sorted_diff_entries( | ||||
|                     diff.items()): | ||||
|                 if ratio or args.get('all'): | ||||
|                     print_diff_entry(name, old, new, diff, ratio) | ||||
|  | ||||
|     def print_totals(): | ||||
|         if not args.get('diff'): | ||||
|             print_entry('TOTAL', total) | ||||
|         else: | ||||
|             ratio = (0.0 if not prev_total and not total | ||||
|                 else 1.0 if not prev_total | ||||
|                 else (total-prev_total)/prev_total) | ||||
|             print_diff_entry('TOTAL', | ||||
|                 prev_total, total, | ||||
|                 total-prev_total, | ||||
|                 ratio) | ||||
|  | ||||
|     if args.get('quiet'): | ||||
|         pass | ||||
|     elif args.get('summary'): | ||||
|         print_header() | ||||
|         print_totals() | ||||
|     elif args.get('files'): | ||||
|         print_entries(by='file') | ||||
|         print_totals() | ||||
|     else: | ||||
|         print_entries(by='name') | ||||
|         print_totals() | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     import argparse | ||||
|     import sys | ||||
|     parser = argparse.ArgumentParser( | ||||
|         description="Find code size at the function level.") | ||||
|     parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS, | ||||
|         help="Description of where to find *.o files. May be a directory \ | ||||
|             or a list of paths. Defaults to %r." % OBJ_PATHS) | ||||
|     parser.add_argument('-v', '--verbose', action='store_true', | ||||
|         help="Output commands that run behind the scenes.") | ||||
|     parser.add_argument('-q', '--quiet', action='store_true', | ||||
|         help="Don't show anything, useful with -o.") | ||||
|     parser.add_argument('-o', '--output', | ||||
|         help="Specify CSV file to store results.") | ||||
|     parser.add_argument('-u', '--use', | ||||
|         help="Don't compile and find code sizes, instead use this CSV file.") | ||||
|     parser.add_argument('-d', '--diff', | ||||
|         help="Specify CSV file to diff code size against.") | ||||
|     parser.add_argument('-m', '--merge', | ||||
|         help="Merge with an existing CSV file when writing to output.") | ||||
|     parser.add_argument('-a', '--all', action='store_true', | ||||
|         help="Show all functions, not just the ones that changed.") | ||||
|     parser.add_argument('-A', '--everything', action='store_true', | ||||
|         help="Include builtin and libc specific symbols.") | ||||
|     parser.add_argument('-s', '--size-sort', action='store_true', | ||||
|         help="Sort by size.") | ||||
|     parser.add_argument('-S', '--reverse-size-sort', action='store_true', | ||||
|         help="Sort by size, but backwards.") | ||||
|     parser.add_argument('-F', '--files', action='store_true', | ||||
|         help="Show file-level code sizes. Note this does not include padding! " | ||||
|             "So sizes may differ from other tools.") | ||||
|     parser.add_argument('-Y', '--summary', action='store_true', | ||||
|         help="Only show the total code size.") | ||||
|     parser.add_argument('--type', default='tTrRdD', | ||||
|         help="Type of symbols to report, this uses the same single-character " | ||||
|             "type-names emitted by nm. Defaults to %(default)r.") | ||||
|     parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(), | ||||
|         help="Path to the nm tool to use.") | ||||
|     parser.add_argument('--build-dir', | ||||
|         help="Specify the relative build directory. Used to map object files \ | ||||
|             to the correct source files.") | ||||
|     sys.exit(main(**vars(parser.parse_args()))) | ||||
							
								
								
									
323 scripts/coverage.py Executable file
									
								
							| @@ -0,0 +1,323 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Parse and report coverage info from .info files generated by lcov | ||||
| # | ||||
| import os | ||||
| import sys | ||||
| import glob | ||||
| import csv | ||||
| import re | ||||
| import collections as co | ||||
| import bisect as b | ||||
|  | ||||
|  | ||||
| INFO_PATHS = ['tests/*.toml.info'] | ||||
|  | ||||
| def collect(paths, **args): | ||||
|     file = None | ||||
|     funcs = [] | ||||
|     lines = co.defaultdict(lambda: 0) | ||||
|     pattern = re.compile( | ||||
|         '^(?P<file>SF:/?(?P<file_name>.*))$' | ||||
|         '|^(?P<func>FN:(?P<func_lineno>[0-9]*),(?P<func_name>.*))$' | ||||
|         '|^(?P<line>DA:(?P<line_lineno>[0-9]*),(?P<line_hits>[0-9]*))$') | ||||
|     for path in paths: | ||||
|         with open(path) as f: | ||||
|             for line in f: | ||||
|                 m = pattern.match(line) | ||||
|                 if m and m.group('file'): | ||||
|                     file = m.group('file_name') | ||||
|                 elif m and file and m.group('func'): | ||||
|                     funcs.append((file, int(m.group('func_lineno')), | ||||
|                         m.group('func_name'))) | ||||
|                 elif m and file and m.group('line'): | ||||
|                     lines[(file, int(m.group('line_lineno')))] += ( | ||||
|                         int(m.group('line_hits'))) | ||||
|  | ||||
|     # map line numbers to functions | ||||
|     funcs.sort() | ||||
|     def func_from_lineno(file, lineno): | ||||
|         i = b.bisect(funcs, (file, lineno)) | ||||
|         if i and funcs[i-1][0] == file: | ||||
|             return funcs[i-1][2] | ||||
|         else: | ||||
|             return None | ||||
|  | ||||
|     # reduce to function info | ||||
|     reduced_funcs = co.defaultdict(lambda: (0, 0)) | ||||
|     for (file, line_lineno), line_hits in lines.items(): | ||||
|         func = func_from_lineno(file, line_lineno) | ||||
|         if not func: | ||||
|             continue | ||||
|         hits, count = reduced_funcs[(file, func)] | ||||
|         reduced_funcs[(file, func)] = (hits + (line_hits > 0), count + 1) | ||||
|  | ||||
|     results = [] | ||||
|     for (file, func), (hits, count) in reduced_funcs.items(): | ||||
|         # discard internal/testing functions (test_* is injected during | ||||
|         # internal testing) | ||||
|         if not args.get('everything'): | ||||
|             if func.startswith('__') or func.startswith('test_'): | ||||
|                 continue | ||||
|         # discard .8449 suffixes created by optimizer | ||||
|         func = re.sub(r'\.[0-9]+', '', func) | ||||
|         results.append((file, func, hits, count)) | ||||
|  | ||||
|     return results | ||||
|  | ||||
|  | ||||
| def main(**args): | ||||
|     def openio(path, mode='r'): | ||||
|         if path == '-': | ||||
|             if 'r' in mode: | ||||
|                 return os.fdopen(os.dup(sys.stdin.fileno()), 'r') | ||||
|             else: | ||||
|                 return os.fdopen(os.dup(sys.stdout.fileno()), 'w') | ||||
|         else: | ||||
|             return open(path, mode) | ||||
|  | ||||
|     # find coverage | ||||
|     if not args.get('use'): | ||||
|         # find *.info files | ||||
|         paths = [] | ||||
|         for path in args['info_paths']: | ||||
|             if os.path.isdir(path): | ||||
|                 path = path + '/*.info' | ||||
|  | ||||
|             for path in glob.glob(path): | ||||
|                 paths.append(path) | ||||
|  | ||||
|         if not paths: | ||||
|             print('no .info files found in %r?' % args['info_paths']) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|         results = collect(paths, **args) | ||||
|     else: | ||||
|         with openio(args['use']) as f: | ||||
|             r = csv.DictReader(f) | ||||
|             results = [ | ||||
|                 (   result['file'], | ||||
|                     result['name'], | ||||
|                     int(result['coverage_hits']), | ||||
|                     int(result['coverage_count'])) | ||||
|                 for result in r | ||||
|                 if result.get('coverage_hits') not in {None, ''} | ||||
|                 if result.get('coverage_count') not in {None, ''}] | ||||
|  | ||||
|     total_hits, total_count = 0, 0 | ||||
|     for _, _, hits, count in results: | ||||
|         total_hits += hits | ||||
|         total_count += count | ||||
|  | ||||
|     # find previous results? | ||||
|     if args.get('diff'): | ||||
|         try: | ||||
|             with openio(args['diff']) as f: | ||||
|                 r = csv.DictReader(f) | ||||
|                 prev_results = [ | ||||
|                     (   result['file'], | ||||
|                         result['name'], | ||||
|                         int(result['coverage_hits']), | ||||
|                         int(result['coverage_count'])) | ||||
|                     for result in r | ||||
|                     if result.get('coverage_hits') not in {None, ''} | ||||
|                     if result.get('coverage_count') not in {None, ''}] | ||||
|         except FileNotFoundError: | ||||
|             prev_results = [] | ||||
|  | ||||
|         prev_total_hits, prev_total_count = 0, 0 | ||||
|         for _, _, hits, count in prev_results: | ||||
|             prev_total_hits += hits | ||||
|             prev_total_count += count | ||||
|  | ||||
|     # write results to CSV | ||||
|     if args.get('output'): | ||||
|         merged_results = co.defaultdict(lambda: {}) | ||||
|         other_fields = [] | ||||
|  | ||||
|         # merge? | ||||
|         if args.get('merge'): | ||||
|             try: | ||||
|                 with openio(args['merge']) as f: | ||||
|                     r = csv.DictReader(f) | ||||
|                     for result in r: | ||||
|                         file = result.pop('file', '') | ||||
|                         func = result.pop('name', '') | ||||
|                         result.pop('coverage_hits', None) | ||||
|                         result.pop('coverage_count', None) | ||||
|                         merged_results[(file, func)] = result | ||||
|                         other_fields = result.keys() | ||||
|             except FileNotFoundError: | ||||
|                 pass | ||||
|  | ||||
|         for file, func, hits, count in results: | ||||
|             merged_results[(file, func)]['coverage_hits'] = hits | ||||
|             merged_results[(file, func)]['coverage_count'] = count | ||||
|  | ||||
|         with openio(args['output'], 'w') as f: | ||||
|             w = csv.DictWriter(f, ['file', 'name', *other_fields, 'coverage_hits', 'coverage_count']) | ||||
|             w.writeheader() | ||||
|             for (file, func), result in sorted(merged_results.items()): | ||||
|                 w.writerow({'file': file, 'name': func, **result}) | ||||
|  | ||||
|     # print results | ||||
|     def dedup_entries(results, by='name'): | ||||
|         entries = co.defaultdict(lambda: (0, 0)) | ||||
|         for file, func, hits, count in results: | ||||
|             entry = (file if by == 'file' else func) | ||||
|             entry_hits, entry_count = entries[entry] | ||||
|             entries[entry] = (entry_hits + hits, entry_count + count) | ||||
|         return entries | ||||
|  | ||||
|     def diff_entries(olds, news): | ||||
|         diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0)) | ||||
|         for name, (new_hits, new_count) in news.items(): | ||||
|             diff[name] = ( | ||||
|                 0, 0, | ||||
|                 new_hits, new_count, | ||||
|                 new_hits, new_count, | ||||
|                 (new_hits/new_count if new_count else 1.0) - 1.0) | ||||
|         for name, (old_hits, old_count) in olds.items(): | ||||
|             _, _, new_hits, new_count, _, _, _ = diff[name] | ||||
|             diff[name] = ( | ||||
|                 old_hits, old_count, | ||||
|                 new_hits, new_count, | ||||
|                 new_hits-old_hits, new_count-old_count, | ||||
|                 ((new_hits/new_count if new_count else 1.0) | ||||
|                     - (old_hits/old_count if old_count else 1.0))) | ||||
|         return diff | ||||
|  | ||||
|     def sorted_entries(entries): | ||||
|         if args.get('coverage_sort'): | ||||
|             return sorted(entries, key=lambda x: (-(x[1][0]/x[1][1] if x[1][1] else -1), x)) | ||||
|         elif args.get('reverse_coverage_sort'): | ||||
|             return sorted(entries, key=lambda x: (+(x[1][0]/x[1][1] if x[1][1] else -1), x)) | ||||
|         else: | ||||
|             return sorted(entries) | ||||
|  | ||||
|     def sorted_diff_entries(entries): | ||||
|         if args.get('coverage_sort'): | ||||
|             return sorted(entries, key=lambda x: (-(x[1][2]/x[1][3] if x[1][3] else -1), x)) | ||||
|         elif args.get('reverse_coverage_sort'): | ||||
|             return sorted(entries, key=lambda x: (+(x[1][2]/x[1][3] if x[1][3] else -1), x)) | ||||
|         else: | ||||
|             return sorted(entries, key=lambda x: (-x[1][6], x)) | ||||
|  | ||||
|     def print_header(by=''): | ||||
|         if not args.get('diff'): | ||||
|             print('%-36s %19s' % (by, 'hits/line')) | ||||
|         else: | ||||
|             print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff')) | ||||
|  | ||||
|     def print_entry(name, hits, count): | ||||
|         print("%-36s %11s %7s" % (name, | ||||
|             '%d/%d' % (hits, count) | ||||
|                 if count else '-', | ||||
|             '%.1f%%' % (100*hits/count) | ||||
|                 if count else '-')) | ||||
|  | ||||
|     def print_diff_entry(name, | ||||
|             old_hits, old_count, | ||||
|             new_hits, new_count, | ||||
|             diff_hits, diff_count, | ||||
|             ratio): | ||||
|         print("%-36s %11s %7s %11s %7s %11s%s" % (name, | ||||
|             '%d/%d' % (old_hits, old_count) | ||||
|                 if old_count else '-', | ||||
|             '%.1f%%' % (100*old_hits/old_count) | ||||
|                 if old_count else '-', | ||||
|             '%d/%d' % (new_hits, new_count) | ||||
|                 if new_count else '-', | ||||
|             '%.1f%%' % (100*new_hits/new_count) | ||||
|                 if new_count else '-', | ||||
|             '%+d/%+d' % (diff_hits, diff_count), | ||||
|             ' (%+.1f%%)' % (100*ratio) if ratio else '')) | ||||
|  | ||||
|     def print_entries(by='name'): | ||||
|         entries = dedup_entries(results, by=by) | ||||
|  | ||||
|         if not args.get('diff'): | ||||
|             print_header(by=by) | ||||
|             for name, (hits, count) in sorted_entries(entries.items()): | ||||
|                 print_entry(name, hits, count) | ||||
|         else: | ||||
|             prev_entries = dedup_entries(prev_results, by=by) | ||||
|             diff = diff_entries(prev_entries, entries) | ||||
|             print_header(by='%s (%d added, %d removed)' % (by, | ||||
|                 sum(1 for _, old, _, _, _, _, _ in diff.values() if not old), | ||||
|                 sum(1 for _, _, _, new, _, _, _ in diff.values() if not new))) | ||||
|             for name, ( | ||||
|                     old_hits, old_count, | ||||
|                     new_hits, new_count, | ||||
|                     diff_hits, diff_count, ratio) in sorted_diff_entries( | ||||
|                         diff.items()): | ||||
|                 if ratio or args.get('all'): | ||||
|                     print_diff_entry(name, | ||||
|                         old_hits, old_count, | ||||
|                         new_hits, new_count, | ||||
|                         diff_hits, diff_count, | ||||
|                         ratio) | ||||
|  | ||||
|     def print_totals(): | ||||
|         if not args.get('diff'): | ||||
|             print_entry('TOTAL', total_hits, total_count) | ||||
|         else: | ||||
|             ratio = ((total_hits/total_count | ||||
|                     if total_count else 1.0) | ||||
|                 - (prev_total_hits/prev_total_count | ||||
|                     if prev_total_count else 1.0)) | ||||
|             print_diff_entry('TOTAL', | ||||
|                 prev_total_hits, prev_total_count, | ||||
|                 total_hits, total_count, | ||||
|                 total_hits-prev_total_hits, total_count-prev_total_count, | ||||
|                 ratio) | ||||
|  | ||||
|     if args.get('quiet'): | ||||
|         pass | ||||
|     elif args.get('summary'): | ||||
|         print_header() | ||||
|         print_totals() | ||||
|     elif args.get('files'): | ||||
|         print_entries(by='file') | ||||
|         print_totals() | ||||
|     else: | ||||
|         print_entries(by='name') | ||||
|         print_totals() | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     import argparse | ||||
|     import sys | ||||
|     parser = argparse.ArgumentParser( | ||||
|         description="Parse and report coverage info from .info files \ | ||||
|             generated by lcov") | ||||
|     parser.add_argument('info_paths', nargs='*', default=INFO_PATHS, | ||||
|         help="Description of where to find *.info files. May be a directory \ | ||||
|             or list of paths. *.info files will be merged to show the total \ | ||||
|             coverage. Defaults to %r." % INFO_PATHS) | ||||
|     parser.add_argument('-v', '--verbose', action='store_true', | ||||
|         help="Output commands that run behind the scenes.") | ||||
|     parser.add_argument('-o', '--output', | ||||
|         help="Specify CSV file to store results.") | ||||
|     parser.add_argument('-u', '--use', | ||||
|         help="Don't do any work, instead use this CSV file.") | ||||
|     parser.add_argument('-d', '--diff', | ||||
|         help="Specify CSV file to diff code size against.") | ||||
|     parser.add_argument('-m', '--merge', | ||||
|         help="Merge with an existing CSV file when writing to output.") | ||||
|     parser.add_argument('-a', '--all', action='store_true', | ||||
|         help="Show all functions, not just the ones that changed.") | ||||
|     parser.add_argument('-A', '--everything', action='store_true', | ||||
|         help="Include builtin and libc specific symbols.") | ||||
|     parser.add_argument('-s', '--coverage-sort', action='store_true', | ||||
|         help="Sort by coverage.") | ||||
|     parser.add_argument('-S', '--reverse-coverage-sort', action='store_true', | ||||
|         help="Sort by coverage, but backwards.") | ||||
|     parser.add_argument('-F', '--files', action='store_true', | ||||
|         help="Show file-level coverage.") | ||||
|     parser.add_argument('-Y', '--summary', action='store_true', | ||||
|         help="Only show the total coverage.") | ||||
|     parser.add_argument('-q', '--quiet', action='store_true', | ||||
|         help="Don't show anything, useful with -o.") | ||||
|     parser.add_argument('--build-dir', | ||||
|         help="Specify the relative build directory. Used to map object files \ | ||||
|             to the correct source files.") | ||||
|     sys.exit(main(**vars(parser.parse_args()))) | ||||
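Usage note: a typical flow (paths illustrative) is to point the script at the lcov output, e.g. "./scripts/coverage.py tests/*.info -o coverage.csv", then rerun later with "-d coverage.csv" to print per-function coverage deltas; "-s" sorts the report by coverage.
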
scripts/data.py (new executable file, 283 lines)
							| @@ -0,0 +1,283 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Script to find data size at the function level. Basically just a thin wrapper | ||||
| # around nm with some extra conveniences for comparing builds. Heavily inspired | ||||
| # by Linux's Bloat-O-Meter. | ||||
| # | ||||
|  | ||||
| import os | ||||
| import sys | ||||
| import glob | ||||
| import itertools as it | ||||
| import subprocess as sp | ||||
| import shlex | ||||
| import re | ||||
| import csv | ||||
| import collections as co | ||||
|  | ||||
|  | ||||
| OBJ_PATHS = ['*.o'] | ||||
|  | ||||
| def collect(paths, **args): | ||||
|     results = co.defaultdict(lambda: 0) | ||||
|     pattern = re.compile( | ||||
|         '^(?P<size>[0-9a-fA-F]+)' + | ||||
|         ' (?P<type>[%s])' % re.escape(args['type']) + | ||||
|         ' (?P<func>.+?)$') | ||||
|     for path in paths: | ||||
|         # note nm-tool may contain extra args | ||||
|         cmd = args['nm_tool'] + ['--size-sort', path] | ||||
|         if args.get('verbose'): | ||||
|             print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|         proc = sp.Popen(cmd, | ||||
|             stdout=sp.PIPE, | ||||
|             stderr=sp.PIPE if not args.get('verbose') else None, | ||||
|             universal_newlines=True, | ||||
|             errors='replace') | ||||
|         for line in proc.stdout: | ||||
|             m = pattern.match(line) | ||||
|             if m: | ||||
|                 results[(path, m.group('func'))] += int(m.group('size'), 16) | ||||
|         proc.wait() | ||||
|         if proc.returncode != 0: | ||||
|             if not args.get('verbose'): | ||||
|                 for line in proc.stderr: | ||||
|                     sys.stdout.write(line) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|     flat_results = [] | ||||
|     for (file, func), size in results.items(): | ||||
|         # map to source files | ||||
|         if args.get('build_dir'): | ||||
|             file = re.sub('%s/*' % re.escape(args['build_dir']), '', file) | ||||
|         # replace .o with .c, different scripts report .o/.c, and we need to | ||||
|         # choose one if we want to deduplicate csv files | ||||
|         file = re.sub(r'\.o$', '.c', file) | ||||
|         # discard internal functions | ||||
|         if not args.get('everything'): | ||||
|             if func.startswith('__'): | ||||
|                 continue | ||||
|         # discard .8449-style suffixes created by the optimizer | ||||
|         func = re.sub(r'\.[0-9]+', '', func) | ||||
|         flat_results.append((file, func, size)) | ||||
|  | ||||
|     return flat_results | ||||
|  | ||||
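The size/type/name pattern above targets nm's --size-sort output; a quick check against a made-up symbol line (real nm output varies by toolchain):

    import re

    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)'
        ' (?P<type>[dDbB])'        # --type defaults to 'dDbB' (data/bss)
        ' (?P<func>.+?)$')
    line = '00000018 d lfs_prog_buffer'        # made-up nm --size-sort line
    m = pattern.match(line)
    assert m and int(m.group('size'), 16) == 0x18
    assert m.group('func') == 'lfs_prog_buffer'
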
| def main(**args): | ||||
|     def openio(path, mode='r'): | ||||
|         if path == '-': | ||||
|             if 'r' in mode: | ||||
|                 return os.fdopen(os.dup(sys.stdin.fileno()), 'r') | ||||
|             else: | ||||
|                 return os.fdopen(os.dup(sys.stdout.fileno()), 'w') | ||||
|         else: | ||||
|             return open(path, mode) | ||||
|  | ||||
|     # find sizes | ||||
|     if not args.get('use', None): | ||||
|         # find .o files | ||||
|         paths = [] | ||||
|         for path in args['obj_paths']: | ||||
|             if os.path.isdir(path): | ||||
|                 path = path + '/*.o' | ||||
|  | ||||
|             for path in glob.glob(path): | ||||
|                 paths.append(path) | ||||
|  | ||||
|         if not paths: | ||||
|             print('no .o files found in %r?' % args['obj_paths']) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|         results = collect(paths, **args) | ||||
|     else: | ||||
|         with openio(args['use']) as f: | ||||
|             r = csv.DictReader(f) | ||||
|             results = [ | ||||
|                 (   result['file'], | ||||
|                     result['name'], | ||||
|                     int(result['data_size'])) | ||||
|                 for result in r | ||||
|                 if result.get('data_size') not in {None, ''}] | ||||
|  | ||||
|     total = 0 | ||||
|     for _, _, size in results: | ||||
|         total += size | ||||
|  | ||||
|     # find previous results? | ||||
|     if args.get('diff'): | ||||
|         try: | ||||
|             with openio(args['diff']) as f: | ||||
|                 r = csv.DictReader(f) | ||||
|                 prev_results = [ | ||||
|                     (   result['file'], | ||||
|                         result['name'], | ||||
|                         int(result['data_size'])) | ||||
|                     for result in r | ||||
|                     if result.get('data_size') not in {None, ''}] | ||||
|         except FileNotFoundError: | ||||
|             prev_results = [] | ||||
|  | ||||
|         prev_total = 0 | ||||
|         for _, _, size in prev_results: | ||||
|             prev_total += size | ||||
|  | ||||
|     # write results to CSV | ||||
|     if args.get('output'): | ||||
|         merged_results = co.defaultdict(lambda: {}) | ||||
|         other_fields = [] | ||||
|  | ||||
|         # merge? | ||||
|         if args.get('merge'): | ||||
|             try: | ||||
|                 with openio(args['merge']) as f: | ||||
|                     r = csv.DictReader(f) | ||||
|                     for result in r: | ||||
|                         file = result.pop('file', '') | ||||
|                         func = result.pop('name', '') | ||||
|                         result.pop('data_size', None) | ||||
|                         merged_results[(file, func)] = result | ||||
|                         other_fields = result.keys() | ||||
|             except FileNotFoundError: | ||||
|                 pass | ||||
|  | ||||
|         for file, func, size in results: | ||||
|             merged_results[(file, func)]['data_size'] = size | ||||
|  | ||||
|         with openio(args['output'], 'w') as f: | ||||
|             w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size']) | ||||
|             w.writeheader() | ||||
|             for (file, func), result in sorted(merged_results.items()): | ||||
|                 w.writerow({'file': file, 'name': func, **result}) | ||||
|  | ||||
|     # print results | ||||
|     def dedup_entries(results, by='name'): | ||||
|         entries = co.defaultdict(lambda: 0) | ||||
|         for file, func, size in results: | ||||
|             entry = (file if by == 'file' else func) | ||||
|             entries[entry] += size | ||||
|         return entries | ||||
|  | ||||
|     def diff_entries(olds, news): | ||||
|         diff = co.defaultdict(lambda: (0, 0, 0, 0)) | ||||
|         for name, new in news.items(): | ||||
|             diff[name] = (0, new, new, 1.0) | ||||
|         for name, old in olds.items(): | ||||
|             _, new, _, _ = diff[name] | ||||
|             diff[name] = (old, new, new-old, (new-old)/old if old else 1.0) | ||||
|         return diff | ||||
|  | ||||
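The (old, new, diff, ratio) rows built above, with made-up sizes:

    old, new = 512, 576                        # made-up data sizes in bytes
    row = (old, new, new-old, (new-old)/old if old else 1.0)
    print(row)                   # (512, 576, 64, 0.125) -> +64 (+12.5%)
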
|     def sorted_entries(entries): | ||||
|         if args.get('size_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1], x)) | ||||
|         elif args.get('reverse_size_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1], x)) | ||||
|         else: | ||||
|             return sorted(entries) | ||||
|  | ||||
|     def sorted_diff_entries(entries): | ||||
|         if args.get('size_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1][1], x)) | ||||
|         elif args.get('reverse_size_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1][1], x)) | ||||
|         else: | ||||
|             return sorted(entries, key=lambda x: (-x[1][3], x)) | ||||
|  | ||||
|     def print_header(by=''): | ||||
|         if not args.get('diff'): | ||||
|             print('%-36s %7s' % (by, 'size')) | ||||
|         else: | ||||
|             print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff')) | ||||
|  | ||||
|     def print_entry(name, size): | ||||
|         print("%-36s %7d" % (name, size)) | ||||
|  | ||||
|     def print_diff_entry(name, old, new, diff, ratio): | ||||
|         print("%-36s %7s %7s %+7d%s" % (name, | ||||
|             old or "-", | ||||
|             new or "-", | ||||
|             diff, | ||||
|             ' (%+.1f%%)' % (100*ratio) if ratio else '')) | ||||
|  | ||||
|     def print_entries(by='name'): | ||||
|         entries = dedup_entries(results, by=by) | ||||
|  | ||||
|         if not args.get('diff'): | ||||
|             print_header(by=by) | ||||
|             for name, size in sorted_entries(entries.items()): | ||||
|                 print_entry(name, size) | ||||
|         else: | ||||
|             prev_entries = dedup_entries(prev_results, by=by) | ||||
|             diff = diff_entries(prev_entries, entries) | ||||
|             print_header(by='%s (%d added, %d removed)' % (by, | ||||
|                 sum(1 for old, _, _, _ in diff.values() if not old), | ||||
|                 sum(1 for _, new, _, _ in diff.values() if not new))) | ||||
|             for name, (old, new, diff, ratio) in sorted_diff_entries( | ||||
|                     diff.items()): | ||||
|                 if ratio or args.get('all'): | ||||
|                     print_diff_entry(name, old, new, diff, ratio) | ||||
|  | ||||
|     def print_totals(): | ||||
|         if not args.get('diff'): | ||||
|             print_entry('TOTAL', total) | ||||
|         else: | ||||
|             ratio = (0.0 if not prev_total and not total | ||||
|                 else 1.0 if not prev_total | ||||
|                 else (total-prev_total)/prev_total) | ||||
|             print_diff_entry('TOTAL', | ||||
|                 prev_total, total, | ||||
|                 total-prev_total, | ||||
|                 ratio) | ||||
|  | ||||
|     if args.get('quiet'): | ||||
|         pass | ||||
|     elif args.get('summary'): | ||||
|         print_header() | ||||
|         print_totals() | ||||
|     elif args.get('files'): | ||||
|         print_entries(by='file') | ||||
|         print_totals() | ||||
|     else: | ||||
|         print_entries(by='name') | ||||
|         print_totals() | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     import argparse | ||||
|     import sys | ||||
|     parser = argparse.ArgumentParser( | ||||
|         description="Find data size at the function level.") | ||||
|     parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS, | ||||
|         help="Description of where to find *.o files. May be a directory \ | ||||
|             or a list of paths. Defaults to %r." % OBJ_PATHS) | ||||
|     parser.add_argument('-v', '--verbose', action='store_true', | ||||
|         help="Output commands that run behind the scenes.") | ||||
|     parser.add_argument('-q', '--quiet', action='store_true', | ||||
|         help="Don't show anything, useful with -o.") | ||||
|     parser.add_argument('-o', '--output', | ||||
|         help="Specify CSV file to store results.") | ||||
|     parser.add_argument('-u', '--use', | ||||
|         help="Don't compile and find data sizes, instead use this CSV file.") | ||||
|     parser.add_argument('-d', '--diff', | ||||
|         help="Specify CSV file to diff data size against.") | ||||
|     parser.add_argument('-m', '--merge', | ||||
|         help="Merge with an existing CSV file when writing to output.") | ||||
|     parser.add_argument('-a', '--all', action='store_true', | ||||
|         help="Show all functions, not just the ones that changed.") | ||||
|     parser.add_argument('-A', '--everything', action='store_true', | ||||
|         help="Include builtin and libc specific symbols.") | ||||
|     parser.add_argument('-s', '--size-sort', action='store_true', | ||||
|         help="Sort by size.") | ||||
|     parser.add_argument('-S', '--reverse-size-sort', action='store_true', | ||||
|         help="Sort by size, but backwards.") | ||||
|     parser.add_argument('-F', '--files', action='store_true', | ||||
|         help="Show file-level data sizes. Note this does not include padding! " | ||||
|             "So sizes may differ from other tools.") | ||||
|     parser.add_argument('-Y', '--summary', action='store_true', | ||||
|         help="Only show the total data size.") | ||||
|     parser.add_argument('--type', default='dDbB', | ||||
|         help="Type of symbols to report, this uses the same single-character " | ||||
|             "type-names emitted by nm. Defaults to %(default)r.") | ||||
|     parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(), | ||||
|         help="Path to the nm tool to use.") | ||||
|     parser.add_argument('--build-dir', | ||||
|         help="Specify the relative build directory. Used to map object files \ | ||||
|             to the correct source files.") | ||||
|     sys.exit(main(**vars(parser.parse_args()))) | ||||
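The CSV written by -o (and merged by -m) uses a flat file/name/data_size schema, so it can be post-processed directly; a sketch of reading it back, with an illustrative path:

    import csv

    with open('data.csv') as f:                # e.g. written earlier via -o data.csv
        for row in csv.DictReader(f):
            print(row['file'], row['name'], int(row['data_size']))

An invocation like "./scripts/data.py lfs.o -s" (object name illustrative) prints the largest data symbols first.
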
| @@ -106,7 +106,7 @@ def main(args): | ||||
|             struct.unpack('<HH', superblock[1].data[0:4].ljust(4, b'\xff')))) | ||||
|     print("%-47s%s" % ("littlefs v%s.%s" % version, | ||||
|         "data (truncated, if it fits)" | ||||
|         if not any([args.no_truncate, args.tags, args.log, args.all]) else "")) | ||||
|         if not any([args.no_truncate, args.log, args.all]) else "")) | ||||
|  | ||||
|     # print gstate | ||||
|     print("gstate 0x%s" % ''.join('%02x' % c for c in gstate)) | ||||
|   | ||||
scripts/stack.py (new executable file, 430 lines)
							| @@ -0,0 +1,430 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Script to find stack usage at the function level. Will detect recursion and | ||||
| # report it as infinite stack usage. | ||||
| # | ||||
|  | ||||
| import os | ||||
| import sys | ||||
| import glob | ||||
| import itertools as it | ||||
| import re | ||||
| import csv | ||||
| import collections as co | ||||
| import math as m | ||||
|  | ||||
|  | ||||
| CI_PATHS = ['*.ci'] | ||||
|  | ||||
| def collect(paths, **args): | ||||
|     # parse the vcg format | ||||
|     k_pattern = re.compile(r'([a-z]+)\s*:', re.DOTALL) | ||||
|     v_pattern = re.compile('(?:"(.*?)"|([a-z]+))', re.DOTALL) | ||||
|     def parse_vcg(rest): | ||||
|         def parse_vcg(rest): | ||||
|             node = [] | ||||
|             while True: | ||||
|                 rest = rest.lstrip() | ||||
|                 m = k_pattern.match(rest) | ||||
|                 if not m: | ||||
|                     return (node, rest) | ||||
|                 k, rest = m.group(1), rest[m.end(0):] | ||||
|  | ||||
|                 rest = rest.lstrip() | ||||
|                 if rest.startswith('{'): | ||||
|                     v, rest = parse_vcg(rest[1:]) | ||||
|                     assert rest[0] == '}', "unexpected %r" % rest[0:1] | ||||
|                     rest = rest[1:] | ||||
|                     node.append((k, v)) | ||||
|                 else: | ||||
|                     m = v_pattern.match(rest) | ||||
|                     assert m, "unexpected %r" % rest[0:1] | ||||
|                     v, rest = m.group(1) or m.group(2), rest[m.end(0):] | ||||
|                     node.append((k, v)) | ||||
|  | ||||
|         node, rest = parse_vcg(rest) | ||||
|         assert rest == '', "unexpected %r" % rest[0:1] | ||||
|         return node | ||||
|  | ||||
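A minimal check of the parser above on a hand-written VCG fragment (shown as if parse_vcg were lifted out of collect; the doubled backslashes encode the literal \n separators GCC puts inside label strings):

    graph = parse_vcg(
        'graph: { title: "callgraph" '
        'node: { title: "main" '
        'label: "main\\nmain.c:1:1\\n16 bytes (static)" } }')
    # -> [('graph', [('title', 'callgraph'),
    #                ('node', [('title', 'main'), ('label', ...)])])]
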
|     # collect into functions | ||||
|     results = co.defaultdict(lambda: (None, None, 0, set())) | ||||
|     f_pattern = re.compile( | ||||
|         r'([^\\]*)\\n([^:]*)[^\\]*\\n([0-9]+) bytes \((.*)\)') | ||||
|     for path in paths: | ||||
|         with open(path) as f: | ||||
|             vcg = parse_vcg(f.read()) | ||||
|         for k, graph in vcg: | ||||
|             if k != 'graph': | ||||
|                 continue | ||||
|             for k, info in graph: | ||||
|                 if k == 'node': | ||||
|                     info = dict(info) | ||||
|                     m = f_pattern.match(info['label']) | ||||
|                     if m: | ||||
|                         function, file, size, type = m.groups() | ||||
|                         if not args.get('quiet') and type != 'static': | ||||
|                             print('warning: found non-static stack for %s (%s)' | ||||
|                                 % (function, type)) | ||||
|                         _, _, _, targets = results[info['title']] | ||||
|                         results[info['title']] = ( | ||||
|                             file, function, int(size), targets) | ||||
|                 elif k == 'edge': | ||||
|                     info = dict(info) | ||||
|                     _, _, _, targets = results[info['sourcename']] | ||||
|                     targets.add(info['targetname']) | ||||
|                 else: | ||||
|                     continue | ||||
|  | ||||
|     if not args.get('everything'): | ||||
|         for source, (s_file, s_function, _, _) in list(results.items()): | ||||
|             # discard internal functions | ||||
|             if s_file.startswith('<') or s_file.startswith('/usr/include'): | ||||
|                 del results[source] | ||||
|  | ||||
|     # find maximum stack size recursively; this also requires detecting cycles | ||||
|     # (in case of recursion) | ||||
|     def find_limit(source, seen=None): | ||||
|         seen = seen or set() | ||||
|         if source not in results: | ||||
|             return 0 | ||||
|         _, _, frame, targets = results[source] | ||||
|  | ||||
|         limit = 0 | ||||
|         for target in targets: | ||||
|             if target in seen: | ||||
|                 # found a cycle | ||||
|                 return float('inf') | ||||
|             limit_ = find_limit(target, seen | {target}) | ||||
|             limit = max(limit, limit_) | ||||
|  | ||||
|         return frame + limit | ||||
|  | ||||
|     def find_deps(targets): | ||||
|         deps = set() | ||||
|         for target in targets: | ||||
|             if target in results: | ||||
|                 t_file, t_function, _, _ = results[target] | ||||
|                 deps.add((t_file, t_function)) | ||||
|         return deps | ||||
|  | ||||
|     # flatten into a list | ||||
|     flat_results = [] | ||||
|     for source, (s_file, s_function, frame, targets) in results.items(): | ||||
|         limit = find_limit(source) | ||||
|         deps = find_deps(targets) | ||||
|         flat_results.append((s_file, s_function, frame, limit, deps)) | ||||
|  | ||||
|     return flat_results | ||||
|  | ||||
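The find_limit traversal in isolation, on a made-up call graph with one recursive edge:

    import math

    results = {                  # title -> (file, function, frame, targets)
        'a': ('a.c', 'a', 16, {'b'}),
        'b': ('a.c', 'b', 32, {'a'}),          # b calls back into a: a cycle
        'c': ('a.c', 'c', 8, set()),
    }

    def find_limit(source, seen=None):
        seen = seen or set()
        if source not in results:
            return 0
        _, _, frame, targets = results[source]
        limit = 0
        for target in targets:
            if target in seen:
                return float('inf')            # found a cycle
            limit = max(limit, find_limit(target, seen | {target}))
        return frame + limit

    assert find_limit('c') == 8
    assert math.isinf(find_limit('a'))         # recursion -> infinite stack
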
| def main(**args): | ||||
|     def openio(path, mode='r'): | ||||
|         if path == '-': | ||||
|             if 'r' in mode: | ||||
|                 return os.fdopen(os.dup(sys.stdin.fileno()), 'r') | ||||
|             else: | ||||
|                 return os.fdopen(os.dup(sys.stdout.fileno()), 'w') | ||||
|         else: | ||||
|             return open(path, mode) | ||||
|  | ||||
|     # find sizes | ||||
|     if not args.get('use', None): | ||||
|         # find .ci files | ||||
|         paths = [] | ||||
|         for path in args['ci_paths']: | ||||
|             if os.path.isdir(path): | ||||
|                 path = path + '/*.ci' | ||||
|  | ||||
|             for path in glob.glob(path): | ||||
|                 paths.append(path) | ||||
|  | ||||
|         if not paths: | ||||
|             print('no .ci files found in %r?' % args['ci_paths']) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|         results = collect(paths, **args) | ||||
|     else: | ||||
|         with openio(args['use']) as f: | ||||
|             r = csv.DictReader(f) | ||||
|             results = [ | ||||
|                 (   result['file'], | ||||
|                     result['name'], | ||||
|                     int(result['stack_frame']), | ||||
|                     float(result['stack_limit']), # note limit can be inf | ||||
|                     set()) | ||||
|                 for result in r | ||||
|                 if result.get('stack_frame') not in {None, ''} | ||||
|                 if result.get('stack_limit') not in {None, ''}] | ||||
|  | ||||
|     total_frame = 0 | ||||
|     total_limit = 0 | ||||
|     for _, _, frame, limit, _ in results: | ||||
|         total_frame += frame | ||||
|         total_limit = max(total_limit, limit) | ||||
|  | ||||
|     # find previous results? | ||||
|     if args.get('diff'): | ||||
|         try: | ||||
|             with openio(args['diff']) as f: | ||||
|                 r = csv.DictReader(f) | ||||
|                 prev_results = [ | ||||
|                     (   result['file'], | ||||
|                         result['name'], | ||||
|                         int(result['stack_frame']), | ||||
|                         float(result['stack_limit']), | ||||
|                         set()) | ||||
|                     for result in r | ||||
|                     if result.get('stack_frame') not in {None, ''} | ||||
|                     if result.get('stack_limit') not in {None, ''}] | ||||
|         except FileNotFoundError: | ||||
|             prev_results = [] | ||||
|  | ||||
|         prev_total_frame = 0 | ||||
|         prev_total_limit = 0 | ||||
|         for _, _, frame, limit, _ in prev_results: | ||||
|             prev_total_frame += frame | ||||
|             prev_total_limit = max(prev_total_limit, limit) | ||||
|  | ||||
|     # write results to CSV | ||||
|     if args.get('output'): | ||||
|         merged_results = co.defaultdict(lambda: {}) | ||||
|         other_fields = [] | ||||
|  | ||||
|         # merge? | ||||
|         if args.get('merge'): | ||||
|             try: | ||||
|                 with openio(args['merge']) as f: | ||||
|                     r = csv.DictReader(f) | ||||
|                     for result in r: | ||||
|                         file = result.pop('file', '') | ||||
|                         func = result.pop('name', '') | ||||
|                         result.pop('stack_frame', None) | ||||
|                         result.pop('stack_limit', None) | ||||
|                         merged_results[(file, func)] = result | ||||
|                         other_fields = result.keys() | ||||
|             except FileNotFoundError: | ||||
|                 pass | ||||
|  | ||||
|         for file, func, frame, limit, _ in results: | ||||
|             merged_results[(file, func)]['stack_frame'] = frame | ||||
|             merged_results[(file, func)]['stack_limit'] = limit | ||||
|  | ||||
|         with openio(args['output'], 'w') as f: | ||||
|             w = csv.DictWriter(f, ['file', 'name', *other_fields, 'stack_frame', 'stack_limit']) | ||||
|             w.writeheader() | ||||
|             for (file, func), result in sorted(merged_results.items()): | ||||
|                 w.writerow({'file': file, 'name': func, **result}) | ||||
|  | ||||
|     # print results | ||||
|     def dedup_entries(results, by='name'): | ||||
|         entries = co.defaultdict(lambda: (0, 0, set())) | ||||
|         for file, func, frame, limit, deps in results: | ||||
|             entry = (file if by == 'file' else func) | ||||
|             entry_frame, entry_limit, entry_deps = entries[entry] | ||||
|             entries[entry] = ( | ||||
|                 entry_frame + frame, | ||||
|                 max(entry_limit, limit), | ||||
|                 entry_deps | {file if by == 'file' else func | ||||
|                     for file, func in deps}) | ||||
|         return entries | ||||
|  | ||||
|     def diff_entries(olds, news): | ||||
|         diff = co.defaultdict(lambda: (None, None, None, None, 0, 0, 0, set())) | ||||
|         for name, (new_frame, new_limit, deps) in news.items(): | ||||
|             diff[name] = ( | ||||
|                 None, None, | ||||
|                 new_frame, new_limit, | ||||
|                 new_frame, new_limit, | ||||
|                 1.0, | ||||
|                 deps) | ||||
|         for name, (old_frame, old_limit, _) in olds.items(): | ||||
|             _, _, new_frame, new_limit, _, _, _, deps = diff[name] | ||||
|             diff[name] = ( | ||||
|                 old_frame, old_limit, | ||||
|                 new_frame, new_limit, | ||||
|                 (new_frame or 0) - (old_frame or 0), | ||||
|                 0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0) | ||||
|                     else (new_limit or 0) - (old_limit or 0), | ||||
|                 0.0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0) | ||||
|                     else +float('inf') if m.isinf(new_limit or 0) | ||||
|                     else -float('inf') if m.isinf(old_limit or 0) | ||||
|                     else +0.0 if not old_limit and not new_limit | ||||
|                     else +1.0 if not old_limit | ||||
|                     else ((new_limit or 0) - (old_limit or 0))/(old_limit or 0), | ||||
|                 deps) | ||||
|         return diff | ||||
|  | ||||
|     def sorted_entries(entries): | ||||
|         if args.get('limit_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1][1], x)) | ||||
|         elif args.get('reverse_limit_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1][1], x)) | ||||
|         elif args.get('frame_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1][0], x)) | ||||
|         elif args.get('reverse_frame_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1][0], x)) | ||||
|         else: | ||||
|             return sorted(entries) | ||||
|  | ||||
|     def sorted_diff_entries(entries): | ||||
|         if args.get('limit_sort'): | ||||
|             return sorted(entries, key=lambda x: (-(x[1][3] or 0), x)) | ||||
|         elif args.get('reverse_limit_sort'): | ||||
|             return sorted(entries, key=lambda x: (+(x[1][3] or 0), x)) | ||||
|         elif args.get('frame_sort'): | ||||
|             return sorted(entries, key=lambda x: (-(x[1][2] or 0), x)) | ||||
|         elif args.get('reverse_frame_sort'): | ||||
|             return sorted(entries, key=lambda x: (+(x[1][2] or 0), x)) | ||||
|         else: | ||||
|             return sorted(entries, key=lambda x: (-x[1][6], x)) | ||||
|  | ||||
|     def print_header(by=''): | ||||
|         if not args.get('diff'): | ||||
|             print('%-36s %7s %7s' % (by, 'frame', 'limit')) | ||||
|         else: | ||||
|             print('%-36s %15s %15s %15s' % (by, 'old', 'new', 'diff')) | ||||
|  | ||||
|     def print_entry(name, frame, limit): | ||||
|         print("%-36s %7d %7s" % (name, | ||||
|             frame, '∞' if m.isinf(limit) else int(limit))) | ||||
|  | ||||
|     def print_diff_entry(name, | ||||
|             old_frame, old_limit, | ||||
|             new_frame, new_limit, | ||||
|             diff_frame, diff_limit, | ||||
|             ratio): | ||||
|         print('%-36s %7s %7s %7s %7s %+7d %7s%s' % (name, | ||||
|             old_frame if old_frame is not None else "-", | ||||
|             ('∞' if m.isinf(old_limit) else int(old_limit)) | ||||
|                 if old_limit is not None else "-", | ||||
|             new_frame if new_frame is not None else "-", | ||||
|             ('∞' if m.isinf(new_limit) else int(new_limit)) | ||||
|                 if new_limit is not None else "-", | ||||
|             diff_frame, | ||||
|             ('+∞' if diff_limit > 0 and m.isinf(diff_limit) | ||||
|                 else '-∞' if diff_limit < 0 and m.isinf(diff_limit) | ||||
|                 else '%+d' % diff_limit), | ||||
|             '' if not ratio | ||||
|                 else ' (+∞%)' if ratio > 0 and m.isinf(ratio) | ||||
|                 else ' (-∞%)' if ratio < 0 and m.isinf(ratio) | ||||
|                 else ' (%+.1f%%)' % (100*ratio))) | ||||
|  | ||||
|     def print_entries(by='name'): | ||||
|         # build optional tree of dependencies | ||||
|         def print_deps(entries, depth, print, | ||||
|                 filter=lambda _: True, | ||||
|                 prefixes=('', '', '', '')): | ||||
|             entries = entries if isinstance(entries, list) else list(entries) | ||||
|             filtered_entries = [(name, entry) | ||||
|                 for name, entry in entries | ||||
|                 if filter(name)] | ||||
|             for i, (name, entry) in enumerate(filtered_entries): | ||||
|                 last = (i == len(filtered_entries)-1) | ||||
|                 print(prefixes[0+last] + name, entry) | ||||
|  | ||||
|                 if depth > 0: | ||||
|                     deps = entry[-1] | ||||
|                     print_deps(entries, depth-1, print, | ||||
|                         lambda name: name in deps, | ||||
|                         (   prefixes[2+last] + "|-> ", | ||||
|                             prefixes[2+last] + "'-> ", | ||||
|                             prefixes[2+last] + "|   ", | ||||
|                             prefixes[2+last] + "    ")) | ||||
|  | ||||
|         entries = dedup_entries(results, by=by) | ||||
|  | ||||
|         if not args.get('diff'): | ||||
|             print_header(by=by) | ||||
|             print_deps( | ||||
|                 sorted_entries(entries.items()), | ||||
|                 args.get('depth') or 0, | ||||
|                 lambda name, entry: print_entry(name, *entry[:-1])) | ||||
|         else: | ||||
|             prev_entries = dedup_entries(prev_results, by=by) | ||||
|             diff = diff_entries(prev_entries, entries) | ||||
|  | ||||
|             print_header(by='%s (%d added, %d removed)' % (by, | ||||
|                 sum(1 for _, old, _, _, _, _, _, _ in diff.values() if old is None), | ||||
|                 sum(1 for _, _, _, new, _, _, _, _ in diff.values() if new is None))) | ||||
|             print_deps( | ||||
|                 filter( | ||||
|                     lambda x: x[1][6] or args.get('all'), | ||||
|                     sorted_diff_entries(diff.items())), | ||||
|                 args.get('depth') or 0, | ||||
|                 lambda name, entry: print_diff_entry(name, *entry[:-1])) | ||||
|  | ||||
|     def print_totals(): | ||||
|         if not args.get('diff'): | ||||
|             print_entry('TOTAL', total_frame, total_limit) | ||||
|         else: | ||||
|             diff_frame = total_frame - prev_total_frame | ||||
|             diff_limit = ( | ||||
|                 0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0) | ||||
|                     else (total_limit or 0) - (prev_total_limit or 0)) | ||||
|             ratio = ( | ||||
|                 0.0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0) | ||||
|                     else +float('inf') if m.isinf(total_limit or 0) | ||||
|                     else -float('inf') if m.isinf(prev_total_limit or 0) | ||||
|                     else 0.0 if not prev_total_limit and not total_limit | ||||
|                     else 1.0 if not prev_total_limit | ||||
|                     else ((total_limit or 0) - (prev_total_limit or 0))/(prev_total_limit or 0)) | ||||
|             print_diff_entry('TOTAL', | ||||
|                 prev_total_frame, prev_total_limit, | ||||
|                 total_frame, total_limit, | ||||
|                 diff_frame, diff_limit, | ||||
|                 ratio) | ||||
|  | ||||
|     if args.get('quiet'): | ||||
|         pass | ||||
|     elif args.get('summary'): | ||||
|         print_header() | ||||
|         print_totals() | ||||
|     elif args.get('files'): | ||||
|         print_entries(by='file') | ||||
|         print_totals() | ||||
|     else: | ||||
|         print_entries(by='name') | ||||
|         print_totals() | ||||
|  | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     import argparse | ||||
|     import sys | ||||
|     parser = argparse.ArgumentParser( | ||||
|         description="Find stack usage at the function level.") | ||||
|     parser.add_argument('ci_paths', nargs='*', default=CI_PATHS, | ||||
|         help="Description of where to find *.ci files. May be a directory \ | ||||
|             or a list of paths. Defaults to %r." % CI_PATHS) | ||||
|     parser.add_argument('-v', '--verbose', action='store_true', | ||||
|         help="Output commands that run behind the scenes.") | ||||
|     parser.add_argument('-q', '--quiet', action='store_true', | ||||
|         help="Don't show anything, useful with -o.") | ||||
|     parser.add_argument('-o', '--output', | ||||
|         help="Specify CSV file to store results.") | ||||
|     parser.add_argument('-u', '--use', | ||||
|         help="Don't parse callgraph files, instead use this CSV file.") | ||||
|     parser.add_argument('-d', '--diff', | ||||
|         help="Specify CSV file to diff against.") | ||||
|     parser.add_argument('-m', '--merge', | ||||
|         help="Merge with an existing CSV file when writing to output.") | ||||
|     parser.add_argument('-a', '--all', action='store_true', | ||||
|         help="Show all functions, not just the ones that changed.") | ||||
|     parser.add_argument('-A', '--everything', action='store_true', | ||||
|         help="Include builtin and libc specific symbols.") | ||||
|     parser.add_argument('-s', '--limit-sort', action='store_true', | ||||
|         help="Sort by stack limit.") | ||||
|     parser.add_argument('-S', '--reverse-limit-sort', action='store_true', | ||||
|         help="Sort by stack limit, but backwards.") | ||||
|     parser.add_argument('--frame-sort', action='store_true', | ||||
|         help="Sort by stack frame size.") | ||||
|     parser.add_argument('--reverse-frame-sort', action='store_true', | ||||
|         help="Sort by stack frame size, but backwards.") | ||||
|     parser.add_argument('-L', '--depth', default=0, type=lambda x: int(x, 0), | ||||
|         nargs='?', const=float('inf'), | ||||
|         help="Depth of dependencies to show.") | ||||
|     parser.add_argument('-F', '--files', action='store_true', | ||||
|         help="Show file-level calls.") | ||||
|     parser.add_argument('-Y', '--summary', action='store_true', | ||||
|         help="Only show the total stack size.") | ||||
|     parser.add_argument('--build-dir', | ||||
|         help="Specify the relative build directory. Used to map object files \ | ||||
|             to the correct source files.") | ||||
|     sys.exit(main(**vars(parser.parse_args()))) | ||||
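Usage note: the *.ci callgraph files are typically produced by building with GCC's -fcallgraph-info=su flag; something like "./scripts/stack.py lfs.ci -L2" (path illustrative) would then print per-function frame/limit numbers with two levels of callees shown under each entry.
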
scripts/structs.py (new executable file, 331 lines)
							| @@ -0,0 +1,331 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Script to find struct sizes. | ||||
| # | ||||
|  | ||||
| import os | ||||
| import sys | ||||
| import glob | ||||
| import itertools as it | ||||
| import subprocess as sp | ||||
| import shlex | ||||
| import re | ||||
| import csv | ||||
| import collections as co | ||||
|  | ||||
|  | ||||
| OBJ_PATHS = ['*.o'] | ||||
|  | ||||
| def collect(paths, **args): | ||||
|     decl_pattern = re.compile( | ||||
|         r'^\s+(?P<no>[0-9]+)' | ||||
|             r'\s+(?P<dir>[0-9]+)' | ||||
|             r'\s+.*' | ||||
|             r'\s+(?P<file>[^\s]+)$') | ||||
|     struct_pattern = re.compile( | ||||
|         r'^(?:.*DW_TAG_(?P<tag>[a-z_]+).*' | ||||
|             r'|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*' | ||||
|             r'|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*' | ||||
|             r'|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$') | ||||
|  | ||||
|     results = co.defaultdict(lambda: 0) | ||||
|     for path in paths: | ||||
|         # find decl, we want to filter by structs in .h files | ||||
|         decls = {} | ||||
|         # note objdump-tool may contain extra args | ||||
|         cmd = args['objdump_tool'] + ['--dwarf=rawline', path] | ||||
|         if args.get('verbose'): | ||||
|             print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|         proc = sp.Popen(cmd, | ||||
|             stdout=sp.PIPE, | ||||
|             stderr=sp.PIPE if not args.get('verbose') else None, | ||||
|             universal_newlines=True, | ||||
|             errors='replace') | ||||
|         for line in proc.stdout: | ||||
|             # find file numbers | ||||
|             m = decl_pattern.match(line) | ||||
|             if m: | ||||
|                 decls[int(m.group('no'))] = m.group('file') | ||||
|         proc.wait() | ||||
|         if proc.returncode != 0: | ||||
|             if not args.get('verbose'): | ||||
|                 for line in proc.stderr: | ||||
|                     sys.stdout.write(line) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|         # collect structs as we parse dwarf info | ||||
|         found = False | ||||
|         name = None | ||||
|         decl = None | ||||
|         size = None | ||||
|  | ||||
|         # note objdump-tool may contain extra args | ||||
|         cmd = args['objdump_tool'] + ['--dwarf=info', path] | ||||
|         if args.get('verbose'): | ||||
|             print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|         proc = sp.Popen(cmd, | ||||
|             stdout=sp.PIPE, | ||||
|             stderr=sp.PIPE if not args.get('verbose') else None, | ||||
|             universal_newlines=True, | ||||
|             errors='replace') | ||||
|         for line in proc.stdout: | ||||
|             # state machine here to find structs | ||||
|             m = struct_pattern.match(line) | ||||
|             if m: | ||||
|                 if m.group('tag'): | ||||
|                     if (name is not None | ||||
|                             and decl is not None | ||||
|                             and size is not None): | ||||
|                         decl = decls.get(decl, '?') | ||||
|                         results[(decl, name)] = size | ||||
|                     found = (m.group('tag') == 'structure_type') | ||||
|                     name = None | ||||
|                     decl = None | ||||
|                     size = None | ||||
|                 elif found and m.group('name'): | ||||
|                     name = m.group('name') | ||||
|                 elif found and name and m.group('decl'): | ||||
|                     decl = int(m.group('decl')) | ||||
|                 elif found and name and m.group('size'): | ||||
|                     size = int(m.group('size')) | ||||
|         proc.wait() | ||||
|         if proc.returncode != 0: | ||||
|             if not args.get('verbose'): | ||||
|                 for line in proc.stderr: | ||||
|                     sys.stdout.write(line) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|     flat_results = [] | ||||
|     for (file, struct), size in results.items(): | ||||
|         # map to source files | ||||
|         if args.get('build_dir'): | ||||
|             file = re.sub('%s/*' % re.escape(args['build_dir']), '', file) | ||||
|         # only include structs declared in header files in the current | ||||
|         # directory, ignore internal-only structs (these are represented | ||||
|         # in other measurements) | ||||
|         if not args.get('everything'): | ||||
|             if not file.endswith('.h'): | ||||
|                 continue | ||||
|         # replace .o with .c, different scripts report .o/.c, and we need to | ||||
|         # choose one if we want to deduplicate csv files | ||||
|         file = re.sub(r'\.o$', '.c', file) | ||||
|  | ||||
|         flat_results.append((file, struct, size)) | ||||
|  | ||||
|     return flat_results | ||||
|  | ||||
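The state machine above keys off one DW_TAG line and three DW_AT lines per struct; made-up objdump --dwarf=info lines for illustration (exact formatting varies by binutils version):

    import re

    struct_pattern = re.compile(
        r'^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
            r'|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
            r'|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
            r'|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
    lines = [
        ' <1><2d>: Abbrev Number: 2 (DW_TAG_structure_type)',
        '    <2e>   DW_AT_name        : lfs_config',
        '    <32>   DW_AT_byte_size   : 96',
        '    <33>   DW_AT_decl_file   : 5',
    ]
    for line in lines:
        m = struct_pattern.match(line)
        print({k: v for k, v in m.groupdict().items() if v})
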
|  | ||||
| def main(**args): | ||||
|     def openio(path, mode='r'): | ||||
|         if path == '-': | ||||
|             if 'r' in mode: | ||||
|                 return os.fdopen(os.dup(sys.stdin.fileno()), 'r') | ||||
|             else: | ||||
|                 return os.fdopen(os.dup(sys.stdout.fileno()), 'w') | ||||
|         else: | ||||
|             return open(path, mode) | ||||
|  | ||||
|     # find sizes | ||||
|     if not args.get('use', None): | ||||
|         # find .o files | ||||
|         paths = [] | ||||
|         for path in args['obj_paths']: | ||||
|             if os.path.isdir(path): | ||||
|                 path = path + '/*.o' | ||||
|  | ||||
|             for path in glob.glob(path): | ||||
|                 paths.append(path) | ||||
|  | ||||
|         if not paths: | ||||
|             print('no .o files found in %r?' % args['obj_paths']) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|         results = collect(paths, **args) | ||||
|     else: | ||||
|         with openio(args['use']) as f: | ||||
|             r = csv.DictReader(f) | ||||
|             results = [ | ||||
|                 (   result['file'], | ||||
|                     result['name'], | ||||
|                     int(result['struct_size'])) | ||||
|                 for result in r | ||||
|                 if result.get('struct_size') not in {None, ''}] | ||||
|  | ||||
|     total = 0 | ||||
|     for _, _, size in results: | ||||
|         total += size | ||||
|  | ||||
|     # find previous results? | ||||
|     if args.get('diff'): | ||||
|         try: | ||||
|             with openio(args['diff']) as f: | ||||
|                 r = csv.DictReader(f) | ||||
|                 prev_results = [ | ||||
|                     (   result['file'], | ||||
|                         result['name'], | ||||
|                         int(result['struct_size'])) | ||||
|                     for result in r | ||||
|                     if result.get('struct_size') not in {None, ''}] | ||||
|         except FileNotFoundError: | ||||
|             prev_results = [] | ||||
|  | ||||
|         prev_total = 0 | ||||
|         for _, _, size in prev_results: | ||||
|             prev_total += size | ||||
|  | ||||
|     # write results to CSV | ||||
|     if args.get('output'): | ||||
|         merged_results = co.defaultdict(lambda: {}) | ||||
|         other_fields = [] | ||||
|  | ||||
|         # merge? | ||||
|         if args.get('merge'): | ||||
|             try: | ||||
|                 with openio(args['merge']) as f: | ||||
|                     r = csv.DictReader(f) | ||||
|                     for result in r: | ||||
|                         file = result.pop('file', '') | ||||
|                         struct = result.pop('name', '') | ||||
|                         result.pop('struct_size', None) | ||||
|                         merged_results[(file, struct)] = result | ||||
|                         other_fields = result.keys() | ||||
|             except FileNotFoundError: | ||||
|                 pass | ||||
|  | ||||
|         for file, struct, size in results: | ||||
|             merged_results[(file, struct)]['struct_size'] = size | ||||
|  | ||||
|         with openio(args['output'], 'w') as f: | ||||
|             w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size']) | ||||
|             w.writeheader() | ||||
|             for (file, struct), result in sorted(merged_results.items()): | ||||
|                 w.writerow({'file': file, 'name': struct, **result}) | ||||
|  | ||||
|     # print results | ||||
|     def dedup_entries(results, by='name'): | ||||
|         entries = co.defaultdict(lambda: 0) | ||||
|         for file, struct, size in results: | ||||
|             entry = (file if by == 'file' else struct) | ||||
|             entries[entry] += size | ||||
|         return entries | ||||
|  | ||||
|     def diff_entries(olds, news): | ||||
|         diff = co.defaultdict(lambda: (0, 0, 0, 0)) | ||||
|         for name, new in news.items(): | ||||
|             diff[name] = (0, new, new, 1.0) | ||||
|         for name, old in olds.items(): | ||||
|             _, new, _, _ = diff[name] | ||||
|             diff[name] = (old, new, new-old, (new-old)/old if old else 1.0) | ||||
|         return diff | ||||
|  | ||||
|     def sorted_entries(entries): | ||||
|         if args.get('size_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1], x)) | ||||
|         elif args.get('reverse_size_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1], x)) | ||||
|         else: | ||||
|             return sorted(entries) | ||||
|  | ||||
|     def sorted_diff_entries(entries): | ||||
|         if args.get('size_sort'): | ||||
|             return sorted(entries, key=lambda x: (-x[1][1], x)) | ||||
|         elif args.get('reverse_size_sort'): | ||||
|             return sorted(entries, key=lambda x: (+x[1][1], x)) | ||||
|         else: | ||||
|             return sorted(entries, key=lambda x: (-x[1][3], x)) | ||||
|  | ||||
|     def print_header(by=''): | ||||
|         if not args.get('diff'): | ||||
|             print('%-36s %7s' % (by, 'size')) | ||||
|         else: | ||||
|             print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff')) | ||||
|  | ||||
|     def print_entry(name, size): | ||||
|         print("%-36s %7d" % (name, size)) | ||||
|  | ||||
|     def print_diff_entry(name, old, new, diff, ratio): | ||||
|         print("%-36s %7s %7s %+7d%s" % (name, | ||||
|             old or "-", | ||||
|             new or "-", | ||||
|             diff, | ||||
|             ' (%+.1f%%)' % (100*ratio) if ratio else '')) | ||||
|  | ||||
|     def print_entries(by='name'): | ||||
|         entries = dedup_entries(results, by=by) | ||||
|  | ||||
|         if not args.get('diff'): | ||||
|             print_header(by=by) | ||||
|             for name, size in sorted_entries(entries.items()): | ||||
|                 print_entry(name, size) | ||||
|         else: | ||||
|             prev_entries = dedup_entries(prev_results, by=by) | ||||
|             diff = diff_entries(prev_entries, entries) | ||||
|             print_header(by='%s (%d added, %d removed)' % (by, | ||||
|                 sum(1 for old, _, _, _ in diff.values() if not old), | ||||
|                 sum(1 for _, new, _, _ in diff.values() if not new))) | ||||
|             for name, (old, new, diff, ratio) in sorted_diff_entries( | ||||
|                     diff.items()): | ||||
|                 if ratio or args.get('all'): | ||||
|                     print_diff_entry(name, old, new, diff, ratio) | ||||
|  | ||||
|     def print_totals(): | ||||
|         if not args.get('diff'): | ||||
|             print_entry('TOTAL', total) | ||||
|         else: | ||||
|             ratio = (0.0 if not prev_total and not total | ||||
|                 else 1.0 if not prev_total | ||||
|                 else (total-prev_total)/prev_total) | ||||
|             print_diff_entry('TOTAL', | ||||
|                 prev_total, total, | ||||
|                 total-prev_total, | ||||
|                 ratio) | ||||
|  | ||||
|     if args.get('quiet'): | ||||
|         pass | ||||
|     elif args.get('summary'): | ||||
|         print_header() | ||||
|         print_totals() | ||||
|     elif args.get('files'): | ||||
|         print_entries(by='file') | ||||
|         print_totals() | ||||
|     else: | ||||
|         print_entries(by='name') | ||||
|         print_totals() | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     import argparse | ||||
|     import sys | ||||
|     parser = argparse.ArgumentParser( | ||||
|         description="Find struct sizes.") | ||||
|     parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS, | ||||
|         help="Description of where to find *.o files. May be a directory \ | ||||
|             or a list of paths. Defaults to %r." % OBJ_PATHS) | ||||
|     parser.add_argument('-v', '--verbose', action='store_true', | ||||
|         help="Output commands that run behind the scenes.") | ||||
|     parser.add_argument('-q', '--quiet', action='store_true', | ||||
|         help="Don't show anything, useful with -o.") | ||||
|     parser.add_argument('-o', '--output', | ||||
|         help="Specify CSV file to store results.") | ||||
|     parser.add_argument('-u', '--use', | ||||
|         help="Don't compile and find struct sizes, instead use this CSV file.") | ||||
|     parser.add_argument('-d', '--diff', | ||||
|         help="Specify CSV file to diff struct size against.") | ||||
|     parser.add_argument('-m', '--merge', | ||||
|         help="Merge with an existing CSV file when writing to output.") | ||||
|     parser.add_argument('-a', '--all', action='store_true', | ||||
|         help="Show all functions, not just the ones that changed.") | ||||
|     parser.add_argument('-A', '--everything', action='store_true', | ||||
|         help="Include builtin and libc specific symbols.") | ||||
|     parser.add_argument('-s', '--size-sort', action='store_true', | ||||
|         help="Sort by size.") | ||||
|     parser.add_argument('-S', '--reverse-size-sort', action='store_true', | ||||
|         help="Sort by size, but backwards.") | ||||
|     parser.add_argument('-F', '--files', action='store_true', | ||||
|         help="Show file-level struct sizes.") | ||||
|     parser.add_argument('-Y', '--summary', action='store_true', | ||||
|         help="Only show the total struct size.") | ||||
|     parser.add_argument('--objdump-tool', default=['objdump'], type=lambda x: x.split(), | ||||
|         help="Path to the objdump tool to use.") | ||||
|     parser.add_argument('--build-dir', | ||||
|         help="Specify the relative build directory. Used to map object files \ | ||||
|             to the correct source files.") | ||||
|     sys.exit(main(**vars(parser.parse_args()))) | ||||
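Usage note: since struct sizes come from DWARF info, the objects must be built with debug info (-g); an invocation like "./scripts/structs.py lfs.o -s" (object name illustrative) then lists the largest header-declared structs first.
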
scripts/summary.py (new executable file, 279 lines)
							| @@ -0,0 +1,279 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Script to summarize the outputs of other scripts. Operates on CSV files. | ||||
| # | ||||
|  | ||||
| import functools as ft | ||||
| import collections as co | ||||
| import os | ||||
| import sys | ||||
| import csv | ||||
| import re | ||||
| import math as m | ||||
|  | ||||
| # displayable fields | ||||
| Field = co.namedtuple('Field', 'name,parse,acc,key,fmt,repr,null,ratio') | ||||
| FIELDS = [ | ||||
|     # name, parse, accumulate, fmt, print, null | ||||
|     Field('code', | ||||
|         lambda r: int(r['code_size']), | ||||
|         sum, | ||||
|         lambda r: r, | ||||
|         '%7s', | ||||
|         lambda r: r, | ||||
|         '-', | ||||
|         lambda old, new: (new-old)/old), | ||||
|     Field('data', | ||||
|         lambda r: int(r['data_size']), | ||||
|         sum, | ||||
|         lambda r: r, | ||||
|         '%7s', | ||||
|         lambda r: r, | ||||
|         '-', | ||||
|         lambda old, new: (new-old)/old), | ||||
|     Field('stack', | ||||
|         lambda r: float(r['stack_limit']), | ||||
|         max, | ||||
|         lambda r: r, | ||||
|         '%7s', | ||||
|         lambda r: '∞' if m.isinf(r) else int(r), | ||||
|         '-', | ||||
|         lambda old, new: (new-old)/old), | ||||
|     Field('structs', | ||||
|         lambda r: int(r['struct_size']), | ||||
|         sum, | ||||
|         lambda r: r, | ||||
|         '%8s', | ||||
|         lambda r: r, | ||||
|         '-', | ||||
|         lambda old, new: (new-old)/old), | ||||
|     Field('coverage', | ||||
|         lambda r: (int(r['coverage_hits']), int(r['coverage_count'])), | ||||
|         lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs), | ||||
|         lambda r: r[0]/r[1], | ||||
|         '%19s', | ||||
|         lambda r: '%11s %7s' % ('%d/%d' % (r[0], r[1]), '%.1f%%' % (100*r[0]/r[1])), | ||||
|         '%11s %7s' % ('-', '-'), | ||||
|         lambda old, new: ((new[0]/new[1]) - (old[0]/old[1]))) | ||||
| ] | ||||
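| # Note the per-field semantics encoded above: 'code', 'data', and 'structs' | ||||
| # accumulate with sum, 'stack' keeps the max (and may be ∞), while | ||||
| # 'coverage' reduces (hits, count) pairs element-wise so the hit ratio | ||||
| # can be recomputed after results are merged. | ||||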
|  | ||||
|  | ||||
| def main(**args): | ||||
|     def openio(path, mode='r'): | ||||
|         if path == '-': | ||||
|             if 'r' in mode: | ||||
|                 return os.fdopen(os.dup(sys.stdin.fileno()), 'r') | ||||
|             else: | ||||
|                 return os.fdopen(os.dup(sys.stdout.fileno()), 'w') | ||||
|         else: | ||||
|             return open(path, mode) | ||||
|  | ||||
|     # find results | ||||
|     results = co.defaultdict(lambda: {}) | ||||
|     for path in args.get('csv_paths', '-'): | ||||
|         try: | ||||
|             with openio(path) as f: | ||||
|                 r = csv.DictReader(f) | ||||
|                 for result in r: | ||||
|                     file = result.pop('file', '') | ||||
|                     name = result.pop('name', '') | ||||
|                     prev = results[(file, name)] | ||||
|                     for field in FIELDS: | ||||
|                         try: | ||||
|                             r = field.parse(result) | ||||
|                             if field.name in prev: | ||||
|                                 results[(file, name)][field.name] = field.acc( | ||||
|                                     [prev[field.name], r]) | ||||
|                             else: | ||||
|                                 results[(file, name)][field.name] = r | ||||
|                         except (KeyError, ValueError): | ||||
|                             pass | ||||
|         except FileNotFoundError: | ||||
|             pass | ||||
|  | ||||
|     # find fields | ||||
|     if args.get('all_fields'): | ||||
|         fields = FIELDS | ||||
|     elif args.get('fields') is not None: | ||||
|         fields_dict = {field.name: field for field in FIELDS} | ||||
|         fields = [fields_dict[f] for f in args['fields']] | ||||
|     else: | ||||
|         fields = [] | ||||
|         for field in FIELDS: | ||||
|             if any(field.name in result for result in results.values()): | ||||
|                 fields.append(field) | ||||
|  | ||||
|     # find total for every field | ||||
|     total = {} | ||||
|     for result in results.values(): | ||||
|         for field in fields: | ||||
|             if field.name in result and field.name in total: | ||||
|                 total[field.name] = field.acc( | ||||
|                     [total[field.name], result[field.name]]) | ||||
|             elif field.name in result: | ||||
|                 total[field.name] = result[field.name] | ||||
|  | ||||
|     # find previous results? | ||||
|     if args.get('diff'): | ||||
|         prev_results = co.defaultdict(lambda: {}) | ||||
|         try: | ||||
|             with openio(args['diff']) as f: | ||||
|                 r = csv.DictReader(f) | ||||
|                 for result in r: | ||||
|                     file = result.pop('file', '') | ||||
|                     name = result.pop('name', '') | ||||
|                     prev = prev_results[(file, name)] | ||||
|                     for field in FIELDS: | ||||
|                         try: | ||||
|                             r = field.parse(result) | ||||
|                             if field.name in prev: | ||||
|                                 prev_results[(file, name)][field.name] = field.acc( | ||||
|                                     [prev[field.name], r]) | ||||
|                             else: | ||||
|                                 prev_results[(file, name)][field.name] = r | ||||
|                         except (KeyError, ValueError): | ||||
|                             pass | ||||
|         except FileNotFoundError: | ||||
|             pass | ||||
|  | ||||
|         prev_total = {} | ||||
|         for result in prev_results.values(): | ||||
|             for field in fields: | ||||
|                 if field.name in result and field.name in prev_total: | ||||
|                     prev_total[field.name] = field.acc( | ||||
|                         [prev_total[field.name], result[field.name]]) | ||||
|                 elif field.name in result: | ||||
|                     prev_total[field.name] = result[field.name] | ||||
|  | ||||
|     # print results | ||||
|     def dedup_entries(results, by='name'): | ||||
|         entries = co.defaultdict(lambda: {}) | ||||
|         for (file, func), result in results.items(): | ||||
|             entry = (file if by == 'file' else func) | ||||
|             prev = entries[entry] | ||||
|             for field in fields: | ||||
|                 if field.name in result and field.name in prev: | ||||
|                     entries[entry][field.name] = field.acc( | ||||
|                         [prev[field.name], result[field.name]]) | ||||
|                 elif field.name in result: | ||||
|                     entries[entry][field.name] = result[field.name] | ||||
|         return entries | ||||
|  | ||||
|     def sorted_entries(entries): | ||||
|         if args.get('sort') is not None: | ||||
|             field = {field.name: field for field in FIELDS}[args['sort']] | ||||
|             return sorted(entries, key=lambda x: ( | ||||
|                 -(field.key(x[1][field.name])) if field.name in x[1] else -1, x)) | ||||
|         elif args.get('reverse_sort') is not None: | ||||
|             field = {field.name: field for field in FIELDS}[args['reverse_sort']] | ||||
|             return sorted(entries, key=lambda x: ( | ||||
|                 +(field.key(x[1][field.name])) if field.name in x[1] else -1, x)) | ||||
|         else: | ||||
|             return sorted(entries) | ||||
|  | ||||
|     def print_header(by=''): | ||||
|         if not args.get('diff'): | ||||
|             print('%-36s' % by, end='') | ||||
|             for field in fields: | ||||
|                 print((' '+field.fmt) % field.name, end='') | ||||
|             print() | ||||
|         else: | ||||
|             print('%-36s' % by, end='') | ||||
|             for field in fields: | ||||
|                 print((' '+field.fmt) % field.name, end='') | ||||
|                 print(' %-9s' % '', end='') | ||||
|             print() | ||||
|  | ||||
|     def print_entry(name, result): | ||||
|         print('%-36s' % name, end='') | ||||
|         for field in fields: | ||||
|             r = result.get(field.name) | ||||
|             if r is not None: | ||||
|                 print((' '+field.fmt) % field.repr(r), end='') | ||||
|             else: | ||||
|                 print((' '+field.fmt) % '-', end='') | ||||
|         print() | ||||
|  | ||||
|     def print_diff_entry(name, old, new): | ||||
|         print('%-36s' % name, end='') | ||||
|         for field in fields: | ||||
|             n = new.get(field.name) | ||||
|             if n is not None: | ||||
|                 print((' '+field.fmt) % field.repr(n), end='') | ||||
|             else: | ||||
|                 print((' '+field.fmt) % '-', end='') | ||||
|             o = old.get(field.name) | ||||
|             ratio = ( | ||||
|                 0.0 if m.isinf(o or 0) and m.isinf(n or 0) | ||||
|                     else +float('inf') if m.isinf(n or 0) | ||||
|                     else -float('inf') if m.isinf(o or 0) | ||||
|                     else 0.0 if not o and not n | ||||
|                     else +1.0 if not o | ||||
|                     else -1.0 if not n | ||||
|                     else field.ratio(o, n)) | ||||
|             print(' %-9s' % ( | ||||
|                 '' if not ratio | ||||
|                     else '(+∞%)' if ratio > 0 and m.isinf(ratio) | ||||
|                     else '(-∞%)' if ratio < 0 and m.isinf(ratio) | ||||
|                     else '(%+.1f%%)' % (100*ratio)), end='') | ||||
|         print() | ||||
|  | ||||
|     def print_entries(by='name'): | ||||
|         entries = dedup_entries(results, by=by) | ||||
|  | ||||
|         if not args.get('diff'): | ||||
|             print_header(by=by) | ||||
|             for name, result in sorted_entries(entries.items()): | ||||
|                 print_entry(name, result) | ||||
|         else: | ||||
|             prev_entries = dedup_entries(prev_results, by=by) | ||||
|             print_header(by='%s (%d added, %d removed)' % (by, | ||||
|                 sum(1 for name in entries if name not in prev_entries), | ||||
|                 sum(1 for name in prev_entries if name not in entries))) | ||||
|             for name, result in sorted_entries(entries.items()): | ||||
|                 if args.get('all') or result != prev_entries.get(name, {}): | ||||
|                     print_diff_entry(name, prev_entries.get(name, {}), result) | ||||
|  | ||||
|     def print_totals(): | ||||
|         if not args.get('diff'): | ||||
|             print_entry('TOTAL', total) | ||||
|         else: | ||||
|             print_diff_entry('TOTAL', prev_total, total) | ||||
|  | ||||
|     if args.get('summary'): | ||||
|         print_header() | ||||
|         print_totals() | ||||
|     elif args.get('files'): | ||||
|         print_entries(by='file') | ||||
|         print_totals() | ||||
|     else: | ||||
|         print_entries(by='name') | ||||
|         print_totals() | ||||
|  | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     import argparse | ||||
|     import sys | ||||
|     parser = argparse.ArgumentParser( | ||||
|         description="Summarize measurements") | ||||
|     parser.add_argument('csv_paths', nargs='*', default='-', | ||||
|         help="Description of where to find *.csv files. May be a directory \ | ||||
|             or a list of paths. *.csv files will be merged to show combined \ | ||||
|             totals.") | ||||
|     parser.add_argument('-d', '--diff', | ||||
|         help="Specify CSV file to diff against.") | ||||
|     parser.add_argument('-a', '--all', action='store_true', | ||||
|         help="Show all objects, not just the ones that changed.") | ||||
|     parser.add_argument('-e', '--all-fields', action='store_true', | ||||
|         help="Show all fields, even those with no results.") | ||||
|     parser.add_argument('-f', '--fields', type=lambda x: re.split(r'\s*,\s*', x), | ||||
|         help="Comma separated list of fields to print, by default all fields \ | ||||
|             that are found in the CSV files are printed.") | ||||
|     parser.add_argument('-s', '--sort', | ||||
|         help="Sort by this field.") | ||||
|     parser.add_argument('-S', '--reverse-sort', | ||||
|         help="Sort by this field, but backwards.") | ||||
|     parser.add_argument('-F', '--files', action='store_true', | ||||
|         help="Show file-level calls.") | ||||
|     parser.add_argument('-Y', '--summary', action='store_true', | ||||
|         help="Only show the totals.") | ||||
|     sys.exit(main(**vars(parser.parse_args()))) | ||||
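A quick sketch of how those accumulators behave when two CSV rows share the same (file, name) key (the values here are made up for illustration):

    # illustrative only: 'code' sums across duplicate rows, 'stack' keeps the max
    rows = [
        {'code_size': '100', 'stack_limit': '32'},
        {'code_size': '40', 'stack_limit': '48'},
    ]
    print(sum(int(r['code_size']) for r in rows))      # -> 140
    print(max(float(r['stack_limit']) for r in rows))  # -> 48.0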
							
								
								
									
scripts/test.py: 202 lines
							| @@ -20,19 +20,50 @@ import pty | ||||
| import errno | ||||
| import signal | ||||
|  | ||||
| TESTDIR = 'tests' | ||||
| TEST_PATHS = 'tests' | ||||
| RULES = """ | ||||
| # add block devices to sources | ||||
| TESTSRC ?= $(SRC) $(wildcard bd/*.c) | ||||
|  | ||||
| define FLATTEN | ||||
| tests/%$(subst /,.,$(target)): $(target) | ||||
| %(path)s%%$(subst /,.,$(target)): $(target) | ||||
|     ./scripts/explode_asserts.py $$< -o $$@ | ||||
| endef | ||||
| $(foreach target,$(SRC),$(eval $(FLATTEN))) | ||||
|  | ||||
| -include tests/*.d | ||||
| $(foreach target,$(TESTSRC),$(eval $(FLATTEN))) | ||||
|  | ||||
| -include %(path)s*.d | ||||
| .SECONDARY: | ||||
| %.test: %.test.o $(foreach f,$(subst /,.,$(SRC:.c=.o)),%.$f) | ||||
|  | ||||
| %(path)s.test: %(path)s.test.o \\ | ||||
|         $(foreach t,$(subst /,.,$(TESTSRC:.c=.o)),%(path)s.$t) | ||||
|     $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ | ||||
|  | ||||
| # needed in case builddir is different | ||||
| %(path)s%%.o: %(path)s%%.c | ||||
|     $(CC) -c -MMD $(CFLAGS) $< -o $@ | ||||
| """ | ||||
| COVERAGE_RULES = """ | ||||
| %(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage | ||||
|  | ||||
| # delete lingering coverage | ||||
| %(path)s.test: | %(path)s.info.clean | ||||
| .PHONY: %(path)s.info.clean | ||||
| %(path)s.info.clean: | ||||
|     rm -f %(path)s*.gcda | ||||
|  | ||||
| # accumulate coverage info | ||||
| .PHONY: %(path)s.info | ||||
| %(path)s.info: | ||||
|     $(strip $(LCOV) -c \\ | ||||
|         $(addprefix -d ,$(wildcard %(path)s*.gcda)) \\ | ||||
|         --rc 'geninfo_adjust_src_path=$(shell pwd)' \\ | ||||
|         -o $@) | ||||
|     $(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@ | ||||
| ifdef COVERAGETARGET | ||||
|     $(strip $(LCOV) -a $@ \\ | ||||
|         $(addprefix -a ,$(wildcard $(COVERAGETARGET))) \\ | ||||
|         -o $(COVERAGETARGET)) | ||||
| endif | ||||
| """ | ||||
| GLOBALS = """ | ||||
| //////////////// AUTOGENERATED TEST //////////////// | ||||
| @@ -119,6 +150,8 @@ class TestCase: | ||||
|         self.if_ = config.get('if', None) | ||||
|         self.in_ = config.get('in', None) | ||||
|  | ||||
|         self.result = None | ||||
|  | ||||
|     def __str__(self): | ||||
|         if hasattr(self, 'permno'): | ||||
|             if any(k not in self.case.defines for k in self.defines): | ||||
| @@ -179,7 +212,7 @@ class TestCase: | ||||
|                 len(self.filter) >= 2 and | ||||
|                 self.filter[1] != self.permno): | ||||
|             return False | ||||
|         elif args.get('no_internal', False) and self.in_ is not None: | ||||
|         elif args.get('no_internal') and self.in_ is not None: | ||||
|             return False | ||||
|         elif self.if_ is not None: | ||||
|             if_ = self.if_ | ||||
| @@ -213,7 +246,7 @@ class TestCase: | ||||
|                 try: | ||||
|                     with open(disk, 'w') as f: | ||||
|                         f.truncate(0) | ||||
|                     if args.get('verbose', False): | ||||
|                     if args.get('verbose'): | ||||
|                         print('truncate --size=0', disk) | ||||
|                 except FileNotFoundError: | ||||
|                     pass | ||||
| @@ -237,14 +270,14 @@ class TestCase: | ||||
|                     '-ex', 'r']) | ||||
|             ncmd.extend(['--args'] + cmd) | ||||
|  | ||||
|             if args.get('verbose', False): | ||||
|             if args.get('verbose'): | ||||
|                 print(' '.join(shlex.quote(c) for c in ncmd)) | ||||
|             signal.signal(signal.SIGINT, signal.SIG_IGN) | ||||
|             sys.exit(sp.call(ncmd)) | ||||
|  | ||||
|         # run test case! | ||||
|         mpty, spty = pty.openpty() | ||||
|         if args.get('verbose', False): | ||||
|         if args.get('verbose'): | ||||
|             print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|         proc = sp.Popen(cmd, stdout=spty, stderr=spty) | ||||
|         os.close(spty) | ||||
| @@ -259,8 +292,10 @@ class TestCase: | ||||
|                     if e.errno == errno.EIO: | ||||
|                         break | ||||
|                     raise | ||||
|                 if not line: | ||||
|                     break | ||||
|                 stdout.append(line) | ||||
|                 if args.get('verbose', False): | ||||
|                 if args.get('verbose'): | ||||
|                     sys.stdout.write(line) | ||||
|                 # intercept asserts | ||||
|                 m = re.match( | ||||
| @@ -299,7 +334,7 @@ class ValgrindTestCase(TestCase): | ||||
|         return not self.leaky and super().shouldtest(**args) | ||||
|  | ||||
|     def test(self, exec=[], **args): | ||||
|         verbose = args.get('verbose', False) | ||||
|         verbose = args.get('verbose') | ||||
|         uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1) | ||||
|         exec = [ | ||||
|             'valgrind', | ||||
| @@ -351,12 +386,17 @@ class TestSuite: | ||||
|         self.name = os.path.basename(path) | ||||
|         if self.name.endswith('.toml'): | ||||
|             self.name = self.name[:-len('.toml')] | ||||
|         self.path = path | ||||
|         if args.get('build_dir'): | ||||
|             self.toml = path | ||||
|             self.path = args['build_dir'] + '/' + path | ||||
|         else: | ||||
|             self.toml = path | ||||
|             self.path = path | ||||
|         self.classes = classes | ||||
|         self.defines = defines.copy() | ||||
|         self.filter = filter | ||||
|  | ||||
|         with open(path) as f: | ||||
|         with open(self.toml) as f: | ||||
|             # load tests | ||||
|             config = toml.load(f) | ||||
|  | ||||
| @@ -467,7 +507,7 @@ class TestSuite: | ||||
|  | ||||
|     def build(self, **args): | ||||
|         # build test files | ||||
|         tf = open(self.path + '.test.c.t', 'w') | ||||
|         tf = open(self.path + '.test.tc', 'w') | ||||
|         tf.write(GLOBALS) | ||||
|         if self.code is not None: | ||||
|             tf.write('#line %d "%s"\n' % (self.code_lineno, self.path)) | ||||
| @@ -477,7 +517,7 @@ class TestSuite: | ||||
|         for case in self.cases: | ||||
|             if case.in_ not in tfs: | ||||
|                 tfs[case.in_] = open(self.path+'.'+ | ||||
|                     case.in_.replace('/', '.')+'.t', 'w') | ||||
|                     re.sub(r'(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w') | ||||
|                 tfs[case.in_].write('#line 1 "%s"\n' % case.in_) | ||||
|                 with open(case.in_) as f: | ||||
|                     for line in f: | ||||
| @@ -516,25 +556,33 @@ class TestSuite: | ||||
|  | ||||
|         # write makefiles | ||||
|         with open(self.path + '.mk', 'w') as mk: | ||||
|             mk.write(RULES.replace(4*' ', '\t')) | ||||
|             mk.write(RULES.replace(4*' ', '\t') % dict(path=self.path)) | ||||
|             mk.write('\n') | ||||
|  | ||||
|             # add truely global defines globally | ||||
|             # add coverage hooks? | ||||
|             if args.get('coverage'): | ||||
|                 mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict( | ||||
|                     path=self.path)) | ||||
|                 mk.write('\n') | ||||
|  | ||||
|             # add truly global defines globally | ||||
|             for k, v in sorted(self.defines.items()): | ||||
|                 mk.write('%s: override CFLAGS += -D%s=%r\n' % ( | ||||
|                     self.path+'.test', k, v)) | ||||
|                 mk.write('%s.test: override CFLAGS += -D%s=%r\n' | ||||
|                     % (self.path, k, v)) | ||||
|  | ||||
|             for path in tfs: | ||||
|                 if path is None: | ||||
|                     mk.write('%s: %s | %s\n' % ( | ||||
|                         self.path+'.test.c', | ||||
|                         self.path, | ||||
|                         self.path+'.test.c.t')) | ||||
|                         self.toml, | ||||
|                         self.path+'.test.tc')) | ||||
|                 else: | ||||
|                     mk.write('%s: %s %s | %s\n' % ( | ||||
|                         self.path+'.'+path.replace('/', '.'), | ||||
|                         self.path, path, | ||||
|                         self.path+'.'+path.replace('/', '.')+'.t')) | ||||
|                         self.toml, | ||||
|                         path, | ||||
|                         self.path+'.'+re.sub(r'(\.c)?$', '.tc', | ||||
|                             path.replace('/', '.')))) | ||||
|                 mk.write('\t./scripts/explode_asserts.py $| -o $@\n') | ||||
|  | ||||
|         self.makefile = self.path + '.mk' | ||||
| @@ -557,7 +605,7 @@ class TestSuite: | ||||
|                 if not args.get('verbose', True): | ||||
|                     sys.stdout.write(FAIL) | ||||
|                     sys.stdout.flush() | ||||
|                 if not args.get('keep_going', False): | ||||
|                 if not args.get('keep_going'): | ||||
|                     if not args.get('verbose', True): | ||||
|                         sys.stdout.write('\n') | ||||
|                     raise | ||||
| @@ -579,36 +627,36 @@ def main(**args): | ||||
|  | ||||
|     # and what class of TestCase to run | ||||
|     classes = [] | ||||
|     if args.get('normal', False): | ||||
|     if args.get('normal'): | ||||
|         classes.append(TestCase) | ||||
|     if args.get('reentrant', False): | ||||
|     if args.get('reentrant'): | ||||
|         classes.append(ReentrantTestCase) | ||||
|     if args.get('valgrind', False): | ||||
|     if args.get('valgrind'): | ||||
|         classes.append(ValgrindTestCase) | ||||
|     if not classes: | ||||
|         classes = [TestCase] | ||||
|  | ||||
|     suites = [] | ||||
|     for testpath in args['testpaths']: | ||||
|     for testpath in args['test_paths']: | ||||
|         # optionally specified test case/perm | ||||
|         testpath, *filter = testpath.split('#') | ||||
|         filter = [int(f) for f in filter] | ||||
|  | ||||
|         # figure out the suite's toml file | ||||
|         if os.path.isdir(testpath): | ||||
|             testpath = testpath + '/test_*.toml' | ||||
|             testpath = testpath + '/*.toml' | ||||
|         elif os.path.isfile(testpath): | ||||
|             testpath = testpath | ||||
|         elif testpath.endswith('.toml'): | ||||
|             testpath = TESTDIR + '/' + testpath | ||||
|             testpath = TEST_PATHS + '/' + testpath | ||||
|         else: | ||||
|             testpath = TESTDIR + '/' + testpath + '.toml' | ||||
|             testpath = TEST_PATHS + '/' + testpath + '.toml' | ||||
|  | ||||
|         # find tests | ||||
|         for path in glob.glob(testpath): | ||||
|             suites.append(TestSuite(path, classes, defines, filter, **args)) | ||||
|  | ||||
|     # sort for reproducability | ||||
|     # sort for reproducibility | ||||
|     suites = sorted(suites) | ||||
|  | ||||
|     # generate permutations | ||||
| @@ -628,7 +676,7 @@ def main(**args): | ||||
|         list(it.chain.from_iterable(['-f', m] for m in makefiles)) + | ||||
|         [target for target in targets]) | ||||
|     mpty, spty = pty.openpty() | ||||
|     if args.get('verbose', False): | ||||
|     if args.get('verbose'): | ||||
|         print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|     proc = sp.Popen(cmd, stdout=spty, stderr=spty) | ||||
|     os.close(spty) | ||||
| @@ -641,15 +689,17 @@ def main(**args): | ||||
|             if e.errno == errno.EIO: | ||||
|                 break | ||||
|             raise | ||||
|         if not line: | ||||
|             break | ||||
|         stdout.append(line) | ||||
|         if args.get('verbose', False): | ||||
|         if args.get('verbose'): | ||||
|             sys.stdout.write(line) | ||||
|         # intercept warnings | ||||
|         m = re.match( | ||||
|             r'^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$' | ||||
|             .format('(?:\033\[[\d;]*.| )*', 'warning'), | ||||
|             line) | ||||
|         if m and not args.get('verbose', False): | ||||
|         if m and not args.get('verbose'): | ||||
|             try: | ||||
|                 with open(m.group(1)) as f: | ||||
|                     lineno = int(m.group(2)) | ||||
| @@ -662,27 +712,26 @@ def main(**args): | ||||
|             except: | ||||
|                 pass | ||||
|     proc.wait() | ||||
|  | ||||
|     if proc.returncode != 0: | ||||
|         if not args.get('verbose', False): | ||||
|         if not args.get('verbose'): | ||||
|             for line in stdout: | ||||
|                 sys.stdout.write(line) | ||||
|         sys.exit(-3) | ||||
|         sys.exit(-1) | ||||
|  | ||||
|     print('built %d test suites, %d test cases, %d permutations' % ( | ||||
|         len(suites), | ||||
|         sum(len(suite.cases) for suite in suites), | ||||
|         sum(len(suite.perms) for suite in suites))) | ||||
|  | ||||
|     filtered = 0 | ||||
|     total = 0 | ||||
|     for suite in suites: | ||||
|         for perm in suite.perms: | ||||
|             filtered += perm.shouldtest(**args) | ||||
|     if filtered != sum(len(suite.perms) for suite in suites): | ||||
|         print('filtered down to %d permutations' % filtered) | ||||
|             total += perm.shouldtest(**args) | ||||
|     if total != sum(len(suite.perms) for suite in suites): | ||||
|         print('filtered down to %d permutations' % total) | ||||
|  | ||||
|     # only requested to build? | ||||
|     if args.get('build', False): | ||||
|     if args.get('build'): | ||||
|         return 0 | ||||
|  | ||||
|     print('====== testing ======') | ||||
| @@ -697,15 +746,12 @@ def main(**args): | ||||
|     failed = 0 | ||||
|     for suite in suites: | ||||
|         for perm in suite.perms: | ||||
|             if not hasattr(perm, 'result'): | ||||
|                 continue | ||||
|  | ||||
|             if perm.result == PASS: | ||||
|                 passed += 1 | ||||
|             else: | ||||
|             elif isinstance(perm.result, TestFailure): | ||||
|                 sys.stdout.write( | ||||
|                     "\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m " | ||||
|                     "{perm} failed with {returncode}\n".format( | ||||
|                     "{perm} failed\n".format( | ||||
|                         perm=perm, path=perm.suite.path, lineno=perm.lineno, | ||||
|                         returncode=perm.result.returncode or 0)) | ||||
|                 if perm.result.stdout: | ||||
| @@ -723,11 +769,36 @@ def main(**args): | ||||
|                 sys.stdout.write('\n') | ||||
|                 failed += 1 | ||||
|  | ||||
|     if args.get('gdb', False): | ||||
|     if args.get('coverage'): | ||||
|         # collect coverage info | ||||
|         # why -j1? lcov doesn't work in parallel because of gcov limitations | ||||
|         cmd = (['make', '-j1', '-f', 'Makefile'] + | ||||
|             list(it.chain.from_iterable(['-f', m] for m in makefiles)) + | ||||
|             (['COVERAGETARGET=%s' % args['coverage']] | ||||
|                 if isinstance(args['coverage'], str) else []) + | ||||
|             [suite.path + '.info' for suite in suites | ||||
|                 if any(perm.result == PASS for perm in suite.perms)]) | ||||
|         if args.get('verbose'): | ||||
|             print(' '.join(shlex.quote(c) for c in cmd)) | ||||
|         proc = sp.Popen(cmd, | ||||
|             stdout=sp.PIPE if not args.get('verbose') else None, | ||||
|             stderr=sp.STDOUT if not args.get('verbose') else None, | ||||
|             universal_newlines=True) | ||||
|         stdout = [] | ||||
|         for line in proc.stdout: | ||||
|             stdout.append(line) | ||||
|         proc.wait() | ||||
|         if proc.returncode != 0: | ||||
|             if not args.get('verbose'): | ||||
|                 for line in stdout: | ||||
|                     sys.stdout.write(line) | ||||
|             sys.exit(-1) | ||||
|  | ||||
|     if args.get('gdb'): | ||||
|         failure = None | ||||
|         for suite in suites: | ||||
|             for perm in suite.perms: | ||||
|                 if getattr(perm, 'result', PASS) != PASS: | ||||
|                 if isinstance(perm.result, TestFailure): | ||||
|                     failure = perm.result | ||||
|         if failure is not None: | ||||
|             print('======= gdb ======') | ||||
| @@ -735,20 +806,22 @@ def main(**args): | ||||
|             failure.case.test(failure=failure, **args) | ||||
|             sys.exit(0) | ||||
|  | ||||
|     print('tests passed: %d' % passed) | ||||
|     print('tests failed: %d' % failed) | ||||
|     print('tests passed %d/%d (%.1f%%)' % (passed, total, | ||||
|         100*(passed/total if total else 1.0))) | ||||
|     print('tests failed %d/%d (%.1f%%)' % (failed, total, | ||||
|         100*(failed/total if total else 1.0))) | ||||
|     return 1 if failed > 0 else 0 | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     import argparse | ||||
|     parser = argparse.ArgumentParser( | ||||
|         description="Run parameterized tests in various configurations.") | ||||
|     parser.add_argument('testpaths', nargs='*', default=[TESTDIR], | ||||
|     parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS], | ||||
|         help="Description of test(s) to run. By default, this is all tests \ | ||||
|             found in the \"{0}\" directory. Here, you can specify a different \ | ||||
|             directory of tests, a specific file, a suite by name, and even a \ | ||||
|             specific test case by adding brackets. For example \ | ||||
|             \"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TESTDIR)) | ||||
|             directory of tests, a specific file, a suite by name, and even \ | ||||
|             specific test cases and permutations. For example \ | ||||
|             \"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TEST_PATHS)) | ||||
|     parser.add_argument('-D', action='append', default=[], | ||||
|         help="Overriding parameter definitions.") | ||||
|     parser.add_argument('-v', '--verbose', action='store_true', | ||||
| @@ -769,10 +842,19 @@ if __name__ == "__main__": | ||||
|         help="Run tests normally.") | ||||
|     parser.add_argument('-r', '--reentrant', action='store_true', | ||||
|         help="Run reentrant tests with simulated power-loss.") | ||||
|     parser.add_argument('-V', '--valgrind', action='store_true', | ||||
|     parser.add_argument('--valgrind', action='store_true', | ||||
|         help="Run non-leaky tests under valgrind to check for memory leaks.") | ||||
|     parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '), | ||||
|     parser.add_argument('--exec', default=[], type=lambda e: e.split(), | ||||
|         help="Run tests with another executable prefixed on the command line.") | ||||
|     parser.add_argument('-d', '--disk', | ||||
|     parser.add_argument('--disk', | ||||
|         help="Specify a file to use for persistent/reentrant tests.") | ||||
|     parser.add_argument('--coverage', type=lambda x: x if x else True, | ||||
|         nargs='?', const='', | ||||
|         help="Collect coverage information during testing. This uses lcov/gcov \ | ||||
|             to accumulate coverage information into *.info files. May also be \ | ||||
|             a path to a *.info file to accumulate coverage info into.") | ||||
|     parser.add_argument('--build-dir', | ||||
|         help="Build relative to the specified directory instead of the \ | ||||
|             current directory.") | ||||
|  | ||||
|     sys.exit(main(**vars(parser.parse_args()))) | ||||
|   | ||||
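The RULES and COVERAGE_RULES strings in test.py above are plain %-substitution templates, expanded once per test suite after swapping 4-space indents for the tabs make requires. A standalone sketch of that expansion (tests/test_dirs is a hypothetical suite path):

    # illustrative only: how test.py instantiates its makefile templates
    RULES = ("%(path)s.test: %(path)s.test.o\n"
        "    $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@\n")
    print(RULES.replace(4*' ', '\t') % dict(path='tests/test_dirs'))
    # -> tests/test_dirs.test: tests/test_dirs.test.o
    #    <tab>$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@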
| @@ -392,3 +392,48 @@ code = ''' | ||||
|  | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|  | ||||
| [[case]] # noop truncate | ||||
| define.MEDIUMSIZE = [32, 2048] | ||||
| code = ''' | ||||
|     lfs_format(&lfs, &cfg) => 0; | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_file_open(&lfs, &file, "baldynoop", | ||||
|             LFS_O_RDWR | LFS_O_CREAT) => 0; | ||||
|  | ||||
|     strcpy((char*)buffer, "hair"); | ||||
|     size = strlen((char*)buffer); | ||||
|     for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) { | ||||
|         lfs_file_write(&lfs, &file, buffer, size) => size; | ||||
|  | ||||
|         // this truncate should do nothing | ||||
|         lfs_file_truncate(&lfs, &file, j+size) => 0; | ||||
|     } | ||||
|     lfs_file_size(&lfs, &file) => MEDIUMSIZE; | ||||
|  | ||||
|     lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0; | ||||
|     // should do nothing again | ||||
|     lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0; | ||||
|     lfs_file_size(&lfs, &file) => MEDIUMSIZE; | ||||
|  | ||||
|     for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) { | ||||
|         lfs_file_read(&lfs, &file, buffer, size) => size; | ||||
|         memcmp(buffer, "hair", size) => 0; | ||||
|     } | ||||
|     lfs_file_read(&lfs, &file, buffer, size) => 0; | ||||
|  | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
|  | ||||
|     // still there after reboot? | ||||
|     lfs_mount(&lfs, &cfg) => 0; | ||||
|     lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0; | ||||
|     lfs_file_size(&lfs, &file) => MEDIUMSIZE; | ||||
|     for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) { | ||||
|         lfs_file_read(&lfs, &file, buffer, size) => size; | ||||
|         memcmp(buffer, "hair", size) => 0; | ||||
|     } | ||||
|     lfs_file_read(&lfs, &file, buffer, size) => 0; | ||||
|     lfs_file_close(&lfs, &file) => 0; | ||||
|     lfs_unmount(&lfs) => 0; | ||||
| ''' | ||||
|   | ||||