Compare commits

..

1 commit

Author              SHA1        Message                  Date
Christopher Haster  2db5dc80c2  Update copyright notice  2022-03-20 23:03:52 -05:00
24 changed files with 295 additions and 1817 deletions

View File

@@ -6,7 +6,7 @@ on:
 jobs:
   post-release:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-18.04
     steps:
       # trigger post-release in dependency repo, this indirection allows the
       # dependency repo to be updated often without affecting this repo. At

View File

@@ -7,7 +7,7 @@ on:
 jobs:
   release:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-18.04

     # need to manually check for a couple things
     # - tests passed?
@@ -73,70 +73,89 @@ jobs:
           # previous results to compare against?
           [ -n "$LFS_PREV_VERSION" ] && curl -sS \
             "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/`
-              `status/$LFS_PREV_VERSION?per_page=100" \
+              `status/$LFS_PREV_VERSION" \
             | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
             >> prev-results.json \
             || true

-          # build table for GitHub
-          echo "<table>" >> results.txt
-          echo "<thead>" >> results.txt
-          echo "<tr>" >> results.txt
-          echo "<th align=left>Configuration</th>" >> results.txt
-          for r in Code Stack Structs Coverage
-          do
-            echo "<th align=right>$r</th>" >> results.txt
-          done
-          echo "</tr>" >> results.txt
-          echo "</thead>" >> results.txt
-          echo "<tbody>" >> results.txt
-          for c in "" readonly threadsafe migrate error-asserts
-          do
-            echo "<tr>" >> results.txt
-            c_or_default=${c:-default}
-            echo "<td align=left>${c_or_default^}</td>" >> results.txt
-            for r in code stack structs
-            do
-              # per-config results
-              echo "<td align=right>" >> results.txt
-              [ -e results/thumb${c:+-$c}.csv ] && ( \
-                export PREV="$(jq -re '
-                  select(.context == "'"results (thumb${c:+, $c}) / $r"'").description
-                  | capture("(?<result>[0-9∞]+)").result' \
-                  prev-results.json || echo 0)"
-                ./scripts/summary.py results/thumb${c:+-$c}.csv -f $r -Y | awk '
-                  NR==2 {printf "%s B",$2}
-                  NR==2 && ENVIRON["PREV"]+0 != 0 {
-                    printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
-                  NR==2 {printf "\n"}' \
-                  | sed -e 's/ /\&nbsp;/g' \
-                  >> results.txt)
-              echo "</td>" >> results.txt
-            done
-            # coverage results
-            if [ -z $c ]
-            then
-              echo "<td rowspan=0 align=right>" >> results.txt
-              [ -e results/coverage.csv ] && ( \
-                export PREV="$(jq -re '
-                  select(.context == "results / coverage").description
-                  | capture("(?<result>[0-9\\.]+)").result' \
-                  prev-results.json || echo 0)"
-                ./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
-                  NR==2 {printf "%.1f%% of %d lines",$4,$3}
-                  NR==2 && ENVIRON["PREV"]+0 != 0 {
-                    printf " (%+.1f%%)",$4-ENVIRON["PREV"]}
-                  NR==2 {printf "\n"}' \
-                  | sed -e 's/ /\&nbsp;/g' \
-                  >> results.txt)
-              echo "</td>" >> results.txt
-            fi
-            echo "</tr>" >> results.txt
-          done
-          echo "</tbody>" >> results.txt
-          echo "</table>" >> results.txt
+          # unfortunately these each have their own format
+          [ -e results/code-thumb.csv ] && ( \
+            export PREV="$(jq -re '
+              select(.context == "results / code").description
+              | capture("Code size is (?<result>[0-9]+)").result' \
+              prev-results.json || echo 0)"
+            ./scripts/code.py -u results/code-thumb.csv -s | awk '
+              NR==2 {printf "Code size,%d B",$2}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
+              NR==2 {printf "\n"}' \
+            >> results.csv)
+          [ -e results/code-thumb-readonly.csv ] && ( \
+            export PREV="$(jq -re '
+              select(.context == "results / code (readonly)").description
+              | capture("Code size is (?<result>[0-9]+)").result' \
+              prev-results.json || echo 0)"
+            ./scripts/code.py -u results/code-thumb-readonly.csv -s | awk '
+              NR==2 {printf "Code size<br/>(readonly),%d B",$2}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
+              NR==2 {printf "\n"}' \
+            >> results.csv)
+          [ -e results/code-thumb-threadsafe.csv ] && ( \
+            export PREV="$(jq -re '
+              select(.context == "results / code (threadsafe)").description
+              | capture("Code size is (?<result>[0-9]+)").result' \
+              prev-results.json || echo 0)"
+            ./scripts/code.py -u results/code-thumb-threadsafe.csv -s | awk '
+              NR==2 {printf "Code size<br/>(threadsafe),%d B",$2}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
+              NR==2 {printf "\n"}' \
+            >> results.csv)
+          [ -e results/code-thumb-migrate.csv ] && ( \
+            export PREV="$(jq -re '
+              select(.context == "results / code (migrate)").description
+              | capture("Code size is (?<result>[0-9]+)").result' \
+              prev-results.json || echo 0)"
+            ./scripts/code.py -u results/code-thumb-migrate.csv -s | awk '
+              NR==2 {printf "Code size<br/>(migrate),%d B",$2}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
+              NR==2 {printf "\n"}' \
+            >> results.csv)
+          [ -e results/code-thumb-error-asserts.csv ] && ( \
+            export PREV="$(jq -re '
+              select(.context == "results / code (error-asserts)").description
+              | capture("Code size is (?<result>[0-9]+)").result' \
+              prev-results.json || echo 0)"
+            ./scripts/code.py -u results/code-thumb-error-asserts.csv -s | awk '
+              NR==2 {printf "Code size<br/>(error-asserts),%d B",$2}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
+              NR==2 {printf "\n"}' \
+            >> results.csv)
+          [ -e results/coverage.csv ] && ( \
+            export PREV="$(jq -re '
+              select(.context == "results / coverage").description
+              | capture("Coverage is (?<result>[0-9\\.]+)").result' \
+              prev-results.json || echo 0)"
+            ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' '
+              NR==2 {printf "Coverage,%.1f%% of %d lines",$4,$3}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",$4-ENVIRON["PREV"]}
+              NR==2 {printf "\n"}' \
+            >> results.csv)

+          # transpose to GitHub table
+          [ -e results.csv ] || exit 0
+          awk -F ',' '
+            {label[NR]=$1; value[NR]=$2}
+            END {
+              for (r=1; r<=NR; r++) {printf "| %s ",label[r]}; printf "|\n";
+              for (r=1; r<=NR; r++) {printf "|:--"}; printf "|\n";
+              for (r=1; r<=NR; r++) {printf "| %s ",value[r]}; printf "|\n"}' \
+            results.csv > results.txt
+          echo "RESULTS:"
           cat results.txt

           # find changes from history
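A note on the awk program added at the end of this step: it transposes the two-column results.csv (one label,value pair per row) into a single-row GitHub-flavored markdown table. A rough Python sketch of the same transform, where the file name and sample values are only illustrative:

    import csv

    # rows like [("Code size", "25208 B"), ("Coverage", "94.2% of 2341 lines")]
    with open('results.csv') as f:
        rows = [r for r in csv.reader(f) if len(r) >= 2]

    labels = [r[0] for r in rows]
    values = [r[1] for r in rows]

    # labels become the header row, values the single body row
    print('| ' + ' | '.join(labels) + ' |')
    print('|:--' * len(labels) + '|')
    print('| ' + ' | '.join(values) + ' |')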

View File

@@ -6,7 +6,7 @@ on:
 jobs:
   status:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-18.04
     steps:
       # custom statuses?
       - uses: dawidd6/action-download-artifact@v2

View File

@@ -8,7 +8,7 @@ env:
 jobs:
   # run tests
   test:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-18.04
     strategy:
       fail-fast: false
       matrix:
@@ -18,27 +18,11 @@ jobs:
       - uses: actions/checkout@v2
       - name: install
         run: |
-          # need a few additional tools
-          #
-          # note this includes gcc-10, which is required for -fcallgraph-info=su
+          # need toml, also pip3 isn't installed by default?
           sudo apt-get update -qq
-          sudo apt-get install -qq gcc-10 python3 python3-pip lcov
+          sudo apt-get install -qq python3 python3-pip lcov
           sudo pip3 install toml
-          echo "CC=gcc-10" >> $GITHUB_ENV
-          gcc-10 --version
-          lcov --version
-          python3 --version
-          # need newer lcov version for gcc-10
-          #sudo apt-get remove lcov
-          #wget https://launchpad.net/ubuntu/+archive/primary/+files/lcov_1.15-1_all.deb
-          #sudo apt install ./lcov_1.15-1_all.deb
-          #lcov --version
-          #which lcov
-          #ls -lha /usr/bin/lcov
-          wget https://github.com/linux-test-project/lcov/releases/download/v1.15/lcov-1.15.tar.gz
-          tar xf lcov-1.15.tar.gz
-          sudo make -C lcov-1.15 install
+          gcc --version

           # setup a ram-backed disk to speed up reentrant tests
           mkdir disks
@@ -57,36 +41,36 @@ jobs:
         if: ${{matrix.arch == 'thumb'}}
         run: |
           sudo apt-get install -qq \
-            gcc-10-arm-linux-gnueabi \
+            gcc-arm-linux-gnueabi \
             libc6-dev-armel-cross \
             qemu-user
-          echo "CC=arm-linux-gnueabi-gcc-10 -mthumb --static" >> $GITHUB_ENV
+          echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV
           echo "EXEC=qemu-arm" >> $GITHUB_ENV
-          arm-linux-gnueabi-gcc-10 --version
+          arm-linux-gnueabi-gcc --version
           qemu-arm -version

       # cross-compile with MIPS (32-bit, big-endian)
       - name: install-mips
         if: ${{matrix.arch == 'mips'}}
         run: |
           sudo apt-get install -qq \
-            gcc-10-mips-linux-gnu \
+            gcc-mips-linux-gnu \
             libc6-dev-mips-cross \
             qemu-user
-          echo "CC=mips-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
+          echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV
           echo "EXEC=qemu-mips" >> $GITHUB_ENV
-          mips-linux-gnu-gcc-10 --version
+          mips-linux-gnu-gcc --version
           qemu-mips -version

       # cross-compile with PowerPC (32-bit, big-endian)
       - name: install-powerpc
         if: ${{matrix.arch == 'powerpc'}}
         run: |
           sudo apt-get install -qq \
-            gcc-10-powerpc-linux-gnu \
+            gcc-powerpc-linux-gnu \
             libc6-dev-powerpc-cross \
             qemu-user
-          echo "CC=powerpc-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
+          echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV
           echo "EXEC=qemu-ppc" >> $GITHUB_ENV
-          powerpc-linux-gnu-gcc-10 --version
+          powerpc-linux-gnu-gcc --version
           qemu-ppc -version

       # make sure example can at least compile
@@ -164,108 +148,102 @@ jobs:
           retention-days: 1

       # update results
-      - name: results
+      - name: results-code
         run: |
           mkdir -p results
           make clean
-          make lfs.csv \
+          make code \
             CFLAGS+=" \
             -DLFS_NO_ASSERT \
             -DLFS_NO_DEBUG \
             -DLFS_NO_WARN \
-            -DLFS_NO_ERROR"
-          cp lfs.csv results/${{matrix.arch}}.csv
-          ./scripts/summary.py results/${{matrix.arch}}.csv
-      - name: results-readonly
+            -DLFS_NO_ERROR" \
+            CODEFLAGS+="-o results/code-${{matrix.arch}}.csv"
+      - name: results-code-readonly
         run: |
           mkdir -p results
           make clean
-          make lfs.csv \
+          make code \
             CFLAGS+=" \
             -DLFS_NO_ASSERT \
             -DLFS_NO_DEBUG \
             -DLFS_NO_WARN \
             -DLFS_NO_ERROR \
-            -DLFS_READONLY"
-          cp lfs.csv results/${{matrix.arch}}-readonly.csv
-          ./scripts/summary.py results/${{matrix.arch}}-readonly.csv
-      - name: results-threadsafe
+            -DLFS_READONLY" \
+            CODEFLAGS+="-o results/code-${{matrix.arch}}-readonly.csv"
+      - name: results-code-threadsafe
         run: |
           mkdir -p results
           make clean
-          make lfs.csv \
+          make code \
             CFLAGS+=" \
             -DLFS_NO_ASSERT \
             -DLFS_NO_DEBUG \
             -DLFS_NO_WARN \
             -DLFS_NO_ERROR \
-            -DLFS_THREADSAFE"
-          cp lfs.csv results/${{matrix.arch}}-threadsafe.csv
-          ./scripts/summary.py results/${{matrix.arch}}-threadsafe.csv
-      - name: results-migrate
+            -DLFS_THREADSAFE" \
+            CODEFLAGS+="-o results/code-${{matrix.arch}}-threadsafe.csv"
+      - name: results-code-migrate
         run: |
           mkdir -p results
           make clean
-          make lfs.csv \
+          make code \
             CFLAGS+=" \
             -DLFS_NO_ASSERT \
             -DLFS_NO_DEBUG \
             -DLFS_NO_WARN \
             -DLFS_NO_ERROR \
-            -DLFS_MIGRATE"
-          cp lfs.csv results/${{matrix.arch}}-migrate.csv
-          ./scripts/summary.py results/${{matrix.arch}}-migrate.csv
-      - name: results-error-asserts
+            -DLFS_MIGRATE" \
+            CODEFLAGS+="-o results/code-${{matrix.arch}}-migrate.csv"
+      - name: results-code-error-asserts
         run: |
           mkdir -p results
           make clean
-          make lfs.csv \
+          make code \
             CFLAGS+=" \
             -DLFS_NO_DEBUG \
             -DLFS_NO_WARN \
             -DLFS_NO_ERROR \
-            -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'"
-          cp lfs.csv results/${{matrix.arch}}-error-asserts.csv
-          ./scripts/summary.py results/${{matrix.arch}}-error-asserts.csv
+            -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'" \
+            CODEFLAGS+="-o results/code-${{matrix.arch}}-error-asserts.csv"
       - name: upload-results
         uses: actions/upload-artifact@v2
         with:
           name: results
           path: results

-      # create statuses with results
-      - name: collect-status
-        run: |
-          mkdir -p status
-          for f in $(shopt -s nullglob ; echo results/*.csv)
-          do
-            export STEP="results$(
-              echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p')"
-            for r in code stack structs
-            do
-              export CONTEXT="results (${{matrix.arch}}$(
-                echo $f | sed -n 's/[^-]*-\(.*\).csv/, \1/p')) / $r"
-              export PREV="$(curl -sS \
-                "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
-                | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
-                  | select(.context == env.CONTEXT).description
-                  | capture("(?<result>[0-9∞]+)").result' \
-                || echo 0)"
-              export DESCRIPTION="$(./scripts/summary.py $f -f $r -Y | awk '
-                NR==2 {printf "%s B",$2}
-                NR==2 && ENVIRON["PREV"]+0 != 0 {
-                  printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
-              jq -n '{
-                state: "success",
-                context: env.CONTEXT,
-                description: env.DESCRIPTION,
-                target_job: "${{github.job}} (${{matrix.arch}})",
-                target_step: env.STEP}' \
-                | tee status/$r-${{matrix.arch}}$(
-                  echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p').json
-            done
-          done
+      # limit reporting to Thumb, otherwise there would be too many numbers
+      # flying around for the results to be easily readable
+      - name: collect-status
+        if: ${{matrix.arch == 'thumb'}}
+        run: |
+          mkdir -p status
+          for f in $(shopt -s nullglob ; echo results/code*.csv)
+          do
+            export STEP="results-code$(
+              echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p')"
+            export CONTEXT="results / code$(
+              echo $f | sed -n 's/.*code-.*-\(.*\).csv/ (\1)/p')"
+            export PREV="$(curl -sS \
+              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
+              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
+                | select(.context == env.CONTEXT).description
+                | capture("Code size is (?<result>[0-9]+)").result' \
+              || echo 0)"
+            export DESCRIPTION="$(./scripts/code.py -u $f -s | awk '
+              NR==2 {printf "Code size is %d B",$2}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
+            jq -n '{
+              state: "success",
+              context: env.CONTEXT,
+              description: env.DESCRIPTION,
+              target_job: "${{github.job}} (${{matrix.arch}})",
+              target_step: env.STEP}' \
+              | tee status/code$(
+                echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p').json
+          done
       - name: upload-status
+        if: ${{matrix.arch == 'thumb'}}
         uses: actions/upload-artifact@v2
         with:
           name: status
@@ -274,7 +252,7 @@ jobs:
   # run under Valgrind to check for memory errors
   valgrind:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-18.04
     steps:
       - uses: actions/checkout@v2
       - name: install
@@ -294,7 +272,7 @@ jobs:
   # self-host with littlefs-fuse for a fuzz-like test
   fuse:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-18.04
     if: ${{!endsWith(github.ref, '-prefix')}}
     steps:
       - uses: actions/checkout@v2
@@ -319,18 +297,16 @@ jobs:
           # setup disk for littlefs-fuse
           mkdir mount
-          LOOP=$(sudo losetup -f)
-          sudo chmod a+rw $LOOP
+          sudo chmod a+rw /dev/loop0
           dd if=/dev/zero bs=512 count=128K of=disk
-          losetup $LOOP disk
-          echo "LOOP=$LOOP" >> $GITHUB_ENV
+          losetup /dev/loop0 disk

       - name: test
         run: |
           # self-host test
           make -C littlefs-fuse
-          littlefs-fuse/lfs --format $LOOP
-          littlefs-fuse/lfs $LOOP mount
+          littlefs-fuse/lfs --format /dev/loop0
+          littlefs-fuse/lfs /dev/loop0 mount

           ls mount
           mkdir mount/littlefs
@@ -342,7 +318,7 @@ jobs:
   # test migration using littlefs-fuse
   migrate:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-18.04
     if: ${{!endsWith(github.ref, '-prefix')}}
     steps:
       - uses: actions/checkout@v2
@@ -372,11 +348,9 @@ jobs:
           # setup disk for littlefs-fuse
           mkdir mount
-          LOOP=$(sudo losetup -f)
-          sudo chmod a+rw $LOOP
+          sudo chmod a+rw /dev/loop0
           dd if=/dev/zero bs=512 count=128K of=disk
-          losetup $LOOP disk
-          echo "LOOP=$LOOP" >> $GITHUB_ENV
+          losetup /dev/loop0 disk

       - name: test
         run: |
           # compile v1 and v2
@@ -384,8 +358,8 @@ jobs:
           make -C v2

           # run self-host test with v1
-          v1/lfs --format $LOOP
-          v1/lfs $LOOP mount
+          v1/lfs --format /dev/loop0
+          v1/lfs /dev/loop0 mount

           ls mount
           mkdir mount/littlefs
@@ -399,8 +373,8 @@ jobs:
           cd ../..
           fusermount -u mount
-          v2/lfs --migrate $LOOP
-          v2/lfs $LOOP mount
+          v2/lfs --migrate /dev/loop0
+          v2/lfs /dev/loop0 mount

           # run self-host test with v2 right where we left off
           ls mount
@@ -411,7 +385,7 @@ jobs:
   # collect coverage info
   coverage:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-18.04
     needs: [test]
     steps:
       - uses: actions/checkout@v2
@@ -447,14 +421,14 @@ jobs:
           export STEP="results-coverage"
           export CONTEXT="results / coverage"
           export PREV="$(curl -sS \
-            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
+            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
             | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
               | select(.context == env.CONTEXT).description
-              | capture("(?<result>[0-9\\.]+)").result' \
+              | capture("Coverage is (?<result>[0-9\\.]+)").result' \
             || echo 0)"
           export DESCRIPTION="$(
-            ./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
-              NR==2 {printf "%.1f%% of %d lines",$4,$3}
+            ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' '
+              NR==2 {printf "Coverage is %.1f%% of %d lines",$4,$3}
               NR==2 && ENVIRON["PREV"]+0 != 0 {
                 printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
           jq -n '{
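For context on these status steps: GitHub's combined-status endpoint ($GITHUB_API_URL/repos/.../status/master) returns JSON whose .statuses[] entries carry a human-readable description such as "Coverage is 94.2% of 2341 lines", and the jq capture() calls above recover the previous number from that string. A minimal Python sketch of the same extraction, with an invented description:

    import re

    # a previously-posted status description, as returned by the API
    description = "Coverage is 94.2% of 2341 lines (+0.1%)"

    # mirrors jq's capture("Coverage is (?<result>[0-9\\.]+)")
    m = re.search(r"Coverage is (?P<result>[0-9.]+)", description)
    prev = float(m.group("result")) if m else 0
    print(prev)  # 94.2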

.gitignore (vendored)
View File

@@ -2,8 +2,6 @@
 *.o
 *.d
 *.a
-*.ci
-*.csv

 # Testing things
 blocks/

View File

@@ -1,3 +1,4 @@
+Copyright (c) 2022, The littlefs authors.
 Copyright (c) 2017, Arm Limited. All rights reserved.

 Redistribution and use in source and binary forms, with or without modification,

View File

@@ -17,63 +17,44 @@ TARGET ?= $(BUILDDIR)lfs.a
 endif

 CC ?= gcc
 AR ?= ar
 SIZE ?= size
 CTAGS ?= ctags
 NM ?= nm
-OBJDUMP ?= objdump
 LCOV ?= lcov

 SRC ?= $(wildcard *.c)
 OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
 DEP := $(SRC:%.c=$(BUILDDIR)%.d)
 ASM := $(SRC:%.c=$(BUILDDIR)%.s)
-CGI := $(SRC:%.c=$(BUILDDIR)%.ci)

 ifdef DEBUG
-override CFLAGS += -O0
+override CFLAGS += -O0 -g3
 else
 override CFLAGS += -Os
 endif
 ifdef TRACE
 override CFLAGS += -DLFS_YES_TRACE
 endif
-override CFLAGS += -g3
 override CFLAGS += -I.
 override CFLAGS += -std=c99 -Wall -pedantic
 override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef

 ifdef VERBOSE
 override TESTFLAGS += -v
-override CALLSFLAGS += -v
 override CODEFLAGS += -v
-override DATAFLAGS += -v
-override STACKFLAGS += -v
-override STRUCTSFLAGS += -v
 override COVERAGEFLAGS += -v
 endif
 ifdef EXEC
 override TESTFLAGS += --exec="$(EXEC)"
 endif
-ifdef COVERAGE
-override TESTFLAGS += --coverage
-endif
 ifdef BUILDDIR
 override TESTFLAGS += --build-dir="$(BUILDDIR:/=)"
-override CALLSFLAGS += --build-dir="$(BUILDDIR:/=)"
 override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
-override DATAFLAGS += --build-dir="$(BUILDDIR:/=)"
-override STACKFLAGS += --build-dir="$(BUILDDIR:/=)"
-override STRUCTSFLAGS += --build-dir="$(BUILDDIR:/=)"
 override COVERAGEFLAGS += --build-dir="$(BUILDDIR:/=)"
 endif
 ifneq ($(NM),nm)
 override CODEFLAGS += --nm-tool="$(NM)"
-override DATAFLAGS += --nm-tool="$(NM)"
-endif
-ifneq ($(OBJDUMP),objdump)
-override STRUCTSFLAGS += --objdump-tool="$(OBJDUMP)"
 endif
@@ -92,9 +73,9 @@ size: $(OBJ)
 tags:
        $(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC)

-.PHONY: calls
-calls: $(CGI)
-       ./scripts/calls.py $^ $(CALLSFLAGS)
+.PHONY: code
+code: $(OBJ)
+       ./scripts/code.py $^ $(CODEFLAGS)

 .PHONY: test
 test:
@@ -103,30 +84,9 @@ test:
 test%: tests/test$$(firstword $$(subst \#, ,%)).toml
        ./scripts/test.py $@ $(TESTFLAGS)

-.PHONY: code
-code: $(OBJ)
-       ./scripts/code.py $^ -S $(CODEFLAGS)
-
-.PHONY: data
-data: $(OBJ)
-       ./scripts/data.py $^ -S $(DATAFLAGS)
-
-.PHONY: stack
-stack: $(CGI)
-       ./scripts/stack.py $^ -S $(STACKFLAGS)
-
-.PHONY: structs
-structs: $(OBJ)
-       ./scripts/structs.py $^ -S $(STRUCTSFLAGS)
-
 .PHONY: coverage
 coverage:
-       ./scripts/coverage.py $(BUILDDIR)tests/*.toml.info -s $(COVERAGEFLAGS)
+       ./scripts/coverage.py $(BUILDDIR)tests/*.toml.info $(COVERAGEFLAGS)
-
-.PHONY: summary
-summary: $(BUILDDIR)lfs.csv
-       ./scripts/summary.py -Y $^ $(SUMMARYFLAGS)

 # rules
 -include $(DEP)
@@ -135,39 +95,20 @@ summary: $(BUILDDIR)lfs.csv
 $(BUILDDIR)lfs: $(OBJ)
        $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@

-$(BUILDDIR)lfs.a: $(OBJ)
+$(BUILDDIR)%.a: $(OBJ)
        $(AR) rcs $@ $^

-$(BUILDDIR)lfs.csv: $(OBJ) $(CGI)
-       ./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o $@
-       ./scripts/data.py $(OBJ) -q -m $@ $(DATAFLAGS) -o $@
-       ./scripts/stack.py $(CGI) -q -m $@ $(STACKFLAGS) -o $@
-       ./scripts/structs.py $(OBJ) -q -m $@ $(STRUCTSFLAGS) -o $@
-       $(if $(COVERAGE),\
-               ./scripts/coverage.py $(BUILDDIR)tests/*.toml.info \
-                       -q -m $@ $(COVERAGEFLAGS) -o $@)

 $(BUILDDIR)%.o: %.c
        $(CC) -c -MMD $(CFLAGS) $< -o $@

 $(BUILDDIR)%.s: %.c
        $(CC) -S $(CFLAGS) $< -o $@

-# gcc depends on the output file for intermediate file names, so
-# we can't omit to .o output. We also need to serialize with the
-# normal .o rule because otherwise we can end up with multiprocess
-# problems with two instances of gcc modifying the same .o
-$(BUILDDIR)%.ci: %.c | $(BUILDDIR)%.o
-       $(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $|

 # clean everything
 .PHONY: clean
 clean:
-       rm -f $(BUILDDIR)lfs
-       rm -f $(BUILDDIR)lfs.a
-       rm -f $(BUILDDIR)lfs.csv
+       rm -f $(TARGET)
        rm -f $(OBJ)
-       rm -f $(CGI)
        rm -f $(DEP)
        rm -f $(ASM)
        rm -f $(BUILDDIR)tests/*.toml.*

View File

@@ -1,6 +1,7 @@
 /*
  * Block device emulated in a file
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

View File

@@ -1,6 +1,7 @@
 /*
  * Block device emulated in a file
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

View File

@@ -1,6 +1,7 @@
 /*
  * Block device emulated in RAM
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

View File

@@ -1,6 +1,7 @@
 /*
  * Block device emulated in RAM
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

View File

@@ -2,6 +2,7 @@
  * Testing block device, wraps filebd and rambd while providing a bunch
  * of hooks for testing littlefs in various conditions.
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

View File

@@ -2,6 +2,7 @@
  * Testing block device, wraps filebd and rambd while providing a bunch
  * of hooks for testing littlefs in various conditions.
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

lfs.c
View File

@@ -1,6 +1,7 @@
 /*
  * The little filesystem
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

lfs.h
View File

@@ -1,6 +1,7 @@
 /*
  * The little filesystem
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

View File

@@ -1,6 +1,7 @@
 /*
  * lfs util functions
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

View File

@@ -1,6 +1,7 @@
 /*
  * lfs utility functions
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

View File

@@ -15,7 +15,7 @@ import csv
 import collections as co

-OBJ_PATHS = ['*.o']
+OBJ_PATHS = ['*.o', 'bd/*.o']

 def collect(paths, **args):
     results = co.defaultdict(lambda: 0)
@@ -31,8 +31,7 @@ def collect(paths, **args):
         proc = sp.Popen(cmd,
             stdout=sp.PIPE,
             stderr=sp.PIPE if not args.get('verbose') else None,
-            universal_newlines=True,
-            errors='replace')
+            universal_newlines=True)
         for line in proc.stdout:
             m = pattern.match(line)
             if m:
@@ -49,30 +48,16 @@ def collect(paths, **args):
         # map to source files
         if args.get('build_dir'):
             file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
-        # replace .o with .c, different scripts report .o/.c, we need to
-        # choose one if we want to deduplicate csv files
-        file = re.sub('\.o$', '.c', file)
         # discard internal functions
-        if not args.get('everything'):
-            if func.startswith('__'):
-                continue
+        if func.startswith('__'):
+            continue
         # discard .8449 suffixes created by optimizer
         func = re.sub('\.[0-9]+', '', func)
         flat_results.append((file, func, size))

     return flat_results

 def main(**args):
-    def openio(path, mode='r'):
-        if path == '-':
-            if 'r' in mode:
-                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
-            else:
-                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
-        else:
-            return open(path, mode)
-
     # find sizes
     if not args.get('use', None):
         # find .o files
@@ -90,14 +75,13 @@ def main(**args):
         results = collect(paths, **args)
     else:
-        with openio(args['use']) as f:
+        with open(args['use']) as f:
             r = csv.DictReader(f)
             results = [
                 (   result['file'],
-                    result['name'],
-                    int(result['code_size']))
-                for result in r
-                if result.get('code_size') not in {None, ''}]
+                    result['function'],
+                    int(result['size']))
+                for result in r]

     total = 0
     for _, _, size in results:
@@ -105,17 +89,13 @@ def main(**args):
     # find previous results?
     if args.get('diff'):
-        try:
-            with openio(args['diff']) as f:
-                r = csv.DictReader(f)
-                prev_results = [
-                    (   result['file'],
-                        result['name'],
-                        int(result['code_size']))
-                    for result in r
-                    if result.get('code_size') not in {None, ''}]
-        except FileNotFoundError:
-            prev_results = []
+        with open(args['diff']) as f:
+            r = csv.DictReader(f)
+            prev_results = [
+                (   result['file'],
+                    result['function'],
+                    int(result['size']))
+                for result in r]

         prev_total = 0
         for _, _, size in prev_results:
@@ -123,34 +103,14 @@ def main(**args):
     # write results to CSV
     if args.get('output'):
-        merged_results = co.defaultdict(lambda: {})
-        other_fields = []
-
-        # merge?
-        if args.get('merge'):
-            try:
-                with openio(args['merge']) as f:
-                    r = csv.DictReader(f)
-                    for result in r:
-                        file = result.pop('file', '')
-                        func = result.pop('name', '')
-                        result.pop('code_size', None)
-                        merged_results[(file, func)] = result
-                        other_fields = result.keys()
-            except FileNotFoundError:
-                pass
-
-        for file, func, size in results:
-            merged_results[(file, func)]['code_size'] = size
-
-        with openio(args['output'], 'w') as f:
-            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'code_size'])
-            w.writeheader()
-            for (file, func), result in sorted(merged_results.items()):
-                w.writerow({'file': file, 'name': func, **result})
+        with open(args['output'], 'w') as f:
+            w = csv.writer(f)
+            w.writerow(['file', 'function', 'size'])
+            for file, func, size in sorted(results):
+                w.writerow((file, func, size))

     # print results
-    def dedup_entries(results, by='name'):
+    def dedup_entries(results, by='function'):
         entries = co.defaultdict(lambda: 0)
         for file, func, size in results:
             entry = (file if by == 'file' else func)
@@ -166,67 +126,45 @@ def main(**args):
             diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
         return diff

-    def sorted_entries(entries):
-        if args.get('size_sort'):
-            return sorted(entries, key=lambda x: (-x[1], x))
-        elif args.get('reverse_size_sort'):
-            return sorted(entries, key=lambda x: (+x[1], x))
-        else:
-            return sorted(entries)
-
-    def sorted_diff_entries(entries):
-        if args.get('size_sort'):
-            return sorted(entries, key=lambda x: (-x[1][1], x))
-        elif args.get('reverse_size_sort'):
-            return sorted(entries, key=lambda x: (+x[1][1], x))
-        else:
-            return sorted(entries, key=lambda x: (-x[1][3], x))
-
     def print_header(by=''):
         if not args.get('diff'):
             print('%-36s %7s' % (by, 'size'))
         else:
             print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

-    def print_entry(name, size):
-        print("%-36s %7d" % (name, size))
-
-    def print_diff_entry(name, old, new, diff, ratio):
-        print("%-36s %7s %7s %+7d%s" % (name,
-            old or "-",
-            new or "-",
-            diff,
-            ' (%+.1f%%)' % (100*ratio) if ratio else ''))
-
-    def print_entries(by='name'):
+    def print_entries(by='function'):
         entries = dedup_entries(results, by=by)

         if not args.get('diff'):
             print_header(by=by)
-            for name, size in sorted_entries(entries.items()):
-                print_entry(name, size)
+            for name, size in sorted(entries.items()):
+                print("%-36s %7d" % (name, size))
         else:
             prev_entries = dedup_entries(prev_results, by=by)
             diff = diff_entries(prev_entries, entries)
             print_header(by='%s (%d added, %d removed)' % (by,
                 sum(1 for old, _, _, _ in diff.values() if not old),
                 sum(1 for _, new, _, _ in diff.values() if not new)))
-            for name, (old, new, diff, ratio) in sorted_diff_entries(
-                    diff.items()):
+            for name, (old, new, diff, ratio) in sorted(diff.items(),
+                    key=lambda x: (-x[1][3], x)):
                 if ratio or args.get('all'):
-                    print_diff_entry(name, old, new, diff, ratio)
+                    print("%-36s %7s %7s %+7d%s" % (name,
+                        old or "-",
+                        new or "-",
+                        diff,
+                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))

     def print_totals():
         if not args.get('diff'):
-            print_entry('TOTAL', total)
+            print("%-36s %7d" % ('TOTAL', total))
        else:
-            ratio = (0.0 if not prev_total and not total
-                else 1.0 if not prev_total
-                else (total-prev_total)/prev_total)
-            print_diff_entry('TOTAL',
-                prev_total, total,
-                total-prev_total,
-                ratio)
+            ratio = (total-prev_total)/prev_total if prev_total else 1.0
+            print("%-36s %7s %7s %+7d%s" % (
+                'TOTAL',
+                prev_total if prev_total else '-',
+                total if total else '-',
+                total-prev_total,
+                ' (%+.1f%%)' % (100*ratio) if ratio else ''))

     if args.get('quiet'):
         pass
@@ -237,7 +175,7 @@ def main(**args):
         print_entries(by='file')
         print_totals()
     else:
-        print_entries(by='name')
+        print_entries(by='function')
         print_totals()

 if __name__ == "__main__":
@@ -250,30 +188,22 @@ if __name__ == "__main__":
         or a list of paths. Defaults to %r." % OBJ_PATHS)
     parser.add_argument('-v', '--verbose', action='store_true',
         help="Output commands that run behind the scenes.")
-    parser.add_argument('-q', '--quiet', action='store_true',
-        help="Don't show anything, useful with -o.")
     parser.add_argument('-o', '--output',
         help="Specify CSV file to store results.")
     parser.add_argument('-u', '--use',
         help="Don't compile and find code sizes, instead use this CSV file.")
     parser.add_argument('-d', '--diff',
         help="Specify CSV file to diff code size against.")
-    parser.add_argument('-m', '--merge',
-        help="Merge with an existing CSV file when writing to output.")
     parser.add_argument('-a', '--all', action='store_true',
         help="Show all functions, not just the ones that changed.")
-    parser.add_argument('-A', '--everything', action='store_true',
-        help="Include builtin and libc specific symbols.")
-    parser.add_argument('-s', '--size-sort', action='store_true',
-        help="Sort by size.")
-    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
-        help="Sort by size, but backwards.")
-    parser.add_argument('-F', '--files', action='store_true',
+    parser.add_argument('--files', action='store_true',
         help="Show file-level code sizes. Note this does not include padding! "
             "So sizes may differ from other tools.")
-    parser.add_argument('-Y', '--summary', action='store_true',
+    parser.add_argument('-s', '--summary', action='store_true',
         help="Only show the total code size.")
-    parser.add_argument('--type', default='tTrRdD',
+    parser.add_argument('-q', '--quiet', action='store_true',
+        help="Don't show anything, useful with -o.")
+    parser.add_argument('--type', default='tTrRdDbB',
         help="Type of symbols to report, this uses the same single-character "
             "type-names emitted by nm. Defaults to %(default)r.")
     parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
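For context on this script's collect(): it shells out to nm --size-sort, which prints one symbol per line as a hex size, a one-character symbol type, and a name. A self-contained sketch of that parse, using the same pattern as above (the sample line is invented):

    import re

    # same shape as code.py's pattern, with the newer default 'tTrRdDbB' types
    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)'
        ' (?P<type>[tTrRdDbB])'
        ' (?P<func>.+?)$')

    # one line of `nm --size-sort lfs.o` output
    line = "00000178 T lfs_file_open"
    m = pattern.match(line)
    if m:
        # nm reports sizes in hex
        print(m.group('func'), int(m.group('size'), 16))  # lfs_file_open 376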

View File

@@ -55,9 +55,8 @@ def collect(paths, **args):
     for (file, func), (hits, count) in reduced_funcs.items():
         # discard internal/testing functions (test_* injected with
         # internal testing)
-        if not args.get('everything'):
-            if func.startswith('__') or func.startswith('test_'):
-                continue
+        if func.startswith('__') or func.startswith('test_'):
+            continue
         # discard .8449 suffixes created by optimizer
         func = re.sub('\.[0-9]+', '', func)
         results.append((file, func, hits, count))
@@ -66,15 +65,6 @@ def collect(paths, **args):
 def main(**args):
-    def openio(path, mode='r'):
-        if path == '-':
-            if 'r' in mode:
-                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
-            else:
-                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
-        else:
-            return open(path, mode)
-
     # find coverage
     if not args.get('use'):
         # find *.info files
@@ -92,16 +82,14 @@ def main(**args):
         results = collect(paths, **args)
     else:
-        with openio(args['use']) as f:
+        with open(args['use']) as f:
             r = csv.DictReader(f)
             results = [
                 (   result['file'],
-                    result['name'],
-                    int(result['coverage_hits']),
-                    int(result['coverage_count']))
-                for result in r
-                if result.get('coverage_hits') not in {None, ''}
-                if result.get('coverage_count') not in {None, ''}]
+                    result['function'],
+                    int(result['hits']),
+                    int(result['count']))
+                for result in r]

     total_hits, total_count = 0, 0
     for _, _, hits, count in results:
# find previous results? # find previous results?
if args.get('diff'): if args.get('diff'):
try: with open(args['diff']) as f:
with openio(args['diff']) as f: r = csv.DictReader(f)
r = csv.DictReader(f) prev_results = [
prev_results = [ ( result['file'],
( result['file'], result['function'],
result['name'], int(result['hits']),
int(result['coverage_hits']), int(result['count']))
int(result['coverage_count'])) for result in r]
for result in r
if result.get('coverage_hits') not in {None, ''}
if result.get('coverage_count') not in {None, ''}]
except FileNotFoundError:
prev_results = []
prev_total_hits, prev_total_count = 0, 0 prev_total_hits, prev_total_count = 0, 0
for _, _, hits, count in prev_results: for _, _, hits, count in prev_results:
@@ -131,36 +114,14 @@ def main(**args):
     # write results to CSV
     if args.get('output'):
-        merged_results = co.defaultdict(lambda: {})
-        other_fields = []
-
-        # merge?
-        if args.get('merge'):
-            try:
-                with openio(args['merge']) as f:
-                    r = csv.DictReader(f)
-                    for result in r:
-                        file = result.pop('file', '')
-                        func = result.pop('name', '')
-                        result.pop('coverage_hits', None)
-                        result.pop('coverage_count', None)
-                        merged_results[(file, func)] = result
-                        other_fields = result.keys()
-            except FileNotFoundError:
-                pass
-
-        for file, func, hits, count in results:
-            merged_results[(file, func)]['coverage_hits'] = hits
-            merged_results[(file, func)]['coverage_count'] = count
-
-        with openio(args['output'], 'w') as f:
-            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'coverage_hits', 'coverage_count'])
-            w.writeheader()
-            for (file, func), result in sorted(merged_results.items()):
-                w.writerow({'file': file, 'name': func, **result})
+        with open(args['output'], 'w') as f:
+            w = csv.writer(f)
+            w.writerow(['file', 'function', 'hits', 'count'])
+            for file, func, hits, count in sorted(results):
+                w.writerow((file, func, hits, count))

     # print results
-    def dedup_entries(results, by='name'):
+    def dedup_entries(results, by='function'):
         entries = co.defaultdict(lambda: (0, 0))
         for file, func, hits, count in results:
             entry = (file if by == 'file' else func)
@@ -186,59 +147,23 @@ def main(**args):
                 - (old_hits/old_count if old_count else 1.0)))
         return diff

-    def sorted_entries(entries):
-        if args.get('coverage_sort'):
-            return sorted(entries, key=lambda x: (-(x[1][0]/x[1][1] if x[1][1] else -1), x))
-        elif args.get('reverse_coverage_sort'):
-            return sorted(entries, key=lambda x: (+(x[1][0]/x[1][1] if x[1][1] else -1), x))
-        else:
-            return sorted(entries)
-
-    def sorted_diff_entries(entries):
-        if args.get('coverage_sort'):
-            return sorted(entries, key=lambda x: (-(x[1][2]/x[1][3] if x[1][3] else -1), x))
-        elif args.get('reverse_coverage_sort'):
-            return sorted(entries, key=lambda x: (+(x[1][2]/x[1][3] if x[1][3] else -1), x))
-        else:
-            return sorted(entries, key=lambda x: (-x[1][6], x))
-
     def print_header(by=''):
         if not args.get('diff'):
             print('%-36s %19s' % (by, 'hits/line'))
         else:
             print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))

-    def print_entry(name, hits, count):
-        print("%-36s %11s %7s" % (name,
-            '%d/%d' % (hits, count)
-            if count else '-',
-            '%.1f%%' % (100*hits/count)
-            if count else '-'))
-
-    def print_diff_entry(name,
-            old_hits, old_count,
-            new_hits, new_count,
-            diff_hits, diff_count,
-            ratio):
-        print("%-36s %11s %7s %11s %7s %11s%s" % (name,
-            '%d/%d' % (old_hits, old_count)
-            if old_count else '-',
-            '%.1f%%' % (100*old_hits/old_count)
-            if old_count else '-',
-            '%d/%d' % (new_hits, new_count)
-            if new_count else '-',
-            '%.1f%%' % (100*new_hits/new_count)
-            if new_count else '-',
-            '%+d/%+d' % (diff_hits, diff_count),
-            ' (%+.1f%%)' % (100*ratio) if ratio else ''))
-
-    def print_entries(by='name'):
+    def print_entries(by='function'):
         entries = dedup_entries(results, by=by)

         if not args.get('diff'):
             print_header(by=by)
-            for name, (hits, count) in sorted_entries(entries.items()):
-                print_entry(name, hits, count)
+            for name, (hits, count) in sorted(entries.items()):
+                print("%-36s %11s %7s" % (name,
+                    '%d/%d' % (hits, count)
+                    if count else '-',
+                    '%.1f%%' % (100*hits/count)
+                    if count else '-'))
         else:
             prev_entries = dedup_entries(prev_results, by=by)
             diff = diff_entries(prev_entries, entries)
@@ -248,28 +173,45 @@ def main(**args):
             for name, (
                     old_hits, old_count,
                     new_hits, new_count,
-                    diff_hits, diff_count, ratio) in sorted_diff_entries(
-                        diff.items()):
+                    diff_hits, diff_count, ratio) in sorted(diff.items(),
+                        key=lambda x: (-x[1][6], x)):
                 if ratio or args.get('all'):
-                    print_diff_entry(name,
-                        old_hits, old_count,
-                        new_hits, new_count,
-                        diff_hits, diff_count,
-                        ratio)
+                    print("%-36s %11s %7s %11s %7s %11s%s" % (name,
+                        '%d/%d' % (old_hits, old_count)
+                        if old_count else '-',
+                        '%.1f%%' % (100*old_hits/old_count)
+                        if old_count else '-',
+                        '%d/%d' % (new_hits, new_count)
+                        if new_count else '-',
+                        '%.1f%%' % (100*new_hits/new_count)
+                        if new_count else '-',
+                        '%+d/%+d' % (diff_hits, diff_count),
+                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))

     def print_totals():
         if not args.get('diff'):
-            print_entry('TOTAL', total_hits, total_count)
+            print("%-36s %11s %7s" % ('TOTAL',
+                '%d/%d' % (total_hits, total_count)
+                if total_count else '-',
+                '%.1f%%' % (100*total_hits/total_count)
+                if total_count else '-'))
         else:
             ratio = ((total_hits/total_count
                 if total_count else 1.0)
                 - (prev_total_hits/prev_total_count
                 if prev_total_count else 1.0))
-            print_diff_entry('TOTAL',
-                prev_total_hits, prev_total_count,
-                total_hits, total_count,
-                total_hits-prev_total_hits, total_count-prev_total_count,
-                ratio)
+            print("%-36s %11s %7s %11s %7s %11s%s" % ('TOTAL',
+                '%d/%d' % (prev_total_hits, prev_total_count)
+                if prev_total_count else '-',
+                '%.1f%%' % (100*prev_total_hits/prev_total_count)
+                if prev_total_count else '-',
+                '%d/%d' % (total_hits, total_count)
+                if total_count else '-',
+                '%.1f%%' % (100*total_hits/total_count)
+                if total_count else '-',
+                '%+d/%+d' % (total_hits-prev_total_hits,
+                    total_count-prev_total_count),
+                ' (%+.1f%%)' % (100*ratio) if ratio else ''))
@@ -280,7 +222,7 @@ def main(**args):
         print_entries(by='file')
         print_totals()
     else:
-        print_entries(by='name')
+        print_entries(by='function')
         print_totals()

 if __name__ == "__main__":
@@ -301,23 +243,12 @@ if __name__ == "__main__":
         help="Don't do any work, instead use this CSV file.")
     parser.add_argument('-d', '--diff',
         help="Specify CSV file to diff code size against.")
-    parser.add_argument('-m', '--merge',
-        help="Merge with an existing CSV file when writing to output.")
     parser.add_argument('-a', '--all', action='store_true',
         help="Show all functions, not just the ones that changed.")
-    parser.add_argument('-A', '--everything', action='store_true',
-        help="Include builtin and libc specific symbols.")
-    parser.add_argument('-s', '--coverage-sort', action='store_true',
-        help="Sort by coverage.")
-    parser.add_argument('-S', '--reverse-coverage-sort', action='store_true',
-        help="Sort by coverage, but backwards.")
-    parser.add_argument('-F', '--files', action='store_true',
+    parser.add_argument('--files', action='store_true',
         help="Show file-level coverage.")
-    parser.add_argument('-Y', '--summary', action='store_true',
+    parser.add_argument('-s', '--summary', action='store_true',
        help="Only show the total coverage.")
     parser.add_argument('-q', '--quiet', action='store_true',
         help="Don't show anything, useful with -o.")
-    parser.add_argument('--build-dir',
-        help="Specify the relative build directory. Used to map object files \
-to the correct source files.")

     sys.exit(main(**vars(parser.parse_args())))
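To make the diff arithmetic in this script concrete: coverage entries are hits/count pairs, and the reported percentage delta is the difference of the two ratios, not a ratio of raw counts. A small worked example with invented numbers:

    # old run: 1880 of 2000 lines hit; new run: 1950 of 2050 lines hit
    prev_hits, prev_count = 1880, 2000
    hits, count = 1950, 2050

    ratio = (hits/count if count else 1.0) \
        - (prev_hits/prev_count if prev_count else 1.0)

    print('%+d/%+d (%+.1f%%)' % (hits-prev_hits, count-prev_count, 100*ratio))
    # -> +70/+50 (+1.1%)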

View File

@@ -1,283 +0,0 @@
-#!/usr/bin/env python3
-#
-# Script to find data size at the function level. Basically just a bit wrapper
-# around nm with some extra conveniences for comparing builds. Heavily inspired
-# by Linux's Bloat-O-Meter.
-#
-
-import os
-import glob
-import itertools as it
-import subprocess as sp
-import shlex
-import re
-import csv
-import collections as co
-
-OBJ_PATHS = ['*.o']
-
-def collect(paths, **args):
-    results = co.defaultdict(lambda: 0)
-    pattern = re.compile(
-        '^(?P<size>[0-9a-fA-F]+)' +
-        ' (?P<type>[%s])' % re.escape(args['type']) +
-        ' (?P<func>.+?)$')
-    for path in paths:
-        # note nm-tool may contain extra args
-        cmd = args['nm_tool'] + ['--size-sort', path]
-        if args.get('verbose'):
-            print(' '.join(shlex.quote(c) for c in cmd))
-        proc = sp.Popen(cmd,
-            stdout=sp.PIPE,
-            stderr=sp.PIPE if not args.get('verbose') else None,
-            universal_newlines=True,
-            errors='replace')
-        for line in proc.stdout:
-            m = pattern.match(line)
-            if m:
-                results[(path, m.group('func'))] += int(m.group('size'), 16)
-        proc.wait()
-        if proc.returncode != 0:
-            if not args.get('verbose'):
-                for line in proc.stderr:
-                    sys.stdout.write(line)
-            sys.exit(-1)
-
-    flat_results = []
-    for (file, func), size in results.items():
-        # map to source files
-        if args.get('build_dir'):
-            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
-        # replace .o with .c, different scripts report .o/.c, we need to
-        # choose one if we want to deduplicate csv files
-        file = re.sub('\.o$', '.c', file)
-        # discard internal functions
-        if not args.get('everything'):
-            if func.startswith('__'):
-                continue
-        # discard .8449 suffixes created by optimizer
-        func = re.sub('\.[0-9]+', '', func)
-        flat_results.append((file, func, size))
-
-    return flat_results
-
-def main(**args):
-    def openio(path, mode='r'):
-        if path == '-':
-            if 'r' in mode:
-                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
-            else:
-                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
-        else:
-            return open(path, mode)
-
-    # find sizes
-    if not args.get('use', None):
-        # find .o files
-        paths = []
-        for path in args['obj_paths']:
-            if os.path.isdir(path):
-                path = path + '/*.o'
-
-            for path in glob.glob(path):
-                paths.append(path)
-
-        if not paths:
-            print('no .obj files found in %r?' % args['obj_paths'])
-            sys.exit(-1)
-
-        results = collect(paths, **args)
-    else:
-        with openio(args['use']) as f:
-            r = csv.DictReader(f)
-            results = [
-                (   result['file'],
-                    result['name'],
-                    int(result['data_size']))
-                for result in r
-                if result.get('data_size') not in {None, ''}]
-
-    total = 0
-    for _, _, size in results:
-        total += size
-
-    # find previous results?
-    if args.get('diff'):
-        try:
-            with openio(args['diff']) as f:
-                r = csv.DictReader(f)
-                prev_results = [
-                    (   result['file'],
-                        result['name'],
-                        int(result['data_size']))
-                    for result in r
-                    if result.get('data_size') not in {None, ''}]
-        except FileNotFoundError:
-            prev_results = []
-
-        prev_total = 0
-        for _, _, size in prev_results:
-            prev_total += size
-
-    # write results to CSV
-    if args.get('output'):
-        merged_results = co.defaultdict(lambda: {})
-        other_fields = []
-
-        # merge?
-        if args.get('merge'):
-            try:
-                with openio(args['merge']) as f:
-                    r = csv.DictReader(f)
-                    for result in r:
-                        file = result.pop('file', '')
-                        func = result.pop('name', '')
-                        result.pop('data_size', None)
-                        merged_results[(file, func)] = result
-                        other_fields = result.keys()
-            except FileNotFoundError:
-                pass
-
-        for file, func, size in results:
-            merged_results[(file, func)]['data_size'] = size
-
-        with openio(args['output'], 'w') as f:
-            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size'])
-            w.writeheader()
-            for (file, func), result in sorted(merged_results.items()):
-                w.writerow({'file': file, 'name': func, **result})
-
-    # print results
-    def dedup_entries(results, by='name'):
-        entries = co.defaultdict(lambda: 0)
-        for file, func, size in results:
-            entry = (file if by == 'file' else func)
-            entries[entry] += size
-        return entries
-
-    def diff_entries(olds, news):
-        diff = co.defaultdict(lambda: (0, 0, 0, 0))
-        for name, new in news.items():
-            diff[name] = (0, new, new, 1.0)
-        for name, old in olds.items():
-            _, new, _, _ = diff[name]
-            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
-        return diff
-
-    def sorted_entries(entries):
-        if args.get('size_sort'):
-            return sorted(entries, key=lambda x: (-x[1], x))
-        elif args.get('reverse_size_sort'):
-            return sorted(entries, key=lambda x: (+x[1], x))
-        else:
-            return sorted(entries)
-
-    def sorted_diff_entries(entries):
-        if args.get('size_sort'):
-            return sorted(entries, key=lambda x: (-x[1][1], x))
-        elif args.get('reverse_size_sort'):
-            return sorted(entries, key=lambda x: (+x[1][1], x))
-        else:
-            return sorted(entries, key=lambda x: (-x[1][3], x))
-
-    def print_header(by=''):
-        if not args.get('diff'):
-            print('%-36s %7s' % (by, 'size'))
-        else:
-            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
-
-    def print_entry(name, size):
-        print("%-36s %7d" % (name, size))
-
-    def print_diff_entry(name, old, new, diff, ratio):
-        print("%-36s %7s %7s %+7d%s" % (name,
-            old or "-",
-            new or "-",
-            diff,
-            ' (%+.1f%%)' % (100*ratio) if ratio else ''))
-
-    def print_entries(by='name'):
-        entries = dedup_entries(results, by=by)
-
-        if not args.get('diff'):
-            print_header(by=by)
-            for name, size in sorted_entries(entries.items()):
-                print_entry(name, size)
-        else:
-            prev_entries = dedup_entries(prev_results, by=by)
-            diff = diff_entries(prev_entries, entries)
-            print_header(by='%s (%d added, %d removed)' % (by,
-                sum(1 for old, _, _, _ in diff.values() if not old),
-                sum(1 for _, new, _, _ in diff.values() if not new)))
-            for name, (old, new, diff, ratio) in sorted_diff_entries(
-                    diff.items()):
-                if ratio or args.get('all'):
-                    print_diff_entry(name, old, new, diff, ratio)
-
-    def print_totals():
-        if not args.get('diff'):
-            print_entry('TOTAL', total)
-        else:
-            ratio = (0.0 if not prev_total and not total
-                else 1.0 if not prev_total
-                else (total-prev_total)/prev_total)
-            print_diff_entry('TOTAL',
-                prev_total, total,
-                total-prev_total,
-                ratio)
-
-    if args.get('quiet'):
-        pass
-    elif args.get('summary'):
-        print_header()
-        print_totals()
-    elif args.get('files'):
-        print_entries(by='file')
-        print_totals()
-    else:
-        print_entries(by='name')
-        print_totals()
-
-if __name__ == "__main__":
-    import argparse
-    import sys
-    parser = argparse.ArgumentParser(
-        description="Find data size at the function level.")
-    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
-        help="Description of where to find *.o files. May be a directory \
-or a list of paths. Defaults to %r." % OBJ_PATHS)
-    parser.add_argument('-v', '--verbose', action='store_true',
-        help="Output commands that run behind the scenes.")
-    parser.add_argument('-q', '--quiet', action='store_true',
-        help="Don't show anything, useful with -o.")
-    parser.add_argument('-o', '--output',
-        help="Specify CSV file to store results.")
-    parser.add_argument('-u', '--use',
-        help="Don't compile and find data sizes, instead use this CSV file.")
-    parser.add_argument('-d', '--diff',
-        help="Specify CSV file to diff data size against.")
-    parser.add_argument('-m', '--merge',
-        help="Merge with an existing CSV file when writing to output.")
-    parser.add_argument('-a', '--all', action='store_true',
-        help="Show all functions, not just the ones that changed.")
-    parser.add_argument('-A', '--everything', action='store_true',
-        help="Include builtin and libc specific symbols.")
-    parser.add_argument('-s', '--size-sort', action='store_true',
-        help="Sort by size.")
-    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
-        help="Sort by size, but backwards.")
-    parser.add_argument('-F', '--files', action='store_true',
-        help="Show file-level data sizes. Note this does not include padding! "
-            "So sizes may differ from other tools.")
-    parser.add_argument('-Y', '--summary', action='store_true',
-        help="Only show the total data size.")
-    parser.add_argument('--type', default='dDbB',
-        help="Type of symbols to report, this uses the same single-character "
-            "type-names emitted by nm. Defaults to %(default)r.")
-    parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
-        help="Path to the nm tool to use.")
-    parser.add_argument('--build-dir',
-        help="Specify the relative build directory. Used to map object files \
-to the correct source files.")
-    sys.exit(main(**vars(parser.parse_args())))

View File

@@ -1,430 +0,0 @@
#!/usr/bin/env python3
#
# Script to find stack usage at the function level. Will detect recursion and
# report as infinite stack usage.
#
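# For example (hypothetical paths), assuming .ci callgraph files generated
# by GCC's -fcallgraph-info=su:
#
#   ./scripts/stack.py lfs.ci lfs_util.ci -S
#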
import os
import glob
import itertools as it
import re
import csv
import collections as co
import math as m
CI_PATHS = ['*.ci']
def collect(paths, **args):
# parse the vcg format
    k_pattern = re.compile(r'([a-z]+)\s*:', re.DOTALL)
    v_pattern = re.compile(r'(?:"(.*?)"|([a-z]+))', re.DOTALL)
    def parse_vcg(rest):
        # note the inner parse_vcg does the actual parsing, the outer
        # wrapper just asserts that the whole input was consumed
        def parse_vcg(rest):
node = []
while True:
rest = rest.lstrip()
m = k_pattern.match(rest)
if not m:
return (node, rest)
k, rest = m.group(1), rest[m.end(0):]
rest = rest.lstrip()
if rest.startswith('{'):
v, rest = parse_vcg(rest[1:])
assert rest[0] == '}', "unexpected %r" % rest[0:1]
rest = rest[1:]
node.append((k, v))
else:
m = v_pattern.match(rest)
assert m, "unexpected %r" % rest[0:1]
v, rest = m.group(1) or m.group(2), rest[m.end(0):]
node.append((k, v))
node, rest = parse_vcg(rest)
assert rest == '', "unexpected %r" % rest[0:1]
return node
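    # e.g. a (hypothetical) .ci fragment such as
    #   graph: { title: "foo" node: { title: "bar" } }
    # parses into nested key/value tuples:
    #   [('graph', [('title', 'foo'), ('node', [('title', 'bar')])])]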
# collect into functions
results = co.defaultdict(lambda: (None, None, 0, set()))
f_pattern = re.compile(
r'([^\\]*)\\n([^:]*)[^\\]*\\n([0-9]+) bytes \((.*)\)')
for path in paths:
with open(path) as f:
vcg = parse_vcg(f.read())
for k, graph in vcg:
if k != 'graph':
continue
for k, info in graph:
if k == 'node':
info = dict(info)
m = f_pattern.match(info['label'])
if m:
function, file, size, type = m.groups()
if not args.get('quiet') and type != 'static':
print('warning: found non-static stack for %s (%s)'
% (function, type))
_, _, _, targets = results[info['title']]
results[info['title']] = (
file, function, int(size), targets)
elif k == 'edge':
info = dict(info)
_, _, _, targets = results[info['sourcename']]
targets.add(info['targetname'])
else:
continue
if not args.get('everything'):
for source, (s_file, s_function, _, _) in list(results.items()):
# discard internal functions
if s_file.startswith('<') or s_file.startswith('/usr/include'):
del results[source]
# find maximum stack size recursively, this requires also detecting cycles
# (in case of recursion)
def find_limit(source, seen=None):
seen = seen or set()
if source not in results:
return 0
_, _, frame, targets = results[source]
limit = 0
for target in targets:
if target in seen:
# found a cycle
return float('inf')
limit_ = find_limit(target, seen | {target})
limit = max(limit, limit_)
return frame + limit
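    # e.g. for a hypothetical call graph a -> b -> c with frames of 16, 32,
    # and 48 bytes, find_limit(a) = 16+32+48 = 96 bytes, while a cycle such
    # as a -> b -> a reports float('inf')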
def find_deps(targets):
deps = set()
for target in targets:
if target in results:
t_file, t_function, _, _ = results[target]
deps.add((t_file, t_function))
return deps
# flatten into a list
flat_results = []
for source, (s_file, s_function, frame, targets) in results.items():
limit = find_limit(source)
deps = find_deps(targets)
flat_results.append((s_file, s_function, frame, limit, deps))
return flat_results
def main(**args):
def openio(path, mode='r'):
if path == '-':
if 'r' in mode:
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
else:
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
else:
return open(path, mode)
# find sizes
if not args.get('use', None):
# find .ci files
paths = []
for path in args['ci_paths']:
if os.path.isdir(path):
path = path + '/*.ci'
for path in glob.glob(path):
paths.append(path)
if not paths:
print('no .ci files found in %r?' % args['ci_paths'])
sys.exit(-1)
results = collect(paths, **args)
else:
with openio(args['use']) as f:
r = csv.DictReader(f)
results = [
( result['file'],
result['name'],
int(result['stack_frame']),
float(result['stack_limit']), # note limit can be inf
set())
for result in r
if result.get('stack_frame') not in {None, ''}
if result.get('stack_limit') not in {None, ''}]
total_frame = 0
total_limit = 0
for _, _, frame, limit, _ in results:
total_frame += frame
total_limit = max(total_limit, limit)
# find previous results?
if args.get('diff'):
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
prev_results = [
( result['file'],
result['name'],
int(result['stack_frame']),
float(result['stack_limit']),
set())
for result in r
if result.get('stack_frame') not in {None, ''}
if result.get('stack_limit') not in {None, ''}]
except FileNotFoundError:
prev_results = []
prev_total_frame = 0
prev_total_limit = 0
for _, _, frame, limit, _ in prev_results:
prev_total_frame += frame
prev_total_limit = max(prev_total_limit, limit)
# write results to CSV
if args.get('output'):
merged_results = co.defaultdict(lambda: {})
other_fields = []
# merge?
if args.get('merge'):
try:
with openio(args['merge']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
func = result.pop('name', '')
result.pop('stack_frame', None)
result.pop('stack_limit', None)
merged_results[(file, func)] = result
other_fields = result.keys()
except FileNotFoundError:
pass
for file, func, frame, limit, _ in results:
merged_results[(file, func)]['stack_frame'] = frame
merged_results[(file, func)]['stack_limit'] = limit
with openio(args['output'], 'w') as f:
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'stack_frame', 'stack_limit'])
w.writeheader()
for (file, func), result in sorted(merged_results.items()):
w.writerow({'file': file, 'name': func, **result})
# print results
def dedup_entries(results, by='name'):
entries = co.defaultdict(lambda: (0, 0, set()))
for file, func, frame, limit, deps in results:
entry = (file if by == 'file' else func)
entry_frame, entry_limit, entry_deps = entries[entry]
entries[entry] = (
entry_frame + frame,
max(entry_limit, limit),
entry_deps | {file if by == 'file' else func
for file, func in deps})
return entries
def diff_entries(olds, news):
diff = co.defaultdict(lambda: (None, None, None, None, 0, 0, 0, set()))
for name, (new_frame, new_limit, deps) in news.items():
diff[name] = (
None, None,
new_frame, new_limit,
new_frame, new_limit,
1.0,
deps)
for name, (old_frame, old_limit, _) in olds.items():
_, _, new_frame, new_limit, _, _, _, deps = diff[name]
diff[name] = (
old_frame, old_limit,
new_frame, new_limit,
(new_frame or 0) - (old_frame or 0),
0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
else (new_limit or 0) - (old_limit or 0),
0.0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
else +float('inf') if m.isinf(new_limit or 0)
else -float('inf') if m.isinf(old_limit or 0)
else +0.0 if not old_limit and not new_limit
else +1.0 if not old_limit
else ((new_limit or 0) - (old_limit or 0))/(old_limit or 0),
deps)
return diff
def sorted_entries(entries):
if args.get('limit_sort'):
return sorted(entries, key=lambda x: (-x[1][1], x))
elif args.get('reverse_limit_sort'):
return sorted(entries, key=lambda x: (+x[1][1], x))
elif args.get('frame_sort'):
return sorted(entries, key=lambda x: (-x[1][0], x))
elif args.get('reverse_frame_sort'):
return sorted(entries, key=lambda x: (+x[1][0], x))
else:
return sorted(entries)
def sorted_diff_entries(entries):
if args.get('limit_sort'):
return sorted(entries, key=lambda x: (-(x[1][3] or 0), x))
elif args.get('reverse_limit_sort'):
return sorted(entries, key=lambda x: (+(x[1][3] or 0), x))
elif args.get('frame_sort'):
return sorted(entries, key=lambda x: (-(x[1][2] or 0), x))
elif args.get('reverse_frame_sort'):
return sorted(entries, key=lambda x: (+(x[1][2] or 0), x))
else:
return sorted(entries, key=lambda x: (-x[1][6], x))
def print_header(by=''):
if not args.get('diff'):
print('%-36s %7s %7s' % (by, 'frame', 'limit'))
else:
print('%-36s %15s %15s %15s' % (by, 'old', 'new', 'diff'))
    def print_entry(name, frame, limit):
        print("%-36s %7d %7s" % (name,
            frame, '∞' if m.isinf(limit) else int(limit)))
def print_diff_entry(name,
old_frame, old_limit,
new_frame, new_limit,
diff_frame, diff_limit,
ratio):
        print('%-36s %7s %7s %7s %7s %+7d %7s%s' % (name,
            old_frame if old_frame is not None else "-",
            ('∞' if m.isinf(old_limit) else int(old_limit))
            if old_limit is not None else "-",
            new_frame if new_frame is not None else "-",
            ('∞' if m.isinf(new_limit) else int(new_limit))
            if new_limit is not None else "-",
diff_frame,
('+∞' if diff_limit > 0 and m.isinf(diff_limit)
else '-∞' if diff_limit < 0 and m.isinf(diff_limit)
else '%+d' % diff_limit),
'' if not ratio
else ' (+∞%)' if ratio > 0 and m.isinf(ratio)
else ' (-∞%)' if ratio < 0 and m.isinf(ratio)
else ' (%+.1f%%)' % (100*ratio)))
def print_entries(by='name'):
# build optional tree of dependencies
def print_deps(entries, depth, print,
filter=lambda _: True,
prefixes=('', '', '', '')):
entries = entries if isinstance(entries, list) else list(entries)
filtered_entries = [(name, entry)
for name, entry in entries
if filter(name)]
for i, (name, entry) in enumerate(filtered_entries):
last = (i == len(filtered_entries)-1)
print(prefixes[0+last] + name, entry)
if depth > 0:
deps = entry[-1]
print_deps(entries, depth-1, print,
lambda name: name in deps,
( prefixes[2+last] + "|-> ",
prefixes[2+last] + "'-> ",
prefixes[2+last] + "| ",
prefixes[2+last] + " "))
entries = dedup_entries(results, by=by)
if not args.get('diff'):
print_header(by=by)
print_deps(
sorted_entries(entries.items()),
args.get('depth') or 0,
lambda name, entry: print_entry(name, *entry[:-1]))
else:
prev_entries = dedup_entries(prev_results, by=by)
diff = diff_entries(prev_entries, entries)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for _, old, _, _, _, _, _, _ in diff.values() if old is None),
sum(1 for _, _, _, new, _, _, _, _ in diff.values() if new is None)))
print_deps(
filter(
lambda x: x[1][6] or args.get('all'),
sorted_diff_entries(diff.items())),
args.get('depth') or 0,
lambda name, entry: print_diff_entry(name, *entry[:-1]))
def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total_frame, total_limit)
else:
diff_frame = total_frame - prev_total_frame
diff_limit = (
0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
else (total_limit or 0) - (prev_total_limit or 0))
ratio = (
0.0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
else +float('inf') if m.isinf(total_limit or 0)
else -float('inf') if m.isinf(prev_total_limit or 0)
else 0.0 if not prev_total_limit and not total_limit
else 1.0 if not prev_total_limit
else ((total_limit or 0) - (prev_total_limit or 0))/(prev_total_limit or 0))
print_diff_entry('TOTAL',
prev_total_frame, prev_total_limit,
total_frame, total_limit,
diff_frame, diff_limit,
ratio)
if args.get('quiet'):
pass
elif args.get('summary'):
print_header()
print_totals()
elif args.get('files'):
print_entries(by='file')
print_totals()
else:
print_entries(by='name')
print_totals()
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Find stack usage at the function level.")
parser.add_argument('ci_paths', nargs='*', default=CI_PATHS,
help="Description of where to find *.ci files. May be a directory \
or a list of paths. Defaults to %r." % CI_PATHS)
parser.add_argument('-v', '--verbose', action='store_true',
help="Output commands that run behind the scenes.")
parser.add_argument('-q', '--quiet', action='store_true',
help="Don't show anything, useful with -o.")
parser.add_argument('-o', '--output',
help="Specify CSV file to store results.")
parser.add_argument('-u', '--use',
help="Don't parse callgraph files, instead use this CSV file.")
parser.add_argument('-d', '--diff',
help="Specify CSV file to diff against.")
parser.add_argument('-m', '--merge',
help="Merge with an existing CSV file when writing to output.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all functions, not just the ones that changed.")
parser.add_argument('-A', '--everything', action='store_true',
help="Include builtin and libc specific symbols.")
parser.add_argument('-s', '--limit-sort', action='store_true',
help="Sort by stack limit.")
parser.add_argument('-S', '--reverse-limit-sort', action='store_true',
help="Sort by stack limit, but backwards.")
parser.add_argument('--frame-sort', action='store_true',
help="Sort by stack frame size.")
parser.add_argument('--reverse-frame-sort', action='store_true',
help="Sort by stack frame size, but backwards.")
parser.add_argument('-L', '--depth', default=0, type=lambda x: int(x, 0),
nargs='?', const=float('inf'),
help="Depth of dependencies to show.")
parser.add_argument('-F', '--files', action='store_true',
help="Show file-level calls.")
parser.add_argument('-Y', '--summary', action='store_true',
help="Only show the total stack size.")
parser.add_argument('--build-dir',
help="Specify the relative build directory. Used to map object files \
to the correct source files.")
sys.exit(main(**vars(parser.parse_args())))

View File

@@ -1,331 +0,0 @@
#!/usr/bin/env python3
#
# Script to find struct sizes.
#
import os
import glob
import itertools as it
import subprocess as sp
import shlex
import re
import csv
import collections as co
OBJ_PATHS = ['*.o']
def collect(paths, **args):
    decl_pattern = re.compile(
        r'^\s+(?P<no>[0-9]+)'
        r'\s+(?P<dir>[0-9]+)'
        r'\s+.*'
        r'\s+(?P<file>[^\s]+)$')
    struct_pattern = re.compile(
        r'^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
        r'|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
        r'|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
        r'|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
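    # these pick apart objdump --dwarf=info output along these
    # (hypothetical) lines:
    #   <1><2a>: Abbrev Number: 5 (DW_TAG_structure_type)
    #      <2b>   DW_AT_name        : lfs_config
    #      <2c>   DW_AT_decl_file   : 3
    #      <2d>   DW_AT_byte_size   : 92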
results = co.defaultdict(lambda: 0)
for path in paths:
# find decl, we want to filter by structs in .h files
decls = {}
# note objdump-tool may contain extra args
cmd = args['objdump_tool'] + ['--dwarf=rawline', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace')
for line in proc.stdout:
# find file numbers
m = decl_pattern.match(line)
if m:
decls[int(m.group('no'))] = m.group('file')
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
sys.exit(-1)
# collect structs as we parse dwarf info
found = False
name = None
decl = None
size = None
# note objdump-tool may contain extra args
cmd = args['objdump_tool'] + ['--dwarf=info', path]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace')
for line in proc.stdout:
# state machine here to find structs
m = struct_pattern.match(line)
if m:
if m.group('tag'):
if (name is not None
and decl is not None
and size is not None):
decl = decls.get(decl, '?')
results[(decl, name)] = size
found = (m.group('tag') == 'structure_type')
name = None
decl = None
size = None
elif found and m.group('name'):
name = m.group('name')
elif found and name and m.group('decl'):
decl = int(m.group('decl'))
elif found and name and m.group('size'):
size = int(m.group('size'))
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
sys.exit(-1)
flat_results = []
for (file, struct), size in results.items():
# map to source files
if args.get('build_dir'):
file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # only include structs declared in header files in the current
        # directory, ignore internal-only structs (these are represented
        # in other measurements)
if not args.get('everything'):
if not file.endswith('.h'):
continue
# replace .o with .c, different scripts report .o/.c, we need to
# choose one if we want to deduplicate csv files
        file = re.sub(r'\.o$', '.c', file)
flat_results.append((file, struct, size))
return flat_results
def main(**args):
def openio(path, mode='r'):
if path == '-':
if 'r' in mode:
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
else:
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
else:
return open(path, mode)
# find sizes
if not args.get('use', None):
# find .o files
paths = []
for path in args['obj_paths']:
if os.path.isdir(path):
path = path + '/*.o'
for path in glob.glob(path):
paths.append(path)
if not paths:
            print('no .o files found in %r?' % args['obj_paths'])
sys.exit(-1)
results = collect(paths, **args)
else:
with openio(args['use']) as f:
r = csv.DictReader(f)
results = [
( result['file'],
result['name'],
int(result['struct_size']))
for result in r
if result.get('struct_size') not in {None, ''}]
total = 0
for _, _, size in results:
total += size
# find previous results?
if args.get('diff'):
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
prev_results = [
( result['file'],
result['name'],
int(result['struct_size']))
for result in r
if result.get('struct_size') not in {None, ''}]
except FileNotFoundError:
prev_results = []
prev_total = 0
for _, _, size in prev_results:
prev_total += size
# write results to CSV
if args.get('output'):
merged_results = co.defaultdict(lambda: {})
other_fields = []
# merge?
if args.get('merge'):
try:
with openio(args['merge']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
struct = result.pop('name', '')
result.pop('struct_size', None)
merged_results[(file, struct)] = result
other_fields = result.keys()
except FileNotFoundError:
pass
for file, struct, size in results:
merged_results[(file, struct)]['struct_size'] = size
with openio(args['output'], 'w') as f:
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size'])
w.writeheader()
for (file, struct), result in sorted(merged_results.items()):
w.writerow({'file': file, 'name': struct, **result})
# print results
def dedup_entries(results, by='name'):
entries = co.defaultdict(lambda: 0)
for file, struct, size in results:
entry = (file if by == 'file' else struct)
entries[entry] += size
return entries
def diff_entries(olds, news):
diff = co.defaultdict(lambda: (0, 0, 0, 0))
for name, new in news.items():
diff[name] = (0, new, new, 1.0)
for name, old in olds.items():
_, new, _, _ = diff[name]
diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
return diff
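    # e.g. a struct that grew from 84 to 92 bytes (hypothetical sizes)
    # diffs to (84, 92, +8, +0.095), later printed as "+8 (+9.5%)"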
def sorted_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1], x))
else:
return sorted(entries)
def sorted_diff_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1][1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1][1], x))
else:
return sorted(entries, key=lambda x: (-x[1][3], x))
def print_header(by=''):
if not args.get('diff'):
print('%-36s %7s' % (by, 'size'))
else:
print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
def print_entry(name, size):
print("%-36s %7d" % (name, size))
def print_diff_entry(name, old, new, diff, ratio):
print("%-36s %7s %7s %+7d%s" % (name,
old or "-",
new or "-",
diff,
' (%+.1f%%)' % (100*ratio) if ratio else ''))
def print_entries(by='name'):
entries = dedup_entries(results, by=by)
if not args.get('diff'):
print_header(by=by)
for name, size in sorted_entries(entries.items()):
print_entry(name, size)
else:
prev_entries = dedup_entries(prev_results, by=by)
diff = diff_entries(prev_entries, entries)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for old, _, _, _ in diff.values() if not old),
sum(1 for _, new, _, _ in diff.values() if not new)))
for name, (old, new, diff, ratio) in sorted_diff_entries(
diff.items()):
if ratio or args.get('all'):
print_diff_entry(name, old, new, diff, ratio)
def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total)
else:
ratio = (0.0 if not prev_total and not total
else 1.0 if not prev_total
else (total-prev_total)/prev_total)
print_diff_entry('TOTAL',
prev_total, total,
total-prev_total,
ratio)
if args.get('quiet'):
pass
elif args.get('summary'):
print_header()
print_totals()
elif args.get('files'):
print_entries(by='file')
print_totals()
else:
print_entries(by='name')
print_totals()
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Find struct sizes.")
parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
help="Description of where to find *.o files. May be a directory \
or a list of paths. Defaults to %r." % OBJ_PATHS)
parser.add_argument('-v', '--verbose', action='store_true',
help="Output commands that run behind the scenes.")
parser.add_argument('-q', '--quiet', action='store_true',
help="Don't show anything, useful with -o.")
parser.add_argument('-o', '--output',
help="Specify CSV file to store results.")
parser.add_argument('-u', '--use',
help="Don't compile and find struct sizes, instead use this CSV file.")
parser.add_argument('-d', '--diff',
help="Specify CSV file to diff struct size against.")
parser.add_argument('-m', '--merge',
help="Merge with an existing CSV file when writing to output.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all functions, not just the ones that changed.")
parser.add_argument('-A', '--everything', action='store_true',
help="Include builtin and libc specific symbols.")
parser.add_argument('-s', '--size-sort', action='store_true',
help="Sort by size.")
parser.add_argument('-S', '--reverse-size-sort', action='store_true',
help="Sort by size, but backwards.")
parser.add_argument('-F', '--files', action='store_true',
help="Show file-level struct sizes.")
parser.add_argument('-Y', '--summary', action='store_true',
help="Only show the total struct size.")
parser.add_argument('--objdump-tool', default=['objdump'], type=lambda x: x.split(),
help="Path to the objdump tool to use.")
parser.add_argument('--build-dir',
help="Specify the relative build directory. Used to map object files \
to the correct source files.")
sys.exit(main(**vars(parser.parse_args())))

View File

@@ -1,279 +0,0 @@
#!/usr/bin/env python3
#
# Script to summarize the outputs of other scripts. Operates on CSV files.
#
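# For example (hypothetical paths), merging several measurement CSVs and
# diffing against a previous run:
#
#   ./scripts/summary.py results/*.csv -d prev/results.csv
#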
import functools as ft
import collections as co
import os
import csv
import re
import math as m
# displayable fields
Field = co.namedtuple('Field', 'name,parse,acc,key,fmt,repr,null,ratio')
FIELDS = [
    # name, parse, accumulate, key, fmt, repr, null, ratio
Field('code',
lambda r: int(r['code_size']),
sum,
lambda r: r,
'%7s',
lambda r: r,
'-',
lambda old, new: (new-old)/old),
Field('data',
lambda r: int(r['data_size']),
sum,
lambda r: r,
'%7s',
lambda r: r,
'-',
lambda old, new: (new-old)/old),
Field('stack',
lambda r: float(r['stack_limit']),
max,
lambda r: r,
'%7s',
        lambda r: '∞' if m.isinf(r) else int(r),
'-',
lambda old, new: (new-old)/old),
Field('structs',
lambda r: int(r['struct_size']),
sum,
lambda r: r,
'%8s',
lambda r: r,
'-',
lambda old, new: (new-old)/old),
Field('coverage',
lambda r: (int(r['coverage_hits']), int(r['coverage_count'])),
lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs),
lambda r: r[0]/r[1],
'%19s',
lambda r: '%11s %7s' % ('%d/%d' % (r[0], r[1]), '%.1f%%' % (100*r[0]/r[1])),
'%11s %7s' % ('-', '-'),
lambda old, new: ((new[0]/new[1]) - (old[0]/old[1])))
]
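# note each field defines how to parse, accumulate, and render itself; the
# coverage field, for example, accumulates (hits, count) tuples, so two
# (hypothetical) results (1, 2) and (3, 4) merge into (4, 6) and render as
# "4/6 66.7%"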
def main(**args):
def openio(path, mode='r'):
if path == '-':
if 'r' in mode:
return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
else:
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
else:
return open(path, mode)
# find results
results = co.defaultdict(lambda: {})
for path in args.get('csv_paths', '-'):
try:
with openio(path) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
name = result.pop('name', '')
prev = results[(file, name)]
for field in FIELDS:
try:
r = field.parse(result)
if field.name in prev:
results[(file, name)][field.name] = field.acc(
[prev[field.name], r])
else:
results[(file, name)][field.name] = r
except (KeyError, ValueError):
pass
except FileNotFoundError:
pass
# find fields
if args.get('all_fields'):
fields = FIELDS
elif args.get('fields') is not None:
fields_dict = {field.name: field for field in FIELDS}
fields = [fields_dict[f] for f in args['fields']]
else:
fields = []
for field in FIELDS:
if any(field.name in result for result in results.values()):
fields.append(field)
# find total for every field
total = {}
for result in results.values():
for field in fields:
if field.name in result and field.name in total:
total[field.name] = field.acc(
[total[field.name], result[field.name]])
elif field.name in result:
total[field.name] = result[field.name]
# find previous results?
if args.get('diff'):
prev_results = co.defaultdict(lambda: {})
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
name = result.pop('name', '')
prev = prev_results[(file, name)]
for field in FIELDS:
try:
r = field.parse(result)
if field.name in prev:
prev_results[(file, name)][field.name] = field.acc(
[prev[field.name], r])
else:
prev_results[(file, name)][field.name] = r
except (KeyError, ValueError):
pass
except FileNotFoundError:
pass
prev_total = {}
for result in prev_results.values():
for field in fields:
if field.name in result and field.name in prev_total:
prev_total[field.name] = field.acc(
[prev_total[field.name], result[field.name]])
elif field.name in result:
prev_total[field.name] = result[field.name]
# print results
def dedup_entries(results, by='name'):
entries = co.defaultdict(lambda: {})
for (file, func), result in results.items():
entry = (file if by == 'file' else func)
prev = entries[entry]
for field in fields:
if field.name in result and field.name in prev:
entries[entry][field.name] = field.acc(
[prev[field.name], result[field.name]])
elif field.name in result:
entries[entry][field.name] = result[field.name]
return entries
def sorted_entries(entries):
if args.get('sort') is not None:
field = {field.name: field for field in FIELDS}[args['sort']]
return sorted(entries, key=lambda x: (
-(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
elif args.get('reverse_sort') is not None:
field = {field.name: field for field in FIELDS}[args['reverse_sort']]
return sorted(entries, key=lambda x: (
+(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
else:
return sorted(entries)
def print_header(by=''):
if not args.get('diff'):
print('%-36s' % by, end='')
for field in fields:
print((' '+field.fmt) % field.name, end='')
print()
else:
print('%-36s' % by, end='')
for field in fields:
print((' '+field.fmt) % field.name, end='')
print(' %-9s' % '', end='')
print()
def print_entry(name, result):
print('%-36s' % name, end='')
for field in fields:
r = result.get(field.name)
if r is not None:
print((' '+field.fmt) % field.repr(r), end='')
else:
print((' '+field.fmt) % '-', end='')
print()
def print_diff_entry(name, old, new):
print('%-36s' % name, end='')
for field in fields:
n = new.get(field.name)
if n is not None:
print((' '+field.fmt) % field.repr(n), end='')
else:
print((' '+field.fmt) % '-', end='')
o = old.get(field.name)
ratio = (
0.0 if m.isinf(o or 0) and m.isinf(n or 0)
else +float('inf') if m.isinf(n or 0)
else -float('inf') if m.isinf(o or 0)
else 0.0 if not o and not n
else +1.0 if not o
else -1.0 if not n
else field.ratio(o, n))
print(' %-9s' % (
'' if not ratio
else '(+∞%)' if ratio > 0 and m.isinf(ratio)
else '(-∞%)' if ratio < 0 and m.isinf(ratio)
else '(%+.1f%%)' % (100*ratio)), end='')
print()
def print_entries(by='name'):
entries = dedup_entries(results, by=by)
if not args.get('diff'):
print_header(by=by)
for name, result in sorted_entries(entries.items()):
print_entry(name, result)
else:
prev_entries = dedup_entries(prev_results, by=by)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for name in entries if name not in prev_entries),
sum(1 for name in prev_entries if name not in entries)))
for name, result in sorted_entries(entries.items()):
if args.get('all') or result != prev_entries.get(name, {}):
print_diff_entry(name, prev_entries.get(name, {}), result)
def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total)
else:
print_diff_entry('TOTAL', prev_total, total)
if args.get('summary'):
print_header()
print_totals()
elif args.get('files'):
print_entries(by='file')
print_totals()
else:
print_entries(by='name')
print_totals()
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
description="Summarize measurements")
parser.add_argument('csv_paths', nargs='*', default='-',
help="Description of where to find *.csv files. May be a directory \
or list of paths. *.csv files will be merged to show the total \
coverage.")
parser.add_argument('-d', '--diff',
help="Specify CSV file to diff against.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all objects, not just the ones that changed.")
parser.add_argument('-e', '--all-fields', action='store_true',
help="Show all fields, even those with no results.")
parser.add_argument('-f', '--fields', type=lambda x: re.split(r'\s*,\s*', x),
help="Comma separated list of fields to print, by default all fields \
that are found in the CSV files are printed.")
parser.add_argument('-s', '--sort',
help="Sort by this field.")
parser.add_argument('-S', '--reverse-sort',
help="Sort by this field, but backwards.")
parser.add_argument('-F', '--files', action='store_true',
help="Show file-level calls.")
parser.add_argument('-Y', '--summary', action='store_true',
help="Only show the totals.")
sys.exit(main(**vars(parser.parse_args())))

View File

@@ -784,13 +784,10 @@ def main(**args):
         stdout=sp.PIPE if not args.get('verbose') else None,
         stderr=sp.STDOUT if not args.get('verbose') else None,
         universal_newlines=True)
-    stdout = []
-    for line in proc.stdout:
-        stdout.append(line)
     proc.wait()
     if proc.returncode != 0:
         if not args.get('verbose'):
-            for line in stdout:
+            for line in proc.stdout:
                 sys.stdout.write(line)
             sys.exit(-1)
@@ -806,9 +803,9 @@ def main(**args):
         failure.case.test(failure=failure, **args)
     sys.exit(0)
-    print('tests passed %d/%d (%.1f%%)' % (passed, total,
+    print('tests passed %d/%d (%.2f%%)' % (passed, total,
         100*(passed/total if total else 1.0)))
-    print('tests failed %d/%d (%.1f%%)' % (failed, total,
+    print('tests failed %d/%d (%.2f%%)' % (failed, total,
         100*(failed/total if total else 1.0)))
     return 1 if failed > 0 else 0