Added BUILDDIR, a bit of script reworking

Now littlefs's Makefile can work with a custom build directory
for compilation output. Just set the BUILDDIR variable and the Makefile
will take care of the rest.

make BUILDDIR=build size

This makes it very easy to compare builds with different compile-time
configurations or different cross-compilers.

This meant most of code.py's build isolation is no longer needed,
so revisited the scripts and cleaned/tweaked a number of things.

Also brought code.py in line with coverage.py, fixing some of the
inconsistencies that were created while developing these scripts.

One change to note was removing the inline measuring logic, I realized
this feature is unnecessary thanks to GCC's -fkeep-static-functions and
-fno-inline flags.
This commit is contained in:
Christopher Haster
2021-01-01 23:50:59 -06:00
parent 887f3660ed
commit b84fb6bcc5
7 changed files with 571 additions and 498 deletions

View File

@@ -46,11 +46,11 @@ jobs:
`${{github.event.workflow_run.id}}/jobs" \ `${{github.event.workflow_run.id}}/jobs" \
| jq -er '.jobs[] | jq -er '.jobs[]
| select(.name == env.TARGET_JOB) | select(.name == env.TARGET_JOB)
| .html_url + ((.steps[] | .html_url
+ "?check_suite_focus=true"
+ ((.steps[]
| select(.name == env.TARGET_STEP) | select(.name == env.TARGET_STEP)
| "#step:\(.number):0") // "")' | "#step:\(.number):0") // "")'))"
)
)"
# TODO remove this # TODO remove this
# print for debugging # print for debugging
echo "$(jq -nc '{ echo "$(jq -nc '{

View File

@@ -4,7 +4,6 @@ on: [push, pull_request]
env: env:
CFLAGS: -Werror CFLAGS: -Werror
MAKEFLAGS: -j MAKEFLAGS: -j
COVERAGE: 1
jobs: jobs:
# run tests # run tests
@@ -14,21 +13,22 @@ jobs:
fail-fast: false fail-fast: false
matrix: matrix:
arch: [x86_64, thumb, mips, powerpc] arch: [x86_64, thumb, mips, powerpc]
env:
TESTFLAGS: --coverage
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- name: install - name: install
run: | run: |
# need toml, also pip3 isn't installed by default? # need toml, also pip3 isn't installed by default?
sudo apt-get update sudo apt-get update -qq
sudo apt-get install python3 python3-pip sudo apt-get install -qq python3 python3-pip lcov
sudo pip3 install toml sudo pip3 install toml
mkdir status
# cross-compile with ARM Thumb (32-bit, little-endian) # cross-compile with ARM Thumb (32-bit, little-endian)
- name: install-thumb - name: install-thumb
if: matrix.arch == 'thumb' if: matrix.arch == 'thumb'
run: | run: |
sudo apt-get install \ sudo apt-get install -qq \
gcc-arm-linux-gnueabi \ gcc-arm-linux-gnueabi \
libc6-dev-armel-cross \ libc6-dev-armel-cross \
qemu-user qemu-user
@@ -40,7 +40,7 @@ jobs:
- name: install-mips - name: install-mips
if: matrix.arch == 'mips' if: matrix.arch == 'mips'
run: | run: |
sudo apt-get install \ sudo apt-get install -qq \
gcc-mips-linux-gnu \ gcc-mips-linux-gnu \
libc6-dev-mips-cross \ libc6-dev-mips-cross \
qemu-user qemu-user
@@ -52,7 +52,7 @@ jobs:
- name: install-powerpc - name: install-powerpc
if: matrix.arch == 'powerpc' if: matrix.arch == 'powerpc'
run: | run: |
sudo apt-get install \ sudo apt-get install -qq \
gcc-powerpc-linux-gnu \ gcc-powerpc-linux-gnu \
libc6-dev-powerpc-cross \ libc6-dev-powerpc-cross \
qemu-user qemu-user
@@ -73,140 +73,118 @@ jobs:
-include stdio.h" -include stdio.h"
# normal+reentrant tests # normal+reentrant tests
- name: test-default - name: test-default
continue-on-error: true run: make test_dirs TESTFLAGS+="-nrk"
run: make test SCRIPTFLAGS+="-nrk" # NOR flash: read/prog = 1 block = 4KiB
# # NOR flash: read/prog = 1 block = 4KiB - name: test-nor
# - name: test-nor run: make test TESTFLAGS+="-nrk
# run: make test SCRIPTFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" VERBOSE=1
# -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" # SD/eMMC: read/prog = 512 block = 512
# # SD/eMMC: read/prog = 512 block = 512 - name: test-emmc
# - name: test-emmc run: make test TESTFLAGS+="-nrk
# run: make test SCRIPTFLAGS+="-nrk -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
# -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" # NAND flash: read/prog = 4KiB block = 32KiB
# # NAND flash: read/prog = 4KiB block = 32KiB - name: test-nand
# - name: test-nand run: make test TESTFLAGS+="-nrk
# run: make test SCRIPTFLAGS+="-nrk -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
# -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" # other extreme geometries that are useful for various corner cases
# # other extreme geometries that are useful for various corner cases - name: test-no-intrinsics
# - name: test-no-intrinsics run: make test TESTFLAGS+="-nrk
# run: make test SCRIPTFLAGS+="-nrk -DLFS_NO_INTRINSICS"
# -DLFS_NO_INTRINSICS" - name: test-byte-writes
# - name: test-byte-writes run: make test TESTFLAGS+="-nrk
# run: make test SCRIPTFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
# -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" - name: test-block-cycles
# - name: test-block-cycles run: make test TESTFLAGS+="-nrk
# run: make test SCRIPTFLAGS+="-nrk -DLFS_BLOCK_CYCLES=1"
# -DLFS_BLOCK_CYCLES=1" - name: test-odd-block-count
# - name: test-odd-block-count run: make test TESTFLAGS+="-nrk
# run: make test SCRIPTFLAGS+="-nrk -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
# -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" - name: test-odd-block-size
# - name: test-odd-block-size run: make test TESTFLAGS+="-nrk
# run: make test SCRIPTFLAGS+="-nrk -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
# -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
- name: test-default-what
run: |
echo "version"
gcov --version
echo "tests"
ls tests
echo "hmm"
cat tests/*.gcov
echo "woah"
# collect coverage # collect coverage
- name: collect-coverage - name: collect-coverage
continue-on-error: true continue-on-error: true
run: | run: |
mkdir -p coverage mkdir -p coverage
mv results/coverage.gcov coverage/${{github.job}}.gcov lcov $(for f in tests/*.toml.cumul.info ; do echo "-a $f" ; done) \
-o coverage/${{github.job}}-${{matrix.arch}}.info
# we only care about littlefs's actual source
lcov -e coverage/${{github.job}}-${{matrix.arch}}.info \
$(for f in lfs*.c ; do echo "/$f" ; done) \
-o coverage/${{github.job}}-${{matrix.arch}}.info
- name: upload-coverage - name: upload-coverage
continue-on-error: true continue-on-error: true
uses: actions/upload-artifact@v2 uses: actions/upload-artifact@v2
with: with:
name: coverage name: coverage
path: coverage path: coverage
retention-days: 1
# update results # update results
- uses: actions/checkout@v2
if: github.ref != 'refs/heads/master'
continue-on-error: true
with:
ref: master
path: master
- name: results-code - name: results-code
continue-on-error: true continue-on-error: true
run: | run: |
export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" mkdir -p results
export CFLAGS+=" \ # TODO remove the need for OBJ
make clean
make code \
OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \
CFLAGS+=" \
-DLFS_NO_ASSERT \ -DLFS_NO_ASSERT \
-DLFS_NO_DEBUG \ -DLFS_NO_DEBUG \
-DLFS_NO_WARN \ -DLFS_NO_WARN \
-DLFS_NO_ERROR" -DLFS_NO_ERROR" \
if [ -d master ] CODEFLAGS+="-o results/code.csv"
then
make -C master clean code OBJ="$OBJ" \
SCRIPTFLAGS+="-qo code.csv" \
&& export SCRIPTFLAGS+="-d master/code.csv"
fi
make clean code OBJ="$OBJ" \
SCRIPTFLAGS+="-o code.csv"
- name: results-code-readonly - name: results-code-readonly
continue-on-error: true continue-on-error: true
run: | run: |
export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" mkdir -p results
export CFLAGS+=" \ make clean
make code \
OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \
CFLAGS+=" \
-DLFS_NO_ASSERT \ -DLFS_NO_ASSERT \
-DLFS_NO_DEBUG \ -DLFS_NO_DEBUG \
-DLFS_NO_WARN \ -DLFS_NO_WARN \
-DLFS_NO_ERROR \ -DLFS_NO_ERROR \
-DLFS_READONLY" -DLFS_READONLY" \
if [ -d master ] CODEFLAGS+="-o results/code-readonly.csv"
then
make -C master clean code OBJ="$OBJ" \
SCRIPTFLAGS+="-qo code-readonly.csv" \
&& export SCRIPTFLAGS+="-d master/code-readonly.csv"
fi
# TODO remove this OBJ
make clean code OBJ="$OBJ" \
SCRIPTFLAGS+="-o code-readonly.csv"
- name: results-code-threadsafe - name: results-code-threadsafe
continue-on-error: true continue-on-error: true
run: | run: |
export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" mkdir -p results
export CFLAGS+=" \ make clean
make code \
OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \
CFLAGS+=" \
-DLFS_NO_ASSERT \ -DLFS_NO_ASSERT \
-DLFS_NO_DEBUG \ -DLFS_NO_DEBUG \
-DLFS_NO_WARN \ -DLFS_NO_WARN \
-DLFS_NO_ERROR \ -DLFS_NO_ERROR \
-DLFS_THREADSAFE" -DLFS_THREADSAFE" \
if [ -d master ] CODEFLAGS+="-o results/code-threadsafe.csv"
then
make -C master clean code OBJ="$OBJ" \
SCRIPTFLAGS+="-qo code-threadsafe.csv" \
&& export SCRIPTFLAGS+="-d master/code-threadsafe.csv"
fi
make clean code OBJ="$OBJ" \
SCRIPTFLAGS+="-o code-threadsafe.csv"
- name: results-code-migrate - name: results-code-migrate
continue-on-error: true continue-on-error: true
run: | run: |
export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" mkdir -p results
export CFLAGS+=" \ make clean
make code \
OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \
CFLAGS+=" \
-DLFS_NO_ASSERT \ -DLFS_NO_ASSERT \
-DLFS_NO_DEBUG \ -DLFS_NO_DEBUG \
-DLFS_NO_WARN \ -DLFS_NO_WARN \
-DLFS_NO_ERROR \ -DLFS_NO_ERROR \
-DLFS_MIGRATE" -DLFS_MIGRATE" \
if [ -d master ] CODEFLAGS+="-o results/code-migrate.csv"
then - name: upload-results
make -C master clean code OBJ="$OBJ" \ continue-on-error: true
SCRIPTFLAGS+="-qo code-migrate.csv" \ uses: actions/upload-artifact@v2
&& export SCRIPTFLAGS+="-d master/code-migrate.csv" with:
fi name: results
make clean code OBJ="$OBJ" \ path: results
SCRIPTFLAGS+="-o code-migrate.csv"
# limit reporting to Thumb, otherwise there would be too many numbers # limit reporting to Thumb, otherwise there would be too many numbers
# flying around for the results to be easily readable # flying around for the results to be easily readable
- name: collect-status - name: collect-status
@@ -214,23 +192,31 @@ jobs:
if: matrix.arch == 'thumb' if: matrix.arch == 'thumb'
run: | run: |
mkdir -p status mkdir -p status
shopt -s nullglob for f in results/code*.csv
for f in code*.csv
do do
export STEP="results-code$( export STEP="results-code$(
echo $f | sed -n 's/code-\(.*\).csv/-\1/p')" echo $f | sed -n 's/.*code-\(.*\).csv/-\1/p')"
export CONTEXT="results / code$( export CONTEXT="results / code$(
echo $f | sed -n 's/code-\(.*\).csv/ (\1)/p')" echo $f | sed -n 's/.*code-\(.*\).csv/ (\1)/p')"
export DESCRIPTION="Code size is $( export PREV="$(curl -sS \
./scripts/code.py -i $f -S $( "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
[ -e master/$f ] && echo "-d master/$f"))" | jq -re "select(.sha != env.GITHUB_SHA) | .statuses[]
jq -nc '{ | select(.context == env.CONTEXT).description
| capture(\"Code size is (?<result>[0-9]+)\").result" \
|| echo 0)"
echo $PREV
export DESCRIPTION="$(./scripts/code.py -u $f -s | awk '
NR==2 {printf "Code size is %d B",$2}
NR==2 && ENVIRON["PREV"] != 0 {
printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/$2}')"
jq -n '{
state: "success", state: "success",
context: env.CONTEXT, context: env.CONTEXT,
description: env.DESCRIPTION, description: env.DESCRIPTION,
target_job: "test (${{matrix.arch}})", target_job: "${{github.job}} (${{matrix.arch}})",
target_step: env.STEP}' \ target_step: env.STEP}' \
> status/code$(echo $f | sed -n 's/code-\(.*\).csv/-\1/p').json | tee status/code$(
echo $f | sed -n 's/.*code-\(.*\).csv/-\1/p').json
done done
- name: upload-status - name: upload-status
continue-on-error: true continue-on-error: true
@@ -244,20 +230,190 @@ jobs:
# run under Valgrind to check for memory errors # run under Valgrind to check for memory errors
valgrind: valgrind:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- name: install - name: install
run: | run: |
# need toml, also pip3 isn't installed by default? # need toml, also pip3 isn't installed by default?
sudo apt-get update sudo apt-get update -qq
sudo apt-get install python3 python3-pip sudo apt-get install -qq python3 python3-pip
sudo pip3 install toml sudo pip3 install toml
- name: install-valgrind - name: install-valgrind
run: | run: |
sudo apt-get update sudo apt-get update -qq
sudo apt-get install valgrind sudo apt-get install -qq valgrind
valgrind --version valgrind --version
# # normal tests, we don't need to test all geometries # normal tests, we don't need to test all geometries
# - name: test-valgrind - name: test-valgrind
# run: make test SCRIPTFLAGS+="-k --valgrind" run: make test TESTFLAGS+="-k --valgrind"
# self-host with littlefs-fuse for a fuzz-like test
fuse:
runs-on: ubuntu-latest
if: ${{!endsWith(github.ref, '-prefix')}}
steps:
- uses: actions/checkout@v2
- name: install
run: |
# need toml, also pip3 isn't installed by default?
sudo apt-get update -qq
sudo apt-get install -qq python3 python3-pip libfuse-dev
sudo pip3 install toml
fusermount -V
gcc --version
- uses: actions/checkout@v2
with:
repository: littlefs-project/littlefs-fuse
ref: v2
path: littlefs-fuse
- name: setup
run: |
# copy our new version into littlefs-fuse
rm -rf littlefs-fuse/littlefs/*
cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
# setup disk for littlefs-fuse
mkdir mount
sudo chmod a+rw /dev/loop0
dd if=/dev/zero bs=512 count=128K of=disk
losetup /dev/loop0 disk
- name: test
run: |
# self-host test
make -C littlefs-fuse
littlefs-fuse/lfs --format /dev/loop0
littlefs-fuse/lfs /dev/loop0 mount
ls mount
mkdir mount/littlefs
cp -r $(git ls-tree --name-only HEAD) mount/littlefs
cd mount/littlefs
stat .
ls -flh
make -B test
# test migration using littlefs-fuse
migrate:
runs-on: ubuntu-latest
if: ${{!endsWith(github.ref, '-prefix')}}
steps:
- uses: actions/checkout@v2
- name: install
run: |
# need toml, also pip3 isn't installed by default?
sudo apt-get update -qq
sudo apt-get install -qq python3 python3-pip libfuse-dev
sudo pip3 install toml
fusermount -V
gcc --version
- uses: actions/checkout@v2
with:
repository: littlefs-project/littlefs-fuse
ref: v2
path: v2
- uses: actions/checkout@v2
with:
repository: littlefs-project/littlefs-fuse
ref: v1
path: v1
- name: setup
run: |
# copy our new version into littlefs-fuse
rm -rf v2/littlefs/*
cp -r $(git ls-tree --name-only HEAD) v2/littlefs
# setup disk for littlefs-fuse
mkdir mount
sudo chmod a+rw /dev/loop0
dd if=/dev/zero bs=512 count=128K of=disk
losetup /dev/loop0 disk
- name: test
run: |
# compile v1 and v2
make -C v1
make -C v2
# run self-host test with v1
v1/lfs --format /dev/loop0
v1/lfs /dev/loop0 mount
ls mount
mkdir mount/littlefs
cp -r $(git ls-tree --name-only HEAD) mount/littlefs
cd mount/littlefs
stat .
ls -flh
make -B test
# attempt to migrate
cd ../..
fusermount -u mount
v2/lfs --migrate /dev/loop0
v2/lfs /dev/loop0 mount
# run self-host test with v2 right where we left off
ls mount
cd mount/littlefs
stat .
ls -flh
make -B test
# collect coverage info
coverage:
runs-on: ubuntu-latest
needs: [test]
continue-on-error: true
steps:
- uses: actions/checkout@v2
- name: install
run: |
sudo apt-get update -qq
sudo apt-get install -qq python3 python3-pip lcov
sudo pip3 install toml
- uses: actions/download-artifact@v2
with:
name: coverage
path: coverage
- name: results-coverage
run: |
mkdir -p results
lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \
-o results/coverage.info
./scripts/coverage.py results/coverage.info -o results/coverage.csv
- name: upload-results
continue-on-error: true
uses: actions/upload-artifact@v2
with:
name: results
path: results
- name: collect-status
run: |
mkdir -p status
export STEP="results-coverage"
export CONTEXT="results / coverage"
export PREV="$(curl -sS \
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
| jq -re "select(.sha != env.GITHUB_SHA) | .statuses[]
| select(.context == env.CONTEXT).description
| capture(\"Coverage is (?<result>[0-9\\\\.]+)\").result" \
|| echo 0)"
export DESCRIPTION="$(
./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' '
NR==2 {printf "Coverage is %.1f%% of %d lines",$4,$3}
NR==2 && ENVIRON["PREV"] != 0 {
printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
jq -n '{
state: "success",
context: env.CONTEXT,
description: env.DESCRIPTION,
target_job: "${{github.job}}",
target_step: env.STEP}' \
| tee status/coverage.json
- name: upload-status
uses: actions/upload-artifact@v2
with:
name: status
path: status
retention-days: 1

View File

@@ -1,28 +1,43 @@
TARGET = lfs.a ifdef BUILDDIR
# make sure BUILDDIR ends with a slash
override BUILDDIR := $(BUILDDIR)/
# bit of a hack, but we want to make sure BUILDDIR directory structure
# is correct before any commands
$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
$(BUILDDIR) \
$(BUILDDIR)bd \
$(BUILDDIR)tests))
endif
# overridable target/src/tools/flags/etc
ifneq ($(wildcard test.c main.c),) ifneq ($(wildcard test.c main.c),)
override TARGET = lfs TARGET ?= $(BUILDDIR)lfs
else
TARGET ?= $(BUILDDIR)lfs.a
endif endif
CC ?= gcc CC ?= gcc
AR ?= ar AR ?= ar
SIZE ?= size SIZE ?= size
CTAGS ?= ctags
NM ?= nm NM ?= nm
GCOV ?= gcov
LCOV ?= lcov LCOV ?= lcov
SRC += $(wildcard *.c bd/*.c) SRC ?= $(wildcard *.c bd/*.c)
OBJ := $(SRC:.c=.o) OBJ := $(SRC:%.c=%.o)
DEP := $(SRC:.c=.d) DEP := $(SRC:%.c=%.d)
ASM := $(SRC:.c=.s) ASM := $(SRC:%.c=%.s)
ifdef BUILDDIR
override OBJ := $(addprefix $(BUILDDIR),$(OBJ))
override DEP := $(addprefix $(BUILDDIR),$(DEP))
override ASM := $(addprefix $(BUILDDIR),$(ASM))
endif
ifdef DEBUG ifdef DEBUG
override CFLAGS += -O0 -g3 override CFLAGS += -O0 -g3
else else
override CFLAGS += -Os override CFLAGS += -Os
endif endif
ifdef WORD
override CFLAGS += -m$(WORD)
endif
ifdef TRACE ifdef TRACE
override CFLAGS += -DLFS_YES_TRACE override CFLAGS += -DLFS_YES_TRACE
endif endif
@@ -31,13 +46,23 @@ override CFLAGS += -std=c99 -Wall -pedantic
override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef
ifdef VERBOSE ifdef VERBOSE
override SCRIPTFLAGS += -v override TESTFLAGS += -v
override CODEFLAGS += -v
override COVERAGEFLAGS += -v
endif endif
ifdef EXEC ifdef EXEC
override TESTFLAGS += $(patsubst %,--exec=%,$(EXEC)) override TESTFLAGS += --exec="$(EXEC)"
endif
ifdef BUILDDIR
override TESTFLAGS += --build-dir="$(BUILDDIR:/=)"
override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
endif
ifneq ($(NM),nm)
override CODEFLAGS += --nm-tool="$(NM)"
endif endif
# commands
.PHONY: all build .PHONY: all build
all build: $(TARGET) all build: $(TARGET)
@@ -48,44 +73,46 @@ asm: $(ASM)
size: $(OBJ) size: $(OBJ)
$(SIZE) -t $^ $(SIZE) -t $^
.PHONY: tags
tags:
$(CTAGS) --totals --c-types=+p $(shell find -name '*.h') $(SRC)
.PHONY: code .PHONY: code
code: code: $(OBJ)
./scripts/code.py $(SCRIPTFLAGS) ./scripts/code.py $^ $(CODEFLAGS)
.PHONY: coverage .PHONY: coverage
coverage: coverage:
./scripts/coverage.py $(SCRIPTFLAGS) ./scripts/coverage.py $(BUILDDIR)tests/*.toml.info $(COVERAGEFLAGS)
.PHONY: test .PHONY: test
test: test:
./scripts/test.py $(TESTFLAGS) $(SCRIPTFLAGS) ./scripts/test.py $(TESTFLAGS)
.SECONDEXPANSION: .SECONDEXPANSION:
test%: tests/test$$(firstword $$(subst \#, ,%)).toml test%: tests/test$$(firstword $$(subst \#, ,%)).toml
./scripts/test.py $@ $(TESTFLAGS) $(SCRIPTFLAGS) ./scripts/test.py $@ $(TESTFLAGS)
# rules
-include $(DEP) -include $(DEP)
.SUFFIXES:
lfs: $(OBJ) $(BUILDDIR)lfs: $(OBJ)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
%.a: $(OBJ) $(BUILDDIR)%.a: $(OBJ)
$(AR) rcs $@ $^ $(AR) rcs $@ $^
%.o: %.c $(BUILDDIR)%.o: %.c
$(CC) -c -MMD $(CFLAGS) $< -o $@ $(CC) -c -MMD $(CFLAGS) $< -o $@
%.s: %.c $(BUILDDIR)%.s: %.c
$(CC) -S $(CFLAGS) $< -o $@ $(CC) -S $(CFLAGS) $< -o $@
%.gcda.gcov: %.gcda # clean everything
( cd $(dir $@) ; $(GCOV) -ri $(notdir $<) )
.PHONY: clean .PHONY: clean
clean: clean:
rm -f $(TARGET) rm -f $(TARGET)
rm -f $(OBJ) rm -f $(OBJ)
rm -f $(DEP) rm -f $(DEP)
rm -f $(ASM) rm -f $(ASM)
rm -f tests/*.toml.* rm -f $(BUILDDIR)tests/*.toml.*
rm -f sizes/*
rm -f results/*

View File

@@ -1,24 +1,12 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
# #
# This script finds the code size at the function level, with/without # Script to find code size at the function level. Basically just a bit wrapper
# static functions, and has some conveniences for comparing different # around nm with some extra conveniences for comparing builds. Heavily inspired
# versions. It's basically one big wrapper around nm, and may or may # by Linux's Bloat-O-Meter.
# not have been written out of jealousy of Linux's Bloat-O-Meter.
#
# Here's a useful bash script to use while developing:
# ./scripts/code_size.py -qo old.csv
# while true ; do ./code_scripts/size.py -d old.csv ; inotifywait -rqe modify * ; done
#
# Or even better, to automatically update results on commit:
# ./scripts/code_size.py -qo commit.csv
# while true ; do ./scripts/code_size.py -d commit.csv -o current.csv ; git diff --exit-code --quiet && cp current.csv commit.csv ; inotifywait -rqe modify * ; done
#
# Or my personal favorite:
# ./scripts/code_size.py -qo master.csv && cp master.csv commit.csv
# while true ; do ( ./scripts/code_size.py -i commit.csv -d master.csv -s ; ./scripts/code_size.py -i current.csv -d master.csv -s ; ./scripts/code_size.py -d master.csv -o current.csv -s ) | awk 'BEGIN {printf "%-16s %7s %7s %7s\n","","old","new","diff"} (NR==2 && $1="commit") || (NR==4 && $1="prev") || (NR==6 && $1="current") {printf "%-16s %7s %7s %7s %s\n",$1,$2,$3,$5,$6}' ; git diff --exit-code --quiet && cp current.csv commit.csv ; inotifywait -rqe modify * ; done
# #
import os import os
import glob
import itertools as it import itertools as it
import subprocess as sp import subprocess as sp
import shlex import shlex
@@ -26,267 +14,159 @@ import re
import csv import csv
import collections as co import collections as co
SIZEDIR = 'sizes'
RULES = """
define FLATTEN
%(sizedir)s/%(build)s.$(subst /,.,$(target)): $(target)
( echo "#line 1 \\"$$<\\"" ; %(cat)s $$< ) > $$@
%(sizedir)s/%(build)s.$(subst /,.,$(target:.c=.size)): \\
%(sizedir)s/%(build)s.$(subst /,.,$(target:.c=.o))
$(NM) --size-sort $$^ | sed 's/^/$(subst /,\\/,$(target:.c=.o)):/' > $$@
endef
$(foreach target,$(SRC),$(eval $(FLATTEN)))
-include %(sizedir)s/*.d OBJ_PATHS = ['*.o', 'bd/*.o']
.SECONDARY:
%%.size: $(foreach t,$(subst /,.,$(OBJ:.o=.size)),%%.$t) def collect(paths, **args):
cat $^ > $@ results = co.defaultdict(lambda: 0)
"""
CATS = {
'code': 'cat',
'code_inlined': 'sed \'s/^static\( inline\)\?//\'',
}
def build(**args):
# mkdir -p sizedir
os.makedirs(args['sizedir'], exist_ok=True)
if args.get('inlined', False):
builds = ['code', 'code_inlined']
else:
builds = ['code']
# write makefiles for the different types of builds
makefiles = []
targets = []
for build in builds:
path = args['sizedir'] + '/' + build
with open(path + '.mk', 'w') as mk:
mk.write(RULES.replace(4*' ', '\t') % dict(
sizedir=args['sizedir'],
build=build,
cat=CATS[build]))
mk.write('\n')
# pass on defines
for d in args['D']:
mk.write('%s: override CFLAGS += -D%s\n' % (
path+'.size', d))
makefiles.append(path + '.mk')
targets.append(path + '.size')
# build in parallel
cmd = (['make', '-f', 'Makefile'] +
list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
[target for target in targets])
if args.get('verbose', False):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.DEVNULL if not args.get('verbose', False) else None)
proc.wait()
if proc.returncode != 0:
sys.exit(-1)
# find results
build_results = co.defaultdict(lambda: 0)
# notes
# - filters type
# - discards internal/debug functions (leading __)
pattern = re.compile( pattern = re.compile(
'^(?P<file>[^:]+)' + '^(?P<size>[0-9a-fA-F]+)' +
':(?P<size>[0-9a-fA-F]+)' +
' (?P<type>[%s])' % re.escape(args['type']) + ' (?P<type>[%s])' % re.escape(args['type']) +
' (?!__)(?P<name>.+?)$') ' (?P<func>.+?)$')
for build in builds: for path in paths:
path = args['sizedir'] + '/' + build # note nm-tool may contain extra args
with open(path + '.size') as size: cmd = args['nm_tool'] + ['--size-sort', path]
for line in size: if args.get('verbose'):
match = pattern.match(line) print(' '.join(shlex.quote(c) for c in cmd))
if match: proc = sp.Popen(cmd, stdout=sp.PIPE, universal_newlines=True)
file = match.group('file') for line in proc.stdout:
m = pattern.match(line)
if m:
results[(path, m.group('func'))] += int(m.group('size'), 16)
flat_results = []
for (file, func), size in results.items():
# map to source files
if args.get('build_dir'):
file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
# discard internal functions
if func.startswith('__'):
continue
# discard .8449 suffixes created by optimizer # discard .8449 suffixes created by optimizer
name = re.sub('\.[0-9]+', '', match.group('name')) func = re.sub('\.[0-9]+', '', func)
size = int(match.group('size'), 16) flat_results.append((file, func, size))
build_results[(build, file, name)] += size
results = [] return flat_results
for (build, file, name), size in build_results.items():
if build == 'code':
results.append((file, name, size, False))
elif (build == 'code_inlined' and
('inlined', file, name) not in results):
results.append((file, name, size, True))
return results
def main(**args): def main(**args):
# find results # find sizes
if not args.get('input', None): if not args.get('use', None):
results = build(**args) # find .o files
paths = []
for path in args['obj_paths']:
if os.path.isdir(path):
path = path + '/*.o'
for path in glob.glob(path):
paths.append(path)
if not paths:
print('no .obj files found in %r?' % args['obj_paths'])
sys.exit(-1)
results = collect(paths, **args)
else: else:
with open(args['input']) as f: with open(args['use']) as f:
r = csv.DictReader(f) r = csv.DictReader(f)
results = [ results = [
( result['file'], ( result['file'],
result['name'], result['function'],
int(result['size']), int(result['size']))
bool(int(result.get('inlined', 0)))) for result in r]
for result in r
if (not bool(int(result.get('inlined', 0))) or
args.get('inlined', False))]
total = 0 total = 0
for _, _, size, inlined in results: for _, _, size in results:
if not inlined:
total += size total += size
# find previous results? # find previous results?
if args.get('diff', None): if args.get('diff'):
with open(args['diff']) as f: with open(args['diff']) as f:
r = csv.DictReader(f) r = csv.DictReader(f)
prev_results = [ prev_results = [
( result['file'], ( result['file'],
result['name'], result['function'],
int(result['size']), int(result['size']))
bool(int(result.get('inlined', 0)))) for result in r]
for result in r
if (not bool(int(result.get('inlined', 0))) or
args.get('inlined', False))]
prev_total = 0 prev_total = 0
for _, _, size, inlined in prev_results: for _, _, size in prev_results:
if not inlined:
prev_total += size prev_total += size
# write results to CSV # write results to CSV
if args.get('output', None): if args.get('output'):
results.sort(key=lambda x: (-x[2], x))
with open(args['output'], 'w') as f: with open(args['output'], 'w') as f:
w = csv.writer(f) w = csv.writer(f)
if args.get('inlined', False): w.writerow(['file', 'function', 'size'])
w.writerow(['file', 'name', 'size', 'inlined']) for file, func, size in sorted(results):
for file, name, size, inlined in results: w.writerow((file, func, size))
w.writerow((file, name, size, int(inlined)))
else:
w.writerow(['file', 'name', 'size'])
for file, name, size, inlined in results:
w.writerow((file, name, size))
# print results # print results
def dedup_functions(results): def dedup_entries(results, by='function'):
functions = co.defaultdict(lambda: (0, True)) entries = co.defaultdict(lambda: 0)
for _, name, size, inlined in results: for file, func, size in results:
if not inlined: entry = (file if by == 'file' else func)
functions[name] = (functions[name][0] + size, False) entries[entry] += size
for _, name, size, inlined in results: return entries
if inlined and functions[name][1]:
functions[name] = (functions[name][0] + size, True)
return functions
def dedup_files(results): def diff_entries(olds, news):
files = co.defaultdict(lambda: 0) diff = co.defaultdict(lambda: (0, 0, 0, 0))
for file, _, size, inlined in results:
if not inlined:
files[file] += size
return files
def diff_sizes(olds, news):
diff = co.defaultdict(lambda: (None, None, None))
for name, new in news.items(): for name, new in news.items():
diff[name] = (None, new, new) diff[name] = (0, new, new, 1.0)
for name, old in olds.items(): for name, old in olds.items():
new = diff[name][1] or 0 _, new, _, _ = diff[name]
diff[name] = (old, new, new-old) diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
return diff return diff
def print_header(name=''): def print_header(by=''):
if not args.get('diff', False): if not args.get('diff'):
print('%-40s %7s' % (name, 'size')) print('%-36s %7s' % (by, 'size'))
else: else:
print('%-40s %7s %7s %7s' % (name, 'old', 'new', 'diff')) print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
def print_functions(): def print_entries(by='function'):
functions = dedup_functions(results) entries = dedup_entries(results, by=by)
functions = {
name+' (inlined)' if inlined else name: size
for name, (size, inlined) in functions.items()}
if not args.get('diff', None): if not args.get('diff'):
print_header('function') print_header(by=by)
for name, size in sorted(functions.items(), for name, size in sorted(entries.items()):
key=lambda x: (-x[1], x)): print("%-36s %7d" % (name, size))
print("%-40s %7d" % (name, size))
else: else:
prev_functions = dedup_functions(prev_results) prev_entries = dedup_entries(prev_results, by=by)
prev_functions = { diff = diff_entries(prev_entries, entries)
name+' (inlined)' if inlined else name: size print_header(by='%s (%d added, %d removed)' % (by,
for name, (size, inlined) in prev_functions.items()} sum(1 for old, _, _, _ in diff.values() if not old),
diff = diff_sizes(functions, prev_functions) sum(1 for _, new, _, _ in diff.values() if not new)))
print_header('function (%d added, %d removed)' % ( for name, (old, new, diff, ratio) in sorted(diff.items(),
sum(1 for old, _, _ in diff.values() if not old), key=lambda x: (-x[1][3], x)):
sum(1 for _, new, _ in diff.values() if not new))) if ratio or args.get('all'):
for name, (old, new, diff) in sorted(diff.items(), print("%-36s %7s %7s %+7d%s" % (name,
key=lambda x: (-(x[1][2] or 0), x)): old or "-",
if diff or args.get('all', False): new or "-",
print("%-40s %7s %7s %+7d%s" % ( diff,
name, old or "-", new or "-", diff, ' (%+.1f%%)' % (100*ratio) if ratio else ''))
' (%+.2f%%)' % (100*((new-old)/old))
if old and new else
''))
def print_files():
files = dedup_files(results)
if not args.get('diff', None):
print_header('file')
for file, size in sorted(files.items(),
key=lambda x: (-x[1], x)):
print("%-40s %7d" % (file, size))
else:
prev_files = dedup_files(prev_results)
diff = diff_sizes(files, prev_files)
print_header('file (%d added, %d removed)' % (
sum(1 for old, _, _ in diff.values() if not old),
sum(1 for _, new, _ in diff.values() if not new)))
for name, (old, new, diff) in sorted(diff.items(),
key=lambda x: (-(x[1][2] or 0), x)):
if diff or args.get('all', False):
print("%-40s %7s %7s %+7d%s" % (
name, old or "-", new or "-", diff,
' (%+.2f%%)' % (100*((new-old)/old))
if old and new else
''))
def print_totals(): def print_totals():
if not args.get('diff', None): if not args.get('diff'):
print("%-40s %7d" % ('TOTALS', total)) print("%-36s %7d" % ('TOTAL', total))
else: else:
print("%-40s %7s %7s %+7d%s" % ( ratio = (total-prev_total)/prev_total if prev_total else 1.0
'TOTALS', prev_total, total, total-prev_total, print("%-36s %7s %7s %+7d%s" % (
' (%+.2f%%)' % (100*((total-prev_total)/total)) 'TOTAL',
if prev_total and total else prev_total if prev_total else '-',
'')) total if total else '-',
total-prev_total,
' (%+.1f%%)' % (100*ratio) if ratio else ''))
def print_status(): if args.get('quiet'):
if not args.get('diff', None):
print(total)
else:
print("%d (%+.2f%%)" % (total, 100*((total-prev_total)/total)))
if args.get('quiet', False):
pass pass
elif args.get('status', False): elif args.get('summary'):
print_status()
elif args.get('summary', False):
print_header() print_header()
print_totals() print_totals()
elif args.get('files', False): elif args.get('files'):
print_files() print_entries(by='file')
print_totals() print_totals()
else: else:
print_functions() print_entries(by='function')
print_totals() print_totals()
if __name__ == "__main__": if __name__ == "__main__":
@@ -294,35 +174,32 @@ if __name__ == "__main__":
import sys import sys
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description="Find code size at the function level.") description="Find code size at the function level.")
parser.add_argument('sizedir', nargs='?', default=SIZEDIR, parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
help="Directory to store intermediary results. Defaults " help="Description of where to find *.o files. May be a directory \
"to \"%s\"." % SIZEDIR) or a list of paths. Defaults to %r." % OBJ_PATHS)
parser.add_argument('-D', action='append', default=[],
help="Specify compile-time define.")
parser.add_argument('-v', '--verbose', action='store_true', parser.add_argument('-v', '--verbose', action='store_true',
help="Output commands that run behind the scenes.") help="Output commands that run behind the scenes.")
parser.add_argument('-i', '--input',
help="Don't compile and find code sizes, instead use this CSV file.")
parser.add_argument('-o', '--output', parser.add_argument('-o', '--output',
help="Specify CSV file to store results.") help="Specify CSV file to store results.")
parser.add_argument('-u', '--use',
help="Don't compile and find code sizes, instead use this CSV file.")
parser.add_argument('-d', '--diff', parser.add_argument('-d', '--diff',
help="Specify CSV file to diff code size against.") help="Specify CSV file to diff code size against.")
parser.add_argument('-a', '--all', action='store_true', parser.add_argument('-a', '--all', action='store_true',
help="Show all functions, not just the ones that changed.") help="Show all functions, not just the ones that changed.")
parser.add_argument('--inlined', action='store_true',
help="Run a second compilation to find the sizes of functions normally "
"removed by optimizations. These will be shown as \"*.inlined\" "
"functions, and will not be included in the total.")
parser.add_argument('--files', action='store_true', parser.add_argument('--files', action='store_true',
help="Show file-level code sizes. Note this does not include padding! " help="Show file-level code sizes. Note this does not include padding! "
"So sizes may differ from other tools.") "So sizes may differ from other tools.")
parser.add_argument('-s', '--summary', action='store_true', parser.add_argument('-s', '--summary', action='store_true',
help="Only show the total code size.") help="Only show the total code size.")
parser.add_argument('-S', '--status', action='store_true',
help="Show minimum info useful for a single-line status.")
parser.add_argument('-q', '--quiet', action='store_true', parser.add_argument('-q', '--quiet', action='store_true',
help="Don't show anything, useful with -o.") help="Don't show anything, useful with -o.")
parser.add_argument('--type', default='tTrRdDbB', parser.add_argument('--type', default='tTrRdDbB',
help="Type of symbols to report, this uses the same single-character " help="Type of symbols to report, this uses the same single-character "
"type-names emitted by nm. Defaults to %(default)r.") "type-names emitted by nm. Defaults to %(default)r.")
parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
help="Path to the nm tool to use.")
parser.add_argument('--build-dir',
help="Specify the relative build directory. Used to map object files \
to the correct source files.")
sys.exit(main(**vars(parser.parse_args()))) sys.exit(main(**vars(parser.parse_args())))

View File

@@ -1,6 +1,7 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
# #
# Parse and report coverage info from .info files generated by lcov
#
import os import os
import glob import glob
import csv import csv
@@ -8,8 +9,8 @@ import re
import collections as co import collections as co
import bisect as b import bisect as b
INFO_PATHS = 'tests/*.toml.info'
INFO_PATHS = ['tests/*.toml.info']
def collect(paths, **args): def collect(paths, **args):
file = None file = None
@@ -65,14 +66,14 @@ def collect(paths, **args):
def main(**args): def main(**args):
# find coverage # find coverage
if not args.get('input', None): if not args.get('use'):
# find *.info files # find *.info files
paths = [] paths = []
for path in args['info_paths']: for path in args['info_paths']:
if os.path.isdir(path): if os.path.isdir(path):
path = path + '/*.gcov' path = path + '/*.gcov'
for path in glob.glob(path, recursive=True): for path in glob.glob(path):
paths.append(path) paths.append(path)
if not paths: if not paths:
@@ -81,7 +82,7 @@ def main(**args):
results = collect(paths, **args) results = collect(paths, **args)
else: else:
with open(args['input']) as f: with open(args['use']) as f:
r = csv.DictReader(f) r = csv.DictReader(f)
results = [ results = [
( result['file'], ( result['file'],
@@ -96,7 +97,7 @@ def main(**args):
total_count += count total_count += count
# find previous results? # find previous results?
if args.get('diff', None): if args.get('diff'):
with open(args['diff']) as f: with open(args['diff']) as f:
r = csv.DictReader(f) r = csv.DictReader(f)
prev_results = [ prev_results = [
@@ -112,12 +113,11 @@ def main(**args):
prev_total_count += count prev_total_count += count
# write results to CSV # write results to CSV
if args.get('output', None): if args.get('output'):
results.sort(key=lambda x: (-(x[3]-x[2]), -x[3], x))
with open(args['output'], 'w') as f: with open(args['output'], 'w') as f:
w = csv.writer(f) w = csv.writer(f)
w.writerow(['file', 'function', 'hits', 'count']) w.writerow(['file', 'function', 'hits', 'count'])
for file, func, hits, count in results: for file, func, hits, count in sorted(results):
w.writerow((file, func, hits, count)) w.writerow((file, func, hits, count))
# print results # print results
@@ -130,97 +130,95 @@ def main(**args):
return entries return entries
def diff_entries(olds, news): def diff_entries(olds, news):
diff = co.defaultdict(lambda: (None, None, None, None, None, None)) diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0))
for name, (new_hits, new_count) in news.items(): for name, (new_hits, new_count) in news.items():
diff[name] = ( diff[name] = (
0, 0, 0, 0,
new_hits, new_count, new_hits, new_count,
new_hits, new_count) new_hits, new_count,
(new_hits/new_count if new_count else 1.0) - 1.0)
for name, (old_hits, old_count) in olds.items(): for name, (old_hits, old_count) in olds.items():
new_hits = diff[name][2] or 0 _, _, new_hits, new_count, _, _, _ = diff[name]
new_count = diff[name][3] or 0
diff[name] = ( diff[name] = (
old_hits, old_count, old_hits, old_count,
new_hits, new_count, new_hits, new_count,
new_hits-old_hits, new_count-old_count) new_hits-old_hits, new_count-old_count,
((new_hits/new_count if new_count else 1.0)
- (old_hits/old_count if old_count else 1.0)))
return diff return diff
def print_header(by=''): def print_header(by=''):
if not args.get('diff', False): if not args.get('diff'):
print('%-36s %11s' % (by, 'hits/count')) print('%-36s %19s' % (by, 'hits/line'))
else: else:
print('%-36s %11s %11s %11s' % (by, 'old', 'new', 'diff')) print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))
def print_entries(by='function'): def print_entries(by='function'):
entries = dedup_entries(results, by=by) entries = dedup_entries(results, by=by)
if not args.get('diff', None): if not args.get('diff'):
print_header(by=by) print_header(by=by)
for name, (hits, count) in sorted(entries.items(), for name, (hits, count) in sorted(entries.items()):
key=lambda x: (-(x[1][1]-x[1][0]), -x[1][1], x)): print("%-36s %11s %7s" % (name,
print("%-36s %11s (%.2f%%)" % (name, '%d/%d' % (hits, count)
'%d/%d' % (hits, count), if count else '-',
100*(hits/count if count else 1.0))) '%.1f%%' % (100*hits/count)
if count else '-'))
else: else:
prev_entries = dedup_entries(prev_results, by=by) prev_entries = dedup_entries(prev_results, by=by)
diff = diff_entries(prev_entries, entries) diff = diff_entries(prev_entries, entries)
print_header(by='%s (%d added, %d removed)' % (by, print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for _, old, _, _, _, _ in diff.values() if not old), sum(1 for _, old, _, _, _, _, _ in diff.values() if not old),
sum(1 for _, _, _, new, _, _ in diff.values() if not new))) sum(1 for _, _, _, new, _, _, _ in diff.values() if not new)))
for name, ( for name, (
old_hits, old_count, old_hits, old_count,
new_hits, new_count, new_hits, new_count,
diff_hits, diff_count) in sorted(diff.items(), diff_hits, diff_count, ratio) in sorted(diff.items(),
key=lambda x: ( key=lambda x: (-x[1][6], x)):
-(x[1][5]-x[1][4]), -x[1][5], -x[1][3], x)): if ratio or args.get('all'):
ratio = ((new_hits/new_count if new_count else 1.0) print("%-36s %11s %7s %11s %7s %11s%s" % (name,
- (old_hits/old_count if old_count else 1.0))
if diff_hits or diff_count or args.get('all', False):
print("%-36s %11s %11s %11s%s" % (name,
'%d/%d' % (old_hits, old_count) '%d/%d' % (old_hits, old_count)
if old_count else '-', if old_count else '-',
'%.1f%%' % (100*old_hits/old_count)
if old_count else '-',
'%d/%d' % (new_hits, new_count) '%d/%d' % (new_hits, new_count)
if new_count else '-', if new_count else '-',
'%.1f%%' % (100*new_hits/new_count)
if new_count else '-',
'%+d/%+d' % (diff_hits, diff_count), '%+d/%+d' % (diff_hits, diff_count),
' (%+.2f%%)' % (100*ratio) if ratio else '')) ' (%+.1f%%)' % (100*ratio) if ratio else ''))
def print_totals(): def print_totals():
if not args.get('diff', None): if not args.get('diff'):
print("%-36s %11s (%.2f%%)" % ('TOTALS', print("%-36s %11s %7s" % ('TOTAL',
'%d/%d' % (total_hits, total_count), '%d/%d' % (total_hits, total_count)
100*(total_hits/total_count if total_count else 1.0))) if total_count else '-',
'%.1f%%' % (100*total_hits/total_count)
if total_count else '-'))
else: else:
ratio = ((total_hits/total_count ratio = ((total_hits/total_count
if total_count else 1.0) if total_count else 1.0)
- (prev_total_hits/prev_total_count - (prev_total_hits/prev_total_count
if prev_total_count else 1.0)) if prev_total_count else 1.0))
print("%-36s %11s %11s %11s%s" % ('TOTALS', print("%-36s %11s %7s %11s %7s %11s%s" % ('TOTAL',
'%d/%d' % (prev_total_hits, prev_total_count), '%d/%d' % (prev_total_hits, prev_total_count)
'%d/%d' % (total_hits, total_count), if prev_total_count else '-',
'%.1f%%' % (100*prev_total_hits/prev_total_count)
if prev_total_count else '-',
'%d/%d' % (total_hits, total_count)
if total_count else '-',
'%.1f%%' % (100*total_hits/total_count)
if total_count else '-',
'%+d/%+d' % (total_hits-prev_total_hits, '%+d/%+d' % (total_hits-prev_total_hits,
total_count-prev_total_count), total_count-prev_total_count),
' (%+.2f%%)' % (100*ratio) if ratio else '')) ' (%+.1f%%)' % (100*ratio) if ratio else ''))
def print_status(): if args.get('quiet'):
if not args.get('diff', None):
print("%d/%d (%.2f%%)" % (total_hits, total_count,
100*(total_hits/total_count if total_count else 1.0)))
else:
ratio = ((total_hits/total_count
if total_count else 1.0)
- (prev_total_hits/prev_total_count
if prev_total_count else 1.0))
print("%d/%d (%+.2f%%)" % (total_hits, total_count,
(100*ratio) if ratio else ''))
if args.get('quiet', False):
pass pass
elif args.get('status', False): elif args.get('summary'):
print_status()
elif args.get('summary', False):
print_header() print_header()
print_totals() print_totals()
elif args.get('files', False): elif args.get('files'):
print_entries(by='file') print_entries(by='file')
print_totals() print_totals()
else: else:
@@ -231,17 +229,18 @@ if __name__ == "__main__":
import argparse import argparse
import sys import sys
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description="Show/manipulate coverage info") description="Parse and report coverage info from .info files \
parser.add_argument('info_paths', nargs='*', default=[INFO_PATHS], generated by lcov")
parser.add_argument('info_paths', nargs='*', default=INFO_PATHS,
help="Description of where to find *.info files. May be a directory \ help="Description of where to find *.info files. May be a directory \
or list of paths. *.info files will be merged to show the total \ or list of paths. *.info files will be merged to show the total \
coverage. Defaults to \"%s\"." % INFO_PATHS) coverage. Defaults to %r." % INFO_PATHS)
parser.add_argument('-v', '--verbose', action='store_true', parser.add_argument('-v', '--verbose', action='store_true',
help="Output commands that run behind the scenes.") help="Output commands that run behind the scenes.")
parser.add_argument('-i', '--input',
help="Don't do any work, instead use this CSV file.")
parser.add_argument('-o', '--output', parser.add_argument('-o', '--output',
help="Specify CSV file to store results.") help="Specify CSV file to store results.")
parser.add_argument('-u', '--use',
help="Don't do any work, instead use this CSV file.")
parser.add_argument('-d', '--diff', parser.add_argument('-d', '--diff',
help="Specify CSV file to diff code size against.") help="Specify CSV file to diff code size against.")
parser.add_argument('-a', '--all', action='store_true', parser.add_argument('-a', '--all', action='store_true',
@@ -250,8 +249,6 @@ if __name__ == "__main__":
help="Show file-level coverage.") help="Show file-level coverage.")
parser.add_argument('-s', '--summary', action='store_true', parser.add_argument('-s', '--summary', action='store_true',
help="Only show the total coverage.") help="Only show the total coverage.")
parser.add_argument('-S', '--status', action='store_true',
help="Show minimum info useful for a single-line status.")
parser.add_argument('-q', '--quiet', action='store_true', parser.add_argument('-q', '--quiet', action='store_true',
help="Don't show anything, useful with -o.") help="Don't show anything, useful with -o.")
sys.exit(main(**vars(parser.parse_args()))) sys.exit(main(**vars(parser.parse_args())))

View File

@@ -20,7 +20,7 @@ import pty
import errno import errno
import signal import signal
TESTDIR = 'tests' TEST_PATHS = 'tests'
RULES = """ RULES = """
define FLATTEN define FLATTEN
%(path)s%%$(subst /,.,$(target)): $(target) %(path)s%%$(subst /,.,$(target)): $(target)
@@ -31,14 +31,15 @@ $(foreach target,$(SRC),$(eval $(FLATTEN)))
-include %(path)s*.d -include %(path)s*.d
.SECONDARY: .SECONDARY:
%(path)s.test: %(path)s.test.o $(foreach t,$(subst /,.,$(OBJ)),%(path)s.$t) %(path)s.test: %(path)s.test.o \\
$(foreach t,$(subst /,.,$(SRC:.c=.o)),%(path)s.$t)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
""" """
COVERAGE_RULES = """ COVERAGE_RULES = """
%(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage %(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage
# delete lingering coverage # delete lingering coverage
%(path)s.test: | %(path)s.info.clean %(path)s.test: | %(path)s.clean
.PHONY: %(path)s.clean .PHONY: %(path)s.clean
%(path)s.clean: %(path)s.clean:
rm -f %(path)s*.gcda rm -f %(path)s*.gcda
@@ -373,12 +374,17 @@ class TestSuite:
self.name = os.path.basename(path) self.name = os.path.basename(path)
if self.name.endswith('.toml'): if self.name.endswith('.toml'):
self.name = self.name[:-len('.toml')] self.name = self.name[:-len('.toml')]
if args.get('build_dir'):
self.toml = path
self.path = args['build_dir'] + '/' + path
else:
self.toml = path
self.path = path self.path = path
self.classes = classes self.classes = classes
self.defines = defines.copy() self.defines = defines.copy()
self.filter = filter self.filter = filter
with open(path) as f: with open(self.toml) as f:
# load tests # load tests
config = toml.load(f) config = toml.load(f)
@@ -489,7 +495,7 @@ class TestSuite:
def build(self, **args): def build(self, **args):
# build test files # build test files
tf = open(self.path + '.test.c.t', 'w') tf = open(self.path + '.test.tc', 'w')
tf.write(GLOBALS) tf.write(GLOBALS)
if self.code is not None: if self.code is not None:
tf.write('#line %d "%s"\n' % (self.code_lineno, self.path)) tf.write('#line %d "%s"\n' % (self.code_lineno, self.path))
@@ -499,7 +505,7 @@ class TestSuite:
for case in self.cases: for case in self.cases:
if case.in_ not in tfs: if case.in_ not in tfs:
tfs[case.in_] = open(self.path+'.'+ tfs[case.in_] = open(self.path+'.'+
case.in_.replace('/', '.')+'.t', 'w') re.sub('(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w')
tfs[case.in_].write('#line 1 "%s"\n' % case.in_) tfs[case.in_].write('#line 1 "%s"\n' % case.in_)
with open(case.in_) as f: with open(case.in_) as f:
for line in f: for line in f:
@@ -556,13 +562,15 @@ class TestSuite:
if path is None: if path is None:
mk.write('%s: %s | %s\n' % ( mk.write('%s: %s | %s\n' % (
self.path+'.test.c', self.path+'.test.c',
self.path, self.toml,
self.path+'.test.c.t')) self.path+'.test.tc'))
else: else:
mk.write('%s: %s %s | %s\n' % ( mk.write('%s: %s %s | %s\n' % (
self.path+'.'+path.replace('/', '.'), self.path+'.'+path.replace('/', '.'),
self.path, path, self.toml,
self.path+'.'+path.replace('/', '.')+'.t')) path,
self.path+'.'+re.sub('(\.c)?$', '.tc',
path.replace('/', '.'))))
mk.write('\t./scripts/explode_asserts.py $| -o $@\n') mk.write('\t./scripts/explode_asserts.py $| -o $@\n')
self.makefile = self.path + '.mk' self.makefile = self.path + '.mk'
@@ -617,7 +625,7 @@ def main(**args):
classes = [TestCase] classes = [TestCase]
suites = [] suites = []
for testpath in args['testpaths']: for testpath in args['test_paths']:
# optionally specified test case/perm # optionally specified test case/perm
testpath, *filter = testpath.split('#') testpath, *filter = testpath.split('#')
filter = [int(f) for f in filter] filter = [int(f) for f in filter]
@@ -628,9 +636,9 @@ def main(**args):
elif os.path.isfile(testpath): elif os.path.isfile(testpath):
testpath = testpath testpath = testpath
elif testpath.endswith('.toml'): elif testpath.endswith('.toml'):
testpath = TESTDIR + '/' + testpath testpath = TEST_PATHS + '/' + testpath
else: else:
testpath = TESTDIR + '/' + testpath + '.toml' testpath = TEST_PATHS + '/' + testpath + '.toml'
# find tests # find tests
for path in glob.glob(testpath): for path in glob.glob(testpath):
@@ -695,7 +703,7 @@ def main(**args):
if not args.get('verbose', False): if not args.get('verbose', False):
for line in stdout: for line in stdout:
sys.stdout.write(line) sys.stdout.write(line)
sys.exit(-3) sys.exit(-1)
print('built %d test suites, %d test cases, %d permutations' % ( print('built %d test suites, %d test cases, %d permutations' % (
len(suites), len(suites),
@@ -707,7 +715,7 @@ def main(**args):
for perm in suite.perms: for perm in suite.perms:
total += perm.shouldtest(**args) total += perm.shouldtest(**args)
if total != sum(len(suite.perms) for suite in suites): if total != sum(len(suite.perms) for suite in suites):
print('total down to %d permutations' % total) print('filtered down to %d permutations' % total)
# only requested to build? # only requested to build?
if args.get('build', False): if args.get('build', False):
@@ -733,7 +741,7 @@ def main(**args):
else: else:
sys.stdout.write( sys.stdout.write(
"\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m " "\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m "
"{perm} failed with {returncode}\n".format( "{perm} failed\n".format(
perm=perm, path=perm.suite.path, lineno=perm.lineno, perm=perm, path=perm.suite.path, lineno=perm.lineno,
returncode=perm.result.returncode or 0)) returncode=perm.result.returncode or 0))
if perm.result.stdout: if perm.result.stdout:
@@ -753,7 +761,8 @@ def main(**args):
if args.get('coverage', False): if args.get('coverage', False):
# collect coverage info # collect coverage info
cmd = (['make', '-f', 'Makefile'] + # why -j1? lcov doesn't work in parallel because of gcov issues
cmd = (['make', '-j1', '-f', 'Makefile'] +
list(it.chain.from_iterable(['-f', m] for m in makefiles)) + list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
[re.sub('\.test$', '.cumul.info', target) for target in targets]) [re.sub('\.test$', '.cumul.info', target) for target in targets])
if args.get('verbose', False): if args.get('verbose', False):
@@ -762,7 +771,7 @@ def main(**args):
stdout=sp.DEVNULL if not args.get('verbose', False) else None) stdout=sp.DEVNULL if not args.get('verbose', False) else None)
proc.wait() proc.wait()
if proc.returncode != 0: if proc.returncode != 0:
sys.exit(-3) sys.exit(-1)
if args.get('gdb', False): if args.get('gdb', False):
failure = None failure = None
@@ -786,12 +795,12 @@ if __name__ == "__main__":
import argparse import argparse
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description="Run parameterized tests in various configurations.") description="Run parameterized tests in various configurations.")
parser.add_argument('testpaths', nargs='*', default=[TESTDIR], parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS],
help="Description of test(s) to run. By default, this is all tests \ help="Description of test(s) to run. By default, this is all tests \
found in the \"{0}\" directory. Here, you can specify a different \ found in the \"{0}\" directory. Here, you can specify a different \
directory of tests, a specific file, a suite by name, and even a \ directory of tests, a specific file, a suite by name, and even a \
specific test case by adding brackets. For example \ specific test case by adding brackets. For example \
\"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TESTDIR)) \"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TEST_PATHS))
parser.add_argument('-D', action='append', default=[], parser.add_argument('-D', action='append', default=[],
help="Overriding parameter definitions.") help="Overriding parameter definitions.")
parser.add_argument('-v', '--verbose', action='store_true', parser.add_argument('-v', '--verbose', action='store_true',
@@ -823,4 +832,8 @@ if __name__ == "__main__":
to accumulate coverage information into *.info files. Note \ to accumulate coverage information into *.info files. Note \
coverage is not reset between runs, allowing multiple runs to \ coverage is not reset between runs, allowing multiple runs to \
contribute to coverage.") contribute to coverage.")
parser.add_argument('--build-dir',
help="Build relative to the specified directory instead of the \
current directory.")
sys.exit(main(**vars(parser.parse_args()))) sys.exit(main(**vars(parser.parse_args())))

View File

@@ -485,7 +485,8 @@ code = '''
[[case]] # split dir test [[case]] # split dir test
define.LFS_BLOCK_SIZE = 512 define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024 define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024' if = 'False'
#if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
code = ''' code = '''
lfs_format(&lfs, &cfg) => 0; lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0; lfs_mount(&lfs, &cfg) => 0;
@@ -530,7 +531,8 @@ code = '''
[[case]] # outdated lookahead test [[case]] # outdated lookahead test
define.LFS_BLOCK_SIZE = 512 define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024 define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024' if = 'False'
#if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
code = ''' code = '''
lfs_format(&lfs, &cfg) => 0; lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0; lfs_mount(&lfs, &cfg) => 0;
@@ -595,7 +597,8 @@ code = '''
[[case]] # outdated lookahead and split dir test [[case]] # outdated lookahead and split dir test
define.LFS_BLOCK_SIZE = 512 define.LFS_BLOCK_SIZE = 512
define.LFS_BLOCK_COUNT = 1024 define.LFS_BLOCK_COUNT = 1024
if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024' if = 'False'
#if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
code = ''' code = '''
lfs_format(&lfs, &cfg) => 0; lfs_format(&lfs, &cfg) => 0;
lfs_mount(&lfs, &cfg) => 0; lfs_mount(&lfs, &cfg) => 0;