diff --git a/.github/workflows/status.yml b/.github/workflows/status.yml index 8864b29..493f5a8 100644 --- a/.github/workflows/status.yml +++ b/.github/workflows/status.yml @@ -46,11 +46,11 @@ jobs: `${{github.event.workflow_run.id}}/jobs" \ | jq -er '.jobs[] | select(.name == env.TARGET_JOB) - | .html_url + ((.steps[] - | select(.name == env.TARGET_STEP) - | "#step:\(.number):0") // "")' - ) - )" + | .html_url + + "?check_suite_focus=true" + + ((.steps[] + | select(.name == env.TARGET_STEP) + | "#step:\(.number):0") // "")'))" # TODO remove this # print for debugging echo "$(jq -nc '{ diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 231cd2c..47ee4b4 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,7 +4,6 @@ on: [push, pull_request] env: CFLAGS: -Werror MAKEFLAGS: -j - COVERAGE: 1 jobs: # run tests @@ -14,21 +13,22 @@ jobs: fail-fast: false matrix: arch: [x86_64, thumb, mips, powerpc] + env: + TESTFLAGS: --coverage steps: - uses: actions/checkout@v2 - name: install run: | # need toml, also pip3 isn't installed by default? - sudo apt-get update - sudo apt-get install python3 python3-pip + sudo apt-get update -qq + sudo apt-get install -qq python3 python3-pip lcov sudo pip3 install toml - mkdir status # cross-compile with ARM Thumb (32-bit, little-endian) - name: install-thumb if: matrix.arch == 'thumb' run: | - sudo apt-get install \ + sudo apt-get install -qq \ gcc-arm-linux-gnueabi \ libc6-dev-armel-cross \ qemu-user @@ -40,7 +40,7 @@ jobs: - name: install-mips if: matrix.arch == 'mips' run: | - sudo apt-get install \ + sudo apt-get install -qq \ gcc-mips-linux-gnu \ libc6-dev-mips-cross \ qemu-user @@ -52,7 +52,7 @@ jobs: - name: install-powerpc if: matrix.arch == 'powerpc' run: | - sudo apt-get install \ + sudo apt-get install -qq \ gcc-powerpc-linux-gnu \ libc6-dev-powerpc-cross \ qemu-user @@ -73,140 +73,118 @@ jobs: -include stdio.h" # normal+reentrant tests - name: test-default - continue-on-error: true - run: make test SCRIPTFLAGS+="-nrk" -# # NOR flash: read/prog = 1 block = 4KiB -# - name: test-nor -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" -# # SD/eMMC: read/prog = 512 block = 512 -# - name: test-emmc -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" -# # NAND flash: read/prog = 4KiB block = 32KiB -# - name: test-nand -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" -# # other extreme geometries that are useful for various corner cases -# - name: test-no-intrinsics -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_NO_INTRINSICS" -# - name: test-byte-writes -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" -# - name: test-block-cycles -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_BLOCK_CYCLES=1" -# - name: test-odd-block-count -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" -# - name: test-odd-block-size -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" - - - name: test-default-what - run: | - echo "version" - gcov --version - echo "tests" - ls tests - echo "hmm" - cat tests/*.gcov - echo "woah" + run: make test_dirs TESTFLAGS+="-nrk" + # NOR flash: read/prog = 1 block = 4KiB + - name: test-nor + run: make test TESTFLAGS+="-nrk + -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" VERBOSE=1 + # SD/eMMC: read/prog = 512 block = 512 + - name: test-emmc + run: make test TESTFLAGS+="-nrk + -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" + # NAND flash: read/prog = 4KiB block = 32KiB + - name: test-nand + run: make test TESTFLAGS+="-nrk + -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" + # other extreme geometries that are useful for various corner cases + - name: test-no-intrinsics + run: make test TESTFLAGS+="-nrk + -DLFS_NO_INTRINSICS" + - name: test-byte-writes + run: make test TESTFLAGS+="-nrk + -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" + - name: test-block-cycles + run: make test TESTFLAGS+="-nrk + -DLFS_BLOCK_CYCLES=1" + - name: test-odd-block-count + run: make test TESTFLAGS+="-nrk + -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" + - name: test-odd-block-size + run: make test TESTFLAGS+="-nrk + -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" # collect coverage - name: collect-coverage continue-on-error: true run: | mkdir -p coverage - mv results/coverage.gcov coverage/${{github.job}}.gcov + lcov $(for f in tests/*.toml.cumul.info ; do echo "-a $f" ; done) \ + -o coverage/${{github.job}}-${{matrix.arch}}.info + # we only care about littlefs's actual source + lcov -e coverage/${{github.job}}-${{matrix.arch}}.info \ + $(for f in lfs*.c ; do echo "/$f" ; done) \ + -o coverage/${{github.job}}-${{matrix.arch}}.info - name: upload-coverage continue-on-error: true uses: actions/upload-artifact@v2 with: name: coverage path: coverage + retention-days: 1 # update results - - uses: actions/checkout@v2 - if: github.ref != 'refs/heads/master' - continue-on-error: true - with: - ref: master - path: master - - name: results-code continue-on-error: true run: | - export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" - export CFLAGS+=" \ - -DLFS_NO_ASSERT \ - -DLFS_NO_DEBUG \ - -DLFS_NO_WARN \ - -DLFS_NO_ERROR" - if [ -d master ] - then - make -C master clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-qo code.csv" \ - && export SCRIPTFLAGS+="-d master/code.csv" - fi - make clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-o code.csv" + mkdir -p results + # TODO remove the need for OBJ + make clean + make code \ + OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \ + CFLAGS+=" \ + -DLFS_NO_ASSERT \ + -DLFS_NO_DEBUG \ + -DLFS_NO_WARN \ + -DLFS_NO_ERROR" \ + CODEFLAGS+="-o results/code.csv" - name: results-code-readonly continue-on-error: true run: | - export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" - export CFLAGS+=" \ - -DLFS_NO_ASSERT \ - -DLFS_NO_DEBUG \ - -DLFS_NO_WARN \ - -DLFS_NO_ERROR \ - -DLFS_READONLY" - if [ -d master ] - then - make -C master clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-qo code-readonly.csv" \ - && export SCRIPTFLAGS+="-d master/code-readonly.csv" - fi - # TODO remove this OBJ - make clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-o code-readonly.csv" + mkdir -p results + make clean + make code \ + OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \ + CFLAGS+=" \ + -DLFS_NO_ASSERT \ + -DLFS_NO_DEBUG \ + -DLFS_NO_WARN \ + -DLFS_NO_ERROR \ + -DLFS_READONLY" \ + CODEFLAGS+="-o results/code-readonly.csv" - name: results-code-threadsafe continue-on-error: true run: | - export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" - export CFLAGS+=" \ - -DLFS_NO_ASSERT \ - -DLFS_NO_DEBUG \ - -DLFS_NO_WARN \ - -DLFS_NO_ERROR \ - -DLFS_THREADSAFE" - if [ -d master ] - then - make -C master clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-qo code-threadsafe.csv" \ - && export SCRIPTFLAGS+="-d master/code-threadsafe.csv" - fi - make clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-o code-threadsafe.csv" + mkdir -p results + make clean + make code \ + OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \ + CFLAGS+=" \ + -DLFS_NO_ASSERT \ + -DLFS_NO_DEBUG \ + -DLFS_NO_WARN \ + -DLFS_NO_ERROR \ + -DLFS_THREADSAFE" \ + CODEFLAGS+="-o results/code-threadsafe.csv" - name: results-code-migrate continue-on-error: true run: | - export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" - export CFLAGS+=" \ - -DLFS_NO_ASSERT \ - -DLFS_NO_DEBUG \ - -DLFS_NO_WARN \ - -DLFS_NO_ERROR \ - -DLFS_MIGRATE" - if [ -d master ] - then - make -C master clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-qo code-migrate.csv" \ - && export SCRIPTFLAGS+="-d master/code-migrate.csv" - fi - make clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-o code-migrate.csv" + mkdir -p results + make clean + make code \ + OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \ + CFLAGS+=" \ + -DLFS_NO_ASSERT \ + -DLFS_NO_DEBUG \ + -DLFS_NO_WARN \ + -DLFS_NO_ERROR \ + -DLFS_MIGRATE" \ + CODEFLAGS+="-o results/code-migrate.csv" + - name: upload-results + continue-on-error: true + uses: actions/upload-artifact@v2 + with: + name: results + path: results # limit reporting to Thumb, otherwise there would be too many numbers # flying around for the results to be easily readable - name: collect-status @@ -214,23 +192,31 @@ jobs: if: matrix.arch == 'thumb' run: | mkdir -p status - shopt -s nullglob - for f in code*.csv + for f in results/code*.csv do export STEP="results-code$( - echo $f | sed -n 's/code-\(.*\).csv/-\1/p')" + echo $f | sed -n 's/.*code-\(.*\).csv/-\1/p')" export CONTEXT="results / code$( - echo $f | sed -n 's/code-\(.*\).csv/ (\1)/p')" - export DESCRIPTION="Code size is $( - ./scripts/code.py -i $f -S $( - [ -e master/$f ] && echo "-d master/$f"))" - jq -nc '{ + echo $f | sed -n 's/.*code-\(.*\).csv/ (\1)/p')" + export PREV="$(curl -sS \ + "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \ + | jq -re "select(.sha != env.GITHUB_SHA) | .statuses[] + | select(.context == env.CONTEXT).description + | capture(\"Code size is (?[0-9]+)\").result" \ + || echo 0)" + echo $PREV + export DESCRIPTION="$(./scripts/code.py -u $f -s | awk ' + NR==2 {printf "Code size is %d B",$2} + NR==2 && ENVIRON["PREV"] != 0 { + printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/$2}')" + jq -n '{ state: "success", context: env.CONTEXT, description: env.DESCRIPTION, - target_job: "test (${{matrix.arch}})", + target_job: "${{github.job}} (${{matrix.arch}})", target_step: env.STEP}' \ - > status/code$(echo $f | sed -n 's/code-\(.*\).csv/-\1/p').json + | tee status/code$( + echo $f | sed -n 's/.*code-\(.*\).csv/-\1/p').json done - name: upload-status continue-on-error: true @@ -244,20 +230,190 @@ jobs: # run under Valgrind to check for memory errors valgrind: runs-on: ubuntu-latest - steps: - uses: actions/checkout@v2 - name: install run: | # need toml, also pip3 isn't installed by default? - sudo apt-get update - sudo apt-get install python3 python3-pip + sudo apt-get update -qq + sudo apt-get install -qq python3 python3-pip sudo pip3 install toml - name: install-valgrind run: | - sudo apt-get update - sudo apt-get install valgrind + sudo apt-get update -qq + sudo apt-get install -qq valgrind valgrind --version -# # normal tests, we don't need to test all geometries -# - name: test-valgrind -# run: make test SCRIPTFLAGS+="-k --valgrind" + # normal tests, we don't need to test all geometries + - name: test-valgrind + run: make test TESTFLAGS+="-k --valgrind" + + # self-host with littlefs-fuse for a fuzz-like test + fuse: + runs-on: ubuntu-latest + if: ${{!endsWith(github.ref, '-prefix')}} + steps: + - uses: actions/checkout@v2 + - name: install + run: | + # need toml, also pip3 isn't installed by default? + sudo apt-get update -qq + sudo apt-get install -qq python3 python3-pip libfuse-dev + sudo pip3 install toml + fusermount -V + gcc --version + - uses: actions/checkout@v2 + with: + repository: littlefs-project/littlefs-fuse + ref: v2 + path: littlefs-fuse + - name: setup + run: | + # copy our new version into littlefs-fuse + rm -rf littlefs-fuse/littlefs/* + cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs + + # setup disk for littlefs-fuse + mkdir mount + sudo chmod a+rw /dev/loop0 + dd if=/dev/zero bs=512 count=128K of=disk + losetup /dev/loop0 disk + - name: test + run: | + # self-host test + make -C littlefs-fuse + + littlefs-fuse/lfs --format /dev/loop0 + littlefs-fuse/lfs /dev/loop0 mount + + ls mount + mkdir mount/littlefs + cp -r $(git ls-tree --name-only HEAD) mount/littlefs + cd mount/littlefs + stat . + ls -flh + make -B test + + # test migration using littlefs-fuse + migrate: + runs-on: ubuntu-latest + if: ${{!endsWith(github.ref, '-prefix')}} + steps: + - uses: actions/checkout@v2 + - name: install + run: | + # need toml, also pip3 isn't installed by default? + sudo apt-get update -qq + sudo apt-get install -qq python3 python3-pip libfuse-dev + sudo pip3 install toml + fusermount -V + gcc --version + - uses: actions/checkout@v2 + with: + repository: littlefs-project/littlefs-fuse + ref: v2 + path: v2 + - uses: actions/checkout@v2 + with: + repository: littlefs-project/littlefs-fuse + ref: v1 + path: v1 + - name: setup + run: | + # copy our new version into littlefs-fuse + rm -rf v2/littlefs/* + cp -r $(git ls-tree --name-only HEAD) v2/littlefs + + # setup disk for littlefs-fuse + mkdir mount + sudo chmod a+rw /dev/loop0 + dd if=/dev/zero bs=512 count=128K of=disk + losetup /dev/loop0 disk + - name: test + run: | + # compile v1 and v2 + make -C v1 + make -C v2 + + # run self-host test with v1 + v1/lfs --format /dev/loop0 + v1/lfs /dev/loop0 mount + + ls mount + mkdir mount/littlefs + cp -r $(git ls-tree --name-only HEAD) mount/littlefs + cd mount/littlefs + stat . + ls -flh + make -B test + + # attempt to migrate + cd ../.. + fusermount -u mount + + v2/lfs --migrate /dev/loop0 + v2/lfs /dev/loop0 mount + + # run self-host test with v2 right where we left off + ls mount + cd mount/littlefs + stat . + ls -flh + make -B test + + # collect coverage info + coverage: + runs-on: ubuntu-latest + needs: [test] + continue-on-error: true + steps: + - uses: actions/checkout@v2 + - name: install + run: | + sudo apt-get update -qq + sudo apt-get install -qq python3 python3-pip lcov + sudo pip3 install toml + - uses: actions/download-artifact@v2 + with: + name: coverage + path: coverage + - name: results-coverage + run: | + mkdir -p results + lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \ + -o results/coverage.info + ./scripts/coverage.py results/coverage.info -o results/coverage.csv + - name: upload-results + continue-on-error: true + uses: actions/upload-artifact@v2 + with: + name: results + path: results + - name: collect-status + run: | + mkdir -p status + export STEP="results-coverage" + export CONTEXT="results / coverage" + export PREV="$(curl -sS \ + "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \ + | jq -re "select(.sha != env.GITHUB_SHA) | .statuses[] + | select(.context == env.CONTEXT).description + | capture(\"Coverage is (?[0-9\\\\.]+)\").result" \ + || echo 0)" + export DESCRIPTION="$( + ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' ' + NR==2 {printf "Coverage is %.1f%% of %d lines",$4,$3} + NR==2 && ENVIRON["PREV"] != 0 { + printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')" + jq -n '{ + state: "success", + context: env.CONTEXT, + description: env.DESCRIPTION, + target_job: "${{github.job}}", + target_step: env.STEP}' \ + | tee status/coverage.json + - name: upload-status + uses: actions/upload-artifact@v2 + with: + name: status + path: status + retention-days: 1 diff --git a/Makefile b/Makefile index 17da8c2..2455a19 100644 --- a/Makefile +++ b/Makefile @@ -1,28 +1,43 @@ -TARGET = lfs.a +ifdef BUILDDIR +# make sure BUILDDIR ends with a slash +override BUILDDIR := $(BUILDDIR)/ +# bit of a hack, but we want to make sure BUILDDIR directory structure +# is correct before any commands +$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \ + $(BUILDDIR) \ + $(BUILDDIR)bd \ + $(BUILDDIR)tests)) +endif + +# overridable target/src/tools/flags/etc ifneq ($(wildcard test.c main.c),) -override TARGET = lfs +TARGET ?= $(BUILDDIR)lfs +else +TARGET ?= $(BUILDDIR)lfs.a endif CC ?= gcc AR ?= ar SIZE ?= size +CTAGS ?= ctags NM ?= nm -GCOV ?= gcov LCOV ?= lcov -SRC += $(wildcard *.c bd/*.c) -OBJ := $(SRC:.c=.o) -DEP := $(SRC:.c=.d) -ASM := $(SRC:.c=.s) +SRC ?= $(wildcard *.c bd/*.c) +OBJ := $(SRC:%.c=%.o) +DEP := $(SRC:%.c=%.d) +ASM := $(SRC:%.c=%.s) +ifdef BUILDDIR +override OBJ := $(addprefix $(BUILDDIR),$(OBJ)) +override DEP := $(addprefix $(BUILDDIR),$(DEP)) +override ASM := $(addprefix $(BUILDDIR),$(ASM)) +endif ifdef DEBUG override CFLAGS += -O0 -g3 else override CFLAGS += -Os endif -ifdef WORD -override CFLAGS += -m$(WORD) -endif ifdef TRACE override CFLAGS += -DLFS_YES_TRACE endif @@ -31,13 +46,23 @@ override CFLAGS += -std=c99 -Wall -pedantic override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef ifdef VERBOSE -override SCRIPTFLAGS += -v +override TESTFLAGS += -v +override CODEFLAGS += -v +override COVERAGEFLAGS += -v endif ifdef EXEC -override TESTFLAGS += $(patsubst %,--exec=%,$(EXEC)) +override TESTFLAGS += --exec="$(EXEC)" +endif +ifdef BUILDDIR +override TESTFLAGS += --build-dir="$(BUILDDIR:/=)" +override CODEFLAGS += --build-dir="$(BUILDDIR:/=)" +endif +ifneq ($(NM),nm) +override CODEFLAGS += --nm-tool="$(NM)" endif +# commands .PHONY: all build all build: $(TARGET) @@ -48,44 +73,46 @@ asm: $(ASM) size: $(OBJ) $(SIZE) -t $^ +.PHONY: tags +tags: + $(CTAGS) --totals --c-types=+p $(shell find -name '*.h') $(SRC) + .PHONY: code -code: - ./scripts/code.py $(SCRIPTFLAGS) +code: $(OBJ) + ./scripts/code.py $^ $(CODEFLAGS) .PHONY: coverage coverage: - ./scripts/coverage.py $(SCRIPTFLAGS) + ./scripts/coverage.py $(BUILDDIR)tests/*.toml.info $(COVERAGEFLAGS) .PHONY: test test: - ./scripts/test.py $(TESTFLAGS) $(SCRIPTFLAGS) + ./scripts/test.py $(TESTFLAGS) .SECONDEXPANSION: test%: tests/test$$(firstword $$(subst \#, ,%)).toml - ./scripts/test.py $@ $(TESTFLAGS) $(SCRIPTFLAGS) + ./scripts/test.py $@ $(TESTFLAGS) +# rules -include $(DEP) +.SUFFIXES: -lfs: $(OBJ) +$(BUILDDIR)lfs: $(OBJ) $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ -%.a: $(OBJ) +$(BUILDDIR)%.a: $(OBJ) $(AR) rcs $@ $^ -%.o: %.c +$(BUILDDIR)%.o: %.c $(CC) -c -MMD $(CFLAGS) $< -o $@ -%.s: %.c +$(BUILDDIR)%.s: %.c $(CC) -S $(CFLAGS) $< -o $@ -%.gcda.gcov: %.gcda - ( cd $(dir $@) ; $(GCOV) -ri $(notdir $<) ) - +# clean everything .PHONY: clean clean: rm -f $(TARGET) rm -f $(OBJ) rm -f $(DEP) rm -f $(ASM) - rm -f tests/*.toml.* - rm -f sizes/* - rm -f results/* + rm -f $(BUILDDIR)tests/*.toml.* diff --git a/scripts/code.py b/scripts/code.py index 46459a5..b61615e 100755 --- a/scripts/code.py +++ b/scripts/code.py @@ -1,24 +1,12 @@ #!/usr/bin/env python3 # -# This script finds the code size at the function level, with/without -# static functions, and has some conveniences for comparing different -# versions. It's basically one big wrapper around nm, and may or may -# not have been written out of jealousy of Linux's Bloat-O-Meter. -# -# Here's a useful bash script to use while developing: -# ./scripts/code_size.py -qo old.csv -# while true ; do ./code_scripts/size.py -d old.csv ; inotifywait -rqe modify * ; done -# -# Or even better, to automatically update results on commit: -# ./scripts/code_size.py -qo commit.csv -# while true ; do ./scripts/code_size.py -d commit.csv -o current.csv ; git diff --exit-code --quiet && cp current.csv commit.csv ; inotifywait -rqe modify * ; done -# -# Or my personal favorite: -# ./scripts/code_size.py -qo master.csv && cp master.csv commit.csv -# while true ; do ( ./scripts/code_size.py -i commit.csv -d master.csv -s ; ./scripts/code_size.py -i current.csv -d master.csv -s ; ./scripts/code_size.py -d master.csv -o current.csv -s ) | awk 'BEGIN {printf "%-16s %7s %7s %7s\n","","old","new","diff"} (NR==2 && $1="commit") || (NR==4 && $1="prev") || (NR==6 && $1="current") {printf "%-16s %7s %7s %7s %s\n",$1,$2,$3,$5,$6}' ; git diff --exit-code --quiet && cp current.csv commit.csv ; inotifywait -rqe modify * ; done +# Script to find code size at the function level. Basically just a bit wrapper +# around nm with some extra conveniences for comparing builds. Heavily inspired +# by Linux's Bloat-O-Meter. # import os +import glob import itertools as it import subprocess as sp import shlex @@ -26,267 +14,159 @@ import re import csv import collections as co -SIZEDIR = 'sizes' -RULES = """ -define FLATTEN -%(sizedir)s/%(build)s.$(subst /,.,$(target)): $(target) - ( echo "#line 1 \\"$$<\\"" ; %(cat)s $$< ) > $$@ -%(sizedir)s/%(build)s.$(subst /,.,$(target:.c=.size)): \\ - %(sizedir)s/%(build)s.$(subst /,.,$(target:.c=.o)) - $(NM) --size-sort $$^ | sed 's/^/$(subst /,\\/,$(target:.c=.o)):/' > $$@ -endef -$(foreach target,$(SRC),$(eval $(FLATTEN))) --include %(sizedir)s/*.d -.SECONDARY: +OBJ_PATHS = ['*.o', 'bd/*.o'] -%%.size: $(foreach t,$(subst /,.,$(OBJ:.o=.size)),%%.$t) - cat $^ > $@ -""" -CATS = { - 'code': 'cat', - 'code_inlined': 'sed \'s/^static\( inline\)\?//\'', -} - -def build(**args): - # mkdir -p sizedir - os.makedirs(args['sizedir'], exist_ok=True) - - if args.get('inlined', False): - builds = ['code', 'code_inlined'] - else: - builds = ['code'] - - # write makefiles for the different types of builds - makefiles = [] - targets = [] - for build in builds: - path = args['sizedir'] + '/' + build - with open(path + '.mk', 'w') as mk: - mk.write(RULES.replace(4*' ', '\t') % dict( - sizedir=args['sizedir'], - build=build, - cat=CATS[build])) - mk.write('\n') - - # pass on defines - for d in args['D']: - mk.write('%s: override CFLAGS += -D%s\n' % ( - path+'.size', d)) - - makefiles.append(path + '.mk') - targets.append(path + '.size') - - # build in parallel - cmd = (['make', '-f', 'Makefile'] + - list(it.chain.from_iterable(['-f', m] for m in makefiles)) + - [target for target in targets]) - if args.get('verbose', False): - print(' '.join(shlex.quote(c) for c in cmd)) - proc = sp.Popen(cmd, - stdout=sp.DEVNULL if not args.get('verbose', False) else None) - proc.wait() - if proc.returncode != 0: - sys.exit(-1) - - # find results - build_results = co.defaultdict(lambda: 0) - # notes - # - filters type - # - discards internal/debug functions (leading __) +def collect(paths, **args): + results = co.defaultdict(lambda: 0) pattern = re.compile( - '^(?P[^:]+)' + - ':(?P[0-9a-fA-F]+)' + + '^(?P[0-9a-fA-F]+)' + ' (?P[%s])' % re.escape(args['type']) + - ' (?!__)(?P.+?)$') - for build in builds: - path = args['sizedir'] + '/' + build - with open(path + '.size') as size: - for line in size: - match = pattern.match(line) - if match: - file = match.group('file') - # discard .8449 suffixes created by optimizer - name = re.sub('\.[0-9]+', '', match.group('name')) - size = int(match.group('size'), 16) - build_results[(build, file, name)] += size + ' (?P.+?)$') + for path in paths: + # note nm-tool may contain extra args + cmd = args['nm_tool'] + ['--size-sort', path] + if args.get('verbose'): + print(' '.join(shlex.quote(c) for c in cmd)) + proc = sp.Popen(cmd, stdout=sp.PIPE, universal_newlines=True) + for line in proc.stdout: + m = pattern.match(line) + if m: + results[(path, m.group('func'))] += int(m.group('size'), 16) - results = [] - for (build, file, name), size in build_results.items(): - if build == 'code': - results.append((file, name, size, False)) - elif (build == 'code_inlined' and - ('inlined', file, name) not in results): - results.append((file, name, size, True)) + flat_results = [] + for (file, func), size in results.items(): + # map to source files + if args.get('build_dir'): + file = re.sub('%s/*' % re.escape(args['build_dir']), '', file) + # discard internal functions + if func.startswith('__'): + continue + # discard .8449 suffixes created by optimizer + func = re.sub('\.[0-9]+', '', func) + flat_results.append((file, func, size)) - return results + return flat_results def main(**args): - # find results - if not args.get('input', None): - results = build(**args) + # find sizes + if not args.get('use', None): + # find .o files + paths = [] + for path in args['obj_paths']: + if os.path.isdir(path): + path = path + '/*.o' + + for path in glob.glob(path): + paths.append(path) + + if not paths: + print('no .obj files found in %r?' % args['obj_paths']) + sys.exit(-1) + + results = collect(paths, **args) else: - with open(args['input']) as f: + with open(args['use']) as f: r = csv.DictReader(f) results = [ ( result['file'], - result['name'], - int(result['size']), - bool(int(result.get('inlined', 0)))) - for result in r - if (not bool(int(result.get('inlined', 0))) or - args.get('inlined', False))] + result['function'], + int(result['size'])) + for result in r] total = 0 - for _, _, size, inlined in results: - if not inlined: - total += size + for _, _, size in results: + total += size # find previous results? - if args.get('diff', None): + if args.get('diff'): with open(args['diff']) as f: r = csv.DictReader(f) prev_results = [ ( result['file'], - result['name'], - int(result['size']), - bool(int(result.get('inlined', 0)))) - for result in r - if (not bool(int(result.get('inlined', 0))) or - args.get('inlined', False))] + result['function'], + int(result['size'])) + for result in r] prev_total = 0 - for _, _, size, inlined in prev_results: - if not inlined: - prev_total += size + for _, _, size in prev_results: + prev_total += size # write results to CSV - if args.get('output', None): - results.sort(key=lambda x: (-x[2], x)) + if args.get('output'): with open(args['output'], 'w') as f: w = csv.writer(f) - if args.get('inlined', False): - w.writerow(['file', 'name', 'size', 'inlined']) - for file, name, size, inlined in results: - w.writerow((file, name, size, int(inlined))) - else: - w.writerow(['file', 'name', 'size']) - for file, name, size, inlined in results: - w.writerow((file, name, size)) + w.writerow(['file', 'function', 'size']) + for file, func, size in sorted(results): + w.writerow((file, func, size)) # print results - def dedup_functions(results): - functions = co.defaultdict(lambda: (0, True)) - for _, name, size, inlined in results: - if not inlined: - functions[name] = (functions[name][0] + size, False) - for _, name, size, inlined in results: - if inlined and functions[name][1]: - functions[name] = (functions[name][0] + size, True) - return functions + def dedup_entries(results, by='function'): + entries = co.defaultdict(lambda: 0) + for file, func, size in results: + entry = (file if by == 'file' else func) + entries[entry] += size + return entries - def dedup_files(results): - files = co.defaultdict(lambda: 0) - for file, _, size, inlined in results: - if not inlined: - files[file] += size - return files - - def diff_sizes(olds, news): - diff = co.defaultdict(lambda: (None, None, None)) + def diff_entries(olds, news): + diff = co.defaultdict(lambda: (0, 0, 0, 0)) for name, new in news.items(): - diff[name] = (None, new, new) + diff[name] = (0, new, new, 1.0) for name, old in olds.items(): - new = diff[name][1] or 0 - diff[name] = (old, new, new-old) + _, new, _, _ = diff[name] + diff[name] = (old, new, new-old, (new-old)/old if old else 1.0) return diff - def print_header(name=''): - if not args.get('diff', False): - print('%-40s %7s' % (name, 'size')) + def print_header(by=''): + if not args.get('diff'): + print('%-36s %7s' % (by, 'size')) else: - print('%-40s %7s %7s %7s' % (name, 'old', 'new', 'diff')) + print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff')) - def print_functions(): - functions = dedup_functions(results) - functions = { - name+' (inlined)' if inlined else name: size - for name, (size, inlined) in functions.items()} + def print_entries(by='function'): + entries = dedup_entries(results, by=by) - if not args.get('diff', None): - print_header('function') - for name, size in sorted(functions.items(), - key=lambda x: (-x[1], x)): - print("%-40s %7d" % (name, size)) + if not args.get('diff'): + print_header(by=by) + for name, size in sorted(entries.items()): + print("%-36s %7d" % (name, size)) else: - prev_functions = dedup_functions(prev_results) - prev_functions = { - name+' (inlined)' if inlined else name: size - for name, (size, inlined) in prev_functions.items()} - diff = diff_sizes(functions, prev_functions) - print_header('function (%d added, %d removed)' % ( - sum(1 for old, _, _ in diff.values() if not old), - sum(1 for _, new, _ in diff.values() if not new))) - for name, (old, new, diff) in sorted(diff.items(), - key=lambda x: (-(x[1][2] or 0), x)): - if diff or args.get('all', False): - print("%-40s %7s %7s %+7d%s" % ( - name, old or "-", new or "-", diff, - ' (%+.2f%%)' % (100*((new-old)/old)) - if old and new else - '')) - - def print_files(): - files = dedup_files(results) - - if not args.get('diff', None): - print_header('file') - for file, size in sorted(files.items(), - key=lambda x: (-x[1], x)): - print("%-40s %7d" % (file, size)) - else: - prev_files = dedup_files(prev_results) - diff = diff_sizes(files, prev_files) - print_header('file (%d added, %d removed)' % ( - sum(1 for old, _, _ in diff.values() if not old), - sum(1 for _, new, _ in diff.values() if not new))) - for name, (old, new, diff) in sorted(diff.items(), - key=lambda x: (-(x[1][2] or 0), x)): - if diff or args.get('all', False): - print("%-40s %7s %7s %+7d%s" % ( - name, old or "-", new or "-", diff, - ' (%+.2f%%)' % (100*((new-old)/old)) - if old and new else - '')) + prev_entries = dedup_entries(prev_results, by=by) + diff = diff_entries(prev_entries, entries) + print_header(by='%s (%d added, %d removed)' % (by, + sum(1 for old, _, _, _ in diff.values() if not old), + sum(1 for _, new, _, _ in diff.values() if not new))) + for name, (old, new, diff, ratio) in sorted(diff.items(), + key=lambda x: (-x[1][3], x)): + if ratio or args.get('all'): + print("%-36s %7s %7s %+7d%s" % (name, + old or "-", + new or "-", + diff, + ' (%+.1f%%)' % (100*ratio) if ratio else '')) def print_totals(): - if not args.get('diff', None): - print("%-40s %7d" % ('TOTALS', total)) + if not args.get('diff'): + print("%-36s %7d" % ('TOTAL', total)) else: - print("%-40s %7s %7s %+7d%s" % ( - 'TOTALS', prev_total, total, total-prev_total, - ' (%+.2f%%)' % (100*((total-prev_total)/total)) - if prev_total and total else - '')) + ratio = (total-prev_total)/prev_total if prev_total else 1.0 + print("%-36s %7s %7s %+7d%s" % ( + 'TOTAL', + prev_total if prev_total else '-', + total if total else '-', + total-prev_total, + ' (%+.1f%%)' % (100*ratio) if ratio else '')) - def print_status(): - if not args.get('diff', None): - print(total) - else: - print("%d (%+.2f%%)" % (total, 100*((total-prev_total)/total))) - - if args.get('quiet', False): + if args.get('quiet'): pass - elif args.get('status', False): - print_status() - elif args.get('summary', False): + elif args.get('summary'): print_header() print_totals() - elif args.get('files', False): - print_files() + elif args.get('files'): + print_entries(by='file') print_totals() else: - print_functions() + print_entries(by='function') print_totals() if __name__ == "__main__": @@ -294,35 +174,32 @@ if __name__ == "__main__": import sys parser = argparse.ArgumentParser( description="Find code size at the function level.") - parser.add_argument('sizedir', nargs='?', default=SIZEDIR, - help="Directory to store intermediary results. Defaults " - "to \"%s\"." % SIZEDIR) - parser.add_argument('-D', action='append', default=[], - help="Specify compile-time define.") + parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS, + help="Description of where to find *.o files. May be a directory \ + or a list of paths. Defaults to %r." % OBJ_PATHS) parser.add_argument('-v', '--verbose', action='store_true', help="Output commands that run behind the scenes.") - parser.add_argument('-i', '--input', - help="Don't compile and find code sizes, instead use this CSV file.") parser.add_argument('-o', '--output', help="Specify CSV file to store results.") + parser.add_argument('-u', '--use', + help="Don't compile and find code sizes, instead use this CSV file.") parser.add_argument('-d', '--diff', help="Specify CSV file to diff code size against.") parser.add_argument('-a', '--all', action='store_true', help="Show all functions, not just the ones that changed.") - parser.add_argument('--inlined', action='store_true', - help="Run a second compilation to find the sizes of functions normally " - "removed by optimizations. These will be shown as \"*.inlined\" " - "functions, and will not be included in the total.") parser.add_argument('--files', action='store_true', help="Show file-level code sizes. Note this does not include padding! " "So sizes may differ from other tools.") parser.add_argument('-s', '--summary', action='store_true', help="Only show the total code size.") - parser.add_argument('-S', '--status', action='store_true', - help="Show minimum info useful for a single-line status.") parser.add_argument('-q', '--quiet', action='store_true', help="Don't show anything, useful with -o.") parser.add_argument('--type', default='tTrRdDbB', help="Type of symbols to report, this uses the same single-character " "type-names emitted by nm. Defaults to %(default)r.") + parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(), + help="Path to the nm tool to use.") + parser.add_argument('--build-dir', + help="Specify the relative build directory. Used to map object files \ + to the correct source files.") sys.exit(main(**vars(parser.parse_args()))) diff --git a/scripts/coverage.py b/scripts/coverage.py index 6e51372..6f1f54f 100755 --- a/scripts/coverage.py +++ b/scripts/coverage.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # - +# Parse and report coverage info from .info files generated by lcov +# import os import glob import csv @@ -8,8 +9,8 @@ import re import collections as co import bisect as b -INFO_PATHS = 'tests/*.toml.info' +INFO_PATHS = ['tests/*.toml.info'] def collect(paths, **args): file = None @@ -65,14 +66,14 @@ def collect(paths, **args): def main(**args): # find coverage - if not args.get('input', None): + if not args.get('use'): # find *.info files paths = [] for path in args['info_paths']: if os.path.isdir(path): path = path + '/*.gcov' - for path in glob.glob(path, recursive=True): + for path in glob.glob(path): paths.append(path) if not paths: @@ -81,7 +82,7 @@ def main(**args): results = collect(paths, **args) else: - with open(args['input']) as f: + with open(args['use']) as f: r = csv.DictReader(f) results = [ ( result['file'], @@ -96,7 +97,7 @@ def main(**args): total_count += count # find previous results? - if args.get('diff', None): + if args.get('diff'): with open(args['diff']) as f: r = csv.DictReader(f) prev_results = [ @@ -112,12 +113,11 @@ def main(**args): prev_total_count += count # write results to CSV - if args.get('output', None): - results.sort(key=lambda x: (-(x[3]-x[2]), -x[3], x)) + if args.get('output'): with open(args['output'], 'w') as f: w = csv.writer(f) w.writerow(['file', 'function', 'hits', 'count']) - for file, func, hits, count in results: + for file, func, hits, count in sorted(results): w.writerow((file, func, hits, count)) # print results @@ -130,97 +130,95 @@ def main(**args): return entries def diff_entries(olds, news): - diff = co.defaultdict(lambda: (None, None, None, None, None, None)) + diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0)) for name, (new_hits, new_count) in news.items(): diff[name] = ( 0, 0, new_hits, new_count, - new_hits, new_count) + new_hits, new_count, + (new_hits/new_count if new_count else 1.0) - 1.0) for name, (old_hits, old_count) in olds.items(): - new_hits = diff[name][2] or 0 - new_count = diff[name][3] or 0 + _, _, new_hits, new_count, _, _, _ = diff[name] diff[name] = ( old_hits, old_count, new_hits, new_count, - new_hits-old_hits, new_count-old_count) + new_hits-old_hits, new_count-old_count, + ((new_hits/new_count if new_count else 1.0) + - (old_hits/old_count if old_count else 1.0))) return diff def print_header(by=''): - if not args.get('diff', False): - print('%-36s %11s' % (by, 'hits/count')) + if not args.get('diff'): + print('%-36s %19s' % (by, 'hits/line')) else: - print('%-36s %11s %11s %11s' % (by, 'old', 'new', 'diff')) + print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff')) def print_entries(by='function'): entries = dedup_entries(results, by=by) - if not args.get('diff', None): + if not args.get('diff'): print_header(by=by) - for name, (hits, count) in sorted(entries.items(), - key=lambda x: (-(x[1][1]-x[1][0]), -x[1][1], x)): - print("%-36s %11s (%.2f%%)" % (name, - '%d/%d' % (hits, count), - 100*(hits/count if count else 1.0))) + for name, (hits, count) in sorted(entries.items()): + print("%-36s %11s %7s" % (name, + '%d/%d' % (hits, count) + if count else '-', + '%.1f%%' % (100*hits/count) + if count else '-')) else: prev_entries = dedup_entries(prev_results, by=by) diff = diff_entries(prev_entries, entries) print_header(by='%s (%d added, %d removed)' % (by, - sum(1 for _, old, _, _, _, _ in diff.values() if not old), - sum(1 for _, _, _, new, _, _ in diff.values() if not new))) + sum(1 for _, old, _, _, _, _, _ in diff.values() if not old), + sum(1 for _, _, _, new, _, _, _ in diff.values() if not new))) for name, ( old_hits, old_count, new_hits, new_count, - diff_hits, diff_count) in sorted(diff.items(), - key=lambda x: ( - -(x[1][5]-x[1][4]), -x[1][5], -x[1][3], x)): - ratio = ((new_hits/new_count if new_count else 1.0) - - (old_hits/old_count if old_count else 1.0)) - if diff_hits or diff_count or args.get('all', False): - print("%-36s %11s %11s %11s%s" % (name, + diff_hits, diff_count, ratio) in sorted(diff.items(), + key=lambda x: (-x[1][6], x)): + if ratio or args.get('all'): + print("%-36s %11s %7s %11s %7s %11s%s" % (name, '%d/%d' % (old_hits, old_count) if old_count else '-', + '%.1f%%' % (100*old_hits/old_count) + if old_count else '-', '%d/%d' % (new_hits, new_count) if new_count else '-', + '%.1f%%' % (100*new_hits/new_count) + if new_count else '-', '%+d/%+d' % (diff_hits, diff_count), - ' (%+.2f%%)' % (100*ratio) if ratio else '')) + ' (%+.1f%%)' % (100*ratio) if ratio else '')) def print_totals(): - if not args.get('diff', None): - print("%-36s %11s (%.2f%%)" % ('TOTALS', - '%d/%d' % (total_hits, total_count), - 100*(total_hits/total_count if total_count else 1.0))) + if not args.get('diff'): + print("%-36s %11s %7s" % ('TOTAL', + '%d/%d' % (total_hits, total_count) + if total_count else '-', + '%.1f%%' % (100*total_hits/total_count) + if total_count else '-')) else: ratio = ((total_hits/total_count if total_count else 1.0) - (prev_total_hits/prev_total_count if prev_total_count else 1.0)) - print("%-36s %11s %11s %11s%s" % ('TOTALS', - '%d/%d' % (prev_total_hits, prev_total_count), - '%d/%d' % (total_hits, total_count), + print("%-36s %11s %7s %11s %7s %11s%s" % ('TOTAL', + '%d/%d' % (prev_total_hits, prev_total_count) + if prev_total_count else '-', + '%.1f%%' % (100*prev_total_hits/prev_total_count) + if prev_total_count else '-', + '%d/%d' % (total_hits, total_count) + if total_count else '-', + '%.1f%%' % (100*total_hits/total_count) + if total_count else '-', '%+d/%+d' % (total_hits-prev_total_hits, total_count-prev_total_count), - ' (%+.2f%%)' % (100*ratio) if ratio else '')) + ' (%+.1f%%)' % (100*ratio) if ratio else '')) - def print_status(): - if not args.get('diff', None): - print("%d/%d (%.2f%%)" % (total_hits, total_count, - 100*(total_hits/total_count if total_count else 1.0))) - else: - ratio = ((total_hits/total_count - if total_count else 1.0) - - (prev_total_hits/prev_total_count - if prev_total_count else 1.0)) - print("%d/%d (%+.2f%%)" % (total_hits, total_count, - (100*ratio) if ratio else '')) - - if args.get('quiet', False): + if args.get('quiet'): pass - elif args.get('status', False): - print_status() - elif args.get('summary', False): + elif args.get('summary'): print_header() print_totals() - elif args.get('files', False): + elif args.get('files'): print_entries(by='file') print_totals() else: @@ -231,17 +229,18 @@ if __name__ == "__main__": import argparse import sys parser = argparse.ArgumentParser( - description="Show/manipulate coverage info") - parser.add_argument('info_paths', nargs='*', default=[INFO_PATHS], + description="Parse and report coverage info from .info files \ + generated by lcov") + parser.add_argument('info_paths', nargs='*', default=INFO_PATHS, help="Description of where to find *.info files. May be a directory \ or list of paths. *.info files will be merged to show the total \ - coverage. Defaults to \"%s\"." % INFO_PATHS) + coverage. Defaults to %r." % INFO_PATHS) parser.add_argument('-v', '--verbose', action='store_true', help="Output commands that run behind the scenes.") - parser.add_argument('-i', '--input', - help="Don't do any work, instead use this CSV file.") parser.add_argument('-o', '--output', help="Specify CSV file to store results.") + parser.add_argument('-u', '--use', + help="Don't do any work, instead use this CSV file.") parser.add_argument('-d', '--diff', help="Specify CSV file to diff code size against.") parser.add_argument('-a', '--all', action='store_true', @@ -250,8 +249,6 @@ if __name__ == "__main__": help="Show file-level coverage.") parser.add_argument('-s', '--summary', action='store_true', help="Only show the total coverage.") - parser.add_argument('-S', '--status', action='store_true', - help="Show minimum info useful for a single-line status.") parser.add_argument('-q', '--quiet', action='store_true', help="Don't show anything, useful with -o.") sys.exit(main(**vars(parser.parse_args()))) diff --git a/scripts/test.py b/scripts/test.py index 957702b..65c8104 100755 --- a/scripts/test.py +++ b/scripts/test.py @@ -20,7 +20,7 @@ import pty import errno import signal -TESTDIR = 'tests' +TEST_PATHS = 'tests' RULES = """ define FLATTEN %(path)s%%$(subst /,.,$(target)): $(target) @@ -31,14 +31,15 @@ $(foreach target,$(SRC),$(eval $(FLATTEN))) -include %(path)s*.d .SECONDARY: -%(path)s.test: %(path)s.test.o $(foreach t,$(subst /,.,$(OBJ)),%(path)s.$t) +%(path)s.test: %(path)s.test.o \\ + $(foreach t,$(subst /,.,$(SRC:.c=.o)),%(path)s.$t) $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ """ COVERAGE_RULES = """ %(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage # delete lingering coverage -%(path)s.test: | %(path)s.info.clean +%(path)s.test: | %(path)s.clean .PHONY: %(path)s.clean %(path)s.clean: rm -f %(path)s*.gcda @@ -373,12 +374,17 @@ class TestSuite: self.name = os.path.basename(path) if self.name.endswith('.toml'): self.name = self.name[:-len('.toml')] - self.path = path + if args.get('build_dir'): + self.toml = path + self.path = args['build_dir'] + '/' + path + else: + self.toml = path + self.path = path self.classes = classes self.defines = defines.copy() self.filter = filter - with open(path) as f: + with open(self.toml) as f: # load tests config = toml.load(f) @@ -489,7 +495,7 @@ class TestSuite: def build(self, **args): # build test files - tf = open(self.path + '.test.c.t', 'w') + tf = open(self.path + '.test.tc', 'w') tf.write(GLOBALS) if self.code is not None: tf.write('#line %d "%s"\n' % (self.code_lineno, self.path)) @@ -499,7 +505,7 @@ class TestSuite: for case in self.cases: if case.in_ not in tfs: tfs[case.in_] = open(self.path+'.'+ - case.in_.replace('/', '.')+'.t', 'w') + re.sub('(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w') tfs[case.in_].write('#line 1 "%s"\n' % case.in_) with open(case.in_) as f: for line in f: @@ -556,13 +562,15 @@ class TestSuite: if path is None: mk.write('%s: %s | %s\n' % ( self.path+'.test.c', - self.path, - self.path+'.test.c.t')) + self.toml, + self.path+'.test.tc')) else: mk.write('%s: %s %s | %s\n' % ( self.path+'.'+path.replace('/', '.'), - self.path, path, - self.path+'.'+path.replace('/', '.')+'.t')) + self.toml, + path, + self.path+'.'+re.sub('(\.c)?$', '.tc', + path.replace('/', '.')))) mk.write('\t./scripts/explode_asserts.py $| -o $@\n') self.makefile = self.path + '.mk' @@ -617,7 +625,7 @@ def main(**args): classes = [TestCase] suites = [] - for testpath in args['testpaths']: + for testpath in args['test_paths']: # optionally specified test case/perm testpath, *filter = testpath.split('#') filter = [int(f) for f in filter] @@ -628,9 +636,9 @@ def main(**args): elif os.path.isfile(testpath): testpath = testpath elif testpath.endswith('.toml'): - testpath = TESTDIR + '/' + testpath + testpath = TEST_PATHS + '/' + testpath else: - testpath = TESTDIR + '/' + testpath + '.toml' + testpath = TEST_PATHS + '/' + testpath + '.toml' # find tests for path in glob.glob(testpath): @@ -695,7 +703,7 @@ def main(**args): if not args.get('verbose', False): for line in stdout: sys.stdout.write(line) - sys.exit(-3) + sys.exit(-1) print('built %d test suites, %d test cases, %d permutations' % ( len(suites), @@ -707,7 +715,7 @@ def main(**args): for perm in suite.perms: total += perm.shouldtest(**args) if total != sum(len(suite.perms) for suite in suites): - print('total down to %d permutations' % total) + print('filtered down to %d permutations' % total) # only requested to build? if args.get('build', False): @@ -733,7 +741,7 @@ def main(**args): else: sys.stdout.write( "\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m " - "{perm} failed with {returncode}\n".format( + "{perm} failed\n".format( perm=perm, path=perm.suite.path, lineno=perm.lineno, returncode=perm.result.returncode or 0)) if perm.result.stdout: @@ -753,7 +761,8 @@ def main(**args): if args.get('coverage', False): # collect coverage info - cmd = (['make', '-f', 'Makefile'] + + # why -j1? lcov doesn't work in parallel because of gcov issues + cmd = (['make', '-j1', '-f', 'Makefile'] + list(it.chain.from_iterable(['-f', m] for m in makefiles)) + [re.sub('\.test$', '.cumul.info', target) for target in targets]) if args.get('verbose', False): @@ -762,7 +771,7 @@ def main(**args): stdout=sp.DEVNULL if not args.get('verbose', False) else None) proc.wait() if proc.returncode != 0: - sys.exit(-3) + sys.exit(-1) if args.get('gdb', False): failure = None @@ -786,12 +795,12 @@ if __name__ == "__main__": import argparse parser = argparse.ArgumentParser( description="Run parameterized tests in various configurations.") - parser.add_argument('testpaths', nargs='*', default=[TESTDIR], + parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS], help="Description of test(s) to run. By default, this is all tests \ found in the \"{0}\" directory. Here, you can specify a different \ directory of tests, a specific file, a suite by name, and even a \ specific test case by adding brackets. For example \ - \"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TESTDIR)) + \"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TEST_PATHS)) parser.add_argument('-D', action='append', default=[], help="Overriding parameter definitions.") parser.add_argument('-v', '--verbose', action='store_true', @@ -823,4 +832,8 @@ if __name__ == "__main__": to accumulate coverage information into *.info files. Note \ coverage is not reset between runs, allowing multiple runs to \ contribute to coverage.") + parser.add_argument('--build-dir', + help="Build relative to the specified directory instead of the \ + current directory.") + sys.exit(main(**vars(parser.parse_args()))) diff --git a/tests/test_alloc.toml b/tests/test_alloc.toml index fa92da5..ab6660e 100644 --- a/tests/test_alloc.toml +++ b/tests/test_alloc.toml @@ -485,7 +485,8 @@ code = ''' [[case]] # split dir test define.LFS_BLOCK_SIZE = 512 define.LFS_BLOCK_COUNT = 1024 -if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024' +if = 'False' +#if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024' code = ''' lfs_format(&lfs, &cfg) => 0; lfs_mount(&lfs, &cfg) => 0; @@ -530,7 +531,8 @@ code = ''' [[case]] # outdated lookahead test define.LFS_BLOCK_SIZE = 512 define.LFS_BLOCK_COUNT = 1024 -if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024' +if = 'False' +#if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024' code = ''' lfs_format(&lfs, &cfg) => 0; lfs_mount(&lfs, &cfg) => 0; @@ -595,7 +597,8 @@ code = ''' [[case]] # outdated lookahead and split dir test define.LFS_BLOCK_SIZE = 512 define.LFS_BLOCK_COUNT = 1024 -if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024' +if = 'False' +#if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024' code = ''' lfs_format(&lfs, &cfg) => 0; lfs_mount(&lfs, &cfg) => 0;