Generated v2 prefixes

geky-bot committed 2021-01-20 02:41:58 +00:00
13 changed files with 1556 additions and 584 deletions

.github/workflows/post-release.yml (new file, 26 lines)

@@ -0,0 +1,26 @@
name: post-release
on:
  release:
    branches: [master]
    types: [released]

jobs:
  post-release:
    runs-on: ubuntu-18.04
    steps:
      # trigger post-release in dependency repo, this indirection allows the
      # dependency repo to be updated often without affecting this repo. At
      # the time of this comment, the dependency repo is responsible for
      # creating PRs for other dependent repos post-release.
      - name: trigger-post-release
        continue-on-error: true
        run: |
          curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
            "$GITHUB_API_URL/repos/${{secrets.POST_RELEASE_REPO}}/dispatches" \
            -d "$(jq -n '{
              event_type: "post-release",
              client_payload: {
                repo: env.GITHUB_REPOSITORY,
                version: "${{github.event.release.tag_name}}"}}' \
              | tee /dev/stderr)"
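For reference, the jq template in trigger-post-release expands to a repository_dispatch payload roughly like the following (a local sketch; the repo and version values here are made up, in the workflow they come from GITHUB_REPOSITORY and the release tag):

    $ export GITHUB_REPOSITORY=owner/littlefs
    $ jq -n '{
        event_type: "post-release",
        client_payload: {
          repo: env.GITHUB_REPOSITORY,
          version: "v2.4.0"}}'
    {
      "event_type": "post-release",
      "client_payload": {
        "repo": "owner/littlefs",
        "version": "v2.4.0"
      }
    }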

.github/workflows/release.yml (new file, 215 lines)

@@ -0,0 +1,215 @@
name: release
on:
  workflow_run:
    workflows: [test]
    branches: [master]
    types: [completed]

jobs:
  release:
    runs-on: ubuntu-18.04

    # need to manually check for a couple things
    # - tests passed?
    # - we are the most recent commit on master?
    if: ${{github.event.workflow_run.conclusion == 'success' &&
      github.event.workflow_run.head_sha == github.sha}}

    steps:
      - uses: actions/checkout@v2
        with:
          ref: ${{github.event.workflow_run.head_sha}}
          # need workflow access since we push branches
          # containing workflows
          token: ${{secrets.BOT_TOKEN}}
          # need all tags
          fetch-depth: 0

      # try to get results from tests
      - uses: dawidd6/action-download-artifact@v2
        continue-on-error: true
        with:
          workflow: ${{github.event.workflow_run.name}}
          run_id: ${{github.event.workflow_run.id}}
          name: results
          path: results

      - name: find-version
        run: |
          # rip version from lfs2.h
          LFS2_VERSION="$(grep -o '^#define LFS2_VERSION .*$' lfs2.h \
            | awk '{print $3}')"
          LFS2_VERSION_MAJOR="$((0xffff & ($LFS2_VERSION >> 16)))"
          LFS2_VERSION_MINOR="$((0xffff & ($LFS2_VERSION >> 0)))"

          # find a new patch version based on what we find in our tags
          LFS2_VERSION_PATCH="$( \
            ( git describe --tags --abbrev=0 \
                --match="v$LFS2_VERSION_MAJOR.$LFS2_VERSION_MINOR.*" \
              || echo 'v0.0.-1' ) \
            | awk -F '.' '{print $3+1}')"

          # found new version
          LFS2_VERSION="v$LFS2_VERSION_MAJOR`
            `.$LFS2_VERSION_MINOR`
            `.$LFS2_VERSION_PATCH"
          echo "LFS2_VERSION=$LFS2_VERSION"
          echo "LFS2_VERSION=$LFS2_VERSION" >> $GITHUB_ENV
          echo "LFS2_VERSION_MAJOR=$LFS2_VERSION_MAJOR" >> $GITHUB_ENV
          echo "LFS2_VERSION_MINOR=$LFS2_VERSION_MINOR" >> $GITHUB_ENV
          echo "LFS2_VERSION_PATCH=$LFS2_VERSION_PATCH" >> $GITHUB_ENV
      # try to find previous version?
      - name: find-prev-version
        continue-on-error: true
        run: |
          LFS2_PREV_VERSION="$(git describe --tags --abbrev=0 --match 'v*')"
          echo "LFS2_PREV_VERSION=$LFS2_PREV_VERSION"
          echo "LFS2_PREV_VERSION=$LFS2_PREV_VERSION" >> $GITHUB_ENV

      # try to find results from tests
      - name: collect-results
        run: |
          # previous results to compare against?
          [ -n "$LFS2_PREV_VERSION" ] && curl -sS \
            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/`
              `status/$LFS2_PREV_VERSION" \
            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
            >> prev-results.json \
            || true

          # unfortunately these each have their own format
          [ -e results/code-thumb.csv ] && ( \
            export PREV="$(jq -re '
                select(.context == "results / code").description
                | capture("Code size is (?<result>[0-9]+)").result' \
              prev-results.json || echo 0)"
            ./scripts/code.py -u results/code-thumb.csv -s | awk '
              NR==2 {printf "Code size,%d B",$2}
              NR==2 && ENVIRON["PREV"]+0 != 0 {
                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
              NR==2 {printf "\n"}' \
            >> results.csv)
          [ -e results/code-thumb-readonly.csv ] && ( \
            export PREV="$(jq -re '
                select(.context == "results / code (readonly)").description
                | capture("Code size is (?<result>[0-9]+)").result' \
              prev-results.json || echo 0)"
            ./scripts/code.py -u results/code-thumb-readonly.csv -s | awk '
              NR==2 {printf "Code size<br/>(readonly),%d B",$2}
              NR==2 && ENVIRON["PREV"]+0 != 0 {
                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
              NR==2 {printf "\n"}' \
            >> results.csv)
          [ -e results/code-thumb-threadsafe.csv ] && ( \
            export PREV="$(jq -re '
                select(.context == "results / code (threadsafe)").description
                | capture("Code size is (?<result>[0-9]+)").result' \
              prev-results.json || echo 0)"
            ./scripts/code.py -u results/code-thumb-threadsafe.csv -s | awk '
              NR==2 {printf "Code size<br/>(threadsafe),%d B",$2}
              NR==2 && ENVIRON["PREV"]+0 != 0 {
                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
              NR==2 {printf "\n"}' \
            >> results.csv)
          [ -e results/code-thumb-migrate.csv ] && ( \
            export PREV="$(jq -re '
                select(.context == "results / code (migrate)").description
                | capture("Code size is (?<result>[0-9]+)").result' \
              prev-results.json || echo 0)"
            ./scripts/code.py -u results/code-thumb-migrate.csv -s | awk '
              NR==2 {printf "Code size<br/>(migrate),%d B",$2}
              NR==2 && ENVIRON["PREV"]+0 != 0 {
                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
              NR==2 {printf "\n"}' \
            >> results.csv)
          [ -e results/code-thumb-error-asserts.csv ] && ( \
            export PREV="$(jq -re '
                select(.context == "results / code (error-asserts)").description
                | capture("Code size is (?<result>[0-9]+)").result' \
              prev-results.json || echo 0)"
            ./scripts/code.py -u results/code-thumb-error-asserts.csv -s | awk '
              NR==2 {printf "Code size<br/>(error-asserts),%d B",$2}
              NR==2 && ENVIRON["PREV"]+0 != 0 {
                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
              NR==2 {printf "\n"}' \
            >> results.csv)
          [ -e results/coverage.csv ] && ( \
            export PREV="$(jq -re '
                select(.context == "results / coverage").description
                | capture("Coverage is (?<result>[0-9\\.]+)").result' \
              prev-results.json || echo 0)"
            ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' '
              NR==2 {printf "Coverage,%.1f%% of %d lines",$4,$3}
              NR==2 && ENVIRON["PREV"]+0 != 0 {
                printf " (%+.1f%%)",$4-ENVIRON["PREV"]}
              NR==2 {printf "\n"}' \
            >> results.csv)

          # transpose to GitHub table
          [ -e results.csv ] || exit 0
          awk -F ',' '
            {label[NR]=$1; value[NR]=$2}
            END {
              for (r=1; r<=NR; r++) {printf "| %s ",label[r]}; printf "|\n";
              for (r=1; r<=NR; r++) {printf "|:--"}; printf "|\n";
              for (r=1; r<=NR; r++) {printf "| %s ",value[r]}; printf "|\n"}' \
            results.csv > results.txt
          echo "RESULTS:"
          cat results.txt
      # find changes from history
      - name: collect-changes
        run: |
          [ -n "$LFS2_PREV_VERSION" ] || exit 0
          # use explicit link to github commit so that release notes can
          # be copied elsewhere
          git log "$LFS2_PREV_VERSION.." \
            --grep='^Merge' --invert-grep \
            --format="format:[\`%h\`](`
              `https://github.com/$GITHUB_REPOSITORY/commit/%h) %s" \
            > changes.txt
          echo "CHANGES:"
          cat changes.txt

      # create and update major branches (vN and vN-prefix)
      - name: create-major-branches
        run: |
          # create major branch
          git branch "v$LFS2_VERSION_MAJOR" HEAD

          # create major prefix branch
          git config user.name ${{secrets.BOT_USER}}
          git config user.email ${{secrets.BOT_EMAIL}}
          git fetch "https://github.com/$GITHUB_REPOSITORY.git" \
            "v$LFS2_VERSION_MAJOR-prefix" || true
          ./scripts/prefix.py "lfs2$LFS2_VERSION_MAJOR"
          git branch "v$LFS2_VERSION_MAJOR-prefix" $( \
            git commit-tree $(git write-tree) \
              $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
              -p HEAD \
              -m "Generated v$LFS2_VERSION_MAJOR prefixes")
          git reset --hard

          # push!
          git push --atomic origin \
            "v$LFS2_VERSION_MAJOR" \
            "v$LFS2_VERSION_MAJOR-prefix"

      # build release notes
      - name: create-release
        run: |
          # create release and patch version tag (vN.N.N)
          # only draft if not a patch release
          [ -e results.txt ] && export RESULTS="$(cat results.txt)"
          [ -e changes.txt ] && export CHANGES="$(cat changes.txt)"
          curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \
            -d "$(jq -n '{
              tag_name: env.LFS2_VERSION,
              name: env.LFS2_VERSION | rtrimstr(".0"),
              target_commitish: "${{github.event.workflow_run.head_sha}}",
              draft: env.LFS2_VERSION | endswith(".0"),
              body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \
            | tee /dev/stderr)"
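A quick local sketch of the find-version arithmetic above (the LFS2_VERSION value is an example; the workflow reads the real one from lfs2.h):

    $ LFS2_VERSION=0x00020003    # example: major 2, minor 3
    $ echo "$((0xffff & ($LFS2_VERSION >> 16))).$((0xffff & ($LFS2_VERSION >> 0)))"
    2.3
    $ # with no existing v2.3.* tag, git describe fails, the fallback echoes
    $ # v0.0.-1, and awk's $3+1 turns that into patch version 0, i.e. v2.3.0
    $ echo 'v0.0.-1' | awk -F '.' '{print $3+1}'
    0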

.github/workflows/status.yml (new file, 55 lines)

@@ -0,0 +1,55 @@
name: status
on:
  workflow_run:
    workflows: [test]
    types: [completed]

jobs:
  status:
    runs-on: ubuntu-18.04
    steps:
      # custom statuses?
      - uses: dawidd6/action-download-artifact@v2
        continue-on-error: true
        with:
          workflow: ${{github.event.workflow_run.name}}
          run_id: ${{github.event.workflow_run.id}}
          name: status
          path: status
      - name: update-status
        continue-on-error: true
        run: |
          ls status
          for s in $(shopt -s nullglob ; echo status/*.json)
          do
            # parse requested status
            export STATE="$(jq -er '.state' $s)"
            export CONTEXT="$(jq -er '.context' $s)"
            export DESCRIPTION="$(jq -er '.description' $s)"
            # help lookup URL for job/steps because GitHub makes
            # it VERY HARD to link to specific jobs
            export TARGET_URL="$(
              jq -er '.target_url // empty' $s || (
                export TARGET_JOB="$(jq -er '.target_job' $s)"
                export TARGET_STEP="$(jq -er '.target_step // ""' $s)"
                curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \
                  "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/`
                    `${{github.event.workflow_run.id}}/jobs" \
                  | jq -er '.jobs[]
                    | select(.name == env.TARGET_JOB)
                    | .html_url
                      + "?check_suite_focus=true"
                      + ((.steps[]
                        | select(.name == env.TARGET_STEP)
                        | "#step:\(.number):0") // "")'))"
            # update status
            curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/`
                `${{github.event.workflow_run.head_sha}}" \
              -d "$(jq -n '{
                state: env.STATE,
                context: env.CONTEXT,
                description: env.DESCRIPTION,
                target_url: env.TARGET_URL}' \
              | tee /dev/stderr)"
          done
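For reference, each status/*.json file consumed by update-status is produced by the collect-status steps in test.yml and looks roughly like this (field values are examples):

    {
      "state": "success",
      "context": "results / code",
      "description": "Code size is 25104 B (+0.4%)",
      "target_job": "test (thumb)",
      "target_step": "results-code"
    }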

.github/workflows/test.yml (new file, 446 lines)

@@ -0,0 +1,446 @@
name: test
on: [push, pull_request]

env:
  CFLAGS: -Werror
  MAKEFLAGS: -j

jobs:
  # run tests
  test:
    runs-on: ubuntu-18.04
    strategy:
      fail-fast: false
      matrix:
        arch: [x86_64, thumb, mips, powerpc]

    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need toml, also pip3 isn't installed by default?
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip lcov
          sudo pip3 install toml
          gcc --version

          # setup a ram-backed disk to speed up reentrant tests
          mkdir disks
          sudo mount -t tmpfs -o size=100m tmpfs disks
          TESTFLAGS="$TESTFLAGS --disk=disks/disk"

          # collect coverage
          mkdir -p coverage
          TESTFLAGS="$TESTFLAGS --coverage=`
            `coverage/${{github.job}}-${{matrix.arch}}.info"

          echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV

      # cross-compile with ARM Thumb (32-bit, little-endian)
      - name: install-thumb
        if: ${{matrix.arch == 'thumb'}}
        run: |
          sudo apt-get install -qq \
            gcc-arm-linux-gnueabi \
            libc6-dev-armel-cross \
            qemu-user
          echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV
          echo "EXEC=qemu-arm" >> $GITHUB_ENV
          arm-linux-gnueabi-gcc --version
          qemu-arm -version

      # cross-compile with MIPS (32-bit, big-endian)
      - name: install-mips
        if: ${{matrix.arch == 'mips'}}
        run: |
          sudo apt-get install -qq \
            gcc-mips-linux-gnu \
            libc6-dev-mips-cross \
            qemu-user
          echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV
          echo "EXEC=qemu-mips" >> $GITHUB_ENV
          mips-linux-gnu-gcc --version
          qemu-mips -version

      # cross-compile with PowerPC (32-bit, big-endian)
      - name: install-powerpc
        if: ${{matrix.arch == 'powerpc'}}
        run: |
          sudo apt-get install -qq \
            gcc-powerpc-linux-gnu \
            libc6-dev-powerpc-cross \
            qemu-user
          echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV
          echo "EXEC=qemu-ppc" >> $GITHUB_ENV
          powerpc-linux-gnu-gcc --version
          qemu-ppc -version

      # make sure example can at least compile
      - name: test-example
        run: |
          sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
          make all CFLAGS+=" \
            -Duser_provided_block_device_read=NULL \
            -Duser_provided_block_device_prog=NULL \
            -Duser_provided_block_device_erase=NULL \
            -Duser_provided_block_device_sync=NULL \
            -include stdio.h"
          rm test.c
      # test configurations
      # normal+reentrant tests
      - name: test-default
        run: |
          make clean
          make test TESTFLAGS+="-nrk"
      # NOR flash: read/prog = 1 block = 4KiB
      - name: test-nor
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS2_READ_SIZE=1 -DLFS2_BLOCK_SIZE=4096"
      # SD/eMMC: read/prog = 512 block = 512
      - name: test-emmc
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS2_READ_SIZE=512 -DLFS2_BLOCK_SIZE=512"
      # NAND flash: read/prog = 4KiB block = 32KiB
      - name: test-nand
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS2_READ_SIZE=4096 -DLFS2_BLOCK_SIZE=\(32*1024\)"
      # other extreme geometries that are useful for various corner cases
      - name: test-no-intrinsics
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS2_NO_INTRINSICS"
      - name: test-byte-writes
        # it just takes too long to test byte-level writes when in qemu,
        # should be plenty covered by the other configurations
        if: ${{matrix.arch == 'x86_64'}}
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS2_READ_SIZE=1 -DLFS2_CACHE_SIZE=1"
      - name: test-block-cycles
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS2_BLOCK_CYCLES=1"
      - name: test-odd-block-count
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS2_BLOCK_COUNT=1023 -DLFS2_LOOKAHEAD_SIZE=256"
      - name: test-odd-block-size
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS2_READ_SIZE=11 -DLFS2_BLOCK_SIZE=704"

      # upload coverage for later coverage
      - name: upload-coverage
        uses: actions/upload-artifact@v2
        with:
          name: coverage
          path: coverage
          retention-days: 1
      # update results
      - name: results-code
        run: |
          mkdir -p results
          make clean
          make code \
            CFLAGS+=" \
              -DLFS2_NO_ASSERT \
              -DLFS2_NO_DEBUG \
              -DLFS2_NO_WARN \
              -DLFS2_NO_ERROR" \
            CODEFLAGS+="-o results/code-${{matrix.arch}}.csv"
      - name: results-code-readonly
        run: |
          mkdir -p results
          make clean
          make code \
            CFLAGS+=" \
              -DLFS2_NO_ASSERT \
              -DLFS2_NO_DEBUG \
              -DLFS2_NO_WARN \
              -DLFS2_NO_ERROR \
              -DLFS2_READONLY" \
            CODEFLAGS+="-o results/code-${{matrix.arch}}-readonly.csv"
      - name: results-code-threadsafe
        run: |
          mkdir -p results
          make clean
          make code \
            CFLAGS+=" \
              -DLFS2_NO_ASSERT \
              -DLFS2_NO_DEBUG \
              -DLFS2_NO_WARN \
              -DLFS2_NO_ERROR \
              -DLFS2_THREADSAFE" \
            CODEFLAGS+="-o results/code-${{matrix.arch}}-threadsafe.csv"
      - name: results-code-migrate
        run: |
          mkdir -p results
          make clean
          make code \
            CFLAGS+=" \
              -DLFS2_NO_ASSERT \
              -DLFS2_NO_DEBUG \
              -DLFS2_NO_WARN \
              -DLFS2_NO_ERROR \
              -DLFS2_MIGRATE" \
            CODEFLAGS+="-o results/code-${{matrix.arch}}-migrate.csv"
      - name: results-code-error-asserts
        run: |
          mkdir -p results
          make clean
          make code \
            CFLAGS+=" \
              -DLFS2_NO_DEBUG \
              -DLFS2_NO_WARN \
              -DLFS2_NO_ERROR \
              -D'LFS2_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'" \
            CODEFLAGS+="-o results/code-${{matrix.arch}}-error-asserts.csv"
      - name: upload-results
        uses: actions/upload-artifact@v2
        with:
          name: results
          path: results

      # limit reporting to Thumb, otherwise there would be too many numbers
      # flying around for the results to be easily readable
      - name: collect-status
        if: ${{matrix.arch == 'thumb'}}
        run: |
          mkdir -p status
          for f in $(shopt -s nullglob ; echo results/code*.csv)
          do
            export STEP="results-code$(
              echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p')"
            export CONTEXT="results / code$(
              echo $f | sed -n 's/.*code-.*-\(.*\).csv/ (\1)/p')"
            export PREV="$(curl -sS \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                | select(.context == env.CONTEXT).description
                | capture("Code size is (?<result>[0-9]+)").result' \
              || echo 0)"
            export DESCRIPTION="$(./scripts/code.py -u $f -s | awk '
              NR==2 {printf "Code size is %d B",$2}
              NR==2 && ENVIRON["PREV"]+0 != 0 {
                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
            jq -n '{
              state: "success",
              context: env.CONTEXT,
              description: env.DESCRIPTION,
              target_job: "${{github.job}} (${{matrix.arch}})",
              target_step: env.STEP}' \
              | tee status/code$(
                echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p').json
          done
      - name: upload-status
        if: ${{matrix.arch == 'thumb'}}
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1
  # run under Valgrind to check for memory errors
  valgrind:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need toml, also pip3 isn't installed by default?
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip
          sudo pip3 install toml
      - name: install-valgrind
        run: |
          sudo apt-get update -qq
          sudo apt-get install -qq valgrind
          valgrind --version
      # normal tests, we don't need to test all geometries
      - name: test-valgrind
        run: make test TESTFLAGS+="-k --valgrind"

  # self-host with littlefs-fuse for a fuzz-like test
  fuse:
    runs-on: ubuntu-18.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need toml, also pip3 isn't installed by default?
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip libfuse-dev
          sudo pip3 install toml
          fusermount -V
          gcc --version
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: littlefs-fuse
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf littlefs-fuse/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs

          # setup disk for littlefs-fuse
          mkdir mount
          sudo chmod a+rw /dev/loop0
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup /dev/loop0 disk
      - name: test
        run: |
          # self-host test
          make -C littlefs-fuse

          littlefs-fuse/lfs2 --format /dev/loop0
          littlefs-fuse/lfs2 /dev/loop0 mount

          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test
  # test migration using littlefs-fuse
  migrate:
    runs-on: ubuntu-18.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need toml, also pip3 isn't installed by default?
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip libfuse-dev
          sudo pip3 install toml
          fusermount -V
          gcc --version
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: v2
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v1
          path: v1
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf v2/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) v2/littlefs

          # setup disk for littlefs-fuse
          mkdir mount
          sudo chmod a+rw /dev/loop0
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup /dev/loop0 disk
      - name: test
        run: |
          # compile v1 and v2
          make -C v1
          make -C v2

          # run self-host test with v1
          v1/lfs2 --format /dev/loop0
          v1/lfs2 /dev/loop0 mount

          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test

          # attempt to migrate
          cd ../..
          fusermount -u mount

          v2/lfs2 --migrate /dev/loop0
          v2/lfs2 /dev/loop0 mount

          # run self-host test with v2 right where we left off
          ls mount
          cd mount/littlefs
          stat .
          ls -flh
          make -B test
  # collect coverage info
  coverage:
    runs-on: ubuntu-18.04
    needs: [test]
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip lcov
          sudo pip3 install toml
      # yes we continue-on-error nearly every step, continue-on-error
      # at job level apparently still marks a job as failed, which isn't
      # what we want
      - uses: actions/download-artifact@v2
        continue-on-error: true
        with:
          name: coverage
          path: coverage
      - name: results-coverage
        continue-on-error: true
        run: |
          mkdir -p results
          lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \
            -o results/coverage.info
          ./scripts/coverage.py results/coverage.info -o results/coverage.csv
      - name: upload-results
        uses: actions/upload-artifact@v2
        with:
          name: results
          path: results
      - name: collect-status
        run: |
          mkdir -p status
          [ -e results/coverage.csv ] || exit 0
          export STEP="results-coverage"
          export CONTEXT="results / coverage"
          export PREV="$(curl -sS \
            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
              | select(.context == env.CONTEXT).description
              | capture("Coverage is (?<result>[0-9\\.]+)").result' \
            || echo 0)"
          export DESCRIPTION="$(
            ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' '
              NR==2 {printf "Coverage is %.1f%% of %d lines",$4,$3}
              NR==2 && ENVIRON["PREV"]+0 != 0 {
                printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
          jq -n '{
            state: "success",
            context: env.CONTEXT,
            description: env.DESCRIPTION,
            target_job: "${{github.job}}",
            target_step: env.STEP}' \
            | tee status/coverage.json
      - name: upload-status
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1
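The collect-status awk above assumes ./scripts/code.py -s prints a header row followed by a totals row whose second column is the total code size in bytes; a minimal local sketch of the percent-delta formatting, with made-up sizes:

    $ export PREV=25000
    $ printf 'name size\nTOTAL 25104\n' | awk '
        NR==2 {printf "Code size is %d B",$2}
        NR==2 && ENVIRON["PREV"]+0 != 0 {
          printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}'
    Code size is 25104 B (+0.4%)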