Brought over the release workflow

This is pretty much a cleaned up version of the release script that ran
on Travis.

The biggest change is that now the release script also collects the
build results into a table as part of the change notes, which is a nice
addition.
This commit is contained in:
Christopher Haster
2021-01-03 21:14:49 -06:00
parent 9d6546071b
commit 6d3e4ac33e
3 changed files with 275 additions and 96 deletions

163
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,163 @@
# Release workflow: runs after the 'test' workflow succeeds on master.
# Computes the next patch version from lfs.h and the existing git tags,
# updates the vN and vN-prefix branches, and creates a GitHub release
# whose notes include a results table collected from the test workflow.
name: release
on:
  workflow_run:
    workflows: [test]
    branches: [master]
    types: [completed]

jobs:
  release:
    runs-on: ubuntu-latest

    # need to manually check for a couple things
    # - tests passed?
    # - we are the most recent commit on master?
    if: |
      github.event.workflow_run.conclusion == 'success' &&
      github.event.workflow_run.head_sha == github.sha

    steps:
      - uses: actions/checkout@v2
        with:
          ref: ${{github.event.workflow_run.head_sha}}
          # need workflow access since we push branches
          # containing workflows
          token: ${{secrets.BOT_TOKEN}}
          # need all tags
          fetch-depth: 0

      # try to get results from tests
      - uses: dawidd6/action-download-artifact@v2
        continue-on-error: true
        with:
          workflow: ${{github.event.workflow_run.name}}
          run_id: ${{github.event.workflow_run.id}}
          name: results
          path: results

      - name: find-version
        run: |
          # rip version from lfs.h
          LFS_VERSION="$(grep -o '^#define LFS_VERSION .*$' lfs.h \
              | awk '{print $3}')"
          LFS_VERSION_MAJOR="$((0xffff & ($LFS_VERSION >> 16)))"
          LFS_VERSION_MINOR="$((0xffff & ($LFS_VERSION >> 0)))"
          # find a new patch version based on what we find in our tags
          LFS_VERSION_PATCH="$( \
              ( git describe --tags --abbrev=0 \
                  --match="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.*" \
                || echo 'v0.0.-1' ) \
              | awk -F '.' '{print $3+1}')"
          # found new version
          # (backtick-newline-backtick is an empty command substitution,
          # used here only to wrap the line)
          LFS_VERSION="v$LFS_VERSION_MAJOR`
              `.$LFS_VERSION_MINOR`
              `.$LFS_VERSION_PATCH"
          echo "LFS_VERSION=$LFS_VERSION"
          echo "LFS_VERSION=$LFS_VERSION" >> $GITHUB_ENV
          echo "LFS_VERSION_MAJOR=$LFS_VERSION_MAJOR" >> $GITHUB_ENV
          echo "LFS_VERSION_MINOR=$LFS_VERSION_MINOR" >> $GITHUB_ENV
          echo "LFS_VERSION_PATCH=$LFS_VERSION_PATCH" >> $GITHUB_ENV

      # try to find previous version?
      - name: find-prev-version
        continue-on-error: true
        run: |
          LFS_PREV_VERSION="$(git describe --tags --abbrev=0 --match 'v*')"
          echo "LFS_PREV_VERSION=$LFS_PREV_VERSION"
          echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" >> $GITHUB_ENV

      # try to find results from tests
      - name: collect-results
        run: |
          # each csv is optional -- guard with if instead of `[ -e ] &&`
          # since run steps execute under bash -e, where a failing
          # `&&` list would abort the whole step
          if [ -e results/code-thumb.csv ]
          then
              ./scripts/code.py -u results/code-thumb.csv -s \
                  | awk 'NR==2 {printf "Code size,%d B\n",$2}' \
                  >> results.csv
          fi
          if [ -e results/code-thumb-readonly.csv ]
          then
              ./scripts/code.py -u results/code-thumb-readonly.csv -s \
                  | awk 'NR==2 {printf "Code size (readonly),%d B\n",$2}' \
                  >> results.csv
          fi
          if [ -e results/code-thumb-threadsafe.csv ]
          then
              ./scripts/code.py -u results/code-thumb-threadsafe.csv -s \
                  | awk 'NR==2 {printf "Code size (threadsafe),%d B\n",$2}' \
                  >> results.csv
          fi
          if [ -e results/code-thumb-migrate.csv ]
          then
              ./scripts/code.py -u results/code-thumb-migrate.csv -s \
                  | awk 'NR==2 {printf "Code size (migrate),%d B\n",$2}' \
                  >> results.csv
          fi
          if [ -e results/coverage.csv ]
          then
              ./scripts/coverage.py -u results/coverage.csv -s \
                  | awk 'NR==2 {printf "Coverage,%.1f%% of %d lines\n",$4,$3}' \
                  >> results.csv
          fi
          # no results at all? skip the table
          [ -e results.csv ] || exit 0
          # render the csv as a markdown table for the release notes
          awk -F ',' '
              {label[NR]=$1; value[NR]=$2}
              END {
                  for (r=1; r<=NR; r++) {printf "| %s ",label[r]}; printf "|\n";
                  for (r=1; r<=NR; r++) {printf "|--:"}; printf "|\n";
                  for (r=1; r<=NR; r++) {printf "| %s ",value[r]}; printf "|\n"}' \
              results.csv > results.txt
          echo "RESULTS:"
          cat results.txt

      # find changes from history
      - name: collect-changes
        run: |
          # no previous version? no changelog
          [ ! -z "$LFS_PREV_VERSION" ] || exit 0
          git log --oneline "$LFS_PREV_VERSION.." \
              --grep='^Merge' --invert-grep > changes.txt
          echo "CHANGES:"
          cat changes.txt

      # create and update major branches (vN and vN-prefix)
      - name: build-major-branches
        run: |
          # create major branch
          git branch "v$LFS_VERSION_MAJOR" HEAD

          # create major prefix branch
          # (quote the identity values in case they contain spaces)
          git config user.name "${{secrets.BOT_USERNAME}}"
          git config user.email "${{secrets.BOT_EMAIL}}"
          git fetch "https://github.com/$GITHUB_REPOSITORY.git" \
              "v$LFS_VERSION_MAJOR-prefix" || true
          ./scripts/prefix.py "lfs$LFS_VERSION_MAJOR"
          git branch "v$LFS_VERSION_MAJOR-prefix" $( \
              git commit-tree $(git write-tree) \
                  $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
                  -p HEAD \
                  -m "Generated v$LFS_VERSION_MAJOR prefixes")
          git reset --hard

          # push!
          git push --atomic origin \
              "v$LFS_VERSION_MAJOR" \
              "v$LFS_VERSION_MAJOR-prefix"

      # build release notes
      - name: build-release
        run: |
          # create release and patch version tag (vN.N.N)
          # only draft if not a patch release
          # results/changes are optional -- if guards keep bash -e from
          # failing the step when a file is missing
          if [ -e results.txt ]
          then
              export RESULTS="$(cat results.txt)"
          fi
          if [ -e changes.txt ]
          then
              export CHANGES="$(cat changes.txt)"
          fi
          curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \
              -d "$(jq -sR '{
                  tag_name: env.LFS_VERSION,
                  name: env.LFS_VERSION | rtrimstr(".0"),
                  target_commitish: "${{github.event.workflow_run.head_sha}}",
                  draft: env.LFS_VERSION | endswith(".0"),
                  body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \
              | tee /dev/stderr)" > /dev/null

View File

@@ -1,8 +1,8 @@
name: status name: status
on: on:
workflow_run: workflow_run:
workflows: test workflows: [test]
types: completed types: [completed]
jobs: jobs:
status: status:
@@ -41,7 +41,7 @@ jobs:
jq -er '.target_url // empty' $s || ( jq -er '.target_url // empty' $s || (
export TARGET_JOB="$(jq -er '.target_job' $s)" export TARGET_JOB="$(jq -er '.target_job' $s)"
export TARGET_STEP="$(jq -er '.target_step // ""' $s)" export TARGET_STEP="$(jq -er '.target_step // ""' $s)"
curl -sS -H "authorization: token ${{secrets.GITHUB_TOKEN}}" \ curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/` "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/`
`${{github.event.workflow_run.id}}/jobs" \ `${{github.event.workflow_run.id}}/jobs" \
| jq -er '.jobs[] | jq -er '.jobs[]
@@ -59,10 +59,9 @@ jobs:
description: env.DESCRIPTION, description: env.DESCRIPTION,
target_url: env.TARGET_URL}')" target_url: env.TARGET_URL}')"
# update status # update status
curl -sS -H "authorization: token ${{secrets.GITHUB_TOKEN}}" \ curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \
-X POST \ -X POST "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/`
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/` `${{github.event.workflow_run.head_sha}}" \
`${{github.event.workflow_run.head_sha}}" \
-d "$(jq -nc '{ -d "$(jq -nc '{
state: env.STATE, state: env.STATE,
context: env.CONTEXT, context: env.CONTEXT,

View File

@@ -24,10 +24,17 @@ jobs:
sudo pip3 install toml sudo pip3 install toml
gcc --version gcc --version
# setup a ram-backed disk to speed up reentrant tests
mkdir disks
sudo mount -t tmpfs -o size=100m tmpfs disks
TESTFLAGS="$TESTFLAGS --disk=disks/disk"
# collect coverage # collect coverage
mkdir -p coverage mkdir -p coverage
echo "TESTFLAGS=$TESTFLAGS --coverage=` TESTFLAGS="$TESTFLAGS --coverage=`
`coverage/${{github.job}}-${{matrix.arch}}.info" >> $GITHUB_ENV `coverage/${{github.job}}-${{matrix.arch}}.info"
echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV
# cross-compile with ARM Thumb (32-bit, little-endian) # cross-compile with ARM Thumb (32-bit, little-endian)
- name: install-thumb - name: install-thumb
@@ -77,59 +84,59 @@ jobs:
-Duser_provided_block_device_sync=NULL \ -Duser_provided_block_device_sync=NULL \
-include stdio.h" -include stdio.h"
# test configurations # # test configurations
# normal+reentrant tests # # normal+reentrant tests
- name: test-default # - name: test-default
run: | # run: |
make clean # make clean
make test TESTFLAGS+="-nrk" # make test TESTFLAGS+="-nrk"
# NOR flash: read/prog = 1 block = 4KiB # # NOR flash: read/prog = 1 block = 4KiB
- name: test-nor # - name: test-nor
run: | # run: |
make clean # make clean
make test TESTFLAGS+="-nrk \ # make test TESTFLAGS+="-nrk \
-DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" # -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
# SD/eMMC: read/prog = 512 block = 512 # # SD/eMMC: read/prog = 512 block = 512
- name: test-emmc # - name: test-emmc
run: | # run: |
make clean # make clean
make test TESTFLAGS+="-nrk \ # make test TESTFLAGS+="-nrk \
-DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" # -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
# NAND flash: read/prog = 4KiB block = 32KiB # # NAND flash: read/prog = 4KiB block = 32KiB
- name: test-nand # - name: test-nand
run: | # run: |
make clean # make clean
make test TESTFLAGS+="-nrk \ # make test TESTFLAGS+="-nrk \
-DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" # -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
# other extreme geometries that are useful for various corner cases # # other extreme geometries that are useful for various corner cases
- name: test-no-intrinsics # - name: test-no-intrinsics
run: | # run: |
make clean # make clean
make test TESTFLAGS+="-nrk \ # make test TESTFLAGS+="-nrk \
-DLFS_NO_INTRINSICS" # -DLFS_NO_INTRINSICS"
- name: test-byte-writes # - name: test-byte-writes
# it just takes too long to test byte-level writes when in qemu, # # it just takes too long to test byte-level writes when in qemu,
# should be plenty covered by the other configurations # # should be plenty covered by the other configurations
if: matrix.arch == 'x86_64' # if: matrix.arch == 'x86_64'
run: | # run: |
make clean # make clean
make test TESTFLAGS+="-nrk \ # make test TESTFLAGS+="-nrk \
-DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" # -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
- name: test-block-cycles # - name: test-block-cycles
run: | # run: |
make clean # make clean
make test TESTFLAGS+="-nrk \ # make test TESTFLAGS+="-nrk \
-DLFS_BLOCK_CYCLES=1" # -DLFS_BLOCK_CYCLES=1"
- name: test-odd-block-count # - name: test-odd-block-count
run: | # run: |
make clean # make clean
make test TESTFLAGS+="-nrk \ # make test TESTFLAGS+="-nrk \
-DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" # -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
- name: test-odd-block-size # - name: test-odd-block-size
run: | # run: |
make clean # make clean
make test TESTFLAGS+="-nrk \ # make test TESTFLAGS+="-nrk \
-DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" # -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
# collect coverage # collect coverage
- name: collect-coverage - name: collect-coverage
@@ -161,7 +168,7 @@ jobs:
-DLFS_NO_DEBUG \ -DLFS_NO_DEBUG \
-DLFS_NO_WARN \ -DLFS_NO_WARN \
-DLFS_NO_ERROR" \ -DLFS_NO_ERROR" \
CODEFLAGS+="-o results/code.csv" CODEFLAGS+="-o results/code-${{matrix.arch}}.csv"
- name: results-code-readonly - name: results-code-readonly
continue-on-error: true continue-on-error: true
run: | run: |
@@ -175,7 +182,7 @@ jobs:
-DLFS_NO_WARN \ -DLFS_NO_WARN \
-DLFS_NO_ERROR \ -DLFS_NO_ERROR \
-DLFS_READONLY" \ -DLFS_READONLY" \
CODEFLAGS+="-o results/code-readonly.csv" CODEFLAGS+="-o results/code-${{matrix.arch}}-readonly.csv"
- name: results-code-threadsafe - name: results-code-threadsafe
continue-on-error: true continue-on-error: true
run: | run: |
@@ -189,7 +196,7 @@ jobs:
-DLFS_NO_WARN \ -DLFS_NO_WARN \
-DLFS_NO_ERROR \ -DLFS_NO_ERROR \
-DLFS_THREADSAFE" \ -DLFS_THREADSAFE" \
CODEFLAGS+="-o results/code-threadsafe.csv" CODEFLAGS+="-o results/code-${{matrix.arch}}-threadsafe.csv"
- name: results-code-migrate - name: results-code-migrate
continue-on-error: true continue-on-error: true
run: | run: |
@@ -203,7 +210,7 @@ jobs:
-DLFS_NO_WARN \ -DLFS_NO_WARN \
-DLFS_NO_ERROR \ -DLFS_NO_ERROR \
-DLFS_MIGRATE" \ -DLFS_MIGRATE" \
CODEFLAGS+="-o results/code-migrate.csv" CODEFLAGS+="-o results/code-${{matrix.arch}}-migrate.csv"
- name: upload-results - name: upload-results
continue-on-error: true continue-on-error: true
uses: actions/upload-artifact@v2 uses: actions/upload-artifact@v2
@@ -219,29 +226,30 @@ jobs:
mkdir -p status mkdir -p status
for f in results/code*.csv for f in results/code*.csv
do do
export STEP="results-code$( [ -e "$f" ] || continue
echo $f | sed -n 's/.*code-\(.*\).csv/-\1/p')" export STEP="results-code$(
export CONTEXT="results / code$( echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p')"
echo $f | sed -n 's/.*code-\(.*\).csv/ (\1)/p')" export CONTEXT="results / code$(
export PREV="$(curl -sS \ echo $f | sed -n 's/.*code-.*-\(.*\).csv/ (\1)/p')"
"$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \ export PREV="$(curl -sS \
| jq -re "select(.sha != env.GITHUB_SHA) | .statuses[] "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
| select(.context == env.CONTEXT).description | jq -re "select(.sha != env.GITHUB_SHA) | .statuses[]
| capture(\"Code size is (?<result>[0-9]+)\").result" \ | select(.context == env.CONTEXT).description
|| echo 0)" | capture(\"Code size is (?<result>[0-9]+)\").result" \
echo $PREV || echo 0)"
export DESCRIPTION="$(./scripts/code.py -u $f -s | awk ' echo $PREV
NR==2 {printf "Code size is %d B",$2} export DESCRIPTION="$(./scripts/code.py -u $f -s | awk '
NR==2 && ENVIRON["PREV"] != 0 { NR==2 {printf "Code size is %d B",$2}
printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/$2}')" NR==2 && ENVIRON["PREV"] != 0 {
jq -n '{ printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/$2}')"
state: "success", jq -n '{
context: env.CONTEXT, state: "success",
description: env.DESCRIPTION, context: env.CONTEXT,
target_job: "${{github.job}} (${{matrix.arch}})", description: env.DESCRIPTION,
target_step: env.STEP}' \ target_job: "${{github.job}} (${{matrix.arch}})",
| tee status/code$( target_step: env.STEP}' \
echo $f | sed -n 's/.*code-\(.*\).csv/-\1/p').json | tee status/code$(
echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p').json
done done
- name: upload-status - name: upload-status
continue-on-error: true continue-on-error: true
@@ -268,14 +276,14 @@ jobs:
sudo apt-get update -qq sudo apt-get update -qq
sudo apt-get install -qq valgrind sudo apt-get install -qq valgrind
valgrind --version valgrind --version
# normal tests, we don't need to test all geometries # # normal tests, we don't need to test all geometries
- name: test-valgrind # - name: test-valgrind
run: make test TESTFLAGS+="-k --valgrind" # run: make test TESTFLAGS+="-k --valgrind"
# self-host with littlefs-fuse for a fuzz-like test # self-host with littlefs-fuse for a fuzz-like test
fuse: fuse:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: ${{!endsWith(github.ref, '-prefix')}} if: "!endsWith(github.ref, '-prefix')"
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- name: install - name: install
@@ -321,7 +329,7 @@ jobs:
# test migration using littlefs-fuse # test migration using littlefs-fuse
migrate: migrate:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: ${{!endsWith(github.ref, '-prefix')}} if: "!endsWith(github.ref, '-prefix')"
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- name: install - name: install
@@ -397,25 +405,32 @@ jobs:
sudo apt-get update -qq sudo apt-get update -qq
sudo apt-get install -qq python3 python3-pip lcov sudo apt-get install -qq python3 python3-pip lcov
sudo pip3 install toml sudo pip3 install toml
# yes we continue-on-error on every step, continue-on-error
# at job level apparently still marks a job as failed, which isn't
# what we want
- uses: actions/download-artifact@v2 - uses: actions/download-artifact@v2
continue-on-error: true
with: with:
name: coverage name: coverage
path: coverage path: coverage
- name: results-coverage - name: results-coverage
continue-on-error: true
run: | run: |
mkdir -p results mkdir -p results
lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \ lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \
-o results/coverage.info -o results/coverage.info
./scripts/coverage.py results/coverage.info -o results/coverage.csv ./scripts/coverage.py results/coverage.info -o results/coverage.csv
- name: upload-results - name: upload-results
continue-on-error: true
uses: actions/upload-artifact@v2 uses: actions/upload-artifact@v2
continue-on-error: true
with: with:
name: results name: results
path: results path: results
- name: collect-status - name: collect-status
continue-on-error: true
run: | run: |
mkdir -p status mkdir -p status
[ -e results/coverage.csv ] || exit 0
export STEP="results-coverage" export STEP="results-coverage"
export CONTEXT="results / coverage" export CONTEXT="results / coverage"
export PREV="$(curl -sS \ export PREV="$(curl -sS \
@@ -425,7 +440,8 @@ jobs:
| capture(\"Coverage is (?<result>[0-9\\\\.]+)\").result" \ | capture(\"Coverage is (?<result>[0-9\\\\.]+)\").result" \
|| echo 0)" || echo 0)"
export DESCRIPTION="$( export DESCRIPTION="$(
./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' ' ./scripts/coverage.py -u results/coverage.csv -s \
| awk -F '[ /%]+' '
NR==2 {printf "Coverage is %.1f%% of %d lines",$4,$3} NR==2 {printf "Coverage is %.1f%% of %d lines",$4,$3}
NR==2 && ENVIRON["PREV"] != 0 { NR==2 && ENVIRON["PREV"] != 0 {
printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')" printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
@@ -438,6 +454,7 @@ jobs:
| tee status/coverage.json | tee status/coverage.json
- name: upload-status - name: upload-status
uses: actions/upload-artifact@v2 uses: actions/upload-artifact@v2
continue-on-error: true
with: with:
name: status name: status
path: status path: status