name: CI

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  pull_request:
    branches:
      - main
  push:
    branches:
      - main
  release:
    types: [published]
  schedule:
    # Nightly releases.
    # These must run after the AllenNLP nightly releases, since the corresponding AllenNLP
    # release will be a dependency.
    - cron: '48 12 * * 1,2,3,4,5'  # early morning (12:48 UTC / 5:48 AM PDT) Monday - Friday

env:
  # NOTE: Need to update `TORCH_VERSION` and `TORCH_VISION_VERSION` for new torch releases.
  TORCH_VERSION: 1.12.0
  TORCH_VISION_VERSION: 0.13.0
  # Change this to invalidate the current cache.
  CACHE_PREFIX: v1
  # Disable tokenizers parallelism because it doesn't help and can cause issues in distributed tests.
  TOKENIZERS_PARALLELISM: 'false'
  # Disable multithreading with OMP because it can lead to deadlocks in distributed tests.
  OMP_NUM_THREADS: '1'

jobs:
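  # Checks that pull requests which modify source files under allennlp_models/ also
  # update CHANGELOG.md.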
  changelog:
    name: CHANGELOG
    runs-on: ubuntu-latest
    if: github.event_name == 'pull_request'
    steps:
      - uses: actions/checkout@v1
      - name: Check if source files have changed
        run: |
          git diff --name-only $(git merge-base origin/main HEAD) | grep '^allennlp_models/.*\.py$' && echo "source_files_changed=true" >> $GITHUB_ENV || echo "source_files_changed=false" >> $GITHUB_ENV
      - name: Check that CHANGELOG has been updated
        if: env.source_files_changed == 'true'
        run: |
          # If this step fails, this means you haven't updated the CHANGELOG.md
          # file with notes on your contribution.
          git diff --name-only $(git merge-base origin/main HEAD) | grep '^CHANGELOG.md$' && echo "Thanks for helping keep our CHANGELOG up-to-date!"
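  # Checks that the model list in the README is up-to-date, i.e. that re-running
  # scripts/update_readme_model_list.py produces no diff.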
  readme:
    name: README
    runs-on: ubuntu-latest
    if: github.event_name == 'pull_request'
    steps:
      - uses: actions/checkout@v1
      - name: Check that models list is up-to-date
        run: |
          # If this step fails, you need to run `python scripts/update_readme_model_list.py`.
          python scripts/update_readme_model_list.py
          git diff --exit-code
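  # Code formatting check. Only black is installed here; the `make format` target is
  # assumed to run black in check-only mode against the repo.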
  style:
    name: Style
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v1
        with:
          python-version: 3.8
      - name: Install requirements
        run: |
          grep -E '^black' dev-requirements.txt | xargs pip install
      - name: Debug info
        run: |
          pip freeze
      - name: Run black
        run: |
          make format
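  # The main lint and test matrix. Each entry under `matrix.task` below defines which
  # runner it needs, which torch platform to install, the command to run, and whether
  # it produces a coverage report.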
  checks:
    name: ${{ matrix.task.name }}
    runs-on: ${{ matrix.task.runs_on }}
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        task:
          - name: Lint
            runs_on: ubuntu-latest
            coverage_report: false
            torch_platform: cpu
            run: |
              make flake8
              make typecheck

          - name: CPU Tests
            runs_on: ubuntu-latest
            coverage_report: true
            torch_platform: cpu
            run: make test-with-cov

          - name: Pretrained Tests
            runs_on: [self-hosted, CPU-only]
            coverage_report: true
            torch_platform: cpu
            run: make test-pretrained

          - name: Validate Configs
            runs_on: [self-hosted, CPU-only]
            coverage_report: true
            torch_platform: cpu
            run: make test-configs

          - name: GPU Tests
            runs_on: [self-hosted, GPU]
            coverage_report: true
            torch_platform: cu113  # Our self-hosted GPU runners currently support CUDA 11.*
            run: make gpu-tests
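    # The following steps are shared by every task in the matrix above.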
    steps:
      - uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v2
        env:
          # Log useful debugging information.
          ACTIONS_STEP_DEBUG: 'true'
        with:
          python-version: 3.8
      - name: Install prerequisites
        run: |
          pip install --upgrade pip setuptools wheel virtualenv
      - name: Set build variables
        shell: bash
        run: |
          # Get the exact Python version to use in the cache key.
          echo "PYTHON_VERSION=$(python --version)" >> $GITHUB_ENV
          echo "RUNNER_ARCH=$(uname -m)" >> $GITHUB_ENV
          # Use week number in cache key so we can refresh the cache weekly.
          echo "WEEK_NUMBER=$(date +%V)" >> $GITHUB_ENV
      - uses: actions/cache@v2
        id: virtualenv-cache
        with:
          path: .venv
          key: ${{ env.CACHE_PREFIX }}-${{ env.WEEK_NUMBER }}-${{ runner.os }}-${{ env.RUNNER_ARCH }}-${{ env.PYTHON_VERSION }}-${{ matrix.task.torch_platform }}-${{ hashFiles('requirements.txt', 'constraints.txt') }}-${{ hashFiles('dev-requirements.txt') }}
      - name: Create virtual environment
        if: steps.virtualenv-cache.outputs.cache-hit != 'true'
        run: |
          test -d .venv || virtualenv -p $(which python) --copies --reset-app-data .venv
          # Activate the environment so the platform-specific torch build is installed into it.
          . .venv/bin/activate
          pip install torch==${{ env.TORCH_VERSION }} torchvision==${{ env.TORCH_VISION_VERSION }} --extra-index-url https://download.pytorch.org/whl/${{ matrix.task.torch_platform }}
      - name: Install requirements
        run: |
          . .venv/bin/activate
          pip install -r requirements.txt -r dev-requirements.txt -c constraints.txt
          python -c 'import nltk; nltk.download("omw-1.4")'
      - name: Show environment info
        if: always()
        run: |
          . .venv/bin/activate
          which python
          pip freeze
      - name: ${{ matrix.task.name }}
        run: |
          . .venv/bin/activate
          ${{ matrix.task.run }}
      - name: Prepare coverage report
        if: matrix.task.coverage_report
        run: |
          mkdir coverage
          mv coverage.xml coverage/
      - name: Save coverage report
        if: matrix.task.coverage_report
        uses: actions/upload-artifact@v1
        with:
          name: ${{ matrix.task.name }}-coverage
          path: ./coverage
      - name: Clean up
        if: always()
        run: |
          . .venv/bin/activate
          pip uninstall -y allennlp
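  # Collects the coverage reports produced by the test tasks above and uploads them
  # to Codecov.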
  upload_coverage:
    name: Upload Coverage Report
    timeout-minutes: 10
    if: github.repository == 'allenai/allennlp-models' && (github.event_name == 'push' || github.event_name == 'pull_request')
    runs-on: ubuntu-latest
    needs: [checks]
    steps:
      # Need to checkout code to get the coverage config.
      - uses: actions/checkout@v2
      - name: Download coverage report from CPU tests
        uses: actions/download-artifact@v1
        with:
          name: CPU Tests-coverage
          path: coverage/cpu_tests
      - name: Download coverage report from GPU tests
        uses: actions/download-artifact@v1
        with:
          name: GPU Tests-coverage
          path: coverage/gpu_tests
      - name: Download coverage report from pretrained tests
        uses: actions/download-artifact@v1
        with:
          name: Pretrained Tests-coverage
          path: coverage/pretrained_tests
      - name: Download coverage report from config tests
        uses: actions/download-artifact@v1
        with:
          name: Validate Configs-coverage
          path: coverage/validate_configs
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v1
        with:
          files: coverage/cpu_tests/coverage.xml,coverage/gpu_tests/coverage.xml,coverage/pretrained_tests/coverage.xml,coverage/validate_configs/coverage.xml
          # Ignore Codecov failures because the Codecov server is not very reliable,
          # and we don't want to report a failure in the GitHub UI just because the
          # coverage report failed to be published.
          fail_ci_if_error: false
  # Builds the API documentation and pushes it to the appropriate folder in the
  # allennlp-docs repo.
  docs:
    name: Docs
    runs-on: ubuntu-latest
    if: github.repository == 'allenai/allennlp-models'
    timeout-minutes: 20
    env:
      TORCH_PLATFORM: cpu
    steps:
      - uses: actions/checkout@v1
      - name: Setup SSH Client 🔑
        if: github.event_name == 'push' || github.event_name == 'release'
        uses: webfactory/[email protected]
        with:
          ssh-private-key: ${{ secrets.DOCS_DEPLOY_KEY }}
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8
      - name: Install prerequisites
        run: |
          pip install --upgrade pip setuptools wheel virtualenv
      - name: Set build variables
        shell: bash
        run: |
          echo "PYTHON_VERSION=$(python --version)" >> $GITHUB_ENV
          echo "RUNNER_ARCH=$(uname -m)" >> $GITHUB_ENV
          echo "WEEK_NUMBER=$(date +%V)" >> $GITHUB_ENV
      - uses: actions/cache@v2
        id: virtualenv-cache
        with:
          path: .venv
          key: ${{ env.CACHE_PREFIX }}-${{ env.WEEK_NUMBER }}-${{ runner.os }}-${{ env.RUNNER_ARCH }}-${{ env.PYTHON_VERSION }}-${{ env.TORCH_PLATFORM }}-${{ hashFiles('requirements.txt', 'constraints.txt') }}-${{ hashFiles('dev-requirements.txt') }}
      - name: Create virtual environment
        if: steps.virtualenv-cache.outputs.cache-hit != 'true'
        run: |
          test -d .venv || virtualenv -p $(which python) --copies --reset-app-data .venv
          # Activate the environment so the CPU torch build is installed into it.
          . .venv/bin/activate
          pip install torch==${{ env.TORCH_VERSION }} torchvision==${{ env.TORCH_VISION_VERSION }} -f https://download.pytorch.org/whl/${{ env.TORCH_PLATFORM }}
      - name: Install requirements
        run: |
          . .venv/bin/activate
          pip install -r requirements.txt -r dev-requirements.txt -c constraints.txt
      - name: Show environment info
        run: |
          . .venv/bin/activate
          which python
          pip freeze
      - name: Prepare build environment
        run: |
          . .venv/bin/activate
          echo "BASE_MODULE=allennlp_models" >> $GITHUB_ENV
          if [[ $GITHUB_EVENT_NAME == 'release' ]]; then
              echo "DOCS_FOLDER=models/${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV;
              echo "BASE_SOURCE_LINK=https://github.com/allenai/allennlp-models/blob/${GITHUB_REF#refs/tags/}/allennlp_models/" >> $GITHUB_ENV;
          else
              echo "DOCS_FOLDER=models/main" >> $GITHUB_ENV;
              echo "BASE_SOURCE_LINK=https://github.com/allenai/allennlp-models/blob/main/allennlp_models/" >> $GITHUB_ENV;
          fi
      - name: Build docs
        run: |
          . .venv/bin/activate
          make build-docs
      - name: Configure Git
        if: github.event_name == 'push' || github.event_name == 'release'
        run: |
          git config --global user.email "[email protected]"
          git config --global user.name "ai2service"
          git config --global push.default simple
      - name: Stage docs
        if: github.event_name == 'push' || github.event_name == 'release'
        run: |
          echo "Staging docs to $DOCS_FOLDER"
          # Checkout allennlp-docs to ~/allennlp-docs
          git clone [email protected]:allenai/allennlp-docs.git ~/allennlp-docs
          # Copy the generated docs to the checked out docs repo
          rm -rf ~/allennlp-docs/$DOCS_FOLDER/
          mkdir -p ~/allennlp-docs/$DOCS_FOLDER
          cp -r site/* ~/allennlp-docs/$DOCS_FOLDER
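      # On releases, rewrite the redirect pages so that models/latest and models/stable
      # point at the corresponding versioned docs folders.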
      - name: Update shortcuts
        if: github.event_name == 'release'
        run: |
          # Fail immediately if any step fails.
          set -e
          . .venv/bin/activate

          LATEST=models/$(./scripts/get_version.py latest)
          STABLE=models/$(./scripts/get_version.py stable)

          cd ~/allennlp-docs/

          echo "Updating models/latest/index.html to point to $LATEST"
          mkdir -p models/latest
          cat >models/latest/index.html << EOL
          <!DOCTYPE html>
          <html>
            <head>
              <meta http-equiv="Refresh" content="0; url=/${LATEST}/" />
            </head>
            <body>
              <p>Please follow <a href="/${LATEST}/">this link</a>.</p>
            </body>
          </html>
          EOL

          echo "Updating models/stable/index.html to point to $STABLE"
          mkdir -p models/stable
          cat >models/stable/index.html << EOL
          <!DOCTYPE html>
          <html>
            <head>
              <meta http-equiv="Refresh" content="0; url=/${STABLE}/" />
            </head>
            <body>
              <p>Please follow <a href="/${STABLE}/">this link</a>.</p>
            </body>
          </html>
          EOL
      - name: Deploy docs
        if: github.event_name == 'push' || github.event_name == 'release'
        run: |
          # And push them up to GitHub
          cd ~/allennlp-docs/
          git add -A
          git commit -m "automated update of the models docs"
          git push
      - name: Clean up
        if: always()
        run: |
          . .venv/bin/activate
          pip uninstall -y allennlp
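  # Builds the sdist and wheel that the test_package, docker, and publish jobs consume
  # (via the 'models-package' artifact).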
  build_package:
    name: Build package
    if: github.repository == 'allenai/allennlp-models'
    runs-on: ubuntu-latest
    timeout-minutes: 20
    env:
      TORCH_PLATFORM: cpu
    steps:
      - uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8
      - name: Install prerequisites
        run: |
          pip install --upgrade pip setuptools wheel virtualenv
      - name: Set build variables
        shell: bash
        run: |
          echo "PYTHON_VERSION=$(python --version)" >> $GITHUB_ENV
          echo "RUNNER_ARCH=$(uname -m)" >> $GITHUB_ENV
          echo "WEEK_NUMBER=$(date +%V)" >> $GITHUB_ENV
      - uses: actions/cache@v2
        id: virtualenv-cache
        with:
          path: .venv
          key: ${{ env.CACHE_PREFIX }}-${{ env.WEEK_NUMBER }}-${{ runner.os }}-${{ env.RUNNER_ARCH }}-${{ env.PYTHON_VERSION }}-${{ env.TORCH_PLATFORM }}-${{ hashFiles('requirements.txt', 'constraints.txt') }}-${{ hashFiles('dev-requirements.txt') }}
      - name: Create virtual environment
        if: steps.virtualenv-cache.outputs.cache-hit != 'true'
        run: |
          test -d .venv || virtualenv -p $(which python) --copies --reset-app-data .venv
          # Activate the environment so the CPU torch build is installed into it.
          . .venv/bin/activate
          pip install torch==${{ env.TORCH_VERSION }} torchvision==${{ env.TORCH_VISION_VERSION }} -f https://download.pytorch.org/whl/${{ env.TORCH_PLATFORM }}
      - name: Install requirements
        run: |
          . .venv/bin/activate
          pip install -r requirements.txt -r dev-requirements.txt -c constraints.txt
      - name: Show environment info
        run: |
          . .venv/bin/activate
          which python
          pip freeze
      - name: Check and set nightly version
        if: github.event_name == 'schedule'
        run: |
          # Verify that the current version is ahead of the last release.
          . .venv/bin/activate
          LATEST=$(scripts/get_version.py latest)
          CURRENT=$(scripts/get_version.py current)
          if [ "$CURRENT" == "$LATEST" ]; then
              echo "Current version needs to be ahead of latest release in order to build nightly release";
              exit 1;
          fi
          echo "ALLENNLP_MODELS_VERSION_SUFFIX=.dev$(date -u +%Y%m%d)" >> $GITHUB_ENV
      - name: Check version and release tag match
        if: github.event_name == 'release'
        run: |
          # Remove 'refs/tags/' to get the actual tag from the release.
          . .venv/bin/activate
          TAG=${GITHUB_REF#refs/tags/};
          VERSION=$(scripts/get_version.py current)
          if [ "$TAG" != "$VERSION" ]; then
              echo "Bad tag or version. Tag $TAG does not match $VERSION";
              exit 1;
          fi
      - name: Set AllenNLP version override
        if: github.event_name == 'release' || github.event_name == 'schedule'
        run: |
          ALLENNLP_VERSION_OVERRIDE="allennlp$(./scripts/get_version.py current --as-range)"
          echo "Setting ALLENNLP_VERSION_OVERRIDE to $ALLENNLP_VERSION_OVERRIDE"
          echo "ALLENNLP_VERSION_OVERRIDE=$ALLENNLP_VERSION_OVERRIDE" >> $GITHUB_ENV
      - name: Build Package
        run: |
          . .venv/bin/activate
          python setup.py bdist_wheel sdist
      - name: Save package
        uses: actions/upload-artifact@v1
        with:
          name: models-package
          path: dist
      - name: Clean up
        if: always()
        run: |
          . .venv/bin/activate
          pip uninstall -y allennlp
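  # Installs the wheel built above into a clean environment for each supported Python
  # version and checks that the models are discoverable.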
  test_package:
    name: Test Package
    if: github.repository == 'allenai/allennlp-models'
    needs: [build_package]  # needs the package artifact created from 'build_package' job.
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python: ['3.7', '3.8', '3.9']
    steps:
      - uses: actions/checkout@v2
      - name: Cleanup directory
        run: |
          # Remove the source code so that it doesn't conflict with the wheel
          # installation.
          rm -rf allennlp_models/
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python }}
      - name: Install requirements
        run: |
          pip install --upgrade pip setuptools wheel
      - name: Download models package
        uses: actions/download-artifact@v1
        with:
          name: models-package
          path: dist
      - name: Install models package
        run: |
          pip install $(ls dist/*.whl)
          python -c 'import nltk; nltk.download("omw-1.4")'
      - name: Patch AllenNLP dependency
        if: github.event_name == 'push' || github.event_name == 'pull_request'
        run: |
          pip install --upgrade "allennlp[all] @ git+https://github.com/allenai/allennlp.git@main"
      - name: Debug info
        run: |
          pip freeze
      - name: Ensure models automatically loaded
        run: |
          ./scripts/ensure_models_found.py
      - name: Ensure versions match
        if: github.event_name == 'release'
        run: |
          ./scripts/ensure_versions_match.py
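  # Builds a Docker image from the package built above and pushes it to Docker Hub.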
  docker:
    name: Docker
    # Only build and push images for releases.
    if: github.repository == 'allenai/allennlp-models' && (github.event_name == 'release' || github.event_name == 'schedule')
    needs: [build_package]  # needs the package artifact created from 'build_package' job.
    runs-on: ubuntu-latest
    strategy:
      matrix:
        cuda: ['11.3']
    steps:
      - uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8
      - name: Download models package
        uses: actions/download-artifact@v1
        with:
          name: models-package
          path: dist
      - name: Set build environment
        env:
          CUDA: ${{ matrix.cuda }}
        run: |
          VERSION="$(scripts/get_version.py current)-cuda${CUDA}"
          echo "DOCKER_IMAGE_NAME=allennlp/models:${VERSION}" >> $GITHUB_ENV
          echo "ALLENNLP_TAG=${VERSION}" >> $GITHUB_ENV
      - name: Build image
        # HACK: we don't publish images for releases in the core library at the moment, so this step would fail.
        if: github.event_name == 'release'
        run: |
          make docker-image DOCKER_IMAGE_NAME="$DOCKER_IMAGE_NAME" ALLENNLP_TAG="$ALLENNLP_TAG"
      - name: Authenticate with Docker Hub
        run: |
          docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
      - name: Upload release image
        # HACK: we don't publish images for releases in the core library at the moment, so this step would fail.
        if: github.event_name == 'release'
        run: |
          docker push $DOCKER_IMAGE_NAME
      - name: Upload latest image
        # HACK: we don't publish images for releases in the core library at the moment, so this step would fail.
        # The CUDA condition ensures we only tag 'latest' for our default CUDA version (currently 11.3).
        if: github.event_name == 'release' && matrix.cuda == '11.3'
        run: |
          docker tag $DOCKER_IMAGE_NAME allennlp/models:latest
          docker push allennlp/models:latest
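  # Publishes the package to PyPI once the other release jobs have succeeded.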
  publish:
    name: PyPI
    if: github.repository == 'allenai/allennlp-models' && (github.event_name == 'release' || github.event_name == 'schedule')
    needs: [style, checks, build_package, test_package, docker, docs]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8
      - name: Install requirements
        run: |
          pip install --upgrade pip setuptools wheel twine
      - name: Download models package
        uses: actions/download-artifact@v1
        with:
          name: models-package
          path: dist
      - name: Upload to PyPI
        env:
          PYPI_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
        run: twine upload -u allennlp -p $PYPI_PASSWORD dist/*