Update Helm release jupyterhub to v4.0.0 #5

name: "Local Integration Tests"
env:
  TEST_USERNAME: "test-user"
  TEST_PASSWORD: "P@sswo3d"
  NEBARI_IMAGE_TAG: "main"
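# Trigger on pull requests and on pushes to main or release branches that touch the workflow
# itself, the tests, scripts, sources, or packaging files; the workflow can also be invoked
# from another workflow (workflow_call) or started manually (workflow_dispatch).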
on:
  pull_request:
    paths:
      - ".github/workflows/test_local_integration.yaml"
      - "tests/**"
      - "scripts/**"
      - "src/**"
      - "pyproject.toml"
      - "pytest.ini"
      - ".cirun.yml"
  push:
    branches:
      - main
      - release/\d{4}.\d{1,2}.\d{1,2}
    paths:
      - ".github/workflows/test_local_integration.yaml"
      - "tests/**"
      - "scripts/**"
      - "src/**"
      - "pyproject.toml"
      - "pytest.ini"
      - ".cirun.yml"
  workflow_call:
    inputs:
      pr_number:
        required: true
        type: string
  workflow_dispatch:
# With cancel-in-progress: true, a newly triggered run in the same concurrency group cancels any
# pending or in-progress runs in that group, so only one run per group is in progress at a time.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
jobs:
  test-local-integration:
    runs-on: "cirun-runner--${{ github.run_id }}"
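    # Run every step in a login shell so the conda environment provisioned below is activated,
    # as recommended by conda-incubator/setup-miniconda.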
    defaults:
      run:
        shell: bash -l {0}
    steps:
      - name: "Checkout Infrastructure"
        uses: actions/checkout@main
        with:
          fetch-depth: 0
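      # fetch-depth: 0 above fetches the full git history and tags instead of a shallow clone.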
      - name: Setup runner for local deployment
        uses: ./.github/actions/setup-local
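      # Only relevant when the run was kicked off from an issue comment: use the hub CLI to
      # check out the pull request identified by the pr_number input.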
      - name: Checkout the branch from the PR that triggered the job
        if: ${{ github.event_name == 'issue_comment' }}
        run: |
          hub version
          hub pr checkout ${{ inputs.pr_number }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
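      # Provision Miniconda with Python 3.11; the CONDA override points the action at the
      # Miniconda install location on the self-hosted runner (hosted runners predefine this variable).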
      - name: Set up Python
        uses: conda-incubator/setup-miniconda@v3
        env:
          CONDA: /home/runnerx/miniconda3
        with:
          auto-update-conda: true
          python-version: "3.11"
          miniconda-version: "latest"
      - name: Install JQ
        run: |
          sudo apt-get update
          sudo apt-get install jq -y
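      # Install Nebari with its development extras and download the Playwright browser binaries.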
      - name: Install Nebari and playwright
        run: |
          pip install .[dev]
          playwright install
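      # The init-local composite action renders a Nebari config for a local deployment and exposes
      # the working directory, config path, and domain outputs consumed by the steps below.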
      - name: Initialize Nebari config for local deployment
        id: init
        uses: ./.github/actions/init-local
      - name: Deploy Nebari
        working-directory: ${{ steps.init.outputs.directory }}
        run: nebari deploy --config ${{ steps.init.outputs.config }} --disable-prompt
      - name: Health check
        uses: ./.github/actions/health-check
        with:
          domain: ${{ steps.init.outputs.domain }}
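      # Create the Keycloak test user that the deployment and Playwright tests authenticate as.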
      - name: Create example-user
        working-directory: ${{ steps.init.outputs.directory }}
        run: |
          nebari keycloak adduser --user "${TEST_USERNAME}" "${TEST_PASSWORD}" --config ${{ steps.init.outputs.config }}
          nebari keycloak listusers --config ${{ steps.init.outputs.config }}
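      # Wait for all workloads in the dev namespace to report ready before running any tests.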
      - name: Await Workloads
        uses: jupyterhub/action-k8s-await-workloads@v3
        with:
          workloads: "" # all
          namespace: "dev"
          timeout: 60
          max-restarts: 0
      ### DEPLOYMENT TESTS
      - name: Deployment Pytests
        env:
          NEBARI_CONFIG_PATH: ${{ steps.init.outputs.config }}
          KEYCLOAK_USERNAME: ${{ env.TEST_USERNAME }}
          KEYCLOAK_PASSWORD: ${{ env.TEST_PASSWORD }}
        run: |
          pytest tests/tests_deployment/ -v -s
      ### USER-JOURNEY TESTS
      - uses: actions/setup-node@v4
        with:
          node-version: 20
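      # envsubst renders .env from .env.tpl using the credentials and URL above; xvfb-run provides
      # a virtual display so the Chromium browser can run in headed mode on the headless runner.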
      - name: Playwright Tests
        env:
          KEYCLOAK_USERNAME: ${{ env.TEST_USERNAME }}
          KEYCLOAK_PASSWORD: ${{ env.TEST_PASSWORD }}
          NEBARI_FULL_URL: "https://${{ steps.init.outputs.domain }}/"
        working-directory: tests/tests_e2e/playwright
        run: |
          # create environment file
          envsubst < .env.tpl > .env
          # run playwright pytest tests in headed mode with the chromium browser
          xvfb-run pytest --browser chromium --slowmo 300 --headed
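      # Upload the Playwright video recordings even when the tests fail (if: always()).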
      - name: Save Playwright recording artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: e2e-playwright
          path: |
            ./tests/tests_e2e/playwright/videos/
      ### CLEANUP AFTER TESTS
      - name: Cleanup nebari deployment
        # Since this step is not critical for most pull requests and takes more than half of the
        # CI time, it only runs on merges to main or on workflow_dispatch, to speed up the
        # feedback cycle.
        if: github.ref_name == 'main' || github.event_name == 'workflow_dispatch'
        working-directory: ${{ steps.init.outputs.directory }}
        run: nebari destroy --config ${{ steps.init.outputs.config }} --disable-prompt