name: Full sync test

on:
  workflow_dispatch:
    inputs:
      network:
        default: 'Mainnet'
        description: 'Network to deploy: Mainnet or Testnet'
        required: true
      checkpoint_sync:
        default: 'true'
        description: 'Configures `zebrad` to use as many checkpoints as possible'
        required: true

  pull_request:
    branches:
      - main
    paths:
      # code and tests (including full sync acceptance test changes)
      # TODO: ignore changes in test code that isn't used in the full sync test
      - '**/*.rs'
      # hard-coded checkpoints
      # TODO: ignore changes to proptest seed .txt files
      - '**/*.txt'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # workflow definitions
      - 'docker/**'
      - '.github/workflows/test-full-sync.yml'

  push:
    branches:
      - main
    paths:
      # code and tests (including full sync acceptance test changes)
      # TODO: ignore changes in test code that isn't used in the full sync test
      - '**/*.rs'
      # hard-coded checkpoints
      # TODO: ignore changes to proptest seed .txt files
      - '**/*.txt'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # workflow definitions
      - 'docker/**'
      - '.github/workflows/test-full-sync.yml'
      - '.github/workflows/docker-image-build.yml'

env:
  NETWORK: Mainnet
  PROJECT_ID: zealous-zebra
  IMAGE_NAME: zebrad-test
  GAR_BASE: us-docker.pkg.dev/zealous-zebra/zebra
  REGION: us-central1
  ZONE: us-central1-a
  MACHINE_TYPE: c2d-standard-16

jobs:
  build:
    # TODO: add `startsWith(github.head_ref, 'mergify/merge-queue/')` to the condition to
    # only run on Mergify head branches, and on manual dispatch:
    # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1
    if: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}
    uses: ./.github/workflows/docker-image-build.yml
    with:
      dockerfile_path: ./docker/Dockerfile
      dockerfile_target: tester
      image_name: zebrad-test
      network: Mainnet
      checkpoint_sync: true
      rust_backtrace: full
      rust_lib_backtrace: full
      colorbt_show_hidden: '1'
      zebra_skip_ipv6_tests: '1'
      rust_log: info

  # Test that Zebra can run a full mainnet sync after a PR is approved
  test-full-sync:
    name: Test full Mainnet sync
    runs-on: ubuntu-latest
    needs: [build]
    permissions:
      contents: 'read'
      id-token: 'write'
    steps:
      - uses: actions/checkout@v3.0.2
        with:
          persist-credentials: false

      - name: Inject slug/short variables
        uses: rlespinasse/github-slug-action@v4
        with:
          short-length: 7

      - name: Downcase network name for disks
        run: |
          NETWORK_CAPS=${{ github.event.inputs.network || env.NETWORK }}
          echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV

      # Setup gcloud CLI
      - name: Authenticate to Google Cloud
        id: auth
        uses: google-github-actions/auth@v0.7.1
        with:
          workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc'
          service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com'
          token_format: 'access_token'

      # Create a Compute Engine virtual machine instance with an attached cache disk
      - name: Create GCP compute instance
        run: |
          gcloud compute instances create-with-container "full-sync-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \
          --boot-disk-size 100GB \
          --boot-disk-type pd-ssd \
          --create-disk name="zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${{ env.NETWORK }}-tip",device-name="zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${{ env.NETWORK }}-tip",size=100GB,type=pd-ssd \
          --container-mount-disk mount-path="/zebrad-cache",name="zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${{ env.NETWORK }}-tip" \
          --container-image debian:buster \
          --container-restart-policy=never \
          --machine-type ${{ env.MACHINE_TYPE }} \
          --scopes cloud-platform \
          --metadata=google-monitoring-enabled=true,google-logging-enabled=true \
          --tags zebrad \
          --zone "${{ env.ZONE }}"
          sleep 60

      # Run the full sync test in a Docker container on the instance, then wait for the
      # container to exit and propagate its exit code to the workflow
      - name: Full sync
        id: full-sync
        run: |
          gcloud compute ssh \
          full-sync-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
          --zone ${{ env.ZONE }} \
          --quiet \
          --ssh-flag="-o ServerAliveInterval=5" \
          --command \
          "docker run -e TEST_FULL_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=600 -t --name full-sync \
          --mount type=bind,source=/mnt/disks/gce-containers-mounts/gce-persistent-disks/zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${{ env.NETWORK }}-tip,target=/zebrad-cache \
          ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }}"

          EXIT_CODE=$(\
          gcloud compute ssh \
          full-sync-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
          --zone ${{ env.ZONE }} \
          --quiet \
          --ssh-flag="-o ServerAliveInterval=5" \
          --command="docker wait full-sync")

          exit ${EXIT_CODE}

      # Get the state database format version, so it can be included in the image name
      - name: Get state version from constants.rs
        run: |
          STATE_VERSION=""

          LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" $GITHUB_WORKSPACE/zebra-state/src/constants.rs | grep -oE "[0-9]+" | tail -n1)
          echo "STATE_VERSION: $LOCAL_STATE_VERSION"

          echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> $GITHUB_ENV

      # Get the final sync height from the container logs, so it can be included in the image description
      - name: Get sync height from logs
        run: |
          SYNC_HEIGHT=""

          DOCKER_LOGS=$(\
          gcloud compute ssh \
          full-sync-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
          --zone ${{ env.ZONE }} \
          --quiet \
          --ssh-flag="-o ServerAliveInterval=5" \
          --command="docker logs full-sync --tail 20")

          SYNC_HEIGHT=$(echo $DOCKER_LOGS | grep -oE 'finished initial sync to chain tip, using gossiped blocks sync_percent=100.000 % current_height=Height\([0-9]+\)' | grep -oE '[0-9]+' | tail -1 || [[ $? == 1 ]])

          echo "SYNC_HEIGHT=$SYNC_HEIGHT" >> $GITHUB_ENV

      # Create an image from the state disk.
      # Force the image creation because the disk is still attached, even though it is not being used by the container.
      - name: Create image from state disk
        run: |
          gcloud compute images create zebrad-cache-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${{ env.NETWORK }}-tip \
          --force \
          --source-disk=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${{ env.NETWORK }}-tip \
          --source-disk-zone=${{ env.ZONE }} \
          --storage-location=us \
          --description="Created from commit ${{ env.GITHUB_SHA_SHORT }} with height ${{ env.SYNC_HEIGHT }}"

      - name: Delete test instance
        # If the `full-sync` step times out (after more than 6 hours), the previous step (creating the image) will be skipped.
        # Even if the instance keeps running, no image will be created, so it's better to delete it.
        if: always()
        continue-on-error: true
        run: |
          INSTANCE=$(gcloud compute instances list --filter=full-sync-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} --format='value(NAME)')
          if [ -z "${INSTANCE}" ]; then
            echo "No instance to delete"
          else
            gcloud compute instances delete "${INSTANCE}" --zone "${{ env.ZONE }}" --delete-disks all --quiet
          fi